/* xref: /openbmc/linux/arch/x86/xen/irq.c (revision 0ec53ecf) */
#include <linux/hardirq.h>

#include <asm/x86_init.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <xen/events.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include "xen-ops.h"

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void xen_force_evtchn_callback(void)
{
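	/*
	 * Any inexpensive hypercall will do; the version query (cmd 0)
	 * has no side effects, and Xen checks for pending events on the
	 * hypercall return path.
	 */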
	(void)HYPERVISOR_xen_version(0, NULL);
}

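/*
 * The PV_CALLEE_SAVE_REGS_THUNK() invocations below generate assembly
 * thunks that preserve all caller-clobbered registers around the C
 * function, so paravirt call sites can use the lightweight
 * register-saving calling convention.
 */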
static unsigned long xen_save_fl(void)
{
	struct vcpu_info *vcpu;
	unsigned long flags;

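	/*
	 * Per-cpu pointer to this vcpu's vcpu_info (in the shared info
	 * page, or a separately registered per-cpu copy).
	 */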
	vcpu = this_cpu_read(xen_vcpu);

	/* flag has opposite sense of mask */
	flags = !vcpu->evtchn_upcall_mask;

	/* convert to IF type flag
	   -0 -> 0x00000000
	   -1 -> 0xffffffff
	*/
	return (-flags) & X86_EFLAGS_IF;
}
PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);

static void xen_restore_fl(unsigned long flags)
{
	struct vcpu_info *vcpu;

	/* convert from IF type flag */
	flags = !(flags & X86_EFLAGS_IF);

	/* There's a one instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	vcpu = this_cpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = flags;
	preempt_enable_no_resched();

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	if (flags == 0) {
		preempt_check_resched();
		barrier(); /* unmask then check (avoid races) */
		if (unlikely(vcpu->evtchn_upcall_pending))
			xen_force_evtchn_callback();
	}
}
PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);

static void xen_irq_disable(void)
{
	/* There's a one instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	this_cpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
	preempt_enable_no_resched();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);

static void xen_irq_enable(void)
{
	struct vcpu_info *vcpu;

	/* We don't need to worry about being preempted here, since
	   either a) interrupts are disabled, so no preemption, or b)
	   the caller is confused and is trying to re-enable interrupts
	   on an indeterminate processor. */

	vcpu = this_cpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = 0;

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	barrier(); /* unmask then check (avoid races) */
	if (unlikely(vcpu->evtchn_upcall_pending))
		xen_force_evtchn_callback();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);

static void xen_safe_halt(void)
{
	/* Blocking includes an implicit local_irq_enable(). */
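	/* SCHEDOP_block deschedules this vcpu until the next event arrives. */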
	if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
		BUG();
}

static void xen_halt(void)
{
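	/*
	 * On native hardware a hlt with interrupts disabled never wakes
	 * up; the Xen analogue is taking the vcpu offline via VCPUOP_down.
	 */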
	if (irqs_disabled())
		HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	else
		xen_safe_halt();
}

static const struct pv_irq_ops xen_irq_ops __initconst = {
	.save_fl = PV_CALLEE_SAVE(xen_save_fl),
	.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
	.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
	.irq_enable = PV_CALLEE_SAVE(xen_irq_enable),

	.safe_halt = xen_safe_halt,
	.halt = xen_halt,
#ifdef CONFIG_X86_64
	.adjust_exception_frame = xen_adjust_exception_frame,
#endif
};
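/*
 * Illustrative only, assuming the standard paravirt plumbing in
 * <asm/paravirt.h>: once these ops are installed, the generic IRQ-flag
 * macros are routed through this struct instead of touching EFLAGS.IF:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// -> xen_save_fl() + xen_irq_disable()
 *	// ...critical section...
 *	local_irq_restore(flags);	// -> xen_restore_fl(flags)
 *
 * So "interrupts disabled" for a PV guest means evtchn_upcall_mask == 1.
 */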

void __init xen_init_irq_ops(void)
{
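	/*
	 * Replace the native IRQ-flag ops wholesale, and have the platform
	 * interrupt setup install Xen event channels (xen_init_IRQ)
	 * instead of the legacy PIC/APIC initialization.
	 */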
	pv_irq_ops = xen_irq_ops;
	x86_init.irqs.intr_init = xen_init_IRQ;
}
135