#include <linux/hardirq.h>

#include <asm/x86_init.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include "xen-ops.h"

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void xen_force_evtchn_callback(void)
{
	(void)HYPERVISOR_xen_version(0, NULL);
}

static unsigned long xen_save_fl(void)
{
	struct vcpu_info *vcpu;
	unsigned long flags;

	vcpu = this_cpu_read(xen_vcpu);

	/* flag has opposite sense of mask */
	flags = !vcpu->evtchn_upcall_mask;

	/* convert to IF type flag
	   -0 -> 0x00000000
	   -1 -> 0xffffffff
	*/
	return (-flags) & X86_EFLAGS_IF;
}
PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);

static void xen_restore_fl(unsigned long flags)
{
	struct vcpu_info *vcpu;

	/* convert from IF type flag */
	flags = !(flags & X86_EFLAGS_IF);

	/* There's a one instruction preempt window here. We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	vcpu = this_cpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = flags;
	preempt_enable_no_resched();

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	if (flags == 0) {
		preempt_check_resched();
		barrier(); /* unmask then check (avoid races) */
		if (unlikely(vcpu->evtchn_upcall_pending))
			xen_force_evtchn_callback();
	}
}
PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);

static void xen_irq_disable(void)
{
	/* There's a one instruction preempt window here. We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	this_cpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
	preempt_enable_no_resched();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);

static void xen_irq_enable(void)
{
	struct vcpu_info *vcpu;

	/* We don't need to worry about being preempted here, since
	   either a) interrupts are disabled, so no preemption, or b)
	   the caller is confused and is trying to re-enable interrupts
	   on an indeterminate processor. */

	vcpu = this_cpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = 0;

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	barrier(); /* unmask then check (avoid races) */
	if (unlikely(vcpu->evtchn_upcall_pending))
		xen_force_evtchn_callback();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);

static void xen_safe_halt(void)
{
	/* Blocking includes an implicit local_irq_enable(). */
	if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
		BUG();
}

/*
 * Halting with interrupts disabled means the vcpu is being taken
 * offline; otherwise just block until the next event arrives.
 */
static void xen_halt(void)
{
	if (irqs_disabled())
		HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	else
		xen_safe_halt();
}

static const struct pv_irq_ops xen_irq_ops __initconst = {
	.save_fl = PV_CALLEE_SAVE(xen_save_fl),
	.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
	.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
	.irq_enable = PV_CALLEE_SAVE(xen_irq_enable),

	.safe_halt = xen_safe_halt,
	.halt = xen_halt,
#ifdef CONFIG_X86_64
	.adjust_exception_frame = xen_adjust_exception_frame,
#endif
};

/* Install the Xen paravirtualized IRQ operations. */
void __init xen_init_irq_ops(void)
{
	pv_irq_ops = xen_irq_ops;
	x86_init.irqs.intr_init = xen_init_IRQ;
}