#include <linux/hardirq.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include "xen-ops.h"

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void xen_force_evtchn_callback(void)
{
	(void)HYPERVISOR_xen_version(0, NULL);
}

static void __init __xen_init_IRQ(void)
{
#ifdef CONFIG_X86_64
	int i;

	/* Create identity vector->irq map */
	for (i = 0; i < NR_VECTORS; i++) {
		int cpu;

		for_each_possible_cpu(cpu)
			per_cpu(vector_irq, cpu)[i] = i;
	}
#endif /* CONFIG_X86_64 */

	xen_init_IRQ();
}

static unsigned long xen_save_fl(void)
{
	struct vcpu_info *vcpu;
	unsigned long flags;

	vcpu = x86_read_percpu(xen_vcpu);

	/* flag has opposite sense of mask */
	flags = !vcpu->evtchn_upcall_mask;

	/* convert to IF type flag
	   -0 -> 0x00000000
	   -1 -> 0xffffffff
	*/
	return (-flags) & X86_EFLAGS_IF;
}

static void xen_restore_fl(unsigned long flags)
{
	struct vcpu_info *vcpu;

	/* convert from IF type flag */
	flags = !(flags & X86_EFLAGS_IF);

	/* There's a one-instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	vcpu = x86_read_percpu(xen_vcpu);
	vcpu->evtchn_upcall_mask = flags;
	preempt_enable_no_resched();

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	if (flags == 0) {
		preempt_check_resched();
		barrier(); /* unmask then check (avoid races) */
		if (unlikely(vcpu->evtchn_upcall_pending))
			xen_force_evtchn_callback();
	}
}

static void xen_irq_disable(void)
{
	/* There's a one-instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1;
	preempt_enable_no_resched();
}

static void xen_irq_enable(void)
{
	struct vcpu_info *vcpu;

	/* We don't need to worry about being preempted here, since
	   either a) interrupts are disabled, so no preemption, or b)
	   the caller is confused and is trying to re-enable interrupts
	   on an indeterminate processor. */

	vcpu = x86_read_percpu(xen_vcpu);
	vcpu->evtchn_upcall_mask = 0;

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	barrier(); /* unmask then check (avoid races) */
	if (unlikely(vcpu->evtchn_upcall_pending))
		xen_force_evtchn_callback();
}

static void xen_safe_halt(void)
{
	/* Blocking includes an implicit local_irq_enable(). */
	if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
		BUG();
}

static void xen_halt(void)
{
	/* With interrupts disabled nothing can wake this vcpu, so take
	   it offline; otherwise block until the next event arrives. */
	if (irqs_disabled())
		HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	else
		xen_safe_halt();
}

static const struct pv_irq_ops xen_irq_ops __initdata = {
	.init_IRQ = __xen_init_IRQ,
	.save_fl = xen_save_fl,
	.restore_fl = xen_restore_fl,
	.irq_disable = xen_irq_disable,
	.irq_enable = xen_irq_enable,
	.safe_halt = xen_safe_halt,
	.halt = xen_halt,
#ifdef CONFIG_X86_64
	.adjust_exception_frame = xen_adjust_exception_frame,
#endif
};

void __init xen_init_irq_ops(void)
{
	pv_irq_ops = xen_irq_ops;
}
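/*
 * Illustration (not part of this file): once xen_init_irq_ops() has
 * replaced pv_irq_ops, the generic irqflags helpers reach the handlers
 * above through the paravirt-ops indirection.  A rough sketch of the
 * kind of wrapper paravirt.h provides in kernels of this vintage --
 * the exact wrapper and macro names here are recalled from memory,
 * not taken from this file:
 *
 *	static inline void raw_local_irq_disable(void)
 *	{
 *		PVOP_VCALL0(pv_irq_ops.irq_disable);	// -> xen_irq_disable()
 *	}
 *
 *	static inline unsigned long __raw_local_save_flags(void)
 *	{
 *		return PVOP_CALL0(unsigned long, pv_irq_ops.save_fl);	// -> xen_save_fl()
 *	}
 *
 * So local_irq_disable()/local_irq_save() in common code end up
 * flipping evtchn_upcall_mask instead of executing cli/pushf.
 */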