/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"

struct xen_spinlock {
        unsigned char lock;             /* 0 -> free; 1 -> locked */
        unsigned short spinners;        /* count of waiting cpus */
};

static int xen_spin_is_locked(struct raw_spinlock *lock)
{
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;

        return xl->lock != 0;
}

static int xen_spin_is_contended(struct raw_spinlock *lock)
{
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;

        /* Not strictly true; this is only the count of contended
           lock-takers entering the slow path. */
        return xl->spinners != 0;
}

static int xen_spin_trylock(struct raw_spinlock *lock)
{
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
        u8 old = 1;

        asm("xchgb %b0,%1"
            : "+q" (old), "+m" (xl->lock) : : "memory");

        return old == 0;
}

static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);

/*
 * Register this CPU as a waiter on @xl so the unlocker knows whom to
 * kick, and bump the spinner count checked by the unlock fast path.
 */
static inline void spinning_lock(struct xen_spinlock *xl)
{
        __get_cpu_var(lock_spinners) = xl;
        wmb();                  /* set lock of interest before count */
        asm(LOCK_PREFIX " incw %0"
            : "+m" (xl->spinners) : : "memory");
}

/* Drop the spinner count and clear this CPU's lock of interest. */
static inline void unspinning_lock(struct xen_spinlock *xl)
{
        asm(LOCK_PREFIX " decw %0"
            : "+m" (xl->spinners) : : "memory");
        wmb();                  /* decrement count before clearing lock */
        __get_cpu_var(lock_spinners) = NULL;
}

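/*
 * Slow path, entered once the spin loop in xen_spin_lock() times out:
 * announce that this CPU is spinning on the lock, clear any stale
 * pending state on the per-cpu kicker IRQ, re-try the lock once in case
 * it was freed meanwhile, and otherwise block in the hypervisor until
 * the unlocker kicks this CPU.  Returns nonzero only if the lock was
 * acquired here; after a kick it returns 0 and the caller spins again.
 */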
static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
{
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
        int irq = __get_cpu_var(lock_kicker_irq);
        int ret;

        /* If kicker interrupts not initialized yet, just spin */
        if (irq == -1)
                return 0;

        /* announce we're spinning */
        spinning_lock(xl);

        /* clear pending */
        xen_clear_irq_pending(irq);

        /* check again make sure it didn't become free while
           we weren't looking */
        ret = xen_spin_trylock(lock);
        if (ret)
                goto out;

        /* block until irq becomes pending */
        xen_poll_irq(irq);
        kstat_this_cpu.irqs[irq]++;

out:
        unspinning_lock(xl);
        return ret;
}

/*
 * Fast path: spin briefly on the lock byte; if that times out, fall
 * back to xen_spin_lock_slow() and retry until the lock is taken.
 */
static void xen_spin_lock(struct raw_spinlock *lock)
{
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
        int timeout;
        u8 oldval;

        do {
                timeout = 1 << 10;

                /*
                 * Try to grab the lock by exchanging 1 into the lock
                 * byte; if it was already held, spin (rep;nop) watching
                 * for the byte to clear, retrying the exchange when it
                 * does, for up to 2^10 iterations.
                 */
                asm("1: xchgb %1,%0\n"
                    "   testb %1,%1\n"
                    "   jz 3f\n"
                    "2: rep;nop\n"
                    "   cmpb $0,%0\n"
                    "   je 1b\n"
                    "   dec %2\n"
                    "   jnz 2b\n"
                    "3:\n"
                    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
                    : "1" (1)
                    : "memory");

        } while (unlikely(oldval != 0 && !xen_spin_lock_slow(lock)));
}

/* Kick the first CPU found to be spinning on @xl, if there is one. */
static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
{
        int cpu;

        for_each_online_cpu(cpu) {
                /* XXX should mix up next cpu selection */
                if (per_cpu(lock_spinners, cpu) == xl) {
                        xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
                        break;
                }
        }
}

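/*
 * Release the lock, then kick one waiting CPU (if any) so it drops out
 * of its poll and re-tries the lock.
 */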
static void xen_spin_unlock(struct raw_spinlock *lock)
{
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;

        smp_wmb();              /* make sure no writes get moved after unlock */
        xl->lock = 0;           /* release lock */

        /* make sure unlock happens before kick */
        barrier();

        if (unlikely(xl->spinners))
                xen_spin_unlock_slow(xl);
}

/*
 * The kicker IRQ is only ever polled on, never delivered (it is kept
 * disabled), so this handler should never run.
 */
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
        BUG();
        return IRQ_HANDLED;
}

void __cpuinit xen_init_lock_cpu(int cpu)
{
        int irq;
        const char *name;

        name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
        irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
                                     cpu,
                                     dummy_handler,
                                     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                     name,
                                     NULL);

        if (irq >= 0) {
                disable_irq(irq); /* make sure it's never delivered */
                per_cpu(lock_kicker_irq, cpu) = irq;
        }

        printk("cpu %d spinlock event irq %d\n", cpu, irq);
}

void __init xen_init_spinlocks(void)
{
        pv_lock_ops.spin_is_locked = xen_spin_is_locked;
        pv_lock_ops.spin_is_contended = xen_spin_is_contended;
        pv_lock_ops.spin_lock = xen_spin_lock;
        pv_lock_ops.spin_trylock = xen_spin_trylock;
        pv_lock_ops.spin_unlock = xen_spin_unlock;
}