/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/log2.h>
#include <linux/gfp.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "debugfs.h"

#ifdef CONFIG_XEN_DEBUG_FS
/*
 * Lock statistics, exported via debugfs (see xen_spinlock_debugfs()
 * at the bottom of this file).  All counters are plain (non-atomic)
 * variables; updates may race and drop counts, which is acceptable
 * for debugging statistics.
 */
static struct xen_spinlock_stats
{
	u64 taken;			/* total lock acquisitions */
	u32 taken_slow;			/* acquisitions via the blocking slow path */
	u32 taken_slow_nested;		/* slow path entered while already spinning on another lock */
	u32 taken_slow_pickup;		/* lock became free just before blocking */
	u32 taken_slow_spurious;	/* woke from poll without the irq pending */
	u32 taken_slow_irqenable;	/* slow path ran with irqs re-enabled */

	u64 released;			/* total unlocks */
	u32 released_slow;		/* unlocks that had to look for spinners to kick */
	u32 released_slow_kicked;	/* IPIs actually sent to waiting cpus */

#define HISTO_BUCKETS	30
	/* log2 histograms of time spent; bucket HISTO_BUCKETS is overflow */
	u32 histo_spin_total[HISTO_BUCKETS+1];
	u32 histo_spin_spinning[HISTO_BUCKETS+1];
	u32 histo_spin_blocked[HISTO_BUCKETS+1];

	u64 time_total;			/* total time in __xen_spin_lock() */
	u64 time_spinning;		/* time in the fast busy-wait loop */
	u64 time_blocked;		/* time in xen_spin_lock_slow() */
} spinlock_stats;

/* Write non-zero via debugfs to reset all stats on next update. */
static u8 zero_stats;

/* Fast-path spin count before falling into the blocking slow path. */
static unsigned lock_timeout = 1 << 10;
#define TIMEOUT lock_timeout

/* Reset the stats block if a zeroing was requested via debugfs. */
static inline void check_zero(void)
{
	if (unlikely(zero_stats)) {
		memset(&spinlock_stats, 0, sizeof(spinlock_stats));
		zero_stats = 0;
	}
}

#define ADD_STATS(elem, val)			\
	do { check_zero(); spinlock_stats.elem += (val); } while(0)

static inline u64 spin_time_start(void)
{
	return xen_clocksource_read();
}

/* Bucket @delta into the log2 histogram @array (last bucket = overflow). */
static void __spin_time_accum(u64 delta, u32 *array)
{
	unsigned index = ilog2(delta);

	check_zero();

	if (index < HISTO_BUCKETS)
		array[index]++;
	else
		array[HISTO_BUCKETS]++;
}

static inline void spin_time_accum_spinning(u64 start)
{
	/*
	 * NOTE(review): delta is truncated to u32 even though the
	 * clocksource is u64 — assumes a single spin interval fits in
	 * 32 bits; confirm against xen_clocksource_read() resolution.
	 */
	u32 delta = xen_clocksource_read() - start;

	__spin_time_accum(delta, spinlock_stats.histo_spin_spinning);
	spinlock_stats.time_spinning += delta;
}

static inline void spin_time_accum_total(u64 start)
{
	u32 delta = xen_clocksource_read() - start;

	__spin_time_accum(delta, spinlock_stats.histo_spin_total);
	spinlock_stats.time_total += delta;
}

static inline void spin_time_accum_blocked(u64 start)
{
	u32 delta = xen_clocksource_read() - start;

	__spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
	spinlock_stats.time_blocked += delta;
}
#else  /* !CONFIG_XEN_DEBUG_FS */
/* Stats compiled out: fixed timeout, no-op accounting. */
#define TIMEOUT			(1 << 10)
#define ADD_STATS(elem, val)	do { (void)(val); } while(0)

static inline u64 spin_time_start(void)
{
	return 0;
}

static inline void spin_time_accum_total(u64 start)
{
}
static inline void spin_time_accum_spinning(u64 start)
{
}
static inline void spin_time_accum_blocked(u64 start)
{
}
#endif  /* CONFIG_XEN_DEBUG_FS */

/*
 * Size struct xen_spinlock so it's the same as arch_spinlock_t.
 * With NR_CPUS >= 256 the spinner count needs 16 bits, so the
 * increment/decrement must use word-sized (incw/decw) atomics.
 */
#if NR_CPUS < 256
typedef u8 xen_spinners_t;
# define inc_spinners(xl) \
	asm(LOCK_PREFIX " incb %0" : "+m" ((xl)->spinners) : : "memory");
# define dec_spinners(xl) \
	asm(LOCK_PREFIX " decb %0" : "+m" ((xl)->spinners) : : "memory");
#else
typedef u16 xen_spinners_t;
# define inc_spinners(xl) \
	asm(LOCK_PREFIX " incw %0" : "+m" ((xl)->spinners) : : "memory");
# define dec_spinners(xl) \
	asm(LOCK_PREFIX " decw %0" : "+m" ((xl)->spinners) : : "memory");
#endif

struct xen_spinlock {
	unsigned char lock;		/* 0 -> free; 1 -> locked */
	xen_spinners_t spinners;	/* count of waiting cpus */
};

static int xen_spin_is_locked(struct arch_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	return xl->lock != 0;
}

static int xen_spin_is_contended(struct arch_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	/* Not strictly true; this is only the count of contended
	   lock-takers entering the slow path. */
	return xl->spinners != 0;
}

/*
 * Try to grab the lock with a single atomic exchange of 1 into
 * xl->lock.  Returns non-zero iff the previous value was 0 (i.e. we
 * got the lock).
 */
static int xen_spin_trylock(struct arch_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	u8 old = 1;

	asm("xchgb %b0,%1"
	    : "+q" (old), "+m" (xl->lock) : : "memory");

	return old == 0;
}

/* Per-cpu event channel irq used by unlockers to kick blocked spinners. */
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
/* The lock this cpu is currently blocked on (NULL if none). */
static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);

/*
 * Mark a cpu as interested in a lock.  Returns the CPU's previous
 * lock of interest, in case we got preempted by an interrupt.
 */
static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
{
	struct xen_spinlock *prev;

	prev = __this_cpu_read(lock_spinners);
	__this_cpu_write(lock_spinners, xl);

	wmb();			/* set lock of interest before count */

	inc_spinners(xl);

	return prev;
}

/*
 * Mark a cpu as no longer interested in a lock.  Restores previous
 * lock of interest (NULL for none).
 */
static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
{
	dec_spinners(xl);
	wmb();			/* decrement count before restoring lock */
	__this_cpu_write(lock_spinners, prev);
}

/*
 * Slow path: block on the per-cpu kicker irq until an unlocker sends
 * us an IPI (or we wake spuriously), retrying the trylock each time.
 * Returns non-zero if the lock was acquired here, 0 to make the
 * caller resume fast-path spinning.  May be entered re-entrantly from
 * an interrupt handler while the interrupted context was itself
 * blocked; 'prev' preserves the outer lock of interest for that case.
 */
static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	struct xen_spinlock *prev;
	int irq = __this_cpu_read(lock_kicker_irq);
	int ret;
	u64 start;

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1)
		return 0;

	start = spin_time_start();

	/* announce we're spinning */
	prev = spinning_lock(xl);

	ADD_STATS(taken_slow, 1);
	ADD_STATS(taken_slow_nested, prev != NULL);

	do {
		unsigned long flags;

		/* clear pending */
		xen_clear_irq_pending(irq);

		/* check again make sure it didn't become free while
		   we weren't looking  */
		ret = xen_spin_trylock(lock);
		if (ret) {
			ADD_STATS(taken_slow_pickup, 1);

			/*
			 * If we interrupted another spinlock while it
			 * was blocking, make sure it doesn't block
			 * without rechecking the lock.
			 */
			if (prev != NULL)
				xen_set_irq_pending(irq);
			goto out;
		}

		flags = arch_local_save_flags();
		if (irq_enable) {
			ADD_STATS(taken_slow_irqenable, 1);
			raw_local_irq_enable();
		}

		/*
		 * Block until irq becomes pending.  If we're
		 * interrupted at this point (after the trylock but
		 * before entering the block), then the nested lock
		 * handler guarantees that the irq will be left
		 * pending if there's any chance the lock became free;
		 * xen_poll_irq() returns immediately if the irq is
		 * pending.
		 */
		xen_poll_irq(irq);

		raw_local_irq_restore(flags);

		ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */

	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));

out:
	unspinning_lock(xl, prev);
	spin_time_accum_blocked(start);

	return ret;
}

/*
 * Common lock body: spin for up to TIMEOUT iterations trying to xchg
 * ourselves into xl->lock, then fall into the blocking slow path.
 * Repeats until the lock is acquired either by the asm fast path
 * (oldval == 0) or by xen_spin_lock_slow().
 */
static inline void __xen_spin_lock(struct arch_spinlock *lock, bool irq_enable)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	unsigned timeout;
	u8 oldval;
	u64 start_spin;

	ADD_STATS(taken, 1);

	start_spin = spin_time_start();

	do {
		u64 start_spin_fast = spin_time_start();

		timeout = TIMEOUT;

		/*
		 * 1: attempt xchgb; if we got 0 the lock is ours (-> 3).
		 * 2: otherwise pause-spin reading the byte until it looks
		 *    free (-> retry at 1) or timeout decrements to zero.
		 */
		asm("1: xchgb %1,%0\n"
		    "   testb %1,%1\n"
		    "   jz 3f\n"
		    "2: rep;nop\n"
		    "   cmpb $0,%0\n"
		    "   je 1b\n"
		    "   dec %2\n"
		    "   jnz 2b\n"
		    "3:\n"
		    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
		    : "1" (1)
		    : "memory");

		spin_time_accum_spinning(start_spin_fast);

		/* TIMEOUT == ~0 means "never block": keep fast-spinning. */
	} while (unlikely(oldval != 0 &&
			  (TIMEOUT == ~0 || !xen_spin_lock_slow(lock, irq_enable))));

	spin_time_accum_total(start_spin);
}

static void xen_spin_lock(struct arch_spinlock *lock)
{
	__xen_spin_lock(lock, false);
}

/* Variant that may re-enable irqs while blocked, if the saved flags allow. */
static void xen_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags)
{
	__xen_spin_lock(lock, !raw_irqs_disabled_flags(flags));
}

/*
 * Kick every online cpu that has announced (via lock_spinners) that
 * it is blocked on @xl.
 */
static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
{
	int cpu;

	ADD_STATS(released_slow, 1);

	for_each_online_cpu(cpu) {
		/* XXX should mix up next cpu selection */
		if (per_cpu(lock_spinners, cpu) == xl) {
			ADD_STATS(released_slow_kicked, 1);
			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
		}
	}
}

static void xen_spin_unlock(struct arch_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	ADD_STATS(released, 1);

	smp_wmb();		/* make sure no writes get moved after unlock */
	xl->lock = 0;		/* release lock */

	/*
	 * Make sure unlock happens before checking for waiting
	 * spinners.  We need a strong barrier to enforce the
	 * write-read ordering to different memory locations, as the
	 * CPU makes no implied guarantees about their ordering.
	 */
	mb();

	if (unlikely(xl->spinners))
		xen_spin_unlock_slow(xl);
}

/*
 * The kicker irq is only ever polled (xen_poll_irq) and never
 * delivered as a real interrupt, so reaching this handler is a bug.
 */
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}

/* Set up the per-cpu kicker irq for @cpu (called as cpus come online). */
void __cpuinit xen_init_lock_cpu(int cpu)
{
	int irq;
	const char *name;

	WARN(per_cpu(lock_kicker_irq, cpu) > 0, "spinlock on CPU%d exists on IRQ%d!\n",
	     cpu, per_cpu(lock_kicker_irq, cpu));

	/*
	 * NOTE(review): 'name' is not freed if bind_ipi_to_irqhandler()
	 * fails, and the irq layer keeps the pointer while bound — so it
	 * is intentionally not freed on success either.
	 */
	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
				     cpu,
				     dummy_handler,
				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				     name,
				     NULL);

	if (irq >= 0) {
		disable_irq(irq); /* make sure it's never delivered */
		per_cpu(lock_kicker_irq, cpu) = irq;
	}

	printk("cpu %d spinlock event irq %d\n", cpu, irq);
}

/* Tear down the kicker irq for @cpu (cpu going offline). */
void xen_uninit_lock_cpu(int cpu)
{
	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
	per_cpu(lock_kicker_irq, cpu) = -1;
}

/* Install the Xen implementations into the paravirt lock ops table. */
void __init xen_init_spinlocks(void)
{
	/* struct xen_spinlock overlays arch_spinlock_t; sizes must match. */
	BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t));

	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
	pv_lock_ops.spin_lock = xen_spin_lock;
	pv_lock_ops.spin_lock_flags = xen_spin_lock_flags;
	pv_lock_ops.spin_trylock = xen_spin_trylock;
	pv_lock_ops.spin_unlock = xen_spin_unlock;
}

#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_spin_debug;

/* Expose spinlock_stats under <xen debugfs root>/spinlocks/. */
static int __init xen_spinlock_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_spin_debug = debugfs_create_dir("spinlocks", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

	debugfs_create_u32("timeout", 0644, d_spin_debug, &lock_timeout);

	debugfs_create_u64("taken", 0444, d_spin_debug, &spinlock_stats.taken);
	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow);
	debugfs_create_u32("taken_slow_nested", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_nested);
	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_pickup);
	debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_spurious);
	debugfs_create_u32("taken_slow_irqenable", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_irqenable);

	debugfs_create_u64("released", 0444, d_spin_debug, &spinlock_stats.released);
	debugfs_create_u32("released_slow", 0444, d_spin_debug,
			   &spinlock_stats.released_slow);
	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
			   &spinlock_stats.released_slow_kicked);

	debugfs_create_u64("time_spinning", 0444, d_spin_debug,
			   &spinlock_stats.time_spinning);
	debugfs_create_u64("time_blocked", 0444, d_spin_debug,
			   &spinlock_stats.time_blocked);
	debugfs_create_u64("time_total", 0444, d_spin_debug,
			   &spinlock_stats.time_total);

	debugfs_create_u32_array("histo_total", 0444, d_spin_debug,
				 spinlock_stats.histo_spin_total, HISTO_BUCKETS + 1);
	debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug,
				 spinlock_stats.histo_spin_spinning, HISTO_BUCKETS + 1);
	debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
				 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

	return 0;
}
fs_initcall(xen_spinlock_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */