/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/log2.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "debugfs.h"

#ifdef CONFIG_XEN_DEBUG_FS
static struct xen_spinlock_stats
{
	u64 taken;
	u32 taken_slow;
	u32 taken_slow_nested;
	u32 taken_slow_pickup;
	u32 taken_slow_spurious;

	u64 released;
	u32 released_slow;
	u32 released_slow_kicked;

#define HISTO_BUCKETS	20
	u32 histo_spin_fast[HISTO_BUCKETS+1];
	u32 histo_spin[HISTO_BUCKETS+1];

	u64 spinning_time;
	u64 total_time;
} spinlock_stats;

static u8 zero_stats;

static unsigned lock_timeout = 1 << 10;
#define TIMEOUT lock_timeout

static inline void check_zero(void)
{
	if (unlikely(zero_stats)) {
		memset(&spinlock_stats, 0, sizeof(spinlock_stats));
		zero_stats = 0;
	}
}

#define ADD_STATS(elem, val)			\
	do { check_zero(); spinlock_stats.elem += (val); } while(0)

static inline u64 spin_time_start(void)
{
	return xen_clocksource_read();
}

static void __spin_time_accum(u64 delta, u32 *array)
{
	unsigned index = ilog2(delta);

	check_zero();

	if (index < HISTO_BUCKETS)
		array[index]++;
	else
		array[HISTO_BUCKETS]++;
}

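/*
 * Spin times are accumulated in two log2-bucketed histograms:
 * histo_spin_fast records each pass through the inline xchg spin
 * loop, while histo_spin records the total time taken to acquire
 * the lock, including any time blocked in the slow path.
 */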
static inline void spin_time_accum_fast(u64 start)
{
	u32 delta = xen_clocksource_read() - start;

	__spin_time_accum(delta, spinlock_stats.histo_spin_fast);
	spinlock_stats.spinning_time += delta;
}

static inline void spin_time_accum(u64 start)
{
	u32 delta = xen_clocksource_read() - start;

	__spin_time_accum(delta, spinlock_stats.histo_spin);
	spinlock_stats.total_time += delta;
}
#else  /* !CONFIG_XEN_DEBUG_FS */
#define TIMEOUT			(1 << 10)
#define ADD_STATS(elem, val)	do { (void)(val); } while(0)

static inline u64 spin_time_start(void)
{
	return 0;
}

static inline void spin_time_accum_fast(u64 start)
{
}
static inline void spin_time_accum(u64 start)
{
}
#endif  /* CONFIG_XEN_DEBUG_FS */

struct xen_spinlock {
	unsigned char lock;		/* 0 -> free; 1 -> locked */
	unsigned short spinners;	/* count of waiting cpus */
};

static int xen_spin_is_locked(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	return xl->lock != 0;
}

static int xen_spin_is_contended(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	/* Not strictly true; this is only the count of contended
	   lock-takers entering the slow path. */
	return xl->spinners != 0;
}

static int xen_spin_trylock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	u8 old = 1;

	/* Atomically exchange 1 into the lock byte; we got the lock
	   iff the old value was 0. */
	asm("xchgb %b0,%1"
	    : "+q" (old), "+m" (xl->lock) : : "memory");

	return old == 0;
}

static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);

/*
 * Mark a cpu as interested in a lock.  Returns the CPU's previous
 * lock of interest, in case we got preempted by an interrupt.
 */
static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
{
	struct xen_spinlock *prev;

	prev = __get_cpu_var(lock_spinners);
	__get_cpu_var(lock_spinners) = xl;

	wmb();			/* set lock of interest before count */

	asm(LOCK_PREFIX " incw %0"
	    : "+m" (xl->spinners) : : "memory");

	return prev;
}

/*
 * Mark a cpu as no longer interested in a lock.  Restores previous
 * lock of interest (NULL for none).
 */
static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
{
	asm(LOCK_PREFIX " decw %0"
	    : "+m" (xl->spinners) : : "memory");
	wmb();			/* decrement count before restoring lock */
	__get_cpu_var(lock_spinners) = prev;
}

static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	struct xen_spinlock *prev;
	int irq = __get_cpu_var(lock_kicker_irq);
	int ret;

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1)
		return 0;

	/* announce we're spinning */
	prev = spinning_lock(xl);

	ADD_STATS(taken_slow, 1);
	ADD_STATS(taken_slow_nested, prev != NULL);

	do {
		/* clear pending */
		xen_clear_irq_pending(irq);

		/* check again make sure it didn't become free while
		   we weren't looking */
		ret = xen_spin_trylock(lock);
		if (ret) {
			ADD_STATS(taken_slow_pickup, 1);

			/*
			 * If we interrupted another spinlock while it
			 * was blocking, make sure it doesn't block
			 * without rechecking the lock.
			 */
			if (prev != NULL)
				xen_set_irq_pending(irq);
			goto out;
		}

		/*
		 * Block until irq becomes pending.  If we're
		 * interrupted at this point (after the trylock but
		 * before entering the block), then the nested lock
		 * handler guarantees that the irq will be left
		 * pending if there's any chance the lock became free;
		 * xen_poll_irq() returns immediately if the irq is
		 * pending.
		 */
		xen_poll_irq(irq);
		ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */

	kstat_this_cpu.irqs[irq]++;

out:
	unspinning_lock(xl, prev);
	return ret;
}

static void xen_spin_lock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	unsigned timeout;
	u8 oldval;
	u64 start_spin;

	ADD_STATS(taken, 1);

	start_spin = spin_time_start();

	do {
		u64 start_spin_fast = spin_time_start();

		timeout = TIMEOUT;

		/*
		 * Spin on the lock byte: try to grab it with xchg,
		 * and on failure pause and watch it until it looks
		 * free again or the timeout counter runs out.
		 */
		asm("1: xchgb %1,%0\n"
		    "   testb %1,%1\n"
		    "   jz 3f\n"
		    "2: rep;nop\n"
		    "   cmpb $0,%0\n"
		    "   je 1b\n"
		    "   dec %2\n"
		    "   jnz 2b\n"
		    "3:\n"
		    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
		    : "1" (1)
		    : "memory");

		spin_time_accum_fast(start_spin_fast);
	} while (unlikely(oldval != 0 && (TIMEOUT == ~0 || !xen_spin_lock_slow(lock))));

	spin_time_accum(start_spin);
}

static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
{
	int cpu;

	ADD_STATS(released_slow, 1);

	for_each_online_cpu(cpu) {
		/* XXX should mix up next cpu selection */
		if (per_cpu(lock_spinners, cpu) == xl) {
			ADD_STATS(released_slow_kicked, 1);
			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
			break;
		}
	}
}

static void xen_spin_unlock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	ADD_STATS(released, 1);

	smp_wmb();		/* make sure no writes get moved after unlock */
	xl->lock = 0;		/* release lock */

	/* make sure unlock happens before kick */
	barrier();

	if (unlikely(xl->spinners))
		xen_spin_unlock_slow(xl);
}

/*
 * The kicker IRQ is never actually delivered (it is kept disabled);
 * it exists only so xen_poll_irq() has a pending bit to wait on, so
 * running this handler would be a bug.
 */
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}

void __cpuinit xen_init_lock_cpu(int cpu)
{
	int irq;
	const char *name;

	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
				     cpu,
				     dummy_handler,
				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				     name,
				     NULL);

	if (irq >= 0) {
		disable_irq(irq); /* make sure it's never delivered */
		per_cpu(lock_kicker_irq, cpu) = irq;
	}

	printk("cpu %d spinlock event irq %d\n", cpu, irq);
}

void __init xen_init_spinlocks(void)
{
	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
	pv_lock_ops.spin_lock = xen_spin_lock;
	pv_lock_ops.spin_trylock = xen_spin_trylock;
	pv_lock_ops.spin_unlock = xen_spin_unlock;
}

#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_spin_debug;

static int __init xen_spinlock_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_spin_debug = debugfs_create_dir("spinlocks", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

	debugfs_create_u32("timeout", 0644, d_spin_debug, &lock_timeout);

	debugfs_create_u64("taken", 0444, d_spin_debug, &spinlock_stats.taken);
	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow);
	debugfs_create_u32("taken_slow_nested", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_nested);
	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_pickup);
	debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_spurious);

	debugfs_create_u64("released", 0444, d_spin_debug, &spinlock_stats.released);
	debugfs_create_u32("released_slow", 0444, d_spin_debug,
			   &spinlock_stats.released_slow);
	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
			   &spinlock_stats.released_slow_kicked);

	debugfs_create_u64("time_spinning", 0444, d_spin_debug,
			   &spinlock_stats.spinning_time);
	debugfs_create_u64("time_total", 0444, d_spin_debug,
			   &spinlock_stats.total_time);

	xen_debugfs_create_u32_array("histo_total", 0444, d_spin_debug,
				     spinlock_stats.histo_spin, HISTO_BUCKETS + 1);
	xen_debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug,
				     spinlock_stats.histo_spin_fast, HISTO_BUCKETS + 1);

	return 0;
}
fs_initcall(xen_spinlock_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */