xref: /openbmc/linux/arch/x86/xen/spinlock.c (revision d6abfdb2022368d8c6c4be3f11a06656601a6cc2)
/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in an FTRACE-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/log2.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "debugfs.h"

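/*
 * Slow-path contention statistics.  Each event below has a counter in
 * spinlock_stats, exported through debugfs when CONFIG_XEN_DEBUG_FS is
 * enabled; otherwise add_stats() compiles away to nothing.
 */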
enum xen_contention_stat {
	TAKEN_SLOW,
	TAKEN_SLOW_PICKUP,
	TAKEN_SLOW_SPURIOUS,
	RELEASED_SLOW,
	RELEASED_SLOW_KICKED,
	NR_CONTENTION_STATS
};


#ifdef CONFIG_XEN_DEBUG_FS
#define HISTO_BUCKETS	30
static struct xen_spinlock_stats
{
	u32 contention_stats[NR_CONTENTION_STATS];
	u32 histo_spin_blocked[HISTO_BUCKETS+1];
	u64 time_blocked;
} spinlock_stats;

static u8 zero_stats;

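/*
 * Writing a non-zero value to the debugfs "zero_stats" file requests a
 * reset: the next stat update notices it here and clears all counters.
 * The cmpxchg() ensures only one CPU performs the memset().
 */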
static inline void check_zero(void)
{
	u8 ret;
	u8 old = READ_ONCE(zero_stats);
	if (unlikely(old)) {
		ret = cmpxchg(&zero_stats, old, 0);
		/* This ensures only one fellow resets the stat */
		if (ret == old)
			memset(&spinlock_stats, 0, sizeof(spinlock_stats));
	}
}

static inline void add_stats(enum xen_contention_stat var, u32 val)
{
	check_zero();
	spinlock_stats.contention_stats[var] += val;
}

static inline u64 spin_time_start(void)
{
	return xen_clocksource_read();
}

static void __spin_time_accum(u64 delta, u32 *array)
{
	unsigned index = ilog2(delta);

	check_zero();

	if (index < HISTO_BUCKETS)
		array[index]++;
	else
		array[HISTO_BUCKETS]++;
}

static inline void spin_time_accum_blocked(u64 start)
{
	u32 delta = xen_clocksource_read() - start;

	__spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
	spinlock_stats.time_blocked += delta;
}
#else  /* !CONFIG_XEN_DEBUG_FS */
static inline void add_stats(enum xen_contention_stat var, u32 val)
{
}

static inline u64 spin_time_start(void)
{
	return 0;
}

static inline void spin_time_accum_blocked(u64 start)
{
}
#endif  /* CONFIG_XEN_DEBUG_FS */

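/*
 * Per-cpu record published while a CPU is blocked in xen_lock_spinning():
 * which lock it is waiting on and which ticket it holds, so that
 * xen_unlock_kick() can find and wake the right waiter.
 */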
struct xen_lock_waiting {
	struct arch_spinlock *lock;
	__ticket_t want;
};

static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
static cpumask_t waiting_cpus;

static bool xen_pvspin = true;
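
/*
 * Slow path for the PV ticket lock, installed as pv_lock_ops.lock_spinning.
 * Called once the ticket-lock fast path has spun for a while without
 * acquiring the lock; it publishes the (lock, want) pair, marks the lock as
 * contended and blocks the VCPU on its per-cpu event channel until the
 * unlocker kicks it (or a spurious wakeup occurs).
 */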
__visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
	int irq = __this_cpu_read(lock_kicker_irq);
	struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting);
	int cpu = smp_processor_id();
	u64 start;
	__ticket_t head;
	unsigned long flags;

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1)
		return;

	start = spin_time_start();

	/*
	 * Make sure an interrupt handler can't upset things in a
	 * partially setup state.
	 */
	local_irq_save(flags);
	/*
	 * We don't really care if we're overwriting some other
	 * (lock,want) pair, as that would mean that we're currently
	 * in an interrupt context, and the outer context had
	 * interrupts enabled.  That has already kicked the VCPU out
	 * of xen_poll_irq(), so it will just return spuriously and
	 * retry with newly setup (lock,want).
	 *
	 * The ordering protocol on this is that the "lock" pointer
	 * may only be set non-NULL if the "want" ticket is correct.
	 * If we're updating "want", we must first clear "lock".
	 */
	w->lock = NULL;
	smp_wmb();
	w->want = want;
	smp_wmb();
	w->lock = lock;

	/* This uses set_bit, which is atomic and therefore a barrier */
	cpumask_set_cpu(cpu, &waiting_cpus);
	add_stats(TAKEN_SLOW, 1);

	/* clear pending */
	xen_clear_irq_pending(irq);

	/* Only check lock once pending cleared */
	barrier();

	/*
	 * Mark entry to slowpath before doing the pickup test to make
	 * sure we don't deadlock with an unlocker.
	 */
	__ticket_enter_slowpath(lock);

	/* make sure enter_slowpath, which is atomic, does not cross the read */
	smp_mb__after_atomic();

	/*
	 * check again to make sure it didn't become free while
	 * we weren't looking
	 */
	head = READ_ONCE(lock->tickets.head);
	if (__tickets_equal(head, want)) {
		add_stats(TAKEN_SLOW_PICKUP, 1);
		goto out;
	}

	/* Allow interrupts while blocked */
	local_irq_restore(flags);

	/*
	 * If an interrupt happens here, it will leave the wakeup irq
	 * pending, which will cause xen_poll_irq() to return
	 * immediately.
	 */

	/* Block until irq becomes pending (or perhaps a spurious wakeup) */
	xen_poll_irq(irq);
	add_stats(TAKEN_SLOW_SPURIOUS, !xen_test_irq_pending(irq));

	local_irq_save(flags);

	kstat_incr_irq_this_cpu(irq);
out:
	cpumask_clear_cpu(cpu, &waiting_cpus);
	w->lock = NULL;

	local_irq_restore(flags);

	spin_time_accum_blocked(start);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_lock_spinning);

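/*
 * Unlock slow path, installed as pv_lock_ops.unlock_kick.  Called when the
 * unlocker sees the slowpath flag set: scan the waiting CPUs for one that
 * is blocked on this lock waiting for exactly the ticket being handed over,
 * and send it the wakeup event.
 */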
static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
{
	int cpu;

	add_stats(RELEASED_SLOW, 1);

	for_each_cpu(cpu, &waiting_cpus) {
		const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu);

		/* Make sure we read lock before want */
		if (READ_ONCE(w->lock) == lock &&
		    READ_ONCE(w->want) == next) {
			add_stats(RELEASED_SLOW_KICKED, 1);
			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
			break;
		}
	}
}

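/*
 * The wakeup event channel is never delivered as a real interrupt: it is
 * kept disabled and only ever polled via xen_poll_irq(), so this handler
 * should never run.
 */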
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}

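/*
 * Bind the per-cpu "spinlock<N>" wakeup event channel used by the slow
 * path; called for each CPU as it is set up.
 */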
void xen_init_lock_cpu(int cpu)
{
	int irq;
	char *name;

	if (!xen_pvspin)
		return;

	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
	     cpu, per_cpu(lock_kicker_irq, cpu));

	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
				     cpu,
				     dummy_handler,
				     IRQF_PERCPU|IRQF_NOBALANCING,
				     name,
				     NULL);

	if (irq >= 0) {
		disable_irq(irq); /* make sure it's never delivered */
		per_cpu(lock_kicker_irq, cpu) = irq;
		per_cpu(irq_name, cpu) = name;
	}

	printk("cpu %d spinlock event irq %d\n", cpu, irq);
}

void xen_uninit_lock_cpu(int cpu)
{
	if (!xen_pvspin)
		return;

	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
	per_cpu(lock_kicker_irq, cpu) = -1;
	kfree(per_cpu(irq_name, cpu));
	per_cpu(irq_name, cpu) = NULL;
}


/*
 * Our init of PV spinlocks is split into two init functions because we use
 * both paravirt patching and jump label patching, and all of this has to be
 * done before the SMP code is invoked.
 *
 * The paravirt patching needs to be done _before_ the alternative asm code
 * is started, otherwise we would not patch the core kernel code.
 */
void __init xen_init_spinlocks(void)
{
	if (!xen_pvspin) {
		printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
		return;
	}
	printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
	pv_lock_ops.unlock_kick = xen_unlock_kick;
}

/*
 * The jump_label init code needs to happen _after_ the jump labels are
 * enabled and before SMP is started, hence we use a pre-SMP initcall level
 * for this init.  We cannot do it in xen_init_spinlocks as that is done
 * before jump labels are activated.
 */
static __init int xen_init_spinlocks_jump(void)
{
	if (!xen_pvspin)
		return 0;

	if (!xen_domain())
		return 0;

	static_key_slow_inc(&paravirt_ticketlocks_enabled);
	return 0;
}
early_initcall(xen_init_spinlocks_jump);

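/*
 * The "xen_nopvspin" command-line parameter disables the PV slow path, so
 * the hooks above are never installed.
 */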
static __init int xen_parse_nopvspin(char *arg)
{
	xen_pvspin = false;
	return 0;
}
early_param("xen_nopvspin", xen_parse_nopvspin);

#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_spin_debug;

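/*
 * Expose the contention counters and the blocking-time histogram under a
 * "spinlocks" directory in the Xen debugfs hierarchy.
 */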
static int __init xen_spinlock_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	if (!xen_pvspin)
		return 0;

	d_spin_debug = debugfs_create_dir("spinlocks", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW]);
	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);
	debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW_SPURIOUS]);

	debugfs_create_u32("released_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW]);
	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);

	debugfs_create_u64("time_blocked", 0444, d_spin_debug,
			   &spinlock_stats.time_blocked);

	debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
				spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

	return 0;
}
fs_initcall(xen_spinlock_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */