xref: /openbmc/linux/arch/x86/xen/spinlock.c (revision e95e6f176c61dd0e7bd9fdfb4956df1f9bfe99d4)
1d5de8841SJeremy Fitzhardinge /*
2d5de8841SJeremy Fitzhardinge  * Split spinlock implementation out into its own file, so it can be
3d5de8841SJeremy Fitzhardinge  * compiled in a FTRACE-compatible way.
4d5de8841SJeremy Fitzhardinge  */
5d5de8841SJeremy Fitzhardinge #include <linux/kernel_stat.h>
6d5de8841SJeremy Fitzhardinge #include <linux/spinlock.h>
7994025caSJeremy Fitzhardinge #include <linux/debugfs.h>
8994025caSJeremy Fitzhardinge #include <linux/log2.h>
95a0e3ad6STejun Heo #include <linux/gfp.h>
10354e7b76SKonrad Rzeszutek Wilk #include <linux/slab.h>
11d5de8841SJeremy Fitzhardinge 
12d5de8841SJeremy Fitzhardinge #include <asm/paravirt.h>
13d5de8841SJeremy Fitzhardinge 
14d5de8841SJeremy Fitzhardinge #include <xen/interface/xen.h>
15d5de8841SJeremy Fitzhardinge #include <xen/events.h>
16d5de8841SJeremy Fitzhardinge 
17d5de8841SJeremy Fitzhardinge #include "xen-ops.h"
18994025caSJeremy Fitzhardinge #include "debugfs.h"
19994025caSJeremy Fitzhardinge 
/* Per-CPU event-channel IRQ used to kick a vCPU out of xen_poll_irq(); -1 until bound. */
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
/* Per-CPU kasprintf()'d name passed to bind_ipi_to_irqhandler(); freed on CPU teardown. */
static DEFINE_PER_CPU(char *, irq_name);
/* PV spinlocks are on by default; cleared by the "xen_nopvspin" early param. */
static bool xen_pvspin = true;
23*e95e6f17SDavid Vrabel 
24*e95e6f17SDavid Vrabel #ifdef CONFIG_QUEUED_SPINLOCK
25*e95e6f17SDavid Vrabel 
26*e95e6f17SDavid Vrabel #include <asm/qspinlock.h>
27*e95e6f17SDavid Vrabel 
/*
 * Wake @cpu from xen_qlock_wait() by sending it the spinlock wakeup
 * IPI, which leaves that CPU's kicker IRQ pending.
 */
static void xen_qlock_kick(int cpu)
{
	xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
}
32*e95e6f17SDavid Vrabel 
33*e95e6f17SDavid Vrabel /*
34*e95e6f17SDavid Vrabel  * Halt the current CPU & release it back to the host
35*e95e6f17SDavid Vrabel  */
/*
 * Halt the current CPU & release it back to the host
 *
 * qspinlock PV wait hook: block until kicked (or a spurious wakeup),
 * but only as long as *byte still holds @val — otherwise the lock
 * state already changed and we must return to the slowpath.
 */
static void xen_qlock_wait(u8 *byte, u8 val)
{
	int irq = __this_cpu_read(lock_kicker_irq);

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1)
		return;

	/* clear pending */
	xen_clear_irq_pending(irq);
	barrier();

	/*
	 * We check the byte value after clearing pending IRQ to make sure
	 * that we won't miss a wakeup event because of the clearing.
	 *
	 * The sync_clear_bit() call in xen_clear_irq_pending() is atomic.
	 * So it is effectively a memory barrier for x86.
	 */
	if (READ_ONCE(*byte) != val)
		return;

	/*
	 * If an interrupt happens here, it will leave the wakeup irq
	 * pending, which will cause xen_poll_irq() to return
	 * immediately.
	 */

	/* Block until irq becomes pending (or perhaps a spurious wakeup) */
	xen_poll_irq(irq);
}
67*e95e6f17SDavid Vrabel 
68*e95e6f17SDavid Vrabel #else /* CONFIG_QUEUED_SPINLOCK */
69*e95e6f17SDavid Vrabel 
/* Indexes into spinlock_stats.contention_stats[] (debugfs counters). */
enum xen_contention_stat {
	TAKEN_SLOW,		/* entered the blocking slow lock path */
	TAKEN_SLOW_PICKUP,	/* lock became free right after entering */
	TAKEN_SLOW_SPURIOUS,	/* woke from xen_poll_irq() without a kick */
	RELEASED_SLOW,		/* unlocker took the kicking slow path */
	RELEASED_SLOW_KICKED,	/* unlocker actually kicked a waiter */
	NR_CONTENTION_STATS
};
7880bd58feSJeremy Fitzhardinge 
7980bd58feSJeremy Fitzhardinge 
80994025caSJeremy Fitzhardinge #ifdef CONFIG_XEN_DEBUG_FS
#define HISTO_BUCKETS	30
/* Aggregated slow-path statistics, exposed read-only via debugfs. */
static struct xen_spinlock_stats
{
	u32 contention_stats[NR_CONTENTION_STATS];
	/* log2 histogram of blocked time; last slot is the overflow bucket */
	u32 histo_spin_blocked[HISTO_BUCKETS+1];
	u64 time_blocked;	/* total blocked time, clocksource units */
} spinlock_stats;

/* Written non-zero via debugfs to request a one-shot reset of the stats. */
static u8 zero_stats;
90994025caSJeremy Fitzhardinge 
91994025caSJeremy Fitzhardinge static inline void check_zero(void)
92994025caSJeremy Fitzhardinge {
9380bd58feSJeremy Fitzhardinge 	u8 ret;
94d6abfdb2SRaghavendra K T 	u8 old = READ_ONCE(zero_stats);
9580bd58feSJeremy Fitzhardinge 	if (unlikely(old)) {
9680bd58feSJeremy Fitzhardinge 		ret = cmpxchg(&zero_stats, old, 0);
9780bd58feSJeremy Fitzhardinge 		/* This ensures only one fellow resets the stat */
9880bd58feSJeremy Fitzhardinge 		if (ret == old)
99994025caSJeremy Fitzhardinge 			memset(&spinlock_stats, 0, sizeof(spinlock_stats));
100994025caSJeremy Fitzhardinge 	}
101994025caSJeremy Fitzhardinge }
102994025caSJeremy Fitzhardinge 
/* Bump one contention counter, honouring a pending zero_stats request first. */
static inline void add_stats(enum xen_contention_stat var, u32 val)
{
	check_zero();
	spinlock_stats.contention_stats[var] += val;
}
108994025caSJeremy Fitzhardinge 
/* Timestamp (Xen clocksource) taken when a CPU starts blocking. */
static inline u64 spin_time_start(void)
{
	return xen_clocksource_read();
}
113994025caSJeremy Fitzhardinge 
114994025caSJeremy Fitzhardinge static void __spin_time_accum(u64 delta, u32 *array)
115994025caSJeremy Fitzhardinge {
116994025caSJeremy Fitzhardinge 	unsigned index = ilog2(delta);
117994025caSJeremy Fitzhardinge 
118994025caSJeremy Fitzhardinge 	check_zero();
119994025caSJeremy Fitzhardinge 
120994025caSJeremy Fitzhardinge 	if (index < HISTO_BUCKETS)
121994025caSJeremy Fitzhardinge 		array[index]++;
122994025caSJeremy Fitzhardinge 	else
123994025caSJeremy Fitzhardinge 		array[HISTO_BUCKETS]++;
124994025caSJeremy Fitzhardinge }
125994025caSJeremy Fitzhardinge 
/*
 * Account the time since @start (from spin_time_start()) as blocked
 * time, both in the log2 histogram and in the running total.
 * NOTE(review): delta is truncated to u32 — assumes a single blocking
 * episode never exceeds ~2^32 clocksource units; confirm.
 */
static inline void spin_time_accum_blocked(u64 start)
{
	u32 delta = xen_clocksource_read() - start;

	__spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
	spinlock_stats.time_blocked += delta;
}
#else  /* !CONFIG_XEN_DEBUG_FS */
/* Stats gathering compiled out: all helpers collapse to no-ops. */
static inline void add_stats(enum xen_contention_stat var, u32 val)
{
}

/* No stats: no timestamp needed, return a dummy start time. */
static inline u64 spin_time_start(void)
{
	return 0;
}

/* No-op when stats gathering is compiled out. */
static inline void spin_time_accum_blocked(u64 start)
{
}
#endif  /* CONFIG_XEN_DEBUG_FS */
147d5de8841SJeremy Fitzhardinge 
/* Per-CPU record of which (lock, ticket) this CPU is blocked on. */
struct xen_lock_waiting {
	struct arch_spinlock *lock;	/* non-NULL only while want is valid */
	__ticket_t want;		/* ticket this CPU is waiting for */
};

static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
/* Set of CPUs currently blocked in xen_lock_spinning(); scanned by unlockers. */
static cpumask_t waiting_cpus;
155d5de8841SJeremy Fitzhardinge 
/*
 * Ticket-lock slow path: advertise (lock, want) in this CPU's
 * lock_waiting slot, mark the lock as contended, then block in
 * xen_poll_irq() until the unlocker (or any interrupt) kicks us.
 * The barrier protocol below is ordering-critical — see the inline
 * comments before changing anything.
 */
__visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
	int irq = __this_cpu_read(lock_kicker_irq);
	struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting);
	int cpu = smp_processor_id();
	u64 start;
	__ticket_t head;
	unsigned long flags;

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1)
		return;

	start = spin_time_start();

	/*
	 * Make sure an interrupt handler can't upset things in a
	 * partially setup state.
	 */
	local_irq_save(flags);
	/*
	 * We don't really care if we're overwriting some other
	 * (lock,want) pair, as that would mean that we're currently
	 * in an interrupt context, and the outer context had
	 * interrupts enabled.  That has already kicked the VCPU out
	 * of xen_poll_irq(), so it will just return spuriously and
	 * retry with newly setup (lock,want).
	 *
	 * The ordering protocol on this is that the "lock" pointer
	 * may only be set non-NULL if the "want" ticket is correct.
	 * If we're updating "want", we must first clear "lock".
	 */
	w->lock = NULL;
	smp_wmb();
	w->want = want;
	smp_wmb();
	w->lock = lock;

	/* This uses set_bit, which is atomic and therefore a barrier */
	cpumask_set_cpu(cpu, &waiting_cpus);
	add_stats(TAKEN_SLOW, 1);

	/* clear pending */
	xen_clear_irq_pending(irq);

	/* Only check lock once pending cleared */
	barrier();

	/*
	 * Mark entry to slowpath before doing the pickup test to make
	 * sure we don't deadlock with an unlocker.
	 */
	__ticket_enter_slowpath(lock);

	/* make sure enter_slowpath, which is atomic, does not cross the read */
	smp_mb__after_atomic();

	/*
	 * check again make sure it didn't become free while
	 * we weren't looking
	 */
	head = READ_ONCE(lock->tickets.head);
	if (__tickets_equal(head, want)) {
		add_stats(TAKEN_SLOW_PICKUP, 1);
		goto out;
	}

	/* Allow interrupts while blocked */
	local_irq_restore(flags);

	/*
	 * If an interrupt happens here, it will leave the wakeup irq
	 * pending, which will cause xen_poll_irq() to return
	 * immediately.
	 */

	/* Block until irq becomes pending (or perhaps a spurious wakeup) */
	xen_poll_irq(irq);
	add_stats(TAKEN_SLOW_SPURIOUS, !xen_test_irq_pending(irq));

	local_irq_save(flags);

	/* account the wakeup against this CPU's kicker IRQ statistics */
	kstat_incr_irq_this_cpu(irq);
out:
	cpumask_clear_cpu(cpu, &waiting_cpus);
	w->lock = NULL;

	local_irq_restore(flags);

	spin_time_accum_blocked(start);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_lock_spinning);
248d5de8841SJeremy Fitzhardinge 
/*
 * Ticket-unlock slow path: find the CPU (if any) blocked waiting for
 * ticket @next on @lock and send it the wakeup IPI.  At most one CPU
 * can hold a given ticket, so we stop at the first match.
 */
static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
{
	int cpu;

	add_stats(RELEASED_SLOW, 1);

	for_each_cpu(cpu, &waiting_cpus) {
		const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu);

		/* Make sure we read lock before want */
		if (READ_ONCE(w->lock) == lock &&
		    READ_ONCE(w->want) == next) {
			add_stats(RELEASED_SLOW_KICKED, 1);
			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
			break;
		}
	}
}
267*e95e6f17SDavid Vrabel #endif /* CONFIG_QUEUED_SPINLOCK */
268d5de8841SJeremy Fitzhardinge 
/*
 * The kicker IRQ is only ever polled (xen_poll_irq()) or left pending
 * and is disabled right after binding, so actual delivery is a bug.
 */
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}
274d5de8841SJeremy Fitzhardinge 
275148f9bb8SPaul Gortmaker void xen_init_lock_cpu(int cpu)
276d5de8841SJeremy Fitzhardinge {
277d5de8841SJeremy Fitzhardinge 	int irq;
278354e7b76SKonrad Rzeszutek Wilk 	char *name;
279d5de8841SJeremy Fitzhardinge 
2803310bbedSKonrad Rzeszutek Wilk 	if (!xen_pvspin)
2813310bbedSKonrad Rzeszutek Wilk 		return;
2823310bbedSKonrad Rzeszutek Wilk 
283cb91f8f4SKonrad Rzeszutek Wilk 	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
284cb9c6f15SKonrad Rzeszutek Wilk 	     cpu, per_cpu(lock_kicker_irq, cpu));
285cb9c6f15SKonrad Rzeszutek Wilk 
286d5de8841SJeremy Fitzhardinge 	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
287d5de8841SJeremy Fitzhardinge 	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
288d5de8841SJeremy Fitzhardinge 				     cpu,
289d5de8841SJeremy Fitzhardinge 				     dummy_handler,
2909d71cee6SMichael Opdenacker 				     IRQF_PERCPU|IRQF_NOBALANCING,
291d5de8841SJeremy Fitzhardinge 				     name,
292d5de8841SJeremy Fitzhardinge 				     NULL);
293d5de8841SJeremy Fitzhardinge 
294d5de8841SJeremy Fitzhardinge 	if (irq >= 0) {
295d5de8841SJeremy Fitzhardinge 		disable_irq(irq); /* make sure it's never delivered */
296d5de8841SJeremy Fitzhardinge 		per_cpu(lock_kicker_irq, cpu) = irq;
297354e7b76SKonrad Rzeszutek Wilk 		per_cpu(irq_name, cpu) = name;
298d5de8841SJeremy Fitzhardinge 	}
299d5de8841SJeremy Fitzhardinge 
300d5de8841SJeremy Fitzhardinge 	printk("cpu %d spinlock event irq %d\n", cpu, irq);
301d5de8841SJeremy Fitzhardinge }
302d5de8841SJeremy Fitzhardinge 
303d68d82afSAlex Nixon void xen_uninit_lock_cpu(int cpu)
304d68d82afSAlex Nixon {
3053310bbedSKonrad Rzeszutek Wilk 	if (!xen_pvspin)
3063310bbedSKonrad Rzeszutek Wilk 		return;
3073310bbedSKonrad Rzeszutek Wilk 
308d68d82afSAlex Nixon 	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
309cb9c6f15SKonrad Rzeszutek Wilk 	per_cpu(lock_kicker_irq, cpu) = -1;
310354e7b76SKonrad Rzeszutek Wilk 	kfree(per_cpu(irq_name, cpu));
311354e7b76SKonrad Rzeszutek Wilk 	per_cpu(irq_name, cpu) = NULL;
312d68d82afSAlex Nixon }
313d68d82afSAlex Nixon 
314b8fa70b5SJeremy Fitzhardinge 
/*
 * Our init of PV spinlocks is split in two init functions due to us
 * using paravirt patching and jump labels patching and having to do
 * all of this before SMP code is invoked.
 *
 * The paravirt patching needs to be done _before_ the alternative asm code
 * is started, otherwise we would not patch the core kernel code.
 */
void __init xen_init_spinlocks(void)
{

	if (!xen_pvspin) {
		printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
		return;
	}
	printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
#ifdef CONFIG_QUEUED_SPINLOCK
	/* qspinlock: install the generic PV slowpath plus Xen wait/kick hooks */
	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = xen_qlock_wait;
	pv_lock_ops.kick = xen_qlock_kick;
#else
	/* ticket locks: hook the lock/unlock slow paths directly */
	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
	pv_lock_ops.unlock_kick = xen_unlock_kick;
#endif
}
342994025caSJeremy Fitzhardinge 
343a945928eSKonrad Rzeszutek Wilk /*
344a945928eSKonrad Rzeszutek Wilk  * While the jump_label init code needs to happend _after_ the jump labels are
345a945928eSKonrad Rzeszutek Wilk  * enabled and before SMP is started. Hence we use pre-SMP initcall level
346a945928eSKonrad Rzeszutek Wilk  * init. We cannot do it in xen_init_spinlocks as that is done before
347a945928eSKonrad Rzeszutek Wilk  * jump labels are activated.
348a945928eSKonrad Rzeszutek Wilk  */
349a945928eSKonrad Rzeszutek Wilk static __init int xen_init_spinlocks_jump(void)
350a945928eSKonrad Rzeszutek Wilk {
351a945928eSKonrad Rzeszutek Wilk 	if (!xen_pvspin)
352a945928eSKonrad Rzeszutek Wilk 		return 0;
353a945928eSKonrad Rzeszutek Wilk 
354e0fc17a9SKonrad Rzeszutek Wilk 	if (!xen_domain())
355e0fc17a9SKonrad Rzeszutek Wilk 		return 0;
356e0fc17a9SKonrad Rzeszutek Wilk 
357a945928eSKonrad Rzeszutek Wilk 	static_key_slow_inc(&paravirt_ticketlocks_enabled);
358a945928eSKonrad Rzeszutek Wilk 	return 0;
359a945928eSKonrad Rzeszutek Wilk }
360a945928eSKonrad Rzeszutek Wilk early_initcall(xen_init_spinlocks_jump);
361a945928eSKonrad Rzeszutek Wilk 
/* "xen_nopvspin" on the command line disables the PV spinlock paths. */
static __init int xen_parse_nopvspin(char *arg)
{
	xen_pvspin = false;
	return 0;
}
early_param("xen_nopvspin", xen_parse_nopvspin);
368b8fa70b5SJeremy Fitzhardinge 
#if defined(CONFIG_XEN_DEBUG_FS) && !defined(CONFIG_QUEUED_SPINLOCK)

/* debugfs directory holding the ticket-lock contention statistics */
static struct dentry *d_spin_debug;

/*
 * Expose spinlock_stats under <xen debugfs root>/spinlocks: one file
 * per contention counter, the blocked-time total and histogram, plus
 * a writable "zero_stats" trigger (see check_zero()).
 */
static int __init xen_spinlock_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	if (!xen_pvspin)
		return 0;

	d_spin_debug = debugfs_create_dir("spinlocks", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW]);
	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);
	debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW_SPURIOUS]);

	debugfs_create_u32("released_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW]);
	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);

	debugfs_create_u64("time_blocked", 0444, d_spin_debug,
			   &spinlock_stats.time_blocked);

	debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
				spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

	return 0;
}
fs_initcall(xen_spinlock_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS && !CONFIG_QUEUED_SPINLOCK */
410