xref: /openbmc/linux/arch/x86/xen/spinlock.c (revision 707e59ba494372a90d245f18b0c78982caa88e48)
1d5de8841SJeremy Fitzhardinge /*
2d5de8841SJeremy Fitzhardinge  * Split spinlock implementation out into its own file, so it can be
3d5de8841SJeremy Fitzhardinge  * compiled in a FTRACE-compatible way.
4d5de8841SJeremy Fitzhardinge  */
5d5de8841SJeremy Fitzhardinge #include <linux/kernel_stat.h>
6d5de8841SJeremy Fitzhardinge #include <linux/spinlock.h>
7994025caSJeremy Fitzhardinge #include <linux/debugfs.h>
8994025caSJeremy Fitzhardinge #include <linux/log2.h>
95a0e3ad6STejun Heo #include <linux/gfp.h>
10354e7b76SKonrad Rzeszutek Wilk #include <linux/slab.h>
11d5de8841SJeremy Fitzhardinge 
12d5de8841SJeremy Fitzhardinge #include <asm/paravirt.h>
13d5de8841SJeremy Fitzhardinge 
14d5de8841SJeremy Fitzhardinge #include <xen/interface/xen.h>
15d5de8841SJeremy Fitzhardinge #include <xen/events.h>
16d5de8841SJeremy Fitzhardinge 
17d5de8841SJeremy Fitzhardinge #include "xen-ops.h"
18994025caSJeremy Fitzhardinge #include "debugfs.h"
19994025caSJeremy Fitzhardinge 
20e95e6f17SDavid Vrabel static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
21e95e6f17SDavid Vrabel static DEFINE_PER_CPU(char *, irq_name);
22e95e6f17SDavid Vrabel static bool xen_pvspin = true;
23e95e6f17SDavid Vrabel 
2462c7a1e9SIngo Molnar #ifdef CONFIG_QUEUED_SPINLOCKS
25e95e6f17SDavid Vrabel 
26e95e6f17SDavid Vrabel #include <asm/qspinlock.h>
27e95e6f17SDavid Vrabel 
/*
 * pv_lock_ops.kick callback for queued spinlocks: wake @cpu, which is
 * blocked in xen_qlock_wait(), by sending it the spinlock unlock IPI.
 */
28e95e6f17SDavid Vrabel static void xen_qlock_kick(int cpu)
29e95e6f17SDavid Vrabel {
30*707e59baSRoss Lagerwall 	int irq = per_cpu(lock_kicker_irq, cpu);
31*707e59baSRoss Lagerwall 
32*707e59baSRoss Lagerwall 	/* Don't kick if the target's kicker interrupt is not initialized. */
33*707e59baSRoss Lagerwall 	if (irq == -1)
34*707e59baSRoss Lagerwall 		return;
35*707e59baSRoss Lagerwall 
36e95e6f17SDavid Vrabel 	xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
37e95e6f17SDavid Vrabel }
38e95e6f17SDavid Vrabel 
39e95e6f17SDavid Vrabel /*
40e95e6f17SDavid Vrabel  * Halt the current CPU & release it back to the host
41e95e6f17SDavid Vrabel  */
/*
 * pv_lock_ops.wait callback: block until kicked (or a spurious wakeup).
 * @byte is the qspinlock byte we are waiting on and @val the value it
 * holds while we should keep waiting; simply returning lets the caller
 * re-examine the lock state.
 */
42e95e6f17SDavid Vrabel static void xen_qlock_wait(u8 *byte, u8 val)
43e95e6f17SDavid Vrabel {
44e95e6f17SDavid Vrabel 	int irq = __this_cpu_read(lock_kicker_irq);
45e95e6f17SDavid Vrabel 
46e95e6f17SDavid Vrabel 	/* If kicker interrupts not initialized yet, just spin */
47e95e6f17SDavid Vrabel 	if (irq == -1)
48e95e6f17SDavid Vrabel 		return;
49e95e6f17SDavid Vrabel 
50e95e6f17SDavid Vrabel 	/* clear pending */
51e95e6f17SDavid Vrabel 	xen_clear_irq_pending(irq);
52e95e6f17SDavid Vrabel 	barrier();
53e95e6f17SDavid Vrabel 
54e95e6f17SDavid Vrabel 	/*
55e95e6f17SDavid Vrabel 	 * We check the byte value after clearing pending IRQ to make sure
56e95e6f17SDavid Vrabel 	 * that we won't miss a wakeup event because of the clearing.
57e95e6f17SDavid Vrabel 	 *
58e95e6f17SDavid Vrabel 	 * The sync_clear_bit() call in xen_clear_irq_pending() is atomic.
59e95e6f17SDavid Vrabel 	 * So it is effectively a memory barrier for x86.
60e95e6f17SDavid Vrabel 	 */
61e95e6f17SDavid Vrabel 	if (READ_ONCE(*byte) != val)
62e95e6f17SDavid Vrabel 		return;
63e95e6f17SDavid Vrabel 
64e95e6f17SDavid Vrabel 	/*
65e95e6f17SDavid Vrabel 	 * If an interrupt happens here, it will leave the wakeup irq
66e95e6f17SDavid Vrabel 	 * pending, which will cause xen_poll_irq() to return
67e95e6f17SDavid Vrabel 	 * immediately.
68e95e6f17SDavid Vrabel 	 */
69e95e6f17SDavid Vrabel 
70e95e6f17SDavid Vrabel 	/* Block until irq becomes pending (or perhaps a spurious wakeup) */
71e95e6f17SDavid Vrabel 	xen_poll_irq(irq);
72e95e6f17SDavid Vrabel }
73e95e6f17SDavid Vrabel 
7462c7a1e9SIngo Molnar #else /* CONFIG_QUEUED_SPINLOCKS */
75e95e6f17SDavid Vrabel 
7680bd58feSJeremy Fitzhardinge enum xen_contention_stat {
7780bd58feSJeremy Fitzhardinge 	TAKEN_SLOW,
7880bd58feSJeremy Fitzhardinge 	TAKEN_SLOW_PICKUP,
7980bd58feSJeremy Fitzhardinge 	TAKEN_SLOW_SPURIOUS,
8080bd58feSJeremy Fitzhardinge 	RELEASED_SLOW,
8180bd58feSJeremy Fitzhardinge 	RELEASED_SLOW_KICKED,
8280bd58feSJeremy Fitzhardinge 	NR_CONTENTION_STATS
8380bd58feSJeremy Fitzhardinge };
8480bd58feSJeremy Fitzhardinge 
8580bd58feSJeremy Fitzhardinge 
86994025caSJeremy Fitzhardinge #ifdef CONFIG_XEN_DEBUG_FS
8780bd58feSJeremy Fitzhardinge #define HISTO_BUCKETS	30
88994025caSJeremy Fitzhardinge static struct xen_spinlock_stats
89994025caSJeremy Fitzhardinge {
9080bd58feSJeremy Fitzhardinge 	u32 contention_stats[NR_CONTENTION_STATS];
91f8eca41fSJeremy Fitzhardinge 	u32 histo_spin_blocked[HISTO_BUCKETS+1];
92f8eca41fSJeremy Fitzhardinge 	u64 time_blocked;
93994025caSJeremy Fitzhardinge } spinlock_stats;
94994025caSJeremy Fitzhardinge 
95994025caSJeremy Fitzhardinge static u8 zero_stats;
96994025caSJeremy Fitzhardinge 
/*
 * Honour a pending stats-reset request: zero_stats is set via the
 * writable "zero_stats" debugfs file; the cmpxchg() guarantees that only
 * one CPU actually performs the memset even under concurrent callers.
 */
97994025caSJeremy Fitzhardinge static inline void check_zero(void)
98994025caSJeremy Fitzhardinge {
9980bd58feSJeremy Fitzhardinge 	u8 ret;
100d6abfdb2SRaghavendra K T 	u8 old = READ_ONCE(zero_stats);
10180bd58feSJeremy Fitzhardinge 	if (unlikely(old)) {
10280bd58feSJeremy Fitzhardinge 		ret = cmpxchg(&zero_stats, old, 0);
10380bd58feSJeremy Fitzhardinge 		/* This ensures only one fellow resets the stat */
10480bd58feSJeremy Fitzhardinge 		if (ret == old)
105994025caSJeremy Fitzhardinge 			memset(&spinlock_stats, 0, sizeof(spinlock_stats));
106994025caSJeremy Fitzhardinge 	}
107994025caSJeremy Fitzhardinge }
108994025caSJeremy Fitzhardinge 
/* Bump one contention counter, first honouring any pending reset request. */
10980bd58feSJeremy Fitzhardinge static inline void add_stats(enum xen_contention_stat var, u32 val)
11080bd58feSJeremy Fitzhardinge {
11180bd58feSJeremy Fitzhardinge 	check_zero();
11280bd58feSJeremy Fitzhardinge 	spinlock_stats.contention_stats[var] += val;
11380bd58feSJeremy Fitzhardinge }
114994025caSJeremy Fitzhardinge 
/* Snapshot the Xen clocksource; paired with spin_time_accum_blocked(). */
115994025caSJeremy Fitzhardinge static inline u64 spin_time_start(void)
116994025caSJeremy Fitzhardinge {
117994025caSJeremy Fitzhardinge 	return xen_clocksource_read();
118994025caSJeremy Fitzhardinge }
119994025caSJeremy Fitzhardinge 
/*
 * Account one blocked-time sample into a log2-bucketed histogram; all
 * samples beyond the last bucket land in the overflow slot HISTO_BUCKETS.
 */
120994025caSJeremy Fitzhardinge static void __spin_time_accum(u64 delta, u32 *array)
121994025caSJeremy Fitzhardinge {
122994025caSJeremy Fitzhardinge 	unsigned index = ilog2(delta);
123994025caSJeremy Fitzhardinge 
124994025caSJeremy Fitzhardinge 	check_zero();
125994025caSJeremy Fitzhardinge 
126994025caSJeremy Fitzhardinge 	if (index < HISTO_BUCKETS)
127994025caSJeremy Fitzhardinge 		array[index]++;
128994025caSJeremy Fitzhardinge 	else
129994025caSJeremy Fitzhardinge 		array[HISTO_BUCKETS]++;
130994025caSJeremy Fitzhardinge }
131994025caSJeremy Fitzhardinge 
/*
 * Record how long we were blocked since @start (a spin_time_start()
 * snapshot): both into the histogram and the running time_blocked total.
 */
132f8eca41fSJeremy Fitzhardinge static inline void spin_time_accum_blocked(u64 start)
133f8eca41fSJeremy Fitzhardinge {
134f8eca41fSJeremy Fitzhardinge 	u32 delta = xen_clocksource_read() - start;
135f8eca41fSJeremy Fitzhardinge 
136f8eca41fSJeremy Fitzhardinge 	__spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
137f8eca41fSJeremy Fitzhardinge 	spinlock_stats.time_blocked += delta;
138994025caSJeremy Fitzhardinge }
139994025caSJeremy Fitzhardinge #else  /* !CONFIG_XEN_DEBUG_FS */
/* Stats disabled (!CONFIG_XEN_DEBUG_FS): counting is a no-op. */
14080bd58feSJeremy Fitzhardinge static inline void add_stats(enum xen_contention_stat var, u32 val)
14180bd58feSJeremy Fitzhardinge {
14280bd58feSJeremy Fitzhardinge }
143994025caSJeremy Fitzhardinge 
/* Stats disabled: no timestamp needed, return a dummy value. */
144994025caSJeremy Fitzhardinge static inline u64 spin_time_start(void)
145994025caSJeremy Fitzhardinge {
146994025caSJeremy Fitzhardinge 	return 0;
147994025caSJeremy Fitzhardinge }
148994025caSJeremy Fitzhardinge 
/* Stats disabled: blocked-time accounting is a no-op. */
149f8eca41fSJeremy Fitzhardinge static inline void spin_time_accum_blocked(u64 start)
150994025caSJeremy Fitzhardinge {
151994025caSJeremy Fitzhardinge }
152994025caSJeremy Fitzhardinge #endif  /* CONFIG_XEN_DEBUG_FS */
153d5de8841SJeremy Fitzhardinge 
/*
 * Per-CPU record of the ticket lock this CPU is currently blocked on
 * (lock == NULL means "not waiting").  See the ordering protocol comment
 * in xen_lock_spinning(): "lock" may only be non-NULL while "want" holds
 * the correct ticket.
 */
15480bd58feSJeremy Fitzhardinge struct xen_lock_waiting {
15580bd58feSJeremy Fitzhardinge 	struct arch_spinlock *lock;
15680bd58feSJeremy Fitzhardinge 	__ticket_t want;
157d5de8841SJeremy Fitzhardinge };
158d5de8841SJeremy Fitzhardinge 
15980bd58feSJeremy Fitzhardinge static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
/* Set of CPUs currently blocked in xen_lock_spinning(); scanned by the unlocker. */
16080bd58feSJeremy Fitzhardinge static cpumask_t waiting_cpus;
161d5de8841SJeremy Fitzhardinge 
/*
 * Ticket-lock slowpath (pv_lock_ops.lock_spinning): publish (@lock, @want)
 * in this CPU's lock_waiting slot, mark the lock as contended, and block
 * in xen_poll_irq() until xen_unlock_kick() on another CPU (or a spurious
 * event) wakes us.  Interrupts are disabled for the whole setup/teardown;
 * they are only re-enabled while actually blocked.
 */
162dd41f818SAndi Kleen __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
163d5de8841SJeremy Fitzhardinge {
164780f36d8SChristoph Lameter 	int irq = __this_cpu_read(lock_kicker_irq);
16589cbc767SChristoph Lameter 	struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting);
16680bd58feSJeremy Fitzhardinge 	int cpu = smp_processor_id();
167f8eca41fSJeremy Fitzhardinge 	u64 start;
168d6abfdb2SRaghavendra K T 	__ticket_t head;
16980bd58feSJeremy Fitzhardinge 	unsigned long flags;
170d5de8841SJeremy Fitzhardinge 
171d5de8841SJeremy Fitzhardinge 	/* If kicker interrupts not initialized yet, just spin */
172d5de8841SJeremy Fitzhardinge 	if (irq == -1)
17380bd58feSJeremy Fitzhardinge 		return;
174d5de8841SJeremy Fitzhardinge 
175f8eca41fSJeremy Fitzhardinge 	start = spin_time_start();
176f8eca41fSJeremy Fitzhardinge 
17780bd58feSJeremy Fitzhardinge 	/*
17880bd58feSJeremy Fitzhardinge 	 * Make sure an interrupt handler can't upset things in a
17980bd58feSJeremy Fitzhardinge 	 * partially setup state.
18080bd58feSJeremy Fitzhardinge 	 */
18180bd58feSJeremy Fitzhardinge 	local_irq_save(flags);
1821ed7bf5fSJeremy Fitzhardinge 	/*
1831ed7bf5fSJeremy Fitzhardinge 	 * We don't really care if we're overwriting some other
1841ed7bf5fSJeremy Fitzhardinge 	 * (lock,want) pair, as that would mean that we're currently
1851ed7bf5fSJeremy Fitzhardinge 	 * in an interrupt context, and the outer context had
1861ed7bf5fSJeremy Fitzhardinge 	 * interrupts enabled.  That has already kicked the VCPU out
1871ed7bf5fSJeremy Fitzhardinge 	 * of xen_poll_irq(), so it will just return spuriously and
1881ed7bf5fSJeremy Fitzhardinge 	 * retry with newly setup (lock,want).
1891ed7bf5fSJeremy Fitzhardinge 	 *
1901ed7bf5fSJeremy Fitzhardinge 	 * The ordering protocol on this is that the "lock" pointer
1911ed7bf5fSJeremy Fitzhardinge 	 * may only be set non-NULL if the "want" ticket is correct.
1921ed7bf5fSJeremy Fitzhardinge 	 * If we're updating "want", we must first clear "lock".
1931ed7bf5fSJeremy Fitzhardinge 	 */
1941ed7bf5fSJeremy Fitzhardinge 	w->lock = NULL;
1951ed7bf5fSJeremy Fitzhardinge 	smp_wmb();
19680bd58feSJeremy Fitzhardinge 	w->want = want;
19780bd58feSJeremy Fitzhardinge 	smp_wmb();
19880bd58feSJeremy Fitzhardinge 	w->lock = lock;
199994025caSJeremy Fitzhardinge 
20080bd58feSJeremy Fitzhardinge 	/* This uses set_bit, which atomic and therefore a barrier */
20180bd58feSJeremy Fitzhardinge 	cpumask_set_cpu(cpu, &waiting_cpus);
20280bd58feSJeremy Fitzhardinge 	add_stats(TAKEN_SLOW, 1);
2034d576b57SJeremy Fitzhardinge 
204d5de8841SJeremy Fitzhardinge 	/* clear pending */
205d5de8841SJeremy Fitzhardinge 	xen_clear_irq_pending(irq);
206d5de8841SJeremy Fitzhardinge 
20780bd58feSJeremy Fitzhardinge 	/* Only check lock once pending cleared */
20880bd58feSJeremy Fitzhardinge 	barrier();
20980bd58feSJeremy Fitzhardinge 
2101ed7bf5fSJeremy Fitzhardinge 	/*
2111ed7bf5fSJeremy Fitzhardinge 	 * Mark entry to slowpath before doing the pickup test to make
2121ed7bf5fSJeremy Fitzhardinge 	 * sure we don't deadlock with an unlocker.
2131ed7bf5fSJeremy Fitzhardinge 	 */
21496f853eaSJeremy Fitzhardinge 	__ticket_enter_slowpath(lock);
21596f853eaSJeremy Fitzhardinge 
216d6abfdb2SRaghavendra K T 	/* make sure enter_slowpath, which is atomic does not cross the read */
217d6abfdb2SRaghavendra K T 	smp_mb__after_atomic();
218d6abfdb2SRaghavendra K T 
2191ed7bf5fSJeremy Fitzhardinge 	/*
2201ed7bf5fSJeremy Fitzhardinge 	 * check again make sure it didn't become free while
2211ed7bf5fSJeremy Fitzhardinge 	 * we weren't looking
2221ed7bf5fSJeremy Fitzhardinge 	 */
223d6abfdb2SRaghavendra K T 	head = READ_ONCE(lock->tickets.head);
224d6abfdb2SRaghavendra K T 	if (__tickets_equal(head, want)) {
22580bd58feSJeremy Fitzhardinge 		add_stats(TAKEN_SLOW_PICKUP, 1);
226d5de8841SJeremy Fitzhardinge 		goto out;
227168d2f46SJeremy Fitzhardinge 	}
2281ed7bf5fSJeremy Fitzhardinge 
2291ed7bf5fSJeremy Fitzhardinge 	/* Allow interrupts while blocked */
2301ed7bf5fSJeremy Fitzhardinge 	local_irq_restore(flags);
2311ed7bf5fSJeremy Fitzhardinge 
2321ed7bf5fSJeremy Fitzhardinge 	/*
2331ed7bf5fSJeremy Fitzhardinge 	 * If an interrupt happens here, it will leave the wakeup irq
2341ed7bf5fSJeremy Fitzhardinge 	 * pending, which will cause xen_poll_irq() to return
2351ed7bf5fSJeremy Fitzhardinge 	 * immediately.
2361ed7bf5fSJeremy Fitzhardinge 	 */
2371ed7bf5fSJeremy Fitzhardinge 
23880bd58feSJeremy Fitzhardinge 	/* Block until irq becomes pending (or perhaps a spurious wakeup) */
239d5de8841SJeremy Fitzhardinge 	xen_poll_irq(irq);
24080bd58feSJeremy Fitzhardinge 	add_stats(TAKEN_SLOW_SPURIOUS, !xen_test_irq_pending(irq));
2411ed7bf5fSJeremy Fitzhardinge 
2421ed7bf5fSJeremy Fitzhardinge 	local_irq_save(flags);
2431ed7bf5fSJeremy Fitzhardinge 
244770144eaSThomas Gleixner 	kstat_incr_irq_this_cpu(irq);
245d5de8841SJeremy Fitzhardinge out:
	/* Retract our waiter record before returning (lock first, per protocol). */
24680bd58feSJeremy Fitzhardinge 	cpumask_clear_cpu(cpu, &waiting_cpus);
24780bd58feSJeremy Fitzhardinge 	w->lock = NULL;
2481ed7bf5fSJeremy Fitzhardinge 
24980bd58feSJeremy Fitzhardinge 	local_irq_restore(flags);
2501ed7bf5fSJeremy Fitzhardinge 
251f8eca41fSJeremy Fitzhardinge 	spin_time_accum_blocked(start);
252d5de8841SJeremy Fitzhardinge }
253354714ddSJeremy Fitzhardinge PV_CALLEE_SAVE_REGS_THUNK(xen_lock_spinning);
254d5de8841SJeremy Fitzhardinge 
/*
 * Ticket-lock unlock slowpath (pv_lock_ops.unlock_kick): scan the CPUs
 * blocked in xen_lock_spinning() and kick the one waiting on @lock with
 * ticket @next.  At most one CPU can match, hence the break.
 */
25580bd58feSJeremy Fitzhardinge static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
256d5de8841SJeremy Fitzhardinge {
257d5de8841SJeremy Fitzhardinge 	int cpu;
258d5de8841SJeremy Fitzhardinge 
25980bd58feSJeremy Fitzhardinge 	add_stats(RELEASED_SLOW, 1);
260994025caSJeremy Fitzhardinge 
26180bd58feSJeremy Fitzhardinge 	for_each_cpu(cpu, &waiting_cpus) {
26280bd58feSJeremy Fitzhardinge 		const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu);
26380bd58feSJeremy Fitzhardinge 
2641ed7bf5fSJeremy Fitzhardinge 		/* Make sure we read lock before want */
265d6abfdb2SRaghavendra K T 		if (READ_ONCE(w->lock) == lock &&
266d6abfdb2SRaghavendra K T 		    READ_ONCE(w->want) == next) {
26780bd58feSJeremy Fitzhardinge 			add_stats(RELEASED_SLOW_KICKED, 1);
268d5de8841SJeremy Fitzhardinge 			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
26980bd58feSJeremy Fitzhardinge 			break;
270d5de8841SJeremy Fitzhardinge 		}
271d5de8841SJeremy Fitzhardinge 	}
272d5de8841SJeremy Fitzhardinge }
27362c7a1e9SIngo Molnar #endif /* CONFIG_QUEUED_SPINLOCKS */
274d5de8841SJeremy Fitzhardinge 
/*
 * The kicker IRQ is only ever consumed via xen_poll_irq(); it is kept
 * disabled in xen_init_lock_cpu(), so normal delivery is a bug.
 */
275d5de8841SJeremy Fitzhardinge static irqreturn_t dummy_handler(int irq, void *dev_id)
276d5de8841SJeremy Fitzhardinge {
277d5de8841SJeremy Fitzhardinge 	BUG();
278d5de8841SJeremy Fitzhardinge 	return IRQ_HANDLED;
279d5de8841SJeremy Fitzhardinge }
280d5de8841SJeremy Fitzhardinge 
/*
 * Per-CPU bring-up: bind the XEN_SPIN_UNLOCK_VECTOR IPI for @cpu and
 * record the resulting irq in lock_kicker_irq (left disabled — it is
 * only polled, never delivered).  No-op when PV spinlocks are disabled.
 * The kasprintf'd name is owned by per_cpu(irq_name) and freed in
 * xen_uninit_lock_cpu().
 */
281148f9bb8SPaul Gortmaker void xen_init_lock_cpu(int cpu)
282d5de8841SJeremy Fitzhardinge {
283d5de8841SJeremy Fitzhardinge 	int irq;
284354e7b76SKonrad Rzeszutek Wilk 	char *name;
285d5de8841SJeremy Fitzhardinge 
2863310bbedSKonrad Rzeszutek Wilk 	if (!xen_pvspin)
2873310bbedSKonrad Rzeszutek Wilk 		return;
2883310bbedSKonrad Rzeszutek Wilk 
289cb91f8f4SKonrad Rzeszutek Wilk 	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
290cb9c6f15SKonrad Rzeszutek Wilk 	     cpu, per_cpu(lock_kicker_irq, cpu));
291cb9c6f15SKonrad Rzeszutek Wilk 
292d5de8841SJeremy Fitzhardinge 	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
293d5de8841SJeremy Fitzhardinge 	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
294d5de8841SJeremy Fitzhardinge 				     cpu,
295d5de8841SJeremy Fitzhardinge 				     dummy_handler,
2969d71cee6SMichael Opdenacker 				     IRQF_PERCPU|IRQF_NOBALANCING,
297d5de8841SJeremy Fitzhardinge 				     name,
298d5de8841SJeremy Fitzhardinge 				     NULL);
299d5de8841SJeremy Fitzhardinge 
300d5de8841SJeremy Fitzhardinge 	if (irq >= 0) {
301d5de8841SJeremy Fitzhardinge 		disable_irq(irq); /* make sure it's never delivered */
302d5de8841SJeremy Fitzhardinge 		per_cpu(lock_kicker_irq, cpu) = irq;
303354e7b76SKonrad Rzeszutek Wilk 		per_cpu(irq_name, cpu) = name;
304d5de8841SJeremy Fitzhardinge 	}
305d5de8841SJeremy Fitzhardinge 
306d5de8841SJeremy Fitzhardinge 	printk("cpu %d spinlock event irq %d\n", cpu, irq);
307d5de8841SJeremy Fitzhardinge }
308d5de8841SJeremy Fitzhardinge 
/*
 * Per-CPU teardown (e.g. CPU offline): unbind the kicker IPI, mark the
 * slot unused (-1) and free the irq name allocated in xen_init_lock_cpu().
 */
309d68d82afSAlex Nixon void xen_uninit_lock_cpu(int cpu)
310d68d82afSAlex Nixon {
3113310bbedSKonrad Rzeszutek Wilk 	if (!xen_pvspin)
3123310bbedSKonrad Rzeszutek Wilk 		return;
3133310bbedSKonrad Rzeszutek Wilk 
314d68d82afSAlex Nixon 	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
315cb9c6f15SKonrad Rzeszutek Wilk 	per_cpu(lock_kicker_irq, cpu) = -1;
316354e7b76SKonrad Rzeszutek Wilk 	kfree(per_cpu(irq_name, cpu));
317354e7b76SKonrad Rzeszutek Wilk 	per_cpu(irq_name, cpu) = NULL;
318d68d82afSAlex Nixon }
319d68d82afSAlex Nixon 
320b8fa70b5SJeremy Fitzhardinge 
321a945928eSKonrad Rzeszutek Wilk /*
322a945928eSKonrad Rzeszutek Wilk  * Our init of PV spinlocks is split in two init functions due to us
323a945928eSKonrad Rzeszutek Wilk  * using paravirt patching and jump labels patching and having to do
324a945928eSKonrad Rzeszutek Wilk  * all of this before SMP code is invoked.
325a945928eSKonrad Rzeszutek Wilk  *
326a945928eSKonrad Rzeszutek Wilk  * The paravirt patching needs to be done _before_ the alternative asm code
327a945928eSKonrad Rzeszutek Wilk  * is started, otherwise we would not patch the core kernel code.
328a945928eSKonrad Rzeszutek Wilk  */
329d5de8841SJeremy Fitzhardinge void __init xen_init_spinlocks(void)
330d5de8841SJeremy Fitzhardinge {
33170dd4998SKonrad Rzeszutek Wilk 
332b8fa70b5SJeremy Fitzhardinge 	if (!xen_pvspin) {
333b8fa70b5SJeremy Fitzhardinge 		printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
334b8fa70b5SJeremy Fitzhardinge 		return;
335b8fa70b5SJeremy Fitzhardinge 	}
336e0fc17a9SKonrad Rzeszutek Wilk 	printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
	/* Queued-spinlock kernels: plug in the generic pv qspinlock slowpath
	 * plus the Xen-specific wait/kick callbacks defined above. */
33762c7a1e9SIngo Molnar #ifdef CONFIG_QUEUED_SPINLOCKS
338e95e6f17SDavid Vrabel 	__pv_init_lock_hash();
339e95e6f17SDavid Vrabel 	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
340e95e6f17SDavid Vrabel 	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
341e95e6f17SDavid Vrabel 	pv_lock_ops.wait = xen_qlock_wait;
342e95e6f17SDavid Vrabel 	pv_lock_ops.kick = xen_qlock_kick;
	/* Ticket-lock kernels: install the slow lock/unlock-kick pair. */
343e95e6f17SDavid Vrabel #else
344354714ddSJeremy Fitzhardinge 	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
34580bd58feSJeremy Fitzhardinge 	pv_lock_ops.unlock_kick = xen_unlock_kick;
346e95e6f17SDavid Vrabel #endif
347d5de8841SJeremy Fitzhardinge }
348994025caSJeremy Fitzhardinge 
349a945928eSKonrad Rzeszutek Wilk /*
350a945928eSKonrad Rzeszutek Wilk  * While the jump_label init code needs to happend _after_ the jump labels are
351a945928eSKonrad Rzeszutek Wilk  * enabled and before SMP is started. Hence we use pre-SMP initcall level
352a945928eSKonrad Rzeszutek Wilk  * init. We cannot do it in xen_init_spinlocks as that is done before
353a945928eSKonrad Rzeszutek Wilk  * jump labels are activated.
354a945928eSKonrad Rzeszutek Wilk  */
355a945928eSKonrad Rzeszutek Wilk static __init int xen_init_spinlocks_jump(void)
356a945928eSKonrad Rzeszutek Wilk {
357a945928eSKonrad Rzeszutek Wilk 	if (!xen_pvspin)
358a945928eSKonrad Rzeszutek Wilk 		return 0;
359a945928eSKonrad Rzeszutek Wilk 
	/* Also bail when not actually running under Xen. */
360e0fc17a9SKonrad Rzeszutek Wilk 	if (!xen_domain())
361e0fc17a9SKonrad Rzeszutek Wilk 		return 0;
362e0fc17a9SKonrad Rzeszutek Wilk 
	/* Flip the static key that enables the paravirt ticketlock fast path. */
363a945928eSKonrad Rzeszutek Wilk 	static_key_slow_inc(&paravirt_ticketlocks_enabled);
364a945928eSKonrad Rzeszutek Wilk 	return 0;
365a945928eSKonrad Rzeszutek Wilk }
366a945928eSKonrad Rzeszutek Wilk early_initcall(xen_init_spinlocks_jump);
367a945928eSKonrad Rzeszutek Wilk 
/* Kernel command-line "xen_nopvspin": opt out of PV spinlocks entirely. */
368b8fa70b5SJeremy Fitzhardinge static __init int xen_parse_nopvspin(char *arg)
369b8fa70b5SJeremy Fitzhardinge {
370b8fa70b5SJeremy Fitzhardinge 	xen_pvspin = false;
371b8fa70b5SJeremy Fitzhardinge 	return 0;
372b8fa70b5SJeremy Fitzhardinge }
373b8fa70b5SJeremy Fitzhardinge early_param("xen_nopvspin", xen_parse_nopvspin);
374b8fa70b5SJeremy Fitzhardinge 
37562c7a1e9SIngo Molnar #if defined(CONFIG_XEN_DEBUG_FS) && !defined(CONFIG_QUEUED_SPINLOCKS)
376994025caSJeremy Fitzhardinge 
377994025caSJeremy Fitzhardinge static struct dentry *d_spin_debug;
378994025caSJeremy Fitzhardinge 
/*
 * Expose the ticket-lock contention counters, blocked-time total and
 * histogram under <xen debugfs root>/spinlocks.  "zero_stats" is writable
 * and requests a one-shot reset (see check_zero()).
 */
379994025caSJeremy Fitzhardinge static int __init xen_spinlock_debugfs(void)
380994025caSJeremy Fitzhardinge {
381994025caSJeremy Fitzhardinge 	struct dentry *d_xen = xen_init_debugfs();
382994025caSJeremy Fitzhardinge 
383994025caSJeremy Fitzhardinge 	if (d_xen == NULL)
384994025caSJeremy Fitzhardinge 		return -ENOMEM;
385994025caSJeremy Fitzhardinge 
3863310bbedSKonrad Rzeszutek Wilk 	if (!xen_pvspin)
3873310bbedSKonrad Rzeszutek Wilk 		return 0;
3883310bbedSKonrad Rzeszutek Wilk 
389994025caSJeremy Fitzhardinge 	d_spin_debug = debugfs_create_dir("spinlocks", d_xen);
390994025caSJeremy Fitzhardinge 
391994025caSJeremy Fitzhardinge 	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);
392994025caSJeremy Fitzhardinge 
393994025caSJeremy Fitzhardinge 	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
39480bd58feSJeremy Fitzhardinge 			   &spinlock_stats.contention_stats[TAKEN_SLOW]);
395994025caSJeremy Fitzhardinge 	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
39680bd58feSJeremy Fitzhardinge 			   &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);
397994025caSJeremy Fitzhardinge 	debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
39880bd58feSJeremy Fitzhardinge 			   &spinlock_stats.contention_stats[TAKEN_SLOW_SPURIOUS]);
399994025caSJeremy Fitzhardinge 
400994025caSJeremy Fitzhardinge 	debugfs_create_u32("released_slow", 0444, d_spin_debug,
40180bd58feSJeremy Fitzhardinge 			   &spinlock_stats.contention_stats[RELEASED_SLOW]);
402994025caSJeremy Fitzhardinge 	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
40380bd58feSJeremy Fitzhardinge 			   &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);
404994025caSJeremy Fitzhardinge 
405f8eca41fSJeremy Fitzhardinge 	debugfs_create_u64("time_blocked", 0444, d_spin_debug,
406f8eca41fSJeremy Fitzhardinge 			   &spinlock_stats.time_blocked);
407994025caSJeremy Fitzhardinge 
4089fe2a701SSrivatsa Vaddagiri 	debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
409f8eca41fSJeremy Fitzhardinge 				spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);
410994025caSJeremy Fitzhardinge 
411994025caSJeremy Fitzhardinge 	return 0;
412994025caSJeremy Fitzhardinge }
413994025caSJeremy Fitzhardinge fs_initcall(xen_spinlock_debugfs);
414994025caSJeremy Fitzhardinge 
415994025caSJeremy Fitzhardinge #endif	/* CONFIG_XEN_DEBUG_FS */
416