xref: /openbmc/linux/arch/x86/xen/spinlock.c (revision 1e696f63)
1d5de8841SJeremy Fitzhardinge /*
2d5de8841SJeremy Fitzhardinge  * Split spinlock implementation out into its own file, so it can be
3d5de8841SJeremy Fitzhardinge  * compiled in a FTRACE-compatible way.
4d5de8841SJeremy Fitzhardinge  */
5d5de8841SJeremy Fitzhardinge #include <linux/kernel_stat.h>
6d5de8841SJeremy Fitzhardinge #include <linux/spinlock.h>
7994025caSJeremy Fitzhardinge #include <linux/debugfs.h>
8994025caSJeremy Fitzhardinge #include <linux/log2.h>
9d5de8841SJeremy Fitzhardinge 
10d5de8841SJeremy Fitzhardinge #include <asm/paravirt.h>
11d5de8841SJeremy Fitzhardinge 
12d5de8841SJeremy Fitzhardinge #include <xen/interface/xen.h>
13d5de8841SJeremy Fitzhardinge #include <xen/events.h>
14d5de8841SJeremy Fitzhardinge 
15d5de8841SJeremy Fitzhardinge #include "xen-ops.h"
16994025caSJeremy Fitzhardinge #include "debugfs.h"
17994025caSJeremy Fitzhardinge 
18994025caSJeremy Fitzhardinge #ifdef CONFIG_XEN_DEBUG_FS
19994025caSJeremy Fitzhardinge static struct xen_spinlock_stats
20994025caSJeremy Fitzhardinge {
21994025caSJeremy Fitzhardinge 	u64 taken;
22994025caSJeremy Fitzhardinge 	u32 taken_slow;
23994025caSJeremy Fitzhardinge 	u32 taken_slow_nested;
24994025caSJeremy Fitzhardinge 	u32 taken_slow_pickup;
25994025caSJeremy Fitzhardinge 	u32 taken_slow_spurious;
261e696f63SJeremy Fitzhardinge 	u32 taken_slow_irqenable;
27994025caSJeremy Fitzhardinge 
28994025caSJeremy Fitzhardinge 	u64 released;
29994025caSJeremy Fitzhardinge 	u32 released_slow;
30994025caSJeremy Fitzhardinge 	u32 released_slow_kicked;
31994025caSJeremy Fitzhardinge 
32994025caSJeremy Fitzhardinge #define HISTO_BUCKETS	20
33994025caSJeremy Fitzhardinge 	u32 histo_spin_fast[HISTO_BUCKETS+1];
34994025caSJeremy Fitzhardinge 	u32 histo_spin[HISTO_BUCKETS+1];
35994025caSJeremy Fitzhardinge 
36994025caSJeremy Fitzhardinge 	u64 spinning_time;
37994025caSJeremy Fitzhardinge 	u64 total_time;
38994025caSJeremy Fitzhardinge } spinlock_stats;
39994025caSJeremy Fitzhardinge 
/* Writing non-zero via debugfs requests a lazy reset of spinlock_stats. */
40994025caSJeremy Fitzhardinge static u8 zero_stats;
41994025caSJeremy Fitzhardinge 
/* Fast-path spin iterations before falling into the blocking slow path;
   tunable at runtime through debugfs ("timeout").  TIMEOUT == ~0 would
   mean "spin forever" (checked in __xen_spin_lock). */
42994025caSJeremy Fitzhardinge static unsigned lock_timeout = 1 << 10;
43994025caSJeremy Fitzhardinge #define TIMEOUT lock_timeout
44994025caSJeremy Fitzhardinge 
/*
 * Perform the deferred stats reset requested via the "zero_stats"
 * debugfs file.  Called before every counter update; racy against
 * concurrent updaters, which is acceptable for debug counters.
 */
45994025caSJeremy Fitzhardinge static inline void check_zero(void)
46994025caSJeremy Fitzhardinge {
47994025caSJeremy Fitzhardinge 	if (unlikely(zero_stats)) {
48994025caSJeremy Fitzhardinge 		memset(&spinlock_stats, 0, sizeof(spinlock_stats));
49994025caSJeremy Fitzhardinge 		zero_stats = 0;
50994025caSJeremy Fitzhardinge 	}
51994025caSJeremy Fitzhardinge }
52994025caSJeremy Fitzhardinge 
/* Bump a spinlock_stats member by val, honouring a pending zero request. */
53994025caSJeremy Fitzhardinge #define ADD_STATS(elem, val)			\
54994025caSJeremy Fitzhardinge 	do { check_zero(); spinlock_stats.elem += (val); } while(0)
55994025caSJeremy Fitzhardinge 
/* Timestamp the start of a spin interval using the Xen clocksource. */
56994025caSJeremy Fitzhardinge static inline u64 spin_time_start(void)
57994025caSJeremy Fitzhardinge {
58994025caSJeremy Fitzhardinge 	return xen_clocksource_read();
59994025caSJeremy Fitzhardinge }
60994025caSJeremy Fitzhardinge 
/*
 * Record a spin duration in a log2 histogram: bucket index is
 * ilog2(delta), with everything >= 2^HISTO_BUCKETS folded into the
 * final overflow bucket.
 */
61994025caSJeremy Fitzhardinge static void __spin_time_accum(u64 delta, u32 *array)
62994025caSJeremy Fitzhardinge {
63994025caSJeremy Fitzhardinge 	unsigned index = ilog2(delta);
64994025caSJeremy Fitzhardinge 
65994025caSJeremy Fitzhardinge 	check_zero();
66994025caSJeremy Fitzhardinge 
67994025caSJeremy Fitzhardinge 	if (index < HISTO_BUCKETS)
68994025caSJeremy Fitzhardinge 		array[index]++;
69994025caSJeremy Fitzhardinge 	else
70994025caSJeremy Fitzhardinge 		array[HISTO_BUCKETS]++;
71994025caSJeremy Fitzhardinge }
72994025caSJeremy Fitzhardinge 
/*
 * Account one fast-path (busy-wait) spin interval: histogram the
 * elapsed time and add it to the total spinning_time.
 * Note delta is truncated to u32 before accumulation.
 */
73994025caSJeremy Fitzhardinge static inline void spin_time_accum_fast(u64 start)
74994025caSJeremy Fitzhardinge {
75994025caSJeremy Fitzhardinge 	u32 delta = xen_clocksource_read() - start;
76994025caSJeremy Fitzhardinge 
77994025caSJeremy Fitzhardinge 	__spin_time_accum(delta, spinlock_stats.histo_spin_fast);
78994025caSJeremy Fitzhardinge 	spinlock_stats.spinning_time += delta;
79994025caSJeremy Fitzhardinge }
80994025caSJeremy Fitzhardinge 
/*
 * Account the whole lock-acquisition interval (fast path plus any slow
 * blocking path) into the "total" histogram and total_time counter.
 */
81994025caSJeremy Fitzhardinge static inline void spin_time_accum(u64 start)
82994025caSJeremy Fitzhardinge {
83994025caSJeremy Fitzhardinge 	u32 delta = xen_clocksource_read() - start;
84994025caSJeremy Fitzhardinge 
85994025caSJeremy Fitzhardinge 	__spin_time_accum(delta, spinlock_stats.histo_spin);
86994025caSJeremy Fitzhardinge 	spinlock_stats.total_time += delta;
87994025caSJeremy Fitzhardinge }
88994025caSJeremy Fitzhardinge #else  /* !CONFIG_XEN_DEBUG_FS */
/* Without CONFIG_XEN_DEBUG_FS: fixed timeout, and the stats/timing
   helpers compile away to nothing. */
89994025caSJeremy Fitzhardinge #define TIMEOUT			(1 << 10)
90994025caSJeremy Fitzhardinge #define ADD_STATS(elem, val)	do { (void)(val); } while(0)
91994025caSJeremy Fitzhardinge 
92994025caSJeremy Fitzhardinge static inline u64 spin_time_start(void)
93994025caSJeremy Fitzhardinge {
94994025caSJeremy Fitzhardinge 	return 0;
95994025caSJeremy Fitzhardinge }
96994025caSJeremy Fitzhardinge 
97994025caSJeremy Fitzhardinge static inline void spin_time_accum_fast(u64 start)
98994025caSJeremy Fitzhardinge {
99994025caSJeremy Fitzhardinge }
100994025caSJeremy Fitzhardinge static inline void spin_time_accum(u64 start)
101994025caSJeremy Fitzhardinge {
102994025caSJeremy Fitzhardinge }
103994025caSJeremy Fitzhardinge #endif  /* CONFIG_XEN_DEBUG_FS */
104d5de8841SJeremy Fitzhardinge 
/*
 * Xen's view of a raw_spinlock: the code below casts raw_spinlock
 * pointers to this layout, so it must overlay the generic lock word.
 */
105d5de8841SJeremy Fitzhardinge struct xen_spinlock {
106d5de8841SJeremy Fitzhardinge 	unsigned char lock;		/* 0 -> free; 1 -> locked */
107d5de8841SJeremy Fitzhardinge 	unsigned short spinners;	/* count of waiting cpus */
108d5de8841SJeremy Fitzhardinge };
109d5de8841SJeremy Fitzhardinge 
/* pv_lock_ops.spin_is_locked: non-zero lock byte means held. */
110d5de8841SJeremy Fitzhardinge static int xen_spin_is_locked(struct raw_spinlock *lock)
111d5de8841SJeremy Fitzhardinge {
112d5de8841SJeremy Fitzhardinge 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
113d5de8841SJeremy Fitzhardinge 
114d5de8841SJeremy Fitzhardinge 	return xl->lock != 0;
115d5de8841SJeremy Fitzhardinge }
116d5de8841SJeremy Fitzhardinge 
/* pv_lock_ops.spin_is_contended: approximate — see comment below. */
117d5de8841SJeremy Fitzhardinge static int xen_spin_is_contended(struct raw_spinlock *lock)
118d5de8841SJeremy Fitzhardinge {
119d5de8841SJeremy Fitzhardinge 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
120d5de8841SJeremy Fitzhardinge 
121d5de8841SJeremy Fitzhardinge 	/* Not strictly true; this is only the count of contended
122d5de8841SJeremy Fitzhardinge 	   lock-takers entering the slow path. */
123d5de8841SJeremy Fitzhardinge 	return xl->spinners != 0;
124d5de8841SJeremy Fitzhardinge }
125d5de8841SJeremy Fitzhardinge 
/*
 * pv_lock_ops.spin_trylock: atomically exchange 1 into the lock byte.
 * If the old value was 0 we took the lock; otherwise it was already
 * held and the xchg left it unchanged (still 1).  xchgb is implicitly
 * locked on x86; the "memory" clobber orders it as an acquire barrier.
 */
126d5de8841SJeremy Fitzhardinge static int xen_spin_trylock(struct raw_spinlock *lock)
127d5de8841SJeremy Fitzhardinge {
128d5de8841SJeremy Fitzhardinge 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
129d5de8841SJeremy Fitzhardinge 	u8 old = 1;
130d5de8841SJeremy Fitzhardinge 
131d5de8841SJeremy Fitzhardinge 	asm("xchgb %b0,%1"
132d5de8841SJeremy Fitzhardinge 	    : "+q" (old), "+m" (xl->lock) : : "memory");
133d5de8841SJeremy Fitzhardinge 
134d5de8841SJeremy Fitzhardinge 	return old == 0;
135d5de8841SJeremy Fitzhardinge }
136d5de8841SJeremy Fitzhardinge 
/* Per-cpu IPI event-channel irq used to wake a blocked spinner
   (-1 until xen_init_lock_cpu() binds it). */
137d5de8841SJeremy Fitzhardinge static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
/* The lock this cpu is currently blocked on in the slow path (or NULL);
   scanned by xen_spin_unlock_slow() to pick a cpu to kick. */
138d5de8841SJeremy Fitzhardinge static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
139d5de8841SJeremy Fitzhardinge 
140168d2f46SJeremy Fitzhardinge /*
141168d2f46SJeremy Fitzhardinge  * Mark a cpu as interested in a lock.  Returns the CPU's previous
142168d2f46SJeremy Fitzhardinge  * lock of interest, in case we got preempted by an interrupt.
143168d2f46SJeremy Fitzhardinge  */
144168d2f46SJeremy Fitzhardinge static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
145d5de8841SJeremy Fitzhardinge {
146168d2f46SJeremy Fitzhardinge 	struct xen_spinlock *prev;
147168d2f46SJeremy Fitzhardinge 
	/* Save the outer lock (non-NULL only when we interrupted another
	   slow-path spin on this cpu) so it can be restored on exit. */
148168d2f46SJeremy Fitzhardinge 	prev = __get_cpu_var(lock_spinners);
149d5de8841SJeremy Fitzhardinge 	__get_cpu_var(lock_spinners) = xl;
150168d2f46SJeremy Fitzhardinge 
	/* The unlocker reads spinners first, then scans lock_spinners;
	   publish our lock of interest before raising the count. */
151d5de8841SJeremy Fitzhardinge 	wmb();			/* set lock of interest before count */
152168d2f46SJeremy Fitzhardinge 
153d5de8841SJeremy Fitzhardinge 	asm(LOCK_PREFIX " incw %0"
154d5de8841SJeremy Fitzhardinge 	    : "+m" (xl->spinners) : : "memory");
155168d2f46SJeremy Fitzhardinge 
156168d2f46SJeremy Fitzhardinge 	return prev;
157d5de8841SJeremy Fitzhardinge }
158d5de8841SJeremy Fitzhardinge 
159168d2f46SJeremy Fitzhardinge /*
160168d2f46SJeremy Fitzhardinge  * Mark a cpu as no longer interested in a lock.  Restores previous
161168d2f46SJeremy Fitzhardinge  * lock of interest (NULL for none).
162168d2f46SJeremy Fitzhardinge  */
163168d2f46SJeremy Fitzhardinge static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
164d5de8841SJeremy Fitzhardinge {
	/* Mirror image of spinning_lock(): drop the count, then restore
	   lock_spinners, with a barrier keeping that order visible. */
165d5de8841SJeremy Fitzhardinge 	asm(LOCK_PREFIX " decw %0"
166d5de8841SJeremy Fitzhardinge 	    : "+m" (xl->spinners) : : "memory");
167168d2f46SJeremy Fitzhardinge 	wmb();			/* decrement count before restoring lock */
168168d2f46SJeremy Fitzhardinge 	__get_cpu_var(lock_spinners) = prev;
170d5de8841SJeremy Fitzhardinge 
/*
 * Slow path: instead of burning cpu, block the vcpu on a per-cpu event
 * channel until the unlocker kicks us.  Returns non-zero if the lock
 * was acquired here, 0 if the caller should go back to spinning (also
 * the case when the kicker irq isn't set up yet).  If @irq_enable,
 * interrupts are allowed while we are blocked; they are restored to
 * the entry state before returning.
 */
1711e696f63SJeremy Fitzhardinge static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
172d5de8841SJeremy Fitzhardinge {
173d5de8841SJeremy Fitzhardinge 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
174168d2f46SJeremy Fitzhardinge 	struct xen_spinlock *prev;
175d5de8841SJeremy Fitzhardinge 	int irq = __get_cpu_var(lock_kicker_irq);
176d5de8841SJeremy Fitzhardinge 	int ret;
1771e696f63SJeremy Fitzhardinge 	unsigned long flags;
178d5de8841SJeremy Fitzhardinge 
179d5de8841SJeremy Fitzhardinge 	/* If kicker interrupts not initialized yet, just spin */
180d5de8841SJeremy Fitzhardinge 	if (irq == -1)
181d5de8841SJeremy Fitzhardinge 		return 0;
182d5de8841SJeremy Fitzhardinge 
183d5de8841SJeremy Fitzhardinge 	/* announce we're spinning */
	/* prev != NULL means we interrupted another slow-path spin on
	   this cpu (nested lock taken from an interrupt handler). */
184168d2f46SJeremy Fitzhardinge 	prev = spinning_lock(xl);
185d5de8841SJeremy Fitzhardinge 
1861e696f63SJeremy Fitzhardinge 	flags = __raw_local_save_flags();
1871e696f63SJeremy Fitzhardinge 	if (irq_enable) {
1881e696f63SJeremy Fitzhardinge 		ADD_STATS(taken_slow_irqenable, 1);
1891e696f63SJeremy Fitzhardinge 		raw_local_irq_enable();
1901e696f63SJeremy Fitzhardinge 	}
1911e696f63SJeremy Fitzhardinge 
192994025caSJeremy Fitzhardinge 	ADD_STATS(taken_slow, 1);
193994025caSJeremy Fitzhardinge 	ADD_STATS(taken_slow_nested, prev != NULL);
194994025caSJeremy Fitzhardinge 
195168d2f46SJeremy Fitzhardinge 	do {
196d5de8841SJeremy Fitzhardinge 		/* clear pending */
197d5de8841SJeremy Fitzhardinge 		xen_clear_irq_pending(irq);
198d5de8841SJeremy Fitzhardinge 
199d5de8841SJeremy Fitzhardinge 		/* check again make sure it didn't become free while
200d5de8841SJeremy Fitzhardinge 		   we weren't looking  */
201d5de8841SJeremy Fitzhardinge 		ret = xen_spin_trylock(lock);
202168d2f46SJeremy Fitzhardinge 		if (ret) {
203994025caSJeremy Fitzhardinge 			ADD_STATS(taken_slow_pickup, 1);
204994025caSJeremy Fitzhardinge 
205168d2f46SJeremy Fitzhardinge 			/*
206168d2f46SJeremy Fitzhardinge 			 * If we interrupted another spinlock while it
207168d2f46SJeremy Fitzhardinge 			 * was blocking, make sure it doesn't block
208168d2f46SJeremy Fitzhardinge 			 * without rechecking the lock.
209168d2f46SJeremy Fitzhardinge 			 */
210168d2f46SJeremy Fitzhardinge 			if (prev != NULL)
211168d2f46SJeremy Fitzhardinge 				xen_set_irq_pending(irq);
212d5de8841SJeremy Fitzhardinge 			goto out;
213168d2f46SJeremy Fitzhardinge 		}
214d5de8841SJeremy Fitzhardinge 
215168d2f46SJeremy Fitzhardinge 		/*
216168d2f46SJeremy Fitzhardinge 		 * Block until irq becomes pending.  If we're
217168d2f46SJeremy Fitzhardinge 		 * interrupted at this point (after the trylock but
218168d2f46SJeremy Fitzhardinge 		 * before entering the block), then the nested lock
219168d2f46SJeremy Fitzhardinge 		 * handler guarantees that the irq will be left
220168d2f46SJeremy Fitzhardinge 		 * pending if there's any chance the lock became free;
221168d2f46SJeremy Fitzhardinge 		 * xen_poll_irq() returns immediately if the irq is
222168d2f46SJeremy Fitzhardinge 		 * pending.
223168d2f46SJeremy Fitzhardinge 		 */
224d5de8841SJeremy Fitzhardinge 		xen_poll_irq(irq);
	/* A wakeup with the irq not pending is spurious: loop and retry. */
225994025caSJeremy Fitzhardinge 		ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
226168d2f46SJeremy Fitzhardinge 	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
227168d2f46SJeremy Fitzhardinge 
	/* Credit the kick to this cpu's irq stats (dummy_handler never runs). */
228d5de8841SJeremy Fitzhardinge 	kstat_this_cpu.irqs[irq]++;
229d5de8841SJeremy Fitzhardinge 
230d5de8841SJeremy Fitzhardinge out:
2311e696f63SJeremy Fitzhardinge 	raw_local_irq_restore(flags);
232168d2f46SJeremy Fitzhardinge 	unspinning_lock(xl, prev);
233d5de8841SJeremy Fitzhardinge 	return ret;
234d5de8841SJeremy Fitzhardinge }
235d5de8841SJeremy Fitzhardinge 
/*
 * Common lock body: spin on the lock byte for up to TIMEOUT iterations,
 * then fall into the blocking slow path; repeat until acquired.
 * @irq_enable is passed through to xen_spin_lock_slow() to let
 * interrupts in while blocked (used by xen_spin_lock_flags()).
 */
2361e696f63SJeremy Fitzhardinge static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
237d5de8841SJeremy Fitzhardinge {
238d5de8841SJeremy Fitzhardinge 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
239994025caSJeremy Fitzhardinge 	unsigned timeout;
240d5de8841SJeremy Fitzhardinge 	u8 oldval;
241994025caSJeremy Fitzhardinge 	u64 start_spin;
242994025caSJeremy Fitzhardinge 
243994025caSJeremy Fitzhardinge 	ADD_STATS(taken, 1);
244994025caSJeremy Fitzhardinge 
245994025caSJeremy Fitzhardinge 	start_spin = spin_time_start();
246d5de8841SJeremy Fitzhardinge 
247d5de8841SJeremy Fitzhardinge 	do {
248994025caSJeremy Fitzhardinge 		u64 start_spin_fast = spin_time_start();
249994025caSJeremy Fitzhardinge 
250994025caSJeremy Fitzhardinge 		timeout = TIMEOUT;
251d5de8841SJeremy Fitzhardinge 
		/*
		 * Fast path, equivalent to:
		 *   1: oldval = xchg(&xl->lock, 1);      // try to grab lock
		 *      if (oldval == 0) goto 3;          // got it
		 *   2: cpu_relax();                      // rep;nop
		 *      if (xl->lock == 0) goto 1;        // looks free: retry xchg
		 *      if (--timeout) goto 2;            // keep watching
		 *   3:                                   // acquired or timed out
		 * On exit, oldval == 0 iff we took the lock.
		 */
252d5de8841SJeremy Fitzhardinge 		asm("1: xchgb %1,%0\n"
253d5de8841SJeremy Fitzhardinge 		    "   testb %1,%1\n"
254d5de8841SJeremy Fitzhardinge 		    "   jz 3f\n"
255d5de8841SJeremy Fitzhardinge 		    "2: rep;nop\n"
256d5de8841SJeremy Fitzhardinge 		    "   cmpb $0,%0\n"
257d5de8841SJeremy Fitzhardinge 		    "   je 1b\n"
258d5de8841SJeremy Fitzhardinge 		    "   dec %2\n"
259d5de8841SJeremy Fitzhardinge 		    "   jnz 2b\n"
260d5de8841SJeremy Fitzhardinge 		    "3:\n"
261d5de8841SJeremy Fitzhardinge 		    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
262d5de8841SJeremy Fitzhardinge 		    : "1" (1)
263d5de8841SJeremy Fitzhardinge 		    : "memory");
264d5de8841SJeremy Fitzhardinge 
265994025caSJeremy Fitzhardinge 		spin_time_accum_fast(start_spin_fast);
2661e696f63SJeremy Fitzhardinge 
		/* Timed out while held: block unless TIMEOUT == ~0 (spin
		   forever) or the slow path actually acquired the lock. */
2671e696f63SJeremy Fitzhardinge 	} while (unlikely(oldval != 0 &&
2681e696f63SJeremy Fitzhardinge 			  (TIMEOUT == ~0 || !xen_spin_lock_slow(lock, irq_enable))));
269994025caSJeremy Fitzhardinge 
270994025caSJeremy Fitzhardinge 	spin_time_accum(start_spin);
271d5de8841SJeremy Fitzhardinge }
272d5de8841SJeremy Fitzhardinge 
/* pv_lock_ops.spin_lock: lock with interrupts kept as-is while blocking. */
2731e696f63SJeremy Fitzhardinge static void xen_spin_lock(struct raw_spinlock *lock)
2741e696f63SJeremy Fitzhardinge {
2751e696f63SJeremy Fitzhardinge 	__xen_spin_lock(lock, false);
2761e696f63SJeremy Fitzhardinge }
2771e696f63SJeremy Fitzhardinge 
/* pv_lock_ops.spin_lock_flags: if the caller's saved flags show
   interrupts were enabled, allow them while blocked in the slow path. */
2781e696f63SJeremy Fitzhardinge static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
2791e696f63SJeremy Fitzhardinge {
2801e696f63SJeremy Fitzhardinge 	__xen_spin_lock(lock, !raw_irqs_disabled_flags(flags));
2811e696f63SJeremy Fitzhardinge }
2821e696f63SJeremy Fitzhardinge 
/*
 * Slow unlock: some cpu is blocked on this lock.  Find one whose
 * lock_spinners points at it and send it the spinlock-kick IPI so it
 * re-checks the lock.  Only the first match is kicked; the others stay
 * blocked until a later unlock.
 */
283d5de8841SJeremy Fitzhardinge static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
284d5de8841SJeremy Fitzhardinge {
285d5de8841SJeremy Fitzhardinge 	int cpu;
286d5de8841SJeremy Fitzhardinge 
287994025caSJeremy Fitzhardinge 	ADD_STATS(released_slow, 1);
288994025caSJeremy Fitzhardinge 
289d5de8841SJeremy Fitzhardinge 	for_each_online_cpu(cpu) {
290d5de8841SJeremy Fitzhardinge 		/* XXX should mix up next cpu selection */
291d5de8841SJeremy Fitzhardinge 		if (per_cpu(lock_spinners, cpu) == xl) {
292994025caSJeremy Fitzhardinge 			ADD_STATS(released_slow_kicked, 1);
293d5de8841SJeremy Fitzhardinge 			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
294d5de8841SJeremy Fitzhardinge 			break;
295d5de8841SJeremy Fitzhardinge 		}
296d5de8841SJeremy Fitzhardinge 	}
297d5de8841SJeremy Fitzhardinge }
298d5de8841SJeremy Fitzhardinge 
/*
 * pv_lock_ops.spin_unlock: release the lock byte, then kick a blocked
 * waiter if the spinners count says anyone entered the slow path.
 * The spinners check after the store can race with a new slow-path
 * entrant; spinning_lock()/unspinning_lock() ordering plus the irq
 * re-pend logic in xen_spin_lock_slow() cover the lost-wakeup window.
 */
299d5de8841SJeremy Fitzhardinge static void xen_spin_unlock(struct raw_spinlock *lock)
300d5de8841SJeremy Fitzhardinge {
301d5de8841SJeremy Fitzhardinge 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
302d5de8841SJeremy Fitzhardinge 
303994025caSJeremy Fitzhardinge 	ADD_STATS(released, 1);
304994025caSJeremy Fitzhardinge 
305d5de8841SJeremy Fitzhardinge 	smp_wmb();		/* make sure no writes get moved after unlock */
306d5de8841SJeremy Fitzhardinge 	xl->lock = 0;		/* release lock */
307d5de8841SJeremy Fitzhardinge 
308d5de8841SJeremy Fitzhardinge 	/* make sure unlock happens before kick */
309d5de8841SJeremy Fitzhardinge 	barrier();
310d5de8841SJeremy Fitzhardinge 
311d5de8841SJeremy Fitzhardinge 	if (unlikely(xl->spinners))
312d5de8841SJeremy Fitzhardinge 		xen_spin_unlock_slow(xl);
313d5de8841SJeremy Fitzhardinge }
314d5de8841SJeremy Fitzhardinge 
/*
 * Handler for the spinlock-kick irq.  It must never actually run: the
 * irq stays disabled and is only consumed via xen_poll_irq() in the
 * slow path, so reaching here is a bug.
 */
315d5de8841SJeremy Fitzhardinge static irqreturn_t dummy_handler(int irq, void *dev_id)
316d5de8841SJeremy Fitzhardinge {
317d5de8841SJeremy Fitzhardinge 	BUG();
318d5de8841SJeremy Fitzhardinge 	return IRQ_HANDLED;
319d5de8841SJeremy Fitzhardinge }
320d5de8841SJeremy Fitzhardinge 
/*
 * Bind the per-cpu spinlock-kick IPI event channel for @cpu and stash
 * the resulting irq in lock_kicker_irq.  Until this runs, the slow
 * path is disabled on that cpu (irq == -1) and locks just spin.
 * NOTE(review): kasprintf() result is never freed and its NULL return
 * is not checked — presumably tolerated at boot/hotplug; confirm.
 */
321d5de8841SJeremy Fitzhardinge void __cpuinit xen_init_lock_cpu(int cpu)
322d5de8841SJeremy Fitzhardinge {
323d5de8841SJeremy Fitzhardinge 	int irq;
324d5de8841SJeremy Fitzhardinge 	const char *name;
325d5de8841SJeremy Fitzhardinge 
326d5de8841SJeremy Fitzhardinge 	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
327d5de8841SJeremy Fitzhardinge 	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
328d5de8841SJeremy Fitzhardinge 				     cpu,
329d5de8841SJeremy Fitzhardinge 				     dummy_handler,
330d5de8841SJeremy Fitzhardinge 				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
331d5de8841SJeremy Fitzhardinge 				     name,
332d5de8841SJeremy Fitzhardinge 				     NULL);
333d5de8841SJeremy Fitzhardinge 
334d5de8841SJeremy Fitzhardinge 	if (irq >= 0) {
335d5de8841SJeremy Fitzhardinge 		disable_irq(irq); /* make sure it's never delivered */
336d5de8841SJeremy Fitzhardinge 		per_cpu(lock_kicker_irq, cpu) = irq;
337d5de8841SJeremy Fitzhardinge 	}
338d5de8841SJeremy Fitzhardinge 
339d5de8841SJeremy Fitzhardinge 	printk("cpu %d spinlock event irq %d\n", cpu, irq);
340d5de8841SJeremy Fitzhardinge }
341d5de8841SJeremy Fitzhardinge 
/* Install the Xen implementations into the paravirt spinlock ops. */
342d5de8841SJeremy Fitzhardinge void __init xen_init_spinlocks(void)
343d5de8841SJeremy Fitzhardinge {
344d5de8841SJeremy Fitzhardinge 	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
345d5de8841SJeremy Fitzhardinge 	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
346d5de8841SJeremy Fitzhardinge 	pv_lock_ops.spin_lock = xen_spin_lock;
3471e696f63SJeremy Fitzhardinge 	pv_lock_ops.spin_lock_flags = xen_spin_lock_flags;
348d5de8841SJeremy Fitzhardinge 	pv_lock_ops.spin_trylock = xen_spin_trylock;
349d5de8841SJeremy Fitzhardinge 	pv_lock_ops.spin_unlock = xen_spin_unlock;
350d5de8841SJeremy Fitzhardinge }
351994025caSJeremy Fitzhardinge 
352994025caSJeremy Fitzhardinge #ifdef CONFIG_XEN_DEBUG_FS
353994025caSJeremy Fitzhardinge 
/* debugfs directory for the spinlock stats ("<xen>/spinlocks"). */
354994025caSJeremy Fitzhardinge static struct dentry *d_spin_debug;
355994025caSJeremy Fitzhardinge 
/*
 * Expose spinlock_stats (and the zero_stats/timeout controls) under
 * the Xen debugfs root.  Return values of the individual
 * debugfs_create_* calls are deliberately ignored — missing files are
 * not fatal.
 */
356994025caSJeremy Fitzhardinge static int __init xen_spinlock_debugfs(void)
357994025caSJeremy Fitzhardinge {
358994025caSJeremy Fitzhardinge 	struct dentry *d_xen = xen_init_debugfs();
359994025caSJeremy Fitzhardinge 
360994025caSJeremy Fitzhardinge 	if (d_xen == NULL)
361994025caSJeremy Fitzhardinge 		return -ENOMEM;
362994025caSJeremy Fitzhardinge 
363994025caSJeremy Fitzhardinge 	d_spin_debug = debugfs_create_dir("spinlocks", d_xen);
364994025caSJeremy Fitzhardinge 
	/* writable controls */
365994025caSJeremy Fitzhardinge 	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);
366994025caSJeremy Fitzhardinge 
367994025caSJeremy Fitzhardinge 	debugfs_create_u32("timeout", 0644, d_spin_debug, &lock_timeout);
368994025caSJeremy Fitzhardinge 
	/* read-only counters */
369994025caSJeremy Fitzhardinge 	debugfs_create_u64("taken", 0444, d_spin_debug, &spinlock_stats.taken);
370994025caSJeremy Fitzhardinge 	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
371994025caSJeremy Fitzhardinge 			   &spinlock_stats.taken_slow);
372994025caSJeremy Fitzhardinge 	debugfs_create_u32("taken_slow_nested", 0444, d_spin_debug,
373994025caSJeremy Fitzhardinge 			   &spinlock_stats.taken_slow_nested);
374994025caSJeremy Fitzhardinge 	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
375994025caSJeremy Fitzhardinge 			   &spinlock_stats.taken_slow_pickup);
376994025caSJeremy Fitzhardinge 	debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
377994025caSJeremy Fitzhardinge 			   &spinlock_stats.taken_slow_spurious);
3781e696f63SJeremy Fitzhardinge 	debugfs_create_u32("taken_slow_irqenable", 0444, d_spin_debug,
3791e696f63SJeremy Fitzhardinge 			   &spinlock_stats.taken_slow_irqenable);
380994025caSJeremy Fitzhardinge 
381994025caSJeremy Fitzhardinge 	debugfs_create_u64("released", 0444, d_spin_debug, &spinlock_stats.released);
382994025caSJeremy Fitzhardinge 	debugfs_create_u32("released_slow", 0444, d_spin_debug,
383994025caSJeremy Fitzhardinge 			   &spinlock_stats.released_slow);
384994025caSJeremy Fitzhardinge 	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
385994025caSJeremy Fitzhardinge 			   &spinlock_stats.released_slow_kicked);
386994025caSJeremy Fitzhardinge 
387994025caSJeremy Fitzhardinge 	debugfs_create_u64("time_spinning", 0444, d_spin_debug,
388994025caSJeremy Fitzhardinge 			   &spinlock_stats.spinning_time);
389994025caSJeremy Fitzhardinge 	debugfs_create_u64("time_total", 0444, d_spin_debug,
390994025caSJeremy Fitzhardinge 			   &spinlock_stats.total_time);
391994025caSJeremy Fitzhardinge 
	/* log2 histograms of spin durations */
392994025caSJeremy Fitzhardinge 	xen_debugfs_create_u32_array("histo_total", 0444, d_spin_debug,
393994025caSJeremy Fitzhardinge 				     spinlock_stats.histo_spin, HISTO_BUCKETS + 1);
394994025caSJeremy Fitzhardinge 	xen_debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug,
395994025caSJeremy Fitzhardinge 				     spinlock_stats.histo_spin_fast, HISTO_BUCKETS + 1);
396994025caSJeremy Fitzhardinge 
397994025caSJeremy Fitzhardinge 	return 0;
398994025caSJeremy Fitzhardinge }
399994025caSJeremy Fitzhardinge fs_initcall(xen_spinlock_debugfs);
400994025caSJeremy Fitzhardinge 
401994025caSJeremy Fitzhardinge #endif	/* CONFIG_XEN_DEBUG_FS */
402