xref: /openbmc/linux/arch/x86/xen/spinlock.c (revision 168d2f46)
1d5de8841SJeremy Fitzhardinge /*
2d5de8841SJeremy Fitzhardinge  * Split spinlock implementation out into its own file, so it can be
3d5de8841SJeremy Fitzhardinge  * compiled in a FTRACE-compatible way.
4d5de8841SJeremy Fitzhardinge  */
#include <linux/kernel_stat.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"
14d5de8841SJeremy Fitzhardinge 
/*
 * Layout overlaid onto the generic raw_spinlock storage by the
 * xen_spin_* functions (each one casts its raw_spinlock argument to
 * this): a lock byte plus a count of cpus waiting in the slow path.
 */
struct xen_spinlock {
	unsigned char lock;		/* 0 -> free; 1 -> locked */
	unsigned short spinners;	/* count of waiting cpus */
};
19d5de8841SJeremy Fitzhardinge 
20d5de8841SJeremy Fitzhardinge static int xen_spin_is_locked(struct raw_spinlock *lock)
21d5de8841SJeremy Fitzhardinge {
22d5de8841SJeremy Fitzhardinge 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
23d5de8841SJeremy Fitzhardinge 
24d5de8841SJeremy Fitzhardinge 	return xl->lock != 0;
25d5de8841SJeremy Fitzhardinge }
26d5de8841SJeremy Fitzhardinge 
27d5de8841SJeremy Fitzhardinge static int xen_spin_is_contended(struct raw_spinlock *lock)
28d5de8841SJeremy Fitzhardinge {
29d5de8841SJeremy Fitzhardinge 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
30d5de8841SJeremy Fitzhardinge 
31d5de8841SJeremy Fitzhardinge 	/* Not strictly true; this is only the count of contended
32d5de8841SJeremy Fitzhardinge 	   lock-takers entering the slow path. */
33d5de8841SJeremy Fitzhardinge 	return xl->spinners != 0;
34d5de8841SJeremy Fitzhardinge }
35d5de8841SJeremy Fitzhardinge 
/*
 * Try to take the lock with a single atomic exchange of the lock
 * byte.  Returns 1 if we acquired it (the previous value was 0),
 * 0 if it was already held.  xchg is implicitly locked on x86, so
 * no LOCK_PREFIX is required.
 */
static int xen_spin_trylock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	u8 old = 1;

	/* Swap 1 into the lock byte; 'old' receives the prior value. */
	asm("xchgb %b0,%1"
	    : "+q" (old), "+m" (xl->lock) : : "memory");

	return old == 0;
}
46d5de8841SJeremy Fitzhardinge 
/* Per-cpu event-channel irq used to kick a cpu blocked in xen_poll_irq();
   -1 until xen_init_lock_cpu() has run for that cpu. */
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
/* The lock (if any) this cpu is currently slow-path spinning on; scanned
   by xen_spin_unlock_slow() to pick a waiter to kick. */
static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
49d5de8841SJeremy Fitzhardinge 
/*
 * Mark a cpu as interested in a lock.  Returns the CPU's previous
 * lock of interest, in case we got preempted by an interrupt.
 */
static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
{
	struct xen_spinlock *prev;

	/* Save any lock an interrupted outer context was spinning on,
	   so it can be restored by unspinning_lock(). */
	prev = __get_cpu_var(lock_spinners);
	__get_cpu_var(lock_spinners) = xl;

	wmb();			/* set lock of interest before count */

	/* Atomically bump the waiter count so unlockers know to kick. */
	asm(LOCK_PREFIX " incw %0"
	    : "+m" (xl->spinners) : : "memory");

	return prev;
}
68d5de8841SJeremy Fitzhardinge 
/*
 * Mark a cpu as no longer interested in a lock.  Restores previous
 * lock of interest (NULL for none).
 */
static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
{
	/* Atomically drop the waiter count first ... */
	asm(LOCK_PREFIX " decw %0"
	    : "+m" (xl->spinners) : : "memory");
	wmb();			/* decrement count before restoring lock */
	/* ... then restore whatever lock the interrupted context was on. */
	__get_cpu_var(lock_spinners) = prev;
}
80d5de8841SJeremy Fitzhardinge 
/*
 * Slow path: block in the hypervisor (via xen_poll_irq) until an
 * unlocker kicks our per-cpu irq, then retry the lock.  Returns 1 if
 * we acquired the lock here, 0 if the caller should keep spinning
 * (including when the kicker irq isn't set up yet).
 */
static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	struct xen_spinlock *prev;
	int irq = __get_cpu_var(lock_kicker_irq);
	int ret;

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1)
		return 0;

	/* announce we're spinning */
	prev = spinning_lock(xl);

	do {
		/* clear pending */
		xen_clear_irq_pending(irq);

		/* check again make sure it didn't become free while
		   we weren't looking  */
		ret = xen_spin_trylock(lock);
		if (ret) {
			/*
			 * If we interrupted another spinlock while it
			 * was blocking, make sure it doesn't block
			 * without rechecking the lock.
			 */
			if (prev != NULL)
				xen_set_irq_pending(irq);
			goto out;
		}

		/*
		 * Block until irq becomes pending.  If we're
		 * interrupted at this point (after the trylock but
		 * before entering the block), then the nested lock
		 * handler guarantees that the irq will be left
		 * pending if there's any chance the lock became free;
		 * xen_poll_irq() returns immediately if the irq is
		 * pending.
		 */
		xen_poll_irq(irq);
	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */

	/* Account the kick as an interrupt on this cpu. */
	kstat_this_cpu.irqs[irq]++;

out:
	unspinning_lock(xl, prev);
	return ret;
}
131d5de8841SJeremy Fitzhardinge 
/*
 * Acquire the lock: spin on the lock byte for up to 2^10 iterations,
 * and fall into the hypervisor-blocking slow path each time the
 * budget runs out without success.
 */
static void xen_spin_lock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	int timeout;
	u8 oldval;

	do {
		timeout = 1 << 10;

		/*
		 * 1: try to grab the lock with xchg; a zero result
		 *    means it was free -> done (jump to 3).
		 * 2: otherwise relax (rep;nop == pause) while watching
		 *    the byte; retry the xchg when it reads 0, or fall
		 *    out once the timeout counter hits zero (oldval
		 *    left nonzero -> slow path below).
		 */
		asm("1: xchgb %1,%0\n"
		    "   testb %1,%1\n"
		    "   jz 3f\n"
		    "2: rep;nop\n"
		    "   cmpb $0,%0\n"
		    "   je 1b\n"
		    "   dec %2\n"
		    "   jnz 2b\n"
		    "3:\n"
		    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
		    : "1" (1)
		    : "memory");

	} while (unlikely(oldval != 0 && !xen_spin_lock_slow(lock)));
}
156d5de8841SJeremy Fitzhardinge 
157d5de8841SJeremy Fitzhardinge static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
158d5de8841SJeremy Fitzhardinge {
159d5de8841SJeremy Fitzhardinge 	int cpu;
160d5de8841SJeremy Fitzhardinge 
161d5de8841SJeremy Fitzhardinge 	for_each_online_cpu(cpu) {
162d5de8841SJeremy Fitzhardinge 		/* XXX should mix up next cpu selection */
163d5de8841SJeremy Fitzhardinge 		if (per_cpu(lock_spinners, cpu) == xl) {
164d5de8841SJeremy Fitzhardinge 			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
165d5de8841SJeremy Fitzhardinge 			break;
166d5de8841SJeremy Fitzhardinge 		}
167d5de8841SJeremy Fitzhardinge 	}
168d5de8841SJeremy Fitzhardinge }
169d5de8841SJeremy Fitzhardinge 
/*
 * Release the lock by clearing the lock byte, then kick a slow-path
 * waiter (if any) via IPI.
 */
static void xen_spin_unlock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	smp_wmb();		/* make sure no writes get moved after unlock */
	xl->lock = 0;		/* release lock */

	/* make sure unlock happens before kick */
	barrier();

	/*
	 * NOTE(review): barrier() is compiler-only; the store to
	 * xl->lock above and the load of xl->spinners below could
	 * still be reordered by the CPU (StoreLoad), which might miss
	 * a waiter that registers concurrently.  Confirm whether a
	 * full mb() is required here.
	 */
	if (unlikely(xl->spinners))
		xen_spin_unlock_slow(xl);
}
183d5de8841SJeremy Fitzhardinge 
184d5de8841SJeremy Fitzhardinge static irqreturn_t dummy_handler(int irq, void *dev_id)
185d5de8841SJeremy Fitzhardinge {
186d5de8841SJeremy Fitzhardinge 	BUG();
187d5de8841SJeremy Fitzhardinge 	return IRQ_HANDLED;
188d5de8841SJeremy Fitzhardinge }
189d5de8841SJeremy Fitzhardinge 
190d5de8841SJeremy Fitzhardinge void __cpuinit xen_init_lock_cpu(int cpu)
191d5de8841SJeremy Fitzhardinge {
192d5de8841SJeremy Fitzhardinge 	int irq;
193d5de8841SJeremy Fitzhardinge 	const char *name;
194d5de8841SJeremy Fitzhardinge 
195d5de8841SJeremy Fitzhardinge 	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
196d5de8841SJeremy Fitzhardinge 	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
197d5de8841SJeremy Fitzhardinge 				     cpu,
198d5de8841SJeremy Fitzhardinge 				     dummy_handler,
199d5de8841SJeremy Fitzhardinge 				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
200d5de8841SJeremy Fitzhardinge 				     name,
201d5de8841SJeremy Fitzhardinge 				     NULL);
202d5de8841SJeremy Fitzhardinge 
203d5de8841SJeremy Fitzhardinge 	if (irq >= 0) {
204d5de8841SJeremy Fitzhardinge 		disable_irq(irq); /* make sure it's never delivered */
205d5de8841SJeremy Fitzhardinge 		per_cpu(lock_kicker_irq, cpu) = irq;
206d5de8841SJeremy Fitzhardinge 	}
207d5de8841SJeremy Fitzhardinge 
208d5de8841SJeremy Fitzhardinge 	printk("cpu %d spinlock event irq %d\n", cpu, irq);
209d5de8841SJeremy Fitzhardinge }
210d5de8841SJeremy Fitzhardinge 
/*
 * Install the Xen-aware spinlock implementations into the paravirt
 * ops table.  Called once during early boot; the per-cpu kicker irqs
 * are set up separately by xen_init_lock_cpu().
 */
void __init xen_init_spinlocks(void)
{
	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
	pv_lock_ops.spin_lock = xen_spin_lock;
	pv_lock_ops.spin_trylock = xen_spin_trylock;
	pv_lock_ops.spin_unlock = xen_spin_unlock;
}
219