xref: /openbmc/linux/arch/parisc/include/asm/ldcw.h (revision 527dcdccd60759ee38e6224c93f87a6194d970ad)
#ifndef __PARISC_LDCW_H
#define __PARISC_LDCW_H

#ifndef CONFIG_PA20
/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
   and GCC only guarantees 8-byte alignment for stack locals, we can't
   be assured of 16-byte alignment for atomic lock data even if we
   specify "__attribute__((aligned(16)))" in the type declaration.  So,
   we use a struct containing an array of four ints for the atomic lock
   type and dynamically select the 16-byte aligned int from the array
   for the semaphore.  */

#define __PA_LDCW_ALIGNMENT	16
#define __ldcw_align(a) ({					\
	unsigned long __ret = (unsigned long) &(a)->lock[0];	\
	__ret = (__ret + __PA_LDCW_ALIGNMENT - 1)		\
		& ~(__PA_LDCW_ALIGNMENT - 1);			\
	(volatile unsigned int *) __ret;			\
})
#define __LDCW	"ldcw"

#else /*CONFIG_PA20*/
/* From: "Jim Hull" <jim.hull of hp.com>
   I've attached a summary of the change, but basically, for PA 2.0, as
   long as the ",CO" (coherent operation) completer is specified, then the
   16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
   they only require "natural" alignment (4-byte for ldcw, 8-byte for
   ldcd). */

#define __PA_LDCW_ALIGNMENT	4
#define __ldcw_align(a) (&(a)->slock)
#define __LDCW	"ldcw,co"

#endif /*!CONFIG_PA20*/
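
/* Illustrative sketch, not part of this header: the two __ldcw_align()
   variants above assume an arch_spinlock_t layout that matches the
   configuration, roughly along these lines (see
   arch/parisc/include/asm/spinlock_types.h for the real definition):

	typedef struct {
	#ifdef CONFIG_PA20
		volatile unsigned int slock;	// one word, naturally aligned for ldcw,co
	#else
		volatile unsigned int lock[4];	// 16 bytes, so some int in it is 16-byte aligned
	#endif
	} arch_spinlock_t;

   On pre-PA2.0 parts, __ldcw_align() rounds &lock[0] up to the next
   16-byte boundary within the four-int array; on PA 2.0 it simply
   takes &slock.  */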

/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.  */
#define __ldcw(a) ({						\
	unsigned __ret;						\
	__asm__ __volatile__(__LDCW " 0(%2),%0"			\
		: "=r" (__ret), "+m" (*(a)) : "r" (a));		\
	__ret;							\
})
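
/* Usage sketch, modelled on the arch/parisc spinlock code (cpu_relax()
   and arch_spinlock_t are assumed from elsewhere): ldcw atomically loads
   the word and clears it, so a zero return means the lock was already
   held, and we spin on plain reads until it goes non-zero again:

	static inline void example_spin_lock(arch_spinlock_t *x)
	{
		volatile unsigned int *a = __ldcw_align(x);

		while (__ldcw(a) == 0)		// non-zero load means we took the lock
			while (*a == 0)		// spin read-only to limit bus traffic
				cpu_relax();
	}

   Unlock is then a plain store of a non-zero value to *a.  */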

#ifdef CONFIG_SMP
# define __lock_aligned __attribute__((__section__(".data..lock_aligned")))
#endif
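
/* Usage sketch, an assumption mirroring arch/parisc/lib/bitops.c: on SMP,
   statically allocated lock words are collected in the dedicated
   .data..lock_aligned section (aligned by the linker script) via this
   attribute:

	arch_spinlock_t example_hash_locks[16] __lock_aligned;
*/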

#endif /* __PARISC_LDCW_H */