/*
 *  linux/include/asm-arm/proc-armv/system.h
 *
 *  Copyright (C) 1996 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_PROC_SYSTEM_H
#define __ASM_PROC_SYSTEM_H

/*
 * Save the current interrupt enable state & disable IRQs
 */
#ifdef CONFIG_ARM64

/*
 * Save the current interrupt enable state
 * and disable IRQs/FIQs (writing #3 to DAIFSet
 * sets the I and F mask bits)
 */
#define local_irq_save(flags)					\
	({							\
	asm volatile(						\
	"mrs	%0, daif\n"					\
	"msr	daifset, #3"					\
	: "=r" (flags)						\
	:							\
	: "memory");						\
	})

/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(flags)				\
	({							\
	asm volatile(						\
	"msr	daif, %0"					\
	:							\
	: "r" (flags)						\
	: "memory");						\
	})

/*
 * Enable IRQs/FIQs
 */
#define local_irq_enable()					\
	({							\
	asm volatile(						\
	"msr	daifclr, #3"					\
	:							\
	:							\
	: "memory");						\
	})

/*
 * Disable IRQs/FIQs
 */
#define local_irq_disable()					\
	({							\
	asm volatile(						\
	"msr	daifset, #3"					\
	:							\
	:							\
	: "memory");						\
	})

#else	/* CONFIG_ARM64 */

/* In the CPSR, bit 7 (#128) is the IRQ disable bit and bit 6 (#64) the FIQ disable bit */
#define local_irq_save(x)					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_save\n"	\
"	orr	%1, %0, #128\n"					\
"	msr	cpsr_c, %1"					\
	: "=r" (x), "=r" (temp)					\
	:							\
	: "memory");						\
	})

/*
 * Enable IRQs
 */
#define local_irq_enable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_enable\n"	\
"	bic	%0, %0, #128\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory");						\
	})

/*
 * Disable IRQs
 */
#define local_irq_disable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_disable\n"	\
"	orr	%0, %0, #128\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory");						\
	})

/*
 * Enable FIQs
 */
#define __stf()							\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ stf\n"		\
"	bic	%0, %0, #64\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory");						\
	})

/*
 * Disable FIQs
 */
#define __clf()							\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ clf\n"		\
"	orr	%0, %0, #64\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory");						\
	})

/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x)					\
	({							\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_save_flags\n"	\
	  : "=r" (x)						\
	  :							\
	  : "memory");						\
	})

/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x)					\
	__asm__ __volatile__(					\
	"msr	cpsr_c, %0		@ local_irq_restore\n"	\
	:							\
	: "r" (x)						\
	: "memory")

#endif	/* CONFIG_ARM64 */

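/*
 * Usage sketch (illustrative only; update_counter is a hypothetical
 * example, not part of this header).  local_irq_save()/local_irq_restore()
 * preserve the caller's previous interrupt state, so the pair nests
 * safely where a bare local_irq_disable()/local_irq_enable() pair
 * would re-enable interrupts too early:
 *
 *	static void update_counter(unsigned long *counter)
 *	{
 *		unsigned long flags;
 *
 *		local_irq_save(flags);
 *		(*counter)++;
 *		local_irq_restore(flags);
 *	}
 */
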
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110) || \
	defined(CONFIG_ARM64)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two workarounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 */
#define swp_is_buggy
#endif

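/*
 * Note: workaround (1) is only atomic with respect to the local CPU,
 * which is sufficient here, assuming no other CPU touches the same
 * location concurrently during this stage of execution.
 */
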
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif

	switch (size) {
#ifdef swp_is_buggy
		/* Workaround (1): emulate the swap with interrupts disabled */
		case 1:
			local_irq_save(flags);
			ret = *(volatile unsigned char *)ptr;
			*(volatile unsigned char *)ptr = x;
			local_irq_restore(flags);
			break;

		case 4:
			local_irq_save(flags);
			ret = *(volatile unsigned long *)ptr;
			*(volatile unsigned long *)ptr = x;
			local_irq_restore(flags);
			break;
#else
		/* Native atomic swap instructions (byte and word forms) */
		case 1:	__asm__ __volatile__ ("swpb %0, %1, [%2]"
					: "=&r" (ret)
					: "r" (x), "r" (ptr)
					: "memory");
			break;
		case 4:	__asm__ __volatile__ ("swp %0, %1, [%2]"
					: "=&r" (ret)
					: "r" (x), "r" (ptr)
					: "memory");
			break;
#endif
		default: __bad_xchg(ptr, size), ret = 0;
	}

	return ret;
}
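
/*
 * Callers conventionally reach __xchg() through a type-generic xchg()
 * macro defined in the corresponding asm/system.h, which derives the
 * size argument from the pointed-to type.  Sketch for reference only;
 * it is not defined in this file:
 *
 *	#define xchg(ptr, x) \
 *		((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
 *					    sizeof(*(ptr))))
 */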

#endif