/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This file contains ARM architecture-specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/domain.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>

#define IOMEM(x)	(x)

/*
 * Endian-independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define lspull		lsr
#define lspush		lsl
#define get_byte_0	lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0	lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define lspull		lsl
#define lspush		lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3	lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3	lsl #0
#endif
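
/*
 * Usage sketch (hypothetical register use): these expand to shift
 * operands, so a byte can be extracted or two misaligned words merged
 * without caring about endianness:
 *
 *	mov	r3, r2, get_byte_1	@ low byte of r3 = byte 1 of r2
 *	and	r3, r3, #255		@ isolate it
 *	mov	r5, r5, lspull #8	@ discard the bytes already consumed
 *	orr	r5, r5, r6, lspush #24	@ merge in bytes from the next word
 */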

/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...) code
#else
#define ARM_BE8(code...)
#endif
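
/*
 * Usage sketch (hypothetical register use): code wrapped in ARM_BE8()
 * is only assembled for BE8 kernels, e.g. byte-swapping a value that
 * is laid out little-endian in memory:
 *
 *	ldr	r0, [r1]
 * ARM_BE8(rev	r0, r0	)		@ present only under CONFIG_CPU_ENDIAN_BE8
 */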

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif
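
/*
 * Usage sketch (hypothetical register use): prefetch ahead of a copy
 * loop; the pld disappears entirely on pre-v5 builds:
 *
 *	PLD(	pld	[r1, #32]	)
 */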

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale did not show this to be worthwhile when the cache is not set
 * to write-allocate (this would need further testing on XScale when WA
 * is used).
 *
 * On Feroceon there is much to gain, however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif
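
/*
 * Usage sketch (hypothetical registers and label, in the style of the
 * copy loops): compute the distance to the next cache line and branch
 * off to align the destination first; on other CPUs the whole sequence
 * assembles to nothing:
 *
 *	CALGN(	ands	ip, r0, #31		)
 *	CALGN(	rsb	ip, ip, #32		)
 *	CALGN(	bne	.Lalign_dest		)	@ .Lalign_dest is hypothetical
 */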

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i
	.endm

	.macro	enable_irq_notrace
	cpsie	i
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

	.macro asm_trace_hardirqs_off
#if defined(CONFIG_TRACE_IRQFLAGS)
	stmdb   sp!, {r0-r3, ip, lr}
	bl	trace_hardirqs_off
	ldmia	sp!, {r0-r3, ip, lr}
#endif
	.endm

	.macro asm_trace_hardirqs_on_cond, cond
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * Strictly, the registers should be pushed and popped
	 * conditionally, but the flags are certainly clobbered after
	 * the bl, so the pop could not be predicated on \cond anyway.
	 */
	stmdb   sp!, {r0-r3, ip, lr}
	bl\cond	trace_hardirqs_on
	ldmia	sp!, {r0-r3, ip, lr}
#endif
	.endm

	.macro asm_trace_hardirqs_on
	asm_trace_hardirqs_on_cond al
	.endm

	.macro disable_irq
	disable_irq_notrace
	asm_trace_hardirqs_off
	.endm

	.macro enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
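
/*
 * Usage sketch: bracket a critical section; the traced variants keep
 * lockdep's view of the IRQ state in sync when CONFIG_TRACE_IRQFLAGS
 * is enabled:
 *
 *	disable_irq
 *	@ ... code that must not be interrupted ...
 *	enable_irq
 */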
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq
	.endm

	.macro	save_and_disable_irqs_notrace, oldcpsr
	mrs	\oldcpsr, cpsr
	disable_irq_notrace
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	msr	primask, \oldcpsr
#else
	msr	cpsr_c, \oldcpsr
#endif
	.endm

	.macro restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on_cond eq
	restore_irqs_notrace \oldcpsr
	.endm
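
/*
 * Usage sketch (hypothetical register use): save, mask, then restore
 * whatever interrupt state was in force beforehand:
 *
 *	save_and_disable_irqs r4
 *	@ ... critical section ...
 *	restore_irqs r4
 */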

/*
 * Get current thread_info by clearing the low 13 bits of sp: the
 * thread_info sits at the base of the 8KiB (2^13 byte) kernel stack.
 */
	.macro	get_thread_info, rd
 ARM(	mov	\rd, sp, lsr #13	)
 THUMB(	mov	\rd, sp			)
 THUMB(	lsr	\rd, \rd, #13		)
	mov	\rd, \rd, lsl #13
	.endm

/*
 * Increment/decrement the preempt count.
 */
#ifdef CONFIG_PREEMPT_COUNT
	.macro	inc_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	add	\tmp, \tmp, #1			@ increment it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	sub	\tmp, \tmp, #1			@ decrement it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	get_thread_info \ti
	dec_preempt_count \ti, \tmp
	.endm
#else
	.macro	inc_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	.endm
#endif
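
/*
 * Usage sketch (hypothetical register use): both macros expand to
 * nothing when CONFIG_PREEMPT_COUNT is disabled, so callers can use
 * them unconditionally:
 *
 *	get_thread_info r9
 *	inc_preempt_count r9, r10
 *	@ ... non-preemptible work ...
 *	dec_preempt_count r9, r10
 */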

#define USER(x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,9001f;			\
	.popsection
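
/*
 * Usage sketch (hypothetical register use): the wrapped instruction
 * gets an exception table entry pointing at a 9001 fixup label, which
 * the caller must provide:
 *
 * USER(	ldrt	r1, [r0]	)	@ user access, may fault
 *	@ ...
 * 9001:	@ fixup: reached if the access above faulted
 */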

#ifdef CONFIG_SMP
#define ALT_SMP(instr...)					\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
9997:	instr							;\
	.if . - 9997b != 4					;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif							;\
	.popsection
#define ALT_UP_B(label)					\
	.equ	up_b_offset, label - 9998b			;\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
	W(b)	. + up_b_offset					;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif
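
/*
 * Usage sketch: emit an SMP instruction that the boot code can patch
 * to its UP replacement when running on a single CPU (the smp_dmb
 * macro below is a real pairing of the two):
 *
 *	ALT_SMP(dmb	ish)
 *	ALT_UP(nop)
 */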

/*
 * Instruction barrier
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4		@ flush prefetch buffer (v6 ISB)
#endif
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb	ish)
	.else
	ALT_SMP(W(dmb)	ish)
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm
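
/*
 * Usage sketch: callers pass the instruction set being assembled so
 * the correct encoding width is chosen, e.g. from ARM code:
 *
 *	smp_dmb	arm
 */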

#if defined(CONFIG_CPU_V7M)
	/*
	 * setmode is used to ensure the CPU is in SVC mode during boot.
	 * For v7-M this is done in __v7m_setup, so setmode can be empty
	 * here.
	 */
	.macro	setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif

/*
 * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot
 * time; you cannot return to the original mode.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6
	mrs	\reg, cpsr
	eor	\reg, \reg, #HYP_MODE		@ flip the mode field against HYP
	tst	\reg, #MODE_MASK		@ Z set iff we were in HYP mode
	bic	\reg, \reg, #MODE_MASK
	orr	\reg, \reg, #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg, \reg, #PSR_T_BIT	)
	bne	1f				@ not HYP: a plain CPSR write will do
	orr	\reg, \reg, #PSR_A_BIT
	adr	lr, BSYM(2f)
	msr	spsr_cxsf, \reg
	__MSR_ELR_HYP(14)
	__ERET					@ drop from HYP to SVC via exception return
1:	msr	cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\cond\()\t\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\cond\()\t \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm
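
/*
 * Usage sketch (hypothetical register use): copy one user word with a
 * fixup at the default 9001 abort label:
 *
 *	ldrusr	r3, r1, 4		@ r3 = *r1, then r1 += 4; may fault
 *	str	r3, [r0], #4
 *	@ ...
 * 9001:	@ fixup for the faulting user access
 */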

/* Utility macro for declaring string literals */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm
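
/*
 * Usage sketch (hypothetical name):
 *
 *	string	cpu_name_str, "ARMv7 Processor"
 */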

	.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
	adds	\tmp, \addr, #\size - 1	@ end of the access; C set on wrap
	sbcccs	\tmp, \tmp, \limit	@ if no wrap, compare end against \limit
	bcs	\bad			@ fault if the range wraps or exceeds \limit
#endif
	.endm
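
/*
 * Usage sketch (hypothetical registers and label): validate a 4-byte
 * user access against the address limit before performing it:
 *
 *	check_uaccess r0, 4, r1, r2, .Lbad_access
 */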

#endif /* __ASM_ASSEMBLER_H__ */