xref: /openbmc/linux/arch/arm/include/asm/assembler.h (revision 0d928b0b)
1 /*
2  *  arch/arm/include/asm/assembler.h
3  *
4  *  Copyright (C) 1996-2000 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  *  This file contains arm architecture specific defines
11  *  for the different processors.
12  *
13  *  Do not include any C declarations in this file - it is included by
14  *  assembler source.
15  */
16 #ifndef __ASSEMBLY__
17 #error "Only include this from assembly code"
18 #endif
19 
20 #include <asm/ptrace.h>
21 
22 /*
23  * Endian independent macros for shifting bytes within registers.
24  */
25 #ifndef __ARMEB__
/*
 * Little-endian: byte N of a word lives in bits [8N+7:8N], so
 * get_byte_N shifts right by 8*N and put_byte_N shifts left by 8*N.
 * "pull"/"push" name the shift directions used when merging the two
 * halves of an unaligned word load/store.
 */
26 #define pull            lsr
27 #define push            lsl
28 #define get_byte_0      lsl #0
29 #define get_byte_1	lsr #8
30 #define get_byte_2	lsr #16
31 #define get_byte_3	lsr #24
32 #define put_byte_0      lsl #0
33 #define put_byte_1	lsl #8
34 #define put_byte_2	lsl #16
35 #define put_byte_3	lsl #24
36 #else
/*
 * Big-endian: byte 0 is the most significant byte, so every shift
 * amount/direction is mirrored relative to the little-endian case.
 */
37 #define pull            lsl
38 #define push            lsr
39 #define get_byte_0	lsr #24
40 #define get_byte_1	lsr #16
41 #define get_byte_2	lsr #8
42 #define get_byte_3      lsl #0
43 #define put_byte_0	lsl #24
44 #define put_byte_1	lsl #16
45 #define put_byte_2	lsl #8
46 #define put_byte_3      lsl #0
47 #endif
48 
49 /*
50  * Data preload for architectures that support it
51  */
52 #if __LINUX_ARM_ARCH__ >= 5
/* ARMv5 and later: emit the wrapped preload code as-is. */
53 #define PLD(code...)	code
54 #else
/* No preload support: PLD(...) expands to nothing. */
55 #define PLD(code...)
56 #endif
57 
58 /*
59  * This can be used to enable code to cacheline align the destination
60  * pointer when bulk writing to memory.  Experiments on StrongARM and
61  * XScale didn't show this a worthwhile thing to do when the cache is not
62  * set to write-allocate (this would need further testing on XScale when WA
63  * is used).
64  *
65  * On Feroceon there is much to gain however, regardless of cache mode.
66  */
67 #ifdef CONFIG_CPU_FEROCEON
/* Feroceon: emit the destination cacheline-alignment code (see above). */
68 #define CALGN(code...) code
69 #else
/* Other CPUs: alignment code not worthwhile, CALGN(...) expands to nothing. */
70 #define CALGN(code...)
71 #endif
72 
73 /*
74  * Enable and disable interrupts
75  */
76 #if __LINUX_ARM_ARCH__ >= 6
	@ ARMv6+: use the dedicated change-processor-state instructions,
	@ which touch only the I bit and leave mode/F/flags alone.
77 	.macro	disable_irq_notrace
78 	cpsid	i			@ mask IRQs
79 	.endm
80 
81 	.macro	enable_irq_notrace
82 	cpsie	i			@ unmask IRQs
83 	.endm
84 #else
	@ Pre-v6 fallback: rewrite the whole CPSR control field.  Both
	@ writes force SVC mode and clear the F bit, i.e. they assume the
	@ caller is in SVC mode with FIQs enabled (see the comment on
	@ save_and_disable_irqs below).
85 	.macro	disable_irq_notrace
86 	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
87 	.endm
88 
89 	.macro	enable_irq_notrace
90 	msr	cpsr_c, #SVC_MODE
91 	.endm
92 #endif
93 
/*
 * Notify the irq-flags tracer that hardirqs were just disabled.
 * Expands to nothing unless CONFIG_TRACE_IRQFLAGS is set; the
 * caller-clobbered registers are preserved around the C call.
 */
94 	.macro asm_trace_hardirqs_off
95 #if defined(CONFIG_TRACE_IRQFLAGS)
96 	stmdb   sp!, {r0-r3, ip, lr}	@ save regs the C callee may clobber
97 	bl	trace_hardirqs_off
98 	ldmia	sp!, {r0-r3, ip, lr}
99 #endif
100 	.endm
101 
/*
 * Conditionally notify the tracer that hardirqs are being enabled:
 * trace_hardirqs_on is called only when condition code \cond holds.
 * Expands to nothing unless CONFIG_TRACE_IRQFLAGS is set.
 */
102 	.macro asm_trace_hardirqs_on_cond, cond
103 #if defined(CONFIG_TRACE_IRQFLAGS)
104 	/*
105 	 * actually the registers should be pushed and pop'd conditionally, but
106 	 * after bl the flags are certainly clobbered
107 	 */
108 	stmdb   sp!, {r0-r3, ip, lr}
109 	bl\cond	trace_hardirqs_on
110 	ldmia	sp!, {r0-r3, ip, lr}
111 #endif
112 	.endm
113 
/* Unconditional trace notification ("al" = always condition). */
114 	.macro asm_trace_hardirqs_on
115 	asm_trace_hardirqs_on_cond al
116 	.endm
117 
/*
 * Disable IRQs and record it with the tracer.  IRQs are masked first,
 * so the tracing hook itself runs with interrupts off.
 */
118 	.macro disable_irq
119 	disable_irq_notrace
120 	asm_trace_hardirqs_off
121 	.endm
122 
/*
 * Enable IRQs, recording it with the tracer.  The tracing hook runs
 * first, before interrupts are actually unmasked.
 */
123 	.macro enable_irq
124 	asm_trace_hardirqs_on
125 	enable_irq_notrace
126 	.endm
127 /*
128  * Save the current IRQ state and disable IRQs.  Note that this macro
129  * assumes FIQs are enabled, and that the processor is in SVC mode.
130  */
131 	.macro	save_and_disable_irqs, oldcpsr
132 	mrs	\oldcpsr, cpsr		@ capture CPSR before masking IRQs
133 	disable_irq
134 	.endm
135 
136 /*
137  * Restore interrupt state previously stored in a register.  We don't
138  * guarantee that this will preserve the flags.
139  */
140 	.macro	restore_irqs_notrace, oldcpsr
141 	msr	cpsr_c, \oldcpsr	@ control field only: condition flags not restored
142 	.endm
143 
/*
 * Restore a CPSR previously saved by save_and_disable_irqs, telling the
 * tracer when this actually re-enables IRQs.
 */
144 	.macro restore_irqs, oldcpsr
145 	tst	\oldcpsr, #PSR_I_BIT	@ Z set <=> I bit clear, i.e. IRQs were on
146 	asm_trace_hardirqs_on_cond eq	@ trace only if we are re-enabling IRQs
147 	restore_irqs_notrace \oldcpsr
148 	.endm
149 
/*
 * Wrap a single user-space access instruction with an exception-table
 * entry: if instruction `x` at label 9999 faults, the fault handler
 * branches to local label 9001, which the *user of this macro* must
 * define as the fixup path.
 */
150 #define USER(x...)				\
151 9999:	x;					\
152 	.section __ex_table,"a";		\
153 	.align	3;				\
154 	.long	9999b,9001f;			\
155 	.previous
156 
157 /*
158  * SMP data memory barrier
159  */
	@ Expands to a full data memory barrier on SMP kernels; a no-op on
	@ UP builds.  Note: on an SMP build for __LINUX_ARM_ARCH__ < 6 this
	@ also expands to nothing.
160 	.macro	smp_dmb
161 #ifdef CONFIG_SMP
162 #if __LINUX_ARM_ARCH__ >= 7
163 	dmb				@ ARMv7 has a dedicated barrier instruction
164 #elif __LINUX_ARM_ARCH__ == 6
165 	mcr	p15, 0, r0, c7, c10, 5	@ ARMv6: dmb via CP15 (r0 value ignored)
166 #endif
167 #endif
168 	.endm
169