/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This file contains arm architecture specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/domain.h>
#include <asm/opcodes-virt.h>

/* No-op in assembly; lets shared headers annotate MMIO addresses. */
#define IOMEM(x)	(x)

/*
 * Endian independent macros for shifting bytes within registers.
 *
 * "pull"/"push" name the shift direction that moves bytes towards,
 * respectively away from, the least significant end of a register, so
 * copy/checksum loops can be written once for both endiannesses.
 * get_byte_N / put_byte_N select the shift that extracts/places memory
 * byte N of a 32-bit word.
 */
#ifndef __ARMEB__
#define pull            lsr
#define push            lsl
#define get_byte_0      lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0      lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define pull            lsl
#define push            lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3      lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3      lsl #0
#endif

/*
 * Data preload for architectures that support it.  PLD() wraps a pld
 * instruction sequence that is compiled out entirely on pre-v5 cores,
 * which do not have the instruction.
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this a worthwhile thing to do when the cache is not
 * set to write-allocate (this would need further testing on XScale when WA
 * is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif

/*
 * Enable and disable interrupts.
 *
 * ARMv6+ has the dedicated CPS instruction to change the CPSR I bit
 * without a scratch register; earlier architectures must write the CPSR
 * control field directly (which also forces SVC mode here).
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i			@ mask IRQs, leave FIQs/mode alone
	.endm

	.macro	enable_irq_notrace
	cpsie	i			@ unmask IRQs
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

/*
 * Call trace_hardirqs_off() if irq tracing is configured.  The C call
 * clobbers the AAPCS caller-saved registers, so they are preserved
 * around it; with tracing disabled this expands to nothing.
 */
	.macro asm_trace_hardirqs_off
#if defined(CONFIG_TRACE_IRQFLAGS)
	stmdb   sp!, {r0-r3, ip, lr}
	bl	trace_hardirqs_off
	ldmia	sp!, {r0-r3, ip, lr}
#endif
	.endm

/*
 * Conditionally call trace_hardirqs_on(): the bl is executed only when
 * \cond holds (caller must have set the flags appropriately).
 */
	.macro asm_trace_hardirqs_on_cond, cond
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * actually the registers should be pushed and pop'd conditionally, but
	 * after bl the flags are certainly clobbered
	 */
	stmdb   sp!, {r0-r3, ip, lr}
	bl\cond	trace_hardirqs_on
	ldmia	sp!, {r0-r3, ip, lr}
#endif
	.endm

	.macro asm_trace_hardirqs_on
	asm_trace_hardirqs_on_cond al	@ unconditional form
	.endm

/* Tracing variants: keep the irq-tracing state consistent with reality. */
	.macro disable_irq
	disable_irq_notrace
	asm_trace_hardirqs_off
	.endm

	.macro enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
	mrs	\oldcpsr, cpsr
	disable_irq
	.endm

	.macro	save_and_disable_irqs_notrace, oldcpsr
	mrs	\oldcpsr, cpsr
	disable_irq_notrace
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
	msr	cpsr_c, \oldcpsr
	.endm

	.macro	restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT	@ were IRQs enabled in the saved state?
	asm_trace_hardirqs_on_cond eq	@ if so, tell the tracer before unmask
	restore_irqs_notrace \oldcpsr
	.endm

/*
 * Wrap a single userspace-access instruction with an exception table
 * entry.  The faulting address (label 9999) is paired with fixup label
 * 9001, which the *user* of this macro must define as the abort handler.
 */
#define USER(x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,9001f;			\
	.popsection

/*
 * SMP/UP alternatives: ALT_SMP() emits the SMP instruction in .text and
 * records its address (label 9998) so that, when the kernel boots on a
 * uniprocessor machine, the corresponding ALT_UP() instruction stored in
 * .alt.smp.init is patched over it at runtime.  Both variants must be
 * exactly 4 bytes, hence the .error check below.
 */
#ifdef CONFIG_SMP
#define ALT_SMP(instr...)					\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
9997:	instr							;\
	.if . - 9997b != 4					;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif							;\
	.popsection
#define ALT_UP_B(label)					\
	.equ	up_b_offset, label - 9998b			;\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
	W(b)	. + up_b_offset					;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif

/*
 * Instruction barrier: isb on ARMv7+, the equivalent CP15 "flush
 * prefetch buffer" operation on ARMv6, nothing on earlier cores.
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4
#endif
	.endm

/*
 * SMP data memory barrier.  Emitted only for SMP kernels and patched to
 * a nop when running on UP hardware (see ALT_SMP/ALT_UP above).  \mode
 * selects the "arm" or Thumb-2 wide encoding so both instructions stay
 * 4 bytes long.
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb)
	.else
	ALT_SMP(W(dmb))
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm

/*
 * Switch processor mode.  Thumb-2 cannot encode an immediate msr to
 * cpsr_c, so it needs \reg as scratch; the ARM version writes the
 * immediate directly and leaves \reg untouched.
 */
#ifdef CONFIG_THUMB2_KERNEL
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif

/*
 * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time.
 * you cannot return to the original mode.
 *
 * Beware, it also clobbers LR.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6
	mrs	\reg , cpsr
	mov	lr , \reg
	and	lr , lr , #MODE_MASK
	cmp	lr , #HYP_MODE		@ entered in HYP mode?
	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT
	bic	\reg , \reg , #MODE_MASK
	orr	\reg , \reg , #SVC_MODE
THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
	bne	1f			@ not HYP: plain cpsr write suffices
	@ HYP mode: build the target PSR (SVC, A/I/F masked), point the
	@ exception return at label 2 and drop to SVC via exception return.
	orr	\reg, \reg, #PSR_A_BIT
	adr	lr, BSYM(2f)
	msr	spsr_cxsf, \reg
	__MSR_ELR_HYP(14)
	__ERET
1:	msr	cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

	/*
	 * One unprivileged byte/word access at \ptr + \off, with an
	 * exception table entry directing faults to \abort.  \t defaults
	 * to the TUSER() suffix selecting the ldrt/strt forms.
	 */
	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\cond\()\t\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	/*
	 * \rept (1 or 2) unprivileged accesses followed by a single
	 * pointer update.  Thumb-2 needs an explicit IT block covering
	 * the conditional instructions.
	 */
	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	/*
	 * ARM variant: post-indexed addressing increments \ptr as part of
	 * each access, so the accesses are simply repeated \rept times.
	 * Each one gets its own exception table entry pointing at \abort.
	 */
	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\cond\()\t \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

/* Convenience wrappers: user-space store/load with default cond/rept/abort. */
	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

/* Utility macro for declaring string literals */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm

/*
 * Range check a user access: branch to \bad unless [\addr, \addr+\size)
 * lies below \limit.  The adds sets carry if \addr + \size - 1 wraps;
 * sbcccs then subtracts the limit only when no wrap occurred, so a set
 * carry at bcs means either overflow or end-address >= limit.  Skipped
 * when CPU domains provide the protection instead.
 */
	.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
	adds	\tmp, \addr, #\size - 1
	sbcccs	\tmp, \tmp, \limit
	bcs	\bad
#endif
	.endm

#endif /* __ASM_ASSEMBLER_H__ */