/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 *  This file contains arm architecture specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>

#define IOMEM(x)	(x)

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define lspull		lsr
#define lspush		lsl
#define get_byte_0	lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0	lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define lspull		lsl
#define lspush		lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3	lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3	lsl #0
#endif

/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...)	code
#else
#define ARM_BE8(code...)
#endif

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this a worthwhile thing to do when the cache is not
 * set to write-allocate (this would need further testing on XScale when WA
 * is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...)	code
#else
#define CALGN(code...)
#endif

#define IMM12_MASK	0xfff
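
/*
 * Illustrative use of the byte-lane helpers defined above (a sketch, not
 * part of the original header): extract the byte loaded from offset 1 of
 * a word held in r0, working on both little- and big-endian configurations.
 *
 *	mov	r1, r0, get_byte_1
 *	and	r1, r1, #255
 */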

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i
	.endm

	.macro	enable_irq_notrace
	cpsie	i
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

#if __LINUX_ARM_ARCH__ < 7
	.macro	dsb, args
	mcr	p15, 0, r0, c7, c10, 4
	.endm

	.macro	isb, args
	mcr	p15, 0, r0, c7, c5, 4
	.endm
#endif

	.macro	asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	.if \save
	stmdb	sp!, {r0-r3, ip, lr}
	.endif
	bl	trace_hardirqs_off
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro	asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * actually the registers should be pushed and pop'd conditionally, but
	 * after bl the flags are certainly clobbered
	 */
	.if \save
	stmdb	sp!, {r0-r3, ip, lr}
	.endif
	bl\cond	trace_hardirqs_on
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro	disable_irq, save=1
	disable_irq_notrace
	asm_trace_hardirqs_off \save
	.endm

	.macro	enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq
	.endm

	.macro	save_and_disable_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq_notrace
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	msr	primask, \oldcpsr
#else
	msr	cpsr_c, \oldcpsr
#endif
	.endm

	.macro	restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on cond=eq
	restore_irqs_notrace \oldcpsr
	.endm

/*
 * Assembly version of "adr rd, BSYM(sym)".  This should only be used to
 * reference local symbols in the same assembly file which are to be
 * resolved by the assembler.  Other usage is undefined.
 */
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	badr\c, rd, sym
#ifdef CONFIG_THUMB2_KERNEL
	adr\c	\rd, \sym + 1
#else
	adr\c	\rd, \sym
#endif
	.endm
	.endr

	.macro	get_current, rd
#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
	mrc	p15, 0, \rd, c13, c0, 3		@ get TPIDRURO register
#else
	get_thread_info \rd
	ldr	\rd, [\rd, #TI_TASK]
#endif
	.endm

	.macro	set_current, rn
#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
	mcr	p15, 0, \rn, c13, c0, 3		@ set TPIDRURO register
#endif
	.endm

	.macro	reload_current, t1:req, t2:req
#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
	adr_l	\t1, __entry_task		@ get __entry_task base address
	mrc	p15, 0, \t2, c13, c0, 4		@ get per-CPU offset
	ldr	\t1, [\t1, \t2]			@ load variable
	mcr	p15, 0, \t1, c13, c0, 3		@ store in TPIDRURO
#endif
	.endm
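
/*
 * Illustrative pairing of the IRQ save/restore helpers above (a sketch,
 * not part of the original header); r9 is an arbitrary scratch register
 * chosen for the example:
 *
 *	save_and_disable_irqs r9	@ r9 := old CPSR/PRIMASK, IRQs masked
 *	@ ... critical section ...
 *	restore_irqs r9			@ put the previous I-bit state back
 */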

/*
 * Get current thread_info.
 */
	.macro	get_thread_info, rd
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* thread_info is the first member of struct task_struct */
	get_current	\rd
#else
 ARM(	mov	\rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT	)
 THUMB(	mov	\rd, sp					)
 THUMB(	lsr	\rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT	)
	mov	\rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
#endif
	.endm

/*
 * Increment/decrement the preempt count.
 */
#ifdef CONFIG_PREEMPT_COUNT
	.macro	inc_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	add	\tmp, \tmp, #1			@ increment it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	sub	\tmp, \tmp, #1			@ decrement it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	get_thread_info \ti
	dec_preempt_count \ti, \tmp
	.endm
#else
	.macro	inc_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	.endm
#endif

#define USERL(l, x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,l;			\
	.popsection

#define USER(x...)	USERL(9001f, x)

#ifdef CONFIG_SMP
#define ALT_SMP(instr...)					\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b - .					;\
9997:	instr							;\
	.if . - 9997b == 2					;\
		nop						;\
	.endif							;\
	.if . - 9997b != 4					;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif							;\
	.popsection
#define ALT_UP_B(label)						\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b - .					;\
	W(b)	. + (label - 9998b)				;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif

/*
 * Instruction barrier
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4
#endif
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb	ish)
	.else
	ALT_SMP(W(dmb)	ish)
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm

#if defined(CONFIG_CPU_V7M)
	/*
	 * setmode is used to assert to be in svc mode during boot. For v7-M
	 * this is done in __v7m_setup, so setmode can be empty here.
	 */
	.macro	setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif
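
/*
 * Illustrative ALT_SMP()/ALT_UP() pairing (a sketch, not part of the
 * original header); the UP alternative is patched in at boot on
 * uniprocessor kernels and must assemble to exactly 4 bytes, so Thumb-2
 * builds need W() as noted above:
 *
 *	ALT_SMP(dmb	ish)
 *	ALT_UP(nop)
 */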

/*
 * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time.
 * you cannot return to the original mode.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
	mrs	\reg , cpsr
	eor	\reg, \reg, #HYP_MODE
	tst	\reg, #MODE_MASK
	bic	\reg , \reg , #MODE_MASK
	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
	bne	1f
	orr	\reg, \reg, #PSR_A_BIT
	badr	lr, 2f
	msr	spsr_cxsf, \reg
	__MSR_ELR_HYP(14)
	__ERET
1:	msr	cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\()b\t\cond\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\t\cond\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\()b\t\cond \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\t\cond \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

/* Utility macro for declaring string literals */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm
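
/*
 * Illustrative use of the string macro above (a sketch, not part of the
 * original header); "panic_msg" is just an example name:
 *
 *	string	panic_msg, "unexpected mode"
 *
 * This emits a named, sized .asciz object that code can reference with
 * adr or the long-range pseudo-ops defined later in this file.
 */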

	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
	mov\c	pc, \reg
#else
	.ifeqs	"\reg", "lr"
	bx\c	\reg
	.else
	mov\c	pc, \reg
	.endif
#endif
	.endm
	.endr

	.macro	ret.w, reg
	ret	\reg
#ifdef CONFIG_THUMB2_KERNEL
	nop
#endif
	.endm

	.macro	bug, msg, line
#ifdef CONFIG_THUMB2_KERNEL
1:	.inst	0xde02
#else
1:	.inst	0xe7f001f2
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
	.pushsection .rodata.str, "aMS", %progbits, 1
2:	.asciz	"\msg"
	.popsection
	.pushsection __bug_table, "aw"
	.align	2
	.word	1b, 2b
	.hword	\line
	.popsection
#endif
	.endm

#ifdef CONFIG_KPROBES
#define _ASM_NOKPROBE(entry)				\
	.pushsection "_kprobe_blacklist", "aw" ;	\
	.balign 4 ;					\
	.long entry;					\
	.popsection
#else
#define _ASM_NOKPROBE(entry)
#endif

	.macro		__adldst_l, op, reg, sym, tmp, c
	.if		__LINUX_ARM_ARCH__ < 7
	ldr\c		\tmp, .La\@
	.subsection	1
	.align		2
.La\@:	.long		\sym - .Lpc\@
	.previous
	.else
	.ifnb		\c
 THUMB(	ittt		\c			)
	.endif
	movw\c		\tmp, #:lower16:\sym - .Lpc\@
	movt\c		\tmp, #:upper16:\sym - .Lpc\@
	.endif

#ifndef CONFIG_THUMB2_KERNEL
	.set		.Lpc\@, . + 8		// PC bias
	.ifc		\op, add
	add\c		\reg, \tmp, pc
	.else
	\op\c		\reg, [pc, \tmp]
	.endif
#else
.Lb\@:	add\c		\tmp, \tmp, pc
	/*
	 * In Thumb-2 builds, the PC bias depends on whether we are currently
	 * emitting into a .arm or a .thumb section. The size of the add opcode
	 * above will be 2 bytes when emitting in Thumb mode and 4 bytes when
	 * emitting in ARM mode, so let's use this to account for the bias.
	 */
	.set		.Lpc\@, . + (. - .Lb\@)

	.ifnc		\op, add
	\op\c		\reg, [\tmp]
	.endif
#endif
	.endm

	/*
	 * mov_l - move a constant value or [relocated] address into a register
	 */
	.macro		mov_l, dst:req, imm:req
	.if		__LINUX_ARM_ARCH__ < 7
	ldr		\dst, =\imm
	.else
	movw		\dst, #:lower16:\imm
	movt		\dst, #:upper16:\imm
	.endif
	.endm

	/*
	 * adr_l - adr pseudo-op with unlimited range
	 *
	 * @dst: destination register
	 * @sym: name of the symbol
	 * @cond: conditional opcode suffix
	 */
	.macro		adr_l, dst:req, sym:req, cond
	__adldst_l	add, \dst, \sym, \dst, \cond
	.endm

	/*
	 * ldr_l - ldr <literal> pseudo-op with unlimited range
	 *
	 * @dst: destination register
	 * @sym: name of the symbol
	 * @cond: conditional opcode suffix
	 */
	.macro		ldr_l, dst:req, sym:req, cond
	__adldst_l	ldr, \dst, \sym, \dst, \cond
	.endm

	/*
	 * str_l - str <literal> pseudo-op with unlimited range
	 *
	 * @src: source register
	 * @sym: name of the symbol
	 * @tmp: mandatory scratch register
	 * @cond: conditional opcode suffix
	 */
	.macro		str_l, src:req, sym:req, tmp:req, cond
	__adldst_l	str, \src, \sym, \tmp, \cond
	.endm

	/*
	 * rev_l - byte-swap a 32-bit value
	 *
	 * @val: source/destination register
	 * @tmp: scratch register
	 */
	.macro		rev_l, val:req, tmp:req
	.if		__LINUX_ARM_ARCH__ < 6
	eor		\tmp, \val, \val, ror #16
	bic		\tmp, \tmp, #0x00ff0000
	mov		\val, \val, ror #8
	eor		\val, \val, \tmp, lsr #8
	.else
	rev		\val, \val
	.endif
	.endm

#endif /* __ASM_ASSEMBLER_H__ */
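
/*
 * Illustrative use of the long-range pseudo-ops and rev_l above (a sketch,
 * not part of the original header); "some_symbol" is a placeholder name:
 *
 *	adr_l	r0, some_symbol		@ take the address of some_symbol
 *	ldr_l	r1, some_symbol		@ load the 32-bit word at some_symbol
 *	str_l	r1, some_symbol, r2	@ store r1 there; r2 is scratch
 *	rev_l	r1, r2			@ byte-swap r1, r2 is scratch
 */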