/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 *  This file contains arm architecture specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>

#define IOMEM(x)	(x)

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define lspull		lsr
#define lspush		lsl
#define get_byte_0	lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0	lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define lspull		lsl
#define lspush		lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3	lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3	lsl #0
#endif

/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...)	code
#else
#define ARM_BE8(code...)
#endif

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this to be worthwhile when the cache is not set to
 * write-allocate (this would need further testing on XScale when WA
 * is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...)	code
#else
#define CALGN(code...)
#endif
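
/*
 * Illustrative use of the endian-independent byte macros above (an added
 * sketch, not part of the original header; the register choice is arbitrary):
 * extract byte 1 of the word in r2 into r3, working identically on
 * little-endian and big-endian kernels.
 *
 *	mov	r3, r2, get_byte_1	@ shift byte 1 down to bits 7:0
 *	and	r3, r3, #255		@ mask off the remaining bytes
 */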

#define IMM12_MASK 0xfff

/* the frame pointer used for stack unwinding */
ARM(	fpreg	.req	r11	)
THUMB(	fpreg	.req	r7	)

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i
	.endm

	.macro	enable_irq_notrace
	cpsie	i
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

	.macro	asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	.if \save
	stmdb   sp!, {r0-r3, ip, lr}
	.endif
	bl	trace_hardirqs_off
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro	asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * Strictly speaking, the registers should be pushed and popped
	 * conditionally, but after the bl the flags are clobbered anyway.
	 */
	.if \save
	stmdb   sp!, {r0-r3, ip, lr}
	.endif
	bl\cond	trace_hardirqs_on
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro	disable_irq, save=1
	disable_irq_notrace
	asm_trace_hardirqs_off \save
	.endm

	.macro	enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq
	.endm

	.macro	save_and_disable_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq_notrace
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	msr	primask, \oldcpsr
#else
	msr	cpsr_c, \oldcpsr
#endif
	.endm

	.macro	restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on cond=eq
	restore_irqs_notrace \oldcpsr
	.endm

/*
 * Assembly version of "adr rd, BSYM(sym)".  This should only be used to
 * reference local symbols in the same assembly file which are to be
 * resolved by the assembler.  Other usage is undefined.
 */
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	badr\c, rd, sym
#ifdef CONFIG_THUMB2_KERNEL
	adr\c	\rd, \sym + 1
#else
	adr\c	\rd, \sym
#endif
	.endm
	.endr

	.macro	get_current, rd
#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
	mrc	p15, 0, \rd, c13, c0, 3		@ get TPIDRURO register
#else
	get_thread_info \rd
	ldr	\rd, [\rd, #TI_TASK]
#endif
	.endm

	.macro	set_current, rn
#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
	mcr	p15, 0, \rn, c13, c0, 3		@ set TPIDRURO register
#endif
	.endm

	.macro	reload_current, t1:req, t2:req
#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
	adr_l	\t1, __entry_task		@ get __entry_task base address
	mrc	p15, 0, \t2, c13, c0, 4		@ get per-CPU offset
	ldr	\t1, [\t1, \t2]			@ load variable
	mcr	p15, 0, \t1, c13, c0, 3		@ store in TPIDRURO
#endif
	.endm
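
/*
 * Illustrative pairing of the IRQ save/restore macros above (an added
 * sketch, not part of the original header; r4 is an arbitrary scratch
 * register chosen for the example):
 *
 *	save_and_disable_irqs r4	@ r4 := old CPSR/PRIMASK, IRQs masked
 *	...				@ code that must not be interrupted
 *	restore_irqs r4			@ put the saved IRQ state back
 */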

/*
 * Get current thread_info.
 */
	.macro	get_thread_info, rd
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* thread_info is the first member of struct task_struct */
	get_current	\rd
#else
 ARM(	mov	\rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT	)
 THUMB(	mov	\rd, sp			)
 THUMB(	lsr	\rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT	)
	mov	\rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
#endif
	.endm

/*
 * Increment/decrement the preempt count.
 */
#ifdef CONFIG_PREEMPT_COUNT
	.macro	inc_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	add	\tmp, \tmp, #1			@ increment it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	sub	\tmp, \tmp, #1			@ decrement it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	get_thread_info \ti
	dec_preempt_count \ti, \tmp
	.endm
#else
	.macro	inc_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	.endm
#endif

#define USERL(l, x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,l;			\
	.popsection

#define USER(x...)	USERL(9001f, x)

#ifdef CONFIG_SMP
#define ALT_SMP(instr...)					\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b - .					;\
9997:	instr							;\
	.if . - 9997b == 2					;\
		nop						;\
	.endif							;\
	.if . - 9997b != 4					;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif							;\
	.popsection
#define ALT_UP_B(label)						\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b - .					;\
	W(b)	. + (label - 9998b)				;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif

/*
 * Instruction barrier
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4
#endif
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb	ish)
	.else
	ALT_SMP(W(dmb)	ish)
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm

#if defined(CONFIG_CPU_V7M)
	/*
	 * setmode is used to assert that the CPU is in SVC mode during boot.
	 * For v7-M this is done in __v7m_setup, so setmode can be empty here.
	 */
	.macro	setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif
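
/*
 * Illustrative use of the USER() fixup helper defined earlier in this file
 * (an added sketch, not part of the original header; the register choice
 * and the fixup code are made up for the example).  The wrapped instruction
 * gains an __ex_table entry pointing at the local 9001 label:
 *
 *	USER(	ldrt	r0, [r1])	@ user access, may fault
 *	...
 * 9001:	...			@ reached only via the fault fixup
 */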

/*
 * Helper macro to enter SVC mode cleanly and mask interrupts.  reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time.
 * You cannot return to the original mode.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
	mrs	\reg , cpsr
	eor	\reg, \reg, #HYP_MODE
	tst	\reg, #MODE_MASK
	bic	\reg , \reg , #MODE_MASK
	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
	bne	1f
	orr	\reg, \reg, #PSR_A_BIT
	badr	lr, 2f
	msr	spsr_cxsf, \reg
	__MSR_ELR_HYP(14)
	__ERET
1:	msr	cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\()b\t\cond\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\t\cond\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\()b\t\cond \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\t\cond \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

/* Utility macro for declaring string literals */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm
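
/*
 * Illustrative use of the string macro above (an added sketch, not part of
 * the original header; the label and contents are made up for the example).
 * This declares a NUL-terminated object with proper type and size
 * annotations:
 *
 *	string	example_name, "example"
 */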

	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
	mov\c	pc, \reg
#else
	.ifeqs	"\reg", "lr"
	bx\c	\reg
	.else
	mov\c	pc, \reg
	.endif
#endif
	.endm
	.endr

	.macro	ret.w, reg
	ret	\reg
#ifdef CONFIG_THUMB2_KERNEL
	nop
#endif
	.endm

	.macro	bug, msg, line
#ifdef CONFIG_THUMB2_KERNEL
1:	.inst	0xde02
#else
1:	.inst	0xe7f001f2
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
	.pushsection .rodata.str, "aMS", %progbits, 1
2:	.asciz	"\msg"
	.popsection
	.pushsection __bug_table, "aw"
	.align	2
	.word	1b, 2b
	.hword	\line
	.popsection
#endif
	.endm

#ifdef CONFIG_KPROBES
#define _ASM_NOKPROBE(entry)				\
	.pushsection "_kprobe_blacklist", "aw" ;	\
	.balign 4 ;					\
	.long entry;					\
	.popsection
#else
#define _ASM_NOKPROBE(entry)
#endif

	.macro		__adldst_l, op, reg, sym, tmp, c
	.if		__LINUX_ARM_ARCH__ < 7
	ldr\c		\tmp, .La\@
	.subsection	1
	.align		2
.La\@:	.long		\sym - .Lpc\@
	.previous
	.else
	.ifnb		\c
 THUMB(	ittt		\c			)
	.endif
	movw\c		\tmp, #:lower16:\sym - .Lpc\@
	movt\c		\tmp, #:upper16:\sym - .Lpc\@
	.endif

#ifndef CONFIG_THUMB2_KERNEL
	.set		.Lpc\@, . + 8			// PC bias
	.ifc		\op, add
	add\c		\reg, \tmp, pc
	.else
	\op\c		\reg, [pc, \tmp]
	.endif
#else
.Lb\@:	add\c		\tmp, \tmp, pc
	/*
	 * In Thumb-2 builds, the PC bias depends on whether we are currently
	 * emitting into a .arm or a .thumb section. The size of the add opcode
	 * above will be 2 bytes when emitting in Thumb mode and 4 bytes when
	 * emitting in ARM mode, so let's use this to account for the bias.
	 */
	.set		.Lpc\@, . + (. - .Lb\@)

	.ifnc		\op, add
	\op\c		\reg, [\tmp]
	.endif
#endif
	.endm

	/*
	 * mov_l - move a constant value or [relocated] address into a register
	 */
	.macro		mov_l, dst:req, imm:req, cond
	.if		__LINUX_ARM_ARCH__ < 7
	ldr\cond	\dst, =\imm
	.else
	movw\cond	\dst, #:lower16:\imm
	movt\cond	\dst, #:upper16:\imm
	.endif
	.endm

	/*
	 * adr_l - adr pseudo-op with unlimited range
	 *
	 * @dst: destination register
	 * @sym: name of the symbol
	 * @cond: conditional opcode suffix
	 */
	.macro		adr_l, dst:req, sym:req, cond
	__adldst_l	add, \dst, \sym, \dst, \cond
	.endm

	/*
	 * ldr_l - ldr <literal> pseudo-op with unlimited range
	 *
	 * @dst: destination register
	 * @sym: name of the symbol
	 * @cond: conditional opcode suffix
	 */
	.macro		ldr_l, dst:req, sym:req, cond
	__adldst_l	ldr, \dst, \sym, \dst, \cond
	.endm

	/*
	 * str_l - str <literal> pseudo-op with unlimited range
	 *
	 * @src: source register
	 * @sym: name of the symbol
	 * @tmp: mandatory scratch register
	 * @cond: conditional opcode suffix
	 */
	.macro		str_l, src:req, sym:req, tmp:req, cond
	__adldst_l	str, \src, \sym, \tmp, \cond
	.endm
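
	/*
	 * Illustrative use of the _l pseudo-ops above (an added sketch, not
	 * part of the original header; the symbol and register names are
	 * made up for the example):
	 *
	 *	adr_l	r0, some_table		@ r0 := address of some_table
	 *	ldr_l	r1, some_var		@ r1 := 32-bit value of some_var
	 *	str_l	r1, some_var, r2	@ store r1 to some_var, r2 is scratch
	 */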

	.macro		__ldst_va, op, reg, tmp, sym, cond
#if __LINUX_ARM_ARCH__ >= 7 || \
    (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS)) || \
    (defined(CONFIG_LD_IS_LLD) && CONFIG_LLD_VERSION < 140000)
	mov_l		\tmp, \sym, \cond
	\op\cond	\reg, [\tmp]
#else
	/*
	 * Avoid a literal load by emitting a sequence of ADD/LDR instructions
	 * with the appropriate relocations. The combined sequence has a range
	 * of -/+ 256 MiB, which should be sufficient for the core kernel and
	 * for modules loaded into the module region.
	 */
	.globl		\sym
	.reloc		.L0_\@, R_ARM_ALU_PC_G0_NC, \sym
	.reloc		.L1_\@, R_ARM_ALU_PC_G1_NC, \sym
	.reloc		.L2_\@, R_ARM_LDR_PC_G2, \sym
.L0_\@: sub\cond	\tmp, pc, #8
.L1_\@: sub\cond	\tmp, \tmp, #4
.L2_\@: \op\cond	\reg, [\tmp, #0]
#endif
	.endm

	/*
	 * ldr_va - load a 32-bit word from the virtual address of \sym
	 */
	.macro		ldr_va, rd:req, sym:req, cond
	__ldst_va	ldr, \rd, \rd, \sym, \cond
	.endm

	/*
	 * str_va - store a 32-bit word to the virtual address of \sym
	 */
	.macro		str_va, rn:req, sym:req, tmp:req, cond
	__ldst_va	str, \rn, \tmp, \sym, \cond
	.endm

	/*
	 * rev_l - byte-swap a 32-bit value
	 *
	 * @val: source/destination register
	 * @tmp: scratch register
	 */
	.macro		rev_l, val:req, tmp:req
	.if		__LINUX_ARM_ARCH__ < 6
	eor		\tmp, \val, \val, ror #16
	bic		\tmp, \tmp, #0x00ff0000
	mov		\val, \val, ror #8
	eor		\val, \val, \tmp, lsr #8
	.else
	rev		\val, \val
	.endif
	.endm

	/*
	 * bl_r - branch and link to register
	 *
	 * @dst: target to branch to
	 * @c: conditional opcode suffix
	 */
	.macro		bl_r, dst:req, c
	.if		__LINUX_ARM_ARCH__ < 6
	mov\c		lr, pc
	mov\c		pc, \dst
	.else
	blx\c		\dst
	.endif
	.endm

#endif /* __ASM_ASSEMBLER_H__ */