/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/assembler.h
 *
 * Copyright (C) 1996-2000 Russell King
 *
 * This file contains arm architecture specific defines
 * for the different processors.
 *
 * Do not include any C declarations in this file - it is included by
 * assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>

#define IOMEM(x)	(x)

/*
 * Endian independent macros for shifting bytes within registers.
 *
 * get_byte_N extracts byte N (in memory order) from a loaded word;
 * put_byte_N positions a byte value into slot N of a word being built.
 * lspull/lspush are the shifts used to merge two partial words when
 * doing unaligned copies; their direction flips with endianness.
 */
#ifndef __ARMEB__
#define lspull		lsr
#define lspush		lsl
#define get_byte_0	lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0	lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define lspull		lsl
#define lspush		lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3	lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3	lsl #0
#endif

/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...)	code
#else
#define ARM_BE8(code...)
#endif

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this a worthwhile thing to do when the cache is not
 * set to write-allocate (this would need further testing on XScale when WA
 * is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...)	code
#else
#define CALGN(code...)
#endif

/* mask for the 12-bit immediate field of ARM data-processing encodings */
#define IMM12_MASK	0xfff

/* the frame pointer used for stack unwinding */
ARM(	fpreg	.req	r11	)
THUMB(	fpreg	.req	r7	)

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i
	.endm

	.macro	enable_irq_notrace
	cpsie	i
	.endm
#else
	/* pre-v6: no cps instruction, write the control byte of CPSR directly */
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

	/*
	 * Call trace_hardirqs_off() when irq tracing is enabled.
	 * \save != 0 preserves the caller-saved registers around the call.
	 */
	.macro	asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	.if \save
	stmdb   sp!, {r0-r3, ip, lr}
	.endif
	bl	trace_hardirqs_off
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	/*
	 * Call trace_hardirqs_on() when irq tracing is enabled; the call
	 * itself is made conditional via \cond.
	 */
	.macro	asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * actually the registers should be pushed and pop'd conditionally, but
	 * after bl the flags are certainly clobbered
	 */
	.if \save
	stmdb   sp!, {r0-r3, ip, lr}
	.endif
	bl\cond	trace_hardirqs_on
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	/* Disable IRQs, then record the transition for irq tracing */
	.macro	disable_irq, save=1
	disable_irq_notrace
	asm_trace_hardirqs_off \save
	.endm

	/* Record the transition for irq tracing, then enable IRQs */
	.macro	enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq
	.endm

	/* As save_and_disable_irqs, but without irq-tracing hooks */
	.macro	save_and_disable_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq_notrace
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	msr	primask, \oldcpsr
#else
	msr	cpsr_c, \oldcpsr
#endif
	.endm

	/* Restore IRQ state, tracing hardirqs-on when \oldcpsr had I clear */
	.macro restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on cond=eq
	restore_irqs_notrace \oldcpsr
	.endm

/*
 * Assembly version of "adr rd, BSYM(sym)".  This should only be used to
 * reference local symbols in the same assembly file which are to be
 * resolved by the assembler.  Other usage is undefined.
 *
 * In Thumb-2 kernels the +1 sets bit 0 of the address so that using the
 * result as a branch target keeps the CPU in Thumb state.
 */
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	badr\c, rd, sym
#ifdef CONFIG_THUMB2_KERNEL
	adr\c	\rd, \sym + 1
#else
	adr\c	\rd, \sym
#endif
	.endm
	.endr

/*
 * Get current thread_info.
 */
	.macro	get_thread_info, rd
	/* thread_info is the first member of struct task_struct */
	get_current \rd
	.endm

/*
 * Increment/decrement the preempt count.
 */
#ifdef CONFIG_PREEMPT_COUNT
	.macro	inc_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	add	\tmp, \tmp, #1			@ increment it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	sub	\tmp, \tmp, #1			@ decrement it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	/* As dec_preempt_count, but loads thread_info into \ti first */
	.macro	dec_preempt_count_ti, ti, tmp
	get_thread_info \ti
	dec_preempt_count \ti, \tmp
	.endm
#else
	/* preempt counting disabled: all three macros expand to nothing */
	.macro	inc_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	.endm
#endif

/*
 * Emit instruction(s) 'x' with an exception-table entry so that a fault
 * taken on them branches to label 'l' instead of oopsing.
 */
#define USERL(l, x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,l;			\
	.popsection

/* USER() fixups branch to the conventional local label 9001 */
#define USER(x...)	USERL(9001f, x)

#ifdef CONFIG_SMP
#define ALT_SMP(instr...)					\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b - .					;\
9997:	instr						;\
	.if . - 9997b == 2					;\
		nop						;\
	.endif							;\
	.if . - 9997b != 4					;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif							;\
	.popsection
#define ALT_UP_B(label)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b - .					;\
	W(b)	. + (label - 9998b)					;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif

/*
 * this_cpu_offset - load the per-CPU offset of this CPU into
 * 		     register 'rd'
 */
	.macro	this_cpu_offset, rd:req
#ifdef CONFIG_SMP
ALT_SMP(mrc	p15, 0, \rd, c13, c0, 4)
#ifdef CONFIG_CPU_V6
	/* v6 UP fallback: the offset lives in __per_cpu_offset, not TPIDRPRW */
ALT_UP_B(.L1_\@)
.L0_\@:
	.subsection	1
.L1_\@: ldr_va	\rd, __per_cpu_offset
	b	.L0_\@
	.previous
#endif
#else
	mov	\rd, #0
#endif
	.endm

/*
 * set_current - store the task pointer of this CPU's current task
 */
	.macro	set_current, rn:req, tmp:req
#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
9998:	mcr	p15, 0, \rn, c13, c0, 3		@ set TPIDRURO register
#ifdef CONFIG_CPU_V6
	/* v6 UP fallback: keep 'current' in the __current variable instead */
ALT_UP_B(.L0_\@)
	.subsection	1
.L0_\@: str_va	\rn, __current, \tmp
	b	.L1_\@
	.previous
.L1_\@:
#endif
#else
	str_va	\rn, __current, \tmp
#endif
	.endm

/*
 * get_current - load the task pointer of this CPU's current task
 */
	.macro	get_current, rd:req
#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
9998:	mrc	p15, 0, \rd, c13, c0, 3		@ get TPIDRURO register
#ifdef CONFIG_CPU_V6
	/* v6 UP fallback: read 'current' from the __current variable */
ALT_UP_B(.L0_\@)
	.subsection	1
.L0_\@: ldr_va	\rd, __current
	b	.L1_\@
	.previous
.L1_\@:
#endif
#else
	ldr_va	\rd, __current
#endif
	.endm

/*
 * reload_current - reload the task pointer of this CPU's current task
 *		    into the TLS register
 */
	.macro	reload_current, t1:req, t2:req
#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
#ifdef CONFIG_CPU_V6
	/* on v6 UP there is no TPIDRURO to reload: skip the whole sequence */
ALT_SMP(nop)
ALT_UP_B(.L0_\@)
#endif
	ldr_this_cpu \t1, __entry_task, \t1, \t2
	mcr	p15, 0, \t1, c13, c0, 3		@ store in TPIDRURO
.L0_\@:
#endif
	.endm

/*
 * Instruction barrier
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4		@ v6 CP15 equivalent of isb
#endif
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb	ish)
	.else
	ALT_SMP(W(dmb)	ish)
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	/* on UP the barrier is patched out to a (width-matched) nop */
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm

#if defined(CONFIG_CPU_V7M)
	/*
	 * setmode is used to assert to be in svc mode during boot. For v7-M
	 * this is done in __v7m_setup, so setmode can be empty here.
	 */
	.macro	setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	/* Thumb-2 msr cannot take an immediate: go through \reg */
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif

/*
 * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time.
 * you cannot return to the original mode.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
	mrs	\reg , cpsr
	eor	\reg, \reg, #HYP_MODE		@ Z flag set below iff we are in HYP mode
	tst	\reg, #MODE_MASK
	bic	\reg , \reg , #MODE_MASK
	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
	bne	1f				@ not HYP: plain mode switch below
	orr	\reg, \reg, #PSR_A_BIT
	badr	lr, 2f
	msr	spsr_cxsf, \reg
	__MSR_ELR_HYP(14)			@ leave HYP via exception return to 2:
	__ERET
1:	msr	cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

	/*
	 * Emit one user-access \instr at [\ptr, #\off] (byte when \inc == 1,
	 * word when \inc == 4), with an extable fixup branching to \abort.
	 */
	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\()b\t\cond\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\t\cond\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	/*
	 * Emit \rept (1 or 2) user accesses and post-increment \ptr by
	 * \rept * \inc bytes.
	 */
	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	/*
	 * ARM variant: post-indexed addressing increments \ptr directly,
	 * one extable entry per emitted access.
	 */
	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\()b\t\cond \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\t\cond \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

/* Utility macro for declaring string literals */
	.macro	string name:req, string
	.type	\name , #object
\name:
	.asciz "\string"
	.size	\name , . - \name
	.endm

	/*
	 * ret<cond> - function return to \reg.  Uses bx (interworking) for
	 * returns through lr on v6+; plain mov pc otherwise.
	 */
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
	mov\c	pc, \reg
#else
	.ifeqs	"\reg", "lr"
	bx\c	\reg
	.else
	mov\c	pc, \reg
	.endif
#endif
	.endm
	.endr

	/* 4-byte-wide return: pad with a nop in Thumb-2 kernels */
	.macro	ret.w, reg
	ret	\reg
#ifdef CONFIG_THUMB2_KERNEL
	nop
#endif
	.endm

	/*
	 * bug - emit a BUG() trap instruction, plus file/line info for the
	 * __bug_table when CONFIG_DEBUG_BUGVERBOSE is set.
	 */
	.macro	bug, msg, line
#ifdef CONFIG_THUMB2_KERNEL
1:	.inst	0xde02
#else
1:	.inst	0xe7f001f2
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
	.pushsection .rodata.str, "aMS", %progbits, 1
2:	.asciz	"\msg"
	.popsection
	.pushsection __bug_table, "aw"
	.align	2
	.word	1b, 2b
	.hword	\line
	.popsection
#endif
	.endm

/* Add 'entry' to the kprobes blacklist */
#ifdef CONFIG_KPROBES
#define _ASM_NOKPROBE(entry)				\
	.pushsection "_kprobe_blacklist", "aw" ;	\
	.balign 4 ;					\
	.long entry;					\
	.popsection
#else
#define _ASM_NOKPROBE(entry)
#endif

	/*
	 * __adldst_l - PC-relative add/ldr/str to symbol \sym with unlimited
	 * range: a literal load of the offset on < v7, movw/movt on v7+.
	 * \tmp is a scratch register, \c an optional condition suffix.
	 */
	.macro		__adldst_l, op, reg, sym, tmp, c
	.if		__LINUX_ARM_ARCH__ < 7
	ldr\c		\tmp, .La\@
	.subsection	1
	.align		2
.La\@:	.long		\sym - .Lpc\@
	.previous
	.else
	.ifnb		\c
 THUMB(	ittt		\c			)
	.endif
	movw\c		\tmp, #:lower16:\sym - .Lpc\@
	movt\c		\tmp, #:upper16:\sym - .Lpc\@
	.endif

#ifndef CONFIG_THUMB2_KERNEL
	.set		.Lpc\@, . + 8			// PC bias
	.ifc		\op, add
	add\c		\reg, \tmp, pc
	.else
	\op\c		\reg, [pc, \tmp]
	.endif
#else
.Lb\@:	add\c		\tmp, \tmp, pc
	/*
	 * In Thumb-2 builds, the PC bias depends on whether we are currently
	 * emitting into a .arm or a .thumb section. The size of the add opcode
	 * above will be 2 bytes when emitting in Thumb mode and 4 bytes when
	 * emitting in ARM mode, so let's use this to account for the bias.
	 */
	.set		.Lpc\@, . + (. - .Lb\@)

	.ifnc		\op, add
	\op\c		\reg, [\tmp]
	.endif
#endif
	.endm

	/*
	 * mov_l - move a constant value or [relocated] address into a register
	 */
	.macro		mov_l, dst:req, imm:req, cond
	.if		__LINUX_ARM_ARCH__ < 7
	ldr\cond	\dst, =\imm
	.else
	movw\cond	\dst, #:lower16:\imm
	movt\cond	\dst, #:upper16:\imm
	.endif
	.endm

	/*
	 * adr_l - adr pseudo-op with unlimited range
	 *
	 * @dst: destination register
	 * @sym: name of the symbol
	 * @cond: conditional opcode suffix
	 */
	.macro		adr_l, dst:req, sym:req, cond
	__adldst_l	add, \dst, \sym, \dst, \cond
	.endm

	/*
	 * ldr_l - ldr <literal> pseudo-op with unlimited range
	 *
	 * @dst: destination register
	 * @sym: name of the symbol
	 * @cond: conditional opcode suffix
	 */
	.macro		ldr_l, dst:req, sym:req, cond
	__adldst_l	ldr, \dst, \sym, \dst, \cond
	.endm

	/*
	 * str_l - str <literal> pseudo-op with unlimited range
	 *
	 * @src: source register
	 * @sym: name of the symbol
	 * @tmp: mandatory scratch register
	 * @cond: conditional opcode suffix
	 */
	.macro		str_l, src:req, sym:req, tmp:req, cond
	__adldst_l	str, \src, \sym, \tmp, \cond
	.endm

	/*
	 * __ldst_va - load/store \reg at the virtual address of \sym, using
	 * \tmp to form the address.  Uses mov_l where a literal/movw-movt is
	 * acceptable, otherwise a relocated ADD/ADD/LDR group sequence.
	 */
	.macro		__ldst_va, op, reg, tmp, sym, cond
#if __LINUX_ARM_ARCH__ >= 7 || \
    (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS)) || \
    (defined(CONFIG_LD_IS_LLD) && CONFIG_LLD_VERSION < 140000)
	mov_l		\tmp, \sym, \cond
	\op\cond	\reg, [\tmp]
#else
	/*
	 * Avoid a literal load, by emitting a sequence of ADD/LDR instructions
	 * with the appropriate relocations. The combined sequence has a range
	 * of -/+ 256 MiB, which should be sufficient for the core kernel and
	 * for modules loaded into the module region.
	 */
	.globl		\sym
	.reloc		.L0_\@, R_ARM_ALU_PC_G0_NC, \sym
	.reloc		.L1_\@, R_ARM_ALU_PC_G1_NC, \sym
	.reloc		.L2_\@, R_ARM_LDR_PC_G2, \sym
.L0_\@: sub\cond	\tmp, pc, #8
.L1_\@: sub\cond	\tmp, \tmp, #4
.L2_\@: \op\cond	\reg, [\tmp, #0]
#endif
	.endm

	/*
	 * ldr_va - load a 32-bit word from the virtual address of \sym
	 */
	.macro		ldr_va, rd:req, sym:req, cond
	__ldst_va	ldr, \rd, \rd, \sym, \cond
	.endm

	/*
	 * str_va - store a 32-bit word to the virtual address of \sym
	 */
	.macro		str_va, rn:req, sym:req, tmp:req, cond
	__ldst_va	str, \rn, \tmp, \sym, \cond
	.endm

	/*
	 * ldr_this_cpu_armv6 - Load a 32-bit word from the per-CPU variable 'sym',
	 *			without using a temp register.  Supported in ARM mode
	 *			only.
	 */
	.macro		ldr_this_cpu_armv6, rd:req, sym:req
	this_cpu_offset	\rd
	.globl		\sym
	.reloc		.L0_\@, R_ARM_ALU_PC_G0_NC, \sym
	.reloc		.L1_\@, R_ARM_ALU_PC_G1_NC, \sym
	.reloc		.L2_\@, R_ARM_LDR_PC_G2, \sym
	add		\rd, \rd, pc
.L0_\@: sub		\rd, \rd, #4
.L1_\@: sub		\rd, \rd, #0
.L2_\@: ldr		\rd, [\rd, #4]
	.endm

	/*
	 * ldr_this_cpu - Load a 32-bit word from the per-CPU variable 'sym'
	 *		  into register 'rd', which may be the stack pointer,
	 *		  using 't1' and 't2' as general temp registers.  These
	 *		  are permitted to overlap with 'rd' if != sp
	 */
	.macro		ldr_this_cpu, rd:req, sym:req, t1:req, t2:req
#if __LINUX_ARM_ARCH__ >= 7 || \
    (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS)) || \
    (defined(CONFIG_LD_IS_LLD) && CONFIG_LLD_VERSION < 140000)
	this_cpu_offset	\t1
	mov_l		\t2, \sym
	ldr		\rd, [\t1, \t2]
#else
	ldr_this_cpu_armv6 \rd, \sym
#endif
	.endm

	/*
	 * rev_l - byte-swap a 32-bit value
	 *
	 * @val: source/destination register
	 * @tmp: scratch register
	 */
	.macro		rev_l, val:req, tmp:req
	.if		__LINUX_ARM_ARCH__ < 6
	eor		\tmp, \val, \val, ror #16
	bic		\tmp, \tmp, #0x00ff0000
	mov		\val, \val, ror #8
	eor		\val, \val, \tmp, lsr #8
	.else
	rev		\val, \val
	.endif
	.endm

	/*
	 * bl_r - branch and link to register
	 *
	 * @dst: target to branch to
	 * @c: conditional opcode suffix
	 */
	.macro		bl_r, dst:req, c
	.if		__LINUX_ARM_ARCH__ < 6
	mov\c		lr, pc
	mov\c		pc, \dst
	.else
	blx\c		\dst
	.endif
	.endm

#endif /* __ASM_ASSEMBLER_H__ */