/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#ifndef CONFIG_MULTI_IRQ_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>
#include <asm/probes.h>

/*
 * Interrupt handling.
 */
        .macro  irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
        ldr     r1, =handle_arch_irq
        mov     r0, sp
        badr    lr, 9997f
        ldr     pc, [r1]
#else
        arch_irq_handler_default
#endif
9997:
        .endm

        .macro  pabt_helper
        @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
        ldr     ip, .LCprocfns
        mov     lr, pc
        ldr     pc, [ip, #PROCESSOR_PABT_FUNC]
#else
        bl      CPU_PABORT_HANDLER
#endif
        .endm

        .macro  dabt_helper

        @
        @ Call the processor-specific abort handler:
        @
        @  r2 - pt_regs
        @  r4 - aborted context pc
        @  r5 - aborted context psr
        @
        @ The abort handler must return the aborted address in r0, and
        @ the fault status register in r1.  r9 must be preserved.
        @
#ifdef MULTI_DABORT
        ldr     ip, .LCprocfns
        mov     lr, pc
        ldr     pc, [ip, #PROCESSOR_DABT_FUNC]
#else
        bl      CPU_DABORT_HANDLER
#endif
        .endm

#ifdef CONFIG_KPROBES
        .section .kprobes.text, "ax", %progbits
#else
        .text
#endif

/*
 * Invalid mode handlers
 */
        .macro  inv_entry, reason
        sub     sp, sp, #PT_REGS_SIZE
 ARM(   stmib   sp, {r1 - lr}           )
 THUMB( stmia   sp, {r0 - r12}          )
 THUMB( str     sp, [sp, #S_SP]         )
 THUMB( str     lr, [sp, #S_LR]         )
        mov     r1, #\reason
        .endm

__pabt_invalid:
        inv_entry BAD_PREFETCH
        b       common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
        inv_entry BAD_DATA
        b       common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
        inv_entry BAD_IRQ
        b       common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
        inv_entry BAD_UNDEFINSTR

        @
        @ XXX fall through to common_invalid
        @

@
@ common_invalid - generic code for failed exception
@                  (re-entrant version of handlers)
@
common_invalid:
        zero_fp

        ldmia   r0, {r4 - r6}
        add     r0, sp, #S_PC           @ here for interlock avoidance
        mov     r7, #-1                 @  ""   ""    ""        ""
        str     r4, [sp]                @ save preserved r0
        stmia   r0, {r5 - r7}           @ lr_<exception>,
                                        @ cpsr_<exception>, "old_r0"

        mov     r0, sp
        b       bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif
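
/*
 * The EABI requires an 8-byte-aligned stack.  svc_entry below opens a
 * frame of SVC_REGS_SIZE + \stack_hole bytes (the last word, the saved
 * r0, is pushed separately), so the SPFIX() code tests the alignment
 * that would result and inserts a 4-byte pad when needed; the pad is
 * accounted for again when the original sp_svc is reconstructed in r2.
 */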

        .macro  svc_entry, stack_hole=0, trace=1, uaccess=1
 UNWIND(.fnstart                )
 UNWIND(.save {r0 - pc}         )
        sub     sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX( str     r0, [sp]        )       @ temporarily saved
 SPFIX( mov     r0, sp          )
 SPFIX( tst     r0, #4          )       @ test original stack alignment
 SPFIX( ldr     r0, [sp]        )       @ restored
#else
 SPFIX( tst     sp, #4          )
#endif
 SPFIX( subeq   sp, sp, #4      )
        stmia   sp, {r1 - r12}

        ldmia   r0, {r3 - r5}
        add     r7, sp, #S_SP - 4       @ here for interlock avoidance
        mov     r6, #-1                 @  ""  ""      ""       ""
        add     r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
 SPFIX( addeq   r2, r2, #4      )
        str     r3, [sp, #-4]!          @ save the "real" r0 copied
                                        @ from the exception stack

        mov     r3, lr

        @
        @ We are now ready to fill in the remaining blanks on the stack:
        @
        @  r2 - sp_svc
        @  r3 - lr_svc
        @  r4 - lr_<exception>, already fixed up for correct return/restart
        @  r5 - spsr_<exception>
        @  r6 - orig_r0 (see pt_regs definition in ptrace.h)
        @
        stmia   r7, {r2 - r6}

        get_thread_info tsk
        ldr     r0, [tsk, #TI_ADDR_LIMIT]
        mov     r1, #TASK_SIZE
        str     r1, [tsk, #TI_ADDR_LIMIT]
        str     r0, [sp, #SVC_ADDR_LIMIT]

        uaccess_save r0
        .if \uaccess
        uaccess_disable r0
        .endif

        .if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
#endif
        .endif
        .endm

        .align  5
__dabt_svc:
        svc_entry uaccess=0
        mov     r2, sp
        dabt_helper
 THUMB( ldr     r5, [sp, #S_PSR]        )       @ potentially updated CPSR
        svc_exit r5                             @ return from exception
 UNWIND(.fnend          )
ENDPROC(__dabt_svc)

        .align  5
__irq_svc:
        svc_entry
        irq_handler

#ifdef CONFIG_PREEMPT
        ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
        ldr     r0, [tsk, #TI_FLAGS]            @ get flags
        teq     r8, #0                          @ if preempt count != 0
        movne   r0, #0                          @ force flags to 0
        tst     r0, #_TIF_NEED_RESCHED
        blne    svc_preempt
#endif

        svc_exit r5, irq = 1                    @ return from exception
 UNWIND(.fnend          )
ENDPROC(__irq_svc)

        .ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
        mov     r8, lr
1:      bl      preempt_schedule_irq            @ irq en/disable is done inside
        ldr     r0, [tsk, #TI_FLAGS]            @ get new task's TI_FLAGS
        tst     r0, #_TIF_NEED_RESCHED
        reteq   r8                              @ go again
        b       1b
#endif

__und_fault:
        @ Correct the PC such that it is pointing at the instruction
        @ which caused the fault.  If the faulting instruction was ARM,
        @ the PC will be pointing at the next instruction and we have
        @ to subtract 4.  Otherwise, it is Thumb, and the PC will be
        @ pointing at the second half of the Thumb instruction, so we
        @ have to subtract 2.
        ldr     r2, [r0, #S_PC]
        sub     r2, r2, r1
        str     r2, [r0, #S_PC]
        b       do_undefinstr
ENDPROC(__und_fault)
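
@ In the Thumb-2 kernel path below, the first halfword of the faulting
@ instruction is enough to classify it: values below 0xe800 encode
@ complete 16-bit instructions, while 0xe800 and above are the first
@ half of a 32-bit encoding, whose second halfword must be fetched and
@ merged before the opcode can be examined.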

        .align  5
__und_svc:
#ifdef CONFIG_KPROBES
        @ If a kprobe is about to simulate a "stmdb sp..." instruction,
        @ it obviously needs free stack space which then will belong to
        @ the saved context.
        svc_entry MAX_STACK_SIZE
#else
        svc_entry
#endif
        @
        @ call emulation code, which returns using r9 if it has emulated
        @ the instruction, or the more conventional lr if we are to treat
        @ this as a real undefined instruction
        @
        @  r0 - instruction
        @
#ifndef CONFIG_THUMB2_KERNEL
        ldr     r0, [r4, #-4]
#else
        mov     r1, #2
        ldrh    r0, [r4, #-2]                   @ Thumb instruction at LR - 2
        cmp     r0, #0xe800                     @ first half of 32-bit insn if >= 0xe800
        blo     __und_svc_fault
        ldrh    r9, [r4]                        @ bottom 16 bits
        add     r4, r4, #2
        str     r4, [sp, #S_PC]
        orr     r0, r9, r0, lsl #16
#endif
        badr    r9, __und_svc_finish
        mov     r2, r4
        bl      call_fpe

        mov     r1, #4                          @ PC correction to apply
__und_svc_fault:
        mov     r0, sp                          @ struct pt_regs *regs
        bl      __und_fault

__und_svc_finish:
        get_thread_info tsk
        ldr     r5, [sp, #S_PSR]                @ Get SVC cpsr
        svc_exit r5                             @ return from exception
 UNWIND(.fnend          )
ENDPROC(__und_svc)

        .align  5
__pabt_svc:
        svc_entry
        mov     r2, sp                          @ regs
        pabt_helper
        svc_exit r5                             @ return from exception
 UNWIND(.fnend          )
ENDPROC(__pabt_svc)

        .align  5
__fiq_svc:
        svc_entry trace=0
        mov     r0, sp                          @ struct pt_regs *regs
        bl      handle_fiq_as_nmi
        svc_exit_via_fiq
 UNWIND(.fnend          )
ENDPROC(__fiq_svc)

        .align  5
.LCcralign:
        .word   cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
        .word   processor
#endif
.LCfp:
        .word   fp_enter

/*
 * Abort mode handlers
 */

@
@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
@ and reuses the same macros.  However, in abort mode we must also
@ save/restore lr_abt and spsr_abt to make nested aborts safe.
@
        .align  5
__fiq_abt:
        svc_entry trace=0

 ARM(   msr     cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov     r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr     cpsr_c, r0 )
        mov     r1, lr                  @ Save lr_abt
        mrs     r2, spsr                @ Save spsr_abt, abort is now safe
 ARM(   msr     cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov     r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr     cpsr_c, r0 )
        stmfd   sp!, {r1 - r2}

        add     r0, sp, #8              @ struct pt_regs *regs
        bl      handle_fiq_as_nmi

        ldmfd   sp!, {r1 - r2}
 ARM(   msr     cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov     r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr     cpsr_c, r0 )
        mov     lr, r1                  @ Restore lr_abt, abort is unsafe
        msr     spsr_cxsf, r2           @ Restore spsr_abt
 ARM(   msr     cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov     r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr     cpsr_c, r0 )

        svc_exit_via_fiq
 UNWIND(.fnend          )
ENDPROC(__fiq_abt)

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, and so PT_REGS_SIZE
 * must be a multiple of 8 as well.
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif
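
/*
 * usr_entry builds a struct pt_regs frame on the SVC stack for an
 * exception taken from user mode.  On entry r0 points at the vector
 * stub's scratch area holding the original {r0, lr_<exception>,
 * spsr_<exception>} words, which are copied into the frame below.
 */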
        .macro  usr_entry, trace=1, uaccess=1
 UNWIND(.fnstart        )
 UNWIND(.cantunwind     )       @ don't unwind the user space
        sub     sp, sp, #PT_REGS_SIZE
 ARM(   stmib   sp, {r1 - r12}  )
 THUMB( stmia   sp, {r0 - r12}  )

 ATRAP( mrc     p15, 0, r7, c1, c0, 0)
 ATRAP( ldr     r8, .LCcralign)

        ldmia   r0, {r3 - r5}
        add     r0, sp, #S_PC           @ here for interlock avoidance
        mov     r6, #-1                 @  ""  ""     ""        ""

        str     r3, [sp]                @ save the "real" r0 copied
                                        @ from the exception stack

 ATRAP( ldr     r8, [r8, #0])

        @
        @ We are now ready to fill in the remaining blanks on the stack:
        @
        @  r4 - lr_<exception>, already fixed up for correct return/restart
        @  r5 - spsr_<exception>
        @  r6 - orig_r0 (see pt_regs definition in ptrace.h)
        @
        @ Also, separately save sp_usr and lr_usr
        @
        stmia   r0, {r4 - r6}
 ARM(   stmdb   r0, {sp, lr}^   )
 THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )

        .if \uaccess
        uaccess_disable ip
        .endif

        @ Enable the alignment trap while in kernel mode
 ATRAP( teq     r8, r7)
 ATRAP( mcrne   p15, 0, r8, c1, c0, 0)

        @
        @ Clear FP to mark the first stack frame
        @
        zero_fp

        .if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
#endif
        ct_user_exit save = 0
        .endif
        .endm

        .macro  kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
        @ Make sure our user space atomic helper is restarted
        @ if it was interrupted in a critical region.  Here we
        @ perform a quick test inline since it should be false
        @ 99.9999% of the time.  The rest is done out of line.
        cmp     r4, #TASK_SIZE
        blhs    kuser_cmpxchg64_fixup
#endif
#endif
        .endm

        .align  5
__dabt_usr:
        usr_entry uaccess=0
        kuser_cmpxchg_check
        mov     r2, sp
        dabt_helper
        b       ret_from_exception
 UNWIND(.fnend          )
ENDPROC(__dabt_usr)

        .align  5
__irq_usr:
        usr_entry
        kuser_cmpxchg_check
        irq_handler
        get_thread_info tsk
        mov     why, #0
        b       ret_to_user_from_irq
 UNWIND(.fnend          )
ENDPROC(__irq_usr)

        .ltorg

        .align  5
__und_usr:
        usr_entry uaccess=0

        mov     r2, r4
        mov     r3, r5

        @ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
        @      faulting instruction depending on Thumb mode.
        @ r3 = regs->ARM_cpsr
        @
        @ The emulation code returns using r9 if it has emulated the
        @ instruction, or the more conventional lr if we are to treat
        @ this as a real undefined instruction
        @
        badr    r9, ret_from_exception

        @ IRQs must be enabled before attempting to read the instruction from
        @ user space since that could cause a page/translation fault if the
        @ page table was modified by another CPU.
        enable_irq

        tst     r3, #PSR_T_BIT                  @ Thumb mode?
        bne     __und_usr_thumb
        sub     r4, r2, #4                      @ ARM instr at LR - 4
1:      ldrt    r0, [r4]
 ARM_BE8(rev    r0, r0)                         @ little endian instruction

        uaccess_disable ip

        @ r0 = 32-bit ARM instruction which caused the exception
        @ r2 = PC value for the following instruction (:= regs->ARM_pc)
        @ r4 = PC value for the faulting instruction
        @ lr = 32-bit undefined instruction function
        badr    lr, __und_usr_fault_32
        b       call_fpe
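
@ The ldrt/ldrht loads at 1:, 2: and 3: in this handler read the
@ faulting instruction back from user space and may themselves fault if
@ the page has meanwhile gone away; the __ex_table entries further down
@ redirect such a fault to the fixup at 4:, which rewinds the saved PC
@ so the whole undef exception is simply retried.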
__und_usr_thumb:
        @ Thumb instruction
        sub     r4, r2, #2                      @ First half of thumb instr at LR - 2
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
        ldr     r5, .LCcpu_architecture
        ldr     r5, [r5]
        cmp     r5, #CPU_ARCH_ARMv7
        blo     __und_usr_fault_16              @ 16bit undefined instruction
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding around the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
        .arch   armv6t2
#endif
2:      ldrht   r5, [r4]
ARM_BE8(rev16   r5, r5)                         @ little endian instruction
        cmp     r5, #0xe800                     @ first half of 32-bit insn if >= 0xe800
        blo     __und_usr_fault_16_pan          @ 16bit undefined instruction
3:      ldrht   r0, [r2]
ARM_BE8(rev16   r0, r0)                         @ little endian instruction
        uaccess_disable ip
        add     r2, r2, #2                      @ r2 is PC + 2, make it PC + 4
        str     r2, [sp, #S_PC]                 @ it's a 2x16bit instr, update
        orr     r0, r0, r5, lsl #16
        badr    lr, __und_usr_fault_32
        @ r0 = the two 16-bit Thumb instructions which caused the exception
        @ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
        @ r4 = PC value for the first 16-bit Thumb instruction
        @ lr = 32-bit undefined instruction function

#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
        .arch   armv6k
#else
        .arch   armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
        b       __und_usr_fault_16
#endif
 UNWIND(.fnend)
ENDPROC(__und_usr)

/*
 * The out-of-line fixup for the ldrt instructions above.
 */
        .pushsection .text.fixup, "ax"
        .align  2
4:      str     r4, [sp, #S_PC]                 @ retry current instruction
        ret     r9
        .popsection
        .pushsection __ex_table, "a"
        .long   1b, 4b
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
        .long   2b, 4b
        .long   3b, 4b
#endif
        .popsection

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here.  Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them.  Plus, there are
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
 *  r2  = PC value to resume execution after successful emulation
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure
 *  lr  = unrecognised instruction return address
 * IRQs enabled, FIQs enabled.
 */
        @
        @ Fall-through from Thumb-2 __und_usr
        @
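@ A Thumb-2 undef falls straight through from __und_usr above into the
@ CONFIG_NEON block below, which points r6 at the Thumb NEON opcode
@ table before joining call_fpe; ARM-state callers branch to call_fpe
@ directly and use the ARM table.  Each table is a list of
@ (mask, opcode) pairs terminated by an all-zero mask.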
#ifdef CONFIG_NEON
        get_thread_info r10                     @ get current thread
        adr     r6, .LCneon_thumb_opcodes
        b       2f
#endif
call_fpe:
        get_thread_info r10                     @ get current thread
#ifdef CONFIG_NEON
        adr     r6, .LCneon_arm_opcodes
2:      ldr     r5, [r6], #4                    @ mask value
        ldr     r7, [r6], #4                    @ opcode bits matching in mask
        cmp     r5, #0                          @ end mask?
        beq     1f
        and     r8, r0, r5
        cmp     r8, r7                          @ NEON instruction?
        bne     2b
        mov     r7, #1
        strb    r7, [r10, #TI_USED_CP + 10]     @ mark CP#10 as used
        strb    r7, [r10, #TI_USED_CP + 11]     @ mark CP#11 as used
        b       do_vfp                          @ let VFP handler handle this
1:
#endif
        tst     r0, #0x08000000                 @ only CDP/CPRT/LDC/STC have bit 27
        tstne   r0, #0x04000000                 @ bit 26 set on both ARM and Thumb-2
        reteq   lr
        and     r8, r0, #0x00000f00             @ mask out CP number
 THUMB( lsr     r8, r8, #8              )
        mov     r7, #1
        add     r6, r10, #TI_USED_CP
 ARM(   strb    r7, [r6, r8, lsr #8]    )       @ set appropriate used_cp[]
 THUMB( strb    r7, [r6, r8]            )       @ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
        @ Test if we need to give access to iWMMXt coprocessors
        ldr     r5, [r10, #TI_FLAGS]
        rsbs    r7, r8, #(1 << 8)               @ CP 0 or 1 only
        movcss  r7, r5, lsr #(TIF_USING_IWMMXT + 1)
        bcs     iwmmxt_task_enable
#endif
 ARM(   add     pc, pc, r8, lsr #6      )
 THUMB( lsl     r8, r8, #2              )
 THUMB( add     pc, r8                  )
        nop

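@ The table below has one 4-byte entry per coprocessor, indexed by the
@ CP number extracted above (r8 = CP# << 8 in ARM state, CP# * 4 after
@ the Thumb shift).  In ARM state the pc read by the add is the address
@ of the add itself plus 8, which is exactly the first table entry, so
@ the add jumps straight into the table; the nop is layout padding that
@ keeps the Thumb-2 offset calculation landing on the same spot.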
        ret.w   lr                              @ CP#0
        W(b)    do_fpe                          @ CP#1 (FPE)
        W(b)    do_fpe                          @ CP#2 (FPE)
        ret.w   lr                              @ CP#3
#ifdef CONFIG_CRUNCH
        b       crunch_task_enable              @ CP#4 (MaverickCrunch)
        b       crunch_task_enable              @ CP#5 (MaverickCrunch)
        b       crunch_task_enable              @ CP#6 (MaverickCrunch)
#else
        ret.w   lr                              @ CP#4
        ret.w   lr                              @ CP#5
        ret.w   lr                              @ CP#6
#endif
        ret.w   lr                              @ CP#7
        ret.w   lr                              @ CP#8
        ret.w   lr                              @ CP#9
#ifdef CONFIG_VFP
        W(b)    do_vfp                          @ CP#10 (VFP)
        W(b)    do_vfp                          @ CP#11 (VFP)
#else
        ret.w   lr                              @ CP#10 (VFP)
        ret.w   lr                              @ CP#11 (VFP)
#endif
        ret.w   lr                              @ CP#12
        ret.w   lr                              @ CP#13
        ret.w   lr                              @ CP#14 (Debug)
        ret.w   lr                              @ CP#15 (Control)

#ifdef NEED_CPU_ARCHITECTURE
        .align  2
.LCcpu_architecture:
        .word   __cpu_architecture
#endif

#ifdef CONFIG_NEON
        .align  6

.LCneon_arm_opcodes:
        .word   0xfe000000                      @ mask
        .word   0xf2000000                      @ opcode

        .word   0xff100000                      @ mask
        .word   0xf4000000                      @ opcode

        .word   0x00000000                      @ mask
        .word   0x00000000                      @ opcode

.LCneon_thumb_opcodes:
        .word   0xef000000                      @ mask
        .word   0xef000000                      @ opcode

        .word   0xff100000                      @ mask
        .word   0xf9000000                      @ opcode

        .word   0x00000000                      @ mask
        .word   0x00000000                      @ opcode
#endif

do_fpe:
        ldr     r4, .LCfp
        add     r10, r10, #TI_FPSTATE           @ r10 = workspace
        ldr     pc, [r4]                        @ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

        .pushsection .data
        .align  2
ENTRY(fp_enter)
        .word   no_fp
        .popsection

ENTRY(no_fp)
        ret     lr
ENDPROC(no_fp)

__und_usr_fault_32:
        mov     r1, #4
        b       1f
__und_usr_fault_16_pan:
        uaccess_disable ip
__und_usr_fault_16:
        mov     r1, #2
1:      mov     r0, sp
        badr    lr, ret_from_exception
        b       __und_fault
ENDPROC(__und_usr_fault_32)
ENDPROC(__und_usr_fault_16)

        .align  5
__pabt_usr:
        usr_entry
        mov     r2, sp                          @ regs
        pabt_helper
 UNWIND(.fnend          )
        /* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart        )
 UNWIND(.cantunwind     )
        get_thread_info tsk
        mov     why, #0
        b       ret_to_user
 UNWIND(.fnend          )
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

        .align  5
__fiq_usr:
        usr_entry trace=0
        kuser_cmpxchg_check
        mov     r0, sp                          @ struct pt_regs *regs
        bl      handle_fiq_as_nmi
        get_thread_info tsk
        restore_user_regs fast = 0, offset = 0
 UNWIND(.fnend          )
ENDPROC(__fiq_usr)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart        )
 UNWIND(.cantunwind     )
        add     ip, r1, #TI_CPU_SAVE
 ARM(   stmia   ip!, {r4 - sl, fp, sp, lr} )    @ Store most regs on stack
 THUMB( stmia   ip!, {r4 - sl, fp}         )    @ Store most regs on stack
 THUMB( str     sp, [ip], #4               )
 THUMB( str     lr, [ip], #4               )
        ldr     r4, [r2, #TI_TP_VALUE]
        ldr     r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
        mrc     p15, 0, r6, c3, c0, 0           @ Get domain register
        str     r6, [r1, #TI_CPU_DOMAIN]        @ Save old domain register
        ldr     r6, [r2, #TI_CPU_DOMAIN]
#endif
        switch_tls r1, r4, r5, r3, r7
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
        ldr     r7, [r2, #TI_TASK]
        ldr     r8, =__stack_chk_guard
        .if (TSK_STACK_CANARY > IMM12_MASK)
        add     r7, r7, #TSK_STACK_CANARY & ~IMM12_MASK
        .endif
        ldr     r7, [r7, #TSK_STACK_CANARY & IMM12_MASK]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
        mcr     p15, 0, r6, c3, c0, 0           @ Set domain register
#endif
        mov     r5, r0
        add     r4, r2, #TI_CPU_SAVE
        ldr     r0, =thread_notify_head
        mov     r1, #THREAD_NOTIFY_SWITCH
        bl      atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
        str     r7, [r8]
#endif
 THUMB( mov     ip, r4                     )
        mov     r0, r5
 ARM(   ldmia   r4, {r4 - sl, fp, sp, pc}  )    @ Load all regs saved previously
 THUMB( ldmia   ip!, {r4 - sl, fp}         )    @ Load all regs saved previously
 THUMB( ldr     sp, [ip], #4               )
 THUMB( ldr     pc, [ip]                   )
 UNWIND(.fnend          )
ENDPROC(__switch_to)

        __INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */
 THUMB( .arm    )

        .macro  usr_ret, reg
#ifdef CONFIG_ARM_THUMB
        bx      \reg
#else
        ret     \reg
#endif
        .endm

        .macro  kuser_pad, sym, size
        .if     (. - \sym) & 3
        .rept   4 - (. - \sym) & 3
        .byte   0
        .endr
        .endif
        .rept   (\size - (. - \sym)) / 4
        .word   0xe7fddef1
        .endr
        .endm

#ifdef CONFIG_KUSER_HELPERS
        .align  5
        .globl  __kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

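@ Helper calling convention (see Documentation/arm/kernel_user_helpers.txt):
@ r0 points at the expected old value, r1 at the new value and r2 at the
@ word to be updated.  On return r0 is zero with the C flag set if the
@ exchange happened, and non-zero with C clear if the comparison failed.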
__kuser_cmpxchg64:                              @ 0xffff0f60

#if defined(CONFIG_CPU_32v6K)

        stmfd   sp!, {r4, r5, r6, r7}
        ldrd    r4, r5, [r0]                    @ load old val
        ldrd    r6, r7, [r1]                    @ load new val
        smp_dmb arm
1:      ldrexd  r0, r1, [r2]                    @ load current val
        eors    r3, r0, r4                      @ compare with oldval (1)
        eoreqs  r3, r1, r5                      @ compare with oldval (2)
        strexdeq r3, r6, r7, [r2]               @ store newval if eq
        teqeq   r3, #1                          @ success?
        beq     1b                              @ if not, retry
        smp_dmb arm
        rsbs    r0, r3, #0                      @ set returned val and C flag
        ldmfd   sp!, {r4, r5, r6, r7}
        usr_ret lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

        /*
         * The only thing that can break atomicity in this cmpxchg64
         * implementation is either an IRQ or a data abort exception
         * causing another process/thread to be scheduled in the middle of
         * the critical sequence.  The same strategy as for cmpxchg is used.
         */
        stmfd   sp!, {r4, r5, r6, lr}
        ldmia   r0, {r4, r5}                    @ load old val
        ldmia   r1, {r6, lr}                    @ load new val
1:      ldmia   r2, {r0, r1}                    @ load current val
        eors    r3, r0, r4                      @ compare with oldval (1)
        eoreqs  r3, r1, r5                      @ compare with oldval (2)
2:      stmeqia r2, {r6, lr}                    @ store newval if eq
        rsbs    r0, r3, #0                      @ set return val and C flag
        ldmfd   sp!, {r4, r5, r6, pc}

        .text
kuser_cmpxchg64_fixup:
        @ Called from the kuser_cmpxchg_check macro.
        @ r4 = address of interrupted insn (must be preserved).
        @ sp = saved regs.  r7 and r8 are clobbered.
        @ 1b = first critical insn, 2b = last critical insn.
        @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
        mov     r7, #0xffff0fff
        sub     r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
        subs    r8, r4, r7
        rsbcss  r8, r8, #(2b - 1b)
        strcs   r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
        bcc     kuser_cmpxchg32_fixup
#endif
        ret     lr
        .previous

#else
#warning "NPTL on non MMU needs fixing"
        mov     r0, #-1
        adds    r0, r0, #0
        usr_ret lr
#endif

#else
#error "incoherent kernel configuration"
#endif

        kuser_pad __kuser_cmpxchg64, 64

__kuser_memory_barrier:                         @ 0xffff0fa0
        smp_dmb arm
        usr_ret lr

        kuser_pad __kuser_memory_barrier, 32

__kuser_cmpxchg:                                @ 0xffff0fc0

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

        /*
         * The only thing that can break atomicity in this cmpxchg
         * implementation is either an IRQ or a data abort exception
         * causing another process/thread to be scheduled in the middle
         * of the critical sequence.  To prevent this, code is added to
         * the IRQ and data abort exception handlers to set the pc back
         * to the beginning of the critical section if it is found to be
         * within that critical section (see kuser_cmpxchg_fixup).
         */
1:      ldr     r3, [r2]                        @ load current val
        subs    r3, r3, r0                      @ compare with oldval
2:      streq   r1, [r2]                        @ store newval if eq
        rsbs    r0, r3, #0                      @ set return val and C flag
        usr_ret lr

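@ The fixup below rebuilds the user-visible address of label 1b inside
@ the vector page (__kuser_cmpxchg is copied to 0xffff0fc0).  The
@ address is composed with a mov/sub pair because it is not, in
@ general, encodable as a single immediate operand.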
        .text
kuser_cmpxchg32_fixup:
        @ Called from the kuser_cmpxchg_check macro.
        @ r4 = address of interrupted insn (must be preserved).
        @ sp = saved regs.  r7 and r8 are clobbered.
        @ 1b = first critical insn, 2b = last critical insn.
        @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
        mov     r7, #0xffff0fff
        sub     r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
        subs    r8, r4, r7
        rsbcss  r8, r8, #(2b - 1b)
        strcs   r7, [sp, #S_PC]
        ret     lr
        .previous

#else
#warning "NPTL on non MMU needs fixing"
        mov     r0, #-1
        adds    r0, r0, #0
        usr_ret lr
#endif

#else

        smp_dmb arm
1:      ldrex   r3, [r2]
        subs    r3, r3, r0
        strexeq r3, r1, [r2]
        teqeq   r3, #1
        beq     1b
        rsbs    r0, r3, #0
        /* beware -- each __kuser slot must be 8 instructions max */
        ALT_SMP(b       __kuser_memory_barrier)
        ALT_UP(usr_ret  lr)

#endif

        kuser_pad __kuser_cmpxchg, 32

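@ TLS helper: the ldr below returns the word at 0xffff0ff0, which the
@ kernel rewrites on context switch when the CPU has no hardware thread
@ ID register.  Where hardware TLS is available, kuser_get_tls_init is
@ expected to patch this slot at boot so that the mrc at 0xffff0fe8
@ (reading TPIDRURO) is executed instead; see the "set in
@ kuser_get_tls_init" note on the ldr itself.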
__kuser_get_tls:                                @ 0xffff0fe0
        ldr     r0, [pc, #(16 - 8)]     @ read TLS, set in kuser_get_tls_init
        usr_ret lr
        mrc     p15, 0, r0, c13, c0, 3  @ 0xffff0fe8 hardware TLS code
        kuser_pad __kuser_get_tls, 16
        .rep    3
        .word   0                       @ 0xffff0ff0 software TLS value, then
        .endr                           @ pad up to __kuser_helper_version

__kuser_helper_version:                         @ 0xffff0ffc
        .word   ((__kuser_helper_end - __kuser_helper_start) >> 5)

        .globl  __kuser_helper_end
__kuser_helper_end:

#endif

 THUMB( .thumb  )

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff1000 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not exceed
 * a page size.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
        .macro  vector_stub, name, mode, correction=0
        .align  5

vector_\name:
        .if \correction
        sub     lr, lr, #\correction
        .endif

        @
        @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
        @ (parent CPSR)
        @
        stmia   sp, {r0, lr}            @ save r0, lr
        mrs     lr, spsr
        str     lr, [sp, #8]            @ save spsr

        @
        @ Prepare for SVC32 mode.  IRQs remain disabled.
        @
        mrs     r0, cpsr
        eor     r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
        msr     spsr_cxsf, r0

        @
        @ the branch table must immediately follow this code
        @
        and     lr, lr, #0x0f
 THUMB( adr     r0, 1f                  )
 THUMB( ldr     lr, [r0, lr, lsl #2]    )
        mov     r0, sp
 ARM(   ldr     lr, [pc, lr, lsl #2]    )
        movs    pc, lr                  @ branch to handler in SVC mode
ENDPROC(vector_\name)

        .align  2
        @ handler addresses follow this label
1:
        .endm

        .section .stubs, "ax", %progbits
        @ This must be the first word
        .word   vector_swi

vector_rst:
 ARM(   swi     SYS_ERROR0      )
 THUMB( svc     #0              )
 THUMB( nop                     )
        b       vector_und

/*
 * Interrupt dispatcher
 */
        vector_stub     irq, IRQ_MODE, 4

        .long   __irq_usr                       @  0  (USR_26 / USR_32)
        .long   __irq_invalid                   @  1  (FIQ_26 / FIQ_32)
        .long   __irq_invalid                   @  2  (IRQ_26 / IRQ_32)
        .long   __irq_svc                       @  3  (SVC_26 / SVC_32)
        .long   __irq_invalid                   @  4
        .long   __irq_invalid                   @  5
        .long   __irq_invalid                   @  6
        .long   __irq_invalid                   @  7
        .long   __irq_invalid                   @  8
        .long   __irq_invalid                   @  9
        .long   __irq_invalid                   @  a
        .long   __irq_invalid                   @  b
        .long   __irq_invalid                   @  c
        .long   __irq_invalid                   @  d
        .long   __irq_invalid                   @  e
        .long   __irq_invalid                   @  f

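/*
 * The "correction" argument to vector_stub compensates for how far
 * lr_<mode> points past the offending instruction in each exception
 * class: 4 for IRQ and prefetch abort, 8 for data abort, and 0 for
 * undef, where the instruction length is variable and the PC
 * adjustment is left to __und_fault later on.
 */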
/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
        vector_stub     dabt, ABT_MODE, 8

        .long   __dabt_usr                      @  0  (USR_26 / USR_32)
        .long   __dabt_invalid                  @  1  (FIQ_26 / FIQ_32)
        .long   __dabt_invalid                  @  2  (IRQ_26 / IRQ_32)
        .long   __dabt_svc                      @  3  (SVC_26 / SVC_32)
        .long   __dabt_invalid                  @  4
        .long   __dabt_invalid                  @  5
        .long   __dabt_invalid                  @  6
        .long   __dabt_invalid                  @  7
        .long   __dabt_invalid                  @  8
        .long   __dabt_invalid                  @  9
        .long   __dabt_invalid                  @  a
        .long   __dabt_invalid                  @  b
        .long   __dabt_invalid                  @  c
        .long   __dabt_invalid                  @  d
        .long   __dabt_invalid                  @  e
        .long   __dabt_invalid                  @  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
        vector_stub     pabt, ABT_MODE, 4

        .long   __pabt_usr                      @  0  (USR_26 / USR_32)
        .long   __pabt_invalid                  @  1  (FIQ_26 / FIQ_32)
        .long   __pabt_invalid                  @  2  (IRQ_26 / IRQ_32)
        .long   __pabt_svc                      @  3  (SVC_26 / SVC_32)
        .long   __pabt_invalid                  @  4
        .long   __pabt_invalid                  @  5
        .long   __pabt_invalid                  @  6
        .long   __pabt_invalid                  @  7
        .long   __pabt_invalid                  @  8
        .long   __pabt_invalid                  @  9
        .long   __pabt_invalid                  @  a
        .long   __pabt_invalid                  @  b
        .long   __pabt_invalid                  @  c
        .long   __pabt_invalid                  @  d
        .long   __pabt_invalid                  @  e
        .long   __pabt_invalid                  @  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
        vector_stub     und, UND_MODE

        .long   __und_usr                       @  0  (USR_26 / USR_32)
        .long   __und_invalid                   @  1  (FIQ_26 / FIQ_32)
        .long   __und_invalid                   @  2  (IRQ_26 / IRQ_32)
        .long   __und_svc                       @  3  (SVC_26 / SVC_32)
        .long   __und_invalid                   @  4
        .long   __und_invalid                   @  5
        .long   __und_invalid                   @  6
        .long   __und_invalid                   @  7
        .long   __und_invalid                   @  8
        .long   __und_invalid                   @  9
        .long   __und_invalid                   @  a
        .long   __und_invalid                   @  b
        .long   __und_invalid                   @  c
        .long   __und_invalid                   @  d
        .long   __und_invalid                   @  e
        .long   __und_invalid                   @  f

        .align  5

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
        b       vector_addrexcptn

/*=============================================================================
 * FIQ "NMI" handler
 *-----------------------------------------------------------------------------
 * Handle a FIQ using the SVC stack, allowing FIQs to act like the NMI on x86
 * systems.
 */
        vector_stub     fiq, FIQ_MODE, 4

        .long   __fiq_usr                       @  0  (USR_26 / USR_32)
        .long   __fiq_svc                       @  1  (FIQ_26 / FIQ_32)
        .long   __fiq_svc                       @  2  (IRQ_26 / IRQ_32)
        .long   __fiq_svc                       @  3  (SVC_26 / SVC_32)
        .long   __fiq_svc                       @  4
        .long   __fiq_svc                       @  5
        .long   __fiq_svc                       @  6
        .long   __fiq_abt                       @  7
        .long   __fiq_svc                       @  8
        .long   __fiq_svc                       @  9
        .long   __fiq_svc                       @  a
        .long   __fiq_svc                       @  b
        .long   __fiq_svc                       @  c
        .long   __fiq_svc                       @  d
        .long   __fiq_svc                       @  e
        .long   __fiq_svc                       @  f

        .globl  vector_fiq

        .section .vectors, "ax", %progbits
.L__vectors_start:
        W(b)    vector_rst
        W(b)    vector_und
        W(ldr)  pc, .L__vectors_start + 0x1000
        W(b)    vector_pabt
        W(b)    vector_dabt
        W(b)    vector_addrexcptn
        W(b)    vector_irq
        W(b)    vector_fiq

        .data
        .align  2

        .globl  cr_alignment
cr_alignment:
        .space  4

#ifdef CONFIG_MULTI_IRQ_HANDLER
        .globl  handle_arch_irq
handle_arch_irq:
        .space  4
#endif