/*
 * linux/arch/arm/kernel/entry-armv.S
 *
 * Copyright (C) 1996,1997,1998 Russell King.
 * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 * nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Low-level vector interface routines
 *
 * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 * that causes it to save wrong values...  Be aware!
 */

#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#include <mach/entry-macro.S>
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>

/*
 * Interrupt handling.  Preserves r7, r8, r9
 */
	.macro	irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r5, =handle_arch_irq
	mov	r0, sp
	ldr	r5, [r5]
	adr	lr, BSYM(9997f)
	teq	r5, #0
	movne	pc, r5
#endif
	arch_irq_handler_default
9997:
	.endm

#ifdef CONFIG_KPROBES
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

	.macro	svc_entry, stack_hole=0
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r1 - r3}
	add	r5, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""      ""       ""
	add	r0, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r0, r0, #4	)
	str	r1, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

	mov	r1, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r0 - sp_svc
	@  r1 - lr_svc
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r5, {r0 - r4}
	.endm

	.align	5
__dabt_svc:
	svc_entry

	@
	@ get ready to re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif

	@
	@ set desired IRQ state, then call main handler
	@
	debug_entry r1
	msr	cpsr_c, r9
	mov	r2, sp
	bl	do_DataAbort

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r2, [sp, #S_PSR]
	svc_exit r2				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

	irq_handler
#ifdef CONFIG_PREEMPT
	str	r8, [tsk, #TI_PREEMPT]		@ restore preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif
	ldr	r4, [sp, #S_PSR]		@ irqs are already disabled
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r4, #PSR_I_BIT
	bleq	trace_hardirqs_on
#endif
	svc_exit r4				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	moveq	pc, r8				@ go again
	b	1b
#endif

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
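	@ A 64-byte hole covers the worst case of an stmdb pushing all
	@ 16 registers (16 * 4 bytes).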
	svc_entry 64
#else
	svc_entry
#endif

	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r2, #-4]
#else
	ldrh	r0, [r2, #-2]			@ Thumb instruction at LR - 2
	and	r9, r0, #0xf800
	cmp	r9, #0xe800			@ 32-bit instruction if xx >= 0
	ldrhhs	r9, [r2]			@ bottom 16 bits
	orrhs	r0, r9, r0, lsl #16
#endif
	adr	r9, BSYM(1f)
	bl	call_fpe

	mov	r0, sp				@ struct pt_regs *regs
	bl	do_undefinstr

	@
	@ IRQs off again before pulling preserved data off the stack
	@
1:	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r2, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r2				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry

	@
	@ re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT

	mov	r0, r2			@ pass address of aborted instruction.
#ifdef MULTI_PABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	debug_entry r1
	msr	cpsr_c, r9			@ Maybe enable interrupts
	mov	r2, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r2, [sp, #S_PSR]
	svc_exit r2				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

	ldmia	r0, {r1 - r3}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""     ""        ""

	str	r1, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r2 - r4}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp
	.endm

	.macro	kuser_cmpxchg_check
#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
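	@ r2 holds the interrupted PC here; the helpers live in the
	@ vector page above TASK_SIZE, so a PC below TASK_SIZE cannot
	@ be inside the critical section.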
	cmp	r2, #TASK_SIZE
	blhs	kuser_cmpxchg_fixup
#endif
#endif
	.endm

	.align	5
__dabt_usr:
	usr_entry
	kuser_cmpxchg_check

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.
	@
#ifdef MULTI_DABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif

	@
	@ IRQs on, then call the main handler
	@
	debug_entry r1
	enable_irq
	mov	r2, sp
	adr	lr, BSYM(ret_from_exception)
	b	do_DataAbort
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check

#ifdef CONFIG_IRQSOFF_TRACER
	bl	trace_hardirqs_off
#endif

	get_thread_info tsk
#ifdef CONFIG_PREEMPT
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	r0, [tsk, #TI_PREEMPT]
	str	r8, [tsk, #TI_PREEMPT]
	teq	r0, r7
 ARM(	strne	r0, [r0, -r0]	)
 THUMB(	movne	r0, #0		)
 THUMB(	strne	r0, [r0]	)
#endif

	mov	why, #0
	b	ret_to_user_from_irq
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry

	@
	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	@
	@  r0 - instruction
	@
	adr	r9, BSYM(ret_from_exception)
	adr	lr, BSYM(__und_usr_unknown)
	tst	r3, #PSR_T_BIT			@ Thumb mode?
	itet	eq				@ explicit IT needed for the 1f label
	subeq	r4, r2, #4			@ ARM instr at LR - 4
	subne	r4, r2, #2			@ Thumb instr at LR - 2
1:	ldreqt	r0, [r4]
#ifdef CONFIG_CPU_ENDIAN_BE8
	reveq	r0, r0				@ little endian instruction
#endif
	beq	call_fpe
	@ Thumb instruction
#if __LINUX_ARM_ARCH__ >= 7
2:
 ARM(	ldrht	r5, [r4], #2	)
 THUMB(	ldrht	r5, [r4]	)
 THUMB(	add	r4, r4, #2	)
	and	r0, r5, #0xf800			@ mask bits 111x x... .... ....
	cmp	r0, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_unknown
3:	ldrht	r0, [r4]
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	orr	r0, r0, r5, lsl #16
#else
	b	__und_usr_unknown
#endif
 UNWIND(.fnend		)
ENDPROC(__und_usr)

	@
	@ fallthrough to call_fpe
	@

/*
 * The out of line fixup for the ldrt above.
 */
	.pushsection .fixup, "ax"
4:	mov	pc, r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if __LINUX_ARM_ARCH__ >= 7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them.  Plus, there's
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure.
 *  lr  = unrecognised instruction return address
 */
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:
	ldr	r7, [r6], #4			@ mask value
	cmp	r7, #0				@ end mask?
	beq	1f
	and	r8, r0, r7
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	get_thread_info r10
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
#endif
	moveq	pc, lr
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

	movw_pc	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	movw_pc	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	movw_pc	lr				@ CP#4
	movw_pc	lr				@ CP#5
	movw_pc	lr				@ CP#6
#endif
	movw_pc	lr				@ CP#7
	movw_pc	lr				@ CP#8
	movw_pc	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	movw_pc	lr				@ CP#10 (VFP)
	movw_pc	lr				@ CP#11 (VFP)
#endif
	movw_pc	lr				@ CP#12
	movw_pc	lr				@ CP#13
	movw_pc	lr				@ CP#14 (Debug)
	movw_pc	lr				@ CP#15 (Control)

#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif

do_fpe:
	enable_irq
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

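/*
 * fp_enter holds the USR entry point of the installed FP emulator;
 * it points at no_fp below until an emulator replaces it.
 */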
	.pushsection .data
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	mov	pc, lr
ENDPROC(no_fp)

__und_usr_unknown:
	enable_irq
	mov	r0, sp
	adr	lr, BSYM(ret_from_exception)
	b	do_undefinstr
ENDPROC(__und_usr_unknown)

	.align	5
__pabt_usr:
	usr_entry

	mov	r0, r2			@ pass address of aborted instruction.
#ifdef MULTI_PABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	debug_entry r1
	enable_irq				@ Enable interrupts
	mov	r2, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
#ifdef CONFIG_CPU_USE_DOMAINS
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	set_tls	r3, r4, r5
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)

	__INIT

/*
 * User helpers.
 *
 * These are segments of kernel-provided user code reachable from user space
 * at a fixed address in kernel memory.  They provide user space with some
 * operations which require kernel help because of unimplemented native
 * features and/or instructions in many ARM CPUs.  The idea is for this code
 * to be executed directly in user mode for best efficiency, but it is too
 * intimate with the kernel counterpart to be left to user libraries.  In
 * fact this code might even differ from one CPU to another depending on the
 * available instruction set, or on restrictions such as those on SMP
 * systems.  In other words, the kernel reserves the right to change this
 * code as needed without warning.  Only the entry points and their results
 * are guaranteed to be stable.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
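 *
 * User space should check __kuser_helper_version (defined at the end of
 * this area) before assuming that a particular helper is present.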
 *
 * User space is expected to implement these operations inline when
 * optimizing for a processor that has the necessary native support, but
 * only if the resulting binaries are already going to be incompatible with
 * earlier ARM processors due to the use of other unsupported instructions.
 * In other words, don't make binaries unable to run on earlier processors
 * just for the sake of avoiding these kernel helpers if your compiled code
 * is not going to use the new instructions for any other purpose.
 */
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	mov	pc, \reg
#endif
	.endm

	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Reference prototype:
 *
 *	void __kernel_memory_barrier(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	none
 *
 * Clobbered:
 *
 *	none
 *
 * Definition and user space usage example:
 *
 *	typedef void (__kernel_dmb_t)(void);
 *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
 *
 * Apply any needed memory barrier to preserve consistency with data modified
 * manually and __kuser_cmpxchg usage.
 *
 * This could be used as follows:
 *
 * #define __kernel_dmb() \
 *         asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
 *	        : : : "r0", "lr","cc" )
 */

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 * Input:
 *
 *	r0 = oldval
 *	r1 = newval
 *	r2 = ptr
 *	lr = return address
 *
 * Output:
 *
 *	r0 = returned value (zero or non-zero)
 *	C flag = set if r0 == 0, clear if r0 != 0
 *
 * Clobbered:
 *
 *	r3, ip, flags
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 * Notes:
 *
 *    - This routine already includes memory barriers as needed.
 *
 * For example, a user space atomic_add implementation could look like this:
 *
 * #define atomic_add(ptr, val) \
 * ({ register unsigned int *__ptr asm("r2") = (ptr); \
 *    register unsigned int __result asm("r1"); \
 *    asm volatile ( \
 *        "1: @ atomic_add\n\t" \
 *        "ldr     r0, [r2]\n\t" \
 *        "mov     r3, #0xffff0fff\n\t" \
 *        "add     lr, pc, #4\n\t" \
 *        "add     r1, r0, %2\n\t" \
 *        "add     pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *        "bcc     1b" \
 *        : "=&r" (__result) \
 *        : "r" (__ptr), "rIL" (val) \
 *        : "r0","r3","ip","lr","cc","memory" ); \
 *    __result; })
 */

__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r2 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r2, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb	arm
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif

	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_get_tls(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	r0 = TLS value
 *
 * Clobbered:
 *
 *	none
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_get_tls_t)(void);
 *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 *
 * This could be used as follows:
 *
 * #define __kernel_get_tls() \
 *	({ register unsigned int __val asm("r0"); \
 *         asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *	        : "=r" (__val) : : "lr","cc" ); \
 *	   __val; })
 */

__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	.rep	4
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version

/*
 * Reference declaration:
 *
 *	extern unsigned int __kernel_helper_version;
 *
 * Definition and user space usage example:
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 * User space may read this to determine the current number of helpers
 * available.
 */

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

 THUMB(	.thumb	)

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
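 *
 * The branches in __vectors_start below are written as
 * "vector_xxx + stubs_offset" so that they still reach these stubs once
 * both the vector page and this code have been copied to their final
 * locations.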
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm

	.globl	__stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	disable_fiq
	subs	pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5

.LCvswi:
	.word	vector_swi

	.globl	__stubs_end
__stubs_end:

	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start

	.globl	__vectors_start
__vectors_start:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	W(b)	vector_und + stubs_offset
	W(ldr)	pc, .LCvswi + stubs_offset
	W(b)	vector_pabt + stubs_offset
	W(b)	vector_dabt + stubs_offset
	W(b)	vector_addrexcptn + stubs_offset
	W(b)	vector_irq + stubs_offset
	W(b)	vector_fiq + stubs_offset

	.globl	__vectors_end
__vectors_end:

	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4

#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq
handle_arch_irq:
	.space	4
#endif
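
/*
 * With CONFIG_MULTI_IRQ_HANDLER the platform code sets handle_arch_irq
 * at boot (typically from its machine descriptor); the irq_handler
 * macro above branches to it whenever it is non-NULL.
 */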