1/* 2 * linux/arch/arm/kernel/entry-armv.S 3 * 4 * Copyright (C) 1996,1997,1998 Russell King. 5 * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk) 6 * nommu support by Hyok S. Choi (hyok.choi@samsung.com) 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License version 2 as 10 * published by the Free Software Foundation. 11 * 12 * Low-level vector interface routines 13 * 14 * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction 15 * that causes it to save wrong values... Be aware! 16 */ 17 18#include <asm/memory.h> 19#include <asm/glue.h> 20#include <asm/vfpmacros.h> 21#include <mach/entry-macro.S> 22#include <asm/thread_notify.h> 23#include <asm/unwind.h> 24 25#include "entry-header.S" 26 27/* 28 * Interrupt handling. Preserves r7, r8, r9 29 */ 30 .macro irq_handler 31 get_irqnr_preamble r5, lr 321: get_irqnr_and_base r0, r6, r5, lr 33 movne r1, sp 34 @ 35 @ routine called with r0 = irq number, r1 = struct pt_regs * 36 @ 37 adrne lr, BSYM(1b) 38 bne asm_do_IRQ 39 40#ifdef CONFIG_SMP 41 /* 42 * XXX 43 * 44 * this macro assumes that irqstat (r6) and base (r5) are 45 * preserved from get_irqnr_and_base above 46 */ 47 test_for_ipi r0, r6, r5, lr 48 movne r0, sp 49 adrne lr, BSYM(1b) 50 bne do_IPI 51 52#ifdef CONFIG_LOCAL_TIMERS 53 test_for_ltirq r0, r6, r5, lr 54 movne r0, sp 55 adrne lr, BSYM(1b) 56 bne do_local_timer 57#endif 58#endif 59 60 .endm 61 62#ifdef CONFIG_KPROBES 63 .section .kprobes.text,"ax",%progbits 64#else 65 .text 66#endif 67 68/* 69 * Invalid mode handlers 70 */ 71 .macro inv_entry, reason 72 sub sp, sp, #S_FRAME_SIZE 73 ARM( stmib sp, {r1 - lr} ) 74 THUMB( stmia sp, {r0 - r12} ) 75 THUMB( str sp, [sp, #S_SP] ) 76 THUMB( str lr, [sp, #S_LR] ) 77 mov r1, #\reason 78 .endm 79 80__pabt_invalid: 81 inv_entry BAD_PREFETCH 82 b common_invalid 83ENDPROC(__pabt_invalid) 84 85__dabt_invalid: 86 inv_entry BAD_DATA 87 b common_invalid 88ENDPROC(__dabt_invalid) 89 90__irq_invalid: 91 inv_entry BAD_IRQ 92 b common_invalid 93ENDPROC(__irq_invalid) 94 95__und_invalid: 96 inv_entry BAD_UNDEFINSTR 97 98 @ 99 @ XXX fall through to common_invalid 100 @ 101 102@ 103@ common_invalid - generic code for failed exception (re-entrant version of handlers) 104@ 105common_invalid: 106 zero_fp 107 108 ldmia r0, {r4 - r6} 109 add r0, sp, #S_PC @ here for interlock avoidance 110 mov r7, #-1 @ "" "" "" "" 111 str r4, [sp] @ save preserved r0 112 stmia r0, {r5 - r7} @ lr_<exception>, 113 @ cpsr_<exception>, "old_r0" 114 115 mov r0, sp 116 b bad_mode 117ENDPROC(__und_invalid) 118 119/* 120 * SVC mode handlers 121 */ 122 123#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) 124#define SPFIX(code...) code 125#else 126#define SPFIX(code...) 127#endif 128 129 .macro svc_entry, stack_hole=0 130 UNWIND(.fnstart ) 131 UNWIND(.save {r0 - pc} ) 132 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4) 133#ifdef CONFIG_THUMB2_KERNEL 134 SPFIX( str r0, [sp] ) @ temporarily saved 135 SPFIX( mov r0, sp ) 136 SPFIX( tst r0, #4 ) @ test original stack alignment 137 SPFIX( ldr r0, [sp] ) @ restored 138#else 139 SPFIX( tst sp, #4 ) 140#endif 141 SPFIX( subeq sp, sp, #4 ) 142 stmia sp, {r1 - r12} 143 144 ldmia r0, {r1 - r3} 145 add r5, sp, #S_SP - 4 @ here for interlock avoidance 146 mov r4, #-1 @ "" "" "" "" 147 add r0, sp, #(S_FRAME_SIZE + \stack_hole - 4) 148 SPFIX( addeq r0, r0, #4 ) 149 str r1, [sp, #-4]! @ save the "real" r0 copied 150 @ from the exception stack 151 152 mov r1, lr 153 154 @ 155 @ We are now ready to fill in the remaining blanks on the stack: 156 @ 157 @ r0 - sp_svc 158 @ r1 - lr_svc 159 @ r2 - lr_<exception>, already fixed up for correct return/restart 160 @ r3 - spsr_<exception> 161 @ r4 - orig_r0 (see pt_regs definition in ptrace.h) 162 @ 163 stmia r5, {r0 - r4} 164 165 asm_trace_hardirqs_off 166 .endm 167 168 .align 5 169__dabt_svc: 170 svc_entry 171 172 @ 173 @ get ready to re-enable interrupts if appropriate 174 @ 175 mrs r9, cpsr 176 tst r3, #PSR_I_BIT 177 biceq r9, r9, #PSR_I_BIT 178 179 @ 180 @ Call the processor-specific abort handler: 181 @ 182 @ r2 - aborted context pc 183 @ r3 - aborted context cpsr 184 @ 185 @ The abort handler must return the aborted address in r0, and 186 @ the fault status register in r1. r9 must be preserved. 187 @ 188#ifdef MULTI_DABORT 189 ldr r4, .LCprocfns 190 mov lr, pc 191 ldr pc, [r4, #PROCESSOR_DABT_FUNC] 192#else 193 bl CPU_DABORT_HANDLER 194#endif 195 196 @ 197 @ set desired IRQ state, then call main handler 198 @ 199 msr cpsr_c, r9 200 mov r2, sp 201 bl do_DataAbort 202 203 @ 204 @ IRQs off again before pulling preserved data off the stack 205 @ 206 disable_irq 207 208 @ 209 @ restore SPSR and restart the instruction 210 @ 211 ldr r2, [sp, #S_PSR] 212 svc_exit r2 @ return from exception 213 UNWIND(.fnend ) 214ENDPROC(__dabt_svc) 215 216 .align 5 217__irq_svc: 218 svc_entry 219 220#ifdef CONFIG_PREEMPT 221 get_thread_info tsk 222 ldr r8, [tsk, #TI_PREEMPT] @ get preempt count 223 add r7, r8, #1 @ increment it 224 str r7, [tsk, #TI_PREEMPT] 225#endif 226 227 irq_handler 228#ifdef CONFIG_PREEMPT 229 str r8, [tsk, #TI_PREEMPT] @ restore preempt count 230 ldr r0, [tsk, #TI_FLAGS] @ get flags 231 teq r8, #0 @ if preempt count != 0 232 movne r0, #0 @ force flags to 0 233 tst r0, #_TIF_NEED_RESCHED 234 blne svc_preempt 235#endif 236 ldr r4, [sp, #S_PSR] @ irqs are already disabled 237#ifdef CONFIG_TRACE_IRQFLAGS 238 tst r4, #PSR_I_BIT 239 bleq trace_hardirqs_on 240#endif 241 svc_exit r4 @ return from exception 242 UNWIND(.fnend ) 243ENDPROC(__irq_svc) 244 245 .ltorg 246 247#ifdef CONFIG_PREEMPT 248svc_preempt: 249 mov r8, lr 2501: bl preempt_schedule_irq @ irq en/disable is done inside 251 ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS 252 tst r0, #_TIF_NEED_RESCHED 253 moveq pc, r8 @ go again 254 b 1b 255#endif 256 257 .align 5 258__und_svc: 259#ifdef CONFIG_KPROBES 260 @ If a kprobe is about to simulate a "stmdb sp..." instruction, 261 @ it obviously needs free stack space which then will belong to 262 @ the saved context. 263 svc_entry 64 264#else 265 svc_entry 266#endif 267 268 @ 269 @ call emulation code, which returns using r9 if it has emulated 270 @ the instruction, or the more conventional lr if we are to treat 271 @ this as a real undefined instruction 272 @ 273 @ r0 - instruction 274 @ 275 ldr r0, [r2, #-4] 276 adr r9, BSYM(1f) 277 bl call_fpe 278 279 mov r0, sp @ struct pt_regs *regs 280 bl do_undefinstr 281 282 @ 283 @ IRQs off again before pulling preserved data off the stack 284 @ 2851: disable_irq 286 287 @ 288 @ restore SPSR and restart the instruction 289 @ 290 ldr r2, [sp, #S_PSR] @ Get SVC cpsr 291 svc_exit r2 @ return from exception 292 UNWIND(.fnend ) 293ENDPROC(__und_svc) 294 295 .align 5 296__pabt_svc: 297 svc_entry 298 299 @ 300 @ re-enable interrupts if appropriate 301 @ 302 mrs r9, cpsr 303 tst r3, #PSR_I_BIT 304 biceq r9, r9, #PSR_I_BIT 305 306 @ 307 @ set args, then call main handler 308 @ 309 @ r0 - address of faulting instruction 310 @ r1 - pointer to registers on stack 311 @ 312#ifdef MULTI_PABORT 313 mov r0, r2 @ pass address of aborted instruction. 314 ldr r4, .LCprocfns 315 mov lr, pc 316 ldr pc, [r4, #PROCESSOR_PABT_FUNC] 317#else 318 CPU_PABORT_HANDLER(r0, r2) 319#endif 320 msr cpsr_c, r9 @ Maybe enable interrupts 321 mov r1, sp @ regs 322 bl do_PrefetchAbort @ call abort handler 323 324 @ 325 @ IRQs off again before pulling preserved data off the stack 326 @ 327 disable_irq 328 329 @ 330 @ restore SPSR and restart the instruction 331 @ 332 ldr r2, [sp, #S_PSR] 333 svc_exit r2 @ return from exception 334 UNWIND(.fnend ) 335ENDPROC(__pabt_svc) 336 337 .align 5 338.LCcralign: 339 .word cr_alignment 340#ifdef MULTI_DABORT 341.LCprocfns: 342 .word processor 343#endif 344.LCfp: 345 .word fp_enter 346 347/* 348 * User mode handlers 349 * 350 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE 351 */ 352 353#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7) 354#error "sizeof(struct pt_regs) must be a multiple of 8" 355#endif 356 357 .macro usr_entry 358 UNWIND(.fnstart ) 359 UNWIND(.cantunwind ) @ don't unwind the user space 360 sub sp, sp, #S_FRAME_SIZE 361 ARM( stmib sp, {r1 - r12} ) 362 THUMB( stmia sp, {r0 - r12} ) 363 364 ldmia r0, {r1 - r3} 365 add r0, sp, #S_PC @ here for interlock avoidance 366 mov r4, #-1 @ "" "" "" "" 367 368 str r1, [sp] @ save the "real" r0 copied 369 @ from the exception stack 370 371 @ 372 @ We are now ready to fill in the remaining blanks on the stack: 373 @ 374 @ r2 - lr_<exception>, already fixed up for correct return/restart 375 @ r3 - spsr_<exception> 376 @ r4 - orig_r0 (see pt_regs definition in ptrace.h) 377 @ 378 @ Also, separately save sp_usr and lr_usr 379 @ 380 stmia r0, {r2 - r4} 381 ARM( stmdb r0, {sp, lr}^ ) 382 THUMB( store_user_sp_lr r0, r1, S_SP - S_PC ) 383 384 @ 385 @ Enable the alignment trap while in kernel mode 386 @ 387 alignment_trap r0 388 389 @ 390 @ Clear FP to mark the first stack frame 391 @ 392 zero_fp 393 394 asm_trace_hardirqs_off 395 .endm 396 397 .macro kuser_cmpxchg_check 398#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) 399#ifndef CONFIG_MMU 400#warning "NPTL on non MMU needs fixing" 401#else 402 @ Make sure our user space atomic helper is restarted 403 @ if it was interrupted in a critical region. Here we 404 @ perform a quick test inline since it should be false 405 @ 99.9999% of the time. The rest is done out of line. 406 cmp r2, #TASK_SIZE 407 blhs kuser_cmpxchg_fixup 408#endif 409#endif 410 .endm 411 412 .align 5 413__dabt_usr: 414 usr_entry 415 kuser_cmpxchg_check 416 417 @ 418 @ Call the processor-specific abort handler: 419 @ 420 @ r2 - aborted context pc 421 @ r3 - aborted context cpsr 422 @ 423 @ The abort handler must return the aborted address in r0, and 424 @ the fault status register in r1. 425 @ 426#ifdef MULTI_DABORT 427 ldr r4, .LCprocfns 428 mov lr, pc 429 ldr pc, [r4, #PROCESSOR_DABT_FUNC] 430#else 431 bl CPU_DABORT_HANDLER 432#endif 433 434 @ 435 @ IRQs on, then call the main handler 436 @ 437 enable_irq 438 mov r2, sp 439 adr lr, BSYM(ret_from_exception) 440 b do_DataAbort 441 UNWIND(.fnend ) 442ENDPROC(__dabt_usr) 443 444 .align 5 445__irq_usr: 446 usr_entry 447 kuser_cmpxchg_check 448 449 get_thread_info tsk 450#ifdef CONFIG_PREEMPT 451 ldr r8, [tsk, #TI_PREEMPT] @ get preempt count 452 add r7, r8, #1 @ increment it 453 str r7, [tsk, #TI_PREEMPT] 454#endif 455 456 irq_handler 457#ifdef CONFIG_PREEMPT 458 ldr r0, [tsk, #TI_PREEMPT] 459 str r8, [tsk, #TI_PREEMPT] 460 teq r0, r7 461 ARM( strne r0, [r0, -r0] ) 462 THUMB( movne r0, #0 ) 463 THUMB( strne r0, [r0] ) 464#endif 465#ifdef CONFIG_TRACE_IRQFLAGS 466 bl trace_hardirqs_on 467#endif 468 469 mov why, #0 470 b ret_to_user 471 UNWIND(.fnend ) 472ENDPROC(__irq_usr) 473 474 .ltorg 475 476 .align 5 477__und_usr: 478 usr_entry 479 480 @ 481 @ fall through to the emulation code, which returns using r9 if 482 @ it has emulated the instruction, or the more conventional lr 483 @ if we are to treat this as a real undefined instruction 484 @ 485 @ r0 - instruction 486 @ 487 adr r9, BSYM(ret_from_exception) 488 adr lr, BSYM(__und_usr_unknown) 489 tst r3, #PSR_T_BIT @ Thumb mode? 490 itet eq @ explicit IT needed for the 1f label 491 subeq r4, r2, #4 @ ARM instr at LR - 4 492 subne r4, r2, #2 @ Thumb instr at LR - 2 4931: ldreqt r0, [r4] 494#ifdef CONFIG_CPU_ENDIAN_BE8 495 reveq r0, r0 @ little endian instruction 496#endif 497 beq call_fpe 498 @ Thumb instruction 499#if __LINUX_ARM_ARCH__ >= 7 5002: 501 ARM( ldrht r5, [r4], #2 ) 502 THUMB( ldrht r5, [r4] ) 503 THUMB( add r4, r4, #2 ) 504 and r0, r5, #0xf800 @ mask bits 111x x... .... .... 505 cmp r0, #0xe800 @ 32bit instruction if xx != 0 506 blo __und_usr_unknown 5073: ldrht r0, [r4] 508 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4 509 orr r0, r0, r5, lsl #16 510#else 511 b __und_usr_unknown 512#endif 513 UNWIND(.fnend ) 514ENDPROC(__und_usr) 515 516 @ 517 @ fallthrough to call_fpe 518 @ 519 520/* 521 * The out of line fixup for the ldrt above. 522 */ 523 .section .fixup, "ax" 5244: mov pc, r9 525 .previous 526 .section __ex_table,"a" 527 .long 1b, 4b 528#if __LINUX_ARM_ARCH__ >= 7 529 .long 2b, 4b 530 .long 3b, 4b 531#endif 532 .previous 533 534/* 535 * Check whether the instruction is a co-processor instruction. 536 * If yes, we need to call the relevant co-processor handler. 537 * 538 * Note that we don't do a full check here for the co-processor 539 * instructions; all instructions with bit 27 set are well 540 * defined. The only instructions that should fault are the 541 * co-processor instructions. However, we have to watch out 542 * for the ARM6/ARM7 SWI bug. 543 * 544 * NEON is a special case that has to be handled here. Not all 545 * NEON instructions are co-processor instructions, so we have 546 * to make a special case of checking for them. Plus, there's 547 * five groups of them, so we have a table of mask/opcode pairs 548 * to check against, and if any match then we branch off into the 549 * NEON handler code. 550 * 551 * Emulators may wish to make use of the following registers: 552 * r0 = instruction opcode. 553 * r2 = PC+4 554 * r9 = normal "successful" return address 555 * r10 = this threads thread_info structure. 556 * lr = unrecognised instruction return address 557 */ 558 @ 559 @ Fall-through from Thumb-2 __und_usr 560 @ 561#ifdef CONFIG_NEON 562 adr r6, .LCneon_thumb_opcodes 563 b 2f 564#endif 565call_fpe: 566#ifdef CONFIG_NEON 567 adr r6, .LCneon_arm_opcodes 5682: 569 ldr r7, [r6], #4 @ mask value 570 cmp r7, #0 @ end mask? 571 beq 1f 572 and r8, r0, r7 573 ldr r7, [r6], #4 @ opcode bits matching in mask 574 cmp r8, r7 @ NEON instruction? 575 bne 2b 576 get_thread_info r10 577 mov r7, #1 578 strb r7, [r10, #TI_USED_CP + 10] @ mark CP#10 as used 579 strb r7, [r10, #TI_USED_CP + 11] @ mark CP#11 as used 580 b do_vfp @ let VFP handler handle this 5811: 582#endif 583 tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27 584 tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2 585#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710) 586 and r8, r0, #0x0f000000 @ mask out op-code bits 587 teqne r8, #0x0f000000 @ SWI (ARM6/7 bug)? 588#endif 589 moveq pc, lr 590 get_thread_info r10 @ get current thread 591 and r8, r0, #0x00000f00 @ mask out CP number 592 THUMB( lsr r8, r8, #8 ) 593 mov r7, #1 594 add r6, r10, #TI_USED_CP 595 ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[] 596 THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[] 597#ifdef CONFIG_IWMMXT 598 @ Test if we need to give access to iWMMXt coprocessors 599 ldr r5, [r10, #TI_FLAGS] 600 rsbs r7, r8, #(1 << 8) @ CP 0 or 1 only 601 movcss r7, r5, lsr #(TIF_USING_IWMMXT + 1) 602 bcs iwmmxt_task_enable 603#endif 604 ARM( add pc, pc, r8, lsr #6 ) 605 THUMB( lsl r8, r8, #2 ) 606 THUMB( add pc, r8 ) 607 nop 608 609 W(mov) pc, lr @ CP#0 610 W(b) do_fpe @ CP#1 (FPE) 611 W(b) do_fpe @ CP#2 (FPE) 612 W(mov) pc, lr @ CP#3 613#ifdef CONFIG_CRUNCH 614 b crunch_task_enable @ CP#4 (MaverickCrunch) 615 b crunch_task_enable @ CP#5 (MaverickCrunch) 616 b crunch_task_enable @ CP#6 (MaverickCrunch) 617#else 618 W(mov) pc, lr @ CP#4 619 W(mov) pc, lr @ CP#5 620 W(mov) pc, lr @ CP#6 621#endif 622 W(mov) pc, lr @ CP#7 623 W(mov) pc, lr @ CP#8 624 W(mov) pc, lr @ CP#9 625#ifdef CONFIG_VFP 626 W(b) do_vfp @ CP#10 (VFP) 627 W(b) do_vfp @ CP#11 (VFP) 628#else 629 W(mov) pc, lr @ CP#10 (VFP) 630 W(mov) pc, lr @ CP#11 (VFP) 631#endif 632 W(mov) pc, lr @ CP#12 633 W(mov) pc, lr @ CP#13 634 W(mov) pc, lr @ CP#14 (Debug) 635 W(mov) pc, lr @ CP#15 (Control) 636 637#ifdef CONFIG_NEON 638 .align 6 639 640.LCneon_arm_opcodes: 641 .word 0xfe000000 @ mask 642 .word 0xf2000000 @ opcode 643 644 .word 0xff100000 @ mask 645 .word 0xf4000000 @ opcode 646 647 .word 0x00000000 @ mask 648 .word 0x00000000 @ opcode 649 650.LCneon_thumb_opcodes: 651 .word 0xef000000 @ mask 652 .word 0xef000000 @ opcode 653 654 .word 0xff100000 @ mask 655 .word 0xf9000000 @ opcode 656 657 .word 0x00000000 @ mask 658 .word 0x00000000 @ opcode 659#endif 660 661do_fpe: 662 enable_irq 663 ldr r4, .LCfp 664 add r10, r10, #TI_FPSTATE @ r10 = workspace 665 ldr pc, [r4] @ Call FP module USR entry point 666 667/* 668 * The FP module is called with these registers set: 669 * r0 = instruction 670 * r2 = PC+4 671 * r9 = normal "successful" return address 672 * r10 = FP workspace 673 * lr = unrecognised FP instruction return address 674 */ 675 676 .data 677ENTRY(fp_enter) 678 .word no_fp 679 .previous 680 681no_fp: mov pc, lr 682 683__und_usr_unknown: 684 enable_irq 685 mov r0, sp 686 adr lr, BSYM(ret_from_exception) 687 b do_undefinstr 688ENDPROC(__und_usr_unknown) 689 690 .align 5 691__pabt_usr: 692 usr_entry 693 694#ifdef MULTI_PABORT 695 mov r0, r2 @ pass address of aborted instruction. 696 ldr r4, .LCprocfns 697 mov lr, pc 698 ldr pc, [r4, #PROCESSOR_PABT_FUNC] 699#else 700 CPU_PABORT_HANDLER(r0, r2) 701#endif 702 enable_irq @ Enable interrupts 703 mov r1, sp @ regs 704 bl do_PrefetchAbort @ call abort handler 705 UNWIND(.fnend ) 706 /* fall through */ 707/* 708 * This is the return code to user mode for abort handlers 709 */ 710ENTRY(ret_from_exception) 711 UNWIND(.fnstart ) 712 UNWIND(.cantunwind ) 713 get_thread_info tsk 714 mov why, #0 715 b ret_to_user 716 UNWIND(.fnend ) 717ENDPROC(__pabt_usr) 718ENDPROC(ret_from_exception) 719 720/* 721 * Register switch for ARMv3 and ARMv4 processors 722 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info 723 * previous and next are guaranteed not to be the same. 724 */ 725ENTRY(__switch_to) 726 UNWIND(.fnstart ) 727 UNWIND(.cantunwind ) 728 add ip, r1, #TI_CPU_SAVE 729 ldr r3, [r2, #TI_TP_VALUE] 730 ARM( stmia ip!, {r4 - sl, fp, sp, lr} ) @ Store most regs on stack 731 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack 732 THUMB( str sp, [ip], #4 ) 733 THUMB( str lr, [ip], #4 ) 734#ifdef CONFIG_MMU 735 ldr r6, [r2, #TI_CPU_DOMAIN] 736#endif 737#if __LINUX_ARM_ARCH__ >= 6 738#ifdef CONFIG_CPU_32v6K 739 clrex 740#else 741 strex r5, r4, [ip] @ Clear exclusive monitor 742#endif 743#endif 744#if defined(CONFIG_HAS_TLS_REG) 745 mcr p15, 0, r3, c13, c0, 3 @ set TLS register 746#elif !defined(CONFIG_TLS_REG_EMUL) 747 mov r4, #0xffff0fff 748 str r3, [r4, #-15] @ TLS val at 0xffff0ff0 749#endif 750#ifdef CONFIG_MMU 751 mcr p15, 0, r6, c3, c0, 0 @ Set domain register 752#endif 753 mov r5, r0 754 add r4, r2, #TI_CPU_SAVE 755 ldr r0, =thread_notify_head 756 mov r1, #THREAD_NOTIFY_SWITCH 757 bl atomic_notifier_call_chain 758 THUMB( mov ip, r4 ) 759 mov r0, r5 760 ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously 761 THUMB( ldmia ip!, {r4 - sl, fp} ) @ Load all regs saved previously 762 THUMB( ldr sp, [ip], #4 ) 763 THUMB( ldr pc, [ip] ) 764 UNWIND(.fnend ) 765ENDPROC(__switch_to) 766 767 __INIT 768 769/* 770 * User helpers. 771 * 772 * These are segment of kernel provided user code reachable from user space 773 * at a fixed address in kernel memory. This is used to provide user space 774 * with some operations which require kernel help because of unimplemented 775 * native feature and/or instructions in many ARM CPUs. The idea is for 776 * this code to be executed directly in user mode for best efficiency but 777 * which is too intimate with the kernel counter part to be left to user 778 * libraries. In fact this code might even differ from one CPU to another 779 * depending on the available instruction set and restrictions like on 780 * SMP systems. In other words, the kernel reserves the right to change 781 * this code as needed without warning. Only the entry points and their 782 * results are guaranteed to be stable. 783 * 784 * Each segment is 32-byte aligned and will be moved to the top of the high 785 * vector page. New segments (if ever needed) must be added in front of 786 * existing ones. This mechanism should be used only for things that are 787 * really small and justified, and not be abused freely. 788 * 789 * User space is expected to implement those things inline when optimizing 790 * for a processor that has the necessary native support, but only if such 791 * resulting binaries are already to be incompatible with earlier ARM 792 * processors due to the use of unsupported instructions other than what 793 * is provided here. In other words don't make binaries unable to run on 794 * earlier processors just for the sake of not using these kernel helpers 795 * if your compiled code is not going to use the new instructions for other 796 * purpose. 797 */ 798 THUMB( .arm ) 799 800 .macro usr_ret, reg 801#ifdef CONFIG_ARM_THUMB 802 bx \reg 803#else 804 mov pc, \reg 805#endif 806 .endm 807 808 .align 5 809 .globl __kuser_helper_start 810__kuser_helper_start: 811 812/* 813 * Reference prototype: 814 * 815 * void __kernel_memory_barrier(void) 816 * 817 * Input: 818 * 819 * lr = return address 820 * 821 * Output: 822 * 823 * none 824 * 825 * Clobbered: 826 * 827 * none 828 * 829 * Definition and user space usage example: 830 * 831 * typedef void (__kernel_dmb_t)(void); 832 * #define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0) 833 * 834 * Apply any needed memory barrier to preserve consistency with data modified 835 * manually and __kuser_cmpxchg usage. 836 * 837 * This could be used as follows: 838 * 839 * #define __kernel_dmb() \ 840 * asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \ 841 * : : : "r0", "lr","cc" ) 842 */ 843 844__kuser_memory_barrier: @ 0xffff0fa0 845 smp_dmb 846 usr_ret lr 847 848 .align 5 849 850/* 851 * Reference prototype: 852 * 853 * int __kernel_cmpxchg(int oldval, int newval, int *ptr) 854 * 855 * Input: 856 * 857 * r0 = oldval 858 * r1 = newval 859 * r2 = ptr 860 * lr = return address 861 * 862 * Output: 863 * 864 * r0 = returned value (zero or non-zero) 865 * C flag = set if r0 == 0, clear if r0 != 0 866 * 867 * Clobbered: 868 * 869 * r3, ip, flags 870 * 871 * Definition and user space usage example: 872 * 873 * typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr); 874 * #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0) 875 * 876 * Atomically store newval in *ptr if *ptr is equal to oldval for user space. 877 * Return zero if *ptr was changed or non-zero if no exchange happened. 878 * The C flag is also set if *ptr was changed to allow for assembly 879 * optimization in the calling code. 880 * 881 * Notes: 882 * 883 * - This routine already includes memory barriers as needed. 884 * 885 * For example, a user space atomic_add implementation could look like this: 886 * 887 * #define atomic_add(ptr, val) \ 888 * ({ register unsigned int *__ptr asm("r2") = (ptr); \ 889 * register unsigned int __result asm("r1"); \ 890 * asm volatile ( \ 891 * "1: @ atomic_add\n\t" \ 892 * "ldr r0, [r2]\n\t" \ 893 * "mov r3, #0xffff0fff\n\t" \ 894 * "add lr, pc, #4\n\t" \ 895 * "add r1, r0, %2\n\t" \ 896 * "add pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \ 897 * "bcc 1b" \ 898 * : "=&r" (__result) \ 899 * : "r" (__ptr), "rIL" (val) \ 900 * : "r0","r3","ip","lr","cc","memory" ); \ 901 * __result; }) 902 */ 903 904__kuser_cmpxchg: @ 0xffff0fc0 905 906#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) 907 908 /* 909 * Poor you. No fast solution possible... 910 * The kernel itself must perform the operation. 911 * A special ghost syscall is used for that (see traps.c). 912 */ 913 stmfd sp!, {r7, lr} 914 mov r7, #0xff00 @ 0xfff0 into r7 for EABI 915 orr r7, r7, #0xf0 916 swi #0x9ffff0 917 ldmfd sp!, {r7, pc} 918 919#elif __LINUX_ARM_ARCH__ < 6 920 921#ifdef CONFIG_MMU 922 923 /* 924 * The only thing that can break atomicity in this cmpxchg 925 * implementation is either an IRQ or a data abort exception 926 * causing another process/thread to be scheduled in the middle 927 * of the critical sequence. To prevent this, code is added to 928 * the IRQ and data abort exception handlers to set the pc back 929 * to the beginning of the critical section if it is found to be 930 * within that critical section (see kuser_cmpxchg_fixup). 931 */ 9321: ldr r3, [r2] @ load current val 933 subs r3, r3, r0 @ compare with oldval 9342: streq r1, [r2] @ store newval if eq 935 rsbs r0, r3, #0 @ set return val and C flag 936 usr_ret lr 937 938 .text 939kuser_cmpxchg_fixup: 940 @ Called from kuser_cmpxchg_check macro. 941 @ r2 = address of interrupted insn (must be preserved). 942 @ sp = saved regs. r7 and r8 are clobbered. 943 @ 1b = first critical insn, 2b = last critical insn. 944 @ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b. 945 mov r7, #0xffff0fff 946 sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg))) 947 subs r8, r2, r7 948 rsbcss r8, r8, #(2b - 1b) 949 strcs r7, [sp, #S_PC] 950 mov pc, lr 951 .previous 952 953#else 954#warning "NPTL on non MMU needs fixing" 955 mov r0, #-1 956 adds r0, r0, #0 957 usr_ret lr 958#endif 959 960#else 961 962#ifdef CONFIG_SMP 963 mcr p15, 0, r0, c7, c10, 5 @ dmb 964#endif 9651: ldrex r3, [r2] 966 subs r3, r3, r0 967 strexeq r3, r1, [r2] 968 teqeq r3, #1 969 beq 1b 970 rsbs r0, r3, #0 971 /* beware -- each __kuser slot must be 8 instructions max */ 972#ifdef CONFIG_SMP 973 b __kuser_memory_barrier 974#else 975 usr_ret lr 976#endif 977 978#endif 979 980 .align 5 981 982/* 983 * Reference prototype: 984 * 985 * int __kernel_get_tls(void) 986 * 987 * Input: 988 * 989 * lr = return address 990 * 991 * Output: 992 * 993 * r0 = TLS value 994 * 995 * Clobbered: 996 * 997 * none 998 * 999 * Definition and user space usage example: 1000 * 1001 * typedef int (__kernel_get_tls_t)(void); 1002 * #define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0) 1003 * 1004 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall. 1005 * 1006 * This could be used as follows: 1007 * 1008 * #define __kernel_get_tls() \ 1009 * ({ register unsigned int __val asm("r0"); \ 1010 * asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \ 1011 * : "=r" (__val) : : "lr","cc" ); \ 1012 * __val; }) 1013 */ 1014 1015__kuser_get_tls: @ 0xffff0fe0 1016 1017#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL) 1018 ldr r0, [pc, #(16 - 8)] @ TLS stored at 0xffff0ff0 1019#else 1020 mrc p15, 0, r0, c13, c0, 3 @ read TLS register 1021#endif 1022 usr_ret lr 1023 1024 .rep 5 1025 .word 0 @ pad up to __kuser_helper_version 1026 .endr 1027 1028/* 1029 * Reference declaration: 1030 * 1031 * extern unsigned int __kernel_helper_version; 1032 * 1033 * Definition and user space usage example: 1034 * 1035 * #define __kernel_helper_version (*(unsigned int *)0xffff0ffc) 1036 * 1037 * User space may read this to determine the curent number of helpers 1038 * available. 1039 */ 1040 1041__kuser_helper_version: @ 0xffff0ffc 1042 .word ((__kuser_helper_end - __kuser_helper_start) >> 5) 1043 1044 .globl __kuser_helper_end 1045__kuser_helper_end: 1046 1047 THUMB( .thumb ) 1048 1049/* 1050 * Vector stubs. 1051 * 1052 * This code is copied to 0xffff0200 so we can use branches in the 1053 * vectors, rather than ldr's. Note that this code must not 1054 * exceed 0x300 bytes. 1055 * 1056 * Common stub entry macro: 1057 * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC 1058 * 1059 * SP points to a minimal amount of processor-private memory, the address 1060 * of which is copied into r0 for the mode specific abort handler. 1061 */ 1062 .macro vector_stub, name, mode, correction=0 1063 .align 5 1064 1065vector_\name: 1066 .if \correction 1067 sub lr, lr, #\correction 1068 .endif 1069 1070 @ 1071 @ Save r0, lr_<exception> (parent PC) and spsr_<exception> 1072 @ (parent CPSR) 1073 @ 1074 stmia sp, {r0, lr} @ save r0, lr 1075 mrs lr, spsr 1076 str lr, [sp, #8] @ save spsr 1077 1078 @ 1079 @ Prepare for SVC32 mode. IRQs remain disabled. 1080 @ 1081 mrs r0, cpsr 1082 eor r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE) 1083 msr spsr_cxsf, r0 1084 1085 @ 1086 @ the branch table must immediately follow this code 1087 @ 1088 and lr, lr, #0x0f 1089 THUMB( adr r0, 1f ) 1090 THUMB( ldr lr, [r0, lr, lsl #2] ) 1091 mov r0, sp 1092 ARM( ldr lr, [pc, lr, lsl #2] ) 1093 movs pc, lr @ branch to handler in SVC mode 1094ENDPROC(vector_\name) 1095 1096 .align 2 1097 @ handler addresses follow this label 10981: 1099 .endm 1100 1101 .globl __stubs_start 1102__stubs_start: 1103/* 1104 * Interrupt dispatcher 1105 */ 1106 vector_stub irq, IRQ_MODE, 4 1107 1108 .long __irq_usr @ 0 (USR_26 / USR_32) 1109 .long __irq_invalid @ 1 (FIQ_26 / FIQ_32) 1110 .long __irq_invalid @ 2 (IRQ_26 / IRQ_32) 1111 .long __irq_svc @ 3 (SVC_26 / SVC_32) 1112 .long __irq_invalid @ 4 1113 .long __irq_invalid @ 5 1114 .long __irq_invalid @ 6 1115 .long __irq_invalid @ 7 1116 .long __irq_invalid @ 8 1117 .long __irq_invalid @ 9 1118 .long __irq_invalid @ a 1119 .long __irq_invalid @ b 1120 .long __irq_invalid @ c 1121 .long __irq_invalid @ d 1122 .long __irq_invalid @ e 1123 .long __irq_invalid @ f 1124 1125/* 1126 * Data abort dispatcher 1127 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC 1128 */ 1129 vector_stub dabt, ABT_MODE, 8 1130 1131 .long __dabt_usr @ 0 (USR_26 / USR_32) 1132 .long __dabt_invalid @ 1 (FIQ_26 / FIQ_32) 1133 .long __dabt_invalid @ 2 (IRQ_26 / IRQ_32) 1134 .long __dabt_svc @ 3 (SVC_26 / SVC_32) 1135 .long __dabt_invalid @ 4 1136 .long __dabt_invalid @ 5 1137 .long __dabt_invalid @ 6 1138 .long __dabt_invalid @ 7 1139 .long __dabt_invalid @ 8 1140 .long __dabt_invalid @ 9 1141 .long __dabt_invalid @ a 1142 .long __dabt_invalid @ b 1143 .long __dabt_invalid @ c 1144 .long __dabt_invalid @ d 1145 .long __dabt_invalid @ e 1146 .long __dabt_invalid @ f 1147 1148/* 1149 * Prefetch abort dispatcher 1150 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC 1151 */ 1152 vector_stub pabt, ABT_MODE, 4 1153 1154 .long __pabt_usr @ 0 (USR_26 / USR_32) 1155 .long __pabt_invalid @ 1 (FIQ_26 / FIQ_32) 1156 .long __pabt_invalid @ 2 (IRQ_26 / IRQ_32) 1157 .long __pabt_svc @ 3 (SVC_26 / SVC_32) 1158 .long __pabt_invalid @ 4 1159 .long __pabt_invalid @ 5 1160 .long __pabt_invalid @ 6 1161 .long __pabt_invalid @ 7 1162 .long __pabt_invalid @ 8 1163 .long __pabt_invalid @ 9 1164 .long __pabt_invalid @ a 1165 .long __pabt_invalid @ b 1166 .long __pabt_invalid @ c 1167 .long __pabt_invalid @ d 1168 .long __pabt_invalid @ e 1169 .long __pabt_invalid @ f 1170 1171/* 1172 * Undef instr entry dispatcher 1173 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC 1174 */ 1175 vector_stub und, UND_MODE 1176 1177 .long __und_usr @ 0 (USR_26 / USR_32) 1178 .long __und_invalid @ 1 (FIQ_26 / FIQ_32) 1179 .long __und_invalid @ 2 (IRQ_26 / IRQ_32) 1180 .long __und_svc @ 3 (SVC_26 / SVC_32) 1181 .long __und_invalid @ 4 1182 .long __und_invalid @ 5 1183 .long __und_invalid @ 6 1184 .long __und_invalid @ 7 1185 .long __und_invalid @ 8 1186 .long __und_invalid @ 9 1187 .long __und_invalid @ a 1188 .long __und_invalid @ b 1189 .long __und_invalid @ c 1190 .long __und_invalid @ d 1191 .long __und_invalid @ e 1192 .long __und_invalid @ f 1193 1194 .align 5 1195 1196/*============================================================================= 1197 * Undefined FIQs 1198 *----------------------------------------------------------------------------- 1199 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC 1200 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg. 1201 * Basically to switch modes, we *HAVE* to clobber one register... brain 1202 * damage alert! I don't think that we can execute any code in here in any 1203 * other mode than FIQ... Ok you can switch to another mode, but you can't 1204 * get out of that mode without clobbering one register. 1205 */ 1206vector_fiq: 1207 disable_fiq 1208 subs pc, lr, #4 1209 1210/*============================================================================= 1211 * Address exception handler 1212 *----------------------------------------------------------------------------- 1213 * These aren't too critical. 1214 * (they're not supposed to happen, and won't happen in 32-bit data mode). 1215 */ 1216 1217vector_addrexcptn: 1218 b vector_addrexcptn 1219 1220/* 1221 * We group all the following data together to optimise 1222 * for CPUs with separate I & D caches. 1223 */ 1224 .align 5 1225 1226.LCvswi: 1227 .word vector_swi 1228 1229 .globl __stubs_end 1230__stubs_end: 1231 1232 .equ stubs_offset, __vectors_start + 0x200 - __stubs_start 1233 1234 .globl __vectors_start 1235__vectors_start: 1236 ARM( swi SYS_ERROR0 ) 1237 THUMB( svc #0 ) 1238 THUMB( nop ) 1239 W(b) vector_und + stubs_offset 1240 W(ldr) pc, .LCvswi + stubs_offset 1241 W(b) vector_pabt + stubs_offset 1242 W(b) vector_dabt + stubs_offset 1243 W(b) vector_addrexcptn + stubs_offset 1244 W(b) vector_irq + stubs_offset 1245 W(b) vector_fiq + stubs_offset 1246 1247 .globl __vectors_end 1248__vectors_end: 1249 1250 .data 1251 1252 .globl cr_alignment 1253 .globl cr_no_alignment 1254cr_alignment: 1255 .space 4 1256cr_no_alignment: 1257 .space 4 1258