/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>
#include <asm/v7m.h>

@ Bad Abort numbers
@ -----------------
@ Indices identifying the cause of an unrecoverable exception.
#define BAD_PREFETCH	0
#define BAD_DATA	1
#define BAD_ADDREXCPTN	2
#define BAD_IRQ		3
#define BAD_UNDEFINSTR	4

@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@ This _must_ remain a multiple of 8 for EABI.
@
#define S_OFF		8

/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif

	@ Clear the frame pointer on kernel entry so that, with
	@ CONFIG_FRAME_POINTER, backtraces terminate cleanly here.
	.macro	zero_fp
#ifdef CONFIG_FRAME_POINTER
	mov	fp, #0
#endif
	.endm

@ ATRAP(x) emits its argument only when the alignment trap is configured,
@ so entry code can include alignment-trap handling conditionally.
#ifdef CONFIG_ALIGNMENT_TRAP
#define ATRAP(x...) x
#else
#define ATRAP(x...)
#endif

	@ Reinstate the expected CP15 control register (SCTLR) value,
	@ restoring the kernel's alignment-trap configuration.
	@   \rtmp1, \rtmp2 - scratch registers
	@   \label         - literal holding the address of the saved
	@                    control register value
	@ SCTLR is only rewritten when the live value differs from the
	@ saved one, avoiding an unnecessary (serializing) mcr.
	.macro	alignment_trap, rtmp1, rtmp2, label
#ifdef CONFIG_ALIGNMENT_TRAP
	mrc	p15, 0, \rtmp2, c1, c0, 0	@ \rtmp2 = live SCTLR
	ldr	\rtmp1, \label
	ldr	\rtmp1, [\rtmp1]		@ \rtmp1 = saved SCTLR value
	teq	\rtmp1, \rtmp2
	mcrne	p15, 0, \rtmp1, c1, c0, 0	@ rewrite only if changed
#endif
	.endm

#ifdef CONFIG_CPU_V7M
/*
 * ARMv7-M exception entry/exit macros.
 *
 * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are
 * automatically saved on the current stack (32 words) before
 * switching to the exception stack (SP_main).
 *
 * If exception is taken while in user mode, SP_main is
 * empty. Otherwise, SP_main is aligned to 64 bit automatically
 * (CCR.STKALIGN set).
 *
 * Linux assumes that the interrupts are disabled when entering an
 * exception handler and it may BUG if this is not the case. Interrupts
 * are disabled during entry and reenabled in the exit macro.
 *
 * v7m_exception_slow_exit is used when returning from SVC or PendSV.
 * When returning to kernel mode, we don't return from exception.
 */
	@ Build a struct pt_regs frame on the kernel (main) stack from the
	@ hardware-saved ARMv7-M exception frame. r0-r7 end up holding the
	@ saved user values and are left untouched afterwards (they are
	@ needed for signal handling); r8-r12 are clobbered. Interrupts
	@ are disabled on exit from this macro.
	.macro	v7m_exception_entry
	@ determine the location of the registers saved by the core during
	@ exception entry. Depending on the mode the cpu was in when the
	@ exception happened that is either on the main or the process stack.
	@ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
	@ was used.
	tst	lr, #EXC_RET_STACK_MASK
	mrsne	r12, psp
	moveq	r12, sp

	@ we cannot rely on r0-r3 and r12 matching the value saved in the
	@ exception frame because of tail-chaining. So these have to be
	@ reloaded.
	ldmia	r12!, {r0-r3}

	@ Linux expects to have irqs off. Do it here before taking stack space
	cpsid	i

	sub	sp, #PT_REGS_SIZE-S_IP
	stmdb	sp!, {r0-r11}

	@ load saved r12, lr, return address and xPSR.
	@ r0-r7 are used for signals and never touched from now on. Clobbering
	@ r8-r12 is OK.
	mov	r9, r12
	ldmia	r9!, {r8, r10-r12}

	@ calculate the original stack pointer value.
	@ r9 currently points to the memory location just above the auto saved
	@ xPSR.
	@ The cpu might automatically 8-byte align the stack. Bit 9
	@ of the saved xPSR specifies if stack aligning took place. In this case
	@ another 32-bit value is included in the stack.

	tst	r12, V7M_xPSR_FRAMEPTRALIGN
	addne	r9, r9, #4			@ skip the alignment padding word

	@ store saved r12 using str to have a register to hold the base for stm
	str	r8, [sp, #S_IP]
	add	r8, sp, #S_SP
	@ store r13-r15, xPSR
	stmia	r8!, {r9-r12}
	@ store old_r0
	str	r0, [r8]
	.endm

	/*
	 * PENDSV and SVCALL are configured to have the same exception
	 * priorities. As a kernel thread runs at SVCALL execution priority it
	 * can never be preempted and so we will never have to return to a
	 * kernel thread here.
	 */
	@ Return to user space from SVC or PendSV: restore the user context
	@ from the pt_regs frame at sp and rebuild the hardware exception
	@ frame on the process stack before the exception return via bx lr.
	@   \ret_r0 - when non-zero, keep the live r0 (e.g. a syscall return
	@             value) instead of the r0 saved in pt_regs
	.macro	v7m_exception_slow_exit ret_r0
	cpsid	i
	ldr	lr, =exc_ret
	ldr	lr, [lr]			@ lr = saved EXC_RETURN value

	@ read original r12, sp, lr, pc and xPSR
	add	r12, sp, #S_IP
	ldmia	r12, {r1-r5}

	@ an exception frame is always 8-byte aligned. To tell the hardware if
	@ the sp to be restored is aligned or not set bit 9 of the saved xPSR
	@ accordingly.
	tst	r2, #4
	subne	r2, r2, #4
	orrne	r5, V7M_xPSR_FRAMEPTRALIGN
	biceq	r5, V7M_xPSR_FRAMEPTRALIGN

	@ ensure bit 0 is cleared in the PC, otherwise behaviour is
	@ unpredictable
	bic	r4, #1

	@ write basic exception frame
	stmdb	r2!, {r1, r3-r5}		@ r12, lr, pc, xPSR
	ldmia	sp, {r1, r3-r5}			@ saved r0-r3 -> r1, r3-r5
	.if	\ret_r0
	stmdb	r2!, {r0, r3-r5}		@ frame r0 = live r0
	.else
	stmdb	r2!, {r1, r3-r5}		@ frame r0 = saved r0
	.endif

	@ restore process sp
	msr	psp, r2

	@ restore original r0-r11 from the pt_regs frame
	ldmia	sp!, {r0-r11}

	@ restore main sp
	add	sp, sp, #PT_REGS_SIZE-S_IP

	cpsie	i
	bx	lr
	.endm
#endif	/* CONFIG_CPU_V7M */

	@
	@ Store/load the USER SP and LR registers by switching to the SYS
	@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
	@ available.
@ Should only be called from SVC mode
	@
	.macro	store_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	str	sp, [\rd, #\offset]		@ save sp_usr
	str	lr, [\rd, #\offset + 4]		@ save lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm

	@ Counterpart of store_user_sp_lr: load sp_usr/lr_usr from memory.
	@ Should only be called from SVC mode.
	.macro	load_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	ldr	sp, [\rd, #\offset]		@ load sp_usr
	ldr	lr, [\rd, #\offset + 4]		@ load lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm


	@ Return from an exception taken in SVC mode, restoring the full
	@ register state from the pt_regs frame at sp.
	@   \rpsr - register holding the PSR value to return with
	@   \irq  - non-zero when IRQs are already disabled at this point
	.macro	svc_exit, rpsr, irq = 0
	.if	\irq != 0
	@ IRQs already off
#ifdef CONFIG_TRACE_IRQFLAGS
	@ The parent context IRQs must have been enabled to get here in
	@ the first place, so there's no point checking the PSR I bit.
	bl	trace_hardirqs_on
#endif
	.else
	@ IRQs off again before pulling preserved data off the stack
	disable_irq_notrace
#ifdef CONFIG_TRACE_IRQFLAGS
	@ Trace the IRQ state the parent context will resume with.
	tst	\rpsr, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	\rpsr, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	.endif
	uaccess_exit tsk, r0, r1

#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode SVC restore
	msr	spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
	@ We must avoid clrex due to Cortex-A15 erratum #830321
	sub	r0, sp, #4			@ uninhabited address
	strex	r1, r2, [r0]			@ clear the exclusive monitor
#endif
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#else
	@ Thumb mode SVC restore
	ldr	lr, [sp, #S_SP]			@ top of the stack
	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc

	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r2, r1, [sp, #S_LR]		@ clear the exclusive monitor

	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
	ldmia	sp, {r0 - r12}
	mov	sp, lr
	ldr	lr, [sp], #4
	rfeia	sp!
#endif
	.endm

	@
	@ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
	@
	@ This macro acts in a similar manner to svc_exit but switches to FIQ
	@ mode to restore the final part of the register state.
	@
	@ We cannot use the normal svc_exit procedure because that would
	@ clobber spsr_svc (FIQ could be delivered during the first few
	@ instructions of vector_swi meaning its contents have not been
	@ saved anywhere).
	@
	@ Note that, unlike svc_exit, this macro also does not allow a caller
	@ supplied rpsr. This is because the FIQ exceptions are not re-entrant
	@ and the handlers cannot call into the scheduler (meaning the value
	@ on the stack remains correct).
	@
	.macro svc_exit_via_fiq
	uaccess_exit tsk, r0, r1
#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode restore
	mov	r0, sp
	ldmib	r0, {r1 - r14}	@ abort is deadly from here onward (it will
				@ clobber state restored below)
	msr	cpsr_c, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
	add	r8, r0, #S_PC
	ldr	r9, [r0, #S_PSR]
	msr	spsr_cxsf, r9
	ldr	r0, [r0, #S_R0]
	ldmia	r8, {pc}^	@ return, loading cpsr from spsr
#else
	@ Thumb mode restore
	add	r0, sp, #S_R2
	ldr	lr, [sp, #S_LR]
	ldr	sp, [sp, #S_SP]	@ abort is deadly from here onward (it will
				@ clobber state restored below)
	ldmia	r0, {r2 - r12}
	mov	r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
	msr	cpsr_c, r1
	sub	r0, #S_R2
	add	r8, r0, #S_PC
	ldmia	r0, {r0 - r1}
	rfeia	r8		@ return from the rfe context at S_PC
#endif
	.endm


	@ Return to user space, restoring the user register state from the
	@ pt_regs frame on the kernel stack.
	@   \fast   - non-zero to keep the live r0 (e.g. a syscall return
	@             value) instead of reloading r0 from pt_regs
	@   \offset - extra displacement of the pt_regs frame from sp
	.macro restore_user_regs, fast = 0, offset = 0
#if defined(CONFIG_CPU_32v6K) && !defined(CONFIG_CPU_V6)
	@ The TLS register update is deferred until return to user space so we
	@ can use it for other things while running in the kernel
	get_thread_info r1
	ldr	r1, [r1, #TI_TP_VALUE]
	mcr	p15, 0, r1, c13, c0, 3		@ set TLS register
#endif

	uaccess_enable r1, isb=0
#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode restore
	mov	r2, sp
	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [r2, #\offset + S_PC]!	@ get pc (r2 now points at S_PC)
	@ Sanity check: the saved PSR must describe user mode with IRQs on.
	tst	r1, #PSR_I_BIT | 0x0f
	bne	1f
	msr	spsr_cxsf, r1			@ save in spsr_svc
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r1, r2, [r2]			@ clear the exclusive monitor
#endif
	.if	\fast
	ldmdb	r2, {r1 - lr}^			@ get calling r1 - lr
	.else
	ldmdb	r2, {r0 - lr}^			@ get calling r0 - lr
	.endif
	mov	r0, r0				@ ARMv5T and earlier require a nop
						@ after ldm {}^
	add	sp, sp, #\offset + PT_REGS_SIZE
	movs	pc, lr				@ return & move spsr_svc into cpsr
1:	bug	"Returning to usermode but unexpected PSR bits set?", \@
#elif defined(CONFIG_CPU_V7M)
	@ V7M restore.
	@ Note that we don't need to do clrex here as clearing the local
	@ monitor is part of the exception entry and exit sequence.
	.if	\offset
	add	sp, #\offset
	.endif
	v7m_exception_slow_exit ret_r0 = \fast
#else
	@ Thumb mode restore
	mov	r2, sp
	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]	@ get pc
	add	sp, sp, #\offset + S_SP
	@ Sanity check: the saved PSR must describe user mode with IRQs on.
	tst	r1, #PSR_I_BIT | 0x0f
	bne	1f
	msr	spsr_cxsf, r1			@ save in spsr_svc

	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r1, r2, [sp]			@ clear the exclusive monitor

	.if	\fast
	ldmdb	sp, {r1 - r12}			@ get calling r1 - r12
	.else
	ldmdb	sp, {r0 - r12}			@ get calling r0 - r12
	.endif
	add	sp, sp, #PT_REGS_SIZE - S_SP
	movs	pc, lr				@ return & move spsr_svc into cpsr
1:	bug	"Returning to usermode but unexpected PSR bits set?", \@
#endif	/* !CONFIG_THUMB2_KERNEL */
	.endm

/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
	@ Tell the context tracking subsystem we are leaving user mode.
	@   \save - non-zero when the caller-clobbered registers are live
	@           and must be preserved around the C call
	.macro ct_user_exit, save = 1
#ifdef CONFIG_CONTEXT_TRACKING
	.if	\save
	stmdb   sp!, {r0-r3, ip, lr}
	bl	context_tracking_user_exit
	ldmia	sp!, {r0-r3, ip, lr}
	.else
	bl	context_tracking_user_exit
	.endif
#endif
	.endm

	@ Tell the context tracking subsystem we are entering user mode.
	@   \save - non-zero when the caller-clobbered registers are live
	@           and must be preserved around the C call
	.macro ct_user_enter, save = 1
#ifdef CONFIG_CONTEXT_TRACKING
	.if	\save
	stmdb   sp!, {r0-r3, ip, lr}
	bl	context_tracking_user_enter
	ldmia	sp!, {r0-r3, ip, lr}
	.else
	bl	context_tracking_user_enter
	.endif
#endif
	.endm

	@ Dispatch a system call through \table.
	@   \table  - base address of the syscall table
	@   \nr     - syscall number
	@   \tmp    - scratch register (the Spectre variant clamps the
	@             index into it)
	@   \ret    - label the sys_* routine returns to
	@   \reload - non-zero to reload r0-r6 from the pt_regs frame and
	@             refresh stack args 5/6 before dispatching
	@ Falls straight through when \nr is out of range (all dispatch
	@ instructions are conditional on the cmp result).
	.macro invoke_syscall, table, nr, tmp, ret, reload=0
#ifdef CONFIG_CPU_SPECTRE
	mov	\tmp, \nr
	cmp	\tmp, #NR_syscalls		@ check upper syscall limit
	movcs	\tmp, #0			@ clamp out-of-range index to 0
	csdb					@ speculation barrier
	badr	lr, \ret			@ return address
	.if	\reload
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	ldmiacc	r1, {r0 - r6}			@ reload r0-r6
	stmiacc	sp, {r4, r5}			@ update stack arguments
	.endif
	ldrcc	pc, [\table, \tmp, lsl #2]	@ call sys_* routine
#else
	cmp	\nr, #NR_syscalls		@ check upper syscall limit
	badr	lr, \ret			@ return address
	.if	\reload
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	ldmiacc	r1, {r0 - r6}			@ reload r0-r6
	stmiacc	sp, {r4, r5}			@ update stack arguments
	.endif
	ldrcc	pc, [\table, \nr, lsl #2]	@ call sys_* routine
#endif
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * r7 is reserved for the system call number for thumb mode.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno	.req	r7		@ syscall number
tbl	.req	r8		@ syscall table pointer
why	.req	r8		@ Linux syscall (!= 0)
tsk	.req	r9		@ current thread_info