/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/kernel/entry-v7m.S
 *
 * Copyright (C) 2008 ARM Ltd.
 *
 * Low-level vector interface routines for the ARMv7-M architecture
 */
#include <asm/page.h>
#include <asm/glue.h>
#include <asm/thread_notify.h>
#include <asm/v7m.h>

#include "entry-header.S"

#ifdef CONFIG_TRACE_IRQFLAGS
#error "CONFIG_TRACE_IRQFLAGS not supported on the current ARMv7M implementation"
#endif

__invalid_entry:
	v7m_exception_entry
#ifdef CONFIG_PRINTK
	adr	r0, strerr
	mrs	r1, ipsr
	mov	r2, lr
	bl	_printk
#endif
	mov	r0, sp
	bl	show_regs
1:	b	1b			@ spin here; there is nothing to return to
ENDPROC(__invalid_entry)

strerr:	.asciz	"\nUnhandled exception: IPSR = %08lx LR = %08lx\n"

	.align	2
__irq_entry:
	v7m_exception_entry

	@
	@ Invoke the IRQ handler
	@
	mov	r0, sp
	ldr_this_cpu sp, irq_stack_ptr, r1, r2	@ switch to this CPU's IRQ stack

	@
	@ If we took the interrupt while running in the kernel, we may already
	@ be using the IRQ stack, so revert to the original value in that case.
	@
	subs	r2, sp, r0		@ SP above bottom of IRQ stack?
	rsbscs	r2, r2, #THREAD_SIZE	@ ... and below the top?
	movcs	sp, r0			@ if so, revert to the incoming SP

	push	{r0, lr}		@ preserve LR and original SP

	@ routine called with r0 = struct pt_regs *
	bl	generic_handle_arch_irq

	pop	{r0, lr}
	mov	sp, r0

	@
	@ Check for any pending work if returning to user
	@
	ldr	r1, =BASEADDR_V7M_SCB
	ldr	r0, [r1, V7M_SCB_ICSR]
	tst	r0, V7M_SCB_ICSR_RETTOBASE
	beq	2f			@ nested exception, not returning to thread mode

	get_thread_info tsk
	ldr	r2, [tsk, #TI_FLAGS]
	movs	r2, r2, lsl #16		@ Z set if no flags in bits 0-15 are set
	beq	2f			@ no work pending
	mov	r0, #V7M_SCB_ICSR_PENDSVSET
	str	r0, [r1, V7M_SCB_ICSR]	@ raise PendSV

2:
	@ registers r0-r3 and r12 are automatically restored on exception
	@ return. r4-r7 were not clobbered in v7m_exception_entry so for
	@ correctness they don't need to be restored. So only r8-r11 must be
	@ restored here. The easiest way to do so is to restore r0-r7, too.
	ldmia	sp!, {r0-r11}
	add	sp, #PT_REGS_SIZE-S_IP	@ drop the remainder of the saved frame
	cpsie	i
	bx	lr			@ exception return
ENDPROC(__irq_entry)

__pendsv_entry:
	v7m_exception_entry

	ldr	r1, =BASEADDR_V7M_SCB
	mov	r0, #V7M_SCB_ICSR_PENDSVCLR
	str	r0, [r1, V7M_SCB_ICSR]	@ clear PendSV

	@ execute the pending work, including reschedule
	get_thread_info tsk
	mov	why, #0			@ not a syscall return
	b	ret_to_user_from_irq
ENDPROC(__pendsv_entry)
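/*
 * Deferred return-to-user work, as implemented by the two handlers above:
 * __irq_entry never runs the work loop itself. When it is about to return
 * to the base (thread) level with work flags set, it pends PendSV instead;
 * __pendsv_entry then branches to the common ret_to_user_from_irq path.
 * This relies on PendSV being configured at the lowest exception priority
 * (done during CPU setup, outside this file), so the work runs only once
 * every other active exception has retired.
 */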
/*
 * Register switch for ARMv7-M processors.
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
	.fnstart
	.cantunwind
	add	ip, r1, #TI_CPU_SAVE
	stmia	ip!, {r4 - r11}		@ Store most regs in the cpu save area
	str	sp, [ip], #4
	str	lr, [ip], #4
	mov	r5, r0
	mov	r6, r2			@ Preserve 'next'
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
	mov	r0, r5
	mov	r1, r6
	ldmia	r4, {r4 - r12, lr}	@ Load all regs saved previously
	set_current r1, r2
	mov	sp, ip			@ saved SP was loaded into ip above
	bx	lr
	.fnend
ENDPROC(__switch_to)

	.data
#if CONFIG_CPU_V7M_NUM_IRQ <= 112
	.align	9
#else
	.align	10
#endif

/*
 * Vector table (natural alignment must be ensured)
 */
ENTRY(vector_table)
	.long	0			@ 0 - Reset stack pointer
	.long	__invalid_entry		@ 1 - Reset
	.long	__invalid_entry		@ 2 - NMI
	.long	__invalid_entry		@ 3 - HardFault
	.long	__invalid_entry		@ 4 - MemManage
	.long	__invalid_entry		@ 5 - BusFault
	.long	__invalid_entry		@ 6 - UsageFault
	.long	__invalid_entry		@ 7 - Reserved
	.long	__invalid_entry		@ 8 - Reserved
	.long	__invalid_entry		@ 9 - Reserved
	.long	__invalid_entry		@ 10 - Reserved
	.long	vector_swi		@ 11 - SVCall
	.long	__invalid_entry		@ 12 - Debug Monitor
	.long	__invalid_entry		@ 13 - Reserved
	.long	__pendsv_entry		@ 14 - PendSV
	.long	__invalid_entry		@ 15 - SysTick
	.rept	CONFIG_CPU_V7M_NUM_IRQ
	.long	__irq_entry		@ External Interrupts
	.endr
	.align	2
	.globl	exc_ret
exc_ret:
	.space	4			@ EXC_RETURN value, set up at boot and
					@ used by the return code in entry-header.S
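/*
 * Sizing note for the vector table above: with the 16 architectural
 * entries plus CONFIG_CPU_V7M_NUM_IRQ external IRQs at 4 bytes each, up
 * to 112 IRQs fit in (16 + 112) * 4 = 512 bytes, hence .align 9; larger
 * configurations are aligned to 1024 bytes, which covers up to 240 IRQs.
 * VTOR requires the table base to be aligned to the table size rounded
 * up to a power of two, which is what the #if above ensures.
 */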