/*
 * linux/arch/arm/kernel/entry-v7m.S
 *
 * Copyright (C) 2008 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Low-level vector interface routines for the ARMv7-M architecture
 */
#include <asm/memory.h>
#include <asm/glue.h>
#include <asm/thread_notify.h>
#include <asm/v7m.h>

#include "entry-header.S"

#ifdef CONFIG_TRACE_IRQFLAGS
#error "CONFIG_TRACE_IRQFLAGS not supported on the current ARMv7M implementation"
#endif

__invalid_entry:
	v7m_exception_entry
#ifdef CONFIG_PRINTK
	adr	r0, strerr
	mrs	r1, ipsr
	mov	r2, lr
	bl	printk
#endif
	mov	r0, sp
	bl	show_regs
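	@ Nothing more can be done for an unhandled exception; spin forever.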
1:	b	1b
ENDPROC(__invalid_entry)

strerr:	.asciz	"\nUnhandled exception: IPSR = %08lx LR = %08lx\n"

	.align	2
__irq_entry:
	v7m_exception_entry

	@
	@ Invoke the IRQ handler
	@
	mrs	r0, ipsr
	ldr	r1, =V7M_xPSR_EXCEPTIONNO
	and	r0, r1
	sub	r0, #16
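	@ IPSR holds the number of the exception being handled; external
	@ interrupts start at exception number 16, so masking with
	@ V7M_xPSR_EXCEPTIONNO and subtracting 16 gives the NVIC IRQ number.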
	mov	r1, sp
	stmdb	sp!, {lr}
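	@ lr carries the EXC_RETURN value set up at exception entry and is
	@ needed by the bx lr below; the C call clobbers it, so save it here
	@ and pop it back afterwards.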
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	bl	nvic_handle_irq

	pop	{lr}
	@
	@ Check for any pending work if returning to user
	@
	ldr	r1, =BASEADDR_V7M_SCB
	ldr	r0, [r1, V7M_SCB_ICSR]
	tst	r0, V7M_SCB_ICSR_RETTOBASE
	beq	2f
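	@ RETTOBASE is clear while another, preempted exception is still
	@ active, i.e. this return does not reach thread mode, so the
	@ pending-work check below is skipped in that case.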

	get_thread_info tsk
	ldr	r2, [tsk, #TI_FLAGS]
	tst	r2, #_TIF_WORK_MASK
	beq	2f			@ no work pending
	mov	r0, #V7M_SCB_ICSR_PENDSVSET
	str	r0, [r1, V7M_SCB_ICSR]	@ raise PendSV

2:
	@ registers r0-r3 and r12 are automatically restored on exception
	@ return. r4-r7 were not clobbered in v7m_exception_entry so for
	@ correctness they don't need to be restored. So only r8-r11 must be
	@ restored here. The easiest way to do so is to restore r0-r7, too.
	ldmia	sp!, {r0-r11}
	add	sp, #PT_REGS_SIZE-S_IP
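	@ The rest of the pt_regs frame (from the S_IP slot upwards) is simply
	@ discarded; the registers stacked by the hardware at exception entry
	@ are restored by the processor itself when the EXC_RETURN value in lr
	@ is branched to below.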
	cpsie	i
	bx	lr
ENDPROC(__irq_entry)

__pendsv_entry:
	v7m_exception_entry

	ldr	r1, =BASEADDR_V7M_SCB
	mov	r0, #V7M_SCB_ICSR_PENDSVCLR
	str	r0, [r1, V7M_SCB_ICSR]	@ clear PendSV
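	@ PendSV was pended from __irq_entry above when work was found while
	@ returning to thread mode; now that it has been taken and cleared,
	@ run that work through the common return-to-user path.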

	@ execute the pending work, including reschedule
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
ENDPROC(__pendsv_entry)

/*
 * Register switch for ARMv7-M processors.
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
	.fnstart
	.cantunwind
	add	ip, r1, #TI_CPU_SAVE
	stmia	ip!, {r4 - r11}		@ Store most regs on stack
	str	sp, [ip], #4
	str	lr, [ip], #4
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
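	@ r4 and r5 are callee-saved, so they survive the notifier call below:
	@ r5 keeps the previous task_struct (the value __switch_to must return
	@ in r0) and r4 points at the incoming thread's saved cpu context.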
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
	mov	ip, r4
	mov	r0, r5
	ldmia	ip!, {r4 - r11}		@ Load all regs saved previously
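	@ Finally pick up the incoming thread's saved sp and then its saved lr
	@ as the new pc, resuming wherever that thread was last switched out.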
	ldr	sp, [ip]
	ldr	pc, [ip, #4]!
	.fnend
ENDPROC(__switch_to)

	.data
#if CONFIG_CPU_V7M_NUM_IRQ <= 112
	.align	9
#else
	.align	10
#endif
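/*
 * Alignment sizing: the table has 16 architectural entries plus
 * CONFIG_CPU_V7M_NUM_IRQ external ones, 4 bytes each, and VTOR requires the
 * table base to be aligned to its size rounded up to a power of two.  With
 * up to 112 external IRQs that is at most (16 + 112) * 4 = 512 bytes, hence
 * .align 9; larger configurations get .align 10 (1024 bytes, enough for up
 * to 240 external IRQs).
 */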

/*
 * Vector table (natural alignment needs to be ensured)
 */
ENTRY(vector_table)
	.long	0			@ 0 - Reset stack pointer
	.long	__invalid_entry		@ 1 - Reset
	.long	__invalid_entry		@ 2 - NMI
	.long	__invalid_entry		@ 3 - HardFault
	.long	__invalid_entry		@ 4 - MemManage
	.long	__invalid_entry		@ 5 - BusFault
	.long	__invalid_entry		@ 6 - UsageFault
	.long	__invalid_entry		@ 7 - Reserved
	.long	__invalid_entry		@ 8 - Reserved
	.long	__invalid_entry		@ 9 - Reserved
	.long	__invalid_entry		@ 10 - Reserved
	.long	vector_swi		@ 11 - SVCall
	.long	__invalid_entry		@ 12 - Debug Monitor
	.long	__invalid_entry		@ 13 - Reserved
	.long	__pendsv_entry		@ 14 - PendSV
	.long	__invalid_entry		@ 15 - SysTick
	.rept	CONFIG_CPU_V7M_NUM_IRQ
	.long	__irq_entry		@ External Interrupts
	.endr