/* xref: /openbmc/linux/arch/csky/kernel/entry.S (revision 55fd7e02) */
1/* SPDX-License-Identifier: GPL-2.0 */
2// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
3
4#include <linux/linkage.h>
5#include <abi/entry.h>
6#include <abi/pgtable-bits.h>
7#include <asm/errno.h>
8#include <asm/setup.h>
9#include <asm/unistd.h>
10#include <asm/asm-offsets.h>
11#include <linux/threads.h>
12#include <asm/setup.h>
13#include <asm/page.h>
14#include <asm/thread_info.h>
15
16#define PTE_INDX_MSK    0xffc
17#define PTE_INDX_SHIFT  10
18#define _PGDIR_SHIFT    22
19
/*
 * zero_fp - clear r8, the frame pointer seen by the C-SKY stack
 * unwinder, so backtraces terminate cleanly at a kernel-entry frame.
 * Compiles to nothing when CONFIG_STACKTRACE is off.
 */
.macro	zero_fp
#ifdef CONFIG_STACKTRACE
	movi	r8, 0
#endif
.endm
25
/*
 * tlbop_begin - fast-path entry template for the three TLB exceptions
 * (read miss, write miss, write to a clean page).
 *
 *  \name        suffix of the entry point (csky_\name); also used as the
 *               local slow-path label.
 *  \val0        permission bit that, together with _PAGE_PRESENT, must
 *               already be set in the pte for the fast path to apply.
 *  \val1/\val2  pte status bits to set while fixing up the entry.
 *
 * The fast path runs entirely in registers: a2/a3/r6 are parked in the
 * supervisor scratch control registers ss2-ss4 instead of on the stack.
 * It walks the two-level page table by hand, validates the pte, sets
 * the valid/accessed (and, for writes, dirty/modified) bits, and
 * returns with rte so the hardware refill retries the access.  If the
 * pte check fails, control falls to \name:, which restores the scratch
 * registers and builds a full pt_regs frame for the C fault handler
 * (continued by tlbop_end).
 */
.macro tlbop_begin name, val0, val1, val2
ENTRY(csky_\name)
	mtcr    a3, ss2			/* park scratch regs in ss2-ss4 */
	mtcr    r6, ss3
	mtcr    a2, ss4

	RD_PGDR	r6			/* r6 = user pgd base */
	RD_MEH	a3			/* a3 = faulting address (MEH) */
#ifdef CONFIG_CPU_HAS_TLBI
	tlbi.vaas a3			/* drop stale entry for this VA */
	sync.is

	btsti	a3, 31			/* kernel-half address (bit 31)? */
	bf	1f
	RD_PGDR_K r6			/* yes: walk the kernel pgd instead */
1:
#else
	bgeni	a2, 31			/* no tlbi insn: MCIR ops to probe */
	WR_MCIR	a2			/* ... (bit 31) ... */
	bgeni	a2, 25			/* ... and invalidate the matching */
	WR_MCIR	a2			/* entry (bit 25) — per C-SKY MMU spec */
#endif
	bclri   r6, 0			/* clear low flag bit of pgd base */
	lrw	a2, va_pa_offset
	ld.w	a2, (a2, 0)
	subu	r6, a2			/* pgd: virt -> phys, then access it */
	bseti	r6, 31			/* through the bit-31 kernel segment */

	mov     a2, a3
	lsri    a2, _PGDIR_SHIFT	/* pgd index of the fault address */
	lsli    a2, 2			/* * 4 bytes per entry */
	addu    r6, a2
	ldw     r6, (r6)		/* r6 = pte table pointer (virtual) */

	lrw	a2, va_pa_offset
	ld.w	a2, (a2, 0)
	subu	r6, a2			/* pte table: virt -> phys, accessed */
	bseti	r6, 31			/* via the same bit-31 segment */

	lsri    a3, PTE_INDX_SHIFT	/* pte byte offset = */
	lrw     a2, PTE_INDX_MSK	/* ((va >> 12) & 0x3ff) * 4 */
	and     a3, a2
	addu    r6, a3			/* r6 = &pte */
	ldw     a3, (r6)		/* a3 = pte value */

	movi	a2, (_PAGE_PRESENT | \val0)
	and     a3, a2
	cmpne   a3, a2			/* present + required permission? */
	bt	\name			/* no: take the C slow path */

	/* First read/write the page, just update the flags */
	ldw     a3, (r6)
	bgeni   a2, PAGE_VALID_BIT
	bseti   a2, PAGE_ACCESSED_BIT
	bseti   a2, \val1
	bseti   a2, \val2
	or      a3, a2
	stw     a3, (r6)		/* write the fixed-up pte back */

	/* Some cpu tlb-hardrefill bypass the cache */
#ifdef CONFIG_CPU_NEED_TLBSYNC
	movi	a2, 0x22
	bseti	a2, 6			/* cache op through cr22/cr17 so the */
	mtcr	r6, cr22		/* hardware refill sees the new pte */
	mtcr	a2, cr17		/* (op encoding per CPU spec) */
	sync
#endif

	mfcr    a3, ss2			/* fast path done: restore scratch */
	mfcr    r6, ss3			/* regs and retry the faulting insn */
	mfcr    a2, ss4
	rte
\name:
	mfcr    a3, ss2			/* slow path: restore scratch regs, */
	mfcr    r6, ss3			/* then build a full pt_regs frame */
	mfcr    a2, ss4
	SAVE_ALL 0
.endm
/*
 * tlbop_end - slow-path tail shared by the TLB handlers: call the
 * C page-fault handler and leave via the common exception return.
 *  \is_write: 1 for write faults, 0 for read faults (2nd argument
 *             of do_page_fault).
 */
.macro tlbop_end is_write
	zero_fp
	RD_MEH	a2			/* a2 = fault address (3rd argument) */
	psrset  ee, ie			/* re-enable exceptions + interrupts */
	mov     a0, sp			/* a0 = pt_regs */
	movi    a1, \is_write
	jbsr    do_page_fault
	jmpi    ret_from_exception
.endm
113
.text

/* Read fault: fast path requires a present + readable pte. */
tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT
tlbop_end 0

/* Write fault (no entry): fast path requires a writable pte; mark dirty. */
tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
tlbop_end 1

/* Write to a present but clean page: set the dirty/modified bits. */
tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
#ifndef CONFIG_CPU_HAS_LDSTEX
jbsr csky_cmpxchg_fixup	/* no ll/sc: repair a cmpxchg emulation interrupted by this fault */
#endif
tlbop_end 1
127
/*
 * trap 0: system-call entry.  "syscallid" is the abi-defined register
 * alias carrying the syscall number on entry; it is reused below to
 * hold the handler pointer.  TRAP0_SIZE advances the saved epc past
 * the trap instruction so rte resumes after it.
 */
ENTRY(csky_systemcall)
	SAVE_ALL TRAP0_SIZE		/* build pt_regs frame */
	zero_fp
	psrset  ee, ie			/* exceptions + interrupts back on */

	lrw     r9, __NR_syscalls
	cmphs   syscallid, r9		/* Check nr of syscall */
	bt      1f			/* out of range: skip dispatch */

	lrw     r9, sys_call_table
	ixw     r9, syscallid		/* r9 = &sys_call_table[syscallid] */
	ldw     syscallid, (r9)		/* syscallid = handler address */
	cmpnei  syscallid, 0
	bf      ret_from_exception	/* hole in the table: nothing to call */

	mov     r9, sp			/* thread_info = sp & ~(THREAD_SIZE-1) */
	bmaski  r10, THREAD_SHIFT
	andn    r9, r10
	ldw     r10, (r9, TINFO_FLAGS)
	lrw	r9, _TIF_SYSCALL_WORK
	and	r10, r9
	cmpnei	r10, 0
	bt      csky_syscall_trace	/* tracing/audit: take the slow path */
#if defined(__CSKYABIV2__)
	subi    sp, 8			/* abiv2: args 5/6 (r4/r5) are stack args */
	stw  	r5, (sp, 0x4)
	stw  	r4, (sp, 0x0)
	jsr     syscallid                      /* Do system call */
	addi 	sp, 8
#else
	jsr     syscallid
#endif
	stw     a0, (sp, LSAVE_A0)      /* Save return value */
1:
#ifdef CONFIG_DEBUG_RSEQ
	mov	a0, sp
	jbsr	rseq_syscall		/* debug-check rseq state on exit */
#endif
	jmpi    ret_from_exception
167
/*
 * Slow-path syscall dispatch, entered from csky_systemcall when
 * _TIF_SYSCALL_WORK (ptrace/audit/etc.) is set.  All arguments are
 * reloaded from pt_regs because the tracer may have rewritten them;
 * the return value is stored back before syscall_trace_exit.
 */
csky_syscall_trace:
	mov	a0, sp                  /* sp = pt_regs pointer */
	jbsr	syscall_trace_enter
	/* Prepare args before do system call */
	ldw	a0, (sp, LSAVE_A0)
	ldw	a1, (sp, LSAVE_A1)
	ldw	a2, (sp, LSAVE_A2)
	ldw	a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV2__)
	/*
	 * abiv2 passes syscall args 5/6 on the stack.  Load them from
	 * pt_regs BEFORE moving sp: the LSAVE_* offsets are relative to
	 * the pt_regs base, so reading (sp, LSAVE_A4) after "subi sp, 8"
	 * would fetch the wrong saved registers (off by 8).
	 */
	ldw	r9, (sp, LSAVE_A4)
	ldw	r10, (sp, LSAVE_A5)
	subi	sp, 8
	stw	r9, (sp, 0x0)
	stw	r10, (sp, 0x4)
	jsr	syscallid                     /* Do system call */
	addi	sp, 8
#else
	ldw	r6, (sp, LSAVE_A4)
	ldw	r7, (sp, LSAVE_A5)
	jsr	syscallid                     /* Do system call */
#endif
	stw	a0, (sp, LSAVE_A0)	/* Save return value */

#ifdef CONFIG_DEBUG_RSEQ
	mov	a0, sp
	jbsr	rseq_syscall		/* debug-check rseq state on exit */
#endif
	mov     a0, sp                  /* right now, sp --> pt_regs */
	jbsr    syscall_trace_exit
	br	ret_from_exception
198
/*
 * First schedule of a new kernel thread: finish the context switch,
 * then call the thread function.  r9/r10 come from the switch stack
 * (presumably set up in copy_thread — r9 = function, r10 = argument;
 * confirm against copy_thread).
 */
ENTRY(ret_from_kernel_thread)
	jbsr	schedule_tail
	mov	a0, r10			/* a0 = thread-function argument */
	jsr	r9			/* call the thread function */
	jbsr	ret_from_exception
204
/*
 * First return to userspace of a forked child: finish the context
 * switch, report syscall exit to tracers when _TIF_SYSCALL_WORK is
 * set, then fall through into ret_from_exception.
 */
ENTRY(ret_from_fork)
	jbsr	schedule_tail
	mov	r9, sp			/* thread_info = sp & ~(THREAD_SIZE-1) */
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10
	ldw	r10, (r9, TINFO_FLAGS)
	lrw	r9, _TIF_SYSCALL_WORK
	and	r10, r9
	cmpnei	r10, 0
	bf	ret_from_exception	/* no trace work: plain return */
	mov	a0, sp			/* sp = pt_regs pointer */
	jbsr	syscall_trace_exit
217
/*
 * Common exception/syscall/interrupt return path.  For returns to
 * userspace, pending work (signals, resched, notify) is handled first;
 * kernel returns only consider preemption.
 */
ret_from_exception:
	psrclr	ie			/* mask IRQs while checking work flags */
	ld	r9, (sp, LSAVE_PSR)
	btsti	r9, 31			/* PSR bit 31 set: frame was kernel mode */

	bt	1f			/* kernel return: skip user work */
	/*
	 * Load address of current->thread_info, Then get address of task_struct
	 * Get task_needreshed in task_struct
	 */
	mov	r9, sp			/* thread_info = sp & ~(THREAD_SIZE-1) */
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10

	ldw	r10, (r9, TINFO_FLAGS)
	lrw	r9, _TIF_WORK_MASK
	and	r10, r9
	cmpnei	r10, 0
	bt	exit_work		/* pending signal/resched/notify work */
1:
#ifdef CONFIG_PREEMPTION
	mov	r9, sp			/* recompute thread_info */
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10

	ldw	r10, (r9, TINFO_PREEMPT)
	cmpnei	r10, 0			/* preempt_count != 0: not preemptible */
	bt	2f
	jbsr	preempt_schedule_irq	/* irq en/disable is done inside */
2:
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	ld	r10, (sp, LSAVE_PSR)
	btsti	r10, 6			/* bit 6: IE of the frame being restored */
	bf	2f
	jbsr	trace_hardirqs_on	/* rte will re-enable interrupts */
2:
#endif
	RESTORE_ALL
258
/*
 * Handle pending user-return work.  r10 still holds the masked
 * thread_info flags from ret_from_exception.  lr is pointed back at
 * ret_from_exception so both tails re-run the full work check.
 */
exit_work:
	lrw	r9, ret_from_exception
	mov	lr, r9			/* both tails return via ret_from_exception */

	btsti	r10, TIF_NEED_RESCHED
	bt	work_resched

	psrset	ie			/* signal/notify work runs with IRQs on */
	mov	a0, sp			/* a0 = pt_regs */
	mov	a1, r10			/* a1 = pending work flags */
	jmpi	do_notify_resume

work_resched:
	jmpi	schedule
273
/*
 * Generic exception entry: build a pt_regs frame and hand off to the
 * C-level trap_c() dispatcher.
 */
ENTRY(csky_trap)
	SAVE_ALL 0
	zero_fp
	psrset	ee			/* re-enable exceptions (IRQs stay off) */
	mov	a0, sp                 /* Push Stack pointer arg */
	jbsr	trap_c                 /* Call C-level trap handler */
	jmpi	ret_from_exception
281
/*
 * Fast "trap 3" service: return the current task's TLS pointer in a0
 * without building a pt_regs frame.
 *
 * Prototype from libc for abiv1:
 * register unsigned int __result asm("a0");
 * asm( "trap 3" :"=r"(__result)::);
 */
ENTRY(csky_get_tls)
	USPTOKSP			/* switch to the kernel stack pointer */

	/* increase epc for continue */
	mfcr	a0, epc
	addi	a0, TRAP0_SIZE		/* resume after the trap instruction */
	mtcr	a0, epc

	/* get current task thread_info with kernel 8K stack */
	bmaski	a0, THREAD_SHIFT
	not	a0			/* a0 = ~(THREAD_SIZE - 1) */
	subi	sp, 1			/* sp-1: keep an exactly stack-top sp */
	and	a0, sp			/* inside this stack before masking */
	addi	sp, 1

	/* get tls */
	ldw	a0, (a0, TINFO_TP_VALUE)

	KSPTOUSP			/* back to the user stack pointer */
	rte
307
/*
 * Hardware interrupt entry: build pt_regs and dispatch to the C IRQ
 * handler; interrupts remain masked for the duration.
 */
ENTRY(csky_irq)
	SAVE_ALL 0
	zero_fp
	psrset	ee			/* exceptions on; IRQs stay masked */

#ifdef CONFIG_TRACE_IRQFLAGS
	jbsr	trace_hardirqs_off	/* record that IRQs are now off */
#endif


	mov	a0, sp			/* a0 = pt_regs */
	jbsr	csky_do_IRQ

	jmpi	ret_from_exception
322
/*
 * Context switch.
 * a0 =  prev task_struct *
 * a1 =  next task_struct *
 * a0 =  return next
 * Saves prev's callee-saved registers and kernel sp into prev->thread,
 * then restores the same state for next.
 */
ENTRY(__switch_to)
	lrw	a3, TASK_THREAD
	addu	a3, a0			/* a3 = &prev->thread */

	SAVE_SWITCH_STACK		/* push prev's callee-saved regs */

	stw	sp, (a3, THREAD_KSP)	/* record prev's kernel sp */

	/* Set up next process to run */
	lrw	a3, TASK_THREAD
	addu	a3, a1			/* a3 = &next->thread */

	ldw	sp, (a3, THREAD_KSP)	/* Set next kernel sp */

#if  defined(__CSKYABIV2__)
	addi	a3, a1, TASK_THREAD_INFO
	ldw	tls, (a3, TINFO_TP_VALUE)	/* install next's TLS register */
#endif

	RESTORE_SWITCH_STACK		/* pop next's callee-saved regs */

	rts
ENDPROC(__switch_to)
351