/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/linkage.h>
#include <abi/entry.h>
#include <abi/pgtable-bits.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <linux/threads.h>
#include <asm/page.h>
#include <asm/thread_info.h>

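/*
 * These constants pick apart a 32-bit virtual address for the two-level
 * page table walk below: bits 31..22 (_PGDIR_SHIFT) index the pgd, and
 * PTE_INDX_SHIFT/PTE_INDX_MSK turn bits 21..12 into a byte offset into
 * the pte page (4 bytes per entry, hence the 0xffc mask).
 */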
#define PTE_INDX_MSK    0xffc
#define PTE_INDX_SHIFT  10
#define _PGDIR_SHIFT    22

.macro	zero_fp
#ifdef CONFIG_STACKTRACE
	movi	r8, 0
#endif
.endm

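/*
 * If we trapped from userspace (epsr bit 31 clear), report the exit
 * from user context, then re-load the syscall argument registers that
 * the C call may have clobbered.
 */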
.macro	context_tracking
#ifdef CONFIG_CONTEXT_TRACKING
	mfcr	a0, epsr
	btsti	a0, 31
	bt	1f
	jbsr	context_tracking_user_exit
	ldw	a0, (sp, LSAVE_A0)
	ldw	a1, (sp, LSAVE_A1)
	ldw	a2, (sp, LSAVE_A2)
	ldw	a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV1__)
	ldw	r6, (sp, LSAVE_A4)
	ldw	r7, (sp, LSAVE_A5)
#endif
1:
#endif
.endm

.macro tlbop_begin name, val0, val1, val2
ENTRY(csky_\name)
	mtcr    a3, ss2
	mtcr    r6, ss3
	mtcr    a2, ss4

	RD_PGDR	r6
	RD_MEH	a3
#ifdef CONFIG_CPU_HAS_TLBI
	tlbi.vaas a3
	sync.is

	btsti	a3, 31
	bf	1f
	RD_PGDR_K r6
1:
#else
	bgeni	a2, 31
	WR_MCIR	a2
	bgeni	a2, 25
	WR_MCIR	a2
#endif
	bclri   r6, 0
	lrw	a2, va_pa_offset
	ld.w	a2, (a2, 0)
	subu	r6, a2
	bseti	r6, 31

	mov     a2, a3
	lsri    a2, _PGDIR_SHIFT
	lsli    a2, 2
	addu    r6, a2
	ldw     r6, (r6)

	lrw	a2, va_pa_offset
	ld.w	a2, (a2, 0)
	subu	r6, a2
	bseti	r6, 31

	lsri    a3, PTE_INDX_SHIFT
	lrw     a2, PTE_INDX_MSK
	and     a3, a2
	addu    r6, a3
	ldw     a3, (r6)

	movi	a2, (_PAGE_PRESENT | \val0)
	and     a3, a2
	cmpne   a3, a2
	bt	\name

	/* First read/write of the page: just update the pte flags */
	ldw     a3, (r6)
	bgeni   a2, PAGE_VALID_BIT
	bseti   a2, PAGE_ACCESSED_BIT
	bseti   a2, \val1
	bseti   a2, \val2
	or      a3, a2
	stw     a3, (r6)

	/* Some CPUs' hardware TLB refill bypasses the cache */
#ifdef CONFIG_CPU_NEED_TLBSYNC
	movi	a2, 0x22
	bseti	a2, 6
	mtcr	r6, cr22
	mtcr	a2, cr17
	sync
#endif

	mfcr    a3, ss2
	mfcr    r6, ss3
	mfcr    a2, ss4
	rte
\name:
	mfcr    a3, ss2
	mfcr    r6, ss3
	mfcr    a2, ss4
	SAVE_ALL 0
.endm
.macro tlbop_end is_write
	zero_fp
	context_tracking
	RD_MEH	a2
	psrset  ee, ie
	mov     a0, sp
	movi    a1, \is_write
	jbsr    do_page_fault
	jmpi    ret_from_exception
.endm
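
/*
 * A rough C sketch of the fast path the tlbop macros implement (names
 * here are illustrative, not kernel API).  The handler hand-walks the
 * two-level page table for the faulting address in MEH; if the pte is
 * present with the required permission it just sets the valid/accessed
 * (plus, for writes, dirty/modified) software bits and retries via rte,
 * otherwise it falls through to do_page_fault():
 *
 *	pgd = (u32 *)(((RD_PGDR() & ~1UL) - va_pa_offset) | 0x80000000);
 *	pte_page = (u32 *)((pgd[vaddr >> 22] - va_pa_offset) | 0x80000000);
 *	pte = &pte_page[(vaddr >> 12) & 0x3ff];
 *	if ((*pte & (_PAGE_PRESENT | required)) == (_PAGE_PRESENT | required))
 *		*pte |= valid_accessed_and_write_bits;
 *	else
 *		do_page_fault(regs, is_write);
 */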

.text

tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT
tlbop_end 0

tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
tlbop_end 1

tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
#ifndef CONFIG_CPU_HAS_LDSTEX
jbsr csky_cmpxchg_fixup
#endif
tlbop_end 1

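/*
 * System call entry, reached from userspace via "trap 0".  The syscall
 * number arrives in the per-ABI "syscallid" register alias and the
 * arguments in a0-a3 (plus r4/r5 for abiv2, r6/r7 for abiv1).  A rough
 * userspace sketch, in the style of the libc snippet further below
 * (illustrative only, not taken from the libc sources):
 *
 *	register long __a0 asm("a0") = arg0;
 *	asm volatile("trap 0" : "+r"(__a0) : : "memory");
 */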
ENTRY(csky_systemcall)
	SAVE_ALL TRAP0_SIZE
	zero_fp
	context_tracking
	psrset  ee, ie

	lrw     r9, __NR_syscalls
	cmphs   syscallid, r9		/* Check syscall nr is within range */
	bt      1f

	lrw     r9, sys_call_table
	ixw     r9, syscallid
	ldw     syscallid, (r9)
	cmpnei  syscallid, 0
	bf      ret_from_exception

	mov     r9, sp
	bmaski  r10, THREAD_SHIFT
	andn    r9, r10
	ldw     r10, (r9, TINFO_FLAGS)
	lrw	r9, _TIF_SYSCALL_WORK
	and	r10, r9
	cmpnei	r10, 0
	bt      csky_syscall_trace
#if defined(__CSKYABIV2__)
	subi    sp, 8
	stw	r5, (sp, 0x4)
	stw	r4, (sp, 0x0)
	jsr     syscallid                      /* Do system call */
	addi	sp, 8
#else
	jsr     syscallid
#endif
	stw     a0, (sp, LSAVE_A0)      /* Save return value */
1:
#ifdef CONFIG_DEBUG_RSEQ
	mov	a0, sp
	jbsr	rseq_syscall
#endif
	jmpi    ret_from_exception

csky_syscall_trace:
	mov	a0, sp                  /* sp = pt_regs pointer */
	jbsr	syscall_trace_enter
	cmpnei	a0, 0
	bt	1f
	/* Prepare args before doing the system call */
	ldw	a0, (sp, LSAVE_A0)
	ldw	a1, (sp, LSAVE_A1)
	ldw	a2, (sp, LSAVE_A2)
	ldw	a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV2__)
	/*
	 * Read the stack args out of pt_regs before moving sp, so the
	 * LSAVE_A4/A5 offsets still point into the saved frame.
	 */
	ldw	r9, (sp, LSAVE_A4)
	ldw	r10, (sp, LSAVE_A5)
	subi	sp, 8
	stw	r9, (sp, 0x0)
	stw	r10, (sp, 0x4)
	jsr	syscallid                     /* Do system call */
	addi	sp, 8
#else
	ldw	r6, (sp, LSAVE_A4)
	ldw	r7, (sp, LSAVE_A5)
	jsr	syscallid                     /* Do system call */
#endif
	stw	a0, (sp, LSAVE_A0)	/* Save return value */

1:
#ifdef CONFIG_DEBUG_RSEQ
	mov	a0, sp
	jbsr	rseq_syscall
#endif
	mov	a0, sp                  /* sp = pt_regs pointer */
	jbsr	syscall_trace_exit
	br	ret_from_exception

ENTRY(ret_from_kernel_thread)
	jbsr	schedule_tail
	mov	a0, r10
	jsr	r9
	jbsr	ret_from_exception

ENTRY(ret_from_fork)
	jbsr	schedule_tail
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10
	ldw	r10, (r9, TINFO_FLAGS)
	lrw	r9, _TIF_SYSCALL_WORK
	and	r10, r9
	cmpnei	r10, 0
	bf	ret_from_exception
	mov	a0, sp			/* sp = pt_regs pointer */
	jbsr	syscall_trace_exit

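/*
 * Common exit path.  Bit 31 of the saved PSR tells whether we return
 * to kernel (set) or to user (clear); the pending-work check and
 * context tracking only apply when returning to user.
 */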
ret_from_exception:
	psrclr	ie
	ld	r9, (sp, LSAVE_PSR)
	btsti	r9, 31

	bt	1f
	/*
	 * Mask sp down to the current thread_info and check its flags
	 * for pending work (_TIF_WORK_MASK) before returning to user.
	 */
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10

	ldw	r10, (r9, TINFO_FLAGS)
	lrw	r9, _TIF_WORK_MASK
	and	r10, r9
	cmpnei	r10, 0
	bt	exit_work
#ifdef CONFIG_CONTEXT_TRACKING
	jbsr	context_tracking_user_enter
#endif
1:
#ifdef CONFIG_PREEMPTION
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10

	ldw	r10, (r9, TINFO_PREEMPT)
	cmpnei	r10, 0
	bt	2f
	jbsr	preempt_schedule_irq	/* irq en/disable is done inside */
2:
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	ld	r10, (sp, LSAVE_PSR)
	btsti	r10, 6
	bf	2f
	jbsr	trace_hardirqs_on
2:
#endif
	RESTORE_ALL

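/*
 * lr is pointed back at ret_from_exception so that schedule() or
 * do_notify_resume() returns straight into the exit path and the work
 * flags get re-checked.
 */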
exit_work:
	lrw	r9, ret_from_exception
	mov	lr, r9

	btsti	r10, TIF_NEED_RESCHED
	bt	work_resched

	psrset	ie
	mov	a0, sp
	mov	a1, r10
	jmpi	do_notify_resume

work_resched:
	jmpi	schedule

ENTRY(csky_trap)
	SAVE_ALL 0
	zero_fp
	context_tracking
	psrset	ee
	mov	a0, sp                 /* Pass pt_regs pointer as the arg */
	jbsr	trap_c                 /* Call C-level trap handler */
	jmpi	ret_from_exception

/*
 * Prototype from libc for abiv1:
 * register unsigned int __result asm("a0");
 * asm( "trap 3" :"=r"(__result)::);
 */
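/*
 * abiv1 has no dedicated TLS register, so userspace fetches the thread
 * pointer with "trap 3"; abiv2 keeps it in the tls register instead
 * (loaded in __switch_to below).
 */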
ENTRY(csky_get_tls)
	USPTOKSP

	/* Advance epc past the trap so rte resumes at the next instruction */
	mfcr	a0, epc
	addi	a0, TRAP0_SIZE
	mtcr	a0, epc

	/*
	 * Mask sp down to the THREAD_SIZE-aligned base of the kernel
	 * stack, where thread_info lives; the subi/addi pair keeps an
	 * sp sitting exactly on the top boundary inside the right stack.
	 */
	bmaski	a0, THREAD_SHIFT
	not	a0
	subi	sp, 1
	and	a0, sp
	addi	sp, 1

	/* get tls */
	ldw	a0, (a0, TINFO_TP_VALUE)

	KSPTOUSP
	rte

ENTRY(csky_irq)
	SAVE_ALL 0
	zero_fp
	context_tracking
	psrset	ee

#ifdef CONFIG_TRACE_IRQFLAGS
	jbsr	trace_hardirqs_off
#endif

	mov	a0, sp
	jbsr	csky_do_IRQ

	jmpi	ret_from_exception

/*
 * a0 = prev task_struct *
 * a1 = next task_struct *
 * returns next in a0
 */
ENTRY(__switch_to)
	lrw	a3, TASK_THREAD
	addu	a3, a0

	SAVE_SWITCH_STACK

	stw	sp, (a3, THREAD_KSP)

	/* Set up next process to run */
	lrw	a3, TASK_THREAD
	addu	a3, a1

	ldw	sp, (a3, THREAD_KSP)	/* Set next kernel sp */

#if defined(__CSKYABIV2__)
	addi	a3, a1, TASK_THREAD_INFO
	ldw	tls, (a3, TINFO_TP_VALUE)
#endif

	RESTORE_SWITCH_STACK

	rts
ENDPROC(__switch_to)
379