/* xref: /openbmc/linux/arch/csky/kernel/entry.S (revision 4fc4dca8) */
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/linkage.h>
#include <abi/entry.h>
#include <abi/pgtable-bits.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <linux/threads.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/thread_info.h>

/* PTE index extraction from a virtual address (two-level page table,
 * 4 MiB pgd span => _PGDIR_SHIFT 22; PTE_INDX_MSK selects the
 * word-aligned pte slot after shifting by PTE_INDX_SHIFT). */
#define PTE_INDX_MSK    0xffc
#define PTE_INDX_SHIFT  10
#define _PGDIR_SHIFT    22

/*
 * tlbop_begin — common fast path for the TLB exception entry points
 * (one instance of this macro is expanded per exception kind below).
 *
 * \name: entry-point suffix (ENTRY(csky_\name)); also used as the
 *        local slow-path label.
 * \val0: extra _PAGE_* permission bit that must already be set in the
 *        PTE for the fast path to apply.
 * \val1/\val2: PTE flag bits to set when updating the entry in place.
 *
 * The fast path runs entirely out of registers: a2/a3/r6 are stashed
 * in supervisor scratch CRs ss2-ss4 because no kernel stack has been
 * set up yet at this point.
 */
.macro tlbop_begin name, val0, val1, val2
ENTRY(csky_\name)
	mtcr    a3, ss2			/* stash scratch regs in ss2-ss4; */
	mtcr    r6, ss3			/* no stack is available yet      */
	mtcr    a2, ss4

	RD_PGDR	r6			/* r6 = current pgd base */
	RD_MEH	a3			/* a3 = MEH (faulting virtual address) */
#ifdef CONFIG_CPU_HAS_TLBI
	tlbi.vaas a3			/* drop any stale TLB entry for this VA */
	sync.is

	btsti	a3, 31			/* bit31 set => kernel-space address */
	bf	1f
	RD_PGDR_K r6			/* kernel fault: use kernel pgd */
1:
#else
	/* No TLBI: invalidate through MCIR writes (bit meanings per the
	 * C-SKY CPU manual — presumably "invalidate matching entry"). */
	bgeni	a2, 31
	WR_MCIR	a2
	bgeni	a2, 25
	WR_MCIR	a2
#endif
	bclri   r6, 0			/* clear pgd low flag bit */
	lrw	a2, va_pa_offset
	ld.w	a2, (a2, 0)
	subu	r6, a2			/* pgd pointer: virt -> phys ... */
	bseti	r6, 31			/* ... then to the kseg alias */

	mov     a2, a3
	lsri    a2, _PGDIR_SHIFT	/* pgd index from the fault VA */
	lsli    a2, 2			/* scale index to a word offset */
	addu    r6, a2
	ldw     r6, (r6)		/* r6 = pgd entry (pte table pointer) */

	lrw	a2, va_pa_offset
	ld.w	a2, (a2, 0)
	subu	r6, a2			/* pte table: same virt -> kseg dance */
	bseti	r6, 31

	lsri    a3, PTE_INDX_SHIFT	/* pte slot offset from the fault VA */
	lrw     a2, PTE_INDX_MSK
	and     a3, a2
	addu    r6, a3
	ldw     a3, (r6)		/* a3 = pte value */

	movi	a2, (_PAGE_PRESENT | \val0)
	and     a3, a2
	cmpne   a3, a2			/* both PRESENT and \val0 set? */
	bt	\name			/* no: fall back to the C slow path */

	/* First read/write the page, just update the flags */
	ldw     a3, (r6)
	bgeni   a2, PAGE_VALID_BIT
	bseti   a2, PAGE_ACCESSED_BIT
	bseti   a2, \val1
	bseti   a2, \val2
	or      a3, a2
	stw     a3, (r6)		/* write the updated pte back */

	/* Some cpu tlb-hardrefill bypass the cache */
#ifdef CONFIG_CPU_NEED_TLBSYNC
	movi	a2, 0x22
	bseti	a2, 6
	mtcr	r6, cr22		/* cr22/cr17 cache-op registers — */
	mtcr	a2, cr17		/* flush the pte line (per CPU manual) */
	sync
#endif

	mfcr    a3, ss2			/* fast path done: restore scratch */
	mfcr    r6, ss3			/* regs and return from exception  */
	mfcr    a2, ss4
	rte
\name:					/* slow path: restore, then full save */
	mfcr    a3, ss2
	mfcr    r6, ss3
	mfcr    a2, ss4
	SAVE_ALL 0
.endm
/*
 * tlbop_end — slow-path tail shared by every tlbop_begin instance:
 * runs after SAVE_ALL, enters the C page-fault handler, then the
 * common exception-return path.
 * \is_write: 1 for write faults, 0 for read faults.
 */
.macro tlbop_end is_write
	RD_MEH	a2			/* a2 (arg2): faulting address from MEH */
	psrset  ee, ie			/* re-enable exceptions and interrupts */
	mov     a0, sp			/* a0 (arg0): pt_regs */
	movi    a1, \is_write		/* a1 (arg1): write flag */
	jbsr    do_page_fault
	jmpi    ret_from_exception
.endm

.text

/*
 * Expand the three TLB exception entry points:
 *   csky_tlbinvalidl — TLB miss on load  (needs _PAGE_READ)
 *   csky_tlbinvalids — TLB miss on store (needs _PAGE_WRITE)
 *   csky_tlbmodified — write to a clean page (needs _PAGE_WRITE)
 */
tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT
tlbop_end 0

tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
tlbop_end 1

tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
#ifndef CONFIG_CPU_HAS_LDSTEX
jbsr csky_cmpxchg_fixup		/* fix up an interrupted cmpxchg emulation */
#endif
tlbop_end 1


/*
 * System-call entry (trap 0). Validates the syscall number, dispatches
 * through sys_call_table, and diverts to csky_syscall_trace when any
 * syscall-tracing work flag is set on the current thread.
 */
ENTRY(csky_systemcall)
	SAVE_ALL TRAP0_SIZE

	psrset  ee, ie			/* re-enable exceptions + interrupts */

	lrw     r11, __NR_syscalls
	cmphs   syscallid, r11		/* Check nr of syscall */
	bt      ret_from_exception	/* out of range: return unchanged */

	lrw     r13, sys_call_table
	ixw     r13, syscallid		/* r13 = &sys_call_table[syscallid] */
	ldw     r11, (r13)
	cmpnei  r11, 0			/* NULL table entry? */
	bf      ret_from_exception

	/* Check thread_info flags (thread_info lives at the base of the
	 * kernel stack, found by masking sp with the stack size). */
	mov     r9, sp
	bmaski  r10, THREAD_SHIFT
	andn    r9, r10			/* r9 = thread_info */
	ldw     r8, (r9, TINFO_FLAGS)
	ANDI_R3	r8, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
	cmpnei	r8, 0
	bt      csky_syscall_trace	/* tracing requested: slow path */
#if defined(__CSKYABIV2__)
	/* ABIv2: spill r4/r5 so the callee can pick up syscall args 5/6
	 * from the stack — presumably per the csky ABI; confirm there. */
	subi    sp, 8
	stw  	r5, (sp, 0x4)
	stw  	r4, (sp, 0x0)
	jsr     r11                      /* Do system call */
	addi 	sp, 8
#else
	jsr     r11
#endif
	stw     a0, (sp, LSAVE_A0)      /* Save return value */
	jmpi    ret_from_exception

/*
 * Traced system-call path: notify the tracer on entry, reload the
 * (possibly tracer-modified) arguments from pt_regs, run the syscall,
 * then notify the tracer on exit.
 */
csky_syscall_trace:
	mov	a0, sp                  /* sp = pt_regs pointer */
	jbsr	syscall_trace_enter
	/* Prepare args before do system call */
	ldw	a0, (sp, LSAVE_A0)
	ldw	a1, (sp, LSAVE_A1)
	ldw	a2, (sp, LSAVE_A2)
	ldw	a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV2__)
	/* ABIv2: args 5/6 go on the stack (mirrors the fast path above) */
	subi	sp, 8
	stw	r5, (sp, 0x4)
	stw	r4, (sp, 0x0)
#else
	/* ABIv1: args 5/6 are passed in r6/r7 */
	ldw	r6, (sp, LSAVE_A4)
	ldw	r7, (sp, LSAVE_A5)
#endif
	jsr	r11                     /* Do system call */
#if defined(__CSKYABIV2__)
	addi	sp, 8			/* drop the two spilled arg slots */
#endif
	stw	a0, (sp, LSAVE_A0)	/* Save return value */

	mov     a0, sp                  /* right now, sp --> pt_regs */
	jbsr    syscall_trace_exit
	br	ret_from_exception

/*
 * First return path of a freshly forked kernel thread: finish the
 * context switch, then invoke the thread function.
 * r8/r9 are presumably set up by copy_thread (arg and function
 * pointer) — confirm against arch/csky process setup.
 */
ENTRY(ret_from_kernel_thread)
	jbsr	schedule_tail
	mov	a0, r8			/* a0 = thread argument */
	jsr	r9			/* call the thread function */
	jbsr	ret_from_exception	/* if it ever returns, exit via common path */

/*
 * First userspace return of a forked task: finish the context switch
 * and, if syscall tracing is active, report the fork's syscall exit
 * before dropping into the common exception return.
 */
ENTRY(ret_from_fork)
	jbsr	schedule_tail
	mov	r9, sp			/* locate thread_info from sp */
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10			/* r9 = thread_info */
	ldw	r8, (r9, TINFO_FLAGS)
	ANDI_R3	r8, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
	cmpnei	r8, 0
	bf	ret_from_exception	/* no trace flags: plain return */
	mov	a0, sp			/* sp = pt_regs pointer */
	jbsr	syscall_trace_exit
	/* falls through into ret_from_exception */

/*
 * Common exception/syscall return. When returning to user mode, check
 * the thread's work flags (signals, notify-resume, reschedule) and
 * divert to exit_work; kernel-mode returns restore immediately.
 */
ret_from_exception:
	ld	syscallid, (sp, LSAVE_PSR)	/* saved PSR (syscallid reused as scratch) */
	btsti	syscallid, 31			/* bit31: presumably the supervisor bit */
	bt	1f				/* kernel-mode return: skip work checks */

	/*
	 * Load address of current->thread_info, Then get address of task_struct
	 * Get task_needreshed in task_struct
	 */
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10				/* r9 = thread_info */

	ldw	r8, (r9, TINFO_FLAGS)
	andi	r8, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
	cmpnei	r8, 0
	bt	exit_work			/* pending work before user return */
1:
	RESTORE_ALL

/*
 * Handle pending work flags before returning to userspace. The link
 * register is pointed back at ret_from_exception so that schedule()
 * or do_notify_resume() re-runs the work-flag check on return.
 */
exit_work:
	lrw	syscallid, ret_from_exception
	mov	lr, syscallid			/* fake return addr -> recheck loop */

	btsti	r8, TIF_NEED_RESCHED
	bt	work_resched

	mov	a0, sp				/* a0 = pt_regs */
	mov	a1, r8				/* a1 = work flags */
	jmpi	do_notify_resume		/* tail-call; returns via lr */

work_resched:
	jmpi	schedule			/* tail-call; returns via lr */

/*
 * Generic trap entry: save the full register context and hand the
 * exception to the C-level handler.
 */
ENTRY(csky_trap)
	SAVE_ALL 0
	psrset	ee			/* re-enable exception handling */
	mov	a0, sp                 /* Push Stack pointer arg */
	jbsr	trap_c                 /* Call C-level trap handler */
	jmpi	ret_from_exception

/*
 * Fast "get TLS" trap (trap 3) for abiv1, which has no TLS register.
 * Returns the thread pointer in a0 without a full context save.
 *
 * Prototype from libc for abiv1:
 * register unsigned int __result asm("a0");
 * asm( "trap 3" :"=r"(__result)::);
 */
ENTRY(csky_get_tls)
	USPTOKSP			/* switch from user to kernel sp */

	/* increase epc for continue */
	mfcr	a0, epc
	addi	a0, TRAP0_SIZE		/* step epc past the trap instruction */
	mtcr	a0, epc

	/* get current task thread_info with kernel 8K stack */
	bmaski	a0, THREAD_SHIFT
	not	a0
	subi	sp, 1			/* presumably guards sp sitting exactly at */
	and	a0, sp			/* the stack top — confirm; a0 = thread_info */
	addi	sp, 1

	/* get tls */
	ldw	a0, (a0, TINFO_TP_VALUE)

	KSPTOUSP			/* back to the user sp */
	rte

/*
 * Hardware interrupt entry: save context, bump preempt_count while the
 * handler runs (CONFIG_PREEMPT), dispatch to csky_do_IRQ, and handle
 * kernel preemption on the way out.
 */
ENTRY(csky_irq)
	SAVE_ALL 0
	psrset	ee			/* re-enable exceptions; IRQs stay masked */

#ifdef CONFIG_PREEMPT
	mov	r9, sp			/* Get current stack  pointer */
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10			/* Get thread_info */

	/*
	 * Get task_struct->stack.preempt_count for current,
	 * and increase 1.
	 */
	ldw	r8, (r9, TINFO_PREEMPT)
	addi	r8, 1
	stw	r8, (r9, TINFO_PREEMPT)
#endif

	mov	a0, sp			/* a0 = pt_regs */
	jbsr	csky_do_IRQ

#ifdef CONFIG_PREEMPT
	subi	r8, 1			/* drop preempt_count again */
	stw	r8, (r9, TINFO_PREEMPT)
	cmpnei	r8, 0			/* still non-preemptible? skip */
	bt	2f
	ldw	r8, (r9, TINFO_FLAGS)
	btsti	r8, TIF_NEED_RESCHED
	bf	2f			/* no resched requested: skip */
1:
	jbsr	preempt_schedule_irq	/* irq en/disable is done inside */
	ldw	r7, (r9, TINFO_FLAGS)	/* get new tasks TI_FLAGS */
	btsti	r7, TIF_NEED_RESCHED
	bt	1b			/* go again */
#endif
2:
	jmpi	ret_from_exception

/*
 * Context switch.
 * a0 =  prev task_struct *
 * a1 =  next task_struct *
 * a0 =  return next
 * Saves prev's PSR, callee-saved regs and kernel sp into its
 * thread struct, then loads the same state for next.
 */
ENTRY(__switch_to)
	lrw	a3, TASK_THREAD
	addu	a3, a0			/* a3 = &prev->thread */

	mfcr	a2, psr			/* Save PSR value */
	stw	a2, (a3, THREAD_SR)	/* Save PSR in task struct */
	bclri	a2, 6			/* Disable interrupts */
	mtcr	a2, psr

	SAVE_SWITCH_STACK		/* spill callee-saved regs to stack */

	stw	sp, (a3, THREAD_KSP)	/* record prev's kernel sp */

	/* Set up next process to run */
	lrw	a3, TASK_THREAD
	addu	a3, a1			/* a3 = &next->thread */

	ldw	sp, (a3, THREAD_KSP)	/* Set next kernel sp */

	ldw	a2, (a3, THREAD_SR)	/* Set next PSR */
	mtcr	a2, psr

#if  defined(__CSKYABIV2__)
	addi	r7, a1, TASK_THREAD_INFO
	ldw	tls, (r7, TINFO_TP_VALUE)	/* load next's TLS register */
#endif

	RESTORE_SWITCH_STACK		/* reload next's callee-saved regs */

	rts				/* returns on next's stack; a0 = next */
ENDPROC(__switch_to)
