/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <asm/asm.h>
#include <asm/export.h>
#include <asm/loongarch.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>

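/*
 * invtlb operation 5 invalidates the non-global (G=0) TLB entries
 * whose ASID and virtual address both match the given operands.
 */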
#define INVTLB_ADDR_GFALSE_AND_ASID	5

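/*
 * Each page-table level is one page of 8-byte entries, so every
 * level consumes PAGE_SHIFT - 3 index bits of the virtual address.
 */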
#define PTRS_PER_PGD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PUD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PMD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PTE_BITS	(PAGE_SHIFT - 3)

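/*
 * Generate the slow-path dispatchers tlb_do_page_fault_0 (read) and
 * tlb_do_page_fault_1 (write): save the full register state, pass the
 * faulting address and the write flag to do_page_fault(), then return
 * through the saved context.
 */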
	.macro tlb_do_page_fault, write
	SYM_FUNC_START(tlb_do_page_fault_\write)
	SAVE_ALL
	csrrd		a2, LOONGARCH_CSR_BADV
	move		a0, sp
	REG_S		a2, sp, PT_BVADDR
	li.w		a1, \write
	bl		do_page_fault
	RESTORE_ALL_AND_RET
	SYM_FUNC_END(tlb_do_page_fault_\write)
	.endm

	tlb_do_page_fault 0
	tlb_do_page_fault 1

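/*
 * TLB protection exceptions cannot be fixed up here and are handed
 * straight to do_page_fault(), with the write argument fixed at zero.
 */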
SYM_FUNC_START(handle_tlb_protect)
	BACKUP_T0T1
	SAVE_ALL
	move		a0, sp
	move		a1, zero
	csrrd		a2, LOONGARCH_CSR_BADV
	REG_S		a2, sp, PT_BVADDR
	la_abs		t0, do_page_fault
	jirl		ra, t0, 0
	RESTORE_ALL_AND_RET
SYM_FUNC_END(handle_tlb_protect)

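/*
 * Fast path for TLB load (read) exceptions: walk the page tables by
 * hand, mark the PTE valid and rewrite the matching TLB entry. t0/t1/ra
 * are stashed in the KSave scratch CSRs because no kernel stack is
 * usable at this point.
 */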
SYM_FUNC_START(handle_tlb_load)
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_load
	csrrd		t1, LOONGARCH_CSR_PGDL

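	/*
	 * Software page-table walk: at each level, bstrpick.d extracts
	 * that level's index field from the faulting address and alsl.d
	 * adds it, scaled by 8 (the entry size), to the table base.
	 */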
vmalloc_done_load:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge TLB entries, the PMD entry (pmde) doesn't contain a
	 * next-level address but the TLB PTE itself. Check the PAGE_HUGE
	 * bit to see whether we need to jump to huge TLB processing.
	 */
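	/*
	 * rotri.d rotates the PTE right by _PAGE_HUGE_SHIFT + 1, which
	 * puts the PAGE_HUGE bit into the sign bit so a single bltz can
	 * test it; the non-huge path rotates the value back below.
	 */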
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_load

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

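	/*
	 * On SMP the PTE is updated with an ll.d/sc.d pair: sc.d writes
	 * back t0 and clears it on failure, in which case we reload and
	 * retry.
	 */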
#ifdef CONFIG_SMP
smp_pgtable_change_load:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	andi		ra, t0, _PAGE_PRESENT
	beqz		ra, nopage_tlb_load

	ori		t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_load
#else
	st.d		t0, t1, 0
#endif
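	/*
	 * tlbsrch locates the index of the matching TLB entry. Clearing
	 * bit 3 of the PTE address aligns it down to the even entry of
	 * its PTE pair, so the two loads below fetch the values destined
	 * for TLBELO0 and TLBELO1.
	 */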
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_load:
	la_abs		t1, swapper_pg_dir
	b		vmalloc_done_load
#endif

	/* This is the entry point for huge page processing. */
tlb_huge_update_load:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_PRESENT
	beqz		t0, nopage_tlb_load

#ifdef CONFIG_SMP
	ori		t0, ra, _PAGE_VALID
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_load
	ori		t0, ra, _PAGE_VALID
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, _PAGE_VALID
	st.d		t0, t1, 0
#endif
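	/*
	 * Invalidate any non-global TLB entry already present for this
	 * address and ASID (e.g. one installed by the refill handler),
	 * so that the tlbfill below cannot create a duplicate match.
	 */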
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: Move Global bit */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1: advance the physical address by half a huge page */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

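	/*
	 * csrxchg writes only the page-size field of TLBIDX (selected
	 * by the CSR_TLBIDX_PS mask); tlbfill then installs the entry
	 * at a hardware-chosen index with that page size.
	 */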
	/* Set huge page TLB entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

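	/* Reset default page size */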
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

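	/*
	 * No usable PTE: restore ra and tail-call the C read-fault path.
	 * The dbar is a full barrier, presumably to order the preceding
	 * page-table reads before entering the slow path.
	 */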
nopage_tlb_load:
	dbar		0
	csrrd		ra, EXCEPTION_KS2
	la_abs		t0, tlb_do_page_fault_0
	jr		t0
SYM_FUNC_END(handle_tlb_load)

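/*
 * TLB store (write) exceptions: same structure as handle_tlb_load (see
 * the comments there), except that the PTE must be both present and
 * writable, and the fast path also sets the dirty/modified bits.
 */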
SYM_FUNC_START(handle_tlb_store)
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_store
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_store:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge TLB entries, the PMD entry (pmde) doesn't contain a
	 * next-level address but the TLB PTE itself. Check the PAGE_HUGE
	 * bit to see whether we need to jump to huge TLB processing.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_store

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

#ifdef CONFIG_SMP
smp_pgtable_change_store:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	andi		ra, t0, _PAGE_PRESENT | _PAGE_WRITE
	xori		ra, ra, _PAGE_PRESENT | _PAGE_WRITE
	bnez		ra, nopage_tlb_store

	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_store
#else
	st.d		t0, t1, 0
#endif
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_store:
	la_abs		t1, swapper_pg_dir
	b		vmalloc_done_store
#endif

	/* This is the entry point for huge page processing. */
tlb_huge_update_store:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_PRESENT | _PAGE_WRITE
	xori		t0, t0, _PAGE_PRESENT | _PAGE_WRITE
	bnez		t0, nopage_tlb_store

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_store
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d		t0, t1, 0
#endif
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: Move Global bit */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1: advance the physical address by half a huge page */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page TLB entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_store:
	dbar		0
	csrrd		ra, EXCEPTION_KS2
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_FUNC_END(handle_tlb_store)

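/*
 * TLB modify exceptions (write to a valid but not yet dirty page):
 * again the same structure as handle_tlb_load, but only _PAGE_WRITE
 * is checked before the dirty/modified bits are set.
 */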
SYM_FUNC_START(handle_tlb_modify)
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_modify
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_modify:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge TLB entries, the PMD entry (pmde) doesn't contain a
	 * next-level address but the TLB PTE itself. Check the PAGE_HUGE
	 * bit to see whether we need to jump to huge TLB processing.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_modify

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

#ifdef CONFIG_SMP
smp_pgtable_change_modify:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	andi		ra, t0, _PAGE_WRITE
	beqz		ra, nopage_tlb_modify

	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_modify
#else
	st.d		t0, t1, 0
#endif
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_modify:
	la_abs		t1, swapper_pg_dir
	b		vmalloc_done_modify
#endif

	/* This is the entry point for huge page processing. */
tlb_huge_update_modify:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_WRITE
	beqz		t0, nopage_tlb_modify

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_modify
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d		t0, t1, 0
#endif
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: Move Global bit */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1: advance the physical address by half a huge page */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page TLB entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_modify:
	dbar		0
	csrrd		ra, EXCEPTION_KS2
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_FUNC_END(handle_tlb_modify)

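/*
 * TLB refill handler: t0 is stashed in the dedicated TLBRSAVE scratch
 * CSR. Each lddir descends one directory level of the hardware-assisted
 * walk configured in CSR.PWCL/PWCH, ldpte loads the even/odd PTEs of
 * the pair into the refill EntryLo registers, and tlbfill installs the
 * entry.
 */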
SYM_FUNC_START(handle_tlb_refill)
	csrwr		t0, LOONGARCH_CSR_TLBRSAVE
	csrrd		t0, LOONGARCH_CSR_PGD
	lddir		t0, t0, 3
#if CONFIG_PGTABLE_LEVELS > 3
	lddir		t0, t0, 2
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	lddir		t0, t0, 1
#endif
	ldpte		t0, 0
	ldpte		t0, 1
	tlbfill
	csrrd		t0, LOONGARCH_CSR_TLBRSAVE
	ertn
SYM_FUNC_END(handle_tlb_refill)
505