/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <asm/asm.h>
#include <asm/export.h>
#include <asm/loongarch.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>

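/*
 * invtlb op 5: invalidate the TLB entries (G = 0 only) matching both the
 * specified ASID and the specified virtual address.
 */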
#define INVTLB_ADDR_GFALSE_AND_ASID	5

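/*
 * Each page-table level holds PAGE_SIZE / 8 eight-byte entries, so every
 * level consumes (PAGE_SHIFT - 3) index bits of the virtual address
 * (e.g. 11 bits with the default 16KB pages).
 */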
#define PTRS_PER_PGD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PUD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PMD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PTE_BITS	(PAGE_SHIFT - 3)

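/*
 * Generate the slow-path stubs tlb_do_page_fault_0 (read) and
 * tlb_do_page_fault_1 (write): save full context, then pass pt_regs, the
 * write flag and the faulting address (CSR.BADV) to do_page_fault().
 */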
	.macro tlb_do_page_fault, write
	SYM_FUNC_START(tlb_do_page_fault_\write)
	SAVE_ALL
	csrrd		a2, LOONGARCH_CSR_BADV
	move		a0, sp
	REG_S		a2, sp, PT_BVADDR
	li.w		a1, \write
	bl		do_page_fault
	RESTORE_ALL_AND_RET
	SYM_FUNC_END(tlb_do_page_fault_\write)
	.endm

	tlb_do_page_fault 0
	tlb_do_page_fault 1

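/*
 * TLB protection faults (e.g. no-read, no-execute or privilege faults)
 * have no fast path: save full context and call do_page_fault() with
 * write == 0. BACKUP_T0T1 parks t0/t1 in the scratch CSRs that SAVE_ALL
 * later reloads them from.
 */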
SYM_FUNC_START(handle_tlb_protect)
	BACKUP_T0T1
	SAVE_ALL
	move		a0, sp
	move		a1, zero
	csrrd		a2, LOONGARCH_CSR_BADV
	REG_S		a2, sp, PT_BVADDR
	la_abs		t0, do_page_fault
	jirl		ra, t0, 0
	RESTORE_ALL_AND_RET
SYM_FUNC_END(handle_tlb_protect)

SYM_FUNC_START(handle_tlb_load)
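	/* Park t0, t1 and ra in scratch CSRs; the fast path never touches the stack */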
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
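	/*
	 * Kernel addresses have bit 63 set, so a negative BADV selects the
	 * vmalloc path; user addresses are walked from CSR.PGDL.
	 */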
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_load
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_load:
	/* Index into the PGD (eight-byte entries) */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For a huge mapping, the PMD entry does not point to a next-level
	 * table but contains the huge-page PTE itself. Check its PAGE_HUGE
	 * bit and jump to huge TLB processing if it is set.
	 */
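	/*
	 * Rotate right so that _PAGE_HUGE lands in the sign bit, where a
	 * single bltz tests it; the second rotri undoes the rotation for
	 * the non-huge case.
	 */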
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_load

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

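	/*
	 * Mark the PTE valid. Under SMP the update must be atomic against
	 * other CPUs, hence the ll/sc retry loop.
	 */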
#ifdef CONFIG_SMP
smp_pgtable_change_load:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	andi		ra, t0, _PAGE_PRESENT
	beqz		ra, nopage_tlb_load

	ori		t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_load
#else
	st.d		t0, t1, 0
#endif
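	/*
	 * Update the matching TLB entry in place: tlbsrch finds its index,
	 * and clearing bit 3 aligns the PTE address down to the even/odd
	 * pair that is loaded into TLBELO0/1 for tlbwr.
	 */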
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_load:
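	/* vmalloc range: walk the kernel page table (swapper_pg_dir) instead */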
	la_abs		t1, swapper_pg_dir
	b		vmalloc_done_load
#endif

	/* This is the entry point of the huge-page path. */
tlb_huge_update_load:
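	/*
	 * Under SMP the entry is (re)loaded with ll.d; under !SMP ra still
	 * holds the entry rotated by the PAGE_HUGE check and is rotated
	 * back before the update below.
	 */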
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_PRESENT
	beqz		t0, nopage_tlb_load

#ifdef CONFIG_SMP
	ori		t0, ra, _PAGE_VALID
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_load
	ori		t0, ra, _PAGE_VALID
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, _PAGE_VALID
	st.d		t0, t1, 0
#endif
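	/*
	 * Drop any entry the TLB may already hold for this address and ASID:
	 * tlbfill below inserts a new entry without checking for duplicates.
	 */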
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area of the configured huge page size.
	 * This is twice the size of the large TLB entry we intend to use,
	 * so a TLB entry covering half the huge page size is configured
	 * into each of entrylo0 and entrylo1 to map the contiguous huge
	 * PTE address space.
	 */
	/* Huge page: clear _PAGE_HUGE and copy HGLOBAL down to the GLOBAL bit */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1: add half the huge page size to reach its second half */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

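	/* Reset default page size */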
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_load:
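	/*
	 * Faulting path: restore ra here; t0/t1 are reloaded from the
	 * scratch CSRs by SAVE_ALL in the fault stub.
	 */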
	dbar		0x700
	csrrd		ra, EXCEPTION_KS2
	la_abs		t0, tlb_do_page_fault_0
	jr		t0
SYM_FUNC_END(handle_tlb_load)

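/*
 * With the hardware page table walker (PTW) enabled, hardware already
 * handles TLB refill and PTE updates, so reaching here means a genuine
 * fault: stash t0/t1 for SAVE_ALL and jump to the common fault stub.
 */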
SYM_FUNC_START(handle_tlb_load_ptw)
	csrwr		t0, LOONGARCH_CSR_KS0
	csrwr		t1, LOONGARCH_CSR_KS1
	la_abs		t0, tlb_do_page_fault_0
	jr		t0
SYM_FUNC_END(handle_tlb_load_ptw)

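/*
 * handle_tlb_store mirrors handle_tlb_load, except that the PTE must be
 * both present and writable, and the update sets _PAGE_VALID, _PAGE_DIRTY
 * and _PAGE_MODIFIED in one go.
 */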
SYM_FUNC_START(handle_tlb_store)
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_store
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_store:
	/* Index into the PGD (eight-byte entries) */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For a huge mapping, the PMD entry does not point to a next-level
	 * table but contains the huge-page PTE itself. Check its PAGE_HUGE
	 * bit and jump to huge TLB processing if it is set.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_store

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

#ifdef CONFIG_SMP
smp_pgtable_change_store:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	andi		ra, t0, _PAGE_PRESENT | _PAGE_WRITE
	xori		ra, ra, _PAGE_PRESENT | _PAGE_WRITE
	bnez		ra, nopage_tlb_store

	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_store
#else
	st.d		t0, t1, 0
#endif
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_store:
	la_abs		t1, swapper_pg_dir
	b		vmalloc_done_store
#endif

	/* This is the entry point of the huge-page path. */
tlb_huge_update_store:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_PRESENT | _PAGE_WRITE
	xori		t0, t0, _PAGE_PRESENT | _PAGE_WRITE
	bnez		t0, nopage_tlb_store

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_store
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d		t0, t1, 0
#endif
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area of the configured huge page size.
	 * This is twice the size of the large TLB entry we intend to use,
	 * so a TLB entry covering half the huge page size is configured
	 * into each of entrylo0 and entrylo1 to map the contiguous huge
	 * PTE address space.
	 */
	/* Huge page: clear _PAGE_HUGE and copy HGLOBAL down to the GLOBAL bit */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1: add half the huge page size to reach its second half */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_store:
	dbar		0x700
	csrrd		ra, EXCEPTION_KS2
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_FUNC_END(handle_tlb_store)

SYM_FUNC_START(handle_tlb_store_ptw)
	csrwr		t0, LOONGARCH_CSR_KS0
	csrwr		t1, LOONGARCH_CSR_KS1
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_FUNC_END(handle_tlb_store_ptw)

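/*
 * handle_tlb_modify handles writes to a mapped but not-yet-dirty page:
 * only _PAGE_WRITE needs checking before the dirty bits are set.
 */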
SYM_FUNC_START(handle_tlb_modify)
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_modify
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_modify:
	/* Index into the PGD (eight-byte entries) */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For a huge mapping, the PMD entry does not point to a next-level
	 * table but contains the huge-page PTE itself. Check its PAGE_HUGE
	 * bit and jump to huge TLB processing if it is set.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_modify

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

#ifdef CONFIG_SMP
smp_pgtable_change_modify:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	andi		ra, t0, _PAGE_WRITE
	beqz		ra, nopage_tlb_modify

	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_modify
#else
	st.d		t0, t1, 0
#endif
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_modify:
	la_abs		t1, swapper_pg_dir
	b		vmalloc_done_modify
#endif

	/* This is the entry point of the huge-page path. */
tlb_huge_update_modify:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_WRITE
	beqz		t0, nopage_tlb_modify

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_modify
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d		t0, t1, 0
#endif
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area of the configured huge page size.
	 * This is twice the size of the large TLB entry we intend to use,
	 * so a TLB entry covering half the huge page size is configured
	 * into each of entrylo0 and entrylo1 to map the contiguous huge
	 * PTE address space.
	 */
	/* Huge page: clear _PAGE_HUGE and copy HGLOBAL down to the GLOBAL bit */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1: add half the huge page size to reach its second half */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_modify:
	dbar		0x700
	csrrd		ra, EXCEPTION_KS2
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_FUNC_END(handle_tlb_modify)

SYM_FUNC_START(handle_tlb_modify_ptw)
	csrwr		t0, LOONGARCH_CSR_KS0
	csrwr		t1, LOONGARCH_CSR_KS1
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_FUNC_END(handle_tlb_modify_ptw)

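/*
 * TLB refill exception handler. Hardware enters this in direct address
 * translation mode, so t0 is saved in the dedicated TLBRSAVE CSR.
 * CSR.PGD reads as PGDL or PGDH according to the sign of the faulting
 * address; each lddir walks one directory level, and the two ldpte
 * lines load the even/odd PTE pair into TLBRELO0/1 for tlbfill.
 */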
SYM_FUNC_START(handle_tlb_refill)
	csrwr		t0, LOONGARCH_CSR_TLBRSAVE
	csrrd		t0, LOONGARCH_CSR_PGD
	lddir		t0, t0, 3
#if CONFIG_PGTABLE_LEVELS > 3
	lddir		t0, t0, 2
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	lddir		t0, t0, 1
#endif
	ldpte		t0, 0
	ldpte		t0, 1
	tlbfill
	csrrd		t0, LOONGARCH_CSR_TLBRSAVE
	ertn
SYM_FUNC_END(handle_tlb_refill)