/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <asm/asm.h>
#include <asm/loongarch.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>

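/* invtlb op 5: invalidate TLB entries with G=0 that match both ASID and VA */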
#define INVTLB_ADDR_GFALSE_AND_ASID	5

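/* Each page-table level holds PAGE_SIZE / 8 = 2^(PAGE_SHIFT - 3) 64-bit entries */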
#define PTRS_PER_PGD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PUD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PMD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PTE_BITS	(PAGE_SHIFT - 3)

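/*
 * Build the two slow-path trampolines into C: tlb_do_page_fault_0 for
 * read faults and tlb_do_page_fault_1 for write faults. The faulting
 * address (CSR.BADV) is saved into pt_regs before calling do_page_fault.
 */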
	.macro tlb_do_page_fault, write
	SYM_CODE_START(tlb_do_page_fault_\write)
	SAVE_ALL
	csrrd		a2, LOONGARCH_CSR_BADV
	move		a0, sp
	REG_S		a2, sp, PT_BVADDR
	li.w		a1, \write
	bl		do_page_fault
	RESTORE_ALL_AND_RET
	SYM_CODE_END(tlb_do_page_fault_\write)
	.endm

	tlb_do_page_fault 0
	tlb_do_page_fault 1

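/* Protection faults have no fast path: call do_page_fault directly */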
SYM_CODE_START(handle_tlb_protect)
	BACKUP_T0T1
	SAVE_ALL
	move		a0, sp
	move		a1, zero
	csrrd		a2, LOONGARCH_CSR_BADV
	REG_S		a2, sp, PT_BVADDR
	la_abs		t0, do_page_fault
	jirl		ra, t0, 0
	RESTORE_ALL_AND_RET
SYM_CODE_END(handle_tlb_protect)

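/*
 * Fast path for TLB load (read) faults: if the PTE is present, set
 * _PAGE_VALID and rewrite the matching TLB entry in place.
 */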
SYM_CODE_START(handle_tlb_load)
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2
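	/* t0/t1/ra were stashed in KScratch CSRs; the fast path needs no stack */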
	/*
	 * The vmalloc handling is not in the hotpath. Kernel addresses
	 * have the sign bit set, so bltz sends them to the vmalloc path.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_load
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_load:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
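	/* alsl.d: t1 += (index << 3), scaling by the 8-byte entry size */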
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge TLB entries, the PMD entry doesn't contain an address
	 * but the TLB PTE itself. Check the PAGE_HUGE bit and see if we
	 * need to jump to huge TLB processing.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
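	/* The rotate moves PAGE_HUGE into bit 63 so that bltz can test it */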
	bltz		ra, tlb_huge_update_load

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

#ifdef CONFIG_SMP
smp_pgtable_change_load:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	andi		ra, t0, _PAGE_PRESENT
	beqz		ra, nopage_tlb_load

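	/* Present: set _PAGE_VALID; on SMP, retry if the sc.d loses the race */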
	ori		t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_load
#else
	st.d		t0, t1, 0
#endif
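	/*
	 * tlbsrch finds the index of the matching TLB entry. Clearing bit 3
	 * of the PTE address points t1 at the even entry of the 16-byte PTE
	 * pair, so both halves can be loaded into TLBELO0/TLBELO1 for tlbwr.
	 */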
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

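	/* Kernel/vmalloc faults walk swapper_pg_dir instead of the user PGD */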
#ifdef CONFIG_64BIT
vmalloc_load:
	la_abs		t1, swapper_pg_dir
	b		vmalloc_done_load
#endif

	/* This is the entry point when the PMD maps a huge page. */
tlb_huge_update_load:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_PRESENT
	beqz		t0, nopage_tlb_load

#ifdef CONFIG_SMP
	ori		t0, ra, _PAGE_VALID
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_load
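	/* sc.d replaced t0 with the success flag; recompute the PTE value */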
	ori		t0, ra, _PAGE_VALID
#else
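	/* !SMP: ra still holds the PTE rotated by the PAGE_HUGE test; undo it */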
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, _PAGE_VALID
	st.d		t0, t1, 0
#endif
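	/*
	 * Invalidate any stale entry for this address/ASID so the tlbfill
	 * below cannot create a duplicate (conflicting) TLB entry.
	 */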
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: move the Global bit from _PAGE_HGLOBAL to _PAGE_GLOBAL */
	xori		t0, t0, _PAGE_HUGE
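	/* _PAGE_HUGE is known to be set here, so xori clears it */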
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1: advance the PA by half the huge page size */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

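	/* Reset default page size */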
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

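	/* No valid page: hand off to the C page fault path (read fault) */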
nopage_tlb_load:
	dbar		0x700
	csrrd		ra, EXCEPTION_KS2
	la_abs		t0, tlb_do_page_fault_0
	jr		t0
SYM_CODE_END(handle_tlb_load)

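/*
 * The *_ptw variants are installed when the hardware page table walker
 * is enabled: hardware performs the TLB refill itself, so anything that
 * still traps here is a genuine page fault and goes straight to C.
 */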
SYM_CODE_START(handle_tlb_load_ptw)
	csrwr		t0, LOONGARCH_CSR_KS0
	csrwr		t1, LOONGARCH_CSR_KS1
	la_abs		t0, tlb_do_page_fault_0
	jr		t0
SYM_CODE_END(handle_tlb_load_ptw)

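/*
 * Fast path for TLB store faults: if the PTE is present and writable,
 * mark it valid and dirty, then rewrite the matching TLB entry.
 */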
SYM_CODE_START(handle_tlb_store)
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath. Kernel addresses
	 * have the sign bit set, so bltz sends them to the vmalloc path.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_store
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_store:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge TLB entries, the PMD entry doesn't contain an address
	 * but the TLB PTE itself. Check the PAGE_HUGE bit and see if we
	 * need to jump to huge TLB processing.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_store

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

#ifdef CONFIG_SMP
smp_pgtable_change_store:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
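	/* andi/xori leave zero only if both _PAGE_PRESENT and _PAGE_WRITE are set */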
	andi		ra, t0, _PAGE_PRESENT | _PAGE_WRITE
	xori		ra, ra, _PAGE_PRESENT | _PAGE_WRITE
	bnez		ra, nopage_tlb_store

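	/* A store dirties the page: set VALID plus both dirty bits */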
	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_store
#else
	st.d		t0, t1, 0
#endif
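	/* Find the matching entry, point at the even PTE of the pair, rewrite it */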
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_store:
	la_abs		t1, swapper_pg_dir
	b		vmalloc_done_store
#endif

	/* This is the entry point when the PMD maps a huge page. */
tlb_huge_update_store:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_PRESENT | _PAGE_WRITE
	xori		t0, t0, _PAGE_PRESENT | _PAGE_WRITE
	bnez		t0, nopage_tlb_store

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_store
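	/* sc.d replaced t0 with the success flag; recompute the PTE value */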
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
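	/* !SMP: ra still holds the PTE rotated by the PAGE_HUGE test; undo it */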
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d		t0, t1, 0
#endif
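	/*
	 * Invalidate any stale entry for this address/ASID so the tlbfill
	 * below cannot create a duplicate (conflicting) TLB entry.
	 */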
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: move the Global bit from _PAGE_HGLOBAL to _PAGE_GLOBAL */
	xori		t0, t0, _PAGE_HUGE
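	/* _PAGE_HUGE is known to be set here, so xori clears it */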
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1: advance the PA by half the huge page size */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

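	/* Not present or not writable: hand off to the C page fault path */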
nopage_tlb_store:
	dbar		0x700
	csrrd		ra, EXCEPTION_KS2
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_store)

SYM_CODE_START(handle_tlb_store_ptw)
	csrwr		t0, LOONGARCH_CSR_KS0
	csrwr		t1, LOONGARCH_CSR_KS1
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_store_ptw)

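/*
 * Fast path for TLB modify faults (a store hit a clean TLB entry): if
 * the PTE is writable, mark it valid and dirty and rewrite the entry.
 */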
SYM_CODE_START(handle_tlb_modify)
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath. Kernel addresses
	 * have the sign bit set, so bltz sends them to the vmalloc path.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_modify
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_modify:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge TLB entries, the PMD entry doesn't contain an address
	 * but the TLB PTE itself. Check the PAGE_HUGE bit and see if we
	 * need to jump to huge TLB processing.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_modify

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

#ifdef CONFIG_SMP
smp_pgtable_change_modify:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
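	/* A modify fault means the entry exists; it only has to be writable */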
	andi		ra, t0, _PAGE_WRITE
	beqz		ra, nopage_tlb_modify

	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_modify
#else
	st.d		t0, t1, 0
#endif
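	/* Find the matching entry, point at the even PTE of the pair, rewrite it */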
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_modify:
	la_abs		t1, swapper_pg_dir
	b		vmalloc_done_modify
#endif

	/* This is the entry point when the PMD maps a huge page. */
tlb_huge_update_modify:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_WRITE
	beqz		t0, nopage_tlb_modify

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_modify
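	/* sc.d replaced t0 with the success flag; recompute the PTE value */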
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
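	/* !SMP: ra still holds the PTE rotated by the PAGE_HUGE test; undo it */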
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d		t0, t1, 0
#endif
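	/*
	 * Invalidate any stale entry for this address/ASID so the tlbfill
	 * below cannot create a duplicate (conflicting) TLB entry.
	 */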
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: move the Global bit from _PAGE_HGLOBAL to _PAGE_GLOBAL */
	xori		t0, t0, _PAGE_HUGE
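	/* _PAGE_HUGE is known to be set here, so xori clears it */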
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1: advance the PA by half the huge page size */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

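	/* Not writable: hand off to the C page fault path (write fault) */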
nopage_tlb_modify:
	dbar		0x700
	csrrd		ra, EXCEPTION_KS2
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_modify)

SYM_CODE_START(handle_tlb_modify_ptw)
	csrwr		t0, LOONGARCH_CSR_KS0
	csrwr		t1, LOONGARCH_CSR_KS1
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_modify_ptw)

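/*
 * TLB refill exception handler: entered via CSR.TLBRENTRY with only the
 * TLBRSAVE scratch CSR available. CSR.PGD selects PGDL or PGDH from the
 * faulting address, each lddir walks one page-table level, and the two
 * ldpte's load the even/odd PTEs of the pair into TLBRELO0/TLBRELO1.
 */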
SYM_CODE_START(handle_tlb_refill)
	csrwr		t0, LOONGARCH_CSR_TLBRSAVE
	csrrd		t0, LOONGARCH_CSR_PGD
	lddir		t0, t0, 3
#if CONFIG_PGTABLE_LEVELS > 3
	lddir		t0, t0, 2
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	lddir		t0, t0, 1
#endif
	ldpte		t0, 0
	ldpte		t0, 1
	tlbfill
	csrrd		t0, LOONGARCH_CSR_TLBRSAVE
	ertn
SYM_CODE_END(handle_tlb_refill)