/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <asm/asm.h>
#include <asm/export.h>
#include <asm/loongarch.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>

#define PTRS_PER_PGD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PUD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PMD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PTE_BITS	(PAGE_SHIFT - 3)

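/*
 * Each page-table page holds 2^(PAGE_SHIFT - 3) 8-byte entries, hence
 * the (PAGE_SHIFT - 3) index widths above.
 */

/*
 * tlb_do_page_fault_0/1 are the slow-path tails of the handlers below:
 * save all registers and call do_page_fault() with write = 0 (loads)
 * or write = 1 (stores).
 */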
	.macro tlb_do_page_fault, write
	SYM_FUNC_START(tlb_do_page_fault_\write)
	SAVE_ALL
	csrrd		a2, LOONGARCH_CSR_BADV
	move		a0, sp
	REG_S		a2, sp, PT_BVADDR
	li.w		a1, \write
	la.abs		t0, do_page_fault
	jirl		ra, t0, 0
	RESTORE_ALL_AND_RET
	SYM_FUNC_END(tlb_do_page_fault_\write)
	.endm

	tlb_do_page_fault 0
	tlb_do_page_fault 1

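/*
 * handle_tlb_protect: the access violated the permissions recorded in
 * a valid TLB entry, so there is nothing the fast path could fix up;
 * save the full register state and hand the fault address straight to
 * do_page_fault().
 */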
SYM_FUNC_START(handle_tlb_protect)
	BACKUP_T0T1
	SAVE_ALL
	move		a0, sp
	move		a1, zero
	csrrd		a2, LOONGARCH_CSR_BADV
	REG_S		a2, sp, PT_BVADDR
	la.abs		t0, do_page_fault
	jirl		ra, t0, 0
	RESTORE_ALL_AND_RET
SYM_FUNC_END(handle_tlb_protect)

SYM_FUNC_START(handle_tlb_load)
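	/*
	 * Stash t0, t1 and ra in the KScratch CSRs so the fast path can
	 * run without saving anything on the kernel stack.
	 */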
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath: kernel addresses
	 * (sign bit of BADV set) are looked up in swapper_pg_dir rather
	 * than the per-process page tables.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_load
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_load:
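	/*
	 * Walk the page tables by hand: at each level, extract the index
	 * bits for that level from BADV and scale them by the 8-byte
	 * entry size (alsl.d with shift 3) to reach the next table.
	 */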
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge TLB entries the PMD entry doesn't contain an address
	 * but the huge-page PTE itself. Rotate _PAGE_HUGE up into the
	 * sign bit so that a single bltz decides whether to jump to the
	 * huge TLB processing path; the rotation is undone afterwards.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_load

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

#ifdef CONFIG_SMP
smp_pgtable_change_load:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	andi		ra, t0, _PAGE_PRESENT
	beqz		ra, nopage_tlb_load

	ori		t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_load
#else
	st.d		t0, t1, 0
#endif
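	/*
	 * Find the matching TLB entry, then load the even/odd PTE pair:
	 * clearing bit 3 of the PTE address rounds it down to the even
	 * PTE, since each TLB entry maps two consecutive pages.
	 */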
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_load:
	la.abs		t1, swapper_pg_dir
	b		vmalloc_done_load
#endif

	/* This is the entry point when we have detected a huge page. */
tlb_huge_update_load:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_PRESENT
	beqz		t0, nopage_tlb_load

#ifdef CONFIG_SMP
	ori		t0, ra, _PAGE_VALID
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_load
	/* sc.d clobbered t0 with its success flag, rebuild the PTE value */
	ori		t0, ra, _PAGE_VALID
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, _PAGE_VALID
	st.d		t0, t1, 0
#endif
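	/*
	 * Get rid of the old (non-huge) entry: locate it with tlbsrch,
	 * set the EHINV bit and overwrite it with an explicitly invalid
	 * entry via tlbwr, then clear EHINV again so the tlbfill below
	 * can write a fresh huge-page entry.
	 */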
	tlbsrch
	addu16i.d	t1, zero, -(CSR_TLBIDX_EHINV >> 16)
	addi.d		ra, t1, 0
	csrxchg		ra, t1, LOONGARCH_CSR_TLBIDX
	tlbwr

	csrxchg		zero, t1, LOONGARCH_CSR_TLBIDX

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use,
	 * so a TLB entry half the size of the configured
	 * huge page size is written into entrylo0 and
	 * entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: Move Global bit */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1: advance the PA by half the huge page size */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

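	/* Reset default page size */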
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_load:
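	/*
	 * t0 and t1 are still held in EXCEPTION_KS0/KS1, from where the
	 * slow path's SAVE_ALL restores them (handle_tlb_protect backs
	 * them up there explicitly with BACKUP_T0T1 for the same
	 * reason); only ra needs restoring before tailing into the
	 * common fault code.
	 */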
	dbar		0
	csrrd		ra, EXCEPTION_KS2
	la.abs		t0, tlb_do_page_fault_0
	jr		t0
SYM_FUNC_END(handle_tlb_load)

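/*
 * handle_tlb_store mirrors handle_tlb_load, except that the PTE must be
 * both present and writable, and the fast path also sets the dirty and
 * modified bits.
 */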
SYM_FUNC_START(handle_tlb_store)
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_store
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_store:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge TLB entries the PMD entry doesn't contain an address
	 * but the huge-page PTE itself. Rotate _PAGE_HUGE up into the
	 * sign bit so that a single bltz decides whether to jump to the
	 * huge TLB processing path; the rotation is undone afterwards.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_store

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

#ifdef CONFIG_SMP
smp_pgtable_change_store:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	/* The PTE must be both present and writable to stay on the fast path */
	andi		ra, t0, _PAGE_PRESENT | _PAGE_WRITE
	xori		ra, ra, _PAGE_PRESENT | _PAGE_WRITE
	bnez		ra, nopage_tlb_store

	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_store
#else
	st.d		t0, t1, 0
#endif
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_store:
	la.abs		t1, swapper_pg_dir
	b		vmalloc_done_store
#endif

	/* This is the entry point when we have detected a huge page. */
tlb_huge_update_store:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_PRESENT | _PAGE_WRITE
	xori		t0, t0, _PAGE_PRESENT | _PAGE_WRITE
	bnez		t0, nopage_tlb_store

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_store
	/* sc.d clobbered t0 with its success flag, rebuild the PTE value */
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d		t0, t1, 0
#endif
	tlbsrch
	addu16i.d	t1, zero, -(CSR_TLBIDX_EHINV >> 16)
	addi.d		ra, t1, 0
	csrxchg		ra, t1, LOONGARCH_CSR_TLBIDX
	tlbwr

	csrxchg		zero, t1, LOONGARCH_CSR_TLBIDX
	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use,
	 * so a TLB entry half the size of the configured
	 * huge page size is written into entrylo0 and
	 * entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: Move Global bit */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1: advance the PA by half the huge page size */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_store:
	dbar		0
	csrrd		ra, EXCEPTION_KS2
	la.abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_FUNC_END(handle_tlb_store)

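/*
 * handle_tlb_modify handles a write hit on an entry whose dirty bit is
 * not yet set: only _PAGE_WRITE needs checking here, and the PTE is
 * marked valid, dirty and modified just as in handle_tlb_store.
 */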
SYM_FUNC_START(handle_tlb_modify)
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_modify
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_modify:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge TLB entries the PMD entry doesn't contain an address
	 * but the huge-page PTE itself. Rotate _PAGE_HUGE up into the
	 * sign bit so that a single bltz decides whether to jump to the
	 * huge TLB processing path; the rotation is undone afterwards.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_modify

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

#ifdef CONFIG_SMP
smp_pgtable_change_modify:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	andi		ra, t0, _PAGE_WRITE
	beqz		ra, nopage_tlb_modify

	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_modify
#else
	st.d		t0, t1, 0
#endif
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_modify:
	la.abs		t1, swapper_pg_dir
	b		vmalloc_done_modify
#endif

	/* This is the entry point when we have detected a huge page. */
tlb_huge_update_modify:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_WRITE
	beqz		t0, nopage_tlb_modify

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_modify
	/* sc.d clobbered t0 with its success flag, rebuild the PTE value */
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d		t0, t1, 0
#endif
	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use,
	 * so a TLB entry half the size of the configured
	 * huge page size is written into entrylo0 and
	 * entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: Move Global bit */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1: advance the PA by half the huge page size */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbwr

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_modify:
	dbar		0
	csrrd		ra, EXCEPTION_KS2
	la.abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_FUNC_END(handle_tlb_modify)

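/*
 * The TLB refill handler runs in its own exception context, so t0 is
 * preserved in the dedicated TLBRSAVE scratch CSR. Each lddir
 * dereferences one directory level of the walk configured in
 * CSR.PWCL/CSR.PWCH, and the two ldpte instructions load the even/odd
 * PTE pair into TLBRELO0/TLBRELO1 before tlbfill writes the new entry.
 */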
SYM_FUNC_START(handle_tlb_refill)
	csrwr		t0, LOONGARCH_CSR_TLBRSAVE
	csrrd		t0, LOONGARCH_CSR_PGD
	lddir		t0, t0, 3	/* PGD level */
#if CONFIG_PGTABLE_LEVELS > 3
	lddir		t0, t0, 2	/* PUD level */
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	lddir		t0, t0, 1	/* PMD level */
#endif
	ldpte		t0, 0	/* even PTE of the pair */
	ldpte		t0, 1	/* odd PTE of the pair */
	tlbfill
	csrrd		t0, LOONGARCH_CSR_TLBRSAVE
	ertn
SYM_FUNC_END(handle_tlb_refill)