/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <asm/asm.h>
#include <asm/export.h>
#include <asm/loongarch.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>

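/*
 * tlb_do_page_fault expands to tlb_do_page_fault_0 (read) and
 * tlb_do_page_fault_1 (write): stackful wrappers that save all
 * registers, record the faulting address in pt_regs and hand
 * off to the C do_page_fault() handler.
 */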
	.macro tlb_do_page_fault, write
	SYM_FUNC_START(tlb_do_page_fault_\write)
	SAVE_ALL
	csrrd	a2, LOONGARCH_CSR_BADV
	move	a0, sp
	REG_S	a2, sp, PT_BVADDR
	li.w	a1, \write
	la.abs	t0, do_page_fault
	jirl	ra, t0, 0
	RESTORE_ALL_AND_RET
	SYM_FUNC_END(tlb_do_page_fault_\write)
	.endm

	tlb_do_page_fault 0
	tlb_do_page_fault 1

SYM_FUNC_START(handle_tlb_protect)
	BACKUP_T0T1
	SAVE_ALL
	move	a0, sp
	move	a1, zero
	csrrd	a2, LOONGARCH_CSR_BADV
	REG_S	a2, sp, PT_BVADDR
	la.abs	t0, do_page_fault
	jirl	ra, t0, 0
	RESTORE_ALL_AND_RET
SYM_FUNC_END(handle_tlb_protect)

SYM_FUNC_START(handle_tlb_load)
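	/*
	 * No kernel stack is set up here, so stash t0/t1/ra in
	 * the KS scratch CSRs; leave_load restores them before ertn.
	 */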
	csrwr	t0, EXCEPTION_KS0
	csrwr	t1, EXCEPTION_KS1
	csrwr	ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
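	/*
	 * BADV is sign-extended: kernel/vmalloc addresses are
	 * negative, so bltz diverts them to swapper_pg_dir, while
	 * user addresses fall through and walk from PGDL.
	 */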
	csrrd	t0, LOONGARCH_CSR_BADV
	bltz	t0, vmalloc_load
	csrrd	t1, LOONGARCH_CSR_PGDL

vmalloc_done_load:
	/* Get PGD offset in bytes */
	srli.d	t0, t0, PGDIR_SHIFT
	andi	t0, t0, (PTRS_PER_PGD - 1)
	slli.d	t0, t0, 3
	add.d	t1, t1, t0
#if CONFIG_PGTABLE_LEVELS > 3
	csrrd	t0, LOONGARCH_CSR_BADV
	ld.d	t1, t1, 0
	srli.d	t0, t0, PUD_SHIFT
	andi	t0, t0, (PTRS_PER_PUD - 1)
	slli.d	t0, t0, 3
	add.d	t1, t1, t0
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	csrrd	t0, LOONGARCH_CSR_BADV
	ld.d	t1, t1, 0
	srli.d	t0, t0, PMD_SHIFT
	andi	t0, t0, (PTRS_PER_PMD - 1)
	slli.d	t0, t0, 3
	add.d	t1, t1, t0
#endif
	ld.d	ra, t1, 0

	/*
	 * For huge TLB entries the PMD entry doesn't contain an
	 * address but the TLB PTE itself. Check the _PAGE_HUGE bit
	 * to see if we need to jump to huge TLB processing.
	 */
	andi	t0, ra, _PAGE_HUGE
	bnez	t0, tlb_huge_update_load

	csrrd	t0, LOONGARCH_CSR_BADV
	srli.d	t0, t0, PAGE_SHIFT
	andi	t0, t0, (PTRS_PER_PTE - 1)
	slli.d	t0, t0, _PTE_T_LOG2
	add.d	t1, ra, t0

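	/*
	 * Load the PTE; on SMP use ll/sc so the _PAGE_VALID update
	 * below retries if another CPU changes the PTE under us.
	 */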
#ifdef CONFIG_SMP
smp_pgtable_change_load:
	ll.d	t0, t1, 0
#else
	ld.d	t0, t1, 0
#endif
	tlbsrch

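	/* Not present? Fall back to the C page fault path */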
	srli.d	ra, t0, _PAGE_PRESENT_SHIFT
	andi	ra, ra, 1
	beqz	ra, nopage_tlb_load

	ori	t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
	sc.d	t0, t1, 0
	beqz	t0, smp_pgtable_change_load
#else
	st.d	t0, t1, 0
#endif
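	/*
	 * ori/xori clear bit 3 of the PTE address, rounding it down
	 * to the even entry of the even/odd PTE pair; the two PTEs
	 * then go into TLBELO0 and TLBELO1.
	 */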
	ori	t1, t1, 8
	xori	t1, t1, 8
	ld.d	t0, t1, 0
	ld.d	t1, t1, 8
	csrwr	t0, LOONGARCH_CSR_TLBELO0
	csrwr	t1, LOONGARCH_CSR_TLBELO1
	tlbwr
leave_load:
	csrrd	t0, EXCEPTION_KS0
	csrrd	t1, EXCEPTION_KS1
	csrrd	ra, EXCEPTION_KS2
	ertn
#ifdef CONFIG_64BIT
vmalloc_load:
	la.abs	t1, swapper_pg_dir
	b	vmalloc_done_load
#endif

	/*
	 * This is the entry point when the page table walk above
	 * spots a huge page in the PMD entry.
	 */
tlb_huge_update_load:
#ifdef CONFIG_SMP
	ll.d	t0, t1, 0
#else
	ld.d	t0, t1, 0
#endif
	srli.d	ra, t0, _PAGE_PRESENT_SHIFT
	andi	ra, ra, 1
	beqz	ra, nopage_tlb_load
	tlbsrch

	ori	t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
	sc.d	t0, t1, 0
	beqz	t0, tlb_huge_update_load
	ld.d	t0, t1, 0
#else
	st.d	t0, t1, 0
#endif
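	/*
	 * Invalidate the stale entry found by tlbsrch: set the
	 * EHINV bit in TLBIDX, write the (now invalid) entry back
	 * with tlbwr, then clear EHINV again.
	 */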
	addu16i.d	t1, zero, -(CSR_TLBIDX_EHINV >> 16)
	addi.d		ra, t1, 0
	csrxchg		ra, t1, LOONGARCH_CSR_TLBIDX
	tlbwr

	csrxchg	zero, t1, LOONGARCH_CSR_TLBIDX

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the size
	 * of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: Move Global bit */
	xori	t0, t0, _PAGE_HUGE
	lu12i.w	t1, _PAGE_HGLOBAL >> 12
	and	t1, t0, t1
	srli.d	t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or	t0, t0, t1

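	/*
	 * csrwr swaps t0 with the old CSR contents, so keep a
	 * copy in ra across the TLBELO0 write.
	 */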
	addi.d	ra, t0, 0
	csrwr	t0, LOONGARCH_CSR_TLBELO0
	addi.d	t0, ra, 0

	/* Convert to entrylo1: advance the PA by half the huge page */
	addi.d	t1, zero, 1
	slli.d	t1, t1, (HPAGE_SHIFT - 1)
	add.d	t0, t0, t1
	csrwr	t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

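	/* Reset default page size */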
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

nopage_tlb_load:
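	/* Full barrier, then fall back to the C page fault path */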
	dbar	0
	csrrd	ra, EXCEPTION_KS2
	la.abs	t0, tlb_do_page_fault_0
	jr	t0
SYM_FUNC_END(handle_tlb_load)

SYM_FUNC_START(handle_tlb_store)
	csrwr	t0, EXCEPTION_KS0
	csrwr	t1, EXCEPTION_KS1
	csrwr	ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd	t0, LOONGARCH_CSR_BADV
	bltz	t0, vmalloc_store
	csrrd	t1, LOONGARCH_CSR_PGDL

vmalloc_done_store:
	/* Get PGD offset in bytes */
	srli.d	t0, t0, PGDIR_SHIFT
	andi	t0, t0, (PTRS_PER_PGD - 1)
	slli.d	t0, t0, 3
	add.d	t1, t1, t0

#if CONFIG_PGTABLE_LEVELS > 3
	csrrd	t0, LOONGARCH_CSR_BADV
	ld.d	t1, t1, 0
	srli.d	t0, t0, PUD_SHIFT
	andi	t0, t0, (PTRS_PER_PUD - 1)
	slli.d	t0, t0, 3
	add.d	t1, t1, t0
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	csrrd	t0, LOONGARCH_CSR_BADV
	ld.d	t1, t1, 0
	srli.d	t0, t0, PMD_SHIFT
	andi	t0, t0, (PTRS_PER_PMD - 1)
	slli.d	t0, t0, 3
	add.d	t1, t1, t0
#endif
	ld.d	ra, t1, 0

	/*
	 * For huge TLB entries the PMD entry doesn't contain an
	 * address but the TLB PTE itself. Check the _PAGE_HUGE bit
	 * to see if we need to jump to huge TLB processing.
	 */
	andi	t0, ra, _PAGE_HUGE
	bnez	t0, tlb_huge_update_store

	csrrd	t0, LOONGARCH_CSR_BADV
	srli.d	t0, t0, PAGE_SHIFT
	andi	t0, t0, (PTRS_PER_PTE - 1)
	slli.d	t0, t0, _PTE_T_LOG2
	add.d	t1, ra, t0

#ifdef CONFIG_SMP
smp_pgtable_change_store:
	ll.d	t0, t1, 0
#else
	ld.d	t0, t1, 0
#endif
	tlbsrch

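	/* Fault unless both _PAGE_PRESENT and _PAGE_WRITE are set */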
	srli.d	ra, t0, _PAGE_PRESENT_SHIFT
	andi	ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
	xori	ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
	bnez	ra, nopage_tlb_store

	ori	t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d	t0, t1, 0
	beqz	t0, smp_pgtable_change_store
#else
	st.d	t0, t1, 0
#endif

	ori	t1, t1, 8
	xori	t1, t1, 8
	ld.d	t0, t1, 0
	ld.d	t1, t1, 8
	csrwr	t0, LOONGARCH_CSR_TLBELO0
	csrwr	t1, LOONGARCH_CSR_TLBELO1
	tlbwr
leave_store:
	csrrd	t0, EXCEPTION_KS0
	csrrd	t1, EXCEPTION_KS1
	csrrd	ra, EXCEPTION_KS2
	ertn
#ifdef CONFIG_64BIT
vmalloc_store:
	la.abs	t1, swapper_pg_dir
	b	vmalloc_done_store
#endif

	/*
	 * This is the entry point when the page table walk above
	 * spots a huge page in the PMD entry.
	 */
tlb_huge_update_store:
#ifdef CONFIG_SMP
	ll.d	t0, t1, 0
#else
	ld.d	t0, t1, 0
#endif
	srli.d	ra, t0, _PAGE_PRESENT_SHIFT
	andi	ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
	xori	ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
	bnez	ra, nopage_tlb_store

	tlbsrch
	ori	t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)

#ifdef CONFIG_SMP
	sc.d	t0, t1, 0
	beqz	t0, tlb_huge_update_store
	ld.d	t0, t1, 0
#else
	st.d	t0, t1, 0
#endif
	addu16i.d	t1, zero, -(CSR_TLBIDX_EHINV >> 16)
	addi.d		ra, t1, 0
	csrxchg		ra, t1, LOONGARCH_CSR_TLBIDX
	tlbwr

	csrxchg	zero, t1, LOONGARCH_CSR_TLBIDX
	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the size
	 * of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: Move Global bit */
	xori	t0, t0, _PAGE_HUGE
	lu12i.w	t1, _PAGE_HGLOBAL >> 12
	and	t1, t0, t1
	srli.d	t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or	t0, t0, t1

	addi.d	ra, t0, 0
	csrwr	t0, LOONGARCH_CSR_TLBELO0
	addi.d	t0, ra, 0

	/* Convert to entrylo1: advance the PA by half the huge page */
	addi.d	t1, zero, 1
	slli.d	t1, t1, (HPAGE_SHIFT - 1)
	add.d	t0, t0, t1
	csrwr	t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

nopage_tlb_store:
	dbar	0
	csrrd	ra, EXCEPTION_KS2
	la.abs	t0, tlb_do_page_fault_1
	jr	t0
SYM_FUNC_END(handle_tlb_store)

SYM_FUNC_START(handle_tlb_modify)
	csrwr	t0, EXCEPTION_KS0
	csrwr	t1, EXCEPTION_KS1
	csrwr	ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd	t0, LOONGARCH_CSR_BADV
	bltz	t0, vmalloc_modify
	csrrd	t1, LOONGARCH_CSR_PGDL

vmalloc_done_modify:
	/* Get PGD offset in bytes */
	srli.d	t0, t0, PGDIR_SHIFT
	andi	t0, t0, (PTRS_PER_PGD - 1)
	slli.d	t0, t0, 3
	add.d	t1, t1, t0
#if CONFIG_PGTABLE_LEVELS > 3
	csrrd	t0, LOONGARCH_CSR_BADV
	ld.d	t1, t1, 0
	srli.d	t0, t0, PUD_SHIFT
	andi	t0, t0, (PTRS_PER_PUD - 1)
	slli.d	t0, t0, 3
	add.d	t1, t1, t0
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	csrrd	t0, LOONGARCH_CSR_BADV
	ld.d	t1, t1, 0
	srli.d	t0, t0, PMD_SHIFT
	andi	t0, t0, (PTRS_PER_PMD - 1)
	slli.d	t0, t0, 3
	add.d	t1, t1, t0
#endif
	ld.d	ra, t1, 0

	/*
	 * For huge TLB entries the PMD entry doesn't contain an
	 * address but the TLB PTE itself. Check the _PAGE_HUGE bit
	 * to see if we need to jump to huge TLB processing.
	 */
	andi	t0, ra, _PAGE_HUGE
	bnez	t0, tlb_huge_update_modify

	csrrd	t0, LOONGARCH_CSR_BADV
	srli.d	t0, t0, PAGE_SHIFT
	andi	t0, t0, (PTRS_PER_PTE - 1)
	slli.d	t0, t0, _PTE_T_LOG2
	add.d	t1, ra, t0

#ifdef CONFIG_SMP
smp_pgtable_change_modify:
	ll.d	t0, t1, 0
#else
	ld.d	t0, t1, 0
#endif
	tlbsrch

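	/* Fault unless the PTE is writable */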
	srli.d	ra, t0, _PAGE_WRITE_SHIFT
	andi	ra, ra, 1
	beqz	ra, nopage_tlb_modify

	ori	t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d	t0, t1, 0
	beqz	t0, smp_pgtable_change_modify
#else
	st.d	t0, t1, 0
#endif
	ori	t1, t1, 8
	xori	t1, t1, 8
	ld.d	t0, t1, 0
	ld.d	t1, t1, 8
	csrwr	t0, LOONGARCH_CSR_TLBELO0
	csrwr	t1, LOONGARCH_CSR_TLBELO1
	tlbwr
leave_modify:
	csrrd	t0, EXCEPTION_KS0
	csrrd	t1, EXCEPTION_KS1
	csrrd	ra, EXCEPTION_KS2
	ertn
#ifdef CONFIG_64BIT
vmalloc_modify:
	la.abs	t1, swapper_pg_dir
	b	vmalloc_done_modify
#endif

	/*
	 * This is the entry point when the page table walk above
	 * spots a huge page in the PMD entry.
	 */
tlb_huge_update_modify:
#ifdef CONFIG_SMP
	ll.d	t0, t1, 0
#else
	ld.d	t0, t1, 0
#endif

	srli.d	ra, t0, _PAGE_WRITE_SHIFT
	andi	ra, ra, 1
	beqz	ra, nopage_tlb_modify

	tlbsrch
	ori	t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)

#ifdef CONFIG_SMP
	sc.d	t0, t1, 0
	beqz	t0, tlb_huge_update_modify
	ld.d	t0, t1, 0
#else
	st.d	t0, t1, 0
#endif
	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the size
	 * of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: Move Global bit */
	xori	t0, t0, _PAGE_HUGE
	lu12i.w	t1, _PAGE_HGLOBAL >> 12
	and	t1, t0, t1
	srli.d	t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or	t0, t0, t1

	addi.d	ra, t0, 0
	csrwr	t0, LOONGARCH_CSR_TLBELO0
	addi.d	t0, ra, 0

	/* Convert to entrylo1: advance the PA by half the huge page */
	addi.d	t1, zero, 1
	slli.d	t1, t1, (HPAGE_SHIFT - 1)
	add.d	t0, t0, t1
	csrwr	t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

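	/*
	 * tlbsrch above located the existing (huge) entry, so
	 * tlbwr rewrites it in place; no invalidate + tlbfill
	 * dance as in the load/store paths.
	 */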
	tlbwr

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

nopage_tlb_modify:
	dbar	0
	csrrd	ra, EXCEPTION_KS2
	la.abs	t0, tlb_do_page_fault_1
	jr	t0
SYM_FUNC_END(handle_tlb_modify)

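/*
 * TLB refill exceptions use a separate entry and scratch CSR
 * (TLBRSAVE). The walk is done with lddir/ldpte: lddir follows
 * one directory level, ldpte 0/1 loads the even/odd PTEs into
 * TLBRELO0/TLBRELO1, and tlbfill writes the new entry.
 */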
SYM_FUNC_START(handle_tlb_refill)
	csrwr	t0, LOONGARCH_CSR_TLBRSAVE
	csrrd	t0, LOONGARCH_CSR_PGD
	lddir	t0, t0, 3	/* PGD level */
#if CONFIG_PGTABLE_LEVELS > 3
	lddir	t0, t0, 2	/* PUD level */
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	lddir	t0, t0, 1	/* PMD level */
#endif
	ldpte	t0, 0	/* even PTE */
	ldpte	t0, 1	/* odd PTE */
	tlbfill
	csrrd	t0, LOONGARCH_CSR_TLBRSAVE
	ertn
SYM_FUNC_END(handle_tlb_refill)