/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost        (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza      (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek  (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

	.text
	.align		32

kvmap_itlb:
	/* g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_IMMU, %g4

	/* The kernel executes in context zero, therefore we do not
	 * need to clear the context ID bits out of %g4 here.
	 */

	/* sun4v_itlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_itlb_4v:

	/* Catch kernel NULL pointer calls.  */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_longpath
	 nop

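	/* Probe the kernel TSB for the missing address.  A hit
	 * branches straight to kvmap_itlb_load with the TTE in %g5,
	 * a miss falls through to the range checks below.
	 */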
	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)

kvmap_itlb_tsb_miss:
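	/* Figure out what kind of kernel address this is.  Below
	 * LOW_OBP_ADDRESS, or at and above 4GB, we resolve it with a
	 * kernel page table walk; the window in between belongs to
	 * the firmware and is resolved from the translations saved
	 * from the OpenBoot PROM.
	 */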
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_obp
	 nop

kvmap_itlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)

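	/* The walk left the TTE in %g5, and %g1 still points at the
	 * kernel TSB entry chosen by the lookup above; cache the
	 * translation there so the next miss on this address is a
	 * TSB hit.
	 */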
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_itlb_load:

661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

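	/* A .sun4v_2insn_patch entry records an address (the .word
	 * 661b above) plus two replacement instructions.  Early boot
	 * code rewrites the two instructions at that address when
	 * running on sun4v, so sun4u uses the stxa/retry directly
	 * while sun4v turns them into nops and falls through to the
	 * branch below.
	 */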
	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v TLB load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_itlb_load
	 mov		%g5, %g3

kvmap_itlb_longpath:

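	/* Switch to the global register set used by the fault
	 * handling code: on sun4u the TLB miss handlers run on the
	 * MMU globals, and toggling PSTATE_AG/PSTATE_MG here moves us
	 * onto the alternate globals; the patch below substitutes
	 * SET_GL(1) for sun4v.
	 */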
661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	nop
	.previous

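	/* Hand off to the common fault handler with the fault code in
	 * %g4 and the faulting address in %g5, which for an ITLB miss
	 * is simply the trapping PC.
	 */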
	rdpr	%tpc, %g5
	ba,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_ITLB, %g4

kvmap_itlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)

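	/* OBP_TRANS_LOOKUP above searched the translations recorded
	 * from the OpenBoot PROM at boot: a hit left the TTE in %g5,
	 * a miss took the longpath.  Cache the entry in the kernel
	 * TSB before loading the TLB.
	 */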
	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_itlb_load
	 nop

kvmap_dtlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_dtlb_load
	 nop

kvmap_linear_early:
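	/* Early in boot, before the kernel page tables cover the
	 * linear mapping, the TTE for a linear address is synthesized
	 * directly: kern_linear_pte_xor (computed in mm/init_64.c) is
	 * XOR'd with the virtual address, yielding the physical
	 * address bits plus the page size and protection attributes.
	 */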
	sethi		%hi(kern_linear_pte_xor), %g7
	ldx		[%g7 + %lo(kern_linear_pte_xor)], %g2
	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
	 xor		%g2, %g4, %g5

	.align		32
kvmap_dtlb_tsb4m_load:
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)
	ba,pt		%xcc, kvmap_dtlb_load
	 nop

kvmap_dtlb:
	/* %g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_DMMU, %g4

	/* The kernel executes in context zero, therefore we do not
	 * need to clear the context ID bits out of %g4 here.
	 */

	/* sun4v_dtlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_dtlb_4v:
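	/* Linear mapping addresses live at the very top of the
	 * address space and so have bit 63 set; a non-negative vaddr
	 * cannot be a linear mapping and is handled by the nonlinear
	 * path.
	 */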
	brgez,pn	%g4, kvmap_dtlb_nonlinear
	 nop

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* Index through the base page size TSB even for linear
	 * mappings when using page allocation debugging.
	 */
	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#else
	/* Correct TAG_TARGET is already in %g6, check 4mb TSB.  */
	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif
	/* Linear mapping TSB lookup failed.  Fallthrough to kernel
	 * page table based lookup.
	 */
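	/* The branch below is only live early in boot.  Once the
	 * kernel page tables fully cover the linear mapping,
	 * mm/init_64.c patches it to a nop, so linear misses fall
	 * through to the page table walk like everything else.
	 */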
	.globl		kvmap_linear_patch
kvmap_linear_patch:
	ba,a,pt		%xcc, kvmap_linear_early

kvmap_dtlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_dtlb_load:

661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v TLB load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_dtlb_load
	 mov		%g5, %g3

#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
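	/* vmemmap is backed by ordinary kernel page table entries, so
	 * a page table walk is all that is needed; the translation is
	 * deliberately not inserted into the TSB (see the "Do not use
	 * the TSB for vmemmap" check below).
	 */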
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
	ba,a,pt		%xcc, kvmap_dtlb_load
#endif

kvmap_dtlb_nonlinear:
	/* Catch kernel NULL pointer derefs.  */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	bleu,pn		%xcc, kvmap_dtlb_longpath
	 nop

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Do not use the TSB for vmemmap.  */
	sethi		%hi(VMEMMAP_BASE), %g5
	ldx		[%g5 + %lo(VMEMMAP_BASE)], %g5
	cmp		%g4,%g5
	bgeu,pn		%xcc, kvmap_vmemmap
	 nop
#endif

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)

kvmap_dtlb_tsbmiss:
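	/* Only addresses inside [MODULES_VADDR, VMALLOC_END) can
	 * still name a valid kernel mapping at this point (modules,
	 * the OBP window, vmalloc); anything else is a bad kernel
	 * access and takes the longpath.
	 */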
	sethi		%hi(MODULES_VADDR), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_longpath
	 sethi		%hi(VMALLOC_END), %g5
	ldx		[%g5 + %lo(VMALLOC_END)], %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_dtlb_longpath
	 nop

kvmap_check_obp:
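	/* Same split as the ITLB side: the [LOW_OBP_ADDRESS, 4GB)
	 * firmware window is resolved from the saved PROM
	 * translations, everything else by a kernel page table walk.
	 */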
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_obp
	 nop
	ba,pt		%xcc, kvmap_dtlb_vmalloc_addr
	 nop

kvmap_dtlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	ldxa		[%g0] ASI_SCRATCHPAD, %g5
	.previous

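	/* On sun4u the global register switch above is all that was
	 * needed; on sun4v the patch also loaded the per-cpu MMU
	 * fault status area pointer (scratchpad register zero) into
	 * %g5, which the patched code below uses to fetch the
	 * faulting address.
	 */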
	rdpr	%tl, %g3
	cmp	%g3, 1

661:	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g5
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	ldx	[%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

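	/* %g5 now holds the faulting virtual address, read from the
	 * D-MMU TAG ACCESS register on sun4u or from the hypervisor
	 * fault status area on sun4v.  The %tl comparison above picks
	 * the exit: a TL=1 miss is an ordinary kernel fault handed to
	 * sparc64_realfault_common (fault code in %g4, address in
	 * %g5), while a deeper miss occurred inside a register window
	 * spill/fill handler and is redirected via winfix_trampoline.
	 */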
	/* The kernel executes in context zero, therefore we do not
	 * need to clear the context ID bits out of %g5 here.
	 */

	be,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_DTLB, %g4
	ba,pt	%xcc, winfix_trampoline
	 nop
