/* tsb.S: Sparc64 TSB table handling.
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */


#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/page.h>
#include <asm/cpudata.h>
#include <asm/mmu.h>

	.text
	.align	32

	/* Invoked from TLB miss handler, we are in the
	 * MMU global registers and they are set up like
	 * this:
	 *
	 * %g1: TSB entry pointer
	 * %g2:	available temporary
	 * %g3:	FAULT_CODE_{D,I}TLB
	 * %g4:	available temporary
	 * %g5:	available temporary
	 * %g6: TAG TARGET
	 * %g7:	available temporary, will be loaded by us with
	 *      the physical address base of the linux page
	 *      tables for the current address space
	 */
tsb_miss_dtlb:
	mov		TLB_TAG_ACCESS, %g4
	ba,pt		%xcc, tsb_miss_page_table_walk
	 ldxa		[%g4] ASI_DMMU, %g4

tsb_miss_itlb:
	mov		TLB_TAG_ACCESS, %g4
	ba,pt		%xcc, tsb_miss_page_table_walk
	 ldxa		[%g4] ASI_IMMU, %g4

	/* At this point we have:
	 * %g1 --	PAGE_SIZE TSB entry address
	 * %g3 --	FAULT_CODE_{D,I}TLB
	 * %g4 --	missing virtual address
	 * %g6 --	TAG TARGET (vaddr >> 22)
	 */
tsb_miss_page_table_walk:
	TRAP_LOAD_TRAP_BLOCK(%g7, %g5)

	/* Before committing to a full page table walk,
	 * check the huge page TSB.
	 */
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)

661:	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE], %g5
	nop
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	mov		SCRATCHPAD_UTSBREG2, %g5
	ldxa		[%g5] ASI_SCRATCHPAD, %g5
	.previous
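
	/* A note on the 661:/.sun4v_2insn_patch idiom used throughout this
	 * file: the patch section records the address of the two
	 * instructions at the 661 label together with two replacement
	 * instructions, and the early boot code overwrites the original
	 * pair in place when running on sun4v.  Here, for example, the
	 * trap-block load of the huge TSB config value is replaced by a
	 * read of the SCRATCHPAD_UTSBREG2 hypervisor scratchpad register.
	 */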

	cmp		%g5, -1
	be,pt		%xcc, 80f
	 nop

	/* We need an aligned pair of registers containing 2 values
	 * which can be easily rematerialized.  %g6 and %g7 foot the
	 * bill just nicely.  We'll save %g6 away into %g2 for the
	 * huge page TSB TAG comparison.
	 *
	 * Perform a huge page TSB lookup.
	 */
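
	/* The address computation below amounts to roughly the following
	 * C sketch (the low three bits of the TSB config value encode the
	 * number of entries, the rest is the TSB base, and each entry is
	 * 16 bytes):
	 *
	 *	nentries = 512UL << (tsb_cfg & 0x7);
	 *	base     = tsb_cfg & ~0x7UL;
	 *	index    = (vaddr >> HPAGE_SHIFT) & (nentries - 1);
	 *	entry    = base + index * 16;
	 *
	 * The quad load then fetches that entry's tag/TTE pair and the
	 * tag is compared against the tag target saved away in %g2.
	 */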
	mov		%g6, %g2
	and		%g5, 0x7, %g6
	mov		512, %g7
	andn		%g5, 0x7, %g5
	sllx		%g7, %g6, %g7
	srlx		%g4, HPAGE_SHIFT, %g6
	sub		%g7, 1, %g7
	and		%g6, %g7, %g6
	sllx		%g6, 4, %g6
	add		%g5, %g6, %g5

	TSB_LOAD_QUAD(%g5, %g6)
	cmp		%g6, %g2
	be,a,pt		%xcc, tsb_tlb_reload
	 mov		%g7, %g5

	/* No match, remember the huge page TSB entry address,
	 * and restore %g6 and %g7.
	 */
	TRAP_LOAD_TRAP_BLOCK(%g7, %g6)
	srlx		%g4, 22, %g6
80:	stx		%g5, [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP]

#endif

	ldx		[%g7 + TRAP_PER_CPU_PGD_PADDR], %g7

	/* At this point we have:
	 * %g1 --	TSB entry address
	 * %g3 --	FAULT_CODE_{D,I}TLB
	 * %g4 --	missing virtual address
	 * %g6 --	TAG TARGET (vaddr >> 22)
	 * %g7 --	page table physical address
	 *
	 * We know that both the base PAGE_SIZE TSB and the HPAGE_SIZE
	 * TSB lack a matching entry.
	 */
tsb_miss_page_table_walk_sun4v_fastpath:
	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)

	/* Valid PTE is now in %g5.  */

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
661:	sethi		%uhi(_PAGE_SZALL_4U), %g7
	sllx		%g7, 32, %g7
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	mov		_PAGE_SZALL_4V, %g7
	nop
	.previous

	and		%g5, %g7, %g2

661:	sethi		%uhi(_PAGE_SZHUGE_4U), %g7
	sllx		%g7, 32, %g7
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	mov		_PAGE_SZHUGE_4V, %g7
	nop
	.previous

	cmp		%g2, %g7
	bne,pt		%xcc, 60f
	 nop

	/* It is a huge page, use huge page TSB entry address we
	 * calculated above.
	 */
	TRAP_LOAD_TRAP_BLOCK(%g7, %g2)
	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g2
	cmp		%g2, -1
	movne		%xcc, %g2, %g1
60:
#endif

	/* At this point we have:
	 * %g1 --	TSB entry address
	 * %g3 --	FAULT_CODE_{D,I}TLB
	 * %g5 --	valid PTE
	 * %g6 --	TAG TARGET (vaddr >> 22)
	 */
tsb_reload:
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)

	/* Finally, load TLB and return from trap.  */
tsb_tlb_reload:
	cmp		%g3, FAULT_CODE_DTLB
	bne,pn		%xcc, tsb_itlb_load
	 nop

tsb_dtlb_load:

661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6:	TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_dtlb_load
	 mov		%g5, %g3

tsb_itlb_load:
	/* Executable bit must be set.  */
661:	sethi		%hi(_PAGE_EXEC_4U), %g4
	andcc		%g5, %g4, %g0
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	andcc		%g5, _PAGE_EXEC_4V, %g0
	nop
	.previous

	be,pn		%xcc, tsb_do_fault
	 nop

661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6:	TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_itlb_load
	 mov		%g5, %g3

	/* No valid entry in the page tables, do full fault
	 * processing.
	 */

	.globl		tsb_do_fault
tsb_do_fault:
	cmp		%g3, FAULT_CODE_DTLB

661:	rdpr		%pstate, %g5
	wrpr		%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	SET_GL(1)
	ldxa		[%g0] ASI_SCRATCHPAD, %g4
	.previous
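
	/* On sun4u the wrpr above moves us off the MMU globals before
	 * heading into the fault path; the sun4v replacement instead
	 * raises the global register level with SET_GL(1) and loads the
	 * per-cpu MMU fault status area pointer from scratchpad register
	 * zero into %g4, which the HV_FAULT_D_ADDR_OFFSET load further
	 * down depends on.
	 */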

	bne,pn		%xcc, tsb_do_itlb_fault
	 nop

tsb_do_dtlb_fault:
	rdpr	%tl, %g3
	cmp	%g3, 1

661:	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g5
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	ldx	[%g4 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

	be,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_DTLB, %g4
	ba,pt	%xcc, winfix_trampoline
	 nop

tsb_do_itlb_fault:
	rdpr	%tpc, %g5
	ba,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_ITLB, %g4

	.globl	sparc64_realfault_common
sparc64_realfault_common:
	/* fault code in %g4, fault address in %g5, etrap will
	 * preserve these two values in %l4 and %l5 respectively
	 */
	ba,pt	%xcc, etrap			! Save trap state
1:	 rd	%pc, %g7			! ...
	stb	%l4, [%g6 + TI_FAULT_CODE]	! Save fault code
	stx	%l5, [%g6 + TI_FAULT_ADDR]	! Save fault address
	call	do_sparc64_fault		! Call fault handler
	 add	%sp, PTREGS_OFF, %o0		! Compute pt_regs arg
	ba,pt	%xcc, rtrap			! Restore cpu state
	 nop					! Delay slot (fill me)

winfix_trampoline:
	rdpr	%tpc, %g3			! Prepare winfixup TNPC
	or	%g3, 0x7c, %g3			! Compute branch offset
	wrpr	%g3, %tnpc			! Write it into TNPC
	done					! Trap return

	/* Insert an entry into the TSB.
	 *
	 * %o0: TSB entry pointer (virt or phys address)
	 * %o1: tag
	 * %o2:	pte
	 */
	.align	32
	.globl	__tsb_insert
__tsb_insert:
	rdpr	%pstate, %o5
	wrpr	%o5, PSTATE_IE, %pstate
	TSB_LOCK_TAG(%o0, %g2, %g3)
	TSB_WRITE(%o0, %o2, %o1)
	wrpr	%o5, %pstate
	retl
	 nop
	.size	__tsb_insert, .-__tsb_insert

	/* Flush the given TSB entry if it has the matching
	 * tag.
	 *
	 * %o0: TSB entry pointer (virt or phys address)
	 * %o1:	tag
	 */
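
	/* Informally, the flush below is a rough C sketch of the loop,
	 * with 'ent' standing for the 16-byte entry and 'tag' its first
	 * 8-byte word, and 'match_tag' the %o1 argument:
	 *
	 *	for (;;) {
	 *		tag = ent->tag;
	 *		if (tag & (1UL << TSB_TAG_LOCK_BIT))
	 *			continue;
	 *		if (tag != match_tag)
	 *			break;
	 *		if (cmpxchg(&ent->tag, tag,
	 *			    1UL << TSB_TAG_INVALID_BIT) == tag)
	 *			break;
	 *	}
	 */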
	.align	32
	.globl	tsb_flush
	.type	tsb_flush,#function
tsb_flush:
	sethi	%hi(TSB_TAG_LOCK_HIGH), %g2
1:	TSB_LOAD_TAG(%o0, %g1)
	srlx	%g1, 32, %o3
	andcc	%o3, %g2, %g0
	bne,pn	%icc, 1b
	 nop
	cmp	%g1, %o1
	mov	1, %o3
	bne,pt	%xcc, 2f
	 sllx	%o3, TSB_TAG_INVALID_BIT, %o3
	TSB_CAS_TAG(%o0, %g1, %o3)
	cmp	%g1, %o3
	bne,pn	%xcc, 1b
	 nop
2:	retl
	 nop
	.size	tsb_flush, .-tsb_flush

	/* Reload MMU related context switch state at
	 * schedule() time.
	 *
	 * %o0: page table physical address
	 * %o1:	TSB base config pointer
	 * %o2:	TSB huge config pointer, or NULL if none
	 * %o3:	Hypervisor TSB descriptor physical address
	 *
	 * We have to run this whole thing with interrupts
	 * disabled so that the current cpu doesn't change
	 * due to preemption.
	 */
	.align	32
	.globl	__tsb_context_switch
	.type	__tsb_context_switch,#function
__tsb_context_switch:
	rdpr	%pstate, %g1
	wrpr	%g1, PSTATE_IE, %pstate

	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)

	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]

	ldx	[%o1 + TSB_CONFIG_REG_VAL], %o0
	brz,pt	%o2, 1f
	 mov	-1, %g3

	ldx	[%o2 + TSB_CONFIG_REG_VAL], %g3

1:	stx	%g3, [%g2 + TRAP_PER_CPU_TSB_HUGE]

	sethi	%hi(tlb_type), %g2
	lduw	[%g2 + %lo(tlb_type)], %g2
	cmp	%g2, 3
	bne,pt	%icc, 50f
	 nop

	/* Hypervisor TSB switch. */
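	/* Mirror the base (and, when present, huge) TSB config values
	 * into the UTSBREG scratchpad registers, then ask the hypervisor
	 * to switch the non-nucleus context TSBs: HV_FAST_MMU_TSB_CTXNON0
	 * takes the number of TSB descriptors in %o0 (1, or 2 when a
	 * huge page TSB is configured) and the real address of the
	 * descriptor array, our %o3 argument, in %o1.
	 */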
	mov	SCRATCHPAD_UTSBREG1, %o5
	stxa	%o0, [%o5] ASI_SCRATCHPAD
	mov	SCRATCHPAD_UTSBREG2, %o5
	stxa	%g3, [%o5] ASI_SCRATCHPAD

	mov	2, %o0
	cmp	%g3, -1
	move	%xcc, 1, %o0

	mov	HV_FAST_MMU_TSB_CTXNON0, %o5
	mov	%o3, %o1
	ta	HV_FAST_TRAP

	/* Finish up.  */
	ba,pt	%xcc, 9f
	 nop

	/* SUN4U TSB switch.  */
50:	mov	TSB_REG, %o5
	stxa	%o0, [%o5] ASI_DMMU
	membar	#Sync
	stxa	%o0, [%o5] ASI_IMMU
	membar	#Sync

2:	ldx	[%o1 + TSB_CONFIG_MAP_VADDR], %o4
	brz	%o4, 9f
	 ldx	[%o1 + TSB_CONFIG_MAP_PTE], %o5

	sethi	%hi(sparc64_highest_unlocked_tlb_ent), %g2
	mov	TLB_TAG_ACCESS, %g3
	lduw	[%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
	stxa	%o4, [%g3] ASI_DMMU
	membar	#Sync
	sllx	%g2, 3, %g2
	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
	membar	#Sync

	brz,pt	%o2, 9f
	 nop

	ldx	[%o2 + TSB_CONFIG_MAP_VADDR], %o4
	ldx	[%o2 + TSB_CONFIG_MAP_PTE], %o5
	mov	TLB_TAG_ACCESS, %g3
	stxa	%o4, [%g3] ASI_DMMU
	membar	#Sync
	sub	%g2, (1 << 3), %g2
	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
	membar	#Sync

9:
	wrpr	%g1, %pstate

	retl
	 nop
	.size	__tsb_context_switch, .-__tsb_context_switch

#define TSB_PASS_BITS	((1 << TSB_TAG_LOCK_BIT) | \
			 (1 << TSB_TAG_INVALID_BIT))

	.align	32
	.globl	copy_tsb
	.type	copy_tsb,#function
copy_tsb:		/* %o0=old_tsb_base, %o1=old_tsb_size
			 * %o2=new_tsb_base, %o3=new_tsb_size
			 */
	sethi		%uhi(TSB_PASS_BITS), %g7
	srlx		%o3, 4, %o3
	add		%o0, %o1, %g1	/* end of old tsb */
	sllx		%g7, 32, %g7
	sub		%o3, 1, %o3	/* %o3 == new tsb hash mask */

661:	prefetcha	[%o0] ASI_N, #one_read
	.section	.tsb_phys_patch, "ax"
	.word		661b
	prefetcha	[%o0] ASI_PHYS_USE_EC, #one_read
	.previous

90:	andcc		%o0, (64 - 1), %g0
	bne		1f
	 add		%o0, 64, %o5

661:	prefetcha	[%o5] ASI_N, #one_read
	.section	.tsb_phys_patch, "ax"
	.word		661b
	prefetcha	[%o5] ASI_PHYS_USE_EC, #one_read
	.previous

1:	TSB_LOAD_QUAD(%o0, %g2)		/* %g2/%g3 == TSB entry */
	andcc		%g2, %g7, %g0	/* LOCK or INVALID set? */
	bne,pn		%xcc, 80f	/* Skip it */
	 sllx		%g2, 22, %o4	/* TAG --> VADDR */

	/* This can definitely be computed faster... */
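	/* Roughly, as a C sketch (each TSB entry is 16 bytes, and the 511
	 * mask below assumes the old TSB has 512 entries):
	 *
	 *	old_index = (entry_addr >> 4) & 511;
	 *	vaddr     = (tag << 22) | (old_index << PAGE_SHIFT);
	 *	new_index = (vaddr >> PAGE_SHIFT) & new_hash_mask;
	 *
	 * and the tag/TTE pair is then stored at new_index in the new TSB.
	 */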
	srlx		%o0, 4, %o5	/* Build index */
	and		%o5, 511, %o5	/* Mask index */
	sllx		%o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
	or		%o4, %o5, %o4	/* Full VADDR. */
	srlx		%o4, PAGE_SHIFT, %o4 /* Shift down to create index */
	and		%o4, %o3, %o4	/* Mask with new_tsb_nents-1 */
	sllx		%o4, 4, %o4	/* Shift back up into tsb ent offset */
	TSB_STORE(%o2 + %o4, %g2)	/* Store TAG */
	add		%o4, 0x8, %o4	/* Advance to TTE */
	TSB_STORE(%o2 + %o4, %g3)	/* Store TTE */

80:	add		%o0, 16, %o0
	cmp		%o0, %g1
	bne,pt		%xcc, 90b
	 nop

	retl
	 nop
	.size		copy_tsb, .-copy_tsb

	/* Set the invalid bit in all TSB entries.  */
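	/* Each TSB entry is 16 bytes, an 8-byte tag followed by an 8-byte
	 * TTE; storing a tag word with only TSB_TAG_INVALID_BIT set at
	 * every 0x10 offset is enough to mark the entry invalid, so the
	 * TTE halves are never touched.  Both loops below clear 0x100
	 * bytes (16 entries) per iteration, so the size is presumably
	 * always a multiple of 256 bytes.
	 */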
	.align		32
	.globl		tsb_init
	.type		tsb_init,#function
tsb_init:		/* %o0 = TSB vaddr, %o1 = size in bytes */
	prefetch	[%o0 + 0x000], #n_writes
	mov		1, %g1
	prefetch	[%o0 + 0x040], #n_writes
	sllx		%g1, TSB_TAG_INVALID_BIT, %g1
	prefetch	[%o0 + 0x080], #n_writes
1:	prefetch	[%o0 + 0x0c0], #n_writes
	stx		%g1, [%o0 + 0x00]
	stx		%g1, [%o0 + 0x10]
	stx		%g1, [%o0 + 0x20]
	stx		%g1, [%o0 + 0x30]
	prefetch	[%o0 + 0x100], #n_writes
	stx		%g1, [%o0 + 0x40]
	stx		%g1, [%o0 + 0x50]
	stx		%g1, [%o0 + 0x60]
	stx		%g1, [%o0 + 0x70]
	prefetch	[%o0 + 0x140], #n_writes
	stx		%g1, [%o0 + 0x80]
	stx		%g1, [%o0 + 0x90]
	stx		%g1, [%o0 + 0xa0]
	stx		%g1, [%o0 + 0xb0]
	prefetch	[%o0 + 0x180], #n_writes
	stx		%g1, [%o0 + 0xc0]
	stx		%g1, [%o0 + 0xd0]
	stx		%g1, [%o0 + 0xe0]
	stx		%g1, [%o0 + 0xf0]
	subcc		%o1, 0x100, %o1
	bne,pt		%xcc, 1b
	 add		%o0, 0x100, %o0
	retl
	 nop
	nop
	nop
	.size		tsb_init, .-tsb_init

	.globl		NGtsb_init
	.type		NGtsb_init,#function
NGtsb_init:
	rd		%asi, %g2
	mov		1, %g1
	wr		%g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
	sllx		%g1, TSB_TAG_INVALID_BIT, %g1
1:	stxa		%g1, [%o0 + 0x00] %asi
	stxa		%g1, [%o0 + 0x10] %asi
	stxa		%g1, [%o0 + 0x20] %asi
	stxa		%g1, [%o0 + 0x30] %asi
	stxa		%g1, [%o0 + 0x40] %asi
	stxa		%g1, [%o0 + 0x50] %asi
	stxa		%g1, [%o0 + 0x60] %asi
	stxa		%g1, [%o0 + 0x70] %asi
	stxa		%g1, [%o0 + 0x80] %asi
	stxa		%g1, [%o0 + 0x90] %asi
	stxa		%g1, [%o0 + 0xa0] %asi
	stxa		%g1, [%o0 + 0xb0] %asi
	stxa		%g1, [%o0 + 0xc0] %asi
	stxa		%g1, [%o0 + 0xd0] %asi
	stxa		%g1, [%o0 + 0xe0] %asi
	stxa		%g1, [%o0 + 0xf0] %asi
	subcc		%o1, 0x100, %o1
	bne,pt		%xcc, 1b
	 add		%o0, 0x100, %o0
	membar		#Sync
	retl
	 wr		%g2, 0x0, %asi
	.size		NGtsb_init, .-NGtsb_init