xref: /openbmc/linux/arch/sparc/kernel/tsb.S (revision 711aab1d)
1/* tsb.S: Sparc64 TSB table handling.
2 *
3 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
4 */
5
6
7#include <asm/tsb.h>
8#include <asm/hypervisor.h>
9#include <asm/page.h>
10#include <asm/cpudata.h>
11#include <asm/mmu.h>
12
13	.text
14	.align	32
15
16	/* Invoked from TLB miss handler, we are in the
17	 * MMU global registers and they are setup like
18	 * this:
19	 *
20	 * %g1: TSB entry pointer
21	 * %g2:	available temporary
22	 * %g3:	FAULT_CODE_{D,I}TLB
23	 * %g4:	available temporary
24	 * %g5:	available temporary
25	 * %g6: TAG TARGET
26	 * %g7:	available temporary, will be loaded by us with
27	 *      the physical address base of the linux page
28	 *      tables for the current address space
29	 */
30tsb_miss_dtlb:
	/* Read the D-MMU tag access register, then shift right and
	 * back left by PAGE_SHIFT to clear the low-order context ID
	 * bits, leaving the page-aligned missing vaddr in %g4.
	 */
31	mov		TLB_TAG_ACCESS, %g4
32	ldxa		[%g4] ASI_DMMU, %g4
33	srlx		%g4, PAGE_SHIFT, %g4
34	ba,pt		%xcc, tsb_miss_page_table_walk
35	 sllx		%g4, PAGE_SHIFT, %g4	/* delay slot: finish clearing low bits */
36
37tsb_miss_itlb:
	/* Same as tsb_miss_dtlb, but the tag comes from the I-MMU.  */
38	mov		TLB_TAG_ACCESS, %g4
39	ldxa		[%g4] ASI_IMMU, %g4
40	srlx		%g4, PAGE_SHIFT, %g4
41	ba,pt		%xcc, tsb_miss_page_table_walk
42	 sllx		%g4, PAGE_SHIFT, %g4	/* delay slot: finish clearing low bits */
43
44	/* At this point we have:
45	 * %g1 --	PAGE_SIZE TSB entry address
46	 * %g3 --	FAULT_CODE_{D,I}TLB
47	 * %g4 --	missing virtual address
48	 * %g6 --	TAG TARGET (vaddr >> 22)
49	 */
50tsb_miss_page_table_walk:
51	TRAP_LOAD_TRAP_BLOCK(%g7, %g5)
52
53	/* Before committing to a full page table walk,
54	 * check the huge page TSB.
55	 */
56#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
57
	/* Fetch the huge-TSB config register value: from the per-cpu
	 * trap block on sun4u, or (patched in at boot) from the
	 * SCRATCHPAD_UTSBREG2 scratchpad register on sun4v.  A value
	 * of -1 means no huge page TSB has been allocated.
	 */
58661:	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE], %g5
59	nop
60	.section	.sun4v_2insn_patch, "ax"
61	.word		661b
62	mov		SCRATCHPAD_UTSBREG2, %g5
63	ldxa		[%g5] ASI_SCRATCHPAD, %g5
64	.previous
65
66	cmp		%g5, -1
67	be,pt		%xcc, 80f	/* no huge TSB: just record -1 below */
68	 nop
69
70	/* We need an aligned pair of registers containing 2 values
71	 * which can be easily rematerialized.  %g6 and %g7 foot the
72	 * bill just nicely.  We'll save %g6 away into %g2 for the
73	 * huge page TSB TAG comparison.
74	 *
75	 * Perform a huge page TSB lookup.
76	 */
77	mov		%g6, %g2	/* preserve TAG TARGET in %g2 */
78	and		%g5, 0x7, %g6	/* low 3 bits of reg = size field */
79	mov		512, %g7
80	andn		%g5, 0x7, %g5	/* %g5 = huge TSB base address */
81	sllx		%g7, %g6, %g7	/* %g7 = 512 << size = nentries */
82	srlx		%g4, REAL_HPAGE_SHIFT, %g6
83	sub		%g7, 1, %g7	/* %g7 = hash mask (nentries - 1) */
84	and		%g6, %g7, %g6	/* %g6 = entry index */
85	sllx		%g6, 4, %g6	/* 16 bytes per TSB entry */
86	add		%g5, %g6, %g5	/* %g5 = huge TSB entry address */
87
88	TSB_LOAD_QUAD(%g5, %g6)		/* loads entry TAG/PTE into %g6/%g7 */
89	cmp		%g6, %g2	/* does the TAG match our TAG TARGET? */
90	be,a,pt		%xcc, tsb_tlb_reload
91	 mov		%g7, %g5	/* annulled slot: PTE -> %g5 only if taken */
92
93	/* No match, remember the huge page TSB entry address,
94	 * and restore %g6 and %g7.
95	 */
96	TRAP_LOAD_TRAP_BLOCK(%g7, %g6)
97	srlx		%g4, 22, %g6	/* rematerialize TAG TARGET */
	/* Stash either the huge TSB entry address or -1 (no huge TSB)
	 * for the huge-page fastpath after the page table walk.
	 */
9880:	stx		%g5, [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP]
99
100#endif
101
	/* %g7 = physical address of this mm's page tables.  */
102	ldx		[%g7 + TRAP_PER_CPU_PGD_PADDR], %g7
103
104	/* At this point we have:
105	 * %g1 --	TSB entry address
106	 * %g3 --	FAULT_CODE_{D,I}TLB
107	 * %g4 --	missing virtual address
108	 * %g6 --	TAG TARGET (vaddr >> 22)
109	 * %g7 --	page table physical address
110	 *
111	 * We know that both the base PAGE_SIZE TSB and the HPAGE_SIZE
112	 * TSB both lack a matching entry.
113	 */
114tsb_miss_page_table_walk_sun4v_fastpath:
115	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
116
117	/* Valid PTE is now in %g5.  */
118
119#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	/* Test the PMD/PUD huge bits of the PTE; if neither is set
	 * this is a base page and we proceed to tsb_reload at 60f.
	 */
120	sethi		%uhi(_PAGE_PMD_HUGE | _PAGE_PUD_HUGE), %g7
121	sllx		%g7, 32, %g7
122
123	andcc		%g5, %g7, %g0
124	be,pt		%xcc, 60f
125	 nop
126
127	/* It is a huge page, use huge page TSB entry address we
128	 * calculated above.  If the huge page TSB has not been
129	 * allocated, setup a trap stack and call hugetlb_setup()
130	 * to do so, then return from the trap to replay the TLB
131	 * miss.
132	 *
133	 * This is necessary to handle the case of transparent huge
134	 * pages where we don't really have a non-atomic context
135	 * in which to allocate the hugepage TSB hash table.  When
136	 * the 'mm' faults in the hugepage for the first time, we
137	 * thus handle it here.  This also makes sure that we can
138	 * allocate the TSB hash table on the correct NUMA node.
139	 */
140	TRAP_LOAD_TRAP_BLOCK(%g7, %g2)
141	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g1
142	cmp		%g1, -1		/* -1 => huge TSB not allocated yet */
143	bne,pt		%xcc, 60f	/* allocated: %g1 = huge TSB entry addr */
144	 nop
145
	/* Switch to the trap-handling globals: alternate/MMU globals
	 * on sun4u, or GL=1 (patched in) on sun4v, before building a
	 * trap frame to call into C.
	 */
146661:	rdpr		%pstate, %g5
147	wrpr		%g5, PSTATE_AG | PSTATE_MG, %pstate
148	.section	.sun4v_2insn_patch, "ax"
149	.word		661b
150	SET_GL(1)
151	nop
152	.previous
153
154	rdpr	%tl, %g7
155	cmp	%g7, 1
156	bne,pn	%xcc, winfix_trampoline	/* nested trap level: window fixup */
157	 mov	%g3, %g4		/* delay slot: fault code -> %g4 */
158	ba,pt	%xcc, etrap
159	 rd	%pc, %g7		/* etrap convention: caller %pc in %g7 */
160	call	hugetlb_setup
161	 add	%sp, PTREGS_OFF, %o0	/* arg0: pt_regs pointer */
162	ba,pt	%xcc, rtrap		/* return from trap, replay the miss */
163	 nop
164
16560:
166#endif
167
168	/* At this point we have:
169	 * %g1 --	TSB entry address
170	 * %g3 --	FAULT_CODE_{D,I}TLB
171	 * %g5 --	valid PTE
172	 * %g6 --	TAG TARGET (vaddr >> 22)
173	 */
tsb_reload_doc:	/* (label-free note) lock the TSB entry's tag, then write TAG+PTE */
174tsb_reload:
175	TSB_LOCK_TAG(%g1, %g2, %g7)
176	TSB_WRITE(%g1, %g5, %g6)
177
178	/* Finally, load TLB and return from trap.  */
179tsb_tlb_reload:
180	cmp		%g3, FAULT_CODE_DTLB	/* dispatch on fault code */
181	bne,pn		%xcc, tsb_itlb_load
182	 nop
183
184tsb_dtlb_load:
185
	/* sun4u: push the PTE straight into the D-TLB and retry the
	 * faulting instruction.  On sun4v both instructions are
	 * patched to nops and we fall through to the branch below.
	 */
186661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
187	retry
188	.section	.sun4v_2insn_patch, "ax"
189	.word		661b
190	nop
191	nop
192	.previous
193
194	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
195	 * instruction get nop'd out and we get here to branch
196	 * to the sun4v tlb load code.  The registers are setup
197	 * as follows:
198	 *
199	 * %g4: vaddr
200	 * %g5: PTE
201	 * %g6:	TAG
202	 *
203	 * The sun4v TLB load wants the PTE in %g3 so we fix that
204	 * up here.
205	 */
206	ba,pt		%xcc, sun4v_dtlb_load
207	 mov		%g5, %g3
208
209tsb_itlb_load:
	/* Executable bit must be set, or this is a real fault.
	 * sun4u tests _PAGE_EXEC_4U; the sun4v patch replaces the
	 * pair with a single-instruction _PAGE_EXEC_4V test.
	 */
210	/* Executable bit must be set.  */
211661:	sethi		%hi(_PAGE_EXEC_4U), %g4
212	andcc		%g5, %g4, %g0
213	.section	.sun4v_2insn_patch, "ax"
214	.word		661b
215	andcc		%g5, _PAGE_EXEC_4V, %g0
216	nop
217	.previous
218
219	be,pn		%xcc, tsb_do_fault	/* not executable: full fault path */
220	 nop
221
	/* sun4u: load the PTE into the I-TLB and retry.  Patched to
	 * nops on sun4v (see comment below).
	 */
222661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
223	retry
224	.section	.sun4v_2insn_patch, "ax"
225	.word		661b
226	nop
227	nop
228	.previous
229
230	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
231	 * instruction get nop'd out and we get here to branch
232	 * to the sun4v tlb load code.  The registers are setup
233	 * as follows:
234	 *
235	 * %g4: vaddr
236	 * %g5: PTE
237	 * %g6:	TAG
238	 *
239	 * The sun4v TLB load wants the PTE in %g3 so we fix that
240	 * up here.
241	 */
242	ba,pt		%xcc, sun4v_itlb_load
243	 mov		%g5, %g3
244
245	/* No valid entry in the page tables, do full fault
246	 * processing.
247	 */
248
249	.globl		tsb_do_fault
250tsb_do_fault:
251	cmp		%g3, FAULT_CODE_DTLB	/* condition used by bne below */
252
	/* Switch to trap-handling globals; the sun4v patch also loads
	 * scratchpad register 0 into %g4, used below as the base of
	 * the hypervisor MMU fault status area.
	 */
253661:	rdpr		%pstate, %g5
254	wrpr		%g5, PSTATE_AG | PSTATE_MG, %pstate
255	.section	.sun4v_2insn_patch, "ax"
256	.word		661b
257	SET_GL(1)
258	ldxa		[%g0] ASI_SCRATCHPAD, %g4
259	.previous
260
261	bne,pn		%xcc, tsb_do_itlb_fault
262	 nop
263
264tsb_do_dtlb_fault:
265	rdpr	%tl, %g3
266	cmp	%g3, 1			/* TL==1 => normal fault processing */
267
	/* Fault address: D-MMU tag access on sun4u, or the D-fault
	 * address slot of the fault status area on sun4v.
	 */
268661:	mov	TLB_TAG_ACCESS, %g4
269	ldxa	[%g4] ASI_DMMU, %g5
270	.section .sun4v_2insn_patch, "ax"
271	.word	661b
272	ldx	[%g4 + HV_FAULT_D_ADDR_OFFSET], %g5
273	nop
274	.previous
275
276	/* Clear context ID bits.  */
277	srlx		%g5, PAGE_SHIFT, %g5
278	sllx		%g5, PAGE_SHIFT, %g5
279
280	be,pt	%xcc, sparc64_realfault_common
281	 mov	FAULT_CODE_DTLB, %g4	/* delay slot: fault code for C handler */
282	ba,pt	%xcc, winfix_trampoline	/* TL>1: window fixup instead */
283	 nop
284
285tsb_do_itlb_fault:
	/* For instruction faults the faulting address is simply %tpc.  */
286	rdpr	%tpc, %g5
287	ba,pt	%xcc, sparc64_realfault_common
288	 mov	FAULT_CODE_ITLB, %g4
289
290	.globl	sparc64_realfault_common
291sparc64_realfault_common:
292	/* fault code in %g4, fault address in %g5, etrap will
293	 * preserve these two values in %l4 and %l5 respectively
294	 */
295	ba,pt	%xcc, etrap			! Save trap state
296:	 rd	%pc, %g7			! ...
297	stb	%l4, [%g6 + TI_FAULT_CODE]	! Save fault code
298	stx	%l5, [%g6 + TI_FAULT_ADDR]	! Save fault address
299	call	do_sparc64_fault		! Call fault handler
300	 add	%sp, PTREGS_OFF, %o0		! Compute pt_regs arg
301	ba,pt	%xcc, rtrap			! Restore cpu state
302	 nop					! Delay slot (fill me)
303
304winfix_trampoline:
305	rdpr	%tpc, %g3			! Prepare winfixup TNPC
306	or	%g3, 0x7c, %g3			! Compute branch offset
307	wrpr	%g3, %tnpc			! Write it into TNPC
308	done					! Trap return
309
310	/* Insert an entry into the TSB.
311	 *
312	 * %o0: TSB entry pointer (virt or phys address)
313	 * %o1: tag
314	 * %o2:	pte
315	 *
316	 * Clobbers %o5, %g2, %g3.  Runs with interrupts disabled for
317	 * the duration of the locked tag write.
318	 */
316	.align	32
317	.globl	__tsb_insert
318__tsb_insert:
319	rdpr	%pstate, %o5
320	wrpr	%o5, PSTATE_IE, %pstate	/* wrpr XORs: clear PSTATE_IE */
321	TSB_LOCK_TAG(%o0, %g2, %g3)	/* lock the entry's tag word */
322	TSB_WRITE(%o0, %o2, %o1)	/* store PTE then unlocking tag */
323	wrpr	%o5, %pstate		/* restore previous interrupt state */
324	retl
325	 nop
326	.size	__tsb_insert, .-__tsb_insert
327
328	/* Flush the given TSB entry if it has the matching
329	 * tag.
330	 *
331	 * %o0: TSB entry pointer (virt or phys address)
332	 * %o1:	tag
333	 *
334	 * Spins while the entry is tag-locked, then atomically
335	 * replaces a matching tag with the INVALID pattern via
336	 * compare-and-swap, retrying from the top on CAS failure.
337	 * Clobbers %o3, %g1, %g2.
338	 */
334	.align	32
335	.globl	tsb_flush
336	.type	tsb_flush,#function
337tsb_flush:
338	sethi	%hi(TSB_TAG_LOCK_HIGH), %g2
3391:	TSB_LOAD_TAG(%o0, %g1)		/* %g1 = current tag */
340	srlx	%g1, 32, %o3
341	andcc	%o3, %g2, %g0		/* lock bit set in high word? */
342	bne,pn	%icc, 1b		/* yes: spin until unlocked */
343	 nop
344	cmp	%g1, %o1		/* tag match? */
345	mov	1, %o3
346	bne,pt	%xcc, 2f		/* no match: nothing to flush */
347	 sllx	%o3, TSB_TAG_INVALID_BIT, %o3	/* build INVALID tag value */
348	TSB_CAS_TAG(%o0, %g1, %o3)	/* swap in INVALID iff tag still %g1 */
349	cmp	%g1, %o3
350	bne,pn	%xcc, 1b		/* CAS lost the race: retry */
351	 nop
3522:	retl
353	 nop
354	.size	tsb_flush, .-tsb_flush
355
356	/* Reload MMU related context switch state at
357	 * schedule() time.
358	 *
359	 * %o0: page table physical address
360	 * %o1:	TSB base config pointer
361	 * %o2:	TSB huge config pointer, or NULL if none
362	 * %o3:	Hypervisor TSB descriptor physical address
363	 * %o4: Secondary context to load, if non-zero
364	 *
365	 * We have to run this whole thing with interrupts
366	 * disabled so that the current cpu doesn't change
367	 * due to preemption.
368	 */
369	.align	32
370	.globl	__tsb_context_switch
371	.type	__tsb_context_switch,#function
372__tsb_context_switch:
373	rdpr	%pstate, %g1
374	wrpr	%g1, PSTATE_IE, %pstate	/* wrpr XORs: disable interrupts */
375
	/* Optionally install the new secondary context register.
	 * The stxa is patched to use ASI_MMU on sun4v.
	 */
376	brz,pn	%o4, 1f
377	 mov	SECONDARY_CONTEXT, %o5
378
379661:	stxa	%o4, [%o5] ASI_DMMU
380	.section .sun4v_1insn_patch, "ax"
381	.word	661b
382	stxa	%o4, [%o5] ASI_MMU
383	.previous
384	flush	%g6			/* make the context write take effect */
385
3861:
387	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
388
	/* Publish the new page table base for the TLB miss handlers.  */
389	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
390
	/* %o0 = base TSB config reg value; %g3 = huge TSB config reg
	 * value, or -1 when no huge config was supplied.
	 */
391	ldx	[%o1 + TSB_CONFIG_REG_VAL], %o0
392	brz,pt	%o2, 1f
393	 mov	-1, %g3
394
395	ldx	[%o2 + TSB_CONFIG_REG_VAL], %g3
396
3971:	stx	%g3, [%g2 + TRAP_PER_CPU_TSB_HUGE]
398
	/* tlb_type == 3 means hypervisor (sun4v); else sun4u path.  */
399	sethi	%hi(tlb_type), %g2
400	lduw	[%g2 + %lo(tlb_type)], %g2
401	cmp	%g2, 3
402	bne,pt	%icc, 50f
403	 nop
404
405	/* Hypervisor TSB switch. */
406	mov	SCRATCHPAD_UTSBREG1, %o5
407	stxa	%o0, [%o5] ASI_SCRATCHPAD	/* base TSB reg value */
408	mov	SCRATCHPAD_UTSBREG2, %o5
409	stxa	%g3, [%o5] ASI_SCRATCHPAD	/* huge TSB reg value or -1 */
410
	/* %o0 = number of TSB descriptors: 2 if a huge TSB exists,
	 * otherwise 1.
	 */
411	mov	2, %o0
412	cmp	%g3, -1
413	move	%xcc, 1, %o0
414
	/* mmu_tsb_ctxnon0(ntsb, descr_paddr) hypervisor fast trap.  */
415	mov	HV_FAST_MMU_TSB_CTXNON0, %o5
416	mov	%o3, %o1
417	ta	HV_FAST_TRAP
418
419	/* Finish up.  */
420	ba,pt	%xcc, 9f
421	 nop
422
423	/* SUN4U TSB switch.  */
42450:	mov	TSB_REG, %o5
425	stxa	%o0, [%o5] ASI_DMMU	/* program D-MMU TSB register */
426	membar	#Sync
427	stxa	%o0, [%o5] ASI_IMMU	/* program I-MMU TSB register */
428	membar	#Sync
429
	/* If the base TSB config has a locked-TLB mapping, install it
	 * at the highest unlocked D-TLB entry.
	 */
4302:	ldx	[%o1 + TSB_CONFIG_MAP_VADDR], %o4
431	brz	%o4, 9f
432	 ldx	[%o1 + TSB_CONFIG_MAP_PTE], %o5
433
434	sethi	%hi(sparc64_highest_unlocked_tlb_ent), %g2
435	mov	TLB_TAG_ACCESS, %g3
436	lduw	[%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
437	stxa	%o4, [%g3] ASI_DMMU	/* set tag access = map vaddr */
438	membar	#Sync
439	sllx	%g2, 3, %g2		/* entry index -> data access offset */
440	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
441	membar	#Sync
442
	/* Same again for the huge TSB mapping, if present, in the
	 * next lower D-TLB entry.
	 */
443	brz,pt	%o2, 9f
444	 nop
445
446	ldx	[%o2 + TSB_CONFIG_MAP_VADDR], %o4
447	ldx	[%o2 + TSB_CONFIG_MAP_PTE], %o5
448	mov	TLB_TAG_ACCESS, %g3
449	stxa	%o4, [%g3] ASI_DMMU
450	membar	#Sync
451	sub	%g2, (1 << 3), %g2	/* previous TLB entry */
452	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
453	membar	#Sync
454
4559:
456	wrpr	%g1, %pstate		/* restore interrupt state */
457
458	retl
459	 nop
460	.size	__tsb_context_switch, .-__tsb_context_switch
461
462#define TSB_PASS_BITS	((1 << TSB_TAG_LOCK_BIT) | \
463			 (1 << TSB_TAG_INVALID_BIT))

	/* Copy every valid, unlocked entry from an old TSB into a new
	 * (differently sized) TSB, re-hashing each entry for the new
	 * table.  Entries whose tag has the LOCK or INVALID bit set
	 * are skipped.  Clobbers %o4, %o5, %g1, %g2, %g3, %g7.
	 */
465	.align	32
466	.globl	copy_tsb
467	.type	copy_tsb,#function
468copy_tsb:		/* %o0=old_tsb_base, %o1=old_tsb_size
469			 * %o2=new_tsb_base, %o3=new_tsb_size
470			 * %o4=page_size_shift
471			 */
472	sethi		%uhi(TSB_PASS_BITS), %g7
473	srlx		%o3, 4, %o3	/* bytes -> entries (16 bytes each) */
474	add		%o0, %o1, %o1	/* end of old tsb */
475	sllx		%g7, 32, %g7	/* TSB_PASS_BITS live in tag high word */
476	sub		%o3, 1, %o3	/* %o3 == new tsb hash mask */
477
478	mov		%o4, %g1	/* page_size_shift */

	/* Prefetch through the old TSB; ASI is patched to a physical
	 * ASI when TSBs are accessed physically (.tsb_phys_patch).
	 */
480661:	prefetcha	[%o0] ASI_N, #one_read
481	.section	.tsb_phys_patch, "ax"
482	.word		661b
483	prefetcha	[%o0] ASI_PHYS_USE_EC, #one_read
484	.previous

	/* At each 64-byte boundary, prefetch the next cache line.  */
48690:	andcc		%o0, (64 - 1), %g0
487	bne		1f
488	 add		%o0, 64, %o5

490661:	prefetcha	[%o5] ASI_N, #one_read
491	.section	.tsb_phys_patch, "ax"
492	.word		661b
493	prefetcha	[%o5] ASI_PHYS_USE_EC, #one_read
494	.previous

4961:	TSB_LOAD_QUAD(%o0, %g2)		/* %g2/%g3 == TSB entry */
497	andcc		%g2, %g7, %g0	/* LOCK or INVALID set? */
498	bne,pn		%xcc, 80f	/* Skip it */
499	 sllx		%g2, 22, %o4	/* TAG --> VADDR */

501	/* This can definitely be computed faster... */
502	srlx		%o0, 4, %o5	/* Build index */
503	and		%o5, 511, %o5	/* Mask index */
504	sllx		%o5, %g1, %o5	/* Put into vaddr position */
505	or		%o4, %o5, %o4	/* Full VADDR. */
506	srlx		%o4, %g1, %o4	/* Shift down to create index */
507	and		%o4, %o3, %o4	/* Mask with new_tsb_nents-1 */
508	sllx		%o4, 4, %o4	/* Shift back up into tsb ent offset */
509	TSB_STORE(%o2 + %o4, %g2)	/* Store TAG */
510	add		%o4, 0x8, %o4	/* Advance to TTE */
511	TSB_STORE(%o2 + %o4, %g3)	/* Store TTE */

51380:	add		%o0, 16, %o0	/* next old-TSB entry */
514	cmp		%o0, %o1	/* reached end of old TSB? */
515	bne,pt		%xcc, 90b
516	 nop

518	retl
519	 nop
520	.size		copy_tsb, .-copy_tsb
521
522	/* Set the invalid bit in all TSB entries.
	 *
	 * Writes the INVALID tag pattern into the tag word of every
	 * 16-byte entry, 0x100 bytes (16 entries) per loop iteration
	 * with prefetches ahead of the stores.  The size in %o1 must
	 * therefore be a multiple of 0x100 bytes.  Clobbers %g1.
	 */
523	.align		32
524	.globl		tsb_init
525	.type		tsb_init,#function
526tsb_init:		/* %o0 = TSB vaddr, %o1 = size in bytes */
527	prefetch	[%o0 + 0x000], #n_writes
528	mov		1, %g1
529	prefetch	[%o0 + 0x040], #n_writes
530	sllx		%g1, TSB_TAG_INVALID_BIT, %g1	/* %g1 = INVALID tag */
531	prefetch	[%o0 + 0x080], #n_writes
5321:	prefetch	[%o0 + 0x0c0], #n_writes
533	stx		%g1, [%o0 + 0x00]
534	stx		%g1, [%o0 + 0x10]
535	stx		%g1, [%o0 + 0x20]
536	stx		%g1, [%o0 + 0x30]
537	prefetch	[%o0 + 0x100], #n_writes
538	stx		%g1, [%o0 + 0x40]
539	stx		%g1, [%o0 + 0x50]
540	stx		%g1, [%o0 + 0x60]
541	stx		%g1, [%o0 + 0x70]
542	prefetch	[%o0 + 0x140], #n_writes
543	stx		%g1, [%o0 + 0x80]
544	stx		%g1, [%o0 + 0x90]
545	stx		%g1, [%o0 + 0xa0]
546	stx		%g1, [%o0 + 0xb0]
547	prefetch	[%o0 + 0x180], #n_writes
548	stx		%g1, [%o0 + 0xc0]
549	stx		%g1, [%o0 + 0xd0]
550	stx		%g1, [%o0 + 0xe0]
551	stx		%g1, [%o0 + 0xf0]
552	subcc		%o1, 0x100, %o1
553	bne,pt		%xcc, 1b
554	 add		%o0, 0x100, %o0	/* delay slot: advance pointer */
555	retl
556	 nop
557	nop
558	nop
559	.size		tsb_init, .-tsb_init
560
	/* Niagara variant of tsb_init: same contract (%o0 = TSB vaddr,
	 * %o1 = size in bytes, multiple of 0x100), but uses
	 * block-init quad ASI stores to avoid read-for-ownership
	 * traffic.  Saves and restores the caller's %asi.
	 * Clobbers %g1, %g2.
	 */
561	.globl		NGtsb_init
562	.type		NGtsb_init,#function
563NGtsb_init:
564	rd		%asi, %g2	/* preserve caller's %asi */
565	mov		1, %g1
566	wr		%g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
567	sllx		%g1, TSB_TAG_INVALID_BIT, %g1	/* %g1 = INVALID tag */
5681:	stxa		%g1, [%o0 + 0x00] %asi
569	stxa		%g1, [%o0 + 0x10] %asi
570	stxa		%g1, [%o0 + 0x20] %asi
571	stxa		%g1, [%o0 + 0x30] %asi
572	stxa		%g1, [%o0 + 0x40] %asi
573	stxa		%g1, [%o0 + 0x50] %asi
574	stxa		%g1, [%o0 + 0x60] %asi
575	stxa		%g1, [%o0 + 0x70] %asi
576	stxa		%g1, [%o0 + 0x80] %asi
577	stxa		%g1, [%o0 + 0x90] %asi
578	stxa		%g1, [%o0 + 0xa0] %asi
579	stxa		%g1, [%o0 + 0xb0] %asi
580	stxa		%g1, [%o0 + 0xc0] %asi
581	stxa		%g1, [%o0 + 0xd0] %asi
582	stxa		%g1, [%o0 + 0xe0] %asi
583	stxa		%g1, [%o0 + 0xf0] %asi
584	subcc		%o1, 0x100, %o1
585	bne,pt		%xcc, 1b
586	 add		%o0, 0x100, %o0	/* delay slot: advance pointer */
587	membar		#Sync		/* order block-init stores */
588	retl
589	 wr		%g2, 0x0, %asi	/* delay slot: restore %asi */
590	.size		NGtsb_init, .-NGtsb_init
591