/* xref: /openbmc/linux/arch/sparc/lib/clear_page.S (revision 4c79e98b) */
/* clear_page.S: UltraSparc optimized clear page.
 *
 * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
 */
6
7#include <asm/visasm.h>
8#include <asm/thread_info.h>
9#include <asm/page.h>
10#include <asm/pgtable.h>
11#include <asm/spitfire.h>
12#include <asm/head.h>
13#include <asm/export.h>
14
	/* What we used to do was lock a TLB entry into a specific
	 * TLB slot, clear the page with interrupts disabled, then
	 * restore the original TLB entry.  This was great for
	 * disturbing the TLB as little as possible, but it meant
	 * we had to keep interrupts disabled for a long time.
	 *
	 * Now, we simply use the normal TLB loading mechanism,
	 * and this makes the cpu choose a slot all by itself.
	 * Then we do a normal TLB flush on exit.  We need only
	 * disable preemption during the clear.
	 */

27	.text
28
29	.globl		_clear_page
30	EXPORT_SYMBOL(_clear_page)
31_clear_page:		/* %o0=dest */
32	ba,pt		%xcc, clear_page_common
33	 clr		%o4
34
35	/* This thing is pretty important, it shows up
36	 * on the profiles via do_anonymous_page().
37	 */
38	.align		32
39	.globl		clear_user_page
40	EXPORT_SYMBOL(clear_user_page)
41clear_user_page:	/* %o0=dest, %o1=vaddr */
42	lduw		[%g6 + TI_PRE_COUNT], %o2
43	sethi		%hi(PAGE_OFFSET), %g2
44	sethi		%hi(PAGE_SIZE), %o4
45
46	ldx		[%g2 + %lo(PAGE_OFFSET)], %g2
47	sethi		%hi(PAGE_KERNEL_LOCKED), %g3
48
49	ldx		[%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
50	sub		%o0, %g2, %g1		! paddr
51
52	and		%o1, %o4, %o0		! vaddr D-cache alias bit
53
54	or		%g1, %g3, %g1		! TTE data
55	sethi		%hi(TLBTEMP_BASE), %o3
56
57	add		%o2, 1, %o4
58	add		%o0, %o3, %o0		! TTE vaddr
59
60	/* Disable preemption.  */
61	mov		TLB_TAG_ACCESS, %g3
62	stw		%o4, [%g6 + TI_PRE_COUNT]
63
64	/* Load TLB entry.  */
65	rdpr		%pstate, %o4
66	wrpr		%o4, PSTATE_IE, %pstate
67	stxa		%o0, [%g3] ASI_DMMU
68	stxa		%g1, [%g0] ASI_DTLB_DATA_IN
69	sethi		%hi(KERNBASE), %g1
70	flush		%g1
71	wrpr		%o4, 0x0, %pstate
72
73	mov		1, %o4
74
75clear_page_common:
76	VISEntryHalf
77	membar		#StoreLoad | #StoreStore | #LoadStore
78	fzero		%f0
79	sethi		%hi(PAGE_SIZE/64), %o1
80	mov		%o0, %g1		! remember vaddr for tlbflush
81	fzero		%f2
82	or		%o1, %lo(PAGE_SIZE/64), %o1
83	faddd		%f0, %f2, %f4
84	fmuld		%f0, %f2, %f6
85	faddd		%f0, %f2, %f8
86	fmuld		%f0, %f2, %f10
87
88	faddd		%f0, %f2, %f12
89	fmuld		%f0, %f2, %f14
901:	stda		%f0, [%o0 + %g0] ASI_BLK_P
91	subcc		%o1, 1, %o1
92	bne,pt		%icc, 1b
93	 add		%o0, 0x40, %o0
94	membar		#Sync
95	VISExitHalf
96
97	brz,pn		%o4, out
98	 nop
99
100	stxa		%g0, [%g1] ASI_DMMU_DEMAP
101	membar		#Sync
102	stw		%o2, [%g6 + TI_PRE_COUNT]
103
104out:	retl
105	 nop
106
107