/*
 * arch/xtensa/include/asm/initialize_mmu.h
 *
 * Initializes MMU:
 *
 *      For the new V3 MMU we remap the TLB from virtual == physical
 *      to the standard Linux mapping used in earlier MMUs.
 *
 *      For the MMU we also support a new configuration register that
 *      specifies how the S32C1I instruction operates with the cache
 *      controller.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file "COPYING" in the main directory of
 * this archive for more details.
 *
 * Copyright (C) 2008 - 2012 Tensilica, Inc.
 *
 *   Marc Gauthier <marc@tensilica.com>
 *   Pete Delaney <piet@tensilica.com>
 */

#ifndef _XTENSA_INITIALIZE_MMU_H
#define _XTENSA_INITIALIZE_MMU_H

#include <asm/pgtable.h>
#include <asm/vectors.h>

#define CA_BYPASS	(_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
#define CA_WRITEBACK	(_PAGE_CA_WB     | _PAGE_HW_WRITE | _PAGE_HW_EXEC)

#ifdef __ASSEMBLY__

#define XTENSA_HWVERSION_RC_2009_0 230000

	.macro	initialize_mmu

#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
/*
 * We have an Atomic Operation Control (ATOMCTL) register; initialize it.
 * For details see Documentation/xtensa/atomctl.txt
 */
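/*
 * ATOMCTL holds three 2-bit fields that select how S32C1I behaves for
 * each memory type: writeback (bits 5:4), writethrough (bits 3:2) and
 * bypass (bits 1:0).  Per atomctl.txt, a field value of 1 requests an
 * RCW bus transaction, and (for writeback-cacheable memory) a value of
 * 2 performs the operation internally in the cache.
 */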
#if XCHAL_DCACHE_IS_COHERENT
	movi	a3, 0x25	/* For SMP/MX -- internal for writeback,
				 * RCW otherwise
				 */
#else
	movi	a3, 0x29	/* non-MX -- most cores use standard memory
				 * controllers, which usually can't do RCW
				 */
#endif
	wsr	a3, atomctl
#endif  /* XCHAL_HAVE_S32C1I &&
	 * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
	 */

#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
/*
 * Have MMU v3
 */

#if !XCHAL_HAVE_VECBASE
# error "MMU v3 requires relocatable vectors"
#endif

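	/*
	 * Capture the physical address of this code in a0: _call0 sets a0
	 * to the return address, i.e. the address of the _j 2f below, and
	 * execution continues at 1 with the MMU still in its reset state.
	 */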
	movi	a1, 0
	_call0	1f
	_j	2f

	.align	4
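	/*
	 * Sanity check: spin forever at 9 below unless this code executes
	 * from a physical address in 0xF0000000..0x07FFFFFF (wrapping
	 * through 0; a0 holds the physical address captured above).
	 */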
1:	movi	a2, 0x10000000
	movi	a3, 0x18000000
	add	a2, a2, a0
9:	bgeu	a2, a3, 9b	/* PC is out of the expected range */

	/* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */

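	/* The low bits of the address given to the TLB instructions select
	 * the way; way 6 is the spanning way that provides the 512 MB
	 * virtual == physical mappings out of reset.
	 */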
	movi	a2, 0x40000006
	idtlb	a2
	iitlb	a2
	isync

	/* Step 2: map 0x40000000..0x47FFFFFF to paddr containing this code
	 * and jump to the new mapping.
	 */

	srli	a3, a0, 27
	slli	a3, a3, 27
	addi	a3, a3, CA_BYPASS
	addi	a7, a2, -1
	wdtlb	a3, a7
	witlb	a3, a7
	isync

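	/* Keep only a0's offset within the 128 MB region (clear the top
	 * five bits) and resume at the _j 2f above through the temporary
	 * way-5 mapping at 0x40000000 (a5 = a2 - 6).
	 */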
	slli	a4, a0, 5
	srli	a4, a4, 5
	addi	a5, a2, -6
	add	a4, a4, a5
	jx	a4

	/* Step 3: unmap everything other than the current area.
	 *	   Start at 0x60000000, wrap around, and end with 0x20000000,
	 *	   walking the way-6 entries in 512 MB steps.
	 */
2:	movi	a4, 0x20000000
	add	a5, a2, a4
3:	idtlb	a5
	iitlb	a5
	add	a5, a5, a4
	bne	a5, a2, 3b

	/* Step 4: Set up the MMU with the standard (v2) Linux mappings. */
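	/* Writing 0x01000000 to ITLBCFG/DTLBCFG appears to select the
	 * smaller (256 MB) page size for way 6, matching the two KIO
	 * entries installed below.
	 */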
	movi	a6, 0x01000000
	wsr	a6, ITLBCFG
	wsr	a6, DTLBCFG
	isync

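	/* Way 5 (128 MB pages): map the kernel segment at 0xd0000000
	 * cached and at 0xd8000000 bypass, both to physical address 0.
	 */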
	movi	a5, 0xd0000005
	movi	a4, CA_WRITEBACK
	wdtlb	a4, a5
	witlb	a4, a5

	movi	a5, 0xd8000005
	movi	a4, CA_BYPASS
	wdtlb	a4, a5
	witlb	a4, a5

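	/* Way 6 (256 MB pages): map the cached and bypass KIO windows to
	 * the default I/O physical region.
	 */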
	movi	a5, XCHAL_KIO_CACHED_VADDR + 6
	movi	a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK
	wdtlb	a4, a5
	witlb	a4, a5

	movi	a5, XCHAL_KIO_BYPASS_VADDR + 6
	movi	a4, XCHAL_KIO_DEFAULT_PADDR + CA_BYPASS
	wdtlb	a4, a5
	witlb	a4, a5

	isync

	/* Jump to self, using MMU v2 mappings. */
	movi	a4, 1f
	jx	a4

1:
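	/* Now executing through the kseg mapping; point VECBASE at the
	 * relocated vector region.
	 */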
	movi	a2, VECBASE_RESET_VADDR
	wsr	a2, vecbase

	/* Step 5: remove temporary mapping. */
	idtlb	a7
	iitlb	a7
	isync

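	/* Clear PTEVADDR, the virtual base of the page table used for TLB
	 * refill; the kernel presumably installs the real page-table base
	 * here later.
	 */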
	movi	a0, 0
	wsr	a0, ptevaddr
	rsync

#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
	  XCHAL_HAVE_SPANNING_WAY */

	.endm

#endif /*__ASSEMBLY__*/

#endif /* _XTENSA_INITIALIZE_MMU_H */