xref: /openbmc/u-boot/arch/arm/lib/cache-cp15.c (revision acf1500138bb6b0496fe09d6bffdf8eac3d6ecab)
// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2002
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 */

#include <common.h>
#include <asm/system.h>
#include <asm/cache.h>
#include <linux/compiler.h>

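/*
 * Everything below is only built when at least one of the caches is
 * enabled in the configuration; with both CONFIG_SYS_ICACHE_OFF and
 * CONFIG_SYS_DCACHE_OFF set, only the no-op stubs at the end of this
 * file remain.
 */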
#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))

DECLARE_GLOBAL_DATA_PTR;

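/*
 * Weak hooks: board or SoC code may override these to run its own
 * setup before the translation table is written (arm_init_before_mmu)
 * and to program additional domain permissions (arm_init_domains).
 */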
__weak void arm_init_before_mmu(void)
{
}

__weak void arm_init_domains(void)
{
}

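/*
 * Write one section entry of the translation table: the section base
 * address, the access permission bits (plus the access flag under
 * LPAE) and the caching attributes selected by 'option'.
 */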
void set_section_dcache(int section, enum dcache_option option)
{
#ifdef CONFIG_ARMV7_LPAE
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
	/* Need to set the access flag to not fault */
	u64 value = TTB_SECT_AP | TTB_SECT_AF;
#else
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
	u32 value = TTB_SECT_AP;
#endif

	/* Add the page offset */
	value |= ((u32)section << MMU_SECTION_SHIFT);

	/* Add caching bits */
	value |= option;

	/* Set PTE */
	page_table[section] = value;
}

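/*
 * Weak default: warn only. Platforms that run with the D-cache on are
 * expected to override this so that modified table entries are written
 * back to memory where the MMU table walker can see them.
 */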
__weak void mmu_page_table_flush(unsigned long start, unsigned long stop)
{
	debug("%s: Warning: not implemented\n", __func__);
}

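/*
 * Apply 'option' to every MMU section overlapping [start, start + size),
 * rounding the end up to a section boundary, then flush the touched
 * page-table cache lines so the update takes effect.
 *
 * Typical use (sketch only, hypothetical addresses): mark a 1 MiB DMA
 * buffer as uncached:
 *
 *	mmu_set_region_dcache_behaviour(0x4f000000, 1 << 20, DCACHE_OFF);
 */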
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
#ifdef CONFIG_ARMV7_LPAE
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
#else
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
#endif
	unsigned long startpt, stoppt;
	unsigned long upto, end;

	end = ALIGN(start + size, MMU_SECTION_SIZE) >> MMU_SECTION_SHIFT;
	start = start >> MMU_SECTION_SHIFT;
#ifdef CONFIG_ARMV7_LPAE
	debug("%s: start=%pa, size=%zu, option=%llx\n", __func__, &start, size,
	      option);
#else
	debug("%s: start=%pa, size=%zu, option=0x%x\n", __func__, &start, size,
	      option);
#endif
	for (upto = start; upto < end; upto++)
		set_section_dcache(upto, option);

	/*
	 * Make sure the flushed range is cache-line aligned. Since only
	 * the CPU maintains the page tables, it is safe to always flush
	 * complete cache lines.
	 */

	startpt = (unsigned long)&page_table[start];
	startpt &= ~(CONFIG_SYS_CACHELINE_SIZE - 1);
	stoppt = (unsigned long)&page_table[end];
	stoppt = ALIGN(stoppt, CONFIG_SYS_CACHELINE_SIZE);
	mmu_page_table_flush(startpt, stoppt);
}

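/*
 * Weak default: mark every section of the given DRAM bank cacheable,
 * using the write policy selected by CONFIG_SYS_ARM_CACHE_WRITETHROUGH
 * or CONFIG_SYS_ARM_CACHE_WRITEALLOC (write-back otherwise). Boards may
 * override this, e.g. to leave part of a bank uncached.
 */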
__weak void dram_bank_mmu_setup(int bank)
{
	bd_t *bd = gd->bd;
	int	i;

	debug("%s: bank: %d\n", __func__, bank);
	for (i = bd->bi_dram[bank].start >> MMU_SECTION_SHIFT;
	     i < (bd->bi_dram[bank].start >> MMU_SECTION_SHIFT) +
		 (bd->bi_dram[bank].size >> MMU_SECTION_SHIFT);
	     i++) {
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
		set_section_dcache(i, DCACHE_WRITETHROUGH);
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
		set_section_dcache(i, DCACHE_WRITEALLOC);
#else
		set_section_dcache(i, DCACHE_WRITEBACK);
#endif
	}
}

/* to activate the MMU we need to set up virtual memory: use 1M areas */
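/*
 * Roughly: identity-map all 4GB as uncached sections, mark the DRAM
 * banks cacheable, program the table base (TTBR0, plus TTBCR/MAIR or
 * their HYP-mode equivalents for LPAE), open the domain access control
 * register, and finally set the M bit in the control register. Note
 * that with LPAE each entry covers a 2MB block rather than a 1MB
 * section.
 */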
static inline void mmu_setup(void)
{
	int i;
	u32 reg;

	arm_init_before_mmu();
	/* Set up an identity-mapping for all 4GB, rw for everyone */
	for (i = 0; i < ((4096ULL * 1024 * 1024) >> MMU_SECTION_SHIFT); i++)
		set_section_dcache(i, DCACHE_OFF);

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		dram_bank_mmu_setup(i);
	}

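	/*
	 * With LPAE (first branch below), gd->arch.tlb_addr holds four
	 * 4KB tables of 512 64-bit entries (1GB each), already filled by
	 * set_section_dcache() above; the small top-level table at
	 * tlb_addr + 16KB is built here to point at them.
	 */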
#if defined(CONFIG_ARMV7_LPAE) && __LINUX_ARM_ARCH__ != 4
	/* Set up 4 PTE entries pointing to our 4 1GB page tables */
	for (i = 0; i < 4; i++) {
		u64 *page_table = (u64 *)(gd->arch.tlb_addr + (4096 * 4));
		u64 tpt = gd->arch.tlb_addr + (4096 * i);
		page_table[i] = tpt | TTB_PAGETABLE;
	}

	reg = TTBCR_EAE;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBCR_ORGN0_WT | TTBCR_IRGN0_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBCR_ORGN0_WBWA | TTBCR_IRGN0_WBWA;
#else
	reg |= TTBCR_ORGN0_WBNWA | TTBCR_IRGN0_WBNWA;
#endif

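	/*
	 * In HYP mode the hypervisor's HTCR/HTTBR/HMAIR registers are
	 * programmed; otherwise the PL1 TTBCR/TTBR0/MAIR. The mcrr form
	 * writes the 64-bit base register: low word from the first
	 * operand, zero in the high word.
	 */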
	if (is_hyp()) {
		/* Set HTCR to enable LPAE */
		asm volatile("mcr p15, 4, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* Set HTTBR0 */
		asm volatile("mcrr p15, 4, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* Set HMAIR */
		asm volatile("mcr p15, 4, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	} else {
		/* Set TTBCR to enable LPAE */
		asm volatile("mcr p15, 0, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* Set 64-bit TTBR0 */
		asm volatile("mcrr p15, 0, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* Set MAIR */
		asm volatile("mcr p15, 0, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	}
#elif defined(CONFIG_CPU_V7A)
	if (is_hyp()) {
		/* Set HTCR to disable LPAE */
		asm volatile("mcr p15, 4, %0, c2, c0, 2"
			: : "r" (0) : "memory");
	} else {
		/* Set TTBCR to disable LPAE */
		asm volatile("mcr p15, 0, %0, c2, c0, 2"
			: : "r" (0) : "memory");
	}
	/* Set TTBR0 */
	reg = gd->arch.tlb_addr & TTBR0_BASE_ADDR_MASK;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBR0_RGN_WT | TTBR0_IRGN_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBR0_RGN_WBWA | TTBR0_IRGN_WBWA;
#else
	reg |= TTBR0_RGN_WB | TTBR0_IRGN_WB;
#endif
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (reg) : "memory");
#else
	/* Copy the page table address to cp15 */
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (gd->arch.tlb_addr) : "memory");
#endif
	/* Set the access control to all-supervisor */
	asm volatile("mcr p15, 0, %0, c3, c0, 0"
		     : : "r" (~0));

	arm_init_domains();

	/* and enable the mmu */
	reg = get_cr();	/* get control reg. */
	set_cr(reg | CR_M);
}

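/* Report whether the MMU is on by testing the M bit of the control register. */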
static int mmu_enabled(void)
{
	return get_cr() & CR_M;
}

/* cache_bit must be either CR_I or CR_C */
static void cache_enable(uint32_t cache_bit)
{
	uint32_t reg;

	/* The data cache is not active unless the mmu is enabled too */
	if ((cache_bit == CR_C) && !mmu_enabled())
		mmu_setup();
	reg = get_cr();	/* get control reg. */
	set_cr(reg | cache_bit);
}

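/*
 * Disabling the D-cache also disables the MMU, and the D-cache is
 * flushed to memory first so that no dirty lines are lost once it is
 * no longer searched.
 */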
/* cache_bit must be either CR_I or CR_C */
static void cache_disable(uint32_t cache_bit)
{
	uint32_t reg;

	reg = get_cr();

	if (cache_bit == CR_C) {
		/* if the cache isn't enabled there is no need to disable it */
		if ((reg & CR_C) != CR_C)
			return;
		/* if disabling data cache, disable mmu too */
		cache_bit |= CR_M;
	}
	reg = get_cr();

	if (cache_bit == (CR_C | CR_M))
		flush_dcache_all();
	set_cr(reg & ~cache_bit);
}
#endif

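/*
 * Public icache_*() / dcache_*() entry points: no-op stubs reporting
 * "off" when the corresponding cache is configured out, thin wrappers
 * around cache_enable()/cache_disable() otherwise.
 */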
#ifdef CONFIG_SYS_ICACHE_OFF
void icache_enable(void)
{
	return;
}

void icache_disable(void)
{
	return;
}

int icache_status(void)
{
	return 0;					/* always off */
}
#else
void icache_enable(void)
{
	cache_enable(CR_I);
}

void icache_disable(void)
{
	cache_disable(CR_I);
}

int icache_status(void)
{
	return (get_cr() & CR_I) != 0;
}
#endif

#ifdef CONFIG_SYS_DCACHE_OFF
void dcache_enable(void)
{
	return;
}

void dcache_disable(void)
{
	return;
}

int dcache_status(void)
{
	return 0;					/* always off */
}
#else
void dcache_enable(void)
{
	cache_enable(CR_C);
}

void dcache_disable(void)
{
	cache_disable(CR_C);
}

int dcache_status(void)
{
	return (get_cr() & CR_C) != 0;
}
#endif