xref: /openbmc/u-boot/arch/arm/lib/cache-cp15.c (revision 6b44ae6b)
/*
 * (C) Copyright 2002
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/system.h>
#include <asm/cache.h>
#include <linux/compiler.h>

#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))

DECLARE_GLOBAL_DATA_PTR;

__weak void arm_init_before_mmu(void)
{
}

__weak void arm_init_domains(void)
{
}

static void cp_delay(void)
{
	volatile int i;

	/* copro seems to need some delay between reading and writing */
	for (i = 0; i < 100; i++)
		nop();
	asm volatile("" : : : "memory");
}

void set_section_dcache(int section, enum dcache_option option)
{
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
	u32 value;

	/* Section base address with AP = 0b11: read/write for everyone */
	value = (section << MMU_SECTION_SHIFT) | (3 << 10);
	/* The option supplies the descriptor type and cacheability bits */
	value |= option;
	page_table[section] = value;
}
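
/*
 * Worked example (values taken from this file's own callers, nothing
 * new): set_section_dcache(1, DCACHE_OFF) stores
 * (1 << MMU_SECTION_SHIFT) | (3 << 10) | DCACHE_OFF into page_table[1],
 * i.e. an identity-mapped, full-access, uncached section descriptor for
 * the second 1 MiB of the address space.
 */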

__weak void mmu_page_table_flush(unsigned long start, unsigned long stop)
{
	debug("%s: Warning: not implemented\n", __func__);
}
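
/*
 * Platforms that run with the data cache on should override this weak
 * stub so that page-table updates are visible to the table walker. A
 * minimal sketch, not code from this tree:
 *
 *	void mmu_page_table_flush(unsigned long start, unsigned long stop)
 *	{
 *		flush_dcache_range(start, stop);
 *		asm volatile("mcr p15, 0, %0, c8, c7, 0" : : "r" (0));
 *	}
 *
 * where the MCR is the CP15 TLBIALL operation (invalidate the entire
 * unified TLB) so stale translations are dropped.
 */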

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
	unsigned long upto, end;

	end = ALIGN(start + size, MMU_SECTION_SIZE) >> MMU_SECTION_SHIFT;
	start = start >> MMU_SECTION_SHIFT;
	debug("%s: start=%pa, size=%zu, option=%d\n", __func__, &start, size,
	      option);
	for (upto = start; upto < end; upto++)
		set_section_dcache(upto, option);
	mmu_page_table_flush((u32)&page_table[start], (u32)&page_table[end]);
}
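
/*
 * Example use, with a made-up address: a driver sharing a 1 MiB buffer
 * with a non-coherent peripheral could map it uncached via
 *
 *	mmu_set_region_dcache_behaviour(0x4f000000, 1 << 20, DCACHE_OFF);
 *
 * The start is rounded down and the end rounded up to 1 MiB section
 * boundaries, so the remapped region may be larger than requested.
 */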

__weak void dram_bank_mmu_setup(int bank)
{
	bd_t *bd = gd->bd;
	int	i;

	debug("%s: bank: %d\n", __func__, bank);
	for (i = bd->bi_dram[bank].start >> 20;
	     i < (bd->bi_dram[bank].start >> 20) + (bd->bi_dram[bank].size >> 20);
	     i++) {
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
		set_section_dcache(i, DCACHE_WRITETHROUGH);
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
		set_section_dcache(i, DCACHE_WRITEALLOC);
#else
		set_section_dcache(i, DCACHE_WRITEBACK);
#endif
	}
}
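
/*
 * A board can override this weak helper when part of a bank must stay
 * uncached, e.g. a buffer shared with a coprocessor. A sketch only; the
 * uncached first section is an invented example:
 *
 *	void dram_bank_mmu_setup(int bank)
 *	{
 *		bd_t *bd = gd->bd;
 *		int i = bd->bi_dram[bank].start >> 20;
 *
 *		set_section_dcache(i++, DCACHE_OFF);
 *		for (; i < (bd->bi_dram[bank].start +
 *			    bd->bi_dram[bank].size) >> 20; i++)
 *			set_section_dcache(i, DCACHE_WRITEBACK);
 *	}
 */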

/* to activate the MMU we need to set up virtual memory: use 1M areas */
static inline void mmu_setup(void)
{
	int i;
	u32 reg;

	arm_init_before_mmu();
	/* Set up an identity-mapping for all 4GB, rw for everyone */
	for (i = 0; i < 4096; i++)
		set_section_dcache(i, DCACHE_OFF);

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++)
		dram_bank_mmu_setup(i);

#ifdef CONFIG_ARMV7
	/* Set TTBR0 */
	reg = gd->arch.tlb_addr & TTBR0_BASE_ADDR_MASK;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBR0_RGN_WT | TTBR0_IRGN_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBR0_RGN_WBWA | TTBR0_IRGN_WBWA;
#else
	reg |= TTBR0_RGN_WB | TTBR0_IRGN_WB;
#endif
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (reg) : "memory");
#else
	/* Copy the page table address to cp15 */
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (gd->arch.tlb_addr) : "memory");
#endif
	/* Set all 16 domains to manager access in the DACR, i.e. no
	 * permission checks; arm_init_domains() may refine this. */
	asm volatile("mcr p15, 0, %0, c3, c0, 0"
		     : : "r" (~0));

	arm_init_domains();

	/* and enable the MMU */
	reg = get_cr();	/* get control reg. */
	cp_delay();
	set_cr(reg | CR_M);
}
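
/*
 * Worked example (the table address is invented): with gd->arch.tlb_addr
 * at 0x4ff00000 and the default write-back configuration on ARMv7, the
 * value written to TTBR0 is 0x4ff00000 | TTBR0_RGN_WB | TTBR0_IRGN_WB,
 * i.e. the 16 KiB-aligned table base plus the cacheability hints for
 * hardware table walks. Note the ordering above: the whole 4 GiB is
 * identity-mapped before CR_M is set, so execution continues seamlessly
 * across the MMU switch.
 */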

static int mmu_enabled(void)
{
	return get_cr() & CR_M;
}

/* cache_bit must be either CR_I or CR_C */
static void cache_enable(uint32_t cache_bit)
{
	uint32_t reg;

	/* The data cache is not active unless the MMU is enabled too */
	if ((cache_bit == CR_C) && !mmu_enabled())
		mmu_setup();
	reg = get_cr();	/* get control reg. */
	cp_delay();
	set_cr(reg | cache_bit);
}

/* cache_bit must be either CR_I or CR_C */
static void cache_disable(uint32_t cache_bit)
{
	uint32_t reg;

	reg = get_cr();
	cp_delay();

	if (cache_bit == CR_C) {
		/* if cache isn't enabled no need to disable */
		if ((reg & CR_C) != CR_C)
			return;
		/* if disabling data cache, disable mmu too */
		cache_bit |= CR_M;
	}
	reg = get_cr();
	cp_delay();
	/* flush the dcache before it (and the MMU) is turned off */
	if (cache_bit == (CR_C | CR_M))
		flush_dcache_all();
	set_cr(reg & ~cache_bit);
}
#endif
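
/*
 * Typical call sequence from board code (the call sites live outside
 * this file; this is just a usage sketch):
 *
 *	icache_enable();
 *	dcache_enable();
 *	...
 *	dcache_disable();
 *	icache_disable();
 *
 * dcache_disable() flushes the data cache and clears CR_C together with
 * CR_M, since cacheability attributes come from the page tables and are
 * meaningless once the MMU is off.
 */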

#ifdef CONFIG_SYS_ICACHE_OFF
void icache_enable(void)
{
	return;
}

void icache_disable(void)
{
	return;
}

int icache_status(void)
{
	return 0;					/* always off */
}
#else
void icache_enable(void)
{
	cache_enable(CR_I);
}

void icache_disable(void)
{
	cache_disable(CR_I);
}

int icache_status(void)
{
	return (get_cr() & CR_I) != 0;
}
#endif

#ifdef CONFIG_SYS_DCACHE_OFF
void dcache_enable(void)
{
	return;
}

void dcache_disable(void)
{
	return;
}

int dcache_status(void)
{
	return 0;					/* always off */
}
#else
void dcache_enable(void)
{
	cache_enable(CR_C);
}

void dcache_disable(void)
{
	cache_disable(CR_C);
}

int dcache_status(void)
{
	return (get_cr() & CR_C) != 0;
}
#endif