/*
 * (C) Copyright 2002
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/system.h>
#include <asm/cache.h>
#include <linux/compiler.h>

#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))

DECLARE_GLOBAL_DATA_PTR;

__weak void arm_init_before_mmu(void)
{
}

__weak void arm_init_domains(void)
{
}

static void cp_delay(void)
{
	volatile int i;

	/* copro seems to need some delay between reading and writing */
	for (i = 0; i < 100; i++)
		nop();
	asm volatile("" : : : "memory");
}

void set_section_dcache(int section, enum dcache_option option)
{
#ifdef CONFIG_ARMV7_LPAE
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
	/* Need to set the access flag to not fault */
	u64 value = TTB_SECT_AP | TTB_SECT_AF;
#else
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
	u32 value = TTB_SECT_AP;
#endif

	/* Add the page offset */
	value |= ((u32)section << MMU_SECTION_SHIFT);

	/* Add caching bits */
	value |= option;

	/* Set PTE */
	page_table[section] = value;
}

__weak void mmu_page_table_flush(unsigned long start, unsigned long stop)
{
	debug("%s: Warning: not implemented\n", __func__);
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
#ifdef CONFIG_ARMV7_LPAE
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
#else
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
#endif
	unsigned long startpt, stoppt;
	unsigned long upto, end;

	end = ALIGN(start + size, MMU_SECTION_SIZE) >> MMU_SECTION_SHIFT;
	start = start >> MMU_SECTION_SHIFT;
#ifdef CONFIG_ARMV7_LPAE
	debug("%s: start=%pa, size=%zu, option=%llx\n", __func__, &start, size,
	      option);
#else
	debug("%s: start=%pa, size=%zu, option=0x%x\n", __func__, &start, size,
	      option);
#endif
	for (upto = start; upto < end; upto++)
		set_section_dcache(upto, option);

	/*
	 * Make sure range is cache line aligned
	 * Only CPU maintains page tables, hence it is safe to always
	 * flush complete cache lines...
	 */

	startpt = (unsigned long)&page_table[start];
	startpt &= ~(CONFIG_SYS_CACHELINE_SIZE - 1);
	stoppt = (unsigned long)&page_table[end];
	stoppt = ALIGN(stoppt, CONFIG_SYS_CACHELINE_SIZE);
	mmu_page_table_flush(startpt, stoppt);
}

__weak void dram_bank_mmu_setup(int bank)
{
	bd_t *bd = gd->bd;
	int i;

	debug("%s: bank: %d\n", __func__, bank);
	for (i = bd->bi_dram[bank].start >> MMU_SECTION_SHIFT;
	     i < (bd->bi_dram[bank].start >> MMU_SECTION_SHIFT) +
		 (bd->bi_dram[bank].size >> MMU_SECTION_SHIFT);
	     i++) {
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
		set_section_dcache(i, DCACHE_WRITETHROUGH);
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
		set_section_dcache(i, DCACHE_WRITEALLOC);
#else
		set_section_dcache(i, DCACHE_WRITEBACK);
#endif
	}
}

/* to activate the MMU we need to set up virtual memory: use 1M areas */
static inline void mmu_setup(void)
{
	int i;
	u32 reg;

	arm_init_before_mmu();
	/* Set up an identity-mapping for all 4GB, rw for everyone */
	for (i = 0; i < ((4096ULL * 1024 * 1024) >> MMU_SECTION_SHIFT); i++)
		set_section_dcache(i, DCACHE_OFF);

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		dram_bank_mmu_setup(i);
	}

#if defined(CONFIG_ARMV7_LPAE) && __LINUX_ARM_ARCH__ != 4
	/* Set up 4 PTE entries pointing to our 4 1GB page tables */
	for (i = 0; i < 4; i++) {
		u64 *page_table = (u64 *)(gd->arch.tlb_addr + (4096 * 4));
		u64 tpt = gd->arch.tlb_addr + (4096 * i);
		page_table[i] = tpt | TTB_PAGETABLE;
	}

	reg = TTBCR_EAE;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBCR_ORGN0_WT | TTBCR_IRGN0_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBCR_ORGN0_WBWA | TTBCR_IRGN0_WBWA;
#else
	reg |= TTBCR_ORGN0_WBNWA | TTBCR_IRGN0_WBNWA;
#endif

	if (is_hyp()) {
		/* Set HTCR to enable LPAE */
		asm volatile("mcr p15, 4, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* Set HTTBR0 */
		asm volatile("mcrr p15, 4, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* Set HMAIR */
		asm volatile("mcr p15, 4, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	} else {
		/* Set TTBCR to enable LPAE */
		asm volatile("mcr p15, 0, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* Set 64-bit TTBR0 */
		asm volatile("mcrr p15, 0, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* Set MAIR */
		asm volatile("mcr p15, 0, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	}
#elif defined(CONFIG_CPU_V7)
	if (is_hyp()) {
		/* Set HTCR to disable LPAE */
		asm volatile("mcr p15, 4, %0, c2, c0, 2"
			: : "r" (0) : "memory");
	} else {
		/* Set TTBCR to disable LPAE */
		asm volatile("mcr p15, 0, %0, c2, c0, 2"
			: : "r" (0) : "memory");
	}
	/* Set TTBR0 */
	reg = gd->arch.tlb_addr & TTBR0_BASE_ADDR_MASK;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBR0_RGN_WT | TTBR0_IRGN_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBR0_RGN_WBWA | TTBR0_IRGN_WBWA;
#else
	reg |= TTBR0_RGN_WB | TTBR0_IRGN_WB;
#endif
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (reg) : "memory");
#else
	/* Copy the page table address to cp15 */
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (gd->arch.tlb_addr) : "memory");
#endif
	/* Set the access control to all-supervisor */
	asm volatile("mcr p15, 0, %0, c3, c0, 0"
		     : : "r" (~0));

	arm_init_domains();

	/* and enable the mmu */
	reg = get_cr();	/* get control reg. */
	cp_delay();
	set_cr(reg | CR_M);
}

static int mmu_enabled(void)
{
	return get_cr() & CR_M;
}

/* cache_bit must be either CR_I or CR_C */
static void cache_enable(uint32_t cache_bit)
{
	uint32_t reg;

	/* The data cache is not active unless the mmu is enabled too */
	if ((cache_bit == CR_C) && !mmu_enabled())
		mmu_setup();
	reg = get_cr();	/* get control reg. */
	cp_delay();
	set_cr(reg | cache_bit);
}

/* cache_bit must be either CR_I or CR_C */
static void cache_disable(uint32_t cache_bit)
{
	uint32_t reg;

	reg = get_cr();
	cp_delay();

	if (cache_bit == CR_C) {
		/* if the cache isn't enabled there is no need to disable it */
		if ((reg & CR_C) != CR_C)
			return;
		/* if disabling data cache, disable mmu too */
		cache_bit |= CR_M;
	}
	reg = get_cr();
	cp_delay();
	if (cache_bit == (CR_C | CR_M))
		flush_dcache_all();
	set_cr(reg & ~cache_bit);
}
#endif

#ifdef CONFIG_SYS_ICACHE_OFF
void icache_enable(void)
{
	return;
}

void icache_disable(void)
{
	return;
}

int icache_status(void)
{
	return 0;			/* always off */
}
#else
void icache_enable(void)
{
	cache_enable(CR_I);
}

void icache_disable(void)
{
	cache_disable(CR_I);
}

int icache_status(void)
{
	return (get_cr() & CR_I) != 0;
}
#endif

#ifdef CONFIG_SYS_DCACHE_OFF
void dcache_enable(void)
{
	return;
}

void dcache_disable(void)
{
	return;
}

int dcache_status(void)
{
	return 0;			/* always off */
}
#else
void dcache_enable(void)
{
	cache_enable(CR_C);
}

void dcache_disable(void)
{
	cache_disable(CR_C);
}

int dcache_status(void)
{
	return (get_cr() & CR_C) != 0;
}
#endif
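
/*
 * Illustrative usage sketch (kept under #if 0, not built as part of this
 * file): board code typically enables both caches after relocation and
 * then marks DMA or framebuffer regions as uncached.  The function name,
 * buffer address and size below are assumptions made up for the example;
 * only icache_enable(), dcache_enable() and
 * mmu_set_region_dcache_behaviour() come from this file.
 */
#if 0
static void example_enable_caches(void)
{
	icache_enable();
	/* dcache_enable() runs mmu_setup() first if the MMU is still off */
	dcache_enable();

	/* Assumed 1 MiB DMA buffer at 0x80000000: map it uncached */
	mmu_set_region_dcache_behaviour(0x80000000, 1024 * 1024, DCACHE_OFF);
}
#endif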