// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2002
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 */

#include <common.h>
#include <asm/system.h>
#include <asm/cache.h>
#include <linux/compiler.h>
#include <asm/armv7_mpu.h>

#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))

DECLARE_GLOBAL_DATA_PTR;

#ifdef CONFIG_SYS_ARM_MMU
__weak void arm_init_before_mmu(void)
{
}

__weak void arm_init_domains(void)
{
}

/* Fill in one level-1 page table entry for @section with the given cache option */
void set_section_dcache(int section, enum dcache_option option)
{
#ifdef CONFIG_ARMV7_LPAE
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
	/* Need to set the access flag to not fault */
	u64 value = TTB_SECT_AP | TTB_SECT_AF;
#else
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
	u32 value = TTB_SECT_AP;
#endif

	/* Add the page offset */
	value |= ((u32)section << MMU_SECTION_SHIFT);

	/* Add caching bits */
	value |= option;

	/* Set PTE */
	page_table[section] = value;
}

__weak void mmu_page_table_flush(unsigned long start, unsigned long stop)
{
	debug("%s: Warning: not implemented\n", __func__);
}

/*
 * Change the cache attributes of every MMU section covering the region
 * [start, start + size) and flush the modified page table entries.
 */
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
#ifdef CONFIG_ARMV7_LPAE
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
#else
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
#endif
	unsigned long startpt, stoppt;
	unsigned long upto, end;

	end = ALIGN(start + size, MMU_SECTION_SIZE) >> MMU_SECTION_SHIFT;
	start = start >> MMU_SECTION_SHIFT;
#ifdef CONFIG_ARMV7_LPAE
	debug("%s: start=%pa, size=%zu, option=%llx\n", __func__, &start, size,
	      option);
#else
	debug("%s: start=%pa, size=%zu, option=0x%x\n", __func__, &start, size,
	      option);
#endif
	for (upto = start; upto < end; upto++)
		set_section_dcache(upto, option);

	/*
	 * Make sure range is cache line aligned
	 * Only CPU maintains page tables, hence it is safe to always
	 * flush complete cache lines...
	 */

	startpt = (unsigned long)&page_table[start];
	startpt &= ~(CONFIG_SYS_CACHELINE_SIZE - 1);
	stoppt = (unsigned long)&page_table[end];
	stoppt = ALIGN(stoppt, CONFIG_SYS_CACHELINE_SIZE);
	mmu_page_table_flush(startpt, stoppt);
}

__weak void dram_bank_mmu_setup(int bank)
{
	bd_t *bd = gd->bd;
	int i;

	debug("%s: bank: %d\n", __func__, bank);
	for (i = bd->bi_dram[bank].start >> MMU_SECTION_SHIFT;
	     i < (bd->bi_dram[bank].start >> MMU_SECTION_SHIFT) +
		 (bd->bi_dram[bank].size >> MMU_SECTION_SHIFT);
	     i++) {
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
		set_section_dcache(i, DCACHE_WRITETHROUGH);
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
		set_section_dcache(i, DCACHE_WRITEALLOC);
#else
		set_section_dcache(i, DCACHE_WRITEBACK);
#endif
	}
}

/* to activate the MMU we need to set up virtual memory: use 1M areas */
static inline void mmu_setup(void)
{
	int i;
	u32 reg;

	arm_init_before_mmu();
	/* Set up an identity-mapping for all 4GB, rw for everyone */
	for (i = 0; i < ((4096ULL * 1024 * 1024) >> MMU_SECTION_SHIFT); i++)
		set_section_dcache(i, DCACHE_OFF);

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		dram_bank_mmu_setup(i);
	}

#if defined(CONFIG_ARMV7_LPAE) && __LINUX_ARM_ARCH__ != 4
	/* Set up 4 PTE entries pointing to our 4 1GB page tables */
	for (i = 0; i < 4; i++) {
		u64 *page_table = (u64 *)(gd->arch.tlb_addr + (4096 * 4));
		u64 tpt = gd->arch.tlb_addr + (4096 * i);

		page_table[i] = tpt | TTB_PAGETABLE;
	}

	reg = TTBCR_EAE;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBCR_ORGN0_WT | TTBCR_IRGN0_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBCR_ORGN0_WBWA | TTBCR_IRGN0_WBWA;
#else
	reg |= TTBCR_ORGN0_WBNWA | TTBCR_IRGN0_WBNWA;
#endif

	if (is_hyp()) {
		/* Set HTCR to enable LPAE */
		asm volatile("mcr p15, 4, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* Set HTTBR0 */
		asm volatile("mcrr p15, 4, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* Set HMAIR */
		asm volatile("mcr p15, 4, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	} else {
		/* Set TTBCR to enable LPAE */
		asm volatile("mcr p15, 0, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* Set 64-bit TTBR0 */
		asm volatile("mcrr p15, 0, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* Set MAIR */
		asm volatile("mcr p15, 0, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	}
#elif defined(CONFIG_CPU_V7A)
	if (is_hyp()) {
		/* Set HTCR to disable LPAE */
		asm volatile("mcr p15, 4, %0, c2, c0, 2"
			: : "r" (0) : "memory");
	} else {
		/* Set TTBCR to disable LPAE */
		asm volatile("mcr p15, 0, %0, c2, c0, 2"
			: : "r" (0) : "memory");
	}
	/* Set TTBR0 */
	reg = gd->arch.tlb_addr & TTBR0_BASE_ADDR_MASK;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBR0_RGN_WT | TTBR0_IRGN_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBR0_RGN_WBWA | TTBR0_IRGN_WBWA;
#else
	reg |= TTBR0_RGN_WB | TTBR0_IRGN_WB;
#endif
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (reg) : "memory");
#else
	/* Copy the page table address to cp15 */
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (gd->arch.tlb_addr) : "memory");
#endif
	/* Set the access control to all-supervisor */
	asm volatile("mcr p15, 0, %0, c3, c0, 0"
		     : : "r" (~0));

	arm_init_domains();

	/* and enable the mmu */
	reg = get_cr();	/* get control reg. */
	set_cr(reg | CR_M);
}

static int mmu_enabled(void)
{
	return get_cr() & CR_M;
}
#endif /* CONFIG_SYS_ARM_MMU */

/* cache_bit must be either CR_I or CR_C */
static void cache_enable(uint32_t cache_bit)
{
	uint32_t reg;

	/* The data cache is not active unless the mmu/mpu is enabled too */
#ifdef CONFIG_SYS_ARM_MMU
	if ((cache_bit == CR_C) && !mmu_enabled())
		mmu_setup();
#elif defined(CONFIG_SYS_ARM_MPU)
	if ((cache_bit == CR_C) && !mpu_enabled()) {
		printf("Consider enabling MPU before enabling caches\n");
		return;
	}
#endif
	reg = get_cr();	/* get control reg. */
	set_cr(reg | cache_bit);
}

/* cache_bit must be either CR_I or CR_C */
static void cache_disable(uint32_t cache_bit)
{
	uint32_t reg;

	reg = get_cr();

	if (cache_bit == CR_C) {
		/* if cache isn't enabled no need to disable */
		if ((reg & CR_C) != CR_C)
			return;
		/* if disabling data cache, disable mmu too */
		cache_bit |= CR_M;
	}
	reg = get_cr();

	if (cache_bit == (CR_C | CR_M))
		flush_dcache_all();
	set_cr(reg & ~cache_bit);
}
#endif

#ifdef CONFIG_SYS_ICACHE_OFF
void icache_enable(void)
{
	return;
}

void icache_disable(void)
{
	return;
}

int icache_status(void)
{
	return 0;	/* always off */
}
#else
void icache_enable(void)
{
	cache_enable(CR_I);
}

void icache_disable(void)
{
	cache_disable(CR_I);
}

int icache_status(void)
{
	return (get_cr() & CR_I) != 0;
}
#endif

#ifdef CONFIG_SYS_DCACHE_OFF
void dcache_enable(void)
{
	return;
}

void dcache_disable(void)
{
	return;
}

int dcache_status(void)
{
	return 0;	/* always off */
}
#else
void dcache_enable(void)
{
	cache_enable(CR_C);
}

void dcache_disable(void)
{
	cache_disable(CR_C);
}

int dcache_status(void)
{
	return (get_cr() & CR_C) != 0;
}
#endif