/*
 * (C) Copyright 2002
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <asm/system.h>
#include <asm/cache.h>
#include <linux/compiler.h>

#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))

DECLARE_GLOBAL_DATA_PTR;

__weak void arm_init_before_mmu(void)
{
}

__weak void arm_init_domains(void)
{
}

static void cp_delay(void)
{
	volatile int i;

	/* copro seems to need some delay between reading and writing */
	for (i = 0; i < 100; i++)
		nop();
	asm volatile("" : : : "memory");
}

void set_section_dcache(int section, enum dcache_option option)
{
#ifdef CONFIG_ARMV7_LPAE
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
	/* Need to set the access flag to not fault */
	u64 value = TTB_SECT_AP | TTB_SECT_AF;
#else
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
	u32 value = TTB_SECT_AP;
#endif

	/* Add the page offset */
	value |= ((u32)section << MMU_SECTION_SHIFT);

	/* Add caching bits */
	value |= option;

	/* Set PTE */
	page_table[section] = value;
}

__weak void mmu_page_table_flush(unsigned long start, unsigned long stop)
{
	debug("%s: Warning: not implemented\n", __func__);
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
	unsigned long upto, end;

	end = ALIGN(start + size, MMU_SECTION_SIZE) >> MMU_SECTION_SHIFT;
	start = start >> MMU_SECTION_SHIFT;
	debug("%s: start=%pa, size=%zu, option=%d\n", __func__, &start, size,
	      option);
	for (upto = start; upto < end; upto++)
		set_section_dcache(upto, option);
	mmu_page_table_flush((u32)&page_table[start], (u32)&page_table[end]);
}

__weak void dram_bank_mmu_setup(int bank)
{
	bd_t *bd = gd->bd;
	int i;

	debug("%s: bank: %d\n", __func__, bank);
	for (i = bd->bi_dram[bank].start >> MMU_SECTION_SHIFT;
	     i < (bd->bi_dram[bank].start >> MMU_SECTION_SHIFT) +
		 (bd->bi_dram[bank].size >> MMU_SECTION_SHIFT);
	     i++) {
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
		set_section_dcache(i, DCACHE_WRITETHROUGH);
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
		set_section_dcache(i, DCACHE_WRITEALLOC);
#else
		set_section_dcache(i, DCACHE_WRITEBACK);
#endif
	}
}

/* to activate the MMU we need to set up virtual memory: use 1M areas */
static inline void mmu_setup(void)
{
	int i;
	u32 reg;

	arm_init_before_mmu();
	/* Set up an identity-mapping for all 4GB, rw for everyone */
	for (i = 0; i < ((4096ULL * 1024 * 1024) >> MMU_SECTION_SHIFT); i++)
		set_section_dcache(i, DCACHE_OFF);

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		dram_bank_mmu_setup(i);
	}

#ifdef CONFIG_ARMV7_LPAE
	/* Set up 4 PTE entries pointing to our 4 1GB page tables */
	for (i = 0; i < 4; i++) {
		u64 *page_table = (u64 *)(gd->arch.tlb_addr + (4096 * 4));
		u64 tpt = gd->arch.tlb_addr + (4096 * i);
		page_table[i] = tpt | TTB_PAGETABLE;
	}

	reg = TTBCR_EAE;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBCR_ORGN0_WT | TTBCR_IRGN0_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBCR_ORGN0_WBWA | TTBCR_IRGN0_WBWA;
#else
	reg |= TTBCR_ORGN0_WBNWA | TTBCR_IRGN0_WBNWA;
#endif
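	/*
	 * Note: in HYP mode the translation controls live in the
	 * hypervisor register bank (HTCR/HTTBR/HMAIR) rather than in
	 * TTBCR/TTBR0/MAIR; both branches below install the same
	 * top-level table built above at tlb_addr + 16KiB.
	 */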
2" 131 : : "r" (reg) : "memory"); 132 /* Set HTTBR0 */ 133 asm volatile("mcrr p15, 4, %0, %1, c2" 134 : 135 : "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0) 136 : "memory"); 137 /* Set HMAIR */ 138 asm volatile("mcr p15, 4, %0, c10, c2, 0" 139 : : "r" (MEMORY_ATTRIBUTES) : "memory"); 140 } else { 141 /* Set TTBCR to enable LPAE */ 142 asm volatile("mcr p15, 0, %0, c2, c0, 2" 143 : : "r" (reg) : "memory"); 144 /* Set 64-bit TTBR0 */ 145 asm volatile("mcrr p15, 0, %0, %1, c2" 146 : 147 : "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0) 148 : "memory"); 149 /* Set MAIR */ 150 asm volatile("mcr p15, 0, %0, c10, c2, 0" 151 : : "r" (MEMORY_ATTRIBUTES) : "memory"); 152 } 153 #elif defined(CONFIG_CPU_V7) 154 /* Set TTBR0 */ 155 reg = gd->arch.tlb_addr & TTBR0_BASE_ADDR_MASK; 156 #if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH) 157 reg |= TTBR0_RGN_WT | TTBR0_IRGN_WT; 158 #elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC) 159 reg |= TTBR0_RGN_WBWA | TTBR0_IRGN_WBWA; 160 #else 161 reg |= TTBR0_RGN_WB | TTBR0_IRGN_WB; 162 #endif 163 asm volatile("mcr p15, 0, %0, c2, c0, 0" 164 : : "r" (reg) : "memory"); 165 #else 166 /* Copy the page table address to cp15 */ 167 asm volatile("mcr p15, 0, %0, c2, c0, 0" 168 : : "r" (gd->arch.tlb_addr) : "memory"); 169 #endif 170 /* Set the access control to all-supervisor */ 171 asm volatile("mcr p15, 0, %0, c3, c0, 0" 172 : : "r" (~0)); 173 174 arm_init_domains(); 175 176 /* and enable the mmu */ 177 reg = get_cr(); /* get control reg. */ 178 cp_delay(); 179 set_cr(reg | CR_M); 180 } 181 182 static int mmu_enabled(void) 183 { 184 return get_cr() & CR_M; 185 } 186 187 /* cache_bit must be either CR_I or CR_C */ 188 static void cache_enable(uint32_t cache_bit) 189 { 190 uint32_t reg; 191 192 /* The data cache is not active unless the mmu is enabled too */ 193 if ((cache_bit == CR_C) && !mmu_enabled()) 194 mmu_setup(); 195 reg = get_cr(); /* get control reg. */ 196 cp_delay(); 197 set_cr(reg | cache_bit); 198 } 199 200 /* cache_bit must be either CR_I or CR_C */ 201 static void cache_disable(uint32_t cache_bit) 202 { 203 uint32_t reg; 204 205 reg = get_cr(); 206 cp_delay(); 207 208 if (cache_bit == CR_C) { 209 /* if cache isn;t enabled no need to disable */ 210 if ((reg & CR_C) != CR_C) 211 return; 212 /* if disabling data cache, disable mmu too */ 213 cache_bit |= CR_M; 214 } 215 reg = get_cr(); 216 cp_delay(); 217 if (cache_bit == (CR_C | CR_M)) 218 flush_dcache_all(); 219 set_cr(reg & ~cache_bit); 220 } 221 #endif 222 223 #ifdef CONFIG_SYS_ICACHE_OFF 224 void icache_enable (void) 225 { 226 return; 227 } 228 229 void icache_disable (void) 230 { 231 return; 232 } 233 234 int icache_status (void) 235 { 236 return 0; /* always off */ 237 } 238 #else 239 void icache_enable(void) 240 { 241 cache_enable(CR_I); 242 } 243 244 void icache_disable(void) 245 { 246 cache_disable(CR_I); 247 } 248 249 int icache_status(void) 250 { 251 return (get_cr() & CR_I) != 0; 252 } 253 #endif 254 255 #ifdef CONFIG_SYS_DCACHE_OFF 256 void dcache_enable (void) 257 { 258 return; 259 } 260 261 void dcache_disable (void) 262 { 263 return; 264 } 265 266 int dcache_status (void) 267 { 268 return 0; /* always off */ 269 } 270 #else 271 void dcache_enable(void) 272 { 273 cache_enable(CR_C); 274 } 275 276 void dcache_disable(void) 277 { 278 cache_disable(CR_C); 279 } 280 281 int dcache_status(void) 282 { 283 return (get_cr() & CR_C) != 0; 284 } 285 #endif 286