/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_SYS_DCACHE_OFF

static void set_pgtable_section(u64 section, u64 memory_type)
{
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
	u64 value;

	/*
	 * Build a block descriptor: the section's output address,
	 * block type, the Access Flag (so no access faults are taken)
	 * and the MAIR attribute index for the requested memory type.
	 */
	value = (section << SECTION_SHIFT) | PMD_TYPE_SECT | PMD_SECT_AF;
	value |= PMD_ATTRINDX(memory_type);
	page_table[section] = value;
}

/* To activate the MMU we need to set up virtual memory */
static void mmu_setup(void)
{
	int i, j, el;
	bd_t *bd = gd->bd;

	/*
	 * Set up an identity mapping for the whole address space as
	 * device memory (nGnRnE), one 64-bit descriptor per section.
	 */
	for (i = 0; i < (PGTABLE_SIZE >> 3); i++)
		set_pgtable_section(i, MT_DEVICE_NGNRNE);

	/* Remap all RAM banks as normal (cacheable) memory */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		ulong start = bd->bi_dram[i].start;
		ulong end = bd->bi_dram[i].start + bd->bi_dram[i].size;

		for (j = start >> SECTION_SHIFT;
		     j < end >> SECTION_SHIFT; j++) {
			set_pgtable_section(j, MT_NORMAL);
		}
	}

	/* Load TTBR0, TCR and MAIR for the current exception level */
	el = current_el();
	if (el == 1) {
		asm volatile("msr ttbr0_el1, %0"
			     : : "r" (gd->arch.tlb_addr) : "memory");
		asm volatile("msr tcr_el1, %0"
			     : : "r" (TCR_FLAGS | TCR_EL1_IPS_BITS)
			     : "memory");
		asm volatile("msr mair_el1, %0"
			     : : "r" (MEMORY_ATTRIBUTES) : "memory");
	} else if (el == 2) {
		asm volatile("msr ttbr0_el2, %0"
			     : : "r" (gd->arch.tlb_addr) : "memory");
		asm volatile("msr tcr_el2, %0"
			     : : "r" (TCR_FLAGS | TCR_EL2_IPS_BITS)
			     : "memory");
		asm volatile("msr mair_el2, %0"
			     : : "r" (MEMORY_ATTRIBUTES) : "memory");
	} else {
		asm volatile("msr ttbr0_el3, %0"
			     : : "r" (gd->arch.tlb_addr) : "memory");
		asm volatile("msr tcr_el3, %0"
			     : : "r" (TCR_FLAGS | TCR_EL3_IPS_BITS)
			     : "memory");
		asm volatile("msr mair_el3, %0"
			     : : "r" (MEMORY_ATTRIBUTES) : "memory");
	}

	/* Enable the MMU */
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
	__asm_invalidate_dcache_all();
}

/*
 * Performs a clean & invalidate of the entire data cache at all levels
 */
void flush_dcache_all(void)
{
	__asm_flush_dcache_all();
}

/*
 * Invalidates range in all levels of D-cache/unified cache.
 * Note: the assembly backend provides only a clean & invalidate range
 * operation, so that is used here as a superset of invalidation.
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}

/*
 * Flush range (clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}

void dcache_enable(void)
{
	/* The data cache is not active unless the MMU is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	set_sctlr(get_sctlr() | CR_C);
}

void dcache_disable(void)
{
	uint32_t sctlr;

	sctlr = get_sctlr();

	/* If the cache isn't enabled, there is nothing to disable */
	if (!(sctlr & CR_C))
		return;

	set_sctlr(sctlr & ~(CR_C|CR_M));

	flush_dcache_all();
	__asm_invalidate_tlb_all();
}

int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}

#else	/* CONFIG_SYS_DCACHE_OFF */

void invalidate_dcache_all(void)
{
}
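
/*
 * When CONFIG_SYS_DCACHE_OFF is set, this stub and those that follow
 * keep the cache API callable unconditionally; each one is a no-op.
 */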

void flush_dcache_all(void)
{
}

void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void flush_dcache_range(unsigned long start, unsigned long stop)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}

#endif	/* CONFIG_SYS_DCACHE_OFF */

#ifndef CONFIG_SYS_ICACHE_OFF

void icache_enable(void)
{
	__asm_invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
}

#else	/* CONFIG_SYS_ICACHE_OFF */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}

void invalidate_icache_all(void)
{
}

#endif	/* CONFIG_SYS_ICACHE_OFF */

/*
 * Enable D-cache & I-cache; whether a cache is actually enabled
 * depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF
 */
void enable_caches(void)
{
	icache_enable();
	dcache_enable();
}

/*
 * Flush range from all levels of d-cache/unified-cache
 */
void flush_cache(unsigned long start, unsigned long size)
{
	flush_dcache_range(start, start + size);
}
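
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * range helpers above are the building blocks for non-coherent DMA.
 * dma_buf, start_dma_to_device() and wait_dma_from_device() are
 * hypothetical placeholders, not U-Boot APIs; start and stop addresses
 * should be cache-line aligned (ARCH_DMA_MINALIGN), and the CPU must
 * not touch the buffer while a transfer is in flight.
 *
 *	static char dma_buf[4096] __aligned(ARCH_DMA_MINALIGN);
 *
 *	void dma_send(size_t len)
 *	{
 *		// Write dirty lines back so the device reads current data
 *		flush_dcache_range((unsigned long)dma_buf,
 *				   (unsigned long)dma_buf + len);
 *		start_dma_to_device(dma_buf, len);
 *	}
 *
 *	void dma_recv(size_t len)
 *	{
 *		wait_dma_from_device(dma_buf, len);
 *		// Discard stale lines so CPU reads see the device's data
 *		invalidate_dcache_range((unsigned long)dma_buf,
 *					(unsigned long)dma_buf + len);
 *	}
 */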