/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_SYS_DCACHE_OFF

void set_pgtable_section(u64 *page_table, u64 index, u64 section,
			 u64 memory_type)
{
	u64 value;

	value = section | PMD_TYPE_SECT | PMD_SECT_AF;
	value |= PMD_ATTRINDX(memory_type);
	page_table[index] = value;
}

/* to activate the MMU we need to set up virtual memory */
static void mmu_setup(void)
{
	bd_t *bd = gd->bd;
	u64 *page_table = (u64 *)gd->arch.tlb_addr, i, j;
	int el;

	/* Set up an identity mapping for all spaces as device memory */
	for (i = 0; i < (PGTABLE_SIZE >> 3); i++) {
		set_pgtable_section(page_table, i, i << SECTION_SHIFT,
				    MT_DEVICE_NGNRNE);
	}

	/* Set up an identity mapping for all RAM space as normal memory */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		ulong start = bd->bi_dram[i].start;
		ulong end = bd->bi_dram[i].start + bd->bi_dram[i].size;

		for (j = start >> SECTION_SHIFT;
		     j < end >> SECTION_SHIFT; j++) {
			set_pgtable_section(page_table, j, j << SECTION_SHIFT,
					    MT_NORMAL);
		}
	}

	/* load TTBR0 */
	el = current_el();
	if (el == 1) {
		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
				  TCR_FLAGS | TCR_EL1_IPS_BITS,
				  MEMORY_ATTRIBUTES);
	} else if (el == 2) {
		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
				  TCR_FLAGS | TCR_EL2_IPS_BITS,
				  MEMORY_ATTRIBUTES);
	} else {
		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
				  TCR_FLAGS | TCR_EL3_IPS_BITS,
				  MEMORY_ATTRIBUTES);
	}

	/* enable the mmu */
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
	__asm_invalidate_dcache_all();
}

/*
 * Performs a clean & invalidation of the entire data cache at all levels.
 * This function needs to be inline to avoid using the stack.
 * __asm_flush_l3_cache returns the status of the L3 flush (non-zero on timeout).
 */
inline void flush_dcache_all(void)
{
	int ret;

	__asm_flush_dcache_all();
	ret = __asm_flush_l3_cache();
	if (ret)
		debug("flushing dcache returns 0x%x\n", ret);
	else
		debug("flushing dcache successfully.\n");
}

/*
 * Invalidates range in all levels of D-cache/unified cache
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}

/*
 * Flush range (clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}

void dcache_enable(void)
{
	/* The data cache is not active unless the mmu is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	set_sctlr(get_sctlr() | CR_C);
}

void dcache_disable(void)
{
	uint32_t sctlr;

	sctlr = get_sctlr();

	/* if cache isn't enabled no need to disable */
	if (!(sctlr & CR_C))
		return;

	set_sctlr(sctlr & ~(CR_C|CR_M));

	flush_dcache_all();
	__asm_invalidate_tlb_all();
}

int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}

u64 *__weak arch_get_page_table(void)
{
	puts("No page table offset defined\n");

	return NULL;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 *page_table = arch_get_page_table();
	u64 upto, end;

	if (page_table == NULL)
		return;

	end = ALIGN(start + size, (1 << MMU_SECTION_SHIFT)) >>
	      MMU_SECTION_SHIFT;
	start = start >> MMU_SECTION_SHIFT;
	for (upto = start; upto < end; upto++) {
		page_table[upto] &= ~PMD_ATTRINDX_MASK;
		page_table[upto] |= PMD_ATTRINDX(option);
	}
	asm volatile("dsb sy");
	__asm_invalidate_tlb_all();
	asm volatile("dsb sy");
	asm volatile("isb");

	start = start << MMU_SECTION_SHIFT;
	end = end << MMU_SECTION_SHIFT;
	flush_dcache_range(start, end);
	asm volatile("dsb sy");
}
#else	/* CONFIG_SYS_DCACHE_OFF */

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}

#endif	/* CONFIG_SYS_DCACHE_OFF */

#ifndef CONFIG_SYS_ICACHE_OFF

void icache_enable(void)
{
	__asm_invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
}

#else	/* CONFIG_SYS_ICACHE_OFF */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}

void invalidate_icache_all(void)
{
}

#endif	/* CONFIG_SYS_ICACHE_OFF */

/*
 * Enable dCache & iCache; whether the caches are actually enabled
 * depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF.
 */
void __weak enable_caches(void)
{
	icache_enable();
	dcache_enable();
}
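
/*
 * Illustrative usage sketch (not part of this file): a board that wants to
 * change cache attributes for a memory region is expected to override the
 * weak arch_get_page_table() hook above and then call
 * mmu_set_region_dcache_behaviour() on the region. The address, size and
 * DCACHE_OFF option below are hypothetical examples, not values required by
 * this code:
 *
 *	u64 *arch_get_page_table(void)
 *	{
 *		// reuse the page table set up by mmu_setup()
 *		return (u64 *)gd->arch.tlb_addr;
 *	}
 *
 *	// e.g. from board init code: mark a hypothetical 16 MiB frame
 *	// buffer at 0x80000000 as uncached
 *	mmu_set_region_dcache_behaviour(0x80000000, 16 << 20, DCACHE_OFF);
 *
 * The region is rounded out to whole MMU sections, so start/size should be
 * section aligned to avoid affecting neighbouring memory.
 */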