#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef CONFIG_ARM64

/*
 * SCTLR_EL1/SCTLR_EL2/SCTLR_EL3 bits definitions
 */
#define CR_M		(1 << 0)	/* MMU enable			*/
#define CR_A		(1 << 1)	/* Alignment abort enable	*/
#define CR_C		(1 << 2)	/* Dcache enable		*/
#define CR_SA		(1 << 3)	/* Stack Alignment Check Enable	*/
#define CR_I		(1 << 12)	/* Icache enable		*/
#define CR_WXN		(1 << 19)	/* Write permission implies XN	*/
#define CR_EE		(1 << 25)	/* Exception (Big) Endian	*/

#define PGTABLE_SIZE	(0x10000)
/* 2MB granularity */
#define MMU_SECTION_SHIFT	21
#define MMU_SECTION_SIZE	(1 << MMU_SECTION_SHIFT)

#ifndef __ASSEMBLY__

enum dcache_option {
	DCACHE_OFF = 0x3,
};

#define isb()				\
	({asm volatile(			\
	"isb" : : : "memory");		\
	})

#define wfi()				\
	({asm volatile(			\
	"wfi" : : : "memory");		\
	})

static inline unsigned int current_el(void)
{
	unsigned int el;
	asm volatile("mrs %0, CurrentEL" : "=r" (el) : : "cc");
	return el >> 2;
}

static inline unsigned int get_sctlr(void)
{
	unsigned int el, val;

	el = current_el();
	if (el == 1)
		asm volatile("mrs %0, sctlr_el1" : "=r" (val) : : "cc");
	else if (el == 2)
		asm volatile("mrs %0, sctlr_el2" : "=r" (val) : : "cc");
	else
		asm volatile("mrs %0, sctlr_el3" : "=r" (val) : : "cc");

	return val;
}

static inline void set_sctlr(unsigned int val)
{
	unsigned int el;

	el = current_el();
	if (el == 1)
		asm volatile("msr sctlr_el1, %0" : : "r" (val) : "cc");
	else if (el == 2)
		asm volatile("msr sctlr_el2, %0" : : "r" (val) : "cc");
	else
		asm volatile("msr sctlr_el3, %0" : : "r" (val) : "cc");

	asm volatile("isb");
}
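
/*
 * Example (illustrative only, not part of this header): the accessors
 * above read/write SCTLR at whichever exception level U-Boot is running,
 * so enabling the instruction cache or turning off the data cache and
 * MMU might look like:
 *
 *	set_sctlr(get_sctlr() | CR_I);
 *	set_sctlr(get_sctlr() & ~(CR_C | CR_M));
 *
 * current_el() can likewise guard EL-specific setup code.
 */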

void __asm_flush_dcache_all(void);
void __asm_invalidate_dcache_all(void);
void __asm_flush_dcache_range(u64 start, u64 end);
void __asm_invalidate_tlb_all(void);
void __asm_invalidate_icache_all(void);
int __asm_flush_l3_cache(void);

void armv8_switch_to_el2(void);
void armv8_switch_to_el1(void);
void gic_init(void);
void gic_send_sgi(unsigned long sgino);
void wait_for_wakeup(void);
void protect_secure_region(void);
void smp_kick_all_cpus(void);

void flush_l3_cache(void);

#endif	/* __ASSEMBLY__ */

#else	/* CONFIG_ARM64 */

#ifdef __KERNEL__

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
#define CPU_ARCH_ARMv7		9

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/
#define CR_EE	(1 << 25)	/* Exception (Big) Endian		*/
#define CR_TRE	(1 << 28)	/* TEX remap enable			*/
#define CR_AFE	(1 << 29)	/* Access flag enable			*/
#define CR_TE	(1 << 30)	/* Thumb exception enable		*/

#define PGTABLE_SIZE		(4096 * 4)

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences. Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
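
/*
 * Example (illustrative only): __asmeq() is concatenated with an inline
 * asm template so the build fails if the compiler did not allocate the
 * register the constraint expects, e.g.:
 *
 *	register unsigned long r0 asm("r0") = 0;
 *	asm volatile(__asmeq("%0", "r0")
 *		     "svc	#0"
 *		     : : "r" (r0) : "memory");
 *
 * Here "%0" must expand to r0; any mismatch hits the .err directive and
 * stops the build.
 */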

#ifndef __ASSEMBLY__

/**
 * save_boot_params() - Save boot parameters before starting reset sequence
 *
 * If you provide this function it will be called immediately when U-Boot
 * starts, both for SPL and U-Boot proper.
 *
 * All registers are unchanged from U-Boot entry. No registers need be
 * preserved.
 *
 * This is not a normal C function. There is no stack. Return by branching to
 * save_boot_params_ret.
 *
 * void save_boot_params(u32 r0, u32 r1, u32 r2, u32 r3);
 */

/**
 * save_boot_params_ret() - Return from save_boot_params()
 *
 * If you provide save_boot_params(), then you should jump back to this
 * function when done. Try to preserve all registers.
 *
 * If your implementation of save_boot_params() is in C then it is acceptable
 * to simply call save_boot_params_ret() at the end of your function. Since
 * there is no link register set up, you cannot just exit the function. U-Boot
 * will return to the (uninitialised) value of lr, and likely crash/hang.
 *
 * If your implementation of save_boot_params() is in assembler then you
 * should use 'b' or 'bx' to return to save_boot_params_ret.
 */
void save_boot_params_ret(void);
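
/*
 * Illustrative sketch (not part of this header): a board-provided
 * save_boot_params() is typically assembler that stashes the incoming
 * r0-r3 somewhere safe and then branches back, e.g.:
 *
 *	ENTRY(save_boot_params)
 *		ldr	ip, =boot_params_storage	@ hypothetical buffer
 *		stmia	ip, {r0-r3}
 *		b	save_boot_params_ret
 *	ENDPROC(save_boot_params)
 *
 * The buffer name above is made up; the only firm requirements are those
 * stated in the comment block: no stack use, and return with 'b'/'bx' to
 * save_boot_params_ret.
 */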

#define isb() __asm__ __volatile__ ("" : : : "memory")

#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

#ifdef __ARM_ARCH_7A__
#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
#else
#define wfi()
#endif

static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm volatile("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
	  : : "r" (val) : "cc");
	isb();
}

static inline unsigned int get_dacr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c3, c0, 0	@ get DACR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_dacr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c3, c0, 0	@ set DACR"
	  : : "r" (val) : "cc");
	isb();
}

#ifdef CONFIG_ARMV7
/* Short-Descriptor Translation Table Level 1 Bits */
#define TTB_SECT_NS_MASK	(1 << 19)
#define TTB_SECT_NG_MASK	(1 << 17)
#define TTB_SECT_S_MASK		(1 << 16)
/* Note: TTB AP bits are set elsewhere */
#define TTB_SECT_TEX(x)		((x & 0x7) << 12)
#define TTB_SECT_DOMAIN(x)	((x & 0xf) << 5)
#define TTB_SECT_XN_MASK	(1 << 4)
#define TTB_SECT_C_MASK		(1 << 3)
#define TTB_SECT_B_MASK		(1 << 2)
#define TTB_SECT		(2 << 0)

/* options available for data cache on each page */
enum dcache_option {
	DCACHE_OFF = TTB_SECT_S_MASK | TTB_SECT_DOMAIN(0) |
					TTB_SECT_XN_MASK | TTB_SECT,
	DCACHE_WRITETHROUGH = DCACHE_OFF | TTB_SECT_C_MASK,
	DCACHE_WRITEBACK = DCACHE_WRITETHROUGH | TTB_SECT_B_MASK,
	DCACHE_WRITEALLOC = DCACHE_WRITEBACK | TTB_SECT_TEX(1),
};
#else
/* options available for data cache on each page */
enum dcache_option {
	DCACHE_OFF = 0x12,
	DCACHE_WRITETHROUGH = 0x1a,
	DCACHE_WRITEBACK = 0x1e,
	DCACHE_WRITEALLOC = 0x16,
};
#endif

/* Size of an MMU section */
enum {
	MMU_SECTION_SHIFT	= 20,
	MMU_SECTION_SIZE	= 1 << MMU_SECTION_SHIFT,
};

#ifdef CONFIG_ARMV7
/* TTBR0 bits */
#define TTBR0_BASE_ADDR_MASK	0xFFFFC000
#define TTBR0_RGN_NC		(0 << 3)
#define TTBR0_RGN_WBWA		(1 << 3)
#define TTBR0_RGN_WT		(2 << 3)
#define TTBR0_RGN_WB		(3 << 3)
/* TTBR0[6] is IRGN[0] and TTBR0[0] is IRGN[1] */
#define TTBR0_IRGN_NC		(0 << 0 | 0 << 6)
#define TTBR0_IRGN_WBWA		(0 << 0 | 1 << 6)
#define TTBR0_IRGN_WT		(1 << 0 | 0 << 6)
#define TTBR0_IRGN_WB		(1 << 0 | 1 << 6)
#endif

/**
 * Register an update to the page tables, and flush the TLB
 *
 * \param start		start address of update in page table
 * \param stop		stop address of update in page table
 */
void mmu_page_table_flush(unsigned long start, unsigned long stop);

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif /* CONFIG_ARM64 */

#ifndef __ASSEMBLY__
/**
 * Change the cache settings for a region.
 *
 * \param start		start address of memory region to change
 * \param size		size of memory region to change
 * \param option	dcache option to select
 */
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option);

#ifdef CONFIG_SYS_NONCACHED_MEMORY
void noncached_init(void);
phys_addr_t noncached_alloc(size_t size, size_t align);
#endif /* CONFIG_SYS_NONCACHED_MEMORY */

#endif /* __ASSEMBLY__ */

#endif
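
/*
 * Example (illustrative only, not part of the header proper): marking a
 * DMA buffer uncached with mmu_set_region_dcache_behaviour() might look
 * like the sketch below, where 'buf' and 'size' are hypothetical and the
 * region is assumed to start on an MMU-section boundary:
 *
 *	mmu_set_region_dcache_behaviour((phys_addr_t)buf,
 *					ALIGN(size, MMU_SECTION_SIZE),
 *					DCACHE_OFF);
 *
 * The mapping is managed per MMU section, so start and size should be
 * multiples of MMU_SECTION_SIZE.
 */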