#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#include <common.h>
#include <linux/compiler.h>
#include <asm/barriers.h>

#ifdef CONFIG_ARM64

/*
 * SCTLR_EL1/SCTLR_EL2/SCTLR_EL3 bits definitions
 */
#define CR_M		(1 << 0)	/* MMU enable */
#define CR_A		(1 << 1)	/* Alignment abort enable */
#define CR_C		(1 << 2)	/* Dcache enable */
#define CR_SA		(1 << 3)	/* Stack Alignment Check Enable */
#define CR_I		(1 << 12)	/* Icache enable */
#define CR_WXN		(1 << 19)	/* Write permission implies XN */
#define CR_EE		(1 << 25)	/* Exception (Big) Endian */

#ifndef __ASSEMBLY__

u64 get_page_table_size(void);
#define PGTABLE_SIZE	get_page_table_size()

/* 2MB granularity */
#define MMU_SECTION_SHIFT	21
#define MMU_SECTION_SIZE	(1 << MMU_SECTION_SHIFT)

/* These constants need to be synced to the MT_ types in asm/armv8/mmu.h */
enum dcache_option {
	DCACHE_OFF = 0 << 2,
	DCACHE_WRITETHROUGH = 3 << 2,
	DCACHE_WRITEBACK = 4 << 2,
	DCACHE_WRITEALLOC = 4 << 2,
};

#define wfi()				\
	({asm volatile(			\
	"wfi" : : : "memory");		\
	})

static inline unsigned int current_el(void)
{
	unsigned int el;

	asm volatile("mrs %0, CurrentEL" : "=r" (el) : : "cc");
	return el >> 2;
}

static inline unsigned int get_sctlr(void)
{
	unsigned int el, val;

	el = current_el();
	if (el == 1)
		asm volatile("mrs %0, sctlr_el1" : "=r" (val) : : "cc");
	else if (el == 2)
		asm volatile("mrs %0, sctlr_el2" : "=r" (val) : : "cc");
	else
		asm volatile("mrs %0, sctlr_el3" : "=r" (val) : : "cc");

	return val;
}

static inline void set_sctlr(unsigned int val)
{
	unsigned int el;

	el = current_el();
	if (el == 1)
		asm volatile("msr sctlr_el1, %0" : : "r" (val) : "cc");
	else if (el == 2)
		asm volatile("msr sctlr_el2, %0" : : "r" (val) : "cc");
	else
		asm volatile("msr sctlr_el3, %0" : : "r" (val) : "cc");

	asm volatile("isb");
}

static inline unsigned long read_mpidr(void)
{
	unsigned long val;

	asm volatile("mrs %0, mpidr_el1" : "=r" (val));

	return val;
}

#define BSP_COREID	0

void __asm_flush_dcache_all(void);
void __asm_invalidate_dcache_all(void);
void __asm_flush_dcache_range(u64 start, u64 end);
void __asm_invalidate_tlb_all(void);
void __asm_invalidate_icache_all(void);
int __asm_flush_l3_cache(void);
void __asm_switch_ttbr(u64 new_ttbr);

void armv8_switch_to_el2(void);
void armv8_switch_to_el1(void);
void gic_init(void);
void gic_send_sgi(unsigned long sgino);
void wait_for_wakeup(void);
void protect_secure_region(void);
void smp_kick_all_cpus(void);

void flush_l3_cache(void);

/*
 * Issue a hypervisor call in accordance with the ARM "SMC Calling Convention",
 * DEN0028A
 *
 * @args: input and output arguments
 */
void hvc_call(struct pt_regs *args);

/*
 * Issue a secure monitor call in accordance with the ARM "SMC Calling
 * Convention", DEN0028A
 *
 * @args: input and output arguments
 */
void smc_call(struct pt_regs *args);
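/*
 * Illustrative usage sketch, not part of this header's API: issuing the
 * PSCI_VERSION call (function ID 0x84000000, from the PSCI specification)
 * through smc_call(). Per SMCCC, arguments go in x0-x7 and results come
 * back starting at x0. Assumes the arm64 struct pt_regs layout from
 * asm/ptrace.h; the function name is hypothetical.
 */
#if 0	/* example only, kept out of the build */
static unsigned long example_psci_version(void)
{
	struct pt_regs regs = { 0 };

	regs.regs[0] = 0x84000000;	/* PSCI_VERSION function ID */
	smc_call(&regs);		/* result returned in regs[0] */
	return regs.regs[0];
}
#endif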
void __noreturn psci_system_reset(bool smc);

#endif	/* __ASSEMBLY__ */

#else /* CONFIG_ARM64 */

#ifdef __KERNEL__

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
#define CPU_ARCH_ARMv7		9

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/
#define CR_EE	(1 << 25)	/* Exception (Big) Endian		*/
#define CR_TRE	(1 << 28)	/* TEX remap enable			*/
#define CR_AFE	(1 << 29)	/* Access flag enable			*/
#define CR_TE	(1 << 30)	/* Thumb exception enable		*/

#if defined(CONFIG_ARMV7_LPAE) && !defined(PGTABLE_SIZE)
#define PGTABLE_SIZE		(4096 * 5)
#elif !defined(PGTABLE_SIZE)
#define PGTABLE_SIZE		(4096 * 4)
#endif

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences. Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

#ifndef __ASSEMBLY__

/**
 * save_boot_params() - Save boot parameters before starting reset sequence
 *
 * If you provide this function it will be called immediately when U-Boot
 * starts, both for SPL and U-Boot proper.
 *
 * All registers are unchanged from U-Boot entry. No registers need be
 * preserved.
 *
 * This is not a normal C function. There is no stack. Return by branching to
 * save_boot_params_ret.
 *
 * void save_boot_params(u32 r0, u32 r1, u32 r2, u32 r3);
 */

/**
 * save_boot_params_ret() - Return from save_boot_params()
 *
 * If you provide save_boot_params(), then you should jump back to this
 * function when done. Try to preserve all registers.
 *
 * If your implementation of save_boot_params() is in C then it is acceptable
 * to simply call save_boot_params_ret() at the end of your function. Since
 * there is no link register set up, you cannot just exit the function. U-Boot
 * will return to the (uninitialised) value of lr, and likely crash/hang.
 *
 * If your implementation of save_boot_params() is in assembler then you
 * should use 'b' or 'bx' to return to save_boot_params_ret.
 */
void save_boot_params_ret(void);
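/*
 * Illustrative sketch, not part of this header: a C implementation of
 * save_boot_params() along the lines described above, stashing the boot
 * arguments in static storage and returning via save_boot_params_ret().
 * The array name is hypothetical. Note that no stack is available at this
 * point, so only static storage may be touched; real implementations are
 * often written in assembler for exactly that reason.
 */
#if 0	/* example only, kept out of the build */
static u32 saved_boot_params[4];

void save_boot_params(u32 r0, u32 r1, u32 r2, u32 r3)
{
	saved_boot_params[0] = r0;
	saved_boot_params[1] = r1;
	saved_boot_params[2] = r2;
	saved_boot_params[3] = r3;

	save_boot_params_ret();
}
#endif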
#ifdef CONFIG_ARMV7_LPAE
void switch_to_hypervisor_ret(void);
#endif

#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

#ifdef __ARM_ARCH_7A__
#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
#else
#define wfi()
#endif

static inline unsigned long get_cpsr(void)
{
	unsigned long cpsr;

	asm volatile("mrs %0, cpsr" : "=r"(cpsr));
	return cpsr;
}

static inline int is_hyp(void)
{
#ifdef CONFIG_ARMV7_LPAE
	/* HYP mode requires LPAE ... */
	return ((get_cpsr() & 0x1f) == 0x1a);
#else
	/* ... so without LPAE support we can optimize all hyp code away */
	return 0;
#endif
}

static inline unsigned int get_cr(void)
{
	unsigned int val;

	if (is_hyp())
		asm volatile("mrc p15, 4, %0, c1, c0, 0	@ get CR"
			     : "=r" (val) : : "cc");
	else
		asm volatile("mrc p15, 0, %0, c1, c0, 0	@ get CR"
			     : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	if (is_hyp())
		asm volatile("mcr p15, 4, %0, c1, c0, 0	@ set CR"
			     : : "r" (val) : "cc");
	else
		asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
			     : : "r" (val) : "cc");
	isb();
}

static inline unsigned int get_dacr(void)
{
	unsigned int val;

	asm("mrc p15, 0, %0, c3, c0, 0	@ get DACR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_dacr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c3, c0, 0	@ set DACR"
		     : : "r" (val) : "cc");
	isb();
}

#ifdef CONFIG_ARMV7_LPAE
/* Long-Descriptor Translation Table Level 1/2 Bits */
#define TTB_SECT_XN_MASK	(1ULL << 54)
#define TTB_SECT_NG_MASK	(1 << 11)
#define TTB_SECT_AF		(1 << 10)
#define TTB_SECT_SH_MASK	(3 << 8)
#define TTB_SECT_NS_MASK	(1 << 5)
#define TTB_SECT_AP		(1 << 6)
/* Note: TTB AP bits are set elsewhere */
#define TTB_SECT_MAIR(x)	((x & 0x7) << 2) /* Index into MAIR */
#define TTB_SECT		(1 << 0)
#define TTB_PAGETABLE		(3 << 0)

/* TTBCR flags */
#define TTBCR_EAE		(1 << 31)
#define TTBCR_T0SZ(x)		((x) << 0)
#define TTBCR_T1SZ(x)		((x) << 16)
#define TTBCR_USING_TTBR0	(TTBCR_T0SZ(0) | TTBCR_T1SZ(0))
#define TTBCR_IRGN0_NC		(0 << 8)
#define TTBCR_IRGN0_WBWA	(1 << 8)
#define TTBCR_IRGN0_WT		(2 << 8)
#define TTBCR_IRGN0_WBNWA	(3 << 8)
#define TTBCR_IRGN0_MASK	(3 << 8)
#define TTBCR_ORGN0_NC		(0 << 10)
#define TTBCR_ORGN0_WBWA	(1 << 10)
#define TTBCR_ORGN0_WT		(2 << 10)
#define TTBCR_ORGN0_WBNWA	(3 << 10)
#define TTBCR_ORGN0_MASK	(3 << 10)
#define TTBCR_SHARED_NON	(0 << 12)
#define TTBCR_SHARED_OUTER	(2 << 12)
#define TTBCR_SHARED_INNER	(3 << 12)
#define TTBCR_EPD0		(0 << 7)

/*
 * Memory types
 */
#define MEMORY_ATTRIBUTES	((0x00 << (0 * 8)) | (0x88 << (1 * 8)) | \
				 (0xcc << (2 * 8)) | (0xff << (3 * 8)))

/* options available for data cache on each page */
enum dcache_option {
	DCACHE_OFF = TTB_SECT | TTB_SECT_MAIR(0),
	DCACHE_WRITETHROUGH = TTB_SECT | TTB_SECT_MAIR(1),
	DCACHE_WRITEBACK = TTB_SECT | TTB_SECT_MAIR(2),
	DCACHE_WRITEALLOC = TTB_SECT | TTB_SECT_MAIR(3),
};
#elif defined(CONFIG_CPU_V7)
/* Short-Descriptor Translation Table Level 1 Bits */
#define TTB_SECT_NS_MASK	(1 << 19)
#define TTB_SECT_NG_MASK	(1 << 17)
#define TTB_SECT_S_MASK		(1 << 16)
/* Note: TTB AP bits are set elsewhere */
#define TTB_SECT_AP		(3 << 10)
#define TTB_SECT_TEX(x)		((x & 0x7) << 12)
#define TTB_SECT_DOMAIN(x)	((x & 0xf) << 5)
#define TTB_SECT_XN_MASK	(1 << 4)
#define TTB_SECT_C_MASK		(1 << 3)
#define TTB_SECT_B_MASK		(1 << 2)
#define TTB_SECT		(2 << 0)

/* options available for data cache on each page */
enum dcache_option {
	DCACHE_OFF = TTB_SECT_DOMAIN(0) | TTB_SECT_XN_MASK | TTB_SECT,
	DCACHE_WRITETHROUGH = DCACHE_OFF | TTB_SECT_C_MASK,
	DCACHE_WRITEBACK = DCACHE_WRITETHROUGH | TTB_SECT_B_MASK,
	DCACHE_WRITEALLOC = DCACHE_WRITEBACK | TTB_SECT_TEX(1),
};
#else
#define TTB_SECT_AP		(3 << 10)
/* options available for data cache on each page */
enum dcache_option {
	DCACHE_OFF = 0x12,
	DCACHE_WRITETHROUGH = 0x1a,
	DCACHE_WRITEBACK = 0x1e,
	DCACHE_WRITEALLOC = 0x16,
};
#endif

/* Size of an MMU section */
enum {
#ifdef CONFIG_ARMV7_LPAE
	MMU_SECTION_SHIFT	= 21, /* 2MB */
#else
	MMU_SECTION_SHIFT	= 20, /* 1MB */
#endif
	MMU_SECTION_SIZE	= 1 << MMU_SECTION_SHIFT,
};

#ifdef CONFIG_CPU_V7
/* TTBR0 bits */
#define TTBR0_BASE_ADDR_MASK	0xFFFFC000
#define TTBR0_RGN_NC		(0 << 3)
#define TTBR0_RGN_WBWA		(1 << 3)
#define TTBR0_RGN_WT		(2 << 3)
#define TTBR0_RGN_WB		(3 << 3)
/* TTBR0[6] is IRGN[0] and TTBR0[0] is IRGN[1] */
#define TTBR0_IRGN_NC		(0 << 0 | 0 << 6)
#define TTBR0_IRGN_WBWA		(0 << 0 | 1 << 6)
#define TTBR0_IRGN_WT		(1 << 0 | 0 << 6)
#define TTBR0_IRGN_WB		(1 << 0 | 1 << 6)
#endif

/**
 * Register an update to the page tables, and flush the TLB
 *
 * \param start		start address of update in page table
 * \param stop		stop address of update in page table
 */
void mmu_page_table_flush(unsigned long start, unsigned long stop);

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif /* CONFIG_ARM64 */

#ifndef __ASSEMBLY__
/**
 * Change the cache settings for a region.
 *
 * \param start		start address of memory region to change
 * \param size		size of memory region to change
 * \param option	dcache option to select
 */
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option);
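/*
 * Illustrative usage sketch, not part of this header: marking one MMU
 * section (1MB or 2MB depending on configuration, see MMU_SECTION_SIZE
 * above) as write-through cacheable, e.g. for a framebuffer. The base
 * address and function name are hypothetical; start and size should be
 * aligned to MMU_SECTION_SIZE.
 */
#if 0	/* example only, kept out of the build */
static void example_map_framebuffer(void)
{
	phys_addr_t fb_base = 0x80000000;	/* hypothetical address */

	mmu_set_region_dcache_behaviour(fb_base, MMU_SECTION_SIZE,
					DCACHE_WRITETHROUGH);
}
#endif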
#ifdef CONFIG_SYS_NONCACHED_MEMORY
void noncached_init(void);
phys_addr_t noncached_alloc(size_t size, size_t align);
#endif /* CONFIG_SYS_NONCACHED_MEMORY */

#endif /* __ASSEMBLY__ */

#endif