#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#include <common.h>
#include <linux/compiler.h>
#include <asm/barriers.h>

#ifdef CONFIG_ARM64

/*
 * SCTLR_EL1/SCTLR_EL2/SCTLR_EL3 bits definitions
 */
#define CR_M		(1 << 0)	/* MMU enable			*/
#define CR_A		(1 << 1)	/* Alignment abort enable	*/
#define CR_C		(1 << 2)	/* Dcache enable		*/
#define CR_SA		(1 << 3)	/* Stack Alignment Check Enable	*/
#define CR_I		(1 << 12)	/* Icache enable		*/
#define CR_WXN		(1 << 19)	/* Write Permission Imply XN	*/
#define CR_EE		(1 << 25)	/* Exception (Big) Endian	*/

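/*
 * Example (illustrative sketch only): these SCTLR bits are typically used
 * with the get_sctlr()/set_sctlr() accessors defined later in this header,
 * e.g. to turn the I- and D-caches on:
 *
 *	set_sctlr(get_sctlr() | CR_C | CR_I);
 */
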
#define ES_TO_AARCH64		1
#define ES_TO_AARCH32		0

/*
 * SCR_EL3 bits definitions
 */
#define SCR_EL3_RW_AARCH64	(1 << 10) /* Next lower level is AArch64     */
#define SCR_EL3_RW_AARCH32	(0 << 10) /* Lower levels are AArch32        */
#define SCR_EL3_HCE_EN		(1 << 8)  /* Hypervisor Call enable          */
#define SCR_EL3_SMD_DIS		(1 << 7)  /* Secure Monitor Call disable     */
#define SCR_EL3_RES1		(3 << 4)  /* Reserved, RES1                  */
#define SCR_EL3_NS_EN		(1 << 0)  /* EL0 and EL1 in Non-secure state */

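/*
 * Example (illustrative sketch only): a typical SCR_EL3 value for dropping
 * from EL3 to a Non-secure, AArch64 EL2 with HVC enabled and SMC disabled
 * might be composed as:
 *
 *	scr = SCR_EL3_RES1 | SCR_EL3_NS_EN | SCR_EL3_RW_AARCH64 |
 *	      SCR_EL3_HCE_EN | SCR_EL3_SMD_DIS;
 */
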
/*
 * SPSR_EL3/SPSR_EL2 bits definitions
 */
#define SPSR_EL_END_LE		(0 << 9)  /* Exception Little-endian          */
#define SPSR_EL_DEBUG_MASK	(1 << 9)  /* Debug exception masked           */
#define SPSR_EL_ASYN_MASK	(1 << 8)  /* Asynchronous data abort masked   */
#define SPSR_EL_SERR_MASK	(1 << 8)  /* System Error exception masked    */
#define SPSR_EL_IRQ_MASK	(1 << 7)  /* IRQ exception masked             */
#define SPSR_EL_FIQ_MASK	(1 << 6)  /* FIQ exception masked             */
#define SPSR_EL_T_A32		(0 << 5)  /* AArch32 instruction set A32      */
#define SPSR_EL_M_AARCH64	(0 << 4)  /* Exception taken from AArch64     */
#define SPSR_EL_M_AARCH32	(1 << 4)  /* Exception taken from AArch32     */
#define SPSR_EL_M_SVC		(0x3)     /* Exception taken from SVC mode    */
#define SPSR_EL_M_HYP		(0xa)     /* Exception taken from HYP mode    */
#define SPSR_EL_M_EL1H		(5)       /* Exception taken from EL1h mode   */
#define SPSR_EL_M_EL2H		(9)       /* Exception taken from EL2h mode   */

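/*
 * Example (illustrative sketch only): an SPSR value for an ERET into
 * AArch64 EL2h, with the D/A/I/F exceptions all masked, could be built
 * from the bits above:
 *
 *	spsr = SPSR_EL_DEBUG_MASK | SPSR_EL_SERR_MASK |
 *	       SPSR_EL_IRQ_MASK | SPSR_EL_FIQ_MASK | SPSR_EL_M_EL2H;
 */
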
/*
 * CPTR_EL2 bits definitions
 */
#define CPTR_EL2_RES1		(3 << 12 | 0x3ff)           /* Reserved, RES1 */

/*
 * SCTLR_EL2 bits definitions
 */
#define SCTLR_EL2_RES1		(3 << 28 | 3 << 22 | 1 << 18 | 1 << 16 |\
				 1 << 11 | 3 << 4)	    /* Reserved, RES1 */
#define SCTLR_EL2_EE_LE		(0 << 25) /* Exception Little-endian          */
#define SCTLR_EL2_WXN_DIS	(0 << 19) /* Write permission is not XN       */
#define SCTLR_EL2_ICACHE_DIS	(0 << 12) /* Instruction cache disabled       */
#define SCTLR_EL2_SA_DIS	(0 << 3)  /* Stack Alignment Check disabled   */
#define SCTLR_EL2_DCACHE_DIS	(0 << 2)  /* Data cache disabled              */
#define SCTLR_EL2_ALIGN_DIS	(0 << 1)  /* Alignment check disabled         */
#define SCTLR_EL2_MMU_DIS	(0)       /* MMU disabled                     */

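/*
 * Example (illustrative sketch only): the (0 << n) definitions above encode
 * "feature off" states, so a freshly initialised SCTLR_EL2 with MMU and
 * caches off is just the RES1 bits OR-ed with the disabled states:
 *
 *	sctlr = SCTLR_EL2_RES1 | SCTLR_EL2_EE_LE | SCTLR_EL2_WXN_DIS |
 *		SCTLR_EL2_ICACHE_DIS | SCTLR_EL2_SA_DIS |
 *		SCTLR_EL2_DCACHE_DIS | SCTLR_EL2_ALIGN_DIS |
 *		SCTLR_EL2_MMU_DIS;
 */
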
/*
 * CNTHCTL_EL2 bits definitions
 */
#define CNTHCTL_EL2_EL1PCEN_EN	(1 << 1)  /* Physical timer regs accessible   */
#define CNTHCTL_EL2_EL1PCTEN_EN	(1 << 0)  /* Physical counter accessible      */

/*
 * HCR_EL2 bits definitions
 */
#define HCR_EL2_RW_AARCH64	(1 << 31) /* EL1 is AArch64                   */
#define HCR_EL2_RW_AARCH32	(0 << 31) /* Lower levels are AArch32         */
#define HCR_EL2_HCD_DIS		(1 << 29) /* Hypervisor Call disabled         */

/*
 * CPACR_EL1 bits definitions
 */
#define CPACR_EL1_FPEN_EN	(3 << 20) /* SIMD and FP instruction enabled  */

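/*
 * Example (illustrative sketch only): to run EL1 in AArch64 with the HVC
 * instruction disabled, HCR_EL2 could be set to:
 *
 *	hcr = HCR_EL2_RW_AARCH64 | HCR_EL2_HCD_DIS;
 */
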
/*
 * SCTLR_EL1 bits definitions
 */
#define SCTLR_EL1_RES1		(3 << 28 | 3 << 22 | 1 << 20 |\
				 1 << 11) /* Reserved, RES1                   */
#define SCTLR_EL1_UCI_DIS	(0 << 26) /* Cache instruction disabled       */
#define SCTLR_EL1_EE_LE		(0 << 25) /* Exception Little-endian          */
#define SCTLR_EL1_WXN_DIS	(0 << 19) /* Write permission is not XN       */
#define SCTLR_EL1_NTWE_DIS	(0 << 18) /* WFE instruction disabled         */
#define SCTLR_EL1_NTWI_DIS	(0 << 16) /* WFI instruction disabled         */
#define SCTLR_EL1_UCT_DIS	(0 << 15) /* CTR_EL0 access disabled          */
#define SCTLR_EL1_DZE_DIS	(0 << 14) /* DC ZVA instruction disabled      */
#define SCTLR_EL1_ICACHE_DIS	(0 << 12) /* Instruction cache disabled       */
#define SCTLR_EL1_UMA_DIS	(0 << 9)  /* User Mask Access disabled        */
#define SCTLR_EL1_SED_EN	(0 << 8)  /* SETEND instruction enabled       */
#define SCTLR_EL1_ITD_EN	(0 << 7)  /* IT instruction enabled           */
#define SCTLR_EL1_CP15BEN_DIS	(0 << 5)  /* CP15 barrier operation disabled  */
#define SCTLR_EL1_SA0_DIS	(0 << 4)  /* Stack Alignment EL0 disabled     */
#define SCTLR_EL1_SA_DIS	(0 << 3)  /* Stack Alignment EL1 disabled     */
#define SCTLR_EL1_DCACHE_DIS	(0 << 2)  /* Data cache disabled              */
#define SCTLR_EL1_ALIGN_DIS	(0 << 1)  /* Alignment check disabled         */
#define SCTLR_EL1_MMU_DIS	(0)       /* MMU disabled                     */

#ifndef __ASSEMBLY__

u64 get_page_table_size(void);
#define PGTABLE_SIZE	get_page_table_size()

/* 2MB granularity */
#define MMU_SECTION_SHIFT	21
#define MMU_SECTION_SIZE	(1 << MMU_SECTION_SHIFT)

/* These constants need to be synced to the MT_ types in asm/armv8/mmu.h */
enum dcache_option {
	DCACHE_OFF = 0 << 2,
	DCACHE_WRITETHROUGH = 3 << 2,
	DCACHE_WRITEBACK = 4 << 2,
	DCACHE_WRITEALLOC = 4 << 2,
};

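/*
 * Example (illustrative sketch only): these options are consumed by
 * mmu_set_region_dcache_behaviour(), declared at the end of this header.
 * A buffer shared with a DMA engine might be mapped uncached like this
 * (the address and size are made up for the example):
 *
 *	mmu_set_region_dcache_behaviour(0x80000000, 2 * MMU_SECTION_SIZE,
 *					DCACHE_OFF);
 */
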
#define wfi()				\
	({asm volatile(			\
	"wfi" : : : "memory");		\
	})

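/* CurrentEL reports the exception level in bits [3:2], hence the shift */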
static inline unsigned int current_el(void)
{
	unsigned int el;
	asm volatile("mrs %0, CurrentEL" : "=r" (el) : : "cc");
	return el >> 2;
}

static inline unsigned int get_sctlr(void)
{
	unsigned int el, val;

	el = current_el();
	if (el == 1)
		asm volatile("mrs %0, sctlr_el1" : "=r" (val) : : "cc");
	else if (el == 2)
		asm volatile("mrs %0, sctlr_el2" : "=r" (val) : : "cc");
	else
		asm volatile("mrs %0, sctlr_el3" : "=r" (val) : : "cc");

	return val;
}

static inline void set_sctlr(unsigned int val)
{
	unsigned int el;

	el = current_el();
	if (el == 1)
		asm volatile("msr sctlr_el1, %0" : : "r" (val) : "cc");
	else if (el == 2)
		asm volatile("msr sctlr_el2, %0" : : "r" (val) : "cc");
	else
		asm volatile("msr sctlr_el3, %0" : : "r" (val) : "cc");

	asm volatile("isb");
}

static inline unsigned long read_mpidr(void)
{
	unsigned long val;

	asm volatile("mrs %0, mpidr_el1" : "=r" (val));

	return val;
}

#define BSP_COREID	0

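/*
 * Example (illustrative sketch only): the affinity-level-0 field of
 * MPIDR_EL1 (bits [7:0]) can be compared against BSP_COREID to tell the
 * boot-strap processor apart from the secondaries:
 *
 *	if ((read_mpidr() & 0xff) == BSP_COREID)
 *		... boot CPU path ...
 */
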
void __asm_flush_dcache_all(void);
void __asm_invalidate_dcache_all(void);
void __asm_flush_dcache_range(u64 start, u64 end);

/**
 * __asm_invalidate_dcache_range() - Invalidate a range of virtual addresses
 *
 * This performs an invalidate from @start to @end - 1. Both addresses
 * should be cache-aligned, otherwise this function will align the start
 * address and may continue past the end address.
 *
 * Data in the address range is evicted from the cache and is not written back
 * to memory.
 *
 * @start: Start address to invalidate
 * @end: End address to invalidate up to (exclusive)
 */
void __asm_invalidate_dcache_range(u64 start, u64 end);
void __asm_invalidate_tlb_all(void);
void __asm_invalidate_icache_all(void);
int __asm_invalidate_l3_dcache(void);
int __asm_flush_l3_dcache(void);
int __asm_invalidate_l3_icache(void);
void __asm_switch_ttbr(u64 new_ttbr);

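/*
 * Example (illustrative sketch only): before reading a buffer that a device
 * has just written via DMA, stale cache lines must be dropped. With a
 * cache-aligned buffer (buf and buf_size are made up names):
 *
 *	__asm_invalidate_dcache_range(buf, buf + buf_size);
 */
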
/*
 * Switch from EL3 to EL2 for ARMv8
 *
 * @args:        For loading 64-bit OS, fdt address.
 *               For loading 32-bit OS, zero.
 * @mach_nr:     For loading 64-bit OS, zero.
 *               For loading 32-bit OS, machine number.
 * @fdt_addr:    For loading 64-bit OS, zero.
 *               For loading 32-bit OS, fdt address.
 * @arg4:	 Input argument.
 * @entry_point: kernel entry point
 * @es_flag:     execution state flag, ES_TO_AARCH64 or ES_TO_AARCH32
 */
void __noreturn armv8_switch_to_el2(u64 args, u64 mach_nr, u64 fdt_addr,
				    u64 arg4, u64 entry_point, u64 es_flag);
/*
 * Switch from EL2 to EL1 for ARMv8
 *
 * @args:        For loading 64-bit OS, fdt address.
 *               For loading 32-bit OS, zero.
 * @mach_nr:     For loading 64-bit OS, zero.
 *               For loading 32-bit OS, machine number.
 * @fdt_addr:    For loading 64-bit OS, zero.
 *               For loading 32-bit OS, fdt address.
 * @arg4:	 Input argument.
 * @entry_point: kernel entry point
 * @es_flag:     execution state flag, ES_TO_AARCH64 or ES_TO_AARCH32
 */
void armv8_switch_to_el1(u64 args, u64 mach_nr, u64 fdt_addr,
			 u64 arg4, u64 entry_point, u64 es_flag);
void armv8_el2_to_aarch32(u64 args, u64 mach_nr, u64 fdt_addr,
			  u64 arg4, u64 entry_point);
void gic_init(void);
void gic_send_sgi(unsigned long sgino);
void wait_for_wakeup(void);
void protect_secure_region(void);
void smp_kick_all_cpus(void);

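/*
 * Example (illustrative sketch only): booting a 64-bit kernel from EL3,
 * following the argument convention documented above (fdt in @args,
 * @mach_nr and @fdt_addr zero; fdt_blob and kernel_entry are made up names):
 *
 *	armv8_switch_to_el2((u64)fdt_blob, 0, 0, 0, kernel_entry,
 *			    ES_TO_AARCH64);
 */
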
void flush_l3_cache(void);
void mmu_change_region_attr(phys_addr_t start, size_t size, u64 attrs);

/*
 * Issue a secure monitor call in accordance with the ARM "SMC Calling
 * Convention" (DEN0028A).
 *
 * @args: input and output arguments
 */
void smc_call(struct pt_regs *args);

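/*
 * Example (illustrative sketch only, assuming U-Boot's arm64 struct pt_regs
 * exposes a regs[] array): per SMCCC, the function ID goes in x0 and
 * arguments in x1-x6; results come back in x0-x3:
 *
 *	struct pt_regs regs = { 0 };
 *
 *	regs.regs[0] = function_id;	// e.g. a PSCI function ID
 *	regs.regs[1] = arg0;
 *	smc_call(&regs);
 *	result = regs.regs[0];
 */
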
void __noreturn psci_system_reset(void);
void __noreturn psci_system_off(void);

#ifdef CONFIG_ARMV8_PSCI
extern char __secure_start[];
extern char __secure_end[];
extern char __secure_stack_start[];
extern char __secure_stack_end[];

void armv8_setup_psci(void);
void psci_setup_vectors(void);
void psci_arch_init(void);
#endif

#endif	/* __ASSEMBLY__ */

#else /* CONFIG_ARM64 */

#ifdef __KERNEL__

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
#define CPU_ARCH_ARMv7		9

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/
#define CR_EE	(1 << 25)	/* Exception (Big) Endian		*/
#define CR_TRE	(1 << 28)	/* TEX remap enable			*/
#define CR_AFE	(1 << 29)	/* Access flag enable			*/
#define CR_TE	(1 << 30)	/* Thumb exception enable		*/

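/*
 * Example (illustrative sketch only): these CR_* bits pair with the
 * get_cr()/set_cr() accessors defined below, e.g. to enable the I-cache:
 *
 *	set_cr(get_cr() | CR_I);
 */
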
#if defined(CONFIG_ARMV7_LPAE) && !defined(PGTABLE_SIZE)
#define PGTABLE_SIZE		(4096 * 5)
#elif !defined(PGTABLE_SIZE)
#define PGTABLE_SIZE		(4096 * 4)
#endif

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

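/*
 * Example (illustrative sketch only): __asmeq() guards a hand-allocated
 * register against the compiler silently picking a different one, here for
 * a hypothetical SVC call that expects its argument in r0:
 *
 *	register unsigned long r0 asm("r0") = fn;
 *	asm volatile(__asmeq("%0", "r0")
 *		     "svc	#0"
 *		     : "+r" (r0) : : "memory");
 */
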
#ifndef __ASSEMBLY__

/**
 * save_boot_params() - Save boot parameters before starting reset sequence
 *
 * If you provide this function, it will be called as soon as U-Boot starts,
 * both for SPL and U-Boot proper.
 *
 * All registers are unchanged from U-Boot entry. No registers need be
 * preserved.
 *
 * This is not a normal C function. There is no stack. Return by branching to
 * save_boot_params_ret.
 *
 * void save_boot_params(u32 r0, u32 r1, u32 r2, u32 r3);
 */

/**
 * save_boot_params_ret() - Return from save_boot_params()
 *
 * If you provide save_boot_params(), then you should jump back to this
 * function when done. Try to preserve all registers.
 *
 * If your implementation of save_boot_params() is in C then it is acceptable
 * to simply call save_boot_params_ret() at the end of your function. Since
 * there is no link register set up, you cannot just exit the function. U-Boot
 * would return to the (uninitialised) value of lr, and likely crash/hang.
 *
 * If your implementation of save_boot_params() is in assembler then you
 * should use 'b' or 'bx' to return to save_boot_params_ret.
 */
void save_boot_params_ret(void);

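/*
 * Example (illustrative sketch only, in assembler): stash r0-r3 at a
 * board-chosen address (boot_params_storage is a hypothetical symbol),
 * using only the ip scratch register, then branch back:
 *
 *	.globl	save_boot_params
 *	save_boot_params:
 *		ldr	ip, =boot_params_storage
 *		stm	ip, {r0-r3}
 *		b	save_boot_params_ret
 */
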
#ifdef CONFIG_ARMV7_LPAE
void switch_to_hypervisor_ret(void);
#endif

#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

#ifdef __ARM_ARCH_7A__
#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
#else
#define wfi()
#endif

static inline unsigned long get_cpsr(void)
{
	unsigned long cpsr;

	asm volatile("mrs %0, cpsr" : "=r"(cpsr): );
	return cpsr;
}

static inline int is_hyp(void)
{
#ifdef CONFIG_ARMV7_LPAE
	/* HYP mode requires LPAE ... */
	return ((get_cpsr() & 0x1f) == 0x1a);
#else
	/* ... so without LPAE support we can optimize all hyp code away */
	return 0;
#endif
}

static inline unsigned int get_cr(void)
{
	unsigned int val;

	if (is_hyp())
		asm volatile("mrc p15, 4, %0, c1, c0, 0	@ get CR" : "=r" (val)
								  :
								  : "cc");
	else
		asm volatile("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val)
								  :
								  : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	if (is_hyp())
		asm volatile("mcr p15, 4, %0, c1, c0, 0	@ set CR" :
								  : "r" (val)
								  : "cc");
	else
		asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR" :
								  : "r" (val)
								  : "cc");
	isb();
}

static inline unsigned int get_dacr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c3, c0, 0	@ get DACR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_dacr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c3, c0, 0	@ set DACR"
	  : : "r" (val) : "cc");
	isb();
}

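/*
 * Example (illustrative sketch only): DACR holds a 2-bit field per domain
 * (0..15); 0b01 is "client" (permissions checked) and 0b11 is "manager"
 * (permissions ignored). Setting every domain to client access:
 *
 *	set_dacr(0x55555555);
 */
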
#ifdef CONFIG_ARMV7_LPAE
/* Long-Descriptor Translation Table Level 1/2 Bits */
#define TTB_SECT_XN_MASK	(1ULL << 54)
#define TTB_SECT_NG_MASK	(1 << 11)
#define TTB_SECT_AF		(1 << 10)
#define TTB_SECT_SH_MASK	(3 << 8)
#define TTB_SECT_NS_MASK	(1 << 5)
#define TTB_SECT_AP		(1 << 6)
/* Note: TTB AP bits are set elsewhere */
#define TTB_SECT_MAIR(x)	((x & 0x7) << 2) /* Index into MAIR */
#define TTB_SECT		(1 << 0)
#define TTB_PAGETABLE		(3 << 0)

/* TTBCR flags */
#define TTBCR_EAE		(1 << 31)
#define TTBCR_T0SZ(x)		((x) << 0)
#define TTBCR_T1SZ(x)		((x) << 16)
#define TTBCR_USING_TTBR0	(TTBCR_T0SZ(0) | TTBCR_T1SZ(0))
#define TTBCR_IRGN0_NC		(0 << 8)
#define TTBCR_IRGN0_WBWA	(1 << 8)
#define TTBCR_IRGN0_WT		(2 << 8)
#define TTBCR_IRGN0_WBNWA	(3 << 8)
#define TTBCR_IRGN0_MASK	(3 << 8)
#define TTBCR_ORGN0_NC		(0 << 10)
#define TTBCR_ORGN0_WBWA	(1 << 10)
#define TTBCR_ORGN0_WT		(2 << 10)
#define TTBCR_ORGN0_WBNWA	(3 << 10)
#define TTBCR_ORGN0_MASK	(3 << 10)
#define TTBCR_SHARED_NON	(0 << 12)
#define TTBCR_SHARED_OUTER	(2 << 12)
#define TTBCR_SHARED_INNER	(3 << 12)
#define TTBCR_EPD0		(0 << 7)

/*
 * Memory types
 */
#define MEMORY_ATTRIBUTES	((0x00 << (0 * 8)) | (0x88 << (1 * 8)) | \
				 (0xcc << (2 * 8)) | (0xff << (3 * 8)))

/* options available for data cache on each page */
enum dcache_option {
	DCACHE_OFF = TTB_SECT | TTB_SECT_MAIR(0) | TTB_SECT_XN_MASK,
	DCACHE_WRITETHROUGH = TTB_SECT | TTB_SECT_MAIR(1),
	DCACHE_WRITEBACK = TTB_SECT | TTB_SECT_MAIR(2),
	DCACHE_WRITEALLOC = TTB_SECT | TTB_SECT_MAIR(3),
};
#elif defined(CONFIG_CPU_V7)
/* Short-Descriptor Translation Table Level 1 Bits */
#define TTB_SECT_NS_MASK	(1 << 19)
#define TTB_SECT_NG_MASK	(1 << 17)
#define TTB_SECT_S_MASK		(1 << 16)
/* Note: TTB AP bits are set elsewhere */
#define TTB_SECT_AP		(3 << 10)
#define TTB_SECT_TEX(x)		((x & 0x7) << 12)
#define TTB_SECT_DOMAIN(x)	((x & 0xf) << 5)
#define TTB_SECT_XN_MASK	(1 << 4)
#define TTB_SECT_C_MASK		(1 << 3)
#define TTB_SECT_B_MASK		(1 << 2)
#define TTB_SECT		(2 << 0)

/* options available for data cache on each page */
enum dcache_option {
	DCACHE_OFF = TTB_SECT_DOMAIN(0) | TTB_SECT_XN_MASK | TTB_SECT,
	DCACHE_WRITETHROUGH = DCACHE_OFF | TTB_SECT_C_MASK,
	DCACHE_WRITEBACK = DCACHE_WRITETHROUGH | TTB_SECT_B_MASK,
	DCACHE_WRITEALLOC = DCACHE_WRITEBACK | TTB_SECT_TEX(1),
};
#else
#define TTB_SECT_AP		(3 << 10)
/* options available for data cache on each page */
enum dcache_option {
	DCACHE_OFF = 0x12,
	DCACHE_WRITETHROUGH = 0x1a,
	DCACHE_WRITEBACK = 0x1e,
	DCACHE_WRITEALLOC = 0x16,
};
#endif

/* Size of an MMU section */
enum {
#ifdef CONFIG_ARMV7_LPAE
	MMU_SECTION_SHIFT	= 21, /* 2MB */
#else
	MMU_SECTION_SHIFT	= 20, /* 1MB */
#endif
	MMU_SECTION_SIZE	= 1 << MMU_SECTION_SHIFT,
};

#ifdef CONFIG_CPU_V7
/* TTBR0 bits */
#define TTBR0_BASE_ADDR_MASK	0xFFFFC000
#define TTBR0_RGN_NC		(0 << 3)
#define TTBR0_RGN_WBWA		(1 << 3)
#define TTBR0_RGN_WT		(2 << 3)
#define TTBR0_RGN_WB		(3 << 3)
/* TTBR0[6] is IRGN[0] and TTBR0[0] is IRGN[1] */
#define TTBR0_IRGN_NC		(0 << 0 | 0 << 6)
#define TTBR0_IRGN_WBWA		(0 << 0 | 1 << 6)
#define TTBR0_IRGN_WT		(1 << 0 | 0 << 6)
#define TTBR0_IRGN_WB		(1 << 0 | 1 << 6)
#endif

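/*
 * Example (illustrative sketch only): a TTBR0 value combines the 16KB-aligned
 * translation table base with the outer (RGN) and inner (IRGN) cacheability
 * hints, e.g. write-back for both (table_base is a made-up name):
 *
 *	ttbr0 = (table_base & TTBR0_BASE_ADDR_MASK) |
 *		TTBR0_RGN_WB | TTBR0_IRGN_WB;
 */
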
/**
 * Register an update to the page tables, and flush the TLB
 *
 * \param start		start address of update in page table
 * \param stop		stop address of update in page table
 */
void mmu_page_table_flush(unsigned long start, unsigned long stop);

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif /* CONFIG_ARM64 */

#ifndef __ASSEMBLY__
/**
 * Change the cache settings for a region.
 *
 * \param start		start address of memory region to change
 * \param size		size of memory region to change
 * \param option	dcache option to select
 */
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option);

#ifdef CONFIG_SYS_NONCACHED_MEMORY
void noncached_init(void);
phys_addr_t noncached_alloc(size_t size, size_t align);
#endif /* CONFIG_SYS_NONCACHED_MEMORY */
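
/*
 * Example (illustrative sketch only): a driver can carve an uncached buffer
 * out of the noncached pool, e.g. for DMA descriptors; the size is made up
 * and ARCH_DMA_MINALIGN comes from asm/cache.h:
 *
 *	phys_addr_t descs = noncached_alloc(1024, ARCH_DMA_MINALIGN);
 */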

#endif /* __ASSEMBLY__ */

#endif