xref: /openbmc/u-boot/arch/arm/include/asm/system.h (revision 0ae7653128c80a4f2920cbe9b124792c2fd9d9e0)
1819833afSPeter Tyser #ifndef __ASM_ARM_SYSTEM_H
2819833afSPeter Tyser #define __ASM_ARM_SYSTEM_H
3819833afSPeter Tyser 
4*0ae76531SDavid Feng #ifdef CONFIG_ARM64
5*0ae76531SDavid Feng 
6*0ae76531SDavid Feng /*
7*0ae76531SDavid Feng  * SCTLR_EL1/SCTLR_EL2/SCTLR_EL3 bits definitions
8*0ae76531SDavid Feng  */
9*0ae76531SDavid Feng #define CR_M		(1 << 0)	/* MMU enable			*/
10*0ae76531SDavid Feng #define CR_A		(1 << 1)	/* Alignment abort enable	*/
11*0ae76531SDavid Feng #define CR_C		(1 << 2)	/* Dcache enable		*/
12*0ae76531SDavid Feng #define CR_SA		(1 << 3)	/* Stack Alignment Check Enable	*/
13*0ae76531SDavid Feng #define CR_I		(1 << 12)	/* Icache enable		*/
14*0ae76531SDavid Feng #define CR_WXN		(1 << 19)	/* Write Permision Imply XN	*/
15*0ae76531SDavid Feng #define CR_EE		(1 << 25)	/* Exception (Big) Endian	*/
16*0ae76531SDavid Feng 
17*0ae76531SDavid Feng #define PGTABLE_SIZE	(0x10000)
18*0ae76531SDavid Feng 
19*0ae76531SDavid Feng #ifndef __ASSEMBLY__
20*0ae76531SDavid Feng 
21*0ae76531SDavid Feng #define isb()				\
22*0ae76531SDavid Feng 	({asm volatile(			\
23*0ae76531SDavid Feng 	"isb" : : : "memory");		\
24*0ae76531SDavid Feng 	})
25*0ae76531SDavid Feng 
26*0ae76531SDavid Feng #define wfi()				\
27*0ae76531SDavid Feng 	({asm volatile(			\
28*0ae76531SDavid Feng 	"wfi" : : : "memory");		\
29*0ae76531SDavid Feng 	})
30*0ae76531SDavid Feng 
31*0ae76531SDavid Feng static inline unsigned int current_el(void)
32*0ae76531SDavid Feng {
33*0ae76531SDavid Feng 	unsigned int el;
34*0ae76531SDavid Feng 	asm volatile("mrs %0, CurrentEL" : "=r" (el) : : "cc");
35*0ae76531SDavid Feng 	return el >> 2;
36*0ae76531SDavid Feng }
37*0ae76531SDavid Feng 
38*0ae76531SDavid Feng static inline unsigned int get_sctlr(void)
39*0ae76531SDavid Feng {
40*0ae76531SDavid Feng 	unsigned int el, val;
41*0ae76531SDavid Feng 
42*0ae76531SDavid Feng 	el = current_el();
43*0ae76531SDavid Feng 	if (el == 1)
44*0ae76531SDavid Feng 		asm volatile("mrs %0, sctlr_el1" : "=r" (val) : : "cc");
45*0ae76531SDavid Feng 	else if (el == 2)
46*0ae76531SDavid Feng 		asm volatile("mrs %0, sctlr_el2" : "=r" (val) : : "cc");
47*0ae76531SDavid Feng 	else
48*0ae76531SDavid Feng 		asm volatile("mrs %0, sctlr_el3" : "=r" (val) : : "cc");
49*0ae76531SDavid Feng 
50*0ae76531SDavid Feng 	return val;
51*0ae76531SDavid Feng }
52*0ae76531SDavid Feng 
53*0ae76531SDavid Feng static inline void set_sctlr(unsigned int val)
54*0ae76531SDavid Feng {
55*0ae76531SDavid Feng 	unsigned int el;
56*0ae76531SDavid Feng 
57*0ae76531SDavid Feng 	el = current_el();
58*0ae76531SDavid Feng 	if (el == 1)
59*0ae76531SDavid Feng 		asm volatile("msr sctlr_el1, %0" : : "r" (val) : "cc");
60*0ae76531SDavid Feng 	else if (el == 2)
61*0ae76531SDavid Feng 		asm volatile("msr sctlr_el2, %0" : : "r" (val) : "cc");
62*0ae76531SDavid Feng 	else
63*0ae76531SDavid Feng 		asm volatile("msr sctlr_el3, %0" : : "r" (val) : "cc");
64*0ae76531SDavid Feng 
65*0ae76531SDavid Feng 	asm volatile("isb");
66*0ae76531SDavid Feng }
67*0ae76531SDavid Feng 
68*0ae76531SDavid Feng void __asm_flush_dcache_all(void);
69*0ae76531SDavid Feng void __asm_flush_dcache_range(u64 start, u64 end);
70*0ae76531SDavid Feng void __asm_invalidate_tlb_all(void);
71*0ae76531SDavid Feng void __asm_invalidate_icache_all(void);
72*0ae76531SDavid Feng 
73*0ae76531SDavid Feng void armv8_switch_to_el2(void);
74*0ae76531SDavid Feng void armv8_switch_to_el1(void);
75*0ae76531SDavid Feng void gic_init(void);
76*0ae76531SDavid Feng void gic_send_sgi(unsigned long sgino);
77*0ae76531SDavid Feng void wait_for_wakeup(void);
78*0ae76531SDavid Feng void smp_kick_all_cpus(void);
79*0ae76531SDavid Feng 
80*0ae76531SDavid Feng #endif	/* __ASSEMBLY__ */
81*0ae76531SDavid Feng 
82*0ae76531SDavid Feng #else /* CONFIG_ARM64 */
83*0ae76531SDavid Feng 
84819833afSPeter Tyser #ifdef __KERNEL__
85819833afSPeter Tyser 
86819833afSPeter Tyser #define CPU_ARCH_UNKNOWN	0
87819833afSPeter Tyser #define CPU_ARCH_ARMv3		1
88819833afSPeter Tyser #define CPU_ARCH_ARMv4		2
89819833afSPeter Tyser #define CPU_ARCH_ARMv4T		3
90819833afSPeter Tyser #define CPU_ARCH_ARMv5		4
91819833afSPeter Tyser #define CPU_ARCH_ARMv5T		5
92819833afSPeter Tyser #define CPU_ARCH_ARMv5TE	6
93819833afSPeter Tyser #define CPU_ARCH_ARMv5TEJ	7
94819833afSPeter Tyser #define CPU_ARCH_ARMv6		8
95819833afSPeter Tyser #define CPU_ARCH_ARMv7		9
96819833afSPeter Tyser 
97819833afSPeter Tyser /*
98819833afSPeter Tyser  * CR1 bits (CP#15 CR1)
99819833afSPeter Tyser  */
100819833afSPeter Tyser #define CR_M	(1 << 0)	/* MMU enable				*/
101819833afSPeter Tyser #define CR_A	(1 << 1)	/* Alignment abort enable		*/
102819833afSPeter Tyser #define CR_C	(1 << 2)	/* Dcache enable			*/
103819833afSPeter Tyser #define CR_W	(1 << 3)	/* Write buffer enable			*/
104819833afSPeter Tyser #define CR_P	(1 << 4)	/* 32-bit exception handler		*/
105819833afSPeter Tyser #define CR_D	(1 << 5)	/* 32-bit data address range		*/
106819833afSPeter Tyser #define CR_L	(1 << 6)	/* Implementation defined		*/
107819833afSPeter Tyser #define CR_B	(1 << 7)	/* Big endian				*/
108819833afSPeter Tyser #define CR_S	(1 << 8)	/* System MMU protection		*/
109819833afSPeter Tyser #define CR_R	(1 << 9)	/* ROM MMU protection			*/
110819833afSPeter Tyser #define CR_F	(1 << 10)	/* Implementation defined		*/
111819833afSPeter Tyser #define CR_Z	(1 << 11)	/* Implementation defined		*/
112819833afSPeter Tyser #define CR_I	(1 << 12)	/* Icache enable			*/
113819833afSPeter Tyser #define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
114819833afSPeter Tyser #define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
115819833afSPeter Tyser #define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
116819833afSPeter Tyser #define CR_DT	(1 << 16)
117819833afSPeter Tyser #define CR_IT	(1 << 18)
118819833afSPeter Tyser #define CR_ST	(1 << 19)
119819833afSPeter Tyser #define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
120819833afSPeter Tyser #define CR_U	(1 << 22)	/* Unaligned access operation		*/
121819833afSPeter Tyser #define CR_XP	(1 << 23)	/* Extended page tables			*/
122819833afSPeter Tyser #define CR_VE	(1 << 24)	/* Vectored interrupts			*/
123819833afSPeter Tyser #define CR_EE	(1 << 25)	/* Exception (Big) Endian		*/
124819833afSPeter Tyser #define CR_TRE	(1 << 28)	/* TEX remap enable			*/
125819833afSPeter Tyser #define CR_AFE	(1 << 29)	/* Access flag enable			*/
126819833afSPeter Tyser #define CR_TE	(1 << 30)	/* Thumb exception enable		*/
127819833afSPeter Tyser 
128*0ae76531SDavid Feng #define PGTABLE_SIZE		(4096 * 4)
129*0ae76531SDavid Feng 
130819833afSPeter Tyser /*
131819833afSPeter Tyser  * This is used to ensure the compiler did actually allocate the register we
132819833afSPeter Tyser  * asked it for some inline assembly sequences.  Apparently we can't trust
133819833afSPeter Tyser  * the compiler from one version to another so a bit of paranoia won't hurt.
134819833afSPeter Tyser  * This string is meant to be concatenated with the inline asm string and
135819833afSPeter Tyser  * will cause compilation to stop on mismatch.
136819833afSPeter Tyser  * (for details, see gcc PR 15089)
137819833afSPeter Tyser  */
138819833afSPeter Tyser #define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
139819833afSPeter Tyser 
140819833afSPeter Tyser #ifndef __ASSEMBLY__
141819833afSPeter Tyser 
142819833afSPeter Tyser #define isb() __asm__ __volatile__ ("" : : : "memory")
143819833afSPeter Tyser 
144819833afSPeter Tyser #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
145819833afSPeter Tyser 
1462ff467c0SRob Herring #ifdef __ARM_ARCH_7A__
1472ff467c0SRob Herring #define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
1482ff467c0SRob Herring #else
1492ff467c0SRob Herring #define wfi()
1502ff467c0SRob Herring #endif
1512ff467c0SRob Herring 
152819833afSPeter Tyser static inline unsigned int get_cr(void)
153819833afSPeter Tyser {
154819833afSPeter Tyser 	unsigned int val;
155819833afSPeter Tyser 	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
156819833afSPeter Tyser 	return val;
157819833afSPeter Tyser }
158819833afSPeter Tyser 
159819833afSPeter Tyser static inline void set_cr(unsigned int val)
160819833afSPeter Tyser {
161819833afSPeter Tyser 	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
162819833afSPeter Tyser 	  : : "r" (val) : "cc");
163819833afSPeter Tyser 	isb();
164819833afSPeter Tyser }
165819833afSPeter Tyser 
166de63ac27SR Sricharan static inline unsigned int get_dacr(void)
167de63ac27SR Sricharan {
168de63ac27SR Sricharan 	unsigned int val;
169de63ac27SR Sricharan 	asm("mrc p15, 0, %0, c3, c0, 0	@ get DACR" : "=r" (val) : : "cc");
170de63ac27SR Sricharan 	return val;
171de63ac27SR Sricharan }
172de63ac27SR Sricharan 
173de63ac27SR Sricharan static inline void set_dacr(unsigned int val)
174de63ac27SR Sricharan {
175de63ac27SR Sricharan 	asm volatile("mcr p15, 0, %0, c3, c0, 0	@ set DACR"
176de63ac27SR Sricharan 	  : : "r" (val) : "cc");
177de63ac27SR Sricharan 	isb();
178de63ac27SR Sricharan }
179de63ac27SR Sricharan 
/*
 * Options available for data cache on each page.
 *
 * NOTE(review): these look like ARM first-level section-descriptor
 * attribute values (bits [1:0] = 0b10 marks a section; bit 3 = C,
 * bit 2 = B select the cache policy) — confirm against the MMU setup
 * code that consumes them.
 */
enum dcache_option {
	DCACHE_OFF = 0x12,		/* C=0, B=0: non-cacheable */
	DCACHE_WRITETHROUGH = 0x1a,	/* C=1, B=0: write-through */
	DCACHE_WRITEBACK = 0x1e,	/* C=1, B=1: write-back */
};
1860dde7f53SSimon Glass 
/* Size of an MMU section: 1 << 20 = 1 MiB */
enum {
	MMU_SECTION_SHIFT	= 20,
	MMU_SECTION_SIZE	= 1 << MMU_SECTION_SHIFT,
};
1920dde7f53SSimon Glass 
1930dde7f53SSimon Glass /**
1940dde7f53SSimon Glass  * Change the cache settings for a region.
1950dde7f53SSimon Glass  *
1960dde7f53SSimon Glass  * \param start		start address of memory region to change
1970dde7f53SSimon Glass  * \param size		size of memory region to change
1980dde7f53SSimon Glass  * \param option	dcache option to select
1990dde7f53SSimon Glass  */
2000dde7f53SSimon Glass void mmu_set_region_dcache_behaviour(u32 start, int size,
2010dde7f53SSimon Glass 				     enum dcache_option option);
2020dde7f53SSimon Glass 
2030dde7f53SSimon Glass /**
2040dde7f53SSimon Glass  * Register an update to the page tables, and flush the TLB
2050dde7f53SSimon Glass  *
2060dde7f53SSimon Glass  * \param start		start address of update in page table
2070dde7f53SSimon Glass  * \param stop		stop address of update in page table
2080dde7f53SSimon Glass  */
2090dde7f53SSimon Glass void mmu_page_table_flush(unsigned long start, unsigned long stop);
2100dde7f53SSimon Glass 
211819833afSPeter Tyser #endif /* __ASSEMBLY__ */
212819833afSPeter Tyser 
213819833afSPeter Tyser #define arch_align_stack(x) (x)
214819833afSPeter Tyser 
215819833afSPeter Tyser #endif /* __KERNEL__ */
216819833afSPeter Tyser 
217*0ae76531SDavid Feng #endif /* CONFIG_ARM64 */
218*0ae76531SDavid Feng 
219819833afSPeter Tyser #endif
220