/* arch/arm/include/asm/system.h */
#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef CONFIG_ARM64

/*
 * SCTLR_EL1/SCTLR_EL2/SCTLR_EL3 bits definitions
 */
#define CR_M		(1 << 0)	/* MMU enable			*/
#define CR_A		(1 << 1)	/* Alignment abort enable	*/
#define CR_C		(1 << 2)	/* Dcache enable		*/
#define CR_SA		(1 << 3)	/* Stack Alignment Check Enable	*/
#define CR_I		(1 << 12)	/* Icache enable		*/
#define CR_WXN		(1 << 19)	/* Write Permission Implies XN	*/
#define CR_EE		(1 << 25)	/* Exception (Big) Endian	*/

#define PGTABLE_SIZE	(0x10000)

#ifndef __ASSEMBLY__

#define isb()				\
	({asm volatile(			\
	"isb" : : : "memory");		\
	})

#define wfi()				\
	({asm volatile(			\
	"wfi" : : : "memory");		\
	})

static inline unsigned int current_el(void)
{
	unsigned int el;

	/* CurrentEL holds the exception level in bits [3:2] */
	asm volatile("mrs %0, CurrentEL" : "=r" (el) : : "cc");
	return el >> 2;
}

static inline unsigned int get_sctlr(void)
{
	unsigned int el, val;

	el = current_el();
	if (el == 1)
		asm volatile("mrs %0, sctlr_el1" : "=r" (val) : : "cc");
	else if (el == 2)
		asm volatile("mrs %0, sctlr_el2" : "=r" (val) : : "cc");
	else
		asm volatile("mrs %0, sctlr_el3" : "=r" (val) : : "cc");

	return val;
}

static inline void set_sctlr(unsigned int val)
{
	unsigned int el;

	el = current_el();
	if (el == 1)
		asm volatile("msr sctlr_el1, %0" : : "r" (val) : "cc");
	else if (el == 2)
		asm volatile("msr sctlr_el2, %0" : : "r" (val) : "cc");
	else
		asm volatile("msr sctlr_el3, %0" : : "r" (val) : "cc");

	asm volatile("isb");
}
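
/*
 * Example (illustrative sketch): enable the instruction and data caches at
 * the current exception level by read-modify-writing SCTLR through the
 * accessors above.  Real cache-enable code also sets up translation tables
 * before setting CR_M/CR_C; this only shows how the accessors combine with
 * the CR_* bit definitions:
 *
 *	set_sctlr(get_sctlr() | CR_C | CR_I);
 */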

void __asm_flush_dcache_all(void);
void __asm_invalidate_dcache_all(void);
void __asm_flush_dcache_range(u64 start, u64 end);
void __asm_invalidate_tlb_all(void);
void __asm_invalidate_icache_all(void);

void armv8_switch_to_el2(void);
void armv8_switch_to_el1(void);
void gic_init(void);
void gic_send_sgi(unsigned long sgino);
void wait_for_wakeup(void);
void smp_kick_all_cpus(void);

#endif	/* __ASSEMBLY__ */

#else /* CONFIG_ARM64 */

#ifdef __KERNEL__

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
#define CPU_ARCH_ARMv7		9

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/
#define CR_EE	(1 << 25)	/* Exception (Big) Endian		*/
#define CR_TRE	(1 << 28)	/* TEX remap enable			*/
#define CR_AFE	(1 << 29)	/* Access flag enable			*/
#define CR_TE	(1 << 30)	/* Thumb exception enable		*/

#define PGTABLE_SIZE		(4096 * 4)

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for in some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
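
/*
 * Example (illustrative sketch): guard an inline asm statement that assumes
 * operand 0 really ended up in r0, as in a system-call style wrapper.  If
 * the compiler picked a different register for %0, the generated .ifnc
 * directive fails and assembly stops.  'value' is just a placeholder:
 *
 *	register unsigned long r0 asm("r0") = value;
 *	asm volatile(__asmeq("%0", "r0")
 *		     "svc #0"
 *		     : "+r" (r0));
 */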

#ifndef __ASSEMBLY__

/* On 32-bit ARM, isb() is only a compiler barrier; no ISB instruction is issued */
#define isb() __asm__ __volatile__ ("" : : : "memory")

#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

#ifdef __ARM_ARCH_7A__
#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
#else
#define wfi()
#endif

static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
	  : : "r" (val) : "cc");
	isb();
}
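
/*
 * Example (illustrative sketch): enable the instruction cache by setting
 * CR_I in the CP15 control register through the accessors above:
 *
 *	set_cr(get_cr() | CR_I);
 */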

static inline unsigned int get_dacr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c3, c0, 0	@ get DACR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_dacr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c3, c0, 0	@ set DACR"
	  : : "r" (val) : "cc");
	isb();
}
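
/*
 * Example (illustrative sketch): the DACR holds a 2-bit access field for
 * each of the 16 MMU domains.  Setting domain 0 to "client" (0b01) makes
 * the MMU check page-table permissions for that domain:
 *
 *	set_dacr((get_dacr() & ~0x3) | 0x1);
 */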

/* options available for data cache on each page */
enum dcache_option {
	DCACHE_OFF = 0x12,
	DCACHE_WRITETHROUGH = 0x1a,
	DCACHE_WRITEBACK = 0x1e,
};

/* Size of an MMU section */
enum {
	MMU_SECTION_SHIFT	= 20,
	MMU_SECTION_SIZE	= 1 << MMU_SECTION_SHIFT,
};
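
/*
 * Example (illustrative sketch): each first-level page table entry covers one
 * 1 MiB section, so an address can be aligned down to its section base and
 * turned into a section index like this:
 *
 *	base = addr & ~(MMU_SECTION_SIZE - 1);
 *	section = addr >> MMU_SECTION_SHIFT;
 */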

/**
 * Change the cache settings for a region.
 *
 * \param start		start address of memory region to change
 * \param size		size of memory region to change, in bytes
 * \param option	dcache option to select
 */
void mmu_set_region_dcache_behaviour(u32 start, int size,
				     enum dcache_option option);
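
/*
 * Example (illustrative sketch): mark a frame buffer as write-through so CPU
 * writes reach memory without explicit flushes.  fb_base and fb_size are
 * placeholders for a board-specific address and length in bytes:
 *
 *	mmu_set_region_dcache_behaviour(fb_base, fb_size, DCACHE_WRITETHROUGH);
 */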

/**
 * Register an update to the page tables, and flush the TLB
 *
 * \param start		start address of update in page table
 * \param stop		stop address of update in page table
 */
void mmu_page_table_flush(unsigned long start, unsigned long stop);

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif /* CONFIG_ARM64 */

#endif /* __ASM_ARM_SYSTEM_H */