xref: /openbmc/u-boot/arch/arm/cpu/armv7/virt-v7.c (revision 01a97a11)
/*
 * (C) Copyright 2013
 * Andre Przywara, Linaro <andre.przywara@linaro.org>
 *
 * Routines to transition ARMv7 processors from secure into non-secure state
 * and from non-secure SVC into HYP mode
 * needed to enable ARMv7 virtualization for current hypervisors
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/armv7.h>
#include <asm/gic.h>
#include <asm/io.h>
#include <asm/secure.h>

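/*
 * ID_PFR1 reports which of the optional architecture extensions are
 * implemented; bits [7:4] describe the Security extensions and bits
 * [15:12] the Virtualization extensions.
 */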
static unsigned int read_id_pfr1(void)
{
	unsigned int reg;

	asm("mrc p15, 0, %0, c0, c1, 1\n" : "=r"(reg));
	return reg;
}

static unsigned long get_gicd_base_address(void)
{
#ifdef CONFIG_ARM_GIC_BASE_ADDRESS
	return CONFIG_ARM_GIC_BASE_ADDRESS + GIC_DIST_OFFSET;
#else
	unsigned periphbase;

	/* get the GIC base address from the CBAR register */
	asm("mrc p15, 4, %0, c15, c0, 0\n" : "=r" (periphbase));

	/* the PERIPHBASE can be mapped above 4 GB (lower 8 bits used to
	 * encode this). Bail out here since we cannot access this without
	 * enabling paging.
	 */
	if ((periphbase & 0xff) != 0) {
		printf("nonsec: PERIPHBASE is above 4 GB, no access.\n");
		return -1;
	}

	return (periphbase & CBAR_MASK) + GIC_DIST_OFFSET;
#endif
}

/* Define a specific version of this function to enable any available
 * hardware protections for the reserved region */
void __weak protect_secure_section(void) {}

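/*
 * Copy the code in the __secure section (the monitor and SMP pen) to its
 * dedicated location and make the copy visible to instruction fetches.
 */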
static void relocate_secure_section(void)
{
#ifdef CONFIG_ARMV7_SECURE_BASE
	size_t sz = __secure_end - __secure_start;

	memcpy((void *)CONFIG_ARMV7_SECURE_BASE, __secure_start, sz);
	flush_dcache_range(CONFIG_ARMV7_SECURE_BASE,
			   CONFIG_ARMV7_SECURE_BASE + sz + 1);
	protect_secure_section();
	invalidate_icache_all();
#endif
}

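/*
 * GICD_SGIR with TargetListFilter (bits [25:24]) set to 0b01 sends SGI 0
 * to every CPU interface except the one doing the write.
 */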
static void kick_secondary_cpus_gic(unsigned long gicdaddr)
{
	/* kick all CPUs (except this one) by writing to GICD_SGIR */
	writel(1U << 24, gicdaddr + GICD_SGIR);
}

void __weak smp_kick_all_cpus(void)
{
	unsigned long gic_dist_addr;

	gic_dist_addr = get_gicd_base_address();
	if (gic_dist_addr == -1)
		return;

	kick_secondary_cpus_gic(gic_dist_addr);
}

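/* Boards providing PSCI can override this hook for board-specific setup. */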
__weak void psci_board_init(void)
{
}

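/*
 * Prepare this core (and, via the SMP pen, the secondaries) for the switch
 * to non-secure state: check for the Security extensions, open up the GIC
 * to non-secure accesses, install the secure section and wake the other
 * cores so they can take the same path.
 */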
int armv7_init_nonsec(void)
{
	unsigned int reg;
	unsigned itlinesnr, i;
	unsigned long gic_dist_addr;

	/* check whether the CPU supports the security extensions */
	reg = read_id_pfr1();
	if ((reg & 0xF0) == 0) {
		printf("nonsec: Security extensions not implemented.\n");
		return -1;
	}

	/* The SCR register will be set directly in the monitor mode handler;
	 * according to the spec one should not tinker with it in secure state
	 * while in SVC mode. Do not try to read it once in non-secure state,
	 * as any access to it will trap.
	 */

	gic_dist_addr = get_gicd_base_address();
	if (gic_dist_addr == -1)
		return -1;

	/* enable the GIC distributor (Group 0 and Group 1 forwarding) */
	writel(readl(gic_dist_addr + GICD_CTLR) | 0x03,
	       gic_dist_addr + GICD_CTLR);

	/* TYPER[4:0] contains an encoded number of available interrupts */
	itlinesnr = readl(gic_dist_addr + GICD_TYPER) & 0x1f;

	/* set all bits in the GIC group registers to one to allow access
	 * from non-secure state. The first 32 interrupts are private per
	 * CPU and will be set later when enabling the GIC for each core
	 */
	for (i = 1; i <= itlinesnr; i++)
		writel((unsigned)-1, gic_dist_addr + GICD_IGROUPRn + 4 * i);

	psci_board_init();

	/*
	 * Relocate the secure section before any CPU runs code in secure RAM:
	 * smp_kick_all_cpus() may start the other cores, which then jump into
	 * secure RAM, so the copy must be in place before they are woken up.
	 */
	relocate_secure_section();

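	/*
	 * Without PSCI, park the secondaries in the secure pen: point their
	 * boot address at _smp_pen in the relocated secure section, then
	 * kick them so they enter the pen and wait to be released.
	 */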
#ifndef CONFIG_ARMV7_PSCI
	smp_set_core_boot_addr((unsigned long)secure_ram_addr(_smp_pen), -1);
	smp_kick_all_cpus();
#endif

	/* call the non-sec switching code on this CPU also */
	secure_ram_addr(_nonsec_init)();
	return 0;
}