/*
 * (C) Copyright 2013
 * Andre Przywara, Linaro <andre.przywara@linaro.org>
 *
 * Routines to transition ARMv7 processors from secure into non-secure state
 * and from non-secure SVC into HYP mode
 * needed to enable ARMv7 virtualization for current hypervisors
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <asm/armv7.h>
#include <asm/gic.h>
#include <asm/io.h>
#include <asm/secure.h>

static unsigned int read_id_pfr1(void)
{
	unsigned int reg;

	asm("mrc p15, 0, %0, c0, c1, 1\n" : "=r"(reg));
	return reg;
}

static unsigned long get_gicd_base_address(void)
{
#ifdef CONFIG_ARM_GIC_BASE_ADDRESS
	return CONFIG_ARM_GIC_BASE_ADDRESS + GIC_DIST_OFFSET;
#else
	unsigned periphbase;

	/* get the GIC base address from the CBAR register */
	asm("mrc p15, 4, %0, c15, c0, 0\n" : "=r" (periphbase));

	/* the PERIPHBASE can be mapped above 4 GB (lower 8 bits used to
	 * encode this). Bail out here since we cannot access this without
	 * enabling paging.
	 */
	if ((periphbase & 0xff) != 0) {
		printf("nonsec: PERIPHBASE is above 4 GB, no access.\n");
		return -1;
	}

	return (periphbase & CBAR_MASK) + GIC_DIST_OFFSET;
#endif
}

/* Define a specific version of this function to enable any available
 * hardware protections for the reserved region */
void __weak protect_secure_section(void) {}

static void relocate_secure_section(void)
{
#ifdef CONFIG_ARMV7_SECURE_BASE
	size_t sz = __secure_end - __secure_start;
	unsigned long szflush = ALIGN(sz + 1, CONFIG_SYS_CACHELINE_SIZE);

	memcpy((void *)CONFIG_ARMV7_SECURE_BASE, __secure_start, sz);

	flush_dcache_range(CONFIG_ARMV7_SECURE_BASE,
			   CONFIG_ARMV7_SECURE_BASE + szflush);
	protect_secure_section();
	invalidate_icache_all();
#endif
}

static void kick_secondary_cpus_gic(unsigned long gicdaddr)
{
	/* kick all CPUs (except this one) by writing to GICD_SGIR */
	writel(1U << 24, gicdaddr + GICD_SGIR);
}

void __weak smp_kick_all_cpus(void)
{
	unsigned long gic_dist_addr;

	gic_dist_addr = get_gicd_base_address();
	if (gic_dist_addr == -1)
		return;

	kick_secondary_cpus_gic(gic_dist_addr);
}

__weak void psci_board_init(void)
{
}

int armv7_init_nonsec(void)
{
	unsigned int reg;
	unsigned itlinesnr, i;
	unsigned long gic_dist_addr;

	/* check whether the CPU supports the security extensions */
	reg = read_id_pfr1();
	if ((reg & 0xF0) == 0) {
		printf("nonsec: Security extensions not implemented.\n");
		return -1;
	}

	/* the SCR register will be set directly in the monitor mode handler,
	 * according to the spec one should not tinker with it in secure state
	 * in SVC mode. Do not try to read it once in non-secure state,
	 * any access to it will trap.
	 */

	gic_dist_addr = get_gicd_base_address();
	if (gic_dist_addr == -1)
		return -1;

	/* enable the GIC distributor */
	writel(readl(gic_dist_addr + GICD_CTLR) | 0x03,
	       gic_dist_addr + GICD_CTLR);

	/* TYPER[4:0] contains an encoded number of available interrupts */
	itlinesnr = readl(gic_dist_addr + GICD_TYPER) & 0x1f;

	/* set all bits in the GIC group registers to one to allow access
	 * from non-secure state. The first 32 interrupts are private per
	 * CPU and will be set later when enabling the GIC for each core
	 */
	for (i = 1; i <= itlinesnr; i++)
		writel((unsigned)-1, gic_dist_addr + GICD_IGROUPRn + 4 * i);

	psci_board_init();

	/*
	 * Relocate secure section before any cpu runs in secure ram.
	 * smp_kick_all_cpus may enable other cores and runs into secure
	 * ram, so need to relocate secure section before enabling other
	 * cores.
	 */
	relocate_secure_section();

#ifndef CONFIG_ARMV7_PSCI
	smp_set_core_boot_addr((unsigned long)secure_ram_addr(_smp_pen), -1);
	smp_kick_all_cpus();
#endif

	/* call the non-sec switching code on this CPU also */
	secure_ram_addr(_nonsec_init)();
	return 0;
}
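Both protect_secure_section() and psci_board_init() above are declared __weak so that board code can supply real implementations (protecting the relocated secure section and doing any board setup PSCI needs). The sketch below shows one plausible shape for such overrides; it is an illustration only, assumes the same headers as the listing above, and board_tzasc_protect() is a hypothetical board helper, not an existing U-Boot API.

/*
 * Sketch only, not part of the listing above: possible board-side overrides
 * of the __weak hooks.  board_tzasc_protect() is a hypothetical helper
 * standing in for whatever protection hardware (e.g. a TZASC) the board
 * actually has.
 */
#ifdef CONFIG_ARMV7_SECURE_BASE
void protect_secure_section(void)
{
	/* keep non-secure masters out of the relocated secure section */
	board_tzasc_protect(CONFIG_ARMV7_SECURE_BASE,
			    (size_t)(__secure_end - __secure_start));
}
#endif

void psci_board_init(void)
{
	/* e.g. power-controller or clock setup needed before PSCI calls */
}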