// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2013
 * Andre Przywara, Linaro <andre.przywara@linaro.org>
 *
 * Routines to transition ARMv7 processors from secure into non-secure state
 * and from non-secure SVC into HYP mode,
 * needed to enable ARMv7 virtualization for current hypervisors
 */

#include <common.h>
#include <asm/armv7.h>
#include <asm/gic.h>
#include <asm/io.h>
#include <asm/secure.h>

static unsigned int read_id_pfr1(void)
{
	unsigned int reg;

	asm("mrc p15, 0, %0, c0, c1, 1\n" : "=r"(reg));
	return reg;
}

static unsigned long get_gicd_base_address(void)
{
#ifdef CONFIG_ARM_GIC_BASE_ADDRESS
	return CONFIG_ARM_GIC_BASE_ADDRESS + GIC_DIST_OFFSET;
#else
	unsigned periphbase;

	/* get the GIC base address from the CBAR register */
	asm("mrc p15, 4, %0, c15, c0, 0\n" : "=r" (periphbase));

	/* the PERIPHBASE can be mapped above 4 GB (the lower 8 bits are used
	 * to encode this). Bail out here since we cannot access it without
	 * enabling paging.
	 */
	if ((periphbase & 0xff) != 0) {
		printf("nonsec: PERIPHBASE is above 4 GB, no access.\n");
		return -1;
	}

	return (periphbase & CBAR_MASK) + GIC_DIST_OFFSET;
#endif
}

/* Define a specific version of this function to enable any available
 * hardware protections for the reserved region */
void __weak protect_secure_section(void) {}

static void relocate_secure_section(void)
{
#ifdef CONFIG_ARMV7_SECURE_BASE
	size_t sz = __secure_end - __secure_start;
	unsigned long szflush = ALIGN(sz + 1, CONFIG_SYS_CACHELINE_SIZE);

	memcpy((void *)CONFIG_ARMV7_SECURE_BASE, __secure_start, sz);

	flush_dcache_range(CONFIG_ARMV7_SECURE_BASE,
			   CONFIG_ARMV7_SECURE_BASE + szflush);
	protect_secure_section();
	invalidate_icache_all();
#endif
}

static void kick_secondary_cpus_gic(unsigned long gicdaddr)
{
	/* kick all CPUs (except this one) by writing to GICD_SGIR */
	writel(1U << 24, gicdaddr + GICD_SGIR);
}

void __weak smp_kick_all_cpus(void)
{
	unsigned long gic_dist_addr;

	gic_dist_addr = get_gicd_base_address();
	if (gic_dist_addr == -1)
		return;

	kick_secondary_cpus_gic(gic_dist_addr);
}

__weak void psci_board_init(void)
{
}

int armv7_init_nonsec(void)
{
	unsigned int reg;
	unsigned itlinesnr, i;
	unsigned long gic_dist_addr;

	/* check whether the CPU supports the security extensions */
	reg = read_id_pfr1();
	if ((reg & 0xF0) == 0) {
		printf("nonsec: Security extensions not implemented.\n");
		return -1;
	}

	/* The SCR register will be set directly in the monitor mode handler;
	 * according to the spec one should not tinker with it in secure state
	 * while in SVC mode. Do not try to read it once in non-secure state,
	 * as any access to it will trap.
	 */

	gic_dist_addr = get_gicd_base_address();
	if (gic_dist_addr == -1)
		return -1;

	/* enable the GIC distributor */
	writel(readl(gic_dist_addr + GICD_CTLR) | 0x03,
	       gic_dist_addr + GICD_CTLR);

	/* TYPER[4:0] contains an encoded number of available interrupts */
	itlinesnr = readl(gic_dist_addr + GICD_TYPER) & 0x1f;

	/* set all bits in the GIC group registers to one to allow access
	 * from non-secure state. The first 32 interrupts are private per
	 * CPU and will be set later when enabling the GIC for each core
	 */
	for (i = 1; i <= itlinesnr; i++)
		writel((unsigned)-1, gic_dist_addr + GICD_IGROUPRn + 4 * i);

	psci_board_init();

	/*
	 * Relocate the secure section before any CPU runs code in secure
	 * RAM. smp_kick_all_cpus() may bring up other cores, which then
	 * execute from secure RAM, so the secure section must be in place
	 * before they are enabled.
	 */
	relocate_secure_section();

#ifndef CONFIG_ARMV7_PSCI
	smp_set_core_boot_addr((unsigned long)secure_ram_addr(_smp_pen), -1);
	smp_kick_all_cpus();
#endif

	/* call the non-sec switching code on this CPU also */
	secure_ram_addr(_nonsec_init)();
	return 0;
}
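
/*
 * Usage sketch (illustrative only, not part of the original file): a boot
 * path is expected to call armv7_init_nonsec() once on the boot CPU before
 * jumping to an OS image that should start in non-secure (and possibly HYP)
 * state. The boot_nonsec_example() helper and the kernel_entry parameter
 * below are hypothetical placeholders, not existing U-Boot API.
 */
static void __maybe_unused boot_nonsec_example(void (*kernel_entry)(void))
{
	/*
	 * Switch the boot CPU to non-secure state; without CONFIG_ARMV7_PSCI
	 * the secondaries are kicked into the secure-RAM pen by
	 * armv7_init_nonsec(). On failure, simply continue in secure state.
	 */
	if (armv7_init_nonsec() != 0)
		printf("nonsec: staying in secure state\n");

	kernel_entry();
}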