// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2011 Linaro Ltd.
 */

#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reset-controller.h>
#include <linux/smp.h>
#include <asm/smp_plat.h>
#include "common.h"
#include "hardware.h"

#define SRC_SCR				0x000
#define SRC_GPR1_V1			0x020
#define SRC_GPR1_V2			0x074
#define SRC_GPR1(gpr_v2)		((gpr_v2) ? SRC_GPR1_V2 : SRC_GPR1_V1)
#define BP_SRC_SCR_WARM_RESET_ENABLE	0
#define BP_SRC_SCR_SW_GPU_RST		1
#define BP_SRC_SCR_SW_VPU_RST		2
#define BP_SRC_SCR_SW_IPU1_RST		3
#define BP_SRC_SCR_SW_OPEN_VG_RST	4
#define BP_SRC_SCR_SW_IPU2_RST		12
#define BP_SRC_SCR_CORE1_RST		14
#define BP_SRC_SCR_CORE1_ENABLE		22
/* below is for i.MX7D */
#define SRC_A7RCR1			0x008
#define BP_SRC_A7RCR1_A7_CORE1_ENABLE	1
#define GPC_CPU_PGC_SW_PUP_REQ		0xf0
#define GPC_CPU_PGC_SW_PDN_REQ		0xfc
#define GPC_PGC_C1			0x840
#define BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7	0x2

static void __iomem *src_base;
static DEFINE_SPINLOCK(scr_lock);
static bool gpr_v2;
static void __iomem *gpc_base;

static const int sw_reset_bits[5] = {
	BP_SRC_SCR_SW_GPU_RST,
	BP_SRC_SCR_SW_VPU_RST,
	BP_SRC_SCR_SW_IPU1_RST,
	BP_SRC_SCR_SW_OPEN_VG_RST,
	BP_SRC_SCR_SW_IPU2_RST
};

static int imx_src_reset_module(struct reset_controller_dev *rcdev,
		unsigned long sw_reset_idx)
{
	unsigned long timeout;
	unsigned long flags;
	int bit;
	u32 val;

	if (sw_reset_idx >= ARRAY_SIZE(sw_reset_bits))
		return -EINVAL;

	bit = 1 << sw_reset_bits[sw_reset_idx];

	spin_lock_irqsave(&scr_lock, flags);
	val = readl_relaxed(src_base + SRC_SCR);
	val |= bit;
	writel_relaxed(val, src_base + SRC_SCR);
	spin_unlock_irqrestore(&scr_lock, flags);

	timeout = jiffies + msecs_to_jiffies(1000);
	while (readl(src_base + SRC_SCR) & bit) {
		if (time_after(jiffies, timeout))
			return -ETIME;
		cpu_relax();
	}

	return 0;
}

static const struct reset_control_ops imx_src_ops = {
	.reset = imx_src_reset_module,
};

static struct reset_controller_dev imx_reset_controller = {
	.ops = &imx_src_ops,
	.nr_resets = ARRAY_SIZE(sw_reset_bits),
};

static void imx_gpcv2_set_m_core_pgc(bool enable, u32 offset)
{
	writel_relaxed(enable, gpc_base + offset);
}

/*
 * The motivation for bringing up the second i.MX7D core inside the kernel
 * is that legacy vendor bootloaders usually do not implement PSCI support.
 * This is a significant blocker for systems in the field that are running old
 * bootloader versions to upgrade to a modern mainline kernel version, as only
 * one CPU of the i.MX7D would be brought up.
 * Bring up the second i.MX7D core inside the kernel to make the migration
 * path to mainline kernel easier for the existing i.MX7D users.
 */
void imx_gpcv2_set_core1_pdn_pup_by_software(bool pdn)
{
	u32 reg = pdn ? GPC_CPU_PGC_SW_PDN_REQ : GPC_CPU_PGC_SW_PUP_REQ;
	u32 val, pup;
	int ret;

	imx_gpcv2_set_m_core_pgc(true, GPC_PGC_C1);
	val = readl_relaxed(gpc_base + reg);
	val |= BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7;
	writel_relaxed(val, gpc_base + reg);

	ret = readl_relaxed_poll_timeout_atomic(gpc_base + reg, pup,
				!(pup & BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7),
				5, 1000000);
	if (ret < 0) {
		pr_err("i.MX7D: CORE1_A7 power up timeout\n");
		val &= ~BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7;
		writel_relaxed(val, gpc_base + reg);
	}

	imx_gpcv2_set_m_core_pgc(false, GPC_PGC_C1);
}

void imx_enable_cpu(int cpu, bool enable)
{
	u32 mask, val;

	cpu = cpu_logical_map(cpu);
	spin_lock(&scr_lock);
	if (gpr_v2) {
		if (enable)
			imx_gpcv2_set_core1_pdn_pup_by_software(false);

		mask = 1 << (BP_SRC_A7RCR1_A7_CORE1_ENABLE + cpu - 1);
		val = readl_relaxed(src_base + SRC_A7RCR1);
		val = enable ? val | mask : val & ~mask;
		writel_relaxed(val, src_base + SRC_A7RCR1);
	} else {
		mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1);
		val = readl_relaxed(src_base + SRC_SCR);
		val = enable ? val | mask : val & ~mask;
		val |= 1 << (BP_SRC_SCR_CORE1_RST + cpu - 1);
		writel_relaxed(val, src_base + SRC_SCR);
	}
	spin_unlock(&scr_lock);
}

void imx_set_cpu_jump(int cpu, void *jump_addr)
{
	cpu = cpu_logical_map(cpu);
	writel_relaxed(__pa_symbol(jump_addr),
		       src_base + SRC_GPR1(gpr_v2) + cpu * 8);
}

u32 imx_get_cpu_arg(int cpu)
{
	cpu = cpu_logical_map(cpu);
	return readl_relaxed(src_base + SRC_GPR1(gpr_v2) + cpu * 8 + 4);
}

void imx_set_cpu_arg(int cpu, u32 arg)
{
	cpu = cpu_logical_map(cpu);
	writel_relaxed(arg, src_base + SRC_GPR1(gpr_v2) + cpu * 8 + 4);
}

void __init imx_src_init(void)
{
	struct device_node *np;
	u32 val;

	np = of_find_compatible_node(NULL, NULL, "fsl,imx51-src");
	if (!np)
		return;
	src_base = of_iomap(np, 0);
	WARN_ON(!src_base);

	imx_reset_controller.of_node = np;
	if (IS_ENABLED(CONFIG_RESET_CONTROLLER))
		reset_controller_register(&imx_reset_controller);

	/*
	 * force warm reset sources to generate cold reset
	 * for a more reliable restart
	 */
	spin_lock(&scr_lock);
	val = readl_relaxed(src_base + SRC_SCR);
	val &= ~(1 << BP_SRC_SCR_WARM_RESET_ENABLE);
	writel_relaxed(val, src_base + SRC_SCR);
	spin_unlock(&scr_lock);
}

void __init imx7_src_init(void)
{
	struct device_node *np;

	gpr_v2 = true;

	np = of_find_compatible_node(NULL, NULL, "fsl,imx7d-src");
	if (!np)
		return;

	src_base = of_iomap(np, 0);
	if (!src_base)
		return;

	np = of_find_compatible_node(NULL, NULL, "fsl,imx7d-gpc");
	if (!np)
		return;

	gpc_base = of_iomap(np, 0);
	if (!gpc_base)
		return;
}