// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU complex suspend & resume functions for Tegra SoCs
 *
 * Copyright (c) 2009-2012, NVIDIA Corporation. All rights reserved.
 */

#include <linux/clk/tegra.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/suspend.h>

#include <linux/firmware/trusted_foundations.h>

#include <soc/tegra/flowctrl.h>
#include <soc/tegra/fuse.h>
#include <soc/tegra/pm.h>
#include <soc/tegra/pmc.h>

#include <asm/cacheflush.h>
#include <asm/firmware.h>
#include <asm/idmap.h>
#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

#include "iomap.h"
#include "pm.h"
#include "reset.h"
#include "sleep.h"

#ifdef CONFIG_PM_SLEEP
static DEFINE_SPINLOCK(tegra_lp2_lock);
static u32 iram_save_size;
static void *iram_save_addr;
struct tegra_lp1_iram tegra_lp1_iram;
void (*tegra_tear_down_cpu)(void);
void (*tegra_sleep_core_finish)(unsigned long v2p);
static int (*tegra_sleep_func)(unsigned long v2p);

static void tegra_tear_down_cpu_init(void)
{
	switch (tegra_get_chip_id()) {
	case TEGRA20:
		if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC))
			tegra_tear_down_cpu = tegra20_tear_down_cpu;
		break;
	case TEGRA30:
	case TEGRA114:
	case TEGRA124:
		if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) ||
		    IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) ||
		    IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC))
			tegra_tear_down_cpu = tegra30_tear_down_cpu;
		break;
	}
}

/*
 * restore_cpu_complex
 *
 * restores cpu clock setting, clears flow controller
 *
 * Always called on CPU 0.
 */
static void restore_cpu_complex(void)
{
	int cpu = smp_processor_id();

	BUG_ON(cpu != 0);

#ifdef CONFIG_SMP
	cpu = cpu_logical_map(cpu);
#endif

	/* Restore the CPU clock settings */
	tegra_cpu_clock_resume();

	flowctrl_cpu_suspend_exit(cpu);
}

/*
 * suspend_cpu_complex
 *
 * saves pll state for use by restart_plls, prepares flow controller for
 * transition to suspend state
 *
 * Must always be called on cpu 0.
 */
static void suspend_cpu_complex(void)
{
	int cpu = smp_processor_id();

	BUG_ON(cpu != 0);

#ifdef CONFIG_SMP
	cpu = cpu_logical_map(cpu);
#endif

	/* Save the CPU clock settings */
	tegra_cpu_clock_suspend();

	flowctrl_cpu_suspend_enter(cpu);
}

void tegra_pm_clear_cpu_in_lp2(void)
{
	int phy_cpu_id = cpu_logical_map(smp_processor_id());
	u32 *cpu_in_lp2 = tegra_cpu_lp2_mask;

	spin_lock(&tegra_lp2_lock);

	BUG_ON(!(*cpu_in_lp2 & BIT(phy_cpu_id)));
	*cpu_in_lp2 &= ~BIT(phy_cpu_id);

	spin_unlock(&tegra_lp2_lock);
}

void tegra_pm_set_cpu_in_lp2(void)
{
	int phy_cpu_id = cpu_logical_map(smp_processor_id());
	u32 *cpu_in_lp2 = tegra_cpu_lp2_mask;

	spin_lock(&tegra_lp2_lock);

	BUG_ON((*cpu_in_lp2 & BIT(phy_cpu_id)));
	*cpu_in_lp2 |= BIT(phy_cpu_id);

	spin_unlock(&tegra_lp2_lock);
}

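/*
 * tegra_sleep_cpu
 *
 * cpu_suspend() finisher for the LP2 path: disables the outer cache where
 * required, notifies the firmware and jumps to the low-level
 * tegra_sleep_cpu_finish() code that powers down the CPU complex. Does not
 * return on success.
 */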
static int tegra_sleep_cpu(unsigned long v2p)
{
	if (tegra_cpu_car_ops->rail_off_ready &&
	    WARN_ON(!tegra_cpu_rail_off_ready()))
		return -EBUSY;

	/*
	 * Disabling the L2 cache through the kernel API is only allowed
	 * when all secondary CPUs are offline. The cache has to be disabled
	 * with the MMU on if cache maintenance is done via the Trusted
	 * Foundations firmware. Note that CPUIDLE never enters the
	 * power-gated state on Tegra30 if any secondary CPU is online, and
	 * this is the LP2-idle code path only for Tegra20/30.
	 */
#ifdef CONFIG_OUTER_CACHE
	if (trusted_foundations_registered() && outer_cache.disable)
		outer_cache.disable();
#endif
	/*
	 * Note that besides setting up the CPU reset vector this firmware
	 * call may also do the following, depending on the FW version:
	 * 1) Disable the L2. This doesn't matter since we already
	 *    disabled the L2.
	 * 2) Disable the D-cache. This needs to be taken into account in
	 *    particular by tegra_disable_clean_inv_dcache(), which must
	 *    avoid disabling it a second time.
	 */
	call_firmware_op(prepare_idle, TF_PM_MODE_LP2);

	setup_mm_for_reboot();
	tegra_sleep_cpu_finish(v2p);

	/* should never get here */
	BUG();

	return 0;
}

static void tegra_pm_set(enum tegra_suspend_mode mode)
{
	u32 value;

	switch (tegra_get_chip_id()) {
	case TEGRA20:
	case TEGRA30:
		break;
	default:
		/* Turn off CRAIL */
		value = flowctrl_read_cpu_csr(0);
		value &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;
		value |= FLOW_CTRL_CSR_ENABLE_EXT_CRAIL;
		flowctrl_write_cpu_csr(0, value);
		break;
	}

	tegra_pmc_enter_suspend_mode(mode);
}

int tegra_pm_enter_lp2(void)
{
	int err;

	tegra_pm_set(TEGRA_SUSPEND_LP2);

	cpu_cluster_pm_enter();
	suspend_cpu_complex();

	err = cpu_suspend(PHYS_OFFSET - PAGE_OFFSET, &tegra_sleep_cpu);

	/*
	 * Resume the L2 cache if it wasn't re-enabled early during resume,
	 * which is the case for Tegra30: it has to re-enable the cache via
	 * a firmware call. In all other cases the cache is already enabled
	 * and re-enabling it is a no-op. This is always a no-op on
	 * Tegra114+.
	 */
	outer_resume();

	restore_cpu_complex();
	cpu_cluster_pm_exit();

	call_firmware_op(prepare_idle, TF_PM_MODE_NONE);

	return err;
}

enum tegra_suspend_mode tegra_pm_validate_suspend_mode(
				enum tegra_suspend_mode mode)
{
	/*
	 * The Tegra devices currently only support suspending to LP1 or a
	 * shallower state.
	 */
	if (mode > TEGRA_SUSPEND_LP1)
		return TEGRA_SUSPEND_LP1;

	return mode;
}

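/*
 * tegra_sleep_core
 *
 * cpu_suspend() finisher for the LP0/LP1 path: disables the outer cache
 * where required, notifies the firmware and jumps to the chip-specific
 * tegra_sleep_core_finish() code. Does not return on success.
 */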
static int tegra_sleep_core(unsigned long v2p)
{
	/*
	 * The cache has to be disabled with the MMU on if cache maintenance
	 * is done via the Trusted Foundations firmware. This is a no-op on
	 * Tegra114+.
	 */
	if (trusted_foundations_registered())
		outer_disable();

	call_firmware_op(prepare_idle, TF_PM_MODE_LP1);

	setup_mm_for_reboot();
	tegra_sleep_core_finish(v2p);

	/* should never get here */
	BUG();

	return 0;
}

/*
 * tegra_lp1_iram_hook
 *
 * Hooks up the addresses of the LP1 reset vector and the SDRAM
 * self-refresh code in SDRAM. The code is not copied to IRAM in this
 * function; it needs to be copied to IRAM before LP0/LP1 suspend, and the
 * original IRAM contents restored after resume.
 */
static bool tegra_lp1_iram_hook(void)
{
	switch (tegra_get_chip_id()) {
	case TEGRA20:
		if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC))
			tegra20_lp1_iram_hook();
		break;
	case TEGRA30:
	case TEGRA114:
	case TEGRA124:
		if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) ||
		    IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) ||
		    IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC))
			tegra30_lp1_iram_hook();
		break;
	default:
		break;
	}

	if (!tegra_lp1_iram.start_addr || !tegra_lp1_iram.end_addr)
		return false;

	iram_save_size = tegra_lp1_iram.end_addr - tegra_lp1_iram.start_addr;
	iram_save_addr = kmalloc(iram_save_size, GFP_KERNEL);
	if (!iram_save_addr)
		return false;

	return true;
}

static bool tegra_sleep_core_init(void)
{
	switch (tegra_get_chip_id()) {
	case TEGRA20:
		if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC))
			tegra20_sleep_core_init();
		break;
	case TEGRA30:
	case TEGRA114:
	case TEGRA124:
		if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) ||
		    IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) ||
		    IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC))
			tegra30_sleep_core_init();
		break;
	default:
		break;
	}

	if (!tegra_sleep_core_finish)
		return false;

	return true;
}

static void tegra_suspend_enter_lp1(void)
{
	/* save IRAM, then copy the reset vector & SDRAM shutdown code into IRAM */
	memcpy(iram_save_addr, IO_ADDRESS(TEGRA_IRAM_LPx_RESUME_AREA),
	       iram_save_size);
	memcpy(IO_ADDRESS(TEGRA_IRAM_LPx_RESUME_AREA),
	       tegra_lp1_iram.start_addr, iram_save_size);

	*((u32 *)tegra_cpu_lp1_mask) = 1;
}

static void tegra_suspend_exit_lp1(void)
{
	/* restore IRAM */
	memcpy(IO_ADDRESS(TEGRA_IRAM_LPx_RESUME_AREA), iram_save_addr,
	       iram_save_size);

	*(u32 *)tegra_cpu_lp1_mask = 0;
}

static const char *lp_state[TEGRA_MAX_SUSPEND_MODE] = {
	[TEGRA_SUSPEND_NONE] = "none",
	[TEGRA_SUSPEND_LP2] = "LP2",
	[TEGRA_SUSPEND_LP1] = "LP1",
	[TEGRA_SUSPEND_LP0] = "LP0",
};

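/*
 * tegra_suspend_enter
 *
 * platform_suspend_ops .enter callback: prepares the flow controller and,
 * depending on the configured mode, the LP1 IRAM code or the LP2 CPU mask,
 * then suspends via cpu_suspend() using the sleep function selected in
 * tegra_init_suspend().
 */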
static int tegra_suspend_enter(suspend_state_t state)
{
	enum tegra_suspend_mode mode = tegra_pmc_get_suspend_mode();

	if (WARN_ON(mode < TEGRA_SUSPEND_NONE ||
		    mode >= TEGRA_MAX_SUSPEND_MODE))
		return -EINVAL;

	pr_info("Entering suspend state %s\n", lp_state[mode]);

	tegra_pm_set(mode);

	local_fiq_disable();

	suspend_cpu_complex();
	switch (mode) {
	case TEGRA_SUSPEND_LP1:
		tegra_suspend_enter_lp1();
		break;
	case TEGRA_SUSPEND_LP2:
		tegra_pm_set_cpu_in_lp2();
		break;
	default:
		break;
	}

	cpu_suspend(PHYS_OFFSET - PAGE_OFFSET, tegra_sleep_func);

	/*
	 * Resume the L2 cache if it wasn't re-enabled early during resume,
	 * which is the case for Tegra30: it has to re-enable the cache via
	 * a firmware call. In all other cases the cache is already enabled
	 * and re-enabling it is a no-op.
	 */
	outer_resume();

	switch (mode) {
	case TEGRA_SUSPEND_LP1:
		tegra_suspend_exit_lp1();
		break;
	case TEGRA_SUSPEND_LP2:
		tegra_pm_clear_cpu_in_lp2();
		break;
	default:
		break;
	}
	restore_cpu_complex();

	local_fiq_enable();

	call_firmware_op(prepare_idle, TF_PM_MODE_NONE);

	return 0;
}

static const struct platform_suspend_ops tegra_suspend_ops = {
	.valid = suspend_valid_only_mem,
	.enter = tegra_suspend_enter,
};

void __init tegra_init_suspend(void)
{
	enum tegra_suspend_mode mode = tegra_pmc_get_suspend_mode();

	if (mode == TEGRA_SUSPEND_NONE)
		return;

	tegra_tear_down_cpu_init();

	if (mode >= TEGRA_SUSPEND_LP1) {
		if (!tegra_lp1_iram_hook() || !tegra_sleep_core_init()) {
			pr_err("%s: unable to allocate memory for SDRAM "
			       "self-refresh -- LP0/LP1 unavailable\n",
			       __func__);
			tegra_pmc_set_suspend_mode(TEGRA_SUSPEND_LP2);
			mode = TEGRA_SUSPEND_LP2;
		}
	}

	/* set up the sleep function for cpu_suspend */
	switch (mode) {
	case TEGRA_SUSPEND_LP1:
		tegra_sleep_func = tegra_sleep_core;
		break;
	case TEGRA_SUSPEND_LP2:
		tegra_sleep_func = tegra_sleep_cpu;
		break;
	default:
		break;
	}

	suspend_set_ops(&tegra_suspend_ops);
}

int tegra_pm_park_secondary_cpu(unsigned long cpu)
{
	if (cpu > 0) {
		tegra_disable_clean_inv_dcache(TEGRA_FLUSH_CACHE_LOUIS);

		if (tegra_get_chip_id() == TEGRA20)
			tegra20_hotplug_shutdown();
		else
			tegra30_hotplug_shutdown();
	}

	return -EINVAL;
}
#endif /* CONFIG_PM_SLEEP */