/*
 * OMAP MPUSS low power code
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * OMAP4430 MPUSS mainly consists of dual Cortex-A9 with per-CPU
 * Local timer and Watchdog, GIC, SCU, PL310 L2 cache controller,
 * CPU0 and CPU1 LPRM modules.
 * CPU0, CPU1 and MPUSS each have their own power domain and
 * hence multiple low power combinations of MPUSS are possible.
 *
 * CPU0 and CPU1 can't support Closed Switch Retention (CSWR)
 * because of the hardware constraints of the dormant mode. While
 * waking up from the dormant mode, a reset signal to the Cortex-A9
 * processor must be asserted by the external power controller.
 *
 * With architectural inputs and hardware recommendations, only the
 * modes below are supported, from a power gain vs. latency point of view.
 *
 *	CPU0		CPU1		MPUSS
 *	----------------------------------------------
 *	ON		ON		ON
 *	ON(Inactive)	OFF		ON(Inactive)
 *	OFF		OFF		CSWR
 *	OFF		OFF		OSWR
 *	OFF		OFF		OFF(Device OFF *TBD)
 *	----------------------------------------------
 *
 * Note: CPU0 is the master core; it is the last CPU to go down and
 * the first to wake up when MPUSS low power states are exercised.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
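
/*
 * Note: the per-CPU wakeup address, SCU power status and L2X0 save state,
 * as well as the L2X0 control register values, are parked in the SAR
 * ("save and restore") RAM at the offsets defined in omap4-sar-layout.h,
 * so the low-level wakeup/resume code can pick them up after MPUSS
 * context has been lost.
 */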

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/smp_scu.h>
#include <asm/pgalloc.h>
#include <asm/suspend.h>
#include <asm/hardware/cache-l2x0.h>

#include "soc.h"
#include "common.h"
#include "omap44xx.h"
#include "omap4-sar-layout.h"
#include "pm.h"
#include "prcm_mpu44xx.h"
#include "prcm_mpu54xx.h"
#include "prminst44xx.h"
#include "prcm44xx.h"
#include "prm44xx.h"
#include "prm-regbits-44xx.h"

#ifdef CONFIG_SMP

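/*
 * struct omap4_cpu_pm_info - per-CPU low power bookkeeping
 * @pwrdm: CPUx power domain
 * @scu_sar_addr: SAR RAM location where the SCU power status is stored
 * @wkup_sar_addr: SAR RAM location where the CPUx wakeup address is stored
 * @l2x0_sar_addr: SAR RAM location where the L2X0 save state is stored
 * @secondary_startup: startup routine used when the CPU is plugged back in
 *
 * (Field descriptions inferred from how the fields are used in this file.)
 */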
struct omap4_cpu_pm_info {
	struct powerdomain *pwrdm;
	void __iomem *scu_sar_addr;
	void __iomem *wkup_sar_addr;
	void __iomem *l2x0_sar_addr;
	void (*secondary_startup)(void);
};

/**
 * struct cpu_pm_ops - CPU pm operations
 * @finish_suspend:	CPU suspend finisher function pointer
 * @resume:		CPU resume function pointer
 * @scu_prepare:	CPU Snoop Control Unit programming function pointer
 *
 * Structure holds function pointers for CPU low power operations like
 * suspend, resume and SCU programming.
 */
struct cpu_pm_ops {
	int (*finish_suspend)(unsigned long cpu_state);
	void (*resume)(void);
	void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
};

static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
static struct powerdomain *mpuss_pd;
static void __iomem *sar_base;
static u32 cpu_context_offset;

static int default_finish_suspend(unsigned long cpu_state)
{
	omap_do_wfi();
	return 0;
}

static void dummy_cpu_resume(void)
{}

static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
{}

struct cpu_pm_ops omap_pm_ops = {
	.finish_suspend		= default_finish_suspend,
	.resume			= dummy_cpu_resume,
	.scu_prepare		= dummy_scu_prepare,
};

/*
 * Program the wakeup routine address for CPU0 and CPU1,
 * used for OFF or DORMANT wakeup.
 */
static inline void set_cpu_wakeup_addr(unsigned int cpu_id, u32 addr)
{
	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);

	writel_relaxed(addr, pm_info->wkup_sar_addr);
}

/*
 * Store the SCU power status value to scratchpad memory
 */
static void scu_pwrst_prepare(unsigned int cpu_id, unsigned int cpu_state)
{
	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
	u32 scu_pwr_st;

	switch (cpu_state) {
	case PWRDM_POWER_RET:
		scu_pwr_st = SCU_PM_DORMANT;
		break;
	case PWRDM_POWER_OFF:
		scu_pwr_st = SCU_PM_POWEROFF;
		break;
	case PWRDM_POWER_ON:
	case PWRDM_POWER_INACTIVE:
	default:
		scu_pwr_st = SCU_PM_NORMAL;
		break;
	}

	writel_relaxed(scu_pwr_st, pm_info->scu_sar_addr);
}

/* Helper functions for MPUSS OSWR */
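
/*
 * The previous power state / lost-context status bits in the PRM and
 * PRCM_MPU context registers are write-to-clear, so the helpers below
 * simply write back the value just read, clearing whatever context loss
 * was recorded for the previous transition.
 */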
static inline void mpuss_clear_prev_logic_pwrst(void)
{
	u32 reg;

	reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
		OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
	omap4_prminst_write_inst_reg(reg, OMAP4430_PRM_PARTITION,
		OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
}

static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id)
{
	u32 reg;

	if (cpu_id) {
		reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU1_INST,
						   cpu_context_offset);
		omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU1_INST,
					      cpu_context_offset);
	} else {
		reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU0_INST,
						   cpu_context_offset);
		omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU0_INST,
					      cpu_context_offset);
	}
}

/*
 * Store the CPU cluster state for L2X0 low power operations.
 */
static void l2x0_pwrst_prepare(unsigned int cpu_id, unsigned int save_state)
{
	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);

	writel_relaxed(save_state, pm_info->l2x0_sar_addr);
}

/*
 * Save the L2X0 AUXCTRL and POR values to SAR memory. They are used
 * in every MPUSS OFF restore path.
 */
#ifdef CONFIG_CACHE_L2X0
static void __init save_l2x0_context(void)
{
	writel_relaxed(l2x0_saved_regs.aux_ctrl,
		       sar_base + L2X0_AUXCTRL_OFFSET);
	writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
		       sar_base + L2X0_PREFETCH_CTRL_OFFSET);
}
#else
static void __init save_l2x0_context(void)
{}
#endif

/**
 * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
 * @cpu: CPU ID
 * @power_state: Low power state.
 *
 * The purpose of this function is to manage the low power programming
 * of the OMAP4 MPUSS subsystem.
 *
 * MPUSS states for the context save:
 * save_state =
 *	0 - Nothing lost and no need to save: MPUSS INACTIVE
 *	1 - CPUx L1 and logic lost: MPUSS CSWR
 *	2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
 *	3 - CPUx L1 and logic lost + GIC + L2 lost: DEVICE OFF
 */
int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
{
	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
	unsigned int save_state = 0;
	unsigned int wakeup_cpu;

	if (omap_rev() == OMAP4430_REV_ES1_0)
		return -ENXIO;

	switch (power_state) {
	case PWRDM_POWER_ON:
	case PWRDM_POWER_INACTIVE:
		save_state = 0;
		break;
	case PWRDM_POWER_OFF:
		save_state = 1;
		break;
	case PWRDM_POWER_RET:
	default:
		/*
		 * CPUx CSWR is an invalid hardware state. CPUx OSWR
		 * doesn't make much sense either, since logic is lost and
		 * the L1 cache needs to be cleaned for coherency. This
		 * makes CPUx OSWR equivalent to CPUx OFF and hence it is
		 * not supported.
		 */
		WARN_ON(1);
		return -ENXIO;
	}

	pwrdm_pre_transition(NULL);

	/*
	 * Check MPUSS next state and save interrupt controller if needed.
	 * In MPUSS OSWR or device OFF, interrupt controller context is lost.
	 */
	mpuss_clear_prev_logic_pwrst();
	if ((pwrdm_read_next_pwrst(mpuss_pd) == PWRDM_POWER_RET) &&
	    (pwrdm_read_logic_retst(mpuss_pd) == PWRDM_POWER_OFF))
		save_state = 2;

	cpu_clear_prev_logic_pwrst(cpu);
	pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
	set_cpu_wakeup_addr(cpu, virt_to_phys(omap_pm_ops.resume));
	omap_pm_ops.scu_prepare(cpu, power_state);
	l2x0_pwrst_prepare(cpu, save_state);

	/*
	 * Call low level function with targeted low power state.
	 */
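	/*
	 * When any state can be lost (save_state != 0), go through
	 * cpu_suspend() so the core registers are saved before entering
	 * the low power state; on wakeup the CPU returns via the wakeup
	 * address programmed above (omap_pm_ops.resume). When nothing is
	 * lost, the finisher is essentially just a WFI.
	 */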
	if (save_state)
		cpu_suspend(save_state, omap_pm_ops.finish_suspend);
	else
		omap_pm_ops.finish_suspend(save_state);

	if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) && cpu)
		gic_dist_enable();

	/*
	 * Restore the CPUx power state to ON, otherwise the CPUx power
	 * domain can transition to the programmed low power state while
	 * doing WFI outside the low power code. On secure devices, CPUx
	 * does WFI which can result in a domain transition.
	 */
	wakeup_cpu = smp_processor_id();
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

	pwrdm_post_transition(NULL);

	return 0;
}

/**
 * omap4_hotplug_cpu: OMAP4 CPU hotplug entry
 * @cpu: CPU ID
 * @power_state: CPU low power state.
 */
int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
{
	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
	unsigned int cpu_state = 0;

	if (omap_rev() == OMAP4430_REV_ES1_0)
		return -ENXIO;

	if (power_state == PWRDM_POWER_OFF)
		cpu_state = 1;

	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
	set_cpu_wakeup_addr(cpu, virt_to_phys(pm_info->secondary_startup));
	omap_pm_ops.scu_prepare(cpu, power_state);

	/*
	 * CPU never returns if the targeted power state is OFF mode.
	 * CPU ONLINE follows the normal CPU ONLINE path via
	 * omap4_secondary_startup().
	 */
	omap_pm_ops.finish_suspend(cpu_state);

	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
	return 0;
}

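/*
 * enable_mercury_retention_mode() below is called from omap4_mpuss_init()
 * only on OMAP5/DRA7 class SoCs; the OMAP4 init path leaves it untouched.
 */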
/*
 * Enable Mercury Fast HG retention mode by default.
 */
static void enable_mercury_retention_mode(void)
{
	u32 reg;

	reg = omap4_prcm_mpu_read_inst_reg(OMAP54XX_PRCM_MPU_DEVICE_INST,
				OMAP54XX_PRCM_MPU_PRM_PSCON_COUNT_OFFSET);
	/* Enable HG_EN, HG_RAMPUP = fast mode */
	reg |= BIT(24) | BIT(25);
	omap4_prcm_mpu_write_inst_reg(reg, OMAP54XX_PRCM_MPU_DEVICE_INST,
				OMAP54XX_PRCM_MPU_PRM_PSCON_COUNT_OFFSET);
}

/*
 * Initialise OMAP4 MPUSS
 */
int __init omap4_mpuss_init(void)
{
	struct omap4_cpu_pm_info *pm_info;

	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
		return -ENODEV;
	}

	sar_base = omap4_get_sar_ram_base();

	/* Initialise per-CPU PM information */
	pm_info = &per_cpu(omap4_pm_info, 0x0);
	pm_info->scu_sar_addr = sar_base + SCU_OFFSET0;
	pm_info->wkup_sar_addr = sar_base + CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
	pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET0;
	pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm");
	if (!pm_info->pwrdm) {
		pr_err("Lookup failed for CPU0 pwrdm\n");
		return -ENODEV;
	}

	/* Clear CPU previous power domain state */
	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	cpu_clear_prev_logic_pwrst(0);

	/* Initialise CPU0 power domain state to ON */
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

	pm_info = &per_cpu(omap4_pm_info, 0x1);
	pm_info->scu_sar_addr = sar_base + SCU_OFFSET1;
	pm_info->wkup_sar_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
	pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET1;
	if (cpu_is_omap446x())
		pm_info->secondary_startup = omap4460_secondary_startup;
	else
		pm_info->secondary_startup = omap4_secondary_startup;

	pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm");
	if (!pm_info->pwrdm) {
		pr_err("Lookup failed for CPU1 pwrdm\n");
		return -ENODEV;
	}

	/* Clear CPU previous power domain state */
	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	cpu_clear_prev_logic_pwrst(1);

	/* Initialise CPU1 power domain state to ON */
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

	mpuss_pd = pwrdm_lookup("mpu_pwrdm");
	if (!mpuss_pd) {
		pr_err("Failed to lookup MPUSS power domain\n");
		return -ENODEV;
	}
	pwrdm_clear_all_prev_pwrst(mpuss_pd);
	mpuss_clear_prev_logic_pwrst();

	/* Save device type on scratchpad for low level code to use */
	if (omap_type() != OMAP2_DEVICE_TYPE_GP)
		writel_relaxed(1, sar_base + OMAP_TYPE_OFFSET);
	else
		writel_relaxed(0, sar_base + OMAP_TYPE_OFFSET);

	save_l2x0_context();

	if (cpu_is_omap44xx()) {
		omap_pm_ops.finish_suspend = omap4_finish_suspend;
		omap_pm_ops.resume = omap4_cpu_resume;
		omap_pm_ops.scu_prepare = scu_pwrst_prepare;
		cpu_context_offset = OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET;
	} else if (soc_is_omap54xx() || soc_is_dra7xx()) {
		cpu_context_offset = OMAP54XX_RM_CPU0_CPU0_CONTEXT_OFFSET;
		enable_mercury_retention_mode();
	}

	return 0;
}

#endif