// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 ARM/Linaro
 *
 * Authors: Daniel Lezcano <daniel.lezcano@linaro.org>
 *          Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 *          Nicolas Pitre <nicolas.pitre@linaro.org>
 *
 * Maintainer: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 * Maintainer: Daniel Lezcano <daniel.lezcano@linaro.org>
 */
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpuidle.h>
#include <asm/mcpm.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>

#include "dt_idle_states.h"

static int bl_enter_powerdown(struct cpuidle_device *dev,
                              struct cpuidle_driver *drv, int idx);

/*
 * NB: Owing to current menu governor behaviour, the big and LITTLE
 * index 1 states have to define exit_latency and target_residency for
 * the cluster state since, when all CPUs in a cluster hit it, the cluster
 * can be shut down. This means that when a single CPU enters this state
 * the exit_latency and target_residency values are somewhat overkill.
 * There is no notion of cluster states in the menu governor, so CPUs
 * have to define CPU states where the cluster may possibly be shut down,
 * depending on the state of the other CPUs. Idle state entry and exit
 * happen at random times; however, the cluster state provides
 * target_residency values as if all CPUs in a cluster entered the state
 * at once; this is somewhat optimistic and the behaviour should be fixed
 * either in the governor or in the MCPM back-ends.
 * To make this driver 100% generic, the number of states and the
 * exit_latency/target_residency values must be obtained from device tree
 * bindings.
 *
 * exit_latency: refers to the TC2 vexpress test chip and depends on the
 * current cluster operating point. It is the time it takes to get the CPU
 * up and running when the CPU is powered up on cluster wake-up from
 * shutdown. Current values for big and LITTLE clusters are provided for
 * clusters running at default operating points.
 *
 * target_residency: the minimum amount of time the cluster has to be down
 * to break even in terms of power consumption. Cluster shutdown has
 * inherent dynamic power costs (L2 write-backs to DRAM being the main
 * factor) that depend on the current operating points. The current values
 * for both clusters assume that half of the L2 lines are dirty and require
 * cleaning to DRAM, and take into account the leakage/static power of the
 * vexpress TC2 test chip.
 */
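/*
 * For reference, a minimal sketch of an "arm,idle-state" device tree node
 * that dt_init_idle_driver() (see bl_idle_init() below) can parse to
 * override the hard-coded state parameters in the drivers below. The node
 * name, label and latency/residency numbers are purely illustrative, not
 * TC2 measurements; each CPU node would reference the state through its
 * cpu-idle-states property:
 *
 *	cluster_pd: cluster-power-down {
 *		compatible = "arm,idle-state";
 *		local-timer-stop;
 *		entry-latency-us = <500>;
 *		exit-latency-us = <700>;
 *		min-residency-us = <2500>;
 *	};
 */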
static struct cpuidle_driver bl_idle_little_driver = {
        .name = "little_idle",
        .owner = THIS_MODULE,
        .states[0] = ARM_CPUIDLE_WFI_STATE,
        .states[1] = {
                .enter = bl_enter_powerdown,
                .exit_latency = 700,
                .target_residency = 2500,
                .flags = CPUIDLE_FLAG_TIMER_STOP,
                .name = "C1",
                .desc = "ARM little-cluster power down",
        },
        .state_count = 2,
};

static const struct of_device_id bl_idle_state_match[] __initconst = {
        { .compatible = "arm,idle-state",
          .data = bl_enter_powerdown },
        { },
};

static struct cpuidle_driver bl_idle_big_driver = {
        .name = "big_idle",
        .owner = THIS_MODULE,
        .states[0] = ARM_CPUIDLE_WFI_STATE,
        .states[1] = {
                .enter = bl_enter_powerdown,
                .exit_latency = 500,
                .target_residency = 2000,
                .flags = CPUIDLE_FLAG_TIMER_STOP,
                .name = "C1",
                .desc = "ARM big-cluster power down",
        },
        .state_count = 2,
};

/*
 * notrace prevents trace shims from getting inserted where they
 * should not. Global jumps and ldrex/strex must not be inserted
 * in power down sequences where caches and MMU may be turned off.
 */
static int notrace bl_powerdown_finisher(unsigned long arg)
{
        /* MCPM works with HW CPU identifiers */
        unsigned int mpidr = read_cpuid_mpidr();
        unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

        mcpm_set_entry_vector(cpu, cluster, cpu_resume);
        mcpm_cpu_suspend();

        /* return value != 0 means failure */
        return 1;
}

/**
 * bl_enter_powerdown - Programs CPU to enter the specified state
 * @dev: cpuidle device
 * @drv: cpuidle driver containing the target state
 * @idx: state index
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static int bl_enter_powerdown(struct cpuidle_device *dev,
                              struct cpuidle_driver *drv, int idx)
{
        cpu_pm_enter();

        cpu_suspend(0, bl_powerdown_finisher);

        /* signals the MCPM core that CPU is out of low power state */
        mcpm_cpu_powered_up();

        cpu_pm_exit();

        return idx;
}
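/**
 * bl_idle_driver_init - build the cpumask a driver is registered for
 * @drv: cpuidle driver whose cpumask is being set up
 * @part_id: MIDR part number identifying the cluster's core type
 *
 * Allocate a cpumask and mark every possible CPU whose part number
 * matches @part_id, so that each driver only covers the cores of its
 * own cluster.
 */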
static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int part_id)
{
        struct cpumask *cpumask;
        int cpu;

        cpumask = kzalloc(cpumask_size(), GFP_KERNEL);
        if (!cpumask)
                return -ENOMEM;

        for_each_possible_cpu(cpu)
                if (smp_cpuid_part(cpu) == part_id)
                        cpumask_set_cpu(cpu, cpumask);

        drv->cpumask = cpumask;

        return 0;
}

static const struct of_device_id compatible_machine_match[] = {
        { .compatible = "arm,vexpress,v2p-ca15_a7" },
        { .compatible = "samsung,exynos5420" },
        { .compatible = "samsung,exynos5800" },
        {},
};

static int __init bl_idle_init(void)
{
        int ret;
        struct device_node *root = of_find_node_by_path("/");
        const struct of_device_id *match_id;

        if (!root)
                return -ENODEV;

        /*
         * Initialize the driver just for a compliant set of machines
         */
        match_id = of_match_node(compatible_machine_match, root);

        of_node_put(root);

        if (!match_id)
                return -ENODEV;

        if (!mcpm_is_available())
                return -EUNATCH;

        /*
         * For now the differentiation between little and big cores
         * is based on the part number. A7 cores are considered little
         * cores, A15 are considered big cores. This distinction may
         * evolve in the future with a more generic matching approach.
         */
        ret = bl_idle_driver_init(&bl_idle_little_driver,
                                  ARM_CPU_PART_CORTEX_A7);
        if (ret)
                return ret;

        ret = bl_idle_driver_init(&bl_idle_big_driver, ARM_CPU_PART_CORTEX_A15);
        if (ret)
                goto out_uninit_little;

        /* Start at index 1, index 0 standard WFI */
        ret = dt_init_idle_driver(&bl_idle_big_driver, bl_idle_state_match, 1);
        if (ret < 0)
                goto out_uninit_big;

        /* Start at index 1, index 0 standard WFI */
        ret = dt_init_idle_driver(&bl_idle_little_driver,
                                  bl_idle_state_match, 1);
        if (ret < 0)
                goto out_uninit_big;

        ret = cpuidle_register(&bl_idle_little_driver, NULL);
        if (ret)
                goto out_uninit_big;

        ret = cpuidle_register(&bl_idle_big_driver, NULL);
        if (ret)
                goto out_unregister_little;

        return 0;

out_unregister_little:
        cpuidle_unregister(&bl_idle_little_driver);
out_uninit_big:
        kfree(bl_idle_big_driver.cpumask);
out_uninit_little:
        kfree(bl_idle_little_driver.cpumask);

        return ret;
}
device_initcall(bl_idle_init);