/*
 * Copyright (c) 2013 ARM/Linaro
 *
 * Authors: Daniel Lezcano <daniel.lezcano@linaro.org>
 *          Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 *          Nicolas Pitre <nicolas.pitre@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Maintainer: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 * Maintainer: Daniel Lezcano <daniel.lezcano@linaro.org>
 */
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpuidle.h>
#include <asm/mcpm.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>

#include "dt_idle_states.h"

static int bl_enter_powerdown(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int idx);

/*
 * NB: Owing to current menu governor behaviour, the index 1 states of both
 * big and LITTLE have to define exit_latency and target_residency for the
 * cluster state since, when all CPUs in a cluster hit it, the cluster can
 * be shut down. This means that when a single CPU enters this state the
 * exit_latency and target_residency values are somewhat overkill. There is
 * no notion of cluster states in the menu governor, so CPUs have to define
 * CPU states where possibly the cluster will be shut down depending on the
 * state of other CPUs. Idle state entry and exit happen at random times;
 * however the cluster state provides target_residency values as if all
 * CPUs in a cluster enter the state at once; this is somewhat optimistic
 * and the behaviour should be fixed either in the governor or in the MCPM
 * back-ends.
 * To make this driver 100% generic, the number of states and the
 * exit_latency and target_residency values must be obtained from device
 * tree bindings (an illustrative node is sketched below, after
 * bl_idle_state_match).
 *
 * exit_latency: refers to the TC2 vexpress test chip and depends on the
 * current cluster operating point. It is the time it takes to get the CPU
 * up and running when the CPU is powered up on cluster wake-up from shutdown.
 * Current values for big and LITTLE clusters are provided for clusters
 * running at default operating points.
 *
 * target_residency: the minimum amount of time the cluster has to be down
 * to break even in terms of power consumption. Cluster shutdown has
 * inherent dynamic power costs (L2 writebacks to DRAM being the main
 * factor) that depend on the current operating points. The current values
 * for both clusters are provided for a CPU with half of its L2 lines dirty
 * and requiring cleaning to DRAM, and take into account the leakage/static
 * power of the vexpress TC2 test chip.
 */
static struct cpuidle_driver bl_idle_little_driver = {
	.name = "little_idle",
	.owner = THIS_MODULE,
	.states[0] = ARM_CPUIDLE_WFI_STATE,
	.states[1] = {
		.enter			= bl_enter_powerdown,
		.exit_latency		= 700,
		.target_residency	= 2500,
		.flags			= CPUIDLE_FLAG_TIMER_STOP,
		.name			= "C1",
		.desc			= "ARM little-cluster power down",
	},
	.state_count = 2,
};

static const struct of_device_id bl_idle_state_match[] __initconst = {
	{ .compatible = "arm,idle-state",
	  .data = bl_enter_powerdown },
	{ },
};
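/*
 * Illustrative sketch (not taken from any particular board file): the
 * hardcoded index 1 values above can be overridden at boot when the DT
 * provides "arm,idle-state" nodes matching bl_idle_state_match, e.g.:
 *
 *	CLUSTER_SLEEP: cluster-sleep {
 *		compatible = "arm,idle-state";
 *		local-timer-stop;
 *		entry-latency-us = <500>;
 *		exit-latency-us = <1000>;
 *		min-residency-us = <2500>;
 *	};
 *
 * with CPU nodes referencing it via cpu-idle-states = <&CLUSTER_SLEEP>.
 * dt_init_idle_driver() then fills drv->states[] from index 1 using these
 * properties instead of the hardcoded values. The node name, label and
 * numbers here are made up for illustration only.
 */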

static struct cpuidle_driver bl_idle_big_driver = {
	.name = "big_idle",
	.owner = THIS_MODULE,
	.states[0] = ARM_CPUIDLE_WFI_STATE,
	.states[1] = {
		.enter			= bl_enter_powerdown,
		.exit_latency		= 500,
		.target_residency	= 2000,
		.flags			= CPUIDLE_FLAG_TIMER_STOP,
		.name			= "C1",
		.desc			= "ARM big-cluster power down",
	},
	.state_count = 2,
};

/*
 * notrace prevents trace shims from getting inserted where they
 * should not. Global jumps and ldrex/strex must not be inserted
 * in power down sequences where caches and MMU may be turned off.
 */
static int notrace bl_powerdown_finisher(unsigned long arg)
{
	/* MCPM works with HW CPU identifiers */
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	mcpm_set_entry_vector(cpu, cluster, cpu_resume);

	/*
	 * Residency value passed to mcpm_cpu_suspend back-end
	 * has to be given clear semantics. Set to 0 as a
	 * temporary value.
	 */
	mcpm_cpu_suspend(0);

	/* return value != 0 means failure */
	return 1;
}

/**
 * bl_enter_powerdown - Programs CPU to enter the specified state
 * @dev: cpuidle device
 * @drv: cpuidle driver containing the state
 * @idx: index of the state to be entered
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 *
 * Return: the index of the entered idle state.
 */
static int bl_enter_powerdown(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int idx)
{
	cpu_pm_enter();

	cpu_suspend(0, bl_powerdown_finisher);

	/* signals the MCPM core that CPU is out of low power state */
	mcpm_cpu_powered_up();

	cpu_pm_exit();

	return idx;
}

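/*
 * Build the cpumask for @drv: all possible CPUs whose MIDR part number
 * matches @part_id. Returns 0 on success or -ENOMEM if the cpumask cannot
 * be allocated; on later errors the caller frees it via drv->cpumask.
 */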
static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int part_id)
{
	struct cpumask *cpumask;
	int cpu;

	cpumask = kzalloc(cpumask_size(), GFP_KERNEL);
	if (!cpumask)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		if (smp_cpuid_part(cpu) == part_id)
			cpumask_set_cpu(cpu, cpumask);

	drv->cpumask = cpumask;

	return 0;
}

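/* Machines known to work with this driver; matched against the DT root node */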
static const struct of_device_id compatible_machine_match[] = {
	{ .compatible = "arm,vexpress,v2p-ca15_a7" },
	{ .compatible = "samsung,exynos5420" },
	{ .compatible = "samsung,exynos5800" },
	{},
};

static int __init bl_idle_init(void)
{
	int ret;
	struct device_node *root = of_find_node_by_path("/");
	const struct of_device_id *match_id;

	if (!root)
		return -ENODEV;

	/*
	 * Initialize the driver only on machines known to be compliant.
	 */
	match_id = of_match_node(compatible_machine_match, root);

	of_node_put(root);

	if (!match_id)
		return -ENODEV;

	if (!mcpm_is_available())
		return -EUNATCH;

	/*
	 * For now the differentiation between little and big cores
	 * is based on the part number. A7 cores are considered little
	 * cores, A15 are considered big cores. This distinction may
	 * evolve in the future with a more generic matching approach.
	 */
	ret = bl_idle_driver_init(&bl_idle_little_driver,
				  ARM_CPU_PART_CORTEX_A7);
	if (ret)
		return ret;

	ret = bl_idle_driver_init(&bl_idle_big_driver, ARM_CPU_PART_CORTEX_A15);
	if (ret)
		goto out_uninit_little;

	/* Start at index 1, index 0 is the standard WFI state */
	ret = dt_init_idle_driver(&bl_idle_big_driver, bl_idle_state_match, 1);
	if (ret < 0)
		goto out_uninit_big;

	/* Start at index 1, index 0 is the standard WFI state */
	ret = dt_init_idle_driver(&bl_idle_little_driver,
				  bl_idle_state_match, 1);
	if (ret < 0)
		goto out_uninit_big;

	ret = cpuidle_register(&bl_idle_little_driver, NULL);
	if (ret)
		goto out_uninit_big;

	ret = cpuidle_register(&bl_idle_big_driver, NULL);
	if (ret)
		goto out_unregister_little;

	return 0;

out_unregister_little:
	cpuidle_unregister(&bl_idle_little_driver);
out_uninit_big:
	kfree(bl_idle_big_driver.cpumask);
out_uninit_little:
	kfree(bl_idle_little_driver.cpumask);

	return ret;
}
device_initcall(bl_idle_init);