// SPDX-License-Identifier: GPL-2.0
/*
 * PM domains for CPUs via genpd - managed by cpuidle-psci.
 *
 * Copyright (C) 2019 Linaro Ltd.
 * Author: Ulf Hansson <ulf.hansson@linaro.org>
 *
 */

#define pr_fmt(fmt) "CPUidle PSCI: " fmt

#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/psci.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "cpuidle-psci.h"

struct psci_pd_provider {
	struct list_head link;
	struct device_node *node;
};

static LIST_HEAD(psci_pd_providers);
static bool psci_pd_allow_domain_state;

static int psci_pd_power_off(struct generic_pm_domain *pd)
{
	struct genpd_power_state *state = &pd->states[pd->state_idx];
	u32 *pd_state;

	if (!state->data)
		return 0;

	if (!psci_pd_allow_domain_state)
		return -EBUSY;

	/* OSI mode is enabled, set the corresponding domain state. */
	pd_state = state->data;
	psci_set_domain_state(*pd_state);

	return 0;
}

static int psci_pd_parse_state_nodes(struct genpd_power_state *states,
				     int state_count)
{
	int i, ret;
	u32 psci_state, *psci_state_buf;

	for (i = 0; i < state_count; i++) {
		ret = psci_dt_parse_state_node(to_of_node(states[i].fwnode),
					       &psci_state);
		if (ret)
			goto free_state;

		psci_state_buf = kmalloc(sizeof(u32), GFP_KERNEL);
		if (!psci_state_buf) {
			ret = -ENOMEM;
			goto free_state;
		}
		*psci_state_buf = psci_state;
		states[i].data = psci_state_buf;
	}

	return 0;

free_state:
	i--;
	for (; i >= 0; i--)
		kfree(states[i].data);

	return ret;
}

static int psci_pd_parse_states(struct device_node *np,
			struct genpd_power_state **states, int *state_count)
{
	int ret;

	/* Parse the domain idle states. */
	ret = of_genpd_parse_idle_states(np, states, state_count);
	if (ret)
		return ret;

	/* Fill out the PSCI specifics for each found state. */
	ret = psci_pd_parse_state_nodes(*states, *state_count);
	if (ret)
		kfree(*states);

	return ret;
}

static void psci_pd_free_states(struct genpd_power_state *states,
				unsigned int state_count)
{
	int i;

	for (i = 0; i < state_count; i++)
		kfree(states[i].data);
	kfree(states);
}

static int psci_pd_init(struct device_node *np, bool use_osi)
{
	struct generic_pm_domain *pd;
	struct psci_pd_provider *pd_provider;
	struct dev_power_governor *pd_gov;
	struct genpd_power_state *states = NULL;
	int ret = -ENOMEM, state_count = 0;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		goto out;

	pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL);
	if (!pd_provider)
		goto free_pd;

	pd->name = kasprintf(GFP_KERNEL, "%pOF", np);
	if (!pd->name)
		goto free_pd_prov;

	/*
	 * Parse the domain idle states and let genpd manage the state selection
	 * for those being compatible with "domain-idle-state".
	 */
	ret = psci_pd_parse_states(np, &states, &state_count);
	if (ret)
		goto free_name;

	pd->free_states = psci_pd_free_states;
	pd->name = kbasename(pd->name);
	pd->states = states;
	pd->state_count = state_count;
	pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;

	/* Allow power off when OSI has been successfully enabled. */
	if (use_osi)
		pd->power_off = psci_pd_power_off;
	else
		pd->flags |= GENPD_FLAG_ALWAYS_ON;

	/* Use governor for CPU PM domains if it has some states to manage. */
	pd_gov = state_count > 0 ? &pm_domain_cpu_gov : NULL;

	ret = pm_genpd_init(pd, pd_gov, false);
	if (ret) {
		psci_pd_free_states(states, state_count);
		goto free_name;
	}

	ret = of_genpd_add_provider_simple(np, pd);
	if (ret)
		goto remove_pd;

	pd_provider->node = of_node_get(np);
	list_add(&pd_provider->link, &psci_pd_providers);

	pr_debug("init PM domain %s\n", pd->name);
	return 0;

remove_pd:
	pm_genpd_remove(pd);
free_name:
	kfree(pd->name);
free_pd_prov:
	kfree(pd_provider);
free_pd:
	kfree(pd);
out:
	pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
	return ret;
}

static void psci_pd_remove(void)
{
	struct psci_pd_provider *pd_provider, *it;
	struct generic_pm_domain *genpd;

	list_for_each_entry_safe(pd_provider, it, &psci_pd_providers, link) {
		of_genpd_del_provider(pd_provider->node);

		genpd = of_genpd_remove_last(pd_provider->node);
		if (!IS_ERR(genpd))
			kfree(genpd);

		of_node_put(pd_provider->node);
		list_del(&pd_provider->link);
		kfree(pd_provider);
	}
}

static int psci_pd_init_topology(struct device_node *np)
{
	struct device_node *node;
	struct of_phandle_args child, parent;
	int ret;

	for_each_child_of_node(np, node) {
		if (of_parse_phandle_with_args(node, "power-domains",
					       "#power-domain-cells", 0,
					       &parent))
			continue;

		child.np = node;
		child.args_count = 0;
		ret = of_genpd_add_subdomain(&parent, &child);
		of_node_put(parent.np);
		if (ret) {
			of_node_put(node);
			return ret;
		}
	}

	return 0;
}

static bool psci_pd_try_set_osi_mode(void)
{
	int ret;

	if (!psci_has_osi_support())
		return false;

	ret = psci_set_osi_mode(true);
	if (ret) {
		pr_warn("failed to enable OSI mode: %d\n", ret);
		return false;
	}

	return true;
}

static void psci_cpuidle_domain_sync_state(struct device *dev)
{
	/*
	 * All devices have now been attached/probed to the PM domain topology,
	 * hence it's fine to allow domain states to be picked.
	 */
	psci_pd_allow_domain_state = true;
}

static const struct of_device_id psci_of_match[] = {
	{ .compatible = "arm,psci-1.0" },
	{}
};

static int psci_cpuidle_domain_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct device_node *node;
	bool use_osi;
	int ret = 0, pd_count = 0;

	if (!np)
		return -ENODEV;

	/* If OSI mode is supported, let's try to enable it. */
	use_osi = psci_pd_try_set_osi_mode();

	/*
	 * Parse child nodes for the "#power-domain-cells" property and
	 * initialize a genpd/genpd-of-provider pair when it's found.
	 */
	for_each_child_of_node(np, node) {
		if (!of_find_property(node, "#power-domain-cells", NULL))
			continue;

		ret = psci_pd_init(node, use_osi);
		if (ret)
			goto put_node;

		pd_count++;
	}

	/* Bail out if not using the hierarchical CPU topology. */
	if (!pd_count)
		goto no_pd;

	/* Link genpd masters/subdomains to model the CPU topology. */
	ret = psci_pd_init_topology(np);
	if (ret)
		goto remove_pd;

	pr_info("Initialized CPU PM domain topology\n");
	return 0;

put_node:
	of_node_put(node);
remove_pd:
	psci_pd_remove();
	pr_err("failed to create CPU PM domains ret=%d\n", ret);
no_pd:
	if (use_osi)
		psci_set_osi_mode(false);
	return ret;
}

static struct platform_driver psci_cpuidle_domain_driver = {
	.probe = psci_cpuidle_domain_probe,
	.driver = {
		.name = "psci-cpuidle-domain",
		.of_match_table = psci_of_match,
		.sync_state = psci_cpuidle_domain_sync_state,
	},
};

static int __init psci_idle_init_domains(void)
{
	return platform_driver_register(&psci_cpuidle_domain_driver);
}
subsys_initcall(psci_idle_init_domains);

struct device *psci_dt_attach_cpu(int cpu)
{
	struct device *dev;

	dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), "psci");
	if (IS_ERR_OR_NULL(dev))
		return dev;

	pm_runtime_irq_safe(dev);
	if (cpu_online(cpu))
		pm_runtime_get_sync(dev);

	return dev;
}

void psci_dt_detach_cpu(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev))
		return;

	dev_pm_domain_detach(dev, false);
}