// SPDX-License-Identifier: GPL-2.0
/*
 * PM domains for CPUs via genpd - managed by cpuidle-psci.
 *
 * Copyright (C) 2019 Linaro Ltd.
 * Author: Ulf Hansson <ulf.hansson@linaro.org>
 *
 */

#define pr_fmt(fmt) "CPUidle PSCI: " fmt

#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/psci.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "cpuidle-psci.h"

struct psci_pd_provider {
	struct list_head link;
	struct device_node *node;
};

static LIST_HEAD(psci_pd_providers);
static bool psci_pd_allow_domain_state;

static int psci_pd_power_off(struct generic_pm_domain *pd)
{
	struct genpd_power_state *state = &pd->states[pd->state_idx];
	u32 *pd_state;

	if (!state->data)
		return 0;

	if (!psci_pd_allow_domain_state)
		return -EBUSY;

	/* OSI mode is enabled, set the corresponding domain state. */
	pd_state = state->data;
	psci_set_domain_state(*pd_state);

	return 0;
}

static int psci_pd_parse_state_nodes(struct genpd_power_state *states,
				     int state_count)
{
	int i, ret;
	u32 psci_state, *psci_state_buf;

	for (i = 0; i < state_count; i++) {
		ret = psci_dt_parse_state_node(to_of_node(states[i].fwnode),
					       &psci_state);
		if (ret)
			goto free_state;

		psci_state_buf = kmalloc(sizeof(u32), GFP_KERNEL);
		if (!psci_state_buf) {
			ret = -ENOMEM;
			goto free_state;
		}
		*psci_state_buf = psci_state;
		states[i].data = psci_state_buf;
	}

	return 0;

free_state:
	i--;
	for (; i >= 0; i--)
		kfree(states[i].data);
	return ret;
}

static int psci_pd_parse_states(struct device_node *np,
				struct genpd_power_state **states,
				int *state_count)
{
	int ret;

	/* Parse the domain idle states. */
	ret = of_genpd_parse_idle_states(np, states, state_count);
	if (ret)
		return ret;

	/* Fill out the PSCI specifics for each found state. */
	ret = psci_pd_parse_state_nodes(*states, *state_count);
	if (ret)
		kfree(*states);

	return ret;
}

static void psci_pd_free_states(struct genpd_power_state *states,
				unsigned int state_count)
{
	int i;

	for (i = 0; i < state_count; i++)
		kfree(states[i].data);
	kfree(states);
}

static int psci_pd_init(struct device_node *np)
{
	struct generic_pm_domain *pd;
	struct psci_pd_provider *pd_provider;
	struct dev_power_governor *pd_gov;
	struct genpd_power_state *states = NULL;
	int ret = -ENOMEM, state_count = 0;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		goto out;

	pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL);
	if (!pd_provider)
		goto free_pd;

	pd->name = kasprintf(GFP_KERNEL, "%pOF", np);
	if (!pd->name)
		goto free_pd_prov;

	/*
	 * Parse the domain idle states and let genpd manage the state
	 * selection for those being compatible with "domain-idle-state".
	 */
	ret = psci_pd_parse_states(np, &states, &state_count);
	if (ret)
		goto free_name;

	pd->free_states = psci_pd_free_states;
	pd->name = kbasename(pd->name);
	pd->power_off = psci_pd_power_off;
	pd->states = states;
	pd->state_count = state_count;
	pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;

	/* Use governor for CPU PM domains if it has some states to manage. */
	pd_gov = state_count > 0 ? &pm_domain_cpu_gov : NULL;

	ret = pm_genpd_init(pd, pd_gov, false);
	if (ret) {
		psci_pd_free_states(states, state_count);
		goto free_name;
	}

	ret = of_genpd_add_provider_simple(np, pd);
	if (ret)
		goto remove_pd;

	pd_provider->node = of_node_get(np);
	list_add(&pd_provider->link, &psci_pd_providers);

	pr_debug("init PM domain %s\n", pd->name);
	return 0;

remove_pd:
	pm_genpd_remove(pd);
free_name:
	kfree(pd->name);
free_pd_prov:
	kfree(pd_provider);
free_pd:
	kfree(pd);
out:
	pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
	return ret;
}

static void psci_pd_remove(void)
{
	struct psci_pd_provider *pd_provider, *it;
	struct generic_pm_domain *genpd;

	list_for_each_entry_safe(pd_provider, it, &psci_pd_providers, link) {
		of_genpd_del_provider(pd_provider->node);

		genpd = of_genpd_remove_last(pd_provider->node);
		if (!IS_ERR(genpd))
			kfree(genpd);

		of_node_put(pd_provider->node);
		list_del(&pd_provider->link);
		kfree(pd_provider);
	}
}

static int psci_pd_init_topology(struct device_node *np, bool add)
{
	struct device_node *node;
	struct of_phandle_args child, parent;
	int ret;

	for_each_child_of_node(np, node) {
		if (of_parse_phandle_with_args(node, "power-domains",
					       "#power-domain-cells", 0,
					       &parent))
			continue;

		child.np = node;
		child.args_count = 0;

		ret = add ? of_genpd_add_subdomain(&parent, &child) :
			    of_genpd_remove_subdomain(&parent, &child);
		of_node_put(parent.np);
		if (ret) {
			of_node_put(node);
			return ret;
		}
	}

	return 0;
}

static int psci_pd_add_topology(struct device_node *np)
{
	return psci_pd_init_topology(np, true);
}

static void psci_pd_remove_topology(struct device_node *np)
{
	psci_pd_init_topology(np, false);
}

static void psci_cpuidle_domain_sync_state(struct device *dev)
{
	/*
	 * All devices have now been attached/probed to the PM domain
	 * topology, hence it's fine to allow domain states to be picked.
	 */
	psci_pd_allow_domain_state = true;
}

static const struct of_device_id psci_of_match[] = {
	{ .compatible = "arm,psci-1.0" },
	{}
};

static int psci_cpuidle_domain_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct device_node *node;
	int ret = 0, pd_count = 0;

	if (!np)
		return -ENODEV;

	/* Currently limit the hierarchical topology to be used in OSI mode. */
	if (!psci_has_osi_support())
		return 0;

	/*
	 * Parse child nodes for the "#power-domain-cells" property and
	 * initialize a genpd/genpd-of-provider pair when it's found.
	 */
	for_each_child_of_node(np, node) {
		if (!of_find_property(node, "#power-domain-cells", NULL))
			continue;

		ret = psci_pd_init(node);
		if (ret)
			goto put_node;

		pd_count++;
	}

	/* Bail out if not using the hierarchical CPU topology. */
	if (!pd_count)
		return 0;

	/* Link genpd masters/subdomains to model the CPU topology. */
	ret = psci_pd_add_topology(np);
	if (ret)
		goto remove_pd;

	/* Try to enable OSI mode. */
	ret = psci_set_osi_mode();
	if (ret) {
		pr_warn("failed to enable OSI mode: %d\n", ret);
		psci_pd_remove_topology(np);
		goto remove_pd;
	}

	pr_info("Initialized CPU PM domain topology\n");
	return 0;

put_node:
	of_node_put(node);
remove_pd:
	if (pd_count)
		psci_pd_remove();
	pr_err("failed to create CPU PM domains ret=%d\n", ret);
	return ret;
}

static struct platform_driver psci_cpuidle_domain_driver = {
	.probe = psci_cpuidle_domain_probe,
	.driver = {
		.name = "psci-cpuidle-domain",
		.of_match_table = psci_of_match,
		.sync_state = psci_cpuidle_domain_sync_state,
	},
};

static int __init psci_idle_init_domains(void)
{
	return platform_driver_register(&psci_cpuidle_domain_driver);
}
subsys_initcall(psci_idle_init_domains);

struct device *psci_dt_attach_cpu(int cpu)
{
	struct device *dev;

	dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), "psci");
	if (IS_ERR_OR_NULL(dev))
		return dev;

	pm_runtime_irq_safe(dev);
	if (cpu_online(cpu))
		pm_runtime_get_sync(dev);

	return dev;
}

void psci_dt_detach_cpu(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev))
		return;

	dev_pm_domain_detach(dev, false);
}