// SPDX-License-Identifier: GPL-2.0-only
/*
 * PM domains for CPUs via genpd.
 *
 * Copyright (C) 2019 Linaro Ltd.
 * Author: Ulf Hansson <ulf.hansson@linaro.org>
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#define pr_fmt(fmt) "dt-idle-genpd: " fmt

#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "dt_idle_genpd.h"

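/*
 * Run the platform-specific parse_state() callback for each domain idle
 * state and stash the parsed value in that state's ->data field. On
 * failure, free the data of any states already handled.
 */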
static int pd_parse_state_nodes(
			int (*parse_state)(struct device_node *, u32 *),
			struct genpd_power_state *states, int state_count)
{
	int i, ret;
	u32 state, *state_buf;

	for (i = 0; i < state_count; i++) {
		ret = parse_state(to_of_node(states[i].fwnode), &state);
		if (ret)
			goto free_state;

		state_buf = kmalloc(sizeof(u32), GFP_KERNEL);
		if (!state_buf) {
			ret = -ENOMEM;
			goto free_state;
		}
		*state_buf = state;
		states[i].data = state_buf;
	}

	return 0;

free_state:
	i--;
	for (; i >= 0; i--)
		kfree(states[i].data);
	return ret;
}

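/*
 * Parse the domain idle states from DT and fill in the per-state,
 * platform-specific data via pd_parse_state_nodes(). The states array is
 * freed on failure.
 */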
static int pd_parse_states(struct device_node *np,
			   int (*parse_state)(struct device_node *, u32 *),
			   struct genpd_power_state **states,
			   int *state_count)
{
	int ret;

	/* Parse the domain idle states. */
	ret = of_genpd_parse_idle_states(np, states, state_count);
	if (ret)
		return ret;

	/* Fill out the dt specifics for each found state. */
	ret = pd_parse_state_nodes(parse_state, *states, *state_count);
	if (ret)
		kfree(*states);

	return ret;
}

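/* Free the per-state data and the states array itself. */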
static void pd_free_states(struct genpd_power_state *states,
			    unsigned int state_count)
{
	int i;

	for (i = 0; i < state_count; i++)
		kfree(states[i].data);
	kfree(states);
}

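/* Free a PM domain previously allocated by dt_idle_pd_alloc(). */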
void dt_idle_pd_free(struct generic_pm_domain *pd)
{
	pd_free_states(pd->states, pd->state_count);
	kfree(pd->name);
	kfree(pd);
}

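/*
 * Allocate a genpd for the given device node, name it after the node and
 * populate its idle states, using parse_state() to fill in the
 * platform-specific data for each state. Returns the new genpd, or NULL
 * on failure.
 */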
struct generic_pm_domain *dt_idle_pd_alloc(struct device_node *np,
			int (*parse_state)(struct device_node *, u32 *))
{
	struct generic_pm_domain *pd;
	struct genpd_power_state *states = NULL;
	int ret, state_count = 0;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		goto out;

	pd->name = kasprintf(GFP_KERNEL, "%pOF", np);
	if (!pd->name)
		goto free_pd;

	/*
	 * Parse the domain idle states and let genpd manage the state selection
	 * for those being compatible with "domain-idle-state".
	 */
	ret = pd_parse_states(np, parse_state, &states, &state_count);
	if (ret)
		goto free_name;

	pd->free_states = pd_free_states;
	pd->name = kbasename(pd->name);
	pd->states = states;
	pd->state_count = state_count;

	pr_debug("alloc PM domain %s\n", pd->name);
	return pd;

free_name:
	kfree(pd->name);
free_pd:
	kfree(pd);
out:
	pr_err("failed to alloc PM domain %pOF\n", np);
	return NULL;
}

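/*
 * For each child node of @np that references a parent domain through a
 * "power-domains" phandle, register the child's genpd as a subdomain of
 * that parent genpd.
 */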
int dt_idle_pd_init_topology(struct device_node *np)
{
	struct device_node *node;
	struct of_phandle_args child, parent;
	int ret;

	for_each_child_of_node(np, node) {
		if (of_parse_phandle_with_args(node, "power-domains",
					"#power-domain-cells", 0, &parent))
			continue;

		child.np = node;
		child.args_count = 0;
		ret = of_genpd_add_subdomain(&parent, &child);
		of_node_put(parent.np);
		if (ret) {
			of_node_put(node);
			return ret;
		}
	}

	return 0;
}

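/*
 * Undo dt_idle_pd_init_topology(): remove each child node's genpd from
 * its parent domain.
 */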
int dt_idle_pd_remove_topology(struct device_node *np)
{
	struct device_node *node;
	struct of_phandle_args child, parent;
	int ret;

	for_each_child_of_node(np, node) {
		if (of_parse_phandle_with_args(node, "power-domains",
					"#power-domain-cells", 0, &parent))
			continue;

		child.np = node;
		child.args_count = 0;
		ret = of_genpd_remove_subdomain(&parent, &child);
		of_node_put(parent.np);
		if (ret) {
			of_node_put(node);
			return ret;
		}
	}

	return 0;
}

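/*
 * Attach the CPU device to its PM domain by name, mark its runtime PM
 * callbacks as IRQ safe, take a runtime PM reference if the CPU is online
 * and flag the device as a syscore device. Returns the attached device,
 * or an ERR_PTR()/NULL value from dev_pm_domain_attach_by_name().
 */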
struct device *dt_idle_attach_cpu(int cpu, const char *name)
{
	struct device *dev;

	dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), name);
	if (IS_ERR_OR_NULL(dev))
		return dev;

	pm_runtime_irq_safe(dev);
	if (cpu_online(cpu))
		pm_runtime_get_sync(dev);

	dev_pm_syscore_device(dev, true);

	return dev;
}

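/* Detach a CPU device previously attached via dt_idle_attach_cpu(). */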
void dt_idle_detach_cpu(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev))
		return;

	dev_pm_domain_detach(dev, false);
}