// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2021, Michael Srba
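/*
 * Bus driver for the SSC block found on some Qualcomm SoCs. The block sits
 * behind the AHB/AXI fabric and needs a specific bring-up sequence (power
 * domain votes, SSCAON override bits, resets, AXI halt release and clocks)
 * before the devices inside it can be accessed; this driver performs that
 * sequence and then populates the child nodes.
 *
 * Illustrative devicetree sketch, inferred from the properties parsed below;
 * the dt-bindings document is authoritative, and all labels, addresses and
 * phandles here are placeholders:
 *
 *	ssc_bus: bus@0 {
 *		compatible = "qcom,ssc-block-bus";
 *		reg = <...>, <...>;
 *		reg-names = "mpm_sscaon_config0", "mpm_sscaon_config1";
 *		clocks = <...>;
 *		clock-names = "xo", "aggre2", "gcc_im_sleep", "aggre2_north",
 *			      "ssc_xo", "ssc_ahbs";
 *		resets = <...>, <...>;
 *		reset-names = "ssc_bcr", "ssc_reset";
 *		power-domains = <...>, <...>;
 *		power-domain-names = "ssc_cx", "ssc_mx";
 *		qcom,halt-regs = <&some_syscon HALT_OFFSET>;
 *	};
 */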

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>

/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG			0x0
#define AXI_HALTACK_REG			0x4
#define AXI_IDLE_REG			0x8

#define SSCAON_CONFIG0_CLAMP_EN_OVRD		BIT(4)
#define SSCAON_CONFIG0_CLAMP_EN_OVRD_VAL	BIT(5)

static const char *const qcom_ssc_block_pd_names[] = {
	"ssc_cx",
	"ssc_mx"
};

struct qcom_ssc_block_bus_data {
	const char *const *pd_names;
	struct device *pds[ARRAY_SIZE(qcom_ssc_block_pd_names)];
	char __iomem *reg_mpm_sscaon_config0;
	char __iomem *reg_mpm_sscaon_config1;
	struct regmap *halt_map;
	struct clk *xo_clk;
	struct clk *aggre2_clk;
	struct clk *gcc_im_sleep_clk;
	struct clk *aggre2_north_clk;
	struct clk *ssc_xo_clk;
	struct clk *ssc_ahbs_clk;
	struct reset_control *ssc_bcr;
	struct reset_control *ssc_reset;
	u32 ssc_axi_halt;
	int num_pds;
};

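/*
 * Read-modify-write helpers for the two MPM SSCAON override registers, which
 * are mapped directly rather than accessed through a regmap.
 */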
static void reg32_set_bits(char __iomem *reg, u32 value)
{
	u32 tmp = ioread32(reg);

	iowrite32(tmp | value, reg);
}

static void reg32_clear_bits(char __iomem *reg, u32 value)
{
	u32 tmp = ioread32(reg);

	iowrite32(tmp & (~value), reg);
}

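/*
 * Bring the SSC block out of power collapse: enable the bus-side clocks,
 * clear the SSCAON clamp/ack overrides, deassert both resets, release the
 * AXI halt request and finally enable the SSC-side XO and AHBS clocks.
 * Called once from probe, after the power domain votes have been cast.
 */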
static int qcom_ssc_block_bus_init(struct device *dev)
{
	int ret;

	struct qcom_ssc_block_bus_data *data = dev_get_drvdata(dev);

	ret = clk_prepare_enable(data->xo_clk);
	if (ret) {
		dev_err(dev, "error enabling xo_clk: %d\n", ret);
		goto err_xo_clk;
	}

	ret = clk_prepare_enable(data->aggre2_clk);
	if (ret) {
		dev_err(dev, "error enabling aggre2_clk: %d\n", ret);
		goto err_aggre2_clk;
	}

	ret = clk_prepare_enable(data->gcc_im_sleep_clk);
	if (ret) {
		dev_err(dev, "error enabling gcc_im_sleep_clk: %d\n", ret);
		goto err_gcc_im_sleep_clk;
	}

	/*
	 * We need to intervene here because the HW logic driving these signals cannot handle
	 * initialization after power collapse by itself.
	 */
	reg32_clear_bits(data->reg_mpm_sscaon_config0,
			 SSCAON_CONFIG0_CLAMP_EN_OVRD | SSCAON_CONFIG0_CLAMP_EN_OVRD_VAL);
	/* override few_ack/rest_ack */
	reg32_clear_bits(data->reg_mpm_sscaon_config1, BIT(31));

	ret = clk_prepare_enable(data->aggre2_north_clk);
	if (ret) {
		dev_err(dev, "error enabling aggre2_north_clk: %d\n", ret);
		goto err_aggre2_north_clk;
	}

	ret = reset_control_deassert(data->ssc_reset);
	if (ret) {
		dev_err(dev, "error deasserting ssc_reset: %d\n", ret);
		goto err_ssc_reset;
	}

	ret = reset_control_deassert(data->ssc_bcr);
	if (ret) {
		dev_err(dev, "error deasserting ssc_bcr: %d\n", ret);
		goto err_ssc_bcr;
	}

	regmap_write(data->halt_map, data->ssc_axi_halt + AXI_HALTREQ_REG, 0);

	ret = clk_prepare_enable(data->ssc_xo_clk);
	if (ret) {
		dev_err(dev, "error enabling ssc_xo_clk: %d\n", ret);
		goto err_ssc_xo_clk;
	}

	ret = clk_prepare_enable(data->ssc_ahbs_clk);
	if (ret) {
		dev_err(dev, "error enabling ssc_ahbs_clk: %d\n", ret);
		goto err_ssc_ahbs_clk;
	}

	return 0;

err_ssc_ahbs_clk:
	clk_disable_unprepare(data->ssc_xo_clk);

err_ssc_xo_clk:
	regmap_write(data->halt_map, data->ssc_axi_halt + AXI_HALTREQ_REG, 1);

	reset_control_assert(data->ssc_bcr);

err_ssc_bcr:
	reset_control_assert(data->ssc_reset);

err_ssc_reset:
	clk_disable_unprepare(data->aggre2_north_clk);

err_aggre2_north_clk:
	reg32_set_bits(data->reg_mpm_sscaon_config0,
		       SSCAON_CONFIG0_CLAMP_EN_OVRD | SSCAON_CONFIG0_CLAMP_EN_OVRD_VAL);
	reg32_set_bits(data->reg_mpm_sscaon_config1, BIT(31));

	clk_disable_unprepare(data->gcc_im_sleep_clk);

err_gcc_im_sleep_clk:
	clk_disable_unprepare(data->aggre2_clk);

err_aggre2_clk:
	clk_disable_unprepare(data->xo_clk);

err_xo_clk:
	return ret;
}

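/*
 * Undo qcom_ssc_block_bus_init(): disable the SSC-side clocks, reassert the
 * resets and SSCAON overrides, halt the AXI port and disable the bus-side
 * clocks again.
 */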
static void qcom_ssc_block_bus_deinit(struct device *dev)
{
	int ret;

	struct qcom_ssc_block_bus_data *data = dev_get_drvdata(dev);

	clk_disable_unprepare(data->ssc_ahbs_clk);
	clk_disable_unprepare(data->ssc_xo_clk);

	ret = reset_control_assert(data->ssc_bcr);
	if (ret)
		dev_err(dev, "error asserting ssc_bcr: %d\n", ret);

	regmap_write(data->halt_map, data->ssc_axi_halt + AXI_HALTREQ_REG, 1);

	reg32_set_bits(data->reg_mpm_sscaon_config1, BIT(31));
	reg32_set_bits(data->reg_mpm_sscaon_config0,
		       SSCAON_CONFIG0_CLAMP_EN_OVRD | SSCAON_CONFIG0_CLAMP_EN_OVRD_VAL);

	ret = reset_control_assert(data->ssc_reset);
	if (ret)
		dev_err(dev, "error asserting ssc_reset: %d\n", ret);

	clk_disable_unprepare(data->gcc_im_sleep_clk);

	clk_disable_unprepare(data->aggre2_north_clk);

	clk_disable_unprepare(data->aggre2_clk);
	clk_disable_unprepare(data->xo_clk);
}

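/*
 * Attach both power domains by name; with more than one domain listed in the
 * DT node, the driver core does not attach them automatically, so it has to
 * be done by hand here. Returns the number of attached domains or a negative
 * errno.
 */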
static int qcom_ssc_block_bus_pds_attach(struct device *dev, struct device **pds,
					 const char *const *pd_names, size_t num_pds)
{
	int ret;
	int i;

	for (i = 0; i < num_pds; i++) {
		pds[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
		if (IS_ERR_OR_NULL(pds[i])) {
			ret = PTR_ERR(pds[i]) ? : -ENODATA;
			goto unroll_attach;
		}
	}

	return num_pds;

unroll_attach:
	for (i--; i >= 0; i--)
		dev_pm_domain_detach(pds[i], false);

	return ret;
}

static void qcom_ssc_block_bus_pds_detach(struct device *dev, struct device **pds, size_t num_pds)
{
	int i;

	for (i = 0; i < num_pds; i++)
		dev_pm_domain_detach(pds[i], false);
}

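/*
 * Vote each attached domain to its highest performance state and take a
 * runtime PM reference so the domains stay powered while the bus is in use.
 */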
static int qcom_ssc_block_bus_pds_enable(struct device **pds, size_t num_pds)
{
	int ret;
	int i;

	for (i = 0; i < num_pds; i++) {
		dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
		ret = pm_runtime_resume_and_get(pds[i]);
		if (ret < 0)
			goto unroll_pd_votes;
	}

	return 0;

unroll_pd_votes:
	for (i--; i >= 0; i--) {
		dev_pm_genpd_set_performance_state(pds[i], 0);
		pm_runtime_put(pds[i]);
	}

	return ret;
}

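/* Drop the performance state votes and runtime PM references again. */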
static void qcom_ssc_block_bus_pds_disable(struct device **pds, size_t num_pds)
{
	int i;

	for (i = 0; i < num_pds; i++) {
		dev_pm_genpd_set_performance_state(pds[i], 0);
		pm_runtime_put(pds[i]);
	}
}

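/*
 * Probe: vote for the power domains, map the override registers, look up the
 * resets, clocks and the AXI halt syscon, run the bring-up sequence and
 * populate the child devices described under this node.
 */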
static int qcom_ssc_block_bus_probe(struct platform_device *pdev)
{
	struct qcom_ssc_block_bus_data *data;
	struct device_node *np = pdev->dev.of_node;
	struct of_phandle_args halt_args;
	struct resource *res;
	int ret;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	platform_set_drvdata(pdev, data);

	data->pd_names = qcom_ssc_block_pd_names;
	data->num_pds = ARRAY_SIZE(qcom_ssc_block_pd_names);

	/* power domains */
	ret = qcom_ssc_block_bus_pds_attach(&pdev->dev, data->pds, data->pd_names, data->num_pds);
	if (ret < 0)
		return dev_err_probe(&pdev->dev, ret, "error when attaching power domains\n");

	ret = qcom_ssc_block_bus_pds_enable(data->pds, data->num_pds);
	if (ret < 0)
		return dev_err_probe(&pdev->dev, ret, "error when enabling power domains\n");

	/* low level overrides for when the HW logic doesn't "just work" */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpm_sscaon_config0");
	data->reg_mpm_sscaon_config0 = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(data->reg_mpm_sscaon_config0))
		return dev_err_probe(&pdev->dev, PTR_ERR(data->reg_mpm_sscaon_config0),
				     "Failed to ioremap mpm_sscaon_config0\n");

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpm_sscaon_config1");
	data->reg_mpm_sscaon_config1 = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(data->reg_mpm_sscaon_config1))
		return dev_err_probe(&pdev->dev, PTR_ERR(data->reg_mpm_sscaon_config1),
				     "Failed to ioremap mpm_sscaon_config1\n");

	/* resets */
	data->ssc_bcr = devm_reset_control_get_exclusive(&pdev->dev, "ssc_bcr");
	if (IS_ERR(data->ssc_bcr))
		return dev_err_probe(&pdev->dev, PTR_ERR(data->ssc_bcr),
				     "Failed to acquire reset: ssc_bcr\n");

	data->ssc_reset = devm_reset_control_get_exclusive(&pdev->dev, "ssc_reset");
	if (IS_ERR(data->ssc_reset))
		return dev_err_probe(&pdev->dev, PTR_ERR(data->ssc_reset),
				     "Failed to acquire reset: ssc_reset\n");

	/* clocks */
	data->xo_clk = devm_clk_get(&pdev->dev, "xo");
	if (IS_ERR(data->xo_clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(data->xo_clk),
				     "Failed to get clock: xo\n");

	data->aggre2_clk = devm_clk_get(&pdev->dev, "aggre2");
	if (IS_ERR(data->aggre2_clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(data->aggre2_clk),
				     "Failed to get clock: aggre2\n");

	data->gcc_im_sleep_clk = devm_clk_get(&pdev->dev, "gcc_im_sleep");
	if (IS_ERR(data->gcc_im_sleep_clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(data->gcc_im_sleep_clk),
				     "Failed to get clock: gcc_im_sleep\n");

	data->aggre2_north_clk = devm_clk_get(&pdev->dev, "aggre2_north");
	if (IS_ERR(data->aggre2_north_clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(data->aggre2_north_clk),
				     "Failed to get clock: aggre2_north\n");

	data->ssc_xo_clk = devm_clk_get(&pdev->dev, "ssc_xo");
	if (IS_ERR(data->ssc_xo_clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(data->ssc_xo_clk),
				     "Failed to get clock: ssc_xo\n");

	data->ssc_ahbs_clk = devm_clk_get(&pdev->dev, "ssc_ahbs");
	if (IS_ERR(data->ssc_ahbs_clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(data->ssc_ahbs_clk),
				     "Failed to get clock: ssc_ahbs\n");

	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, "qcom,halt-regs", 1, 0,
					       &halt_args);
	if (ret < 0)
		return dev_err_probe(&pdev->dev, ret, "Failed to parse qcom,halt-regs\n");

	data->halt_map = syscon_node_to_regmap(halt_args.np);
	of_node_put(halt_args.np);
	if (IS_ERR(data->halt_map))
		return PTR_ERR(data->halt_map);

	data->ssc_axi_halt = halt_args.args[0];

	ret = qcom_ssc_block_bus_init(&pdev->dev);
	if (ret)
		return ret;

	of_platform_populate(np, NULL, NULL, &pdev->dev);

	return 0;
}

static int qcom_ssc_block_bus_remove(struct platform_device *pdev)
{
	struct qcom_ssc_block_bus_data *data = platform_get_drvdata(pdev);

	qcom_ssc_block_bus_deinit(&pdev->dev);

	qcom_ssc_block_bus_pds_disable(data->pds, data->num_pds);
	qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds);
	pm_runtime_disable(&pdev->dev);
	pm_clk_destroy(&pdev->dev);

	return 0;
}

static const struct of_device_id qcom_ssc_block_bus_of_match[] = {
	{ .compatible = "qcom,ssc-block-bus", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, qcom_ssc_block_bus_of_match);

static struct platform_driver qcom_ssc_block_bus_driver = {
	.probe = qcom_ssc_block_bus_probe,
	.remove = qcom_ssc_block_bus_remove,
	.driver = {
		.name = "qcom-ssc-block-bus",
		.of_match_table = qcom_ssc_block_bus_of_match,
	},
};

module_platform_driver(qcom_ssc_block_bus_driver);

MODULE_DESCRIPTION("A driver for handling the init sequence needed for accessing the SSC block on (some) qcom SoCs over AHB");
MODULE_AUTHOR("Michael Srba <Michael.Srba@seznam.cz>");
MODULE_LICENSE("GPL v2");