1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright(c) 2015-17 Intel Corporation
3 
4 /*
5  *  skl-ssp-clk.c - ASoC skylake ssp clock driver
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/err.h>
11 #include <linux/platform_device.h>
12 #include <linux/clk-provider.h>
13 #include <linux/clkdev.h>
14 #include "skl.h"
15 #include "skl-ssp-clk.h"
16 #include "skl-topology.h"
17 
#define to_skl_clk(_hw)	container_of(_hw, struct skl_clk, hw)

/* One fixed-rate parent clock plus its clkdev lookup entry. */
struct skl_clk_parent {
	struct clk_hw *hw;
	struct clk_lookup *lookup;
};

/* One SSP clock exposed to the common clock framework. */
struct skl_clk {
	struct clk_hw hw;
	struct clk_lookup *lookup;
	unsigned long rate;	/* cached rate from the last successful set_rate */
	struct skl_clk_pdata *pdata;
	u32 id;			/* global index across mclk/sclk/sclkfs ranges */
};

/* Per-device state: all parents and all registered SSP clocks. */
struct skl_clk_data {
	struct skl_clk_parent parent[SKL_MAX_CLK_SRC];
	struct skl_clk *clk[SKL_MAX_CLK_CNT];
	u8 avail_clk_cnt;	/* number of valid entries in clk[] */
};
38 
39 static int skl_get_clk_type(u32 index)
40 {
41 	switch (index) {
42 	case 0 ... (SKL_SCLK_OFS - 1):
43 		return SKL_MCLK;
44 
45 	case SKL_SCLK_OFS ... (SKL_SCLKFS_OFS - 1):
46 		return SKL_SCLK;
47 
48 	case SKL_SCLKFS_OFS ... (SKL_MAX_CLK_CNT - 1):
49 		return SKL_SCLK_FS;
50 
51 	default:
52 		return -EINVAL;
53 	}
54 }
55 
56 static int skl_get_vbus_id(u32 index, u8 clk_type)
57 {
58 	switch (clk_type) {
59 	case SKL_MCLK:
60 		return index;
61 
62 	case SKL_SCLK:
63 		return index - SKL_SCLK_OFS;
64 
65 	case SKL_SCLK_FS:
66 		return index - SKL_SCLKFS_OFS;
67 
68 	default:
69 		return -EINVAL;
70 	}
71 }
72 
/*
 * Pre-fill the DMA-control IPC payload stored in a rate config entry.
 *
 * For SCLK/FS the payload carries the audio format (sample rate, bit depth,
 * valid bits, channel count) taken from the entry's NHLT fmt config; its
 * hdr.type is filled in later by skl_send_clk_dma_control(). For MCLK/SCLK
 * only the DMA_CLK_CONTROLS header is set here. Sizes exclude the TLV
 * header, as the size fields count payload bytes only.
 */
static void skl_fill_clk_ipc(struct skl_clk_rate_cfg_table *rcfg, u8 clk_type)
{
	struct nhlt_fmt_cfg *fmt_cfg;
	union skl_clk_ctrl_ipc *ipc;
	struct wav_fmt *wfmt;

	if (!rcfg)
		return;

	ipc = &rcfg->dma_ctl_ipc;
	if (clk_type == SKL_SCLK_FS) {
		fmt_cfg = (struct nhlt_fmt_cfg *)rcfg->config;
		wfmt = &fmt_cfg->fmt_ext.fmt;

		/* Remove TLV Header size */
		ipc->sclk_fs.hdr.size = sizeof(struct skl_dmactrl_sclkfs_cfg) -
						sizeof(struct skl_tlv_hdr);
		ipc->sclk_fs.sampling_frequency = wfmt->samples_per_sec;
		ipc->sclk_fs.bit_depth = wfmt->bits_per_sample;
		ipc->sclk_fs.valid_bit_depth =
			fmt_cfg->fmt_ext.sample.valid_bits_per_sample;
		ipc->sclk_fs.number_of_channels = wfmt->channels;
	} else {
		ipc->mclk.hdr.type = DMA_CLK_CONTROLS;
		/* Remove TLV Header size */
		ipc->mclk.hdr.size = sizeof(struct skl_dmactrl_mclk_cfg) -
						sizeof(struct skl_tlv_hdr);
	}
}
102 
/* Sends dma control IPC to turn the clock ON/OFF */
static int skl_send_clk_dma_control(struct skl *skl,
				struct skl_clk_rate_cfg_table *rcfg,
				u32 vbus_id, u8 clk_type,
				bool enable)
{
	struct nhlt_specific_cfg *sp_cfg;
	u32 i2s_config_size, node_id = 0;
	struct nhlt_fmt_cfg *fmt_cfg;
	union skl_clk_ctrl_ipc *ipc;
	void *i2s_config = NULL;
	u8 *data, size;
	int ret;

	if (!rcfg)
		return -EIO;

	ipc = &rcfg->dma_ctl_ipc;
	fmt_cfg = (struct nhlt_fmt_cfg *)rcfg->config;
	sp_cfg = &fmt_cfg->config;

	/*
	 * Select which part of the IPC union to append: start/stop header
	 * for sclk/fs, or the mclk control struct for mclk/sclk. The rest
	 * of the payload was pre-filled by skl_fill_clk_ipc().
	 */
	if (clk_type == SKL_SCLK_FS) {
		ipc->sclk_fs.hdr.type =
			enable ? DMA_TRANSMITION_START : DMA_TRANSMITION_STOP;
		data = (u8 *)&ipc->sclk_fs;
		size = sizeof(struct skl_dmactrl_sclkfs_cfg);
	} else {
		/* 1 to enable mclk, 0 to enable sclk */
		if (clk_type == SKL_SCLK)
			ipc->mclk.mclk = 0;
		else
			ipc->mclk.mclk = 1;

		ipc->mclk.keep_running = enable;
		ipc->mclk.warm_up_over = enable;
		ipc->mclk.clk_stop_over = !enable;
		data = (u8 *)&ipc->mclk;
		size = sizeof(struct skl_dmactrl_mclk_cfg);
	}

	/* Firmware expects the NHLT blob with the control TLV appended. */
	i2s_config_size = sp_cfg->size + size;
	i2s_config = kzalloc(i2s_config_size, GFP_KERNEL);
	if (!i2s_config)
		return -ENOMEM;

	/* copy blob */
	memcpy(i2s_config, sp_cfg->caps, sp_cfg->size);

	/* copy additional dma controls information */
	memcpy(i2s_config + sp_cfg->size, data, size);

	/* Node id encodes the link class and the SSP port (vbus) number. */
	node_id = ((SKL_DMA_I2S_LINK_INPUT_CLASS << 8) | (vbus_id << 4));
	ret = skl_dsp_set_dma_control(skl->skl_sst, (u32 *)i2s_config,
					i2s_config_size, node_id);
	kfree(i2s_config);

	return ret;
}
161 
162 static struct skl_clk_rate_cfg_table *skl_get_rate_cfg(
163 		struct skl_clk_rate_cfg_table *rcfg,
164 				unsigned long rate)
165 {
166 	int i;
167 
168 	for (i = 0; (i < SKL_MAX_CLK_RATES) && rcfg[i].rate; i++) {
169 		if (rcfg[i].rate == rate)
170 			return &rcfg[i];
171 	}
172 
173 	return NULL;
174 }
175 
/*
 * Turn a clock on or off: resolve its type and vbus id from the clock id,
 * find the rate config matching the cached rate, and send the DMA-control
 * IPC. Returns 0 on success or a negative errno (-EINVAL when the cached
 * rate has no config entry, e.g. set_rate was never called).
 */
static int skl_clk_change_status(struct skl_clk *clkdev,
				bool enable)
{
	struct skl_clk_rate_cfg_table *rcfg;
	int vbus_id, clk_type;

	clk_type = skl_get_clk_type(clkdev->id);
	if (clk_type < 0)
		return clk_type;

	vbus_id = skl_get_vbus_id(clkdev->id, clk_type);
	if (vbus_id < 0)
		return vbus_id;

	rcfg = skl_get_rate_cfg(clkdev->pdata->ssp_clks[clkdev->id].rate_cfg,
						clkdev->rate);
	if (!rcfg)
		return -EINVAL;

	/* pvt_data holds the parent skl driver context for the IPC call. */
	return skl_send_clk_dma_control(clkdev->pdata->pvt_data, rcfg,
					vbus_id, clk_type, enable);
}
198 
199 static int skl_clk_prepare(struct clk_hw *hw)
200 {
201 	struct skl_clk *clkdev = to_skl_clk(hw);
202 
203 	return skl_clk_change_status(clkdev, true);
204 }
205 
206 static void skl_clk_unprepare(struct clk_hw *hw)
207 {
208 	struct skl_clk *clkdev = to_skl_clk(hw);
209 
210 	skl_clk_change_status(clkdev, false);
211 }
212 
/*
 * clk_ops.set_rate: accept only rates present in the clock's rate config
 * table. On success the matching IPC payload is pre-filled and the rate is
 * cached for later prepare/recalc calls. parent_rate is unused.
 */
static int skl_clk_set_rate(struct clk_hw *hw, unsigned long rate,
					unsigned long parent_rate)
{
	struct skl_clk *clkdev = to_skl_clk(hw);
	struct skl_clk_rate_cfg_table *rcfg;
	int clk_type;

	if (!rate)
		return -EINVAL;

	rcfg = skl_get_rate_cfg(clkdev->pdata->ssp_clks[clkdev->id].rate_cfg,
							rate);
	if (!rcfg)
		return -EINVAL;

	clk_type = skl_get_clk_type(clkdev->id);
	if (clk_type < 0)
		return clk_type;

	skl_fill_clk_ipc(rcfg, clk_type);
	/* Cache the rate only after all validation succeeded. */
	clkdev->rate = rate;

	return 0;
}
237 
238 static unsigned long skl_clk_recalc_rate(struct clk_hw *hw,
239 				unsigned long parent_rate)
240 {
241 	struct skl_clk *clkdev = to_skl_clk(hw);
242 
243 	if (clkdev->rate)
244 		return clkdev->rate;
245 
246 	return 0;
247 }
248 
/*
 * Rate rounding is not supported by this driver; echo the requested rate
 * back unchanged to satisfy the clk framework. parent_rate is untouched.
 */
static long skl_clk_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate)
{
	long rounded = rate;

	return rounded;
}
255 
/*
 * prepare/unprepare are used instead of enable/disable as IPC will be sent
 * in non-atomic context.
 */
static const struct clk_ops skl_clk_ops = {
	.prepare = skl_clk_prepare,
	.unprepare = skl_clk_unprepare,
	.set_rate = skl_clk_set_rate,
	.round_rate = skl_clk_round_rate,	/* pass-through, no rounding */
	.recalc_rate = skl_clk_recalc_rate,	/* reports cached rate */
};
267 
268 static void unregister_parent_src_clk(struct skl_clk_parent *pclk,
269 					unsigned int id)
270 {
271 	while (id--) {
272 		clkdev_drop(pclk[id].lookup);
273 		clk_hw_unregister_fixed_rate(pclk[id].hw);
274 	}
275 }
276 
277 static void unregister_src_clk(struct skl_clk_data *dclk)
278 {
279 	while (dclk->avail_clk_cnt--)
280 		clkdev_drop(dclk->clk[dclk->avail_clk_cnt]->lookup);
281 }
282 
/*
 * Register all SKL_MAX_CLK_SRC parent clocks as fixed-rate clocks and
 * create a clkdev lookup for each. On any failure, clocks registered so
 * far are torn down and a negative errno is returned.
 */
static int skl_register_parent_clks(struct device *dev,
			struct skl_clk_parent *parent,
			struct skl_clk_parent_src *pclk)
{
	int i, ret;

	for (i = 0; i < SKL_MAX_CLK_SRC; i++) {

		/* Register Parent clock */
		parent[i].hw = clk_hw_register_fixed_rate(dev, pclk[i].name,
				pclk[i].parent_name, 0, pclk[i].rate);
		if (IS_ERR(parent[i].hw)) {
			ret = PTR_ERR(parent[i].hw);
			goto err;
		}

		parent[i].lookup = clkdev_hw_create(parent[i].hw, pclk[i].name,
									NULL);
		if (!parent[i].lookup) {
			/* Lookup failed: unwind this entry's hw here, the
			 * earlier entries via unregister_parent_src_clk().
			 */
			clk_hw_unregister_fixed_rate(parent[i].hw);
			ret = -ENOMEM;
			goto err;
		}
	}

	return 0;
err:
	/* i is the count of fully-registered entries at this point. */
	unregister_parent_src_clk(parent, i);
	return ret;
}
313 
/*
 * Allocate and register one SSP clock with the clk framework and create
 * its clkdev lookup. Returns the new skl_clk on success or an ERR_PTR.
 * The skl_clk itself and the clk_hw are devm-managed; only the lookup
 * needs explicit clkdev_drop() on teardown.
 */
static struct skl_clk *register_skl_clk(struct device *dev,
			struct skl_ssp_clk *clk,
			struct skl_clk_pdata *clk_pdata, int id)
{
	/* init is stack-local; presumably the clk core copies what it
	 * needs during registration — standard clk-provider pattern.
	 */
	struct clk_init_data init;
	struct skl_clk *clkdev;
	int ret;

	clkdev = devm_kzalloc(dev, sizeof(*clkdev), GFP_KERNEL);
	if (!clkdev)
		return ERR_PTR(-ENOMEM);

	init.name = clk->name;
	init.ops = &skl_clk_ops;
	init.flags = CLK_SET_RATE_GATE;
	init.parent_names = &clk->parent_name;
	init.num_parents = 1;
	clkdev->hw.init = &init;
	clkdev->pdata = clk_pdata;

	clkdev->id = id;
	ret = devm_clk_hw_register(dev, &clkdev->hw);
	if (ret) {
		clkdev = ERR_PTR(ret);
		return clkdev;
	}

	clkdev->lookup = clkdev_hw_create(&clkdev->hw, init.name, NULL);
	if (!clkdev->lookup)
		clkdev = ERR_PTR(-ENOMEM);

	return clkdev;
}
348 
349 static int skl_clk_dev_probe(struct platform_device *pdev)
350 {
351 	struct device *dev = &pdev->dev;
352 	struct device *parent_dev = dev->parent;
353 	struct skl_clk_parent_src *parent_clks;
354 	struct skl_clk_pdata *clk_pdata;
355 	struct skl_clk_data *data;
356 	struct skl_ssp_clk *clks;
357 	int ret, i;
358 
359 	clk_pdata = dev_get_platdata(&pdev->dev);
360 	parent_clks = clk_pdata->parent_clks;
361 	clks = clk_pdata->ssp_clks;
362 	if (!parent_clks || !clks)
363 		return -EIO;
364 
365 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
366 	if (!data)
367 		return -ENOMEM;
368 
369 	/* Register Parent clock */
370 	ret = skl_register_parent_clks(parent_dev, data->parent, parent_clks);
371 	if (ret < 0)
372 		return ret;
373 
374 	for (i = 0; i < clk_pdata->num_clks; i++) {
375 		/*
376 		 * Only register valid clocks
377 		 * i.e. for which nhlt entry is present.
378 		 */
379 		if (clks[i].rate_cfg[0].rate == 0)
380 			continue;
381 
382 		data->clk[data->avail_clk_cnt] = register_skl_clk(dev,
383 				&clks[i], clk_pdata, i);
384 
385 		if (IS_ERR(data->clk[data->avail_clk_cnt])) {
386 			ret = PTR_ERR(data->clk[data->avail_clk_cnt++]);
387 			goto err_unreg_skl_clk;
388 		}
389 	}
390 
391 	platform_set_drvdata(pdev, data);
392 
393 	return 0;
394 
395 err_unreg_skl_clk:
396 	unregister_src_clk(data);
397 	unregister_parent_src_clk(data->parent, SKL_MAX_CLK_SRC);
398 
399 	return ret;
400 }
401 
402 static int skl_clk_dev_remove(struct platform_device *pdev)
403 {
404 	struct skl_clk_data *data;
405 
406 	data = platform_get_drvdata(pdev);
407 	unregister_src_clk(data);
408 	unregister_parent_src_clk(data->parent, SKL_MAX_CLK_SRC);
409 
410 	return 0;
411 }
412 
/* Platform driver glue; binds by the "skl-ssp-clk" device name. */
static struct platform_driver skl_clk_driver = {
	.driver = {
		.name = "skl-ssp-clk",
	},
	.probe = skl_clk_dev_probe,
	.remove = skl_clk_dev_remove,
};

module_platform_driver(skl_clk_driver);

MODULE_DESCRIPTION("Skylake clock driver");
MODULE_AUTHOR("Jaikrishna Nemallapudi <jaikrishnax.nemallapudi@intel.com>");
MODULE_AUTHOR("Subhransu S. Prusty <subhransu.s.prusty@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:skl-ssp-clk");
428