// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Atom platform clocks driver for BayTrail and CherryTrail SoCs
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Irina Tirdea <irina.tirdea@intel.com>
 */

#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_data/x86/clk-pmc-atom.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define PLT_CLK_NAME_BASE		"pmc_plt_clk"

#define PMC_CLK_CTL_OFFSET		0x60
#define PMC_CLK_CTL_SIZE		4
#define PMC_CLK_NUM			6
#define PMC_CLK_CTL_GATED_ON_D3		0x0
#define PMC_CLK_CTL_FORCE_ON		0x1
#define PMC_CLK_CTL_FORCE_OFF		0x2
#define PMC_CLK_CTL_RESERVED		0x3
#define PMC_MASK_CLK_CTL		GENMASK(1, 0)
#define PMC_MASK_CLK_FREQ		BIT(2)
#define PMC_CLK_FREQ_XTAL		(0 << 2)	/* 25 MHz */
#define PMC_CLK_FREQ_PLL		(1 << 2)	/* 19.2 MHz */

struct clk_plt_fixed {
	struct clk_hw *clk;
	struct clk_lookup *lookup;
};

struct clk_plt {
	struct clk_hw hw;
	void __iomem *reg;
	struct clk_lookup *lookup;
	/* protect access to PMC registers */
	spinlock_t lock;
};

#define to_clk_plt(_hw) container_of(_hw, struct clk_plt, hw)

struct clk_plt_data {
	struct clk_plt_fixed **parents;
	u8 nparents;
	struct clk_plt *clks[PMC_CLK_NUM];
	struct clk_lookup *mclk_lookup;
	struct clk_lookup *ether_clk_lookup;
};

/* Return an index into the parent table */
static inline int plt_reg_to_parent(int reg)
{
	switch (reg & PMC_MASK_CLK_FREQ) {
	default:
	case PMC_CLK_FREQ_XTAL:
		return 0;
	case PMC_CLK_FREQ_PLL:
		return 1;
	}
}

/* Return the frequency-select register value for a given parent index */
static inline int plt_parent_to_reg(int index)
{
	switch (index) {
	default:
	case 0:
		return PMC_CLK_FREQ_XTAL;
	case 1:
		return PMC_CLK_FREQ_PLL;
	}
}

/* Abstract the status into a simple enabled/disabled value */
static inline int plt_reg_to_enabled(int reg)
{
	switch (reg & PMC_MASK_CLK_CTL) {
	case PMC_CLK_CTL_GATED_ON_D3:
	case PMC_CLK_CTL_FORCE_ON:
		return 1;	/* enabled */
	case PMC_CLK_CTL_FORCE_OFF:
	case PMC_CLK_CTL_RESERVED:
	default:
		return 0;	/* disabled */
	}
}

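/*
 * Read-modify-write of a PMC clock control register, serialized by the
 * per-clock spinlock.
 */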
static void plt_clk_reg_update(struct clk_plt *clk, u32 mask, u32 val)
{
	u32 tmp;
	unsigned long flags;

	spin_lock_irqsave(&clk->lock, flags);

	tmp = readl(clk->reg);
	tmp = (tmp & ~mask) | (val & mask);
	writel(tmp, clk->reg);

	spin_unlock_irqrestore(&clk->lock, flags);
}

static int plt_clk_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_plt *clk = to_clk_plt(hw);

	plt_clk_reg_update(clk, PMC_MASK_CLK_FREQ, plt_parent_to_reg(index));

	return 0;
}

static u8 plt_clk_get_parent(struct clk_hw *hw)
{
	struct clk_plt *clk = to_clk_plt(hw);
	u32 value;

	value = readl(clk->reg);

	return plt_reg_to_parent(value);
}

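/*
 * Enabling always forces the clock on; a firmware default of
 * PMC_CLK_CTL_GATED_ON_D3 also reads back as enabled, see
 * plt_reg_to_enabled().
 */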
static int plt_clk_enable(struct clk_hw *hw)
{
	struct clk_plt *clk = to_clk_plt(hw);

	plt_clk_reg_update(clk, PMC_MASK_CLK_CTL, PMC_CLK_CTL_FORCE_ON);

	return 0;
}

static void plt_clk_disable(struct clk_hw *hw)
{
	struct clk_plt *clk = to_clk_plt(hw);

	plt_clk_reg_update(clk, PMC_MASK_CLK_CTL, PMC_CLK_CTL_FORCE_OFF);
}

static int plt_clk_is_enabled(struct clk_hw *hw)
{
	struct clk_plt *clk = to_clk_plt(hw);
	u32 value;

	value = readl(clk->reg);

	return plt_reg_to_enabled(value);
}

static const struct clk_ops plt_clk_ops = {
	.enable = plt_clk_enable,
	.disable = plt_clk_disable,
	.is_enabled = plt_clk_is_enabled,
	.get_parent = plt_clk_get_parent,
	.set_parent = plt_clk_set_parent,
	.determine_rate = __clk_mux_determine_rate,
};

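/*
 * Both the clk core and clkdev copy the name string they are given, so
 * the kasprintf() buffer is freed on every exit path below.
 */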
static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
					const struct pmc_clk_data *pmc_data,
					const char **parent_names,
					int num_parents)
{
	struct clk_plt *pclk;
	struct clk_init_data init;
	int ret;

	pclk = devm_kzalloc(&pdev->dev, sizeof(*pclk), GFP_KERNEL);
	if (!pclk)
		return ERR_PTR(-ENOMEM);

	init.name = kasprintf(GFP_KERNEL, "%s_%d", PLT_CLK_NAME_BASE, id);
	init.ops = &plt_clk_ops;
	init.flags = 0;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	pclk->hw.init = &init;
	pclk->reg = pmc_data->base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
	spin_lock_init(&pclk->lock);

	/*
	 * On some systems, the pmc_plt_clocks already enabled by the
	 * firmware are being marked as critical to avoid them being
	 * gated by the clock framework.
	 */
	if (pmc_data->critical && plt_clk_is_enabled(&pclk->hw))
		init.flags |= CLK_IS_CRITICAL;

	ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
	if (ret) {
		pclk = ERR_PTR(ret);
		goto err_free_init;
	}

	pclk->lookup = clkdev_hw_create(&pclk->hw, init.name, NULL);
	if (!pclk->lookup) {
		pclk = ERR_PTR(-ENOMEM);
		goto err_free_init;
	}

err_free_init:
	kfree(init.name);
	return pclk;
}

static void plt_clk_unregister(struct clk_plt *pclk)
{
	clkdev_drop(pclk->lookup);
}

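/* Register one fixed-rate parent clock and a matching clkdev lookup */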
static struct clk_plt_fixed *plt_clk_register_fixed_rate(struct platform_device *pdev,
							  const char *name,
							  const char *parent_name,
							  unsigned long fixed_rate)
{
	struct clk_plt_fixed *pclk;

	pclk = devm_kzalloc(&pdev->dev, sizeof(*pclk), GFP_KERNEL);
	if (!pclk)
		return ERR_PTR(-ENOMEM);

	pclk->clk = clk_hw_register_fixed_rate(&pdev->dev, name, parent_name,
					       0, fixed_rate);
	if (IS_ERR(pclk->clk))
		return ERR_CAST(pclk->clk);

	pclk->lookup = clkdev_hw_create(pclk->clk, name, NULL);
	if (!pclk->lookup) {
		clk_hw_unregister_fixed_rate(pclk->clk);
		return ERR_PTR(-ENOMEM);
	}

	return pclk;
}

static void plt_clk_unregister_fixed_rate(struct clk_plt_fixed *pclk)
{
	clkdev_drop(pclk->lookup);
	clk_hw_unregister_fixed_rate(pclk->clk);
}

static void plt_clk_unregister_fixed_rate_loop(struct clk_plt_data *data,
					       unsigned int i)
{
	while (i--)
		plt_clk_unregister_fixed_rate(data->parents[i]);
}

static void plt_clk_free_parent_names_loop(const char **parent_names,
					   unsigned int i)
{
	while (i--)
		kfree_const(parent_names[i]);
	kfree(parent_names);
}

static void plt_clk_unregister_loop(struct clk_plt_data *data,
				    unsigned int i)
{
	while (i--)
		plt_clk_unregister(data->clks[i]);
}

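/*
 * Register a fixed-rate clock for each possible parent.  The pmc_clk
 * array passed in via platform data is terminated by an entry with a
 * NULL name.
 */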
static const char **plt_clk_register_parents(struct platform_device *pdev,
					     struct clk_plt_data *data,
					     const struct pmc_clk *clks)
{
	const char **parent_names;
	unsigned int i;
	int err;
	int nparents = 0;

	data->nparents = 0;
	while (clks[nparents].name)
		nparents++;

	data->parents = devm_kcalloc(&pdev->dev, nparents,
				     sizeof(*data->parents), GFP_KERNEL);
	if (!data->parents)
		return ERR_PTR(-ENOMEM);

	parent_names = kcalloc(nparents, sizeof(*parent_names),
			       GFP_KERNEL);
	if (!parent_names)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nparents; i++) {
		data->parents[i] =
			plt_clk_register_fixed_rate(pdev, clks[i].name,
						    clks[i].parent_name,
						    clks[i].freq);
		if (IS_ERR(data->parents[i])) {
			err = PTR_ERR(data->parents[i]);
			goto err_unreg;
		}
		parent_names[i] = kstrdup_const(clks[i].name, GFP_KERNEL);
	}

	data->nparents = nparents;
	return parent_names;

err_unreg:
	plt_clk_unregister_fixed_rate_loop(data, i);
	plt_clk_free_parent_names_loop(parent_names, i);
	return ERR_PTR(err);
}

static void plt_clk_unregister_parents(struct clk_plt_data *data)
{
	plt_clk_unregister_fixed_rate_loop(data, data->nparents);
}

static int plt_clk_probe(struct platform_device *pdev)
{
	const struct pmc_clk_data *pmc_data;
	const char **parent_names;
	struct clk_plt_data *data;
	unsigned int i;
	int err;

	pmc_data = dev_get_platdata(&pdev->dev);
	if (!pmc_data || !pmc_data->clks)
		return -EINVAL;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	parent_names = plt_clk_register_parents(pdev, data, pmc_data->clks);
	if (IS_ERR(parent_names))
		return PTR_ERR(parent_names);

	for (i = 0; i < PMC_CLK_NUM; i++) {
		data->clks[i] = plt_clk_register(pdev, i, pmc_data,
						 parent_names, data->nparents);
		if (IS_ERR(data->clks[i])) {
			err = PTR_ERR(data->clks[i]);
			goto err_unreg_clk_plt;
		}
	}
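	/* Well-known consumer names for pmc_plt_clk_3 and pmc_plt_clk_4 */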
	data->mclk_lookup = clkdev_hw_create(&data->clks[3]->hw, "mclk", NULL);
	if (!data->mclk_lookup) {
		err = -ENOMEM;
		goto err_unreg_clk_plt;
	}

	data->ether_clk_lookup = clkdev_hw_create(&data->clks[4]->hw,
						  "ether_clk", NULL);
	if (!data->ether_clk_lookup) {
		err = -ENOMEM;
		goto err_drop_mclk;
	}

	plt_clk_free_parent_names_loop(parent_names, data->nparents);

	platform_set_drvdata(pdev, data);
	return 0;

err_drop_mclk:
	clkdev_drop(data->mclk_lookup);
err_unreg_clk_plt:
	plt_clk_unregister_loop(data, i);
	plt_clk_unregister_parents(data);
	plt_clk_free_parent_names_loop(parent_names, data->nparents);
	return err;
}

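/* Undo everything set up in plt_clk_probe(), in reverse order */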
static void plt_clk_remove(struct platform_device *pdev)
{
	struct clk_plt_data *data;

	data = platform_get_drvdata(pdev);

	clkdev_drop(data->ether_clk_lookup);
	clkdev_drop(data->mclk_lookup);
	plt_clk_unregister_loop(data, PMC_CLK_NUM);
	plt_clk_unregister_parents(data);
}

static struct platform_driver plt_clk_driver = {
	.driver = {
		.name = "clk-pmc-atom",
	},
	.probe = plt_clk_probe,
	.remove_new = plt_clk_remove,
};
builtin_platform_driver(plt_clk_driver);