// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 */

/*
 * Each of the CPU clusters (Power and Perf) on msm8996 is
 * clocked via 2 PLLs, a primary and an alternate. There are also
 * 2 muxes, a primary and a secondary, all connected together
 * as shown below
 *
 *                               +-------+
 *                XO             |       |
 *            +------------------>0      |
 *                SYS_APCS_AUX   |       |
 *            +------------------>3      |
 *                               |       |
 *                     PLL/2     | SMUX  +----+
 *                       +------->1      |    |
 *                       |       |       |    |
 *                       |       +-------+    |    +-------+
 *                       |                    +---->0      |
 *                       |                         |       |
 *  +---------------+    |             +----------->1      | CPU clk
 *  |Primary PLL    +----+ PLL_EARLY   |           |       +------>
 *  |               +------+-----------+    +------>2 PMUX |
 *  +---------------+      |                |      |       |
 *                         |   +------+     |   +-->3      |
 *                         +--^+ ACD  +-----+   |  +-------+
 *  +---------------+          +------+         |
 *  |Alt PLL        |                           |
 *  |               +---------------------------+
 *  +---------------+          PLL_EARLY
 *
 * The primary PLL is what drives the CPU clk, except while the PLL
 * itself is being reprogrammed for a rate change, during which we
 * temporarily switch to the alternate PLL.
 *
 * The primary PLL operates on a single VCO range, between 600MHz
 * and 3GHz. However the CPUs do support OPPs with frequencies
 * between 300MHz and 600MHz. In order to support running the CPUs
 * at those frequencies we end up having to lock the PLL at twice
 * the rate and drive the CPU clk via the PLL/2 output and SMUX.
 *
 * So for frequencies above 600MHz we follow the path
 * Primary PLL --> PLL_EARLY --> PMUX(1) --> CPU clk
 * and for frequencies between 300MHz and 600MHz we follow
 * Primary PLL --> PLL/2 --> SMUX(1) --> PMUX(0) --> CPU clk
 *
 * ACD stands for Adaptive Clock Distribution and is used to
 * detect voltage droops.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <soc/qcom/kryo-l2-accessors.h>

#include <asm/cputype.h>

#include "clk-alpha-pll.h"
#include "clk-regmap.h"
#include "clk-regmap-mux.h"

enum _pmux_input {
        SMUX_INDEX = 0,
        PLL_INDEX,
        ACD_INDEX,
        ALT_INDEX,
        NUM_OF_PMUX_INPUTS
};

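/* Below this rate the CPU clk is fed from the PLL/2 output via SMUX */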
#define DIV_2_THRESHOLD                 600000000
#define PWRCL_REG_OFFSET                0x0
#define PERFCL_REG_OFFSET               0x80000
#define MUX_OFFSET                      0x40
#define CLK_CTL_OFFSET                  0x44
#define CLK_CTL_AUTO_CLK_SEL            BIT(8)
#define ALT_PLL_OFFSET                  0x100
#define SSSCTL_OFFSET                   0x160
#define PSCTL_OFFSET                    0x164

#define PMUX_MASK                       0x3
#define MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK GENMASK(5, 4)
#define MUX_AUTO_CLK_SEL_ALWAYS_ON_GPLL0_SEL \
        FIELD_PREP(MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK, 0x03)

static const u8 prim_pll_regs[PLL_OFF_MAX_REGS] = {
        [PLL_OFF_L_VAL] = 0x04,
        [PLL_OFF_ALPHA_VAL] = 0x08,
        [PLL_OFF_USER_CTL] = 0x10,
        [PLL_OFF_CONFIG_CTL] = 0x18,
        [PLL_OFF_CONFIG_CTL_U] = 0x1c,
        [PLL_OFF_TEST_CTL] = 0x20,
        [PLL_OFF_TEST_CTL_U] = 0x24,
        [PLL_OFF_STATUS] = 0x28,
};

static const u8 alt_pll_regs[PLL_OFF_MAX_REGS] = {
        [PLL_OFF_L_VAL] = 0x04,
        [PLL_OFF_ALPHA_VAL] = 0x08,
        [PLL_OFF_USER_CTL] = 0x10,
        [PLL_OFF_CONFIG_CTL] = 0x18,
        [PLL_OFF_TEST_CTL] = 0x20,
        [PLL_OFF_STATUS] = 0x28,
};

/* PLLs */

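/*
 * Initial configuration for the primary (high-frequency) PLLs. Assuming the
 * platform's 19.2 MHz XO reference, l = 54 locks the PLL at roughly 1036.8 MHz.
 */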
static const struct alpha_pll_config hfpll_config = {
        .l = 54,
        .config_ctl_val = 0x200d4828,
        .config_ctl_hi_val = 0x006,
        .test_ctl_val = 0x1c000000,
        .test_ctl_hi_val = 0x00004000,
        .pre_div_mask = BIT(12),
        .post_div_mask = 0x3 << 8,
        .post_div_val = 0x1 << 8,
        .main_output_mask = BIT(0),
        .early_output_mask = BIT(3),
};

static const struct clk_parent_data pll_parent[] = {
        { .fw_name = "xo" },
};

static struct clk_alpha_pll pwrcl_pll = {
        .offset = PWRCL_REG_OFFSET,
        .regs = prim_pll_regs,
        .flags = SUPPORTS_DYNAMIC_UPDATE | SUPPORTS_FSM_MODE,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "pwrcl_pll",
                .parent_data = pll_parent,
                .num_parents = ARRAY_SIZE(pll_parent),
                .ops = &clk_alpha_pll_hwfsm_ops,
        },
};

static struct clk_alpha_pll perfcl_pll = {
        .offset = PERFCL_REG_OFFSET,
        .regs = prim_pll_regs,
        .flags = SUPPORTS_DYNAMIC_UPDATE | SUPPORTS_FSM_MODE,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "perfcl_pll",
                .parent_data = pll_parent,
                .num_parents = ARRAY_SIZE(pll_parent),
                .ops = &clk_alpha_pll_hwfsm_ops,
        },
};

static struct clk_fixed_factor pwrcl_pll_postdiv = {
        .mult = 1,
        .div = 2,
        .hw.init = &(struct clk_init_data){
                .name = "pwrcl_pll_postdiv",
                .parent_data = &(const struct clk_parent_data){
                        .hw = &pwrcl_pll.clkr.hw
                },
                .num_parents = 1,
                .ops = &clk_fixed_factor_ops,
                .flags = CLK_SET_RATE_PARENT,
        },
};

static struct clk_fixed_factor perfcl_pll_postdiv = {
        .mult = 1,
        .div = 2,
        .hw.init = &(struct clk_init_data){
                .name = "perfcl_pll_postdiv",
                .parent_data = &(const struct clk_parent_data){
                        .hw = &perfcl_pll.clkr.hw
                },
                .num_parents = 1,
                .ops = &clk_fixed_factor_ops,
                .flags = CLK_SET_RATE_PARENT,
        },
};

static struct clk_fixed_factor perfcl_pll_acd = {
        .mult = 1,
        .div = 1,
        .hw.init = &(struct clk_init_data){
                .name = "perfcl_pll_acd",
                .parent_data = &(const struct clk_parent_data){
                        .hw = &perfcl_pll.clkr.hw
                },
                .num_parents = 1,
                .ops = &clk_fixed_factor_ops,
                .flags = CLK_SET_RATE_PARENT,
        },
};

static struct clk_fixed_factor pwrcl_pll_acd = {
        .mult = 1,
        .div = 1,
        .hw.init = &(struct clk_init_data){
                .name = "pwrcl_pll_acd",
                .parent_data = &(const struct clk_parent_data){
                        .hw = &pwrcl_pll.clkr.hw
                },
                .num_parents = 1,
                .ops = &clk_fixed_factor_ops,
                .flags = CLK_SET_RATE_PARENT,
        },
};

static const struct pll_vco alt_pll_vco_modes[] = {
        VCO(3,  250000000,  500000000),
        VCO(2,  500000000,  750000000),
        VCO(1,  750000000, 1000000000),
        VCO(0, 1000000000, 2150400000),
};

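/*
 * Initial configuration for the alternate PLLs. Assuming the 19.2 MHz XO
 * reference, l = 16 corresponds to an initial rate of roughly 307.2 MHz.
 */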
static const struct alpha_pll_config altpll_config = {
        .l = 16,
        .vco_val = 0x3 << 20,
        .vco_mask = 0x3 << 20,
        .config_ctl_val = 0x4001051b,
        .post_div_mask = 0x3 << 8,
        .post_div_val = 0x1 << 8,
        .main_output_mask = BIT(0),
        .early_output_mask = BIT(3),
};

static struct clk_alpha_pll pwrcl_alt_pll = {
        .offset = PWRCL_REG_OFFSET + ALT_PLL_OFFSET,
        .regs = alt_pll_regs,
        .vco_table = alt_pll_vco_modes,
        .num_vco = ARRAY_SIZE(alt_pll_vco_modes),
        .flags = SUPPORTS_OFFLINE_REQ | SUPPORTS_FSM_MODE,
        .clkr.hw.init = &(struct clk_init_data) {
                .name = "pwrcl_alt_pll",
                .parent_data = pll_parent,
                .num_parents = ARRAY_SIZE(pll_parent),
                .ops = &clk_alpha_pll_hwfsm_ops,
        },
};

static struct clk_alpha_pll perfcl_alt_pll = {
        .offset = PERFCL_REG_OFFSET + ALT_PLL_OFFSET,
        .regs = alt_pll_regs,
        .vco_table = alt_pll_vco_modes,
        .num_vco = ARRAY_SIZE(alt_pll_vco_modes),
        .flags = SUPPORTS_OFFLINE_REQ | SUPPORTS_FSM_MODE,
        .clkr.hw.init = &(struct clk_init_data) {
                .name = "perfcl_alt_pll",
                .parent_data = pll_parent,
                .num_parents = ARRAY_SIZE(pll_parent),
                .ops = &clk_alpha_pll_hwfsm_ops,
        },
};

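/*
 * Primary mux (PMUX) wrapper. The notifier callback parks the mux on the
 * SMUX (PLL/2) leg before a rate change that crosses DIV_2_THRESHOLD, so the
 * CPU is never temporarily clocked at twice the expected rate.
 */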
struct clk_cpu_8996_pmux {
        u32 reg;
        struct notifier_block nb;
        struct clk_regmap clkr;
};

static int cpu_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
                               void *data);

#define to_clk_cpu_8996_pmux_nb(_nb) \
        container_of(_nb, struct clk_cpu_8996_pmux, nb)

static inline struct clk_cpu_8996_pmux *to_clk_cpu_8996_pmux_hw(struct clk_hw *hw)
{
        return container_of(to_clk_regmap(hw), struct clk_cpu_8996_pmux, clkr);
}

static u8 clk_cpu_8996_pmux_get_parent(struct clk_hw *hw)
{
        struct clk_regmap *clkr = to_clk_regmap(hw);
        struct clk_cpu_8996_pmux *cpuclk = to_clk_cpu_8996_pmux_hw(hw);
        u32 val;

        regmap_read(clkr->regmap, cpuclk->reg, &val);

        return FIELD_GET(PMUX_MASK, val);
}

static int clk_cpu_8996_pmux_set_parent(struct clk_hw *hw, u8 index)
{
        struct clk_regmap *clkr = to_clk_regmap(hw);
        struct clk_cpu_8996_pmux *cpuclk = to_clk_cpu_8996_pmux_hw(hw);
        u32 val;

        val = FIELD_PREP(PMUX_MASK, index);

        return regmap_update_bits(clkr->regmap, cpuclk->reg, PMUX_MASK, val);
}

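/*
 * Pick the parent leg for a requested rate: below DIV_2_THRESHOLD the CPU
 * clk is fed from the PLL/2 output via SMUX, otherwise from the ACD leg of
 * the primary PLL.
 */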
static int clk_cpu_8996_pmux_determine_rate(struct clk_hw *hw,
                                            struct clk_rate_request *req)
{
        struct clk_hw *parent;

        if (req->rate < (DIV_2_THRESHOLD / 2))
                return -EINVAL;

        if (req->rate < DIV_2_THRESHOLD)
                parent = clk_hw_get_parent_by_index(hw, SMUX_INDEX);
        else
                parent = clk_hw_get_parent_by_index(hw, ACD_INDEX);
        if (!parent)
                return -EINVAL;

        req->best_parent_rate = clk_hw_round_rate(parent, req->rate);
        req->best_parent_hw = parent;

        return 0;
}

static const struct clk_ops clk_cpu_8996_pmux_ops = {
        .set_parent = clk_cpu_8996_pmux_set_parent,
        .get_parent = clk_cpu_8996_pmux_get_parent,
        .determine_rate = clk_cpu_8996_pmux_determine_rate,
};

static const struct parent_map smux_parent_map[] = {
        { .cfg = 0, }, /* xo */
        { .cfg = 1, }, /* pll */
        { .cfg = 3, }, /* sys_apcs_aux */
};

static const struct clk_parent_data pwrcl_smux_parents[] = {
        { .fw_name = "xo" },
        { .hw = &pwrcl_pll_postdiv.hw },
        { .fw_name = "sys_apcs_aux" },
};

static const struct clk_parent_data perfcl_smux_parents[] = {
        { .fw_name = "xo" },
        { .hw = &perfcl_pll_postdiv.hw },
        { .fw_name = "sys_apcs_aux" },
};

static struct clk_regmap_mux pwrcl_smux = {
        .reg = PWRCL_REG_OFFSET + MUX_OFFSET,
        .shift = 2,
        .width = 2,
        .parent_map = smux_parent_map,
        .clkr.hw.init = &(struct clk_init_data) {
                .name = "pwrcl_smux",
                .parent_data = pwrcl_smux_parents,
                .num_parents = ARRAY_SIZE(pwrcl_smux_parents),
                .ops = &clk_regmap_mux_closest_ops,
                .flags = CLK_SET_RATE_PARENT,
        },
};

static struct clk_regmap_mux perfcl_smux = {
        .reg = PERFCL_REG_OFFSET + MUX_OFFSET,
        .shift = 2,
        .width = 2,
        .parent_map = smux_parent_map,
        .clkr.hw.init = &(struct clk_init_data) {
                .name = "perfcl_smux",
                .parent_data = perfcl_smux_parents,
                .num_parents = ARRAY_SIZE(perfcl_smux_parents),
                .ops = &clk_regmap_mux_closest_ops,
                .flags = CLK_SET_RATE_PARENT,
        },
};

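/* The PMUX parent tables below are indexed by enum _pmux_input */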
static const struct clk_hw *pwrcl_pmux_parents[] = {
        [SMUX_INDEX] = &pwrcl_smux.clkr.hw,
        [PLL_INDEX] = &pwrcl_pll.clkr.hw,
        [ACD_INDEX] = &pwrcl_pll_acd.hw,
        [ALT_INDEX] = &pwrcl_alt_pll.clkr.hw,
};

static const struct clk_hw *perfcl_pmux_parents[] = {
        [SMUX_INDEX] = &perfcl_smux.clkr.hw,
        [PLL_INDEX] = &perfcl_pll.clkr.hw,
        [ACD_INDEX] = &perfcl_pll_acd.hw,
        [ALT_INDEX] = &perfcl_alt_pll.clkr.hw,
};

static struct clk_cpu_8996_pmux pwrcl_pmux = {
        .reg = PWRCL_REG_OFFSET + MUX_OFFSET,
        .nb.notifier_call = cpu_clk_notifier_cb,
        .clkr.hw.init = &(struct clk_init_data) {
                .name = "pwrcl_pmux",
                .parent_hws = pwrcl_pmux_parents,
                .num_parents = ARRAY_SIZE(pwrcl_pmux_parents),
                .ops = &clk_cpu_8996_pmux_ops,
                /* CPU clock is critical and should never be gated */
                .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
        },
};

static struct clk_cpu_8996_pmux perfcl_pmux = {
        .reg = PERFCL_REG_OFFSET + MUX_OFFSET,
        .nb.notifier_call = cpu_clk_notifier_cb,
        .clkr.hw.init = &(struct clk_init_data) {
                .name = "perfcl_pmux",
                .parent_hws = perfcl_pmux_parents,
                .num_parents = ARRAY_SIZE(perfcl_pmux_parents),
                .ops = &clk_cpu_8996_pmux_ops,
                /* CPU clock is critical and should never be gated */
                .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
        },
};

static const struct regmap_config cpu_msm8996_regmap_config = {
        .reg_bits = 32,
        .reg_stride = 4,
        .val_bits = 32,
        .max_register = 0x80210,
        .fast_io = true,
        .val_format_endian = REGMAP_ENDIAN_LITTLE,
};

static struct clk_hw *cpu_msm8996_hw_clks[] = {
        &pwrcl_pll_postdiv.hw,
        &perfcl_pll_postdiv.hw,
        &pwrcl_pll_acd.hw,
        &perfcl_pll_acd.hw,
};

static struct clk_regmap *cpu_msm8996_clks[] = {
        &pwrcl_pll.clkr,
        &perfcl_pll.clkr,
        &pwrcl_alt_pll.clkr,
        &perfcl_alt_pll.clkr,
        &pwrcl_smux.clkr,
        &perfcl_smux.clkr,
        &pwrcl_pmux.clkr,
        &perfcl_pmux.clkr,
};

static void qcom_cpu_clk_msm8996_acd_init(struct regmap *regmap);

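/*
 * One-time setup of both clusters: park the muxes on GPLL0, configure and
 * lock the PLLs, set up ACD and the pulse swallower, then register the
 * clocks with the framework.
 */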
static int qcom_cpu_clk_msm8996_register_clks(struct device *dev,
                                              struct regmap *regmap)
{
        int i, ret;

        /* Select GPLL0 for 300MHz for both clusters */
        regmap_write(regmap, PERFCL_REG_OFFSET + MUX_OFFSET, 0xc);
        regmap_write(regmap, PWRCL_REG_OFFSET + MUX_OFFSET, 0xc);

        /* Ensure write goes through before PLLs are reconfigured */
        udelay(5);

        /* Set the auto clock sel always-on source to GPLL0/2 (300MHz) */
        regmap_update_bits(regmap, PWRCL_REG_OFFSET + MUX_OFFSET,
                           MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK,
                           MUX_AUTO_CLK_SEL_ALWAYS_ON_GPLL0_SEL);
        regmap_update_bits(regmap, PERFCL_REG_OFFSET + MUX_OFFSET,
                           MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK,
                           MUX_AUTO_CLK_SEL_ALWAYS_ON_GPLL0_SEL);

        clk_alpha_pll_configure(&pwrcl_pll, regmap, &hfpll_config);
        clk_alpha_pll_configure(&perfcl_pll, regmap, &hfpll_config);
        clk_alpha_pll_configure(&pwrcl_alt_pll, regmap, &altpll_config);
        clk_alpha_pll_configure(&perfcl_alt_pll, regmap, &altpll_config);

        /* Wait for PLL(s) to lock */
        udelay(50);

        /* Enable auto clock selection for both clusters */
        regmap_update_bits(regmap, PWRCL_REG_OFFSET + CLK_CTL_OFFSET,
                           CLK_CTL_AUTO_CLK_SEL, CLK_CTL_AUTO_CLK_SEL);
        regmap_update_bits(regmap, PERFCL_REG_OFFSET + CLK_CTL_OFFSET,
                           CLK_CTL_AUTO_CLK_SEL, CLK_CTL_AUTO_CLK_SEL);

        /* Ensure write goes through before muxes are switched */
        udelay(5);

        qcom_cpu_clk_msm8996_acd_init(regmap);

        /* Pulse swallower and soft-start settings */
        regmap_write(regmap, PWRCL_REG_OFFSET + PSCTL_OFFSET, 0x00030005);
        regmap_write(regmap, PERFCL_REG_OFFSET + PSCTL_OFFSET, 0x00030005);

        /* Switch clusters to use the ACD leg */
        regmap_write(regmap, PWRCL_REG_OFFSET + MUX_OFFSET, 0x32);
        regmap_write(regmap, PERFCL_REG_OFFSET + MUX_OFFSET, 0x32);

        for (i = 0; i < ARRAY_SIZE(cpu_msm8996_hw_clks); i++) {
                ret = devm_clk_hw_register(dev, cpu_msm8996_hw_clks[i]);
                if (ret)
                        return ret;
        }

        for (i = 0; i < ARRAY_SIZE(cpu_msm8996_clks); i++) {
                ret = devm_clk_register_regmap(dev, cpu_msm8996_clks[i]);
                if (ret)
                        return ret;
        }

        /* Enable alt PLLs */
        clk_prepare_enable(pwrcl_alt_pll.clkr.hw.clk);
        clk_prepare_enable(perfcl_alt_pll.clkr.hw.clk);

        ret = devm_clk_notifier_register(dev, pwrcl_pmux.clkr.hw.clk,
                                         &pwrcl_pmux.nb);
        if (ret)
                return ret;

        return devm_clk_notifier_register(dev, perfcl_pmux.clkr.hw.clk,
                                          &perfcl_pmux.nb);
}

#define CPU_CLUSTER_AFFINITY_MASK       0xf00
#define PWRCL_AFFINITY_MASK             0x000
#define PERFCL_AFFINITY_MASK            0x100

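/*
 * Indirect L2 registers (accessed via the Kryo L2 accessors) that hold the
 * ACD configuration.
 */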
#define L2ACDCR_REG                     0x580ULL
#define L2ACDTD_REG                     0x581ULL
#define L2ACDDVMRC_REG                  0x584ULL
#define L2ACDSSCR_REG                   0x589ULL

static DEFINE_SPINLOCK(qcom_clk_acd_lock);

static void qcom_cpu_clk_msm8996_acd_init(struct regmap *regmap)
{
        u64 hwid;
        u32 val;
        unsigned long flags;

        spin_lock_irqsave(&qcom_clk_acd_lock, flags);

        val = kryo_l2_get_indirect_reg(L2ACDTD_REG);
        if (val == 0x00006a11)
                goto out;

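        /*
         * Program the ACD configuration. These magic values are presumably
         * inherited from the downstream (vendor) sources; their exact
         * encoding is not documented here.
         */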
        kryo_l2_set_indirect_reg(L2ACDTD_REG, 0x00006a11);
        kryo_l2_set_indirect_reg(L2ACDDVMRC_REG, 0x000e0f0f);
        kryo_l2_set_indirect_reg(L2ACDSSCR_REG, 0x00000601);

        kryo_l2_set_indirect_reg(L2ACDCR_REG, 0x002c5ffd);

        hwid = read_cpuid_mpidr();
        if ((hwid & CPU_CLUSTER_AFFINITY_MASK) == PWRCL_AFFINITY_MASK)
                regmap_write(regmap, PWRCL_REG_OFFSET + SSSCTL_OFFSET, 0xf);
        else
                regmap_write(regmap, PERFCL_REG_OFFSET + SSSCTL_OFFSET, 0xf);

out:
        spin_unlock_irqrestore(&qcom_clk_acd_lock, flags);
}

static int cpu_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
                               void *data)
{
        struct clk_cpu_8996_pmux *cpuclk = to_clk_cpu_8996_pmux_nb(nb);
        struct clk_notifier_data *cnd = data;

        switch (event) {
        case PRE_RATE_CHANGE:
                qcom_cpu_clk_msm8996_acd_init(cpuclk->clkr.regmap);

                /*
                 * Avoid overvolting. clk_core_set_rate_nolock() walks from top
                 * to bottom, so it will change the rate of the PLL before
                 * changing the parent of PMUX. This can result in the PMUX
                 * being clocked at twice the expected rate.
                 *
                 * Manually switch to PLL/2 here.
                 */
                if (cnd->new_rate < DIV_2_THRESHOLD &&
                    cnd->old_rate > DIV_2_THRESHOLD)
                        clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw, SMUX_INDEX);

                break;
        case ABORT_RATE_CHANGE:
                /* Revert the manual change */
                if (cnd->new_rate < DIV_2_THRESHOLD &&
                    cnd->old_rate > DIV_2_THRESHOLD)
                        clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw, ACD_INDEX);
                break;
        default:
                break;
        }

        return NOTIFY_OK;
}

static int qcom_cpu_clk_msm8996_driver_probe(struct platform_device *pdev)
{
        void __iomem *base;
        struct regmap *regmap;
        struct clk_hw_onecell_data *data;
        struct device *dev = &pdev->dev;
        int ret;

        data = devm_kzalloc(dev, struct_size(data, hws, 2), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        data->num = 2;

        base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base))
                return PTR_ERR(base);

        regmap = devm_regmap_init_mmio(dev, base, &cpu_msm8996_regmap_config);
        if (IS_ERR(regmap))
                return PTR_ERR(regmap);

        ret = qcom_cpu_clk_msm8996_register_clks(dev, regmap);
        if (ret)
                return ret;

        data->hws[0] = &pwrcl_pmux.clkr.hw;
        data->hws[1] = &perfcl_pmux.clkr.hw;

        return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, data);
}

static const struct of_device_id qcom_cpu_clk_msm8996_match_table[] = {
        { .compatible = "qcom,msm8996-apcc" },
        {}
};
MODULE_DEVICE_TABLE(of, qcom_cpu_clk_msm8996_match_table);

static struct platform_driver qcom_cpu_clk_msm8996_driver = {
        .probe = qcom_cpu_clk_msm8996_driver_probe,
        .driver = {
                .name = "qcom-msm8996-apcc",
                .of_match_table = qcom_cpu_clk_msm8996_match_table,
        },
};
module_platform_driver(qcom_cpu_clk_msm8996_driver);

MODULE_DESCRIPTION("QCOM MSM8996 CPU Clock Driver");
MODULE_LICENSE("GPL v2");