// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 */

/*
 * Each of the CPU clusters (Power and Perf) on msm8996 is
 * clocked via 2 PLLs, a primary and an alternate. There are also
 * 2 muxes, a primary and a secondary, all connected together
 * as shown below
 *
 *                              +-------+
 *               XO             |       |
 *           +------------------>0      |
 *                              |       |
 *                    PLL/2     | SMUX  +----+
 *                      +------->1      |    |
 *                      |       |       |    |
 *                      |       +-------+    |    +-------+
 *                      |                    +---->0      |
 *                      |                         |       |
 * +---------------+    |             +----------->1      | CPU clk
 * |Primary PLL    +----+ PLL_EARLY   |           |       +------>
 * |               +------+-----------+    +------>2 PMUX |
 * +---------------+      |                |      |       |
 *                        |   +------+     |   +-->3      |
 *                        +--^+  ACD +-----+   |  +-------+
 * +---------------+          +------+         |
 * |Alt PLL        |                           |
 * |               +---------------------------+
 * +---------------+         PLL_EARLY
 *
 * The primary PLL is what drives the CPU clk, except while the
 * PLL itself is being reprogrammed (for rate changes), during
 * which we temporarily switch to the alternate PLL.
 *
 * The primary PLL operates on a single VCO range, between 600MHz
 * and 3GHz. However, the CPUs do support OPPs with frequencies
 * between 300MHz and 600MHz. In order to support running the CPUs
 * at those frequencies we end up having to lock the PLL at twice
 * the rate and drive the CPU clk via the PLL/2 output and SMUX.
 *
 * So for frequencies above 600MHz we use the following path
 *  Primary PLL --> PLL_EARLY --> PMUX(1) --> CPU clk
 * and for frequencies between 300MHz and 600MHz we use
 *  Primary PLL --> PLL/2 --> SMUX(1) --> PMUX(0) --> CPU clk
 *
 * ACD stands for Adaptive Clock Distribution and is used to
 * detect voltage droops.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <soc/qcom/kryo-l2-accessors.h>

#include "clk-alpha-pll.h"
#include "clk-regmap.h"
#include "clk-regmap-mux.h"

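/*
 * PMUX input selector values, matching the order of the
 * pwrcl_pmux_parents / perfcl_pmux_parents tables below.
 */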
enum _pmux_input {
	SMUX_INDEX = 0,
	PLL_INDEX,
	ACD_INDEX,
	ALT_INDEX,
	NUM_OF_PMUX_INPUTS
};

#define DIV_2_THRESHOLD		600000000
#define PWRCL_REG_OFFSET 0x0
#define PERFCL_REG_OFFSET 0x80000
#define MUX_OFFSET	0x40
#define ALT_PLL_OFFSET	0x100
#define SSSCTL_OFFSET 0x160

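/*
 * Register offset layouts of the primary (Huayra-type) and alternate
 * Alpha PLLs, relative to each PLL's base offset.
 */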
static const u8 prim_pll_regs[PLL_OFF_MAX_REGS] = {
	[PLL_OFF_L_VAL] = 0x04,
	[PLL_OFF_ALPHA_VAL] = 0x08,
	[PLL_OFF_USER_CTL] = 0x10,
	[PLL_OFF_CONFIG_CTL] = 0x18,
	[PLL_OFF_CONFIG_CTL_U] = 0x1c,
	[PLL_OFF_TEST_CTL] = 0x20,
	[PLL_OFF_TEST_CTL_U] = 0x24,
	[PLL_OFF_STATUS] = 0x28,
};

static const u8 alt_pll_regs[PLL_OFF_MAX_REGS] = {
	[PLL_OFF_L_VAL] = 0x04,
	[PLL_OFF_ALPHA_VAL] = 0x08,
	[PLL_OFF_ALPHA_VAL_U] = 0x0c,
	[PLL_OFF_USER_CTL] = 0x10,
	[PLL_OFF_USER_CTL_U] = 0x14,
	[PLL_OFF_CONFIG_CTL] = 0x18,
	[PLL_OFF_TEST_CTL] = 0x20,
	[PLL_OFF_TEST_CTL_U] = 0x24,
	[PLL_OFF_STATUS] = 0x28,
};

/* PLLs */

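/*
 * Initial configuration for the primary PLLs of both clusters;
 * assuming the usual 19.2 MHz XO, L = 60 corresponds to 1152 MHz.
 */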
static const struct alpha_pll_config hfpll_config = {
	.l = 60,
	.config_ctl_val = 0x200d4aa8,
	.config_ctl_hi_val = 0x006,
	.pre_div_mask = BIT(12),
	.post_div_mask = 0x3 << 8,
	.post_div_val = 0x1 << 8,
	.main_output_mask = BIT(0),
	.early_output_mask = BIT(3),
};

static const struct clk_parent_data pll_parent[] = {
	{ .fw_name = "xo" },
};

static struct clk_alpha_pll pwrcl_pll = {
	.offset = PWRCL_REG_OFFSET,
	.regs = prim_pll_regs,
	.flags = SUPPORTS_DYNAMIC_UPDATE | SUPPORTS_FSM_MODE,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "pwrcl_pll",
		.parent_data = pll_parent,
		.num_parents = ARRAY_SIZE(pll_parent),
		.ops = &clk_alpha_pll_huayra_ops,
	},
};

static struct clk_alpha_pll perfcl_pll = {
	.offset = PERFCL_REG_OFFSET,
	.regs = prim_pll_regs,
	.flags = SUPPORTS_DYNAMIC_UPDATE | SUPPORTS_FSM_MODE,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "perfcl_pll",
		.parent_data = pll_parent,
		.num_parents = ARRAY_SIZE(pll_parent),
		.ops = &clk_alpha_pll_huayra_ops,
	},
};

static struct clk_fixed_factor pwrcl_pll_postdiv = {
	.mult = 1,
	.div = 2,
	.hw.init = &(struct clk_init_data){
		.name = "pwrcl_pll_postdiv",
		.parent_data = &(const struct clk_parent_data){
			.hw = &pwrcl_pll.clkr.hw
		},
		.num_parents = 1,
		.ops = &clk_fixed_factor_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static struct clk_fixed_factor perfcl_pll_postdiv = {
	.mult = 1,
	.div = 2,
	.hw.init = &(struct clk_init_data){
		.name = "perfcl_pll_postdiv",
		.parent_data = &(const struct clk_parent_data){
			.hw = &perfcl_pll.clkr.hw
		},
		.num_parents = 1,
		.ops = &clk_fixed_factor_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static struct clk_fixed_factor perfcl_pll_acd = {
	.mult = 1,
	.div = 1,
	.hw.init = &(struct clk_init_data){
		.name = "perfcl_pll_acd",
		.parent_data = &(const struct clk_parent_data){
			.hw = &perfcl_pll.clkr.hw
		},
		.num_parents = 1,
		.ops = &clk_fixed_factor_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static struct clk_fixed_factor pwrcl_pll_acd = {
	.mult = 1,
	.div = 1,
	.hw.init = &(struct clk_init_data){
		.name = "pwrcl_pll_acd",
		.parent_data = &(const struct clk_parent_data){
			.hw = &pwrcl_pll.clkr.hw
		},
		.num_parents = 1,
		.ops = &clk_fixed_factor_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static const struct pll_vco alt_pll_vco_modes[] = {
	VCO(3,  250000000,  500000000),
	VCO(2,  500000000,  750000000),
	VCO(1,  750000000, 1000000000),
	VCO(0, 1000000000, 2150400000),
};

static const struct alpha_pll_config altpll_config = {
	.l = 16,
	.vco_val = 0x3 << 20,
	.vco_mask = 0x3 << 20,
	.config_ctl_val = 0x4001051b,
	.post_div_mask = 0x3 << 8,
	.post_div_val = 0x1 << 8,
	.main_output_mask = BIT(0),
	.early_output_mask = BIT(3),
};

static struct clk_alpha_pll pwrcl_alt_pll = {
	.offset = PWRCL_REG_OFFSET + ALT_PLL_OFFSET,
	.regs = alt_pll_regs,
	.vco_table = alt_pll_vco_modes,
	.num_vco = ARRAY_SIZE(alt_pll_vco_modes),
	.flags = SUPPORTS_OFFLINE_REQ | SUPPORTS_FSM_MODE,
	.clkr.hw.init = &(struct clk_init_data) {
		.name = "pwrcl_alt_pll",
		.parent_data = pll_parent,
		.num_parents = ARRAY_SIZE(pll_parent),
		.ops = &clk_alpha_pll_hwfsm_ops,
	},
};

static struct clk_alpha_pll perfcl_alt_pll = {
	.offset = PERFCL_REG_OFFSET + ALT_PLL_OFFSET,
	.regs = alt_pll_regs,
	.vco_table = alt_pll_vco_modes,
	.num_vco = ARRAY_SIZE(alt_pll_vco_modes),
	.flags = SUPPORTS_OFFLINE_REQ | SUPPORTS_FSM_MODE,
	.clkr.hw.init = &(struct clk_init_data) {
		.name = "perfcl_alt_pll",
		.parent_data = pll_parent,
		.num_parents = ARRAY_SIZE(pll_parent),
		.ops = &clk_alpha_pll_hwfsm_ops,
	},
};

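/**
 * struct clk_cpu_8996_pmux - per-cluster primary mux (PMUX)
 * @reg: offset of the mux register
 * @shift: offset of the mux selector field within the register
 * @width: width of the mux selector field, in bits
 * @nb: clock notifier used to switch away from the primary PLL around
 *      rate changes
 * @pll: clock to use for rates at or above the div-2 threshold
 * @pll_div_2: clock to use for rates below the div-2 threshold
 * @clkr: regmap clock handle
 */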
struct clk_cpu_8996_pmux {
	u32	reg;
	u8	shift;
	u8	width;
	struct notifier_block nb;
	struct clk_hw	*pll;
	struct clk_hw	*pll_div_2;
	struct clk_regmap clkr;
};

static int cpu_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
			       void *data);

#define to_clk_cpu_8996_pmux_nb(_nb) \
	container_of(_nb, struct clk_cpu_8996_pmux, nb)

static inline struct clk_cpu_8996_pmux *to_clk_cpu_8996_pmux_hw(struct clk_hw *hw)
{
	return container_of(to_clk_regmap(hw), struct clk_cpu_8996_pmux, clkr);
}

static u8 clk_cpu_8996_pmux_get_parent(struct clk_hw *hw)
{
	struct clk_regmap *clkr = to_clk_regmap(hw);
	struct clk_cpu_8996_pmux *cpuclk = to_clk_cpu_8996_pmux_hw(hw);
	u32 mask = GENMASK(cpuclk->width - 1, 0);
	u32 val;

	regmap_read(clkr->regmap, cpuclk->reg, &val);
	val >>= cpuclk->shift;

	return val & mask;
}

static int clk_cpu_8996_pmux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_regmap *clkr = to_clk_regmap(hw);
	struct clk_cpu_8996_pmux *cpuclk = to_clk_cpu_8996_pmux_hw(hw);
	u32 mask = GENMASK(cpuclk->width + cpuclk->shift - 1, cpuclk->shift);
	u32 val;

	val = index;
	val <<= cpuclk->shift;

	return regmap_update_bits(clkr->regmap, cpuclk->reg, mask, val);
}

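/*
 * Requests below DIV_2_THRESHOLD (600 MHz) are served from the PLL/2
 * branch via the SMUX: e.g. a 480 MHz request propagates up and locks
 * the primary PLL at 960 MHz. Requests below half the threshold
 * (300 MHz) cannot be satisfied and are rejected.
 */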
static int clk_cpu_8996_pmux_determine_rate(struct clk_hw *hw,
					   struct clk_rate_request *req)
{
	struct clk_cpu_8996_pmux *cpuclk = to_clk_cpu_8996_pmux_hw(hw);
	struct clk_hw *parent = cpuclk->pll;

	if (cpuclk->pll_div_2 && req->rate < DIV_2_THRESHOLD) {
		if (req->rate < (DIV_2_THRESHOLD / 2))
			return -EINVAL;

		parent = cpuclk->pll_div_2;
	}

	req->best_parent_rate = clk_hw_round_rate(parent, req->rate);
	req->best_parent_hw = parent;

	return 0;
}

static const struct clk_ops clk_cpu_8996_pmux_ops = {
	.set_parent = clk_cpu_8996_pmux_set_parent,
	.get_parent = clk_cpu_8996_pmux_get_parent,
	.determine_rate = clk_cpu_8996_pmux_determine_rate,
};

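/* Secondary mux (SMUX) inputs: the XO and the PLL/2 post-divider */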
static const struct clk_parent_data pwrcl_smux_parents[] = {
	{ .fw_name = "xo" },
	{ .hw = &pwrcl_pll_postdiv.hw },
};

static const struct clk_parent_data perfcl_smux_parents[] = {
	{ .fw_name = "xo" },
	{ .hw = &perfcl_pll_postdiv.hw },
};

static struct clk_regmap_mux pwrcl_smux = {
	.reg = PWRCL_REG_OFFSET + MUX_OFFSET,
	.shift = 2,
	.width = 2,
	.clkr.hw.init = &(struct clk_init_data) {
		.name = "pwrcl_smux",
		.parent_data = pwrcl_smux_parents,
		.num_parents = ARRAY_SIZE(pwrcl_smux_parents),
		.ops = &clk_regmap_mux_closest_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static struct clk_regmap_mux perfcl_smux = {
	.reg = PERFCL_REG_OFFSET + MUX_OFFSET,
	.shift = 2,
	.width = 2,
	.clkr.hw.init = &(struct clk_init_data) {
		.name = "perfcl_smux",
		.parent_data = perfcl_smux_parents,
		.num_parents = ARRAY_SIZE(perfcl_smux_parents),
		.ops = &clk_regmap_mux_closest_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static const struct clk_hw *pwrcl_pmux_parents[] = {
	[SMUX_INDEX] = &pwrcl_smux.clkr.hw,
	[PLL_INDEX] = &pwrcl_pll.clkr.hw,
	[ACD_INDEX] = &pwrcl_pll_acd.hw,
	[ALT_INDEX] = &pwrcl_alt_pll.clkr.hw,
};

static const struct clk_hw *perfcl_pmux_parents[] = {
	[SMUX_INDEX] = &perfcl_smux.clkr.hw,
	[PLL_INDEX] = &perfcl_pll.clkr.hw,
	[ACD_INDEX] = &perfcl_pll_acd.hw,
	[ALT_INDEX] = &perfcl_alt_pll.clkr.hw,
};

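/*
 * The PMUXes normally run from the ACD branch of the primary PLL
 * (see cpu_clk_notifier_cb); rates below DIV_2_THRESHOLD are taken
 * from the PLL/2 branch through the SMUX instead.
 */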
static struct clk_cpu_8996_pmux pwrcl_pmux = {
	.reg = PWRCL_REG_OFFSET + MUX_OFFSET,
	.shift = 0,
	.width = 2,
	.pll = &pwrcl_pll_acd.hw,
	.pll_div_2 = &pwrcl_smux.clkr.hw,
	.nb.notifier_call = cpu_clk_notifier_cb,
	.clkr.hw.init = &(struct clk_init_data) {
		.name = "pwrcl_pmux",
		.parent_hws = pwrcl_pmux_parents,
		.num_parents = ARRAY_SIZE(pwrcl_pmux_parents),
		.ops = &clk_cpu_8996_pmux_ops,
		/* CPU clock is critical and should never be gated */
		.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
	},
};

static struct clk_cpu_8996_pmux perfcl_pmux = {
	.reg = PERFCL_REG_OFFSET + MUX_OFFSET,
	.shift = 0,
	.width = 2,
	.pll = &perfcl_pll_acd.hw,
	.pll_div_2 = &perfcl_smux.clkr.hw,
	.nb.notifier_call = cpu_clk_notifier_cb,
	.clkr.hw.init = &(struct clk_init_data) {
		.name = "perfcl_pmux",
		.parent_hws = perfcl_pmux_parents,
		.num_parents = ARRAY_SIZE(perfcl_pmux_parents),
		.ops = &clk_cpu_8996_pmux_ops,
		/* CPU clock is critical and should never be gated */
		.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
	},
};

static const struct regmap_config cpu_msm8996_regmap_config = {
	.reg_bits		= 32,
	.reg_stride		= 4,
	.val_bits		= 32,
	.max_register		= 0x80210,
	.fast_io		= true,
	.val_format_endian	= REGMAP_ENDIAN_LITTLE,
};

static struct clk_hw *cpu_msm8996_hw_clks[] = {
	&pwrcl_pll_postdiv.hw,
	&perfcl_pll_postdiv.hw,
	&pwrcl_pll_acd.hw,
	&perfcl_pll_acd.hw,
};

static struct clk_regmap *cpu_msm8996_clks[] = {
	&pwrcl_pll.clkr,
	&perfcl_pll.clkr,
	&pwrcl_alt_pll.clkr,
	&perfcl_alt_pll.clkr,
	&pwrcl_smux.clkr,
	&perfcl_smux.clkr,
	&pwrcl_pmux.clkr,
	&perfcl_pmux.clkr,
};

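/*
 * Register the fixed-factor and regmap clocks, apply the initial PLL
 * configurations, keep the alternate PLLs running and hook the rate
 * change notifiers up to both PMUXes.
 */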
static int qcom_cpu_clk_msm8996_register_clks(struct device *dev,
					      struct regmap *regmap)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(cpu_msm8996_hw_clks); i++) {
		ret = devm_clk_hw_register(dev, cpu_msm8996_hw_clks[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(cpu_msm8996_clks); i++) {
		ret = devm_clk_register_regmap(dev, cpu_msm8996_clks[i]);
		if (ret)
			return ret;
	}

	clk_alpha_pll_configure(&pwrcl_pll, regmap, &hfpll_config);
	clk_alpha_pll_configure(&perfcl_pll, regmap, &hfpll_config);
	clk_alpha_pll_configure(&pwrcl_alt_pll, regmap, &altpll_config);
	clk_alpha_pll_configure(&perfcl_alt_pll, regmap, &altpll_config);

	/* Enable alt PLLs */
	clk_prepare_enable(pwrcl_alt_pll.clkr.hw.clk);
	clk_prepare_enable(perfcl_alt_pll.clkr.hw.clk);

	devm_clk_notifier_register(dev, pwrcl_pmux.clkr.hw.clk, &pwrcl_pmux.nb);
	devm_clk_notifier_register(dev, perfcl_pmux.clkr.hw.clk, &perfcl_pmux.nb);

	return ret;
}

#define CPU_AFFINITY_MASK 0xFFF
#define PWRCL_CPU_REG_MASK 0x3
#define PERFCL_CPU_REG_MASK 0x103

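/* L2 ACD registers, accessed indirectly via the Kryo L2 accessors */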
#define L2ACDCR_REG 0x580ULL
#define L2ACDTD_REG 0x581ULL
#define L2ACDDVMRC_REG 0x584ULL
#define L2ACDSSCR_REG 0x589ULL

static DEFINE_SPINLOCK(qcom_clk_acd_lock);
static void __iomem *base;

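/*
 * Program the L2 Adaptive Clock Distribution (ACD) registers for the
 * cluster the calling CPU belongs to (identified via MPIDR) and enable
 * the ACD path through the cluster's SSSCTL register.
 */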
static void qcom_cpu_clk_msm8996_acd_init(void __iomem *base)
{
	u64 hwid;
	unsigned long flags;

	spin_lock_irqsave(&qcom_clk_acd_lock, flags);

	hwid = read_cpuid_mpidr() & CPU_AFFINITY_MASK;

	kryo_l2_set_indirect_reg(L2ACDTD_REG, 0x00006a11);
	kryo_l2_set_indirect_reg(L2ACDDVMRC_REG, 0x000e0f0f);
	kryo_l2_set_indirect_reg(L2ACDSSCR_REG, 0x00000601);

	if (PWRCL_CPU_REG_MASK == (hwid | PWRCL_CPU_REG_MASK)) {
		writel(0xf, base + PWRCL_REG_OFFSET + SSSCTL_OFFSET);
		kryo_l2_set_indirect_reg(L2ACDCR_REG, 0x002c5ffd);
	}

	if (PERFCL_CPU_REG_MASK == (hwid | PERFCL_CPU_REG_MASK)) {
		kryo_l2_set_indirect_reg(L2ACDCR_REG, 0x002c5ffd);
		writel(0xf, base + PERFCL_REG_OFFSET + SSSCTL_OFFSET);
	}

	spin_unlock_irqrestore(&qcom_clk_acd_lock, flags);
}

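/*
 * Before a rate change, park the PMUX on the alternate PLL and
 * re-initialize ACD. Afterwards, switch to the PLL/2 (SMUX) path for
 * rates below DIV_2_THRESHOLD, or back to the ACD path otherwise.
 */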
static int cpu_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
			       void *data)
{
	struct clk_cpu_8996_pmux *cpuclk = to_clk_cpu_8996_pmux_nb(nb);
	struct clk_notifier_data *cnd = data;
	int ret;

	switch (event) {
	case PRE_RATE_CHANGE:
		ret = clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw, ALT_INDEX);
		qcom_cpu_clk_msm8996_acd_init(base);
		break;
	case POST_RATE_CHANGE:
		if (cnd->new_rate < DIV_2_THRESHOLD)
			ret = clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw,
							   SMUX_INDEX);
		else
			ret = clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw,
							   ACD_INDEX);
		break;
	default:
		ret = 0;
		break;
	}

	return notifier_from_errno(ret);
}

static int qcom_cpu_clk_msm8996_driver_probe(struct platform_device *pdev)
{
	struct regmap *regmap;
	struct clk_hw_onecell_data *data;
	struct device *dev = &pdev->dev;
	int ret;

	data = devm_kzalloc(dev, struct_size(data, hws, 2), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	regmap = devm_regmap_init_mmio(dev, base, &cpu_msm8996_regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	ret = qcom_cpu_clk_msm8996_register_clks(dev, regmap);
	if (ret)
		return ret;

	qcom_cpu_clk_msm8996_acd_init(base);

	data->hws[0] = &pwrcl_pmux.clkr.hw;
	data->hws[1] = &perfcl_pmux.clkr.hw;
	data->num = 2;

	return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, data);
}

static const struct of_device_id qcom_cpu_clk_msm8996_match_table[] = {
	{ .compatible = "qcom,msm8996-apcc" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_cpu_clk_msm8996_match_table);

static struct platform_driver qcom_cpu_clk_msm8996_driver = {
	.probe = qcom_cpu_clk_msm8996_driver_probe,
	.driver = {
		.name = "qcom-msm8996-apcc",
		.of_match_table = qcom_cpu_clk_msm8996_match_table,
	},
};
module_platform_driver(qcom_cpu_clk_msm8996_driver);

MODULE_DESCRIPTION("QCOM MSM8996 CPU Clock Driver");
MODULE_LICENSE("GPL v2");