1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2018, The Linux Foundation. All rights reserved.
4  */
5 
6 #include <linux/bitfield.h>
7 #include <linux/cpufreq.h>
8 #include <linux/init.h>
9 #include <linux/interconnect.h>
10 #include <linux/interrupt.h>
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/of_address.h>
14 #include <linux/of_platform.h>
15 #include <linux/pm_opp.h>
16 #include <linux/slab.h>
17 #include <linux/spinlock.h>
18 #include <linux/units.h>
19 
20 #define LUT_MAX_ENTRIES			40U
21 #define LUT_SRC				GENMASK(31, 30)
22 #define LUT_L_VAL			GENMASK(7, 0)
23 #define LUT_CORE_COUNT			GENMASK(18, 16)
24 #define LUT_VOLT			GENMASK(11, 0)
25 #define CLK_HW_DIV			2
26 #define LUT_TURBO_IND			1
27 
28 #define GT_IRQ_STATUS			BIT(2)
29 
/*
 * Per-SoC register layout of the cpufreq hardware block. All offsets are
 * relative to the base of one frequency domain's register region.
 */
struct qcom_cpufreq_soc_data {
	u32 reg_enable;		/* HW enable/status register; bit 0 set = enabled */
	u32 reg_domain_state;	/* throttled LUT index (EPSS only; 0 when absent) */
	u32 reg_dcvs_ctrl;	/* DCVS control; bit 0 set = per-core DCVS */
	u32 reg_freq_lut;	/* base of the frequency lookup table */
	u32 reg_volt_lut;	/* base of the voltage lookup table */
	u32 reg_intr_clr;	/* LMh interrupt clear (EPSS only; 0 when absent) */
	u32 reg_current_vote;	/* throttle vote (OSM only; 0 when absent) */
	u32 reg_perf_state;	/* requested performance-state index */
	u8 lut_row_size;	/* byte stride between consecutive LUT rows */
};
41 
/*
 * Per-frequency-domain driver state, allocated in the cpufreq ->init()
 * callback and stored in policy->driver_data.
 */
struct qcom_cpufreq_data {
	void __iomem *base;	/* mapped register region of this domain */
	struct resource *res;	/* backing mem resource, for release on exit */
	const struct qcom_cpufreq_soc_data *soc_data;

	/*
	 * Mutex to synchronize between de-init sequence and re-starting LMh
	 * polling/interrupts
	 */
	struct mutex throttle_lock;
	int throttle_irq;	/* LMh irq; <= 0 means LMh is not in use */
	char irq_name[15];	/* "dcvsh-irq-%u", kept alive for request_irq */
	bool cancel_throttle;	/* set under throttle_lock during teardown */
	struct delayed_work throttle_work;	/* polling while throttled */
	struct cpufreq_policy *policy;

	bool per_core_dcvs;	/* each core has its own perf-state register */
};
60 
/* Reference clock rates sampled once at probe: GPLL0/div ("alternate") and XO */
static unsigned long cpu_hw_rate, xo_rate;
/* True when a DT OPP table with interconnect paths drives bandwidth votes */
static bool icc_scaling_enabled;
63 
64 static int qcom_cpufreq_set_bw(struct cpufreq_policy *policy,
65 			       unsigned long freq_khz)
66 {
67 	unsigned long freq_hz = freq_khz * 1000;
68 	struct dev_pm_opp *opp;
69 	struct device *dev;
70 	int ret;
71 
72 	dev = get_cpu_device(policy->cpu);
73 	if (!dev)
74 		return -ENODEV;
75 
76 	opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true);
77 	if (IS_ERR(opp))
78 		return PTR_ERR(opp);
79 
80 	ret = dev_pm_opp_set_opp(dev, opp);
81 	dev_pm_opp_put(opp);
82 	return ret;
83 }
84 
85 static int qcom_cpufreq_update_opp(struct device *cpu_dev,
86 				   unsigned long freq_khz,
87 				   unsigned long volt)
88 {
89 	unsigned long freq_hz = freq_khz * 1000;
90 	int ret;
91 
92 	/* Skip voltage update if the opp table is not available */
93 	if (!icc_scaling_enabled)
94 		return dev_pm_opp_add(cpu_dev, freq_hz, volt);
95 
96 	ret = dev_pm_opp_adjust_voltage(cpu_dev, freq_hz, volt, volt, volt);
97 	if (ret) {
98 		dev_err(cpu_dev, "Voltage update failed freq=%ld\n", freq_khz);
99 		return ret;
100 	}
101 
102 	return dev_pm_opp_enable(cpu_dev, freq_hz);
103 }
104 
105 static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
106 					unsigned int index)
107 {
108 	struct qcom_cpufreq_data *data = policy->driver_data;
109 	const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
110 	unsigned long freq = policy->freq_table[index].frequency;
111 	unsigned int i;
112 
113 	writel_relaxed(index, data->base + soc_data->reg_perf_state);
114 
115 	if (data->per_core_dcvs)
116 		for (i = 1; i < cpumask_weight(policy->related_cpus); i++)
117 			writel_relaxed(index, data->base + soc_data->reg_perf_state + i * 4);
118 
119 	if (icc_scaling_enabled)
120 		qcom_cpufreq_set_bw(policy, freq);
121 
122 	return 0;
123 }
124 
125 static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
126 {
127 	struct qcom_cpufreq_data *data;
128 	const struct qcom_cpufreq_soc_data *soc_data;
129 	struct cpufreq_policy *policy;
130 	unsigned int index;
131 
132 	policy = cpufreq_cpu_get_raw(cpu);
133 	if (!policy)
134 		return 0;
135 
136 	data = policy->driver_data;
137 	soc_data = data->soc_data;
138 
139 	index = readl_relaxed(data->base + soc_data->reg_perf_state);
140 	index = min(index, LUT_MAX_ENTRIES - 1);
141 
142 	return policy->freq_table[index].frequency;
143 }
144 
145 static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
146 						unsigned int target_freq)
147 {
148 	struct qcom_cpufreq_data *data = policy->driver_data;
149 	const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
150 	unsigned int index;
151 	unsigned int i;
152 
153 	index = policy->cached_resolved_idx;
154 	writel_relaxed(index, data->base + soc_data->reg_perf_state);
155 
156 	if (data->per_core_dcvs)
157 		for (i = 1; i < cpumask_weight(policy->related_cpus); i++)
158 			writel_relaxed(index, data->base + soc_data->reg_perf_state + i * 4);
159 
160 	return policy->freq_table[index].frequency;
161 }
162 
163 static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
164 				    struct cpufreq_policy *policy)
165 {
166 	u32 data, src, lval, i, core_count, prev_freq = 0, freq;
167 	u32 volt;
168 	struct cpufreq_frequency_table	*table;
169 	struct dev_pm_opp *opp;
170 	unsigned long rate;
171 	int ret;
172 	struct qcom_cpufreq_data *drv_data = policy->driver_data;
173 	const struct qcom_cpufreq_soc_data *soc_data = drv_data->soc_data;
174 
175 	table = kcalloc(LUT_MAX_ENTRIES + 1, sizeof(*table), GFP_KERNEL);
176 	if (!table)
177 		return -ENOMEM;
178 
179 	ret = dev_pm_opp_of_add_table(cpu_dev);
180 	if (!ret) {
181 		/* Disable all opps and cross-validate against LUT later */
182 		icc_scaling_enabled = true;
183 		for (rate = 0; ; rate++) {
184 			opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
185 			if (IS_ERR(opp))
186 				break;
187 
188 			dev_pm_opp_put(opp);
189 			dev_pm_opp_disable(cpu_dev, rate);
190 		}
191 	} else if (ret != -ENODEV) {
192 		dev_err(cpu_dev, "Invalid opp table in device tree\n");
193 		return ret;
194 	} else {
195 		policy->fast_switch_possible = true;
196 		icc_scaling_enabled = false;
197 	}
198 
199 	for (i = 0; i < LUT_MAX_ENTRIES; i++) {
200 		data = readl_relaxed(drv_data->base + soc_data->reg_freq_lut +
201 				      i * soc_data->lut_row_size);
202 		src = FIELD_GET(LUT_SRC, data);
203 		lval = FIELD_GET(LUT_L_VAL, data);
204 		core_count = FIELD_GET(LUT_CORE_COUNT, data);
205 
206 		data = readl_relaxed(drv_data->base + soc_data->reg_volt_lut +
207 				      i * soc_data->lut_row_size);
208 		volt = FIELD_GET(LUT_VOLT, data) * 1000;
209 
210 		if (src)
211 			freq = xo_rate * lval / 1000;
212 		else
213 			freq = cpu_hw_rate / 1000;
214 
215 		if (freq != prev_freq && core_count != LUT_TURBO_IND) {
216 			if (!qcom_cpufreq_update_opp(cpu_dev, freq, volt)) {
217 				table[i].frequency = freq;
218 				dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
219 				freq, core_count);
220 			} else {
221 				dev_warn(cpu_dev, "failed to update OPP for freq=%d\n", freq);
222 				table[i].frequency = CPUFREQ_ENTRY_INVALID;
223 			}
224 
225 		} else if (core_count == LUT_TURBO_IND) {
226 			table[i].frequency = CPUFREQ_ENTRY_INVALID;
227 		}
228 
229 		/*
230 		 * Two of the same frequencies with the same core counts means
231 		 * end of table
232 		 */
233 		if (i > 0 && prev_freq == freq) {
234 			struct cpufreq_frequency_table *prev = &table[i - 1];
235 
236 			/*
237 			 * Only treat the last frequency that might be a boost
238 			 * as the boost frequency
239 			 */
240 			if (prev->frequency == CPUFREQ_ENTRY_INVALID) {
241 				if (!qcom_cpufreq_update_opp(cpu_dev, prev_freq, volt)) {
242 					prev->frequency = prev_freq;
243 					prev->flags = CPUFREQ_BOOST_FREQ;
244 				} else {
245 					dev_warn(cpu_dev, "failed to update OPP for freq=%d\n",
246 						 freq);
247 				}
248 			}
249 
250 			break;
251 		}
252 
253 		prev_freq = freq;
254 	}
255 
256 	table[i].frequency = CPUFREQ_TABLE_END;
257 	policy->freq_table = table;
258 	dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
259 
260 	return 0;
261 }
262 
263 static void qcom_get_related_cpus(int index, struct cpumask *m)
264 {
265 	struct device_node *cpu_np;
266 	struct of_phandle_args args;
267 	int cpu, ret;
268 
269 	for_each_possible_cpu(cpu) {
270 		cpu_np = of_cpu_device_node_get(cpu);
271 		if (!cpu_np)
272 			continue;
273 
274 		ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
275 						 "#freq-domain-cells", 0,
276 						 &args);
277 		of_node_put(cpu_np);
278 		if (ret < 0)
279 			continue;
280 
281 		if (index == args.args[0])
282 			cpumask_set_cpu(cpu, m);
283 	}
284 }
285 
286 static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
287 {
288 	unsigned int lval;
289 
290 	if (data->soc_data->reg_current_vote)
291 		lval = readl_relaxed(data->base + data->soc_data->reg_current_vote) & 0x3ff;
292 	else
293 		lval = readl_relaxed(data->base + data->soc_data->reg_domain_state) & 0xff;
294 
295 	return lval * xo_rate;
296 }
297 
298 static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
299 {
300 	struct cpufreq_policy *policy = data->policy;
301 	int cpu = cpumask_first(policy->related_cpus);
302 	struct device *dev = get_cpu_device(cpu);
303 	unsigned long freq_hz, throttled_freq;
304 	struct dev_pm_opp *opp;
305 
306 	/*
307 	 * Get the h/w throttled frequency, normalize it using the
308 	 * registered opp table and use it to calculate thermal pressure.
309 	 */
310 	freq_hz = qcom_lmh_get_throttle_freq(data);
311 
312 	opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
313 	if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
314 		opp = dev_pm_opp_find_freq_ceil(dev, &freq_hz);
315 
316 	if (IS_ERR(opp)) {
317 		dev_warn(dev, "Can't find the OPP for throttling: %pe!\n", opp);
318 	} else {
319 		throttled_freq = freq_hz / HZ_PER_KHZ;
320 
321 		/* Update thermal pressure (the boost frequencies are accepted) */
322 		arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
323 
324 		dev_pm_opp_put(opp);
325 	}
326 
327 	/*
328 	 * In the unlikely case policy is unregistered do not enable
329 	 * polling or h/w interrupt
330 	 */
331 	mutex_lock(&data->throttle_lock);
332 	if (data->cancel_throttle)
333 		goto out;
334 
335 	/*
336 	 * If h/w throttled frequency is higher than what cpufreq has requested
337 	 * for, then stop polling and switch back to interrupt mechanism.
338 	 */
339 	if (throttled_freq >= qcom_cpufreq_hw_get(cpu))
340 		enable_irq(data->throttle_irq);
341 	else
342 		mod_delayed_work(system_highpri_wq, &data->throttle_work,
343 				 msecs_to_jiffies(10));
344 
345 out:
346 	mutex_unlock(&data->throttle_lock);
347 }
348 
/* Delayed-work handler: periodically re-evaluate throttling while active. */
static void qcom_lmh_dcvs_poll(struct work_struct *work)
{
	struct qcom_cpufreq_data *data;

	data = container_of(work, struct qcom_cpufreq_data, throttle_work.work);
	qcom_lmh_dcvs_notify(data);
}
356 
/*
 * LMh threaded IRQ handler. Throttling has started: switch from interrupt
 * mode to polling (the IRQ is re-enabled by qcom_lmh_dcvs_notify() once
 * throttling ends), and ack the interrupt on SoCs that need an explicit
 * clear (EPSS).
 */
static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
{
	struct qcom_cpufreq_data *c_data = data;

	/* Disable interrupt and enable polling */
	disable_irq_nosync(c_data->throttle_irq);
	schedule_delayed_work(&c_data->throttle_work, 0);

	if (c_data->soc_data->reg_intr_clr)
		writel_relaxed(GT_IRQ_STATUS,
			       c_data->base + c_data->soc_data->reg_intr_clr);

	return IRQ_HANDLED;
}
371 
/* Register layout for the OSM-based cpufreq block ("qcom,cpufreq-hw") */
static const struct qcom_cpufreq_soc_data qcom_soc_data = {
	.reg_enable = 0x0,
	.reg_dcvs_ctrl = 0xbc,
	.reg_freq_lut = 0x110,
	.reg_volt_lut = 0x114,
	.reg_current_vote = 0x704,
	.reg_perf_state = 0x920,
	.lut_row_size = 32,
};
381 
/* Register layout for the EPSS-based cpufreq block ("qcom,cpufreq-epss") */
static const struct qcom_cpufreq_soc_data epss_soc_data = {
	.reg_enable = 0x0,
	.reg_domain_state = 0x20,
	.reg_dcvs_ctrl = 0xb0,
	.reg_freq_lut = 0x100,
	.reg_volt_lut = 0x200,
	.reg_intr_clr = 0x308,
	.reg_perf_state = 0x320,
	.lut_row_size = 4,
};
392 
/* DT match table selecting the per-SoC register layout */
static const struct of_device_id qcom_cpufreq_hw_match[] = {
	{ .compatible = "qcom,cpufreq-hw", .data = &qcom_soc_data },
	{ .compatible = "qcom,cpufreq-epss", .data = &epss_soc_data },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match);
399 
/*
 * Set up LMh (Limits Management hardware) throttle notification for this
 * policy. The IRQ is optional: when absent, or when request/affinity setup
 * fails, cpufreq continues without throttle reporting (hence the `return 0`
 * after the failed request). The IRQ is requested with IRQF_NO_AUTOEN and
 * only armed later by the ->ready() callback.
 */
static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	struct platform_device *pdev = cpufreq_get_driver_data();
	int ret;

	/*
	 * Look for LMh interrupt. If no interrupt line is specified /
	 * if there is an error, allow cpufreq to be enabled as usual.
	 */
	data->throttle_irq = platform_get_irq_optional(pdev, index);
	if (data->throttle_irq == -ENXIO)
		return 0;
	if (data->throttle_irq < 0)
		return data->throttle_irq;

	data->cancel_throttle = false;
	data->policy = policy;

	mutex_init(&data->throttle_lock);
	INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);

	snprintf(data->irq_name, sizeof(data->irq_name), "dcvsh-irq-%u", policy->cpu);
	ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq,
				   IRQF_ONESHOT | IRQF_NO_AUTOEN, data->irq_name, data);
	if (ret) {
		/* Non-fatal: run without LMh notification */
		dev_err(&pdev->dev, "Error registering %s: %d\n", data->irq_name, ret);
		return 0;
	}

	/* Keep the handler local to this domain's CPUs */
	ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
	if (ret)
		dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
			data->irq_name, data->throttle_irq);

	return 0;
}
437 
/*
 * cpufreq ->online(): re-allow LMh polling/interrupts (undoes ->offline())
 * and restore IRQ affinity to the domain's CPUs. No-op when LMh is unused.
 */
static int qcom_cpufreq_hw_cpu_online(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	struct platform_device *pdev = cpufreq_get_driver_data();
	int ret;

	if (data->throttle_irq <= 0)
		return 0;

	/* Taken so the flag change is ordered against a concurrent notify */
	mutex_lock(&data->throttle_lock);
	data->cancel_throttle = false;
	mutex_unlock(&data->throttle_lock);

	ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
	if (ret)
		dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
			data->irq_name, data->throttle_irq);

	return ret;
}
458 
/*
 * cpufreq ->offline(): quiesce LMh handling. Setting cancel_throttle under
 * the lock stops qcom_lmh_dcvs_notify() from re-arming anything, then the
 * pending work is flushed and the IRQ is masked. Order matters: the flag
 * must be visible before cancel_delayed_work_sync() or the work could
 * re-schedule itself.
 */
static int qcom_cpufreq_hw_cpu_offline(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data = policy->driver_data;

	if (data->throttle_irq <= 0)
		return 0;

	mutex_lock(&data->throttle_lock);
	data->cancel_throttle = true;
	mutex_unlock(&data->throttle_lock);

	cancel_delayed_work_sync(&data->throttle_work);
	irq_set_affinity_and_hint(data->throttle_irq, NULL);
	disable_irq_nosync(data->throttle_irq);

	return 0;
}
476 
/* Release the LMh IRQ on policy teardown; no-op when LMh was never set up. */
static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
{
	if (data->throttle_irq <= 0)
		return;

	free_irq(data->throttle_irq, data);
}
484 
/*
 * cpufreq ->init(): set up one frequency domain for @policy.
 *
 * Resolves the domain index from the CPU's "qcom,freq-domain" DT phandle,
 * maps the matching MEM resource, verifies the HW is enabled, gathers the
 * related CPUs, builds the frequency table from the LUT and finally wires
 * up optional LMh throttle handling. Resources are unwound in reverse
 * order through the error/unmap_base/release_region goto ladder.
 */
static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
	struct platform_device *pdev = cpufreq_get_driver_data();
	struct device *dev = &pdev->dev;
	struct of_phandle_args args;
	struct device_node *cpu_np;
	struct device *cpu_dev;
	struct resource *res;
	void __iomem *base;
	struct qcom_cpufreq_data *data;
	int ret, index;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
		       policy->cpu);
		return -ENODEV;
	}

	cpu_np = of_cpu_device_node_get(policy->cpu);
	if (!cpu_np)
		return -EINVAL;

	ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
					 "#freq-domain-cells", 0, &args);
	of_node_put(cpu_np);
	if (ret)
		return ret;

	/* First phandle cell is the frequency-domain (and MEM resource) index */
	index = args.args[0];

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	if (!res) {
		dev_err(dev, "failed to get mem resource %d\n", index);
		return -ENODEV;
	}

	if (!request_mem_region(res->start, resource_size(res), res->name)) {
		dev_err(dev, "failed to request resource %pR\n", res);
		return -EBUSY;
	}

	base = ioremap(res->start, resource_size(res));
	if (!base) {
		dev_err(dev, "failed to map resource %pR\n", res);
		ret = -ENOMEM;
		goto release_region;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto unmap_base;
	}

	data->soc_data = of_device_get_match_data(&pdev->dev);
	data->base = base;
	data->res = res;

	/* HW should be in enabled state to proceed */
	if (!(readl_relaxed(base + data->soc_data->reg_enable) & 0x1)) {
		dev_err(dev, "Domain-%d cpufreq hardware not enabled\n", index);
		ret = -ENODEV;
		goto error;
	}

	if (readl_relaxed(base + data->soc_data->reg_dcvs_ctrl) & 0x1)
		data->per_core_dcvs = true;

	qcom_get_related_cpus(index, policy->cpus);
	if (cpumask_empty(policy->cpus)) {
		dev_err(dev, "Domain-%d failed to get related CPUs\n", index);
		ret = -ENOENT;
		goto error;
	}

	policy->driver_data = data;
	policy->dvfs_possible_from_any_cpu = true;

	ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
	if (ret) {
		dev_err(dev, "Domain-%d failed to read LUT\n", index);
		goto error;
	}

	ret = dev_pm_opp_get_opp_count(cpu_dev);
	if (ret <= 0) {
		dev_err(cpu_dev, "Failed to add OPPs\n");
		ret = -ENODEV;
		goto error;
	}

	if (policy_has_boost_freq(policy)) {
		/* Boost failure is non-fatal; run with base frequencies only */
		ret = cpufreq_enable_boost_support();
		if (ret)
			dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
	}

	ret = qcom_cpufreq_hw_lmh_init(policy, index);
	if (ret)
		goto error;

	return 0;
error:
	kfree(data);
unmap_base:
	iounmap(base);
release_region:
	release_mem_region(res->start, resource_size(res));
	return ret;
}
596 
/*
 * cpufreq ->exit(): undo qcom_cpufreq_hw_cpu_init() — drop the OPPs, free
 * the LMh IRQ, release the frequency table and driver state, and unmap/
 * release the register region (reverse of acquisition order).
 */
static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	struct qcom_cpufreq_data *data = policy->driver_data;
	struct resource *res = data->res;
	void __iomem *base = data->base;

	dev_pm_opp_remove_all_dynamic(cpu_dev);
	dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
	qcom_cpufreq_hw_lmh_exit(data);
	kfree(policy->freq_table);
	kfree(data);
	iounmap(base);
	release_mem_region(res->start, resource_size(res));

	return 0;
}
614 
615 static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
616 {
617 	struct qcom_cpufreq_data *data = policy->driver_data;
618 
619 	if (data->throttle_irq >= 0)
620 		enable_irq(data->throttle_irq);
621 }
622 
/* sysfs attributes exposed per policy (available and boost frequencies) */
static struct freq_attr *qcom_cpufreq_hw_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&cpufreq_freq_attr_scaling_boost_freqs,
	NULL
};
628 
/* cpufreq driver operations; driver_data is set to the pdev at probe time */
static struct cpufreq_driver cpufreq_qcom_hw_driver = {
	.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK |
			  CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
			  CPUFREQ_IS_COOLING_DEV,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= qcom_cpufreq_hw_target_index,
	.get		= qcom_cpufreq_hw_get,
	.init		= qcom_cpufreq_hw_cpu_init,
	.exit		= qcom_cpufreq_hw_cpu_exit,
	.online		= qcom_cpufreq_hw_cpu_online,
	.offline	= qcom_cpufreq_hw_cpu_offline,
	.register_em	= cpufreq_register_em_with_opp,
	.fast_switch    = qcom_cpufreq_hw_fast_switch,
	.name		= "qcom-cpufreq-hw",
	.attr		= qcom_cpufreq_hw_attr,
	.ready		= qcom_cpufreq_ready,
};
646 
/*
 * Platform probe: sample the XO and alternate clock rates (needed to decode
 * LUT entries), discover optional interconnect paths on CPU0, then register
 * the cpufreq driver. The per-domain work happens later in ->init().
 */
static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
	struct device *cpu_dev;
	struct clk *clk;
	int ret;

	clk = clk_get(&pdev->dev, "xo");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	xo_rate = clk_get_rate(clk);
	clk_put(clk);

	clk = clk_get(&pdev->dev, "alternate");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* The alternate (GPLL0) source feeds the CPU through a fixed divider */
	cpu_hw_rate = clk_get_rate(clk) / CLK_HW_DIV;
	clk_put(clk);

	/* Let the cpufreq callbacks retrieve the pdev */
	cpufreq_qcom_hw_driver.driver_data = pdev;

	/* Check for optional interconnect paths on CPU0 */
	cpu_dev = get_cpu_device(0);
	if (!cpu_dev)
		return -EPROBE_DEFER;

	ret = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
	if (ret)
		return ret;

	ret = cpufreq_register_driver(&cpufreq_qcom_hw_driver);
	if (ret)
		dev_err(&pdev->dev, "CPUFreq HW driver failed to register\n");
	else
		dev_dbg(&pdev->dev, "QCOM CPUFreq HW driver initialized\n");

	return ret;
}
686 
/* Platform remove: unregistering the driver tears down all policies. */
static int qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
	return cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
}
691 
/* Platform driver bound via the OF match table above */
static struct platform_driver qcom_cpufreq_hw_driver = {
	.probe = qcom_cpufreq_hw_driver_probe,
	.remove = qcom_cpufreq_hw_driver_remove,
	.driver = {
		.name = "qcom-cpufreq-hw",
		.of_match_table = qcom_cpufreq_hw_match,
	},
};
700 
/* Registered at postcore so cpufreq is available early in boot. */
static int __init qcom_cpufreq_hw_init(void)
{
	return platform_driver_register(&qcom_cpufreq_hw_driver);
}
postcore_initcall(qcom_cpufreq_hw_init);
706 
/* Module unload: unregister the platform driver. */
static void __exit qcom_cpufreq_hw_exit(void)
{
	platform_driver_unregister(&qcom_cpufreq_hw_driver);
}
module_exit(qcom_cpufreq_hw_exit);
712 
713 MODULE_DESCRIPTION("QCOM CPUFREQ HW Driver");
714 MODULE_LICENSE("GPL v2");
715