// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) driver for
 * interfacing with the CPUfreq layer and governors. See
 * cppc_acpi.c for CPPC specific methods.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 */

#define pr_fmt(fmt)	"CPPC Cpufreq: " fmt

#include <linux/arch_topology.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/irq_work.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <uapi/linux/sched/types.h>

#include <asm/unaligned.h>

#include <acpi/cppc_acpi.h>

/* Minimum struct length needed for the DMI processor entry we want */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH	48

/* Offset in the DMI processor structure for the max frequency */
#define DMI_PROCESSOR_MAX_SPEED		0x14
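
/*
 * The maximum speed comes from the SMBIOS Processor Information (Type 4)
 * record; 0x14 is the offset of its "Max Speed" field, a potentially
 * unaligned u16 in MHz (hence the get_unaligned() read below).
 */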

/*
 * This list contains information parsed from per-CPU ACPI _CPC and _PSD
 * structures: e.g. the highest and lowest supported performance, capabilities,
 * desired performance, level requested etc. Depending on the share_type, not
 * all CPUs will have an entry in the list.
 */
static LIST_HEAD(cpu_data_list);

static bool boost_supported;

struct cppc_workaround_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};

static struct cppc_workaround_oem_info wa_info[] = {
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP07   ",
		.oem_revision	= 0,
	}, {
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP08   ",
		.oem_revision	= 0,
	}
};

#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE

/* Frequency invariance support */
struct cppc_freq_invariance {
	int cpu;
	struct irq_work irq_work;
	struct kthread_work work;
	struct cppc_perf_fb_ctrs prev_perf_fb_ctrs;
	struct cppc_cpudata *cpu_data;
};

static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
static struct kthread_worker *kworker_fie;

static struct cpufreq_driver cppc_cpufreq_driver;
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
				 struct cppc_perf_fb_ctrs *fb_ctrs_t0,
				 struct cppc_perf_fb_ctrs *fb_ctrs_t1);

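/*
 * A sketch of the update path described in the kernel-doc below:
 *
 *   scheduler tick (hard-irq context)
 *     -> cppc_scale_freq_tick()
 *     -> irq_work_queue()
 *     -> cppc_irq_work()                 (irq-work context)
 *     -> kthread_queue_work()
 *     -> cppc_scale_freq_workfn()        (sleepable kthread context)
 *     -> per-CPU arch_freq_scale update
 */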
/**
 * cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
 * @work: The work item.
 *
 * The CPPC driver registers itself with the topology core to provide its own
 * implementation (cppc_scale_freq_tick()) of topology_scale_freq_tick(), which
 * gets called by the scheduler on every tick.
 *
 * Note that the arch specific counters have higher priority than CPPC counters,
 * if available, though the CPPC driver doesn't need any special handling for
 * that.
 *
 * On an invocation of cppc_scale_freq_tick(), we schedule an irq work (since we
 * reach here from hard-irq context), which then queues a kthread work item, and
 * cppc_scale_freq_workfn() updates the per_cpu arch_freq_scale variable based
 * on the counter updates since the last tick.
 */
static void cppc_scale_freq_workfn(struct kthread_work *work)
{
	struct cppc_freq_invariance *cppc_fi;
	struct cppc_perf_fb_ctrs fb_ctrs = {0};
	struct cppc_cpudata *cpu_data;
	unsigned long local_freq_scale;
	u64 perf;

	cppc_fi = container_of(work, struct cppc_freq_invariance, work);
	cpu_data = cppc_fi->cpu_data;

	if (cppc_get_perf_ctrs(cppc_fi->cpu, &fb_ctrs)) {
		pr_warn("%s: failed to read perf counters\n", __func__);
		return;
	}

	perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs,
				     &fb_ctrs);
	cppc_fi->prev_perf_fb_ctrs = fb_ctrs;

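	/*
	 * Scale the delivered performance into the [0..SCHED_CAPACITY_SCALE]
	 * range the scheduler expects. For example (hypothetical values),
	 * with highest_perf = 300 and a delivered perf of 150 this yields
	 * (150 << 10) / 300 = 512, i.e. the CPU ran at half capacity.
	 */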
	perf <<= SCHED_CAPACITY_SHIFT;
	local_freq_scale = div64_u64(perf, cpu_data->perf_caps.highest_perf);

	/* This can happen due to counter overflow */
	if (unlikely(local_freq_scale > SCHED_CAPACITY_SCALE))
		local_freq_scale = SCHED_CAPACITY_SCALE;

	per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale;
}

static void cppc_irq_work(struct irq_work *irq_work)
{
	struct cppc_freq_invariance *cppc_fi;

	cppc_fi = container_of(irq_work, struct cppc_freq_invariance, irq_work);
	kthread_queue_work(kworker_fie, &cppc_fi->work);
}

static void cppc_scale_freq_tick(void)
{
	struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, smp_processor_id());

	/*
	 * cppc_get_perf_ctrs() can potentially sleep, so defer the real work
	 * to a context that is allowed to.
	 */
	irq_work_queue(&cppc_fi->irq_work);
}

static struct scale_freq_data cppc_sftd = {
	.source = SCALE_FREQ_SOURCE_CPPC,
	.set_freq_scale = cppc_scale_freq_tick,
};

static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
{
	struct cppc_freq_invariance *cppc_fi;
	int cpu, ret;

	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
		return;

	for_each_cpu(cpu, policy->cpus) {
		cppc_fi = &per_cpu(cppc_freq_inv, cpu);
		cppc_fi->cpu = cpu;
		cppc_fi->cpu_data = policy->driver_data;
		kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
		init_irq_work(&cppc_fi->irq_work, cppc_irq_work);

		ret = cppc_get_perf_ctrs(cpu, &cppc_fi->prev_perf_fb_ctrs);
		if (ret) {
			pr_warn("%s: failed to read perf counters for cpu:%d: %d\n",
				__func__, cpu, ret);

			/*
			 * Don't abort if the CPU was offline while the driver
			 * was getting registered.
			 */
			if (cpu_online(cpu))
				return;
		}
	}

	/* Register for freq-invariance */
	topology_set_scale_freq_source(&cppc_sftd, policy->cpus);
}

/*
 * We free all the resources on policy's removal and not on CPU removal as the
 * irq works are per-CPU and the hotplug core takes care of flushing the
 * pending irq works (hint: smpcfd_dying_cpu()) on CPU hotplug. Even if the
 * kthread work fires on another CPU after the concerned CPU is removed, it
 * won't do any harm.
 *
 * We just need to make sure to remove them all on policy->exit().
 */
static void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
{
	struct cppc_freq_invariance *cppc_fi;
	int cpu;

	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
		return;

	/* policy->cpus will be empty here, use related_cpus instead */
	topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus) {
		cppc_fi = &per_cpu(cppc_freq_inv, cpu);
		irq_work_sync(&cppc_fi->irq_work);
		kthread_cancel_work_sync(&cppc_fi->work);
	}
}

static void __init cppc_freq_invariance_init(void)
{
	struct sched_attr attr = {
		.size		= sizeof(struct sched_attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_nice	= 0,
		.sched_priority	= 0,
		/*
		 * Fake (unused) bandwidth; workaround to "fix"
		 * priority inheritance.
		 */
		.sched_runtime	= 1000000,
		.sched_deadline = 10000000,
		.sched_period	= 10000000,
	};
	int ret;

	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
		return;

	kworker_fie = kthread_create_worker(0, "cppc_fie");
	if (IS_ERR(kworker_fie))
		return;

	ret = sched_setattr_nocheck(kworker_fie->task, &attr);
	if (ret) {
		pr_warn("%s: failed to set SCHED_DEADLINE: %d\n", __func__,
			ret);
		kthread_destroy_worker(kworker_fie);
		return;
	}
}

static void cppc_freq_invariance_exit(void)
{
	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
		return;

	kthread_destroy_worker(kworker_fie);
	kworker_fie = NULL;
}

#else
static inline void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
{
}

static inline void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
{
}

static inline void cppc_freq_invariance_init(void)
{
}

static inline void cppc_freq_invariance_exit(void)
{
}
#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */

/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
	const u8 *dmi_data = (const u8 *)dm;
	u16 *mhz = (u16 *)private;

	if (dm->type == DMI_ENTRY_PROCESSOR &&
	    dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
		u16 val = (u16)get_unaligned((const u16 *)
				(dmi_data + DMI_PROCESSOR_MAX_SPEED));
		*mhz = val > *mhz ? val : *mhz;
	}
}

/* Look up the max frequency in DMI */
static u64 cppc_get_dmi_max_khz(void)
{
	u16 mhz = 0;

	dmi_walk(cppc_find_dmi_mhz, &mhz);

	/*
	 * Real stupid fallback value, just in case there is no
	 * actual value set.
	 */
	mhz = mhz ? mhz : 1;

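	/* DMI reports MHz while cpufreq works in kHz, e.g. 2600 -> 2600000. */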
	return (1000 * mhz);
}

/*
 * If the CPPC lowest_freq and nominal_freq registers are exposed, we can
 * use them to convert perf to freq and vice versa.
 *
 * If the perf/freq point lies between Lowest and Nominal, we treat
 * (lowest perf, lowest freq) and (nominal perf, nominal freq) as two points
 * on a line and interpolate along it. For perf/freq above Nominal, we use
 * the perf:freq ratio at Nominal for the conversion.
 */
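/*
 * Worked example with hypothetical values: for lowest = (perf 100,
 * 1000000 kHz) and nominal = (perf 300, 2800000 kHz), the slope is
 * (2800000 - 1000000) / (300 - 100) = 9000 kHz per unit of perf and the
 * offset is 2800000 - 300 * 9000 = 100000 kHz, so a request for perf 200
 * maps to 200 * 9000 + 100000 = 1900000 kHz.
 */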
static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data,
					     unsigned int perf)
{
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	static u64 max_khz;
	s64 offset = 0;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		if (perf >= caps->nominal_perf) {
			mul = caps->nominal_freq;
			div = caps->nominal_perf;
		} else {
			mul = caps->nominal_freq - caps->lowest_freq;
			div = caps->nominal_perf - caps->lowest_perf;
			/*
			 * The interpolation line does not necessarily pass
			 * through the origin, so account for its offset.
			 */
			offset = caps->nominal_freq -
				 (caps->nominal_perf * mul) / div;
		}
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = max_khz;
		div = caps->highest_perf;
	}
	return offset + (u64)perf * mul / div;
}

static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
					     unsigned int freq)
{
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	static u64 max_khz;
	s64 offset = 0;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		if (freq >= caps->nominal_freq) {
			mul = caps->nominal_perf;
			div = caps->nominal_freq;
		} else {
			mul = caps->nominal_perf - caps->lowest_perf;
			div = caps->nominal_freq - caps->lowest_freq;
			/* Inverse of the perf_to_khz() mapping above. */
			offset = caps->nominal_perf -
				 (caps->nominal_freq * mul) / div;
		}
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = caps->highest_perf;
		div = max_khz;
	}

	return offset + (u64)freq * mul / div;
}

static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
				   unsigned int target_freq,
				   unsigned int relation)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	unsigned int cpu = policy->cpu;
	struct cpufreq_freqs freqs;
	u32 desired_perf;
	int ret = 0;

	desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
	/* Return if it is exactly the same perf */
	if (desired_perf == cpu_data->perf_ctrls.desired_perf)
		return ret;

	cpu_data->perf_ctrls.desired_perf = desired_perf;
	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	cpufreq_freq_transition_end(policy, &freqs, ret != 0);

	if (ret)
		pr_debug("Failed to set target on CPU:%d. ret:%d\n",
			 cpu, ret);

	return ret;
}

static int cppc_verify_policy(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_cpu_limits(policy);
	return 0;
}

/*
 * The PCC subspace describes the rate at which the platform can accept
 * commands on the shared PCC channel (including READs which do not count
 * towards freq transition requests), so ideally we need to use the PCC
 * values as a fallback if we don't have a platform specific
 * transition_delay_us.
 */
#ifdef CONFIG_ARM64
#include <asm/cputype.h>

static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_num = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_QCOM:
		switch (part_num) {
		case QCOM_CPU_PART_FALKOR_V1:
		case QCOM_CPU_PART_FALKOR:
			return 10000;
		}
	}
	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}

#else

static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}
#endif

static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
{
	struct cppc_cpudata *cpu_data;
	int ret;

	cpu_data = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
	if (!cpu_data)
		goto out;

	if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
		goto free_cpu;

	ret = acpi_get_psd_map(cpu, cpu_data);
	if (ret) {
		pr_debug("Err parsing CPU%d PSD data: ret:%d\n", cpu, ret);
		goto free_mask;
	}

	ret = cppc_get_perf_caps(cpu, &cpu_data->perf_caps);
	if (ret) {
		pr_debug("Err reading CPU%d perf caps: ret:%d\n", cpu, ret);
		goto free_mask;
	}

	/* Convert the lowest and nominal freq from MHz to kHz */
	cpu_data->perf_caps.lowest_freq *= 1000;
	cpu_data->perf_caps.nominal_freq *= 1000;

	list_add(&cpu_data->node, &cpu_data_list);

	return cpu_data;

free_mask:
	free_cpumask_var(cpu_data->shared_cpu_map);
free_cpu:
	kfree(cpu_data);
out:
	return NULL;
}

static void cppc_cpufreq_put_cpu_data(struct cpufreq_policy *policy)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;

	list_del(&cpu_data->node);
	free_cpumask_var(cpu_data->shared_cpu_map);
	kfree(cpu_data);
	policy->driver_data = NULL;
}

static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	struct cppc_cpudata *cpu_data;
	struct cppc_perf_caps *caps;
	int ret;

	cpu_data = cppc_cpufreq_get_cpu_data(cpu);
	if (!cpu_data) {
		pr_err("Error in acquiring _CPC/_PSD data for CPU%d.\n", cpu);
		return -ENODEV;
	}
	caps = &cpu_data->perf_caps;
	policy->driver_data = cpu_data;

	/*
	 * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
	 * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
	 */
	policy->min = cppc_cpufreq_perf_to_khz(cpu_data,
					       caps->lowest_nonlinear_perf);
	policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
					       caps->nominal_perf);

	/*
	 * Set cpuinfo.min_freq to Lowest to make the full range of performance
	 * available if userspace wants to use any perf between lowest & lowest
	 * nonlinear perf
	 */
	policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu_data,
							    caps->lowest_perf);
	policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu_data,
							    caps->nominal_perf);

	policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
	policy->shared_type = cpu_data->shared_type;

	switch (policy->shared_type) {
	case CPUFREQ_SHARED_TYPE_HW:
	case CPUFREQ_SHARED_TYPE_NONE:
		/* Nothing to be done - we'll have a policy for each CPU */
		break;
	case CPUFREQ_SHARED_TYPE_ANY:
		/*
		 * All CPUs in the domain will share a policy and all cpufreq
		 * operations will use a single cppc_cpudata structure stored
		 * in policy->driver_data.
		 */
		cpumask_copy(policy->cpus, cpu_data->shared_cpu_map);
		break;
	default:
		pr_debug("Unsupported CPU co-ord type: %d\n",
			 policy->shared_type);
		ret = -EFAULT;
		goto out;
	}

	/*
	 * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
	 * is supported.
	 */
	if (caps->highest_perf > caps->nominal_perf)
		boost_supported = true;

	/* Set policy->cur to max now. The governors will adjust later. */
	policy->cur = cppc_cpufreq_perf_to_khz(cpu_data, caps->highest_perf);
	cpu_data->perf_ctrls.desired_perf = caps->highest_perf;

	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	if (ret) {
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 caps->highest_perf, cpu, ret);
		goto out;
	}

	cppc_cpufreq_cpu_fie_init(policy);
	return 0;

out:
	cppc_cpufreq_put_cpu_data(policy);
	return ret;
}

static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	unsigned int cpu = policy->cpu;
	int ret;

	cppc_cpufreq_cpu_fie_exit(policy);

	cpu_data->perf_ctrls.desired_perf = caps->lowest_perf;

	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	if (ret)
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 caps->lowest_perf, cpu, ret);

	cppc_cpufreq_put_cpu_data(policy);
	return 0;
}

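/*
 * Compute a counter delta that is robust against wrap-around: if no wrap was
 * observed (t1 > t0), or the counters are known to be wider than 32 bits
 * (t0 exceeds U32_MAX), a plain subtraction is fine; otherwise assume 32-bit
 * counters and let the u32 subtraction wrap naturally.
 */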
static inline u64 get_delta(u64 t1, u64 t0)
{
	if (t1 > t0 || t0 > ~(u32)0)
		return t1 - t0;

	return (u32)t1 - (u32)t0;
}

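/*
 * Derive the delivered performance over the [t0, t1] window as:
 *
 *   delivered_perf = reference_perf * delta_delivered / delta_reference
 *
 * i.e. the known reference performance scaled by how fast the delivered
 * counter advanced relative to the reference counter.
 */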
static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
				 struct cppc_perf_fb_ctrs *fb_ctrs_t0,
				 struct cppc_perf_fb_ctrs *fb_ctrs_t1)
{
	u64 delta_reference, delta_delivered;
	u64 reference_perf;

	reference_perf = fb_ctrs_t0->reference_perf;

	delta_reference = get_delta(fb_ctrs_t1->reference,
				    fb_ctrs_t0->reference);
	delta_delivered = get_delta(fb_ctrs_t1->delivered,
				    fb_ctrs_t0->delivered);

	/* Check to avoid divide-by-zero and an invalid delivered_perf */
	if (!delta_reference || !delta_delivered)
		return cpu_data->perf_ctrls.desired_perf;

	return (reference_perf * delta_delivered) / delta_reference;
}

static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
	struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cppc_cpudata *cpu_data;
	u64 delivered_perf;
	int ret;

	if (!policy)
		return 0;

	cpu_data = policy->driver_data;
	cpufreq_cpu_put(policy);

	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
	if (ret)
		return ret;

	udelay(2); /* 2usec delay between sampling */

	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1);
	if (ret)
		return ret;

	delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
					       &fb_ctrs_t1);

	return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
}

static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	int ret;

	if (!boost_supported) {
		pr_err("BOOST not supported by CPU or firmware\n");
		return -EINVAL;
	}

	if (state)
		policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
						       caps->highest_perf);
	else
		policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
						       caps->nominal_perf);
	policy->cpuinfo.max_freq = policy->max;

	ret = freq_qos_update_request(policy->max_freq_req, policy->max);
	if (ret < 0)
		return ret;

	return 0;
}

static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;

	return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf);
}
cpufreq_freq_attr_ro(freqdomain_cpus);

static struct freq_attr *cppc_cpufreq_attr[] = {
	&freqdomain_cpus,
	NULL,
};

static struct cpufreq_driver cppc_cpufreq_driver = {
	.flags = CPUFREQ_CONST_LOOPS,
	.verify = cppc_verify_policy,
	.target = cppc_cpufreq_set_target,
	.get = cppc_cpufreq_get_rate,
	.init = cppc_cpufreq_cpu_init,
	.exit = cppc_cpufreq_cpu_exit,
	.set_boost = cppc_cpufreq_set_boost,
	.attr = cppc_cpufreq_attr,
	.name = "cppc_cpufreq",
};

/*
 * The HiSilicon platform does not support the delivered and reference
 * performance counters. It calculates the delivered performance through a
 * platform specific mechanism instead, and we reuse the desired performance
 * register to store the real performance calculated by the platform.
 */
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cppc_cpudata *cpu_data;
	u64 desired_perf;
	int ret;

	if (!policy)
		return 0;

	cpu_data = policy->driver_data;
	cpufreq_cpu_put(policy);

	ret = cppc_get_desired_perf(cpu, &desired_perf);
	if (ret < 0)
		return -EIO;

	return cppc_cpufreq_perf_to_khz(cpu_data, desired_perf);
}

static void cppc_check_hisi_workaround(void)
{
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	int i;

	status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
	if (ACPI_FAILURE(status) || !tbl)
		return;

	for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
		if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
		    !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
		    wa_info[i].oem_revision == tbl->oem_revision) {
			/* Overwrite the get() callback */
			cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate;
			break;
		}
	}

	acpi_put_table(tbl);
}

static int __init cppc_cpufreq_init(void)
{
	int ret;

	if (acpi_disabled || !acpi_cpc_valid())
		return -ENODEV;

	INIT_LIST_HEAD(&cpu_data_list);

	cppc_check_hisi_workaround();
	cppc_freq_invariance_init();

	ret = cpufreq_register_driver(&cppc_cpufreq_driver);
	if (ret)
		cppc_freq_invariance_exit();

	return ret;
}

static inline void free_cpu_data(void)
{
	struct cppc_cpudata *iter, *tmp;

	list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) {
		free_cpumask_var(iter->shared_cpu_map);
		list_del(&iter->node);
		kfree(iter);
	}
}

static void __exit cppc_cpufreq_exit(void)
{
	cpufreq_unregister_driver(&cppc_cpufreq_driver);
	cppc_freq_invariance_exit();

	free_cpu_data();
}

module_exit(cppc_cpufreq_exit);
MODULE_AUTHOR("Ashwin Chaugule");
MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
MODULE_LICENSE("GPL");

late_initcall(cppc_cpufreq_init);

static const struct acpi_device_id cppc_acpi_ids[] __used = {
	{ACPI_PROCESSOR_DEVICE_HID, },
	{}
};

MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids);