// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) driver for
 * interfacing with the CPUfreq layer and governors. See
 * cppc_acpi.c for CPPC specific methods.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 */

#define pr_fmt(fmt)	"CPPC Cpufreq: " fmt

#include <linux/arch_topology.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/irq_work.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <uapi/linux/sched/types.h>

#include <asm/unaligned.h>

#include <acpi/cppc_acpi.h>

/* Minimum struct length needed for the DMI processor entry we want */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH	48

/* Offset in the DMI processor structure for the max frequency */
#define DMI_PROCESSOR_MAX_SPEED		0x14

/*
 * This list contains information parsed from per CPU ACPI _CPC and _PSD
 * structures: e.g. the highest and lowest supported performance, capabilities,
 * desired performance, level requested etc. Depending on the share_type, not
 * all CPUs will have an entry in the list.
 */
static LIST_HEAD(cpu_data_list);

static bool boost_supported;

struct cppc_workaround_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};

static struct cppc_workaround_oem_info wa_info[] = {
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP07   ",
		.oem_revision	= 0,
	}, {
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP08   ",
		.oem_revision	= 0,
	}
};

#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE

/* Frequency invariance support */
struct cppc_freq_invariance {
	int cpu;
	struct irq_work irq_work;
	struct kthread_work work;
	struct cppc_perf_fb_ctrs prev_perf_fb_ctrs;
	struct cppc_cpudata *cpu_data;
};

static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
static struct kthread_worker *kworker_fie;

static struct cpufreq_driver cppc_cpufreq_driver;
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
				 struct cppc_perf_fb_ctrs *fb_ctrs_t0,
				 struct cppc_perf_fb_ctrs *fb_ctrs_t1);

/**
 * cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
 * @work: The work item.
 *
 * The CPPC driver registers itself with the topology core to provide its own
 * implementation (cppc_scale_freq_tick()) of topology_scale_freq_tick() which
 * gets called by the scheduler on every tick.
 *
 * Note that the arch specific counters have higher priority than CPPC counters,
 * if available, though the CPPC driver doesn't need any special handling for
 * that.
 *
 * On an invocation of cppc_scale_freq_tick(), we schedule an irq work (since we
 * reach here from hard-irq context), which then queues a kthread work item;
 * cppc_scale_freq_workfn() updates the per_cpu arch_freq_scale variable based
 * on the counter updates since the last tick.
 */
static void cppc_scale_freq_workfn(struct kthread_work *work)
{
	struct cppc_freq_invariance *cppc_fi;
	struct cppc_perf_fb_ctrs fb_ctrs = {0};
	struct cppc_cpudata *cpu_data;
	unsigned long local_freq_scale;
	u64 perf;

	cppc_fi = container_of(work, struct cppc_freq_invariance, work);
	cpu_data = cppc_fi->cpu_data;

	if (cppc_get_perf_ctrs(cppc_fi->cpu, &fb_ctrs)) {
		pr_warn("%s: failed to read perf counters\n", __func__);
		return;
	}

	perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs,
				     &fb_ctrs);
	cppc_fi->prev_perf_fb_ctrs = fb_ctrs;

	perf <<= SCHED_CAPACITY_SHIFT;
	local_freq_scale = div64_u64(perf, cpu_data->perf_caps.highest_perf);

	/* This can happen due to counter overflow */
	if (unlikely(local_freq_scale > 1024))
		local_freq_scale = 1024;

	per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale;
}

static void cppc_irq_work(struct irq_work *irq_work)
{
	struct cppc_freq_invariance *cppc_fi;

	cppc_fi = container_of(irq_work, struct cppc_freq_invariance, irq_work);
	kthread_queue_work(kworker_fie, &cppc_fi->work);
}

static void cppc_scale_freq_tick(void)
{
	struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, smp_processor_id());

	/*
	 * cppc_get_perf_ctrs() can potentially sleep, call that from the right
	 * context.
	 */
	irq_work_queue(&cppc_fi->irq_work);
}

static struct scale_freq_data cppc_sftd = {
	.source = SCALE_FREQ_SOURCE_CPPC,
	.set_freq_scale = cppc_scale_freq_tick,
};

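/*
 * Set up the per-CPU irq_work and kthread work items for each CPU in the
 * policy, take an initial snapshot of the feedback counters and register
 * cppc_sftd so that the scheduler tick uses the CPPC counters for frequency
 * invariance on these CPUs.
 */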
static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
{
	struct cppc_freq_invariance *cppc_fi;
	int cpu, ret;

	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
		return;

	for_each_cpu(cpu, policy->cpus) {
		cppc_fi = &per_cpu(cppc_freq_inv, cpu);
		cppc_fi->cpu = cpu;
		cppc_fi->cpu_data = policy->driver_data;
		kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
		init_irq_work(&cppc_fi->irq_work, cppc_irq_work);

		ret = cppc_get_perf_ctrs(cpu, &cppc_fi->prev_perf_fb_ctrs);
		if (ret) {
			pr_warn("%s: failed to read perf counters for cpu:%d: %d\n",
				__func__, cpu, ret);

			/*
			 * Don't abort if the CPU was offline while the driver
			 * was getting registered.
			 */
			if (cpu_online(cpu))
				return;
		}
	}

	/* Register for freq-invariance */
	topology_set_scale_freq_source(&cppc_sftd, policy->cpus);
}

/*
 * We free all the resources on policy removal rather than on CPU removal, as
 * the irq-works are per-CPU and the hotplug core takes care of flushing the
 * pending irq-works (hint: smpcfd_dying_cpu()) on CPU hotplug. Even if a
 * kthread-work fires on another CPU after the concerned CPU is removed, no
 * harm is done.
 *
 * We just need to make sure to remove them all on policy->exit().
 */
static void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
{
	struct cppc_freq_invariance *cppc_fi;
	int cpu;

	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
		return;

	/* policy->cpus will be empty here, use related_cpus instead */
	topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus) {
		cppc_fi = &per_cpu(cppc_freq_inv, cpu);
		irq_work_sync(&cppc_fi->irq_work);
		kthread_cancel_work_sync(&cppc_fi->work);
	}
}

static void __init cppc_freq_invariance_init(void)
{
	struct sched_attr attr = {
		.size		= sizeof(struct sched_attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_nice	= 0,
		.sched_priority	= 0,
		/*
		 * Fake (unused) bandwidth; workaround to "fix"
		 * priority inheritance.
		 */
		.sched_runtime	= 1000000,
		.sched_deadline = 10000000,
		.sched_period	= 10000000,
	};
	int ret;

	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
		return;

	kworker_fie = kthread_create_worker(0, "cppc_fie");
	if (IS_ERR(kworker_fie))
		return;

	ret = sched_setattr_nocheck(kworker_fie->task, &attr);
	if (ret) {
		pr_warn("%s: failed to set SCHED_DEADLINE: %d\n", __func__,
			ret);
		kthread_destroy_worker(kworker_fie);
		return;
	}
}

static void cppc_freq_invariance_exit(void)
{
	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
		return;

	kthread_destroy_worker(kworker_fie);
	kworker_fie = NULL;
}

#else
static inline void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
{
}

static inline void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
{
}

static inline void cppc_freq_invariance_init(void)
{
}

static inline void cppc_freq_invariance_exit(void)
{
}
#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */

/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
	const u8 *dmi_data = (const u8 *)dm;
	u16 *mhz = (u16 *)private;

	if (dm->type == DMI_ENTRY_PROCESSOR &&
	    dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
		u16 val = (u16)get_unaligned((const u16 *)
				(dmi_data + DMI_PROCESSOR_MAX_SPEED));
		*mhz = val > *mhz ? val : *mhz;
	}
}

/* Look up the max frequency in DMI */
static u64 cppc_get_dmi_max_khz(void)
{
	u16 mhz = 0;

	dmi_walk(cppc_find_dmi_mhz, &mhz);

	/*
	 * Real stupid fallback value, just in case there is no
	 * actual value set.
	 */
	mhz = mhz ? mhz : 1;

	return (1000 * mhz);
}

/*
 * If CPPC lowest_freq and nominal_freq registers are exposed then we can
 * use them to convert perf to freq and vice versa. The conversion is
 * extrapolated as an affine function passing through the 2 points:
 *  - (Low perf, Low freq)
 *  - (Nominal perf, Nominal freq)
 */
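/*
 * Up to integer rounding, the conversion below is then equivalent to the
 * linear interpolation:
 *
 *   freq = lowest_freq + (perf - lowest_perf) *
 *          (nominal_freq - lowest_freq) / (nominal_perf - lowest_perf)
 *
 * When the frequency registers are not exposed, perf is instead scaled
 * against the maximum frequency reported by DMI.
 */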
static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data,
					     unsigned int perf)
{
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	s64 retval, offset = 0;
	static u64 max_khz;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		mul = caps->nominal_freq - caps->lowest_freq;
		div = caps->nominal_perf - caps->lowest_perf;
		offset = caps->nominal_freq - div64_u64(caps->nominal_perf * mul, div);
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = max_khz;
		div = caps->highest_perf;
	}

	retval = offset + div64_u64(perf * mul, div);
	if (retval >= 0)
		return retval;
	return 0;
}

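/*
 * Inverse of cppc_cpufreq_perf_to_khz(): map a frequency in KHz back to an
 * abstract performance value, using the same two reference points.
 */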
static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
					     unsigned int freq)
{
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	s64 retval, offset = 0;
	static u64 max_khz;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		mul = caps->nominal_perf - caps->lowest_perf;
		div = caps->nominal_freq - caps->lowest_freq;
		offset = caps->nominal_perf - div64_u64(caps->nominal_freq * mul, div);
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = caps->highest_perf;
		div = max_khz;
	}

	retval = offset + div64_u64(freq * mul, div);
	if (retval >= 0)
		return retval;
	return 0;
}

static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
				   unsigned int target_freq,
				   unsigned int relation)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	unsigned int cpu = policy->cpu;
	struct cpufreq_freqs freqs;
	u32 desired_perf;
	int ret = 0;

	desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
	/* Return if it is exactly the same perf */
	if (desired_perf == cpu_data->perf_ctrls.desired_perf)
		return ret;

	cpu_data->perf_ctrls.desired_perf = desired_perf;
	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	cpufreq_freq_transition_end(policy, &freqs, ret != 0);

	if (ret)
		pr_debug("Failed to set target on CPU:%d. ret:%d\n",
			 cpu, ret);

	return ret;
}

static unsigned int cppc_cpufreq_fast_switch(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	unsigned int cpu = policy->cpu;
	u32 desired_perf;
	int ret;

	desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
	cpu_data->perf_ctrls.desired_perf = desired_perf;
	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);

	if (ret) {
		pr_debug("Failed to set target on CPU:%d. ret:%d\n",
			 cpu, ret);
		return 0;
	}

	return target_freq;
}

static int cppc_verify_policy(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_cpu_limits(policy);
	return 0;
}

/*
 * The PCC subspace describes the rate at which the platform can accept
 * commands on the shared PCC channel (including READs which do not count
 * towards freq transition requests), so ideally we should use the PCC values
 * as a fallback if we don't have a platform specific transition_delay_us.
 */
#ifdef CONFIG_ARM64
#include <asm/cputype.h>

static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_num = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_QCOM:
		switch (part_num) {
		case QCOM_CPU_PART_FALKOR_V1:
		case QCOM_CPU_PART_FALKOR:
			return 10000;
		}
	}
	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}

static DEFINE_PER_CPU(unsigned int, efficiency_class);
static void cppc_cpufreq_register_em(struct cpufreq_policy *policy);

/* Create an artificial performance state every CPPC_EM_CAP_STEP capacity unit. */
#define CPPC_EM_CAP_STEP	(20)
/* Increase the cost value by CPPC_EM_COST_STEP every performance state. */
#define CPPC_EM_COST_STEP	(1)
/* Add a cost gap corresponding to the energy of 4 CPUs. */
#define CPPC_EM_COST_GAP	(4 * SCHED_CAPACITY_SCALE * CPPC_EM_COST_STEP \
				/ CPPC_EM_CAP_STEP)

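/*
 * Number of artificial performance states exposed to the Energy Model: one
 * state per CPPC_EM_CAP_STEP capacity units between the capacity matching
 * lowest_perf and the CPU's full capacity.
 */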
static unsigned int get_perf_level_count(struct cpufreq_policy *policy)
{
	struct cppc_perf_caps *perf_caps;
	unsigned int min_cap, max_cap;
	struct cppc_cpudata *cpu_data;
	int cpu = policy->cpu;

	cpu_data = policy->driver_data;
	perf_caps = &cpu_data->perf_caps;
	max_cap = arch_scale_cpu_capacity(cpu);
	min_cap = div_u64(max_cap * perf_caps->lowest_perf, perf_caps->highest_perf);
	if ((min_cap == 0) || (max_cap < min_cap))
		return 0;
	return 1 + max_cap / CPPC_EM_CAP_STEP - min_cap / CPPC_EM_CAP_STEP;
}

/*
 * The cost is defined as:
 *   cost = power * max_frequency / frequency
 */
static inline unsigned long compute_cost(int cpu, int step)
{
	return CPPC_EM_COST_GAP * per_cpu(efficiency_class, cpu) +
			step * CPPC_EM_COST_STEP;
}

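/*
 * Energy Model power callback: round the requested frequency up to the next
 * artificial performance step and report the matching frequency along with an
 * artificial power value derived from the step and the CPU's efficiency
 * class.
 */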
static int cppc_get_cpu_power(struct device *cpu_dev,
		unsigned long *power, unsigned long *KHz)
{
	unsigned long perf_step, perf_prev, perf, perf_check;
	unsigned int min_step, max_step, step, step_check;
	unsigned long prev_freq = *KHz;
	unsigned int min_cap, max_cap;
	struct cpufreq_policy *policy;

	struct cppc_perf_caps *perf_caps;
	struct cppc_cpudata *cpu_data;

	policy = cpufreq_cpu_get_raw(cpu_dev->id);
	cpu_data = policy->driver_data;
	perf_caps = &cpu_data->perf_caps;
	max_cap = arch_scale_cpu_capacity(cpu_dev->id);
	min_cap = div_u64(max_cap * perf_caps->lowest_perf,
			perf_caps->highest_perf);

	perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
	min_step = min_cap / CPPC_EM_CAP_STEP;
	max_step = max_cap / CPPC_EM_CAP_STEP;

	perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
	step = perf_prev / perf_step;

	if (step > max_step)
		return -EINVAL;

	if (min_step == max_step) {
		step = max_step;
		perf = perf_caps->highest_perf;
	} else if (step < min_step) {
		step = min_step;
		perf = perf_caps->lowest_perf;
	} else {
		step++;
		if (step == max_step)
			perf = perf_caps->highest_perf;
		else
			perf = step * perf_step;
	}

	*KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
	perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
	step_check = perf_check / perf_step;

	/*
	 * To avoid bad integer approximation, check that the new frequency
	 * value has increased and that the new frequency will be converted
	 * to the desired step value.
	 */
	while ((*KHz == prev_freq) || (step_check != step)) {
		perf++;
		*KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
		perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
		step_check = perf_check / perf_step;
	}

	/*
	 * With an artificial EM, only the cost value is used. Still, the power
	 * is populated such that 0 < power < EM_MAX_POWER. This gives the
	 * artificial performance states a bit more meaning.
	 */
	*power = compute_cost(cpu_dev->id, step);

	return 0;
}

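/*
 * Energy Model cost callback: return the artificial cost of the performance
 * step containing the given frequency.
 */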
static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
		unsigned long *cost)
{
	unsigned long perf_step, perf_prev;
	struct cppc_perf_caps *perf_caps;
	struct cpufreq_policy *policy;
	struct cppc_cpudata *cpu_data;
	unsigned int max_cap;
	int step;

	policy = cpufreq_cpu_get_raw(cpu_dev->id);
	cpu_data = policy->driver_data;
	perf_caps = &cpu_data->perf_caps;
	max_cap = arch_scale_cpu_capacity(cpu_dev->id);

	perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, KHz);
	perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
	step = perf_prev / perf_step;

	*cost = compute_cost(cpu_dev->id, step);

	return 0;
}

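/*
 * Read the GICC efficiency class of every possible CPU from the MADT. If at
 * least two distinct classes are present, remap them to consecutive indices
 * starting at 0 and enable Energy Model registration; otherwise an EM would
 * carry no extra information and none is registered.
 */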
static int populate_efficiency_class(void)
{
	struct acpi_madt_generic_interrupt *gicc;
	DECLARE_BITMAP(used_classes, 256) = {};
	int class, cpu, index;

	for_each_possible_cpu(cpu) {
		gicc = acpi_cpu_get_madt_gicc(cpu);
		class = gicc->efficiency_class;
		bitmap_set(used_classes, class, 1);
	}

	if (bitmap_weight(used_classes, 256) <= 1) {
		pr_debug("Efficiency classes are all equal (=%d). "
			"No EM registered", class);
		return -EINVAL;
	}

	/*
	 * Squeeze efficiency class values on [0:#efficiency_class-1].
	 * Values are per spec in [0:255].
	 */
	index = 0;
	for_each_set_bit(class, used_classes, 256) {
		for_each_possible_cpu(cpu) {
			gicc = acpi_cpu_get_madt_gicc(cpu);
			if (gicc->efficiency_class == class)
				per_cpu(efficiency_class, cpu) = index;
		}
		index++;
	}
	cppc_cpufreq_driver.register_em = cppc_cpufreq_register_em;

	return 0;
}

static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
{
	struct cppc_cpudata *cpu_data;
	struct em_data_callback em_cb =
		EM_ADV_DATA_CB(cppc_get_cpu_power, cppc_get_cpu_cost);

	cpu_data = policy->driver_data;
	em_dev_register_perf_domain(get_cpu_device(policy->cpu),
			get_perf_level_count(policy), &em_cb,
			cpu_data->shared_cpu_map, 0);
}

#else

static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}
static int populate_efficiency_class(void)
{
	return 0;
}
static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
{
}
#endif

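/*
 * Allocate and populate a cppc_cpudata entry for @cpu: parse _PSD to find the
 * CPUs sharing performance controls, read the _CPC performance capabilities,
 * convert the reported frequencies from MHz to KHz and add the entry to
 * cpu_data_list.
 */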
static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
{
	struct cppc_cpudata *cpu_data;
	int ret;

	cpu_data = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
	if (!cpu_data)
		goto out;

	if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
		goto free_cpu;

	ret = acpi_get_psd_map(cpu, cpu_data);
	if (ret) {
		pr_debug("Err parsing CPU%d PSD data: ret:%d\n", cpu, ret);
		goto free_mask;
	}

	ret = cppc_get_perf_caps(cpu, &cpu_data->perf_caps);
	if (ret) {
		pr_debug("Err reading CPU%d perf caps: ret:%d\n", cpu, ret);
		goto free_mask;
	}

	/* Convert the lowest and nominal freq from MHz to KHz */
	cpu_data->perf_caps.lowest_freq *= 1000;
	cpu_data->perf_caps.nominal_freq *= 1000;

	list_add(&cpu_data->node, &cpu_data_list);

	return cpu_data;

free_mask:
	free_cpumask_var(cpu_data->shared_cpu_map);
free_cpu:
	kfree(cpu_data);
out:
	return NULL;
}

static void cppc_cpufreq_put_cpu_data(struct cpufreq_policy *policy)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;

	list_del(&cpu_data->node);
	free_cpumask_var(cpu_data->shared_cpu_map);
	kfree(cpu_data);
	policy->driver_data = NULL;
}

static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	struct cppc_cpudata *cpu_data;
	struct cppc_perf_caps *caps;
	int ret;

	cpu_data = cppc_cpufreq_get_cpu_data(cpu);
	if (!cpu_data) {
		pr_err("Error in acquiring _CPC/_PSD data for CPU%d.\n", cpu);
		return -ENODEV;
	}
	caps = &cpu_data->perf_caps;
	policy->driver_data = cpu_data;

	/*
	 * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
	 * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
	 */
	policy->min = cppc_cpufreq_perf_to_khz(cpu_data,
					       caps->lowest_nonlinear_perf);
	policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
					       caps->nominal_perf);

	/*
	 * Set cpuinfo.min_freq to Lowest to make the full range of performance
	 * available if userspace wants to use any perf between lowest & lowest
	 * nonlinear perf
	 */
	policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu_data,
							    caps->lowest_perf);
	policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu_data,
							    caps->nominal_perf);

	policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
	policy->shared_type = cpu_data->shared_type;

	switch (policy->shared_type) {
	case CPUFREQ_SHARED_TYPE_HW:
	case CPUFREQ_SHARED_TYPE_NONE:
		/* Nothing to be done - we'll have a policy for each CPU */
		break;
	case CPUFREQ_SHARED_TYPE_ANY:
		/*
		 * All CPUs in the domain will share a policy and all cpufreq
		 * operations will use a single cppc_cpudata structure stored
		 * in policy->driver_data.
		 */
		cpumask_copy(policy->cpus, cpu_data->shared_cpu_map);
		break;
	default:
		pr_debug("Unsupported CPU co-ord type: %d\n",
			 policy->shared_type);
		ret = -EFAULT;
		goto out;
	}

	policy->fast_switch_possible = cppc_allow_fast_switch();
	policy->dvfs_possible_from_any_cpu = true;

	/*
	 * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
	 * is supported.
	 */
	if (caps->highest_perf > caps->nominal_perf)
		boost_supported = true;

	/* Set policy->cur to max now. The governors will adjust later. */
	policy->cur = cppc_cpufreq_perf_to_khz(cpu_data, caps->highest_perf);
	cpu_data->perf_ctrls.desired_perf = caps->highest_perf;

	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	if (ret) {
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 caps->highest_perf, cpu, ret);
		goto out;
	}

	cppc_cpufreq_cpu_fie_init(policy);
	return 0;

out:
	cppc_cpufreq_put_cpu_data(policy);
	return ret;
}

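/*
 * On policy exit, tear down frequency invariance, drop the CPU(s) back to
 * lowest_perf and free the shared cppc_cpudata.
 */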
static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	unsigned int cpu = policy->cpu;
	int ret;

	cppc_cpufreq_cpu_fie_exit(policy);

	cpu_data->perf_ctrls.desired_perf = caps->lowest_perf;

	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	if (ret)
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 caps->lowest_perf, cpu, ret);

	cppc_cpufreq_put_cpu_data(policy);
	return 0;
}

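/*
 * The feedback counters may be implemented as 32-bit registers. If t1 is not
 * greater than t0 and t0 still fits in 32 bits, assume a 32-bit counter
 * wrapped and compute the delta in 32-bit arithmetic.
 */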
static inline u64 get_delta(u64 t1, u64 t0)
{
	if (t1 > t0 || t0 > ~(u32)0)
		return t1 - t0;

	return (u32)t1 - (u32)t0;
}

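/*
 * Delivered performance over the sampling window:
 *
 *   delivered_perf = reference_perf * delta_delivered / delta_reference
 *
 * If either delta is zero, fall back to the last requested performance level
 * to avoid a division by zero or a bogus result.
 */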
static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
				 struct cppc_perf_fb_ctrs *fb_ctrs_t0,
				 struct cppc_perf_fb_ctrs *fb_ctrs_t1)
{
	u64 delta_reference, delta_delivered;
	u64 reference_perf;

	reference_perf = fb_ctrs_t0->reference_perf;

	delta_reference = get_delta(fb_ctrs_t1->reference,
				    fb_ctrs_t0->reference);
	delta_delivered = get_delta(fb_ctrs_t1->delivered,
				    fb_ctrs_t0->delivered);

	/* Check to avoid divide-by-zero and invalid delivered_perf */
	if (!delta_reference || !delta_delivered)
		return cpu_data->perf_ctrls.desired_perf;

	return (reference_perf * delta_delivered) / delta_reference;
}

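/*
 * Sample the feedback counters twice, 2us apart, derive the delivered
 * performance from the counter deltas and convert it to a frequency in KHz.
 */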
static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
	struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cppc_cpudata *cpu_data = policy->driver_data;
	u64 delivered_perf;
	int ret;

	cpufreq_cpu_put(policy);

	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
	if (ret)
		return ret;

	udelay(2); /* 2usec delay between sampling */

	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1);
	if (ret)
		return ret;

	delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
					       &fb_ctrs_t1);

	return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
}

static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	int ret;

	if (!boost_supported) {
		pr_err("BOOST not supported by CPU or firmware\n");
		return -EINVAL;
	}

	if (state)
		policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
						       caps->highest_perf);
	else
		policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
						       caps->nominal_perf);
	policy->cpuinfo.max_freq = policy->max;

	ret = freq_qos_update_request(policy->max_freq_req, policy->max);
	if (ret < 0)
		return ret;

	return 0;
}

static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;

	return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf);
}
cpufreq_freq_attr_ro(freqdomain_cpus);

static struct freq_attr *cppc_cpufreq_attr[] = {
	&freqdomain_cpus,
	NULL,
};

static struct cpufreq_driver cppc_cpufreq_driver = {
	.flags = CPUFREQ_CONST_LOOPS,
	.verify = cppc_verify_policy,
	.target = cppc_cpufreq_set_target,
	.get = cppc_cpufreq_get_rate,
	.fast_switch = cppc_cpufreq_fast_switch,
	.init = cppc_cpufreq_cpu_init,
	.exit = cppc_cpufreq_cpu_exit,
	.set_boost = cppc_cpufreq_set_boost,
	.attr = cppc_cpufreq_attr,
	.name = "cppc_cpufreq",
};

/*
 * The HiSilicon platform does not support the delivered performance counter
 * or the reference performance counter. It calculates the performance using a
 * platform specific mechanism. We reuse the desired performance register to
 * store the real performance calculated by the platform.
 */
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cppc_cpudata *cpu_data = policy->driver_data;
	u64 desired_perf;
	int ret;

	cpufreq_cpu_put(policy);

	ret = cppc_get_desired_perf(cpu, &desired_perf);
	if (ret < 0)
		return -EIO;

	return cppc_cpufreq_perf_to_khz(cpu_data, desired_perf);
}

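/*
 * Match the PCCT OEM information against the known HiSilicon platforms listed
 * in wa_info[] and, on a match, override the ->get() callback with the
 * desired-perf based variant above.
 */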
static void cppc_check_hisi_workaround(void)
{
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	int i;

	status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
	if (ACPI_FAILURE(status) || !tbl)
		return;

	for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
		if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
		    !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
		    wa_info[i].oem_revision == tbl->oem_revision) {
			/* Overwrite the get() callback */
			cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate;
			break;
		}
	}

	acpi_put_table(tbl);
}

static int __init cppc_cpufreq_init(void)
{
	int ret;

	if (acpi_disabled || !acpi_cpc_valid())
		return -ENODEV;

	cppc_check_hisi_workaround();
	cppc_freq_invariance_init();
	populate_efficiency_class();

	ret = cpufreq_register_driver(&cppc_cpufreq_driver);
	if (ret)
		cppc_freq_invariance_exit();

	return ret;
}

static inline void free_cpu_data(void)
{
	struct cppc_cpudata *iter, *tmp;

	list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) {
		free_cpumask_var(iter->shared_cpu_map);
		list_del(&iter->node);
		kfree(iter);
	}
}

static void __exit cppc_cpufreq_exit(void)
{
	cpufreq_unregister_driver(&cppc_cpufreq_driver);
	cppc_freq_invariance_exit();

	free_cpu_data();
}

module_exit(cppc_cpufreq_exit);
MODULE_AUTHOR("Ashwin Chaugule");
MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
MODULE_LICENSE("GPL");

late_initcall(cppc_cpufreq_init);

static const struct acpi_device_id cppc_acpi_ids[] __used = {
	{ACPI_PROCESSOR_DEVICE_HID, },
	{}
};

MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids);