xref: /openbmc/linux/drivers/platform/x86/intel/turbo_max_3.c (revision 762f99f4f3cb41a775b5157dd761217beba65873)
1*1fef1c04SKate Hsuan // SPDX-License-Identifier: GPL-2.0
2*1fef1c04SKate Hsuan /*
3*1fef1c04SKate Hsuan  * Intel Turbo Boost Max Technology 3.0 legacy (non HWP) enumeration driver
4*1fef1c04SKate Hsuan  * Copyright (c) 2017, Intel Corporation.
5*1fef1c04SKate Hsuan  * All rights reserved.
6*1fef1c04SKate Hsuan  *
7*1fef1c04SKate Hsuan  * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
8*1fef1c04SKate Hsuan  */
9*1fef1c04SKate Hsuan #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10*1fef1c04SKate Hsuan 
11*1fef1c04SKate Hsuan #include <linux/cpufeature.h>
12*1fef1c04SKate Hsuan #include <linux/cpuhotplug.h>
13*1fef1c04SKate Hsuan #include <linux/init.h>
14*1fef1c04SKate Hsuan #include <linux/kernel.h>
15*1fef1c04SKate Hsuan #include <linux/topology.h>
16*1fef1c04SKate Hsuan #include <linux/workqueue.h>
17*1fef1c04SKate Hsuan 
18*1fef1c04SKate Hsuan #include <asm/cpu_device_id.h>
19*1fef1c04SKate Hsuan #include <asm/intel-family.h>
20*1fef1c04SKate Hsuan 
21*1fef1c04SKate Hsuan #define MSR_OC_MAILBOX			0x150
22*1fef1c04SKate Hsuan #define MSR_OC_MAILBOX_CMD_OFFSET	32
23*1fef1c04SKate Hsuan #define MSR_OC_MAILBOX_RSP_OFFSET	32
24*1fef1c04SKate Hsuan #define MSR_OC_MAILBOX_BUSY_BIT		63
25*1fef1c04SKate Hsuan #define OC_MAILBOX_FC_CONTROL_CMD	0x1C
26*1fef1c04SKate Hsuan 
/*
 * Typical latency to get a mailbox response is ~3us; it takes a further ~3us
 * to process the mailbox read after issuing the mailbox write on a Broadwell
 * 3.4 GHz system. So most of the time the first mailbox read should have the
 * response, but to avoid some boundary cases retry twice.
 */
33*1fef1c04SKate Hsuan #define OC_MAILBOX_RETRY_COUNT		2
34*1fef1c04SKate Hsuan 
get_oc_core_priority(unsigned int cpu)35*1fef1c04SKate Hsuan static int get_oc_core_priority(unsigned int cpu)
36*1fef1c04SKate Hsuan {
37*1fef1c04SKate Hsuan 	u64 value, cmd = OC_MAILBOX_FC_CONTROL_CMD;
38*1fef1c04SKate Hsuan 	int ret, i;
39*1fef1c04SKate Hsuan 
40*1fef1c04SKate Hsuan 	/* Issue favored core read command */
41*1fef1c04SKate Hsuan 	value = cmd << MSR_OC_MAILBOX_CMD_OFFSET;
42*1fef1c04SKate Hsuan 	/* Set the busy bit to indicate OS is trying to issue command */
43*1fef1c04SKate Hsuan 	value |=  BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT);
44*1fef1c04SKate Hsuan 	ret = wrmsrl_safe(MSR_OC_MAILBOX, value);
45*1fef1c04SKate Hsuan 	if (ret) {
46*1fef1c04SKate Hsuan 		pr_debug("cpu %d OC mailbox write failed\n", cpu);
47*1fef1c04SKate Hsuan 		return ret;
48*1fef1c04SKate Hsuan 	}
49*1fef1c04SKate Hsuan 
50*1fef1c04SKate Hsuan 	for (i = 0; i < OC_MAILBOX_RETRY_COUNT; ++i) {
51*1fef1c04SKate Hsuan 		ret = rdmsrl_safe(MSR_OC_MAILBOX, &value);
52*1fef1c04SKate Hsuan 		if (ret) {
53*1fef1c04SKate Hsuan 			pr_debug("cpu %d OC mailbox read failed\n", cpu);
54*1fef1c04SKate Hsuan 			break;
55*1fef1c04SKate Hsuan 		}
56*1fef1c04SKate Hsuan 
57*1fef1c04SKate Hsuan 		if (value & BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT)) {
58*1fef1c04SKate Hsuan 			pr_debug("cpu %d OC mailbox still processing\n", cpu);
59*1fef1c04SKate Hsuan 			ret = -EBUSY;
60*1fef1c04SKate Hsuan 			continue;
61*1fef1c04SKate Hsuan 		}
62*1fef1c04SKate Hsuan 
63*1fef1c04SKate Hsuan 		if ((value >> MSR_OC_MAILBOX_RSP_OFFSET) & 0xff) {
64*1fef1c04SKate Hsuan 			pr_debug("cpu %d OC mailbox cmd failed\n", cpu);
65*1fef1c04SKate Hsuan 			ret = -ENXIO;
66*1fef1c04SKate Hsuan 			break;
67*1fef1c04SKate Hsuan 		}
68*1fef1c04SKate Hsuan 
69*1fef1c04SKate Hsuan 		ret = value & 0xff;
70*1fef1c04SKate Hsuan 		pr_debug("cpu %d max_ratio %d\n", cpu, ret);
71*1fef1c04SKate Hsuan 		break;
72*1fef1c04SKate Hsuan 	}
73*1fef1c04SKate Hsuan 
74*1fef1c04SKate Hsuan 	return ret;
75*1fef1c04SKate Hsuan }
76*1fef1c04SKate Hsuan 
/*
 * The work item is needed to avoid CPU hotplug locking issues. The function
 * itmt_legacy_cpu_online() is called from the CPU online callback, so it
 * can't call sched_set_itmt_support() from there as that function will
 * acquire hotplug locks in its path.
 */
/* Deferred enabling of ITMT scheduler support; see comment above. */
static void itmt_legacy_work_fn(struct work_struct *work)
{
	sched_set_itmt_support();
}

/* Queued (at most once effectively matters) from the CPU online callback. */
static DECLARE_WORK(sched_itmt_work, itmt_legacy_work_fn);
89*1fef1c04SKate Hsuan 
itmt_legacy_cpu_online(unsigned int cpu)90*1fef1c04SKate Hsuan static int itmt_legacy_cpu_online(unsigned int cpu)
91*1fef1c04SKate Hsuan {
92*1fef1c04SKate Hsuan 	static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
93*1fef1c04SKate Hsuan 	int priority;
94*1fef1c04SKate Hsuan 
95*1fef1c04SKate Hsuan 	priority = get_oc_core_priority(cpu);
96*1fef1c04SKate Hsuan 	if (priority < 0)
97*1fef1c04SKate Hsuan 		return 0;
98*1fef1c04SKate Hsuan 
99*1fef1c04SKate Hsuan 	sched_set_itmt_core_prio(priority, cpu);
100*1fef1c04SKate Hsuan 
101*1fef1c04SKate Hsuan 	/* Enable ITMT feature when a core with different priority is found */
102*1fef1c04SKate Hsuan 	if (max_highest_perf <= min_highest_perf) {
103*1fef1c04SKate Hsuan 		if (priority > max_highest_perf)
104*1fef1c04SKate Hsuan 			max_highest_perf = priority;
105*1fef1c04SKate Hsuan 
106*1fef1c04SKate Hsuan 		if (priority < min_highest_perf)
107*1fef1c04SKate Hsuan 			min_highest_perf = priority;
108*1fef1c04SKate Hsuan 
109*1fef1c04SKate Hsuan 		if (max_highest_perf > min_highest_perf)
110*1fef1c04SKate Hsuan 			schedule_work(&sched_itmt_work);
111*1fef1c04SKate Hsuan 	}
112*1fef1c04SKate Hsuan 
113*1fef1c04SKate Hsuan 	return 0;
114*1fef1c04SKate Hsuan }
115*1fef1c04SKate Hsuan 
/*
 * CPU models using this legacy (non-HWP) Turbo Boost Max 3.0 enumeration.
 * NULL-terminated for x86_match_cpu().
 */
static const struct x86_cpu_id itmt_legacy_cpu_ids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X,	NULL),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,	NULL),
	{}
};
121*1fef1c04SKate Hsuan 
itmt_legacy_init(void)122*1fef1c04SKate Hsuan static int __init itmt_legacy_init(void)
123*1fef1c04SKate Hsuan {
124*1fef1c04SKate Hsuan 	const struct x86_cpu_id *id;
125*1fef1c04SKate Hsuan 	int ret;
126*1fef1c04SKate Hsuan 
127*1fef1c04SKate Hsuan 	id = x86_match_cpu(itmt_legacy_cpu_ids);
128*1fef1c04SKate Hsuan 	if (!id)
129*1fef1c04SKate Hsuan 		return -ENODEV;
130*1fef1c04SKate Hsuan 
131*1fef1c04SKate Hsuan 	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
132*1fef1c04SKate Hsuan 				"platform/x86/turbo_max_3:online",
133*1fef1c04SKate Hsuan 				itmt_legacy_cpu_online,	NULL);
134*1fef1c04SKate Hsuan 	if (ret < 0)
135*1fef1c04SKate Hsuan 		return ret;
136*1fef1c04SKate Hsuan 
137*1fef1c04SKate Hsuan 	return 0;
138*1fef1c04SKate Hsuan }
139*1fef1c04SKate Hsuan late_initcall(itmt_legacy_init)
140