xref: /openbmc/linux/arch/x86/kernel/cpu/umwait.c (revision e721eb06)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/syscore_ops.h>
3 #include <linux/suspend.h>
4 #include <linux/cpu.h>
5 
6 #include <asm/msr.h>
7 #include <asm/mwait.h>
8 
/* Value for MSR bit 0: 0 keeps the C0.2 power saving state enabled */
#define UMWAIT_C02_ENABLE	0

/*
 * Compose an IA32_UMWAIT_CONTROL value from the max wait time (TSC-quanta,
 * bits 31:2) and the C0.2-disable flag (bit 0). Bit 1 is reserved and
 * stays zero because both masks exclude it.
 */
#define UMWAIT_CTRL_VAL(max_time, c02_disable)				\
	(((max_time) & MSR_IA32_UMWAIT_CONTROL_TIME_MASK) |		\
	((c02_disable) & MSR_IA32_UMWAIT_CONTROL_C02_DISABLE))
14 
/*
 * Cache of the systemwide IA32_UMWAIT_CONTROL MSR value. All CPUs are kept
 * in sync with this cache (see umwait_update_control_msr()). By default the
 * umwait max time is 100000 TSC-quanta and C0.2 is enabled.
 */
static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE);
20 
21 u32 get_umwait_control_msr(void)
22 {
23 	return umwait_control_cached;
24 }
25 EXPORT_SYMBOL_GPL(get_umwait_control_msr);
26 
/*
 * Cache of the original IA32_UMWAIT_CONTROL MSR value as configured by
 * hardware or BIOS before kernel boot. Written exactly once in
 * umwait_init() and restored on CPU offline.
 */
static u32 orig_umwait_control_cached __ro_after_init;
32 
/*
 * Serializes the read-modify-write of umwait_control_cached and the
 * IA32_UMWAIT_CONTROL MSR update in the sysfs write functions.
 */
static DEFINE_MUTEX(umwait_lock);
38 
/*
 * Write the cached control value into this CPU's IA32_UMWAIT_CONTROL MSR.
 *
 * Must run with interrupts disabled — either called directly with IRQs off
 * or from SMP function call context — so the READ_ONCE() of the cache and
 * the WRMSR cannot be separated by a concurrent update IPI (see the
 * comment above umwait_cpu_online()).
 *
 * @unused: present only to match the on_each_cpu() callback signature.
 */
static void umwait_update_control_msr(void * unused)
{
	lockdep_assert_irqs_disabled();
	wrmsr(MSR_IA32_UMWAIT_CONTROL, READ_ONCE(umwait_control_cached), 0);
}
44 
/*
 * The CPU hotplug callback sets the control MSR to the global control
 * value.
 *
 * Disable interrupts so the read of umwait_control_cached and the WRMSR
 * are protected against a concurrent sysfs write. Otherwise the sysfs
 * write could update the cached value after it had been read on this CPU
 * and issue the IPI before the old value had been written. The IPI would
 * interrupt, write the new value and after return from IPI the previous
 * value would be written by this CPU.
 *
 * With interrupts disabled the upcoming CPU either sees the new control
 * value or the IPI is updating this CPU to the new control value after
 * interrupts have been reenabled.
 *
 * Returns 0; this callback cannot fail.
 */
static int umwait_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	umwait_update_control_msr(NULL);
	local_irq_enable();
	return 0;
}
67 
/*
 * The CPU hotplug callback restores the control MSR to the original
 * (pre-boot firmware) control value on the outgoing CPU.
 */
static int umwait_cpu_offline(unsigned int cpu)
{
	/*
	 * This code is protected by the CPU hotplug already and
	 * orig_umwait_control_cached is never changed after it caches
	 * the original control MSR value in umwait_init(). So there
	 * is no race condition here.
	 */
	wrmsr(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached, 0);

	return 0;
}
84 
/*
 * On resume, restore IA32_UMWAIT_CONTROL MSR on the boot processor which
 * is the only active CPU at this time. The MSR is set up on the APs via the
 * CPU hotplug callback.
 *
 * This function is invoked on resume from suspend and hibernation. On
 * resume from suspend the restore should be not required, but we neither
 * trust the firmware nor does it matter if the same value is written
 * again.
 *
 * Runs with interrupts disabled in syscore context, satisfying the
 * lockdep assertion in umwait_update_control_msr().
 */
static void umwait_syscore_resume(void)
{
	umwait_update_control_msr(NULL);
}
99 
/* Only .resume is needed: nothing to save/shutdown for this MSR. */
static struct syscore_ops umwait_syscore_ops = {
	.resume	= umwait_syscore_resume,
};
103 
104 /* sysfs interface */
105 
106 /*
107  * When bit 0 in IA32_UMWAIT_CONTROL MSR is 1, C0.2 is disabled.
108  * Otherwise, C0.2 is enabled.
109  */
110 static inline bool umwait_ctrl_c02_enabled(u32 ctrl)
111 {
112 	return !(ctrl & MSR_IA32_UMWAIT_CONTROL_C02_DISABLE);
113 }
114 
115 static inline u32 umwait_ctrl_max_time(u32 ctrl)
116 {
117 	return ctrl & MSR_IA32_UMWAIT_CONTROL_TIME_MASK;
118 }
119 
/*
 * Store a new control value built from @maxtime (TSC-quanta, bits 31:2)
 * and @c02_enable into the cache and propagate it to the MSR on all
 * online CPUs.
 *
 * Caller must hold umwait_lock. The cache is updated with WRITE_ONCE()
 * before the cross-CPU call so that any CPU coming online concurrently
 * (which reads the cache with interrupts disabled) observes either the
 * new value directly or via the subsequent IPI.
 */
static inline void umwait_update_control(u32 maxtime, bool c02_enable)
{
	u32 ctrl = maxtime & MSR_IA32_UMWAIT_CONTROL_TIME_MASK;

	if (!c02_enable)
		ctrl |= MSR_IA32_UMWAIT_CONTROL_C02_DISABLE;

	WRITE_ONCE(umwait_control_cached, ctrl);
	/* Propagate to all CPUs */
	on_each_cpu(umwait_update_control_msr, NULL, 1);
}
131 
132 static ssize_t
133 enable_c02_show(struct device *dev, struct device_attribute *attr, char *buf)
134 {
135 	u32 ctrl = READ_ONCE(umwait_control_cached);
136 
137 	return sprintf(buf, "%d\n", umwait_ctrl_c02_enabled(ctrl));
138 }
139 
140 static ssize_t enable_c02_store(struct device *dev,
141 				struct device_attribute *attr,
142 				const char *buf, size_t count)
143 {
144 	bool c02_enable;
145 	u32 ctrl;
146 	int ret;
147 
148 	ret = kstrtobool(buf, &c02_enable);
149 	if (ret)
150 		return ret;
151 
152 	mutex_lock(&umwait_lock);
153 
154 	ctrl = READ_ONCE(umwait_control_cached);
155 	if (c02_enable != umwait_ctrl_c02_enabled(ctrl))
156 		umwait_update_control(ctrl, c02_enable);
157 
158 	mutex_unlock(&umwait_lock);
159 
160 	return count;
161 }
162 static DEVICE_ATTR_RW(enable_c02);
163 
164 static ssize_t
165 max_time_show(struct device *kobj, struct device_attribute *attr, char *buf)
166 {
167 	u32 ctrl = READ_ONCE(umwait_control_cached);
168 
169 	return sprintf(buf, "%u\n", umwait_ctrl_max_time(ctrl));
170 }
171 
172 static ssize_t max_time_store(struct device *kobj,
173 			      struct device_attribute *attr,
174 			      const char *buf, size_t count)
175 {
176 	u32 max_time, ctrl;
177 	int ret;
178 
179 	ret = kstrtou32(buf, 0, &max_time);
180 	if (ret)
181 		return ret;
182 
183 	/* bits[1:0] must be zero */
184 	if (max_time & ~MSR_IA32_UMWAIT_CONTROL_TIME_MASK)
185 		return -EINVAL;
186 
187 	mutex_lock(&umwait_lock);
188 
189 	ctrl = READ_ONCE(umwait_control_cached);
190 	if (max_time != umwait_ctrl_max_time(ctrl))
191 		umwait_update_control(max_time, umwait_ctrl_c02_enabled(ctrl));
192 
193 	mutex_unlock(&umwait_lock);
194 
195 	return count;
196 }
197 static DEVICE_ATTR_RW(max_time);

/* Attributes exposed under /sys/devices/system/cpu/umwait_control/ */
static struct attribute *umwait_attrs[] = {
	&dev_attr_enable_c02.attr,
	&dev_attr_max_time.attr,
	NULL
};

static struct attribute_group umwait_attr_group = {
	.attrs = umwait_attrs,
	.name = "umwait_control",
};
209 
/*
 * Set up umwait control: cache the firmware-configured MSR value, install
 * the CPU hotplug callbacks (which also program all currently online
 * CPUs), register the resume hook and create the sysfs interface.
 *
 * Returns 0 on success, -ENODEV without WAITPKG support, or a negative
 * error code from the hotplug/sysfs setup.
 */
static int __init umwait_init(void)
{
	struct device *dev;
	int ret;

	if (!boot_cpu_has(X86_FEATURE_WAITPKG))
		return -ENODEV;

	/*
	 * Cache the original control MSR value before the control MSR is
	 * changed. This is the only place where orig_umwait_control_cached
	 * is modified.
	 */
	rdmsrl(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached);

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "umwait:online",
				umwait_cpu_online, umwait_cpu_offline);
	if (ret < 0) {
		/*
		 * On failure, the control MSR on all CPUs has the
		 * original control value.
		 */
		return ret;
	}

	register_syscore_ops(&umwait_syscore_ops);

	/*
	 * Add umwait control interface. Ignore failure, so at least the
	 * default values are set up in case the machine manages to boot.
	 * NOTE(review): the sysfs error is still returned here; "ignore"
	 * presumably refers to initcall return values not aborting boot —
	 * confirm against initcall handling.
	 */
	dev = cpu_subsys.dev_root;
	return sysfs_create_group(&dev->kobj, &umwait_attr_group);
}
device_initcall(umwait_init);
245