/*
 * From Coreboot file of same name
 *
 * Copyright (C) 2007-2009 coresystems GmbH
 * Copyright (C) 2011 The Chromium Authors
 *
 * SPDX-License-Identifier:	GPL-2.0
 */

#include <common.h>
#include <cpu.h>
#include <dm.h>
#include <fdtdec.h>
#include <malloc.h>
#include <asm/cpu.h>
#include <asm/cpu_x86.h>
#include <asm/msr.h>
#include <asm/msr-index.h>
#include <asm/mtrr.h>
#include <asm/processor.h>
#include <asm/speedstep.h>
#include <asm/turbo.h>
#include <asm/arch/model_206ax.h>

DECLARE_GLOBAL_DATA_PTR;

static void enable_vmx(void)
{
	struct cpuid_result regs;
#ifdef CONFIG_ENABLE_VMX
	int enable = true;
#else
	int enable = false;
#endif
	msr_t msr;

	regs = cpuid(1);
	/* Check that VMX is supported before reading or writing the MSR */
	if (!((regs.ecx & CPUID_VMX) || (regs.ecx & CPUID_SMX)))
		return;

	msr = msr_read(MSR_IA32_FEATURE_CONTROL);

	if (msr.lo & (1 << 0)) {
		debug("VMX is locked, so %s will do nothing\n", __func__);
		/*
		 * VMX is locked; writing the MSR again would raise a
		 * #GP fault
		 */
		return;
	}

	/*
	 * The IA32_FEATURE_CONTROL MSR may initialize with random values.
	 * It must be cleared regardless of the VMX config setting.
	 */
	msr.hi = 0;
	msr.lo = 0;

	debug("%s VMX\n", enable ? "Enabling" : "Disabling");

	/*
	 * Although the Intel manual says you must set the lock bit in
	 * addition to the VMX bit for VMX to work, that is not actually
	 * required. We therefore leave it unlocked for the OS to manage.
	 * This is good for a few reasons:
	 * - No need to reflash the BIOS just to toggle the lock bit.
	 * - The VMX bits really should match each other across cores, so
	 *   hard locking it on one while another has the opposite setting
	 *   can easily lead to crashes as code using VMX migrates between
	 *   them.
	 * - Vendors that "upsell" from a BIOS that disables+locks to one
	 *   that doesn't are being sleazy.
	 * By leaving this to the OS (e.g. Linux), people can do exactly what
	 * they want on the fly, and do it correctly (e.g. across multiple
	 * cores).
	 */
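	/*
	 * For reference, the IA32_FEATURE_CONTROL bits touched below are,
	 * per the Intel SDM: bit 0 = lock, bit 1 = enable VMX inside SMX
	 * operation, bit 2 = enable VMX outside SMX operation.
	 */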
	if (enable) {
		msr.lo |= (1 << 2);
		if (regs.ecx & CPUID_SMX)
			msr.lo |= (1 << 1);
	}

	msr_write(MSR_IA32_FEATURE_CONTROL, msr);
}

/* Convert time in seconds to POWER_LIMIT_1_TIME MSR value */
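/*
 * The 7-bit MSR encoding below appears to follow the RAPL time-window
 * format from the Intel SDM: value = Y | (Z << 5), giving a window of
 * (1 + Z/4) * 2^Y time units (1/1024 s with the default time unit),
 * which is why only the seconds listed here are exactly representable.
 */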
static const u8 power_limit_time_sec_to_msr[] = {
	[0]   = 0x00,
	[1]   = 0x0a,
	[2]   = 0x0b,
	[3]   = 0x4b,
	[4]   = 0x0c,
	[5]   = 0x2c,
	[6]   = 0x4c,
	[7]   = 0x6c,
	[8]   = 0x0d,
	[10]  = 0x2d,
	[12]  = 0x4d,
	[14]  = 0x6d,
	[16]  = 0x0e,
	[20]  = 0x2e,
	[24]  = 0x4e,
	[28]  = 0x6e,
	[32]  = 0x0f,
	[40]  = 0x2f,
	[48]  = 0x4f,
	[56]  = 0x6f,
	[64]  = 0x10,
	[80]  = 0x30,
	[96]  = 0x50,
	[112] = 0x70,
	[128] = 0x11,
};

/* Convert POWER_LIMIT_1_TIME MSR value to seconds */
static const u8 power_limit_time_msr_to_sec[] = {
	[0x00] = 0,
	[0x0a] = 1,
	[0x0b] = 2,
	[0x4b] = 3,
	[0x0c] = 4,
	[0x2c] = 5,
	[0x4c] = 6,
	[0x6c] = 7,
	[0x0d] = 8,
	[0x2d] = 10,
	[0x4d] = 12,
	[0x6d] = 14,
	[0x0e] = 16,
	[0x2e] = 20,
	[0x4e] = 24,
	[0x6e] = 28,
	[0x0f] = 32,
	[0x2f] = 40,
	[0x4f] = 48,
	[0x6f] = 56,
	[0x10] = 64,
	[0x30] = 80,
	[0x50] = 96,
	[0x70] = 112,
	[0x11] = 128,
};

int cpu_config_tdp_levels(void)
{
	struct cpuid_result result;
	msr_t platform_info;

	/* Minimum CPU revision */
	result = cpuid(1);
	if (result.eax < IVB_CONFIG_TDP_MIN_CPUID)
		return 0;

	/* Bits 34:33 indicate how many levels are supported */
	platform_info = msr_read(MSR_PLATFORM_INFO);
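	/* Bits 34:33 of the 64-bit MSR are bits 2:1 of the high word */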
	return (platform_info.hi >> 1) & 3;
}

/*
 * Configure processor power limits if possible
 * This must be done AFTER BIOS_RESET_CPL has been set
 */
void set_power_limits(u8 power_limit_1_time)
{
	msr_t msr = msr_read(MSR_PLATFORM_INFO);
	msr_t limit;
	unsigned int power_unit;
	unsigned int tdp, min_power, max_power, max_time;
	u8 power_limit_1_val;

	if (power_limit_1_time >= ARRAY_SIZE(power_limit_time_sec_to_msr))
		return;

	if (!(msr.lo & PLATFORM_INFO_SET_TDP))
		return;

	/* Get units */
	msr = msr_read(MSR_PKG_POWER_SKU_UNIT);
	power_unit = 2 << ((msr.lo & 0xf) - 1);
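	/*
	 * PKG_POWER_SKU_UNIT[3:0] is the power-unit exponent PU, so RAPL
	 * power fields are in units of 1/2^PU W; power_unit now holds
	 * 2^PU, the divisor that converts those fields to watts.
	 */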

	/* Get power defaults for this SKU */
	msr = msr_read(MSR_PKG_POWER_SKU);
	tdp = msr.lo & 0x7fff;
	min_power = (msr.lo >> 16) & 0x7fff;
	max_power = msr.hi & 0x7fff;
	max_time = (msr.hi >> 16) & 0x7f;

	debug("CPU TDP: %u Watts\n", tdp / power_unit);

	if (max_time < ARRAY_SIZE(power_limit_time_msr_to_sec) &&
	    power_limit_time_msr_to_sec[max_time] > power_limit_1_time)
		power_limit_1_time = power_limit_time_msr_to_sec[max_time];

	if (min_power > 0 && tdp < min_power)
		tdp = min_power;

	if (max_power > 0 && tdp > max_power)
		tdp = max_power;

	power_limit_1_val = power_limit_time_sec_to_msr[power_limit_1_time];

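	/*
	 * MSR_PKG_POWER_LIMIT is a 64-bit MSR: the low half (limit.lo)
	 * carries power limit 1 (long term) and the high half (limit.hi)
	 * carries power limit 2 (short term).
	 */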
	/* Set long term power limit to TDP */
	limit.lo = 0;
	limit.lo |= tdp & PKG_POWER_LIMIT_MASK;
	limit.lo |= PKG_POWER_LIMIT_EN;
	limit.lo |= (power_limit_1_val & PKG_POWER_LIMIT_TIME_MASK) <<
		PKG_POWER_LIMIT_TIME_SHIFT;

	/* Set short term power limit to 1.25 * TDP */
	limit.hi = 0;
	limit.hi |= ((tdp * 125) / 100) & PKG_POWER_LIMIT_MASK;
	limit.hi |= PKG_POWER_LIMIT_EN;
	/* Power limit 2 time is only programmable on SNB EP/EX */

	msr_write(MSR_PKG_POWER_LIMIT, limit);

	/* Use nominal TDP values for CPUs with configurable TDP */
	if (cpu_config_tdp_levels()) {
		msr = msr_read(MSR_CONFIG_TDP_NOMINAL);
		limit.hi = 0;
		limit.lo = msr.lo & 0xff;
		msr_write(MSR_TURBO_ACTIVATION_RATIO, limit);
	}
}

static void configure_c_states(void)
{
	struct cpuid_result result;
	msr_t msr;

	msr = msr_read(MSR_PMG_CST_CONFIG_CTL);
	msr.lo |= (1 << 28);	/* C1 Auto Undemotion Enable */
	msr.lo |= (1 << 27);	/* C3 Auto Undemotion Enable */
	msr.lo |= (1 << 26);	/* C1 Auto Demotion Enable */
	msr.lo |= (1 << 25);	/* C3 Auto Demotion Enable */
	msr.lo &= ~(1 << 10);	/* Disable IO MWAIT redirection */
	msr.lo |= 7;		/* No package C-state limit */
	msr_write(MSR_PMG_CST_CONFIG_CTL, msr);

	msr = msr_read(MSR_PMG_IO_CAPTURE_ADR);
	msr.lo &= ~0x7ffff;
	msr.lo |= (PMB0_BASE + 4);	/* LVL_2 base address */
	msr.lo |= (2 << 16);		/* CST Range: C7 is max C-state */
	msr_write(MSR_PMG_IO_CAPTURE_ADR, msr);

	msr = msr_read(MSR_MISC_PWR_MGMT);
	msr.lo &= ~(1 << 0);	/* Enable P-state HW_ALL coordination */
	msr_write(MSR_MISC_PWR_MGMT, msr);

	msr = msr_read(MSR_POWER_CTL);
	msr.lo |= (1 << 18);	/* Enable Energy Perf Bias MSR 0x1b0 */
	msr.lo |= (1 << 1);	/* C1E Enable */
	msr.lo |= (1 << 0);	/* Bi-directional PROCHOT# */
	msr_write(MSR_POWER_CTL, msr);

	/* C3 Interrupt Response Time Limit */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | 0x50;
	msr_write(MSR_PKGC3_IRTL, msr);

	/* C6 Interrupt Response Time Limit */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | 0x68;
	msr_write(MSR_PKGC6_IRTL, msr);

	/* C7 Interrupt Response Time Limit */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | 0x6D;
	msr_write(MSR_PKGC7_IRTL, msr);

	/* Primary Plane Current Limit */
	msr = msr_read(MSR_PP0_CURRENT_CONFIG);
	msr.lo &= ~0x1fff;
	msr.lo |= PP0_CURRENT_LIMIT;
	msr_write(MSR_PP0_CURRENT_CONFIG, msr);

	/* Secondary Plane Current Limit */
	msr = msr_read(MSR_PP1_CURRENT_CONFIG);
	msr.lo &= ~0x1fff;
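	/* CPUID signature 0x306xx is Ivy Bridge, 0x206xx is Sandy Bridge */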
	result = cpuid(1);
	if (result.eax >= 0x30600)
		msr.lo |= PP1_CURRENT_LIMIT_IVB;
	else
		msr.lo |= PP1_CURRENT_LIMIT_SNB;
	msr_write(MSR_PP1_CURRENT_CONFIG, msr);
}

static int configure_thermal_target(struct udevice *dev)
{
	int tcc_offset;
	msr_t msr;

	tcc_offset = fdtdec_get_int(gd->fdt_blob, dev->of_offset, "tcc-offset",
				    0);

	/* Set TCC activation offset if supported */
	msr = msr_read(MSR_PLATFORM_INFO);
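	/* PLATFORM_INFO bit 30: TCC activation offset is programmable */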
	if ((msr.lo & (1 << 30)) && tcc_offset) {
		msr = msr_read(MSR_TEMPERATURE_TARGET);
		msr.lo &= ~(0xf << 24); /* Bits 27:24 */
		msr.lo |= (tcc_offset & 0xf) << 24;
		msr_write(MSR_TEMPERATURE_TARGET, msr);
	}

	return 0;
}

static void configure_misc(void)
{
	msr_t msr;

	msr = msr_read(IA32_MISC_ENABLE);
	msr.lo |= (1 << 0);	  /* Fast String enable */
	msr.lo |= (1 << 3);	  /* TM1/TM2/EMTTM enable */
	msr.lo |= (1 << 16);	  /* Enhanced SpeedStep Enable */
	msr_write(IA32_MISC_ENABLE, msr);

	/* Disable Thermal interrupts */
	msr.lo = 0;
	msr.hi = 0;
	msr_write(IA32_THERM_INTERRUPT, msr);

	/* Enable package critical interrupt only */
	msr.lo = 1 << 4;
	msr.hi = 0;
	msr_write(IA32_PACKAGE_THERM_INTERRUPT, msr);
}

static void enable_lapic_tpr(void)
{
	msr_t msr;

	msr = msr_read(MSR_PIC_MSG_CONTROL);
	msr.lo &= ~(1 << 10);	/* Enable APIC TPR updates */
	msr_write(MSR_PIC_MSG_CONTROL, msr);
}

static void configure_dca_cap(void)
{
	struct cpuid_result cpuid_regs;
	msr_t msr;

	/* Check feature flag in CPUID.(EAX=1):ECX[18]==1 */
	cpuid_regs = cpuid(1);
	if (cpuid_regs.ecx & (1 << 18)) {
		msr = msr_read(IA32_PLATFORM_DCA_CAP);
		msr.lo |= 1;
		msr_write(IA32_PLATFORM_DCA_CAP, msr);
	}
}

static void set_max_ratio(void)
{
	msr_t msr, perf_ctl;

	perf_ctl.hi = 0;

	/* Check for configurable TDP option */
	if (cpu_config_tdp_levels()) {
		/* Set to nominal TDP ratio */
		msr = msr_read(MSR_CONFIG_TDP_NOMINAL);
		perf_ctl.lo = (msr.lo & 0xff) << 8;
	} else {
		/* Platform Info bits 15:8 give max ratio */
		msr = msr_read(MSR_PLATFORM_INFO);
		perf_ctl.lo = msr.lo & 0xff00;
	}
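	/*
	 * IA32_PERF_CTL[15:8] selects the target ratio; the resulting core
	 * clock is ratio * BCLK, with BCLK nominally 100 MHz on SNB/IVB.
	 */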
	msr_write(MSR_IA32_PERF_CTL, perf_ctl);

	debug("model_x06ax: frequency set to %d\n",
	      ((perf_ctl.lo >> 8) & 0xff) * SANDYBRIDGE_BCLK);
}

static void set_energy_perf_bias(u8 policy)
{
	msr_t msr;

	/* Energy Policy is bits 3:0 */
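	/* 0 biases towards performance, 15 towards energy saving (Intel SDM) */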
	msr = msr_read(IA32_ENERGY_PERFORMANCE_BIAS);
	msr.lo &= ~0xf;
	msr.lo |= policy & 0xf;
	msr_write(IA32_ENERGY_PERFORMANCE_BIAS, msr);

	debug("model_x06ax: energy policy set to %u\n", policy);
}

static void configure_mca(void)
{
	msr_t msr;
	int i;

	msr.lo = 0;
	msr.hi = 0;
	/* This should only be done on a cold boot */
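	/*
	 * Each MCA bank spans four MSRs (CTL/STATUS/ADDR/MISC), hence the
	 * stride of 4 below
	 */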
	for (i = 0; i < 7; i++)
		msr_write(IA32_MC0_STATUS + (i * 4), msr);
}

#if CONFIG_USBDEBUG
static unsigned ehci_debug_addr;
#endif

static int model_206ax_init(struct udevice *dev)
{
	int ret;

	/* Clear out pending MCEs */
	configure_mca();

#if CONFIG_USBDEBUG
	/* Is this caution really needed? */
	if (!ehci_debug_addr)
		ehci_debug_addr = get_ehci_debug();
	set_ehci_debug(0);
#endif

#if CONFIG_USBDEBUG
	set_ehci_debug(ehci_debug_addr);
#endif

	/* Enable APIC TPR updates */
	enable_lapic_tpr();

	/* Enable virtualization if CONFIG_ENABLE_VMX is set */
	enable_vmx();

	/* Configure C States */
	configure_c_states();

	/* Configure Enhanced SpeedStep and Thermal Sensors */
	configure_misc();

	/* Thermal throttle activation offset */
	ret = configure_thermal_target(dev);
	if (ret) {
		debug("Cannot set thermal target\n");
		return ret;
	}

	/* Enable Direct Cache Access */
	configure_dca_cap();

	/* Set energy policy */
	set_energy_perf_bias(ENERGY_POLICY_NORMAL);

	/* Set Max Ratio */
	set_max_ratio();

	/* Enable Turbo */
	turbo_enable();

	return 0;
}

static int model_206ax_get_info(struct udevice *dev, struct cpu_info *info)
{
	msr_t msr;

	msr = msr_read(MSR_IA32_PERF_CTL);
	info->cpu_freq = ((msr.lo >> 8) & 0xff) * SANDYBRIDGE_BCLK * 1000000;
	info->features = 1 << CPU_FEAT_L1_CACHE | 1 << CPU_FEAT_MMU |
		1 << CPU_FEAT_UCODE;

	return 0;
}

static int model_206ax_get_count(struct udevice *dev)
{
	return 4;
}

static int cpu_x86_model_206ax_probe(struct udevice *dev)
{
	if (dev->seq == 0)
		model_206ax_init(dev);

	return 0;
}

static const struct cpu_ops cpu_x86_model_206ax_ops = {
	.get_desc	= cpu_x86_get_desc,
	.get_info	= model_206ax_get_info,
	.get_count	= model_206ax_get_count,
	.get_vendor	= cpu_x86_get_vendor,
};

static const struct udevice_id cpu_x86_model_206ax_ids[] = {
	{ .compatible = "intel,core-gen3" },
	{ }
};

U_BOOT_DRIVER(cpu_x86_model_206ax_drv) = {
	.name		= "cpu_x86_model_206ax",
	.id		= UCLASS_CPU,
	.of_match	= cpu_x86_model_206ax_ids,
	.bind		= cpu_x86_bind,
	.probe		= cpu_x86_model_206ax_probe,
	.ops		= &cpu_x86_model_206ax_ops,
};