/*
 * From Coreboot file of same name
 *
 * Copyright (C) 2007-2009 coresystems GmbH
 * Copyright (C) 2011 The Chromium Authors
 *
 * SPDX-License-Identifier:	GPL-2.0
 */

#include <common.h>
#include <cpu.h>
#include <dm.h>
#include <fdtdec.h>
#include <malloc.h>
#include <asm/cpu.h>
#include <asm/cpu_x86.h>
#include <asm/msr.h>
#include <asm/msr-index.h>
#include <asm/mtrr.h>
#include <asm/processor.h>
#include <asm/speedstep.h>
#include <asm/turbo.h>
#include <asm/arch/bd82x6x.h>
#include <asm/arch/model_206ax.h>

DECLARE_GLOBAL_DATA_PTR;

static void enable_vmx(void)
{
	struct cpuid_result regs;
#ifdef CONFIG_ENABLE_VMX
	int enable = true;
#else
	int enable = false;
#endif
	msr_t msr;

	regs = cpuid(1);
	/*
	 * Check that VMX is supported (CPUID.1:ECX reports VMX in bit 5 and
	 * SMX in bit 6) before reading or writing the MSR
	 */
	if (!((regs.ecx & CPUID_VMX) || (regs.ecx & CPUID_SMX)))
		return;

	msr = msr_read(MSR_IA32_FEATURE_CONTROL);

	if (msr.lo & (1 << 0)) {
		debug("VMX is locked, so %s will do nothing\n", __func__);
		/*
		 * VMX locked. If we set it again we get an illegal
		 * instruction
		 */
		return;
	}

	/*
	 * The IA32_FEATURE_CONTROL MSR may initialize with random values.
	 * It must be cleared regardless of VMX config setting.
	 */
	msr.hi = 0;
	msr.lo = 0;

	debug("%s VMX\n", enable ? "Enabling" : "Disabling");

	/*
	 * Even though the Intel manual says you must set the lock bit in
	 * addition to the VMX bit in order for VMX to work, that is not
	 * actually the case. Thus we leave it unlocked for the OS to manage
	 * things itself. This is good for a few reasons:
	 * - No need to reflash the BIOS just to toggle the lock bit.
	 * - The VMX bits really should match each other across cores, so
	 *   hard locking it on one while another has the opposite setting
	 *   can easily lead to crashes as code using VMX migrates between
	 *   them.
	 * - Vendors that want to "upsell" from a BIOS that disables+locks
	 *   VMX to one that doesn't are being sleazy.
	 * By leaving this to the OS (e.g. Linux), people can do exactly what
	 * they want on the fly, and do it correctly (e.g. across multiple
	 * cores).
	 */
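	/*
	 * For reference, the IA32_FEATURE_CONTROL bits used below (per the
	 * Intel SDM): bit 0 is the lock bit, bit 1 enables VMX inside SMX
	 * operation and bit 2 enables VMX outside SMX operation.
	 */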
	if (enable) {
		msr.lo |= (1 << 2);
		if (regs.ecx & CPUID_SMX)
			msr.lo |= (1 << 1);
	}

	msr_write(MSR_IA32_FEATURE_CONTROL, msr);
}

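/*
 * The POWER_LIMIT_1_TIME field uses the RAPL time-window encoding described
 * in the Intel SDM: bits 4:0 hold an exponent Y and bits 6:5 a fraction Z,
 * giving a window of roughly (1 + Z / 4) * 2^Y time units (1 / 1024 second
 * on these parts). The tables below map between seconds and that encoding.
 */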
/* Convert time in seconds to POWER_LIMIT_1_TIME MSR value */
static const u8 power_limit_time_sec_to_msr[] = {
	[0]   = 0x00,
	[1]   = 0x0a,
	[2]   = 0x0b,
	[3]   = 0x4b,
	[4]   = 0x0c,
	[5]   = 0x2c,
	[6]   = 0x4c,
	[7]   = 0x6c,
	[8]   = 0x0d,
	[10]  = 0x2d,
	[12]  = 0x4d,
	[14]  = 0x6d,
	[16]  = 0x0e,
	[20]  = 0x2e,
	[24]  = 0x4e,
	[28]  = 0x6e,
	[32]  = 0x0f,
	[40]  = 0x2f,
	[48]  = 0x4f,
	[56]  = 0x6f,
	[64]  = 0x10,
	[80]  = 0x30,
	[96]  = 0x50,
	[112] = 0x70,
	[128] = 0x11,
};

/* Convert POWER_LIMIT_1_TIME MSR value to seconds */
static const u8 power_limit_time_msr_to_sec[] = {
	[0x00] = 0,
	[0x0a] = 1,
	[0x0b] = 2,
	[0x4b] = 3,
	[0x0c] = 4,
	[0x2c] = 5,
	[0x4c] = 6,
	[0x6c] = 7,
	[0x0d] = 8,
	[0x2d] = 10,
	[0x4d] = 12,
	[0x6d] = 14,
	[0x0e] = 16,
	[0x2e] = 20,
	[0x4e] = 24,
	[0x6e] = 28,
	[0x0f] = 32,
	[0x2f] = 40,
	[0x4f] = 48,
	[0x6f] = 56,
	[0x10] = 64,
	[0x30] = 80,
	[0x50] = 96,
	[0x70] = 112,
	[0x11] = 128,
};

int cpu_config_tdp_levels(void)
{
	struct cpuid_result result;
	msr_t platform_info;

	/* Minimum CPU revision */
	result = cpuid(1);
	if (result.eax < IVB_CONFIG_TDP_MIN_CPUID)
		return 0;

	/* Bits 34:33 indicate how many levels supported */
	platform_info = msr_read(MSR_PLATFORM_INFO);
	return (platform_info.hi >> 1) & 3;
}

/*
 * Configure processor power limits if possible
 * This must be done AFTER setting BIOS_RESET_CPL
 */
void set_power_limits(u8 power_limit_1_time)
{
	msr_t msr = msr_read(MSR_PLATFORM_INFO);
	msr_t limit;
	unsigned power_unit;
	unsigned tdp, min_power, max_power, max_time;
	u8 power_limit_1_val;

	if (power_limit_1_time >= ARRAY_SIZE(power_limit_time_sec_to_msr))
		return;

	if (!(msr.lo & PLATFORM_INFO_SET_TDP))
		return;

	/*
	 * Get units: MSR_PKG_POWER_SKU_UNIT bits 3:0 give the power unit as
	 * 1 / 2^PU watt, so raw power values below are divided by power_unit
	 */
	msr = msr_read(MSR_PKG_POWER_SKU_UNIT);
	power_unit = 2 << ((msr.lo & 0xf) - 1);

	/* Get power defaults for this SKU */
	msr = msr_read(MSR_PKG_POWER_SKU);
	tdp = msr.lo & 0x7fff;
	min_power = (msr.lo >> 16) & 0x7fff;
	max_power = msr.hi & 0x7fff;
	max_time = (msr.hi >> 16) & 0x7f;

	debug("CPU TDP: %u Watts\n", tdp / power_unit);

	if (power_limit_time_msr_to_sec[max_time] > power_limit_1_time)
		power_limit_1_time = power_limit_time_msr_to_sec[max_time];

	if (min_power > 0 && tdp < min_power)
		tdp = min_power;

	if (max_power > 0 && tdp > max_power)
		tdp = max_power;

	power_limit_1_val = power_limit_time_sec_to_msr[power_limit_1_time];

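	/*
	 * MSR_PKG_POWER_LIMIT holds power limit 1 in its low dword and power
	 * limit 2 in its high dword, each with a 15-bit power field and an
	 * enable bit (RAPL layout per the Intel SDM).
	 */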
	/* Set long term power limit to TDP */
	limit.lo = 0;
	limit.lo |= tdp & PKG_POWER_LIMIT_MASK;
	limit.lo |= PKG_POWER_LIMIT_EN;
	limit.lo |= (power_limit_1_val & PKG_POWER_LIMIT_TIME_MASK) <<
		PKG_POWER_LIMIT_TIME_SHIFT;

	/* Set short term power limit to 1.25 * TDP */
	limit.hi = 0;
	limit.hi |= ((tdp * 125) / 100) & PKG_POWER_LIMIT_MASK;
	limit.hi |= PKG_POWER_LIMIT_EN;
	/* Power limit 2 time is only programmable on SNB EP/EX */

	msr_write(MSR_PKG_POWER_LIMIT, limit);

	/* Use nominal TDP values for CPUs with configurable TDP */
	if (cpu_config_tdp_levels()) {
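		/* MSR_CONFIG_TDP_NOMINAL bits 7:0 hold the nominal ratio */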
		msr = msr_read(MSR_CONFIG_TDP_NOMINAL);
		limit.hi = 0;
		limit.lo = msr.lo & 0xff;
		msr_write(MSR_TURBO_ACTIVATION_RATIO, limit);
	}
}

static void configure_c_states(void)
{
	struct cpuid_result result;
	msr_t msr;

	msr = msr_read(MSR_PMG_CST_CONFIG_CTL);
	msr.lo |= (1 << 28);	/* C1 Auto Undemotion Enable */
	msr.lo |= (1 << 27);	/* C3 Auto Undemotion Enable */
	msr.lo |= (1 << 26);	/* C1 Auto Demotion Enable */
	msr.lo |= (1 << 25);	/* C3 Auto Demotion Enable */
	msr.lo &= ~(1 << 10);	/* Disable IO MWAIT redirection */
	msr.lo |= 7;		/* No package C-state limit */
	msr_write(MSR_PMG_CST_CONFIG_CTL, msr);

	msr = msr_read(MSR_PMG_IO_CAPTURE_ADR);
	msr.lo &= ~0x7ffff;
	msr.lo |= (PMB0_BASE + 4);	/* LVL_2 base address */
	msr.lo |= (2 << 16);		/* CST Range: C7 is max C-state */
	msr_write(MSR_PMG_IO_CAPTURE_ADR, msr);

	msr = msr_read(MSR_MISC_PWR_MGMT);
	msr.lo &= ~(1 << 0);	/* Enable P-state HW_ALL coordination */
	msr_write(MSR_MISC_PWR_MGMT, msr);

	msr = msr_read(MSR_POWER_CTL);
	msr.lo |= (1 << 18);	/* Enable Energy Perf Bias MSR 0x1b0 */
	msr.lo |= (1 << 1);	/* C1E Enable */
	msr.lo |= (1 << 0);	/* Bi-directional PROCHOT# */
	msr_write(MSR_POWER_CTL, msr);

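	/*
	 * The package C-state IRTL MSRs encode a latency value in bits 9:0
	 * with the unit selected by bits 12:10; IRTL_1024_NS selects 1024 ns
	 * units, so the limits below are roughly 82 us, 106 us and 112 us.
	 */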
	/* C3 Interrupt Response Time Limit */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | 0x50;
	msr_write(MSR_PKGC3_IRTL, msr);

	/* C6 Interrupt Response Time Limit */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | 0x68;
	msr_write(MSR_PKGC6_IRTL, msr);

	/* C7 Interrupt Response Time Limit */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | 0x6D;
	msr_write(MSR_PKGC7_IRTL, msr);

	/* Primary Plane Current Limit */
	msr = msr_read(MSR_PP0_CURRENT_CONFIG);
	msr.lo &= ~0x1fff;
	msr.lo |= PP0_CURRENT_LIMIT;
	msr_write(MSR_PP0_CURRENT_CONFIG, msr);

	/* Secondary Plane Current Limit */
	msr = msr_read(MSR_PP1_CURRENT_CONFIG);
	msr.lo &= ~0x1fff;
	result = cpuid(1);
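	/*
	 * A CPUID signature of 0x306xx or above indicates Ivy Bridge
	 * rather than Sandy Bridge
	 */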
	if (result.eax >= 0x30600)
		msr.lo |= PP1_CURRENT_LIMIT_IVB;
	else
		msr.lo |= PP1_CURRENT_LIMIT_SNB;
	msr_write(MSR_PP1_CURRENT_CONFIG, msr);
}

static int configure_thermal_target(struct udevice *dev)
{
	int tcc_offset;
	msr_t msr;

	tcc_offset = fdtdec_get_int(gd->fdt_blob, dev->of_offset, "tcc-offset",
				    0);

	/*
	 * Set the TCC activation offset if supported (MSR_PLATFORM_INFO
	 * bit 30 indicates that the offset is programmable)
	 */
	msr = msr_read(MSR_PLATFORM_INFO);
	if ((msr.lo & (1 << 30)) && tcc_offset) {
		msr = msr_read(MSR_TEMPERATURE_TARGET);
		msr.lo &= ~(0xf << 24); /* Bits 27:24 */
		msr.lo |= (tcc_offset & 0xf) << 24;
		msr_write(MSR_TEMPERATURE_TARGET, msr);
	}

	return 0;
}

static void configure_misc(void)
{
	msr_t msr;

	msr = msr_read(IA32_MISC_ENABLE);
	msr.lo |= (1 << 0);	  /* Fast String enable */
	msr.lo |= (1 << 3);	  /* TM1/TM2/EMTTM enable */
	msr.lo |= (1 << 16);	  /* Enhanced SpeedStep Enable */
	msr_write(IA32_MISC_ENABLE, msr);

	/* Disable Thermal interrupts */
	msr.lo = 0;
	msr.hi = 0;
	msr_write(IA32_THERM_INTERRUPT, msr);

	/* Enable package critical interrupt only */
	msr.lo = 1 << 4;
	msr.hi = 0;
	msr_write(IA32_PACKAGE_THERM_INTERRUPT, msr);
}

static void enable_lapic_tpr(void)
{
	msr_t msr;

	msr = msr_read(MSR_PIC_MSG_CONTROL);
	msr.lo &= ~(1 << 10);	/* Enable APIC TPR updates */
	msr_write(MSR_PIC_MSG_CONTROL, msr);
}

static void configure_dca_cap(void)
{
	struct cpuid_result cpuid_regs;
	msr_t msr;

	/* Check feature flag in CPUID.(EAX=1):ECX[18]==1 */
	cpuid_regs = cpuid(1);
	if (cpuid_regs.ecx & (1 << 18)) {
		msr = msr_read(IA32_PLATFORM_DCA_CAP);
		msr.lo |= 1;
		msr_write(IA32_PLATFORM_DCA_CAP, msr);
	}
}

static void set_max_ratio(void)
{
	msr_t msr, perf_ctl;

	perf_ctl.hi = 0;

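	/*
	 * IA32_PERF_CTL takes the target ratio in bits 15:8; the resulting
	 * core frequency is that ratio times the base clock
	 * (SANDYBRIDGE_BCLK, in MHz).
	 */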
	/* Check for configurable TDP option */
	if (cpu_config_tdp_levels()) {
		/* Set to nominal TDP ratio */
		msr = msr_read(MSR_CONFIG_TDP_NOMINAL);
		perf_ctl.lo = (msr.lo & 0xff) << 8;
	} else {
		/* Platform Info bits 15:8 give max ratio */
		msr = msr_read(MSR_PLATFORM_INFO);
		perf_ctl.lo = msr.lo & 0xff00;
	}
	msr_write(MSR_IA32_PERF_CTL, perf_ctl);

	debug("model_x06ax: frequency set to %d\n",
	      ((perf_ctl.lo >> 8) & 0xff) * SANDYBRIDGE_BCLK);
}

static void set_energy_perf_bias(u8 policy)
{
	msr_t msr;

	/* Energy Policy is bits 3:0 */
	msr = msr_read(IA32_ENERGY_PERFORMANCE_BIAS);
	msr.lo &= ~0xf;
	msr.lo |= policy & 0xf;
	msr_write(IA32_ENERGY_PERFORMANCE_BIAS, msr);

	debug("model_x06ax: energy policy set to %u\n", policy);
}

static void configure_mca(void)
{
	msr_t msr;
	int i;

	msr.lo = 0;
	msr.hi = 0;
	/* This should only be done on a cold boot */
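	/* The machine-check banks are spaced four MSRs apart, hence i * 4 */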
	for (i = 0; i < 7; i++)
		msr_write(IA32_MC0_STATUS + (i * 4), msr);
}

#if CONFIG_USBDEBUG
static unsigned ehci_debug_addr;
#endif

static int model_206ax_init(struct udevice *dev)
{
	int ret;

	/* Clear out pending MCEs */
	configure_mca();

#if CONFIG_USBDEBUG
	/* Is this caution really needed? */
	if (!ehci_debug_addr)
		ehci_debug_addr = get_ehci_debug();
	set_ehci_debug(0);
#endif

#if CONFIG_USBDEBUG
	set_ehci_debug(ehci_debug_addr);
#endif

	/* Enable local APIC TPR updates */
	enable_lapic_tpr();

	/* Enable virtualization if configured (CONFIG_ENABLE_VMX) */
	enable_vmx();

	/* Configure C States */
	configure_c_states();

	/* Configure Enhanced SpeedStep and Thermal Sensors */
	configure_misc();

	/* Thermal throttle activation offset */
	ret = configure_thermal_target(dev);
	if (ret) {
		debug("Cannot set thermal target\n");
		return ret;
	}

	/* Enable Direct Cache Access */
	configure_dca_cap();

	/* Set energy policy */
	set_energy_perf_bias(ENERGY_POLICY_NORMAL);

	/* Set Max Ratio */
	set_max_ratio();

	/* Enable Turbo */
	turbo_enable();

	return 0;
}

static int model_206ax_get_info(struct udevice *dev, struct cpu_info *info)
{
	msr_t msr;

	msr = msr_read(MSR_IA32_PERF_CTL);
	info->cpu_freq = ((msr.lo >> 8) & 0xff) * SANDYBRIDGE_BCLK * 1000000;
	info->features = 1 << CPU_FEAT_L1_CACHE | 1 << CPU_FEAT_MMU |
		1 << CPU_FEAT_UCODE;

	return 0;
}

static int model_206ax_get_count(struct udevice *dev)
{
	return 4;
}

static int cpu_x86_model_206ax_probe(struct udevice *dev)
{
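	/* Run the full init only on the first (boot) CPU */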
	if (dev->seq == 0)
		model_206ax_init(dev);

	return 0;
}

static const struct cpu_ops cpu_x86_model_206ax_ops = {
	.get_desc	= cpu_x86_get_desc,
	.get_info	= model_206ax_get_info,
	.get_count	= model_206ax_get_count,
};

static const struct udevice_id cpu_x86_model_206ax_ids[] = {
	{ .compatible = "intel,core-gen3" },
	{ }
};

U_BOOT_DRIVER(cpu_x86_model_206ax_drv) = {
	.name		= "cpu_x86_model_206ax",
	.id		= UCLASS_CPU,
	.of_match	= cpu_x86_model_206ax_ids,
	.bind		= cpu_x86_bind,
	.probe		= cpu_x86_model_206ax_probe,
	.ops		= &cpu_x86_model_206ax_ops,
};