/*
 * From Coreboot file of same name
 *
 * Copyright (C) 2007-2009 coresystems GmbH
 * Copyright (C) 2011 The Chromium Authors
 *
 * SPDX-License-Identifier:	GPL-2.0
 */

#include <common.h>
#include <fdtdec.h>
#include <malloc.h>
#include <asm/acpi.h>
#include <asm/cpu.h>
#include <asm/lapic.h>
#include <asm/lapic_def.h>
#include <asm/msr.h>
#include <asm/mtrr.h>
#include <asm/processor.h>
#include <asm/speedstep.h>
#include <asm/turbo.h>
#include <asm/arch/model_206ax.h>

DECLARE_GLOBAL_DATA_PTR;

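/*
 * Enable or disable VMX via IA32_FEATURE_CONTROL, depending on
 * CONFIG_ENABLE_VMX. If the MSR is already locked nothing can be changed
 * until the next reset, so just report that and return.
 */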
static void enable_vmx(void)
{
	struct cpuid_result regs;
#ifdef CONFIG_ENABLE_VMX
	int enable = true;
#else
	int enable = false;
#endif
	msr_t msr;

	regs = cpuid(1);
	/* Check that VMX is supported before reading or writing the MSR */
	if (!((regs.ecx & CPUID_VMX) || (regs.ecx & CPUID_SMX)))
		return;

	msr = msr_read(MSR_IA32_FEATURE_CONTROL);

	if (msr.lo & (1 << 0)) {
		debug("VMX is locked, so %s will do nothing\n", __func__);
		/* VMX locked. If we set it again we get an illegal
		 * instruction
		 */
		return;
	}

	/* The IA32_FEATURE_CONTROL MSR may initialize with random values.
	 * It must be cleared regardless of VMX config setting.
	 */
	msr.hi = 0;
	msr.lo = 0;

	debug("%s VMX\n", enable ? "Enabling" : "Disabling");

	/*
	 * Even though the Intel manual says you must set the lock bit in
	 * addition to the VMX bit in order for VMX to work, that is
	 * incorrect. Thus we leave it unlocked for the OS to manage things
	 * itself. This is good for a few reasons:
	 * - No need to reflash the BIOS just to toggle the lock bit.
	 * - The VMX bits really really should match each other across cores,
	 *   so hard locking it on one while another has the opposite setting
	 *   can easily lead to crashes as code using VMX migrates between
	 *   them.
	 * - Vendors that want to "upsell" from a BIOS that disables+locks
	 *   VMX to one that doesn't are being sleazy.
	 * By leaving this to the OS (e.g. Linux), people can do exactly what
	 * they want on the fly, and do it correctly (e.g. across multiple
	 * cores).
	 */
	if (enable) {
		msr.lo |= (1 << 2);
		if (regs.ecx & CPUID_SMX)
			msr.lo |= (1 << 1);
	}

	msr_write(MSR_IA32_FEATURE_CONTROL, msr);
}

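/*
 * The POWER_LIMIT_1_TIME field is a 7-bit floating-point style encoding:
 * bits 4:0 hold an exponent Y and bits 6:5 a fraction Z, giving a window
 * of (1 + Z/4) * 2^Y time units. With the usual 1/1024 s RAPL time unit
 * on these parts, 0x0a is 1 s, 0x4b is 3 s, 0x11 is 128 s, and so on.
 * The tables below map the supported whole-second values in each direction.
 */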
/* Convert time in seconds to POWER_LIMIT_1_TIME MSR value */
static const u8 power_limit_time_sec_to_msr[] = {
	[0]   = 0x00,
	[1]   = 0x0a,
	[2]   = 0x0b,
	[3]   = 0x4b,
	[4]   = 0x0c,
	[5]   = 0x2c,
	[6]   = 0x4c,
	[7]   = 0x6c,
	[8]   = 0x0d,
	[10]  = 0x2d,
	[12]  = 0x4d,
	[14]  = 0x6d,
	[16]  = 0x0e,
	[20]  = 0x2e,
	[24]  = 0x4e,
	[28]  = 0x6e,
	[32]  = 0x0f,
	[40]  = 0x2f,
	[48]  = 0x4f,
	[56]  = 0x6f,
	[64]  = 0x10,
	[80]  = 0x30,
	[96]  = 0x50,
	[112] = 0x70,
	[128] = 0x11,
};

/* Convert POWER_LIMIT_1_TIME MSR value to seconds */
static const u8 power_limit_time_msr_to_sec[] = {
	[0x00] = 0,
	[0x0a] = 1,
	[0x0b] = 2,
	[0x4b] = 3,
	[0x0c] = 4,
	[0x2c] = 5,
	[0x4c] = 6,
	[0x6c] = 7,
	[0x0d] = 8,
	[0x2d] = 10,
	[0x4d] = 12,
	[0x6d] = 14,
	[0x0e] = 16,
	[0x2e] = 20,
	[0x4e] = 24,
	[0x6e] = 28,
	[0x0f] = 32,
	[0x2f] = 40,
	[0x4f] = 48,
	[0x6f] = 56,
	[0x10] = 64,
	[0x30] = 80,
	[0x50] = 96,
	[0x70] = 112,
	[0x11] = 128,
};

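/*
 * Return the number of configurable TDP levels supported by this CPU
 * (MSR_PLATFORM_INFO bits 34:33), or 0 on parts without the feature.
 */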
int cpu_config_tdp_levels(void)
{
	struct cpuid_result result;
	msr_t platform_info;

	/* Minimum CPU revision */
	result = cpuid(1);
	if (result.eax < IVB_CONFIG_TDP_MIN_CPUID)
		return 0;

	/* Bits 34:33 indicate how many levels supported */
	platform_info = msr_read(MSR_PLATFORM_INFO);
	return (platform_info.hi >> 1) & 3;
}

/*
 * Configure processor power limits if possible
 * This must be done AFTER BIOS_RESET_CPL is set
 */
void set_power_limits(u8 power_limit_1_time)
{
	msr_t msr = msr_read(MSR_PLATFORM_INFO);
	msr_t limit;
	unsigned power_unit;
	unsigned tdp, min_power, max_power, max_time;
	u8 power_limit_1_val;

	if (power_limit_1_time >= ARRAY_SIZE(power_limit_time_sec_to_msr))
		return;

	if (!(msr.lo & PLATFORM_INFO_SET_TDP))
		return;

	/* Get units */
	msr = msr_read(MSR_PKG_POWER_SKU_UNIT);
	power_unit = 2 << ((msr.lo & 0xf) - 1);
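	/*
	 * Bits 3:0 of MSR_PKG_POWER_SKU_UNIT encode the power unit as
	 * 1/2^N W, so power_unit above is the number of units per watt
	 * (2 << (N - 1) == 2^N for the non-zero values seen in practice).
	 */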

	/* Get power defaults for this SKU */
	msr = msr_read(MSR_PKG_POWER_SKU);
	tdp = msr.lo & 0x7fff;
	min_power = (msr.lo >> 16) & 0x7fff;
	max_power = msr.hi & 0x7fff;
	max_time = (msr.hi >> 16) & 0x7f;

	debug("CPU TDP: %u Watts\n", tdp / power_unit);

	if (power_limit_time_msr_to_sec[max_time] > power_limit_1_time)
		power_limit_1_time = power_limit_time_msr_to_sec[max_time];

	if (min_power > 0 && tdp < min_power)
		tdp = min_power;

	if (max_power > 0 && tdp > max_power)
		tdp = max_power;

	power_limit_1_val = power_limit_time_sec_to_msr[power_limit_1_time];

	/* Set long term power limit to TDP */
	limit.lo = 0;
	limit.lo |= tdp & PKG_POWER_LIMIT_MASK;
	limit.lo |= PKG_POWER_LIMIT_EN;
	limit.lo |= (power_limit_1_val & PKG_POWER_LIMIT_TIME_MASK) <<
		PKG_POWER_LIMIT_TIME_SHIFT;

	/* Set short term power limit to 1.25 * TDP */
	limit.hi = 0;
	limit.hi |= ((tdp * 125) / 100) & PKG_POWER_LIMIT_MASK;
	limit.hi |= PKG_POWER_LIMIT_EN;
	/* Power limit 2 time is only programmable on SNB EP/EX */

	msr_write(MSR_PKG_POWER_LIMIT, limit);

	/* Use nominal TDP values for CPUs with configurable TDP */
	if (cpu_config_tdp_levels()) {
		msr = msr_read(MSR_CONFIG_TDP_NOMINAL);
		limit.hi = 0;
		limit.lo = msr.lo & 0xff;
		msr_write(MSR_TURBO_ACTIVATION_RATIO, limit);
	}
}

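/*
 * Set up C-state control: enable C1/C3 auto-demotion and un-demotion,
 * remove the package C-state limit, program the I/O capture range and the
 * per-package C-state interrupt response time limits, enable C1E and
 * bidirectional PROCHOT#, and set the power plane current limits.
 */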
static void configure_c_states(void)
{
	struct cpuid_result result;
	msr_t msr;

	msr = msr_read(MSR_PMG_CST_CONFIG_CTL);
	msr.lo |= (1 << 28);	/* C1 Auto Undemotion Enable */
	msr.lo |= (1 << 27);	/* C3 Auto Undemotion Enable */
	msr.lo |= (1 << 26);	/* C1 Auto Demotion Enable */
	msr.lo |= (1 << 25);	/* C3 Auto Demotion Enable */
	msr.lo &= ~(1 << 10);	/* Disable IO MWAIT redirection */
	msr.lo |= 7;		/* No package C-state limit */
	msr_write(MSR_PMG_CST_CONFIG_CTL, msr);

	msr = msr_read(MSR_PMG_IO_CAPTURE_ADR);
	msr.lo &= ~0x7ffff;
	msr.lo |= (PMB0_BASE + 4);	/* LVL_2 base address */
	msr.lo |= (2 << 16);		/* CST Range: C7 is max C-state */
	msr_write(MSR_PMG_IO_CAPTURE_ADR, msr);

	msr = msr_read(MSR_MISC_PWR_MGMT);
	msr.lo &= ~(1 << 0);	/* Enable P-state HW_ALL coordination */
	msr_write(MSR_MISC_PWR_MGMT, msr);

	msr = msr_read(MSR_POWER_CTL);
	msr.lo |= (1 << 18);	/* Enable Energy Perf Bias MSR 0x1b0 */
	msr.lo |= (1 << 1);	/* C1E Enable */
	msr.lo |= (1 << 0);	/* Bi-directional PROCHOT# */
	msr_write(MSR_POWER_CTL, msr);

	/* C3 Interrupt Response Time Limit */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | 0x50;
	msr_write(MSR_PKGC3_IRTL, msr);

	/* C6 Interrupt Response Time Limit */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | 0x68;
	msr_write(MSR_PKGC6_IRTL, msr);

	/* C7 Interrupt Response Time Limit */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | 0x6D;
	msr_write(MSR_PKGC7_IRTL, msr);

	/* Primary Plane Current Limit */
	msr = msr_read(MSR_PP0_CURRENT_CONFIG);
	msr.lo &= ~0x1fff;
	msr.lo |= PP0_CURRENT_LIMIT;
	msr_write(MSR_PP0_CURRENT_CONFIG, msr);

	/* Secondary Plane Current Limit */
	msr = msr_read(MSR_PP1_CURRENT_CONFIG);
	msr.lo &= ~0x1fff;
	result = cpuid(1);
	if (result.eax >= 0x30600)
		msr.lo |= PP1_CURRENT_LIMIT_IVB;
	else
		msr.lo |= PP1_CURRENT_LIMIT_SNB;
	msr_write(MSR_PP1_CURRENT_CONFIG, msr);
}

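/*
 * Program the TCC activation offset from the "tcc-offset" device tree
 * property, if the CPU advertises a programmable offset
 * (MSR_PLATFORM_INFO bit 30).
 */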
static int configure_thermal_target(void)
{
	int tcc_offset;
	msr_t msr;
	int node;

	/* Find pointer to CPU configuration */
	node = fdtdec_next_compatible(gd->fdt_blob, 0,
				      COMPAT_INTEL_MODEL_206AX);
	if (node < 0)
		return -ENOENT;
	tcc_offset = fdtdec_get_int(gd->fdt_blob, node, "tcc-offset", 0);

	/* Set TCC activation offset if supported */
	msr = msr_read(MSR_PLATFORM_INFO);
	if ((msr.lo & (1 << 30)) && tcc_offset) {
		msr = msr_read(MSR_TEMPERATURE_TARGET);
		msr.lo &= ~(0xf << 24); /* Bits 27:24 */
		msr.lo |= (tcc_offset & 0xf) << 24;
		msr_write(MSR_TEMPERATURE_TARGET, msr);
	}

	return 0;
}

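/*
 * Enable fast strings, thermal monitoring and Enhanced SpeedStep in
 * IA32_MISC_ENABLE. EIST (bit 16) must be enabled here before
 * set_max_ratio() writes IA32_PERF_CTL so that the P-state request
 * takes effect.
 */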
static void configure_misc(void)
{
	msr_t msr;

	msr = msr_read(IA32_MISC_ENABLE);
	msr.lo |= (1 << 0);	  /* Fast String enable */
	msr.lo |= (1 << 3);	  /* TM1/TM2/EMTTM enable */
	msr.lo |= (1 << 16);	  /* Enhanced SpeedStep Enable */
	msr_write(IA32_MISC_ENABLE, msr);

	/* Disable Thermal interrupts */
	msr.lo = 0;
	msr.hi = 0;
	msr_write(IA32_THERM_INTERRUPT, msr);

	/* Enable package critical interrupt only */
	msr.lo = 1 << 4;
	msr.hi = 0;
	msr_write(IA32_PACKAGE_THERM_INTERRUPT, msr);
}

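/*
 * Allow the local APIC to send TPR update messages by clearing the
 * disable bit in MSR_PIC_MSG_CONTROL.
 */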
static void enable_lapic_tpr(void)
{
	msr_t msr;

	msr = msr_read(MSR_PIC_MSG_CONTROL);
	msr.lo &= ~(1 << 10);	/* Enable APIC TPR updates */
	msr_write(MSR_PIC_MSG_CONTROL, msr);
}

static void configure_dca_cap(void)
{
	struct cpuid_result cpuid_regs;
	msr_t msr;

	/* Check feature flag in CPUID.(EAX=1):ECX[18]==1 */
	cpuid_regs = cpuid(1);
	if (cpuid_regs.ecx & (1 << 18)) {
		msr = msr_read(IA32_PLATFORM_DCA_CAP);
		msr.lo |= 1;
		msr_write(IA32_PLATFORM_DCA_CAP, msr);
	}
}

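/*
 * Set the maximum non-turbo ratio as the current P-state: the target
 * ratio goes in IA32_PERF_CTL[15:8] and the resulting core frequency is
 * ratio * SANDYBRIDGE_BCLK (the 100 MHz base clock).
 */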
static void set_max_ratio(void)
{
	msr_t msr, perf_ctl;

	perf_ctl.hi = 0;

	/* Check for configurable TDP option */
	if (cpu_config_tdp_levels()) {
		/* Set to nominal TDP ratio */
		msr = msr_read(MSR_CONFIG_TDP_NOMINAL);
		perf_ctl.lo = (msr.lo & 0xff) << 8;
	} else {
		/* Platform Info bits 15:8 give max ratio */
		msr = msr_read(MSR_PLATFORM_INFO);
		perf_ctl.lo = msr.lo & 0xff00;
	}
	msr_write(IA32_PERF_CTL, perf_ctl);

	debug("model_x06ax: frequency set to %d\n",
	      ((perf_ctl.lo >> 8) & 0xff) * SANDYBRIDGE_BCLK);
}

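/*
 * Set the energy/performance bias hint in IA32_ENERGY_PERFORMANCE_BIAS:
 * 0 requests maximum performance, 15 maximum energy saving.
 */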
static void set_energy_perf_bias(u8 policy)
{
	msr_t msr;

	/* Energy Policy is bits 3:0 */
	msr = msr_read(IA32_ENERGY_PERFORMANCE_BIAS);
	msr.lo &= ~0xf;
	msr.lo |= policy & 0xf;
	msr_write(IA32_ENERGY_PERFORMANCE_BIAS, msr);

	debug("model_x06ax: energy policy set to %u\n", policy);
}

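/*
 * Zero the machine-check status registers. Each MCA bank has four MSRs
 * (CTL/STATUS/ADDR/MISC), hence the stride of 4 from IA32_MC0_STATUS.
 */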
static void configure_mca(void)
{
	msr_t msr;
	int i;

	msr.lo = 0;
	msr.hi = 0;
	/* This should only be done on a cold boot */
	for (i = 0; i < 7; i++)
		msr_write(IA32_MC0_STATUS + (i * 4), msr);
}

#if CONFIG_USBDEBUG
static unsigned ehci_debug_addr;
#endif

/*
 * Initialize any extra cores/threads in this package.
 */
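/*
 * CPUID leaf 0xb reports threads per core (sub-leaf 0) and threads per
 * package (sub-leaf 1) in EBX[15:0]. APIC IDs are assumed to be allocated
 * consecutively from the BSP; with hyper-threading disabled each core
 * still owns two APIC IDs, hence the shift below.
 */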
static int intel_cores_init(struct x86_cpu_priv *cpu)
{
	struct cpuid_result result;
	unsigned threads_per_package, threads_per_core, i;

	/* Logical processors (threads) per core */
	result = cpuid_ext(0xb, 0);
	threads_per_core = result.ebx & 0xffff;

	/* Logical processors (threads) per package */
	result = cpuid_ext(0xb, 1);
	threads_per_package = result.ebx & 0xffff;

	debug("CPU: %u has %u cores, %u threads per core\n",
	      cpu->apic_id, threads_per_package / threads_per_core,
	      threads_per_core);

	for (i = 1; i < threads_per_package; ++i) {
		struct x86_cpu_priv *new_cpu;

		new_cpu = calloc(1, sizeof(*new_cpu));
		if (!new_cpu)
			return -ENOMEM;

		new_cpu->apic_id = cpu->apic_id + i;

		/* Update APIC ID if no hyperthreading */
		if (threads_per_core == 1)
			new_cpu->apic_id <<= 1;

		debug("CPU: %u has core %u\n", cpu->apic_id, new_cpu->apic_id);

#if CONFIG_SMP && CONFIG_MAX_CPUS > 1
		/* Start the new cpu */
		if (!start_cpu(new_cpu)) {
			/* Record the error in cpu? */
			printf("CPU %u would not start!\n", new_cpu->apic_id);
			new_cpu->start_err = 1;
		}
#endif
	}

	return 0;
}

int model_206ax_init(struct x86_cpu_priv *cpu)
{
	int ret;

	/* Clear out pending MCEs */
	configure_mca();

#if CONFIG_USBDEBUG
	/* Is this caution really needed? */
	if (!ehci_debug_addr)
		ehci_debug_addr = get_ehci_debug();
	set_ehci_debug(0);
#endif

	/* Set up MTRRs based on physical address size */
#if 0 /* TODO: Implement this */
	struct cpuid_result cpuid_regs;

	cpuid_regs = cpuid(0x80000008);
	x86_setup_fixed_mtrrs();
	x86_setup_var_mtrrs(cpuid_regs.eax & 0xff, 2);
	x86_mtrr_check();
#endif

#if CONFIG_USBDEBUG
	set_ehci_debug(ehci_debug_addr);
#endif

	/* Enable the local CPU APIC */
	enable_lapic_tpr();
	lapic_setup();

	/* Enable virtualization if CONFIG_ENABLE_VMX is set */
	enable_vmx();

	/* Configure C-states */
	configure_c_states();

	/* Configure Enhanced SpeedStep and Thermal Sensors */
	configure_misc();

	/* Thermal throttle activation offset */
	ret = configure_thermal_target();
	if (ret)
		return ret;

	/* Enable Direct Cache Access */
	configure_dca_cap();

	/* Set energy policy */
	set_energy_perf_bias(ENERGY_POLICY_NORMAL);

	/* Set Max Ratio */
	set_max_ratio();

	/* Enable Turbo */
	turbo_enable();

	/* Start up extra cores */
	intel_cores_init(cpu);

	return 0;
}