// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Google, Inc
 *
 * Based on code from coreboot src/soc/intel/broadwell/cpu.c
 */

#include <common.h>
#include <dm.h>
#include <cpu.h>
#include <asm/cpu.h>
#include <asm/cpu_x86.h>
#include <asm/cpu_common.h>
#include <asm/intel_regs.h>
#include <asm/msr.h>
#include <asm/post.h>
#include <asm/turbo.h>
#include <asm/arch/cpu.h>
#include <asm/arch/pch.h>
#include <asm/arch/rcb.h>

struct cpu_broadwell_priv {
        bool ht_disabled;
};

/* Convert time in seconds to POWER_LIMIT_1_TIME MSR value */
static const u8 power_limit_time_sec_to_msr[] = {
        [0] = 0x00,
        [1] = 0x0a,
        [2] = 0x0b,
        [3] = 0x4b,
        [4] = 0x0c,
        [5] = 0x2c,
        [6] = 0x4c,
        [7] = 0x6c,
        [8] = 0x0d,
        [10] = 0x2d,
        [12] = 0x4d,
        [14] = 0x6d,
        [16] = 0x0e,
        [20] = 0x2e,
        [24] = 0x4e,
        [28] = 0x6e,
        [32] = 0x0f,
        [40] = 0x2f,
        [48] = 0x4f,
        [56] = 0x6f,
        [64] = 0x10,
        [80] = 0x30,
        [96] = 0x50,
        [112] = 0x70,
        [128] = 0x11,
};

/* Convert POWER_LIMIT_1_TIME MSR value to seconds */
static const u8 power_limit_time_msr_to_sec[] = {
        [0x00] = 0,
        [0x0a] = 1,
        [0x0b] = 2,
        [0x4b] = 3,
        [0x0c] = 4,
        [0x2c] = 5,
        [0x4c] = 6,
        [0x6c] = 7,
        [0x0d] = 8,
        [0x2d] = 10,
        [0x4d] = 12,
        [0x6d] = 14,
        [0x0e] = 16,
        [0x2e] = 20,
        [0x4e] = 24,
        [0x6e] = 28,
        [0x0f] = 32,
        [0x2f] = 40,
        [0x4f] = 48,
        [0x6f] = 56,
        [0x10] = 64,
        [0x30] = 80,
        [0x50] = 96,
        [0x70] = 112,
        [0x11] = 128,
};
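/*
 * The tables above follow the RAPL time-window encoding, assuming the default
 * time unit of roughly 976us (1/1024s): the MSR field encodes
 * (1 + Z/4) * 2^Y time units, with Y in bits [4:0] and Z in bits [6:5].
 * For example, 28 seconds maps to 0x6e: Y = 0xe and Z = 3, giving
 * 1.75 * 2^14 = 28672 units, i.e. approximately 28s.
 */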
int arch_cpu_init_dm(void)
{
        struct udevice *dev;
        int ret;

        /* Start up the LPC so we have serial */
        ret = uclass_first_device(UCLASS_LPC, &dev);
        if (ret)
                return ret;
        if (!dev)
                return -ENODEV;
        ret = cpu_set_flex_ratio_to_tdp_nominal();
        if (ret)
                return ret;

        return 0;
}

void set_max_freq(void)
{
        msr_t msr, perf_ctl, platform_info;

        /* Check for configurable TDP option */
        platform_info = msr_read(MSR_PLATFORM_INFO);

        if ((platform_info.hi >> 1) & 3) {
                /* Set to nominal TDP ratio */
                msr = msr_read(MSR_CONFIG_TDP_NOMINAL);
                perf_ctl.lo = (msr.lo & 0xff) << 8;
        } else {
                /* Platform Info bits 15:8 give max ratio */
                msr = msr_read(MSR_PLATFORM_INFO);
                perf_ctl.lo = msr.lo & 0xff00;
        }

        perf_ctl.hi = 0;
        msr_write(IA32_PERF_CTL, perf_ctl);

        debug("CPU: frequency set to %d MHz\n",
              ((perf_ctl.lo >> 8) & 0xff) * CPU_BCLK);
}

int arch_cpu_init(void)
{
        post_code(POST_CPU_INIT);

        return x86_cpu_init_f();
}

int checkcpu(void)
{
        int ret;

        set_max_freq();

        ret = cpu_common_init();
        if (ret)
                return ret;
        gd->arch.pei_boot_mode = PEI_BOOT_NONE;

        return 0;
}

int print_cpuinfo(void)
{
        char processor_name[CPU_MAX_NAME_LEN];
        const char *name;

        /* Print processor name */
        name = cpu_get_name(processor_name);
        printf("CPU: %s\n", name);

        return 0;
}

/*
 * The core 100MHz BCLK is disabled in deeper c-states. One needs to calibrate
 * the 100MHz BCLK against the 24MHz clock to restore the clocks properly
 * when a core is woken up.
 */
static int pcode_ready(void)
{
        int wait_count;
        const int delay_step = 10;

        wait_count = 0;
        do {
                if (!(readl(MCHBAR_REG(BIOS_MAILBOX_INTERFACE)) &
                      MAILBOX_RUN_BUSY))
                        return 0;
                wait_count += delay_step;
                udelay(delay_step);
        } while (wait_count < 1000);

        return -ETIMEDOUT;
}

static u32 pcode_mailbox_read(u32 command)
{
        int ret;

        ret = pcode_ready();
        if (ret) {
                debug("PCODE: mailbox timeout on wait ready\n");
                return ret;
        }

        /* Send command and start transaction */
        writel(command | MAILBOX_RUN_BUSY, MCHBAR_REG(BIOS_MAILBOX_INTERFACE));

        ret = pcode_ready();
        if (ret) {
                debug("PCODE: mailbox timeout on completion\n");
                return ret;
        }

        /* Read mailbox */
        return readl(MCHBAR_REG(BIOS_MAILBOX_DATA));
}

static int pcode_mailbox_write(u32 command, u32 data)
{
        int ret;

        ret = pcode_ready();
        if (ret) {
                debug("PCODE: mailbox timeout on wait ready\n");
                return ret;
        }

        writel(data, MCHBAR_REG(BIOS_MAILBOX_DATA));

        /* Send command and start transaction */
        writel(command | MAILBOX_RUN_BUSY, MCHBAR_REG(BIOS_MAILBOX_INTERFACE));

        ret = pcode_ready();
        if (ret) {
                debug("PCODE: mailbox timeout on completion\n");
                return ret;
        }

        return 0;
}
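/*
 * The helpers above implement the BIOS<->PCODE mailbox handshake: wait for
 * RUN_BUSY to clear, optionally write BIOS_MAILBOX_DATA, write the command
 * with RUN_BUSY set to BIOS_MAILBOX_INTERFACE, then wait for PCODE to clear
 * RUN_BUSY again; for reads the response is then in BIOS_MAILBOX_DATA. Note
 * that pcode_mailbox_read() returns a negative errno cast to u32 on timeout,
 * so its only caller here, configure_pch_power_sharing(), uses the returned
 * values without any error checking.
 */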
/* @dev is the CPU device */
static void initialize_vr_config(struct udevice *dev)
{
        int ramp, min_vid;
        msr_t msr;

        debug("Initializing VR config\n");

        /* Configure VR_CURRENT_CONFIG */
        msr = msr_read(MSR_VR_CURRENT_CONFIG);
        /*
         * Preserve bits 63 and 62. Bit 62 is PSI4 enable, but it is only
         * valid on ULT systems.
         */
        msr.hi &= 0xc0000000;
        msr.hi |= (0x01 << (52 - 32));  /* PSI3 threshold -  1A */
        msr.hi |= (0x05 << (42 - 32));  /* PSI2 threshold -  5A */
        msr.hi |= (0x14 << (32 - 32));  /* PSI1 threshold - 20A */
        msr.hi |= (1 << (62 - 32));     /* Enable PSI4 */
        /* Leave the max instantaneous current limit (12:0) to default */
        msr_write(MSR_VR_CURRENT_CONFIG, msr);

        /* Configure VR_MISC_CONFIG MSR */
        msr = msr_read(MSR_VR_MISC_CONFIG);
        /* Set the IOUT_SLOPE scalar applied to dIout in U10.1.9 format */
        msr.hi &= ~(0x3ff << (40 - 32));
        msr.hi |= (0x200 << (40 - 32)); /* 1.0 */
        /* Set IOUT_OFFSET to 0 */
        msr.hi &= ~0xff;
        /* Set entry ramp rate to slow */
        msr.hi &= ~(1 << (51 - 32));
        /* Enable decay mode on C-state entry */
        msr.hi |= (1 << (52 - 32));
        /* Set the slow ramp rate */
        msr.hi &= ~(0x3 << (53 - 32));
        /* Configure the C-state exit ramp rate */
        ramp = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev),
                              "intel,slow-ramp", -1);
        if (ramp != -1) {
                /* Configured slow ramp rate */
                msr.hi |= ((ramp & 0x3) << (53 - 32));
                /* Set exit ramp rate to slow */
                msr.hi &= ~(1 << (50 - 32));
        } else {
                /* Fast ramp rate / 4 */
                msr.hi |= (0x01 << (53 - 32));
                /* Set exit ramp rate to fast */
                msr.hi |= (1 << (50 - 32));
        }
        /* Set MIN_VID (31:24) to allow CPU to have full control */
        msr.lo &= ~0xff000000;
        min_vid = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev),
                                 "intel,min-vid", 0);
        msr.lo |= (min_vid & 0xff) << 24;
        msr_write(MSR_VR_MISC_CONFIG, msr);

        /* Configure VR_MISC_CONFIG2 MSR */
        msr = msr_read(MSR_VR_MISC_CONFIG2);
        msr.lo &= ~0xffff;
        /*
         * Allow CPU to control minimum voltage completely (15:8) and
         * set the fast ramp voltage in 10mV steps.
         */
        if (cpu_get_family_model() == BROADWELL_FAMILY_ULT)
                msr.lo |= 0x006a; /* 1.56V */
        else
                msr.lo |= 0x006f; /* 1.60V */
        msr_write(MSR_VR_MISC_CONFIG2, msr);

        /* Set C9/C10 VCC Min */
        pcode_mailbox_write(MAILBOX_BIOS_CMD_WRITE_C9C10_VOLTAGE, 0x1f1f);
}

static int calibrate_24mhz_bclk(void)
{
        int err_code;
        int ret;

        ret = pcode_ready();
        if (ret)
                return ret;

        /* A non-zero value initiates the PCODE calibration */
        writel(~0, MCHBAR_REG(BIOS_MAILBOX_DATA));
        writel(MAILBOX_RUN_BUSY | MAILBOX_BIOS_CMD_FSM_MEASURE_INTVL,
               MCHBAR_REG(BIOS_MAILBOX_INTERFACE));

        ret = pcode_ready();
        if (ret)
                return ret;

        err_code = readl(MCHBAR_REG(BIOS_MAILBOX_INTERFACE)) & 0xff;

        debug("PCODE: 24MHz BCLK calibration response: %d\n", err_code);

        /* Read the calibrated value */
        writel(MAILBOX_RUN_BUSY | MAILBOX_BIOS_CMD_READ_CALIBRATION,
               MCHBAR_REG(BIOS_MAILBOX_INTERFACE));

        ret = pcode_ready();
        if (ret)
                return ret;

        debug("PCODE: 24MHz BCLK calibration value: 0x%08x\n",
              readl(MCHBAR_REG(BIOS_MAILBOX_DATA)));

        return 0;
}

static void configure_pch_power_sharing(void)
{
        u32 pch_power, pch_power_ext, pmsync, pmsync2;
        int i;

        /* Read PCH Power levels from PCODE */
        pch_power = pcode_mailbox_read(MAILBOX_BIOS_CMD_READ_PCH_POWER);
        pch_power_ext = pcode_mailbox_read(MAILBOX_BIOS_CMD_READ_PCH_POWER_EXT);

        debug("PCH Power: PCODE Levels 0x%08x 0x%08x\n", pch_power,
              pch_power_ext);

        pmsync = readl(RCB_REG(PMSYNC_CONFIG));
        pmsync2 = readl(RCB_REG(PMSYNC_CONFIG2));

        /*
         * Program PMSYNC_TPR_CONFIG PCH power limit values
         * pmsync[0:4] = mailbox[0:5]
         * pmsync[8:12] = mailbox[6:11]
         * pmsync[16:20] = mailbox[12:17]
         */
        for (i = 0; i < 3; i++) {
                u32 level = pch_power & 0x3f;

                pch_power >>= 6;
                pmsync &= ~(0x1f << (i * 8));
                pmsync |= (level & 0x1f) << (i * 8);
        }
        writel(pmsync, RCB_REG(PMSYNC_CONFIG));

        /*
         * Program PMSYNC_TPR_CONFIG2 Extended PCH power limit values
         * pmsync2[0:4] = mailbox[23:18]
         * pmsync2[8:12] = mailbox_ext[6:11]
         * pmsync2[16:20] = mailbox_ext[12:17]
         * pmsync2[24:28] = mailbox_ext[18:22]
         */
        pmsync2 &= ~0x1f;
        pmsync2 |= pch_power & 0x1f;

        for (i = 1; i < 4; i++) {
                u32 level = pch_power_ext & 0x3f;

                pch_power_ext >>= 6;
                pmsync2 &= ~(0x1f << (i * 8));
                pmsync2 |= (level & 0x1f) << (i * 8);
        }
        writel(pmsync2, RCB_REG(PMSYNC_CONFIG2));
}

static int bsp_init_before_ap_bringup(struct udevice *dev)
{
        int ret;

        initialize_vr_config(dev);
        ret = calibrate_24mhz_bclk();
        if (ret)
                return ret;
        configure_pch_power_sharing();

        return 0;
}
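/*
 * cpu_config_tdp_levels() reports MSR_PLATFORM_INFO bits 34:33, which live in
 * the upper 32-bit word, hence the "(hi >> 1) & 3" extraction. A non-zero
 * value indicates that the part supports configurable TDP levels, in which
 * case set_max_freq(), set_max_ratio() and cpu_set_power_limits() use the
 * nominal ratio from MSR_CONFIG_TDP_NOMINAL.
 */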
int cpu_config_tdp_levels(void)
{
        msr_t platform_info;

        /* Bits 34:33 indicate how many levels are supported */
        platform_info = msr_read(MSR_PLATFORM_INFO);

        return (platform_info.hi >> 1) & 3;
}

static void set_max_ratio(void)
{
        msr_t msr, perf_ctl;

        perf_ctl.hi = 0;

        /* Check for configurable TDP option */
        if (turbo_get_state() == TURBO_ENABLED) {
                msr = msr_read(MSR_NHM_TURBO_RATIO_LIMIT);
                perf_ctl.lo = (msr.lo & 0xff) << 8;
        } else if (cpu_config_tdp_levels()) {
                /* Set to nominal TDP ratio */
                msr = msr_read(MSR_CONFIG_TDP_NOMINAL);
                perf_ctl.lo = (msr.lo & 0xff) << 8;
        } else {
                /* Platform Info bits 15:8 give max ratio */
                msr = msr_read(MSR_PLATFORM_INFO);
                perf_ctl.lo = msr.lo & 0xff00;
        }
        msr_write(IA32_PERF_CTL, perf_ctl);

        debug("cpu: frequency set to %d MHz\n",
              ((perf_ctl.lo >> 8) & 0xff) * CPU_BCLK);
}

int broadwell_init(struct udevice *dev)
{
        struct cpu_broadwell_priv *priv = dev_get_priv(dev);
        int num_threads;
        int num_cores;
        msr_t msr;
        int ret;

        msr = msr_read(CORE_THREAD_COUNT_MSR);
        num_threads = (msr.lo >> 0) & 0xffff;
        num_cores = (msr.lo >> 16) & 0xffff;
        debug("CPU has %u cores, %u threads enabled\n", num_cores,
              num_threads);

        /* Hyper-threading is disabled if each core runs only one thread */
        priv->ht_disabled = num_threads == num_cores;

        ret = bsp_init_before_ap_bringup(dev);
        if (ret)
                return ret;

        set_max_ratio();

        return ret;
}
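/*
 * Machine-check banks are laid out as four consecutive MSRs per bank (CTL,
 * STATUS, ADDR, MISC), which is why configure_mca() below advances from
 * MSR_IA32_MC0_STATUS in steps of 4 for each bank reported in MCG_CAP[7:0].
 */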
static void configure_mca(void)
{
        msr_t msr;
        const unsigned int mcg_cap_msr = 0x179;
        int i;
        int num_banks;

        msr = msr_read(mcg_cap_msr);
        num_banks = msr.lo & 0xff;
        msr.lo = 0;
        msr.hi = 0;
        /*
         * TODO(adurbin): This should only be done on a cold boot. Also, some
         * of these banks are core vs package scope. For now every CPU clears
         * every bank.
         */
        for (i = 0; i < num_banks; i++)
                msr_write(MSR_IA32_MC0_STATUS + (i * 4), msr);
}

static void enable_lapic_tpr(void)
{
        msr_t msr;

        msr = msr_read(MSR_PIC_MSG_CONTROL);
        msr.lo &= ~(1 << 10);   /* Enable APIC TPR updates */
        msr_write(MSR_PIC_MSG_CONTROL, msr);
}
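/*
 * The C-state interrupt response time limit (IRTL) MSRs written below share a
 * common layout: bits 9:0 hold the latency limit, bits 12:10 select the time
 * unit (IRTL_1024_NS here) and bit 15 (IRTL_VALID) marks the value as valid.
 */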
static void configure_c_states(void)
{
        msr_t msr;

        msr = msr_read(MSR_PMG_CST_CONFIG_CONTROL);
        msr.lo |= (1 << 31);    /* Timed MWAIT Enable */
        msr.lo |= (1 << 30);    /* Package c-state Undemotion Enable */
        msr.lo |= (1 << 29);    /* Package c-state Demotion Enable */
        msr.lo |= (1 << 28);    /* C1 Auto Undemotion Enable */
        msr.lo |= (1 << 27);    /* C3 Auto Undemotion Enable */
        msr.lo |= (1 << 26);    /* C1 Auto Demotion Enable */
        msr.lo |= (1 << 25);    /* C3 Auto Demotion Enable */
        msr.lo &= ~(1 << 10);   /* Disable IO MWAIT redirection */
        /* The deepest package c-state defaults to factory-configured value */
        msr_write(MSR_PMG_CST_CONFIG_CONTROL, msr);

        msr = msr_read(MSR_MISC_PWR_MGMT);
        msr.lo &= ~(1 << 0);    /* Enable P-state HW_ALL coordination */
        msr_write(MSR_MISC_PWR_MGMT, msr);

        msr = msr_read(MSR_POWER_CTL);
        msr.lo |= (1 << 18);    /* Enable Energy Perf Bias MSR 0x1b0 */
        msr.lo |= (1 << 1);     /* C1E Enable */
        msr.lo |= (1 << 0);     /* Bi-directional PROCHOT# */
        msr_write(MSR_POWER_CTL, msr);

        /* C-state Interrupt Response Latency Control 0 - package C3 latency */
        msr.hi = 0;
        msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_0_LIMIT;
        msr_write(MSR_C_STATE_LATENCY_CONTROL_0, msr);

        /* C-state Interrupt Response Latency Control 1 */
        msr.hi = 0;
        msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_1_LIMIT;
        msr_write(MSR_C_STATE_LATENCY_CONTROL_1, msr);

        /* C-state Interrupt Response Latency Control 2 - package C6/C7 short */
        msr.hi = 0;
        msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_2_LIMIT;
        msr_write(MSR_C_STATE_LATENCY_CONTROL_2, msr);

        /* C-state Interrupt Response Latency Control 3 - package C8 */
        msr.hi = 0;
        msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_3_LIMIT;
        msr_write(MSR_C_STATE_LATENCY_CONTROL_3, msr);

        /* C-state Interrupt Response Latency Control 4 - package C9 */
        msr.hi = 0;
        msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_4_LIMIT;
        msr_write(MSR_C_STATE_LATENCY_CONTROL_4, msr);

        /* C-state Interrupt Response Latency Control 5 - package C10 */
        msr.hi = 0;
        msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_5_LIMIT;
        msr_write(MSR_C_STATE_LATENCY_CONTROL_5, msr);
}

static void configure_misc(void)
{
        msr_t msr;

        msr = msr_read(MSR_IA32_MISC_ENABLE);
        msr.lo |= (1 << 0);     /* Fast String enable */
        msr.lo |= (1 << 3);     /* TM1/TM2/EMTTM enable */
        msr.lo |= (1 << 16);    /* Enhanced SpeedStep Enable */
        msr_write(MSR_IA32_MISC_ENABLE, msr);

        /* Disable thermal interrupts */
        msr.lo = 0;
        msr.hi = 0;
        msr_write(MSR_IA32_THERM_INTERRUPT, msr);

        /* Enable package critical interrupt only */
        msr.lo = 1 << 4;
        msr.hi = 0;
        msr_write(MSR_IA32_PACKAGE_THERM_INTERRUPT, msr);
}

static void configure_thermal_target(struct udevice *dev)
{
        int tcc_offset;
        msr_t msr;

        tcc_offset = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev),
                                    "intel,tcc-offset", 0);

        /* Set TCC activation offset if supported */
        msr = msr_read(MSR_PLATFORM_INFO);
        if ((msr.lo & (1 << 30)) && tcc_offset) {
                msr = msr_read(MSR_TEMPERATURE_TARGET);
                msr.lo &= ~(0xf << 24); /* Bits 27:24 */
                msr.lo |= (tcc_offset & 0xf) << 24;
                msr_write(MSR_TEMPERATURE_TARGET, msr);
        }
}

static void configure_dca_cap(void)
{
        struct cpuid_result cpuid_regs;
        msr_t msr;

        /* Check feature flag in CPUID.(EAX=1):ECX[18]==1 */
        cpuid_regs = cpuid(1);
        if (cpuid_regs.ecx & (1 << 18)) {
                msr = msr_read(MSR_IA32_PLATFORM_DCA_CAP);
                msr.lo |= 1;
                msr_write(MSR_IA32_PLATFORM_DCA_CAP, msr);
        }
}

static void set_energy_perf_bias(u8 policy)
{
        msr_t msr;
        int ecx;

        /* Determine if energy efficient policy is supported */
        ecx = cpuid_ecx(0x6);
        if (!(ecx & (1 << 3)))
                return;

        /* Energy Policy is bits 3:0 */
        msr = msr_read(MSR_IA32_ENERGY_PERFORMANCE_BIAS);
        msr.lo &= ~0xf;
        msr.lo |= policy & 0xf;
        msr_write(MSR_IA32_ENERGY_PERFORMANCE_BIAS, msr);

        debug("cpu: energy policy set to %u\n", policy);
}

/* All CPUs including the BSP will run the following function */
static void cpu_core_init(struct udevice *dev)
{
        /* Clear out pending MCEs */
        configure_mca();

        /* Enable the local CPU APICs */
        enable_lapic_tpr();

        /* Configure C-states */
        configure_c_states();

        /* Configure Enhanced SpeedStep and Thermal Sensors */
        configure_misc();

        /* Thermal throttle activation offset */
        configure_thermal_target(dev);

        /* Enable Direct Cache Access */
        configure_dca_cap();

        /* Set energy policy */
        set_energy_perf_bias(ENERGY_POLICY_NORMAL);

        /* Enable Turbo */
        turbo_enable();
}
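/*
 * Illustrative RAPL unit arithmetic for cpu_set_power_limits() below, using
 * example values rather than any particular SKU: with a power-unit field of
 * 3, power_unit = 2 << 2 = 8 units per watt, so a 15W TDP reads back as 120
 * units and the 1.25x short-term limit becomes 150 units, i.e. 18.75W.
 */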
/*
 * Configure processor power limits if possible.
 * This must be done AFTER the BIOS_RESET_CPL bit is set.
 */
void cpu_set_power_limits(int power_limit_1_time)
{
        msr_t msr;
        msr_t limit;
        unsigned power_unit;
        unsigned tdp, min_power, max_power, max_time;
        u8 power_limit_1_val;

        msr = msr_read(MSR_PLATFORM_INFO);
        if (power_limit_1_time >= ARRAY_SIZE(power_limit_time_sec_to_msr))
                power_limit_1_time = 28;

        if (!(msr.lo & PLATFORM_INFO_SET_TDP))
                return;

        /* Get units */
        msr = msr_read(MSR_PKG_POWER_SKU_UNIT);
        power_unit = 2 << ((msr.lo & 0xf) - 1);

        /* Get power defaults for this SKU */
        msr = msr_read(MSR_PKG_POWER_SKU);
        tdp = msr.lo & 0x7fff;
        min_power = (msr.lo >> 16) & 0x7fff;
        max_power = msr.hi & 0x7fff;
        max_time = (msr.hi >> 16) & 0x7f;

        debug("CPU TDP: %u Watts\n", tdp / power_unit);

        if (power_limit_time_msr_to_sec[max_time] > power_limit_1_time)
                power_limit_1_time = power_limit_time_msr_to_sec[max_time];

        if (min_power > 0 && tdp < min_power)
                tdp = min_power;

        if (max_power > 0 && tdp > max_power)
                tdp = max_power;

        power_limit_1_val = power_limit_time_sec_to_msr[power_limit_1_time];

        /* Set long term power limit to TDP */
        limit.lo = 0;
        limit.lo |= tdp & PKG_POWER_LIMIT_MASK;
        limit.lo |= PKG_POWER_LIMIT_EN;
        limit.lo |= (power_limit_1_val & PKG_POWER_LIMIT_TIME_MASK) <<
                PKG_POWER_LIMIT_TIME_SHIFT;

        /* Set short term power limit to 1.25 * TDP */
        limit.hi = 0;
        limit.hi |= ((tdp * 125) / 100) & PKG_POWER_LIMIT_MASK;
        limit.hi |= PKG_POWER_LIMIT_EN;
        /* Power limit 2 time is only programmable on server SKUs */

        msr_write(MSR_PKG_POWER_LIMIT, limit);

        /* Set power limit values in MCHBAR as well */
        writel(limit.lo, MCHBAR_REG(MCH_PKG_POWER_LIMIT_LO));
        writel(limit.hi, MCHBAR_REG(MCH_PKG_POWER_LIMIT_HI));

        /* Set DDR RAPL power limit by copying from MMIO to MSR */
        msr.lo = readl(MCHBAR_REG(MCH_DDR_POWER_LIMIT_LO));
        msr.hi = readl(MCHBAR_REG(MCH_DDR_POWER_LIMIT_HI));
        msr_write(MSR_DDR_RAPL_LIMIT, msr);

        /* Use nominal TDP values for CPUs with configurable TDP */
        if (cpu_config_tdp_levels()) {
                msr = msr_read(MSR_CONFIG_TDP_NOMINAL);
                limit.hi = 0;
                limit.lo = msr.lo & 0xff;
                msr_write(MSR_TURBO_ACTIVATION_RATIO, limit);
        }
}

static int broadwell_get_info(struct udevice *dev, struct cpu_info *info)
{
        msr_t msr;

        msr = msr_read(IA32_PERF_CTL);
        info->cpu_freq = ((msr.lo >> 8) & 0xff) * BROADWELL_BCLK * 1000000;
        info->features = 1 << CPU_FEAT_L1_CACHE | 1 << CPU_FEAT_MMU |
                1 << CPU_FEAT_UCODE | 1 << CPU_FEAT_DEVICE_ID;

        return 0;
}

static int broadwell_get_count(struct udevice *dev)
{
        return 4;
}

static int cpu_x86_broadwell_probe(struct udevice *dev)
{
        if (dev->seq == 0) {
                cpu_core_init(dev);
                return broadwell_init(dev);
        }

        return 0;
}

static const struct cpu_ops cpu_x86_broadwell_ops = {
        .get_desc       = cpu_x86_get_desc,
        .get_info       = broadwell_get_info,
        .get_count      = broadwell_get_count,
        .get_vendor     = cpu_x86_get_vendor,
};

static const struct udevice_id cpu_x86_broadwell_ids[] = {
        { .compatible = "intel,core-i3-gen5" },
        { }
};

U_BOOT_DRIVER(cpu_x86_broadwell_drv) = {
        .name           = "cpu_x86_broadwell",
        .id             = UCLASS_CPU,
        .of_match       = cpu_x86_broadwell_ids,
        .bind           = cpu_x86_bind,
        .probe          = cpu_x86_broadwell_probe,
        .ops            = &cpu_x86_broadwell_ops,
        .priv_auto_alloc_size   = sizeof(struct cpu_broadwell_priv),
        .flags          = DM_FLAG_PRE_RELOC,
};