/*
 * (c) 2003-2012 Advanced Micro Devices, Inc.
 * Your use of this code is subject to the terms and conditions of the
 * GNU general public license version 2. See "COPYING" or
 * http://www.gnu.org/licenses/gpl.html
 *
 * Maintainer:
 * Andreas Herrmann <herrmann.der.user@googlemail.com>
 *
 * Based on the powernow-k7.c module written by Dave Jones.
 * (C) 2003 Dave Jones on behalf of SuSE Labs
 * (C) 2004 Dominik Brodowski <linux@brodo.de>
 * (C) 2004 Pavel Machek <pavel@ucw.cz>
 * Licensed under the terms of the GNU GPL License version 2.
 * Based upon datasheets & sample CPUs kindly provided by AMD.
 *
 * Valuable input gratefully received from Dave Jones, Pavel Machek,
 * Dominik Brodowski, Jacob Shin, and others.
 * Originally developed by Paul Devriendt.
 *
 * Processor information obtained from Chapter 9 (Power and Thermal
 * Management) of the "BIOS and Kernel Developer's Guide (BKDG) for
 * the AMD Athlon 64 and AMD Opteron Processors" and section "2.x
 * Power Management" in BKDGs for newer AMD CPU families.
 *
 * Tables for specific CPUs can be inferred from AMD's processor
 * power and thermal data sheets, (e.g. 30417.pdf, 30430.pdf, 43375.pdf)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <asm/msr.h>
#include <asm/cpu_device_id.h>

#include <linux/acpi.h>
#include <linux/mutex.h>
#include <acpi/processor.h>

#define VERSION "version 2.20.00"
#include "powernow-k8.h"

/* serialize freq changes */
static DEFINE_MUTEX(fidvid_mutex);

/* per-CPU driver state; NULL until powernowk8_cpu_init() has run */
static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);

/* forward declaration; the driver structure is defined near the bottom */
static struct cpufreq_driver cpufreq_amd64_driver;

#ifndef CONFIG_SMP
static inline const struct cpumask *cpu_core_mask(int cpu)
{
	return cpumask_of(0);
}
#endif

/* Return a frequency in MHz, given an input fid */
static u32 find_freq_from_fid(u32 fid)
{
	return 800 + (fid * 100);
}

/* Return a frequency in KHz, given an input fid */
static u32 find_khz_freq_from_fid(u32 fid)
{
	return 1000 * find_freq_from_fid(fid);
}

/* Return the vco fid for an input fid
 *
 * Each "low" fid has corresponding "high" fid, and you can get to "low" fids
 * only from corresponding high fids. This returns "high" fid corresponding to
 * "low" one.
 */
static u32 convert_fid_to_vco_fid(u32 fid)
{
	if (fid < HI_FID_TABLE_BOTTOM)
		return 8 + (2 * fid);
	else
		return fid;
}

/*
 * Return 1 if the pending bit is set. Unless we just instructed the processor
 * to transition to a new state, seeing this bit set is really bad news.
 */
static int pending_bit_stuck(void)
{
	u32 lo, hi;

	rdmsr(MSR_FIDVID_STATUS, lo, hi);
	return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0;
}

/*
 * Update the global current fid / vid values from the status msr.
 * Returns 1 on error (change-pending bit never cleared), 0 on success.
 */
static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
{
	u32 lo, hi;
	u32 i = 0;

	/* bounded spin: give up after 10000 reads with the pending bit set */
	do {
		if (i++ > 10000) {
			pr_debug("detected change pending stuck\n");
			return 1;
		}
		rdmsr(MSR_FIDVID_STATUS, lo, hi);
	} while (lo & MSR_S_LO_CHANGE_PENDING);

	data->currvid = hi & MSR_S_HI_CURRENT_VID;
	data->currfid = lo & MSR_S_LO_CURRENT_FID;

	return 0;
}

/* the isochronous relief time */
static void count_off_irt(struct powernow_k8_data *data)
{
	udelay((1 << data->irt) * 10);
	return;
}

/* the voltage stabilization time */
static void count_off_vst(struct powernow_k8_data *data)
{
	udelay(data->vstable * VST_UNITS_20US);
	return;
}

/* need to init the control msr to a safe value (for each cpu) */
static void fidvid_msr_init(void)
{
	u32 lo, hi;
	u8 fid, vid;

	/* seed the control MSR with the CPU's current fid/vid so the first
	 * real transition starts from a known state */
	rdmsr(MSR_FIDVID_STATUS, lo, hi);
	vid = hi & MSR_S_HI_CURRENT_VID;
	fid = lo & MSR_S_LO_CURRENT_FID;
	lo = fid | (vid << MSR_C_LO_VID_SHIFT);
	hi = MSR_C_HI_STP_GNT_BENIGN;
	pr_debug("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi);
	wrmsr(MSR_FIDVID_CTL, lo, hi);
}

/* write the new fid value along with the other control fields to the msr
 * Returns 1 on error, 0 on success. The vid must not change as a side
 * effect of a fid transition. */
static int write_new_fid(struct powernow_k8_data *data, u32 fid)
{
	u32 lo;
	u32 savevid = data->currvid;
	u32 i = 0;

	if ((fid & INVALID_FID_MASK) || (data->currvid & INVALID_VID_MASK)) {
		pr_err("internal error - overflow on fid write\n");
		return 1;
	}

	lo = fid;
	lo |= (data->currvid << MSR_C_LO_VID_SHIFT);
	lo |= MSR_C_LO_INIT_FID_VID;

	pr_debug("writing fid 0x%x, lo 0x%x, hi 0x%x\n",
		fid, lo, data->plllock * PLL_LOCK_CONVERSION);

	/* retry the write until the pending bit clears, up to 100 times */
	do {
		wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION);
		if (i++ > 100) {
			pr_err("Hardware error - pending bit very stuck - no further pstate changes possible\n");
			return 1;
		}
	} while (query_current_values_with_pending_wait(data));

	count_off_irt(data);

	if (savevid != data->currvid) {
		pr_err("vid change on fid trans, old 0x%x, new 0x%x\n",
			savevid, data->currvid);
		return 1;
	}

	if (fid != data->currfid) {
		pr_err("fid trans failed, fid 0x%x, curr 0x%x\n", fid,
			data->currfid);
		return 1;
	}

	return 0;
}

/* Write a new vid to the hardware
 * Returns 1 on error, 0 on success. The fid must not change as a side
 * effect of a vid transition. */
static int write_new_vid(struct powernow_k8_data *data, u32 vid)
{
	u32 lo;
	u32 savefid = data->currfid;
	int i = 0;

	if ((data->currfid & INVALID_FID_MASK) || (vid & INVALID_VID_MASK)) {
		pr_err("internal error - overflow on vid write\n");
		return 1;
	}

	lo = data->currfid;
	lo |= (vid << MSR_C_LO_VID_SHIFT);
	lo |= MSR_C_LO_INIT_FID_VID;

	pr_debug("writing vid 0x%x, lo 0x%x, hi 0x%x\n",
		vid, lo, STOP_GRANT_5NS);

	/* retry the write until the pending bit clears, up to 100 times */
	do {
		wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS);
		if (i++ > 100) {
			pr_err("internal error - pending bit very stuck - no further pstate changes possible\n");
			return 1;
		}
	} while (query_current_values_with_pending_wait(data));

	if (savefid != data->currfid) {
		pr_err("fid changed on vid trans, old 0x%x new 0x%x\n",
			savefid, data->currfid);
		return 1;
	}

	if (vid != data->currvid) {
		pr_err("vid trans failed, vid 0x%x, curr 0x%x\n",
			vid, data->currvid);
		return 1;
	}

	return 0;
}

/*
 * Reduce the vid by the max of step or reqvid.
 * Decreasing vid codes represent increasing voltages:
 * vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of VID_OFF is off.
 */
static int decrease_vid_code_by_step(struct powernow_k8_data *data,
		u32 reqvid, u32 step)
{
	/* never move more than one "step" per call */
	if ((data->currvid - reqvid) > step)
		reqvid = data->currvid - step;

	if (write_new_vid(data, reqvid))
		return 1;

	/* let the voltage settle before the caller proceeds */
	count_off_vst(data);

	return 0;
}

/* Change Opteron/Athlon64 fid and vid, by the 3 phases. Returns 1 on error. */
static int transition_fid_vid(struct powernow_k8_data *data,
		u32 reqfid, u32 reqvid)
{
	if (core_voltage_pre_transition(data, reqvid, reqfid))
		return 1;

	if (core_frequency_transition(data, reqfid))
		return 1;

	if (core_voltage_post_transition(data, reqvid))
		return 1;

	if (query_current_values_with_pending_wait(data))
		return 1;

	/* sanity check: hardware must now report exactly what we requested */
	if ((reqfid != data->currfid) || (reqvid != data->currvid)) {
		pr_err("failed (cpu%d): req 0x%x 0x%x, curr 0x%x 0x%x\n",
			smp_processor_id(),
			reqfid, reqvid, data->currfid, data->currvid);
		return 1;
	}

	pr_debug("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n",
		smp_processor_id(), data->currfid, data->currvid);

	return 0;
}

/* Phase 1 - core voltage transition ... setup voltage */
static int core_voltage_pre_transition(struct powernow_k8_data *data,
		u32 reqvid, u32 reqfid)
{
	u32 rvosteps = data->rvo;
	u32 savefid = data->currfid;
	u32 maxvid, lo, rvomult = 1;

	pr_debug("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, reqvid 0x%x, rvo 0x%x\n",
		smp_processor_id(),
		data->currfid, data->currvid, reqvid, data->rvo);

	/* ramp-voltage-offset is doubled when both fids are in the low table */
	if ((savefid < LO_FID_TABLE_TOP) && (reqfid < LO_FID_TABLE_TOP))
		rvomult = 2;
	rvosteps *= rvomult;
	rdmsr(MSR_FIDVID_STATUS, lo, maxvid);
	maxvid = 0x1f & (maxvid >> 16);
	pr_debug("ph1 maxvid=0x%x\n", maxvid);
	if (reqvid < maxvid) /* lower numbers are higher voltages */
		reqvid = maxvid;

	/* step the voltage up (vid down) toward the requested vid */
	while (data->currvid > reqvid) {
		pr_debug("ph1: curr 0x%x, req vid 0x%x\n",
			data->currvid, reqvid);
		if (decrease_vid_code_by_step(data, reqvid, data->vidmvs))
			return 1;
	}

	/* apply the additional ramp voltage offset, one vid code at a time,
	 * stopping early if we hit the processor's maximum voltage */
	while ((rvosteps > 0) &&
			((rvomult * data->rvo + data->currvid) > reqvid)) {
		if (data->currvid == maxvid) {
			rvosteps = 0;
		} else {
			pr_debug("ph1: changing vid for rvo, req 0x%x\n",
				data->currvid - 1);
			if (decrease_vid_code_by_step(data, data->currvid-1, 1))
				return 1;
			rvosteps--;
		}
	}

	if (query_current_values_with_pending_wait(data))
		return 1;

	/* phase 1 must only change voltage, never frequency */
	if (savefid != data->currfid) {
		pr_err("ph1 err, currfid changed 0x%x\n", data->currfid);
		return 1;
	}

	pr_debug("ph1 complete, currfid 0x%x, currvid 0x%x\n",
		data->currfid, data->currvid);

	return 0;
}

/* Phase 2 - core frequency transition */
static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
{
	u32 vcoreqfid, vcocurrfid, vcofiddiff;
	u32 fid_interval, savevid = data->currvid;

	if (data->currfid == reqfid) {
		pr_err("ph2 null fid transition 0x%x\n", data->currfid);
		return 0;
	}

	pr_debug("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, reqfid 0x%x\n",
		smp_processor_id(),
		data->currfid, data->currvid, reqfid);

	vcoreqfid = convert_fid_to_vco_fid(reqfid);
	vcocurrfid = convert_fid_to_vco_fid(data->currfid);
	vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
	    : vcoreqfid - vcocurrfid;

	if ((reqfid <= LO_FID_TABLE_TOP) && (data->currfid <= LO_FID_TABLE_TOP))
		vcofiddiff = 0;

	/* walk toward the target in steps no larger than 2 vco fid codes */
	while (vcofiddiff > 2) {
		(data->currfid & 1) ? (fid_interval = 1) : (fid_interval = 2);

		if (reqfid > data->currfid) {
			if (data->currfid > LO_FID_TABLE_TOP) {
				if (write_new_fid(data,
						data->currfid + fid_interval))
					return 1;
			} else {
				/* climbing out of the low table goes via the
				 * corresponding vco ("high") fid */
				if (write_new_fid
				    (data,
				     2 + convert_fid_to_vco_fid(data->currfid)))
					return 1;
			}
		} else {
			if (write_new_fid(data, data->currfid - fid_interval))
				return 1;
		}

		vcocurrfid = convert_fid_to_vco_fid(data->currfid);
		vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
		    : vcoreqfid - vcocurrfid;
	}

	/* final hop to the exact requested fid */
	if (write_new_fid(data, reqfid))
		return 1;

	if (query_current_values_with_pending_wait(data))
		return 1;

	if (data->currfid != reqfid) {
		pr_err("ph2: mismatch, failed fid transition, curr 0x%x, req 0x%x\n",
			data->currfid, reqfid);
		return 1;
	}

	/* phase 2 must only change frequency, never voltage */
	if (savevid != data->currvid) {
		pr_err("ph2: vid changed, save 0x%x, curr 0x%x\n",
			savevid, data->currvid);
		return 1;
	}

	pr_debug("ph2 complete, currfid 0x%x, currvid 0x%x\n",
		data->currfid, data->currvid);

	return 0;
}

/* Phase 3 - core voltage transition flow ... jump to the final vid. */
static int core_voltage_post_transition(struct powernow_k8_data *data,
		u32 reqvid)
{
	u32 savefid = data->currfid;
	u32 savereqvid = reqvid;

	pr_debug("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n",
		smp_processor_id(),
		data->currfid, data->currvid);

	if (reqvid != data->currvid) {
		if (write_new_vid(data, reqvid))
			return 1;

		/* phase 3 must only change voltage, never frequency */
		if (savefid != data->currfid) {
			pr_err("ph3: bad fid change, save 0x%x, curr 0x%x\n",
				savefid, data->currfid);
			return 1;
		}

		if (data->currvid != reqvid) {
			pr_err("ph3: failed vid transition\n, req 0x%x, curr 0x%x",
				reqvid, data->currvid);
			return 1;
		}
	}

	if (query_current_values_with_pending_wait(data))
		return 1;

	if (savereqvid != data->currvid) {
		pr_debug("ph3 failed, currvid 0x%x\n", data->currvid);
		return 1;
	}

	if (savefid != data->currfid) {
		pr_debug("ph3 failed, currfid changed 0x%x\n",
			data->currfid);
		return 1;
	}

	pr_debug("ph3 complete, currfid 0x%x, currvid 0x%x\n",
		data->currfid, data->currvid);

	return 0;
}

static const struct x86_cpu_id powernow_k8_ids[] = {
	/* IO based frequency switching */
	{ X86_VENDOR_AMD, 0xf },
	{}
};
MODULE_DEVICE_TABLE(x86cpu, powernow_k8_ids);

/* Run via smp_call_function_single(); sets *rc to 0 when this CPU is a
 * supported K8 with frequency/voltage transition capability, -ENODEV
 * otherwise. */
static void check_supported_cpu(void *_rc)
{
	u32 eax, ebx, ecx, edx;
	int *rc = _rc;

	*rc = -ENODEV;

	eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);

	if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) {
		if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
		    ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) {
			pr_info("Processor cpuid %x not supported\n", eax);
			return;
		}

		eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES);
		if (eax < CPUID_FREQ_VOLT_CAPABILITIES) {
			pr_info("No frequency change capabilities detected\n");
			return;
		}

		cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
		if ((edx & P_STATE_TRANSITION_CAPABLE)
			!= P_STATE_TRANSITION_CAPABLE) {
			pr_info("Power state transitions not supported\n");
			return;
		}
		*rc = 0;
	}
}

/* Validate the BIOS-provided PST entries against hardware limits.
 * Returns 0 if the table is usable, negative errno otherwise. */
static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
		u8 maxvid)
{
	unsigned int j;
	u8 lastfid = 0xff;

	for (j = 0; j < data->numps; j++) {
		if (pst[j].vid > LEAST_VID) {
			pr_err(FW_BUG "vid %d invalid : 0x%x\n", j,
				pst[j].vid);
			return -EINVAL;
		}
		if (pst[j].vid < data->rvo) {
			/* vid + rvo >= 0 */
			pr_err(FW_BUG "0 vid exceeded with pstate %d\n", j);
			return -ENODEV;
		}
		if (pst[j].vid < maxvid + data->rvo) {
			/* vid + rvo >= maxvid */
			pr_err(FW_BUG "maxvid exceeded with pstate %d\n", j);
			return -ENODEV;
		}
		if (pst[j].fid > MAX_FID) {
			pr_err(FW_BUG "maxfid exceeded with pstate %d\n", j);
			return -ENODEV;
		}
		if (j && (pst[j].fid < HI_FID_TABLE_BOTTOM)) {
			/* Only first fid is allowed to be in "low" range */
			pr_err(FW_BUG "two low fids - %d : 0x%x\n", j,
				pst[j].fid);
			return -EINVAL;
		}
		if (pst[j].fid < lastfid)
			lastfid = pst[j].fid;
	}
	if (lastfid & 1) {
		pr_err(FW_BUG "lastfid invalid\n");
		return -EINVAL;
	}
	if (lastfid > LO_FID_TABLE_TOP)
		pr_info(FW_BUG "first fid not from lo freq table\n");

	return 0;
}

static void invalidate_entry(struct cpufreq_frequency_table *powernow_table,
		unsigned int entry)
{
	powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
}

/* Log the valid entries of the frequency table for this CPU. */
static void print_basics(struct powernow_k8_data *data)
{
	int j;
	for (j = 0; j < data->numps; j++) {
		if (data->powernow_table[j].frequency !=
				CPUFREQ_ENTRY_INVALID) {
			pr_info("fid 0x%x (%d MHz), vid 0x%x\n",
				data->powernow_table[j].driver_data & 0xff,
				data->powernow_table[j].frequency/1000,
				data->powernow_table[j].driver_data >> 8);
		}
	}
	if (data->batps)
		pr_info("Only %d pstates on battery\n", data->batps);
}

/* Build data->powernow_table from a BIOS PST table.
 * Returns 0 on success, negative errno on failure. */
static int fill_powernow_table(struct powernow_k8_data *data,
		struct pst_s *pst, u8 maxvid)
{
	struct cpufreq_frequency_table *powernow_table;
	unsigned int j;

	if (data->batps) {
		/* use ACPI support to get full speed on mains power */
		pr_warn("Only %d pstates usable (use ACPI driver for full range\n",
			data->batps);
		data->numps = data->batps;
	}

	/* fids must be strictly increasing through the table */
	for (j = 1; j < data->numps; j++) {
		if (pst[j-1].fid >= pst[j].fid) {
			pr_err("PST out of sequence\n");
			return -EINVAL;
		}
	}

	if (data->numps < 2) {
		pr_err("no p states to transition\n");
		return -ENODEV;
	}

	if (check_pst_table(data, pst, maxvid))
		return -EINVAL;

	powernow_table = kzalloc((sizeof(*powernow_table)
		* (data->numps + 1)), GFP_KERNEL);
	if (!powernow_table) {
		pr_err("powernow_table memory alloc failure\n");
		return -ENOMEM;
	}

	for (j = 0; j < data->numps; j++) {
		int freq;
		/* driver_data encodes fid in the low byte, vid in the next */
		powernow_table[j].driver_data = pst[j].fid; /* lower 8 bits */
		powernow_table[j].driver_data |= (pst[j].vid << 8); /* upper 8 bits */
		freq = find_khz_freq_from_fid(pst[j].fid);
		powernow_table[j].frequency = freq;
	}
	powernow_table[data->numps].frequency = CPUFREQ_TABLE_END;
	powernow_table[data->numps].driver_data = 0;

	if (query_current_values_with_pending_wait(data)) {
		kfree(powernow_table);
		return -EIO;
	}

	pr_debug("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
	data->powernow_table = powernow_table;
	if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
		print_basics(data);

	for (j = 0; j < data->numps; j++)
		if ((pst[j].fid == data->currfid) &&
		    (pst[j].vid == data->currvid))
			return 0;

	pr_debug("currfid/vid do not match PST, ignoring\n");
	return 0;
}

/* Find and validate the PSB/PST table in BIOS. */
static int find_psb_table(struct powernow_k8_data *data)
{
	struct psb_s *psb;
	unsigned int i;
	u32 mvs;
	u8 maxvid;
	u32 cpst = 0;
	u32 thiscpuid;

	for (i = 0xc0000; i < 0xffff0; i += 0x10) {
		/* Scan BIOS looking for the signature. */
		/* It can not be at ffff0 - it is too big. */

		psb = phys_to_virt(i);
		if (memcmp(psb, PSB_ID_STRING, PSB_ID_STRING_LEN) != 0)
			continue;

		pr_debug("found PSB header at 0x%p\n", psb);

		pr_debug("table vers: 0x%x\n", psb->tableversion);
		if (psb->tableversion != PSB_VERSION_1_4) {
			pr_err(FW_BUG "PSB table is not v1.4\n");
			return -ENODEV;
		}

		pr_debug("flags: 0x%x\n", psb->flags1);
		if (psb->flags1) {
			pr_err(FW_BUG "unknown flags\n");
			return -ENODEV;
		}

		data->vstable = psb->vstable;
		pr_debug("voltage stabilization time: %d(*20us)\n",
			data->vstable);

		/* flags2 packs rvo, irt, mvs and batps as 2-bit fields */
		pr_debug("flags2: 0x%x\n", psb->flags2);
		data->rvo = psb->flags2 & 3;
		data->irt = ((psb->flags2) >> 2) & 3;
		mvs = ((psb->flags2) >> 4) & 3;
		data->vidmvs = 1 << mvs;
		data->batps = ((psb->flags2) >> 6) & 3;

		pr_debug("ramp voltage offset: %d\n", data->rvo);
		pr_debug("isochronous relief time: %d\n", data->irt);
		pr_debug("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs);

		pr_debug("numpst: 0x%x\n", psb->num_tables);
		cpst = psb->num_tables;
		if ((psb->cpuid == 0x00000fc0) ||
		    (psb->cpuid == 0x00000fe0)) {
			thiscpuid = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
			if ((thiscpuid == 0x00000fc0) ||
			    (thiscpuid == 0x00000fe0))
				cpst = 1;
		}
		if (cpst != 1) {
			pr_err(FW_BUG "numpst must be 1\n");
			return -ENODEV;
		}

		data->plllock = psb->plllocktime;
		pr_debug("plllocktime: 0x%x (units 1us)\n", psb->plllocktime);
		pr_debug("maxfid: 0x%x\n", psb->maxfid);
		pr_debug("maxvid: 0x%x\n", psb->maxvid);
		maxvid = psb->maxvid;

		data->numps = psb->numps;
		pr_debug("numpstates: 0x%x\n", data->numps);
		/* the PST entries immediately follow the PSB header */
		return fill_powernow_table(data,
				(struct pst_s *)(psb+1), maxvid);
	}
	/*
	 * If you see this message, complain to BIOS manufacturer. If
	 * he tells you "we do not support Linux" or some similar
	 * nonsense, remember that Windows 2000 uses the same legacy
	 * mechanism that the old Linux PSB driver uses. Tell them it
	 * is broken with Windows 2000.
	 *
	 * The reference to the AMD documentation is chapter 9 in the
	 * BIOS and Kernel Developer's Guide, which is available on
	 * www.amd.com
	 */
	pr_err(FW_BUG "No PSB or ACPI _PSS objects\n");
	pr_err("Make sure that your BIOS is up to date and Cool'N'Quiet support is enabled in BIOS setup\n");
	return -ENODEV;
}

/* Refresh transition parameters (irt, rvo, plllock, ...) from the ACPI
 * control value of the given P-state index. No-op without ACPI data. */
static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data,
		unsigned int index)
{
	u64 control;

	if (!data->acpi_data.state_count)
		return;

	control = data->acpi_data.states[index].control;
	data->irt = (control >> IRT_SHIFT) & IRT_MASK;
	data->rvo = (control >> RVO_SHIFT) & RVO_MASK;
	data->exttype = (control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
	data->plllock = (control >> PLL_L_SHIFT) & PLL_L_MASK;
	data->vidmvs = 1 << ((control >> MVS_SHIFT) & MVS_MASK);
	data->vstable = (control >> VST_SHIFT) & VST_MASK;
}

/* Initialize driver state from ACPI _PSS data.
 * Returns 0 on success, negative errno on failure; on failure
 * data->acpi_data.state_count is reset to 0 so ->exit() can tell
 * whether ACPI was used. */
static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
{
	struct cpufreq_frequency_table *powernow_table;
	int ret_val = -ENODEV;
	u64 control, status;

	if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
		pr_debug("register performance failed: bad ACPI data\n");
		return -EIO;
	}

	/* verify the data contained in the ACPI structures */
	if (data->acpi_data.state_count <= 1) {
		pr_debug("No ACPI P-States\n");
		goto err_out;
	}

	control = data->acpi_data.control_register.space_id;
	status = data->acpi_data.status_register.space_id;

	if ((control != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
	    (status != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
		pr_debug("Invalid control/status registers (%llx - %llx)\n",
			control, status);
		goto err_out;
	}

	/* fill in data->powernow_table */
	powernow_table = kzalloc((sizeof(*powernow_table)
		* (data->acpi_data.state_count + 1)), GFP_KERNEL);
	if (!powernow_table) {
		pr_debug("powernow_table memory alloc failure\n");
		goto err_out;
	}

	/* fill in data */
	data->numps = data->acpi_data.state_count;
	powernow_k8_acpi_pst_values(data, 0);

	ret_val = fill_powernow_table_fidvid(data, powernow_table);
	if (ret_val)
		goto err_out_mem;

	powernow_table[data->acpi_data.state_count].frequency =
		CPUFREQ_TABLE_END;
	data->powernow_table = powernow_table;

	if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
		print_basics(data);

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	if (!zalloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
		pr_err("unable to alloc powernow_k8_data cpumask\n");
		ret_val = -ENOMEM;
		goto err_out_mem;
	}

	return 0;

err_out_mem:
	kfree(powernow_table);

err_out:
	acpi_processor_unregister_performance(&data->acpi_data, data->cpu);

	/* data->acpi_data.state_count informs us at ->exit()
	 * whether ACPI was used */
	data->acpi_data.state_count = 0;

	return ret_val;
}

/* Populate a frequency table from ACPI _PSS states, invalidating
 * entries with out-of-range frequencies, "off" vids, or a frequency
 * that disagrees with the ACPI core_frequency. Always returns 0. */
static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
		struct cpufreq_frequency_table *powernow_table)
{
	int i;

	for (i = 0; i < data->acpi_data.state_count; i++) {
		u32 fid;
		u32 vid;
		u32 freq, index;
		u64 status, control;

		/* exttype selects whether fid/vid come from the status or
		 * the control value of the ACPI state */
		if (data->exttype) {
			status = data->acpi_data.states[i].status;
			fid = status & EXT_FID_MASK;
			vid = (status >> VID_SHIFT) & EXT_VID_MASK;
		} else {
			control = data->acpi_data.states[i].control;
			fid = control & FID_MASK;
			vid = (control >> VID_SHIFT) & VID_MASK;
		}

		pr_debug("   %d : fid 0x%x, vid 0x%x\n", i, fid, vid);

		index = fid | (vid<<8);
		powernow_table[i].driver_data = index;

		freq = find_khz_freq_from_fid(fid);
		powernow_table[i].frequency = freq;

		/* verify frequency is OK */
		if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) {
			pr_debug("invalid freq %u kHz, ignoring\n", freq);
			invalidate_entry(powernow_table, i);
			continue;
		}

		/* verify voltage is OK -
		 * BIOSs are using "off" to indicate invalid */
		if (vid == VID_OFF) {
			pr_debug("invalid vid %u, ignoring\n", vid);
			invalidate_entry(powernow_table, i);
			continue;
		}

		if (freq != (data->acpi_data.states[i].core_frequency * 1000)) {
			pr_info("invalid freq entries %u kHz vs. %u kHz\n",
				freq, (unsigned int)
				(data->acpi_data.states[i].core_frequency
				 * 1000));
			invalidate_entry(powernow_table, i);
			continue;
		}
	}
	return 0;
}

static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
{
	/* state_count == 0 means ACPI init failed or was never done */
	if (data->acpi_data.state_count)
		acpi_processor_unregister_performance(&data->acpi_data,
				data->cpu);
	free_cpumask_var(data->acpi_data.shared_cpu_map);
}

/* Return the worst-case ACPI transition latency in nanoseconds. */
static int get_transition_latency(struct powernow_k8_data *data)
{
	int max_latency = 0;
	int i;
	for (i = 0; i < data->acpi_data.state_count; i++) {
		int cur_latency = data->acpi_data.states[i].transition_latency
			+ data->acpi_data.states[i].bus_master_latency;
		if (cur_latency > max_latency)
			max_latency = cur_latency;
	}
	if (max_latency == 0) {
		pr_err(FW_WARN "Invalid zero transition latency\n");
		max_latency = 1;
	}
	/* value in usecs, needs to be in nanoseconds */
	return 1000 * max_latency;
}

/* Take a frequency, and issue the fid/vid transition command */
static int transition_frequency_fidvid(struct powernow_k8_data *data,
		unsigned int index)
{
	struct cpufreq_policy *policy;
	u32 fid = 0;
	u32 vid = 0;
	int res;
	struct cpufreq_freqs freqs;

	pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index);

	/* fid/vid correctness check for k8 */
	/* fid are the lower 8 bits of the index we stored into
	 * the cpufreq frequency table in find_psb_table, vid
	 * are the upper 8 bits.
	 */
	fid = data->powernow_table[index].driver_data & 0xFF;
	vid = (data->powernow_table[index].driver_data & 0xFF00) >> 8;

	pr_debug("table matched fid 0x%x, giving vid 0x%x\n", fid, vid);

	if (query_current_values_with_pending_wait(data))
		return 1;

	if ((data->currvid == vid) && (data->currfid == fid)) {
		pr_debug("target matches current values (fid 0x%x, vid 0x%x)\n",
			fid, vid);
		return 0;
	}

	pr_debug("cpu %d, changing to fid 0x%x, vid 0x%x\n",
		smp_processor_id(), fid, vid);
	freqs.old = find_khz_freq_from_fid(data->currfid);
	freqs.new = find_khz_freq_from_fid(fid);

	policy = cpufreq_cpu_get(smp_processor_id());
	cpufreq_cpu_put(policy);

	cpufreq_freq_transition_begin(policy, &freqs);
	res = transition_fid_vid(data, fid, vid);
	cpufreq_freq_transition_end(policy, &freqs, res);

	return res;
}

/* argument bundle for work_on_cpu() in powernowk8_target() */
struct powernowk8_target_arg {
	struct cpufreq_policy		*pol;
	unsigned			newstate;
};

/* Runs on the target CPU (via work_on_cpu) and performs the actual
 * P-state change under fidvid_mutex. */
static long powernowk8_target_fn(void *arg)
{
	struct powernowk8_target_arg *pta = arg;
	struct cpufreq_policy *pol = pta->pol;
	unsigned newstate = pta->newstate;
	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
	u32 checkfid;
	u32 checkvid;
	int ret;

	if (!data)
		return -EINVAL;

	checkfid = data->currfid;
	checkvid = data->currvid;

	if (pending_bit_stuck()) {
		pr_err("failing targ, change pending bit set\n");
		return -EIO;
	}

	pr_debug("targ: cpu %d, %d kHz, min %d, max %d\n",
		pol->cpu, data->powernow_table[newstate].frequency, pol->min,
		pol->max);

	if (query_current_values_with_pending_wait(data))
		return -EIO;

	pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
		data->currfid, data->currvid);

	/* warn (but continue) if cached values drifted from hardware */
	if ((checkvid != data->currvid) ||
	    (checkfid != data->currfid)) {
		pr_info("error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n",
			checkfid, data->currfid,
			checkvid, data->currvid);
	}

	mutex_lock(&fidvid_mutex);

	powernow_k8_acpi_pst_values(data, newstate);

	ret = transition_frequency_fidvid(data, newstate);

	if (ret) {
		pr_err("transition frequency failed\n");
		mutex_unlock(&fidvid_mutex);
		return 1;
	}
	mutex_unlock(&fidvid_mutex);

	pol->cur = find_khz_freq_from_fid(data->currfid);

	return 0;
}

/* Driver entry point to switch to the target frequency */
static int powernowk8_target(struct cpufreq_policy *pol, unsigned index)
{
	struct powernowk8_target_arg pta = { .pol = pol, .newstate = index };

	/* MSR accesses must happen on the CPU being changed */
	return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
}

/* argument/result bundle for powernowk8_cpu_init_on_cpu() */
struct init_on_cpu {
	struct powernow_k8_data *data;
	int rc;
};

/* Runs on the target CPU; verifies the pending bit and current values,
 * then initializes the fid/vid control MSR. Result in init_on_cpu->rc. */
static void powernowk8_cpu_init_on_cpu(void *_init_on_cpu)
{
	struct init_on_cpu *init_on_cpu = _init_on_cpu;

	if (pending_bit_stuck()) {
		pr_err("failing init, change pending bit set\n");
		init_on_cpu->rc = -ENODEV;
		return;
	}

	if (query_current_values_with_pending_wait(init_on_cpu->data)) {
		init_on_cpu->rc = -ENODEV;
		return;
	}

	fidvid_msr_init();

	init_on_cpu->rc = 0;
}

#define MISSING_PSS_MSG \
	FW_BUG "No compatible ACPI _PSS objects found.\n" \
	FW_BUG "First, make sure Cool'N'Quiet is enabled in the BIOS.\n" \
	FW_BUG "If that doesn't help, try upgrading your BIOS.\n"

/* per CPU init entry point to the driver */
static int powernowk8_cpu_init(struct cpufreq_policy *pol)
{
	struct powernow_k8_data *data;
	struct init_on_cpu init_on_cpu;
	int rc, cpu;

	smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
	if (rc)
		return -ENODEV;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		pr_err("unable to alloc powernow_k8_data");
		return -ENOMEM;
	}

	data->cpu = pol->cpu;

	if (powernow_k8_cpu_init_acpi(data)) {
		/*
		 * Use the PSB BIOS structure. This is only available on
		 * an UP version, and is deprecated by AMD.
		 */
		if (num_online_cpus() != 1) {
			pr_err_once(MISSING_PSS_MSG);
			goto err_out;
		}
		if (pol->cpu != 0) {
			pr_err(FW_BUG "No ACPI _PSS objects for CPU other than CPU0. Complain to your BIOS vendor.\n");
			goto err_out;
		}
		rc = find_psb_table(data);
		if (rc)
			goto err_out;

		/* Take a crude guess here.
		 * That guess was in microseconds, so multiply with 1000 */
		pol->cpuinfo.transition_latency = (
			 ((data->rvo + 8) * data->vstable * VST_UNITS_20US) +
			 ((1 << data->irt) * 30)) * 1000;
	} else /* ACPI _PSS objects available */
		pol->cpuinfo.transition_latency = get_transition_latency(data);

	/* only run on specific CPU from here on */
	init_on_cpu.data = data;
	smp_call_function_single(data->cpu, powernowk8_cpu_init_on_cpu,
				 &init_on_cpu, 1);
	rc = init_on_cpu.rc;
	if (rc != 0)
		goto err_out_exit_acpi;

	cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
	data->available_cores = pol->cpus;

	/* min/max the cpu is capable of */
	if (cpufreq_table_validate_and_show(pol, data->powernow_table)) {
		pr_err(FW_BUG "invalid powernow_table\n");
		powernow_k8_cpu_exit_acpi(data);
		kfree(data->powernow_table);
		kfree(data);
		return -EINVAL;
	}

	pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
		data->currfid, data->currvid);

	/* Point all the CPUs in this policy to the same data */
	for_each_cpu(cpu, pol->cpus)
		per_cpu(powernow_data, cpu) = data;

	return 0;

err_out_exit_acpi:
	powernow_k8_cpu_exit_acpi(data);

err_out:
	kfree(data);
	return -ENODEV;
}

static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
{
	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
	int cpu;

	if (!data)
		return -EINVAL;

	powernow_k8_cpu_exit_acpi(data);

	kfree(data->powernow_table);
	kfree(data);
	/* the same data was shared by all CPUs of the policy; clear every
	 * per-CPU pointer before it dangles */
	for_each_cpu(cpu, pol->cpus)
		per_cpu(powernow_data, cpu) = NULL;

	return 0;
}

/* Runs on the queried CPU; stores 0/1 error status in *_err. */
static void query_values_on_cpu(void *_err)
{
	int *err = _err;
	struct powernow_k8_data *data = __this_cpu_read(powernow_data);

	*err = query_current_values_with_pending_wait(data);
}

/* ->get() callback: report the current frequency in kHz, or 0 on error
 * or when the driver is not initialized for this cpu. */
static unsigned int powernowk8_get(unsigned int cpu)
{
	struct powernow_k8_data *data = per_cpu(powernow_data, cpu);
	unsigned int khz = 0;
	int err;

	if (!data)
		return 0;

	smp_call_function_single(cpu, query_values_on_cpu, &err, true);
	if (err)
		goto out;

	khz = find_khz_freq_from_fid(data->currfid);


out:
	return khz;
}

static struct cpufreq_driver cpufreq_amd64_driver = {
	.flags		= CPUFREQ_ASYNC_NOTIFICATION,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= powernowk8_target,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= powernowk8_cpu_init,
	.exit		= powernowk8_cpu_exit,
	.get		= powernowk8_get,
	.name		= "powernow-k8",
	.attr		= cpufreq_generic_attr,
};

/* Hand over to acpi-cpufreq on hardware-P-state-capable CPUs; only
 * request the module if no cpufreq driver is registered yet. */
static void __request_acpi_cpufreq(void)
{
	const char *cur_drv, *drv = "acpi-cpufreq";

	cur_drv = cpufreq_get_current_driver();
	if (!cur_drv)
		goto request;

	if (strncmp(cur_drv, drv, min_t(size_t, strlen(cur_drv), strlen(drv))))
		pr_warn("WTF driver: %s\n", cur_drv);

	return;

request:
	pr_warn("This CPU is not supported anymore, using acpi-cpufreq instead.\n");
	request_module(drv);
}

/* driver entry point for init */
static int powernowk8_init(void)
{
	unsigned int i, supported_cpus = 0;
	int ret;

	if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
		__request_acpi_cpufreq();
		return -ENODEV;
	}

	if (!x86_match_cpu(powernow_k8_ids))
		return -ENODEV;

	/* refuse to load unless every online CPU is supported */
	get_online_cpus();
	for_each_online_cpu(i) {
		smp_call_function_single(i, check_supported_cpu, &ret, 1);
		if (!ret)
			supported_cpus++;
	}

	if (supported_cpus != num_online_cpus()) {
		put_online_cpus();
		return -ENODEV;
	}
	put_online_cpus();

	ret = cpufreq_register_driver(&cpufreq_amd64_driver);
	if (ret)
		return ret;

	pr_info("Found %d %s (%d cpu cores) (" VERSION ")\n",
		num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);

	return ret;
}

/* driver entry point for term */
static void __exit powernowk8_exit(void)
{
	pr_debug("exit\n");

	cpufreq_unregister_driver(&cpufreq_amd64_driver);
}

MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com>");
MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@amd.com>");
MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver.");
MODULE_LICENSE("GPL");

late_initcall(powernowk8_init);
module_exit(powernowk8_exit);