1 /* 2 * (c) 2003-2012 Advanced Micro Devices, Inc. 3 * Your use of this code is subject to the terms and conditions of the 4 * GNU general public license version 2. See "COPYING" or 5 * http://www.gnu.org/licenses/gpl.html 6 * 7 * Maintainer: 8 * Andreas Herrmann <herrmann.der.user@googlemail.com> 9 * 10 * Based on the powernow-k7.c module written by Dave Jones. 11 * (C) 2003 Dave Jones on behalf of SuSE Labs 12 * (C) 2004 Dominik Brodowski <linux@brodo.de> 13 * (C) 2004 Pavel Machek <pavel@ucw.cz> 14 * Licensed under the terms of the GNU GPL License version 2. 15 * Based upon datasheets & sample CPUs kindly provided by AMD. 16 * 17 * Valuable input gratefully received from Dave Jones, Pavel Machek, 18 * Dominik Brodowski, Jacob Shin, and others. 19 * Originally developed by Paul Devriendt. 20 * 21 * Processor information obtained from Chapter 9 (Power and Thermal 22 * Management) of the "BIOS and Kernel Developer's Guide (BKDG) for 23 * the AMD Athlon 64 and AMD Opteron Processors" and section "2.x 24 * Power Management" in BKDGs for newer AMD CPU families. 25 * 26 * Tables for specific CPUs can be inferred from AMD's processor 27 * power and thermal data sheets, (e.g. 
30417.pdf, 30430.pdf, 43375.pdf)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <asm/msr.h>
#include <asm/cpu_device_id.h>

#include <linux/acpi.h>
#include <linux/mutex.h>
#include <acpi/processor.h>

#define VERSION "version 2.20.00"
#include "powernow-k8.h"

/* serialize freq changes */
static DEFINE_MUTEX(fidvid_mutex);

/* Per-CPU driver state; NULL until powernowk8_cpu_init() has run on that CPU. */
static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);

/* Forward declaration: the driver struct is defined at the bottom of the file. */
static struct cpufreq_driver cpufreq_amd64_driver;

/*
 * Return a frequency in MHz, given an input fid.
 * K8 fids encode 100 MHz steps above an 800 MHz base.
 */
static u32 find_freq_from_fid(u32 fid)
{
	return 800 + (fid * 100);
}

/* Return a frequency in KHz, given an input fid */
static u32 find_khz_freq_from_fid(u32 fid)
{
	return 1000 * find_freq_from_fid(fid);
}

/* Return the vco fid for an input fid
 *
 * Each "low" fid has corresponding "high" fid, and you can get to "low" fids
 * only from corresponding high fids. This returns "high" fid corresponding to
 * "low" one.
 */
static u32 convert_fid_to_vco_fid(u32 fid)
{
	if (fid < HI_FID_TABLE_BOTTOM)
		return 8 + (2 * fid);
	else
		return fid;
}

/*
 * Return 1 if the pending bit is set. Unless we just instructed the processor
 * to transition to a new state, seeing this bit set is really bad news.
 */
static int pending_bit_stuck(void)
{
	u32 lo, hi;

	/* Only the low word of the FIDVID status MSR carries the pending flag. */
	rdmsr(MSR_FIDVID_STATUS, lo, hi);
	return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0;
}

/*
 * Update the global current fid / vid values from the status msr.
 * Returns 1 on error.
 */
static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
{
	u32 lo, hi;
	u32 i = 0;

	/* Spin until the hardware clears the change-pending flag, with a
	 * bounded iteration count so a stuck bit cannot hang the CPU. */
	do {
		if (i++ > 10000) {
			pr_debug("detected change pending stuck\n");
			return 1;
		}
		rdmsr(MSR_FIDVID_STATUS, lo, hi);
	} while (lo & MSR_S_LO_CHANGE_PENDING);

	/* Cache the freshly-read current vid/fid in the per-CPU data. */
	data->currvid = hi & MSR_S_HI_CURRENT_VID;
	data->currfid = lo & MSR_S_LO_CURRENT_FID;

	return 0;
}

/* the isochronous relief time */
static void count_off_irt(struct powernow_k8_data *data)
{
	/* irt is an exponent: delay 10us << irt (from the PSB/ACPI data). */
	udelay((1 << data->irt) * 10);
	return;
}

/* the voltage stabilization time */
static void count_off_vst(struct powernow_k8_data *data)
{
	/* vstable is in 20us units. */
	udelay(data->vstable * VST_UNITS_20US);
	return;
}

/* need to init the control msr to a safe value (for each cpu) */
static void fidvid_msr_init(void)
{
	u32 lo, hi;
	u8 fid, vid;

	/* Seed the control MSR with the *current* fid/vid so the first real
	 * transition starts from a known-good state. */
	rdmsr(MSR_FIDVID_STATUS, lo, hi);
	vid = hi & MSR_S_HI_CURRENT_VID;
	fid = lo & MSR_S_LO_CURRENT_FID;
	lo = fid | (vid << MSR_C_LO_VID_SHIFT);
	hi = MSR_C_HI_STP_GNT_BENIGN;
	pr_debug("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi);
	wrmsr(MSR_FIDVID_CTL, lo, hi);
}

/*
 * Write the new fid value along with the other control fields to the msr.
 * The vid must stay unchanged across a fid transition; returns 1 on any
 * error or on a verify mismatch after the write.
 */
static int write_new_fid(struct powernow_k8_data *data, u32 fid)
{
	u32 lo;
	u32 savevid = data->currvid;
	u32 i = 0;

	if ((fid & INVALID_FID_MASK) || (data->currvid & INVALID_VID_MASK)) {
		pr_err("internal error - overflow on fid write\n");
		return 1;
	}

	lo = fid;
	lo |= (data->currvid << MSR_C_LO_VID_SHIFT);
	lo |= MSR_C_LO_INIT_FID_VID;

	pr_debug("writing fid 0x%x, lo 0x%x, hi 0x%x\n",
		fid, lo, data->plllock * PLL_LOCK_CONVERSION);

	/* Retry the write while the pending wait keeps failing, bounded. */
	do {
		wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION);
		if (i++ > 100) {
			pr_err("Hardware error - pending bit very stuck - no further pstate changes possible\n");
			return 1;
		}
	} while (query_current_values_with_pending_wait(data));

	/* Give the core the mandated isochronous relief time. */
	count_off_irt(data);

	/* Verify: the vid must not have moved, and the fid must have taken. */
	if (savevid != data->currvid) {
		pr_err("vid change on fid trans, old 0x%x, new 0x%x\n",
			savevid, data->currvid);
		return 1;
	}

	if (fid != data->currfid) {
		pr_err("fid trans failed, fid 0x%x, curr 0x%x\n", fid,
			data->currfid);
		return 1;
	}

	return 0;
}

/*
 * Write a new vid to the hardware.
 * The fid must stay unchanged across a vid transition; returns 1 on any
 * error or on a verify mismatch after the write.
 */
static int write_new_vid(struct powernow_k8_data *data, u32 vid)
{
	u32 lo;
	u32 savefid = data->currfid;
	int i = 0;

	if ((data->currfid & INVALID_FID_MASK) || (vid & INVALID_VID_MASK)) {
		pr_err("internal error - overflow on vid write\n");
		return 1;
	}

	lo = data->currfid;
	lo |= (vid << MSR_C_LO_VID_SHIFT);
	lo |= MSR_C_LO_INIT_FID_VID;

	pr_debug("writing vid 0x%x, lo 0x%x, hi 0x%x\n",
		vid, lo, STOP_GRANT_5NS);

	/* Retry the write while the pending wait keeps failing, bounded. */
	do {
		wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS);
		if (i++ > 100) {
			pr_err("internal error - pending bit very stuck - no further pstate changes possible\n");
			return 1;
		}
	} while (query_current_values_with_pending_wait(data));

	/* Verify: the fid must not have moved, and the vid must have taken. */
	if (savefid != data->currfid) {
		pr_err("fid changed on vid trans, old 0x%x new 0x%x\n",
			savefid, data->currfid);
		return 1;
	}

	if (vid != data->currvid) {
		pr_err("vid trans failed, vid 0x%x, curr 0x%x\n",
			vid, data->currvid);
		return 1;
	}

	return 0;
}

/*
 * Reduce the vid by the max of step or reqvid.
 * Decreasing vid codes represent increasing voltages:
 * vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of VID_OFF is off.
 */
static int decrease_vid_code_by_step(struct powernow_k8_data *data,
		u32 reqvid, u32 step)
{
	/* Clamp the move to at most 'step' vid codes per call.
	 * NOTE(review): assumes reqvid <= data->currvid; if a caller ever
	 * passed a larger reqvid the u32 subtraction would wrap — confirm
	 * against the callers (phase 1/3 only step downward). */
	if ((data->currvid - reqvid) > step)
		reqvid = data->currvid - step;

	if (write_new_vid(data, reqvid))
		return 1;

	/* Let the voltage stabilize before the next step. */
	count_off_vst(data);

	return 0;
}

/* Change Opteron/Athlon64 fid and vid, by the 3 phases. */
static int transition_fid_vid(struct powernow_k8_data *data,
		u32 reqfid, u32 reqvid)
{
	/* Phase 1: raise the core voltage enough to support the new fid. */
	if (core_voltage_pre_transition(data, reqvid, reqfid))
		return 1;

	/* Phase 2: step the frequency (fid) to the requested value. */
	if (core_frequency_transition(data, reqfid))
		return 1;

	/* Phase 3: settle the voltage at the final requested vid. */
	if (core_voltage_post_transition(data, reqvid))
		return 1;

	/* Re-read the hardware and verify both values actually took. */
	if (query_current_values_with_pending_wait(data))
		return 1;

	if ((reqfid != data->currfid) || (reqvid != data->currvid)) {
		pr_err("failed (cpu%d): req 0x%x 0x%x, curr 0x%x 0x%x\n",
			smp_processor_id(),
			reqfid, reqvid, data->currfid, data->currvid);
		return 1;
	}

	pr_debug("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n",
		smp_processor_id(), data->currfid, data->currvid);

	return 0;
}

/* Phase 1 - core voltage transition ...
   setup voltage */
static int core_voltage_pre_transition(struct powernow_k8_data *data,
		u32 reqvid, u32 reqfid)
{
	u32 rvosteps = data->rvo;
	u32 savefid = data->currfid;
	u32 maxvid, lo, rvomult = 1;

	pr_debug("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, reqvid 0x%x, rvo 0x%x\n",
		smp_processor_id(),
		data->currfid, data->currvid, reqvid, data->rvo);

	/* Double the ramp-voltage-offset steps when both the current and the
	 * requested fid are in the "low" fid table range. */
	if ((savefid < LO_FID_TABLE_TOP) && (reqfid < LO_FID_TABLE_TOP))
		rvomult = 2;
	rvosteps *= rvomult;
	/* maxvid lives in bits 16-20 of the status MSR high word. */
	rdmsr(MSR_FIDVID_STATUS, lo, maxvid);
	maxvid = 0x1f & (maxvid >> 16);
	pr_debug("ph1 maxvid=0x%x\n", maxvid);
	if (reqvid < maxvid) /* lower numbers are higher voltages */
		reqvid = maxvid;

	/* Step the vid down (voltage up) to the requested vid. */
	while (data->currvid > reqvid) {
		pr_debug("ph1: curr 0x%x, req vid 0x%x\n",
			data->currvid, reqvid);
		if (decrease_vid_code_by_step(data, reqvid, data->vidmvs))
			return 1;
	}

	/* Then apply the ramp voltage offset, one vid code at a time, never
	 * exceeding the hardware maximum voltage (maxvid). */
	while ((rvosteps > 0) &&
			((rvomult * data->rvo + data->currvid) > reqvid)) {
		if (data->currvid == maxvid) {
			rvosteps = 0;
		} else {
			pr_debug("ph1: changing vid for rvo, req 0x%x\n",
				data->currvid - 1);
			if (decrease_vid_code_by_step(data, data->currvid-1, 1))
				return 1;
			rvosteps--;
		}
	}

	if (query_current_values_with_pending_wait(data))
		return 1;

	/* Phase 1 must not have disturbed the fid. */
	if (savefid != data->currfid) {
		pr_err("ph1 err, currfid changed 0x%x\n", data->currfid);
		return 1;
	}

	pr_debug("ph1 complete, currfid 0x%x, currvid 0x%x\n",
		data->currfid, data->currvid);

	return 0;
}

/* Phase 2 - core frequency transition */
static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
{
	u32 vcoreqfid, vcocurrfid, vcofiddiff;
	u32 fid_interval, savevid = data->currvid;

	/* A null transition is a caller bug, but it is harmless: report it
	 * and succeed rather than touching the hardware. */
	if (data->currfid == reqfid) {
		pr_err("ph2 null fid transition 0x%x\n", data->currfid);
		return 0;
	}

	pr_debug("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, reqfid 0x%x\n",
		smp_processor_id(),
		data->currfid, data->currvid, reqfid);

	/* The fid may only move in limited VCO increments, so walk toward
	 * the target in steps until the VCO fids are within 2 of each other. */
	vcoreqfid = convert_fid_to_vco_fid(reqfid);
	vcocurrfid = convert_fid_to_vco_fid(data->currfid);
	vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
		: vcoreqfid - vcocurrfid;

	/* Within the low fid table the step restriction does not apply. */
	if ((reqfid <= LO_FID_TABLE_TOP) && (data->currfid <= LO_FID_TABLE_TOP))
		vcofiddiff = 0;

	while (vcofiddiff > 2) {
		/* Odd fids step by 1, even fids by 2. */
		(data->currfid & 1) ? (fid_interval = 1) : (fid_interval = 2);

		if (reqfid > data->currfid) {
			if (data->currfid > LO_FID_TABLE_TOP) {
				if (write_new_fid(data,
						data->currfid + fid_interval))
					return 1;
			} else {
				/* Leaving the low table: jump via the VCO fid. */
				if (write_new_fid
				    (data,
				     2 + convert_fid_to_vco_fid(data->currfid)))
					return 1;
			}
		} else {
			if (write_new_fid(data, data->currfid - fid_interval))
				return 1;
		}

		vcocurrfid = convert_fid_to_vco_fid(data->currfid);
		vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
			: vcoreqfid - vcocurrfid;
	}

	/* Final step directly to the requested fid. */
	if (write_new_fid(data, reqfid))
		return 1;

	if (query_current_values_with_pending_wait(data))
		return 1;

	if (data->currfid != reqfid) {
		pr_err("ph2: mismatch, failed fid transition, curr 0x%x, req 0x%x\n",
			data->currfid, reqfid);
		return 1;
	}

	/* Phase 2 must not have disturbed the vid. */
	if (savevid != data->currvid) {
		pr_err("ph2: vid changed, save 0x%x, curr 0x%x\n",
			savevid, data->currvid);
		return 1;
	}

	pr_debug("ph2 complete, currfid 0x%x, currvid 0x%x\n",
		data->currfid, data->currvid);

	return 0;
}

/* Phase 3 - core voltage transition flow ... jump to the final vid.
*/ 411 static int core_voltage_post_transition(struct powernow_k8_data *data, 412 u32 reqvid) 413 { 414 u32 savefid = data->currfid; 415 u32 savereqvid = reqvid; 416 417 pr_debug("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n", 418 smp_processor_id(), 419 data->currfid, data->currvid); 420 421 if (reqvid != data->currvid) { 422 if (write_new_vid(data, reqvid)) 423 return 1; 424 425 if (savefid != data->currfid) { 426 pr_err("ph3: bad fid change, save 0x%x, curr 0x%x\n", 427 savefid, data->currfid); 428 return 1; 429 } 430 431 if (data->currvid != reqvid) { 432 pr_err("ph3: failed vid transition\n, req 0x%x, curr 0x%x", 433 reqvid, data->currvid); 434 return 1; 435 } 436 } 437 438 if (query_current_values_with_pending_wait(data)) 439 return 1; 440 441 if (savereqvid != data->currvid) { 442 pr_debug("ph3 failed, currvid 0x%x\n", data->currvid); 443 return 1; 444 } 445 446 if (savefid != data->currfid) { 447 pr_debug("ph3 failed, currfid changed 0x%x\n", 448 data->currfid); 449 return 1; 450 } 451 452 pr_debug("ph3 complete, currfid 0x%x, currvid 0x%x\n", 453 data->currfid, data->currvid); 454 455 return 0; 456 } 457 458 static const struct x86_cpu_id powernow_k8_ids[] = { 459 /* IO based frequency switching */ 460 { X86_VENDOR_AMD, 0xf }, 461 {} 462 }; 463 MODULE_DEVICE_TABLE(x86cpu, powernow_k8_ids); 464 465 static void check_supported_cpu(void *_rc) 466 { 467 u32 eax, ebx, ecx, edx; 468 int *rc = _rc; 469 470 *rc = -ENODEV; 471 472 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); 473 474 if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) { 475 if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) || 476 ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) { 477 pr_info("Processor cpuid %x not supported\n", eax); 478 return; 479 } 480 481 eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES); 482 if (eax < CPUID_FREQ_VOLT_CAPABILITIES) { 483 pr_info("No frequency change capabilities detected\n"); 484 return; 485 } 486 487 cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); 488 if 
((edx & P_STATE_TRANSITION_CAPABLE) 489 != P_STATE_TRANSITION_CAPABLE) { 490 pr_info("Power state transitions not supported\n"); 491 return; 492 } 493 *rc = 0; 494 } 495 } 496 497 static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, 498 u8 maxvid) 499 { 500 unsigned int j; 501 u8 lastfid = 0xff; 502 503 for (j = 0; j < data->numps; j++) { 504 if (pst[j].vid > LEAST_VID) { 505 pr_err(FW_BUG "vid %d invalid : 0x%x\n", j, 506 pst[j].vid); 507 return -EINVAL; 508 } 509 if (pst[j].vid < data->rvo) { 510 /* vid + rvo >= 0 */ 511 pr_err(FW_BUG "0 vid exceeded with pstate %d\n", j); 512 return -ENODEV; 513 } 514 if (pst[j].vid < maxvid + data->rvo) { 515 /* vid + rvo >= maxvid */ 516 pr_err(FW_BUG "maxvid exceeded with pstate %d\n", j); 517 return -ENODEV; 518 } 519 if (pst[j].fid > MAX_FID) { 520 pr_err(FW_BUG "maxfid exceeded with pstate %d\n", j); 521 return -ENODEV; 522 } 523 if (j && (pst[j].fid < HI_FID_TABLE_BOTTOM)) { 524 /* Only first fid is allowed to be in "low" range */ 525 pr_err(FW_BUG "two low fids - %d : 0x%x\n", j, 526 pst[j].fid); 527 return -EINVAL; 528 } 529 if (pst[j].fid < lastfid) 530 lastfid = pst[j].fid; 531 } 532 if (lastfid & 1) { 533 pr_err(FW_BUG "lastfid invalid\n"); 534 return -EINVAL; 535 } 536 if (lastfid > LO_FID_TABLE_TOP) 537 pr_info(FW_BUG "first fid not from lo freq table\n"); 538 539 return 0; 540 } 541 542 static void invalidate_entry(struct cpufreq_frequency_table *powernow_table, 543 unsigned int entry) 544 { 545 powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID; 546 } 547 548 static void print_basics(struct powernow_k8_data *data) 549 { 550 int j; 551 for (j = 0; j < data->numps; j++) { 552 if (data->powernow_table[j].frequency != 553 CPUFREQ_ENTRY_INVALID) { 554 pr_info("fid 0x%x (%d MHz), vid 0x%x\n", 555 data->powernow_table[j].driver_data & 0xff, 556 data->powernow_table[j].frequency/1000, 557 data->powernow_table[j].driver_data >> 8); 558 } 559 } 560 if (data->batps) 561 pr_info("Only %d pstates 
on battery\n", data->batps); 562 } 563 564 static int fill_powernow_table(struct powernow_k8_data *data, 565 struct pst_s *pst, u8 maxvid) 566 { 567 struct cpufreq_frequency_table *powernow_table; 568 unsigned int j; 569 570 if (data->batps) { 571 /* use ACPI support to get full speed on mains power */ 572 pr_warn("Only %d pstates usable (use ACPI driver for full range\n", 573 data->batps); 574 data->numps = data->batps; 575 } 576 577 for (j = 1; j < data->numps; j++) { 578 if (pst[j-1].fid >= pst[j].fid) { 579 pr_err("PST out of sequence\n"); 580 return -EINVAL; 581 } 582 } 583 584 if (data->numps < 2) { 585 pr_err("no p states to transition\n"); 586 return -ENODEV; 587 } 588 589 if (check_pst_table(data, pst, maxvid)) 590 return -EINVAL; 591 592 powernow_table = kzalloc((sizeof(*powernow_table) 593 * (data->numps + 1)), GFP_KERNEL); 594 if (!powernow_table) 595 return -ENOMEM; 596 597 for (j = 0; j < data->numps; j++) { 598 int freq; 599 powernow_table[j].driver_data = pst[j].fid; /* lower 8 bits */ 600 powernow_table[j].driver_data |= (pst[j].vid << 8); /* upper 8 bits */ 601 freq = find_khz_freq_from_fid(pst[j].fid); 602 powernow_table[j].frequency = freq; 603 } 604 powernow_table[data->numps].frequency = CPUFREQ_TABLE_END; 605 powernow_table[data->numps].driver_data = 0; 606 607 if (query_current_values_with_pending_wait(data)) { 608 kfree(powernow_table); 609 return -EIO; 610 } 611 612 pr_debug("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid); 613 data->powernow_table = powernow_table; 614 if (cpumask_first(topology_core_cpumask(data->cpu)) == data->cpu) 615 print_basics(data); 616 617 for (j = 0; j < data->numps; j++) 618 if ((pst[j].fid == data->currfid) && 619 (pst[j].vid == data->currvid)) 620 return 0; 621 622 pr_debug("currfid/vid do not match PST, ignoring\n"); 623 return 0; 624 } 625 626 /* Find and validate the PSB/PST table in BIOS. 
 */
static int find_psb_table(struct powernow_k8_data *data)
{
	struct psb_s *psb;
	unsigned int i;
	u32 mvs;
	u8 maxvid;
	u32 cpst = 0;
	u32 thiscpuid;

	/* Scan the legacy BIOS region in 16-byte steps for the PSB header. */
	for (i = 0xc0000; i < 0xffff0; i += 0x10) {
		/* Scan BIOS looking for the signature. */
		/* It can not be at ffff0 - it is too big. */

		psb = phys_to_virt(i);
		if (memcmp(psb, PSB_ID_STRING, PSB_ID_STRING_LEN) != 0)
			continue;

		pr_debug("found PSB header at 0x%p\n", psb);

		pr_debug("table vers: 0x%x\n", psb->tableversion);
		if (psb->tableversion != PSB_VERSION_1_4) {
			pr_err(FW_BUG "PSB table is not v1.4\n");
			return -ENODEV;
		}

		pr_debug("flags: 0x%x\n", psb->flags1);
		if (psb->flags1) {
			pr_err(FW_BUG "unknown flags\n");
			return -ENODEV;
		}

		data->vstable = psb->vstable;
		pr_debug("voltage stabilization time: %d(*20us)\n",
			data->vstable);

		/* flags2 packs rvo/irt/mvs/batps as 2-bit fields. */
		pr_debug("flags2: 0x%x\n", psb->flags2);
		data->rvo = psb->flags2 & 3;
		data->irt = ((psb->flags2) >> 2) & 3;
		mvs = ((psb->flags2) >> 4) & 3;
		data->vidmvs = 1 << mvs;
		data->batps = ((psb->flags2) >> 6) & 3;

		pr_debug("ramp voltage offset: %d\n", data->rvo);
		pr_debug("isochronous relief time: %d\n", data->irt);
		pr_debug("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs);

		pr_debug("numpst: 0x%x\n", psb->num_tables);
		cpst = psb->num_tables;
		/* Two specific revisions are allowed to ship num_tables != 1
		 * when the running CPU matches the table's cpuid. */
		if ((psb->cpuid == 0x00000fc0) ||
		    (psb->cpuid == 0x00000fe0)) {
			thiscpuid = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
			if ((thiscpuid == 0x00000fc0) ||
			    (thiscpuid == 0x00000fe0))
				cpst = 1;
		}
		if (cpst != 1) {
			pr_err(FW_BUG "numpst must be 1\n");
			return -ENODEV;
		}

		data->plllock = psb->plllocktime;
		pr_debug("plllocktime: 0x%x (units 1us)\n", psb->plllocktime);
		pr_debug("maxfid: 0x%x\n", psb->maxfid);
		pr_debug("maxvid: 0x%x\n", psb->maxvid);
		maxvid = psb->maxvid;

		data->numps = psb->numps;
		pr_debug("numpstates: 0x%x\n", data->numps);
		/* The PST entries immediately follow the PSB header. */
		return fill_powernow_table(data,
				(struct pst_s *)(psb+1), maxvid);
	}
	/*
	 * If you see this message, complain to BIOS manufacturer. If
	 * he tells you "we do not support Linux" or some similar
	 * nonsense, remember that Windows 2000 uses the same legacy
	 * mechanism that the old Linux PSB driver uses. Tell them it
	 * is broken with Windows 2000.
	 *
	 * The reference to the AMD documentation is chapter 9 in the
	 * BIOS and Kernel Developer's Guide, which is available on
	 * www.amd.com
	 */
	pr_err(FW_BUG "No PSB or ACPI _PSS objects\n");
	pr_err("Make sure that your BIOS is up to date and Cool'N'Quiet support is enabled in BIOS setup\n");
	return -ENODEV;
}

/* Unpack the per-state transition parameters from the ACPI _PSS control word. */
static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data,
		unsigned int index)
{
	u64 control;

	if (!data->acpi_data.state_count)
		return;

	control = data->acpi_data.states[index].control;
	data->irt = (control >> IRT_SHIFT) & IRT_MASK;
	data->rvo = (control >> RVO_SHIFT) & RVO_MASK;
	data->exttype = (control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
	data->plllock = (control >> PLL_L_SHIFT) & PLL_L_MASK;
	data->vidmvs = 1 << ((control >> MVS_SHIFT) & MVS_MASK);
	data->vstable = (control >> VST_SHIFT) & VST_MASK;
}

/*
 * Build data->powernow_table from ACPI _PSS data. Returns 0 on success;
 * on failure ACPI performance data is unregistered and state_count reset
 * to 0 so ->exit() knows ACPI was not used.
 */
static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
{
	struct cpufreq_frequency_table *powernow_table;
	int ret_val = -ENODEV;
	u64 control, status;

	if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
		pr_debug("register performance failed: bad ACPI data\n");
		return -EIO;
	}

	/* verify the data contained in the ACPI structures */
	if (data->acpi_data.state_count <= 1) {
		pr_debug("No ACPI P-States\n");
		goto err_out;
	}

	control = data->acpi_data.control_register.space_id;
	status = data->acpi_data.status_register.space_id;

	/* This driver only drives fid/vid via MSRs (FIXED_HARDWARE). */
	if ((control !=
ACPI_ADR_SPACE_FIXED_HARDWARE) ||
	    (status != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
		pr_debug("Invalid control/status registers (%llx - %llx)\n",
			control, status);
		goto err_out;
	}

	/* fill in data->powernow_table */
	/* One extra slot for the CPUFREQ_TABLE_END sentinel. */
	powernow_table = kzalloc((sizeof(*powernow_table)
		* (data->acpi_data.state_count + 1)), GFP_KERNEL);
	if (!powernow_table)
		goto err_out;

	/* fill in data */
	data->numps = data->acpi_data.state_count;
	powernow_k8_acpi_pst_values(data, 0);

	ret_val = fill_powernow_table_fidvid(data, powernow_table);
	if (ret_val)
		goto err_out_mem;

	powernow_table[data->acpi_data.state_count].frequency =
		CPUFREQ_TABLE_END;
	data->powernow_table = powernow_table;

	/* Only the first CPU of each core prints the table. */
	if (cpumask_first(topology_core_cpumask(data->cpu)) == data->cpu)
		print_basics(data);

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	if (!zalloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
		pr_err("unable to alloc powernow_k8_data cpumask\n");
		ret_val = -ENOMEM;
		goto err_out_mem;
	}

	return 0;

err_out_mem:
	kfree(powernow_table);

err_out:
	acpi_processor_unregister_performance(data->cpu);

	/* data->acpi_data.state_count informs us at ->exit()
	 * whether ACPI was used */
	data->acpi_data.state_count = 0;

	return ret_val;
}

/*
 * Populate the cpufreq table from the ACPI _PSS states, extracting fid/vid
 * from either the status word (exttype) or the control word, and marking
 * implausible entries invalid. Always returns 0.
 */
static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
		struct cpufreq_frequency_table *powernow_table)
{
	int i;

	for (i = 0; i < data->acpi_data.state_count; i++) {
		u32 fid;
		u32 vid;
		u32 freq, index;
		u64 status, control;

		if (data->exttype) {
			status = data->acpi_data.states[i].status;
			fid = status & EXT_FID_MASK;
			vid = (status >> VID_SHIFT) & EXT_VID_MASK;
		} else {
			control = data->acpi_data.states[i].control;
			fid = control & FID_MASK;
			vid = (control >> VID_SHIFT) & VID_MASK;
		}

		pr_debug("   %d : fid 0x%x, vid 0x%x\n", i, fid, vid);

		/* fid in the low byte, vid in the next byte. */
		index = fid | (vid<<8);
		powernow_table[i].driver_data = index;

		freq = find_khz_freq_from_fid(fid);
		powernow_table[i].frequency = freq;

		/* verify frequency is OK */
		if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) {
			pr_debug("invalid freq %u kHz, ignoring\n", freq);
			invalidate_entry(powernow_table, i);
			continue;
		}

		/* verify voltage is OK -
		 * BIOSs are using "off" to indicate invalid */
		if (vid == VID_OFF) {
			pr_debug("invalid vid %u, ignoring\n", vid);
			invalidate_entry(powernow_table, i);
			continue;
		}

		/* The fid-derived frequency must match what _PSS advertises. */
		if (freq != (data->acpi_data.states[i].core_frequency * 1000)) {
			pr_info("invalid freq entries %u kHz vs. %u kHz\n",
				freq, (unsigned int)
				(data->acpi_data.states[i].core_frequency
				 * 1000));
			invalidate_entry(powernow_table, i);
			continue;
		}
	}
	return 0;
}

/* Release the ACPI performance data and the shared-cpu mask, if any. */
static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
{
	if (data->acpi_data.state_count)
		acpi_processor_unregister_performance(data->cpu);
	free_cpumask_var(data->acpi_data.shared_cpu_map);
}

/*
 * Worst-case transition latency over all ACPI states, in nanoseconds.
 * A zero latency is a firmware bug; clamp it to 1us.
 */
static int get_transition_latency(struct powernow_k8_data *data)
{
	int max_latency = 0;
	int i;
	for (i = 0; i < data->acpi_data.state_count; i++) {
		int cur_latency = data->acpi_data.states[i].transition_latency
			+ data->acpi_data.states[i].bus_master_latency;
		if (cur_latency > max_latency)
			max_latency = cur_latency;
	}
	if (max_latency == 0) {
		pr_err(FW_WARN "Invalid zero transition latency\n");
		max_latency = 1;
	}
	/* value in usecs, needs to be in nanoseconds */
	return 1000 * max_latency;
}

/* Take a frequency, and issue the fid/vid transition command */
static int transition_frequency_fidvid(struct powernow_k8_data *data,
		unsigned int index)
{
	struct cpufreq_policy *policy;
	u32 fid = 0;
	u32 vid = 0;
891 int res; 892 struct cpufreq_freqs freqs; 893 894 pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index); 895 896 /* fid/vid correctness check for k8 */ 897 /* fid are the lower 8 bits of the index we stored into 898 * the cpufreq frequency table in find_psb_table, vid 899 * are the upper 8 bits. 900 */ 901 fid = data->powernow_table[index].driver_data & 0xFF; 902 vid = (data->powernow_table[index].driver_data & 0xFF00) >> 8; 903 904 pr_debug("table matched fid 0x%x, giving vid 0x%x\n", fid, vid); 905 906 if (query_current_values_with_pending_wait(data)) 907 return 1; 908 909 if ((data->currvid == vid) && (data->currfid == fid)) { 910 pr_debug("target matches current values (fid 0x%x, vid 0x%x)\n", 911 fid, vid); 912 return 0; 913 } 914 915 pr_debug("cpu %d, changing to fid 0x%x, vid 0x%x\n", 916 smp_processor_id(), fid, vid); 917 freqs.old = find_khz_freq_from_fid(data->currfid); 918 freqs.new = find_khz_freq_from_fid(fid); 919 920 policy = cpufreq_cpu_get(smp_processor_id()); 921 cpufreq_cpu_put(policy); 922 923 cpufreq_freq_transition_begin(policy, &freqs); 924 res = transition_fid_vid(data, fid, vid); 925 cpufreq_freq_transition_end(policy, &freqs, res); 926 927 return res; 928 } 929 930 struct powernowk8_target_arg { 931 struct cpufreq_policy *pol; 932 unsigned newstate; 933 }; 934 935 static long powernowk8_target_fn(void *arg) 936 { 937 struct powernowk8_target_arg *pta = arg; 938 struct cpufreq_policy *pol = pta->pol; 939 unsigned newstate = pta->newstate; 940 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); 941 u32 checkfid; 942 u32 checkvid; 943 int ret; 944 945 if (!data) 946 return -EINVAL; 947 948 checkfid = data->currfid; 949 checkvid = data->currvid; 950 951 if (pending_bit_stuck()) { 952 pr_err("failing targ, change pending bit set\n"); 953 return -EIO; 954 } 955 956 pr_debug("targ: cpu %d, %d kHz, min %d, max %d\n", 957 pol->cpu, data->powernow_table[newstate].frequency, pol->min, 958 pol->max); 959 960 if 
(query_current_values_with_pending_wait(data)) 961 return -EIO; 962 963 pr_debug("targ: curr fid 0x%x, vid 0x%x\n", 964 data->currfid, data->currvid); 965 966 if ((checkvid != data->currvid) || 967 (checkfid != data->currfid)) { 968 pr_info("error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n", 969 checkfid, data->currfid, 970 checkvid, data->currvid); 971 } 972 973 mutex_lock(&fidvid_mutex); 974 975 powernow_k8_acpi_pst_values(data, newstate); 976 977 ret = transition_frequency_fidvid(data, newstate); 978 979 if (ret) { 980 pr_err("transition frequency failed\n"); 981 mutex_unlock(&fidvid_mutex); 982 return 1; 983 } 984 mutex_unlock(&fidvid_mutex); 985 986 pol->cur = find_khz_freq_from_fid(data->currfid); 987 988 return 0; 989 } 990 991 /* Driver entry point to switch to the target frequency */ 992 static int powernowk8_target(struct cpufreq_policy *pol, unsigned index) 993 { 994 struct powernowk8_target_arg pta = { .pol = pol, .newstate = index }; 995 996 return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta); 997 } 998 999 struct init_on_cpu { 1000 struct powernow_k8_data *data; 1001 int rc; 1002 }; 1003 1004 static void powernowk8_cpu_init_on_cpu(void *_init_on_cpu) 1005 { 1006 struct init_on_cpu *init_on_cpu = _init_on_cpu; 1007 1008 if (pending_bit_stuck()) { 1009 pr_err("failing init, change pending bit set\n"); 1010 init_on_cpu->rc = -ENODEV; 1011 return; 1012 } 1013 1014 if (query_current_values_with_pending_wait(init_on_cpu->data)) { 1015 init_on_cpu->rc = -ENODEV; 1016 return; 1017 } 1018 1019 fidvid_msr_init(); 1020 1021 init_on_cpu->rc = 0; 1022 } 1023 1024 #define MISSING_PSS_MSG \ 1025 FW_BUG "No compatible ACPI _PSS objects found.\n" \ 1026 FW_BUG "First, make sure Cool'N'Quiet is enabled in the BIOS.\n" \ 1027 FW_BUG "If that doesn't help, try upgrading your BIOS.\n" 1028 1029 /* per CPU init entry point to the driver */ 1030 static int powernowk8_cpu_init(struct cpufreq_policy *pol) 1031 { 1032 struct powernow_k8_data *data; 1033 struct 
init_on_cpu init_on_cpu;
	int rc, cpu;

	/* Probe support on the policy's own CPU. */
	smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
	if (rc)
		return -ENODEV;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->cpu = pol->cpu;

	if (powernow_k8_cpu_init_acpi(data)) {
		/*
		 * Use the PSB BIOS structure. This is only available on
		 * an UP version, and is deprecated by AMD.
		 */
		if (num_online_cpus() != 1) {
			pr_err_once(MISSING_PSS_MSG);
			goto err_out;
		}
		if (pol->cpu != 0) {
			pr_err(FW_BUG "No ACPI _PSS objects for CPU other than CPU0. Complain to your BIOS vendor.\n");
			goto err_out;
		}
		rc = find_psb_table(data);
		if (rc)
			goto err_out;

		/* Take a crude guess here.
		 * That guess was in microseconds, so multiply with 1000 */
		pol->cpuinfo.transition_latency = (
			 ((data->rvo + 8) * data->vstable * VST_UNITS_20US) +
			 ((1 << data->irt) * 30)) * 1000;
	} else /* ACPI _PSS objects available */
		pol->cpuinfo.transition_latency = get_transition_latency(data);

	/* only run on specific CPU from here on */
	init_on_cpu.data = data;
	smp_call_function_single(data->cpu, powernowk8_cpu_init_on_cpu,
				 &init_on_cpu, 1);
	rc = init_on_cpu.rc;
	if (rc != 0)
		goto err_out_exit_acpi;

	/* All CPUs of the core share one fid/vid domain. */
	cpumask_copy(pol->cpus, topology_core_cpumask(pol->cpu));
	data->available_cores = pol->cpus;

	/* min/max the cpu is capable of */
	if (cpufreq_table_validate_and_show(pol, data->powernow_table)) {
		pr_err(FW_BUG "invalid powernow_table\n");
		powernow_k8_cpu_exit_acpi(data);
		kfree(data->powernow_table);
		kfree(data);
		return -EINVAL;
	}

	pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
		 data->currfid, data->currvid);

	/* Point all the CPUs in this policy to the same data */
	for_each_cpu(cpu, pol->cpus)
		per_cpu(powernow_data, cpu) = data;

	return 0;

err_out_exit_acpi:
	powernow_k8_cpu_exit_acpi(data);

err_out:
	kfree(data);
	return -ENODEV;
}

/* Per-CPU teardown: release ACPI data, free the table and shared state. */
static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
{
	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
	int cpu;

	if (!data)
		return -EINVAL;

	powernow_k8_cpu_exit_acpi(data);

	kfree(data->powernow_table);
	kfree(data);
	/* Clear every per-CPU pointer that aliased this policy's data. */
	for_each_cpu(cpu, pol->cpus)
		per_cpu(powernow_data, cpu) = NULL;

	return 0;
}

/* smp_call_function_single() callback: refresh currfid/currvid locally. */
static void query_values_on_cpu(void *_err)
{
	int *err = _err;
	struct powernow_k8_data *data = __this_cpu_read(powernow_data);

	*err = query_current_values_with_pending_wait(data);
}

/* ->get() callback: report the current frequency in kHz, or 0 on error. */
static unsigned int powernowk8_get(unsigned int cpu)
{
	struct powernow_k8_data *data = per_cpu(powernow_data, cpu);
	unsigned int khz = 0;
	int err;

	if (!data)
		return 0;

	/* The status MSR must be read on the target CPU itself. */
	smp_call_function_single(cpu, query_values_on_cpu, &err, true);
	if (err)
		goto out;

	khz = find_khz_freq_from_fid(data->currfid);


out:
	return khz;
}

static struct cpufreq_driver cpufreq_amd64_driver = {
	.flags		= CPUFREQ_ASYNC_NOTIFICATION,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= powernowk8_target,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= powernowk8_cpu_init,
	.exit		= powernowk8_cpu_exit,
	.get		= powernowk8_get,
	.name		= "powernow-k8",
	.attr		= cpufreq_generic_attr,
};

/*
 * This hardware is handled by acpi-cpufreq on CPUs with hardware p-states;
 * load that driver (or warn if some other driver already claimed cpufreq).
 */
static void __request_acpi_cpufreq(void)
{
	const char drv[] = "acpi-cpufreq";
	const char *cur_drv;

	cur_drv = cpufreq_get_current_driver();
	if (!cur_drv)
		goto request;

	/* NOTE(review): this warning text ("WTF driver") is terse; it fires
	 * when an unexpected cpufreq driver is already registered. */
	if (strncmp(cur_drv, drv, min_t(size_t, strlen(cur_drv), strlen(drv))))
		pr_warn("WTF driver: %s\n", cur_drv);

	return;

request:
	pr_warn("This CPU is not supported anymore, using acpi-cpufreq instead.\n");
	request_module(drv);
}

/* driver entry point for init */
static int powernowk8_init(void)
{
	unsigned int i, supported_cpus = 0;
	int ret;

	/* Hardware p-state CPUs belong to acpi-cpufreq, not this driver. */
	if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
		__request_acpi_cpufreq();
		return -ENODEV;
	}

	if (!x86_match_cpu(powernow_k8_ids))
		return -ENODEV;

	/* Refuse to load unless every online CPU supports fid/vid switching. */
	get_online_cpus();
	for_each_online_cpu(i) {
		smp_call_function_single(i, check_supported_cpu, &ret, 1);
		if (!ret)
			supported_cpus++;
	}

	if (supported_cpus != num_online_cpus()) {
		put_online_cpus();
		return -ENODEV;
	}
	put_online_cpus();

	ret = cpufreq_register_driver(&cpufreq_amd64_driver);
	if (ret)
		return ret;

	pr_info("Found %d %s (%d cpu cores) (" VERSION ")\n",
		num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);

	return ret;
}

/* driver entry point for term */
static void __exit powernowk8_exit(void)
{
	pr_debug("exit\n");

	cpufreq_unregister_driver(&cpufreq_amd64_driver);
}

MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com>");
MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@amd.com>");
MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver.");
MODULE_LICENSE("GPL");

late_initcall(powernowk8_init);
module_exit(powernowk8_exit);