// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h/17h
 *		processor hardware monitoring
 *
 * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
 * Copyright (c) 2020 Guenter Roeck <linux@roeck-us.net>
 *
 * Implementation notes:
 * - CCD register address information as well as the calculation to
 *   convert raw register values is from https://github.com/ocerman/zenpower.
 *   The information is not confirmed from chip datasheets, but experiments
 *   suggest that it provides reasonable temperature values.
 * - Register addresses to read chip voltage and current are also from
 *   https://github.com/ocerman/zenpower, and not confirmed from chip
 *   datasheets. Current calibration is board specific and not typically
 *   shared by board vendors. For this reason, current values are
 *   normalized to report 1A/LSB for core current and 0.25A/LSB for SoC
 *   current. Reported values can be adjusted using the sensors configuration
 *   file.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>
#include <asm/processor.h>

MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor");
MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
MODULE_LICENSE("GPL");

static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "force loading on processors with erratum 319");

/* Provide lock for writing to NB_SMU_IND_ADDR */
static DEFINE_MUTEX(nb_smu_ind_mutex);

#ifndef PCI_DEVICE_ID_AMD_15H_M70H_NB_F3
#define PCI_DEVICE_ID_AMD_15H_M70H_NB_F3	0x15b3
#endif

/* CPUID function 0x80000001, ebx */
#define CPUID_PKGTYPE_MASK	GENMASK(31, 28)
#define CPUID_PKGTYPE_F		0x00000000
#define CPUID_PKGTYPE_AM2R2_AM3	0x10000000

/* DRAM controller (PCI function 2) */
#define REG_DCT0_CONFIG_HIGH		0x094
#define  DDR3_MODE			BIT(8)

/* miscellaneous (PCI function 3) */
#define REG_HARDWARE_THERMAL_CONTROL	0x64
#define  HTC_ENABLE			BIT(0)

#define REG_REPORTED_TEMPERATURE	0xa4

#define REG_NORTHBRIDGE_CAPABILITIES	0xe8
#define  NB_CAP_HTC			BIT(10)

/*
 * For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL
 * and REG_REPORTED_TEMPERATURE have been moved to
 * D0F0xBC_xD820_0C64 [Hardware Temperature Control]
 * D0F0xBC_xD820_0CA4 [Reported Temperature Control]
 */
#define F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET	0xd8200c64
#define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET	0xd8200ca4

/* F17h M01h Access through SMN */
#define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET	0x00059800

#define F17H_M70H_CCD_TEMP(x)			(0x00059954 + ((x) * 4))
#define F17H_M70H_CCD_TEMP_VALID		BIT(11)
#define F17H_M70H_CCD_TEMP_MASK			GENMASK(10, 0)

#define F17H_M01H_SVI				0x0005A000
#define F17H_M01H_SVI_TEL_PLANE0		(F17H_M01H_SVI + 0xc)
#define F17H_M01H_SVI_TEL_PLANE1		(F17H_M01H_SVI + 0x10)

#define CUR_TEMP_SHIFT				21
#define CUR_TEMP_RANGE_SEL_MASK			BIT(19)

#define CFACTOR_ICORE				1000000	/* 1A / LSB */
#define CFACTOR_ISOC				250000	/* 0.25A / LSB */

struct k10temp_data {
	struct pci_dev *pdev;
	void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
	void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
	int temp_offset;
	u32 temp_adjust_mask;
	u32 show_temp;
	u32 svi_addr[2];
	bool is_zen;
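	/* Expose SVI voltage/current telemetry ("in" and "curr" channels) */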
	bool show_current;
	int cfactor[2];
};

#define TCTL_BIT	0
#define TDIE_BIT	1
#define TCCD_BIT(x)	((x) + 2)

#define HAVE_TEMP(d, channel)	((d)->show_temp & BIT(channel))
#define HAVE_TDIE(d)		HAVE_TEMP(d, TDIE_BIT)

struct tctl_offset {
	u8 model;
	char const *id;
	int offset;
};

static const struct tctl_offset tctl_offset_table[] = {
	{ 0x17, "AMD Ryzen 5 1600X", 20000 },
	{ 0x17, "AMD Ryzen 7 1700X", 20000 },
	{ 0x17, "AMD Ryzen 7 1800X", 20000 },
	{ 0x17, "AMD Ryzen 7 2700X", 10000 },
	{ 0x17, "AMD Ryzen Threadripper 19", 27000 }, /* 19{00,20,50}X */
	{ 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */
};

static bool is_threadripper(void)
{
	return strstr(boot_cpu_data.x86_model_id, "Threadripper");
}

static bool is_epyc(void)
{
	return strstr(boot_cpu_data.x86_model_id, "EPYC");
}

static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
{
	pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval);
}

static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval)
{
	pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval);
}

static void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn,
			      unsigned int base, int offset, u32 *val)
{
	mutex_lock(&nb_smu_ind_mutex);
	pci_bus_write_config_dword(pdev->bus, devfn,
				   base, offset);
	pci_bus_read_config_dword(pdev->bus, devfn,
				  base + 4, val);
	mutex_unlock(&nb_smu_ind_mutex);
}

static void read_htcreg_nb_f15(struct pci_dev *pdev, u32 *regval)
{
	amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
			  F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET, regval);
}

static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
{
	amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
			  F15H_M60H_REPORTED_TEMP_CTRL_OFFSET, regval);
}

static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
{
	amd_smn_read(amd_pci_dev_to_node_id(pdev),
		     F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
}

static long get_raw_temp(struct k10temp_data *data)
{
	u32 regval;
	long temp;

	data->read_tempreg(data->pdev, &regval);
	/* Bits 31:21 hold the temperature, in units of 0.125 degC */
	temp = (regval >> CUR_TEMP_SHIFT) * 125;
	if (regval & data->temp_adjust_mask)
		temp -= 49000;
	return temp;
}

static const char *k10temp_temp_label[] = {
	"Tctl",
	"Tdie",
	"Tccd1",
	"Tccd2",
	"Tccd3",
	"Tccd4",
	"Tccd5",
	"Tccd6",
	"Tccd7",
	"Tccd8",
};

static const char *k10temp_in_label[] = {
	"Vcore",
	"Vsoc",
};

static const char *k10temp_curr_label[] = {
	"Icore",
	"Isoc",
};

static int k10temp_read_labels(struct device *dev,
			       enum hwmon_sensor_types type,
			       u32 attr, int channel, const char **str)
{
	switch (type) {
	case hwmon_temp:
		*str = k10temp_temp_label[channel];
		break;
	case hwmon_in:
		*str = k10temp_in_label[channel];
		break;
	case hwmon_curr:
		*str = k10temp_curr_label[channel];
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int k10temp_read_curr(struct device *dev, u32 attr, int channel,
			     long *val)
{
	struct k10temp_data *data = dev_get_drvdata(dev);
	u32 regval;

	switch (attr) {
	case hwmon_curr_input:
		amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
			     data->svi_addr[channel], &regval);
		/* Current is in bits 7:0; cfactor is in uA/LSB, result in mA */
		*val = DIV_ROUND_CLOSEST(data->cfactor[channel] *
					 (regval & 0xff),
					 1000);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int k10temp_read_in(struct device *dev, u32 attr, int channel, long *val)
{
	struct k10temp_data *data = dev_get_drvdata(dev);
	u32 regval;

	switch (attr) {
	case hwmon_in_input:
		amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
			     data->svi_addr[channel], &regval);
		/* Voltage is in bits 23:16; 1550 mV minus 6.25 mV per LSB */
		regval = (regval >> 16) & 0xff;
		*val = DIV_ROUND_CLOSEST(155000 - regval * 625, 100);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
			     long *val)
{
	struct k10temp_data *data = dev_get_drvdata(dev);
	u32 regval;

	switch (attr) {
	case hwmon_temp_input:
		switch (channel) {
		case 0:		/* Tctl */
			*val = get_raw_temp(data);
			if (*val < 0)
				*val = 0;
			break;
		case 1:		/* Tdie */
			*val = get_raw_temp(data) - data->temp_offset;
			if (*val < 0)
				*val = 0;
			break;
		case 2 ... 9:	/* Tccd{1-8} */
			amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
				     F17H_M70H_CCD_TEMP(channel - 2), &regval);
			*val = (regval & F17H_M70H_CCD_TEMP_MASK) * 125 - 49000;
			break;
		default:
			return -EOPNOTSUPP;
		}
		break;
	case hwmon_temp_max:
		*val = 70 * 1000;
		break;
	case hwmon_temp_crit:
		data->read_htcreg(data->pdev, &regval);
		*val = ((regval >> 16) & 0x7f) * 500 + 52000;
		break;
	case hwmon_temp_crit_hyst:
		data->read_htcreg(data->pdev, &regval);
		*val = (((regval >> 16) & 0x7f)
			- ((regval >> 24) & 0xf)) * 500 + 52000;
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int k10temp_read(struct device *dev, enum hwmon_sensor_types type,
			u32 attr, int channel, long *val)
{
	switch (type) {
	case hwmon_temp:
		return k10temp_read_temp(dev, attr, channel, val);
	case hwmon_in:
		return k10temp_read_in(dev, attr, channel, val);
	case hwmon_curr:
		return k10temp_read_curr(dev, attr, channel, val);
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t k10temp_is_visible(const void *_data,
				  enum hwmon_sensor_types type,
				  u32 attr, int channel)
{
	const struct k10temp_data *data = _data;
	struct pci_dev *pdev = data->pdev;
	u32 reg;

	switch (type) {
	case hwmon_temp:
		switch (attr) {
		case hwmon_temp_input:
			if (!HAVE_TEMP(data, channel))
				return 0;
			break;
		case hwmon_temp_max:
			if (channel || data->is_zen)
				return 0;
			break;
		case hwmon_temp_crit:
		case hwmon_temp_crit_hyst:
			if (channel || !data->read_htcreg)
				return 0;

			pci_read_config_dword(pdev,
					      REG_NORTHBRIDGE_CAPABILITIES,
					      &reg);
			if (!(reg & NB_CAP_HTC))
				return 0;

			data->read_htcreg(data->pdev, &reg);
			if (!(reg & HTC_ENABLE))
				return 0;
			break;
		case hwmon_temp_label:
			/* Show temperature labels only on Zen CPUs */
			if (!data->is_zen || !HAVE_TEMP(data, channel))
				return 0;
			break;
		default:
			return 0;
		}
		break;
	case hwmon_in:
	case hwmon_curr:
		if (!data->show_current)
			return 0;
		break;
	default:
		return 0;
	}
	return 0444;
}

static bool has_erratum_319(struct pci_dev *pdev)
{
	u32 pkg_type, reg_dram_cfg;

	if (boot_cpu_data.x86 != 0x10)
		return false;

	/*
	 * Erratum 319: The thermal sensor of Socket F/AM2+ processors
	 *              may be unreliable.
	 */
	pkg_type = cpuid_ebx(0x80000001) & CPUID_PKGTYPE_MASK;
	if (pkg_type == CPUID_PKGTYPE_F)
		return true;
	if (pkg_type != CPUID_PKGTYPE_AM2R2_AM3)
		return false;

	/* DDR3 memory implies socket AM3, which is good */
	pci_bus_read_config_dword(pdev->bus,
				  PCI_DEVFN(PCI_SLOT(pdev->devfn), 2),
				  REG_DCT0_CONFIG_HIGH, &reg_dram_cfg);
	if (reg_dram_cfg & DDR3_MODE)
		return false;

	/*
	 * Unfortunately it is possible to run a socket AM3 CPU with DDR2
	 * memory. We blacklist all the cores which do exist in socket AM2+
	 * format. It still isn't perfect, as RB-C2 cores exist in both AM2+
	 * and AM3 formats, but that's the best we can do.
	 */
	return boot_cpu_data.x86_model < 4 ||
	       (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
}

#ifdef CONFIG_DEBUG_FS

static void k10temp_smn_regs_show(struct seq_file *s, struct pci_dev *pdev,
				  u32 addr, int count)
{
	u32 reg;
	int i;

	for (i = 0; i < count; i++) {
		if (!(i & 3))
			seq_printf(s, "0x%06x: ", addr + i * 4);
		amd_smn_read(amd_pci_dev_to_node_id(pdev), addr + i * 4, &reg);
		seq_printf(s, "%08x ", reg);
		if ((i & 3) == 3)
			seq_puts(s, "\n");
	}
}

static int svi_show(struct seq_file *s, void *unused)
{
	struct k10temp_data *data = s->private;

	k10temp_smn_regs_show(s, data->pdev, F17H_M01H_SVI, 32);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(svi);

static int thm_show(struct seq_file *s, void *unused)
{
	struct k10temp_data *data = s->private;

	k10temp_smn_regs_show(s, data->pdev,
			      F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, 256);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(thm);

static void k10temp_debugfs_cleanup(void *ddir)
{
	debugfs_remove_recursive(ddir);
}

static void k10temp_init_debugfs(struct k10temp_data *data)
{
	struct dentry *debugfs;
	char name[32];

	/* Only show debugfs data for Family 17h/18h CPUs */
	if (!data->is_zen)
		return;

	scnprintf(name, sizeof(name), "k10temp-%s", pci_name(data->pdev));

	debugfs = debugfs_create_dir(name, NULL);
	if (debugfs) {
		debugfs_create_file("svi", 0444, debugfs, data, &svi_fops);
		debugfs_create_file("thm", 0444, debugfs, data, &thm_fops);
		devm_add_action_or_reset(&data->pdev->dev,
					 k10temp_debugfs_cleanup, debugfs);
	}
}

#else

static void k10temp_init_debugfs(struct k10temp_data *data)
{
}

#endif

static const struct hwmon_channel_info *k10temp_info[] = {
	HWMON_CHANNEL_INFO(temp,
			   HWMON_T_INPUT | HWMON_T_MAX |
			   HWMON_T_CRIT | HWMON_T_CRIT_HYST |
			   HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL),
	HWMON_CHANNEL_INFO(in,
			   HWMON_I_INPUT | HWMON_I_LABEL,
			   HWMON_I_INPUT | HWMON_I_LABEL),
	HWMON_CHANNEL_INFO(curr,
			   HWMON_C_INPUT | HWMON_C_LABEL,
			   HWMON_C_INPUT | HWMON_C_LABEL),
	NULL
};

static const struct hwmon_ops k10temp_hwmon_ops = {
	.is_visible = k10temp_is_visible,
	.read = k10temp_read,
	.read_string = k10temp_read_labels,
};

static const struct hwmon_chip_info k10temp_chip_info = {
	.ops = &k10temp_hwmon_ops,
	.info = k10temp_info,
};

/*
 * Probe each CCD temperature register and record the CCDs that report a
 * valid reading, so that only populated CCDs are exposed as Tccd channels.
 */
static void k10temp_get_ccd_support(struct pci_dev *pdev,
				    struct k10temp_data *data, int limit)
{
	u32 regval;
	int i;

	for (i = 0; i < limit; i++) {
		amd_smn_read(amd_pci_dev_to_node_id(pdev),
			     F17H_M70H_CCD_TEMP(i), &regval);
		if (regval & F17H_M70H_CCD_TEMP_VALID)
			data->show_temp |= BIT(TCCD_BIT(i));
	}
}

static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int unreliable = has_erratum_319(pdev);
	struct device *dev = &pdev->dev;
	struct k10temp_data *data;
	struct device *hwmon_dev;
	int i;

	if (unreliable) {
		if (!force) {
			dev_err(dev,
				"unreliable CPU thermal sensor; monitoring disabled\n");
			return -ENODEV;
		}
		dev_warn(dev,
			 "unreliable CPU thermal sensor; check erratum 319\n");
	}

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->pdev = pdev;
	data->show_temp |= BIT(TCTL_BIT);	/* Always show Tctl */

	if (boot_cpu_data.x86 == 0x15 &&
	    ((boot_cpu_data.x86_model & 0xf0) == 0x60 ||
	     (boot_cpu_data.x86_model & 0xf0) == 0x70)) {
		data->read_htcreg = read_htcreg_nb_f15;
		data->read_tempreg = read_tempreg_nb_f15;
	} else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
		data->temp_adjust_mask = CUR_TEMP_RANGE_SEL_MASK;
		data->read_tempreg = read_tempreg_nb_f17;
		data->show_temp |= BIT(TDIE_BIT);	/* show Tdie */
		data->is_zen = true;

		switch (boot_cpu_data.x86_model) {
		case 0x1:	/* Zen */
		case 0x8:	/* Zen+ */
		case 0x11:	/* Zen APU */
		case 0x18:	/* Zen+ APU */
			data->show_current = !is_threadripper() && !is_epyc();
			data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE0;
			data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE1;
			data->cfactor[0] = CFACTOR_ICORE;
			data->cfactor[1] = CFACTOR_ISOC;
			k10temp_get_ccd_support(pdev, data, 4);
			break;
		case 0x31:	/* Zen2 Threadripper */
		case 0x71:	/* Zen2 */
			data->show_current = !is_threadripper() && !is_epyc();
			data->cfactor[0] = CFACTOR_ICORE;
			data->cfactor[1] = CFACTOR_ISOC;
			data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE1;
			data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE0;
			k10temp_get_ccd_support(pdev, data, 8);
			break;
		}
	} else {
		data->read_htcreg = read_htcreg_pci;
		data->read_tempreg = read_tempreg_pci;
	}

	for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
		const struct tctl_offset *entry = &tctl_offset_table[i];

		if (boot_cpu_data.x86 == entry->model &&
		    strstr(boot_cpu_data.x86_model_id, entry->id)) {
			data->temp_offset = entry->offset;
			break;
		}
	}

	hwmon_dev = devm_hwmon_device_register_with_info(dev, "k10temp", data,
							 &k10temp_chip_info,
							 NULL);
	if (IS_ERR(hwmon_dev))
		return PTR_ERR(hwmon_dev);

	k10temp_init_debugfs(data);

	return 0;
}

static const struct pci_device_id k10temp_id_table[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M70H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};
MODULE_DEVICE_TABLE(pci, k10temp_id_table);

static struct pci_driver k10temp_driver = {
	.name = "k10temp",
	.id_table = k10temp_id_table,
	.probe = k10temp_probe,
};

module_pci_driver(k10temp_driver);