// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2018-2021 Intel Corporation

#include <linux/auxiliary_bus.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/devm-helpers.h>
#include <linux/hwmon.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/peci.h>
#include <linux/peci-cpu.h>
#include <linux/units.h>
#include <linux/workqueue.h>

#include "common.h"

#define DIMM_MASK_CHECK_DELAY_JIFFIES	msecs_to_jiffies(5000)

/* Max number of channel ranks and DIMM index per channel */
#define CHAN_RANK_MAX_ON_HSX	8
#define DIMM_IDX_MAX_ON_HSX	3
#define CHAN_RANK_MAX_ON_BDX	4
#define DIMM_IDX_MAX_ON_BDX	3
#define CHAN_RANK_MAX_ON_BDXD	2
#define DIMM_IDX_MAX_ON_BDXD	2
#define CHAN_RANK_MAX_ON_SKX	6
#define DIMM_IDX_MAX_ON_SKX	2
#define CHAN_RANK_MAX_ON_ICX	8
#define DIMM_IDX_MAX_ON_ICX	2
#define CHAN_RANK_MAX_ON_ICXD	4
#define DIMM_IDX_MAX_ON_ICXD	2
#define CHAN_RANK_MAX_ON_SPR	8
#define DIMM_IDX_MAX_ON_SPR	2

#define CHAN_RANK_MAX	CHAN_RANK_MAX_ON_HSX
#define DIMM_IDX_MAX	DIMM_IDX_MAX_ON_HSX
#define DIMM_NUMS_MAX	(CHAN_RANK_MAX * DIMM_IDX_MAX)

#define CPU_SEG_MASK	GENMASK(23, 16)
#define GET_CPU_SEG(x)	(((x) & CPU_SEG_MASK) >> 16)
#define CPU_BUS_MASK	GENMASK(7, 0)
#define GET_CPU_BUS(x)	((x) & CPU_BUS_MASK)

#define DIMM_TEMP_MAX	GENMASK(15, 8)
#define DIMM_TEMP_CRIT	GENMASK(23, 16)
#define GET_TEMP_MAX(x)		(((x) & DIMM_TEMP_MAX) >> 8)
#define GET_TEMP_CRIT(x)	(((x) & DIMM_TEMP_CRIT) >> 16)

#define NO_DIMM_RETRY_COUNT_MAX	5

struct peci_dimmtemp;

struct dimm_info {
	int chan_rank_max;
	int dimm_idx_max;
	u8 min_peci_revision;
	int (*read_thresholds)(struct peci_dimmtemp *priv, int dimm_order,
			       int chan_rank, u32 *data);
};

struct peci_dimm_thresholds {
	long temp_max;
	long temp_crit;
	struct peci_sensor_state state;
};

enum peci_dimm_threshold_type {
	temp_max_type,
	temp_crit_type,
};

struct peci_dimmtemp {
	struct peci_device *peci_dev;
	struct device *dev;
	const char *name;
	const struct dimm_info *gen_info;
	struct delayed_work detect_work;
	struct {
		struct peci_sensor_data temp;
		struct peci_dimm_thresholds thresholds;
	} dimm[DIMM_NUMS_MAX];
	char **dimmtemp_label;
	DECLARE_BITMAP(dimm_mask, DIMM_NUMS_MAX);
	u8 no_dimm_retry_count;
};

static u8 __dimm_temp(u32 reg, int dimm_order)
{
	return (reg >> (dimm_order * 8)) & 0xff;
}
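
/*
 * The DDR DIMM temperature package config word packs one temperature byte
 * (in degrees Celsius) per DIMM slot of a channel, and the driver treats a
 * zero byte as an unpopulated slot (see check_populated_dimms()).
 * Illustrative decode of data == 0x002a1f28:
 *   __dimm_temp(0x002a1f28, 0) == 0x28 -> DIMM 0 at 40 degrees C
 *   __dimm_temp(0x002a1f28, 1) == 0x1f -> DIMM 1 at 31 degrees C
 *   __dimm_temp(0x002a1f28, 2) == 0x2a -> DIMM 2 at 42 degrees C
 */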
static int get_dimm_temp(struct peci_dimmtemp *priv, int dimm_no, long *val)
{
	int dimm_order = dimm_no % priv->gen_info->dimm_idx_max;
	int chan_rank = dimm_no / priv->gen_info->dimm_idx_max;
	int ret = 0;
	u32 data;

	mutex_lock(&priv->dimm[dimm_no].temp.state.lock);
	if (!peci_sensor_need_update(&priv->dimm[dimm_no].temp.state))
		goto skip_update;

	ret = peci_pcs_read(priv->peci_dev, PECI_PCS_DDR_DIMM_TEMP, chan_rank, &data);
	if (ret)
		goto unlock;

	priv->dimm[dimm_no].temp.value = __dimm_temp(data, dimm_order) * MILLIDEGREE_PER_DEGREE;

	peci_sensor_mark_updated(&priv->dimm[dimm_no].temp.state);

skip_update:
	*val = priv->dimm[dimm_no].temp.value;
unlock:
	mutex_unlock(&priv->dimm[dimm_no].temp.state.lock);
	return ret;
}

static int update_thresholds(struct peci_dimmtemp *priv, int dimm_no)
{
	int dimm_order = dimm_no % priv->gen_info->dimm_idx_max;
	int chan_rank = dimm_no / priv->gen_info->dimm_idx_max;
	u32 data;
	int ret;

	if (!peci_sensor_need_update(&priv->dimm[dimm_no].thresholds.state))
		return 0;

	ret = priv->gen_info->read_thresholds(priv, dimm_order, chan_rank, &data);
	if (ret == -ENODATA) /* Use default or previous value */
		return 0;
	if (ret)
		return ret;

	priv->dimm[dimm_no].thresholds.temp_max = GET_TEMP_MAX(data) * MILLIDEGREE_PER_DEGREE;
	priv->dimm[dimm_no].thresholds.temp_crit = GET_TEMP_CRIT(data) * MILLIDEGREE_PER_DEGREE;

	peci_sensor_mark_updated(&priv->dimm[dimm_no].thresholds.state);

	return 0;
}
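
/*
 * The threshold word returned by the read_thresholds() callbacks keeps
 * temp_max in bits 15:8 and temp_crit in bits 23:16, in degrees Celsius.
 * Illustrative decode of data == 0x005f5500:
 *   GET_TEMP_MAX(0x005f5500) == 0x55 -> temp_max = 85000 millidegrees
 *   GET_TEMP_CRIT(0x005f5500) == 0x5f -> temp_crit = 95000 millidegrees
 */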
static int get_dimm_thresholds(struct peci_dimmtemp *priv, enum peci_dimm_threshold_type type,
			       int dimm_no, long *val)
{
	int ret;

	mutex_lock(&priv->dimm[dimm_no].thresholds.state.lock);
	ret = update_thresholds(priv, dimm_no);
	if (ret)
		goto unlock;

	switch (type) {
	case temp_max_type:
		*val = priv->dimm[dimm_no].thresholds.temp_max;
		break;
	case temp_crit_type:
		*val = priv->dimm[dimm_no].thresholds.temp_crit;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}
unlock:
	mutex_unlock(&priv->dimm[dimm_no].thresholds.state.lock);

	return ret;
}

static int dimmtemp_read_string(struct device *dev,
				enum hwmon_sensor_types type,
				u32 attr, int channel, const char **str)
{
	struct peci_dimmtemp *priv = dev_get_drvdata(dev);

	if (attr != hwmon_temp_label)
		return -EOPNOTSUPP;

	*str = (const char *)priv->dimmtemp_label[channel];

	return 0;
}

static int dimmtemp_read(struct device *dev, enum hwmon_sensor_types type,
			 u32 attr, int channel, long *val)
{
	struct peci_dimmtemp *priv = dev_get_drvdata(dev);

	switch (attr) {
	case hwmon_temp_input:
		return get_dimm_temp(priv, channel, val);
	case hwmon_temp_max:
		return get_dimm_thresholds(priv, temp_max_type, channel, val);
	case hwmon_temp_crit:
		return get_dimm_thresholds(priv, temp_crit_type, channel, val);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static umode_t dimmtemp_is_visible(const void *data, enum hwmon_sensor_types type,
				   u32 attr, int channel)
{
	const struct peci_dimmtemp *priv = data;

	if (test_bit(channel, priv->dimm_mask))
		return 0444;

	return 0;
}

static const struct hwmon_ops peci_dimmtemp_ops = {
	.is_visible = dimmtemp_is_visible,
	.read_string = dimmtemp_read_string,
	.read = dimmtemp_read,
};

static int check_populated_dimms(struct peci_dimmtemp *priv)
{
	int chan_rank_max = priv->gen_info->chan_rank_max;
	int dimm_idx_max = priv->gen_info->dimm_idx_max;
	u32 chan_rank_empty = 0;
	u32 dimm_mask = 0;
	int chan_rank, dimm_idx, ret;
	u32 pcs;

	BUILD_BUG_ON(BITS_PER_TYPE(chan_rank_empty) < CHAN_RANK_MAX);
	BUILD_BUG_ON(BITS_PER_TYPE(dimm_mask) < DIMM_NUMS_MAX);
	if (chan_rank_max * dimm_idx_max > DIMM_NUMS_MAX) {
		WARN_ONCE(1, "Unsupported number of DIMMs - chan_rank_max: %d, dimm_idx_max: %d",
			  chan_rank_max, dimm_idx_max);
		return -EINVAL;
	}

	for (chan_rank = 0; chan_rank < chan_rank_max; chan_rank++) {
		ret = peci_pcs_read(priv->peci_dev, PECI_PCS_DDR_DIMM_TEMP, chan_rank, &pcs);
		if (ret) {
			/*
			 * Overall, we expect either success or -EINVAL in
			 * order to determine whether a DIMM is populated or
			 * not. For anything else we fall back to deferring
			 * the detection to be performed at a later point in
			 * time.
			 */
			if (ret == -EINVAL) {
				chan_rank_empty |= BIT(chan_rank);
				continue;
			}

			return -EAGAIN;
		}

		for (dimm_idx = 0; dimm_idx < dimm_idx_max; dimm_idx++)
			if (__dimm_temp(pcs, dimm_idx))
				dimm_mask |= BIT(chan_rank * dimm_idx_max + dimm_idx);
	}

	/*
	 * If we got all -EINVALs, it means that the CPU doesn't have any
	 * DIMMs. Unfortunately, it may also happen at the very start of
	 * host platform boot. Retrying a couple of times lets us make sure
	 * that the state is persistent.
	 */
	if (chan_rank_empty == GENMASK(chan_rank_max - 1, 0)) {
		if (priv->no_dimm_retry_count < NO_DIMM_RETRY_COUNT_MAX) {
			priv->no_dimm_retry_count++;

			return -EAGAIN;
		}

		return -ENODEV;
	}

	/*
	 * It's possible that memory training is not done yet. In this case we
	 * defer the detection to be performed at a later point in time.
	 */
	if (!dimm_mask) {
		priv->no_dimm_retry_count = 0;
		return -EAGAIN;
	}

	dev_dbg(priv->dev, "Scanned populated DIMMs: %#x\n", dimm_mask);

	bitmap_from_arr32(priv->dimm_mask, &dimm_mask, DIMM_NUMS_MAX);

	return 0;
}
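
/*
 * Sensor labels follow the "DIMM <channel letter><slot number>" convention:
 * the channel rank maps to a letter and the DIMM index within the channel
 * is numbered from 1. E.g. with dimm_idx_max == 3, channel 7 decodes to
 * rank 2, index 1, i.e. "DIMM C2".
 */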
static int create_dimm_temp_label(struct peci_dimmtemp *priv, int chan)
{
	int rank = chan / priv->gen_info->dimm_idx_max;
	int idx = chan % priv->gen_info->dimm_idx_max;

	priv->dimmtemp_label[chan] = devm_kasprintf(priv->dev, GFP_KERNEL,
						    "DIMM %c%d", 'A' + rank,
						    idx + 1);
	if (!priv->dimmtemp_label[chan])
		return -ENOMEM;

	return 0;
}

static const struct hwmon_channel_info * const peci_dimmtemp_temp_info[] = {
	HWMON_CHANNEL_INFO(temp,
			   [0 ... DIMM_NUMS_MAX - 1] = HWMON_T_LABEL |
				HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT),
	NULL
};

static const struct hwmon_chip_info peci_dimmtemp_chip_info = {
	.ops = &peci_dimmtemp_ops,
	.info = peci_dimmtemp_temp_info,
};

static int create_dimm_temp_info(struct peci_dimmtemp *priv)
{
	int ret, i, channels;
	struct device *dev;

	/*
	 * We expect to either find populated DIMMs and carry on with creating
	 * sensors, or find out that there are no DIMMs populated.
	 * All other states mean that the platform never reached a state that
	 * allows us to check the DIMM state, causing us to retry later on.
	 */
	ret = check_populated_dimms(priv);
	if (ret == -ENODEV) {
		dev_dbg(priv->dev, "No DIMMs found\n");
		return 0;
	} else if (ret) {
		schedule_delayed_work(&priv->detect_work, DIMM_MASK_CHECK_DELAY_JIFFIES);
		dev_dbg(priv->dev, "Deferred populating DIMM temp info\n");
		return ret;
	}

	channels = priv->gen_info->chan_rank_max * priv->gen_info->dimm_idx_max;

	priv->dimmtemp_label = devm_kzalloc(priv->dev, channels * sizeof(char *), GFP_KERNEL);
	if (!priv->dimmtemp_label)
		return -ENOMEM;

	for_each_set_bit(i, priv->dimm_mask, DIMM_NUMS_MAX) {
		ret = create_dimm_temp_label(priv, i);
		if (ret)
			return ret;
		mutex_init(&priv->dimm[i].thresholds.state.lock);
		mutex_init(&priv->dimm[i].temp.state.lock);
	}

	dev = devm_hwmon_device_register_with_info(priv->dev, priv->name, priv,
						   &peci_dimmtemp_chip_info, NULL);
	if (IS_ERR(dev)) {
		dev_err(priv->dev, "Failed to register hwmon device\n");
		return PTR_ERR(dev);
	}

	dev_dbg(priv->dev, "%s: sensor '%s'\n", dev_name(dev), priv->name);

	return 0;
}

static void create_dimm_temp_info_delayed(struct work_struct *work)
{
	struct peci_dimmtemp *priv = container_of(to_delayed_work(work),
						  struct peci_dimmtemp,
						  detect_work);
	int ret;

	ret = create_dimm_temp_info(priv);
	if (ret && ret != -EAGAIN)
		dev_err(priv->dev, "Failed to populate DIMM temp info\n");
}
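
/*
 * Every -EAGAIN from create_dimm_temp_info() re-arms detect_work, so DIMM
 * detection is retried every 5 seconds (DIMM_MASK_CHECK_DELAY_JIFFIES)
 * until DIMMs are found or the no-DIMM state is confirmed;
 * devm_delayed_work_autocancel() below cancels any pending retry on unbind.
 */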
static int peci_dimmtemp_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id)
{
	struct device *dev = &adev->dev;
	struct peci_device *peci_dev = to_peci_device(dev->parent);
	struct peci_dimmtemp *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->name = devm_kasprintf(dev, GFP_KERNEL, "peci_dimmtemp.cpu%d",
				    peci_dev->info.socket_id);
	if (!priv->name)
		return -ENOMEM;

	priv->dev = dev;
	priv->peci_dev = peci_dev;
	priv->gen_info = (const struct dimm_info *)id->driver_data;

	/*
	 * This is just a sanity check. Since we're using commands that are
	 * guaranteed to be supported on a given platform, we should never see
	 * a revision lower than expected.
	 */
	if (peci_dev->info.peci_revision < priv->gen_info->min_peci_revision)
		dev_warn(priv->dev,
			 "Unexpected PECI revision %#x, some features may be unavailable\n",
			 peci_dev->info.peci_revision);

	ret = devm_delayed_work_autocancel(priv->dev, &priv->detect_work,
					   create_dimm_temp_info_delayed);
	if (ret)
		return ret;

	ret = create_dimm_temp_info(priv);
	if (ret && ret != -EAGAIN) {
		dev_err(dev, "Failed to populate DIMM temp info\n");
		return ret;
	}

	return 0;
}

static int
read_thresholds_hsx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data)
{
	u8 dev, func;
	u16 reg;
	int ret;

	/*
	 * Device 20, Function 0: IMC 0 channel 0 -> rank 0
	 * Device 20, Function 1: IMC 0 channel 1 -> rank 1
	 * Device 21, Function 0: IMC 0 channel 2 -> rank 2
	 * Device 21, Function 1: IMC 0 channel 3 -> rank 3
	 * Device 23, Function 0: IMC 1 channel 0 -> rank 4
	 * Device 23, Function 1: IMC 1 channel 1 -> rank 5
	 * Device 24, Function 0: IMC 1 channel 2 -> rank 6
	 * Device 24, Function 1: IMC 1 channel 3 -> rank 7
	 */
	dev = 20 + chan_rank / 2 + chan_rank / 4;
	func = chan_rank % 2;
	reg = 0x120 + dimm_order * 4;

	ret = peci_pci_local_read(priv->peci_dev, 1, dev, func, reg, data);
	if (ret)
		return ret;

	return 0;
}
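
/*
 * Sanity example for the HSX mapping above: chan_rank 5 gives
 * dev = 20 + 5 / 2 + 5 / 4 = 23 and func = 5 % 2 = 1, matching
 * "Device 23, Function 1: IMC 1 channel 1 -> rank 5".
 */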
static int
read_thresholds_bdxd(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data)
{
	u8 dev, func;
	u16 reg;
	int ret;

	/*
	 * Device 10, Function 2: IMC 0 channel 0 -> rank 0
	 * Device 10, Function 6: IMC 0 channel 1 -> rank 1
	 * Device 12, Function 2: IMC 1 channel 0 -> rank 2
	 * Device 12, Function 6: IMC 1 channel 1 -> rank 3
	 */
	dev = 10 + chan_rank / 2 * 2;
	func = (chan_rank % 2) ? 6 : 2;
	reg = 0x120 + dimm_order * 4;

	ret = peci_pci_local_read(priv->peci_dev, 2, dev, func, reg, data);
	if (ret)
		return ret;

	return 0;
}

static int
read_thresholds_skx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data)
{
	u8 dev, func;
	u16 reg;
	int ret;

	/*
	 * Device 10, Function 2: IMC 0 channel 0 -> rank 0
	 * Device 10, Function 6: IMC 0 channel 1 -> rank 1
	 * Device 11, Function 2: IMC 0 channel 2 -> rank 2
	 * Device 12, Function 2: IMC 1 channel 0 -> rank 3
	 * Device 12, Function 6: IMC 1 channel 1 -> rank 4
	 * Device 13, Function 2: IMC 1 channel 2 -> rank 5
	 */
	dev = 10 + chan_rank / 3 * 2 + (chan_rank % 3 == 2 ? 1 : 0);
	func = chan_rank % 3 == 1 ? 6 : 2;
	reg = 0x120 + dimm_order * 4;

	ret = peci_pci_local_read(priv->peci_dev, 2, dev, func, reg, data);
	if (ret)
		return ret;

	return 0;
}

static int
read_thresholds_icx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data)
{
	u32 reg_val;
	u64 offset;
	int ret;
	u8 dev;

	ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd4, &reg_val);
	if (ret || !(reg_val & BIT(31)))
		return -ENODATA; /* Use default or previous value */

	ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd0, &reg_val);
	if (ret)
		return -ENODATA; /* Use default or previous value */

	/*
	 * Device 26, Offset 224e0: IMC 0 channel 0 -> rank 0
	 * Device 26, Offset 264e0: IMC 0 channel 1 -> rank 1
	 * Device 27, Offset 224e0: IMC 1 channel 0 -> rank 2
	 * Device 27, Offset 264e0: IMC 1 channel 1 -> rank 3
	 * Device 28, Offset 224e0: IMC 2 channel 0 -> rank 4
	 * Device 28, Offset 264e0: IMC 2 channel 1 -> rank 5
	 * Device 29, Offset 224e0: IMC 3 channel 0 -> rank 6
	 * Device 29, Offset 264e0: IMC 3 channel 1 -> rank 7
	 */
	dev = 26 + chan_rank / 2;
	offset = 0x224e0 + dimm_order * 4 + (chan_rank % 2) * 0x4000;

	ret = peci_mmio_read(priv->peci_dev, 0, GET_CPU_SEG(reg_val), GET_CPU_BUS(reg_val),
			     dev, 0, offset, data);
	if (ret)
		return ret;

	return 0;
}

static int
read_thresholds_spr(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data)
{
	u32 reg_val;
	u64 offset;
	int ret;
	u8 dev;

	ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd4, &reg_val);
	if (ret || !(reg_val & BIT(31)))
		return -ENODATA; /* Use default or previous value */

	ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd0, &reg_val);
	if (ret)
		return -ENODATA; /* Use default or previous value */

	/*
	 * Device 26, Offset 219a8: IMC 0 channel 0 -> rank 0
	 * Device 26, Offset 299a8: IMC 0 channel 1 -> rank 1
	 * Device 27, Offset 219a8: IMC 1 channel 0 -> rank 2
	 * Device 27, Offset 299a8: IMC 1 channel 1 -> rank 3
	 * Device 28, Offset 219a8: IMC 2 channel 0 -> rank 4
	 * Device 28, Offset 299a8: IMC 2 channel 1 -> rank 5
	 * Device 29, Offset 219a8: IMC 3 channel 0 -> rank 6
	 * Device 29, Offset 299a8: IMC 3 channel 1 -> rank 7
	 */
	dev = 26 + chan_rank / 2;
	offset = 0x219a8 + dimm_order * 4 + (chan_rank % 2) * 0x8000;

	ret = peci_mmio_read(priv->peci_dev, 0, GET_CPU_SEG(reg_val), GET_CPU_BUS(reg_val),
			     dev, 0, offset, data);
	if (ret)
		return ret;

	return 0;
}
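
/*
 * Sanity example for the ICX/SPR mappings above: chan_rank 3 gives
 * dev = 26 + 3 / 2 = 27, with offset 0x224e0 + (3 % 2) * 0x4000 = 0x264e0
 * on ICX and 0x219a8 + (3 % 2) * 0x8000 = 0x299a8 on SPR, matching
 * "Device 27, ... IMC 1 channel 1 -> rank 3" in both tables.
 */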
static const struct dimm_info dimm_hsx = {
	.chan_rank_max = CHAN_RANK_MAX_ON_HSX,
	.dimm_idx_max = DIMM_IDX_MAX_ON_HSX,
	.min_peci_revision = 0x33,
	.read_thresholds = &read_thresholds_hsx,
};

static const struct dimm_info dimm_bdx = {
	.chan_rank_max = CHAN_RANK_MAX_ON_BDX,
	.dimm_idx_max = DIMM_IDX_MAX_ON_BDX,
	.min_peci_revision = 0x33,
	.read_thresholds = &read_thresholds_hsx,
};

static const struct dimm_info dimm_bdxd = {
	.chan_rank_max = CHAN_RANK_MAX_ON_BDXD,
	.dimm_idx_max = DIMM_IDX_MAX_ON_BDXD,
	.min_peci_revision = 0x33,
	.read_thresholds = &read_thresholds_bdxd,
};

static const struct dimm_info dimm_skx = {
	.chan_rank_max = CHAN_RANK_MAX_ON_SKX,
	.dimm_idx_max = DIMM_IDX_MAX_ON_SKX,
	.min_peci_revision = 0x33,
	.read_thresholds = &read_thresholds_skx,
};

static const struct dimm_info dimm_icx = {
	.chan_rank_max = CHAN_RANK_MAX_ON_ICX,
	.dimm_idx_max = DIMM_IDX_MAX_ON_ICX,
	.min_peci_revision = 0x40,
	.read_thresholds = &read_thresholds_icx,
};

static const struct dimm_info dimm_icxd = {
	.chan_rank_max = CHAN_RANK_MAX_ON_ICXD,
	.dimm_idx_max = DIMM_IDX_MAX_ON_ICXD,
	.min_peci_revision = 0x40,
	.read_thresholds = &read_thresholds_icx,
};

static const struct dimm_info dimm_spr = {
	.chan_rank_max = CHAN_RANK_MAX_ON_SPR,
	.dimm_idx_max = DIMM_IDX_MAX_ON_SPR,
	.min_peci_revision = 0x40,
	.read_thresholds = &read_thresholds_spr,
};

static const struct auxiliary_device_id peci_dimmtemp_ids[] = {
	{
		.name = "peci_cpu.dimmtemp.hsx",
		.driver_data = (kernel_ulong_t)&dimm_hsx,
	},
	{
		.name = "peci_cpu.dimmtemp.bdx",
		.driver_data = (kernel_ulong_t)&dimm_bdx,
	},
	{
		.name = "peci_cpu.dimmtemp.bdxd",
		.driver_data = (kernel_ulong_t)&dimm_bdxd,
	},
	{
		.name = "peci_cpu.dimmtemp.skx",
		.driver_data = (kernel_ulong_t)&dimm_skx,
	},
	{
		.name = "peci_cpu.dimmtemp.icx",
		.driver_data = (kernel_ulong_t)&dimm_icx,
	},
	{
		.name = "peci_cpu.dimmtemp.icxd",
		.driver_data = (kernel_ulong_t)&dimm_icxd,
	},
	{
		.name = "peci_cpu.dimmtemp.spr",
		.driver_data = (kernel_ulong_t)&dimm_spr,
	},
	{ }
};
MODULE_DEVICE_TABLE(auxiliary, peci_dimmtemp_ids);

static struct auxiliary_driver peci_dimmtemp_driver = {
	.probe = peci_dimmtemp_probe,
	.id_table = peci_dimmtemp_ids,
};

module_auxiliary_driver(peci_dimmtemp_driver);

MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
MODULE_AUTHOR("Iwona Winiarska <iwona.winiarska@intel.com>");
MODULE_DESCRIPTION("PECI dimmtemp driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(PECI_CPU);