// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2018-2021 Intel Corporation

#include <linux/auxiliary_bus.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/devm-helpers.h>
#include <linux/hwmon.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/peci.h>
#include <linux/peci-cpu.h>
#include <linux/units.h>
#include <linux/workqueue.h>

#include "common.h"

#define DIMM_MASK_CHECK_DELAY_JIFFIES	msecs_to_jiffies(5000)

/* Max number of channel ranks and DIMM index per channel */
#define CHAN_RANK_MAX_ON_HSX	8
#define DIMM_IDX_MAX_ON_HSX	3
#define CHAN_RANK_MAX_ON_BDX	4
#define DIMM_IDX_MAX_ON_BDX	3
#define CHAN_RANK_MAX_ON_BDXD	2
#define DIMM_IDX_MAX_ON_BDXD	2
#define CHAN_RANK_MAX_ON_SKX	6
#define DIMM_IDX_MAX_ON_SKX	2
#define CHAN_RANK_MAX_ON_ICX	8
#define DIMM_IDX_MAX_ON_ICX	2
#define CHAN_RANK_MAX_ON_ICXD	4
#define DIMM_IDX_MAX_ON_ICXD	2
#define CHAN_RANK_MAX_ON_SPR	8
#define DIMM_IDX_MAX_ON_SPR	2

#define CHAN_RANK_MAX		CHAN_RANK_MAX_ON_HSX
#define DIMM_IDX_MAX		DIMM_IDX_MAX_ON_HSX
#define DIMM_NUMS_MAX		(CHAN_RANK_MAX * DIMM_IDX_MAX)

#define CPU_SEG_MASK		GENMASK(23, 16)
#define GET_CPU_SEG(x)		(((x) & CPU_SEG_MASK) >> 16)
#define CPU_BUS_MASK		GENMASK(7, 0)
#define GET_CPU_BUS(x)		((x) & CPU_BUS_MASK)

#define DIMM_TEMP_MAX		GENMASK(15, 8)
#define DIMM_TEMP_CRIT		GENMASK(23, 16)
#define GET_TEMP_MAX(x)		(((x) & DIMM_TEMP_MAX) >> 8)
#define GET_TEMP_CRIT(x)	(((x) & DIMM_TEMP_CRIT) >> 16)

#define NO_DIMM_RETRY_COUNT_MAX	5

struct peci_dimmtemp;

struct dimm_info {
	int chan_rank_max;
	int dimm_idx_max;
	u8 min_peci_revision;
	int (*read_thresholds)(struct peci_dimmtemp *priv, int dimm_order,
			       int chan_rank, u32 *data);
};

struct peci_dimm_thresholds {
	long temp_max;
	long temp_crit;
	struct peci_sensor_state state;
};

enum peci_dimm_threshold_type {
	temp_max_type,
	temp_crit_type,
};

struct peci_dimmtemp {
	struct peci_device *peci_dev;
	struct device *dev;
	const char *name;
	const struct dimm_info *gen_info;
	struct delayed_work detect_work;
	struct {
		struct peci_sensor_data temp;
		struct peci_dimm_thresholds thresholds;
	} dimm[DIMM_NUMS_MAX];
	char **dimmtemp_label;
	DECLARE_BITMAP(dimm_mask, DIMM_NUMS_MAX);
	u8 no_dimm_retry_count;
};
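
/*
 * The DDR DIMM temperature PCS read returns one dword per channel rank,
 * with one temperature byte per DIMM slot: bits 7:0 for DIMM 0, bits 15:8
 * for DIMM 1, and so on. __dimm_temp() below extracts the byte for a
 * given slot.
 */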
static u8 __dimm_temp(u32 reg, int dimm_order)
{
	return (reg >> (dimm_order * 8)) & 0xff;
}

static int get_dimm_temp(struct peci_dimmtemp *priv, int dimm_no, long *val)
{
	int dimm_order = dimm_no % priv->gen_info->dimm_idx_max;
	int chan_rank = dimm_no / priv->gen_info->dimm_idx_max;
	int ret = 0;
	u32 data;

	mutex_lock(&priv->dimm[dimm_no].temp.state.lock);
	if (!peci_sensor_need_update(&priv->dimm[dimm_no].temp.state))
		goto skip_update;

	ret = peci_pcs_read(priv->peci_dev, PECI_PCS_DDR_DIMM_TEMP, chan_rank, &data);
	if (ret)
		goto unlock;

	priv->dimm[dimm_no].temp.value = __dimm_temp(data, dimm_order) * MILLIDEGREE_PER_DEGREE;

	peci_sensor_mark_updated(&priv->dimm[dimm_no].temp.state);

skip_update:
	*val = priv->dimm[dimm_no].temp.value;
unlock:
	mutex_unlock(&priv->dimm[dimm_no].temp.state.lock);
	return ret;
}

static int update_thresholds(struct peci_dimmtemp *priv, int dimm_no)
{
	int dimm_order = dimm_no % priv->gen_info->dimm_idx_max;
	int chan_rank = dimm_no / priv->gen_info->dimm_idx_max;
	u32 data;
	int ret;

	if (!peci_sensor_need_update(&priv->dimm[dimm_no].thresholds.state))
		return 0;

	ret = priv->gen_info->read_thresholds(priv, dimm_order, chan_rank, &data);
	if (ret == -ENODATA) /* Use default or previous value */
		return 0;
	if (ret)
		return ret;

	priv->dimm[dimm_no].thresholds.temp_max = GET_TEMP_MAX(data) * MILLIDEGREE_PER_DEGREE;
	priv->dimm[dimm_no].thresholds.temp_crit = GET_TEMP_CRIT(data) * MILLIDEGREE_PER_DEGREE;

	peci_sensor_mark_updated(&priv->dimm[dimm_no].thresholds.state);

	return 0;
}

static int get_dimm_thresholds(struct peci_dimmtemp *priv, enum peci_dimm_threshold_type type,
			       int dimm_no, long *val)
{
	int ret;

	mutex_lock(&priv->dimm[dimm_no].thresholds.state.lock);
	ret = update_thresholds(priv, dimm_no);
	if (ret)
		goto unlock;

	switch (type) {
	case temp_max_type:
		*val = priv->dimm[dimm_no].thresholds.temp_max;
		break;
	case temp_crit_type:
		*val = priv->dimm[dimm_no].thresholds.temp_crit;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}
unlock:
	mutex_unlock(&priv->dimm[dimm_no].thresholds.state.lock);

	return ret;
}
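
/*
 * Example sysfs layout produced by the callbacks below (assuming
 * dimm_idx_max == 3): the hwmon core exposes channel N as temp<N + 1>_*,
 * and only channels set in dimm_mask are visible, e.g. for channel 12:
 *
 *   temp13_label                         -> "DIMM E1"
 *   temp13_input/temp13_max/temp13_crit  -> millidegrees Celsius
 */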
static int dimmtemp_read_string(struct device *dev,
				enum hwmon_sensor_types type,
				u32 attr, int channel, const char **str)
{
	struct peci_dimmtemp *priv = dev_get_drvdata(dev);

	if (attr != hwmon_temp_label)
		return -EOPNOTSUPP;

	*str = (const char *)priv->dimmtemp_label[channel];

	return 0;
}

static int dimmtemp_read(struct device *dev, enum hwmon_sensor_types type,
			 u32 attr, int channel, long *val)
{
	struct peci_dimmtemp *priv = dev_get_drvdata(dev);

	switch (attr) {
	case hwmon_temp_input:
		return get_dimm_temp(priv, channel, val);
	case hwmon_temp_max:
		return get_dimm_thresholds(priv, temp_max_type, channel, val);
	case hwmon_temp_crit:
		return get_dimm_thresholds(priv, temp_crit_type, channel, val);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static umode_t dimmtemp_is_visible(const void *data, enum hwmon_sensor_types type,
				   u32 attr, int channel)
{
	const struct peci_dimmtemp *priv = data;

	if (test_bit(channel, priv->dimm_mask))
		return 0444;

	return 0;
}

static const struct hwmon_ops peci_dimmtemp_ops = {
	.is_visible = dimmtemp_is_visible,
	.read_string = dimmtemp_read_string,
	.read = dimmtemp_read,
};

static int check_populated_dimms(struct peci_dimmtemp *priv)
{
	int chan_rank_max = priv->gen_info->chan_rank_max;
	int dimm_idx_max = priv->gen_info->dimm_idx_max;
	DECLARE_BITMAP(dimm_mask, DIMM_NUMS_MAX);
	DECLARE_BITMAP(chan_rank_empty, CHAN_RANK_MAX);
	int chan_rank, dimm_idx, ret, i;
	u32 pcs;

	if (chan_rank_max * dimm_idx_max > DIMM_NUMS_MAX) {
		WARN_ONCE(1, "Unsupported number of DIMMs - chan_rank_max: %d, dimm_idx_max: %d",
			  chan_rank_max, dimm_idx_max);
		return -EINVAL;
	}

	bitmap_zero(dimm_mask, DIMM_NUMS_MAX);
	bitmap_zero(chan_rank_empty, CHAN_RANK_MAX);

	for (chan_rank = 0; chan_rank < chan_rank_max; chan_rank++) {
		ret = peci_pcs_read(priv->peci_dev, PECI_PCS_DDR_DIMM_TEMP, chan_rank, &pcs);
		if (ret) {
			/*
			 * Overall, we expect either success or -EINVAL in
			 * order to determine whether DIMM is populated or not.
			 * For anything else we fall back to deferring the
			 * detection to be performed at a later point in time.
			 */
			if (ret == -EINVAL) {
				bitmap_set(chan_rank_empty, chan_rank, 1);
				continue;
			}

			return -EAGAIN;
		}

		for (dimm_idx = 0; dimm_idx < dimm_idx_max; dimm_idx++)
			if (__dimm_temp(pcs, dimm_idx))
				bitmap_set(dimm_mask, chan_rank * dimm_idx_max + dimm_idx, 1);
	}

	/*
	 * If we got all -EINVALs, it means that the CPU doesn't have any
	 * DIMMs. Unfortunately, it may also happen at the very start of
	 * host platform boot. Retrying a couple of times lets us make sure
	 * that the state is persistent.
	 */
	if (bitmap_full(chan_rank_empty, chan_rank_max)) {
		if (priv->no_dimm_retry_count < NO_DIMM_RETRY_COUNT_MAX) {
			priv->no_dimm_retry_count++;

			return -EAGAIN;
		}

		return -ENODEV;
	}

	/*
	 * It's possible that memory training is not done yet. In this case we
	 * defer the detection to be performed at a later point in time.
	 */
	if (bitmap_empty(dimm_mask, DIMM_NUMS_MAX)) {
		priv->no_dimm_retry_count = 0;
		return -EAGAIN;
	}

	for_each_set_bit(i, dimm_mask, DIMM_NUMS_MAX)
		dev_dbg(priv->dev, "Found DIMM%#x\n", i);

	bitmap_copy(priv->dimm_mask, dimm_mask, DIMM_NUMS_MAX);

	return 0;
}
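
/*
 * Channel labels follow the "DIMM <rank letter><slot number>" convention,
 * e.g. with dimm_idx_max == 3, channel 4 maps to rank 1, slot 1 and is
 * labeled "DIMM B2".
 */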
static int create_dimm_temp_label(struct peci_dimmtemp *priv, int chan)
{
	int rank = chan / priv->gen_info->dimm_idx_max;
	int idx = chan % priv->gen_info->dimm_idx_max;

	priv->dimmtemp_label[chan] = devm_kasprintf(priv->dev, GFP_KERNEL,
						    "DIMM %c%d", 'A' + rank,
						    idx + 1);
	if (!priv->dimmtemp_label[chan])
		return -ENOMEM;

	return 0;
}

static const struct hwmon_channel_info * const peci_dimmtemp_temp_info[] = {
	HWMON_CHANNEL_INFO(temp,
			   [0 ... DIMM_NUMS_MAX - 1] = HWMON_T_LABEL |
				HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT),
	NULL
};

static const struct hwmon_chip_info peci_dimmtemp_chip_info = {
	.ops = &peci_dimmtemp_ops,
	.info = peci_dimmtemp_temp_info,
};

static int create_dimm_temp_info(struct peci_dimmtemp *priv)
{
	int ret, i, channels;
	struct device *dev;

	/*
	 * We expect to either find populated DIMMs and carry on with creating
	 * sensors, or find out that there are no DIMMs populated.
	 * All other states mean that the platform never reached the point
	 * that allows us to check DIMM state, causing us to retry later on.
	 */
	ret = check_populated_dimms(priv);
	if (ret == -ENODEV) {
		dev_dbg(priv->dev, "No DIMMs found\n");
		return 0;
	} else if (ret) {
		schedule_delayed_work(&priv->detect_work, DIMM_MASK_CHECK_DELAY_JIFFIES);
		dev_dbg(priv->dev, "Deferred populating DIMM temp info\n");
		return ret;
	}

	channels = priv->gen_info->chan_rank_max * priv->gen_info->dimm_idx_max;

	priv->dimmtemp_label = devm_kzalloc(priv->dev, channels * sizeof(char *), GFP_KERNEL);
	if (!priv->dimmtemp_label)
		return -ENOMEM;

	for_each_set_bit(i, priv->dimm_mask, DIMM_NUMS_MAX) {
		ret = create_dimm_temp_label(priv, i);
		if (ret)
			return ret;
		mutex_init(&priv->dimm[i].thresholds.state.lock);
		mutex_init(&priv->dimm[i].temp.state.lock);
	}

	dev = devm_hwmon_device_register_with_info(priv->dev, priv->name, priv,
						   &peci_dimmtemp_chip_info, NULL);
	if (IS_ERR(dev)) {
		dev_err(priv->dev, "Failed to register hwmon device\n");
		return PTR_ERR(dev);
	}

	dev_dbg(priv->dev, "%s: sensor '%s'\n", dev_name(dev), priv->name);

	return 0;
}

static void create_dimm_temp_info_delayed(struct work_struct *work)
{
	struct peci_dimmtemp *priv = container_of(to_delayed_work(work),
						  struct peci_dimmtemp,
						  detect_work);
	int ret;

	ret = create_dimm_temp_info(priv);
	if (ret && ret != -EAGAIN)
		dev_err(priv->dev, "Failed to populate DIMM temp info\n");
}

static int peci_dimmtemp_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id)
{
	struct device *dev = &adev->dev;
	struct peci_device *peci_dev = to_peci_device(dev->parent);
	struct peci_dimmtemp *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->name = devm_kasprintf(dev, GFP_KERNEL, "peci_dimmtemp.cpu%d",
				    peci_dev->info.socket_id);
	if (!priv->name)
		return -ENOMEM;

	priv->dev = dev;
	priv->peci_dev = peci_dev;
	priv->gen_info = (const struct dimm_info *)id->driver_data;

	/*
	 * This is just a sanity check. Since we're using commands that are
	 * guaranteed to be supported on a given platform, we should never see
	 * revision lower than expected.
	 */
	if (peci_dev->info.peci_revision < priv->gen_info->min_peci_revision)
		dev_warn(priv->dev,
			 "Unexpected PECI revision %#x, some features may be unavailable\n",
			 peci_dev->info.peci_revision);

	ret = devm_delayed_work_autocancel(priv->dev, &priv->detect_work,
					   create_dimm_temp_info_delayed);
	if (ret)
		return ret;

	ret = create_dimm_temp_info(priv);
	if (ret && ret != -EAGAIN) {
		dev_err(dev, "Failed to populate DIMM temp info\n");
		return ret;
	}

	return 0;
}
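
/*
 * Per-generation threshold readers: each helper translates (chan_rank,
 * dimm_order) into the IMC register holding that DIMM's thresholds.
 * HSX/BDX(-D)/SKX use PCI config space reads, ICX and SPR use MMIO.
 */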
static int
read_thresholds_hsx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data)
{
	u8 dev, func;
	u16 reg;
	int ret;

	/*
	 * Device 20, Function 0: IMC 0 channel 0 -> rank 0
	 * Device 20, Function 1: IMC 0 channel 1 -> rank 1
	 * Device 21, Function 0: IMC 0 channel 2 -> rank 2
	 * Device 21, Function 1: IMC 0 channel 3 -> rank 3
	 * Device 23, Function 0: IMC 1 channel 0 -> rank 4
	 * Device 23, Function 1: IMC 1 channel 1 -> rank 5
	 * Device 24, Function 0: IMC 1 channel 2 -> rank 6
	 * Device 24, Function 1: IMC 1 channel 3 -> rank 7
	 */
	dev = 20 + chan_rank / 2 + chan_rank / 4;
	func = chan_rank % 2;
	reg = 0x120 + dimm_order * 4;

	ret = peci_pci_local_read(priv->peci_dev, 1, dev, func, reg, data);
	if (ret)
		return ret;

	return 0;
}

static int
read_thresholds_bdxd(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data)
{
	u8 dev, func;
	u16 reg;
	int ret;

	/*
	 * Device 10, Function 2: IMC 0 channel 0 -> rank 0
	 * Device 10, Function 6: IMC 0 channel 1 -> rank 1
	 * Device 12, Function 2: IMC 1 channel 0 -> rank 2
	 * Device 12, Function 6: IMC 1 channel 1 -> rank 3
	 */
	dev = 10 + chan_rank / 2 * 2;
	func = (chan_rank % 2) ? 6 : 2;
	reg = 0x120 + dimm_order * 4;

	ret = peci_pci_local_read(priv->peci_dev, 2, dev, func, reg, data);
	if (ret)
		return ret;

	return 0;
}

static int
read_thresholds_skx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data)
{
	u8 dev, func;
	u16 reg;
	int ret;

	/*
	 * Device 10, Function 2: IMC 0 channel 0 -> rank 0
	 * Device 10, Function 6: IMC 0 channel 1 -> rank 1
	 * Device 11, Function 2: IMC 0 channel 2 -> rank 2
	 * Device 12, Function 2: IMC 1 channel 0 -> rank 3
	 * Device 12, Function 6: IMC 1 channel 1 -> rank 4
	 * Device 13, Function 2: IMC 1 channel 2 -> rank 5
	 */
	dev = 10 + chan_rank / 3 * 2 + (chan_rank % 3 == 2 ? 1 : 0);
	func = chan_rank % 3 == 1 ? 6 : 2;
	reg = 0x120 + dimm_order * 4;

	ret = peci_pci_local_read(priv->peci_dev, 2, dev, func, reg, data);
	if (ret)
		return ret;

	return 0;
}
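
/*
 * ICX and newer reach the thresholds over MMIO. The helpers below first
 * check BIT(31) of the endpoint config register at 0xd4 (treated here as
 * a "data valid" indication) and then take the target segment and bus
 * from the register at 0xd0 via GET_CPU_SEG()/GET_CPU_BUS().
 */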
static int
read_thresholds_icx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data)
{
	u32 reg_val;
	u64 offset;
	int ret;
	u8 dev;

	ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd4, &reg_val);
	if (ret || !(reg_val & BIT(31)))
		return -ENODATA; /* Use default or previous value */

	ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd0, &reg_val);
	if (ret)
		return -ENODATA; /* Use default or previous value */

	/*
	 * Device 26, Offset 224e0: IMC 0 channel 0 -> rank 0
	 * Device 26, Offset 264e0: IMC 0 channel 1 -> rank 1
	 * Device 27, Offset 224e0: IMC 1 channel 0 -> rank 2
	 * Device 27, Offset 264e0: IMC 1 channel 1 -> rank 3
	 * Device 28, Offset 224e0: IMC 2 channel 0 -> rank 4
	 * Device 28, Offset 264e0: IMC 2 channel 1 -> rank 5
	 * Device 29, Offset 224e0: IMC 3 channel 0 -> rank 6
	 * Device 29, Offset 264e0: IMC 3 channel 1 -> rank 7
	 */
	dev = 26 + chan_rank / 2;
	offset = 0x224e0 + dimm_order * 4 + (chan_rank % 2) * 0x4000;

	ret = peci_mmio_read(priv->peci_dev, 0, GET_CPU_SEG(reg_val), GET_CPU_BUS(reg_val),
			     dev, 0, offset, data);
	if (ret)
		return ret;

	return 0;
}
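
/*
 * SPR follows the same two-step discovery as ICX; only the endpoint
 * device (30 instead of 13), the threshold base offset and the
 * per-channel stride (0x8000 instead of 0x4000) differ.
 */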
static int
read_thresholds_spr(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data)
{
	u32 reg_val;
	u64 offset;
	int ret;
	u8 dev;

	ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd4, &reg_val);
	if (ret || !(reg_val & BIT(31)))
		return -ENODATA; /* Use default or previous value */

	ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd0, &reg_val);
	if (ret)
		return -ENODATA; /* Use default or previous value */

	/*
	 * Device 26, Offset 219a8: IMC 0 channel 0 -> rank 0
	 * Device 26, Offset 299a8: IMC 0 channel 1 -> rank 1
	 * Device 27, Offset 219a8: IMC 1 channel 0 -> rank 2
	 * Device 27, Offset 299a8: IMC 1 channel 1 -> rank 3
	 * Device 28, Offset 219a8: IMC 2 channel 0 -> rank 4
	 * Device 28, Offset 299a8: IMC 2 channel 1 -> rank 5
	 * Device 29, Offset 219a8: IMC 3 channel 0 -> rank 6
	 * Device 29, Offset 299a8: IMC 3 channel 1 -> rank 7
	 */
	dev = 26 + chan_rank / 2;
	offset = 0x219a8 + dimm_order * 4 + (chan_rank % 2) * 0x8000;

	ret = peci_mmio_read(priv->peci_dev, 0, GET_CPU_SEG(reg_val), GET_CPU_BUS(reg_val),
			     dev, 0, offset, data);
	if (ret)
		return ret;

	return 0;
}

static const struct dimm_info dimm_hsx = {
	.chan_rank_max = CHAN_RANK_MAX_ON_HSX,
	.dimm_idx_max = DIMM_IDX_MAX_ON_HSX,
	.min_peci_revision = 0x33,
	.read_thresholds = &read_thresholds_hsx,
};

static const struct dimm_info dimm_bdx = {
	.chan_rank_max = CHAN_RANK_MAX_ON_BDX,
	.dimm_idx_max = DIMM_IDX_MAX_ON_BDX,
	.min_peci_revision = 0x33,
	.read_thresholds = &read_thresholds_hsx,
};

static const struct dimm_info dimm_bdxd = {
	.chan_rank_max = CHAN_RANK_MAX_ON_BDXD,
	.dimm_idx_max = DIMM_IDX_MAX_ON_BDXD,
	.min_peci_revision = 0x33,
	.read_thresholds = &read_thresholds_bdxd,
};

static const struct dimm_info dimm_skx = {
	.chan_rank_max = CHAN_RANK_MAX_ON_SKX,
	.dimm_idx_max = DIMM_IDX_MAX_ON_SKX,
	.min_peci_revision = 0x33,
	.read_thresholds = &read_thresholds_skx,
};

static const struct dimm_info dimm_icx = {
	.chan_rank_max = CHAN_RANK_MAX_ON_ICX,
	.dimm_idx_max = DIMM_IDX_MAX_ON_ICX,
	.min_peci_revision = 0x40,
	.read_thresholds = &read_thresholds_icx,
};

static const struct dimm_info dimm_icxd = {
	.chan_rank_max = CHAN_RANK_MAX_ON_ICXD,
	.dimm_idx_max = DIMM_IDX_MAX_ON_ICXD,
	.min_peci_revision = 0x40,
	.read_thresholds = &read_thresholds_icx,
};

static const struct dimm_info dimm_spr = {
	.chan_rank_max = CHAN_RANK_MAX_ON_SPR,
	.dimm_idx_max = DIMM_IDX_MAX_ON_SPR,
	.min_peci_revision = 0x40,
	.read_thresholds = &read_thresholds_spr,
};

static const struct auxiliary_device_id peci_dimmtemp_ids[] = {
	{
		.name = "peci_cpu.dimmtemp.hsx",
		.driver_data = (kernel_ulong_t)&dimm_hsx,
	},
	{
		.name = "peci_cpu.dimmtemp.bdx",
		.driver_data = (kernel_ulong_t)&dimm_bdx,
	},
	{
		.name = "peci_cpu.dimmtemp.bdxd",
		.driver_data = (kernel_ulong_t)&dimm_bdxd,
	},
	{
		.name = "peci_cpu.dimmtemp.skx",
		.driver_data = (kernel_ulong_t)&dimm_skx,
	},
	{
		.name = "peci_cpu.dimmtemp.icx",
		.driver_data = (kernel_ulong_t)&dimm_icx,
	},
	{
		.name = "peci_cpu.dimmtemp.icxd",
		.driver_data = (kernel_ulong_t)&dimm_icxd,
	},
	{
		.name = "peci_cpu.dimmtemp.spr",
		.driver_data = (kernel_ulong_t)&dimm_spr,
	},
	{ }
};
MODULE_DEVICE_TABLE(auxiliary, peci_dimmtemp_ids);

static struct auxiliary_driver peci_dimmtemp_driver = {
	.probe = peci_dimmtemp_probe,
	.id_table = peci_dimmtemp_ids,
};

module_auxiliary_driver(peci_dimmtemp_driver);

MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
MODULE_AUTHOR("Iwona Winiarska <iwona.winiarska@intel.com>");
MODULE_DESCRIPTION("PECI dimmtemp driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(PECI_CPU);