/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/apicdef.h>
#include <asm/intel-family.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include <linux/perf_event.h>
#include "../perf_event.h"

#define UNCORE_PMU_NAME_LEN		32
#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)

#define UNCORE_FIXED_EVENT		0xff
#define UNCORE_PMC_IDX_MAX_GENERIC	8
#define UNCORE_PMC_IDX_MAX_FIXED	1
#define UNCORE_PMC_IDX_MAX_FREERUNNING	1
#define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_FREERUNNING	(UNCORE_PMC_IDX_FIXED + \
					UNCORE_PMC_IDX_MAX_FIXED)
#define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FREERUNNING + \
					UNCORE_PMC_IDX_MAX_FREERUNNING)

#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx)	\
		((dev << 24) | (func << 16) | (type << 8) | idx)
#define UNCORE_PCI_DEV_DATA(type, idx)	((type << 8) | idx)
#define UNCORE_PCI_DEV_DEV(data)	((data >> 24) & 0xff)
#define UNCORE_PCI_DEV_FUNC(data)	((data >> 16) & 0xff)
#define UNCORE_PCI_DEV_TYPE(data)	((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data)	(data & 0xff)
#define UNCORE_EXTRA_PCI_DEV		0xff
#define UNCORE_EXTRA_PCI_DEV_MAX	4
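
/*
 * Illustrative note (not part of the original header): the "full data"
 * word packs four 8-bit fields, so UNCORE_PCI_DEV_FULL_DATA(0x14, 1, 2, 0)
 * yields 0x14010200, and the UNCORE_PCI_DEV_{DEV,FUNC,TYPE,IDX}() helpers
 * recover each field:
 *
 *	31      24 23      16 15       8 7        0
 *	+---------+----------+----------+---------+
 *	|   dev   |   func   |   type   |   idx   |
 *	+---------+----------+----------+---------+
 */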

#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)

struct pci_extra_dev {
	struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
};

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;
struct freerunning_counters;
struct intel_uncore_topology;

struct intel_uncore_type {
	const char *name;
	int num_counters;
	int num_boxes;
	int perf_ctr_bits;
	int fixed_ctr_bits;
	int num_freerunning_types;
	int type_id;
	unsigned perf_ctr;
	unsigned event_ctl;
	unsigned event_mask;
	unsigned event_mask_ext;
	unsigned fixed_ctr;
	unsigned fixed_ctl;
	unsigned box_ctl;
	u64 *box_ctls;	/* Unit ctrl addr of the first box of each die */
	union {
		unsigned msr_offset;
		unsigned mmio_offset;
	};
	unsigned mmio_map_size;
	unsigned num_shared_regs:8;
	unsigned single_fixed:1;
	unsigned pair_ctr_ctl:1;
	union {
		unsigned *msr_offsets;
		unsigned *pci_offsets;
		unsigned *mmio_offsets;
	};
	unsigned *box_ids;
	struct event_constraint unconstrainted;
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;
	struct intel_uncore_ops *ops;
	struct uncore_event_desc *event_descs;
	struct freerunning_counters *freerunning;
	const struct attribute_group *attr_groups[4];
	const struct attribute_group **attr_update;
	struct pmu *pmu; /* for custom pmu ops */
	/*
	 * Uncore PMU would store relevant platform topology configuration here
	 * to identify which platform component each PMON block of that type is
	 * supposed to monitor.
	 */
	struct intel_uncore_topology **topology;
	/*
	 * Optional callbacks for managing mapping of Uncore units to PMONs
	 */
	int (*get_topology)(struct intel_uncore_type *type);
	void (*set_mapping)(struct intel_uncore_type *type);
	void (*cleanup_mapping)(struct intel_uncore_type *type);
};

#define pmu_group attr_groups[0]
#define format_group attr_groups[1]
#define events_group attr_groups[2]

struct intel_uncore_ops {
	void (*init_box)(struct intel_uncore_box *);
	void (*exit_box)(struct intel_uncore_box *);
	void (*disable_box)(struct intel_uncore_box *);
	void (*enable_box)(struct intel_uncore_box *);
	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
	void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
	u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
	int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
	struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
						   struct perf_event *);
	void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};

struct intel_uncore_pmu {
	struct pmu			pmu;
	char				name[UNCORE_PMU_NAME_LEN];
	int				pmu_idx;
	int				func_id;
	bool				registered;
	atomic_t			activeboxes;
	struct intel_uncore_type	*type;
	struct intel_uncore_box		**boxes;
};

struct intel_uncore_extra_reg {
	raw_spinlock_t lock;
	u64 config, config1, config2;
	atomic_t ref;
};

struct intel_uncore_box {
	int dieid;	/* Logical die ID */
	int n_active;	/* number of active events */
	int n_events;
	int cpu;	/* cpu to collect events */
	unsigned long flags;
	atomic_t refcnt;
	struct perf_event *events[UNCORE_PMC_IDX_MAX];
	struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
	struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
	unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	u64 tags[UNCORE_PMC_IDX_MAX];
	struct pci_dev *pci_dev;
	struct intel_uncore_pmu *pmu;
	u64 hrtimer_duration; /* hrtimer timeout for this box */
	struct hrtimer hrtimer;
	struct list_head list;
	struct list_head active_list;
	void __iomem *io_addr;
	struct intel_uncore_extra_reg shared_regs[];
};

/* CFL uncore 8th cbox MSRs */
#define CFL_UNC_CBO_7_PERFEVTSEL0		0xf70
#define CFL_UNC_CBO_7_PER_CTR0			0xf76

#define UNCORE_BOX_FLAG_INITIATED		0
/* event config registers are 8 bytes apart */
#define UNCORE_BOX_FLAG_CTL_OFFS8		1
/* CFL 8th CBOX has different MSR space */
#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS	2

struct uncore_event_desc {
	struct device_attribute attr;
	const char *config;
};

struct freerunning_counters {
	unsigned int counter_base;
	unsigned int counter_offset;
	unsigned int box_offset;
	unsigned int num_counters;
	unsigned int bits;
	unsigned *box_offsets;
};
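
/*
 * Worked example (hypothetical values, for illustration only): with
 * counter_base = 0xb0, counter_offset = 8 and box_offset = 0x10, free
 * running counter 2 of box 1 sits at 0xb0 + 8 * 2 + 0x10 * 1 = 0xd0.
 * This mirrors the arithmetic in uncore_freerunning_counter() below;
 * box_offsets, when present, supplies per-box offsets instead of the
 * linear box_offset stride.
 */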

struct uncore_iio_topology {
	int pci_bus_no;
	int segment;
};

struct uncore_upi_topology {
	int die_to;
	int pmu_idx_to;
	int enabled;
};

struct intel_uncore_topology {
	int pmu_idx;
	union {
		void *untyped;
		struct uncore_iio_topology *iio;
		struct uncore_upi_topology *upi;
	};
};

struct pci2phy_map {
	struct list_head list;
	int segment;
	int pbus_to_dieid[256];
};

struct pci2phy_map *__find_pci2phy_map(int segment);
int uncore_pcibus_to_dieid(struct pci_bus *bus);
int uncore_die_to_segment(int die);

ssize_t uncore_event_show(struct device *dev,
			  struct device_attribute *attr, char *buf);

static inline struct intel_uncore_pmu *dev_to_uncore_pmu(struct device *dev)
{
	return container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu);
}

#define to_device_attribute(n)	container_of(n, struct device_attribute, attr)
#define to_dev_ext_attribute(n)	container_of(n, struct dev_ext_attribute, attr)
#define attr_to_ext_attr(n)	to_dev_ext_attribute(to_device_attribute(n))

extern int __uncore_max_dies;
#define uncore_max_dies()	(__uncore_max_dies)

#define INTEL_UNCORE_EVENT_DESC(_name, _config)			\
{								\
	.attr	= __ATTR(_name, 0444, uncore_event_show, NULL),	\
	.config	= _config,					\
}

#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
static ssize_t __uncore_##_var##_show(struct device *dev,		\
				struct device_attribute *attr,		\
				char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct device_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
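
/*
 * Illustrative usage of the two macros above (names and strings are
 * examples in the style of the per-platform uncore_*.c files, not
 * definitions made by this header):
 *
 *	DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 *
 *	static struct uncore_event_desc snb_uncore_events[] = {
 *		INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
 *		{ },
 *	};
 */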

static inline bool uncore_pmc_fixed(int idx)
{
	return idx == UNCORE_PMC_IDX_FIXED;
}

static inline bool uncore_pmc_freerunning(int idx)
{
	return idx == UNCORE_PMC_IDX_FREERUNNING;
}

static inline bool uncore_mmio_is_valid_offset(struct intel_uncore_box *box,
					       unsigned long offset)
{
	if (offset < box->pmu->type->mmio_map_size)
		return true;

	pr_warn_once("perf uncore: Invalid offset 0x%lx exceeds mapped area of %s.\n",
		     offset, box->pmu->type->name);

	return false;
}

static inline
unsigned int uncore_mmio_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl +
	       box->pmu->type->mmio_offset * box->pmu->pmu_idx;
}

static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr;
}

static inline
unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
		return idx * 8 + box->pmu->type->event_ctl;

	return idx * 4 + box->pmu->type->event_ctl;
}

static inline
unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return idx * 8 + box->pmu->type->perf_ctr;
}

static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
{
	struct intel_uncore_pmu *pmu = box->pmu;
	return pmu->type->msr_offsets ?
		pmu->type->msr_offsets[pmu->pmu_idx] :
		pmu->type->msr_offset * pmu->pmu_idx;
}

static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->box_ctl)
		return 0;
	return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->fixed_ctl)
		return 0;
	return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}

/*
 * In the uncore document, there is no event-code assigned to free running
 * counters. Some events need to be defined to indicate the free running
 * counters. The events are encoded as event-code + umask-code.
 *
 * The event-code for all free running counters is 0xff, which is the same as
 * the fixed counters.
 *
 * The umask-code is used to distinguish a fixed counter from a free running
 * counter, and to distinguish the different types of free running counters.
 * - For fixed counters, the umask-code is 0x0X.
 *   X indicates the index of the fixed counter, which starts from 0.
 * - For free running counters, the umask-code uses the rest of the space.
 *   It bears the format 0xXY.
 *   X stands for the type of free running counters, which starts from 1.
 *   Y stands for the index of free running counters of the same type, which
 *   starts from 0.
 *
 * For example, there are three types of IIO free running counters on Skylake
 * server: IO CLOCKS counters, BANDWIDTH counters and UTILIZATION counters.
 * The event-code for all the free running counters is 0xff.
 * 'ioclk' is the first counter of IO CLOCKS. IO CLOCKS is the first type,
 * whose umask-codes start from 0x10.
 * So 'ioclk' is encoded as event=0xff,umask=0x10.
 * 'bw_in_port2' is the third counter of the BANDWIDTH counters. BANDWIDTH is
 * the second type, whose umask-codes start from 0x20.
 * So 'bw_in_port2' is encoded as event=0xff,umask=0x22.
 */
static inline unsigned int uncore_freerunning_idx(u64 config)
{
	return ((config >> 8) & 0xf);
}

#define UNCORE_FREERUNNING_UMASK_START		0x10

static inline unsigned int uncore_freerunning_type(u64 config)
{
	return ((((config >> 8) - UNCORE_FREERUNNING_UMASK_START) >> 4) & 0xf);
}
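
/*
 * Worked example of the decoding above: 'bw_in_port2' is encoded as
 * event=0xff,umask=0x22, so bits 8-15 of the config hold 0x22. Then
 * uncore_freerunning_type() returns ((0x22 - 0x10) >> 4) & 0xf = 1
 * (BANDWIDTH, the second type) and uncore_freerunning_idx() returns
 * 0x22 & 0xf = 2 (the third counter of that type).
 */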

static inline
unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
					struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);
	unsigned int idx = uncore_freerunning_idx(event->hw.config);
	struct intel_uncore_pmu *pmu = box->pmu;

	return pmu->type->freerunning[type].counter_base +
	       pmu->type->freerunning[type].counter_offset * idx +
	       (pmu->type->freerunning[type].box_offsets ?
		pmu->type->freerunning[type].box_offsets[pmu->pmu_idx] :
		pmu->type->freerunning[type].box_offset * pmu->pmu_idx);
}

static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PERFEVTSEL0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->event_ctl +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PER_CTR0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->perf_ctr +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}
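
/*
 * The generic accessors below pick the register layout from how the box
 * is reached: a box with a pci_dev or an ioremapped io_addr uses the
 * PCI/MMIO helpers, any other box is driven through MSRs.
 */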

static inline
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_fixed_ctl(box);
	else
		return uncore_msr_fixed_ctl(box);
}

static inline
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_fixed_ctr(box);
	else
		return uncore_msr_fixed_ctr(box);
}

static inline
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_event_ctl(box, idx);
	else
		return uncore_msr_event_ctl(box, idx);
}

static inline
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_perf_ctr(box, idx);
	else
		return uncore_msr_perf_ctr(box, idx);
}

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr_bits;
}

static inline
unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
				     struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);

	return box->pmu->type->freerunning[type].bits;
}

static inline int uncore_num_freerunning(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);

	return box->pmu->type->freerunning[type].num_counters;
}

static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	return box->pmu->type->num_freerunning_types;
}

static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
						 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);
	unsigned int idx = uncore_freerunning_idx(event->hw.config);

	return (type < uncore_num_freerunning_types(box, event)) &&
	       (idx < uncore_num_freerunning(box, event));
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
	return box->pmu->type->num_counters;
}

static inline bool is_freerunning_event(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	return ((cfg & UNCORE_FIXED_EVENT) == UNCORE_FIXED_EVENT) &&
	       (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
}

/* Check and reject invalid config */
static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	if (is_freerunning_event(event))
		return 0;

	return -EINVAL;
}

static inline void uncore_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
				       struct perf_event *event)
{
	box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
				      struct perf_event *event)
{
	return box->pmu->type->ops->read_counter(box, event);
}

static inline void uncore_box_init(struct intel_uncore_box *box)
{
	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->init_box)
			box->pmu->type->ops->init_box(box);
	}
}

static inline void uncore_box_exit(struct intel_uncore_box *box)
{
	if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->exit_box)
			box->pmu->type->ops->exit_box(box);
	}
}
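
/*
 * Boxes with a negative die ID are software-only "fake" boxes, used
 * transiently by the core uncore code (e.g. for event-group validation)
 * rather than to drive real hardware.
 */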

static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
	return (box->dieid < 0);
}

static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	return event->pmu_private;
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_mmio_exit_box(struct intel_uncore_box *box);
u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
			     struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_event_start(struct perf_event *event, int flags);
void uncore_pmu_event_stop(struct perf_event *event, int flags);
int uncore_pmu_event_add(struct perf_event *event, int flags);
void uncore_pmu_event_del(struct perf_event *event, int flags);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);
void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu);

extern struct intel_uncore_type *empty_uncore[];
extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct intel_uncore_type **uncore_mmio_uncores;
extern struct pci_driver *uncore_pci_driver;
extern struct pci_driver *uncore_pci_sub_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;

/* uncore_snb.c */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
void icl_uncore_cpu_init(void);
void tgl_uncore_cpu_init(void);
void adl_uncore_cpu_init(void);
void tgl_uncore_mmio_init(void);
void tgl_l_uncore_mmio_init(void);
void adl_uncore_mmio_init(void);
int snb_pci2phy_map_init(int devid);

/* uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);
int skx_uncore_pci_init(void);
void skx_uncore_cpu_init(void);
int snr_uncore_pci_init(void);
void snr_uncore_cpu_init(void);
void snr_uncore_mmio_init(void);
int icx_uncore_pci_init(void);
void icx_uncore_cpu_init(void);
void icx_uncore_mmio_init(void);
int spr_uncore_pci_init(void);
void spr_uncore_cpu_init(void);
void spr_uncore_mmio_init(void);

/* uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);