1 /* SPDX-License-Identifier: GPL-2.0 */ 2 #include <linux/slab.h> 3 #include <linux/pci.h> 4 #include <asm/apicdef.h> 5 #include <linux/io-64-nonatomic-lo-hi.h> 6 7 #include <linux/perf_event.h> 8 #include "../perf_event.h" 9 10 #define UNCORE_PMU_NAME_LEN 32 11 #define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC) 12 #define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC) 13 14 #define UNCORE_FIXED_EVENT 0xff 15 #define UNCORE_PMC_IDX_MAX_GENERIC 8 16 #define UNCORE_PMC_IDX_MAX_FIXED 1 17 #define UNCORE_PMC_IDX_MAX_FREERUNNING 1 18 #define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC 19 #define UNCORE_PMC_IDX_FREERUNNING (UNCORE_PMC_IDX_FIXED + \ 20 UNCORE_PMC_IDX_MAX_FIXED) 21 #define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FREERUNNING + \ 22 UNCORE_PMC_IDX_MAX_FREERUNNING) 23 24 #define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx) \ 25 ((dev << 24) | (func << 16) | (type << 8) | idx) 26 #define UNCORE_PCI_DEV_DATA(type, idx) ((type << 8) | idx) 27 #define UNCORE_PCI_DEV_DEV(data) ((data >> 24) & 0xff) 28 #define UNCORE_PCI_DEV_FUNC(data) ((data >> 16) & 0xff) 29 #define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff) 30 #define UNCORE_PCI_DEV_IDX(data) (data & 0xff) 31 #define UNCORE_EXTRA_PCI_DEV 0xff 32 #define UNCORE_EXTRA_PCI_DEV_MAX 4 33 34 #define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff) 35 36 struct pci_extra_dev { 37 struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX]; 38 }; 39 40 struct intel_uncore_ops; 41 struct intel_uncore_pmu; 42 struct intel_uncore_box; 43 struct uncore_event_desc; 44 struct freerunning_counters; 45 46 struct intel_uncore_type { 47 const char *name; 48 int num_counters; 49 int num_boxes; 50 int perf_ctr_bits; 51 int fixed_ctr_bits; 52 int num_freerunning_types; 53 unsigned perf_ctr; 54 unsigned event_ctl; 55 unsigned event_mask; 56 unsigned event_mask_ext; 57 unsigned fixed_ctr; 58 unsigned fixed_ctl; 59 unsigned box_ctl; 60 union { 61 unsigned msr_offset; 62 unsigned mmio_offset; 63 }; 64 unsigned 
num_shared_regs:8; 65 unsigned single_fixed:1; 66 unsigned pair_ctr_ctl:1; 67 unsigned *msr_offsets; 68 struct event_constraint unconstrainted; 69 struct event_constraint *constraints; 70 struct intel_uncore_pmu *pmus; 71 struct intel_uncore_ops *ops; 72 struct uncore_event_desc *event_descs; 73 struct freerunning_counters *freerunning; 74 const struct attribute_group *attr_groups[4]; 75 struct pmu *pmu; /* for custom pmu ops */ 76 }; 77 78 #define pmu_group attr_groups[0] 79 #define format_group attr_groups[1] 80 #define events_group attr_groups[2] 81 82 struct intel_uncore_ops { 83 void (*init_box)(struct intel_uncore_box *); 84 void (*exit_box)(struct intel_uncore_box *); 85 void (*disable_box)(struct intel_uncore_box *); 86 void (*enable_box)(struct intel_uncore_box *); 87 void (*disable_event)(struct intel_uncore_box *, struct perf_event *); 88 void (*enable_event)(struct intel_uncore_box *, struct perf_event *); 89 u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *); 90 int (*hw_config)(struct intel_uncore_box *, struct perf_event *); 91 struct event_constraint *(*get_constraint)(struct intel_uncore_box *, 92 struct perf_event *); 93 void (*put_constraint)(struct intel_uncore_box *, struct perf_event *); 94 }; 95 96 struct intel_uncore_pmu { 97 struct pmu pmu; 98 char name[UNCORE_PMU_NAME_LEN]; 99 int pmu_idx; 100 int func_id; 101 bool registered; 102 atomic_t activeboxes; 103 struct intel_uncore_type *type; 104 struct intel_uncore_box **boxes; 105 }; 106 107 struct intel_uncore_extra_reg { 108 raw_spinlock_t lock; 109 u64 config, config1, config2; 110 atomic_t ref; 111 }; 112 113 struct intel_uncore_box { 114 int pci_phys_id; 115 int dieid; /* Logical die ID */ 116 int n_active; /* number of active events */ 117 int n_events; 118 int cpu; /* cpu to collect events */ 119 unsigned long flags; 120 atomic_t refcnt; 121 struct perf_event *events[UNCORE_PMC_IDX_MAX]; 122 struct perf_event *event_list[UNCORE_PMC_IDX_MAX]; 123 struct event_constraint 
*event_constraint[UNCORE_PMC_IDX_MAX]; 124 unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; 125 u64 tags[UNCORE_PMC_IDX_MAX]; 126 struct pci_dev *pci_dev; 127 struct intel_uncore_pmu *pmu; 128 u64 hrtimer_duration; /* hrtimer timeout for this box */ 129 struct hrtimer hrtimer; 130 struct list_head list; 131 struct list_head active_list; 132 void __iomem *io_addr; 133 struct intel_uncore_extra_reg shared_regs[0]; 134 }; 135 136 /* CFL uncore 8th cbox MSRs */ 137 #define CFL_UNC_CBO_7_PERFEVTSEL0 0xf70 138 #define CFL_UNC_CBO_7_PER_CTR0 0xf76 139 140 #define UNCORE_BOX_FLAG_INITIATED 0 141 /* event config registers are 8-byte apart */ 142 #define UNCORE_BOX_FLAG_CTL_OFFS8 1 143 /* CFL 8th CBOX has different MSR space */ 144 #define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS 2 145 146 struct uncore_event_desc { 147 struct kobj_attribute attr; 148 const char *config; 149 }; 150 151 struct freerunning_counters { 152 unsigned int counter_base; 153 unsigned int counter_offset; 154 unsigned int box_offset; 155 unsigned int num_counters; 156 unsigned int bits; 157 unsigned *box_offsets; 158 }; 159 160 struct pci2phy_map { 161 struct list_head list; 162 int segment; 163 int pbus_to_physid[256]; 164 }; 165 166 struct pci2phy_map *__find_pci2phy_map(int segment); 167 int uncore_pcibus_to_physid(struct pci_bus *bus); 168 169 ssize_t uncore_event_show(struct kobject *kobj, 170 struct kobj_attribute *attr, char *buf); 171 172 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \ 173 { \ 174 .attr = __ATTR(_name, 0444, uncore_event_show, NULL), \ 175 .config = _config, \ 176 } 177 178 #define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \ 179 static ssize_t __uncore_##_var##_show(struct kobject *kobj, \ 180 struct kobj_attribute *attr, \ 181 char *page) \ 182 { \ 183 BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ 184 return sprintf(page, _format "\n"); \ 185 } \ 186 static struct kobj_attribute format_attr_##_var = \ 187 __ATTR(_name, 0444, __uncore_##_var##_show, NULL) 188 189 
/* True if @idx addresses the (single) fixed counter. */
static inline bool uncore_pmc_fixed(int idx)
{
	return idx == UNCORE_PMC_IDX_FIXED;
}

/* True if @idx is the pseudo-index reserved for free-running counters. */
static inline bool uncore_pmc_freerunning(int idx)
{
	return idx == UNCORE_PMC_IDX_FREERUNNING;
}

/*
 * MMIO boxes: box control offset, advanced by mmio_offset per PMU index.
 */
static inline
unsigned int uncore_mmio_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl +
	       box->pmu->type->mmio_offset * box->pmu->pmu_idx;
}

/*
 * PCI boxes: register offsets within the device's config/BAR space.
 */
static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr;
}

static inline
unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
	/* Control registers are 8 bytes apart when CTL_OFFS8 is set, else 4. */
	if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
		return idx * 8 + box->pmu->type->event_ctl;

	return idx * 4 + box->pmu->type->event_ctl;
}

static inline
unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
	/* Counter registers are always 8 bytes apart on PCI boxes. */
	return idx * 8 + box->pmu->type->perf_ctr;
}

/*
 * MSR boxes: per-box offset comes from the explicit msr_offsets table when
 * present, otherwise from the uniform msr_offset stride.
 */
static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
{
	struct intel_uncore_pmu *pmu = box->pmu;
	return pmu->type->msr_offsets ?
		pmu->type->msr_offsets[pmu->pmu_idx] :
		pmu->type->msr_offset * pmu->pmu_idx;
}

static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
	/* box_ctl == 0 means this type has no box-level control register. */
	if (!box->pmu->type->box_ctl)
		return 0;
	return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
	/* fixed_ctl == 0 means this type has no fixed counter control. */
	if (!box->pmu->type->fixed_ctl)
		return 0;
	return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}


/*
 * In the uncore document, there is no event-code assigned to free running
 * counters. Some events need to be defined to indicate the free running
 * counters. The events are encoded as event-code + umask-code.
 *
 * The event-code for all free running counters is 0xff, which is the same as
 * the fixed counters.
 *
 * The umask-code is used to distinguish a fixed counter and a free running
 * counter, and different types of free running counters.
 * - For fixed counters, the umask-code is 0x0X.
 *   X indicates the index of the fixed counter, which starts from 0.
 * - For free running counters, the umask-code uses the rest of the space.
 *   It has the format 0xXY.
 *   X stands for the type of free running counters, which starts from 1.
 *   Y stands for the index of free running counters of same type, which
 *   starts from 0.
 *
 * For example, there are three types of IIO free running counters on Skylake
 * server: IO CLOCKS counters, BANDWIDTH counters and UTILIZATION counters.
 * The event-code for all the free running counters is 0xff.
 * 'ioclk' is the first counter of IO CLOCKS. IO CLOCKS is the first type,
 * whose umask-code starts from 0x10.
 * So 'ioclk' is encoded as event=0xff,umask=0x10
 * 'bw_in_port2' is the third counter of BANDWIDTH counters. BANDWIDTH is
 * the second type, whose umask-code starts from 0x20.
 * So 'bw_in_port2' is encoded as event=0xff,umask=0x22
 */
/* Index within its type: low nibble of the umask (config bits 8-11). */
static inline unsigned int uncore_freerunning_idx(u64 config)
{
	return ((config >> 8) & 0xf);
}

#define UNCORE_FREERUNNING_UMASK_START		0x10

/* Type of free-running counter: high nibble of the umask, rebased to 0. */
static inline unsigned int uncore_freerunning_type(u64 config)
{
	return ((((config >> 8) - UNCORE_FREERUNNING_UMASK_START) >> 4) & 0xf);
}

/*
 * Address of the free-running counter selected by the event's encoding:
 * type base + per-counter stride, plus the per-box offset (from the
 * box_offsets table when present, else the uniform box_offset stride).
 */
static inline
unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
					struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);
	unsigned int idx = uncore_freerunning_idx(event->hw.config);
	struct intel_uncore_pmu *pmu = box->pmu;

	return pmu->type->freerunning[type].counter_base +
	       pmu->type->freerunning[type].counter_offset * idx +
	       (pmu->type->freerunning[type].box_offsets ?
	        pmu->type->freerunning[type].box_offsets[pmu->pmu_idx] :
	        pmu->type->freerunning[type].box_offset * pmu->pmu_idx);
}

static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
	/* CFL's 8th CBOX lives in a separate MSR range (see CFL_UNC_CBO_7_*). */
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PERFEVTSEL0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->event_ctl +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
	/* Same CFL 8th-CBOX special case as uncore_msr_event_ctl(). */
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PER_CTR0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->perf_ctr +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}

/*
 * Transport-independent wrappers: PCI and MMIO boxes share the PCI-style
 * offset computation; everything else is MSR-based.
 */
static inline
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_fixed_ctl(box);
	else
		return uncore_msr_fixed_ctl(box);
}

static inline
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_fixed_ctr(box);
	else
		return uncore_msr_fixed_ctr(box);
}

static inline
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_event_ctl(box, idx);
	else
		return uncore_msr_event_ctl(box, idx);
}

static inline
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_perf_ctr(box, idx);
	else
		return uncore_msr_perf_ctr(box, idx);
}

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr_bits;
}

/* Width of the free-running counter type selected by the event. */
static inline
unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
				     struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);

	return box->pmu->type->freerunning[type].bits;
}

static inline int uncore_num_freerunning(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);

	return box->pmu->type->freerunning[type].num_counters;
}

static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	return box->pmu->type->num_freerunning_types;
}

/* Both the decoded type and the index must be within the box's tables. */
static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
						 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);
	unsigned int idx = uncore_freerunning_idx(event->hw.config);

	return (type < uncore_num_freerunning_types(box, event)) &&
	       (idx < uncore_num_freerunning(box, event));
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
	return box->pmu->type->num_counters;
}

/* Event code 0xff with umask >= 0x10 denotes a free-running counter. */
static inline bool is_freerunning_event(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	return ((cfg & UNCORE_FIXED_EVENT) == UNCORE_FIXED_EVENT) &&
	       (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
}

/* Check and reject invalid config */
static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	if (is_freerunning_event(event))
		return 0;

	return -EINVAL;
}

static inline void uncore_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
				       struct perf_event *event)
{
	box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
				      struct perf_event *event)
{
	return box->pmu->type->ops->read_counter(box, event);
}

/* Run the type's init_box hook at most once per box (INITIATED flag). */
static inline void uncore_box_init(struct intel_uncore_box *box)
{
	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->init_box)
			box->pmu->type->ops->init_box(box);
	}
}

/* Counterpart of uncore_box_init(); only runs if init actually happened. */
static inline void uncore_box_exit(struct intel_uncore_box *box)
{
	if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->exit_box)
			box->pmu->type->ops->exit_box(box);
	}
}

/* A fake box (used for validation) carries a negative die id. */
static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
	return (box->dieid < 0);
}

static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	return event->pmu_private;
}

/* Core uncore driver entry points (defined in uncore.c). */
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_mmio_exit_box(struct intel_uncore_box *box);
u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
			     struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_event_start(struct perf_event *event, int flags);
void uncore_pmu_event_stop(struct perf_event *event, int flags);
int uncore_pmu_event_add(struct perf_event *event, int flags);
void uncore_pmu_event_del(struct perf_event *event, int flags);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);

/* Global uncore state shared across the per-platform files. */
extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct intel_uncore_type **uncore_mmio_uncores;
extern struct pci_driver *uncore_pci_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;

/* uncore_snb.c */
int snb_uncore_pci_init(void);
/* Per-platform init hooks, continued from the uncore_snb.c section above. */
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
void icl_uncore_cpu_init(void);
void tgl_uncore_mmio_init(void);
void tgl_l_uncore_mmio_init(void);
int snb_pci2phy_map_init(int devid);

/* uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);
int skx_uncore_pci_init(void);
void skx_uncore_cpu_init(void);
int snr_uncore_pci_init(void);
void snr_uncore_cpu_init(void);
void snr_uncore_mmio_init(void);
int icx_uncore_pci_init(void);
void icx_uncore_cpu_init(void);
void icx_uncore_mmio_init(void);

/* uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);