1 /* SPDX-License-Identifier: GPL-2.0 */ 2 #include <linux/slab.h> 3 #include <linux/pci.h> 4 #include <asm/apicdef.h> 5 #include <linux/io-64-nonatomic-lo-hi.h> 6 7 #include <linux/perf_event.h> 8 #include "../perf_event.h" 9 10 #define UNCORE_PMU_NAME_LEN 32 11 #define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC) 12 #define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC) 13 14 #define UNCORE_FIXED_EVENT 0xff 15 #define UNCORE_PMC_IDX_MAX_GENERIC 8 16 #define UNCORE_PMC_IDX_MAX_FIXED 1 17 #define UNCORE_PMC_IDX_MAX_FREERUNNING 1 18 #define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC 19 #define UNCORE_PMC_IDX_FREERUNNING (UNCORE_PMC_IDX_FIXED + \ 20 UNCORE_PMC_IDX_MAX_FIXED) 21 #define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FREERUNNING + \ 22 UNCORE_PMC_IDX_MAX_FREERUNNING) 23 24 #define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx) \ 25 ((dev << 24) | (func << 16) | (type << 8) | idx) 26 #define UNCORE_PCI_DEV_DATA(type, idx) ((type << 8) | idx) 27 #define UNCORE_PCI_DEV_DEV(data) ((data >> 24) & 0xff) 28 #define UNCORE_PCI_DEV_FUNC(data) ((data >> 16) & 0xff) 29 #define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff) 30 #define UNCORE_PCI_DEV_IDX(data) (data & 0xff) 31 #define UNCORE_EXTRA_PCI_DEV 0xff 32 #define UNCORE_EXTRA_PCI_DEV_MAX 4 33 34 #define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff) 35 36 struct pci_extra_dev { 37 struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX]; 38 }; 39 40 struct intel_uncore_ops; 41 struct intel_uncore_pmu; 42 struct intel_uncore_box; 43 struct uncore_event_desc; 44 struct freerunning_counters; 45 46 struct intel_uncore_type { 47 const char *name; 48 int num_counters; 49 int num_boxes; 50 int perf_ctr_bits; 51 int fixed_ctr_bits; 52 int num_freerunning_types; 53 unsigned perf_ctr; 54 unsigned event_ctl; 55 unsigned event_mask; 56 unsigned event_mask_ext; 57 unsigned fixed_ctr; 58 unsigned fixed_ctl; 59 unsigned box_ctl; 60 union { 61 unsigned msr_offset; 62 unsigned mmio_offset; 63 }; 64 unsigned 
num_shared_regs:8; 65 unsigned single_fixed:1; 66 unsigned pair_ctr_ctl:1; 67 unsigned *msr_offsets; 68 struct event_constraint unconstrainted; 69 struct event_constraint *constraints; 70 struct intel_uncore_pmu *pmus; 71 struct intel_uncore_ops *ops; 72 struct uncore_event_desc *event_descs; 73 struct freerunning_counters *freerunning; 74 const struct attribute_group *attr_groups[4]; 75 struct pmu *pmu; /* for custom pmu ops */ 76 }; 77 78 #define pmu_group attr_groups[0] 79 #define format_group attr_groups[1] 80 #define events_group attr_groups[2] 81 82 struct intel_uncore_ops { 83 void (*init_box)(struct intel_uncore_box *); 84 void (*exit_box)(struct intel_uncore_box *); 85 void (*disable_box)(struct intel_uncore_box *); 86 void (*enable_box)(struct intel_uncore_box *); 87 void (*disable_event)(struct intel_uncore_box *, struct perf_event *); 88 void (*enable_event)(struct intel_uncore_box *, struct perf_event *); 89 u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *); 90 int (*hw_config)(struct intel_uncore_box *, struct perf_event *); 91 struct event_constraint *(*get_constraint)(struct intel_uncore_box *, 92 struct perf_event *); 93 void (*put_constraint)(struct intel_uncore_box *, struct perf_event *); 94 }; 95 96 struct intel_uncore_pmu { 97 struct pmu pmu; 98 char name[UNCORE_PMU_NAME_LEN]; 99 int pmu_idx; 100 int func_id; 101 bool registered; 102 atomic_t activeboxes; 103 struct intel_uncore_type *type; 104 struct intel_uncore_box **boxes; 105 }; 106 107 struct intel_uncore_extra_reg { 108 raw_spinlock_t lock; 109 u64 config, config1, config2; 110 atomic_t ref; 111 }; 112 113 struct intel_uncore_box { 114 int pci_phys_id; 115 int dieid; /* Logical die ID */ 116 int n_active; /* number of active events */ 117 int n_events; 118 int cpu; /* cpu to collect events */ 119 unsigned long flags; 120 atomic_t refcnt; 121 struct perf_event *events[UNCORE_PMC_IDX_MAX]; 122 struct perf_event *event_list[UNCORE_PMC_IDX_MAX]; 123 struct event_constraint 
*event_constraint[UNCORE_PMC_IDX_MAX]; 124 unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; 125 u64 tags[UNCORE_PMC_IDX_MAX]; 126 struct pci_dev *pci_dev; 127 struct intel_uncore_pmu *pmu; 128 u64 hrtimer_duration; /* hrtimer timeout for this box */ 129 struct hrtimer hrtimer; 130 struct list_head list; 131 struct list_head active_list; 132 void __iomem *io_addr; 133 struct intel_uncore_extra_reg shared_regs[0]; 134 }; 135 136 /* CFL uncore 8th cbox MSRs */ 137 #define CFL_UNC_CBO_7_PERFEVTSEL0 0xf70 138 #define CFL_UNC_CBO_7_PER_CTR0 0xf76 139 140 #define UNCORE_BOX_FLAG_INITIATED 0 141 /* event config registers are 8-byte apart */ 142 #define UNCORE_BOX_FLAG_CTL_OFFS8 1 143 /* CFL 8th CBOX has different MSR space */ 144 #define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS 2 145 146 struct uncore_event_desc { 147 struct kobj_attribute attr; 148 const char *config; 149 }; 150 151 struct freerunning_counters { 152 unsigned int counter_base; 153 unsigned int counter_offset; 154 unsigned int box_offset; 155 unsigned int num_counters; 156 unsigned int bits; 157 }; 158 159 struct pci2phy_map { 160 struct list_head list; 161 int segment; 162 int pbus_to_physid[256]; 163 }; 164 165 struct pci2phy_map *__find_pci2phy_map(int segment); 166 int uncore_pcibus_to_physid(struct pci_bus *bus); 167 168 ssize_t uncore_event_show(struct kobject *kobj, 169 struct kobj_attribute *attr, char *buf); 170 171 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \ 172 { \ 173 .attr = __ATTR(_name, 0444, uncore_event_show, NULL), \ 174 .config = _config, \ 175 } 176 177 #define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \ 178 static ssize_t __uncore_##_var##_show(struct kobject *kobj, \ 179 struct kobj_attribute *attr, \ 180 char *page) \ 181 { \ 182 BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ 183 return sprintf(page, _format "\n"); \ 184 } \ 185 static struct kobj_attribute format_attr_##_var = \ 186 __ATTR(_name, 0444, __uncore_##_var##_show, NULL) 187 188 static inline bool 
uncore_pmc_fixed(int idx) 189 { 190 return idx == UNCORE_PMC_IDX_FIXED; 191 } 192 193 static inline bool uncore_pmc_freerunning(int idx) 194 { 195 return idx == UNCORE_PMC_IDX_FREERUNNING; 196 } 197 198 static inline 199 unsigned int uncore_mmio_box_ctl(struct intel_uncore_box *box) 200 { 201 return box->pmu->type->box_ctl + 202 box->pmu->type->mmio_offset * box->pmu->pmu_idx; 203 } 204 205 static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box) 206 { 207 return box->pmu->type->box_ctl; 208 } 209 210 static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box) 211 { 212 return box->pmu->type->fixed_ctl; 213 } 214 215 static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box) 216 { 217 return box->pmu->type->fixed_ctr; 218 } 219 220 static inline 221 unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx) 222 { 223 if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags)) 224 return idx * 8 + box->pmu->type->event_ctl; 225 226 return idx * 4 + box->pmu->type->event_ctl; 227 } 228 229 static inline 230 unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx) 231 { 232 return idx * 8 + box->pmu->type->perf_ctr; 233 } 234 235 static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box) 236 { 237 struct intel_uncore_pmu *pmu = box->pmu; 238 return pmu->type->msr_offsets ? 
239 pmu->type->msr_offsets[pmu->pmu_idx] : 240 pmu->type->msr_offset * pmu->pmu_idx; 241 } 242 243 static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box) 244 { 245 if (!box->pmu->type->box_ctl) 246 return 0; 247 return box->pmu->type->box_ctl + uncore_msr_box_offset(box); 248 } 249 250 static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box) 251 { 252 if (!box->pmu->type->fixed_ctl) 253 return 0; 254 return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box); 255 } 256 257 static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box) 258 { 259 return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box); 260 } 261 262 263 /* 264 * In the uncore document, there is no event-code assigned to free running 265 * counters. Some events need to be defined to indicate the free running 266 * counters. The events are encoded as event-code + umask-code. 267 * 268 * The event-code for all free running counters is 0xff, which is the same as 269 * the fixed counters. 270 * 271 * The umask-code is used to distinguish a fixed counter and a free running 272 * counter, and different types of free running counters. 273 * - For fixed counters, the umask-code is 0x0X. 274 * X indicates the index of the fixed counter, which starts from 0. 275 * - For free running counters, the umask-code uses the rest of the space. 276 * It would bare the format of 0xXY. 277 * X stands for the type of free running counters, which starts from 1. 278 * Y stands for the index of free running counters of same type, which 279 * starts from 0. 280 * 281 * For example, there are three types of IIO free running counters on Skylake 282 * server, IO CLOCKS counters, BANDWIDTH counters and UTILIZATION counters. 283 * The event-code for all the free running counters is 0xff. 284 * 'ioclk' is the first counter of IO CLOCKS. IO CLOCKS is the first type, 285 * which umask-code starts from 0x10. 
286 * So 'ioclk' is encoded as event=0xff,umask=0x10 287 * 'bw_in_port2' is the third counter of BANDWIDTH counters. BANDWIDTH is 288 * the second type, which umask-code starts from 0x20. 289 * So 'bw_in_port2' is encoded as event=0xff,umask=0x22 290 */ 291 static inline unsigned int uncore_freerunning_idx(u64 config) 292 { 293 return ((config >> 8) & 0xf); 294 } 295 296 #define UNCORE_FREERUNNING_UMASK_START 0x10 297 298 static inline unsigned int uncore_freerunning_type(u64 config) 299 { 300 return ((((config >> 8) - UNCORE_FREERUNNING_UMASK_START) >> 4) & 0xf); 301 } 302 303 static inline 304 unsigned int uncore_freerunning_counter(struct intel_uncore_box *box, 305 struct perf_event *event) 306 { 307 unsigned int type = uncore_freerunning_type(event->hw.config); 308 unsigned int idx = uncore_freerunning_idx(event->hw.config); 309 struct intel_uncore_pmu *pmu = box->pmu; 310 311 return pmu->type->freerunning[type].counter_base + 312 pmu->type->freerunning[type].counter_offset * idx + 313 pmu->type->freerunning[type].box_offset * pmu->pmu_idx; 314 } 315 316 static inline 317 unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx) 318 { 319 if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) { 320 return CFL_UNC_CBO_7_PERFEVTSEL0 + 321 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx); 322 } else { 323 return box->pmu->type->event_ctl + 324 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + 325 uncore_msr_box_offset(box); 326 } 327 } 328 329 static inline 330 unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx) 331 { 332 if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) { 333 return CFL_UNC_CBO_7_PER_CTR0 + 334 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx); 335 } else { 336 return box->pmu->type->perf_ctr + 337 (box->pmu->type->pair_ctr_ctl ? 
2 * idx : idx) + 338 uncore_msr_box_offset(box); 339 } 340 } 341 342 static inline 343 unsigned uncore_fixed_ctl(struct intel_uncore_box *box) 344 { 345 if (box->pci_dev || box->io_addr) 346 return uncore_pci_fixed_ctl(box); 347 else 348 return uncore_msr_fixed_ctl(box); 349 } 350 351 static inline 352 unsigned uncore_fixed_ctr(struct intel_uncore_box *box) 353 { 354 if (box->pci_dev || box->io_addr) 355 return uncore_pci_fixed_ctr(box); 356 else 357 return uncore_msr_fixed_ctr(box); 358 } 359 360 static inline 361 unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx) 362 { 363 if (box->pci_dev || box->io_addr) 364 return uncore_pci_event_ctl(box, idx); 365 else 366 return uncore_msr_event_ctl(box, idx); 367 } 368 369 static inline 370 unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx) 371 { 372 if (box->pci_dev || box->io_addr) 373 return uncore_pci_perf_ctr(box, idx); 374 else 375 return uncore_msr_perf_ctr(box, idx); 376 } 377 378 static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box) 379 { 380 return box->pmu->type->perf_ctr_bits; 381 } 382 383 static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box) 384 { 385 return box->pmu->type->fixed_ctr_bits; 386 } 387 388 static inline 389 unsigned int uncore_freerunning_bits(struct intel_uncore_box *box, 390 struct perf_event *event) 391 { 392 unsigned int type = uncore_freerunning_type(event->hw.config); 393 394 return box->pmu->type->freerunning[type].bits; 395 } 396 397 static inline int uncore_num_freerunning(struct intel_uncore_box *box, 398 struct perf_event *event) 399 { 400 unsigned int type = uncore_freerunning_type(event->hw.config); 401 402 return box->pmu->type->freerunning[type].num_counters; 403 } 404 405 static inline int uncore_num_freerunning_types(struct intel_uncore_box *box, 406 struct perf_event *event) 407 { 408 return box->pmu->type->num_freerunning_types; 409 } 410 411 static inline bool check_valid_freerunning_event(struct intel_uncore_box *box, 
412 struct perf_event *event) 413 { 414 unsigned int type = uncore_freerunning_type(event->hw.config); 415 unsigned int idx = uncore_freerunning_idx(event->hw.config); 416 417 return (type < uncore_num_freerunning_types(box, event)) && 418 (idx < uncore_num_freerunning(box, event)); 419 } 420 421 static inline int uncore_num_counters(struct intel_uncore_box *box) 422 { 423 return box->pmu->type->num_counters; 424 } 425 426 static inline bool is_freerunning_event(struct perf_event *event) 427 { 428 u64 cfg = event->attr.config; 429 430 return ((cfg & UNCORE_FIXED_EVENT) == UNCORE_FIXED_EVENT) && 431 (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START); 432 } 433 434 /* Check and reject invalid config */ 435 static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box, 436 struct perf_event *event) 437 { 438 if (is_freerunning_event(event)) 439 return 0; 440 441 return -EINVAL; 442 } 443 444 static inline void uncore_disable_box(struct intel_uncore_box *box) 445 { 446 if (box->pmu->type->ops->disable_box) 447 box->pmu->type->ops->disable_box(box); 448 } 449 450 static inline void uncore_enable_box(struct intel_uncore_box *box) 451 { 452 if (box->pmu->type->ops->enable_box) 453 box->pmu->type->ops->enable_box(box); 454 } 455 456 static inline void uncore_disable_event(struct intel_uncore_box *box, 457 struct perf_event *event) 458 { 459 box->pmu->type->ops->disable_event(box, event); 460 } 461 462 static inline void uncore_enable_event(struct intel_uncore_box *box, 463 struct perf_event *event) 464 { 465 box->pmu->type->ops->enable_event(box, event); 466 } 467 468 static inline u64 uncore_read_counter(struct intel_uncore_box *box, 469 struct perf_event *event) 470 { 471 return box->pmu->type->ops->read_counter(box, event); 472 } 473 474 static inline void uncore_box_init(struct intel_uncore_box *box) 475 { 476 if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { 477 if (box->pmu->type->ops->init_box) 478 
box->pmu->type->ops->init_box(box); 479 } 480 } 481 482 static inline void uncore_box_exit(struct intel_uncore_box *box) 483 { 484 if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { 485 if (box->pmu->type->ops->exit_box) 486 box->pmu->type->ops->exit_box(box); 487 } 488 } 489 490 static inline bool uncore_box_is_fake(struct intel_uncore_box *box) 491 { 492 return (box->dieid < 0); 493 } 494 495 static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event) 496 { 497 return container_of(event->pmu, struct intel_uncore_pmu, pmu); 498 } 499 500 static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event) 501 { 502 return event->pmu_private; 503 } 504 505 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu); 506 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event); 507 void uncore_mmio_exit_box(struct intel_uncore_box *box); 508 u64 uncore_mmio_read_counter(struct intel_uncore_box *box, 509 struct perf_event *event); 510 void uncore_pmu_start_hrtimer(struct intel_uncore_box *box); 511 void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box); 512 void uncore_pmu_event_start(struct perf_event *event, int flags); 513 void uncore_pmu_event_stop(struct perf_event *event, int flags); 514 int uncore_pmu_event_add(struct perf_event *event, int flags); 515 void uncore_pmu_event_del(struct perf_event *event, int flags); 516 void uncore_pmu_event_read(struct perf_event *event); 517 void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event); 518 struct event_constraint * 519 uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event); 520 void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event); 521 u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx); 522 523 extern struct intel_uncore_type **uncore_msr_uncores; 524 extern struct intel_uncore_type **uncore_pci_uncores; 525 
extern struct intel_uncore_type **uncore_mmio_uncores;
extern struct pci_driver *uncore_pci_driver;
/* Protects pci2phy_map_head and the maps hanging off it */
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;

/*
 * Per-platform init entry points, defined in the files named below.
 * *_pci_init() return 0 on success; *_cpu_init()/*_mmio_init() set up
 * the corresponding uncore type tables.
 */

/* uncore_snb.c */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
void icl_uncore_cpu_init(void);
int snb_pci2phy_map_init(int devid);

/* uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);
int skx_uncore_pci_init(void);
void skx_uncore_cpu_init(void);
int snr_uncore_pci_init(void);
void snr_uncore_cpu_init(void);
void snr_uncore_mmio_init(void);

/* uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);