/* Nehalem/SandyBridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"

/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC	0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC	0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC	0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC	0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC	0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC	0x1604
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC	0x1904
#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC	0x190c
#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC	0x1900
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC	0x1910
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC	0x190f
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC	0x191f

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK		0x000000ff
#define SNB_UNC_CTL_UMASK_MASK		0x0000ff00
#define SNB_UNC_CTL_EDGE_DET		(1 << 18)
#define SNB_UNC_CTL_EN			(1 << 22)
#define SNB_UNC_CTL_INVERT		(1 << 23)
#define SNB_UNC_CTL_CMASK_MASK		0x1f000000
#define NHM_UNC_CTL_CMASK_MASK		0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN	(1 << 0)

#define SNB_UNC_RAW_EVENT_MASK		(SNB_UNC_CTL_EV_SEL_MASK | \
					 SNB_UNC_CTL_UMASK_MASK | \
					 SNB_UNC_CTL_EDGE_DET | \
					 SNB_UNC_CTL_INVERT | \
					 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK		(SNB_UNC_CTL_EV_SEL_MASK | \
					 SNB_UNC_CTL_UMASK_MASK | \
					 SNB_UNC_CTL_EDGE_DET | \
					 SNB_UNC_CTL_INVERT | \
					 NHM_UNC_CTL_CMASK_MASK)

/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL		0x391
#define SNB_UNC_FIXED_CTR_CTRL		0x394
#define SNB_UNC_FIXED_CTR		0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL	((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN		(1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0	0x700
#define SNB_UNC_CBO_0_PER_CTR0		0x706
#define SNB_UNC_CBO_MSR_OFFSET		0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0		0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0		0x3b2
#define SNB_UNC_ARB_MSR_OFFSET		0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL		0x391
#define NHM_UNC_FIXED_CTR		0x394
#define NHM_UNC_FIXED_CTR_CTRL		0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL	((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC	(1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0		0x3c0
#define NHM_UNC_UNCORE_PMC0		0x3b0

/* SKL uncore global control */
#define SKL_UNC_PERF_GLOBAL_CTL		0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL	((1 << 5) - 1)

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");

/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}
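
/*
 * Descriptive note (added): the Cbox/ARB counters share a single global
 * control MSR (SNB_UNC_PERF_GLOBAL_CTL), so only the first PMU instance
 * (pmu_idx == 0) writes it on init/exit. The value written below works out
 * to (1 << 29) | 0xf == 0x2000000f: the global enable bit plus the
 * per-core enable bits.
 */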

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}

static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}

static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};

static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};

static struct attribute_group snb_uncore_format_group = {
	.name = "format",
	.attrs = snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box = snb_uncore_msr_init_box,
	.enable_box = snb_uncore_msr_enable_box,
	.exit_box = snb_uncore_msr_exit_box,
	.disable_event = snb_uncore_msr_disable_event,
	.enable_event = snb_uncore_msr_enable_event,
	.read_counter = uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snb_uncore_cbox = {
	.name = "cbox",
	.num_counters = 2,
	.num_boxes = 4,
	.perf_ctr_bits = 44,
	.fixed_ctr_bits = 48,
	.perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr = SNB_UNC_FIXED_CTR,
	.fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed = 1,
	.event_mask = SNB_UNC_RAW_EVENT_MASK,
	.msr_offset = SNB_UNC_CBO_MSR_OFFSET,
	.ops = &snb_uncore_msr_ops,
	.format_group = &snb_uncore_format_group,
	.event_descs = snb_uncore_events,
};

static struct intel_uncore_type snb_uncore_arb = {
	.name = "arb",
	.num_counters = 2,
	.num_boxes = 1,
	.perf_ctr_bits = 44,
	.perf_ctr = SNB_UNC_ARB_PER_CTR0,
	.event_ctl = SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask = SNB_UNC_RAW_EVENT_MASK,
	.msr_offset = SNB_UNC_ARB_MSR_OFFSET,
	.constraints = snb_uncore_arb_constraints,
	.ops = &snb_uncore_msr_ops,
	.format_group = &snb_uncore_format_group,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
	if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}

static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}

static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops skl_uncore_msr_ops = {
	.init_box = skl_uncore_msr_init_box,
	.enable_box = skl_uncore_msr_enable_box,
	.exit_box = skl_uncore_msr_exit_box,
	.disable_event = snb_uncore_msr_disable_event,
	.enable_event = snb_uncore_msr_enable_event,
	.read_counter = uncore_msr_read_counter,
};

static struct intel_uncore_type skl_uncore_cbox = {
	.name = "cbox",
	.num_counters = 4,
	.num_boxes = 5,
	.perf_ctr_bits = 44,
	.fixed_ctr_bits = 48,
	.perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr = SNB_UNC_FIXED_CTR,
	.fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed = 1,
	.event_mask = SNB_UNC_RAW_EVENT_MASK,
	.msr_offset = SNB_UNC_CBO_MSR_OFFSET,
	.ops = &skl_uncore_msr_ops,
	.format_group = &snb_uncore_format_group,
	.event_descs = snb_uncore_events,
};

static struct intel_uncore_type *skl_msr_uncores[] = {
	&skl_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

void skl_uncore_cpu_init(void)
{
	uncore_msr_uncores = skl_msr_uncores;
	if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
}
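
/*
 * Descriptive note (added): the client IMC exposes a pair of free-running
 * counters (data reads and data writes) through a BAR-mapped MMIO region
 * rather than through MSRs. Both count 64-byte cache lines, which is where
 * the 6.103515625e-5 scale below comes from: 64 / 2^20 = 6.103515625e-5 MiB
 * per increment.
 *
 * Assuming the PMU shows up under its usual sysfs name ("uncore_imc"), the
 * events can be used from user space roughly like this:
 *
 *	perf stat -a -e uncore_imc/data_reads/,uncore_imc/data_writes/ -I 1000
 */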

enum {
	SNB_PCI_UNCORE_IMC,
};

static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads, "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	{ /* end: all zeroes */ },
};

#define SNB_UNCORE_PCI_IMC_EVENT_MASK		0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET		0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE		0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS		0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE	0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES		0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE	0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE		SNB_UNCORE_PCI_IMC_DATA_READS_BASE

static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};

static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}

static void snb_uncore_imc_exit_box(struct intel_uncore_box *box)
{
	iounmap(box->io_addr);
}

static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}
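
/*
 * Descriptive note (added): the IMC counters are free running, so there is
 * nothing to enable or disable; the empty box/event callbacks above only
 * exist to satisfy the generic uncore ops. Reading a counter is a plain
 * 32-bit load from the MMIO mapping set up in snb_uncore_imc_init_box().
 */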

static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
}

/*
 * Custom event_init() function because we define our own fixed, free
 * running counters, so we do not want to conflict with generic uncore
 * logic. Also simplifies processing.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;

	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;

	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FIXED;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FIXED + 1;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.config = cfg;
	event->hw.idx = idx;

	/* no group validation needed, we have free running counters */

	return 0;
}

static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}

static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	u64 count;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;
	box->n_active++;

	list_add_tail(&event->active_entry, &box->active_list);

	count = snb_uncore_imc_read_counter(box, event);
	local64_set(&event->hw.prev_count, count);

	if (box->n_active == 1)
		uncore_pmu_start_hrtimer(box);
}

static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		box->n_active--;

		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		list_del(&event->active_entry);

		if (box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
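
/*
 * Descriptive note (added): the hardware counters are only 32 bits wide, so
 * start/stop also arm and cancel the box hrtimer (interval set in
 * snb_uncore_imc_init_box()); the timer periodically folds the raw MMIO
 * value into the 64-bit perf count so that counter wraps are not missed.
 */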

static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!box)
		return -ENODEV;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	snb_uncore_imc_event_start(event, 0);

	return 0;
}

static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
{
	snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
}

int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	struct pci2phy_map *map;
	int bus, segment;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;
	segment = pci_domain_nr(dev->bus);

	raw_spin_lock(&pci2phy_map_lock);
	map = __find_pci2phy_map(segment);
	if (!map) {
		raw_spin_unlock(&pci2phy_map_lock);
		pci_dev_put(dev);
		return -ENOMEM;
	}
	map->pbus_to_physid[bus] = 0;
	raw_spin_unlock(&pci2phy_map_lock);

	pci_dev_put(dev);

	return 0;
}

static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr = perf_invalid_context,
	.event_init = snb_uncore_imc_event_init,
	.add = snb_uncore_imc_event_add,
	.del = snb_uncore_imc_event_del,
	.start = snb_uncore_imc_event_start,
	.stop = snb_uncore_imc_event_stop,
	.read = uncore_pmu_event_read,
};

static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box = snb_uncore_imc_init_box,
	.exit_box = snb_uncore_imc_exit_box,
	.enable_box = snb_uncore_imc_enable_box,
	.disable_box = snb_uncore_imc_disable_box,
	.disable_event = snb_uncore_imc_disable_event,
	.enable_event = snb_uncore_imc_enable_event,
	.hw_config = snb_uncore_imc_hw_config,
	.read_counter = snb_uncore_imc_read_counter,
};

static struct intel_uncore_type snb_uncore_imc = {
	.name = "imc",
	.num_counters = 2,
	.num_boxes = 1,
	.fixed_ctr_bits = 32,
	.fixed_ctr = SNB_UNCORE_PCI_IMC_CTR_BASE,
	.event_descs = snb_uncore_imc_events,
	.format_group = &snb_uncore_imc_format_group,
	.perf_ctr = SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
	.event_mask = SNB_UNCORE_PCI_IMC_EVENT_MASK,
	.ops = &snb_uncore_imc_ops,
	.pmu = &snb_uncore_imc_pmu,
};

static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC] = &snb_uncore_imc,
	NULL,
};
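
/*
 * Descriptive note (added): per-generation PCI device ID tables and drivers
 * follow. They all reuse the same SNB_PCI_UNCORE_IMC uncore type; only the
 * matched device ID differs from one client generation to the next.
 */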

static const struct pci_device_id snb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id skl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},

	{ /* end: all zeroes */ },
};

static struct pci_driver snb_uncore_pci_driver = {
	.name = "snb_uncore",
	.id_table = snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name = "ivb_uncore",
	.id_table = ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name = "hsw_uncore",
	.id_table = hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
	.name = "bdw_uncore",
	.id_table = bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
	.name = "skl_uncore",
	.id_table = skl_uncore_pci_ids,
};

struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};

#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }

static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
	IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core Y */
	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
	IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Dual Core */
	IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */
	IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */
	IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */
	{ /* end marker */ }
};
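
/*
 * Descriptive note (added): device discovery. for_each_imc_pci_id() walks
 * the table above, and imc_uncore_find_dev() returns the pci_driver of the
 * first IMC that is actually present; snb_pci2phy_map_init() doubles as the
 * presence check.
 */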

#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)

static struct pci_driver *imc_uncore_find_dev(void)
{
	const struct imc_uncore_pci_dev *p;
	int ret;

	for_each_imc_pci_id(p, desktop_imc_pci_ids) {
		ret = snb_pci2phy_map_init(p->pci_id);
		if (ret == 0)
			return p->driver;
	}
	return NULL;
}

static int imc_uncore_pci_init(void)
{
	struct pci_driver *imc_drv = imc_uncore_find_dev();

	if (!imc_drv)
		return -ENODEV;

	uncore_pci_uncores = snb_pci_uncores;
	uncore_pci_driver = imc_drv;

	return 0;
}

int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}

static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box = nhm_uncore_msr_disable_box,
	.enable_box = nhm_uncore_msr_enable_box,
	.disable_event = snb_uncore_msr_disable_event,
	.enable_event = nhm_uncore_msr_enable_event,
	.read_counter = uncore_msr_read_counter,
};
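
/*
 * Descriptive note (added): one uncore box per package, with eight
 * general-purpose counters plus a fixed clockticks counter (event 0xff).
 * The empty .name below should make the generic uncore code register this
 * PMU under the bare "uncore" name in sysfs (naming is handled by
 * uncore_pmu_register()).
 */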

static struct intel_uncore_type nhm_uncore = {
	.name = "",
	.num_counters = 8,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.fixed_ctr_bits = 48,
	.event_ctl = NHM_UNC_PERFEVTSEL0,
	.perf_ctr = NHM_UNC_UNCORE_PMC0,
	.fixed_ctr = NHM_UNC_FIXED_CTR,
	.fixed_ctl = NHM_UNC_FIXED_CTR_CTRL,
	.event_mask = NHM_UNC_RAW_EVENT_MASK,
	.event_descs = nhm_uncore_events,
	.ops = &nhm_uncore_msr_ops,
	.format_group = &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};

void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */