/* Nehalem/Sandy Bridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"

/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC	0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC	0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC	0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC	0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC	0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC	0x1604
#define PCI_DEVICE_ID_INTEL_SKL_IMC	0x191f
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC	0x190c

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
#define SNB_UNC_CTL_UMASK_MASK			0x0000ff00
#define SNB_UNC_CTL_EDGE_DET			(1 << 18)
#define SNB_UNC_CTL_EN				(1 << 22)
#define SNB_UNC_CTL_INVERT			(1 << 23)
#define SNB_UNC_CTL_CMASK_MASK			0x1f000000
#define NHM_UNC_CTL_CMASK_MASK			0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN		(1 << 0)

#define SNB_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 NHM_UNC_CTL_CMASK_MASK)

/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL			0x391
#define SNB_UNC_FIXED_CTR_CTRL			0x394
#define SNB_UNC_FIXED_CTR			0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL		((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN			(1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0		0x700
#define SNB_UNC_CBO_0_PER_CTR0			0x706
#define SNB_UNC_CBO_MSR_OFFSET			0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0			0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0			0x3b2
#define SNB_UNC_ARB_MSR_OFFSET			0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL			0x391
#define NHM_UNC_FIXED_CTR			0x394
#define NHM_UNC_FIXED_CTR_CTRL			0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL		((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC		(1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0			0x3c0
#define NHM_UNC_UNCORE_PMC0			0x3b0

/* SKL uncore global control */
#define SKL_UNC_PERF_GLOBAL_CTL			0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL		((1 << 5) - 1)

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");

/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

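/*
 * Note: the global control MSR is shared by all boxes on the die, which is
 * why only the first PMU instance (pmu_idx == 0) touches it in init/exit
 * above, while enable_box below simply re-arms the same enable bits.
 */
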
static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}

static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}

static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};

static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};

static struct attribute_group snb_uncore_format_group = {
	.name		= "format",
	.attrs		= snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.enable_box	= snb_uncore_msr_enable_box,
	.exit_box	= snb_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type snb_uncore_arb = {
	.name		= "arb",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= SNB_UNC_ARB_PER_CTR0,
	.event_ctl	= SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
	if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}

static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}

static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops skl_uncore_msr_ops = {
	.init_box	= skl_uncore_msr_init_box,
	.enable_box	= skl_uncore_msr_enable_box,
	.exit_box	= skl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

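/*
 * Skylake client reuses the Sandy Bridge C-Box counter/control MSR layout;
 * only the global control MSR differs (SKL_UNC_PERF_GLOBAL_CTL, 0xe01).
 * Up to five C-Box instances are declared and then capped to the core
 * count in skl_uncore_cpu_init() below.
 */
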
static struct intel_uncore_type skl_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 4,
	.num_boxes	= 5,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &skl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type *skl_msr_uncores[] = {
	&skl_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

void skl_uncore_cpu_init(void)
{
	uncore_msr_uncores = skl_msr_uncores;
	if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
}

enum {
	SNB_PCI_UNCORE_IMC,
};

static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	{ /* end: all zeroes */ },
};

#define SNB_UNCORE_PCI_IMC_EVENT_MASK		0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET		0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE		0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS		0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE	0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES		0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE	0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE		SNB_UNCORE_PCI_IMC_DATA_READS_BASE

static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};

static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}

static void snb_uncore_imc_exit_box(struct intel_uncore_box *box)
{
	iounmap(box->io_addr);
}

static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
}

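/*
 * The IMC counters are free-running 32-bit MMIO registers in the BAR
 * mapped by snb_uncore_imc_init_box(): data_reads lives at offset 0x5050
 * and data_writes at 0x5054.  Each increment corresponds to 64 bytes of
 * traffic, which is where the advertised scale of 6.103515625e-5 MiB
 * (64 / 2^20) comes from.  Example usage, assuming the PMU registers
 * under the usual "uncore_imc" name:
 *
 *   perf stat -a -e uncore_imc/data_reads/,uncore_imc/data_writes/ -- sleep 1
 */
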
/*
 * Custom event_init() function because we define our own fixed, free
 * running counters, so we do not want to conflict with the generic
 * uncore logic. This also simplifies processing.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;

	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;

	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;
	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FIXED;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FIXED + 1;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.config = cfg;
	event->hw.idx = idx;

	/* no group validation needed, we have free running counters */

	return 0;
}

static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}

static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	u64 count;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;
	box->n_active++;

	list_add_tail(&event->active_entry, &box->active_list);

	count = snb_uncore_imc_read_counter(box, event);
	local64_set(&event->hw.prev_count, count);

	if (box->n_active == 1)
		uncore_pmu_start_hrtimer(box);
}

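/*
 * Because the counters are free running and only 32 bits wide
 * (fixed_ctr_bits = 32), the box hrtimer started above is what
 * periodically folds the raw MMIO values into the 64-bit perf count
 * before they can wrap.
 */
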
static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		box->n_active--;

		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		list_del(&event->active_entry);

		if (box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!box)
		return -ENODEV;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	snb_uncore_imc_event_start(event, 0);

	box->n_events++;

	return 0;
}

static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			--box->n_events;
			break;
		}
	}
}

int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	struct pci2phy_map *map;
	int bus, segment;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;
	segment = pci_domain_nr(dev->bus);

	raw_spin_lock(&pci2phy_map_lock);
	map = __find_pci2phy_map(segment);
	if (!map) {
		raw_spin_unlock(&pci2phy_map_lock);
		pci_dev_put(dev);
		return -ENOMEM;
	}
	map->pbus_to_physid[bus] = 0;
	raw_spin_unlock(&pci2phy_map_lock);

	pci_dev_put(dev);

	return 0;
}

static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= snb_uncore_imc_event_init,
	.add		= snb_uncore_imc_event_add,
	.del		= snb_uncore_imc_event_del,
	.start		= snb_uncore_imc_event_start,
	.stop		= snb_uncore_imc_event_stop,
	.read		= uncore_pmu_event_read,
};

static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box	= snb_uncore_imc_init_box,
	.exit_box	= snb_uncore_imc_exit_box,
	.enable_box	= snb_uncore_imc_enable_box,
	.disable_box	= snb_uncore_imc_disable_box,
	.disable_event	= snb_uncore_imc_disable_event,
	.enable_event	= snb_uncore_imc_enable_event,
	.hw_config	= snb_uncore_imc_hw_config,
	.read_counter	= snb_uncore_imc_read_counter,
};

static struct intel_uncore_type snb_uncore_imc = {
	.name		= "imc",
	.num_counters	= 2,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 32,
	.fixed_ctr	= SNB_UNCORE_PCI_IMC_CTR_BASE,
	.event_descs	= snb_uncore_imc_events,
	.format_group	= &snb_uncore_imc_format_group,
	.perf_ctr	= SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
	.event_mask	= SNB_UNCORE_PCI_IMC_EVENT_MASK,
	.ops		= &snb_uncore_imc_ops,
	.pmu		= &snb_uncore_imc_pmu,
};

static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC]	= &snb_uncore_imc,
	NULL,
};

static const struct pci_device_id snb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id skl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
	.name		= "bdw_uncore",
	.id_table	= bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
	.name		= "skl_uncore",
	.id_table	= skl_uncore_pci_ids,
};

struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};
#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }

static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
	IMC_DEV(SKL_IMC, &skl_uncore_pci_driver),    /* 6th Gen Core */
	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
	{  /* end marker */ }
};

#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)

static struct pci_driver *imc_uncore_find_dev(void)
{
	const struct imc_uncore_pci_dev *p;
	int ret;

	for_each_imc_pci_id(p, desktop_imc_pci_ids) {
		ret = snb_pci2phy_map_init(p->pci_id);
		if (ret == 0)
			return p->driver;
	}
	return NULL;
}

static int imc_uncore_pci_init(void)
{
	struct pci_driver *imc_drv = imc_uncore_find_dev();

	if (!imc_drv)
		return -ENODEV;

	uncore_pci_uncores = snb_pci_uncores;
	uncore_pci_driver = imc_drv;

	return 0;
}

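/*
 * Every client generation listed in desktop_imc_pci_ids programs its IMC
 * the same way, so the per-generation hooks below all funnel into
 * imc_uncore_pci_init(), which selects the matching pci_driver by probing
 * the ID list.
 */
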
int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}

static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type nhm_uncore = {
	.name		= "",
	.num_counters	= 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};

void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */