// SPDX-License-Identifier: GPL-2.0
/* Nehalem/SandyBridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"

/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC         0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC         0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC      0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC         0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC       0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC         0x1604
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC       0x1904
#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC       0x190c
#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC      0x1900
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC      0x1910
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC      0x190f
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC      0x191f
#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC       0x590c
#define PCI_DEVICE_ID_INTEL_KBL_U_IMC       0x5904
#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC      0x5914
#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC      0x590f
#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC      0x591f
#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC      0x3ecc
#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC      0x3ed0
#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC      0x3e10
#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC      0x3ec4
#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC    0x3e0f
#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC    0x3e1f
#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC    0x3ec2
#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC    0x3e30
#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC    0x3e18
#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC    0x3ec6
#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC    0x3e31
#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC    0x3e33
#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC    0x3eca
#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC    0x3e32
#define PCI_DEVICE_ID_INTEL_ICL_U_IMC       0x8a02
#define PCI_DEVICE_ID_INTEL_ICL_U2_IMC      0x8a12

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK             0x000000ff
#define SNB_UNC_CTL_UMASK_MASK              0x0000ff00
#define SNB_UNC_CTL_EDGE_DET                (1 << 18)
#define SNB_UNC_CTL_EN                      (1 << 22)
#define SNB_UNC_CTL_INVERT                  (1 << 23)
#define SNB_UNC_CTL_CMASK_MASK              0x1f000000
#define NHM_UNC_CTL_CMASK_MASK              0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN            (1 << 0)

#define SNB_UNC_RAW_EVENT_MASK              (SNB_UNC_CTL_EV_SEL_MASK | \
                                             SNB_UNC_CTL_UMASK_MASK | \
                                             SNB_UNC_CTL_EDGE_DET | \
                                             SNB_UNC_CTL_INVERT | \
                                             SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK              (SNB_UNC_CTL_EV_SEL_MASK | \
                                             SNB_UNC_CTL_UMASK_MASK | \
                                             SNB_UNC_CTL_EDGE_DET | \
                                             SNB_UNC_CTL_INVERT | \
                                             NHM_UNC_CTL_CMASK_MASK)

/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL             0x391
#define SNB_UNC_FIXED_CTR_CTRL              0x394
#define SNB_UNC_FIXED_CTR                   0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL         ((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN               (1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0           0x700
#define SNB_UNC_CBO_0_PER_CTR0              0x706
#define SNB_UNC_CBO_MSR_OFFSET              0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0                0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0             0x3b2
#define SNB_UNC_ARB_MSR_OFFSET              0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL             0x391
#define NHM_UNC_FIXED_CTR                   0x394
#define NHM_UNC_FIXED_CTR_CTRL              0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL        ((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC            (1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0                 0x3c0
#define NHM_UNC_UNCORE_PMC0                 0x3b0
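
/*
 * Note on addressing: each successive Cbox/ARB counter bank sits at a
 * fixed stride from the box-0 registers above, i.e. box N's control
 * register is SNB_UNC_CBO_0_PERFEVTSEL0 + N * SNB_UNC_CBO_MSR_OFFSET
 * (see the .msr_offset fields in the type definitions below).
 */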

/* SKL uncore global control */
#define SKL_UNC_PERF_GLOBAL_CTL             0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL         ((1 << 5) - 1)

/* ICL Cbo register */
#define ICL_UNC_CBO_CONFIG                  0x396
#define ICL_UNC_NUM_CBO_MASK                0xf
#define ICL_UNC_CBO_0_PER_CTR0              0x702
#define ICL_UNC_CBO_MSR_OFFSET              0x8

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");

/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (hwc->idx < UNCORE_PMC_IDX_FIXED)
                wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
        else
                wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        wrmsrl(event->hw.config_base, 0);
}

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0) {
                wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
                        SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
        }
}

static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
        wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
                SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}

static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0)
                wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}

static struct uncore_event_desc snb_uncore_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
        { /* end: all zeroes */ },
};

static struct attribute *snb_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_cmask5.attr,
        NULL,
};

static const struct attribute_group snb_uncore_format_group = {
        .name = "format",
        .attrs = snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
        .init_box = snb_uncore_msr_init_box,
        .enable_box = snb_uncore_msr_enable_box,
        .exit_box = snb_uncore_msr_exit_box,
        .disable_event = snb_uncore_msr_disable_event,
        .enable_event = snb_uncore_msr_enable_event,
        .read_counter = uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_arb_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
        EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snb_uncore_cbox = {
        .name = "cbox",
        .num_counters = 2,
        .num_boxes = 4,
        .perf_ctr_bits = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
        .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
        .fixed_ctr = SNB_UNC_FIXED_CTR,
        .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
        .single_fixed = 1,
        .event_mask = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset = SNB_UNC_CBO_MSR_OFFSET,
        .ops = &snb_uncore_msr_ops,
        .format_group = &snb_uncore_format_group,
        .event_descs = snb_uncore_events,
};
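
/*
 * Illustrative usage (not part of the driver): the format attributes
 * above expose the raw event fields to user space, so a Cbox event can
 * be requested as, e.g.,
 *      perf stat -e 'uncore_cbox_0/event=0xXX,umask=0xYY/'
 * where the event/umask values are placeholders, not a known event
 * encoding; perf assembles them into the config bits covered by
 * SNB_UNC_RAW_EVENT_MASK.
 */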

static struct intel_uncore_type snb_uncore_arb = {
        .name = "arb",
        .num_counters = 2,
        .num_boxes = 1,
        .perf_ctr_bits = 44,
        .perf_ctr = SNB_UNC_ARB_PER_CTR0,
        .event_ctl = SNB_UNC_ARB_PERFEVTSEL0,
        .event_mask = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset = SNB_UNC_ARB_MSR_OFFSET,
        .constraints = snb_uncore_arb_constraints,
        .ops = &snb_uncore_msr_ops,
        .format_group = &snb_uncore_format_group,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
        &snb_uncore_cbox,
        &snb_uncore_arb,
        NULL,
};

void snb_uncore_cpu_init(void)
{
        uncore_msr_uncores = snb_msr_uncores;
        if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}

static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0) {
                wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
                        SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
        }

        /* The 8th CBOX has different MSR space */
        if (box->pmu->pmu_idx == 7)
                __set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
}

static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
        wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
                SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}

static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0)
                wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops skl_uncore_msr_ops = {
        .init_box = skl_uncore_msr_init_box,
        .enable_box = skl_uncore_msr_enable_box,
        .exit_box = skl_uncore_msr_exit_box,
        .disable_event = snb_uncore_msr_disable_event,
        .enable_event = snb_uncore_msr_enable_event,
        .read_counter = uncore_msr_read_counter,
};

static struct intel_uncore_type skl_uncore_cbox = {
        .name = "cbox",
        .num_counters = 4,
        .num_boxes = 8,
        .perf_ctr_bits = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
        .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
        .fixed_ctr = SNB_UNC_FIXED_CTR,
        .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
        .single_fixed = 1,
        .event_mask = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset = SNB_UNC_CBO_MSR_OFFSET,
        .ops = &skl_uncore_msr_ops,
        .format_group = &snb_uncore_format_group,
        .event_descs = snb_uncore_events,
};

static struct intel_uncore_type *skl_msr_uncores[] = {
        &skl_uncore_cbox,
        &snb_uncore_arb,
        NULL,
};

void skl_uncore_cpu_init(void)
{
        uncore_msr_uncores = skl_msr_uncores;
        if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
        snb_uncore_arb.ops = &skl_uncore_msr_ops;
}

static struct intel_uncore_type icl_uncore_cbox = {
        .name = "cbox",
        .num_counters = 4,
        .perf_ctr_bits = 44,
        .perf_ctr = ICL_UNC_CBO_0_PER_CTR0,
        .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
        .event_mask = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset = ICL_UNC_CBO_MSR_OFFSET,
        .ops = &skl_uncore_msr_ops,
        .format_group = &snb_uncore_format_group,
};

static struct uncore_event_desc icl_uncore_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
        { /* end: all zeroes */ },
};

static struct attribute *icl_uncore_clock_formats_attr[] = {
        &format_attr_event.attr,
        NULL,
};

static struct attribute_group icl_uncore_clock_format_group = {
        .name = "format",
        .attrs = icl_uncore_clock_formats_attr,
};
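
/*
 * On ICL the socket clock ticks are counted by a separate single-box
 * "clock" PMU that carries only the fixed counter (no general-purpose
 * counters), hence the fixed-counter-only type below.
 */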
static struct intel_uncore_type icl_uncore_clockbox = {
        .name = "clock",
        .num_counters = 1,
        .num_boxes = 1,
        .fixed_ctr_bits = 48,
        .fixed_ctr = SNB_UNC_FIXED_CTR,
        .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
        .single_fixed = 1,
        .event_mask = SNB_UNC_CTL_EV_SEL_MASK,
        .format_group = &icl_uncore_clock_format_group,
        .ops = &skl_uncore_msr_ops,
        .event_descs = icl_uncore_events,
};

static struct intel_uncore_type *icl_msr_uncores[] = {
        &icl_uncore_cbox,
        &snb_uncore_arb,
        &icl_uncore_clockbox,
        NULL,
};

static int icl_get_cbox_num(void)
{
        u64 num_boxes;

        rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes);

        return num_boxes & ICL_UNC_NUM_CBO_MASK;
}

void icl_uncore_cpu_init(void)
{
        uncore_msr_uncores = icl_msr_uncores;
        icl_uncore_cbox.num_boxes = icl_get_cbox_num();
        snb_uncore_arb.ops = &skl_uncore_msr_ops;
}

enum {
        SNB_PCI_UNCORE_IMC,
};

static struct uncore_event_desc snb_uncore_imc_events[] = {
        INTEL_UNCORE_EVENT_DESC(data_reads, "event=0x01"),
        INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

        INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
        INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

        { /* end: all zeroes */ },
};

#define SNB_UNCORE_PCI_IMC_EVENT_MASK       0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET       0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE         0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS       0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE  0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES      0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE 0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE         SNB_UNCORE_PCI_IMC_DATA_READS_BASE

enum perf_snb_uncore_imc_freerunning_types {
        SNB_PCI_UNCORE_IMC_DATA = 0,
        SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters snb_uncore_imc_freerunning[] = {
        [SNB_PCI_UNCORE_IMC_DATA] = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE, 0x4, 0x0, 2, 32 },
};

static struct attribute *snb_uncore_imc_formats_attr[] = {
        &format_attr_event.attr,
        NULL,
};

static const struct attribute_group snb_uncore_imc_format_group = {
        .name = "format",
        .attrs = snb_uncore_imc_formats_attr,
};

static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;
        int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
        resource_size_t addr;
        u32 pci_dword;

        pci_read_config_dword(pdev, where, &pci_dword);
        addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
        pci_read_config_dword(pdev, where + 4, &pci_dword);
        addr |= ((resource_size_t)pci_dword << 32);
#endif

        addr &= ~(PAGE_SIZE - 1);

        box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
        box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}

static void snb_uncore_imc_exit_box(struct intel_uncore_box *box)
{
        iounmap(box->io_addr);
}

static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}
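
/*
 * The IMC counters are free-running: they cannot be started, stopped or
 * reprogrammed, which is why the box/event enable and disable callbacks
 * above are deliberately empty stubs.
 */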
static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
}

/*
 * Keep the custom event_init() function compatible with old event
 * encoding for free running counters.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
        struct hw_perf_event *hwc = &event->hw;
        u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
        int idx, base;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        pmu = uncore_event_to_pmu(event);
        /* no device found for this pmu */
        if (pmu->func_id < 0)
                return -ENOENT;

        /* Sampling not supported yet */
        if (hwc->sample_period)
                return -EINVAL;

        /* unsupported modes and filters */
        if (event->attr.sample_period) /* no sampling */
                return -EINVAL;

        /*
         * Place all uncore events for a particular physical package
         * onto a single cpu
         */
        if (event->cpu < 0)
                return -EINVAL;

        /* check only supported bits are set */
        if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
                return -EINVAL;

        box = uncore_pmu_to_box(pmu, event->cpu);
        if (!box || box->cpu < 0)
                return -EINVAL;

        event->cpu = box->cpu;
        event->pmu_private = box;

        event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

        event->hw.idx = -1;
        event->hw.last_tag = ~0ULL;
        event->hw.extra_reg.idx = EXTRA_REG_NONE;
        event->hw.branch_reg.idx = EXTRA_REG_NONE;
        /*
         * check event is known (whitelist, determines counter)
         */
        switch (cfg) {
        case SNB_UNCORE_PCI_IMC_DATA_READS:
                base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
                idx = UNCORE_PMC_IDX_FREERUNNING;
                break;
        case SNB_UNCORE_PCI_IMC_DATA_WRITES:
                base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
                idx = UNCORE_PMC_IDX_FREERUNNING;
                break;
        default:
                return -EINVAL;
        }

        /* must be done before validate_group */
        event->hw.event_base = base;
        event->hw.idx = idx;

        /* Convert to standard encoding format for freerunning counters */
        event->hw.config = ((cfg - 1) << 8) | 0x10ff;

        /* no group validation needed, we have free running counters */

        return 0;
}

static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        return 0;
}

int snb_pci2phy_map_init(int devid)
{
        struct pci_dev *dev = NULL;
        struct pci2phy_map *map;
        int bus, segment;

        dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
        if (!dev)
                return -ENOTTY;

        bus = dev->bus->number;
        segment = pci_domain_nr(dev->bus);

        raw_spin_lock(&pci2phy_map_lock);
        map = __find_pci2phy_map(segment);
        if (!map) {
                raw_spin_unlock(&pci2phy_map_lock);
                pci_dev_put(dev);
                return -ENOMEM;
        }
        map->pbus_to_physid[bus] = 0;
        raw_spin_unlock(&pci2phy_map_lock);

        pci_dev_put(dev);

        return 0;
}

static struct pmu snb_uncore_imc_pmu = {
        .task_ctx_nr = perf_invalid_context,
        .event_init = snb_uncore_imc_event_init,
        .add = uncore_pmu_event_add,
        .del = uncore_pmu_event_del,
        .start = uncore_pmu_event_start,
        .stop = uncore_pmu_event_stop,
        .read = uncore_pmu_event_read,
        .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
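
/*
 * Because event_init() above does its own whitelisting of the
 * free-running counters, the IMC registers this dedicated struct pmu
 * (wired up through .pmu below) rather than the default uncore PMU.
 */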
static struct intel_uncore_ops snb_uncore_imc_ops = {
        .init_box = snb_uncore_imc_init_box,
        .exit_box = snb_uncore_imc_exit_box,
        .enable_box = snb_uncore_imc_enable_box,
        .disable_box = snb_uncore_imc_disable_box,
        .disable_event = snb_uncore_imc_disable_event,
        .enable_event = snb_uncore_imc_enable_event,
        .hw_config = snb_uncore_imc_hw_config,
        .read_counter = snb_uncore_imc_read_counter,
};

static struct intel_uncore_type snb_uncore_imc = {
        .name = "imc",
        .num_counters = 2,
        .num_boxes = 1,
        .num_freerunning_types = SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
        .freerunning = snb_uncore_imc_freerunning,
        .event_descs = snb_uncore_imc_events,
        .format_group = &snb_uncore_imc_format_group,
        .ops = &snb_uncore_imc_ops,
        .pmu = &snb_uncore_imc_pmu,
};

static struct intel_uncore_type *snb_pci_uncores[] = {
        [SNB_PCI_UNCORE_IMC] = &snb_uncore_imc,
        NULL,
};

static const struct pci_device_id snb_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id skl_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id icl_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U2_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};
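
/*
 * The PCI drivers below fill in only .name and .id_table; the generic
 * uncore PCI code is expected to supply the probe/remove callbacks
 * before the selected driver is registered.
 */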
static struct pci_driver snb_uncore_pci_driver = {
        .name = "snb_uncore",
        .id_table = snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
        .name = "ivb_uncore",
        .id_table = ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
        .name = "hsw_uncore",
        .id_table = hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
        .name = "bdw_uncore",
        .id_table = bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
        .name = "skl_uncore",
        .id_table = skl_uncore_pci_ids,
};

static struct pci_driver icl_uncore_pci_driver = {
        .name = "icl_uncore",
        .id_table = icl_uncore_pci_ids,
};

struct imc_uncore_pci_dev {
        __u32 pci_id;
        struct pci_driver *driver;
};
#define IMC_DEV(a, d) \
        { .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }

static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
        IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
        IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
        IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
        IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
        IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
        IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
        IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core Y */
        IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
        IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Dual Core */
        IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */
        IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */
        IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */
        IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core Y */
        IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U */
        IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U Quad Core */
        IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Dual Core */
        IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Quad Core */
        IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 2 Cores */
        IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 4 Cores */
        IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 4 Cores */
        IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 6 Cores */
        IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 2 Cores Desktop */
        IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Desktop */
        IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Desktop */
        IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Desktop */
        IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Work Station */
        IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Work Station */
        IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Work Station */
        IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Server */
        IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Server */
        IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Server */
        IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver),  /* 10th Gen Core Mobile */
        IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver), /* 10th Gen Core Mobile */
        { /* end marker */ }
};

#define for_each_imc_pci_id(x, t) \
        for (x = (t); (x)->pci_id; x++)

static struct pci_driver *imc_uncore_find_dev(void)
{
        const struct imc_uncore_pci_dev *p;
        int ret;

        for_each_imc_pci_id(p, desktop_imc_pci_ids) {
                ret = snb_pci2phy_map_init(p->pci_id);
                if (ret == 0)
                        return p->driver;
        }
        return NULL;
}
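
/*
 * imc_uncore_find_dev() walks the desktop IMC ID list in order and
 * returns the driver for the first IMC device actually present in the
 * system; the successful snb_pci2phy_map_init() call also seeds the
 * bus-to-package mapping.
 */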
static int imc_uncore_pci_init(void)
{
        struct pci_driver *imc_drv = imc_uncore_find_dev();

        if (!imc_drv)
                return -ENODEV;

        uncore_pci_uncores = snb_pci_uncores;
        uncore_pci_driver = imc_drv;

        return 0;
}

int snb_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
        wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
        wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (hwc->idx < UNCORE_PMC_IDX_FIXED)
                wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
        else
                wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}

static struct attribute *nhm_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_cmask8.attr,
        NULL,
};

static const struct attribute_group nhm_uncore_format_group = {
        .name = "format",
        .attrs = nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
        INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
        INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
        { /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
        .disable_box = nhm_uncore_msr_disable_box,
        .enable_box = nhm_uncore_msr_enable_box,
        .disable_event = snb_uncore_msr_disable_event,
        .enable_event = nhm_uncore_msr_enable_event,
        .read_counter = uncore_msr_read_counter,
};

static struct intel_uncore_type nhm_uncore = {
        .name = "",
        .num_counters = 8,
        .num_boxes = 1,
        .perf_ctr_bits = 48,
        .fixed_ctr_bits = 48,
        .event_ctl = NHM_UNC_PERFEVTSEL0,
        .perf_ctr = NHM_UNC_UNCORE_PMC0,
        .fixed_ctr = NHM_UNC_FIXED_CTR,
        .fixed_ctl = NHM_UNC_FIXED_CTR_CTRL,
        .event_mask = NHM_UNC_RAW_EVENT_MASK,
        .event_descs = nhm_uncore_events,
        .ops = &nhm_uncore_msr_ops,
        .format_group = &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
        &nhm_uncore,
        NULL,
};

void nhm_uncore_cpu_init(void)
{
        uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */