/*
 * Support cstate residency counters
 *
 * Copyright (C) 2015, Intel Corp.
 * Author: Kan Liang (kan.liang@intel.com)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 */

/*
 * This file exports cstate-related free-running (read-only) counters
 * for perf. These counters may be used simultaneously by other tools,
 * such as turbostat. However, it still makes sense to implement them
 * in perf, because we can conveniently collect them together with
 * other events, and tools can use them without special MSR access
 * code.
 *
 * The events only support system-wide counting. There is no sampling
 * support because the hardware does not support it.
 *
 * According to the counters' scope and category, two PMUs are
 * registered with the perf_event core subsystem.
 * - 'cstate_core': The counter is available for each physical core.
 *   The counters include CORE_C*_RESIDENCY.
 * - 'cstate_pkg': The counter is available for each physical package.
 *   The counters include PKG_C*_RESIDENCY.
 *
 * All of these counters are specified in the Intel® 64 and IA-32
 * Architectures Software Developer's Manual Vol3b.
 *
 * Model specific counters:
 * MSR_CORE_C1_RES: CORE C1 Residency Counter
 *        perf code: 0x00
 *        Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL,RPL,MTL
 *        Scope: Core (each processor core has an MSR)
 * MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
 *        perf code: 0x01
 *        Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM,CNL,KBL,
 *                         CML,TNT
 *        Scope: Core
 * MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
 *        perf code: 0x02
 *        Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
 *                         GLM,CNL,KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL,
 *                         RPL,SPR,MTL
 *        Scope: Core
 * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
 *        perf code: 0x03
 *        Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,ICL,TGL,
 *                         RKL,ADL,RPL,MTL
 *        Scope: Core
 * MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
 *        perf code: 0x00
 *        Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,KBL,CML,
 *                         ICL,ICX,TGL,TNT,RKL,ADL,RPL,SPR,MTL
 *        Scope: Package (physical package)
 * MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
 *        perf code: 0x01
 *        Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
 *                         KBL,CML,ICL,TGL,TNT,RKL,ADL,RPL,MTL
 *        Scope: Package (physical package)
 * MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
 *        perf code: 0x02
 *        Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
 *                         GLM,CNL,KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL,
 *                         RPL,SPR,MTL
 *        Scope: Package (physical package)
 * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
 *        perf code: 0x03
 *        Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,
 *                         ICL,TGL,RKL
 *        Scope: Package (physical package)
 * MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
 *        perf code: 0x04
 *        Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,ADL,RPL,
 *                         MTL
 *        Scope: Package (physical package)
 * MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
 *        perf code: 0x05
 *        Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL
 *        Scope: Package (physical package)
 * MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
 *        perf code: 0x06
 *        Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,TNT,RKL,
 *                         ADL,RPL,MTL
 *        Scope: Package (physical package)
 *
 */
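
/*
 * Example usage (a sketch, not part of the driver): once the PMUs
 * below are registered, their events can be counted system-wide with
 * perf, e.g.:
 *
 *        perf stat -a -e cstate_core/c6-residency/ sleep 1
 *
 * The available event set depends on the CPU model probed at load
 * time; residency counters that fail the probe are simply not listed
 * under the PMU's "events" directory in sysfs.
 */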

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/nospec.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"
#include "../probe.h"

MODULE_LICENSE("GPL");

#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)         \
static ssize_t __cstate_##_var##_show(struct device *dev,       \
                                      struct device_attribute *attr, \
                                      char *page)               \
{                                                               \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);             \
        return sprintf(page, _format "\n");                     \
}                                                               \
static struct device_attribute format_attr_##_var =             \
        __ATTR(_name, 0444, __cstate_##_var##_show, NULL)
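
/*
 * For illustration: DEFINE_CSTATE_FORMAT_ATTR(core_event, event,
 * "config:0-63") below defines __cstate_core_event_show(), which
 * prints "config:0-63", and a read-only device attribute named
 * "event". Wired into the "format" attribute group, it shows up in
 * sysfs as format/event under the PMU directory.
 */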

static ssize_t cstate_get_attr_cpumask(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf);

/* Model -> events mapping */
struct cstate_model {
        unsigned long core_events;
        unsigned long pkg_events;
        unsigned long quirks;
};

/* Quirk flags */
#define SLM_PKG_C6_USE_C7_MSR   (1UL << 0)
#define KNL_CORE_C6_MSR         (1UL << 1)

struct perf_cstate_msr {
        u64 msr;
        struct perf_pmu_events_attr *attr;
};

/* cstate_core PMU */
static struct pmu cstate_core_pmu;
static bool has_cstate_core;

enum perf_cstate_core_events {
        PERF_CSTATE_CORE_C1_RES = 0,
        PERF_CSTATE_CORE_C3_RES,
        PERF_CSTATE_CORE_C6_RES,
        PERF_CSTATE_CORE_C7_RES,

        PERF_CSTATE_CORE_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c1-residency, attr_cstate_core_c1, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, attr_cstate_core_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_core_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, attr_cstate_core_c7, "event=0x03");

static unsigned long core_msr_mask;

PMU_EVENT_GROUP(events, cstate_core_c1);
PMU_EVENT_GROUP(events, cstate_core_c3);
PMU_EVENT_GROUP(events, cstate_core_c6);
PMU_EVENT_GROUP(events, cstate_core_c7);

static bool test_msr(int idx, void *data)
{
        return test_bit(idx, (unsigned long *) data);
}
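
/*
 * test_msr() is the ->test() callback handed to perf_msr_probe() from
 * cstate_probe(): @data points to the model's core_events/pkg_events
 * bitmap, so only events whose bit is set for the detected model
 * survive probing (perf_msr_probe() additionally verifies that each
 * remaining MSR can actually be read).
 */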

static struct perf_msr core_msr[] = {
        [PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES, &group_cstate_core_c1, test_msr },
        [PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY, &group_cstate_core_c3, test_msr },
        [PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY, &group_cstate_core_c6, test_msr },
        [PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY, &group_cstate_core_c7, test_msr },
};

static struct attribute *attrs_empty[] = {
        NULL,
};

/*
 * There are no default events, but we need to create an
 * "events" group (with empty attrs) before updating
 * it with detected events.
 */
static struct attribute_group core_events_attr_group = {
        .name = "events",
        .attrs = attrs_empty,
};

DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
static struct attribute *core_format_attrs[] = {
        &format_attr_core_event.attr,
        NULL,
};

static struct attribute_group core_format_attr_group = {
        .name = "format",
        .attrs = core_format_attrs,
};

static cpumask_t cstate_core_cpu_mask;
static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);

static struct attribute *cstate_cpumask_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static struct attribute_group cpumask_attr_group = {
        .attrs = cstate_cpumask_attrs,
};

static const struct attribute_group *core_attr_groups[] = {
        &core_events_attr_group,
        &core_format_attr_group,
        &cpumask_attr_group,
        NULL,
};

/* cstate_pkg PMU */
static struct pmu cstate_pkg_pmu;
static bool has_cstate_pkg;

enum perf_cstate_pkg_events {
        PERF_CSTATE_PKG_C2_RES = 0,
        PERF_CSTATE_PKG_C3_RES,
        PERF_CSTATE_PKG_C6_RES,
        PERF_CSTATE_PKG_C7_RES,
        PERF_CSTATE_PKG_C8_RES,
        PERF_CSTATE_PKG_C9_RES,
        PERF_CSTATE_PKG_C10_RES,

        PERF_CSTATE_PKG_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c2-residency, attr_cstate_pkg_c2, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, attr_cstate_pkg_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_pkg_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, attr_cstate_pkg_c7, "event=0x03");
PMU_EVENT_ATTR_STRING(c8-residency, attr_cstate_pkg_c8, "event=0x04");
PMU_EVENT_ATTR_STRING(c9-residency, attr_cstate_pkg_c9, "event=0x05");
PMU_EVENT_ATTR_STRING(c10-residency, attr_cstate_pkg_c10, "event=0x06");

static unsigned long pkg_msr_mask;

PMU_EVENT_GROUP(events, cstate_pkg_c2);
PMU_EVENT_GROUP(events, cstate_pkg_c3);
PMU_EVENT_GROUP(events, cstate_pkg_c6);
PMU_EVENT_GROUP(events, cstate_pkg_c7);
PMU_EVENT_GROUP(events, cstate_pkg_c8);
PMU_EVENT_GROUP(events, cstate_pkg_c9);
PMU_EVENT_GROUP(events, cstate_pkg_c10);

static struct perf_msr pkg_msr[] = {
        [PERF_CSTATE_PKG_C2_RES] = { MSR_PKG_C2_RESIDENCY, &group_cstate_pkg_c2, test_msr },
        [PERF_CSTATE_PKG_C3_RES] = { MSR_PKG_C3_RESIDENCY, &group_cstate_pkg_c3, test_msr },
        [PERF_CSTATE_PKG_C6_RES] = { MSR_PKG_C6_RESIDENCY, &group_cstate_pkg_c6, test_msr },
        [PERF_CSTATE_PKG_C7_RES] = { MSR_PKG_C7_RESIDENCY, &group_cstate_pkg_c7, test_msr },
        [PERF_CSTATE_PKG_C8_RES] = { MSR_PKG_C8_RESIDENCY, &group_cstate_pkg_c8, test_msr },
        [PERF_CSTATE_PKG_C9_RES] = { MSR_PKG_C9_RESIDENCY, &group_cstate_pkg_c9, test_msr },
        [PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY, &group_cstate_pkg_c10, test_msr },
};

static struct attribute_group pkg_events_attr_group = {
        .name = "events",
        .attrs = attrs_empty,
};

DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
static struct attribute *pkg_format_attrs[] = {
        &format_attr_pkg_event.attr,
        NULL,
};
static struct attribute_group pkg_format_attr_group = {
        .name = "format",
        .attrs = pkg_format_attrs,
};

static cpumask_t cstate_pkg_cpu_mask;

static const struct attribute_group *pkg_attr_groups[] = {
        &pkg_events_attr_group,
        &pkg_format_attr_group,
        &cpumask_attr_group,
        NULL,
};

static ssize_t cstate_get_attr_cpumask(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        struct pmu *pmu = dev_get_drvdata(dev);

        if (pmu == &cstate_core_pmu)
                return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
        else if (pmu == &cstate_pkg_pmu)
                return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
        else
                return 0;
}
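
/*
 * Event initialization: validate the ABI-visible config value against
 * the event range and the mask of successfully probed MSRs, then
 * reroute the event to the designated reader CPU of the target core
 * or package, so each counter is only ever read on one CPU per scope.
 */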
static int cstate_pmu_event_init(struct perf_event *event)
{
        u64 cfg = event->attr.config;
        int cpu;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /* unsupported modes and filters */
        if (event->attr.sample_period) /* no sampling */
                return -EINVAL;

        if (event->cpu < 0)
                return -EINVAL;

        if (event->pmu == &cstate_core_pmu) {
                if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
                        return -EINVAL;
                cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_CORE_EVENT_MAX);
                if (!(core_msr_mask & (1 << cfg)))
                        return -EINVAL;
                event->hw.event_base = core_msr[cfg].msr;
                cpu = cpumask_any_and(&cstate_core_cpu_mask,
                                      topology_sibling_cpumask(event->cpu));
        } else if (event->pmu == &cstate_pkg_pmu) {
                if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
                        return -EINVAL;
                cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
                if (!(pkg_msr_mask & (1 << cfg)))
                        return -EINVAL;
                event->hw.event_base = pkg_msr[cfg].msr;
                cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
                                      topology_die_cpumask(event->cpu));
        } else {
                return -ENOENT;
        }

        if (cpu >= nr_cpu_ids)
                return -ENODEV;

        event->cpu = cpu;
        event->hw.config = cfg;
        event->hw.idx = -1;
        return 0;
}

static inline u64 cstate_pmu_read_counter(struct perf_event *event)
{
        u64 val;

        rdmsrl(event->hw.event_base, val);
        return val;
}
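
/*
 * The residency counters are free running, so an event's count is
 * maintained as a delta: read the MSR and fold (new - prev) into the
 * perf event count. local64_try_cmpxchg() re-reads the counter if a
 * concurrent update changed prev_count first, so no locking is needed.
 */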
static void cstate_pmu_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 prev_raw_count, new_raw_count;

        prev_raw_count = local64_read(&hwc->prev_count);
        do {
                new_raw_count = cstate_pmu_read_counter(event);
        } while (!local64_try_cmpxchg(&hwc->prev_count,
                                      &prev_raw_count, new_raw_count));

        local64_add(new_raw_count - prev_raw_count, &event->count);
}
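
/*
 * Lifecycle mapping for free-running counters: ->start() only
 * snapshots the current MSR value into prev_count, ->stop() folds the
 * final delta into the event count, and ->add()/->del() simply
 * delegate, since there is no counter hardware to program.
 */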
static void cstate_pmu_event_start(struct perf_event *event, int mode)
{
        local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
}

static void cstate_pmu_event_stop(struct perf_event *event, int mode)
{
        cstate_pmu_event_update(event);
}

static void cstate_pmu_event_del(struct perf_event *event, int mode)
{
        cstate_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int cstate_pmu_event_add(struct perf_event *event, int mode)
{
        if (mode & PERF_EF_START)
                cstate_pmu_event_start(event, mode);

        return 0;
}

/*
 * Check if the exiting CPU is the designated reader. If so, migrate
 * the events when there is a valid target available.
 */
static int cstate_cpu_exit(unsigned int cpu)
{
        unsigned int target;

        if (has_cstate_core &&
            cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) {

                target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
                /* Migrate events if there is a valid target */
                if (target < nr_cpu_ids) {
                        cpumask_set_cpu(target, &cstate_core_cpu_mask);
                        perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
                }
        }

        if (has_cstate_pkg &&
            cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) {

                target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
                /* Migrate events if there is a valid target */
                if (target < nr_cpu_ids) {
                        cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
                        perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
                }
        }
        return 0;
}

static int cstate_cpu_init(unsigned int cpu)
{
        unsigned int target;

        /*
         * If this is the first online thread of that core, set it in
         * the core cpu mask as the designated reader.
         */
        target = cpumask_any_and(&cstate_core_cpu_mask,
                                 topology_sibling_cpumask(cpu));

        if (has_cstate_core && target >= nr_cpu_ids)
                cpumask_set_cpu(cpu, &cstate_core_cpu_mask);

        /*
         * If this is the first online thread of that package, set it
         * in the package cpu mask as the designated reader.
         */
        target = cpumask_any_and(&cstate_pkg_cpu_mask,
                                 topology_die_cpumask(cpu));
        if (has_cstate_pkg && target >= nr_cpu_ids)
                cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);

        return 0;
}
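
/*
 * The two callbacks above are wired up in cstate_init():
 * cstate_cpu_init() runs as the startup callback of the
 * CPUHP_AP_PERF_X86_CSTATE_STARTING state when a CPU comes online,
 * and cstate_cpu_exit() runs as the teardown callback of the
 * CPUHP_AP_PERF_X86_CSTATE_ONLINE state when a CPU goes away, so
 * there is always exactly one designated reader per core and package.
 */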

static const struct attribute_group *core_attr_update[] = {
        &group_cstate_core_c1,
        &group_cstate_core_c3,
        &group_cstate_core_c6,
        &group_cstate_core_c7,
        NULL,
};

static const struct attribute_group *pkg_attr_update[] = {
        &group_cstate_pkg_c2,
        &group_cstate_pkg_c3,
        &group_cstate_pkg_c6,
        &group_cstate_pkg_c7,
        &group_cstate_pkg_c8,
        &group_cstate_pkg_c9,
        &group_cstate_pkg_c10,
        NULL,
};

static struct pmu cstate_core_pmu = {
        .attr_groups = core_attr_groups,
        .attr_update = core_attr_update,
        .name = "cstate_core",
        .task_ctx_nr = perf_invalid_context,
        .event_init = cstate_pmu_event_init,
        .add = cstate_pmu_event_add,
        .del = cstate_pmu_event_del,
        .start = cstate_pmu_event_start,
        .stop = cstate_pmu_event_stop,
        .read = cstate_pmu_event_update,
        .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
        .module = THIS_MODULE,
};

static struct pmu cstate_pkg_pmu = {
        .attr_groups = pkg_attr_groups,
        .attr_update = pkg_attr_update,
        .name = "cstate_pkg",
        .task_ctx_nr = perf_invalid_context,
        .event_init = cstate_pmu_event_init,
        .add = cstate_pmu_event_add,
        .del = cstate_pmu_event_del,
        .start = cstate_pmu_event_start,
        .stop = cstate_pmu_event_stop,
        .read = cstate_pmu_event_update,
        .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
        .module = THIS_MODULE,
};
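
/*
 * Both PMUs advertise PERF_PMU_CAP_NO_INTERRUPT (the counters cannot
 * raise an overflow interrupt, so the perf core rejects sampling) and
 * PERF_PMU_CAP_NO_EXCLUDE (exclude_user/exclude_kernel and friends
 * make no sense for free-running residency counters and are rejected).
 */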

static const struct cstate_model nhm_cstates __initconst = {
        .core_events = BIT(PERF_CSTATE_CORE_C3_RES) |
                       BIT(PERF_CSTATE_CORE_C6_RES),

        .pkg_events = BIT(PERF_CSTATE_PKG_C3_RES) |
                      BIT(PERF_CSTATE_PKG_C6_RES) |
                      BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model snb_cstates __initconst = {
        .core_events = BIT(PERF_CSTATE_CORE_C3_RES) |
                       BIT(PERF_CSTATE_CORE_C6_RES) |
                       BIT(PERF_CSTATE_CORE_C7_RES),

        .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
                      BIT(PERF_CSTATE_PKG_C3_RES) |
                      BIT(PERF_CSTATE_PKG_C6_RES) |
                      BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model hswult_cstates __initconst = {
        .core_events = BIT(PERF_CSTATE_CORE_C3_RES) |
                       BIT(PERF_CSTATE_CORE_C6_RES) |
                       BIT(PERF_CSTATE_CORE_C7_RES),

        .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
                      BIT(PERF_CSTATE_PKG_C3_RES) |
                      BIT(PERF_CSTATE_PKG_C6_RES) |
                      BIT(PERF_CSTATE_PKG_C7_RES) |
                      BIT(PERF_CSTATE_PKG_C8_RES) |
                      BIT(PERF_CSTATE_PKG_C9_RES) |
                      BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model cnl_cstates __initconst = {
        .core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
                       BIT(PERF_CSTATE_CORE_C3_RES) |
                       BIT(PERF_CSTATE_CORE_C6_RES) |
                       BIT(PERF_CSTATE_CORE_C7_RES),

        .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
                      BIT(PERF_CSTATE_PKG_C3_RES) |
                      BIT(PERF_CSTATE_PKG_C6_RES) |
                      BIT(PERF_CSTATE_PKG_C7_RES) |
                      BIT(PERF_CSTATE_PKG_C8_RES) |
                      BIT(PERF_CSTATE_PKG_C9_RES) |
                      BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model icl_cstates __initconst = {
        .core_events = BIT(PERF_CSTATE_CORE_C6_RES) |
                       BIT(PERF_CSTATE_CORE_C7_RES),

        .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
                      BIT(PERF_CSTATE_PKG_C3_RES) |
                      BIT(PERF_CSTATE_PKG_C6_RES) |
                      BIT(PERF_CSTATE_PKG_C7_RES) |
                      BIT(PERF_CSTATE_PKG_C8_RES) |
                      BIT(PERF_CSTATE_PKG_C9_RES) |
                      BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model icx_cstates __initconst = {
        .core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
                       BIT(PERF_CSTATE_CORE_C6_RES),

        .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
                      BIT(PERF_CSTATE_PKG_C6_RES),
};

static const struct cstate_model adl_cstates __initconst = {
        .core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
                       BIT(PERF_CSTATE_CORE_C6_RES) |
                       BIT(PERF_CSTATE_CORE_C7_RES),

        .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
                      BIT(PERF_CSTATE_PKG_C3_RES) |
                      BIT(PERF_CSTATE_PKG_C6_RES) |
                      BIT(PERF_CSTATE_PKG_C8_RES) |
                      BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model slm_cstates __initconst = {
        .core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
                       BIT(PERF_CSTATE_CORE_C6_RES),

        .pkg_events = BIT(PERF_CSTATE_PKG_C6_RES),
        .quirks = SLM_PKG_C6_USE_C7_MSR,
};

static const struct cstate_model knl_cstates __initconst = {
        .core_events = BIT(PERF_CSTATE_CORE_C6_RES),

        .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
                      BIT(PERF_CSTATE_PKG_C3_RES) |
                      BIT(PERF_CSTATE_PKG_C6_RES),
        .quirks = KNL_CORE_C6_MSR,
};

static const struct cstate_model glm_cstates __initconst = {
        .core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
                       BIT(PERF_CSTATE_CORE_C3_RES) |
                       BIT(PERF_CSTATE_CORE_C6_RES),

        .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
                      BIT(PERF_CSTATE_PKG_C3_RES) |
                      BIT(PERF_CSTATE_PKG_C6_RES) |
                      BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct x86_cpu_id intel_cstates_match[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &nhm_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &nhm_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX, &nhm_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(WESTMERE, &nhm_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP, &nhm_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX, &nhm_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &snb_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &snb_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &snb_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &snb_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &snb_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &snb_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &snb_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &hswult_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &slm_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D, &slm_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &slm_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &snb_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &snb_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &snb_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &snb_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &snb_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &snb_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &snb_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &hswult_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &hswult_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &hswult_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &hswult_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &cnl_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &knl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &knl_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &glm_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &glm_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &glm_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &glm_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &glm_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &glm_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &adl_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &icl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &icx_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &icx_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &icx_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &icx_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, &icx_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, &icx_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &icl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &icl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &icl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &adl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &adl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &adl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &adl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, &adl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &adl_cstates),
        { },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
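
/*
 * Exporting the match table with MODULE_DEVICE_TABLE() generates
 * module alias information, so udev/modprobe can autoload this driver
 * on systems whose CPU matches one of the models above.
 */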

static int __init cstate_probe(const struct cstate_model *cm)
{
        /* SLM has different MSR for PKG C6 */
        if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
                pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;

        /* KNL has different MSR for CORE C6 */
        if (cm->quirks & KNL_CORE_C6_MSR)
                core_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;

        core_msr_mask = perf_msr_probe(core_msr, PERF_CSTATE_CORE_EVENT_MAX,
                                       true, (void *) &cm->core_events);

        pkg_msr_mask = perf_msr_probe(pkg_msr, PERF_CSTATE_PKG_EVENT_MAX,
                                      true, (void *) &cm->pkg_events);

        has_cstate_core = !!core_msr_mask;
        has_cstate_pkg = !!pkg_msr_mask;

        return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
}

static inline void cstate_cleanup(void)
{
        cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
        cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);

        if (has_cstate_core)
                perf_pmu_unregister(&cstate_core_pmu);

        if (has_cstate_pkg)
                perf_pmu_unregister(&cstate_pkg_pmu);
}

static int __init cstate_init(void)
{
        int err;

        cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
                          "perf/x86/cstate:starting", cstate_cpu_init, NULL);
        cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
                          "perf/x86/cstate:online", NULL, cstate_cpu_exit);

        if (has_cstate_core) {
                err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
                if (err) {
                        has_cstate_core = false;
                        pr_info("Failed to register cstate core pmu\n");
                        cstate_cleanup();
                        return err;
                }
        }

        if (has_cstate_pkg) {
                if (topology_max_die_per_package() > 1) {
                        err = perf_pmu_register(&cstate_pkg_pmu,
                                                "cstate_die", -1);
                } else {
                        err = perf_pmu_register(&cstate_pkg_pmu,
                                                cstate_pkg_pmu.name, -1);
                }
                if (err) {
                        has_cstate_pkg = false;
                        pr_info("Failed to register cstate pkg pmu\n");
                        cstate_cleanup();
                        return err;
                }
        }
        return 0;
}

static int __init cstate_pmu_init(void)
{
        const struct x86_cpu_id *id;
        int err;

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return -ENODEV;

        id = x86_match_cpu(intel_cstates_match);
        if (!id)
                return -ENODEV;

        err = cstate_probe((const struct cstate_model *) id->driver_data);
        if (err)
                return err;

        return cstate_init();
}
module_init(cstate_pmu_init);

static void __exit cstate_pmu_exit(void)
{
        cstate_cleanup();
}
module_exit(cstate_pmu_exit);