xref: /openbmc/linux/arch/x86/events/amd/uncore.c (revision 409e1a31)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 */

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/smp.h>

#define NUM_COUNTERS_NB		4
#define NUM_COUNTERS_L2		4
#define NUM_COUNTERS_L3		6
#define MAX_COUNTERS		6

#define RDPMC_BASE_NB		6
#define RDPMC_BASE_LLC		10

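/* The uncore PMCs are 48 bits wide; 64 - 48 = 16 */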
#define COUNTER_SHIFT		16

#undef pr_fmt
#define pr_fmt(fmt)	"amd_uncore: " fmt

static int num_counters_llc;
static int num_counters_nb;
static bool l3_mask;

static HLIST_HEAD(uncore_unused_list);

struct amd_uncore {
	int id;
	int refcnt;
	int cpu;
	int num_counters;
	int rdpmc_base;
	u32 msr_base;
	cpumask_t *active_mask;
	struct pmu *pmu;
	struct perf_event *events[MAX_COUNTERS];
	struct hlist_node node;
};

static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_llc;

static struct pmu amd_nb_pmu;
static struct pmu amd_llc_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_llc_active_mask;

static bool is_nb_event(struct perf_event *event)
{
	return event->pmu->type == amd_nb_pmu.type;
}

static bool is_llc_event(struct perf_event *event)
{
	return event->pmu->type == amd_llc_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
	if (is_nb_event(event) && amd_uncore_nb)
		return *per_cpu_ptr(amd_uncore_nb, event->cpu);
	else if (is_llc_event(event) && amd_uncore_llc)
		return *per_cpu_ptr(amd_uncore_llc, event->cpu);

	return NULL;
}

static void amd_uncore_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;
	s64 delta;

	/*
	 * Since we do not enable counter overflow interrupts, we do not
	 * have to worry about prev_count changing under us.
	 */

	prev = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new);
	local64_set(&hwc->prev_count, new);
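	/*
	 * The hardware counters are 48 bits wide: shifting both values up
	 * by COUNTER_SHIFT (16) and the difference back down sign-extends
	 * the 48-bit delta into the 64-bit result.
	 */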
	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}

static void amd_uncore_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

	hwc->state = 0;
	wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
	perf_event_update_userpage(event);
}

static void amd_uncore_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		amd_uncore_read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static int amd_uncore_add(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	/* Are we already assigned? */
	if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
		goto out;

	for (i = 0; i < uncore->num_counters; i++) {
		if (uncore->events[i] == event) {
			hwc->idx = i;
			goto out;
		}
	}

	/* If not, take the first available counter */
	hwc->idx = -1;
	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
			hwc->idx = i;
			break;
		}
	}

out:
	if (hwc->idx == -1)
		return -EBUSY;

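	/*
	 * Counter i is programmed through a (PERF_CTL, PERF_CTR) MSR pair:
	 * control at msr_base + 2 * i, count at msr_base + 2 * i + 1.
	 */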
	hwc->config_base = uncore->msr_base + (2 * hwc->idx);
	hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
	hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		amd_uncore_start(event, PERF_EF_RELOAD);

	return 0;
}

static void amd_uncore_del(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	amd_uncore_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], event, NULL) == event)
			break;
	}

	hwc->idx = -1;
}

/*
 * Convert logical CPU number to L3 PMC Config ThreadMask format
 */
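/*
 * Illustrative example: on a family 17h part (the <= 0x18 branch below),
 * core 2 running its secondary SMT thread gives
 * shift = AMD64_L3_THREAD_SHIFT + 2 * (2 % 4) + 1, i.e. the ThreadMask
 * bit for that thread, OR'ed with AMD64_L3_SLICE_MASK so that all L3
 * slices contribute to the count.
 */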
static u64 l3_thread_slice_mask(int cpu)
{
	u64 thread_mask, core = topology_core_id(cpu);
	unsigned int shift, thread = 0;

	if (topology_smt_supported() && !topology_is_primary_thread(cpu))
		thread = 1;

	if (boot_cpu_data.x86 <= 0x18) {
		shift = AMD64_L3_THREAD_SHIFT + 2 * (core % 4) + thread;
		thread_mask = BIT_ULL(shift);

		return AMD64_L3_SLICE_MASK | thread_mask;
	}

	core = (core << AMD64_L3_COREID_SHIFT) & AMD64_L3_COREID_MASK;
	shift = AMD64_L3_THREAD_SHIFT + thread;
	thread_mask = BIT_ULL(shift);

	return AMD64_L3_EN_ALL_SLICES | core | thread_mask;
}

static int amd_uncore_event_init(struct perf_event *event)
{
	struct amd_uncore *uncore;
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * NB and Last level cache counters (MSRs) are shared across all cores
	 * that share the same NB / Last level cache. On family 16h and below,
	 * interrupts can be directed to a single target core, but event counts
	 * generated by processes running on other cores cannot be masked out.
	 * So we do not support sampling and per-thread events (advertised via
	 * PERF_PMU_CAP_NO_INTERRUPT), and we never enable counter overflow
	 * interrupts.
	 */
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

	if (event->cpu < 0)
		return -EINVAL;

	/*
	 * SliceMask and ThreadMask need to be set for certain L3 events.
	 * For other events, the two fields do not affect the count.
	 */
	if (l3_mask && is_llc_event(event))
		hwc->config |= l3_thread_slice_mask(event->cpu);

	uncore = event_to_amd_uncore(event);
	if (!uncore)
		return -ENODEV;

	/*
	 * Since requests can come in on any of the shared cores, we remap
	 * the event to a single common CPU.
	 */
	event->cpu = uncore->cpu;

	return 0;
}
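
/*
 * Example usage (the event encoding here is hypothetical): uncore events
 * can only be counted system-wide, on the designated CPU of each domain,
 * e.g.
 *
 *	perf stat -a -e amd_df/event=0x07,umask=0x00/ -- sleep 1
 */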

static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	cpumask_t *active_mask;
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu->type == amd_nb_pmu.type)
		active_mask = &amd_nb_active_mask;
	else if (pmu->type == amd_llc_pmu.type)
		active_mask = &amd_llc_active_mask;
	else
		return 0;

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

static struct attribute *amd_uncore_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_uncore_attr_group = {
	.attrs = amd_uncore_attrs,
};

/*
 * Similar to PMU_FORMAT_ATTR, but allows the format attribute to be
 * assigned at runtime based on the CPU family.
 */
#define AMD_FORMAT_ATTR(_dev, _name, _format)				     \
static ssize_t								     \
_dev##_show##_name(struct device *dev,					     \
		struct device_attribute *attr,				     \
		char *page)						     \
{									     \
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			     \
	return sprintf(page, _format "\n");				     \
}									     \
static struct device_attribute format_attr_##_dev##_name = __ATTR_RO(_dev);

/* Used for each uncore counter type */
#define AMD_ATTRIBUTE(_name)						     \
static struct attribute *amd_uncore_format_attr_##_name[] = {		     \
	&format_attr_event_##_name.attr,				     \
	&format_attr_umask.attr,					     \
	NULL,								     \
};									     \
static struct attribute_group amd_uncore_format_group_##_name = {	     \
	.name = "format",						     \
	.attrs = amd_uncore_format_attr_##_name,			     \
};									     \
static const struct attribute_group *amd_uncore_attr_groups_##_name[] = {    \
	&amd_uncore_attr_group,						     \
	&amd_uncore_format_group_##_name,				     \
	NULL,								     \
};

AMD_FORMAT_ATTR(event, , "config:0-7,32-35");
AMD_FORMAT_ATTR(umask, , "config:8-15");
AMD_FORMAT_ATTR(event, _df, "config:0-7,32-35,59-60");
AMD_FORMAT_ATTR(event, _l3, "config:0-7");
AMD_ATTRIBUTE(df);
AMD_ATTRIBUTE(l3);
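
/*
 * These groups surface under sysfs, e.g.
 * /sys/bus/event_source/devices/amd_df/format/event, which is how the
 * perf tool discovers each PMU's config field layout.
 */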

static struct pmu amd_nb_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
};

static struct pmu amd_llc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
};

static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
			cpu_to_node(cpu));
}

static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
	struct amd_uncore *uncore_nb = NULL, *uncore_llc;

	if (amd_uncore_nb) {
		uncore_nb = amd_uncore_alloc(cpu);
		if (!uncore_nb)
			goto fail;
		uncore_nb->cpu = cpu;
		uncore_nb->num_counters = num_counters_nb;
		uncore_nb->rdpmc_base = RDPMC_BASE_NB;
		uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
		uncore_nb->active_mask = &amd_nb_active_mask;
		uncore_nb->pmu = &amd_nb_pmu;
		uncore_nb->id = -1;
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
	}

	if (amd_uncore_llc) {
		uncore_llc = amd_uncore_alloc(cpu);
		if (!uncore_llc)
			goto fail;
		uncore_llc->cpu = cpu;
		uncore_llc->num_counters = num_counters_llc;
		uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
		uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
		uncore_llc->active_mask = &amd_llc_active_mask;
		uncore_llc->pmu = &amd_llc_pmu;
		uncore_llc->id = -1;
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
	}

	return 0;

fail:
	if (amd_uncore_nb)
		*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
	kfree(uncore_nb);
	return -ENOMEM;
}

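/*
 * All CPUs behind the same northbridge or LLC share one amd_uncore
 * structure: the first CPU to come online keeps its own, while later
 * siblings park theirs on uncore_unused_list and take a reference on
 * the shared one instead.
 */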
static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
			       struct amd_uncore * __percpu *uncores)
{
	unsigned int cpu;
	struct amd_uncore *that;

	for_each_online_cpu(cpu) {
		that = *per_cpu_ptr(uncores, cpu);

		if (!that)
			continue;

		if (this == that)
			continue;

		if (this->id == that->id) {
			hlist_add_head(&this->node, &uncore_unused_list);
			this = that;
			break;
		}
	}

	this->refcnt++;
	return this;
}

static int amd_uncore_cpu_starting(unsigned int cpu)
{
	unsigned int eax, ebx, ecx, edx;
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
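		/* CPUID leaf 0x8000001e: ECX[7:0] is this CPU's node ID */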
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		uncore->id = ecx & 0xff;

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_llc) {
		uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
		uncore->id = per_cpu(cpu_llc_id, cpu);

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
	}

	return 0;
}

static void uncore_clean_online(void)
{
	struct amd_uncore *uncore;
	struct hlist_node *n;

	hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
		hlist_del(&uncore->node);
		kfree(uncore);
	}
}

static void uncore_online(unsigned int cpu,
			  struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	uncore_clean_online();

	if (cpu == uncore->cpu)
		cpumask_set_cpu(cpu, uncore->active_mask);
}

static int amd_uncore_cpu_online(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_online(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_online(cpu, amd_uncore_llc);

	return 0;
}

static void uncore_down_prepare(unsigned int cpu,
				struct amd_uncore * __percpu *uncores)
{
	unsigned int i;
	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

	if (this->cpu != cpu)
		return;

	/* This CPU is going down; migrate to a shared sibling if possible */
	for_each_online_cpu(i) {
		struct amd_uncore *that = *per_cpu_ptr(uncores, i);

		if (cpu == i)
			continue;

		if (this == that) {
			perf_pmu_migrate_context(this->pmu, cpu, i);
			cpumask_clear_cpu(cpu, that->active_mask);
			cpumask_set_cpu(i, that->active_mask);
			that->cpu = i;
			break;
		}
	}
}

static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_down_prepare(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_down_prepare(cpu, amd_uncore_llc);

	return 0;
}

static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	if (cpu == uncore->cpu)
		cpumask_clear_cpu(cpu, uncore->active_mask);

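	/* Free the shared structure once the last CPU in the domain is gone */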
	if (!--uncore->refcnt)
		kfree(uncore);
	*per_cpu_ptr(uncores, cpu) = NULL;
}

static int amd_uncore_cpu_dead(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_dead(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_dead(cpu, amd_uncore_llc);

	return 0;
}

static int __init amd_uncore_init(void)
{
	int ret = -ENODEV;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
		return -ENODEV;

	if (boot_cpu_data.x86 >= 0x17) {
		/*
		 * For F17h and above, the Northbridge counters are
		 * repurposed as Data Fabric counters, and L3 counters
		 * are supported as well. The PMUs are exported based
		 * on family as either L2 or L3, and either NB or DF.
		 */
		num_counters_nb		  = NUM_COUNTERS_NB;
		num_counters_llc	  = NUM_COUNTERS_L3;
		amd_nb_pmu.name		  = "amd_df";
		amd_llc_pmu.name	  = "amd_l3";
		format_attr_event_df.show = &event_show_df;
		format_attr_event_l3.show = &event_show_l3;
		l3_mask			  = true;
	} else {
		num_counters_nb		  = NUM_COUNTERS_NB;
		num_counters_llc	  = NUM_COUNTERS_L2;
		amd_nb_pmu.name		  = "amd_nb";
		amd_llc_pmu.name	  = "amd_l2";
		format_attr_event_df	  = format_attr_event;
		format_attr_event_l3	  = format_attr_event;
		l3_mask			  = false;
	}

	amd_nb_pmu.attr_groups	= amd_uncore_attr_groups_df;
	amd_llc_pmu.attr_groups = amd_uncore_attr_groups_l3;

	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_nb) {
			ret = -ENOMEM;
			goto fail_nb;
		}
		ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
		if (ret)
			goto fail_nb;

		pr_info("%s NB counters detected\n",
			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
				"HYGON" : "AMD");
		ret = 0;
	}

	if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) {
		amd_uncore_llc = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_llc) {
			ret = -ENOMEM;
			goto fail_llc;
		}
		ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
		if (ret)
			goto fail_llc;

		pr_info("%s LLC counters detected\n",
			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
				"HYGON" : "AMD");
		ret = 0;
	}

	/*
	 * Install hotplug callbacks. The core will call them for each
	 * online CPU.
	 */
	if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
			      "perf/x86/amd/uncore:prepare",
			      amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
		goto fail_llc;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
			      "perf/x86/amd/uncore:starting",
			      amd_uncore_cpu_starting, NULL))
		goto fail_prep;
	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
			      "perf/x86/amd/uncore:online",
			      amd_uncore_cpu_online,
			      amd_uncore_cpu_down_prepare))
		goto fail_start;
	return 0;

fail_start:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
	cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_llc:
	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
		perf_pmu_unregister(&amd_nb_pmu);
	if (amd_uncore_llc)
		free_percpu(amd_uncore_llc);
fail_nb:
	if (amd_uncore_nb)
		free_percpu(amd_uncore_nb);

	return ret;
}
device_initcall(amd_uncore_init);