xref: /openbmc/linux/arch/x86/events/amd/uncore.c (revision 9689dbbe)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 */

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/smp.h>

#define NUM_COUNTERS_NB		4
#define NUM_COUNTERS_L2		4
#define NUM_COUNTERS_L3		6
#define MAX_COUNTERS		6

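/*
 * RDPMC base indices (given the counter counts above, the NB counters
 * occupy indices 6-9 and the LLC counters 10-15):
 */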
#define RDPMC_BASE_NB		6
#define RDPMC_BASE_LLC		10

#define COUNTER_SHIFT		16

#undef pr_fmt
#define pr_fmt(fmt)	"amd_uncore: " fmt

static int num_counters_llc;
static int num_counters_nb;
static bool l3_mask;

static HLIST_HEAD(uncore_unused_list);

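/*
 * One amd_uncore instance is shared by every CPU that sees the same
 * northbridge/DF or LLC: "cpu" is the CPU currently designated to run
 * the events, and "refcnt" counts how many online CPUs share the
 * instance.
 */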
struct amd_uncore {
	int id;
	int refcnt;
	int cpu;
	int num_counters;
	int rdpmc_base;
	u32 msr_base;
	cpumask_t *active_mask;
	struct pmu *pmu;
	struct perf_event *events[MAX_COUNTERS];
	struct hlist_node node;
};

static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_llc;

static struct pmu amd_nb_pmu;
static struct pmu amd_llc_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_llc_active_mask;

static bool is_nb_event(struct perf_event *event)
{
	return event->pmu->type == amd_nb_pmu.type;
}

static bool is_llc_event(struct perf_event *event)
{
	return event->pmu->type == amd_llc_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
	if (is_nb_event(event) && amd_uncore_nb)
		return *per_cpu_ptr(amd_uncore_nb, event->cpu);
	else if (is_llc_event(event) && amd_uncore_llc)
		return *per_cpu_ptr(amd_uncore_llc, event->cpu);

	return NULL;
}

static void amd_uncore_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;
	s64 delta;

	/*
	 * since we do not enable counter overflow interrupts,
	 * we do not have to worry about prev_count changing on us
	 */

	prev = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new);
	local64_set(&hwc->prev_count, new);
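	/*
	 * The hardware counters are 48 bits wide (64 - COUNTER_SHIFT).
	 * Shifting both values up by COUNTER_SHIFT and taking the signed
	 * difference sign-extends the 48-bit delta, so the accumulated
	 * count stays correct across counter wraparound.
	 */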
	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}

static void amd_uncore_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

	hwc->state = 0;
	wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
	perf_event_update_userpage(event);
}

static void amd_uncore_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		amd_uncore_read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static int amd_uncore_add(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	/* are we already assigned? */
	if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
		goto out;

	for (i = 0; i < uncore->num_counters; i++) {
		if (uncore->events[i] == event) {
			hwc->idx = i;
			goto out;
		}
	}

	/*
	 * If not, take the first available counter.  The events[] slots
	 * are shared with the other CPUs on this uncore, so a free slot
	 * must be claimed atomically with cmpxchg().
	 */
	hwc->idx = -1;
	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
			hwc->idx = i;
			break;
		}
	}

out:
	if (hwc->idx == -1)
		return -EBUSY;

	hwc->config_base = uncore->msr_base + (2 * hwc->idx);
	hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
	hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		amd_uncore_start(event, PERF_EF_RELOAD);

	return 0;
}

static void amd_uncore_del(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	amd_uncore_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], event, NULL) == event)
			break;
	}

	hwc->idx = -1;
}

/*
 * Convert logical CPU number to L3 PMC Config ThreadMask format
 */
static u64 l3_thread_slice_mask(int cpu)
{
	u64 thread_mask, core = topology_core_id(cpu);
	unsigned int shift, thread = 0;

	if (topology_smt_supported() && !topology_is_primary_thread(cpu))
		thread = 1;

	shift = AMD64_L3_THREAD_SHIFT + 2 * (core % 4) + thread;
	thread_mask = BIT_ULL(shift);

	return AMD64_L3_SLICE_MASK | thread_mask;
}
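
/*
 * Worked example for the function above (values illustrative): a CPU
 * with core id 5 running as the secondary SMT thread gets
 * shift = AMD64_L3_THREAD_SHIFT + 2 * (5 % 4) + 1, i.e. the ThreadMask
 * bit for thread 1 of the second core sharing this L3, OR'ed with
 * AMD64_L3_SLICE_MASK so the event counts across all L3 slices.
 */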

static int amd_uncore_event_init(struct perf_event *event)
{
	struct amd_uncore *uncore;
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * NB and Last level cache counters (MSRs) are shared across all cores
	 * that share the same NB / Last level cache. Interrupts can be directed
	 * to a single target core, however, event counts generated by processes
	 * running on other cores cannot be masked out. So we do not support
	 * sampling and per-thread events.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	/* and we do not enable counter overflow interrupts */
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

	if (event->cpu < 0)
		return -EINVAL;

	/*
	 * SliceMask and ThreadMask need to be set for certain L3 events in
	 * Family 17h. For other events, the two fields do not affect the count.
	 */
	if (l3_mask && is_llc_event(event))
		hwc->config |= l3_thread_slice_mask(event->cpu);

	uncore = event_to_amd_uncore(event);
	if (!uncore)
		return -ENODEV;

	/*
	 * Since requests can come in on any of the shared cores, remap
	 * them all to a single common CPU.
	 */
	event->cpu = uncore->cpu;

	return 0;
}

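/*
 * Exposed as /sys/bus/event_source/devices/<pmu>/cpumask; the perf tool
 * reads this file to learn which CPU carries the shared uncore events.
 */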
static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	cpumask_t *active_mask;
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu->type == amd_nb_pmu.type)
		active_mask = &amd_nb_active_mask;
	else if (pmu->type == amd_llc_pmu.type)
		active_mask = &amd_llc_active_mask;
	else
		return 0;

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

static struct attribute *amd_uncore_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_uncore_attr_group = {
	.attrs = amd_uncore_attrs,
};

/*
 * Similar to PMU_FORMAT_ATTR but allowing for format_attr to be assigned based
 * on family
 */
#define AMD_FORMAT_ATTR(_dev, _name, _format)				     \
static ssize_t								     \
_dev##_show##_name(struct device *dev,					     \
		struct device_attribute *attr,				     \
		char *page)						     \
{									     \
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			     \
	return sprintf(page, _format "\n");				     \
}									     \
static struct device_attribute format_attr_##_dev##_name = __ATTR_RO(_dev);

/* Used for each uncore counter type */
#define AMD_ATTRIBUTE(_name)						     \
static struct attribute *amd_uncore_format_attr_##_name[] = {		     \
	&format_attr_event_##_name.attr,				     \
	&format_attr_umask.attr,					     \
	NULL,								     \
};									     \
static struct attribute_group amd_uncore_format_group_##_name = {	     \
	.name = "format",						     \
	.attrs = amd_uncore_format_attr_##_name,			     \
};									     \
static const struct attribute_group *amd_uncore_attr_groups_##_name[] = {    \
	&amd_uncore_attr_group,						     \
	&amd_uncore_format_group_##_name,				     \
	NULL,								     \
};

AMD_FORMAT_ATTR(event, , "config:0-7,32-35");
AMD_FORMAT_ATTR(umask, , "config:8-15");
AMD_FORMAT_ATTR(event, _df, "config:0-7,32-35,59-60");
AMD_FORMAT_ATTR(event, _l3, "config:0-7");
AMD_ATTRIBUTE(df);
AMD_ATTRIBUTE(l3);
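
/*
 * The invocations above expand to, e.g., event_show_df() and
 * format_attr_event_df, which back the sysfs files under
 * /sys/bus/event_source/devices/<pmu>/format/.
 */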

static struct pmu amd_nb_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};

static struct pmu amd_llc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};
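
/*
 * Once registered, the PMUs above can be driven with the perf tool,
 * e.g. (event/umask values are illustrative only):
 *
 *   perf stat -e amd_df/event=0x07,umask=0x00/ -a -- sleep 1
 *   perf stat -e amd_l3/event=0x04/ -C 0 -- sleep 1
 *
 * Counting is CPU-scoped only; amd_uncore_event_init() rejects sampling
 * and per-task use.
 */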

static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
			cpu_to_node(cpu));
}

static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
	struct amd_uncore *uncore_nb = NULL, *uncore_llc;

	if (amd_uncore_nb) {
		uncore_nb = amd_uncore_alloc(cpu);
		if (!uncore_nb)
			goto fail;
		uncore_nb->cpu = cpu;
		uncore_nb->num_counters = num_counters_nb;
		uncore_nb->rdpmc_base = RDPMC_BASE_NB;
		uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
		uncore_nb->active_mask = &amd_nb_active_mask;
		uncore_nb->pmu = &amd_nb_pmu;
		uncore_nb->id = -1;
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
	}

	if (amd_uncore_llc) {
		uncore_llc = amd_uncore_alloc(cpu);
		if (!uncore_llc)
			goto fail;
		uncore_llc->cpu = cpu;
		uncore_llc->num_counters = num_counters_llc;
		uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
		uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
		uncore_llc->active_mask = &amd_llc_active_mask;
		uncore_llc->pmu = &amd_llc_pmu;
		uncore_llc->id = -1;
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
	}

	return 0;

fail:
	if (amd_uncore_nb)
		*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
	kfree(uncore_nb);
	return -ENOMEM;
}

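/*
 * If a CPU comes up with the same uncore id as an already-online
 * sibling, its freshly allocated instance is parked on
 * uncore_unused_list (to be freed later in uncore_clean_online()) and
 * the sibling's instance is shared instead.
 */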
static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
			       struct amd_uncore * __percpu *uncores)
{
	unsigned int cpu;
	struct amd_uncore *that;

	for_each_online_cpu(cpu) {
		that = *per_cpu_ptr(uncores, cpu);

		if (!that)
			continue;

		if (this == that)
			continue;

		if (this->id == that->id) {
			hlist_add_head(&this->node, &uncore_unused_list);
			this = that;
			break;
		}
	}

	this->refcnt++;
	return this;
}

static int amd_uncore_cpu_starting(unsigned int cpu)
{
	unsigned int eax, ebx, ecx, edx;
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
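		/* CPUID leaf 0x8000001e: ECX[7:0] is this CPU's node id */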
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		uncore->id = ecx & 0xff;

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_llc) {
		uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
		uncore->id = per_cpu(cpu_llc_id, cpu);

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
	}

	return 0;
}

static void uncore_clean_online(void)
{
	struct amd_uncore *uncore;
	struct hlist_node *n;

	hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
		hlist_del(&uncore->node);
		kfree(uncore);
	}
}

static void uncore_online(unsigned int cpu,
			  struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	uncore_clean_online();

	if (cpu == uncore->cpu)
		cpumask_set_cpu(cpu, uncore->active_mask);
}

static int amd_uncore_cpu_online(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_online(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_online(cpu, amd_uncore_llc);

	return 0;
}

static void uncore_down_prepare(unsigned int cpu,
				struct amd_uncore * __percpu *uncores)
{
	unsigned int i;
	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

	if (this->cpu != cpu)
		return;

	/* this cpu is going down, migrate to a shared sibling if possible */
	for_each_online_cpu(i) {
		struct amd_uncore *that = *per_cpu_ptr(uncores, i);

		if (cpu == i)
			continue;

		if (this == that) {
			perf_pmu_migrate_context(this->pmu, cpu, i);
			cpumask_clear_cpu(cpu, that->active_mask);
			cpumask_set_cpu(i, that->active_mask);
			that->cpu = i;
			break;
		}
	}
}

static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_down_prepare(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_down_prepare(cpu, amd_uncore_llc);

	return 0;
}

static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	if (cpu == uncore->cpu)
		cpumask_clear_cpu(cpu, uncore->active_mask);

	if (!--uncore->refcnt)
		kfree(uncore);
	*per_cpu_ptr(uncores, cpu) = NULL;
}

static int amd_uncore_cpu_dead(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_dead(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_dead(cpu, amd_uncore_llc);

	return 0;
}

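/*
 * Hotplug flow: prepare (allocate) -> starting (read ids, merge
 * siblings) -> online (publish in the active cpumask); teardown runs
 * down_prepare (migrate events to a sibling) -> dead (drop refcount,
 * free).
 */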
static int __init amd_uncore_init(void)
{
	int ret = -ENODEV;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
		return -ENODEV;

	if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
		/*
		 * On F17h and F18h, the Northbridge counters are
		 * repurposed as Data Fabric counters, and L3 counters
		 * are supported as well. The PMUs are exported as
		 * either L2 or L3 and either NB or DF, depending on
		 * family.
		 */
		num_counters_nb		  = NUM_COUNTERS_NB;
		num_counters_llc	  = NUM_COUNTERS_L3;
		amd_nb_pmu.name		  = "amd_df";
		amd_llc_pmu.name	  = "amd_l3";
		format_attr_event_df.show = &event_show_df;
		format_attr_event_l3.show = &event_show_l3;
		l3_mask			  = true;
	} else {
		num_counters_nb		  = NUM_COUNTERS_NB;
		num_counters_llc	  = NUM_COUNTERS_L2;
		amd_nb_pmu.name		  = "amd_nb";
		amd_llc_pmu.name	  = "amd_l2";
		format_attr_event_df	  = format_attr_event;
		format_attr_event_l3	  = format_attr_event;
		l3_mask			  = false;
	}

	amd_nb_pmu.attr_groups	= amd_uncore_attr_groups_df;
	amd_llc_pmu.attr_groups = amd_uncore_attr_groups_l3;

	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_nb) {
			ret = -ENOMEM;
			goto fail_nb;
		}
		ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
		if (ret)
			goto fail_nb;

		pr_info("%s NB counters detected\n",
			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
				"HYGON" : "AMD");
		ret = 0;
	}

	if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) {
		amd_uncore_llc = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_llc) {
			ret = -ENOMEM;
			goto fail_llc;
		}
		ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
		if (ret)
			goto fail_llc;

		pr_info("%s LLC counters detected\n",
			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
				"HYGON" : "AMD");
		ret = 0;
	}

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
			      "perf/x86/amd/uncore:prepare",
			      amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
		goto fail_llc;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
			      "perf/x86/amd/uncore:starting",
			      amd_uncore_cpu_starting, NULL))
		goto fail_prep;
	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
			      "perf/x86/amd/uncore:online",
			      amd_uncore_cpu_online,
			      amd_uncore_cpu_down_prepare))
		goto fail_start;
	return 0;

fail_start:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
	cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_llc:
	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
		perf_pmu_unregister(&amd_nb_pmu);
	if (amd_uncore_llc)
		free_percpu(amd_uncore_llc);
fail_nb:
	if (amd_uncore_nb)
		free_percpu(amd_uncore_nb);

	return ret;
}
device_initcall(amd_uncore_init);