/* xref: /openbmc/linux/arch/x86/events/amd/uncore.c (revision 171fa692) */
/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>

#define NUM_COUNTERS_NB		4
#define NUM_COUNTERS_L2		4
#define NUM_COUNTERS_L3		6
#define MAX_COUNTERS		6

#define RDPMC_BASE_NB		6
#define RDPMC_BASE_LLC		10

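/*
 * The hardware counters are 48 bits wide.  amd_uncore_read() shifts the raw
 * values left by COUNTER_SHIFT before subtracting so that the delta is
 * sign-extended and counter wraparound is handled correctly.
 */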
#define COUNTER_SHIFT		16

#undef pr_fmt
#define pr_fmt(fmt)	"amd_uncore: " fmt

static int num_counters_llc;
static int num_counters_nb;

static HLIST_HEAD(uncore_unused_list);

struct amd_uncore {
	int id;
	int refcnt;
	int cpu;
	int num_counters;
	int rdpmc_base;
	u32 msr_base;
	cpumask_t *active_mask;
	struct pmu *pmu;
	struct perf_event *events[MAX_COUNTERS];
	struct hlist_node node;
};

static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_llc;

static struct pmu amd_nb_pmu;
static struct pmu amd_llc_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_llc_active_mask;

static bool is_nb_event(struct perf_event *event)
{
	return event->pmu->type == amd_nb_pmu.type;
}

static bool is_llc_event(struct perf_event *event)
{
	return event->pmu->type == amd_llc_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
	if (is_nb_event(event) && amd_uncore_nb)
		return *per_cpu_ptr(amd_uncore_nb, event->cpu);
	else if (is_llc_event(event) && amd_uncore_llc)
		return *per_cpu_ptr(amd_uncore_llc, event->cpu);

	return NULL;
}

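/*
 * Read the current counter value via RDPMC and accumulate the sign-extended
 * delta since the previous read into event->count.
 */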
static void amd_uncore_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;
	s64 delta;

	/*
	 * since we do not enable counter overflow interrupts,
	 * we do not have to worry about prev_count changing on us
	 */

	prev = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new);
	local64_set(&hwc->prev_count, new);
	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}

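/*
 * Restore the previous count if PERF_EF_RELOAD is set, then enable the
 * counter by setting ARCH_PERFMON_EVENTSEL_ENABLE in its control MSR.
 */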
static void amd_uncore_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

	hwc->state = 0;
	wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
	perf_event_update_userpage(event);
}

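/*
 * Disable the counter and, on PERF_EF_UPDATE, fold the final count into the
 * event before marking it up to date.
 */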
static void amd_uncore_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		amd_uncore_read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

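/*
 * Assign the event to a hardware counter.  Counters are shared by all cores
 * behind the same NB/LLC, so a free slot is claimed atomically with cmpxchg().
 */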
static int amd_uncore_add(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	/* are we already assigned? */
	if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
		goto out;

	for (i = 0; i < uncore->num_counters; i++) {
		if (uncore->events[i] == event) {
			hwc->idx = i;
			goto out;
		}
	}

	/* if not, take the first available counter */
	hwc->idx = -1;
	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
			hwc->idx = i;
			break;
		}
	}

out:
	if (hwc->idx == -1)
		return -EBUSY;

	hwc->config_base = uncore->msr_base + (2 * hwc->idx);
	hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
	hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		amd_uncore_start(event, PERF_EF_RELOAD);

	return 0;
}

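/* Stop the event and release the hardware counter it occupied. */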
static void amd_uncore_del(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	amd_uncore_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], event, NULL) == event)
			break;
	}

	hwc->idx = -1;
}

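/*
 * Validate a new event and bind it to the uncore instance that serves its
 * CPU.
 */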
static int amd_uncore_event_init(struct perf_event *event)
{
	struct amd_uncore *uncore;
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * NB and Last level cache counters (MSRs) are shared across all cores
	 * that share the same NB / Last level cache. Interrupts can be directed
	 * to a single target core; however, event counts generated by processes
	 * running on other cores cannot be masked out. So we do not support
	 * sampling and per-thread events.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	/* NB and Last level cache counters do not have usr/os/guest/host bits */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_host || event->attr.exclude_guest)
		return -EINVAL;

	/* and we do not enable counter overflow interrupts */
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

	if (event->cpu < 0)
		return -EINVAL;

	uncore = event_to_amd_uncore(event);
	if (!uncore)
		return -ENODEV;

	/*
	 * Since requests can come in on any of the shared cores, we remap
	 * them to a single common cpu.
	 */
	event->cpu = uncore->cpu;

	return 0;
}

static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	cpumask_t *active_mask;
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu->type == amd_nb_pmu.type)
		active_mask = &amd_nb_active_mask;
	else if (pmu->type == amd_llc_pmu.type)
		active_mask = &amd_llc_active_mask;
	else
		return 0;

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

static struct attribute *amd_uncore_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_uncore_attr_group = {
	.attrs = amd_uncore_attrs,
};

/*
 * Similar to PMU_FORMAT_ATTR but allowing for format_attr to be assigned based
 * on family
 */
#define AMD_FORMAT_ATTR(_dev, _name, _format)				     \
static ssize_t								     \
_dev##_show##_name(struct device *dev,					     \
		struct device_attribute *attr,				     \
		char *page)						     \
{									     \
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			     \
	return sprintf(page, _format "\n");				     \
}									     \
static struct device_attribute format_attr_##_dev##_name = __ATTR_RO(_dev);

/* Used for each uncore counter type */
#define AMD_ATTRIBUTE(_name)						     \
static struct attribute *amd_uncore_format_attr_##_name[] = {		     \
	&format_attr_event_##_name.attr,				     \
	&format_attr_umask.attr,					     \
	NULL,								     \
};									     \
static struct attribute_group amd_uncore_format_group_##_name = {	     \
	.name = "format",						     \
	.attrs = amd_uncore_format_attr_##_name,			     \
};									     \
static const struct attribute_group *amd_uncore_attr_groups_##_name[] = {    \
	&amd_uncore_attr_group,						     \
	&amd_uncore_format_group_##_name,				     \
	NULL,								     \
};

AMD_FORMAT_ATTR(event, , "config:0-7,32-35");
AMD_FORMAT_ATTR(umask, , "config:8-15");
AMD_FORMAT_ATTR(event, _df, "config:0-7,32-35,59-60");
AMD_FORMAT_ATTR(event, _l3, "config:0-7");
AMD_ATTRIBUTE(df);
AMD_ATTRIBUTE(l3);

static struct pmu amd_nb_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

static struct pmu amd_llc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

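/* Allocate an uncore instance on the memory node of the given CPU. */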
static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
			cpu_to_node(cpu));
}

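/*
 * CPU hotplug prepare callback: allocate and initialize this CPU's NB and
 * LLC uncore structures before the CPU comes online.
 */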
static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
	struct amd_uncore *uncore_nb = NULL, *uncore_llc;

	if (amd_uncore_nb) {
		uncore_nb = amd_uncore_alloc(cpu);
		if (!uncore_nb)
			goto fail;
		uncore_nb->cpu = cpu;
		uncore_nb->num_counters = num_counters_nb;
		uncore_nb->rdpmc_base = RDPMC_BASE_NB;
		uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
		uncore_nb->active_mask = &amd_nb_active_mask;
		uncore_nb->pmu = &amd_nb_pmu;
		uncore_nb->id = -1;
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
	}

	if (amd_uncore_llc) {
		uncore_llc = amd_uncore_alloc(cpu);
		if (!uncore_llc)
			goto fail;
		uncore_llc->cpu = cpu;
		uncore_llc->num_counters = num_counters_llc;
		uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
		uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
		uncore_llc->active_mask = &amd_llc_active_mask;
		uncore_llc->pmu = &amd_llc_pmu;
		uncore_llc->id = -1;
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
	}

	return 0;

fail:
	if (amd_uncore_nb)
		*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
	kfree(uncore_nb);
	return -ENOMEM;
}

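/*
 * If an online CPU already owns an uncore instance with the same id, adopt
 * it and queue our own copy on uncore_unused_list for freeing; either way,
 * take a reference on the instance that is returned.
 */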
static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
			       struct amd_uncore * __percpu *uncores)
{
	unsigned int cpu;
	struct amd_uncore *that;

	for_each_online_cpu(cpu) {
		that = *per_cpu_ptr(uncores, cpu);

		if (!that)
			continue;

		if (this == that)
			continue;

		if (this->id == that->id) {
			hlist_add_head(&this->node, &uncore_unused_list);
			this = that;
			break;
		}
	}

	this->refcnt++;
	return this;
}

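/*
 * CPU hotplug starting callback: derive the NB id from CPUID leaf 0x8000001e
 * and the LLC id from the APIC id and the number of CPUs sharing the cache
 * (CPUID leaf 0x8000001d), then fold duplicate instances into the copy owned
 * by an already-online sibling.
 */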
static int amd_uncore_cpu_starting(unsigned int cpu)
{
	unsigned int eax, ebx, ecx, edx;
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		uncore->id = ecx & 0xff;

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_llc) {
		unsigned int apicid = cpu_data(cpu).apicid;
		unsigned int nshared;

		uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
		cpuid_count(0x8000001d, 2, &eax, &ebx, &ecx, &edx);
		nshared = ((eax >> 14) & 0xfff) + 1;
		uncore->id = apicid - (apicid % nshared);

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
	}

	return 0;
}

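/* Free uncore instances that were superseded by an online sibling's copy. */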
static void uncore_clean_online(void)
{
	struct amd_uncore *uncore;
	struct hlist_node *n;

	hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
		hlist_del(&uncore->node);
		kfree(uncore);
	}
}

static void uncore_online(unsigned int cpu,
			  struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	uncore_clean_online();

	if (cpu == uncore->cpu)
		cpumask_set_cpu(cpu, uncore->active_mask);
}

static int amd_uncore_cpu_online(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_online(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_online(cpu, amd_uncore_llc);

	return 0;
}

static void uncore_down_prepare(unsigned int cpu,
				struct amd_uncore * __percpu *uncores)
{
	unsigned int i;
	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

	if (this->cpu != cpu)
		return;

	/* this cpu is going down, migrate to a shared sibling if possible */
	for_each_online_cpu(i) {
		struct amd_uncore *that = *per_cpu_ptr(uncores, i);

		if (cpu == i)
			continue;

		if (this == that) {
			perf_pmu_migrate_context(this->pmu, cpu, i);
			cpumask_clear_cpu(cpu, that->active_mask);
			cpumask_set_cpu(i, that->active_mask);
			that->cpu = i;
			break;
		}
	}
}

static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_down_prepare(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_down_prepare(cpu, amd_uncore_llc);

	return 0;
}

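/*
 * Drop the dead CPU's reference on its uncore instance and free it once the
 * last sharer is gone.
 */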
static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	if (cpu == uncore->cpu)
		cpumask_clear_cpu(cpu, uncore->active_mask);

	if (!--uncore->refcnt)
		kfree(uncore);
	*per_cpu_ptr(uncores, cpu) = NULL;
}

static int amd_uncore_cpu_dead(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_dead(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_dead(cpu, amd_uncore_llc);

	return 0;
}

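/*
 * Driver init: detect the counter types supported by this family, register
 * the NB/DF and LLC PMUs, and install the CPU hotplug callbacks.
 */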
static int __init amd_uncore_init(void)
{
	int ret = -ENODEV;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
		return -ENODEV;

	if (boot_cpu_data.x86 == 0x17) {
		/*
		 * On Family 17h, the Northbridge counters are repurposed as
		 * Data Fabric counters, and L3 counters are supported as well.
		 * The PMUs are therefore exported, based on family, as either
		 * L2 or L3 and as either NB or DF.
		 */
		num_counters_nb		  = NUM_COUNTERS_NB;
		num_counters_llc	  = NUM_COUNTERS_L3;
		amd_nb_pmu.name		  = "amd_df";
		amd_llc_pmu.name	  = "amd_l3";
		format_attr_event_df.show = &event_show_df;
		format_attr_event_l3.show = &event_show_l3;
	} else {
		num_counters_nb		  = NUM_COUNTERS_NB;
		num_counters_llc	  = NUM_COUNTERS_L2;
		amd_nb_pmu.name		  = "amd_nb";
		amd_llc_pmu.name	  = "amd_l2";
		format_attr_event_df	  = format_attr_event;
		format_attr_event_l3	  = format_attr_event;
	}

	amd_nb_pmu.attr_groups	= amd_uncore_attr_groups_df;
	amd_llc_pmu.attr_groups = amd_uncore_attr_groups_l3;

	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_nb) {
			ret = -ENOMEM;
			goto fail_nb;
		}
		ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
		if (ret)
			goto fail_nb;

		pr_info("AMD NB counters detected\n");
		ret = 0;
	}

	if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) {
		amd_uncore_llc = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_llc) {
			ret = -ENOMEM;
			goto fail_llc;
		}
		ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
		if (ret)
			goto fail_llc;

		pr_info("AMD LLC counters detected\n");
		ret = 0;
	}

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
			      "perf/x86/amd/uncore:prepare",
			      amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
		goto fail_llc;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
			      "perf/x86/amd/uncore:starting",
			      amd_uncore_cpu_starting, NULL))
		goto fail_prep;
	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
			      "perf/x86/amd/uncore:online",
			      amd_uncore_cpu_online,
			      amd_uncore_cpu_down_prepare))
		goto fail_start;
	return 0;

fail_start:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
	cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_llc:
	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
		perf_pmu_unregister(&amd_nb_pmu);
	if (amd_uncore_llc)
		free_percpu(amd_uncore_llc);
fail_nb:
	if (amd_uncore_nb)
		free_percpu(amd_uncore_nb);

	return ret;
}
device_initcall(amd_uncore_init);