// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Steven Kinney <Steven.Kinney@amd.com>
 * Author: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
 *
 * Perf: amd_iommu - AMD IOMMU Performance Counter PMU implementation
 */

#define pr_fmt(fmt)	"perf/amd_iommu: " fmt

#include <linux/perf_event.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/slab.h>

#include "../perf_event.h"
#include "iommu.h"

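/*
 * The hardware counters are 48 bits wide. Shifting both the old and the
 * new raw value up by COUNTER_SHIFT makes the 64-bit subtraction in
 * perf_iommu_read() wrap at the counter width, so the computed delta
 * stays correct modulo 2^48 even after the counter rolls over.
 */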
#define COUNTER_SHIFT		16

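/*
 * Decode helpers for hwc->conf and hwc->conf1, which event_init copies
 * verbatim from perf_event_attr::config and ::config1. The bit positions
 * mirror the sysfs format attributes published below.
 */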
/* iommu pmu conf masks */
#define GET_CSOURCE(x)     ((x)->conf & 0xFFULL)
#define GET_DEVID(x)       (((x)->conf >> 8)  & 0xFFFFULL)
#define GET_DOMID(x)       (((x)->conf >> 24) & 0xFFFFULL)
#define GET_PASID(x)       (((x)->conf >> 40) & 0xFFFFFULL)

/* iommu pmu conf1 masks */
#define GET_DEVID_MASK(x)  ((x)->conf1  & 0xFFFFULL)
#define GET_DOMID_MASK(x)  (((x)->conf1 >> 16) & 0xFFFFULL)
#define GET_PASID_MASK(x)  (((x)->conf1 >> 32) & 0xFFFFFULL)

#define IOMMU_NAME_SIZE 16

struct perf_amd_iommu {
	struct list_head list;
	struct pmu pmu;
	struct amd_iommu *iommu;
	char name[IOMMU_NAME_SIZE];
	u8 max_banks;
	u8 max_counters;
	u64 cntr_assign_mask;
	raw_spinlock_t lock;
};

static LIST_HEAD(perf_amd_iommu_list);

/*---------------------------------------------
 * sysfs format attributes
 *---------------------------------------------*/
PMU_FORMAT_ATTR(csource,    "config:0-7");
PMU_FORMAT_ATTR(devid,      "config:8-23");
PMU_FORMAT_ATTR(domid,      "config:24-39");
PMU_FORMAT_ATTR(pasid,      "config:40-59");
PMU_FORMAT_ATTR(devid_mask, "config1:0-15");
PMU_FORMAT_ATTR(domid_mask, "config1:16-31");
PMU_FORMAT_ATTR(pasid_mask, "config1:32-51");
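/*
 * Example use of the format fields (the device ID value is illustrative):
 * count total translations for PCI device 00:02.0 (devid 0x10) only:
 *
 *   perf stat -e 'amd_iommu_0/csource=0x05,devid=0x10,devid_mask=0xffff/' -a -- sleep 1
 */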

static struct attribute *iommu_format_attrs[] = {
	&format_attr_csource.attr,
	&format_attr_devid.attr,
	&format_attr_pasid.attr,
	&format_attr_domid.attr,
	&format_attr_devid_mask.attr,
	&format_attr_pasid_mask.attr,
	&format_attr_domid_mask.attr,
	NULL,
};

static struct attribute_group amd_iommu_format_group = {
	.name = "format",
	.attrs = iommu_format_attrs,
};

/*---------------------------------------------
 * sysfs events attributes
 *---------------------------------------------*/
static struct attribute_group amd_iommu_events_group = {
	.name = "events",
};

struct amd_iommu_event_desc {
	struct kobj_attribute attr;
	const char *event;
};

static ssize_t _iommu_event_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct amd_iommu_event_desc *event =
		container_of(attr, struct amd_iommu_event_desc, attr);
	return sprintf(buf, "%s\n", event->event);
}

#define AMD_IOMMU_EVENT_DESC(_name, _event)			\
{								\
	.attr  = __ATTR(_name, 0444, _iommu_event_show, NULL),	\
	.event = _event,					\
}

static struct amd_iommu_event_desc amd_iommu_v2_event_descs[] = {
	AMD_IOMMU_EVENT_DESC(mem_pass_untrans,        "csource=0x01"),
	AMD_IOMMU_EVENT_DESC(mem_pass_pretrans,       "csource=0x02"),
	AMD_IOMMU_EVENT_DESC(mem_pass_excl,           "csource=0x03"),
	AMD_IOMMU_EVENT_DESC(mem_target_abort,        "csource=0x04"),
	AMD_IOMMU_EVENT_DESC(mem_trans_total,         "csource=0x05"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pte_hit,   "csource=0x06"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pte_mis,   "csource=0x07"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pde_hit,   "csource=0x08"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pde_mis,   "csource=0x09"),
	AMD_IOMMU_EVENT_DESC(mem_dte_hit,             "csource=0x0a"),
	AMD_IOMMU_EVENT_DESC(mem_dte_mis,             "csource=0x0b"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_tot,       "csource=0x0c"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_nst,       "csource=0x0d"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_gst,       "csource=0x0e"),
	AMD_IOMMU_EVENT_DESC(int_dte_hit,             "csource=0x0f"),
	AMD_IOMMU_EVENT_DESC(int_dte_mis,             "csource=0x10"),
	AMD_IOMMU_EVENT_DESC(cmd_processed,           "csource=0x11"),
	AMD_IOMMU_EVENT_DESC(cmd_processed_inv,       "csource=0x12"),
	AMD_IOMMU_EVENT_DESC(tlb_inv,                 "csource=0x13"),
	AMD_IOMMU_EVENT_DESC(ign_rd_wr_mmio_1ff8h,    "csource=0x14"),
	AMD_IOMMU_EVENT_DESC(vapic_int_non_guest,     "csource=0x15"),
	AMD_IOMMU_EVENT_DESC(vapic_int_guest,         "csource=0x16"),
	AMD_IOMMU_EVENT_DESC(smi_recv,                "csource=0x17"),
	AMD_IOMMU_EVENT_DESC(smi_blk,                 "csource=0x18"),
	{ /* end: all zeroes */ },
};
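
/*
 * Each descriptor above becomes a file in the "events" sysfs group, so
 * the symbolic names show up in "perf list" and can be used directly,
 * e.g. amd_iommu_0/mem_trans_total/.
 */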

/*---------------------------------------------
 * sysfs cpumask attributes
 *---------------------------------------------*/
static cpumask_t iommu_cpumask;

static ssize_t _iommu_cpumask_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &iommu_cpumask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, _iommu_cpumask_show, NULL);

static struct attribute *iommu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_iommu_cpumask_group = {
	.attrs = iommu_cpumask_attrs,
};

/*---------------------------------------------*/

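/*
 * Claim a free bank/counter pair for @event under piommu->lock.
 * cntr_assign_mask packs the banks four bits apart (shift = bank*4 + cntr),
 * so the bitmap layout assumes at most four counters per bank.
 */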
static int get_next_avail_iommu_bnk_cntr(struct perf_event *event)
{
	struct perf_amd_iommu *piommu = container_of(event->pmu, struct perf_amd_iommu, pmu);
	int max_cntrs = piommu->max_counters;
	int max_banks = piommu->max_banks;
	u32 shift, bank, cntr;
	unsigned long flags;
	int retval;

	raw_spin_lock_irqsave(&piommu->lock, flags);

	for (bank = 0, shift = 0; bank < max_banks; bank++) {
		for (cntr = 0; cntr < max_cntrs; cntr++) {
			shift = bank + (bank*3) + cntr;
			if (piommu->cntr_assign_mask & BIT_ULL(shift))
				continue;

			piommu->cntr_assign_mask |= BIT_ULL(shift);
			event->hw.iommu_bank = bank;
			event->hw.iommu_cntr = cntr;
			retval = 0;
			goto out;
		}
	}
	retval = -ENOSPC;
out:
	raw_spin_unlock_irqrestore(&piommu->lock, flags);
	return retval;
}

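/* Release a bank/counter pair claimed by get_next_avail_iommu_bnk_cntr(). */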
static int clear_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu,
					u8 bank, u8 cntr)
{
	unsigned long flags;
	int max_banks, max_cntrs;
	int shift = 0;

	max_banks = perf_iommu->max_banks;
	max_cntrs = perf_iommu->max_counters;

	if ((bank > max_banks) || (cntr > max_cntrs))
		return -EINVAL;

	shift = bank + cntr + (bank*3);

	raw_spin_lock_irqsave(&perf_iommu->lock, flags);
	perf_iommu->cntr_assign_mask &= ~(1ULL<<shift);
	raw_spin_unlock_irqrestore(&perf_iommu->lock, flags);

	return 0;
}

static int perf_iommu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Test the event attr type for PMU enumeration */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * IOMMU counters are shared across all cores, so this PMU supports
	 * neither per-process counting nor event sampling.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	/* update the hw_perf_event struct with the iommu config data */
	hwc->conf  = event->attr.config;
	hwc->conf1 = event->attr.config1;

	return 0;
}

static inline struct amd_iommu *perf_event_2_iommu(struct perf_event *ev)
{
	return (container_of(ev->pmu, struct perf_amd_iommu, pmu))->iommu;
}

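/*
 * Program one hardware counter: select the event source, then write the
 * devid/pasid/domid match registers. Each match register takes the match
 * value in its low word and the mask from bit 32 up; bit 31 is set
 * whenever any value or mask is supplied, which appears to be the
 * match-enable bit.
 */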
static void perf_iommu_enable_event(struct perf_event *ev)
{
	struct amd_iommu *iommu = perf_event_2_iommu(ev);
	struct hw_perf_event *hwc = &ev->hw;
	u8 bank = hwc->iommu_bank;
	u8 cntr = hwc->iommu_cntr;
	u64 reg = 0ULL;

	reg = GET_CSOURCE(hwc);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_COUNTER_SRC_REG, &reg);

	reg = GET_DEVID_MASK(hwc);
	reg = GET_DEVID(hwc) | (reg << 32);
	if (reg)
		reg |= BIT(31);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DEVID_MATCH_REG, &reg);

	reg = GET_PASID_MASK(hwc);
	reg = GET_PASID(hwc) | (reg << 32);
	if (reg)
		reg |= BIT(31);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_PASID_MATCH_REG, &reg);

	reg = GET_DOMID_MASK(hwc);
	reg = GET_DOMID(hwc) | (reg << 32);
	if (reg)
		reg |= BIT(31);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DOMID_MATCH_REG, &reg);
}

static void perf_iommu_disable_event(struct perf_event *event)
{
	struct amd_iommu *iommu = perf_event_2_iommu(event);
	struct hw_perf_event *hwc = &event->hw;
	u64 reg = 0ULL;

	amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
			     IOMMU_PC_COUNTER_SRC_REG, &reg);
}

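/*
 * On PERF_EF_RELOAD, write the saved prev_count back to the hardware
 * counter so counting resumes from where perf_iommu_stop() left off.
 */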
static void perf_iommu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	if (flags & PERF_EF_RELOAD) {
		u64 prev_raw_count = local64_read(&hwc->prev_count);
		struct amd_iommu *iommu = perf_event_2_iommu(event);

		amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
				     IOMMU_PC_COUNTER_REG, &prev_raw_count);
	}

	perf_iommu_enable_event(event);
	perf_event_update_userpage(event);
}

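/*
 * Fold the current hardware count into event->count. The cmpxchg lets
 * only one of several racing readers advance prev_count, so each delta
 * is accumulated exactly once.
 */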
static void perf_iommu_read(struct perf_event *event)
{
	u64 count, prev, delta;
	struct hw_perf_event *hwc = &event->hw;
	struct amd_iommu *iommu = perf_event_2_iommu(event);

	if (amd_iommu_pc_get_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
				 IOMMU_PC_COUNTER_REG, &count))
		return;

	/* IOMMU pc counter register is only 48 bits */
	count &= GENMASK_ULL(47, 0);

	prev = local64_read(&hwc->prev_count);
	if (local64_cmpxchg(&hwc->prev_count, prev, count) != prev)
		return;

	/* Handle 48-bit counter overflow */
	delta = (count << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}

static void perf_iommu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	perf_iommu_disable_event(event);
	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;

	perf_iommu_read(event);
	hwc->state |= PERF_HES_UPTODATE;
}

static int perf_iommu_add(struct perf_event *event, int flags)
{
	int retval;

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	/* request an iommu bank/counter */
	retval = get_next_avail_iommu_bnk_cntr(event);
	if (retval)
		return retval;

	if (flags & PERF_EF_START)
		perf_iommu_start(event, PERF_EF_RELOAD);

	return 0;
}

static void perf_iommu_del(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_amd_iommu *perf_iommu =
			container_of(event->pmu, struct perf_amd_iommu, pmu);

	perf_iommu_stop(event, PERF_EF_UPDATE);

	/* clear the assigned iommu bank/counter */
	clear_avail_iommu_bnk_cntr(perf_iommu,
				   hwc->iommu_bank, hwc->iommu_cntr);

	perf_event_update_userpage(event);
}

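/*
 * Build the NULL-terminated attribute array for the "events" sysfs group
 * from the static descriptor table above.
 */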
static __init int _init_events_attrs(void)
{
	int i = 0, j;
	struct attribute **attrs;

	while (amd_iommu_v2_event_descs[i].attr.attr.name)
		i++;

	attrs = kcalloc(i + 1, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	for (j = 0; j < i; j++)
		attrs[j] = &amd_iommu_v2_event_descs[j].attr.attr;

	amd_iommu_events_group.attrs = attrs;
	return 0;
}

static const struct attribute_group *amd_iommu_attr_groups[] = {
	&amd_iommu_format_group,
	&amd_iommu_cpumask_group,
	&amd_iommu_events_group,
	NULL,
};

static const struct pmu iommu_pmu __initconst = {
	.event_init	= perf_iommu_event_init,
	.add		= perf_iommu_add,
	.del		= perf_iommu_del,
	.start		= perf_iommu_start,
	.stop		= perf_iommu_stop,
	.read		= perf_iommu_read,
	.task_ctx_nr	= perf_invalid_context,
	.attr_groups	= amd_iommu_attr_groups,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};

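/*
 * Create and register one PMU instance ("amd_iommu_<idx>") for the IOMMU
 * at @idx, copying the iommu_pmu template above.
 */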
static __init int init_one_iommu(unsigned int idx)
{
	struct perf_amd_iommu *perf_iommu;
	int ret;

	perf_iommu = kzalloc(sizeof(struct perf_amd_iommu), GFP_KERNEL);
	if (!perf_iommu)
		return -ENOMEM;

	raw_spin_lock_init(&perf_iommu->lock);

	perf_iommu->pmu          = iommu_pmu;
	perf_iommu->iommu        = get_amd_iommu(idx);
	perf_iommu->max_banks    = amd_iommu_pc_get_max_banks(idx);
	perf_iommu->max_counters = amd_iommu_pc_get_max_counters(idx);

	if (!perf_iommu->iommu ||
	    !perf_iommu->max_banks ||
	    !perf_iommu->max_counters) {
		kfree(perf_iommu);
		return -EINVAL;
	}

	snprintf(perf_iommu->name, IOMMU_NAME_SIZE, "amd_iommu_%u", idx);

	ret = perf_pmu_register(&perf_iommu->pmu, perf_iommu->name, -1);
	if (!ret) {
		pr_info("Detected AMD IOMMU #%d (%d banks, %d counters/bank).\n",
			idx, perf_iommu->max_banks, perf_iommu->max_counters);
		list_add_tail(&perf_iommu->list, &perf_amd_iommu_list);
	} else {
		pr_warn("Error initializing IOMMU %d.\n", idx);
		kfree(perf_iommu);
	}
	return ret;
}

static __init int amd_iommu_pc_init(void)
{
	unsigned int i, cnt = 0;
	int ret;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_supported())
		return -ENODEV;

	ret = _init_events_attrs();
	if (ret)
		return ret;

	/*
	 * An IOMMU PMU is specific to an IOMMU and can function
	 * independently, so walk all IOMMUs and ignore any that fail to
	 * initialize, unless every one of them fails.
	 */
	for (i = 0; i < amd_iommu_get_num_iommus(); i++) {
		ret = init_one_iommu(i);
		if (!ret)
			cnt++;
	}

	if (!cnt) {
		kfree(amd_iommu_events_group.attrs);
		return -ENODEV;
	}

	/* Init cpumask attributes to only core 0 */
	cpumask_set_cpu(0, &iommu_cpumask);
	return 0;
}

device_initcall(amd_iommu_pc_init);
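
/*
 * Example usage once the PMU has registered (counting is system-wide,
 * as event_init rejects per-task events; the one-second workload is
 * illustrative):
 *
 *   # perf stat -e 'amd_iommu_0/mem_trans_total/' -a -- sleep 1
 */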