// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 */

/* Support for NVIDIA specific attributes. */

#include <linux/topology.h>

#include "nvidia_cspmu.h"

#define NV_PCIE_PORT_COUNT		10ULL
#define NV_PCIE_FILTER_ID_MASK		GENMASK_ULL(NV_PCIE_PORT_COUNT - 1, 0)

#define NV_NVL_C2C_PORT_COUNT		2ULL
#define NV_NVL_C2C_FILTER_ID_MASK	GENMASK_ULL(NV_NVL_C2C_PORT_COUNT - 1, 0)

#define NV_CNVL_PORT_COUNT		4ULL
#define NV_CNVL_FILTER_ID_MASK		GENMASK_ULL(NV_CNVL_PORT_COUNT - 1, 0)

#define NV_GENERIC_FILTER_ID_MASK	GENMASK_ULL(31, 0)

#define NV_PRODID_MASK			GENMASK(31, 0)

#define NV_FORMAT_NAME_GENERIC	0

#define to_nv_cspmu_ctx(cspmu)	((struct nv_cspmu_ctx *)(cspmu->impl.ctx))

#define NV_CSPMU_EVENT_ATTR_4_INNER(_pref, _num, _suff, _config)	\
	ARM_CSPMU_EVENT_ATTR(_pref##_num##_suff, _config)

#define NV_CSPMU_EVENT_ATTR_4(_pref, _suff, _config)			\
	NV_CSPMU_EVENT_ATTR_4_INNER(_pref, _0_, _suff, _config),	\
	NV_CSPMU_EVENT_ATTR_4_INNER(_pref, _1_, _suff, _config + 1),	\
	NV_CSPMU_EVENT_ATTR_4_INNER(_pref, _2_, _suff, _config + 2),	\
	NV_CSPMU_EVENT_ATTR_4_INNER(_pref, _3_, _suff, _config + 3)

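/* Per-instance driver context, stashed in cspmu->impl.ctx. */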
struct nv_cspmu_ctx {
	const char *name;
	u32 filter_mask;
	u32 filter_default_val;
	struct attribute **event_attr;
	struct attribute **format_attr;
};

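/* Event attributes exported by the SCF PMU. */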
static struct attribute *scf_pmu_event_attrs[] = {
	ARM_CSPMU_EVENT_ATTR(bus_cycles, 0x1d),

	ARM_CSPMU_EVENT_ATTR(scf_cache_allocate, 0xF0),
	ARM_CSPMU_EVENT_ATTR(scf_cache_refill, 0xF1),
	ARM_CSPMU_EVENT_ATTR(scf_cache, 0xF2),
	ARM_CSPMU_EVENT_ATTR(scf_cache_wb, 0xF3),

	NV_CSPMU_EVENT_ATTR_4(socket, rd_data, 0x101),
	NV_CSPMU_EVENT_ATTR_4(socket, dl_rsp, 0x105),
	NV_CSPMU_EVENT_ATTR_4(socket, wb_data, 0x109),
	NV_CSPMU_EVENT_ATTR_4(socket, ev_rsp, 0x10d),
	NV_CSPMU_EVENT_ATTR_4(socket, prb_data, 0x111),

	NV_CSPMU_EVENT_ATTR_4(socket, rd_outstanding, 0x115),
	NV_CSPMU_EVENT_ATTR_4(socket, dl_outstanding, 0x119),
	NV_CSPMU_EVENT_ATTR_4(socket, wb_outstanding, 0x11d),
	NV_CSPMU_EVENT_ATTR_4(socket, wr_outstanding, 0x121),
	NV_CSPMU_EVENT_ATTR_4(socket, ev_outstanding, 0x125),
	NV_CSPMU_EVENT_ATTR_4(socket, prb_outstanding, 0x129),

	NV_CSPMU_EVENT_ATTR_4(socket, rd_access, 0x12d),
	NV_CSPMU_EVENT_ATTR_4(socket, dl_access, 0x131),
	NV_CSPMU_EVENT_ATTR_4(socket, wb_access, 0x135),
	NV_CSPMU_EVENT_ATTR_4(socket, wr_access, 0x139),
	NV_CSPMU_EVENT_ATTR_4(socket, ev_access, 0x13d),
	NV_CSPMU_EVENT_ATTR_4(socket, prb_access, 0x141),

	NV_CSPMU_EVENT_ATTR_4(ocu, gmem_rd_data, 0x145),
	NV_CSPMU_EVENT_ATTR_4(ocu, gmem_rd_access, 0x149),
	NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wb_access, 0x14d),
	NV_CSPMU_EVENT_ATTR_4(ocu, gmem_rd_outstanding, 0x151),
	NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wr_outstanding, 0x155),

	NV_CSPMU_EVENT_ATTR_4(ocu, rem_rd_data, 0x159),
	NV_CSPMU_EVENT_ATTR_4(ocu, rem_rd_access, 0x15d),
	NV_CSPMU_EVENT_ATTR_4(ocu, rem_wb_access, 0x161),
	NV_CSPMU_EVENT_ATTR_4(ocu, rem_rd_outstanding, 0x165),
	NV_CSPMU_EVENT_ATTR_4(ocu, rem_wr_outstanding, 0x169),

	ARM_CSPMU_EVENT_ATTR(gmem_rd_data, 0x16d),
	ARM_CSPMU_EVENT_ATTR(gmem_rd_access, 0x16e),
	ARM_CSPMU_EVENT_ATTR(gmem_rd_outstanding, 0x16f),
	ARM_CSPMU_EVENT_ATTR(gmem_dl_rsp, 0x170),
	ARM_CSPMU_EVENT_ATTR(gmem_dl_access, 0x171),
	ARM_CSPMU_EVENT_ATTR(gmem_dl_outstanding, 0x172),
	ARM_CSPMU_EVENT_ATTR(gmem_wb_data, 0x173),
	ARM_CSPMU_EVENT_ATTR(gmem_wb_access, 0x174),
	ARM_CSPMU_EVENT_ATTR(gmem_wb_outstanding, 0x175),
	ARM_CSPMU_EVENT_ATTR(gmem_ev_rsp, 0x176),
	ARM_CSPMU_EVENT_ATTR(gmem_ev_access, 0x177),
	ARM_CSPMU_EVENT_ATTR(gmem_ev_outstanding, 0x178),
	ARM_CSPMU_EVENT_ATTR(gmem_wr_data, 0x179),
	ARM_CSPMU_EVENT_ATTR(gmem_wr_outstanding, 0x17a),
	ARM_CSPMU_EVENT_ATTR(gmem_wr_access, 0x17b),

	NV_CSPMU_EVENT_ATTR_4(socket, wr_data, 0x17c),

	NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wr_data, 0x180),
	NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wb_data, 0x184),
	NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wr_access, 0x188),
	NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wb_outstanding, 0x18c),

	NV_CSPMU_EVENT_ATTR_4(ocu, rem_wr_data, 0x190),
	NV_CSPMU_EVENT_ATTR_4(ocu, rem_wb_data, 0x194),
	NV_CSPMU_EVENT_ATTR_4(ocu, rem_wr_access, 0x198),
	NV_CSPMU_EVENT_ATTR_4(ocu, rem_wb_outstanding, 0x19c),

	ARM_CSPMU_EVENT_ATTR(gmem_wr_total_bytes, 0x1a0),
	ARM_CSPMU_EVENT_ATTR(remote_socket_wr_total_bytes, 0x1a1),
	ARM_CSPMU_EVENT_ATTR(remote_socket_rd_data, 0x1a2),
	ARM_CSPMU_EVENT_ATTR(remote_socket_rd_outstanding, 0x1a3),
	ARM_CSPMU_EVENT_ATTR(remote_socket_rd_access, 0x1a4),

	ARM_CSPMU_EVENT_ATTR(cmem_rd_data, 0x1a5),
	ARM_CSPMU_EVENT_ATTR(cmem_rd_access, 0x1a6),
	ARM_CSPMU_EVENT_ATTR(cmem_rd_outstanding, 0x1a7),
	ARM_CSPMU_EVENT_ATTR(cmem_dl_rsp, 0x1a8),
	ARM_CSPMU_EVENT_ATTR(cmem_dl_access, 0x1a9),
	ARM_CSPMU_EVENT_ATTR(cmem_dl_outstanding, 0x1aa),
	ARM_CSPMU_EVENT_ATTR(cmem_wb_data, 0x1ab),
	ARM_CSPMU_EVENT_ATTR(cmem_wb_access, 0x1ac),
	ARM_CSPMU_EVENT_ATTR(cmem_wb_outstanding, 0x1ad),
	ARM_CSPMU_EVENT_ATTR(cmem_ev_rsp, 0x1ae),
	ARM_CSPMU_EVENT_ATTR(cmem_ev_access, 0x1af),
	ARM_CSPMU_EVENT_ATTR(cmem_ev_outstanding, 0x1b0),
	ARM_CSPMU_EVENT_ATTR(cmem_wr_data, 0x1b1),
	ARM_CSPMU_EVENT_ATTR(cmem_wr_outstanding, 0x1b2),

	NV_CSPMU_EVENT_ATTR_4(ocu, cmem_rd_data, 0x1b3),
	NV_CSPMU_EVENT_ATTR_4(ocu, cmem_rd_access, 0x1b7),
	NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wb_access, 0x1bb),
	NV_CSPMU_EVENT_ATTR_4(ocu, cmem_rd_outstanding, 0x1bf),
	NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wr_outstanding, 0x1c3),

	ARM_CSPMU_EVENT_ATTR(ocu_prb_access, 0x1c7),
	ARM_CSPMU_EVENT_ATTR(ocu_prb_data, 0x1c8),
	ARM_CSPMU_EVENT_ATTR(ocu_prb_outstanding, 0x1c9),

	ARM_CSPMU_EVENT_ATTR(cmem_wr_access, 0x1ca),

	NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wr_access, 0x1cb),
	NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wb_data, 0x1cf),
	NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wr_data, 0x1d3),
	NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wb_outstanding, 0x1d7),

	ARM_CSPMU_EVENT_ATTR(cmem_wr_total_bytes, 0x1db),

	ARM_CSPMU_EVENT_ATTR(cycles, ARM_CSPMU_EVT_CYCLES_DEFAULT),
	NULL,
};

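/* Event attributes shared by the PCIe, NVLink-C2C and CNVLink PMUs. */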
static struct attribute *mcf_pmu_event_attrs[] = {
	ARM_CSPMU_EVENT_ATTR(rd_bytes_loc, 0x0),
	ARM_CSPMU_EVENT_ATTR(rd_bytes_rem, 0x1),
	ARM_CSPMU_EVENT_ATTR(wr_bytes_loc, 0x2),
	ARM_CSPMU_EVENT_ATTR(wr_bytes_rem, 0x3),
	ARM_CSPMU_EVENT_ATTR(total_bytes_loc, 0x4),
	ARM_CSPMU_EVENT_ATTR(total_bytes_rem, 0x5),
	ARM_CSPMU_EVENT_ATTR(rd_req_loc, 0x6),
	ARM_CSPMU_EVENT_ATTR(rd_req_rem, 0x7),
	ARM_CSPMU_EVENT_ATTR(wr_req_loc, 0x8),
	ARM_CSPMU_EVENT_ATTR(wr_req_rem, 0x9),
	ARM_CSPMU_EVENT_ATTR(total_req_loc, 0xa),
	ARM_CSPMU_EVENT_ATTR(total_req_rem, 0xb),
	ARM_CSPMU_EVENT_ATTR(rd_cum_outs_loc, 0xc),
	ARM_CSPMU_EVENT_ATTR(rd_cum_outs_rem, 0xd),
	ARM_CSPMU_EVENT_ATTR(cycles, ARM_CSPMU_EVT_CYCLES_DEFAULT),
	NULL,
};

static struct attribute *generic_pmu_event_attrs[] = {
	ARM_CSPMU_EVENT_ATTR(cycles, ARM_CSPMU_EVT_CYCLES_DEFAULT),
	NULL,
};

static struct attribute *scf_pmu_format_attrs[] = {
	ARM_CSPMU_FORMAT_EVENT_ATTR,
	NULL,
};

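/*
 * The PCIe PMU takes a root_port filter in config1 bits [9:0], one bit per
 * root port. Illustrative usage:
 *   perf stat -e nvidia_pcie_pmu_0/rd_bytes_loc,root_port=0xff/
 */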
static struct attribute *pcie_pmu_format_attrs[] = {
	ARM_CSPMU_FORMAT_EVENT_ATTR,
	ARM_CSPMU_FORMAT_ATTR(root_port, "config1:0-9"),
	NULL,
};

static struct attribute *nvlink_c2c_pmu_format_attrs[] = {
	ARM_CSPMU_FORMAT_EVENT_ATTR,
	NULL,
};

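/* The CNVLink PMU takes a remote-socket filter in config1 bits [3:0]. */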
static struct attribute *cnvlink_pmu_format_attrs[] = {
	ARM_CSPMU_FORMAT_EVENT_ATTR,
	ARM_CSPMU_FORMAT_ATTR(rem_socket, "config1:0-3"),
	NULL,
};

static struct attribute *generic_pmu_format_attrs[] = {
	ARM_CSPMU_FORMAT_EVENT_ATTR,
	ARM_CSPMU_FORMAT_FILTER_ATTR,
	NULL,
};

static struct attribute **
nv_cspmu_get_event_attrs(const struct arm_cspmu *cspmu)
{
	const struct nv_cspmu_ctx *ctx = to_nv_cspmu_ctx(cspmu);

	return ctx->event_attr;
}

static struct attribute **
nv_cspmu_get_format_attrs(const struct arm_cspmu *cspmu)
{
	const struct nv_cspmu_ctx *ctx = to_nv_cspmu_ctx(cspmu);

	return ctx->format_attr;
}

static const char *
nv_cspmu_get_name(const struct arm_cspmu *cspmu)
{
	const struct nv_cspmu_ctx *ctx = to_nv_cspmu_ctx(cspmu);

	return ctx->name;
}

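/*
 * Filter value programmed for an event: config1 masked to this PMU's filter
 * width, or the PMU's fixed default when no filter bits are defined.
 */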
static u32 nv_cspmu_event_filter(const struct perf_event *event)
{
	const struct nv_cspmu_ctx *ctx =
		to_nv_cspmu_ctx(to_arm_cspmu(event->pmu));

	if (ctx->filter_mask == 0)
		return ctx->filter_default_val;

	return event->attr.config1 & ctx->filter_mask;
}

enum nv_cspmu_name_fmt {
	NAME_FMT_GENERIC,
	NAME_FMT_SOCKET
};

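/* Static description of one supported PMU type, keyed by PMIIDR product ID. */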
struct nv_cspmu_match {
	u32 prodid;
	u32 prodid_mask;
	u64 filter_mask;
	u32 filter_default_val;
	const char *name_pattern;
	enum nv_cspmu_name_fmt name_fmt;
	struct attribute **event_attr;
	struct attribute **format_attr;
};

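/*
 * Match table scanned by nv_cspmu_init_ops(); the final entry (prodid == 0)
 * is the catch-all used for unrecognized product IDs.
 */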
static const struct nv_cspmu_match nv_cspmu_match[] = {
	{
		.prodid = 0x103,
		.prodid_mask = NV_PRODID_MASK,
		.filter_mask = NV_PCIE_FILTER_ID_MASK,
		.filter_default_val = NV_PCIE_FILTER_ID_MASK,
		.name_pattern = "nvidia_pcie_pmu_%u",
		.name_fmt = NAME_FMT_SOCKET,
		.event_attr = mcf_pmu_event_attrs,
		.format_attr = pcie_pmu_format_attrs
	},
	{
		.prodid = 0x104,
		.prodid_mask = NV_PRODID_MASK,
		.filter_mask = 0x0,
		.filter_default_val = NV_NVL_C2C_FILTER_ID_MASK,
		.name_pattern = "nvidia_nvlink_c2c1_pmu_%u",
		.name_fmt = NAME_FMT_SOCKET,
		.event_attr = mcf_pmu_event_attrs,
		.format_attr = nvlink_c2c_pmu_format_attrs
	},
	{
		.prodid = 0x105,
		.prodid_mask = NV_PRODID_MASK,
		.filter_mask = 0x0,
		.filter_default_val = NV_NVL_C2C_FILTER_ID_MASK,
		.name_pattern = "nvidia_nvlink_c2c0_pmu_%u",
		.name_fmt = NAME_FMT_SOCKET,
		.event_attr = mcf_pmu_event_attrs,
		.format_attr = nvlink_c2c_pmu_format_attrs
	},
	{
		.prodid = 0x106,
		.prodid_mask = NV_PRODID_MASK,
		.filter_mask = NV_CNVL_FILTER_ID_MASK,
		.filter_default_val = NV_CNVL_FILTER_ID_MASK,
		.name_pattern = "nvidia_cnvlink_pmu_%u",
		.name_fmt = NAME_FMT_SOCKET,
		.event_attr = mcf_pmu_event_attrs,
		.format_attr = cnvlink_pmu_format_attrs
	},
	{
		.prodid = 0x2CF,
		.prodid_mask = NV_PRODID_MASK,
		.filter_mask = 0x0,
		.filter_default_val = 0x0,
		.name_pattern = "nvidia_scf_pmu_%u",
		.name_fmt = NAME_FMT_SOCKET,
		.event_attr = scf_pmu_event_attrs,
		.format_attr = scf_pmu_format_attrs
	},
	{
		.prodid = 0,
		.prodid_mask = 0,
		.filter_mask = NV_GENERIC_FILTER_ID_MASK,
		.filter_default_val = NV_GENERIC_FILTER_ID_MASK,
		.name_pattern = "nvidia_uncore_pmu_%u",
		.name_fmt = NAME_FMT_GENERIC,
		.event_attr = generic_pmu_event_attrs,
		.format_attr = generic_pmu_format_attrs
	},
};

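/*
 * Build the PMU name: socket-scoped PMUs are suffixed with the NUMA node of
 * their associated CPUs, generic PMUs with a monotonically increasing index.
 */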
static char *nv_cspmu_format_name(const struct arm_cspmu *cspmu,
				  const struct nv_cspmu_match *match)
{
	char *name;
	struct device *dev = cspmu->dev;

	static atomic_t pmu_generic_idx = {0};

	switch (match->name_fmt) {
	case NAME_FMT_SOCKET: {
		const int cpu = cpumask_first(&cspmu->associated_cpus);
		const int socket = cpu_to_node(cpu);

		name = devm_kasprintf(dev, GFP_KERNEL, match->name_pattern,
				      socket);
		break;
	}
	case NAME_FMT_GENERIC:
		name = devm_kasprintf(dev, GFP_KERNEL, match->name_pattern,
				      atomic_fetch_inc(&pmu_generic_idx));
		break;
	default:
		name = NULL;
		break;
	}

	return name;
}

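/* Install the NVIDIA-specific callbacks and context on an ARM CoreSight PMU. */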
int nv_cspmu_init_ops(struct arm_cspmu *cspmu)
{
	u32 prodid;
	struct nv_cspmu_ctx *ctx;
	struct device *dev = cspmu->dev;
	struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;
	const struct nv_cspmu_match *match = nv_cspmu_match;

	ctx = devm_kzalloc(dev, sizeof(struct nv_cspmu_ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	prodid = FIELD_GET(ARM_CSPMU_PMIIDR_PRODUCTID, cspmu->impl.pmiidr);

	/* Find matching PMU. */
	for (; match->prodid; match++) {
		const u32 prodid_mask = match->prodid_mask;

		if ((match->prodid & prodid_mask) == (prodid & prodid_mask))
			break;
	}

	ctx->name = nv_cspmu_format_name(cspmu, match);
	ctx->filter_mask = match->filter_mask;
	ctx->filter_default_val = match->filter_default_val;
	ctx->event_attr = match->event_attr;
	ctx->format_attr = match->format_attr;

	cspmu->impl.ctx = ctx;

	/* NVIDIA specific callbacks. */
	impl_ops->event_filter = nv_cspmu_event_filter;
	impl_ops->get_event_attrs = nv_cspmu_get_event_attrs;
	impl_ops->get_format_attrs = nv_cspmu_get_format_attrs;
	impl_ops->get_name = nv_cspmu_get_name;

	/* Set others to NULL to use default callback. */
	impl_ops->event_type = NULL;
	impl_ops->event_attr_is_visible = NULL;
	impl_ops->get_identifier = NULL;
	impl_ops->is_cycle_counter_event = NULL;

	return 0;
}
EXPORT_SYMBOL_GPL(nv_cspmu_init_ops);

MODULE_LICENSE("GPL v2");