xref: /openbmc/linux/arch/powerpc/perf/hv-24x7.c (revision a8a28aff)
/*
 * Hypervisor supplied "24x7" performance counter support
 *
 * Author: Cody P Schafer <cody@linux.vnet.ibm.com>
 * Copyright 2014 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "hv-24x7: " fmt

#include <linux/perf_event.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/io.h>

#include "hv-24x7.h"
#include "hv-24x7-catalog.h"
#include "hv-common.h"

/*
 * TODO: Merging events:
 * - Think of the hcall as an interface to a 4d array of counters:
 *   - x = domains
 *   - y = indexes in the domain (core, chip, vcpu, node, etc)
 *   - z = offset into the counter space
 *   - w = lpars (guest vms, "logical partitions")
 * - A single request is: x,y,y_last,z,z_last,w,w_last
 *   - this means we can retrieve a rectangle of counters in y,z for a single x.
 *
 * - Things to consider (ignoring w):
 *   - input  cost_per_request = 16
 *   - output cost_per_result(ys,zs) = 8 + 8 * ys + ys * zs
 *   - limited number of requests per hcall (the whole request buffer must
 *     fit in 4K bytes)
 *     - 4096 = 16 [buffer header] + 16 [request size] * request_count,
 *       so at most (4096 - 16) / 16 = 255 requests per hcall
 *   - sometimes it will be more efficient to read extra data and discard
 */
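
/*
 * Worked example of the costs above (arithmetic only; the numbers follow
 * from the formulas in this TODO, not from any hypervisor documentation):
 * a single request for a ys = 2 by zs = 4 rectangle of counters costs 16
 * bytes of input and 8 + 8 * 2 + 2 * 4 = 32 bytes of output, while eight
 * single-counter requests would cost 8 * 16 = 128 bytes of input alone,
 * so merging adjacent indexes and offsets into one request wins quickly.
 */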

/*
 * Example usage:
 *  perf stat -e 'hv_24x7/domain=2,offset=8,starting_index=0,lpar=0xffffffff/'
 */

/* u3 0-6, one of HV_24X7_PERF_DOMAIN */
EVENT_DEFINE_RANGE_FORMAT(domain, config, 0, 3);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(starting_index, config, 16, 31);
/* u32, see "data_offset" */
EVENT_DEFINE_RANGE_FORMAT(offset, config, 32, 63);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(lpar, config1, 0, 15);

EVENT_DEFINE_RANGE(reserved1, config,   4, 15);
EVENT_DEFINE_RANGE(reserved2, config1, 16, 63);
EVENT_DEFINE_RANGE(reserved3, config2,  0, 63);

static struct attribute *format_attrs[] = {
	&format_attr_domain.attr,
	&format_attr_offset.attr,
	&format_attr_starting_index.attr,
	&format_attr_lpar.attr,
	NULL,
};

static struct attribute_group format_group = {
	.name = "format",
	.attrs = format_attrs,
};

static struct kmem_cache *hv_page_cache;

/*
 * read_offset_data - copy data from one buffer to another while treating the
 *                    source buffer as a small view on the total available
 *                    source data.
 *
 * @dest: buffer to copy into
 * @dest_len: length of @dest in bytes
 * @requested_offset: the offset within the source data we want. Must be >= 0
 * @src: buffer to copy data from
 * @src_len: length of @src in bytes
 * @source_offset: the offset in the source data that (src,src_len) refers to.
 *                 Must be >= 0
 *
 * returns the number of bytes copied.
 *
 * The following ascii art shows the various buffer positionings we need to
 * handle, assigns some arbitrary variables to points on the buffer, and then
 * shows how we fiddle with those values to get the things we care about
 * (copy start in src and copy len)
 *
 * s = @src buffer
 * d = @dest buffer
 * '.' areas in d are written to.
 *
 *                       u
 *   x         w	 v  z
 * d           |.........|
 * s |----------------------|
 *
 *                      u
 *   x         w	z     v
 * d           |........------|
 * s |------------------|
 *
 *   x         w        u,z,v
 * d           |........|
 * s |------------------|
 *
 *   x,w                u,v,z
 * d |..................|
 * s |------------------|
 *
 *   x        u
 *   w        v		z
 * d |........|
 * s |------------------|
 *
 *   x      z   w      v
 * d            |------|
 * s |------|
 *
 * x = source_offset
 * w = requested_offset
 * z = source_offset + src_len
 * v = requested_offset + dest_len
 *
 * w_offset_in_s = w - x = requested_offset - source_offset
 * z_offset_in_s = z - x = src_len
 * v_offset_in_s = v - x = requested_offset + dest_len - source_offset
 */
static ssize_t read_offset_data(void *dest, size_t dest_len,
				loff_t requested_offset, void *src,
				size_t src_len, loff_t source_offset)
{
	size_t w_offset_in_s = requested_offset - source_offset;
	size_t z_offset_in_s = src_len;
	size_t v_offset_in_s = requested_offset + dest_len - source_offset;
	size_t u_offset_in_s = min(z_offset_in_s, v_offset_in_s);
	size_t copy_len = u_offset_in_s - w_offset_in_s;

	if (requested_offset < 0 || source_offset < 0)
		return -EINVAL;

	if (z_offset_in_s <= w_offset_in_s)
		return 0;

	memcpy(dest, src + w_offset_in_s, copy_len);
	return copy_len;
}
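
/*
 * Worked example for read_offset_data() (values chosen for illustration,
 * shaped like a catalog_read() call): with dest_len = 100,
 * requested_offset = 5000, src_len = 4096 and source_offset = 4096 (i.e.
 * @src holds catalog page 1), w_offset_in_s = 904, z_offset_in_s = 4096
 * and v_offset_in_s = 1004, so u_offset_in_s = min(4096, 1004) = 1004 and
 * copy_len = 1004 - 904 = 100: exactly the requested bytes are copied out
 * of the middle of the page.
 */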

static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
					      unsigned long version,
					      unsigned long index)
{
	pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
			phys_4096,
			version,
			index);
	WARN_ON(!IS_ALIGNED(phys_4096, 4096));
	return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
			phys_4096,
			version,
			index);
}

static unsigned long h_get_24x7_catalog_page(char page[],
					     u64 version, u32 index)
{
	return h_get_24x7_catalog_page_(virt_to_phys(page),
					version, index);
}

static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr, char *buf,
			    loff_t offset, size_t count)
{
	unsigned long hret;
	ssize_t ret = 0;
	size_t catalog_len = 0, catalog_page_len = 0, page_count = 0;
	loff_t page_offset = 0;
	uint64_t catalog_version_num = 0;
	void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
	struct hv_24x7_catalog_page_0 *page_0 = page;

	if (!page)
		return -ENOMEM;

	hret = h_get_24x7_catalog_page(page, 0, 0);
	if (hret) {
		ret = -EIO;
		goto e_free;
	}

	catalog_version_num = be64_to_cpu(page_0->version);
	catalog_page_len = be32_to_cpu(page_0->length);
	catalog_len = catalog_page_len * 4096;

	page_offset = offset / 4096;
	page_count  = count  / 4096;

	if (page_offset >= catalog_page_len)
		goto e_free;

	if (page_offset != 0) {
		hret = h_get_24x7_catalog_page(page, catalog_version_num,
					       page_offset);
		if (hret) {
			ret = -EIO;
			goto e_free;
		}
	}

	ret = read_offset_data(buf, count, offset,
				page, 4096, page_offset * 4096);
e_free:
	if (hret)
		pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed: rc=%ld\n",
		       catalog_version_num, page_offset, hret);
	kmem_cache_free(hv_page_cache, page);

	pr_devel("catalog_read: offset=%lld(%lld) count=%zu(%zu) catalog_len=%zu(%zu) => %zd\n",
			offset, page_offset, count, page_count, catalog_len,
			catalog_page_len, ret);

	return ret;
}
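
/*
 * Example of reading the catalog from userspace (illustrative; the path
 * assumes the standard event_source sysfs layout that perf_pmu_register()
 * creates for the "hv_24x7" PMU together with the "interface" attribute
 * group below):
 *
 *   dd if=/sys/bus/event_source/devices/hv_24x7/interface/catalog \
 *      of=hv-24x7-catalog.bin bs=4k
 *
 * Each read lands in catalog_read(), which fetches the page covering the
 * requested offset from the hypervisor and copies the matching slice out.
 */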

#define PAGE_0_ATTR(_name, _fmt, _expr)				\
static ssize_t _name##_show(struct device *dev,			\
			    struct device_attribute *dev_attr,	\
			    char *buf)				\
{								\
	unsigned long hret;					\
	ssize_t ret = 0;					\
	void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);	\
	struct hv_24x7_catalog_page_0 *page_0 = page;		\
	if (!page)						\
		return -ENOMEM;					\
	hret = h_get_24x7_catalog_page(page, 0, 0);		\
	if (hret) {						\
		ret = -EIO;					\
		goto e_free;					\
	}							\
	ret = sprintf(buf, _fmt, _expr);			\
e_free:								\
	kmem_cache_free(hv_page_cache, page);			\
	return ret;						\
}								\
static DEVICE_ATTR_RO(_name)

PAGE_0_ATTR(catalog_version, "%lld\n",
		(unsigned long long)be64_to_cpu(page_0->version));
PAGE_0_ATTR(catalog_len, "%lld\n",
		(unsigned long long)be32_to_cpu(page_0->length) * 4096);
static BIN_ATTR_RO(catalog, 0/* real length varies */);

static struct bin_attribute *if_bin_attrs[] = {
	&bin_attr_catalog,
	NULL,
};

static struct attribute *if_attrs[] = {
	&dev_attr_catalog_len.attr,
	&dev_attr_catalog_version.attr,
	NULL,
};

static struct attribute_group if_group = {
	.name = "interface",
	.bin_attrs = if_bin_attrs,
	.attrs = if_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&format_group,
	&if_group,
	NULL,
};

static bool is_physical_domain(int domain)
{
	return  domain == HV_24X7_PERF_DOMAIN_PHYSICAL_CHIP ||
		domain == HV_24X7_PERF_DOMAIN_PHYSICAL_CORE;
}

static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
					 u16 lpar, u64 *res,
					 bool success_expected)
{
	unsigned long ret;

	/*
	 * request_buffer and result_buffer are not required to be 4k aligned,
	 * but are not allowed to cross any 4k boundary. Aligning them to 4k is
	 * the simplest way to ensure that.
	 */
	struct reqb {
		struct hv_24x7_request_buffer buf;
		struct hv_24x7_request req;
	} __packed __aligned(4096) request_buffer = {
		.buf = {
			.interface_version = HV_24X7_IF_VERSION_CURRENT,
			.num_requests = 1,
		},
		.req = {
			.performance_domain = domain,
			.data_size = cpu_to_be16(8),
			.data_offset = cpu_to_be32(offset),
			.starting_lpar_ix = cpu_to_be16(lpar),
			.max_num_lpars = cpu_to_be16(1),
			.starting_ix = cpu_to_be16(ix),
			.max_ix = cpu_to_be16(1),
		}
	};

	struct resb {
		struct hv_24x7_data_result_buffer buf;
		struct hv_24x7_result res;
		struct hv_24x7_result_element elem;
		__be64 result;
	} __packed __aligned(4096) result_buffer = {};

	ret = plpar_hcall_norets(H_GET_24X7_DATA,
			virt_to_phys(&request_buffer), sizeof(request_buffer),
			virt_to_phys(&result_buffer),  sizeof(result_buffer));

	if (ret) {
		if (success_expected)
			pr_err_ratelimited("hcall failed: %d %#x %#x %d => 0x%lx (%ld) detail=0x%x failing ix=%x\n",
					domain, offset, ix, lpar,
					ret, ret,
					result_buffer.buf.detailed_rc,
					result_buffer.buf.failing_request_ix);
		return ret;
	}

	*res = be64_to_cpu(result_buffer.result);
	return ret;
}
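
/*
 * Illustrative (hypothetical) call of single_24x7_request(): fetch one
 * 8-byte counter at data_offset 0x8 for index 0 in the physical core
 * domain. The 0xffff lpar value is the all-ones u16 matching
 * event_get_lpar_max(), which h_24x7_event_init() below treats as the
 * only value not requiring extra capabilities:
 *
 *	u64 count;
 *	unsigned long rc;
 *
 *	rc = single_24x7_request(HV_24X7_PERF_DOMAIN_PHYSICAL_CORE, 0x8, 0,
 *				 0xffff, &count, true);
 *	if (!rc)
 *		pr_devel("counter: %llu\n", count);
 */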

static unsigned long event_24x7_request(struct perf_event *event, u64 *res,
		bool success_expected)
{
	return single_24x7_request(event_get_domain(event),
				event_get_offset(event),
				event_get_starting_index(event),
				event_get_lpar(event),
				res,
				success_expected);
}

static int h_24x7_event_init(struct perf_event *event)
{
	struct hv_perf_caps caps;
	unsigned domain;
	unsigned long hret;
	u64 ct;

	/* Not our event */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Unused areas must be 0 */
	if (event_get_reserved1(event) ||
	    event_get_reserved2(event) ||
	    event_get_reserved3(event)) {
		pr_devel("reserved set when forbidden 0x%llx(0x%llx) 0x%llx(0x%llx) 0x%llx(0x%llx)\n",
				event->attr.config,
				event_get_reserved1(event),
				event->attr.config1,
				event_get_reserved2(event),
				event->attr.config2,
				event_get_reserved3(event));
		return -EINVAL;
	}

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    is_sampling_event(event)) /* no sampling */
		return -EINVAL;

	/* no branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	/* offset must be 8 byte aligned */
	if (event_get_offset(event) % 8) {
		pr_devel("bad alignment\n");
		return -EINVAL;
	}

	/* Domains above 6 are invalid */
	domain = event_get_domain(event);
	if (domain > 6) {
		pr_devel("invalid domain %d\n", domain);
		return -EINVAL;
	}

	hret = hv_perf_caps_get(&caps);
	if (hret) {
		pr_devel("could not get capabilities: rc=%ld\n", hret);
		return -EIO;
	}

	/* PHYSICAL domains & other lpars require extra capabilities */
	if (!caps.collect_privileged && (is_physical_domain(domain) ||
		(event_get_lpar(event) != event_get_lpar_max()))) {
		pr_devel("hv permissions disallow: is_physical_domain:%d, lpar=0x%llx\n",
				is_physical_domain(domain),
				event_get_lpar(event));
		return -EACCES;
	}

	/* see if the event complains */
	if (event_24x7_request(event, &ct, false)) {
		pr_devel("test hcall failed\n");
		return -EIO;
	}

	return 0;
}

static u64 h_24x7_get_value(struct perf_event *event)
{
	unsigned long ret;
	u64 ct;

	ret = event_24x7_request(event, &ct, true);
	if (ret)
		/* We checked this in event init, shouldn't fail here... */
		return 0;

	return ct;
}

static void h_24x7_event_update(struct perf_event *event)
{
	s64 prev;
	u64 now;

	now = h_24x7_get_value(event);
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
}
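
/*
 * Example of the update arithmetic (numbers invented for illustration):
 * if prev_count was snapshotted as 100 when the event was started and the
 * hcall now reports 160, the xchg above makes 160 the new baseline and 60
 * is added to event->count, so a start/stop window only reports the
 * counts that accumulated while the event was active.
 */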

static void h_24x7_event_start(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_RELOAD)
		local64_set(&event->hw.prev_count, h_24x7_get_value(event));
}

static void h_24x7_event_stop(struct perf_event *event, int flags)
{
	h_24x7_event_update(event);
}

static int h_24x7_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		h_24x7_event_start(event, flags);

	return 0;
}

static int h_24x7_event_idx(struct perf_event *event)
{
	return 0;
}

static struct pmu h_24x7_pmu = {
	.task_ctx_nr = perf_invalid_context,

	.name = "hv_24x7",
	.attr_groups = attr_groups,
	.event_init  = h_24x7_event_init,
	.add         = h_24x7_event_add,
	.del         = h_24x7_event_stop,
	.start       = h_24x7_event_start,
	.stop        = h_24x7_event_stop,
	.read        = h_24x7_event_update,
	.event_idx   = h_24x7_event_idx,
};
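
/*
 * Sketch of how the perf core drives the callbacks above for a counting
 * event (the exact sequence is up to the core, not this driver):
 *
 *	event_init          h_24x7_event_init()    validate + test hcall
 *	add(PERF_EF_START)  h_24x7_event_add()     snapshot prev_count
 *	read                h_24x7_event_update()  accumulate delta
 *	del                 h_24x7_event_stop()    final update
 */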

static int hv_24x7_init(void)
{
	int r;
	unsigned long hret;
	struct hv_perf_caps caps;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		pr_debug("not a virtualized system, not enabling\n");
		return -ENODEV;
	}

	hret = hv_perf_caps_get(&caps);
	if (hret) {
		pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
				hret);
		return -ENODEV;
	}

	hv_page_cache = kmem_cache_create("hv-page-4096", 4096, 4096, 0, NULL);
	if (!hv_page_cache)
		return -ENOMEM;

	r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1);
	if (r)
		return r;

	return 0;
}

device_initcall(hv_24x7_init);