xref: /openbmc/linux/arch/powerpc/perf/hv-24x7.c (revision 93d90ad7)
/*
 * Hypervisor supplied "24x7" performance counter support
 *
 * Author: Cody P Schafer <cody@linux.vnet.ibm.com>
 * Copyright 2014 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "hv-24x7: " fmt

#include <linux/perf_event.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/io.h>

#include "hv-24x7.h"
#include "hv-24x7-catalog.h"
#include "hv-common.h"

/*
 * TODO: Merging events:
 * - Think of the hcall as an interface to a 4d array of counters:
 *   - x = domains
 *   - y = indexes in the domain (core, chip, vcpu, node, etc)
 *   - z = offset into the counter space
 *   - w = lpars (guest vms, "logical partitions")
 * - A single request is: x,y,y_last,z,z_last,w,w_last
 *   - this means we can retrieve a rectangle of counters in y,z for a single x.
 *
 * - Things to consider (ignoring w):
 *   - input  cost_per_request = 16
 *   - output cost_per_result(ys,zs)  = 8 + 8 * ys + ys * zs
 *   - limited number of requests per hcall (must fit into 4K bytes)
 *     - 4k = 16 [buffer header] + 16 [request size] * request_count
 *     - 255 requests per hcall
 *   - sometimes it will be more efficient to read extra data and discard
 */
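
/*
 * Illustrative sketch of the arithmetic above; these are hypothetical
 * helpers, not part of the driver. A 4K request buffer holds a 16 byte
 * header plus 16 byte requests, and each result consumes
 * 8 + 8 * ys + ys * zs bytes of the 4K result buffer.
 */
static inline unsigned int hv_24x7_max_requests(void)
{
	/* (4096 - 16) / 16 == 255 requests per hcall */
	return (4096 - 16) / 16;
}

static inline size_t hv_24x7_result_bytes(size_t ys, size_t zs)
{
	/* cost_per_result(ys,zs) from the TODO comment above */
	return 8 + 8 * ys + ys * zs;
}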

/*
 * Example usage:
 *  perf stat -e 'hv_24x7/domain=2,offset=8,starting_index=0,lpar=0xffffffff/'
 */

/* u3 0-6, one of HV_24X7_PERF_DOMAIN */
EVENT_DEFINE_RANGE_FORMAT(domain, config, 0, 3);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(starting_index, config, 16, 31);
/* u32, see "data_offset" */
EVENT_DEFINE_RANGE_FORMAT(offset, config, 32, 63);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(lpar, config1, 0, 15);

EVENT_DEFINE_RANGE(reserved1, config,   4, 15);
EVENT_DEFINE_RANGE(reserved2, config1, 16, 63);
EVENT_DEFINE_RANGE(reserved3, config2,  0, 63);
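
/*
 * Illustrative packing, assuming perf's usual low-bit-first field layout:
 * the example event string above lands in the config words as
 *
 *	config  = domain | (starting_index << 16) | ((u64)offset << 32)
 *	        = 2 | (0 << 16) | (8ULL << 32) = 0x0000000800000002
 *	config1 = lpar		(low 16 bits)
 */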

static struct attribute *format_attrs[] = {
	&format_attr_domain.attr,
	&format_attr_offset.attr,
	&format_attr_starting_index.attr,
	&format_attr_lpar.attr,
	NULL,
};

static struct attribute_group format_group = {
	.name = "format",
	.attrs = format_attrs,
};

static struct kmem_cache *hv_page_cache;

static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
					      unsigned long version,
					      unsigned long index)
{
	pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
			phys_4096, version, index);
	WARN_ON(!IS_ALIGNED(phys_4096, 4096));
	return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
			phys_4096, version, index);
}

static unsigned long h_get_24x7_catalog_page(char page[],
					     u64 version, u32 index)
{
	return h_get_24x7_catalog_page_(virt_to_phys(page),
					version, index);
}
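
/*
 * Note: every caller below passes a page from hv_page_cache, which is
 * created with 4K alignment, so the IS_ALIGNED() check above holds.
 */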

static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr, char *buf,
			    loff_t offset, size_t count)
{
	unsigned long hret;
	ssize_t ret = 0;
	size_t catalog_len = 0, catalog_page_len = 0;
	loff_t page_offset = 0;
	loff_t offset_in_page;
	size_t copy_len;
	uint64_t catalog_version_num = 0;
	void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
	struct hv_24x7_catalog_page_0 *page_0 = page;

	if (!page)
		return -ENOMEM;

	hret = h_get_24x7_catalog_page(page, 0, 0);
	if (hret) {
		ret = -EIO;
		goto e_free;
	}

	catalog_version_num = be64_to_cpu(page_0->version);
	catalog_page_len = be32_to_cpu(page_0->length);
	catalog_len = catalog_page_len * 4096;

	page_offset = offset / 4096;
	offset_in_page = offset % 4096;

	if (page_offset >= catalog_page_len)
		goto e_free;

	if (page_offset != 0) {
		hret = h_get_24x7_catalog_page(page, catalog_version_num,
					       page_offset);
		if (hret) {
			ret = -EIO;
			goto e_free;
		}
	}

	copy_len = 4096 - offset_in_page;
	if (copy_len > count)
		copy_len = count;

	memcpy(buf, page + offset_in_page, copy_len);
	ret = copy_len;

e_free:
	if (hret)
		pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed: rc=%ld\n",
		       catalog_version_num, page_offset, hret);
	kmem_cache_free(hv_page_cache, page);

	pr_devel("catalog_read: offset=%lld(%lld) count=%zu catalog_len=%zu(%zu) => %zd\n",
			offset, page_offset, count, catalog_len,
			catalog_page_len, ret);

	return ret;
}
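
/*
 * Usage sketch, assuming the standard event_source sysfs layout; the
 * catalog can be dumped from userspace in page-sized reads, e.g.
 *
 *	dd if=/sys/bus/event_source/devices/hv_24x7/interface/catalog \
 *	   bs=4096 of=catalog.bin
 *
 * Each read lands in catalog_read() above, which re-fetches the 4K
 * hypervisor page containing @offset and copies out at most one page.
 */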

#define PAGE_0_ATTR(_name, _fmt, _expr)				\
static ssize_t _name##_show(struct device *dev,			\
			    struct device_attribute *dev_attr,	\
			    char *buf)				\
{								\
	unsigned long hret;					\
	ssize_t ret = 0;					\
	void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);	\
	struct hv_24x7_catalog_page_0 *page_0 = page;		\
	if (!page)						\
		return -ENOMEM;					\
	hret = h_get_24x7_catalog_page(page, 0, 0);		\
	if (hret) {						\
		ret = -EIO;					\
		goto e_free;					\
	}							\
	ret = sprintf(buf, _fmt, _expr);			\
e_free:								\
	kmem_cache_free(hv_page_cache, page);			\
	return ret;						\
}								\
static DEVICE_ATTR_RO(_name)

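/*
 * Each PAGE_0_ATTR() use below expands to a <name>_show() routine that
 * fetches page 0 of the catalog, formats one field from it with _fmt,
 * and defines the matching read-only device attribute.
 */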
PAGE_0_ATTR(catalog_version, "%lld\n",
		(unsigned long long)be64_to_cpu(page_0->version));
PAGE_0_ATTR(catalog_len, "%lld\n",
		(unsigned long long)be32_to_cpu(page_0->length) * 4096);
static BIN_ATTR_RO(catalog, 0 /* real length varies */);

static struct bin_attribute *if_bin_attrs[] = {
	&bin_attr_catalog,
	NULL,
};

static struct attribute *if_attrs[] = {
	&dev_attr_catalog_len.attr,
	&dev_attr_catalog_version.attr,
	NULL,
};

static struct attribute_group if_group = {
	.name = "interface",
	.bin_attrs = if_bin_attrs,
	.attrs = if_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&format_group,
	&if_group,
	NULL,
};

static bool is_physical_domain(int domain)
{
	return  domain == HV_24X7_PERF_DOMAIN_PHYSICAL_CHIP ||
		domain == HV_24X7_PERF_DOMAIN_PHYSICAL_CORE;
}

DEFINE_PER_CPU(char, hv_24x7_reqb[4096]) __aligned(4096);
DEFINE_PER_CPU(char, hv_24x7_resb[4096]) __aligned(4096);

static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
					 u16 lpar, u64 *res,
					 bool success_expected)
{
	unsigned long ret;

	/*
	 * request_buffer and result_buffer are not required to be 4k aligned,
	 * but are not allowed to cross any 4k boundary. Aligning them to 4k is
	 * the simplest way to ensure that.
	 */
	struct reqb {
		struct hv_24x7_request_buffer buf;
		struct hv_24x7_request req;
	} __packed *request_buffer;

	struct {
		struct hv_24x7_data_result_buffer buf;
		struct hv_24x7_result res;
		struct hv_24x7_result_element elem;
		__be64 result;
	} __packed *result_buffer;

	BUILD_BUG_ON(sizeof(*request_buffer) > 4096);
	BUILD_BUG_ON(sizeof(*result_buffer) > 4096);

	request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
	result_buffer = (void *)get_cpu_var(hv_24x7_resb);

	memset(request_buffer, 0, 4096);
	memset(result_buffer, 0, 4096);

	*request_buffer = (struct reqb) {
		.buf = {
			.interface_version = HV_24X7_IF_VERSION_CURRENT,
			.num_requests = 1,
		},
		.req = {
			.performance_domain = domain,
			.data_size = cpu_to_be16(8),
			.data_offset = cpu_to_be32(offset),
			.starting_lpar_ix = cpu_to_be16(lpar),
			.max_num_lpars = cpu_to_be16(1),
			.starting_ix = cpu_to_be16(ix),
			.max_ix = cpu_to_be16(1),
		}
	};

	ret = plpar_hcall_norets(H_GET_24X7_DATA,
			virt_to_phys(request_buffer), sizeof(*request_buffer),
			virt_to_phys(result_buffer),  sizeof(*result_buffer));

	if (ret) {
		if (success_expected)
			pr_err_ratelimited("hcall failed: %d %#x %#x %d => 0x%lx (%ld) detail=0x%x failing ix=%x\n",
				domain, offset, ix, lpar, ret, ret,
				result_buffer->buf.detailed_rc,
				result_buffer->buf.failing_request_ix);
		goto out;
	}

	*res = be64_to_cpu(result_buffer->result);

out:
	/* balance the two get_cpu_var()s above and re-enable preemption */
	put_cpu_var(hv_24x7_reqb);
	put_cpu_var(hv_24x7_resb);
	return ret;
}
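
/*
 * Usage sketch with illustrative field values (the offset, index and lpar
 * below are assumptions, not values taken from a real catalog): reading
 * one 8 byte counter from the physical core domain could look like
 *
 *	u64 ct;
 *	unsigned long rc;
 *
 *	rc = single_24x7_request(HV_24X7_PERF_DOMAIN_PHYSICAL_CORE,
 *				 0x8, 0, 0, &ct, true);
 *
 * On success the big-endian counter is byte-swapped into ct.
 */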

static unsigned long event_24x7_request(struct perf_event *event, u64 *res,
		bool success_expected)
{
	return single_24x7_request(event_get_domain(event),
				event_get_offset(event),
				event_get_starting_index(event),
				event_get_lpar(event),
				res,
				success_expected);
}

static int h_24x7_event_init(struct perf_event *event)
{
	struct hv_perf_caps caps;
	unsigned domain;
	unsigned long hret;
	u64 ct;

	/* Not our event */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Unused areas must be 0 */
	if (event_get_reserved1(event) ||
	    event_get_reserved2(event) ||
	    event_get_reserved3(event)) {
		pr_devel("reserved set when forbidden 0x%llx(0x%llx) 0x%llx(0x%llx) 0x%llx(0x%llx)\n",
				event->attr.config,
				event_get_reserved1(event),
				event->attr.config1,
				event_get_reserved2(event),
				event->attr.config2,
				event_get_reserved3(event));
		return -EINVAL;
	}

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest)
		return -EINVAL;

	/* no branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	/* offset must be 8 byte aligned */
	if (event_get_offset(event) % 8) {
		pr_devel("bad alignment\n");
		return -EINVAL;
	}

	/* Domains above 6 are invalid */
	domain = event_get_domain(event);
	if (domain > 6) {
		pr_devel("invalid domain %d\n", domain);
		return -EINVAL;
	}

	hret = hv_perf_caps_get(&caps);
	if (hret) {
		pr_devel("could not get capabilities: rc=%ld\n", hret);
		return -EIO;
	}

	/* PHYSICAL domains & other lpars require extra capabilities */
	if (!caps.collect_privileged && (is_physical_domain(domain) ||
		(event_get_lpar(event) != event_get_lpar_max()))) {
		pr_devel("hv permissions disallow: is_physical_domain:%d, lpar=0x%llx\n",
				is_physical_domain(domain),
				event_get_lpar(event));
		return -EACCES;
	}

	/* see if the event complains */
	if (event_24x7_request(event, &ct, false)) {
		pr_devel("test hcall failed\n");
		return -EIO;
	}

	return 0;
}

static u64 h_24x7_get_value(struct perf_event *event)
{
	unsigned long ret;
	u64 ct;

	ret = event_24x7_request(event, &ct, true);
	if (ret)
		/* We checked this in event init, shouldn't fail here... */
		return 0;

	return ct;
}

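/*
 * Standard delta accounting for a free-running counter the driver never
 * writes: snapshot the current value, swap it into prev_count, and add
 * the difference to the event count.
 */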
static void h_24x7_event_update(struct perf_event *event)
{
	s64 prev;
	u64 now;

	now = h_24x7_get_value(event);
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
}

static void h_24x7_event_start(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_RELOAD)
		local64_set(&event->hw.prev_count, h_24x7_get_value(event));
}

static void h_24x7_event_stop(struct perf_event *event, int flags)
{
	h_24x7_event_update(event);
}

static int h_24x7_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		h_24x7_event_start(event, flags);

	return 0;
}

static struct pmu h_24x7_pmu = {
	.task_ctx_nr = perf_invalid_context,

	.name = "hv_24x7",
	.attr_groups = attr_groups,
	.event_init  = h_24x7_event_init,
	.add         = h_24x7_event_add,
	.del         = h_24x7_event_stop,
	.start       = h_24x7_event_start,
	.stop        = h_24x7_event_stop,
	.read        = h_24x7_event_update,
};

static int hv_24x7_init(void)
{
	int r;
	unsigned long hret;
	struct hv_perf_caps caps;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		pr_debug("not a virtualized system, not enabling\n");
		return -ENODEV;
	}

	hret = hv_perf_caps_get(&caps);
	if (hret) {
		pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
				hret);
		return -ENODEV;
	}

	hv_page_cache = kmem_cache_create("hv-page-4096", 4096, 4096, 0, NULL);
	if (!hv_page_cache)
		return -ENOMEM;

	/* sampling not supported */
	h_24x7_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1);
	if (r)
		return r;

	return 0;
}

device_initcall(hv_24x7_init);