/*
 * Hypervisor supplied "24x7" performance counter support
 *
 * Author: Cody P Schafer <cody@linux.vnet.ibm.com>
 * Copyright 2014 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "hv-24x7: " fmt

#include <linux/perf_event.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/io.h>

#include "hv-24x7.h"
#include "hv-24x7-catalog.h"
#include "hv-common.h"

/*
 * TODO: Merging events:
 * - Think of the hcall as an interface to a 4d array of counters:
 *   - x = domains
 *   - y = indexes in the domain (core, chip, vcpu, node, etc)
 *   - z = offset into the counter space
 *   - w = lpars (guest vms, "logical partitions")
 * - A single request is: x,y,y_last,z,z_last,w,w_last
 *   - this means we can retrieve a rectangle of counters in y,z for a single x.
 *
 * - Things to consider (ignoring w):
 *   - input  cost_per_request = 16
 *   - output cost_per_result(ys,zs) = 8 + 8 * ys + ys * zs
 *   - limited number of requests per hcall (must fit into 4K bytes)
 *     - 4096 bytes = 16 [buffer header] + 16 [request size] * request_count,
 *       giving at most 255 requests per hcall
 *   - sometimes it will be more efficient to read extra data and discard
 */

/*
 * Example usage:
 *  perf stat -e 'hv_24x7/domain=2,offset=8,starting_index=0,lpar=0xffffffff/'
 */

/* u3 0-6, one of HV_24X7_PERF_DOMAIN */
EVENT_DEFINE_RANGE_FORMAT(domain, config, 0, 3);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(starting_index, config, 16, 31);
/* u32, see "data_offset" */
EVENT_DEFINE_RANGE_FORMAT(offset, config, 32, 63);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(lpar, config1, 0, 15);

EVENT_DEFINE_RANGE(reserved1, config, 4, 15);
EVENT_DEFINE_RANGE(reserved2, config1, 16, 63);
EVENT_DEFINE_RANGE(reserved3, config2, 0, 63);

static struct attribute *format_attrs[] = {
	&format_attr_domain.attr,
	&format_attr_offset.attr,
	&format_attr_starting_index.attr,
	&format_attr_lpar.attr,
	NULL,
};

static struct attribute_group format_group = {
	.name = "format",
	.attrs = format_attrs,
};

static struct kmem_cache *hv_page_cache;

/*
 * read_offset_data - copy data from one buffer to another while treating the
 *                    source buffer as a small view of the total available
 *                    source data.
 *
 * @dest: buffer to copy into
 * @dest_len: length of @dest in bytes
 * @requested_offset: the offset within the source data we want. Must be >= 0
 * @src: buffer to copy data from
 * @src_len: length of @src in bytes
 * @source_offset: the offset in the source data that (src,src_len) refers to.
 *                 Must be >= 0
 *
 * returns the number of bytes copied.
 *
 * The following ascii art shows the various buffer positionings we need to
 * handle, assigns some arbitrary variables to points on the buffer, and then
 * shows how we fiddle with those values to get the things we care about
 * (copy start in src and copy len).
 *
 * s = @src buffer
 * d = @dest buffer
 * '.' areas in d are written to.
 *
 *                         u
 *   x         w         v  z
 * d           |.........|
 * s |----------------------|
 *
 *                        u
 *   x         w        z     v
 * d           |........------|
 * s |------------------|
 *
 *   x         w        u,z,v
 * d           |........|
 * s |------------------|
 *
 *   x,w                u,v,z
 * d |..................|
 * s |------------------|
 *
 *   x         u
 *    w        v        z
 * d  |........|
 * s |------------------|
 *
 *   x      z   w      v
 * d            |------|
 * s |------|
 *
 * x = source_offset
 * w = requested_offset
 * z = source_offset + src_len
 * v = requested_offset + dest_len
 *
 * w_offset_in_s = w - x = requested_offset - source_offset
 * z_offset_in_s = z - x = src_len
 * v_offset_in_s = v - x = requested_offset + dest_len - source_offset
 */
static ssize_t read_offset_data(void *dest, size_t dest_len,
				loff_t requested_offset, void *src,
				size_t src_len, loff_t source_offset)
{
	size_t w_offset_in_s = requested_offset - source_offset;
	size_t z_offset_in_s = src_len;
	size_t v_offset_in_s = requested_offset + dest_len - source_offset;
	size_t u_offset_in_s = min(z_offset_in_s, v_offset_in_s);
	size_t copy_len = u_offset_in_s - w_offset_in_s;

	if (requested_offset < 0 || source_offset < 0)
		return -EINVAL;

	if (z_offset_in_s <= w_offset_in_s)
		return 0;

	memcpy(dest, src + w_offset_in_s, copy_len);
	return copy_len;
}
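
/*
 * Worked example (values are illustrative only): a 100 byte read at file
 * offset 4200 serviced from the second catalog page, i.e. source_offset =
 * 4096, src_len = 4096, requested_offset = 4200, dest_len = 100:
 *
 *	w_offset_in_s = 4200 - 4096       = 104
 *	z_offset_in_s = 4096
 *	v_offset_in_s = 4200 + 100 - 4096 = 204
 *	u_offset_in_s = min(4096, 204)    = 204
 *	copy_len      = 204 - 104         = 100
 *
 * so all 100 requested bytes are copied starting at offset 104 within @src.
 */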

static unsigned long h_get_24x7_catalog_page(char page[static 4096],
					     u32 version, u32 index)
{
	WARN_ON(!IS_ALIGNED((unsigned long)page, 4096));
	return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
			virt_to_phys(page),
			version,
			index);
}

static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr, char *buf,
			    loff_t offset, size_t count)
{
	unsigned long hret;
	ssize_t ret = 0;
	size_t catalog_len = 0, catalog_page_len = 0, page_count = 0;
	loff_t page_offset = 0;
	uint32_t catalog_version_num = 0;
	void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
	struct hv_24x7_catalog_page_0 *page_0 = page;
	if (!page)
		return -ENOMEM;

	hret = h_get_24x7_catalog_page(page, 0, 0);
	if (hret) {
		ret = -EIO;
		goto e_free;
	}

	catalog_version_num = be32_to_cpu(page_0->version);
	catalog_page_len = be32_to_cpu(page_0->length);
	catalog_len = catalog_page_len * 4096;

	page_offset = offset / 4096;
	page_count  = count / 4096;

	if (page_offset >= catalog_page_len)
		goto e_free;

	if (page_offset != 0) {
		hret = h_get_24x7_catalog_page(page, catalog_version_num,
					       page_offset);
		if (hret) {
			ret = -EIO;
			goto e_free;
		}
	}

	ret = read_offset_data(buf, count, offset,
			       page, 4096, page_offset * 4096);
e_free:
	if (hret)
		pr_err("h_get_24x7_catalog_page(ver=%d, page=%lld) failed: rc=%ld\n",
		       catalog_version_num, page_offset, hret);
	kmem_cache_free(hv_page_cache, page);

	pr_devel("catalog_read: offset=%lld(%lld) count=%zu(%zu) catalog_len=%zu(%zu) => %zd\n",
		 offset, page_offset, count, page_count, catalog_len,
		 catalog_page_len, ret);

	return ret;
}

#define PAGE_0_ATTR(_name, _fmt, _expr)					\
static ssize_t _name##_show(struct device *dev,				\
			    struct device_attribute *dev_attr,		\
			    char *buf)					\
{									\
	unsigned long hret;						\
	ssize_t ret = 0;						\
	void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);	\
	struct hv_24x7_catalog_page_0 *page_0 = page;			\
	if (!page)							\
		return -ENOMEM;						\
	hret = h_get_24x7_catalog_page(page, 0, 0);			\
	if (hret) {							\
		ret = -EIO;						\
		goto e_free;						\
	}								\
	ret = sprintf(buf, _fmt, _expr);				\
e_free:									\
	kmem_cache_free(hv_page_cache, page);				\
	return ret;							\
}									\
static DEVICE_ATTR_RO(_name)

PAGE_0_ATTR(catalog_version, "%lld\n",
	    (unsigned long long)be32_to_cpu(page_0->version));
PAGE_0_ATTR(catalog_len, "%lld\n",
	    (unsigned long long)be32_to_cpu(page_0->length) * 4096);
static BIN_ATTR_RO(catalog, 0/* real length varies */);

static struct bin_attribute *if_bin_attrs[] = {
	&bin_attr_catalog,
	NULL,
};

static struct attribute *if_attrs[] = {
	&dev_attr_catalog_len.attr,
	&dev_attr_catalog_version.attr,
	NULL,
};

static struct attribute_group if_group = {
	.name = "interface",
	.bin_attrs = if_bin_attrs,
	.attrs = if_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&format_group,
	&if_group,
	NULL,
};

static bool is_physical_domain(int domain)
{
	return domain == HV_24X7_PERF_DOMAIN_PHYSICAL_CHIP ||
		domain == HV_24X7_PERF_DOMAIN_PHYSICAL_CORE;
}

static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
					 u16 lpar, u64 *res,
					 bool success_expected)
{
	unsigned long ret;

	/*
	 * request_buffer and result_buffer are not required to be 4k aligned,
	 * but are not allowed to cross any 4k boundary. Aligning them to 4k is
	 * the simplest way to ensure that.
	 */
	struct reqb {
		struct hv_24x7_request_buffer buf;
		struct hv_24x7_request req;
	} __packed __aligned(4096) request_buffer = {
		.buf = {
			.interface_version = HV_24X7_IF_VERSION_CURRENT,
			.num_requests = 1,
		},
		.req = {
			.performance_domain = domain,
			.data_size = cpu_to_be16(8),
			.data_offset = cpu_to_be32(offset),
			.starting_lpar_ix = cpu_to_be16(lpar),
			.max_num_lpars = cpu_to_be16(1),
			.starting_ix = cpu_to_be16(ix),
			.max_ix = cpu_to_be16(1),
		}
	};

	struct resb {
		struct hv_24x7_data_result_buffer buf;
		struct hv_24x7_result res;
		struct hv_24x7_result_element elem;
		__be64 result;
	} __packed __aligned(4096) result_buffer = {};

	ret = plpar_hcall_norets(H_GET_24X7_DATA,
			virt_to_phys(&request_buffer), sizeof(request_buffer),
			virt_to_phys(&result_buffer), sizeof(result_buffer));

	if (ret) {
		if (success_expected)
			pr_err_ratelimited("hcall failed: %d %#x %#x %d => 0x%lx (%ld) detail=0x%x failing ix=%x\n",
					domain, offset, ix, lpar,
					ret, ret,
					result_buffer.buf.detailed_rc,
					result_buffer.buf.failing_request_ix);
		return ret;
	}

	*res = be64_to_cpu(result_buffer.result);
	return ret;
}
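
/*
 * Example (hypothetical values): reading the 8-byte counter at catalog
 * offset 0x100 for index 0 of the physical core domain might look like:
 *
 *	u64 ct;
 *	unsigned long rc = single_24x7_request(
 *			HV_24X7_PERF_DOMAIN_PHYSICAL_CORE, 0x100, 0, 0,
 *			&ct, false);
 *
 * The offset, index and lpar values above are purely illustrative.
 */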

static unsigned long event_24x7_request(struct perf_event *event, u64 *res,
					bool success_expected)
{
	return single_24x7_request(event_get_domain(event),
			event_get_offset(event),
			event_get_starting_index(event),
			event_get_lpar(event),
			res,
			success_expected);
}

static int h_24x7_event_init(struct perf_event *event)
{
	struct hv_perf_caps caps;
	unsigned domain;
	unsigned long hret;
	u64 ct;

	/* Not our event */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Unused areas must be 0 */
	if (event_get_reserved1(event) ||
	    event_get_reserved2(event) ||
	    event_get_reserved3(event)) {
		pr_devel("reserved set when forbidden 0x%llx(0x%llx) 0x%llx(0x%llx) 0x%llx(0x%llx)\n",
				event->attr.config,
				event_get_reserved1(event),
				event->attr.config1,
				event_get_reserved2(event),
				event->attr.config2,
				event_get_reserved3(event));
		return -EINVAL;
	}

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    is_sampling_event(event)) /* no sampling */
		return -EINVAL;

	/* no branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	/* offset must be 8 byte aligned */
	if (event_get_offset(event) % 8) {
		pr_devel("bad alignment\n");
		return -EINVAL;
	}

	/* Domains above 6 are invalid */
	domain = event_get_domain(event);
	if (domain > 6) {
		pr_devel("invalid domain %d\n", domain);
		return -EINVAL;
	}

	hret = hv_perf_caps_get(&caps);
	if (hret) {
		pr_devel("could not get capabilities: rc=%ld\n", hret);
		return -EIO;
	}

	/* PHYSICAL domains & other lpars require extra capabilities */
	if (!caps.collect_privileged && (is_physical_domain(domain) ||
		(event_get_lpar(event) != event_get_lpar_max()))) {
		pr_devel("hv permissions disallow: is_physical_domain:%d, lpar=0x%llx\n",
				is_physical_domain(domain),
				event_get_lpar(event));
		return -EACCES;
	}

	/* see if the event complains */
	if (event_24x7_request(event, &ct, false)) {
		pr_devel("test hcall failed\n");
		return -EIO;
	}

	return 0;
}

static u64 h_24x7_get_value(struct perf_event *event)
{
	unsigned long ret;
	u64 ct;
	ret = event_24x7_request(event, &ct, true);
	if (ret)
		/* We checked this in event init, shouldn't fail here... */
		return 0;

	return ct;
}
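
/*
 * The value returned by the hcall is treated as a free-running count:
 * ->start records a baseline in hw.prev_count, and ->read/->stop fold the
 * delta since the last snapshot into event->count.  There is nothing to
 * program into hardware, so ->add only has to start the event when asked.
 */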

static void h_24x7_event_update(struct perf_event *event)
{
	s64 prev;
	u64 now;
	now = h_24x7_get_value(event);
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
}

static void h_24x7_event_start(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_RELOAD)
		local64_set(&event->hw.prev_count, h_24x7_get_value(event));
}

static void h_24x7_event_stop(struct perf_event *event, int flags)
{
	h_24x7_event_update(event);
}

static int h_24x7_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		h_24x7_event_start(event, flags);

	return 0;
}

static int h_24x7_event_idx(struct perf_event *event)
{
	return 0;
}

static struct pmu h_24x7_pmu = {
	.task_ctx_nr = perf_invalid_context,

	.name = "hv_24x7",
	.attr_groups = attr_groups,
	.event_init  = h_24x7_event_init,
	.add         = h_24x7_event_add,
	.del         = h_24x7_event_stop,
	.start       = h_24x7_event_start,
	.stop        = h_24x7_event_stop,
	.read        = h_24x7_event_update,
	.event_idx   = h_24x7_event_idx,
};
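
/*
 * task_ctx_nr == perf_invalid_context means these events cannot be attached
 * to a task; they are only usable as system-wide (per-cpu) events.  The
 * "format" and "interface" attribute groups declared above are exposed via
 * sysfs once the PMU is registered below.
 */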

static int hv_24x7_init(void)
{
	int r;
	unsigned long hret;
	struct hv_perf_caps caps;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		pr_info("not a virtualized system, not enabling\n");
		return -ENODEV;
	}

	hret = hv_perf_caps_get(&caps);
	if (hret) {
		pr_info("could not obtain capabilities, error 0x%lx, not enabling\n",
				hret);
		return -ENODEV;
	}

	hv_page_cache = kmem_cache_create("hv-page-4096", 4096, 4096, 0, NULL);
	if (!hv_page_cache)
		return -ENOMEM;

	r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1);
	if (r)
		return r;

	return 0;
}

device_initcall(hv_24x7_init);