/*
 * Hypervisor supplied "gpci" ("get performance counter info") performance
 * counter support
 *
 * Author: Cody P Schafer <cody@linux.vnet.ibm.com>
 * Copyright 2014 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "hv-gpci: " fmt

#include <linux/init.h>
#include <linux/perf_event.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/io.h>

#include "hv-gpci.h"
#include "hv-common.h"

/*
 * Example usage:
 *  perf stat -e 'hv_gpci/counter_info_version=3,offset=0,length=8,
 *		  secondary_index=0,starting_index=0xffffffff,request=0x10/' ...
 */

/* u32 */
EVENT_DEFINE_RANGE_FORMAT(request, config, 0, 31);
/* u32 */
EVENT_DEFINE_RANGE_FORMAT(starting_index, config, 32, 63);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(secondary_index, config1, 0, 15);
/* u8 */
EVENT_DEFINE_RANGE_FORMAT(counter_info_version, config1, 16, 23);
/* u8, bytes of data (1-8) */
EVENT_DEFINE_RANGE_FORMAT(length, config1, 24, 31);
/* u32, byte offset */
EVENT_DEFINE_RANGE_FORMAT(offset, config1, 32, 63);

static struct attribute *format_attrs[] = {
	&format_attr_request.attr,
	&format_attr_starting_index.attr,
	&format_attr_secondary_index.attr,
	&format_attr_counter_info_version.attr,

	&format_attr_offset.attr,
	&format_attr_length.attr,
	NULL,
};

static struct attribute_group format_group = {
	.name = "format",
	.attrs = format_attrs,
};

#define HV_CAPS_ATTR(_name, _format)				\
static ssize_t _name##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *page)				\
{								\
	struct hv_perf_caps caps;				\
	unsigned long hret = hv_perf_caps_get(&caps);		\
	if (hret)						\
		return -EIO;					\
								\
	return sprintf(page, _format, caps._name);		\
}								\
static struct device_attribute hv_caps_attr_##_name = __ATTR_RO(_name)

static ssize_t kernel_version_show(struct device *dev,
				   struct device_attribute *attr,
				   char *page)
{
	return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT);
}

static DEVICE_ATTR_RO(kernel_version);
HV_CAPS_ATTR(version, "0x%x\n");
HV_CAPS_ATTR(ga, "%d\n");
HV_CAPS_ATTR(expanded, "%d\n");
HV_CAPS_ATTR(lab, "%d\n");
HV_CAPS_ATTR(collect_privileged, "%d\n");

static struct attribute *interface_attrs[] = {
	&dev_attr_kernel_version.attr,
	&hv_caps_attr_version.attr,
	&hv_caps_attr_ga.attr,
	&hv_caps_attr_expanded.attr,
	&hv_caps_attr_lab.attr,
	&hv_caps_attr_collect_privileged.attr,
	NULL,
};

static struct attribute_group interface_group = {
	.name = "interface",
	.attrs = interface_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&format_group,
	&interface_group,
	NULL,
};

#define GPCI_MAX_DATA_BYTES \
	(1024 - sizeof(struct hv_get_perf_counter_info_params))
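
/*
 * Ask the hypervisor for one block of counter info via the
 * H_GET_PERF_COUNTER_INFO hcall and copy @length bytes, starting at byte
 * @offset of the returned data, into *@value.
 */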
static unsigned long single_gpci_request(u32 req, u32 starting_index,
		u16 secondary_index, u8 version_in, u32 offset, u8 length,
		u64 *value)
{
	unsigned long ret;
	size_t i;
	u64 count;

	struct {
		struct hv_get_perf_counter_info_params params;
		uint8_t bytes[GPCI_MAX_DATA_BYTES];
	} __packed __aligned(sizeof(uint64_t)) arg = {
		.params = {
			.counter_request = cpu_to_be32(req),
			.starting_index = cpu_to_be32(starting_index),
			.secondary_index = cpu_to_be16(secondary_index),
			.counter_info_version_in = version_in,
		}
	};

	ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
			virt_to_phys(&arg), sizeof(arg));
	if (ret) {
		pr_devel("hcall failed: 0x%lx\n", ret);
		return ret;
	}

	/*
	 * we verify offset and length are within the zeroed buffer at event
	 * init.
	 *
	 * The hcall returns the counter data in big-endian byte order, so
	 * assemble the requested bytes most-significant first, shifting by
	 * whole bytes rather than bits.
	 */
	count = 0;
	for (i = offset; i < offset + length; i++)
		count |= (u64)(arg.bytes[i]) << ((length - 1 - (i - offset)) * 8);

	*value = count;
	return ret;
}

static u64 h_gpci_get_value(struct perf_event *event)
{
	u64 count;
	unsigned long ret = single_gpci_request(event_get_request(event),
					event_get_starting_index(event),
					event_get_secondary_index(event),
					event_get_counter_info_version(event),
					event_get_offset(event),
					event_get_length(event),
					&count);
	if (ret)
		return 0;
	return count;
}
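
/*
 * Counter values are read on demand from the hypervisor; there is no
 * per-event hardware state to program. ->start() just snapshots the current
 * value, and ->read()/->stop() fold the delta since that snapshot into
 * event->count.
 */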
static void h_gpci_event_update(struct perf_event *event)
{
	s64 prev;
	u64 now = h_gpci_get_value(event);
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
}

static void h_gpci_event_start(struct perf_event *event, int flags)
{
	local64_set(&event->hw.prev_count, h_gpci_get_value(event));
}

static void h_gpci_event_stop(struct perf_event *event, int flags)
{
	h_gpci_event_update(event);
}

static int h_gpci_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		h_gpci_event_start(event, flags);

	return 0;
}

static int h_gpci_event_init(struct perf_event *event)
{
	u64 count;
	u8 length;

	/* Not our event */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* config2 is unused */
	if (event->attr.config2) {
		pr_devel("config2 set when reserved\n");
		return -EINVAL;
	}

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest)
		return -EINVAL;

	/* no branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	length = event_get_length(event);
	if (length < 1 || length > 8) {
		pr_devel("length invalid\n");
		return -EINVAL;
	}

	/* last byte within the buffer? */
	if ((event_get_offset(event) + length) > GPCI_MAX_DATA_BYTES) {
		pr_devel("request outside of buffer: %zu > %zu\n",
				(size_t)event_get_offset(event) + length,
				GPCI_MAX_DATA_BYTES);
		return -EINVAL;
	}

	/* check if the request works... */
	if (single_gpci_request(event_get_request(event),
				event_get_starting_index(event),
				event_get_secondary_index(event),
				event_get_counter_info_version(event),
				event_get_offset(event),
				length,
				&count)) {
		pr_devel("gpci hcall failed\n");
		return -EINVAL;
	}

	return 0;
}

static int h_gpci_event_idx(struct perf_event *event)
{
	return 0;
}

static struct pmu h_gpci_pmu = {
	.task_ctx_nr = perf_invalid_context,

	.name = "hv_gpci",
	.attr_groups = attr_groups,
	.event_init  = h_gpci_event_init,
	.add         = h_gpci_event_add,
	.del         = h_gpci_event_stop,
	.start       = h_gpci_event_start,
	.stop        = h_gpci_event_stop,
	.read        = h_gpci_event_update,
	.event_idx   = h_gpci_event_idx,
};

static int hv_gpci_init(void)
{
	int r;
	unsigned long hret;
	struct hv_perf_caps caps;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		pr_debug("not a virtualized system, not enabling\n");
		return -ENODEV;
	}

	hret = hv_perf_caps_get(&caps);
	if (hret) {
		pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
				hret);
		return -ENODEV;
	}

	/* sampling not supported */
	h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1);
	if (r)
		return r;

	return 0;
}

device_initcall(hv_gpci_init);