// SPDX-License-Identifier: GPL-2.0
#include <Python.h>
#include <structmember.h>
#include <inttypes.h>
#include <poll.h>
#include <linux/err.h>
#include <perf/cpumap.h>
#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif
#include <perf/mmap.h>
#include "evlist.h"
#include "callchain.h"
#include "evsel.h"
#include "event.h"
#include "print_binary.h"
#include "thread_map.h"
#include "trace-event.h"
#include "mmap.h"
#include "stat.h"
#include "metricgroup.h"
#include "util/env.h"
#include <internal/lib.h>
#include "util.h"

#if PY_MAJOR_VERSION < 3
#define _PyUnicode_FromString(arg) \
  PyString_FromString(arg)
#define _PyUnicode_AsString(arg) \
  PyString_AsString(arg)
#define _PyUnicode_FromFormat(...) \
  PyString_FromFormat(__VA_ARGS__)
#define _PyLong_FromLong(arg) \
  PyInt_FromLong(arg)

#else

#define _PyUnicode_FromString(arg) \
  PyUnicode_FromString(arg)
#define _PyUnicode_AsString(arg) \
  PyUnicode_AsUTF8(arg)
#define _PyUnicode_FromFormat(...) \
  PyUnicode_FromFormat(__VA_ARGS__)
#define _PyLong_FromLong(arg) \
  PyLong_FromLong(arg)
#endif

#ifndef Py_TYPE
#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
#endif

/*
 * Provide these two so that we don't have to link against callchain.c and
 * start dragging hist.c, etc.
 */
struct callchain_param callchain_param;

int parse_callchain_record(const char *arg __maybe_unused,
			   struct callchain_param *param __maybe_unused)
{
	return 0;
}

/*
 * Add these so as not to drag in util/env.c
 */
struct perf_env perf_env;

const char *perf_env__cpuid(struct perf_env *env __maybe_unused)
{
	return NULL;
}

// This one is a bit easier, wouldn't drag too much, but leave it as a stub until we need it here
const char *perf_env__arch(struct perf_env *env __maybe_unused)
{
	return NULL;
}

/*
 * Add this one here so as not to drag in util/stat-shadow.c
 */
void perf_stat__collect_metric_expr(struct evlist *evsel_list)
{
}

/*
 * This one is needed so as not to drag in the whole PMU bandwagon (jevents
 * generated pmu_sys_event_tables, etc). evsel__find_pmu() is so far used just
 * for per-PMU perf_event_attr.exclude_guest handling, which isn't really
 * needed for the known perf python binding use cases. Revisit if this becomes
 * necessary.
 */
struct perf_pmu *evsel__find_pmu(struct evsel *evsel __maybe_unused)
{
	return NULL;
}

/*
 * Add this one here so as not to drag in util/metricgroup.c
 */
int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
				    struct rblist *new_metric_events,
				    struct rblist *old_metric_events)
{
	return 0;
}

/*
 * XXX: All these evsel destructors need some better mechanism, like a linked
 * list of destructors registered when the relevant code indeed is used instead
 * of having more and more calls in perf_evsel__delete(). -- acme
 *
 * For now, add some more:
 *
 * Not to drag the BPF bandwagon...
 */
void bpf_counter__destroy(struct evsel *evsel);
int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
int bpf_counter__disable(struct evsel *evsel);

void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
{
}

int bpf_counter__install_pe(struct evsel *evsel __maybe_unused, int cpu __maybe_unused, int fd __maybe_unused)
{
	return 0;
}

int bpf_counter__disable(struct evsel *evsel __maybe_unused)
{
	return 0;
}

/*
 * Support debug printing even though util/debug.c is not linked. That means
 * implementing 'verbose' and 'eprintf'.
 */
int verbose;
int debug_peo_args;

int eprintf(int level, int var, const char *fmt, ...);

int eprintf(int level, int var, const char *fmt, ...)
{
	va_list args;
	int ret = 0;

	if (var >= level) {
		va_start(args, fmt);
		ret = vfprintf(stderr, fmt, args);
		va_end(args);
	}

	return ret;
}

/* Define PyVarObject_HEAD_INIT for python 2.5 */
#ifndef PyVarObject_HEAD_INIT
# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
#endif

#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initperf(void);
#else
PyMODINIT_FUNC PyInit_perf(void);
#endif

#define member_def(type, member, ptype, help) \
	{ #member, ptype, \
	  offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
	  0, help }

#define sample_member_def(name, member, ptype, help) \
	{ #name, ptype, \
	  offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \
	  0, help }

struct pyrf_event {
	PyObject_HEAD
	struct evsel *evsel;
	struct perf_sample sample;
	union perf_event event;
};

#define sample_members \
	sample_member_def(sample_ip, ip, T_ULONGLONG, "event ip"), \
	sample_member_def(sample_pid, pid, T_INT, "event pid"), \
	sample_member_def(sample_tid, tid, T_INT, "event tid"), \
	sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \
	sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"), \
	sample_member_def(sample_id, id, T_ULONGLONG, "event id"), \
	sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
	sample_member_def(sample_period, period, T_ULONGLONG, "event period"), \
	sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),

static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");

static PyMemberDef pyrf_mmap_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_event_header, misc, T_UINT, "event misc"),
	member_def(perf_record_mmap, pid, T_UINT, "event pid"),
	member_def(perf_record_mmap, tid, T_UINT, "event tid"),
	member_def(perf_record_mmap, start, T_ULONGLONG, "start of the map"),
	member_def(perf_record_mmap, len, T_ULONGLONG, "map length"),
	member_def(perf_record_mmap, pgoff, T_ULONGLONG, "page offset"),
	member_def(perf_record_mmap, filename, T_STRING_INPLACE, "backing store"),
	{ .name = NULL, },
};

static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRI_lx64 ", "
			 "length: %#" PRI_lx64 ", offset: %#" PRI_lx64 ", "
			 "filename: %s }",
		     pevent->event.mmap.pid, pevent->event.mmap.tid,
		     pevent->event.mmap.start, pevent->event.mmap.len,
		     pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

static PyTypeObject pyrf_mmap_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.mmap_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_mmap_event__doc,
	.tp_members = pyrf_mmap_event__members,
	.tp_repr = (reprfunc)pyrf_mmap_event__repr,
};

static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");

static PyMemberDef pyrf_task_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_fork, pid, T_UINT, "event pid"),
	member_def(perf_record_fork, ppid, T_UINT, "event ppid"),
	member_def(perf_record_fork, tid, T_UINT, "event tid"),
	member_def(perf_record_fork, ptid, T_UINT, "event ptid"),
	member_def(perf_record_fork, time, T_ULONGLONG, "timestamp"),
	{ .name = NULL, },
};

static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
{
	return _PyUnicode_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
				     "ptid: %u, time: %" PRI_lu64 "}",
				     pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
				     pevent->event.fork.pid,
				     pevent->event.fork.ppid,
				     pevent->event.fork.tid,
				     pevent->event.fork.ptid,
				     pevent->event.fork.time);
}

static PyTypeObject pyrf_task_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.task_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_task_event__doc,
	.tp_members = pyrf_task_event__members,
	.tp_repr = (reprfunc)pyrf_task_event__repr,
};

static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");

static PyMemberDef pyrf_comm_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_comm, pid, T_UINT, "event pid"),
	member_def(perf_record_comm, tid, T_UINT, "event tid"),
	member_def(perf_record_comm, comm, T_STRING_INPLACE, "process name"),
	{ .name = NULL, },
};

static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
{
	return _PyUnicode_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
				     pevent->event.comm.pid,
				     pevent->event.comm.tid,
				     pevent->event.comm.comm);
}

static PyTypeObject pyrf_comm_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.comm_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_comm_event__doc,
	.tp_members = pyrf_comm_event__members,
	.tp_repr = (reprfunc)pyrf_comm_event__repr,
};

static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");

static PyMemberDef pyrf_throttle_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_throttle, time, T_ULONGLONG, "timestamp"),
	member_def(perf_record_throttle, id, T_ULONGLONG, "event id"),
	member_def(perf_record_throttle, stream_id, T_ULONGLONG, "event stream id"),
	{ .name = NULL, },
};

static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
{
	struct perf_record_throttle *te =
			(struct perf_record_throttle *)(&pevent->event.header + 1);

	return _PyUnicode_FromFormat("{ type: %sthrottle, time: %" PRI_lu64 ", id: %" PRI_lu64
				     ", stream_id: %" PRI_lu64 " }",
				     pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
				     te->time, te->id, te->stream_id);
}

static PyTypeObject pyrf_throttle_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.throttle_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_throttle_event__doc,
	.tp_members = pyrf_throttle_event__members,
	.tp_repr = (reprfunc)pyrf_throttle_event__repr,
};

static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");

static PyMemberDef pyrf_lost_event__members[] = {
	sample_members
	member_def(perf_record_lost, id, T_ULONGLONG, "event id"),
	member_def(perf_record_lost, lost, T_ULONGLONG, "number of lost events"),
	{ .name = NULL, },
};

static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: lost, id: %#" PRI_lx64 ", "
			 "lost: %#" PRI_lx64 " }",
		     pevent->event.lost.id, pevent->event.lost.lost) < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

static PyTypeObject pyrf_lost_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.lost_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_lost_event__doc,
	.tp_members = pyrf_lost_event__members,
	.tp_repr = (reprfunc)pyrf_lost_event__repr,
};

static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");

static PyMemberDef pyrf_read_event__members[] = {
	sample_members
	member_def(perf_record_read, pid, T_UINT, "event pid"),
	member_def(perf_record_read, tid, T_UINT, "event tid"),
	{ .name = NULL, },
};

static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent)
{
	return _PyUnicode_FromFormat("{ type: read, pid: %u, tid: %u }",
				     pevent->event.read.pid,
				     pevent->event.read.tid);
	/*
	 * FIXME: return the array of read values,
	 * making this method useful ;-)
	 */
}

static PyTypeObject pyrf_read_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.read_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_read_event__doc,
	.tp_members = pyrf_read_event__members,
	.tp_repr = (reprfunc)pyrf_read_event__repr,
};

static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");

static PyMemberDef pyrf_sample_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	{ .name = NULL, },
};

static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: sample }") < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

#ifdef HAVE_LIBTRACEEVENT
static bool is_tracepoint(struct pyrf_event *pevent)
{
	return pevent->evsel->core.attr.type == PERF_TYPE_TRACEPOINT;
}

static PyObject*
tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field)
{
	struct tep_handle *pevent = field->event->tep;
	void *data = pe->sample.raw_data;
	PyObject *ret = NULL;
	unsigned long long val;
	unsigned int offset, len;

	if (field->flags & TEP_FIELD_IS_ARRAY) {
		offset = field->offset;
		len = field->size;
		if (field->flags & TEP_FIELD_IS_DYNAMIC) {
			val = tep_read_number(pevent, data + offset, len);
			offset = val;
			len = offset >> 16;
			offset &= 0xffff;
#ifdef HAVE_LIBTRACEEVENT_TEP_FIELD_IS_RELATIVE
			if (field->flags & TEP_FIELD_IS_RELATIVE)
				offset += field->offset + field->size;
#endif
		}
		if (field->flags & TEP_FIELD_IS_STRING &&
		    is_printable_array(data + offset, len)) {
			ret = _PyUnicode_FromString((char *)data + offset);
		} else {
			ret = PyByteArray_FromStringAndSize((const char *) data + offset, len);
			field->flags &= ~TEP_FIELD_IS_STRING;
		}
	} else {
		val = tep_read_number(pevent, data + field->offset,
				      field->size);
		if (field->flags & TEP_FIELD_IS_POINTER)
			ret = PyLong_FromUnsignedLong((unsigned long) val);
		else if (field->flags & TEP_FIELD_IS_SIGNED)
			ret = PyLong_FromLong((long) val);
		else
			ret = PyLong_FromUnsignedLong((unsigned long) val);
	}

	return ret;
}

static PyObject*
get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
{
	const char *str = _PyUnicode_AsString(PyObject_Str(attr_name));
	struct evsel *evsel = pevent->evsel;
	struct tep_format_field *field;

	if (!evsel->tp_format) {
		struct tep_event *tp_format;

		tp_format = trace_event__tp_format_id(evsel->core.attr.config);
		if (IS_ERR_OR_NULL(tp_format))
			return NULL;

		evsel->tp_format = tp_format;
	}

	field = tep_find_any_field(evsel->tp_format, str);
	if (!field)
		return NULL;

	return tracepoint_field(pevent, field);
}
#endif /* HAVE_LIBTRACEEVENT */

static PyObject*
pyrf_sample_event__getattro(struct pyrf_event *pevent, PyObject *attr_name)
{
	PyObject *obj = NULL;

#ifdef HAVE_LIBTRACEEVENT
	if (is_tracepoint(pevent))
		obj = get_tracepoint_field(pevent, attr_name);
#endif

	return obj ?: PyObject_GenericGetAttr((PyObject *) pevent, attr_name);
}

static PyTypeObject pyrf_sample_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.sample_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_sample_event__doc,
	.tp_members = pyrf_sample_event__members,
	.tp_repr = (reprfunc)pyrf_sample_event__repr,
	.tp_getattro = (getattrofunc) pyrf_sample_event__getattro,
};
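
/*
 * With HAVE_LIBTRACEEVENT, the tp_getattro hook above lets Python code read
 * raw tracepoint fields off a sample event by attribute name, resolved via
 * get_tracepoint_field()/tracepoint_field(). A minimal usage sketch, assuming
 * a sched:sched_switch tracepoint evsel was added to an evlist and sampled
 * (the field names come from that tracepoint's format, not from this file):
 *
 *	event = evlist.read_on_cpu(cpu)
 *	if isinstance(event, perf.sample_event):
 *		print("%s -> %s" % (event.prev_comm, event.next_comm))
 */
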
static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");

static PyMemberDef pyrf_context_switch_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_switch, next_prev_pid, T_UINT, "next/prev pid"),
	member_def(perf_record_switch, next_prev_tid, T_UINT, "next/prev tid"),
	{ .name = NULL, },
};

static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: context_switch, next_prev_pid: %u, next_prev_tid: %u, switch_out: %u }",
		     pevent->event.context_switch.next_prev_pid,
		     pevent->event.context_switch.next_prev_tid,
		     !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

static PyTypeObject pyrf_context_switch_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.context_switch_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_context_switch_event__doc,
	.tp_members = pyrf_context_switch_event__members,
	.tp_repr = (reprfunc)pyrf_context_switch_event__repr,
};

static int pyrf_event__setup_types(void)
{
	int err;
	pyrf_mmap_event__type.tp_new =
	pyrf_task_event__type.tp_new =
	pyrf_comm_event__type.tp_new =
	pyrf_lost_event__type.tp_new =
	pyrf_read_event__type.tp_new =
	pyrf_sample_event__type.tp_new =
	pyrf_context_switch_event__type.tp_new =
	pyrf_throttle_event__type.tp_new = PyType_GenericNew;
	err = PyType_Ready(&pyrf_mmap_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_lost_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_task_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_comm_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_throttle_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_read_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_sample_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_context_switch_event__type);
	if (err < 0)
		goto out;
out:
	return err;
}

static PyTypeObject *pyrf_event__type[] = {
	[PERF_RECORD_MMAP] = &pyrf_mmap_event__type,
	[PERF_RECORD_LOST] = &pyrf_lost_event__type,
	[PERF_RECORD_COMM] = &pyrf_comm_event__type,
	[PERF_RECORD_EXIT] = &pyrf_task_event__type,
	[PERF_RECORD_THROTTLE] = &pyrf_throttle_event__type,
	[PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type,
	[PERF_RECORD_FORK] = &pyrf_task_event__type,
	[PERF_RECORD_READ] = &pyrf_read_event__type,
	[PERF_RECORD_SAMPLE] = &pyrf_sample_event__type,
	[PERF_RECORD_SWITCH] = &pyrf_context_switch_event__type,
	[PERF_RECORD_SWITCH_CPU_WIDE] = &pyrf_context_switch_event__type,
};

static PyObject *pyrf_event__new(union perf_event *event)
{
	struct pyrf_event *pevent;
	PyTypeObject *ptype;

	if ((event->header.type < PERF_RECORD_MMAP ||
	     event->header.type > PERF_RECORD_SAMPLE) &&
	    !(event->header.type == PERF_RECORD_SWITCH ||
	      event->header.type == PERF_RECORD_SWITCH_CPU_WIDE))
		return NULL;

	ptype = pyrf_event__type[event->header.type];
	pevent = PyObject_New(struct pyrf_event, ptype);
	if (pevent != NULL)
		memcpy(&pevent->event, event, event->header.size);
	return (PyObject *)pevent;
}

struct pyrf_cpu_map {
	PyObject_HEAD

	struct perf_cpu_map *cpus;
};

static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
			      PyObject *args, PyObject *kwargs)
{
	static char *kwlist[] = { "cpustr", NULL };
	char *cpustr = NULL;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
					 kwlist, &cpustr))
		return -1;

	pcpus->cpus = perf_cpu_map__new(cpustr);
	if (pcpus->cpus == NULL)
		return -1;
	return 0;
}

static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
{
	perf_cpu_map__put(pcpus->cpus);
	Py_TYPE(pcpus)->tp_free((PyObject*)pcpus);
}

static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
{
	struct pyrf_cpu_map *pcpus = (void *)obj;

	return perf_cpu_map__nr(pcpus->cpus);
}

static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_cpu_map *pcpus = (void *)obj;

	if (i >= perf_cpu_map__nr(pcpus->cpus))
		return NULL;

	return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu);
}

static PySequenceMethods pyrf_cpu_map__sequence_methods = {
	.sq_length = pyrf_cpu_map__length,
	.sq_item = pyrf_cpu_map__item,
};

static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");

static PyTypeObject pyrf_cpu_map__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.cpu_map",
	.tp_basicsize = sizeof(struct pyrf_cpu_map),
	.tp_dealloc = (destructor)pyrf_cpu_map__delete,
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_cpu_map__doc,
	.tp_as_sequence = &pyrf_cpu_map__sequence_methods,
	.tp_init = (initproc)pyrf_cpu_map__init,
};

static int pyrf_cpu_map__setup_types(void)
{
	pyrf_cpu_map__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_cpu_map__type);
}

struct pyrf_thread_map {
	PyObject_HEAD

	struct perf_thread_map *threads;
};

static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
				 PyObject *args, PyObject *kwargs)
{
	static char *kwlist[] = { "pid", "tid", "uid", NULL };
	int pid = -1, tid = -1, uid = UINT_MAX;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii",
					 kwlist, &pid, &tid, &uid))
		return -1;

	pthreads->threads = thread_map__new(pid, tid, uid);
	if (pthreads->threads == NULL)
		return -1;
	return 0;
}

static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
{
	perf_thread_map__put(pthreads->threads);
	Py_TYPE(pthreads)->tp_free((PyObject*)pthreads);
}

static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
{
	struct pyrf_thread_map *pthreads = (void *)obj;

	return perf_thread_map__nr(pthreads->threads);
}

static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_thread_map *pthreads = (void *)obj;

	if (i >= perf_thread_map__nr(pthreads->threads))
		return NULL;

	return Py_BuildValue("i", perf_thread_map__pid(pthreads->threads, i));
}

static PySequenceMethods pyrf_thread_map__sequence_methods = {
	.sq_length = pyrf_thread_map__length,
	.sq_item = pyrf_thread_map__item,
};

static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");

static PyTypeObject pyrf_thread_map__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.thread_map",
	.tp_basicsize = sizeof(struct pyrf_thread_map),
	.tp_dealloc = (destructor)pyrf_thread_map__delete,
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_thread_map__doc,
	.tp_as_sequence = &pyrf_thread_map__sequence_methods,
	.tp_init = (initproc)pyrf_thread_map__init,
};

static int pyrf_thread_map__setup_types(void)
{
	pyrf_thread_map__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_thread_map__type);
}
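
/*
 * Both map types behave as read-only Python sequences: len() maps to
 * sq_length and indexing to sq_item above. A minimal usage sketch, assuming
 * the module was built and is importable as 'perf' (the cpu string and pid
 * values are just illustrative):
 *
 *	import perf
 *
 *	cpus = perf.cpu_map("0-1")	# or perf.cpu_map() for all online CPUs
 *	threads = perf.thread_map(-1)	# -1: don't filter on a specific pid
 *	print(len(cpus), [c for c in cpus], [t for t in threads])
 */
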
struct pyrf_evsel {
	PyObject_HEAD

	struct evsel evsel;
};

static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
			    PyObject *args, PyObject *kwargs)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
		.sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
	};
	static char *kwlist[] = {
		"type",
		"config",
		"sample_freq",
		"sample_period",
		"sample_type",
		"read_format",
		"disabled",
		"inherit",
		"pinned",
		"exclusive",
		"exclude_user",
		"exclude_kernel",
		"exclude_hv",
		"exclude_idle",
		"mmap",
		"context_switch",
		"comm",
		"freq",
		"inherit_stat",
		"enable_on_exec",
		"task",
		"watermark",
		"precise_ip",
		"mmap_data",
		"sample_id_all",
		"wakeup_events",
		"bp_type",
		"bp_addr",
		"bp_len",
		NULL
	};
	u64 sample_period = 0;
	u32 disabled = 0,
	    inherit = 0,
	    pinned = 0,
	    exclusive = 0,
	    exclude_user = 0,
	    exclude_kernel = 0,
	    exclude_hv = 0,
	    exclude_idle = 0,
	    mmap = 0,
	    context_switch = 0,
	    comm = 0,
	    freq = 1,
	    inherit_stat = 0,
	    enable_on_exec = 0,
	    task = 0,
	    watermark = 0,
	    precise_ip = 0,
	    mmap_data = 0,
	    sample_id_all = 1;
	int idx = 0;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs,
					 "|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
					 &attr.type, &attr.config, &attr.sample_freq,
					 &sample_period, &attr.sample_type,
					 &attr.read_format, &disabled, &inherit,
					 &pinned, &exclusive, &exclude_user,
					 &exclude_kernel, &exclude_hv, &exclude_idle,
					 &mmap, &context_switch, &comm, &freq, &inherit_stat,
					 &enable_on_exec, &task, &watermark,
					 &precise_ip, &mmap_data, &sample_id_all,
					 &attr.wakeup_events, &attr.bp_type,
					 &attr.bp_addr, &attr.bp_len, &idx))
		return -1;

	/* union... */
	if (sample_period != 0) {
		if (attr.sample_freq != 0)
			return -1; /* FIXME: throw right exception */
		attr.sample_period = sample_period;
	}

	/* Bitfields */
	attr.disabled = disabled;
	attr.inherit = inherit;
	attr.pinned = pinned;
	attr.exclusive = exclusive;
	attr.exclude_user = exclude_user;
	attr.exclude_kernel = exclude_kernel;
	attr.exclude_hv = exclude_hv;
	attr.exclude_idle = exclude_idle;
	attr.mmap = mmap;
	attr.context_switch = context_switch;
	attr.comm = comm;
	attr.freq = freq;
	attr.inherit_stat = inherit_stat;
	attr.enable_on_exec = enable_on_exec;
	attr.task = task;
	attr.watermark = watermark;
	attr.precise_ip = precise_ip;
	attr.mmap_data = mmap_data;
	attr.sample_id_all = sample_id_all;
	attr.size = sizeof(attr);

	evsel__init(&pevsel->evsel, &attr, idx);
	return 0;
}

static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
{
	evsel__exit(&pevsel->evsel);
	Py_TYPE(pevsel)->tp_free((PyObject*)pevsel);
}

static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
				  PyObject *args, PyObject *kwargs)
{
	struct evsel *evsel = &pevsel->evsel;
	struct perf_cpu_map *cpus = NULL;
	struct perf_thread_map *threads = NULL;
	PyObject *pcpus = NULL, *pthreads = NULL;
	int group = 0, inherit = 0;
	static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL };

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
					 &pcpus, &pthreads, &group, &inherit))
		return NULL;

	if (pthreads != NULL)
		threads = ((struct pyrf_thread_map *)pthreads)->threads;

	if (pcpus != NULL)
		cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;

	evsel->core.attr.inherit = inherit;
	/*
	 * This will group just the fds for this single evsel, to group
	 * multiple events, use evlist.open().
	 */
	if (evsel__open(evsel, cpus, threads) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}

static PyMethodDef pyrf_evsel__methods[] = {
	{
		.ml_name = "open",
		.ml_meth = (PyCFunction)pyrf_evsel__open,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc = PyDoc_STR("open the event selector file descriptor table.")
	},
	{ .ml_name = NULL, }
};

static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector object.");

static PyTypeObject pyrf_evsel__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.evsel",
	.tp_basicsize = sizeof(struct pyrf_evsel),
	.tp_dealloc = (destructor)pyrf_evsel__delete,
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_evsel__doc,
	.tp_methods = pyrf_evsel__methods,
	.tp_init = (initproc)pyrf_evsel__init,
};

static int pyrf_evsel__setup_types(void)
{
	pyrf_evsel__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_evsel__type);
}
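
/*
 * The evsel keyword arguments map straight onto the perf_event_attr fields
 * handled in pyrf_evsel__init() above. A minimal usage sketch (the chosen
 * event and attribute values are just an example, in the spirit of
 * tools/perf/python/twatch.py):
 *
 *	import perf
 *
 *	evsel = perf.evsel(type = perf.TYPE_SOFTWARE,
 *			   config = perf.COUNT_SW_TASK_CLOCK,
 *			   task = 1, comm = 1, freq = 0,
 *			   wakeup_events = 1, sample_period = 1,
 *			   sample_id_all = 1,
 *			   sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
 *	evsel.open(cpus = perf.cpu_map(), threads = perf.thread_map())
 */
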
struct pyrf_evlist {
	PyObject_HEAD

	struct evlist evlist;
};

static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
			     PyObject *args, PyObject *kwargs __maybe_unused)
{
	PyObject *pcpus = NULL, *pthreads = NULL;
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
		return -1;

	threads = ((struct pyrf_thread_map *)pthreads)->threads;
	cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
	evlist__init(&pevlist->evlist, cpus, threads);
	return 0;
}

static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
{
	evlist__exit(&pevlist->evlist);
	Py_TYPE(pevlist)->tp_free((PyObject*)pevlist);
}

static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	static char *kwlist[] = { "pages", "overwrite", NULL };
	int pages = 128, overwrite = false;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
					 &pages, &overwrite))
		return NULL;

	if (evlist__mmap(evlist, pages) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}

static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	static char *kwlist[] = { "timeout", NULL };
	int timeout = -1, n;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
		return NULL;

	n = evlist__poll(evlist, timeout);
	if (n < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	return Py_BuildValue("i", n);
}

static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
					 PyObject *args __maybe_unused,
					 PyObject *kwargs __maybe_unused)
{
	struct evlist *evlist = &pevlist->evlist;
	PyObject *list = PyList_New(0);
	int i;

	for (i = 0; i < evlist->core.pollfd.nr; ++i) {
		PyObject *file;
#if PY_MAJOR_VERSION < 3
		FILE *fp = fdopen(evlist->core.pollfd.entries[i].fd, "r");

		if (fp == NULL)
			goto free_list;

		file = PyFile_FromFile(fp, "perf", "r", NULL);
#else
		file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1,
				     NULL, NULL, NULL, 0);
#endif
		if (file == NULL)
			goto free_list;

		if (PyList_Append(list, file) != 0) {
			Py_DECREF(file);
			goto free_list;
		}

		Py_DECREF(file);
	}

	return list;
free_list:
	return PyErr_NoMemory();
}

static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
				  PyObject *args,
				  PyObject *kwargs __maybe_unused)
{
	struct evlist *evlist = &pevlist->evlist;
	PyObject *pevsel;
	struct evsel *evsel;

	if (!PyArg_ParseTuple(args, "O", &pevsel))
		return NULL;

	Py_INCREF(pevsel);
	evsel = &((struct pyrf_evsel *)pevsel)->evsel;
	evsel->core.idx = evlist->core.nr_entries;
	evlist__add(evlist, evsel);

	return Py_BuildValue("i", evlist->core.nr_entries);
}

static struct mmap *get_md(struct evlist *evlist, int cpu)
{
	int i;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct mmap *md = &evlist->mmap[i];

		if (md->core.cpu.cpu == cpu)
			return md;
	}

	return NULL;
}

static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
					  PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	union perf_event *event;
	int sample_id_all = 1, cpu;
	static char *kwlist[] = { "cpu", "sample_id_all", NULL };
	struct mmap *md;
	int err;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
					 &cpu, &sample_id_all))
		return NULL;

	md = get_md(evlist, cpu);
	if (!md)
		return NULL;

	if (perf_mmap__read_init(&md->core) < 0)
		goto end;

	event = perf_mmap__read_event(&md->core);
	if (event != NULL) {
		PyObject *pyevent = pyrf_event__new(event);
		struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
		struct evsel *evsel;

		if (pyevent == NULL)
			return PyErr_NoMemory();

		evsel = evlist__event2evsel(evlist, event);
		if (!evsel) {
			Py_INCREF(Py_None);
			return Py_None;
		}

		pevent->evsel = evsel;

		err = evsel__parse_sample(evsel, event, &pevent->sample);

		/* Consume the event only after we have parsed it out. */
		perf_mmap__consume(&md->core);

		if (err)
			return PyErr_Format(PyExc_OSError,
					    "perf: can't parse sample, err=%d", err);
		return pyevent;
	}
end:
	Py_INCREF(Py_None);
	return Py_None;
}

static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;

	if (evlist__open(evlist) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}

static PyMethodDef pyrf_evlist__methods[] = {
	{
		.ml_name = "mmap",
		.ml_meth = (PyCFunction)pyrf_evlist__mmap,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc = PyDoc_STR("mmap the file descriptor table.")
	},
	{
		.ml_name = "open",
		.ml_meth = (PyCFunction)pyrf_evlist__open,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc = PyDoc_STR("open the file descriptors.")
	},
	{
		.ml_name = "poll",
		.ml_meth = (PyCFunction)pyrf_evlist__poll,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc = PyDoc_STR("poll the file descriptor table.")
	},
	{
		.ml_name = "get_pollfd",
		.ml_meth = (PyCFunction)pyrf_evlist__get_pollfd,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc = PyDoc_STR("get the poll file descriptor table.")
	},
	{
		.ml_name = "add",
		.ml_meth = (PyCFunction)pyrf_evlist__add,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc = PyDoc_STR("adds an event selector to the list.")
	},
	{
		.ml_name = "read_on_cpu",
		.ml_meth = (PyCFunction)pyrf_evlist__read_on_cpu,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc = PyDoc_STR("reads an event.")
	},
	{ .ml_name = NULL, }
};

static Py_ssize_t pyrf_evlist__length(PyObject *obj)
{
	struct pyrf_evlist *pevlist = (void *)obj;

	return pevlist->evlist.core.nr_entries;
}

static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_evlist *pevlist = (void *)obj;
	struct evsel *pos;

	if (i >= pevlist->evlist.core.nr_entries)
		return NULL;

	evlist__for_each_entry(&pevlist->evlist, pos) {
		if (i-- == 0)
			break;
	}

	return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
}

static PySequenceMethods pyrf_evlist__sequence_methods = {
	.sq_length = pyrf_evlist__length,
	.sq_item = pyrf_evlist__item,
};

static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");

static PyTypeObject pyrf_evlist__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.evlist",
	.tp_basicsize = sizeof(struct pyrf_evlist),
	.tp_dealloc = (destructor)pyrf_evlist__delete,
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_as_sequence = &pyrf_evlist__sequence_methods,
	.tp_doc = pyrf_evlist__doc,
	.tp_methods = pyrf_evlist__methods,
	.tp_init = (initproc)pyrf_evlist__init,
};

static int pyrf_evlist__setup_types(void)
{
	pyrf_evlist__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_evlist__type);
}
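
/*
 * Putting the evlist methods above together, the usual consumption pattern is
 * an mmap/poll/read_on_cpu loop over the CPUs in the map. A minimal sketch,
 * assuming the cpus, threads and already opened evsel from the earlier
 * examples (see tools/perf/python/twatch.py for a complete script):
 *
 *	evlist = perf.evlist(cpus, threads)
 *	evlist.add(evsel)
 *	evlist.mmap()
 *	while True:
 *		evlist.poll(timeout = -1)
 *		for cpu in cpus:
 *			event = evlist.read_on_cpu(cpu)
 *			if not event:
 *				continue
 *			print(event)
 */
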
#define PERF_CONST(name) { #name, PERF_##name }

static struct {
	const char *name;
	int value;
} perf__constants[] = {
	PERF_CONST(TYPE_HARDWARE),
	PERF_CONST(TYPE_SOFTWARE),
	PERF_CONST(TYPE_TRACEPOINT),
	PERF_CONST(TYPE_HW_CACHE),
	PERF_CONST(TYPE_RAW),
	PERF_CONST(TYPE_BREAKPOINT),

	PERF_CONST(COUNT_HW_CPU_CYCLES),
	PERF_CONST(COUNT_HW_INSTRUCTIONS),
	PERF_CONST(COUNT_HW_CACHE_REFERENCES),
	PERF_CONST(COUNT_HW_CACHE_MISSES),
	PERF_CONST(COUNT_HW_BRANCH_INSTRUCTIONS),
	PERF_CONST(COUNT_HW_BRANCH_MISSES),
	PERF_CONST(COUNT_HW_BUS_CYCLES),
	PERF_CONST(COUNT_HW_CACHE_L1D),
	PERF_CONST(COUNT_HW_CACHE_L1I),
	PERF_CONST(COUNT_HW_CACHE_LL),
	PERF_CONST(COUNT_HW_CACHE_DTLB),
	PERF_CONST(COUNT_HW_CACHE_ITLB),
	PERF_CONST(COUNT_HW_CACHE_BPU),
	PERF_CONST(COUNT_HW_CACHE_OP_READ),
	PERF_CONST(COUNT_HW_CACHE_OP_WRITE),
	PERF_CONST(COUNT_HW_CACHE_OP_PREFETCH),
	PERF_CONST(COUNT_HW_CACHE_RESULT_ACCESS),
	PERF_CONST(COUNT_HW_CACHE_RESULT_MISS),

	PERF_CONST(COUNT_HW_STALLED_CYCLES_FRONTEND),
	PERF_CONST(COUNT_HW_STALLED_CYCLES_BACKEND),

	PERF_CONST(COUNT_SW_CPU_CLOCK),
	PERF_CONST(COUNT_SW_TASK_CLOCK),
	PERF_CONST(COUNT_SW_PAGE_FAULTS),
	PERF_CONST(COUNT_SW_CONTEXT_SWITCHES),
	PERF_CONST(COUNT_SW_CPU_MIGRATIONS),
	PERF_CONST(COUNT_SW_PAGE_FAULTS_MIN),
	PERF_CONST(COUNT_SW_PAGE_FAULTS_MAJ),
	PERF_CONST(COUNT_SW_ALIGNMENT_FAULTS),
	PERF_CONST(COUNT_SW_EMULATION_FAULTS),
	PERF_CONST(COUNT_SW_DUMMY),

	PERF_CONST(SAMPLE_IP),
	PERF_CONST(SAMPLE_TID),
	PERF_CONST(SAMPLE_TIME),
	PERF_CONST(SAMPLE_ADDR),
	PERF_CONST(SAMPLE_READ),
	PERF_CONST(SAMPLE_CALLCHAIN),
	PERF_CONST(SAMPLE_ID),
	PERF_CONST(SAMPLE_CPU),
	PERF_CONST(SAMPLE_PERIOD),
	PERF_CONST(SAMPLE_STREAM_ID),
	PERF_CONST(SAMPLE_RAW),

	PERF_CONST(FORMAT_TOTAL_TIME_ENABLED),
	PERF_CONST(FORMAT_TOTAL_TIME_RUNNING),
	PERF_CONST(FORMAT_ID),
	PERF_CONST(FORMAT_GROUP),

	PERF_CONST(RECORD_MMAP),
	PERF_CONST(RECORD_LOST),
	PERF_CONST(RECORD_COMM),
	PERF_CONST(RECORD_EXIT),
	PERF_CONST(RECORD_THROTTLE),
	PERF_CONST(RECORD_UNTHROTTLE),
	PERF_CONST(RECORD_FORK),
	PERF_CONST(RECORD_READ),
	PERF_CONST(RECORD_SAMPLE),
	PERF_CONST(RECORD_MMAP2),
	PERF_CONST(RECORD_AUX),
	PERF_CONST(RECORD_ITRACE_START),
	PERF_CONST(RECORD_LOST_SAMPLES),
	PERF_CONST(RECORD_SWITCH),
	PERF_CONST(RECORD_SWITCH_CPU_WIDE),

	PERF_CONST(RECORD_MISC_SWITCH_OUT),
	{ .name = NULL, },
};

static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
				  PyObject *args, PyObject *kwargs)
{
#ifndef HAVE_LIBTRACEEVENT
	return NULL;
#else
	struct tep_event *tp_format;
	static char *kwlist[] = { "sys", "name", NULL };
	char *sys = NULL;
	char *name = NULL;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ss", kwlist,
					 &sys, &name))
		return NULL;

	tp_format = trace_event__tp_format(sys, name);
	if (IS_ERR(tp_format))
		return _PyLong_FromLong(-1);

	return _PyLong_FromLong(tp_format->id);
#endif // HAVE_LIBTRACEEVENT
}

static PyMethodDef perf__methods[] = {
	{
		.ml_name = "tracepoint",
		.ml_meth = (PyCFunction) pyrf__tracepoint,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc = PyDoc_STR("Get tracepoint config.")
	},
	{ .ml_name = NULL, }
};
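
/*
 * perf.tracepoint() resolves a "sys"/"name" pair to the tracepoint id, which
 * can then be used as the config of a PERF_TYPE_TRACEPOINT evsel. A minimal
 * sketch (the sched:sched_switch tracepoint is just an example; this needs a
 * libtraceevent-enabled build and sufficient privileges, see
 * tools/perf/python/tracepoint.py for a complete script):
 *
 *	import perf
 *
 *	config = perf.tracepoint("sched", "sched_switch")
 *	evsel = perf.evsel(type = perf.TYPE_TRACEPOINT, config = config,
 *			   freq = 0, sample_period = 1, wakeup_events = 1,
 *			   sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID |
 *					 perf.SAMPLE_CPU | perf.SAMPLE_RAW)
 */
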
#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initperf(void)
#else
PyMODINIT_FUNC PyInit_perf(void)
#endif
{
	PyObject *obj;
	int i;
	PyObject *dict;
#if PY_MAJOR_VERSION < 3
	PyObject *module = Py_InitModule("perf", perf__methods);
#else
	static struct PyModuleDef moduledef = {
		PyModuleDef_HEAD_INIT,
		"perf",			/* m_name */
		"",			/* m_doc */
		-1,			/* m_size */
		perf__methods,		/* m_methods */
		NULL,			/* m_reload */
		NULL,			/* m_traverse */
		NULL,			/* m_clear */
		NULL,			/* m_free */
	};
	PyObject *module = PyModule_Create(&moduledef);
#endif

	if (module == NULL ||
	    pyrf_event__setup_types() < 0 ||
	    pyrf_evlist__setup_types() < 0 ||
	    pyrf_evsel__setup_types() < 0 ||
	    pyrf_thread_map__setup_types() < 0 ||
	    pyrf_cpu_map__setup_types() < 0)
#if PY_MAJOR_VERSION < 3
		return;
#else
		return module;
#endif

	/* The page_size is placed in util object. */
	page_size = sysconf(_SC_PAGE_SIZE);

	Py_INCREF(&pyrf_evlist__type);
	PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);

	Py_INCREF(&pyrf_evsel__type);
	PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);

	Py_INCREF(&pyrf_mmap_event__type);
	PyModule_AddObject(module, "mmap_event", (PyObject *)&pyrf_mmap_event__type);

	Py_INCREF(&pyrf_lost_event__type);
	PyModule_AddObject(module, "lost_event", (PyObject *)&pyrf_lost_event__type);

	Py_INCREF(&pyrf_comm_event__type);
	PyModule_AddObject(module, "comm_event", (PyObject *)&pyrf_comm_event__type);

	Py_INCREF(&pyrf_task_event__type);
	PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);

	Py_INCREF(&pyrf_throttle_event__type);
	PyModule_AddObject(module, "throttle_event", (PyObject *)&pyrf_throttle_event__type);

	Py_INCREF(&pyrf_read_event__type);
	PyModule_AddObject(module, "read_event", (PyObject *)&pyrf_read_event__type);

	Py_INCREF(&pyrf_sample_event__type);
	PyModule_AddObject(module, "sample_event", (PyObject *)&pyrf_sample_event__type);

	Py_INCREF(&pyrf_context_switch_event__type);
	PyModule_AddObject(module, "switch_event", (PyObject *)&pyrf_context_switch_event__type);

	Py_INCREF(&pyrf_thread_map__type);
	PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);

	Py_INCREF(&pyrf_cpu_map__type);
	PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);

	dict = PyModule_GetDict(module);
	if (dict == NULL)
		goto error;

	for (i = 0; perf__constants[i].name != NULL; i++) {
		obj = _PyLong_FromLong(perf__constants[i].value);
		if (obj == NULL)
			goto error;
		PyDict_SetItemString(dict, perf__constants[i].name, obj);
		Py_DECREF(obj);
	}

error:
	if (PyErr_Occurred())
		PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
#if PY_MAJOR_VERSION >= 3
	return module;
#endif
}

/*
 * Dummy, to avoid dragging all the test_attr infrastructure in the python
 * binding.
 */
void test_attr__open(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu,
		     int fd, int group_fd, unsigned long flags)
{
}