// SPDX-License-Identifier: GPL-2.0
#include <Python.h>
#include <structmember.h>
#include <inttypes.h>
#include <poll.h>
#include <linux/err.h>
#include <perf/cpumap.h>
#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif
#include <perf/mmap.h>
#include "evlist.h"
#include "callchain.h"
#include "evsel.h"
#include "event.h"
#include "print_binary.h"
#include "thread_map.h"
#include "trace-event.h"
#include "mmap.h"
#include "stat.h"
#include "metricgroup.h"
#include "util/bpf-filter.h"
#include "util/env.h"
#include "util/pmu.h"
#include "util/pmus.h"
#include <internal/lib.h>
#include "util.h"

#if PY_MAJOR_VERSION < 3
#define _PyUnicode_FromString(arg) \
	PyString_FromString(arg)
#define _PyUnicode_AsString(arg) \
	PyString_AsString(arg)
#define _PyUnicode_FromFormat(...) \
	PyString_FromFormat(__VA_ARGS__)
#define _PyLong_FromLong(arg) \
	PyInt_FromLong(arg)

#else

#define _PyUnicode_FromString(arg) \
	PyUnicode_FromString(arg)
#define _PyUnicode_AsString(arg) \
	PyUnicode_AsUTF8(arg)
#define _PyUnicode_FromFormat(...) \
	PyUnicode_FromFormat(__VA_ARGS__)
#define _PyLong_FromLong(arg) \
	PyLong_FromLong(arg)
#endif

#ifndef Py_TYPE
#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
#endif

/*
 * Avoid bringing in event parsing.
 */
int parse_event(struct evlist *evlist __maybe_unused, const char *str __maybe_unused)
{
	return 0;
}

/*
 * Provide these two so that we don't have to link against callchain.c and
 * start dragging hist.c, etc.
 */
struct callchain_param callchain_param;

int parse_callchain_record(const char *arg __maybe_unused,
			   struct callchain_param *param __maybe_unused)
{
	return 0;
}

/*
 * Add these so as not to drag in util/env.c
 */
struct perf_env perf_env;

const char *perf_env__cpuid(struct perf_env *env __maybe_unused)
{
	return NULL;
}

// This one is a bit easier, wouldn't drag too much, but leave it as a stub; we need it here
const char *perf_env__arch(struct perf_env *env __maybe_unused)
{
	return NULL;
}

/*
 * These ones are needed so as not to drag in the PMU bandwagon, the jevents
 * generated pmu_sys_event_tables, etc. So far evsel__find_pmu() is used just
 * for per-PMU perf_event_attr.exclude_guest handling, which isn't really
 * needed for the known perf python binding use cases; revisit if this becomes
 * necessary.
 */
struct perf_pmu *evsel__find_pmu(const struct evsel *evsel __maybe_unused)
{
	return NULL;
}

int perf_pmu__scan_file(struct perf_pmu *pmu __maybe_unused, const char *name __maybe_unused,
			const char *fmt __maybe_unused, ...)
{
	return EOF;
}

int perf_pmus__num_core_pmus(void)
{
	return 1;
}

bool evsel__is_aux_event(const struct evsel *evsel __maybe_unused)
{
	return false;
}

/*
 * Add this one here so as not to drag in util/metricgroup.c
 */
int metricgroup__copy_metric_events(struct evlist *evlist __maybe_unused,
				    struct cgroup *cgrp __maybe_unused,
				    struct rblist *new_metric_events __maybe_unused,
				    struct rblist *old_metric_events __maybe_unused)
{
	return 0;
}

/*
 * XXX: All these evsel destructors need some better mechanism, like a linked
 * list of destructors registered when the relevant code indeed is used instead
 * of having more and more calls in perf_evsel__delete(). -- acme
 *
 * For now, add some more:
 *
 * Not to drag the BPF bandwagon...
 */
void bpf_counter__destroy(struct evsel *evsel);
int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
int bpf_counter__disable(struct evsel *evsel);

void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
{
}

int bpf_counter__install_pe(struct evsel *evsel __maybe_unused, int cpu __maybe_unused, int fd __maybe_unused)
{
	return 0;
}

int bpf_counter__disable(struct evsel *evsel __maybe_unused)
{
	return 0;
}

// not to drag util/bpf-filter.c
#ifdef HAVE_BPF_SKEL
int perf_bpf_filter__prepare(struct evsel *evsel __maybe_unused)
{
	return 0;
}

int perf_bpf_filter__destroy(struct evsel *evsel __maybe_unused)
{
	return 0;
}
#endif

/*
 * Support debug printing even though util/debug.c is not linked. That means
 * implementing 'verbose' and 'eprintf'.
 */
int verbose;
int debug_peo_args;

int eprintf(int level, int var, const char *fmt, ...);

int eprintf(int level, int var, const char *fmt, ...)
{
	va_list args;
	int ret = 0;

	if (var >= level) {
		va_start(args, fmt);
		ret = vfprintf(stderr, fmt, args);
		va_end(args);
	}

	return ret;
}

/* Define PyVarObject_HEAD_INIT for python 2.5 */
#ifndef PyVarObject_HEAD_INIT
# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
#endif

#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initperf(void);
#else
PyMODINIT_FUNC PyInit_perf(void);
#endif

#define member_def(type, member, ptype, help) \
	{ #member, ptype, \
	  offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
	  0, help }

#define sample_member_def(name, member, ptype, help) \
	{ #name, ptype, \
	  offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \
	  0, help }

struct pyrf_event {
	PyObject_HEAD
	struct evsel *evsel;
	struct perf_sample sample;
	union perf_event event;
};

#define sample_members \
	sample_member_def(sample_ip, ip, T_ULONGLONG, "event ip"), \
	sample_member_def(sample_pid, pid, T_INT, "event pid"), \
	sample_member_def(sample_tid, tid, T_INT, "event tid"), \
	sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \
	sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"), \
	sample_member_def(sample_id, id, T_ULONGLONG, "event id"), \
	sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
	sample_member_def(sample_period, period, T_ULONGLONG, "event period"), \
	sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),

static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");

static PyMemberDef pyrf_mmap_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_event_header, misc, T_UINT, "event misc"),
	member_def(perf_record_mmap, pid, T_UINT, "event pid"),
	member_def(perf_record_mmap, tid, T_UINT, "event tid"),
	member_def(perf_record_mmap, start, T_ULONGLONG, "start of the map"),
	member_def(perf_record_mmap, len, T_ULONGLONG, "map length"),
	member_def(perf_record_mmap, pgoff, T_ULONGLONG, "page offset"),
	member_def(perf_record_mmap, filename, T_STRING_INPLACE, "backing store"),
	{ .name = NULL, },
};

static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRI_lx64 ", "
			 "length: %#" PRI_lx64 ", offset: %#" PRI_lx64 ", "
			 "filename: %s }",
		     pevent->event.mmap.pid, pevent->event.mmap.tid,
		     pevent->event.mmap.start, pevent->event.mmap.len,
		     pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

static PyTypeObject pyrf_mmap_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.mmap_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_mmap_event__doc,
	.tp_members = pyrf_mmap_event__members,
	.tp_repr = (reprfunc)pyrf_mmap_event__repr,
};

static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");

static PyMemberDef pyrf_task_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_fork, pid, T_UINT, "event pid"),
	member_def(perf_record_fork, ppid, T_UINT, "event ppid"),
	member_def(perf_record_fork, tid, T_UINT, "event tid"),
	member_def(perf_record_fork, ptid, T_UINT, "event ptid"),
	member_def(perf_record_fork, time, T_ULONGLONG, "timestamp"),
	{ .name = NULL, },
};

static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
{
	return _PyUnicode_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
				     "ptid: %u, time: %" PRI_lu64 "}",
				     pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
				     pevent->event.fork.pid,
				     pevent->event.fork.ppid,
				     pevent->event.fork.tid,
				     pevent->event.fork.ptid,
				     pevent->event.fork.time);
}

static PyTypeObject pyrf_task_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.task_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_task_event__doc,
	.tp_members = pyrf_task_event__members,
	.tp_repr = (reprfunc)pyrf_task_event__repr,
};

static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");

static PyMemberDef pyrf_comm_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_comm, pid, T_UINT, "event pid"),
	member_def(perf_record_comm, tid, T_UINT, "event tid"),
	member_def(perf_record_comm, comm, T_STRING_INPLACE, "process name"),
	{ .name = NULL, },
};

static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
{
	return _PyUnicode_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
				     pevent->event.comm.pid,
				     pevent->event.comm.tid,
				     pevent->event.comm.comm);
}

static PyTypeObject pyrf_comm_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.comm_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_comm_event__doc,
	.tp_members = pyrf_comm_event__members,
	.tp_repr = (reprfunc)pyrf_comm_event__repr,
};

static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");

static PyMemberDef pyrf_throttle_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_throttle, time, T_ULONGLONG, "timestamp"),
	member_def(perf_record_throttle, id, T_ULONGLONG, "event id"),
	member_def(perf_record_throttle, stream_id, T_ULONGLONG, "event stream id"),
	{ .name = NULL, },
};

static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
{
	struct perf_record_throttle *te = (struct perf_record_throttle *)(&pevent->event.header + 1);

	return _PyUnicode_FromFormat("{ type: %sthrottle, time: %" PRI_lu64 ", id: %" PRI_lu64
				     ", stream_id: %" PRI_lu64 " }",
				     pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
				     te->time, te->id, te->stream_id);
}

static PyTypeObject pyrf_throttle_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.throttle_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_throttle_event__doc,
	.tp_members = pyrf_throttle_event__members,
	.tp_repr = (reprfunc)pyrf_throttle_event__repr,
};

static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");

static PyMemberDef pyrf_lost_event__members[] = {
	sample_members
	member_def(perf_record_lost, id, T_ULONGLONG, "event id"),
	member_def(perf_record_lost, lost, T_ULONGLONG, "number of lost events"),
	{ .name = NULL, },
};

static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: lost, id: %#" PRI_lx64 ", "
			 "lost: %#" PRI_lx64 " }",
		     pevent->event.lost.id, pevent->event.lost.lost) < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

static PyTypeObject pyrf_lost_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.lost_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_lost_event__doc,
	.tp_members = pyrf_lost_event__members,
	.tp_repr = (reprfunc)pyrf_lost_event__repr,
};

static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");

static PyMemberDef pyrf_read_event__members[] = {
	sample_members
	member_def(perf_record_read, pid, T_UINT, "event pid"),
	member_def(perf_record_read, tid, T_UINT, "event tid"),
	{ .name = NULL, },
};

static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent)
{
	return _PyUnicode_FromFormat("{ type: read, pid: %u, tid: %u }",
				     pevent->event.read.pid,
				     pevent->event.read.tid);
	/*
	 * FIXME: return the array of read values,
	 * making this method useful ;-)
	 */
}

static PyTypeObject pyrf_read_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.read_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_read_event__doc,
	.tp_members = pyrf_read_event__members,
	.tp_repr = (reprfunc)pyrf_read_event__repr,
};

static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");

static PyMemberDef pyrf_sample_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	{ .name = NULL, },
};

static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: sample }") < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

#ifdef HAVE_LIBTRACEEVENT
static bool is_tracepoint(struct pyrf_event *pevent)
{
	return pevent->evsel->core.attr.type == PERF_TYPE_TRACEPOINT;
}

static PyObject*
tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field)
{
	struct tep_handle *pevent = field->event->tep;
	void *data = pe->sample.raw_data;
	PyObject *ret = NULL;
	unsigned long long val;
	unsigned int offset, len;

	if (field->flags & TEP_FIELD_IS_ARRAY) {
		offset = field->offset;
		len = field->size;
		if (field->flags & TEP_FIELD_IS_DYNAMIC) {
			val = tep_read_number(pevent, data + offset, len);
			offset = val;
			len = offset >> 16;
			offset &= 0xffff;
			if (tep_field_is_relative(field->flags))
				offset += field->offset + field->size;
		}
		if (field->flags & TEP_FIELD_IS_STRING &&
		    is_printable_array(data + offset, len)) {
			ret = _PyUnicode_FromString((char *)data + offset);
		} else {
			ret = PyByteArray_FromStringAndSize((const char *) data + offset, len);
			field->flags &= ~TEP_FIELD_IS_STRING;
		}
	} else {
		val = tep_read_number(pevent, data + field->offset,
				      field->size);
		if (field->flags & TEP_FIELD_IS_POINTER)
			ret = PyLong_FromUnsignedLong((unsigned long) val);
		else if (field->flags & TEP_FIELD_IS_SIGNED)
			ret = PyLong_FromLong((long) val);
		else
			ret = PyLong_FromUnsignedLong((unsigned long) val);
	}

	return ret;
}

static PyObject*
get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
{
	const char *str = _PyUnicode_AsString(PyObject_Str(attr_name));
	struct evsel *evsel = pevent->evsel;
	struct tep_format_field *field;

	if (!evsel->tp_format) {
		struct tep_event *tp_format;

		tp_format = trace_event__tp_format_id(evsel->core.attr.config);
		if (IS_ERR_OR_NULL(tp_format))
			return NULL;

		evsel->tp_format = tp_format;
	}

	field = tep_find_any_field(evsel->tp_format, str);
	if (!field)
		return NULL;

	return tracepoint_field(pevent, field);
}
#endif /* HAVE_LIBTRACEEVENT */

static PyObject*
pyrf_sample_event__getattro(struct pyrf_event *pevent, PyObject *attr_name)
{
	PyObject *obj = NULL;

#ifdef HAVE_LIBTRACEEVENT
	if (is_tracepoint(pevent))
		obj = get_tracepoint_field(pevent, attr_name);
#endif

	return obj ?: PyObject_GenericGetAttr((PyObject *) pevent, attr_name);
}

static PyTypeObject pyrf_sample_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.sample_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_sample_event__doc,
	.tp_members = pyrf_sample_event__members,
	.tp_repr = (reprfunc)pyrf_sample_event__repr,
	.tp_getattro = (getattrofunc) pyrf_sample_event__getattro,
};
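
/*
 * For tracepoint events the attribute lookup above is first tried against the
 * libtraceevent format fields, so the fields of the raw record show up as
 * Python attributes. Which names exist depends on the tracepoint's format
 * file; as an illustration only, for sched:sched_switch something like this
 * could be used:
 *
 *	event = evlist.read_on_cpu(cpu)
 *	if isinstance(event, perf.sample_event):
 *		print(event.prev_comm, event.next_comm)
 */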

static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");

static PyMemberDef pyrf_context_switch_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_switch, next_prev_pid, T_UINT, "next/prev pid"),
	member_def(perf_record_switch, next_prev_tid, T_UINT, "next/prev tid"),
	{ .name = NULL, },
};

static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: context_switch, next_prev_pid: %u, next_prev_tid: %u, switch_out: %u }",
		     pevent->event.context_switch.next_prev_pid,
		     pevent->event.context_switch.next_prev_tid,
		     !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

static PyTypeObject pyrf_context_switch_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.context_switch_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_context_switch_event__doc,
	.tp_members = pyrf_context_switch_event__members,
	.tp_repr = (reprfunc)pyrf_context_switch_event__repr,
};

static int pyrf_event__setup_types(void)
{
	int err;
	pyrf_mmap_event__type.tp_new =
	pyrf_task_event__type.tp_new =
	pyrf_comm_event__type.tp_new =
	pyrf_lost_event__type.tp_new =
	pyrf_read_event__type.tp_new =
	pyrf_sample_event__type.tp_new =
	pyrf_context_switch_event__type.tp_new =
	pyrf_throttle_event__type.tp_new = PyType_GenericNew;
	err = PyType_Ready(&pyrf_mmap_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_lost_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_task_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_comm_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_throttle_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_read_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_sample_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_context_switch_event__type);
	if (err < 0)
		goto out;
out:
	return err;
}

static PyTypeObject *pyrf_event__type[] = {
	[PERF_RECORD_MMAP]	 = &pyrf_mmap_event__type,
	[PERF_RECORD_LOST]	 = &pyrf_lost_event__type,
	[PERF_RECORD_COMM]	 = &pyrf_comm_event__type,
	[PERF_RECORD_EXIT]	 = &pyrf_task_event__type,
	[PERF_RECORD_THROTTLE]	 = &pyrf_throttle_event__type,
	[PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type,
	[PERF_RECORD_FORK]	 = &pyrf_task_event__type,
	[PERF_RECORD_READ]	 = &pyrf_read_event__type,
	[PERF_RECORD_SAMPLE]	 = &pyrf_sample_event__type,
	[PERF_RECORD_SWITCH]	 = &pyrf_context_switch_event__type,
	[PERF_RECORD_SWITCH_CPU_WIDE] = &pyrf_context_switch_event__type,
};

static PyObject *pyrf_event__new(union perf_event *event)
{
	struct pyrf_event *pevent;
	PyTypeObject *ptype;

	if ((event->header.type < PERF_RECORD_MMAP ||
	     event->header.type > PERF_RECORD_SAMPLE) &&
	    !(event->header.type == PERF_RECORD_SWITCH ||
	      event->header.type == PERF_RECORD_SWITCH_CPU_WIDE))
		return NULL;

	ptype = pyrf_event__type[event->header.type];
	pevent = PyObject_New(struct pyrf_event, ptype);
	if (pevent != NULL)
		memcpy(&pevent->event, event, event->header.size);
	return (PyObject *)pevent;
}

struct pyrf_cpu_map {
	PyObject_HEAD

	struct perf_cpu_map *cpus;
};

static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
			      PyObject *args, PyObject *kwargs)
{
	static char *kwlist[] = { "cpustr", NULL };
	char *cpustr = NULL;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
					 kwlist, &cpustr))
		return -1;

	pcpus->cpus = perf_cpu_map__new(cpustr);
	if (pcpus->cpus == NULL)
		return -1;
	return 0;
}

static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
{
	perf_cpu_map__put(pcpus->cpus);
	Py_TYPE(pcpus)->tp_free((PyObject*)pcpus);
}

static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
{
	struct pyrf_cpu_map *pcpus = (void *)obj;

	return perf_cpu_map__nr(pcpus->cpus);
}

static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_cpu_map *pcpus = (void *)obj;

	if (i >= perf_cpu_map__nr(pcpus->cpus))
		return NULL;

	return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu);
}

static PySequenceMethods pyrf_cpu_map__sequence_methods = {
	.sq_length = pyrf_cpu_map__length,
	.sq_item   = pyrf_cpu_map__item,
};

static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");

static PyTypeObject pyrf_cpu_map__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.cpu_map",
	.tp_basicsize = sizeof(struct pyrf_cpu_map),
	.tp_dealloc = (destructor)pyrf_cpu_map__delete,
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_cpu_map__doc,
	.tp_as_sequence = &pyrf_cpu_map__sequence_methods,
	.tp_init = (initproc)pyrf_cpu_map__init,
};

static int pyrf_cpu_map__setup_types(void)
{
	pyrf_cpu_map__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_cpu_map__type);
}

struct pyrf_thread_map {
	PyObject_HEAD

	struct perf_thread_map *threads;
};

static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
				 PyObject *args, PyObject *kwargs)
{
	static char *kwlist[] = { "pid", "tid", "uid", NULL };
	int pid = -1, tid = -1, uid = UINT_MAX;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii",
					 kwlist, &pid, &tid, &uid))
		return -1;

	pthreads->threads = thread_map__new(pid, tid, uid);
	if (pthreads->threads == NULL)
		return -1;
	return 0;
}

static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
{
	perf_thread_map__put(pthreads->threads);
	Py_TYPE(pthreads)->tp_free((PyObject*)pthreads);
}

static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
{
	struct pyrf_thread_map *pthreads = (void *)obj;

	return perf_thread_map__nr(pthreads->threads);
}

static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_thread_map *pthreads = (void *)obj;

	if (i >= perf_thread_map__nr(pthreads->threads))
		return NULL;

	return Py_BuildValue("i", perf_thread_map__pid(pthreads->threads, i));
}

static PySequenceMethods pyrf_thread_map__sequence_methods = {
	.sq_length = pyrf_thread_map__length,
	.sq_item   = pyrf_thread_map__item,
};

static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");

static PyTypeObject pyrf_thread_map__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.thread_map",
	.tp_basicsize = sizeof(struct pyrf_thread_map),
	.tp_dealloc = (destructor)pyrf_thread_map__delete,
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_thread_map__doc,
	.tp_as_sequence = &pyrf_thread_map__sequence_methods,
	.tp_init = (initproc)pyrf_thread_map__init,
};

static int pyrf_thread_map__setup_types(void)
{
	pyrf_thread_map__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_thread_map__type);
}

struct pyrf_evsel {
	PyObject_HEAD

	struct evsel evsel;
};
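
/*
 * Both map types above behave as read-only Python sequences. A minimal,
 * illustrative use (assuming the module is importable as 'perf'):
 *
 *	cpus = perf.cpu_map("0-3")	# or perf.cpu_map() for all online CPUs
 *	threads = perf.thread_map(-1)	# pid -1: all threads
 *	print(len(cpus), list(cpus))
 */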

static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
			    PyObject *args, PyObject *kwargs)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
		.sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
	};
	static char *kwlist[] = {
		"type",
		"config",
		"sample_freq",
		"sample_period",
		"sample_type",
		"read_format",
		"disabled",
		"inherit",
		"pinned",
		"exclusive",
		"exclude_user",
		"exclude_kernel",
		"exclude_hv",
		"exclude_idle",
		"mmap",
		"context_switch",
		"comm",
		"freq",
		"inherit_stat",
		"enable_on_exec",
		"task",
		"watermark",
		"precise_ip",
		"mmap_data",
		"sample_id_all",
		"wakeup_events",
		"bp_type",
		"bp_addr",
		"bp_len",
		NULL
	};
	u64 sample_period = 0;
	u32 disabled = 0,
	    inherit = 0,
	    pinned = 0,
	    exclusive = 0,
	    exclude_user = 0,
	    exclude_kernel = 0,
	    exclude_hv = 0,
	    exclude_idle = 0,
	    mmap = 0,
	    context_switch = 0,
	    comm = 0,
	    freq = 1,
	    inherit_stat = 0,
	    enable_on_exec = 0,
	    task = 0,
	    watermark = 0,
	    precise_ip = 0,
	    mmap_data = 0,
	    sample_id_all = 1;
	int idx = 0;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs,
					 "|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
					 &attr.type, &attr.config, &attr.sample_freq,
					 &sample_period, &attr.sample_type,
					 &attr.read_format, &disabled, &inherit,
					 &pinned, &exclusive, &exclude_user,
					 &exclude_kernel, &exclude_hv, &exclude_idle,
					 &mmap, &context_switch, &comm, &freq, &inherit_stat,
					 &enable_on_exec, &task, &watermark,
					 &precise_ip, &mmap_data, &sample_id_all,
					 &attr.wakeup_events, &attr.bp_type,
					 &attr.bp_addr, &attr.bp_len, &idx))
		return -1;

	/*
	 * union...
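	 * sample_period and sample_freq share storage in perf_event_attr, so
	 * only one of them can be meaningfully set; reject the call if both
	 * were passed.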
	 */
	if (sample_period != 0) {
		if (attr.sample_freq != 0)
			return -1; /* FIXME: throw right exception */
		attr.sample_period = sample_period;
	}

	/* Bitfields */
	attr.disabled = disabled;
	attr.inherit = inherit;
	attr.pinned = pinned;
	attr.exclusive = exclusive;
	attr.exclude_user = exclude_user;
	attr.exclude_kernel = exclude_kernel;
	attr.exclude_hv = exclude_hv;
	attr.exclude_idle = exclude_idle;
	attr.mmap = mmap;
	attr.context_switch = context_switch;
	attr.comm = comm;
	attr.freq = freq;
	attr.inherit_stat = inherit_stat;
	attr.enable_on_exec = enable_on_exec;
	attr.task = task;
	attr.watermark = watermark;
	attr.precise_ip = precise_ip;
	attr.mmap_data = mmap_data;
	attr.sample_id_all = sample_id_all;
	attr.size = sizeof(attr);

	evsel__init(&pevsel->evsel, &attr, idx);
	return 0;
}

static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
{
	evsel__exit(&pevsel->evsel);
	Py_TYPE(pevsel)->tp_free((PyObject*)pevsel);
}

static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
				  PyObject *args, PyObject *kwargs)
{
	struct evsel *evsel = &pevsel->evsel;
	struct perf_cpu_map *cpus = NULL;
	struct perf_thread_map *threads = NULL;
	PyObject *pcpus = NULL, *pthreads = NULL;
	int group = 0, inherit = 0;
	static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL };

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
					 &pcpus, &pthreads, &group, &inherit))
		return NULL;

	if (pthreads != NULL)
		threads = ((struct pyrf_thread_map *)pthreads)->threads;

	if (pcpus != NULL)
		cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;

	evsel->core.attr.inherit = inherit;
	/*
	 * This will group just the fds for this single evsel, to group
	 * multiple events, use evlist.open().
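	 *
	 * As an illustration only, opening one event on every CPU for all
	 * threads could look like:
	 *
	 *	evsel = perf.evsel(task = 1, comm = 1)
	 *	evsel.open(cpus = perf.cpu_map(), threads = perf.thread_map(-1))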
	 */
	if (evsel__open(evsel, cpus, threads) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}

static PyMethodDef pyrf_evsel__methods[] = {
	{
		.ml_name  = "open",
		.ml_meth  = (PyCFunction)pyrf_evsel__open,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("open the event selector file descriptor table.")
	},
	{ .ml_name = NULL, }
};

static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector list object.");

static PyTypeObject pyrf_evsel__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.evsel",
	.tp_basicsize = sizeof(struct pyrf_evsel),
	.tp_dealloc = (destructor)pyrf_evsel__delete,
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_evsel__doc,
	.tp_methods = pyrf_evsel__methods,
	.tp_init = (initproc)pyrf_evsel__init,
};

static int pyrf_evsel__setup_types(void)
{
	pyrf_evsel__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_evsel__type);
}

struct pyrf_evlist {
	PyObject_HEAD

	struct evlist evlist;
};

static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
			     PyObject *args, PyObject *kwargs __maybe_unused)
{
	PyObject *pcpus = NULL, *pthreads = NULL;
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
		return -1;

	threads = ((struct pyrf_thread_map *)pthreads)->threads;
	cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
	evlist__init(&pevlist->evlist, cpus, threads);
	return 0;
}

static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
{
	evlist__exit(&pevlist->evlist);
	Py_TYPE(pevlist)->tp_free((PyObject*)pevlist);
}

static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	static char *kwlist[] = { "pages", "overwrite", NULL };
	int pages = 128, overwrite = false;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
					 &pages, &overwrite))
		return NULL;

	if (evlist__mmap(evlist, pages) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}

static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	static char *kwlist[] = { "timeout", NULL };
	int timeout = -1, n;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
		return NULL;

	n = evlist__poll(evlist, timeout);
	if (n < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	return Py_BuildValue("i", n);
}

static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
					 PyObject *args __maybe_unused,
					 PyObject *kwargs __maybe_unused)
{
	struct evlist *evlist = &pevlist->evlist;
	PyObject *list = PyList_New(0);
	int i;

	for (i = 0; i < evlist->core.pollfd.nr; ++i) {
		PyObject *file;
#if PY_MAJOR_VERSION < 3
		FILE *fp = fdopen(evlist->core.pollfd.entries[i].fd, "r");

		if (fp == NULL)
			goto free_list;

		file = PyFile_FromFile(fp, "perf", "r", NULL);
#else
		file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1,
				     NULL, NULL, NULL, 0);
#endif
		if (file == NULL)
			goto free_list;

		if (PyList_Append(list, file) != 0) {
			Py_DECREF(file);
			goto free_list;
		}

		Py_DECREF(file);
	}

	return list;
free_list:
	return PyErr_NoMemory();
}


static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
				  PyObject *args,
				  PyObject *kwargs __maybe_unused)
{
	struct evlist *evlist = &pevlist->evlist;
	PyObject *pevsel;
	struct evsel *evsel;

	if (!PyArg_ParseTuple(args, "O", &pevsel))
		return NULL;

	Py_INCREF(pevsel);
	evsel = &((struct pyrf_evsel *)pevsel)->evsel;
	evsel->core.idx = evlist->core.nr_entries;
	evlist__add(evlist, evsel);

	return Py_BuildValue("i", evlist->core.nr_entries);
}

static struct mmap *get_md(struct evlist *evlist, int cpu)
{
	int i;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct mmap *md = &evlist->mmap[i];

		if (md->core.cpu.cpu == cpu)
			return md;
	}

	return NULL;
}

static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
					  PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	union perf_event *event;
	int sample_id_all = 1, cpu;
	static char *kwlist[] = { "cpu", "sample_id_all", NULL };
	struct mmap *md;
	int err;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
					 &cpu, &sample_id_all))
		return NULL;

	md = get_md(evlist, cpu);
	if (!md)
		return NULL;

	if (perf_mmap__read_init(&md->core) < 0)
		goto end;

	event = perf_mmap__read_event(&md->core);
	if (event != NULL) {
		PyObject *pyevent = pyrf_event__new(event);
		struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
		struct evsel *evsel;

		if (pyevent == NULL)
			return PyErr_NoMemory();

		evsel = evlist__event2evsel(evlist, event);
		if (!evsel) {
			Py_INCREF(Py_None);
			return Py_None;
		}

		pevent->evsel = evsel;

		err = evsel__parse_sample(evsel, event, &pevent->sample);

		/* Consume the event only after we parsed it out.
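		 * The event data lives in the mmap ring buffer; once it is
		 * consumed, the kernel is free to overwrite that space.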
		 */
		perf_mmap__consume(&md->core);

		if (err)
			return PyErr_Format(PyExc_OSError,
					    "perf: can't parse sample, err=%d", err);
		return pyevent;
	}
end:
	Py_INCREF(Py_None);
	return Py_None;
}

static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;

	if (evlist__open(evlist) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}

static PyMethodDef pyrf_evlist__methods[] = {
	{
		.ml_name  = "mmap",
		.ml_meth  = (PyCFunction)pyrf_evlist__mmap,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("mmap the file descriptor table.")
	},
	{
		.ml_name  = "open",
		.ml_meth  = (PyCFunction)pyrf_evlist__open,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("open the file descriptors.")
	},
	{
		.ml_name  = "poll",
		.ml_meth  = (PyCFunction)pyrf_evlist__poll,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("poll the file descriptor table.")
	},
	{
		.ml_name  = "get_pollfd",
		.ml_meth  = (PyCFunction)pyrf_evlist__get_pollfd,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("get the poll file descriptor table.")
	},
	{
		.ml_name  = "add",
		.ml_meth  = (PyCFunction)pyrf_evlist__add,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("adds an event selector to the list.")
	},
	{
		.ml_name  = "read_on_cpu",
		.ml_meth  = (PyCFunction)pyrf_evlist__read_on_cpu,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("reads an event.")
	},
	{ .ml_name = NULL, }
};

static Py_ssize_t pyrf_evlist__length(PyObject *obj)
{
	struct pyrf_evlist *pevlist = (void *)obj;

	return pevlist->evlist.core.nr_entries;
}

static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_evlist *pevlist = (void *)obj;
	struct evsel *pos;

	if (i >= pevlist->evlist.core.nr_entries)
		return NULL;

	evlist__for_each_entry(&pevlist->evlist, pos) {
		if (i-- == 0)
			break;
	}

	return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
}

static PySequenceMethods pyrf_evlist__sequence_methods = {
	.sq_length = pyrf_evlist__length,
	.sq_item   = pyrf_evlist__item,
};

static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");

static PyTypeObject pyrf_evlist__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.evlist",
	.tp_basicsize = sizeof(struct pyrf_evlist),
	.tp_dealloc = (destructor)pyrf_evlist__delete,
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_as_sequence = &pyrf_evlist__sequence_methods,
	.tp_doc = pyrf_evlist__doc,
	.tp_methods = pyrf_evlist__methods,
	.tp_init = (initproc)pyrf_evlist__init,
};

static int pyrf_evlist__setup_types(void)
{
	pyrf_evlist__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_evlist__type);
}
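
/*
 * A minimal, illustrative event loop tying the types above together (a sketch
 * only, error handling omitted; the scripts in tools/perf/python/ do this for
 * real):
 *
 *	cpus = perf.cpu_map()
 *	threads = perf.thread_map(-1)
 *	evsel = perf.evsel(type = perf.TYPE_SOFTWARE,
 *			   config = perf.COUNT_SW_DUMMY,
 *			   task = 1, comm = 1, freq = 0,
 *			   wakeup_events = 1, watermark = 1,
 *			   sample_id_all = 1,
 *			   sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
 *	evsel.open(cpus = cpus, threads = threads)
 *	evlist = perf.evlist(cpus, threads)
 *	evlist.add(evsel)
 *	evlist.mmap()
 *	while True:
 *		evlist.poll(timeout = -1)
 *		for cpu in cpus:
 *			event = evlist.read_on_cpu(cpu)
 *			if event:
 *				print(event)
 */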

#define PERF_CONST(name) { #name, PERF_##name }

static struct {
	const char *name;
	int value;
} perf__constants[] = {
	PERF_CONST(TYPE_HARDWARE),
	PERF_CONST(TYPE_SOFTWARE),
	PERF_CONST(TYPE_TRACEPOINT),
	PERF_CONST(TYPE_HW_CACHE),
	PERF_CONST(TYPE_RAW),
	PERF_CONST(TYPE_BREAKPOINT),

	PERF_CONST(COUNT_HW_CPU_CYCLES),
	PERF_CONST(COUNT_HW_INSTRUCTIONS),
	PERF_CONST(COUNT_HW_CACHE_REFERENCES),
	PERF_CONST(COUNT_HW_CACHE_MISSES),
	PERF_CONST(COUNT_HW_BRANCH_INSTRUCTIONS),
	PERF_CONST(COUNT_HW_BRANCH_MISSES),
	PERF_CONST(COUNT_HW_BUS_CYCLES),
	PERF_CONST(COUNT_HW_CACHE_L1D),
	PERF_CONST(COUNT_HW_CACHE_L1I),
	PERF_CONST(COUNT_HW_CACHE_LL),
	PERF_CONST(COUNT_HW_CACHE_DTLB),
	PERF_CONST(COUNT_HW_CACHE_ITLB),
	PERF_CONST(COUNT_HW_CACHE_BPU),
	PERF_CONST(COUNT_HW_CACHE_OP_READ),
	PERF_CONST(COUNT_HW_CACHE_OP_WRITE),
	PERF_CONST(COUNT_HW_CACHE_OP_PREFETCH),
	PERF_CONST(COUNT_HW_CACHE_RESULT_ACCESS),
	PERF_CONST(COUNT_HW_CACHE_RESULT_MISS),

	PERF_CONST(COUNT_HW_STALLED_CYCLES_FRONTEND),
	PERF_CONST(COUNT_HW_STALLED_CYCLES_BACKEND),

	PERF_CONST(COUNT_SW_CPU_CLOCK),
	PERF_CONST(COUNT_SW_TASK_CLOCK),
	PERF_CONST(COUNT_SW_PAGE_FAULTS),
	PERF_CONST(COUNT_SW_CONTEXT_SWITCHES),
	PERF_CONST(COUNT_SW_CPU_MIGRATIONS),
	PERF_CONST(COUNT_SW_PAGE_FAULTS_MIN),
	PERF_CONST(COUNT_SW_PAGE_FAULTS_MAJ),
	PERF_CONST(COUNT_SW_ALIGNMENT_FAULTS),
	PERF_CONST(COUNT_SW_EMULATION_FAULTS),
	PERF_CONST(COUNT_SW_DUMMY),

	PERF_CONST(SAMPLE_IP),
	PERF_CONST(SAMPLE_TID),
	PERF_CONST(SAMPLE_TIME),
	PERF_CONST(SAMPLE_ADDR),
	PERF_CONST(SAMPLE_READ),
	PERF_CONST(SAMPLE_CALLCHAIN),
	PERF_CONST(SAMPLE_ID),
	PERF_CONST(SAMPLE_CPU),
	PERF_CONST(SAMPLE_PERIOD),
	PERF_CONST(SAMPLE_STREAM_ID),
	PERF_CONST(SAMPLE_RAW),

	PERF_CONST(FORMAT_TOTAL_TIME_ENABLED),
	PERF_CONST(FORMAT_TOTAL_TIME_RUNNING),
	PERF_CONST(FORMAT_ID),
	PERF_CONST(FORMAT_GROUP),

	PERF_CONST(RECORD_MMAP),
	PERF_CONST(RECORD_LOST),
	PERF_CONST(RECORD_COMM),
	PERF_CONST(RECORD_EXIT),
	PERF_CONST(RECORD_THROTTLE),
	PERF_CONST(RECORD_UNTHROTTLE),
	PERF_CONST(RECORD_FORK),
	PERF_CONST(RECORD_READ),
	PERF_CONST(RECORD_SAMPLE),
	PERF_CONST(RECORD_MMAP2),
	PERF_CONST(RECORD_AUX),
	PERF_CONST(RECORD_ITRACE_START),
	PERF_CONST(RECORD_LOST_SAMPLES),
	PERF_CONST(RECORD_SWITCH),
	PERF_CONST(RECORD_SWITCH_CPU_WIDE),

	PERF_CONST(RECORD_MISC_SWITCH_OUT),
	{ .name = NULL, },
};

static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
				  PyObject *args, PyObject *kwargs)
{
#ifndef HAVE_LIBTRACEEVENT
	return NULL;
#else
	struct tep_event *tp_format;
	static char *kwlist[] = { "sys", "name", NULL };
	char *sys = NULL;
	char *name = NULL;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ss", kwlist,
					 &sys, &name))
		return NULL;

	tp_format = trace_event__tp_format(sys, name);
	if (IS_ERR(tp_format))
		return _PyLong_FromLong(-1);

	return _PyLong_FromLong(tp_format->id);
#endif // HAVE_LIBTRACEEVENT
}

static PyMethodDef perf__methods[] = {
	{
		.ml_name  = "tracepoint",
		.ml_meth  = (PyCFunction) pyrf__tracepoint,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("Get tracepoint config.")
	},
	{ .ml_name = NULL, }
};

#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initperf(void)
#else
PyMODINIT_FUNC PyInit_perf(void)
#endif
{
	PyObject *obj;
	int i;
	PyObject *dict;
#if PY_MAJOR_VERSION < 3
	PyObject *module = Py_InitModule("perf", perf__methods);
#else
	static struct PyModuleDef moduledef = {
		PyModuleDef_HEAD_INIT,
		"perf",			/* m_name */
		"",			/* m_doc */
		-1,			/* m_size */
		perf__methods,		/* m_methods */
		NULL,			/* m_reload */
		NULL,			/* m_traverse */
		NULL,			/* m_clear */
		NULL,			/* m_free */
	};
	PyObject *module = PyModule_Create(&moduledef);
#endif

	if (module == NULL ||
	    pyrf_event__setup_types() < 0 ||
	    pyrf_evlist__setup_types() < 0 ||
	    pyrf_evsel__setup_types() < 0 ||
	    pyrf_thread_map__setup_types() < 0 ||
	    pyrf_cpu_map__setup_types() < 0)
#if PY_MAJOR_VERSION < 3
		return;
#else
		return module;
#endif

	/* The page_size is placed in util object. */
	page_size = sysconf(_SC_PAGE_SIZE);

	Py_INCREF(&pyrf_evlist__type);
	PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);

	Py_INCREF(&pyrf_evsel__type);
	PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);

	Py_INCREF(&pyrf_mmap_event__type);
	PyModule_AddObject(module, "mmap_event", (PyObject *)&pyrf_mmap_event__type);

	Py_INCREF(&pyrf_lost_event__type);
	PyModule_AddObject(module, "lost_event", (PyObject *)&pyrf_lost_event__type);

	Py_INCREF(&pyrf_comm_event__type);
	PyModule_AddObject(module, "comm_event", (PyObject *)&pyrf_comm_event__type);

	Py_INCREF(&pyrf_task_event__type);
	PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);

	Py_INCREF(&pyrf_throttle_event__type);
	PyModule_AddObject(module, "throttle_event", (PyObject *)&pyrf_throttle_event__type);

	Py_INCREF(&pyrf_task_event__type);
	PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);

	Py_INCREF(&pyrf_read_event__type);
	PyModule_AddObject(module, "read_event", (PyObject *)&pyrf_read_event__type);

	Py_INCREF(&pyrf_sample_event__type);
	PyModule_AddObject(module, "sample_event", (PyObject *)&pyrf_sample_event__type);

	Py_INCREF(&pyrf_context_switch_event__type);
	PyModule_AddObject(module, "switch_event", (PyObject *)&pyrf_context_switch_event__type);

	Py_INCREF(&pyrf_thread_map__type);
	PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);

	Py_INCREF(&pyrf_cpu_map__type);
	PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);

	dict = PyModule_GetDict(module);
	if (dict == NULL)
		goto error;

	for (i = 0; perf__constants[i].name != NULL; i++) {
		obj = _PyLong_FromLong(perf__constants[i].value);
		if (obj == NULL)
			goto error;
		PyDict_SetItemString(dict, perf__constants[i].name, obj);
		Py_DECREF(obj);
	}

error:
	if (PyErr_Occurred())
		PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
#if PY_MAJOR_VERSION >= 3
	return module;
#endif
}

/*
 * Dummy, to avoid dragging all the test_attr infrastructure into the python
 * binding.
 */
void test_attr__open(struct perf_event_attr *attr __maybe_unused, pid_t pid __maybe_unused,
		     struct perf_cpu cpu __maybe_unused, int fd __maybe_unused,
		     int group_fd __maybe_unused, unsigned long flags __maybe_unused)
{
}

void evlist__free_stats(struct evlist *evlist __maybe_unused)
{
}