// SPDX-License-Identifier: GPL-2.0
#include <Python.h>
#include <structmember.h>
#include <inttypes.h>
#include <poll.h>
#include <linux/err.h>
#include <perf/cpumap.h>
#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif
#include <perf/mmap.h>
#include "evlist.h"
#include "callchain.h"
#include "evsel.h"
#include "event.h"
#include "print_binary.h"
#include "thread_map.h"
#include "trace-event.h"
#include "mmap.h"
#include "stat.h"
#include "metricgroup.h"
#include "util/bpf-filter.h"
#include "util/env.h"
#include "util/pmu.h"
#include "util/pmus.h"
#include <internal/lib.h>
#include "util.h"

#if PY_MAJOR_VERSION < 3
#define _PyUnicode_FromString(arg) \
	PyString_FromString(arg)
#define _PyUnicode_AsString(arg) \
	PyString_AsString(arg)
#define _PyUnicode_FromFormat(...) \
	PyString_FromFormat(__VA_ARGS__)
#define _PyLong_FromLong(arg) \
	PyInt_FromLong(arg)

#else

#define _PyUnicode_FromString(arg) \
	PyUnicode_FromString(arg)
#define _PyUnicode_AsString(arg) \
	PyUnicode_AsUTF8(arg)
#define _PyUnicode_FromFormat(...) \
	PyUnicode_FromFormat(__VA_ARGS__)
#define _PyLong_FromLong(arg) \
	PyLong_FromLong(arg)
#endif

#ifndef Py_TYPE
#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
#endif

/*
 * Avoid bringing in event parsing.
 */
int parse_event(struct evlist *evlist __maybe_unused, const char *str __maybe_unused)
{
	return 0;
}

/*
 * Provide these two so that we don't have to link against callchain.c and
 * start dragging in hist.c, etc.
 */
struct callchain_param callchain_param;

int parse_callchain_record(const char *arg __maybe_unused,
			   struct callchain_param *param __maybe_unused)
{
	return 0;
}

/*
 * Add these so as not to drag in util/env.c.
 */
struct perf_env perf_env;

const char *perf_env__cpuid(struct perf_env *env __maybe_unused)
{
	return NULL;
}

// This one is a bit easier and wouldn't drag too much in, but leave it as a stub since all we need here is the symbol.
const char *perf_env__arch(struct perf_env *env __maybe_unused)
{
	return NULL;
}

/*
 * These are needed so as not to drag in the PMU bandwagon, the jevents
 * generated pmu_sys_event_tables, etc. evsel__find_pmu() is, so far, used
 * just for per PMU perf_event_attr.exclude_guest handling, which isn't really
 * needed for the known perf python binding use cases; revisit if this becomes
 * necessary.
 */
struct perf_pmu *evsel__find_pmu(const struct evsel *evsel __maybe_unused)
{
	return NULL;
}

int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt, ...)
{
	return EOF;
}

const char *perf_pmu__name_from_config(struct perf_pmu *pmu __maybe_unused, u64 config __maybe_unused)
{
	return NULL;
}

struct perf_pmu *perf_pmus__find_by_type(unsigned int type __maybe_unused)
{
	return NULL;
}

int perf_pmus__num_core_pmus(void)
{
	return 1;
}

bool evsel__is_aux_event(const struct evsel *evsel __maybe_unused)
{
	return false;
}

bool perf_pmus__supports_extended_type(void)
{
	return false;
}

/*
 * Add this one here so as not to drag in util/metricgroup.c.
 */
int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
				    struct rblist *new_metric_events,
				    struct rblist *old_metric_events)
{
	return 0;
}

/*
 * Add this one here so as not to drag in util/trace-event-info.c.
 */
char *tracepoint_id_to_name(u64 config)
{
	return NULL;
}

/*
 * XXX: All these evsel destructors need some better mechanism, like a linked
 * list of destructors registered when the relevant code is actually used,
 * instead of having more and more calls in perf_evsel__delete(). -- acme
 *
 * For now, add some more:
 *
 * Not to drag in the BPF bandwagon...
 */
void bpf_counter__destroy(struct evsel *evsel);
int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
int bpf_counter__disable(struct evsel *evsel);

void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
{
}

int bpf_counter__install_pe(struct evsel *evsel __maybe_unused, int cpu __maybe_unused, int fd __maybe_unused)
{
	return 0;
}

int bpf_counter__disable(struct evsel *evsel __maybe_unused)
{
	return 0;
}

// Not to drag in util/bpf-filter.c
#ifdef HAVE_BPF_SKEL
int perf_bpf_filter__prepare(struct evsel *evsel __maybe_unused)
{
	return 0;
}

int perf_bpf_filter__destroy(struct evsel *evsel __maybe_unused)
{
	return 0;
}
#endif

/*
 * Support debug printing even though util/debug.c is not linked. That means
 * implementing 'verbose' and 'eprintf'.
 */
int verbose;
int debug_peo_args;

int eprintf(int level, int var, const char *fmt, ...);

int eprintf(int level, int var, const char *fmt, ...)
{
	va_list args;
	int ret = 0;

	if (var >= level) {
		va_start(args, fmt);
		ret = vfprintf(stderr, fmt, args);
		va_end(args);
	}

	return ret;
}

/* Define PyVarObject_HEAD_INIT for python 2.5 */
#ifndef PyVarObject_HEAD_INIT
# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
#endif

#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initperf(void);
#else
PyMODINIT_FUNC PyInit_perf(void);
#endif

#define member_def(type, member, ptype, help) \
	{ #member, ptype, \
	  offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
	  0, help }

#define sample_member_def(name, member, ptype, help) \
	{ #name, ptype, \
	  offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \
	  0, help }
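
/*
 * For instance, member_def(perf_record_comm, pid, T_UINT, "event pid")
 * expands to a PyMemberDef entry roughly like:
 *
 *   { "pid", T_UINT,
 *     offsetof(struct pyrf_event, event) + offsetof(struct perf_record_comm, pid),
 *     0, "event pid" }
 *
 * i.e. the Python attribute reads straight out of the copied perf record,
 * while sample_member_def() does the same for fields of the parsed
 * struct perf_sample.
 */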

struct pyrf_event {
	PyObject_HEAD
	struct evsel *evsel;
	struct perf_sample sample;
	union perf_event event;
};

#define sample_members \
	sample_member_def(sample_ip, ip, T_ULONGLONG, "event ip"), \
	sample_member_def(sample_pid, pid, T_INT, "event pid"), \
	sample_member_def(sample_tid, tid, T_INT, "event tid"), \
	sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \
	sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"), \
	sample_member_def(sample_id, id, T_ULONGLONG, "event id"), \
	sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
	sample_member_def(sample_period, period, T_ULONGLONG, "event period"), \
	sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),

static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");

static PyMemberDef pyrf_mmap_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_event_header, misc, T_UINT, "event misc"),
	member_def(perf_record_mmap, pid, T_UINT, "event pid"),
	member_def(perf_record_mmap, tid, T_UINT, "event tid"),
	member_def(perf_record_mmap, start, T_ULONGLONG, "start of the map"),
	member_def(perf_record_mmap, len, T_ULONGLONG, "map length"),
	member_def(perf_record_mmap, pgoff, T_ULONGLONG, "page offset"),
	member_def(perf_record_mmap, filename, T_STRING_INPLACE, "backing store"),
	{ .name = NULL, },
};

static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRI_lx64 ", "
			 "length: %#" PRI_lx64 ", offset: %#" PRI_lx64 ", "
			 "filename: %s }",
		     pevent->event.mmap.pid, pevent->event.mmap.tid,
		     pevent->event.mmap.start, pevent->event.mmap.len,
		     pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

static PyTypeObject pyrf_mmap_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.mmap_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_mmap_event__doc,
	.tp_members = pyrf_mmap_event__members,
	.tp_repr = (reprfunc)pyrf_mmap_event__repr,
};

static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");

static PyMemberDef pyrf_task_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_fork, pid, T_UINT, "event pid"),
	member_def(perf_record_fork, ppid, T_UINT, "event ppid"),
	member_def(perf_record_fork, tid, T_UINT, "event tid"),
	member_def(perf_record_fork, ptid, T_UINT, "event ptid"),
	member_def(perf_record_fork, time, T_ULONGLONG, "timestamp"),
	{ .name = NULL, },
};

static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
{
	return _PyUnicode_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
				     "ptid: %u, time: %" PRI_lu64 "}",
				     pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
				     pevent->event.fork.pid,
				     pevent->event.fork.ppid,
				     pevent->event.fork.tid,
				     pevent->event.fork.ptid,
				     pevent->event.fork.time);
}

static PyTypeObject pyrf_task_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.task_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_task_event__doc,
	.tp_members = pyrf_task_event__members,
	.tp_repr = (reprfunc)pyrf_task_event__repr,
};

static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");

static PyMemberDef pyrf_comm_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_comm, pid, T_UINT, "event pid"),
	member_def(perf_record_comm, tid, T_UINT, "event tid"),
	member_def(perf_record_comm, comm, T_STRING_INPLACE, "process name"),
	{ .name = NULL, },
};

static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
{
	return _PyUnicode_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
				     pevent->event.comm.pid,
				     pevent->event.comm.tid,
				     pevent->event.comm.comm);
}

static PyTypeObject pyrf_comm_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.comm_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_comm_event__doc,
	.tp_members = pyrf_comm_event__members,
	.tp_repr = (reprfunc)pyrf_comm_event__repr,
};

static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");

static PyMemberDef pyrf_throttle_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_throttle, time, T_ULONGLONG, "timestamp"),
	member_def(perf_record_throttle, id, T_ULONGLONG, "event id"),
	member_def(perf_record_throttle, stream_id, T_ULONGLONG, "event stream id"),
	{ .name = NULL, },
};

static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
{
	struct perf_record_throttle *te = (struct perf_record_throttle *)(&pevent->event.header + 1);

	return _PyUnicode_FromFormat("{ type: %sthrottle, time: %" PRI_lu64 ", id: %" PRI_lu64
				     ", stream_id: %" PRI_lu64 " }",
				     pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
				     te->time, te->id, te->stream_id);
}

static PyTypeObject pyrf_throttle_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.throttle_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_throttle_event__doc,
	.tp_members = pyrf_throttle_event__members,
	.tp_repr = (reprfunc)pyrf_throttle_event__repr,
};

static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");

static PyMemberDef pyrf_lost_event__members[] = {
	sample_members
	member_def(perf_record_lost, id, T_ULONGLONG, "event id"),
	member_def(perf_record_lost, lost, T_ULONGLONG, "number of lost events"),
	{ .name = NULL, },
};

static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: lost, id: %#" PRI_lx64 ", "
			 "lost: %#" PRI_lx64 " }",
		     pevent->event.lost.id, pevent->event.lost.lost) < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

static PyTypeObject pyrf_lost_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.lost_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_lost_event__doc,
	.tp_members = pyrf_lost_event__members,
	.tp_repr = (reprfunc)pyrf_lost_event__repr,
};

static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");

static PyMemberDef pyrf_read_event__members[] = {
	sample_members
	member_def(perf_record_read, pid, T_UINT, "event pid"),
	member_def(perf_record_read, tid, T_UINT, "event tid"),
	{ .name = NULL, },
};

static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent)
{
	return _PyUnicode_FromFormat("{ type: read, pid: %u, tid: %u }",
				     pevent->event.read.pid,
				     pevent->event.read.tid);
	/*
	 * FIXME: return the array of read values,
	 * making this method useful ;-)
	 */
}

static PyTypeObject pyrf_read_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.read_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_read_event__doc,
	.tp_members = pyrf_read_event__members,
	.tp_repr = (reprfunc)pyrf_read_event__repr,
};

static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");

static PyMemberDef pyrf_sample_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	{ .name = NULL, },
};

static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: sample }") < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

#ifdef HAVE_LIBTRACEEVENT
static bool is_tracepoint(struct pyrf_event *pevent)
{
	return pevent->evsel->core.attr.type == PERF_TYPE_TRACEPOINT;
}

static PyObject*
tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field)
{
	struct tep_handle *pevent = field->event->tep;
	void *data = pe->sample.raw_data;
	PyObject *ret = NULL;
	unsigned long long val;
	unsigned int offset, len;

	if (field->flags & TEP_FIELD_IS_ARRAY) {
		offset = field->offset;
		len = field->size;
		if (field->flags & TEP_FIELD_IS_DYNAMIC) {
			val = tep_read_number(pevent, data + offset, len);
			offset = val;
			len = offset >> 16;
			offset &= 0xffff;
			if (tep_field_is_relative(field->flags))
				offset += field->offset + field->size;
		}
		if (field->flags & TEP_FIELD_IS_STRING &&
		    is_printable_array(data + offset, len)) {
			ret = _PyUnicode_FromString((char *)data + offset);
		} else {
			ret = PyByteArray_FromStringAndSize((const char *) data + offset, len);
			field->flags &= ~TEP_FIELD_IS_STRING;
		}
	} else {
		val = tep_read_number(pevent, data + field->offset,
				      field->size);
		if (field->flags & TEP_FIELD_IS_POINTER)
			ret = PyLong_FromUnsignedLong((unsigned long) val);
		else if (field->flags & TEP_FIELD_IS_SIGNED)
			ret = PyLong_FromLong((long) val);
		else
			ret = PyLong_FromUnsignedLong((unsigned long) val);
	}

	return ret;
}

static PyObject*
get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
{
	const char *str = _PyUnicode_AsString(PyObject_Str(attr_name));
	struct evsel *evsel = pevent->evsel;
	struct tep_format_field *field;

	if (!evsel->tp_format) {
		struct tep_event *tp_format;

		tp_format = trace_event__tp_format_id(evsel->core.attr.config);
		if (IS_ERR_OR_NULL(tp_format))
			return NULL;

		evsel->tp_format = tp_format;
	}

	field = tep_find_any_field(evsel->tp_format, str);
	if (!field)
		return NULL;

	return tracepoint_field(pevent, field);
}
#endif /* HAVE_LIBTRACEEVENT */

static PyObject*
pyrf_sample_event__getattro(struct pyrf_event *pevent, PyObject *attr_name)
{
	PyObject *obj = NULL;

#ifdef HAVE_LIBTRACEEVENT
	if (is_tracepoint(pevent))
		obj = get_tracepoint_field(pevent, attr_name);
#endif

	return obj ?: PyObject_GenericGetAttr((PyObject *) pevent, attr_name);
}

static PyTypeObject pyrf_sample_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.sample_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_sample_event__doc,
	.tp_members = pyrf_sample_event__members,
	.tp_repr = (reprfunc)pyrf_sample_event__repr,
	.tp_getattro = (getattrofunc) pyrf_sample_event__getattro,
};

static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");

static PyMemberDef pyrf_context_switch_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_switch, next_prev_pid, T_UINT, "next/prev pid"),
	member_def(perf_record_switch, next_prev_tid, T_UINT, "next/prev tid"),
	{ .name = NULL, },
};

static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: context_switch, next_prev_pid: %u, next_prev_tid: %u, switch_out: %u }",
		     pevent->event.context_switch.next_prev_pid,
		     pevent->event.context_switch.next_prev_tid,
		     !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

static PyTypeObject pyrf_context_switch_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.context_switch_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_context_switch_event__doc,
	.tp_members = pyrf_context_switch_event__members,
	.tp_repr = (reprfunc)pyrf_context_switch_event__repr,
};

static int pyrf_event__setup_types(void)
{
	int err;
	pyrf_mmap_event__type.tp_new =
	pyrf_task_event__type.tp_new =
	pyrf_comm_event__type.tp_new =
	pyrf_lost_event__type.tp_new =
	pyrf_read_event__type.tp_new =
	pyrf_sample_event__type.tp_new =
	pyrf_context_switch_event__type.tp_new =
	pyrf_throttle_event__type.tp_new = PyType_GenericNew;
	err = PyType_Ready(&pyrf_mmap_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_lost_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_task_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_comm_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_throttle_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_read_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_sample_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_context_switch_event__type);
	if (err < 0)
		goto out;
out:
	return err;
}

static PyTypeObject *pyrf_event__type[] = {
	[PERF_RECORD_MMAP] = &pyrf_mmap_event__type,
	[PERF_RECORD_LOST] = &pyrf_lost_event__type,
	[PERF_RECORD_COMM] = &pyrf_comm_event__type,
	[PERF_RECORD_EXIT] = &pyrf_task_event__type,
	[PERF_RECORD_THROTTLE] = &pyrf_throttle_event__type,
	[PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type,
	[PERF_RECORD_FORK] = &pyrf_task_event__type,
	[PERF_RECORD_READ] = &pyrf_read_event__type,
	[PERF_RECORD_SAMPLE] = &pyrf_sample_event__type,
	[PERF_RECORD_SWITCH] = &pyrf_context_switch_event__type,
	[PERF_RECORD_SWITCH_CPU_WIDE] = &pyrf_context_switch_event__type,
};

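/*
 * Turn a raw record from the mmap ring buffer into the matching Python
 * object: the Python type is picked by event->header.type from the
 * pyrf_event__type[] table above and the whole record (header.size bytes)
 * is copied into the new object.  Record types that aren't handled (anything
 * outside PERF_RECORD_MMAP..PERF_RECORD_SAMPLE, except the two SWITCH
 * records) are rejected with NULL.
 */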
static PyObject *pyrf_event__new(union perf_event *event)
{
	struct pyrf_event *pevent;
	PyTypeObject *ptype;

	if ((event->header.type < PERF_RECORD_MMAP ||
	     event->header.type > PERF_RECORD_SAMPLE) &&
	    !(event->header.type == PERF_RECORD_SWITCH ||
	      event->header.type == PERF_RECORD_SWITCH_CPU_WIDE))
		return NULL;

	ptype = pyrf_event__type[event->header.type];
	pevent = PyObject_New(struct pyrf_event, ptype);
	if (pevent != NULL)
		memcpy(&pevent->event, event, event->header.size);
	return (PyObject *)pevent;
}

struct pyrf_cpu_map {
	PyObject_HEAD

	struct perf_cpu_map *cpus;
};

static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
			      PyObject *args, PyObject *kwargs)
{
	static char *kwlist[] = { "cpustr", NULL };
	char *cpustr = NULL;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
					 kwlist, &cpustr))
		return -1;

	pcpus->cpus = perf_cpu_map__new(cpustr);
	if (pcpus->cpus == NULL)
		return -1;
	return 0;
}

static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
{
	perf_cpu_map__put(pcpus->cpus);
	Py_TYPE(pcpus)->tp_free((PyObject*)pcpus);
}

static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
{
	struct pyrf_cpu_map *pcpus = (void *)obj;

	return perf_cpu_map__nr(pcpus->cpus);
}

static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_cpu_map *pcpus = (void *)obj;

	if (i >= perf_cpu_map__nr(pcpus->cpus))
		return NULL;

	return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu);
}

static PySequenceMethods pyrf_cpu_map__sequence_methods = {
	.sq_length = pyrf_cpu_map__length,
	.sq_item = pyrf_cpu_map__item,
};

static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");

static PyTypeObject pyrf_cpu_map__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.cpu_map",
	.tp_basicsize = sizeof(struct pyrf_cpu_map),
	.tp_dealloc = (destructor)pyrf_cpu_map__delete,
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_cpu_map__doc,
	.tp_as_sequence = &pyrf_cpu_map__sequence_methods,
	.tp_init = (initproc)pyrf_cpu_map__init,
};

static int pyrf_cpu_map__setup_types(void)
{
	pyrf_cpu_map__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_cpu_map__type);
}
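
/*
 * From Python this is exposed as perf.cpu_map, which behaves like a
 * read-only sequence of cpu numbers.  A minimal sketch, assuming the perf
 * module built from this file is importable:
 *
 *   import perf
 *   cpus = perf.cpu_map()          # all online CPUs
 *   cpus = perf.cpu_map("0,2-3")   # or an explicit cpu list string
 *   print(len(cpus), cpus[0])
 */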

struct pyrf_thread_map {
	PyObject_HEAD

	struct perf_thread_map *threads;
};

static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
				 PyObject *args, PyObject *kwargs)
{
	static char *kwlist[] = { "pid", "tid", "uid", NULL };
	int pid = -1, tid = -1, uid = UINT_MAX;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii",
					 kwlist, &pid, &tid, &uid))
		return -1;

	pthreads->threads = thread_map__new(pid, tid, uid);
	if (pthreads->threads == NULL)
		return -1;
	return 0;
}

static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
{
	perf_thread_map__put(pthreads->threads);
	Py_TYPE(pthreads)->tp_free((PyObject*)pthreads);
}

static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
{
	struct pyrf_thread_map *pthreads = (void *)obj;

	return perf_thread_map__nr(pthreads->threads);
}

static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_thread_map *pthreads = (void *)obj;

	if (i >= perf_thread_map__nr(pthreads->threads))
		return NULL;

	return Py_BuildValue("i", perf_thread_map__pid(pthreads->threads, i));
}

static PySequenceMethods pyrf_thread_map__sequence_methods = {
	.sq_length = pyrf_thread_map__length,
	.sq_item = pyrf_thread_map__item,
};

static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");

static PyTypeObject pyrf_thread_map__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.thread_map",
	.tp_basicsize = sizeof(struct pyrf_thread_map),
	.tp_dealloc = (destructor)pyrf_thread_map__delete,
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_thread_map__doc,
	.tp_as_sequence = &pyrf_thread_map__sequence_methods,
	.tp_init = (initproc)pyrf_thread_map__init,
};

static int pyrf_thread_map__setup_types(void)
{
	pyrf_thread_map__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_thread_map__type);
}
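
/*
 * Python sketch: with no arguments perf.thread_map() ends up as the dummy
 * { -1 } map used for cpu/system wide monitoring; pass pid= or tid= to
 * target specific tasks, e.g.:
 *
 *   threads = perf.thread_map(pid = 1234)
 *   for tid in threads:
 *       print(tid)
 */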

struct pyrf_evsel {
	PyObject_HEAD

	struct evsel evsel;
};

static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
			    PyObject *args, PyObject *kwargs)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
		.sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
	};
	static char *kwlist[] = {
		"type",
		"config",
		"sample_freq",
		"sample_period",
		"sample_type",
		"read_format",
		"disabled",
		"inherit",
		"pinned",
		"exclusive",
		"exclude_user",
		"exclude_kernel",
		"exclude_hv",
		"exclude_idle",
		"mmap",
		"context_switch",
		"comm",
		"freq",
		"inherit_stat",
		"enable_on_exec",
		"task",
		"watermark",
		"precise_ip",
		"mmap_data",
		"sample_id_all",
		"wakeup_events",
		"bp_type",
		"bp_addr",
		"bp_len",
		NULL
	};
	u64 sample_period = 0;
	u32 disabled = 0,
	    inherit = 0,
	    pinned = 0,
	    exclusive = 0,
	    exclude_user = 0,
	    exclude_kernel = 0,
	    exclude_hv = 0,
	    exclude_idle = 0,
	    mmap = 0,
	    context_switch = 0,
	    comm = 0,
	    freq = 1,
	    inherit_stat = 0,
	    enable_on_exec = 0,
	    task = 0,
	    watermark = 0,
	    precise_ip = 0,
	    mmap_data = 0,
	    sample_id_all = 1;
	int idx = 0;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs,
					 "|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
					 &attr.type, &attr.config, &attr.sample_freq,
					 &sample_period, &attr.sample_type,
					 &attr.read_format, &disabled, &inherit,
					 &pinned, &exclusive, &exclude_user,
					 &exclude_kernel, &exclude_hv, &exclude_idle,
					 &mmap, &context_switch, &comm, &freq, &inherit_stat,
					 &enable_on_exec, &task, &watermark,
					 &precise_ip, &mmap_data, &sample_id_all,
					 &attr.wakeup_events, &attr.bp_type,
					 &attr.bp_addr, &attr.bp_len, idx))
		return -1;

	/* union... */
	if (sample_period != 0) {
		if (attr.sample_freq != 0)
			return -1; /* FIXME: throw right exception */
		attr.sample_period = sample_period;
	}

	/* Bitfields */
	attr.disabled = disabled;
	attr.inherit = inherit;
	attr.pinned = pinned;
	attr.exclusive = exclusive;
	attr.exclude_user = exclude_user;
	attr.exclude_kernel = exclude_kernel;
	attr.exclude_hv = exclude_hv;
	attr.exclude_idle = exclude_idle;
	attr.mmap = mmap;
	attr.context_switch = context_switch;
	attr.comm = comm;
	attr.freq = freq;
	attr.inherit_stat = inherit_stat;
	attr.enable_on_exec = enable_on_exec;
	attr.task = task;
	attr.watermark = watermark;
	attr.precise_ip = precise_ip;
	attr.mmap_data = mmap_data;
	attr.sample_id_all = sample_id_all;
	attr.size = sizeof(attr);

	evsel__init(&pevsel->evsel, &attr, idx);
	return 0;
}

static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
{
	evsel__exit(&pevsel->evsel);
	Py_TYPE(pevsel)->tp_free((PyObject*)pevsel);
}

static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
				  PyObject *args, PyObject *kwargs)
{
	struct evsel *evsel = &pevsel->evsel;
	struct perf_cpu_map *cpus = NULL;
	struct perf_thread_map *threads = NULL;
	PyObject *pcpus = NULL, *pthreads = NULL;
	int group = 0, inherit = 0;
	static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL };

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
					 &pcpus, &pthreads, &group, &inherit))
		return NULL;

	if (pthreads != NULL)
		threads = ((struct pyrf_thread_map *)pthreads)->threads;

	if (pcpus != NULL)
		cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;

	evsel->core.attr.inherit = inherit;
	/*
	 * This will group just the fds for this single evsel; to group
	 * multiple events, use evlist.open().
	 */
	if (evsel__open(evsel, cpus, threads) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}

static PyMethodDef pyrf_evsel__methods[] = {
	{
		.ml_name = "open",
		.ml_meth = (PyCFunction)pyrf_evsel__open,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc = PyDoc_STR("open the event selector file descriptor table.")
	},
	{ .ml_name = NULL, }
};

static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector object.");

static PyTypeObject pyrf_evsel__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.evsel",
	.tp_basicsize = sizeof(struct pyrf_evsel),
	.tp_dealloc = (destructor)pyrf_evsel__delete,
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_evsel__doc,
	.tp_methods = pyrf_evsel__methods,
	.tp_init = (initproc)pyrf_evsel__init,
};

static int pyrf_evsel__setup_types(void)
{
	pyrf_evsel__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_evsel__type);
}
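
/*
 * Python sketch for creating and opening a single event: the keyword names
 * map onto the kwlist in pyrf_evsel__init() above, the values below are just
 * illustrative:
 *
 *   evsel = perf.evsel(type = perf.TYPE_SOFTWARE,
 *                      config = perf.COUNT_SW_TASK_CLOCK,
 *                      freq = 0, sample_period = 1000, wakeup_events = 1,
 *                      sample_type = perf.SAMPLE_TID | perf.SAMPLE_CPU)
 *   evsel.open(cpus = perf.cpu_map(), threads = perf.thread_map())
 */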

struct pyrf_evlist {
	PyObject_HEAD

	struct evlist evlist;
};

static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
			     PyObject *args, PyObject *kwargs __maybe_unused)
{
	PyObject *pcpus = NULL, *pthreads = NULL;
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
		return -1;

	threads = ((struct pyrf_thread_map *)pthreads)->threads;
	cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
	evlist__init(&pevlist->evlist, cpus, threads);
	return 0;
}

static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
{
	evlist__exit(&pevlist->evlist);
	Py_TYPE(pevlist)->tp_free((PyObject*)pevlist);
}

static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	static char *kwlist[] = { "pages", "overwrite", NULL };
	int pages = 128, overwrite = false;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
					 &pages, &overwrite))
		return NULL;

	if (evlist__mmap(evlist, pages) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}

static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	static char *kwlist[] = { "timeout", NULL };
	int timeout = -1, n;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
		return NULL;

	n = evlist__poll(evlist, timeout);
	if (n < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	return Py_BuildValue("i", n);
}

static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
					 PyObject *args __maybe_unused,
					 PyObject *kwargs __maybe_unused)
{
	struct evlist *evlist = &pevlist->evlist;
	PyObject *list = PyList_New(0);
	int i;

	for (i = 0; i < evlist->core.pollfd.nr; ++i) {
		PyObject *file;
#if PY_MAJOR_VERSION < 3
		FILE *fp = fdopen(evlist->core.pollfd.entries[i].fd, "r");

		if (fp == NULL)
			goto free_list;

		file = PyFile_FromFile(fp, "perf", "r", NULL);
#else
		file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1,
				     NULL, NULL, NULL, 0);
#endif
		if (file == NULL)
			goto free_list;

		if (PyList_Append(list, file) != 0) {
			Py_DECREF(file);
			goto free_list;
		}

		Py_DECREF(file);
	}

	return list;
free_list:
	return PyErr_NoMemory();
}

static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
				  PyObject *args,
				  PyObject *kwargs __maybe_unused)
{
	struct evlist *evlist = &pevlist->evlist;
	PyObject *pevsel;
	struct evsel *evsel;

	if (!PyArg_ParseTuple(args, "O", &pevsel))
		return NULL;

	Py_INCREF(pevsel);
	evsel = &((struct pyrf_evsel *)pevsel)->evsel;
	evsel->core.idx = evlist->core.nr_entries;
	evlist__add(evlist, evsel);

	return Py_BuildValue("i", evlist->core.nr_entries);
}

static struct mmap *get_md(struct evlist *evlist, int cpu)
{
	int i;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct mmap *md = &evlist->mmap[i];

		if (md->core.cpu.cpu == cpu)
			return md;
	}

	return NULL;
}

static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
					  PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	union perf_event *event;
	int sample_id_all = 1, cpu;
	static char *kwlist[] = { "cpu", "sample_id_all", NULL };
	struct mmap *md;
	int err;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
					 &cpu, &sample_id_all))
		return NULL;

	md = get_md(evlist, cpu);
	if (!md)
		return NULL;

	if (perf_mmap__read_init(&md->core) < 0)
		goto end;

	event = perf_mmap__read_event(&md->core);
	if (event != NULL) {
		PyObject *pyevent = pyrf_event__new(event);
		struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
		struct evsel *evsel;

		if (pyevent == NULL)
			return PyErr_NoMemory();

		evsel = evlist__event2evsel(evlist, event);
		if (!evsel) {
			Py_INCREF(Py_None);
			return Py_None;
		}

		pevent->evsel = evsel;

		err = evsel__parse_sample(evsel, event, &pevent->sample);

		/* Consume the event only after we have parsed it out. */
		perf_mmap__consume(&md->core);

		if (err)
			return PyErr_Format(PyExc_OSError,
					    "perf: can't parse sample, err=%d", err);
		return pyevent;
	}
end:
	Py_INCREF(Py_None);
	return Py_None;
}

static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;

	if (evlist__open(evlist) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}

static PyMethodDef pyrf_evlist__methods[] = {
	{
		.ml_name = "mmap",
		.ml_meth = (PyCFunction)pyrf_evlist__mmap,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc = PyDoc_STR("mmap the file descriptor table.")
	},
	{
		.ml_name = "open",
		.ml_meth = (PyCFunction)pyrf_evlist__open,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc = PyDoc_STR("open the file descriptors.")
	},
	{
		.ml_name = "poll",
		.ml_meth = (PyCFunction)pyrf_evlist__poll,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc = PyDoc_STR("poll the file descriptor table.")
	},
	{
		.ml_name = "get_pollfd",
		.ml_meth = (PyCFunction)pyrf_evlist__get_pollfd,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc = PyDoc_STR("get the poll file descriptor table.")
	},
	{
		.ml_name = "add",
		.ml_meth = (PyCFunction)pyrf_evlist__add,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc = PyDoc_STR("adds an event selector to the list.")
	},
	{
		.ml_name = "read_on_cpu",
		.ml_meth = (PyCFunction)pyrf_evlist__read_on_cpu,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc = PyDoc_STR("reads an event.")
	},
	{ .ml_name = NULL, }
};

static Py_ssize_t pyrf_evlist__length(PyObject *obj)
{
	struct pyrf_evlist *pevlist = (void *)obj;

	return pevlist->evlist.core.nr_entries;
}

static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_evlist *pevlist = (void *)obj;
	struct evsel *pos;

	if (i >= pevlist->evlist.core.nr_entries)
		return NULL;

	evlist__for_each_entry(&pevlist->evlist, pos) {
		if (i-- == 0)
			break;
	}

	return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
}

static PySequenceMethods pyrf_evlist__sequence_methods = {
	.sq_length = pyrf_evlist__length,
	.sq_item = pyrf_evlist__item,
};

static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");

static PyTypeObject pyrf_evlist__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.evlist",
	.tp_basicsize = sizeof(struct pyrf_evlist),
	.tp_dealloc = (destructor)pyrf_evlist__delete,
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_as_sequence = &pyrf_evlist__sequence_methods,
	.tp_doc = pyrf_evlist__doc,
	.tp_methods = pyrf_evlist__methods,
	.tp_init = (initproc)pyrf_evlist__init,
};

static int pyrf_evlist__setup_types(void)
{
	pyrf_evlist__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_evlist__type);
}
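
/*
 * Putting it together from Python: a sketch of the usual consume loop,
 * roughly what tools/perf/python/twatch.py does (not a drop-in script):
 *
 *   evlist = perf.evlist(cpus, threads)
 *   evlist.add(evsel)
 *   evlist.open()
 *   evlist.mmap(pages = 16)
 *   while True:
 *       evlist.poll(timeout = -1)
 *       for cpu in cpus:
 *           event = evlist.read_on_cpu(cpu)
 *           if event:
 *               print(event)
 */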

#define PERF_CONST(name) { #name, PERF_##name }

static struct {
	const char *name;
	int value;
} perf__constants[] = {
	PERF_CONST(TYPE_HARDWARE),
	PERF_CONST(TYPE_SOFTWARE),
	PERF_CONST(TYPE_TRACEPOINT),
	PERF_CONST(TYPE_HW_CACHE),
	PERF_CONST(TYPE_RAW),
	PERF_CONST(TYPE_BREAKPOINT),

	PERF_CONST(COUNT_HW_CPU_CYCLES),
	PERF_CONST(COUNT_HW_INSTRUCTIONS),
	PERF_CONST(COUNT_HW_CACHE_REFERENCES),
	PERF_CONST(COUNT_HW_CACHE_MISSES),
	PERF_CONST(COUNT_HW_BRANCH_INSTRUCTIONS),
	PERF_CONST(COUNT_HW_BRANCH_MISSES),
	PERF_CONST(COUNT_HW_BUS_CYCLES),
	PERF_CONST(COUNT_HW_CACHE_L1D),
	PERF_CONST(COUNT_HW_CACHE_L1I),
	PERF_CONST(COUNT_HW_CACHE_LL),
	PERF_CONST(COUNT_HW_CACHE_DTLB),
	PERF_CONST(COUNT_HW_CACHE_ITLB),
	PERF_CONST(COUNT_HW_CACHE_BPU),
	PERF_CONST(COUNT_HW_CACHE_OP_READ),
	PERF_CONST(COUNT_HW_CACHE_OP_WRITE),
	PERF_CONST(COUNT_HW_CACHE_OP_PREFETCH),
	PERF_CONST(COUNT_HW_CACHE_RESULT_ACCESS),
	PERF_CONST(COUNT_HW_CACHE_RESULT_MISS),

	PERF_CONST(COUNT_HW_STALLED_CYCLES_FRONTEND),
	PERF_CONST(COUNT_HW_STALLED_CYCLES_BACKEND),

	PERF_CONST(COUNT_SW_CPU_CLOCK),
	PERF_CONST(COUNT_SW_TASK_CLOCK),
	PERF_CONST(COUNT_SW_PAGE_FAULTS),
	PERF_CONST(COUNT_SW_CONTEXT_SWITCHES),
	PERF_CONST(COUNT_SW_CPU_MIGRATIONS),
	PERF_CONST(COUNT_SW_PAGE_FAULTS_MIN),
	PERF_CONST(COUNT_SW_PAGE_FAULTS_MAJ),
	PERF_CONST(COUNT_SW_ALIGNMENT_FAULTS),
	PERF_CONST(COUNT_SW_EMULATION_FAULTS),
	PERF_CONST(COUNT_SW_DUMMY),

	PERF_CONST(SAMPLE_IP),
	PERF_CONST(SAMPLE_TID),
	PERF_CONST(SAMPLE_TIME),
	PERF_CONST(SAMPLE_ADDR),
	PERF_CONST(SAMPLE_READ),
	PERF_CONST(SAMPLE_CALLCHAIN),
	PERF_CONST(SAMPLE_ID),
	PERF_CONST(SAMPLE_CPU),
	PERF_CONST(SAMPLE_PERIOD),
	PERF_CONST(SAMPLE_STREAM_ID),
	PERF_CONST(SAMPLE_RAW),

	PERF_CONST(FORMAT_TOTAL_TIME_ENABLED),
	PERF_CONST(FORMAT_TOTAL_TIME_RUNNING),
	PERF_CONST(FORMAT_ID),
	PERF_CONST(FORMAT_GROUP),

	PERF_CONST(RECORD_MMAP),
	PERF_CONST(RECORD_LOST),
	PERF_CONST(RECORD_COMM),
	PERF_CONST(RECORD_EXIT),
	PERF_CONST(RECORD_THROTTLE),
	PERF_CONST(RECORD_UNTHROTTLE),
	PERF_CONST(RECORD_FORK),
	PERF_CONST(RECORD_READ),
	PERF_CONST(RECORD_SAMPLE),
	PERF_CONST(RECORD_MMAP2),
	PERF_CONST(RECORD_AUX),
	PERF_CONST(RECORD_ITRACE_START),
	PERF_CONST(RECORD_LOST_SAMPLES),
	PERF_CONST(RECORD_SWITCH),
	PERF_CONST(RECORD_SWITCH_CPU_WIDE),

	PERF_CONST(RECORD_MISC_SWITCH_OUT),
	{ .name = NULL, },
};
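
/*
 * Each entry above becomes a module level constant when the module is
 * initialized (see the dict fill-in loop in the init function below), so
 * Python code can write e.g. perf.TYPE_HARDWARE or perf.RECORD_SAMPLE.
 */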

static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
				  PyObject *args, PyObject *kwargs)
{
#ifndef HAVE_LIBTRACEEVENT
	return NULL;
#else
	struct tep_event *tp_format;
	static char *kwlist[] = { "sys", "name", NULL };
	char *sys = NULL;
	char *name = NULL;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ss", kwlist,
					 &sys, &name))
		return NULL;

	tp_format = trace_event__tp_format(sys, name);
	if (IS_ERR(tp_format))
		return _PyLong_FromLong(-1);

	return _PyLong_FromLong(tp_format->id);
#endif // HAVE_LIBTRACEEVENT
}
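
/*
 * Python sketch (needs a build with libtraceevent): resolve a tracepoint to
 * its id and use it as the config of a PERF_TYPE_TRACEPOINT evsel.  The
 * tracepoint name below is just an example:
 *
 *   config = perf.tracepoint(sys = "sched", name = "sched_switch")
 *   evsel = perf.evsel(type = perf.TYPE_TRACEPOINT, config = config,
 *                      freq = 0, sample_period = 1,
 *                      sample_type = perf.SAMPLE_RAW | perf.SAMPLE_TIME)
 */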

static PyMethodDef perf__methods[] = {
	{
		.ml_name = "tracepoint",
		.ml_meth = (PyCFunction) pyrf__tracepoint,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc = PyDoc_STR("Get tracepoint config.")
	},
	{ .ml_name = NULL, }
};

#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initperf(void)
#else
PyMODINIT_FUNC PyInit_perf(void)
#endif
{
	PyObject *obj;
	int i;
	PyObject *dict;
#if PY_MAJOR_VERSION < 3
	PyObject *module = Py_InitModule("perf", perf__methods);
#else
	static struct PyModuleDef moduledef = {
		PyModuleDef_HEAD_INIT,
		"perf",			/* m_name */
		"",			/* m_doc */
		-1,			/* m_size */
		perf__methods,		/* m_methods */
		NULL,			/* m_reload */
		NULL,			/* m_traverse */
		NULL,			/* m_clear */
		NULL,			/* m_free */
	};
	PyObject *module = PyModule_Create(&moduledef);
#endif

	if (module == NULL ||
	    pyrf_event__setup_types() < 0 ||
	    pyrf_evlist__setup_types() < 0 ||
	    pyrf_evsel__setup_types() < 0 ||
	    pyrf_thread_map__setup_types() < 0 ||
	    pyrf_cpu_map__setup_types() < 0)
#if PY_MAJOR_VERSION < 3
		return;
#else
		return module;
#endif

	/* The page_size variable is defined in the util object. */
	page_size = sysconf(_SC_PAGE_SIZE);

	Py_INCREF(&pyrf_evlist__type);
	PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);

	Py_INCREF(&pyrf_evsel__type);
	PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);

	Py_INCREF(&pyrf_mmap_event__type);
	PyModule_AddObject(module, "mmap_event", (PyObject *)&pyrf_mmap_event__type);

	Py_INCREF(&pyrf_lost_event__type);
	PyModule_AddObject(module, "lost_event", (PyObject *)&pyrf_lost_event__type);

	Py_INCREF(&pyrf_comm_event__type);
	PyModule_AddObject(module, "comm_event", (PyObject *)&pyrf_comm_event__type);

	Py_INCREF(&pyrf_task_event__type);
	PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);

	Py_INCREF(&pyrf_throttle_event__type);
	PyModule_AddObject(module, "throttle_event", (PyObject *)&pyrf_throttle_event__type);

	Py_INCREF(&pyrf_read_event__type);
	PyModule_AddObject(module, "read_event", (PyObject *)&pyrf_read_event__type);

	Py_INCREF(&pyrf_sample_event__type);
	PyModule_AddObject(module, "sample_event", (PyObject *)&pyrf_sample_event__type);

	Py_INCREF(&pyrf_context_switch_event__type);
	PyModule_AddObject(module, "switch_event", (PyObject *)&pyrf_context_switch_event__type);

	Py_INCREF(&pyrf_thread_map__type);
	PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);

	Py_INCREF(&pyrf_cpu_map__type);
	PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);

	dict = PyModule_GetDict(module);
	if (dict == NULL)
		goto error;

	for (i = 0; perf__constants[i].name != NULL; i++) {
		obj = _PyLong_FromLong(perf__constants[i].value);
		if (obj == NULL)
			goto error;
		PyDict_SetItemString(dict, perf__constants[i].name, obj);
		Py_DECREF(obj);
	}

error:
	if (PyErr_Occurred())
		PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
#if PY_MAJOR_VERSION >= 3
	return module;
#endif
}

/*
 * Dummy, to avoid dragging all of the test_attr infrastructure into the
 * python binding.
 */
void test_attr__open(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu,
		     int fd, int group_fd, unsigned long flags)
{
}

void evlist__free_stats(struct evlist *evlist)
{
}