xref: /openbmc/linux/tools/perf/util/python.c (revision b694e3c604e999343258c49e574abd7be012e726)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <Python.h>
3 #include <structmember.h>
4 #include <inttypes.h>
5 #include <poll.h>
6 #include <linux/err.h>
7 #include <perf/cpumap.h>
8 #ifdef HAVE_LIBTRACEEVENT
9 #include <traceevent/event-parse.h>
10 #endif
11 #include <perf/mmap.h>
12 #include "evlist.h"
13 #include "callchain.h"
14 #include "evsel.h"
15 #include "event.h"
16 #include "print_binary.h"
17 #include "thread_map.h"
18 #include "trace-event.h"
19 #include "mmap.h"
20 #include "stat.h"
21 #include "metricgroup.h"
22 #include "util/bpf-filter.h"
23 #include "util/env.h"
24 #include "util/pmu.h"
25 #include "util/pmus.h"
26 #include <internal/lib.h>
27 #include "util.h"
28 
29 #if PY_MAJOR_VERSION < 3
30 #define _PyUnicode_FromString(arg) \
31   PyString_FromString(arg)
32 #define _PyUnicode_AsString(arg) \
33   PyString_AsString(arg)
34 #define _PyUnicode_FromFormat(...) \
35   PyString_FromFormat(__VA_ARGS__)
36 #define _PyLong_FromLong(arg) \
37   PyInt_FromLong(arg)
38 
39 #else
40 
41 #define _PyUnicode_FromString(arg) \
42   PyUnicode_FromString(arg)
43 #define _PyUnicode_FromFormat(...) \
44   PyUnicode_FromFormat(__VA_ARGS__)
45 #define _PyLong_FromLong(arg) \
46   PyLong_FromLong(arg)
47 #endif
48 
49 #ifndef Py_TYPE
50 #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
51 #endif
52 
53 /*
54  * Avoid bringing in event parsing.
55  */
56 int parse_event(struct evlist *evlist __maybe_unused, const char *str __maybe_unused)
57 {
58 	return 0;
59 }
60 
61 /*
62  * Provide these two so that we don't have to link against callchain.c and
63  * start dragging hist.c, etc.
64  */
65 struct callchain_param callchain_param;
66 
67 int parse_callchain_record(const char *arg __maybe_unused,
68 			   struct callchain_param *param __maybe_unused)
69 {
70 	return 0;
71 }
72 
73 /*
74  * Add these not to drag util/env.c
75  */
76 struct perf_env perf_env;
77 
78 const char *perf_env__cpuid(struct perf_env *env __maybe_unused)
79 {
80 	return NULL;
81 }
82 
83 // This one is a bit easier, wouldn't drag too much, but leave it as a stub since we need it here
84 const char *perf_env__arch(struct perf_env *env __maybe_unused)
85 {
86 	return NULL;
87 }
88 
89 /*
90  * These are needed so as not to drag in the PMU bandwagon, jevents generated
91  * pmu_sys_event_tables, etc. evsel__find_pmu() is so far used just for
92  * doing per PMU perf_event_attr.exclude_guest handling, which isn't really
93  * needed for the known perf python binding use cases; revisit if this
94  * becomes necessary.
95  */
96 struct perf_pmu *evsel__find_pmu(const struct evsel *evsel __maybe_unused)
97 {
98 	return NULL;
99 }
100 
101 int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt, ...)
102 {
103 	return EOF;
104 }
105 
106 const char *perf_pmu__name_from_config(struct perf_pmu *pmu __maybe_unused, u64 config __maybe_unused)
107 {
108 	return NULL;
109 }
110 
111 struct perf_pmu *perf_pmus__find_by_type(unsigned int type __maybe_unused)
112 {
113 	return NULL;
114 }
115 
116 int perf_pmus__num_core_pmus(void)
117 {
118 	return 1;
119 }
120 
121 bool evsel__is_aux_event(const struct evsel *evsel __maybe_unused)
122 {
123 	return false;
124 }
125 
126 bool perf_pmus__supports_extended_type(void)
127 {
128 	return false;
129 }
130 
131 /*
132  * Add this one here not to drag util/metricgroup.c
133  */
134 int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
135 				    struct rblist *new_metric_events,
136 				    struct rblist *old_metric_events)
137 {
138 	return 0;
139 }
140 
141 /*
142  * Add this one here not to drag util/trace-event-info.c
143  */
144 char *tracepoint_id_to_name(u64 config)
145 {
146 	return NULL;
147 }
148 
149 /*
150  * XXX: All these evsel destructors need some better mechanism, like a linked
151  * list of destructors registered when the relevant code is actually used,
152  * instead of having more and more calls in perf_evsel__delete(). -- acme
153  *
154  * For now, add some more:
155  *
156  * Not to drag the BPF bandwagon...
157  */
158 void bpf_counter__destroy(struct evsel *evsel);
159 int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
160 int bpf_counter__disable(struct evsel *evsel);
161 
162 void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
163 {
164 }
165 
166 int bpf_counter__install_pe(struct evsel *evsel __maybe_unused, int cpu __maybe_unused, int fd __maybe_unused)
167 {
168 	return 0;
169 }
170 
171 int bpf_counter__disable(struct evsel *evsel __maybe_unused)
172 {
173 	return 0;
174 }
175 
176 // not to drag util/bpf-filter.c
177 #ifdef HAVE_BPF_SKEL
178 int perf_bpf_filter__prepare(struct evsel *evsel __maybe_unused)
179 {
180 	return 0;
181 }
182 
183 int perf_bpf_filter__destroy(struct evsel *evsel __maybe_unused)
184 {
185 	return 0;
186 }
187 #endif
188 
189 /*
190  * Support debug printing even though util/debug.c is not linked.  That means
191  * implementing 'verbose' and 'eprintf'.
192  */
193 int verbose;
194 int debug_peo_args;
195 
196 int eprintf(int level, int var, const char *fmt, ...);
197 
198 int eprintf(int level, int var, const char *fmt, ...)
199 {
200 	va_list args;
201 	int ret = 0;
202 
203 	if (var >= level) {
204 		va_start(args, fmt);
205 		ret = vfprintf(stderr, fmt, args);
206 		va_end(args);
207 	}
208 
209 	return ret;
210 }
211 
212 /* Define PyVarObject_HEAD_INIT for python 2.5 */
213 #ifndef PyVarObject_HEAD_INIT
214 # define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
215 #endif
216 
217 #if PY_MAJOR_VERSION < 3
218 PyMODINIT_FUNC initperf(void);
219 #else
220 PyMODINIT_FUNC PyInit_perf(void);
221 #endif
222 
223 #define member_def(type, member, ptype, help) \
224 	{ #member, ptype, \
225 	  offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
226 	  0, help }
227 
228 #define sample_member_def(name, member, ptype, help) \
229 	{ #name, ptype, \
230 	  offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \
231 	  0, help }
232 
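/*
 * For reference, a sketch of what these helpers expand to:
 * member_def(perf_event_header, type, T_UINT, "event type") becomes roughly
 *
 *	{ "type", T_UINT,
 *	  offsetof(struct pyrf_event, event) + offsetof(struct perf_event_header, type),
 *	  0, "event type" }
 *
 * so every PyMemberDef entry reads straight out of the event/sample copy
 * held in struct pyrf_event below.
 */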
233 struct pyrf_event {
234 	PyObject_HEAD
235 	struct evsel *evsel;
236 	struct perf_sample sample;
237 	union perf_event   event;
238 };
239 
240 #define sample_members \
241 	sample_member_def(sample_ip, ip, T_ULONGLONG, "event ip"),			 \
242 	sample_member_def(sample_pid, pid, T_INT, "event pid"),			 \
243 	sample_member_def(sample_tid, tid, T_INT, "event tid"),			 \
244 	sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"),		 \
245 	sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"),		 \
246 	sample_member_def(sample_id, id, T_ULONGLONG, "event id"),			 \
247 	sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
248 	sample_member_def(sample_period, period, T_ULONGLONG, "event period"),		 \
249 	sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),
250 
251 static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");
252 
253 static PyMemberDef pyrf_mmap_event__members[] = {
254 	sample_members
255 	member_def(perf_event_header, type, T_UINT, "event type"),
256 	member_def(perf_event_header, misc, T_UINT, "event misc"),
257 	member_def(perf_record_mmap, pid, T_UINT, "event pid"),
258 	member_def(perf_record_mmap, tid, T_UINT, "event tid"),
259 	member_def(perf_record_mmap, start, T_ULONGLONG, "start of the map"),
260 	member_def(perf_record_mmap, len, T_ULONGLONG, "map length"),
261 	member_def(perf_record_mmap, pgoff, T_ULONGLONG, "page offset"),
262 	member_def(perf_record_mmap, filename, T_STRING_INPLACE, "backing store"),
263 	{ .name = NULL, },
264 };
265 
266 static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
267 {
268 	PyObject *ret;
269 	char *s;
270 
271 	if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRI_lx64 ", "
272 			 "length: %#" PRI_lx64 ", offset: %#" PRI_lx64 ", "
273 			 "filename: %s }",
274 		     pevent->event.mmap.pid, pevent->event.mmap.tid,
275 		     pevent->event.mmap.start, pevent->event.mmap.len,
276 		     pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
277 		ret = PyErr_NoMemory();
278 	} else {
279 		ret = _PyUnicode_FromString(s);
280 		free(s);
281 	}
282 	return ret;
283 }
284 
285 static PyTypeObject pyrf_mmap_event__type = {
286 	PyVarObject_HEAD_INIT(NULL, 0)
287 	.tp_name	= "perf.mmap_event",
288 	.tp_basicsize	= sizeof(struct pyrf_event),
289 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
290 	.tp_doc		= pyrf_mmap_event__doc,
291 	.tp_members	= pyrf_mmap_event__members,
292 	.tp_repr	= (reprfunc)pyrf_mmap_event__repr,
293 };
294 
295 static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");
296 
297 static PyMemberDef pyrf_task_event__members[] = {
298 	sample_members
299 	member_def(perf_event_header, type, T_UINT, "event type"),
300 	member_def(perf_record_fork, pid, T_UINT, "event pid"),
301 	member_def(perf_record_fork, ppid, T_UINT, "event ppid"),
302 	member_def(perf_record_fork, tid, T_UINT, "event tid"),
303 	member_def(perf_record_fork, ptid, T_UINT, "event ptid"),
304 	member_def(perf_record_fork, time, T_ULONGLONG, "timestamp"),
305 	{ .name = NULL, },
306 };
307 
308 static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
309 {
310 	return _PyUnicode_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
311 				   "ptid: %u, time: %" PRI_lu64 "}",
312 				   pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
313 				   pevent->event.fork.pid,
314 				   pevent->event.fork.ppid,
315 				   pevent->event.fork.tid,
316 				   pevent->event.fork.ptid,
317 				   pevent->event.fork.time);
318 }
319 
320 static PyTypeObject pyrf_task_event__type = {
321 	PyVarObject_HEAD_INIT(NULL, 0)
322 	.tp_name	= "perf.task_event",
323 	.tp_basicsize	= sizeof(struct pyrf_event),
324 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
325 	.tp_doc		= pyrf_task_event__doc,
326 	.tp_members	= pyrf_task_event__members,
327 	.tp_repr	= (reprfunc)pyrf_task_event__repr,
328 };
329 
330 static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");
331 
332 static PyMemberDef pyrf_comm_event__members[] = {
333 	sample_members
334 	member_def(perf_event_header, type, T_UINT, "event type"),
335 	member_def(perf_record_comm, pid, T_UINT, "event pid"),
336 	member_def(perf_record_comm, tid, T_UINT, "event tid"),
337 	member_def(perf_record_comm, comm, T_STRING_INPLACE, "process name"),
338 	{ .name = NULL, },
339 };
340 
341 static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
342 {
343 	return _PyUnicode_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
344 				   pevent->event.comm.pid,
345 				   pevent->event.comm.tid,
346 				   pevent->event.comm.comm);
347 }
348 
349 static PyTypeObject pyrf_comm_event__type = {
350 	PyVarObject_HEAD_INIT(NULL, 0)
351 	.tp_name	= "perf.comm_event",
352 	.tp_basicsize	= sizeof(struct pyrf_event),
353 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
354 	.tp_doc		= pyrf_comm_event__doc,
355 	.tp_members	= pyrf_comm_event__members,
356 	.tp_repr	= (reprfunc)pyrf_comm_event__repr,
357 };
358 
359 static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");
360 
361 static PyMemberDef pyrf_throttle_event__members[] = {
362 	sample_members
363 	member_def(perf_event_header, type, T_UINT, "event type"),
364 	member_def(perf_record_throttle, time, T_ULONGLONG, "timestamp"),
365 	member_def(perf_record_throttle, id, T_ULONGLONG, "event id"),
366 	member_def(perf_record_throttle, stream_id, T_ULONGLONG, "event stream id"),
367 	{ .name = NULL, },
368 };
369 
370 static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
371 {
372 	struct perf_record_throttle *te = (struct perf_record_throttle *)(&pevent->event.header + 1);
373 
374 	return _PyUnicode_FromFormat("{ type: %sthrottle, time: %" PRI_lu64 ", id: %" PRI_lu64
375 				   ", stream_id: %" PRI_lu64 " }",
376 				   pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
377 				   te->time, te->id, te->stream_id);
378 }
379 
380 static PyTypeObject pyrf_throttle_event__type = {
381 	PyVarObject_HEAD_INIT(NULL, 0)
382 	.tp_name	= "perf.throttle_event",
383 	.tp_basicsize	= sizeof(struct pyrf_event),
384 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
385 	.tp_doc		= pyrf_throttle_event__doc,
386 	.tp_members	= pyrf_throttle_event__members,
387 	.tp_repr	= (reprfunc)pyrf_throttle_event__repr,
388 };
389 
390 static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");
391 
392 static PyMemberDef pyrf_lost_event__members[] = {
393 	sample_members
394 	member_def(perf_record_lost, id, T_ULONGLONG, "event id"),
395 	member_def(perf_record_lost, lost, T_ULONGLONG, "number of lost events"),
396 	{ .name = NULL, },
397 };
398 
399 static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent)
400 {
401 	PyObject *ret;
402 	char *s;
403 
404 	if (asprintf(&s, "{ type: lost, id: %#" PRI_lx64 ", "
405 			 "lost: %#" PRI_lx64 " }",
406 		     pevent->event.lost.id, pevent->event.lost.lost) < 0) {
407 		ret = PyErr_NoMemory();
408 	} else {
409 		ret = _PyUnicode_FromString(s);
410 		free(s);
411 	}
412 	return ret;
413 }
414 
415 static PyTypeObject pyrf_lost_event__type = {
416 	PyVarObject_HEAD_INIT(NULL, 0)
417 	.tp_name	= "perf.lost_event",
418 	.tp_basicsize	= sizeof(struct pyrf_event),
419 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
420 	.tp_doc		= pyrf_lost_event__doc,
421 	.tp_members	= pyrf_lost_event__members,
422 	.tp_repr	= (reprfunc)pyrf_lost_event__repr,
423 };
424 
425 static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");
426 
427 static PyMemberDef pyrf_read_event__members[] = {
428 	sample_members
429 	member_def(perf_record_read, pid, T_UINT, "event pid"),
430 	member_def(perf_record_read, tid, T_UINT, "event tid"),
431 	{ .name = NULL, },
432 };
433 
434 static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent)
435 {
436 	return _PyUnicode_FromFormat("{ type: read, pid: %u, tid: %u }",
437 				   pevent->event.read.pid,
438 				   pevent->event.read.tid);
439 	/*
440  	 * FIXME: return the array of read values,
441  	 * making this method useful ;-)
442  	 */
443 }
444 
445 static PyTypeObject pyrf_read_event__type = {
446 	PyVarObject_HEAD_INIT(NULL, 0)
447 	.tp_name	= "perf.read_event",
448 	.tp_basicsize	= sizeof(struct pyrf_event),
449 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
450 	.tp_doc		= pyrf_read_event__doc,
451 	.tp_members	= pyrf_read_event__members,
452 	.tp_repr	= (reprfunc)pyrf_read_event__repr,
453 };
454 
455 static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");
456 
457 static PyMemberDef pyrf_sample_event__members[] = {
458 	sample_members
459 	member_def(perf_event_header, type, T_UINT, "event type"),
460 	{ .name = NULL, },
461 };
462 
463 static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent)
464 {
465 	PyObject *ret;
466 	char *s;
467 
468 	if (asprintf(&s, "{ type: sample }") < 0) {
469 		ret = PyErr_NoMemory();
470 	} else {
471 		ret = _PyUnicode_FromString(s);
472 		free(s);
473 	}
474 	return ret;
475 }
476 
477 #ifdef HAVE_LIBTRACEEVENT
478 static bool is_tracepoint(struct pyrf_event *pevent)
479 {
480 	return pevent->evsel->core.attr.type == PERF_TYPE_TRACEPOINT;
481 }
482 
483 static PyObject*
484 tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field)
485 {
486 	struct tep_handle *pevent = field->event->tep;
487 	void *data = pe->sample.raw_data;
488 	PyObject *ret = NULL;
489 	unsigned long long val;
490 	unsigned int offset, len;
491 
492 	if (field->flags & TEP_FIELD_IS_ARRAY) {
493 		offset = field->offset;
494 		len    = field->size;
495 		if (field->flags & TEP_FIELD_IS_DYNAMIC) {
496 			val     = tep_read_number(pevent, data + offset, len);
497 			offset  = val;
498 			len     = offset >> 16;
499 			offset &= 0xffff;
500 			if (tep_field_is_relative(field->flags))
501 				offset += field->offset + field->size;
502 		}
503 		if (field->flags & TEP_FIELD_IS_STRING &&
504 		    is_printable_array(data + offset, len)) {
505 			ret = _PyUnicode_FromString((char *)data + offset);
506 		} else {
507 			ret = PyByteArray_FromStringAndSize((const char *) data + offset, len);
508 			field->flags &= ~TEP_FIELD_IS_STRING;
509 		}
510 	} else {
511 		val = tep_read_number(pevent, data + field->offset,
512 				      field->size);
513 		if (field->flags & TEP_FIELD_IS_POINTER)
514 			ret = PyLong_FromUnsignedLong((unsigned long) val);
515 		else if (field->flags & TEP_FIELD_IS_SIGNED)
516 			ret = PyLong_FromLong((long) val);
517 		else
518 			ret = PyLong_FromUnsignedLong((unsigned long) val);
519 	}
520 
521 	return ret;
522 }
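/*
 * A worked example of the dynamic-array decode above (values illustrative):
 * for a __data_loc field tep_read_number() returns a 32-bit word that packs
 * length and offset, e.g. val == 0x000a0020 yields
 *
 *	len    = val >> 16;    // 0x000a -> 10 bytes
 *	offset = val & 0xffff; // 0x0020 -> 32 bytes into sample.raw_data
 *
 * and for relative (__rel_loc) fields the offset is then rebased to just
 * past the field itself.
 */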
523 
524 static PyObject*
525 get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
526 {
527 	const char *str = _PyUnicode_AsString(PyObject_Str(attr_name));
528 	struct evsel *evsel = pevent->evsel;
529 	struct tep_format_field *field;
530 
531 	if (!evsel->tp_format) {
532 		struct tep_event *tp_format;
533 
534 		tp_format = trace_event__tp_format_id(evsel->core.attr.config);
535 		if (IS_ERR_OR_NULL(tp_format))
536 			return NULL;
537 
538 		evsel->tp_format = tp_format;
539 	}
540 
541 	field = tep_find_any_field(evsel->tp_format, str);
542 	if (!field)
543 		return NULL;
544 
545 	return tracepoint_field(pevent, field);
546 }
547 #endif /* HAVE_LIBTRACEEVENT */
548 
549 static PyObject*
550 pyrf_sample_event__getattro(struct pyrf_event *pevent, PyObject *attr_name)
551 {
552 	PyObject *obj = NULL;
553 
554 #ifdef HAVE_LIBTRACEEVENT
555 	if (is_tracepoint(pevent))
556 		obj = get_tracepoint_field(pevent, attr_name);
557 #endif
558 
559 	return obj ?: PyObject_GenericGetAttr((PyObject *) pevent, attr_name);
560 }
561 
562 static PyTypeObject pyrf_sample_event__type = {
563 	PyVarObject_HEAD_INIT(NULL, 0)
564 	.tp_name	= "perf.sample_event",
565 	.tp_basicsize	= sizeof(struct pyrf_event),
566 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
567 	.tp_doc		= pyrf_sample_event__doc,
568 	.tp_members	= pyrf_sample_event__members,
569 	.tp_repr	= (reprfunc)pyrf_sample_event__repr,
570 	.tp_getattro	= (getattrofunc) pyrf_sample_event__getattro,
571 };
572 
573 static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");
574 
575 static PyMemberDef pyrf_context_switch_event__members[] = {
576 	sample_members
577 	member_def(perf_event_header, type, T_UINT, "event type"),
578 	member_def(perf_record_switch, next_prev_pid, T_UINT, "next/prev pid"),
579 	member_def(perf_record_switch, next_prev_tid, T_UINT, "next/prev tid"),
580 	{ .name = NULL, },
581 };
582 
583 static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent)
584 {
585 	PyObject *ret;
586 	char *s;
587 
588 	if (asprintf(&s, "{ type: context_switch, next_prev_pid: %u, next_prev_tid: %u, switch_out: %u }",
589 		     pevent->event.context_switch.next_prev_pid,
590 		     pevent->event.context_switch.next_prev_tid,
591 		     !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) {
592 		ret = PyErr_NoMemory();
593 	} else {
594 		ret = _PyUnicode_FromString(s);
595 		free(s);
596 	}
597 	return ret;
598 }
599 
600 static PyTypeObject pyrf_context_switch_event__type = {
601 	PyVarObject_HEAD_INIT(NULL, 0)
602 	.tp_name	= "perf.context_switch_event",
603 	.tp_basicsize	= sizeof(struct pyrf_event),
604 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
605 	.tp_doc		= pyrf_context_switch_event__doc,
606 	.tp_members	= pyrf_context_switch_event__members,
607 	.tp_repr	= (reprfunc)pyrf_context_switch_event__repr,
608 };
609 
610 static int pyrf_event__setup_types(void)
611 {
612 	int err;
613 	pyrf_mmap_event__type.tp_new =
614 	pyrf_task_event__type.tp_new =
615 	pyrf_comm_event__type.tp_new =
616 	pyrf_lost_event__type.tp_new =
617 	pyrf_read_event__type.tp_new =
618 	pyrf_sample_event__type.tp_new =
619 	pyrf_context_switch_event__type.tp_new =
620 	pyrf_throttle_event__type.tp_new = PyType_GenericNew;
621 	err = PyType_Ready(&pyrf_mmap_event__type);
622 	if (err < 0)
623 		goto out;
624 	err = PyType_Ready(&pyrf_lost_event__type);
625 	if (err < 0)
626 		goto out;
627 	err = PyType_Ready(&pyrf_task_event__type);
628 	if (err < 0)
629 		goto out;
630 	err = PyType_Ready(&pyrf_comm_event__type);
631 	if (err < 0)
632 		goto out;
633 	err = PyType_Ready(&pyrf_throttle_event__type);
634 	if (err < 0)
635 		goto out;
636 	err = PyType_Ready(&pyrf_read_event__type);
637 	if (err < 0)
638 		goto out;
639 	err = PyType_Ready(&pyrf_sample_event__type);
640 	if (err < 0)
641 		goto out;
642 	err = PyType_Ready(&pyrf_context_switch_event__type);
643 	if (err < 0)
644 		goto out;
645 out:
646 	return err;
647 }
648 
649 static PyTypeObject *pyrf_event__type[] = {
650 	[PERF_RECORD_MMAP]	 = &pyrf_mmap_event__type,
651 	[PERF_RECORD_LOST]	 = &pyrf_lost_event__type,
652 	[PERF_RECORD_COMM]	 = &pyrf_comm_event__type,
653 	[PERF_RECORD_EXIT]	 = &pyrf_task_event__type,
654 	[PERF_RECORD_THROTTLE]	 = &pyrf_throttle_event__type,
655 	[PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type,
656 	[PERF_RECORD_FORK]	 = &pyrf_task_event__type,
657 	[PERF_RECORD_READ]	 = &pyrf_read_event__type,
658 	[PERF_RECORD_SAMPLE]	 = &pyrf_sample_event__type,
659 	[PERF_RECORD_SWITCH]	 = &pyrf_context_switch_event__type,
660 	[PERF_RECORD_SWITCH_CPU_WIDE]  = &pyrf_context_switch_event__type,
661 };
662 
663 static PyObject *pyrf_event__new(union perf_event *event)
664 {
665 	struct pyrf_event *pevent;
666 	PyTypeObject *ptype;
667 
668 	if ((event->header.type < PERF_RECORD_MMAP ||
669 	     event->header.type > PERF_RECORD_SAMPLE) &&
670 	    !(event->header.type == PERF_RECORD_SWITCH ||
671 	      event->header.type == PERF_RECORD_SWITCH_CPU_WIDE))
672 		return NULL;
673 
674 	// FIXME this better be dynamic or we need to parse everything
675 	// before calling perf_mmap__consume(), including tracepoint fields.
676 	if (sizeof(pevent->event) < event->header.size)
677 		return NULL;
678 
679 	ptype = pyrf_event__type[event->header.type];
680 	pevent = PyObject_New(struct pyrf_event, ptype);
681 	if (pevent != NULL)
682 		memcpy(&pevent->event, event, event->header.size);
683 	return (PyObject *)pevent;
684 }
685 
686 struct pyrf_cpu_map {
687 	PyObject_HEAD
688 
689 	struct perf_cpu_map *cpus;
690 };
691 
692 static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
693 			      PyObject *args, PyObject *kwargs)
694 {
695 	static char *kwlist[] = { "cpustr", NULL };
696 	char *cpustr = NULL;
697 
698 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
699 					 kwlist, &cpustr))
700 		return -1;
701 
702 	pcpus->cpus = perf_cpu_map__new(cpustr);
703 	if (pcpus->cpus == NULL)
704 		return -1;
705 	return 0;
706 }
707 
708 static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
709 {
710 	perf_cpu_map__put(pcpus->cpus);
711 	Py_TYPE(pcpus)->tp_free((PyObject*)pcpus);
712 }
713 
714 static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
715 {
716 	struct pyrf_cpu_map *pcpus = (void *)obj;
717 
718 	return perf_cpu_map__nr(pcpus->cpus);
719 }
720 
721 static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
722 {
723 	struct pyrf_cpu_map *pcpus = (void *)obj;
724 
725 	if (i >= perf_cpu_map__nr(pcpus->cpus))
726 		return NULL;
727 
728 	return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu);
729 }
730 
731 static PySequenceMethods pyrf_cpu_map__sequence_methods = {
732 	.sq_length = pyrf_cpu_map__length,
733 	.sq_item   = pyrf_cpu_map__item,
734 };
735 
736 static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");
737 
738 static PyTypeObject pyrf_cpu_map__type = {
739 	PyVarObject_HEAD_INIT(NULL, 0)
740 	.tp_name	= "perf.cpu_map",
741 	.tp_basicsize	= sizeof(struct pyrf_cpu_map),
742 	.tp_dealloc	= (destructor)pyrf_cpu_map__delete,
743 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
744 	.tp_doc		= pyrf_cpu_map__doc,
745 	.tp_as_sequence	= &pyrf_cpu_map__sequence_methods,
746 	.tp_init	= (initproc)pyrf_cpu_map__init,
747 };
748 
749 static int pyrf_cpu_map__setup_types(void)
750 {
751 	pyrf_cpu_map__type.tp_new = PyType_GenericNew;
752 	return PyType_Ready(&pyrf_cpu_map__type);
753 }
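/*
 * A rough usage sketch from the Python side (assuming the module was built
 * and is importable as "perf"):
 *
 *	cpus = perf.cpu_map("0-3")	# or perf.cpu_map() for all online CPUs
 *	print(len(cpus), cpus[0])	# backed by sq_length/sq_item above
 */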
754 
755 struct pyrf_thread_map {
756 	PyObject_HEAD
757 
758 	struct perf_thread_map *threads;
759 };
760 
761 static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
762 				 PyObject *args, PyObject *kwargs)
763 {
764 	static char *kwlist[] = { "pid", "tid", "uid", NULL };
765 	int pid = -1, tid = -1, uid = UINT_MAX;
766 
767 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii",
768 					 kwlist, &pid, &tid, &uid))
769 		return -1;
770 
771 	pthreads->threads = thread_map__new(pid, tid, uid);
772 	if (pthreads->threads == NULL)
773 		return -1;
774 	return 0;
775 }
776 
777 static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
778 {
779 	perf_thread_map__put(pthreads->threads);
780 	Py_TYPE(pthreads)->tp_free((PyObject*)pthreads);
781 }
782 
783 static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
784 {
785 	struct pyrf_thread_map *pthreads = (void *)obj;
786 
787 	return perf_thread_map__nr(pthreads->threads);
788 }
789 
790 static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
791 {
792 	struct pyrf_thread_map *pthreads = (void *)obj;
793 
794 	if (i >= perf_thread_map__nr(pthreads->threads))
795 		return NULL;
796 
797 	return Py_BuildValue("i", perf_thread_map__pid(pthreads->threads, i));
798 }
799 
800 static PySequenceMethods pyrf_thread_map__sequence_methods = {
801 	.sq_length = pyrf_thread_map__length,
802 	.sq_item   = pyrf_thread_map__item,
803 };
804 
805 static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");
806 
807 static PyTypeObject pyrf_thread_map__type = {
808 	PyVarObject_HEAD_INIT(NULL, 0)
809 	.tp_name	= "perf.thread_map",
810 	.tp_basicsize	= sizeof(struct pyrf_thread_map),
811 	.tp_dealloc	= (destructor)pyrf_thread_map__delete,
812 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
813 	.tp_doc		= pyrf_thread_map__doc,
814 	.tp_as_sequence	= &pyrf_thread_map__sequence_methods,
815 	.tp_init	= (initproc)pyrf_thread_map__init,
816 };
817 
818 static int pyrf_thread_map__setup_types(void)
819 {
820 	pyrf_thread_map__type.tp_new = PyType_GenericNew;
821 	return PyType_Ready(&pyrf_thread_map__type);
822 }
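/*
 * A rough usage sketch (illustrative pid): with no arguments the map holds
 * the default tid of -1, while an explicit pid selects that process' threads:
 *
 *	threads = perf.thread_map(pid=1234)
 *	print(len(threads), threads[0])
 */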
823 
824 struct pyrf_evsel {
825 	PyObject_HEAD
826 
827 	struct evsel evsel;
828 };
829 
830 static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
831 			    PyObject *args, PyObject *kwargs)
832 {
833 	struct perf_event_attr attr = {
834 		.type = PERF_TYPE_HARDWARE,
835 		.config = PERF_COUNT_HW_CPU_CYCLES,
836 		.sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
837 	};
838 	static char *kwlist[] = {
839 		"type",
840 		"config",
841 		"sample_freq",
842 		"sample_period",
843 		"sample_type",
844 		"read_format",
845 		"disabled",
846 		"inherit",
847 		"pinned",
848 		"exclusive",
849 		"exclude_user",
850 		"exclude_kernel",
851 		"exclude_hv",
852 		"exclude_idle",
853 		"mmap",
854 		"context_switch",
855 		"comm",
856 		"freq",
857 		"inherit_stat",
858 		"enable_on_exec",
859 		"task",
860 		"watermark",
861 		"precise_ip",
862 		"mmap_data",
863 		"sample_id_all",
864 		"wakeup_events",
865 		"bp_type",
866 		"bp_addr",
867 		"bp_len",
868 		 NULL
869 	};
870 	u64 sample_period = 0;
871 	u32 disabled = 0,
872 	    inherit = 0,
873 	    pinned = 0,
874 	    exclusive = 0,
875 	    exclude_user = 0,
876 	    exclude_kernel = 0,
877 	    exclude_hv = 0,
878 	    exclude_idle = 0,
879 	    mmap = 0,
880 	    context_switch = 0,
881 	    comm = 0,
882 	    freq = 1,
883 	    inherit_stat = 0,
884 	    enable_on_exec = 0,
885 	    task = 0,
886 	    watermark = 0,
887 	    precise_ip = 0,
888 	    mmap_data = 0,
889 	    sample_id_all = 1;
890 	int idx = 0;
891 
892 	if (!PyArg_ParseTupleAndKeywords(args, kwargs,
893 					 "|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
894 					 &attr.type, &attr.config, &attr.sample_freq,
895 					 &sample_period, &attr.sample_type,
896 					 &attr.read_format, &disabled, &inherit,
897 					 &pinned, &exclusive, &exclude_user,
898 					 &exclude_kernel, &exclude_hv, &exclude_idle,
899 					 &mmap, &context_switch, &comm, &freq, &inherit_stat,
900 					 &enable_on_exec, &task, &watermark,
901 					 &precise_ip, &mmap_data, &sample_id_all,
902 					 &attr.wakeup_events, &attr.bp_type,
903 					 &attr.bp_addr, &attr.bp_len, &idx))
904 		return -1;
905 
906 	/* union... */
907 	if (sample_period != 0) {
908 		if (attr.sample_freq != 0)
909 			return -1; /* FIXME: throw right exception */
910 		attr.sample_period = sample_period;
911 	}
912 
913 	/* Bitfields */
914 	attr.disabled	    = disabled;
915 	attr.inherit	    = inherit;
916 	attr.pinned	    = pinned;
917 	attr.exclusive	    = exclusive;
918 	attr.exclude_user   = exclude_user;
919 	attr.exclude_kernel = exclude_kernel;
920 	attr.exclude_hv	    = exclude_hv;
921 	attr.exclude_idle   = exclude_idle;
922 	attr.mmap	    = mmap;
923 	attr.context_switch = context_switch;
924 	attr.comm	    = comm;
925 	attr.freq	    = freq;
926 	attr.inherit_stat   = inherit_stat;
927 	attr.enable_on_exec = enable_on_exec;
928 	attr.task	    = task;
929 	attr.watermark	    = watermark;
930 	attr.precise_ip	    = precise_ip;
931 	attr.mmap_data	    = mmap_data;
932 	attr.sample_id_all  = sample_id_all;
933 	attr.size	    = sizeof(attr);
934 
935 	evsel__init(&pevsel->evsel, &attr, idx);
936 	return 0;
937 }
938 
939 static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
940 {
941 	evsel__exit(&pevsel->evsel);
942 	Py_TYPE(pevsel)->tp_free((PyObject*)pevsel);
943 }
944 
945 static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
946 				  PyObject *args, PyObject *kwargs)
947 {
948 	struct evsel *evsel = &pevsel->evsel;
949 	struct perf_cpu_map *cpus = NULL;
950 	struct perf_thread_map *threads = NULL;
951 	PyObject *pcpus = NULL, *pthreads = NULL;
952 	int group = 0, inherit = 0;
953 	static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL };
954 
955 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
956 					 &pcpus, &pthreads, &group, &inherit))
957 		return NULL;
958 
959 	if (pthreads != NULL)
960 		threads = ((struct pyrf_thread_map *)pthreads)->threads;
961 
962 	if (pcpus != NULL)
963 		cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
964 
965 	evsel->core.attr.inherit = inherit;
966 	/*
967 	 * This will group just the fds for this single evsel, to group
968 	 * multiple events, use evlist.open().
969 	 */
970 	if (evsel__open(evsel, cpus, threads) < 0) {
971 		PyErr_SetFromErrno(PyExc_OSError);
972 		return NULL;
973 	}
974 
975 	Py_INCREF(Py_None);
976 	return Py_None;
977 }
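/*
 * A rough usage sketch (illustrative event choice), counting context
 * switches on the given cpu/thread maps:
 *
 *	evsel = perf.evsel(type = perf.TYPE_SOFTWARE,
 *			   config = perf.COUNT_SW_CONTEXT_SWITCHES)
 *	evsel.open(cpus = perf.cpu_map(), threads = perf.thread_map())
 */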
978 
979 static PyMethodDef pyrf_evsel__methods[] = {
980 	{
981 		.ml_name  = "open",
982 		.ml_meth  = (PyCFunction)pyrf_evsel__open,
983 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
984 		.ml_doc	  = PyDoc_STR("open the event selector file descriptor table.")
985 	},
986 	{ .ml_name = NULL, }
987 };
988 
989 static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector list object.");
990 
991 static PyTypeObject pyrf_evsel__type = {
992 	PyVarObject_HEAD_INIT(NULL, 0)
993 	.tp_name	= "perf.evsel",
994 	.tp_basicsize	= sizeof(struct pyrf_evsel),
995 	.tp_dealloc	= (destructor)pyrf_evsel__delete,
996 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
997 	.tp_doc		= pyrf_evsel__doc,
998 	.tp_methods	= pyrf_evsel__methods,
999 	.tp_init	= (initproc)pyrf_evsel__init,
1000 };
1001 
1002 static int pyrf_evsel__setup_types(void)
1003 {
1004 	pyrf_evsel__type.tp_new = PyType_GenericNew;
1005 	return PyType_Ready(&pyrf_evsel__type);
1006 }
1007 
1008 struct pyrf_evlist {
1009 	PyObject_HEAD
1010 
1011 	struct evlist evlist;
1012 };
1013 
1014 static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
1015 			     PyObject *args, PyObject *kwargs __maybe_unused)
1016 {
1017 	PyObject *pcpus = NULL, *pthreads = NULL;
1018 	struct perf_cpu_map *cpus;
1019 	struct perf_thread_map *threads;
1020 
1021 	if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
1022 		return -1;
1023 
1024 	threads = ((struct pyrf_thread_map *)pthreads)->threads;
1025 	cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
1026 	evlist__init(&pevlist->evlist, cpus, threads);
1027 	return 0;
1028 }
1029 
1030 static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
1031 {
1032 	evlist__exit(&pevlist->evlist);
1033 	Py_TYPE(pevlist)->tp_free((PyObject*)pevlist);
1034 }
1035 
1036 static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
1037 				   PyObject *args, PyObject *kwargs)
1038 {
1039 	struct evlist *evlist = &pevlist->evlist;
1040 	static char *kwlist[] = { "pages", "overwrite", NULL };
1041 	int pages = 128, overwrite = false;
1042 
1043 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
1044 					 &pages, &overwrite))
1045 		return NULL;
1046 
1047 	if (evlist__mmap(evlist, pages) < 0) {
1048 		PyErr_SetFromErrno(PyExc_OSError);
1049 		return NULL;
1050 	}
1051 
1052 	Py_INCREF(Py_None);
1053 	return Py_None;
1054 }
1055 
1056 static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
1057 				   PyObject *args, PyObject *kwargs)
1058 {
1059 	struct evlist *evlist = &pevlist->evlist;
1060 	static char *kwlist[] = { "timeout", NULL };
1061 	int timeout = -1, n;
1062 
1063 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
1064 		return NULL;
1065 
1066 	n = evlist__poll(evlist, timeout);
1067 	if (n < 0) {
1068 		PyErr_SetFromErrno(PyExc_OSError);
1069 		return NULL;
1070 	}
1071 
1072 	return Py_BuildValue("i", n);
1073 }
1074 
1075 static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
1076 					 PyObject *args __maybe_unused,
1077 					 PyObject *kwargs __maybe_unused)
1078 {
1079 	struct evlist *evlist = &pevlist->evlist;
1080 	PyObject *list = PyList_New(0);
1081 	int i;
1082 
1083 	for (i = 0; i < evlist->core.pollfd.nr; ++i) {
1084 		PyObject *file;
1085 #if PY_MAJOR_VERSION < 3
1086 		FILE *fp = fdopen(evlist->core.pollfd.entries[i].fd, "r");
1087 
1088 		if (fp == NULL)
1089 			goto free_list;
1090 
1091 		file = PyFile_FromFile(fp, "perf", "r", NULL);
1092 #else
1093 		file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1,
1094 				     NULL, NULL, NULL, 0);
1095 #endif
1096 		if (file == NULL)
1097 			goto free_list;
1098 
1099 		if (PyList_Append(list, file) != 0) {
1100 			Py_DECREF(file);
1101 			goto free_list;
1102 		}
1103 
1104 		Py_DECREF(file);
1105 	}
1106 
1107 	return list;
1108 free_list:
1109 	return PyErr_NoMemory();
1110 }
1111 
1112 
1113 static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
1114 				  PyObject *args,
1115 				  PyObject *kwargs __maybe_unused)
1116 {
1117 	struct evlist *evlist = &pevlist->evlist;
1118 	PyObject *pevsel;
1119 	struct evsel *evsel;
1120 
1121 	if (!PyArg_ParseTuple(args, "O", &pevsel))
1122 		return NULL;
1123 
1124 	Py_INCREF(pevsel);
1125 	evsel = &((struct pyrf_evsel *)pevsel)->evsel;
1126 	evsel->core.idx = evlist->core.nr_entries;
1127 	evlist__add(evlist, evsel);
1128 
1129 	return Py_BuildValue("i", evlist->core.nr_entries);
1130 }
1131 
1132 static struct mmap *get_md(struct evlist *evlist, int cpu)
1133 {
1134 	int i;
1135 
1136 	for (i = 0; i < evlist->core.nr_mmaps; i++) {
1137 		struct mmap *md = &evlist->mmap[i];
1138 
1139 		if (md->core.cpu.cpu == cpu)
1140 			return md;
1141 	}
1142 
1143 	return NULL;
1144 }
1145 
1146 static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
1147 					  PyObject *args, PyObject *kwargs)
1148 {
1149 	struct evlist *evlist = &pevlist->evlist;
1150 	union perf_event *event;
1151 	int sample_id_all = 1, cpu;
1152 	static char *kwlist[] = { "cpu", "sample_id_all", NULL };
1153 	struct mmap *md;
1154 	int err;
1155 
1156 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
1157 					 &cpu, &sample_id_all))
1158 		return NULL;
1159 
1160 	md = get_md(evlist, cpu);
1161 	if (!md)
1162 		return NULL;
1163 
1164 	if (perf_mmap__read_init(&md->core) < 0)
1165 		goto end;
1166 
1167 	event = perf_mmap__read_event(&md->core);
1168 	if (event != NULL) {
1169 		PyObject *pyevent = pyrf_event__new(event);
1170 		struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
1171 		struct evsel *evsel;
1172 
1173 		if (pyevent == NULL)
1174 			return PyErr_NoMemory();
1175 
1176 		evsel = evlist__event2evsel(evlist, event);
1177 		if (!evsel) {
1178 			Py_DECREF(pyevent);
1179 			Py_INCREF(Py_None);
1180 			return Py_None;
1181 		}
1182 
1183 		pevent->evsel = evsel;
1184 
1185 		perf_mmap__consume(&md->core);
1186 
1187 		err = evsel__parse_sample(evsel, &pevent->event, &pevent->sample);
1188 		if (err) {
1189 			Py_DECREF(pyevent);
1190 			return PyErr_Format(PyExc_OSError,
1191 					    "perf: can't parse sample, err=%d", err);
1192 		}
1193 
1194 		return pyevent;
1195 	}
1196 end:
1197 	Py_INCREF(Py_None);
1198 	return Py_None;
1199 }
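/*
 * A rough sketch of the consuming side, in the spirit of
 * tools/perf/python/twatch.py: mmap the rings, poll, then drain per cpu:
 *
 *	evlist.mmap(pages = 16)
 *	while True:
 *	    evlist.poll(timeout = -1)
 *	    for cpu in cpus:
 *	        event = evlist.read_on_cpu(cpu)
 *	        if event:
 *	            print(event)
 */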
1200 
1201 static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
1202 				   PyObject *args, PyObject *kwargs)
1203 {
1204 	struct evlist *evlist = &pevlist->evlist;
1205 
1206 	if (evlist__open(evlist) < 0) {
1207 		PyErr_SetFromErrno(PyExc_OSError);
1208 		return NULL;
1209 	}
1210 
1211 	Py_INCREF(Py_None);
1212 	return Py_None;
1213 }
1214 
1215 static PyMethodDef pyrf_evlist__methods[] = {
1216 	{
1217 		.ml_name  = "mmap",
1218 		.ml_meth  = (PyCFunction)pyrf_evlist__mmap,
1219 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1220 		.ml_doc	  = PyDoc_STR("mmap the file descriptor table.")
1221 	},
1222 	{
1223 		.ml_name  = "open",
1224 		.ml_meth  = (PyCFunction)pyrf_evlist__open,
1225 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1226 		.ml_doc	  = PyDoc_STR("open the file descriptors.")
1227 	},
1228 	{
1229 		.ml_name  = "poll",
1230 		.ml_meth  = (PyCFunction)pyrf_evlist__poll,
1231 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1232 		.ml_doc	  = PyDoc_STR("poll the file descriptor table.")
1233 	},
1234 	{
1235 		.ml_name  = "get_pollfd",
1236 		.ml_meth  = (PyCFunction)pyrf_evlist__get_pollfd,
1237 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1238 		.ml_doc	  = PyDoc_STR("get the poll file descriptor table.")
1239 	},
1240 	{
1241 		.ml_name  = "add",
1242 		.ml_meth  = (PyCFunction)pyrf_evlist__add,
1243 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1244 		.ml_doc	  = PyDoc_STR("adds an event selector to the list.")
1245 	},
1246 	{
1247 		.ml_name  = "read_on_cpu",
1248 		.ml_meth  = (PyCFunction)pyrf_evlist__read_on_cpu,
1249 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1250 		.ml_doc	  = PyDoc_STR("reads an event.")
1251 	},
1252 	{ .ml_name = NULL, }
1253 };
1254 
1255 static Py_ssize_t pyrf_evlist__length(PyObject *obj)
1256 {
1257 	struct pyrf_evlist *pevlist = (void *)obj;
1258 
1259 	return pevlist->evlist.core.nr_entries;
1260 }
1261 
1262 static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
1263 {
1264 	struct pyrf_evlist *pevlist = (void *)obj;
1265 	struct evsel *pos;
1266 
1267 	if (i >= pevlist->evlist.core.nr_entries)
1268 		return NULL;
1269 
1270 	evlist__for_each_entry(&pevlist->evlist, pos) {
1271 		if (i-- == 0)
1272 			break;
1273 	}
1274 
1275 	return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
1276 }
1277 
1278 static PySequenceMethods pyrf_evlist__sequence_methods = {
1279 	.sq_length = pyrf_evlist__length,
1280 	.sq_item   = pyrf_evlist__item,
1281 };
1282 
1283 static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");
1284 
1285 static PyTypeObject pyrf_evlist__type = {
1286 	PyVarObject_HEAD_INIT(NULL, 0)
1287 	.tp_name	= "perf.evlist",
1288 	.tp_basicsize	= sizeof(struct pyrf_evlist),
1289 	.tp_dealloc	= (destructor)pyrf_evlist__delete,
1290 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
1291 	.tp_as_sequence	= &pyrf_evlist__sequence_methods,
1292 	.tp_doc		= pyrf_evlist__doc,
1293 	.tp_methods	= pyrf_evlist__methods,
1294 	.tp_init	= (initproc)pyrf_evlist__init,
1295 };
1296 
1297 static int pyrf_evlist__setup_types(void)
1298 {
1299 	pyrf_evlist__type.tp_new = PyType_GenericNew;
1300 	return PyType_Ready(&pyrf_evlist__type);
1301 }
1302 
1303 #define PERF_CONST(name) { #name, PERF_##name }
1304 
1305 static struct {
1306 	const char *name;
1307 	int	    value;
1308 } perf__constants[] = {
1309 	PERF_CONST(TYPE_HARDWARE),
1310 	PERF_CONST(TYPE_SOFTWARE),
1311 	PERF_CONST(TYPE_TRACEPOINT),
1312 	PERF_CONST(TYPE_HW_CACHE),
1313 	PERF_CONST(TYPE_RAW),
1314 	PERF_CONST(TYPE_BREAKPOINT),
1315 
1316 	PERF_CONST(COUNT_HW_CPU_CYCLES),
1317 	PERF_CONST(COUNT_HW_INSTRUCTIONS),
1318 	PERF_CONST(COUNT_HW_CACHE_REFERENCES),
1319 	PERF_CONST(COUNT_HW_CACHE_MISSES),
1320 	PERF_CONST(COUNT_HW_BRANCH_INSTRUCTIONS),
1321 	PERF_CONST(COUNT_HW_BRANCH_MISSES),
1322 	PERF_CONST(COUNT_HW_BUS_CYCLES),
1323 	PERF_CONST(COUNT_HW_CACHE_L1D),
1324 	PERF_CONST(COUNT_HW_CACHE_L1I),
1325 	PERF_CONST(COUNT_HW_CACHE_LL),
1326 	PERF_CONST(COUNT_HW_CACHE_DTLB),
1327 	PERF_CONST(COUNT_HW_CACHE_ITLB),
1328 	PERF_CONST(COUNT_HW_CACHE_BPU),
1329 	PERF_CONST(COUNT_HW_CACHE_OP_READ),
1330 	PERF_CONST(COUNT_HW_CACHE_OP_WRITE),
1331 	PERF_CONST(COUNT_HW_CACHE_OP_PREFETCH),
1332 	PERF_CONST(COUNT_HW_CACHE_RESULT_ACCESS),
1333 	PERF_CONST(COUNT_HW_CACHE_RESULT_MISS),
1334 
1335 	PERF_CONST(COUNT_HW_STALLED_CYCLES_FRONTEND),
1336 	PERF_CONST(COUNT_HW_STALLED_CYCLES_BACKEND),
1337 
1338 	PERF_CONST(COUNT_SW_CPU_CLOCK),
1339 	PERF_CONST(COUNT_SW_TASK_CLOCK),
1340 	PERF_CONST(COUNT_SW_PAGE_FAULTS),
1341 	PERF_CONST(COUNT_SW_CONTEXT_SWITCHES),
1342 	PERF_CONST(COUNT_SW_CPU_MIGRATIONS),
1343 	PERF_CONST(COUNT_SW_PAGE_FAULTS_MIN),
1344 	PERF_CONST(COUNT_SW_PAGE_FAULTS_MAJ),
1345 	PERF_CONST(COUNT_SW_ALIGNMENT_FAULTS),
1346 	PERF_CONST(COUNT_SW_EMULATION_FAULTS),
1347 	PERF_CONST(COUNT_SW_DUMMY),
1348 
1349 	PERF_CONST(SAMPLE_IP),
1350 	PERF_CONST(SAMPLE_TID),
1351 	PERF_CONST(SAMPLE_TIME),
1352 	PERF_CONST(SAMPLE_ADDR),
1353 	PERF_CONST(SAMPLE_READ),
1354 	PERF_CONST(SAMPLE_CALLCHAIN),
1355 	PERF_CONST(SAMPLE_ID),
1356 	PERF_CONST(SAMPLE_CPU),
1357 	PERF_CONST(SAMPLE_PERIOD),
1358 	PERF_CONST(SAMPLE_STREAM_ID),
1359 	PERF_CONST(SAMPLE_RAW),
1360 
1361 	PERF_CONST(FORMAT_TOTAL_TIME_ENABLED),
1362 	PERF_CONST(FORMAT_TOTAL_TIME_RUNNING),
1363 	PERF_CONST(FORMAT_ID),
1364 	PERF_CONST(FORMAT_GROUP),
1365 
1366 	PERF_CONST(RECORD_MMAP),
1367 	PERF_CONST(RECORD_LOST),
1368 	PERF_CONST(RECORD_COMM),
1369 	PERF_CONST(RECORD_EXIT),
1370 	PERF_CONST(RECORD_THROTTLE),
1371 	PERF_CONST(RECORD_UNTHROTTLE),
1372 	PERF_CONST(RECORD_FORK),
1373 	PERF_CONST(RECORD_READ),
1374 	PERF_CONST(RECORD_SAMPLE),
1375 	PERF_CONST(RECORD_MMAP2),
1376 	PERF_CONST(RECORD_AUX),
1377 	PERF_CONST(RECORD_ITRACE_START),
1378 	PERF_CONST(RECORD_LOST_SAMPLES),
1379 	PERF_CONST(RECORD_SWITCH),
1380 	PERF_CONST(RECORD_SWITCH_CPU_WIDE),
1381 
1382 	PERF_CONST(RECORD_MISC_SWITCH_OUT),
1383 	{ .name = NULL, },
1384 };
1385 
1386 static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
1387 				  PyObject *args, PyObject *kwargs)
1388 {
1389 #ifndef HAVE_LIBTRACEEVENT
1390 	return NULL;
1391 #else
1392 	struct tep_event *tp_format;
1393 	static char *kwlist[] = { "sys", "name", NULL };
1394 	char *sys  = NULL;
1395 	char *name = NULL;
1396 
1397 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ss", kwlist,
1398 					 &sys, &name))
1399 		return NULL;
1400 
1401 	tp_format = trace_event__tp_format(sys, name);
1402 	if (IS_ERR(tp_format))
1403 		return _PyLong_FromLong(-1);
1404 
1405 	return _PyLong_FromLong(tp_format->id);
1406 #endif // HAVE_LIBTRACEEVENT
1407 }
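/*
 * A rough usage sketch (illustrative tracepoint); with libtraceevent built
 * in, -1 is returned when the tracepoint cannot be resolved:
 *
 *	id = perf.tracepoint(sys = "sched", name = "sched_switch")
 *	evsel = perf.evsel(type = perf.TYPE_TRACEPOINT, config = id)
 */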
1408 
1409 static PyMethodDef perf__methods[] = {
1410 	{
1411 		.ml_name  = "tracepoint",
1412 		.ml_meth  = (PyCFunction) pyrf__tracepoint,
1413 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1414 		.ml_doc	  = PyDoc_STR("Get tracepoint config.")
1415 	},
1416 	{ .ml_name = NULL, }
1417 };
1418 
1419 #if PY_MAJOR_VERSION < 3
1420 PyMODINIT_FUNC initperf(void)
1421 #else
1422 PyMODINIT_FUNC PyInit_perf(void)
1423 #endif
1424 {
1425 	PyObject *obj;
1426 	int i;
1427 	PyObject *dict;
1428 #if PY_MAJOR_VERSION < 3
1429 	PyObject *module = Py_InitModule("perf", perf__methods);
1430 #else
1431 	static struct PyModuleDef moduledef = {
1432 		PyModuleDef_HEAD_INIT,
1433 		"perf",			/* m_name */
1434 		"",			/* m_doc */
1435 		-1,			/* m_size */
1436 		perf__methods,		/* m_methods */
1437 		NULL,			/* m_reload */
1438 		NULL,			/* m_traverse */
1439 		NULL,			/* m_clear */
1440 		NULL,			/* m_free */
1441 	};
1442 	PyObject *module = PyModule_Create(&moduledef);
1443 #endif
1444 
1445 	if (module == NULL ||
1446 	    pyrf_event__setup_types() < 0 ||
1447 	    pyrf_evlist__setup_types() < 0 ||
1448 	    pyrf_evsel__setup_types() < 0 ||
1449 	    pyrf_thread_map__setup_types() < 0 ||
1450 	    pyrf_cpu_map__setup_types() < 0)
1451 #if PY_MAJOR_VERSION < 3
1452 		return;
1453 #else
1454 		return module;
1455 #endif
1456 
1457 	/* The page_size is placed in util object. */
1458 	page_size = sysconf(_SC_PAGE_SIZE);
1459 
1460 	Py_INCREF(&pyrf_evlist__type);
1461 	PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);
1462 
1463 	Py_INCREF(&pyrf_evsel__type);
1464 	PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);
1465 
1466 	Py_INCREF(&pyrf_mmap_event__type);
1467 	PyModule_AddObject(module, "mmap_event", (PyObject *)&pyrf_mmap_event__type);
1468 
1469 	Py_INCREF(&pyrf_lost_event__type);
1470 	PyModule_AddObject(module, "lost_event", (PyObject *)&pyrf_lost_event__type);
1471 
1472 	Py_INCREF(&pyrf_comm_event__type);
1473 	PyModule_AddObject(module, "comm_event", (PyObject *)&pyrf_comm_event__type);
1474 
1475 	Py_INCREF(&pyrf_task_event__type);
1476 	PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);
1477 
1478 	Py_INCREF(&pyrf_throttle_event__type);
1479 	PyModule_AddObject(module, "throttle_event", (PyObject *)&pyrf_throttle_event__type);
1480 
1481 	Py_INCREF(&pyrf_task_event__type);
1482 	PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);
1483 
1484 	Py_INCREF(&pyrf_read_event__type);
1485 	PyModule_AddObject(module, "read_event", (PyObject *)&pyrf_read_event__type);
1486 
1487 	Py_INCREF(&pyrf_sample_event__type);
1488 	PyModule_AddObject(module, "sample_event", (PyObject *)&pyrf_sample_event__type);
1489 
1490 	Py_INCREF(&pyrf_context_switch_event__type);
1491 	PyModule_AddObject(module, "switch_event", (PyObject *)&pyrf_context_switch_event__type);
1492 
1493 	Py_INCREF(&pyrf_thread_map__type);
1494 	PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);
1495 
1496 	Py_INCREF(&pyrf_cpu_map__type);
1497 	PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);
1498 
1499 	dict = PyModule_GetDict(module);
1500 	if (dict == NULL)
1501 		goto error;
1502 
1503 	for (i = 0; perf__constants[i].name != NULL; i++) {
1504 		obj = _PyLong_FromLong(perf__constants[i].value);
1505 		if (obj == NULL)
1506 			goto error;
1507 		PyDict_SetItemString(dict, perf__constants[i].name, obj);
1508 		Py_DECREF(obj);
1509 	}
1510 
1511 error:
1512 	if (PyErr_Occurred())
1513 		PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
1514 #if PY_MAJOR_VERSION >= 3
1515 	return module;
1516 #endif
1517 }
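/*
 * Once imported, the constants above are plain module attributes and the
 * registered types are constructed directly, e.g. (a minimal sketch):
 *
 *	import perf
 *	print(perf.TYPE_HARDWARE, perf.COUNT_HW_CPU_CYCLES)
 *	evlist = perf.evlist(perf.cpu_map(), perf.thread_map())
 */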
1518 
1519 /*
1520  * Dummy, to avoid dragging all the test_attr infrastructure in the python
1521  * binding.
1522  */
1523 void test_attr__open(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu,
1524                      int fd, int group_fd, unsigned long flags)
1525 {
1526 }
1527 
1528 void evlist__free_stats(struct evlist *evlist)
1529 {
1530 }
1531