xref: /openbmc/linux/tools/perf/util/python.c (revision 234489ac)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <Python.h>
3 #include <structmember.h>
4 #include <inttypes.h>
5 #include <poll.h>
6 #include <linux/err.h>
7 #include <perf/cpumap.h>
8 #ifdef HAVE_LIBTRACEEVENT
9 #include <traceevent/event-parse.h>
10 #endif
11 #include <perf/mmap.h>
12 #include "evlist.h"
13 #include "callchain.h"
14 #include "evsel.h"
15 #include "event.h"
16 #include "print_binary.h"
17 #include "thread_map.h"
18 #include "trace-event.h"
19 #include "mmap.h"
20 #include "stat.h"
21 #include "metricgroup.h"
22 #include "util/bpf-filter.h"
23 #include "util/env.h"
24 #include "util/pmu.h"
25 #include <internal/lib.h>
26 #include "util.h"
27 
28 #if PY_MAJOR_VERSION < 3
29 #define _PyUnicode_FromString(arg) \
30   PyString_FromString(arg)
31 #define _PyUnicode_AsString(arg) \
32   PyString_AsString(arg)
33 #define _PyUnicode_FromFormat(...) \
34   PyString_FromFormat(__VA_ARGS__)
35 #define _PyLong_FromLong(arg) \
36   PyInt_FromLong(arg)
37 
38 #else
39 
40 #define _PyUnicode_FromString(arg) \
41   PyUnicode_FromString(arg)
42 #define _PyUnicode_FromFormat(...) \
43   PyUnicode_FromFormat(__VA_ARGS__)
44 #define _PyLong_FromLong(arg) \
45   PyLong_FromLong(arg)
46 #endif
47 
48 #ifndef Py_TYPE
49 #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
50 #endif
51 
52 /*
53  * Provide these two so that we don't have to link against callchain.c and
54  * start dragging hist.c, etc.
55  */
56 struct callchain_param callchain_param;
57 
58 int parse_callchain_record(const char *arg __maybe_unused,
59 			   struct callchain_param *param __maybe_unused)
60 {
61 	return 0;
62 }
63 
64 /*
65  * Add these stubs so that we don't drag in util/env.c
66  */
67 struct perf_env perf_env;
68 
69 const char *perf_env__cpuid(struct perf_env *env __maybe_unused)
70 {
71 	return NULL;
72 }
73 
74 // This one is a bit easier and wouldn't drag too much in, but leave it as a stub since we need it here
75 const char *perf_env__arch(struct perf_env *env __maybe_unused)
76 {
77 	return NULL;
78 }
79 
80 /*
81  * These ones are needed so that we don't drag in the whole PMU bandwagon,
82  * i.e. the jevents-generated pmu_sys_event_tables, etc. So far
83  * evsel__find_pmu() is used only for per-PMU perf_event_attr.exclude_guest
84  * handling, which isn't really needed for the known perf python binding use
85  * cases; revisit if this becomes necessary.
86  */
87 struct perf_pmu *evsel__find_pmu(const struct evsel *evsel __maybe_unused)
88 {
89 	return NULL;
90 }
91 
92 int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt, ...)
93 {
94 	return EOF;
95 }
96 
97 bool evsel__is_aux_event(const struct evsel *evsel __maybe_unused)
98 {
99 	return false;
100 }
101 
102 /*
103  * Add this one here so that we don't drag in util/metricgroup.c
104  */
105 int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
106 				    struct rblist *new_metric_events,
107 				    struct rblist *old_metric_events)
108 {
109 	return 0;
110 }
111 
112 /*
113  * XXX: All these evsel destructors need some better mechanism, like a linked
114  * list of destructors registered when the relevant code is actually used,
115  * instead of adding more and more calls to perf_evsel__delete(). -- acme
116  *
117  * For now, add some more:
118  *
119  * Not to drag the BPF bandwagon...
120  */
121 void bpf_counter__destroy(struct evsel *evsel);
122 int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
123 int bpf_counter__disable(struct evsel *evsel);
124 
125 void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
126 {
127 }
128 
129 int bpf_counter__install_pe(struct evsel *evsel __maybe_unused, int cpu __maybe_unused, int fd __maybe_unused)
130 {
131 	return 0;
132 }
133 
134 int bpf_counter__disable(struct evsel *evsel __maybe_unused)
135 {
136 	return 0;
137 }
138 
139 // Stubs so that we don't drag in util/bpf-filter.c
140 #ifdef HAVE_BPF_SKEL
141 int perf_bpf_filter__prepare(struct evsel *evsel __maybe_unused)
142 {
143 	return 0;
144 }
145 
146 int perf_bpf_filter__destroy(struct evsel *evsel __maybe_unused)
147 {
148 	return 0;
149 }
150 #endif
151 
152 /*
153  * Support debug printing even though util/debug.c is not linked.  That means
154  * implementing 'verbose' and 'eprintf'.
155  */
156 int verbose;
157 int debug_peo_args;
158 
159 int eprintf(int level, int var, const char *fmt, ...);
160 
161 int eprintf(int level, int var, const char *fmt, ...)
162 {
163 	va_list args;
164 	int ret = 0;
165 
166 	if (var >= level) {
167 		va_start(args, fmt);
168 		ret = vfprintf(stderr, fmt, args);
169 		va_end(args);
170 	}
171 
172 	return ret;
173 }
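
/*
 * A small usage sketch: the perf debug helpers that callers actually use boil
 * down to this function; e.g. pr_debug() in util/debug.h is roughly
 *
 *	eprintf(1, verbose, pr_fmt(fmt), ##__VA_ARGS__)
 *
 * so nothing is printed unless 'verbose' has been raised to at least that
 * level.
 */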
174 
175 /* Define PyVarObject_HEAD_INIT for python 2.5 */
176 #ifndef PyVarObject_HEAD_INIT
177 # define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
178 #endif
179 
180 #if PY_MAJOR_VERSION < 3
181 PyMODINIT_FUNC initperf(void);
182 #else
183 PyMODINIT_FUNC PyInit_perf(void);
184 #endif
185 
186 #define member_def(type, member, ptype, help) \
187 	{ #member, ptype, \
188 	  offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
189 	  0, help }
190 
191 #define sample_member_def(name, member, ptype, help) \
192 	{ #name, ptype, \
193 	  offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \
194 	  0, help }
195 
196 struct pyrf_event {
197 	PyObject_HEAD
198 	struct evsel *evsel;
199 	struct perf_sample sample;
200 	union perf_event   event;
201 };
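
/*
 * A rough illustration of how the accessor macros above expand:
 * member_def(perf_event_header, type, T_UINT, "event type") produces a
 * PyMemberDef entry along the lines of
 *
 *	{ "type", T_UINT,
 *	  offsetof(struct pyrf_event, event) + offsetof(struct perf_event_header, type),
 *	  0, "event type" },
 *
 * i.e. each Python attribute is read straight from the event/sample copies
 * held in struct pyrf_event.
 */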
202 
203 #define sample_members \
204 	sample_member_def(sample_ip, ip, T_ULONGLONG, "event ip"),			 \
205 	sample_member_def(sample_pid, pid, T_INT, "event pid"),			 \
206 	sample_member_def(sample_tid, tid, T_INT, "event tid"),			 \
207 	sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"),		 \
208 	sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"),		 \
209 	sample_member_def(sample_id, id, T_ULONGLONG, "event id"),			 \
210 	sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
211 	sample_member_def(sample_period, period, T_ULONGLONG, "event period"),		 \
212 	sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),
213 
214 static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");
215 
216 static PyMemberDef pyrf_mmap_event__members[] = {
217 	sample_members
218 	member_def(perf_event_header, type, T_UINT, "event type"),
219 	member_def(perf_event_header, misc, T_UINT, "event misc"),
220 	member_def(perf_record_mmap, pid, T_UINT, "event pid"),
221 	member_def(perf_record_mmap, tid, T_UINT, "event tid"),
222 	member_def(perf_record_mmap, start, T_ULONGLONG, "start of the map"),
223 	member_def(perf_record_mmap, len, T_ULONGLONG, "map length"),
224 	member_def(perf_record_mmap, pgoff, T_ULONGLONG, "page offset"),
225 	member_def(perf_record_mmap, filename, T_STRING_INPLACE, "backing store"),
226 	{ .name = NULL, },
227 };
228 
229 static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
230 {
231 	PyObject *ret;
232 	char *s;
233 
234 	if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRI_lx64 ", "
235 			 "length: %#" PRI_lx64 ", offset: %#" PRI_lx64 ", "
236 			 "filename: %s }",
237 		     pevent->event.mmap.pid, pevent->event.mmap.tid,
238 		     pevent->event.mmap.start, pevent->event.mmap.len,
239 		     pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
240 		ret = PyErr_NoMemory();
241 	} else {
242 		ret = _PyUnicode_FromString(s);
243 		free(s);
244 	}
245 	return ret;
246 }
247 
248 static PyTypeObject pyrf_mmap_event__type = {
249 	PyVarObject_HEAD_INIT(NULL, 0)
250 	.tp_name	= "perf.mmap_event",
251 	.tp_basicsize	= sizeof(struct pyrf_event),
252 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
253 	.tp_doc		= pyrf_mmap_event__doc,
254 	.tp_members	= pyrf_mmap_event__members,
255 	.tp_repr	= (reprfunc)pyrf_mmap_event__repr,
256 };
257 
258 static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");
259 
260 static PyMemberDef pyrf_task_event__members[] = {
261 	sample_members
262 	member_def(perf_event_header, type, T_UINT, "event type"),
263 	member_def(perf_record_fork, pid, T_UINT, "event pid"),
264 	member_def(perf_record_fork, ppid, T_UINT, "event ppid"),
265 	member_def(perf_record_fork, tid, T_UINT, "event tid"),
266 	member_def(perf_record_fork, ptid, T_UINT, "event ptid"),
267 	member_def(perf_record_fork, time, T_ULONGLONG, "timestamp"),
268 	{ .name = NULL, },
269 };
270 
271 static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
272 {
273 	return _PyUnicode_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
274 				   "ptid: %u, time: %" PRI_lu64 "}",
275 				   pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
276 				   pevent->event.fork.pid,
277 				   pevent->event.fork.ppid,
278 				   pevent->event.fork.tid,
279 				   pevent->event.fork.ptid,
280 				   pevent->event.fork.time);
281 }
282 
283 static PyTypeObject pyrf_task_event__type = {
284 	PyVarObject_HEAD_INIT(NULL, 0)
285 	.tp_name	= "perf.task_event",
286 	.tp_basicsize	= sizeof(struct pyrf_event),
287 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
288 	.tp_doc		= pyrf_task_event__doc,
289 	.tp_members	= pyrf_task_event__members,
290 	.tp_repr	= (reprfunc)pyrf_task_event__repr,
291 };
292 
293 static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");
294 
295 static PyMemberDef pyrf_comm_event__members[] = {
296 	sample_members
297 	member_def(perf_event_header, type, T_UINT, "event type"),
298 	member_def(perf_record_comm, pid, T_UINT, "event pid"),
299 	member_def(perf_record_comm, tid, T_UINT, "event tid"),
300 	member_def(perf_record_comm, comm, T_STRING_INPLACE, "process name"),
301 	{ .name = NULL, },
302 };
303 
304 static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
305 {
306 	return _PyUnicode_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
307 				   pevent->event.comm.pid,
308 				   pevent->event.comm.tid,
309 				   pevent->event.comm.comm);
310 }
311 
312 static PyTypeObject pyrf_comm_event__type = {
313 	PyVarObject_HEAD_INIT(NULL, 0)
314 	.tp_name	= "perf.comm_event",
315 	.tp_basicsize	= sizeof(struct pyrf_event),
316 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
317 	.tp_doc		= pyrf_comm_event__doc,
318 	.tp_members	= pyrf_comm_event__members,
319 	.tp_repr	= (reprfunc)pyrf_comm_event__repr,
320 };
321 
322 static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");
323 
324 static PyMemberDef pyrf_throttle_event__members[] = {
325 	sample_members
326 	member_def(perf_event_header, type, T_UINT, "event type"),
327 	member_def(perf_record_throttle, time, T_ULONGLONG, "timestamp"),
328 	member_def(perf_record_throttle, id, T_ULONGLONG, "event id"),
329 	member_def(perf_record_throttle, stream_id, T_ULONGLONG, "event stream id"),
330 	{ .name = NULL, },
331 };
332 
333 static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
334 {
335 	struct perf_record_throttle *te = (struct perf_record_throttle *)(&pevent->event.header + 1);
336 
337 	return _PyUnicode_FromFormat("{ type: %sthrottle, time: %" PRI_lu64 ", id: %" PRI_lu64
338 				   ", stream_id: %" PRI_lu64 " }",
339 				   pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
340 				   te->time, te->id, te->stream_id);
341 }
342 
343 static PyTypeObject pyrf_throttle_event__type = {
344 	PyVarObject_HEAD_INIT(NULL, 0)
345 	.tp_name	= "perf.throttle_event",
346 	.tp_basicsize	= sizeof(struct pyrf_event),
347 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
348 	.tp_doc		= pyrf_throttle_event__doc,
349 	.tp_members	= pyrf_throttle_event__members,
350 	.tp_repr	= (reprfunc)pyrf_throttle_event__repr,
351 };
352 
353 static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");
354 
355 static PyMemberDef pyrf_lost_event__members[] = {
356 	sample_members
357 	member_def(perf_record_lost, id, T_ULONGLONG, "event id"),
358 	member_def(perf_record_lost, lost, T_ULONGLONG, "number of lost events"),
359 	{ .name = NULL, },
360 };
361 
362 static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent)
363 {
364 	PyObject *ret;
365 	char *s;
366 
367 	if (asprintf(&s, "{ type: lost, id: %#" PRI_lx64 ", "
368 			 "lost: %#" PRI_lx64 " }",
369 		     pevent->event.lost.id, pevent->event.lost.lost) < 0) {
370 		ret = PyErr_NoMemory();
371 	} else {
372 		ret = _PyUnicode_FromString(s);
373 		free(s);
374 	}
375 	return ret;
376 }
377 
378 static PyTypeObject pyrf_lost_event__type = {
379 	PyVarObject_HEAD_INIT(NULL, 0)
380 	.tp_name	= "perf.lost_event",
381 	.tp_basicsize	= sizeof(struct pyrf_event),
382 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
383 	.tp_doc		= pyrf_lost_event__doc,
384 	.tp_members	= pyrf_lost_event__members,
385 	.tp_repr	= (reprfunc)pyrf_lost_event__repr,
386 };
387 
388 static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");
389 
390 static PyMemberDef pyrf_read_event__members[] = {
391 	sample_members
392 	member_def(perf_record_read, pid, T_UINT, "event pid"),
393 	member_def(perf_record_read, tid, T_UINT, "event tid"),
394 	{ .name = NULL, },
395 };
396 
397 static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent)
398 {
399 	return _PyUnicode_FromFormat("{ type: read, pid: %u, tid: %u }",
400 				   pevent->event.read.pid,
401 				   pevent->event.read.tid);
402 	/*
403 	 * FIXME: return the array of read values,
404 	 * making this method useful ;-)
405 	 */
406 }
407 
408 static PyTypeObject pyrf_read_event__type = {
409 	PyVarObject_HEAD_INIT(NULL, 0)
410 	.tp_name	= "perf.read_event",
411 	.tp_basicsize	= sizeof(struct pyrf_event),
412 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
413 	.tp_doc		= pyrf_read_event__doc,
414 	.tp_members	= pyrf_read_event__members,
415 	.tp_repr	= (reprfunc)pyrf_read_event__repr,
416 };
417 
418 static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");
419 
420 static PyMemberDef pyrf_sample_event__members[] = {
421 	sample_members
422 	member_def(perf_event_header, type, T_UINT, "event type"),
423 	{ .name = NULL, },
424 };
425 
426 static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent)
427 {
428 	PyObject *ret;
429 	char *s;
430 
431 	if (asprintf(&s, "{ type: sample }") < 0) {
432 		ret = PyErr_NoMemory();
433 	} else {
434 		ret = _PyUnicode_FromString(s);
435 		free(s);
436 	}
437 	return ret;
438 }
439 
440 #ifdef HAVE_LIBTRACEEVENT
441 static bool is_tracepoint(struct pyrf_event *pevent)
442 {
443 	return pevent->evsel->core.attr.type == PERF_TYPE_TRACEPOINT;
444 }
445 
446 static PyObject*
447 tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field)
448 {
449 	struct tep_handle *pevent = field->event->tep;
450 	void *data = pe->sample.raw_data;
451 	PyObject *ret = NULL;
452 	unsigned long long val;
453 	unsigned int offset, len;
454 
455 	if (field->flags & TEP_FIELD_IS_ARRAY) {
456 		offset = field->offset;
457 		len    = field->size;
458 		if (field->flags & TEP_FIELD_IS_DYNAMIC) {
459 			val     = tep_read_number(pevent, data + offset, len);
460 			offset  = val;
461 			len     = offset >> 16;
462 			offset &= 0xffff;
463 			if (tep_field_is_relative(field->flags))
464 				offset += field->offset + field->size;
465 		}
466 		if (field->flags & TEP_FIELD_IS_STRING &&
467 		    is_printable_array(data + offset, len)) {
468 			ret = _PyUnicode_FromString((char *)data + offset);
469 		} else {
470 			ret = PyByteArray_FromStringAndSize((const char *) data + offset, len);
471 			field->flags &= ~TEP_FIELD_IS_STRING;
472 		}
473 	} else {
474 		val = tep_read_number(pevent, data + field->offset,
475 				      field->size);
476 		if (field->flags & TEP_FIELD_IS_POINTER)
477 			ret = PyLong_FromUnsignedLong((unsigned long) val);
478 		else if (field->flags & TEP_FIELD_IS_SIGNED)
479 			ret = PyLong_FromLong((long) val);
480 		else
481 			ret = PyLong_FromUnsignedLong((unsigned long) val);
482 	}
483 
484 	return ret;
485 }
486 
487 static PyObject*
488 get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
489 {
490 	const char *str = _PyUnicode_AsString(PyObject_Str(attr_name));
491 	struct evsel *evsel = pevent->evsel;
492 	struct tep_format_field *field;
493 
494 	if (!evsel->tp_format) {
495 		struct tep_event *tp_format;
496 
497 		tp_format = trace_event__tp_format_id(evsel->core.attr.config);
498 		if (IS_ERR_OR_NULL(tp_format))
499 			return NULL;
500 
501 		evsel->tp_format = tp_format;
502 	}
503 
504 	field = tep_find_any_field(evsel->tp_format, str);
505 	if (!field)
506 		return NULL;
507 
508 	return tracepoint_field(pevent, field);
509 }
510 #endif /* HAVE_LIBTRACEEVENT */
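
/*
 * From the Python side this means that, for tracepoint evsels, the raw
 * payload fields resolve as plain attributes of the sample event.  A minimal
 * sketch, assuming a sched:sched_switch tracepoint (the field names come from
 * its format file):
 *
 *	event = evlist.read_on_cpu(cpu)
 *	if isinstance(event, perf.sample_event):
 *		print("%s -> pid %d" % (event.prev_comm, event.next_pid))
 */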
511 
512 static PyObject*
513 pyrf_sample_event__getattro(struct pyrf_event *pevent, PyObject *attr_name)
514 {
515 	PyObject *obj = NULL;
516 
517 #ifdef HAVE_LIBTRACEEVENT
518 	if (is_tracepoint(pevent))
519 		obj = get_tracepoint_field(pevent, attr_name);
520 #endif
521 
522 	return obj ?: PyObject_GenericGetAttr((PyObject *) pevent, attr_name);
523 }
524 
525 static PyTypeObject pyrf_sample_event__type = {
526 	PyVarObject_HEAD_INIT(NULL, 0)
527 	.tp_name	= "perf.sample_event",
528 	.tp_basicsize	= sizeof(struct pyrf_event),
529 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
530 	.tp_doc		= pyrf_sample_event__doc,
531 	.tp_members	= pyrf_sample_event__members,
532 	.tp_repr	= (reprfunc)pyrf_sample_event__repr,
533 	.tp_getattro	= (getattrofunc) pyrf_sample_event__getattro,
534 };
535 
536 static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");
537 
538 static PyMemberDef pyrf_context_switch_event__members[] = {
539 	sample_members
540 	member_def(perf_event_header, type, T_UINT, "event type"),
541 	member_def(perf_record_switch, next_prev_pid, T_UINT, "next/prev pid"),
542 	member_def(perf_record_switch, next_prev_tid, T_UINT, "next/prev tid"),
543 	{ .name = NULL, },
544 };
545 
546 static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent)
547 {
548 	PyObject *ret;
549 	char *s;
550 
551 	if (asprintf(&s, "{ type: context_switch, next_prev_pid: %u, next_prev_tid: %u, switch_out: %u }",
552 		     pevent->event.context_switch.next_prev_pid,
553 		     pevent->event.context_switch.next_prev_tid,
554 		     !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) {
555 		ret = PyErr_NoMemory();
556 	} else {
557 		ret = _PyUnicode_FromString(s);
558 		free(s);
559 	}
560 	return ret;
561 }
562 
563 static PyTypeObject pyrf_context_switch_event__type = {
564 	PyVarObject_HEAD_INIT(NULL, 0)
565 	.tp_name	= "perf.context_switch_event",
566 	.tp_basicsize	= sizeof(struct pyrf_event),
567 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
568 	.tp_doc		= pyrf_context_switch_event__doc,
569 	.tp_members	= pyrf_context_switch_event__members,
570 	.tp_repr	= (reprfunc)pyrf_context_switch_event__repr,
571 };
572 
573 static int pyrf_event__setup_types(void)
574 {
575 	int err;
576 	pyrf_mmap_event__type.tp_new =
577 	pyrf_task_event__type.tp_new =
578 	pyrf_comm_event__type.tp_new =
579 	pyrf_lost_event__type.tp_new =
580 	pyrf_read_event__type.tp_new =
581 	pyrf_sample_event__type.tp_new =
582 	pyrf_context_switch_event__type.tp_new =
583 	pyrf_throttle_event__type.tp_new = PyType_GenericNew;
584 	err = PyType_Ready(&pyrf_mmap_event__type);
585 	if (err < 0)
586 		goto out;
587 	err = PyType_Ready(&pyrf_lost_event__type);
588 	if (err < 0)
589 		goto out;
590 	err = PyType_Ready(&pyrf_task_event__type);
591 	if (err < 0)
592 		goto out;
593 	err = PyType_Ready(&pyrf_comm_event__type);
594 	if (err < 0)
595 		goto out;
596 	err = PyType_Ready(&pyrf_throttle_event__type);
597 	if (err < 0)
598 		goto out;
599 	err = PyType_Ready(&pyrf_read_event__type);
600 	if (err < 0)
601 		goto out;
602 	err = PyType_Ready(&pyrf_sample_event__type);
603 	if (err < 0)
604 		goto out;
605 	err = PyType_Ready(&pyrf_context_switch_event__type);
606 	if (err < 0)
607 		goto out;
608 out:
609 	return err;
610 }
611 
612 static PyTypeObject *pyrf_event__type[] = {
613 	[PERF_RECORD_MMAP]	 = &pyrf_mmap_event__type,
614 	[PERF_RECORD_LOST]	 = &pyrf_lost_event__type,
615 	[PERF_RECORD_COMM]	 = &pyrf_comm_event__type,
616 	[PERF_RECORD_EXIT]	 = &pyrf_task_event__type,
617 	[PERF_RECORD_THROTTLE]	 = &pyrf_throttle_event__type,
618 	[PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type,
619 	[PERF_RECORD_FORK]	 = &pyrf_task_event__type,
620 	[PERF_RECORD_READ]	 = &pyrf_read_event__type,
621 	[PERF_RECORD_SAMPLE]	 = &pyrf_sample_event__type,
622 	[PERF_RECORD_SWITCH]	 = &pyrf_context_switch_event__type,
623 	[PERF_RECORD_SWITCH_CPU_WIDE]  = &pyrf_context_switch_event__type,
624 };
625 
626 static PyObject *pyrf_event__new(union perf_event *event)
627 {
628 	struct pyrf_event *pevent;
629 	PyTypeObject *ptype;
630 
631 	if ((event->header.type < PERF_RECORD_MMAP ||
632 	     event->header.type > PERF_RECORD_SAMPLE) &&
633 	    !(event->header.type == PERF_RECORD_SWITCH ||
634 	      event->header.type == PERF_RECORD_SWITCH_CPU_WIDE))
635 		return NULL;
636 
637 	ptype = pyrf_event__type[event->header.type];
638 	pevent = PyObject_New(struct pyrf_event, ptype);
639 	if (pevent != NULL)
640 		memcpy(&pevent->event, event, event->header.size);
641 	return (PyObject *)pevent;
642 }
643 
644 struct pyrf_cpu_map {
645 	PyObject_HEAD
646 
647 	struct perf_cpu_map *cpus;
648 };
649 
650 static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
651 			      PyObject *args, PyObject *kwargs)
652 {
653 	static char *kwlist[] = { "cpustr", NULL };
654 	char *cpustr = NULL;
655 
656 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
657 					 kwlist, &cpustr))
658 		return -1;
659 
660 	pcpus->cpus = perf_cpu_map__new(cpustr);
661 	if (pcpus->cpus == NULL)
662 		return -1;
663 	return 0;
664 }
665 
666 static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
667 {
668 	perf_cpu_map__put(pcpus->cpus);
669 	Py_TYPE(pcpus)->tp_free((PyObject*)pcpus);
670 }
671 
672 static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
673 {
674 	struct pyrf_cpu_map *pcpus = (void *)obj;
675 
676 	return perf_cpu_map__nr(pcpus->cpus);
677 }
678 
679 static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
680 {
681 	struct pyrf_cpu_map *pcpus = (void *)obj;
682 
683 	if (i >= perf_cpu_map__nr(pcpus->cpus))
684 		return NULL;
685 
686 	return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu);
687 }
688 
689 static PySequenceMethods pyrf_cpu_map__sequence_methods = {
690 	.sq_length = pyrf_cpu_map__length,
691 	.sq_item   = pyrf_cpu_map__item,
692 };
693 
694 static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");
695 
696 static PyTypeObject pyrf_cpu_map__type = {
697 	PyVarObject_HEAD_INIT(NULL, 0)
698 	.tp_name	= "perf.cpu_map",
699 	.tp_basicsize	= sizeof(struct pyrf_cpu_map),
700 	.tp_dealloc	= (destructor)pyrf_cpu_map__delete,
701 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
702 	.tp_doc		= pyrf_cpu_map__doc,
703 	.tp_as_sequence	= &pyrf_cpu_map__sequence_methods,
704 	.tp_init	= (initproc)pyrf_cpu_map__init,
705 };
706 
707 static int pyrf_cpu_map__setup_types(void)
708 {
709 	pyrf_cpu_map__type.tp_new = PyType_GenericNew;
710 	return PyType_Ready(&pyrf_cpu_map__type);
711 }
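
/*
 * Usage sketch from Python: a cpu_map behaves as a read-only sequence of CPU
 * numbers; the "0-3" string below is just an example, omitting it defaults to
 * the online CPUs:
 *
 *	cpus = perf.cpu_map("0-3")
 *	for cpu in cpus:
 *		print(cpu)
 */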
712 
713 struct pyrf_thread_map {
714 	PyObject_HEAD
715 
716 	struct perf_thread_map *threads;
717 };
718 
719 static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
720 				 PyObject *args, PyObject *kwargs)
721 {
722 	static char *kwlist[] = { "pid", "tid", "uid", NULL };
723 	int pid = -1, tid = -1, uid = UINT_MAX;
724 
725 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii",
726 					 kwlist, &pid, &tid, &uid))
727 		return -1;
728 
729 	pthreads->threads = thread_map__new(pid, tid, uid);
730 	if (pthreads->threads == NULL)
731 		return -1;
732 	return 0;
733 }
734 
735 static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
736 {
737 	perf_thread_map__put(pthreads->threads);
738 	Py_TYPE(pthreads)->tp_free((PyObject*)pthreads);
739 }
740 
741 static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
742 {
743 	struct pyrf_thread_map *pthreads = (void *)obj;
744 
745 	return perf_thread_map__nr(pthreads->threads);
746 }
747 
748 static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
749 {
750 	struct pyrf_thread_map *pthreads = (void *)obj;
751 
752 	if (i >= perf_thread_map__nr(pthreads->threads))
753 		return NULL;
754 
755 	return Py_BuildValue("i", perf_thread_map__pid(pthreads->threads, i));
756 }
757 
758 static PySequenceMethods pyrf_thread_map__sequence_methods = {
759 	.sq_length = pyrf_thread_map__length,
760 	.sq_item   = pyrf_thread_map__item,
761 };
762 
763 static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");
764 
765 static PyTypeObject pyrf_thread_map__type = {
766 	PyVarObject_HEAD_INIT(NULL, 0)
767 	.tp_name	= "perf.thread_map",
768 	.tp_basicsize	= sizeof(struct pyrf_thread_map),
769 	.tp_dealloc	= (destructor)pyrf_thread_map__delete,
770 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
771 	.tp_doc		= pyrf_thread_map__doc,
772 	.tp_as_sequence	= &pyrf_thread_map__sequence_methods,
773 	.tp_init	= (initproc)pyrf_thread_map__init,
774 };
775 
776 static int pyrf_thread_map__setup_types(void)
777 {
778 	pyrf_thread_map__type.tp_new = PyType_GenericNew;
779 	return PyType_Ready(&pyrf_thread_map__type);
780 }
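
/*
 * Usage sketch from Python, mirroring cpu_map above: a thread_map is a
 * sequence of thread/process ids; 1234 below is just an example pid, the
 * default of pid = -1 yields the single "any thread" dummy entry:
 *
 *	threads = perf.thread_map(pid = 1234)
 *	for tid in threads:
 *		print(tid)
 */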
781 
782 struct pyrf_evsel {
783 	PyObject_HEAD
784 
785 	struct evsel evsel;
786 };
787 
788 static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
789 			    PyObject *args, PyObject *kwargs)
790 {
791 	struct perf_event_attr attr = {
792 		.type = PERF_TYPE_HARDWARE,
793 		.config = PERF_COUNT_HW_CPU_CYCLES,
794 		.sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
795 	};
796 	static char *kwlist[] = {
797 		"type",
798 		"config",
799 		"sample_freq",
800 		"sample_period",
801 		"sample_type",
802 		"read_format",
803 		"disabled",
804 		"inherit",
805 		"pinned",
806 		"exclusive",
807 		"exclude_user",
808 		"exclude_kernel",
809 		"exclude_hv",
810 		"exclude_idle",
811 		"mmap",
812 		"context_switch",
813 		"comm",
814 		"freq",
815 		"inherit_stat",
816 		"enable_on_exec",
817 		"task",
818 		"watermark",
819 		"precise_ip",
820 		"mmap_data",
821 		"sample_id_all",
822 		"wakeup_events",
823 		"bp_type",
824 		"bp_addr",
825 		"bp_len",
826 		 NULL
827 	};
828 	u64 sample_period = 0;
829 	u32 disabled = 0,
830 	    inherit = 0,
831 	    pinned = 0,
832 	    exclusive = 0,
833 	    exclude_user = 0,
834 	    exclude_kernel = 0,
835 	    exclude_hv = 0,
836 	    exclude_idle = 0,
837 	    mmap = 0,
838 	    context_switch = 0,
839 	    comm = 0,
840 	    freq = 1,
841 	    inherit_stat = 0,
842 	    enable_on_exec = 0,
843 	    task = 0,
844 	    watermark = 0,
845 	    precise_ip = 0,
846 	    mmap_data = 0,
847 	    sample_id_all = 1;
848 	int idx = 0;
849 
850 	if (!PyArg_ParseTupleAndKeywords(args, kwargs,
851 					 "|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
852 					 &attr.type, &attr.config, &attr.sample_freq,
853 					 &sample_period, &attr.sample_type,
854 					 &attr.read_format, &disabled, &inherit,
855 					 &pinned, &exclusive, &exclude_user,
856 					 &exclude_kernel, &exclude_hv, &exclude_idle,
857 					 &mmap, &context_switch, &comm, &freq, &inherit_stat,
858 					 &enable_on_exec, &task, &watermark,
859 					 &precise_ip, &mmap_data, &sample_id_all,
860 					 &attr.wakeup_events, &attr.bp_type,
861 					 &attr.bp_addr, &attr.bp_len, &idx))
862 		return -1;
863 
864 	/* union... */
865 	if (sample_period != 0) {
866 		if (attr.sample_freq != 0)
867 			return -1; /* FIXME: throw the right exception */
868 		attr.sample_period = sample_period;
869 	}
870 
871 	/* Bitfields */
872 	attr.disabled	    = disabled;
873 	attr.inherit	    = inherit;
874 	attr.pinned	    = pinned;
875 	attr.exclusive	    = exclusive;
876 	attr.exclude_user   = exclude_user;
877 	attr.exclude_kernel = exclude_kernel;
878 	attr.exclude_hv	    = exclude_hv;
879 	attr.exclude_idle   = exclude_idle;
880 	attr.mmap	    = mmap;
881 	attr.context_switch = context_switch;
882 	attr.comm	    = comm;
883 	attr.freq	    = freq;
884 	attr.inherit_stat   = inherit_stat;
885 	attr.enable_on_exec = enable_on_exec;
886 	attr.task	    = task;
887 	attr.watermark	    = watermark;
888 	attr.precise_ip	    = precise_ip;
889 	attr.mmap_data	    = mmap_data;
890 	attr.sample_id_all  = sample_id_all;
891 	attr.size	    = sizeof(attr);
892 
893 	evsel__init(&pevsel->evsel, &attr, idx);
894 	return 0;
895 }
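
/*
 * A sketch of how the keywords above look from the Python side; the event and
 * the values are purely illustrative:
 *
 *	cycles = perf.evsel(type = perf.TYPE_HARDWARE,
 *			    config = perf.COUNT_HW_CPU_CYCLES,
 *			    freq = 0, sample_period = 100000,
 *			    wakeup_events = 1, sample_id_all = 1,
 *			    sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
 */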
896 
897 static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
898 {
899 	evsel__exit(&pevsel->evsel);
900 	Py_TYPE(pevsel)->tp_free((PyObject*)pevsel);
901 }
902 
903 static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
904 				  PyObject *args, PyObject *kwargs)
905 {
906 	struct evsel *evsel = &pevsel->evsel;
907 	struct perf_cpu_map *cpus = NULL;
908 	struct perf_thread_map *threads = NULL;
909 	PyObject *pcpus = NULL, *pthreads = NULL;
910 	int group = 0, inherit = 0;
911 	static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL };
912 
913 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
914 					 &pcpus, &pthreads, &group, &inherit))
915 		return NULL;
916 
917 	if (pthreads != NULL)
918 		threads = ((struct pyrf_thread_map *)pthreads)->threads;
919 
920 	if (pcpus != NULL)
921 		cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
922 
923 	evsel->core.attr.inherit = inherit;
924 	/*
925 	 * This will group just the fds for this single evsel; to group
926 	 * multiple events, use evlist.open().
927 	 */
928 	if (evsel__open(evsel, cpus, threads) < 0) {
929 		PyErr_SetFromErrno(PyExc_OSError);
930 		return NULL;
931 	}
932 
933 	Py_INCREF(Py_None);
934 	return Py_None;
935 }
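
/*
 * Python-side sketch; cpus/threads would be perf.cpu_map()/perf.thread_map()
 * instances built as above:
 *
 *	cycles.open(cpus = cpus, threads = threads)
 */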
936 
937 static PyMethodDef pyrf_evsel__methods[] = {
938 	{
939 		.ml_name  = "open",
940 		.ml_meth  = (PyCFunction)pyrf_evsel__open,
941 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
942 		.ml_doc	  = PyDoc_STR("open the event selector file descriptor table.")
943 	},
944 	{ .ml_name = NULL, }
945 };
946 
947 static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector object.");
948 
949 static PyTypeObject pyrf_evsel__type = {
950 	PyVarObject_HEAD_INIT(NULL, 0)
951 	.tp_name	= "perf.evsel",
952 	.tp_basicsize	= sizeof(struct pyrf_evsel),
953 	.tp_dealloc	= (destructor)pyrf_evsel__delete,
954 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
955 	.tp_doc		= pyrf_evsel__doc,
956 	.tp_methods	= pyrf_evsel__methods,
957 	.tp_init	= (initproc)pyrf_evsel__init,
958 };
959 
960 static int pyrf_evsel__setup_types(void)
961 {
962 	pyrf_evsel__type.tp_new = PyType_GenericNew;
963 	return PyType_Ready(&pyrf_evsel__type);
964 }
965 
966 struct pyrf_evlist {
967 	PyObject_HEAD
968 
969 	struct evlist evlist;
970 };
971 
972 static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
973 			     PyObject *args, PyObject *kwargs __maybe_unused)
974 {
975 	PyObject *pcpus = NULL, *pthreads = NULL;
976 	struct perf_cpu_map *cpus;
977 	struct perf_thread_map *threads;
978 
979 	if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
980 		return -1;
981 
982 	threads = ((struct pyrf_thread_map *)pthreads)->threads;
983 	cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
984 	evlist__init(&pevlist->evlist, cpus, threads);
985 	return 0;
986 }
987 
988 static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
989 {
990 	evlist__exit(&pevlist->evlist);
991 	Py_TYPE(pevlist)->tp_free((PyObject*)pevlist);
992 }
993 
994 static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
995 				   PyObject *args, PyObject *kwargs)
996 {
997 	struct evlist *evlist = &pevlist->evlist;
998 	static char *kwlist[] = { "pages", "overwrite", NULL };
999 	int pages = 128, overwrite = false;
1000 
1001 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
1002 					 &pages, &overwrite))
1003 		return NULL;
1004 
1005 	if (evlist__mmap(evlist, pages) < 0) {
1006 		PyErr_SetFromErrno(PyExc_OSError);
1007 		return NULL;
1008 	}
1009 
1010 	Py_INCREF(Py_None);
1011 	return Py_None;
1012 }
1013 
1014 static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
1015 				   PyObject *args, PyObject *kwargs)
1016 {
1017 	struct evlist *evlist = &pevlist->evlist;
1018 	static char *kwlist[] = { "timeout", NULL };
1019 	int timeout = -1, n;
1020 
1021 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
1022 		return NULL;
1023 
1024 	n = evlist__poll(evlist, timeout);
1025 	if (n < 0) {
1026 		PyErr_SetFromErrno(PyExc_OSError);
1027 		return NULL;
1028 	}
1029 
1030 	return Py_BuildValue("i", n);
1031 }
1032 
1033 static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
1034 					 PyObject *args __maybe_unused,
1035 					 PyObject *kwargs __maybe_unused)
1036 {
1037 	struct evlist *evlist = &pevlist->evlist;
1038 	PyObject *list = PyList_New(0);
1039 	int i;
1040 
1041 	for (i = 0; i < evlist->core.pollfd.nr; ++i) {
1042 		PyObject *file;
1043 #if PY_MAJOR_VERSION < 3
1044 		FILE *fp = fdopen(evlist->core.pollfd.entries[i].fd, "r");
1045 
1046 		if (fp == NULL)
1047 			goto free_list;
1048 
1049 		file = PyFile_FromFile(fp, "perf", "r", NULL);
1050 #else
1051 		file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1,
1052 				     NULL, NULL, NULL, 0);
1053 #endif
1054 		if (file == NULL)
1055 			goto free_list;
1056 
1057 		if (PyList_Append(list, file) != 0) {
1058 			Py_DECREF(file);
1059 			goto free_list;
1060 		}
1061 
1062 		Py_DECREF(file);
1063 	}
1064 
1065 	return list;
1066 free_list:
1067 	return PyErr_NoMemory();
1068 }
1069 
1070 
1071 static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
1072 				  PyObject *args,
1073 				  PyObject *kwargs __maybe_unused)
1074 {
1075 	struct evlist *evlist = &pevlist->evlist;
1076 	PyObject *pevsel;
1077 	struct evsel *evsel;
1078 
1079 	if (!PyArg_ParseTuple(args, "O", &pevsel))
1080 		return NULL;
1081 
1082 	Py_INCREF(pevsel);
1083 	evsel = &((struct pyrf_evsel *)pevsel)->evsel;
1084 	evsel->core.idx = evlist->core.nr_entries;
1085 	evlist__add(evlist, evsel);
1086 
1087 	return Py_BuildValue("i", evlist->core.nr_entries);
1088 }
1089 
1090 static struct mmap *get_md(struct evlist *evlist, int cpu)
1091 {
1092 	int i;
1093 
1094 	for (i = 0; i < evlist->core.nr_mmaps; i++) {
1095 		struct mmap *md = &evlist->mmap[i];
1096 
1097 		if (md->core.cpu.cpu == cpu)
1098 			return md;
1099 	}
1100 
1101 	return NULL;
1102 }
1103 
1104 static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
1105 					  PyObject *args, PyObject *kwargs)
1106 {
1107 	struct evlist *evlist = &pevlist->evlist;
1108 	union perf_event *event;
1109 	int sample_id_all = 1, cpu;
1110 	static char *kwlist[] = { "cpu", "sample_id_all", NULL };
1111 	struct mmap *md;
1112 	int err;
1113 
1114 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
1115 					 &cpu, &sample_id_all))
1116 		return NULL;
1117 
1118 	md = get_md(evlist, cpu);
1119 	if (!md)
1120 		return NULL;
1121 
1122 	if (perf_mmap__read_init(&md->core) < 0)
1123 		goto end;
1124 
1125 	event = perf_mmap__read_event(&md->core);
1126 	if (event != NULL) {
1127 		PyObject *pyevent = pyrf_event__new(event);
1128 		struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
1129 		struct evsel *evsel;
1130 
1131 		if (pyevent == NULL)
1132 			return PyErr_NoMemory();
1133 
1134 		evsel = evlist__event2evsel(evlist, event);
1135 		if (!evsel) {
1136 			Py_INCREF(Py_None);
1137 			return Py_None;
1138 		}
1139 
1140 		pevent->evsel = evsel;
1141 
1142 		err = evsel__parse_sample(evsel, event, &pevent->sample);
1143 
1144 		/* Consume the event only after we have parsed it out. */
1145 		perf_mmap__consume(&md->core);
1146 
1147 		if (err)
1148 			return PyErr_Format(PyExc_OSError,
1149 					    "perf: can't parse sample, err=%d", err);
1150 		return pyevent;
1151 	}
1152 end:
1153 	Py_INCREF(Py_None);
1154 	return Py_None;
1155 }
1156 
1157 static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
1158 				   PyObject *args, PyObject *kwargs)
1159 {
1160 	struct evlist *evlist = &pevlist->evlist;
1161 
1162 	if (evlist__open(evlist) < 0) {
1163 		PyErr_SetFromErrno(PyExc_OSError);
1164 		return NULL;
1165 	}
1166 
1167 	Py_INCREF(Py_None);
1168 	return Py_None;
1169 }
1170 
1171 static PyMethodDef pyrf_evlist__methods[] = {
1172 	{
1173 		.ml_name  = "mmap",
1174 		.ml_meth  = (PyCFunction)pyrf_evlist__mmap,
1175 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1176 		.ml_doc	  = PyDoc_STR("mmap the file descriptor table.")
1177 	},
1178 	{
1179 		.ml_name  = "open",
1180 		.ml_meth  = (PyCFunction)pyrf_evlist__open,
1181 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1182 		.ml_doc	  = PyDoc_STR("open the file descriptors.")
1183 	},
1184 	{
1185 		.ml_name  = "poll",
1186 		.ml_meth  = (PyCFunction)pyrf_evlist__poll,
1187 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1188 		.ml_doc	  = PyDoc_STR("poll the file descriptor table.")
1189 	},
1190 	{
1191 		.ml_name  = "get_pollfd",
1192 		.ml_meth  = (PyCFunction)pyrf_evlist__get_pollfd,
1193 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1194 		.ml_doc	  = PyDoc_STR("get the poll file descriptor table.")
1195 	},
1196 	{
1197 		.ml_name  = "add",
1198 		.ml_meth  = (PyCFunction)pyrf_evlist__add,
1199 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1200 		.ml_doc	  = PyDoc_STR("adds an event selector to the list.")
1201 	},
1202 	{
1203 		.ml_name  = "read_on_cpu",
1204 		.ml_meth  = (PyCFunction)pyrf_evlist__read_on_cpu,
1205 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1206 		.ml_doc	  = PyDoc_STR("reads an event.")
1207 	},
1208 	{ .ml_name = NULL, }
1209 };
1210 
1211 static Py_ssize_t pyrf_evlist__length(PyObject *obj)
1212 {
1213 	struct pyrf_evlist *pevlist = (void *)obj;
1214 
1215 	return pevlist->evlist.core.nr_entries;
1216 }
1217 
1218 static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
1219 {
1220 	struct pyrf_evlist *pevlist = (void *)obj;
1221 	struct evsel *pos;
1222 
1223 	if (i >= pevlist->evlist.core.nr_entries)
1224 		return NULL;
1225 
1226 	evlist__for_each_entry(&pevlist->evlist, pos) {
1227 		if (i-- == 0)
1228 			break;
1229 	}
1230 
1231 	return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
1232 }
1233 
1234 static PySequenceMethods pyrf_evlist__sequence_methods = {
1235 	.sq_length = pyrf_evlist__length,
1236 	.sq_item   = pyrf_evlist__item,
1237 };
1238 
1239 static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");
1240 
1241 static PyTypeObject pyrf_evlist__type = {
1242 	PyVarObject_HEAD_INIT(NULL, 0)
1243 	.tp_name	= "perf.evlist",
1244 	.tp_basicsize	= sizeof(struct pyrf_evlist),
1245 	.tp_dealloc	= (destructor)pyrf_evlist__delete,
1246 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
1247 	.tp_as_sequence	= &pyrf_evlist__sequence_methods,
1248 	.tp_doc		= pyrf_evlist__doc,
1249 	.tp_methods	= pyrf_evlist__methods,
1250 	.tp_init	= (initproc)pyrf_evlist__init,
1251 };
1252 
1253 static int pyrf_evlist__setup_types(void)
1254 {
1255 	pyrf_evlist__type.tp_new = PyType_GenericNew;
1256 	return PyType_Ready(&pyrf_evlist__type);
1257 }
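
/*
 * Putting the pieces together, a minimal event loop from Python looks roughly
 * like tools/perf/python/twatch.py:
 *
 *	evlist = perf.evlist(cpus, threads)
 *	evlist.add(evsel)
 *	evlist.open()
 *	evlist.mmap()
 *	while True:
 *		evlist.poll(timeout = -1)
 *		for cpu in cpus:
 *			event = evlist.read_on_cpu(cpu)
 *			if event:
 *				print(event)
 */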
1258 
1259 #define PERF_CONST(name) { #name, PERF_##name }
1260 
1261 static struct {
1262 	const char *name;
1263 	int	    value;
1264 } perf__constants[] = {
1265 	PERF_CONST(TYPE_HARDWARE),
1266 	PERF_CONST(TYPE_SOFTWARE),
1267 	PERF_CONST(TYPE_TRACEPOINT),
1268 	PERF_CONST(TYPE_HW_CACHE),
1269 	PERF_CONST(TYPE_RAW),
1270 	PERF_CONST(TYPE_BREAKPOINT),
1271 
1272 	PERF_CONST(COUNT_HW_CPU_CYCLES),
1273 	PERF_CONST(COUNT_HW_INSTRUCTIONS),
1274 	PERF_CONST(COUNT_HW_CACHE_REFERENCES),
1275 	PERF_CONST(COUNT_HW_CACHE_MISSES),
1276 	PERF_CONST(COUNT_HW_BRANCH_INSTRUCTIONS),
1277 	PERF_CONST(COUNT_HW_BRANCH_MISSES),
1278 	PERF_CONST(COUNT_HW_BUS_CYCLES),
1279 	PERF_CONST(COUNT_HW_CACHE_L1D),
1280 	PERF_CONST(COUNT_HW_CACHE_L1I),
1281 	PERF_CONST(COUNT_HW_CACHE_LL),
1282 	PERF_CONST(COUNT_HW_CACHE_DTLB),
1283 	PERF_CONST(COUNT_HW_CACHE_ITLB),
1284 	PERF_CONST(COUNT_HW_CACHE_BPU),
1285 	PERF_CONST(COUNT_HW_CACHE_OP_READ),
1286 	PERF_CONST(COUNT_HW_CACHE_OP_WRITE),
1287 	PERF_CONST(COUNT_HW_CACHE_OP_PREFETCH),
1288 	PERF_CONST(COUNT_HW_CACHE_RESULT_ACCESS),
1289 	PERF_CONST(COUNT_HW_CACHE_RESULT_MISS),
1290 
1291 	PERF_CONST(COUNT_HW_STALLED_CYCLES_FRONTEND),
1292 	PERF_CONST(COUNT_HW_STALLED_CYCLES_BACKEND),
1293 
1294 	PERF_CONST(COUNT_SW_CPU_CLOCK),
1295 	PERF_CONST(COUNT_SW_TASK_CLOCK),
1296 	PERF_CONST(COUNT_SW_PAGE_FAULTS),
1297 	PERF_CONST(COUNT_SW_CONTEXT_SWITCHES),
1298 	PERF_CONST(COUNT_SW_CPU_MIGRATIONS),
1299 	PERF_CONST(COUNT_SW_PAGE_FAULTS_MIN),
1300 	PERF_CONST(COUNT_SW_PAGE_FAULTS_MAJ),
1301 	PERF_CONST(COUNT_SW_ALIGNMENT_FAULTS),
1302 	PERF_CONST(COUNT_SW_EMULATION_FAULTS),
1303 	PERF_CONST(COUNT_SW_DUMMY),
1304 
1305 	PERF_CONST(SAMPLE_IP),
1306 	PERF_CONST(SAMPLE_TID),
1307 	PERF_CONST(SAMPLE_TIME),
1308 	PERF_CONST(SAMPLE_ADDR),
1309 	PERF_CONST(SAMPLE_READ),
1310 	PERF_CONST(SAMPLE_CALLCHAIN),
1311 	PERF_CONST(SAMPLE_ID),
1312 	PERF_CONST(SAMPLE_CPU),
1313 	PERF_CONST(SAMPLE_PERIOD),
1314 	PERF_CONST(SAMPLE_STREAM_ID),
1315 	PERF_CONST(SAMPLE_RAW),
1316 
1317 	PERF_CONST(FORMAT_TOTAL_TIME_ENABLED),
1318 	PERF_CONST(FORMAT_TOTAL_TIME_RUNNING),
1319 	PERF_CONST(FORMAT_ID),
1320 	PERF_CONST(FORMAT_GROUP),
1321 
1322 	PERF_CONST(RECORD_MMAP),
1323 	PERF_CONST(RECORD_LOST),
1324 	PERF_CONST(RECORD_COMM),
1325 	PERF_CONST(RECORD_EXIT),
1326 	PERF_CONST(RECORD_THROTTLE),
1327 	PERF_CONST(RECORD_UNTHROTTLE),
1328 	PERF_CONST(RECORD_FORK),
1329 	PERF_CONST(RECORD_READ),
1330 	PERF_CONST(RECORD_SAMPLE),
1331 	PERF_CONST(RECORD_MMAP2),
1332 	PERF_CONST(RECORD_AUX),
1333 	PERF_CONST(RECORD_ITRACE_START),
1334 	PERF_CONST(RECORD_LOST_SAMPLES),
1335 	PERF_CONST(RECORD_SWITCH),
1336 	PERF_CONST(RECORD_SWITCH_CPU_WIDE),
1337 
1338 	PERF_CONST(RECORD_MISC_SWITCH_OUT),
1339 	{ .name = NULL, },
1340 };
1341 
1342 static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
1343 				  PyObject *args, PyObject *kwargs)
1344 {
1345 #ifndef HAVE_LIBTRACEEVENT
1346 	return NULL;
1347 #else
1348 	struct tep_event *tp_format;
1349 	static char *kwlist[] = { "sys", "name", NULL };
1350 	char *sys  = NULL;
1351 	char *name = NULL;
1352 
1353 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ss", kwlist,
1354 					 &sys, &name))
1355 		return NULL;
1356 
1357 	tp_format = trace_event__tp_format(sys, name);
1358 	if (IS_ERR(tp_format))
1359 		return _PyLong_FromLong(-1);
1360 
1361 	return _PyLong_FromLong(tp_format->id);
1362 #endif // HAVE_LIBTRACEEVENT
1363 }
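
/*
 * Python-side sketch, see also tools/perf/python/tracepoint.py; the sched
 * tracepoint below is just an example:
 *
 *	config = perf.tracepoint(sys = "sched", name = "sched_switch")
 *	evsel  = perf.evsel(type = perf.TYPE_TRACEPOINT, config = config,
 *			    sample_type = perf.SAMPLE_RAW | perf.SAMPLE_TID | perf.SAMPLE_TIME)
 */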
1364 
1365 static PyMethodDef perf__methods[] = {
1366 	{
1367 		.ml_name  = "tracepoint",
1368 		.ml_meth  = (PyCFunction) pyrf__tracepoint,
1369 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1370 		.ml_doc	  = PyDoc_STR("Get tracepoint config.")
1371 	},
1372 	{ .ml_name = NULL, }
1373 };
1374 
1375 #if PY_MAJOR_VERSION < 3
1376 PyMODINIT_FUNC initperf(void)
1377 #else
1378 PyMODINIT_FUNC PyInit_perf(void)
1379 #endif
1380 {
1381 	PyObject *obj;
1382 	int i;
1383 	PyObject *dict;
1384 #if PY_MAJOR_VERSION < 3
1385 	PyObject *module = Py_InitModule("perf", perf__methods);
1386 #else
1387 	static struct PyModuleDef moduledef = {
1388 		PyModuleDef_HEAD_INIT,
1389 		"perf",			/* m_name */
1390 		"",			/* m_doc */
1391 		-1,			/* m_size */
1392 		perf__methods,		/* m_methods */
1393 		NULL,			/* m_reload */
1394 		NULL,			/* m_traverse */
1395 		NULL,			/* m_clear */
1396 		NULL,			/* m_free */
1397 	};
1398 	PyObject *module = PyModule_Create(&moduledef);
1399 #endif
1400 
1401 	if (module == NULL ||
1402 	    pyrf_event__setup_types() < 0 ||
1403 	    pyrf_evlist__setup_types() < 0 ||
1404 	    pyrf_evsel__setup_types() < 0 ||
1405 	    pyrf_thread_map__setup_types() < 0 ||
1406 	    pyrf_cpu_map__setup_types() < 0)
1407 #if PY_MAJOR_VERSION < 3
1408 		return;
1409 #else
1410 		return module;
1411 #endif
1412 
1413 	/* The page_size is placed in the util object. */
1414 	page_size = sysconf(_SC_PAGE_SIZE);
1415 
1416 	Py_INCREF(&pyrf_evlist__type);
1417 	PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);
1418 
1419 	Py_INCREF(&pyrf_evsel__type);
1420 	PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);
1421 
1422 	Py_INCREF(&pyrf_mmap_event__type);
1423 	PyModule_AddObject(module, "mmap_event", (PyObject *)&pyrf_mmap_event__type);
1424 
1425 	Py_INCREF(&pyrf_lost_event__type);
1426 	PyModule_AddObject(module, "lost_event", (PyObject *)&pyrf_lost_event__type);
1427 
1428 	Py_INCREF(&pyrf_comm_event__type);
1429 	PyModule_AddObject(module, "comm_event", (PyObject *)&pyrf_comm_event__type);
1430 
1431 	Py_INCREF(&pyrf_task_event__type);
1432 	PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);
1433 
1434 	Py_INCREF(&pyrf_throttle_event__type);
1435 	PyModule_AddObject(module, "throttle_event", (PyObject *)&pyrf_throttle_event__type);
1436 
1440 	Py_INCREF(&pyrf_read_event__type);
1441 	PyModule_AddObject(module, "read_event", (PyObject *)&pyrf_read_event__type);
1442 
1443 	Py_INCREF(&pyrf_sample_event__type);
1444 	PyModule_AddObject(module, "sample_event", (PyObject *)&pyrf_sample_event__type);
1445 
1446 	Py_INCREF(&pyrf_context_switch_event__type);
1447 	PyModule_AddObject(module, "switch_event", (PyObject *)&pyrf_context_switch_event__type);
1448 
1449 	Py_INCREF(&pyrf_thread_map__type);
1450 	PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);
1451 
1452 	Py_INCREF(&pyrf_cpu_map__type);
1453 	PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);
1454 
1455 	dict = PyModule_GetDict(module);
1456 	if (dict == NULL)
1457 		goto error;
1458 
1459 	for (i = 0; perf__constants[i].name != NULL; i++) {
1460 		obj = _PyLong_FromLong(perf__constants[i].value);
1461 		if (obj == NULL)
1462 			goto error;
1463 		PyDict_SetItemString(dict, perf__constants[i].name, obj);
1464 		Py_DECREF(obj);
1465 	}
1466 
1467 error:
1468 	if (PyErr_Occurred())
1469 		PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
1470 #if PY_MAJOR_VERSION >= 3
1471 	return module;
1472 #endif
1473 }
1474 
1475 /*
1476  * Dummy, to avoid dragging all the test_attr infrastructure into the python
1477  * binding.
1478  */
1479 void test_attr__open(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu,
1480                      int fd, int group_fd, unsigned long flags)
1481 {
1482 }
1483