1// SPDX-License-Identifier: GPL-2.0
2#include <Python.h>
3#include <structmember.h>
4#include <inttypes.h>
5#include <poll.h>
6#include <linux/err.h>
7#include <perf/cpumap.h>
8#ifdef HAVE_LIBTRACEEVENT
9#include <traceevent/event-parse.h>
10#endif
11#include <perf/mmap.h>
12#include "evlist.h"
13#include "callchain.h"
14#include "evsel.h"
15#include "event.h"
16#include "print_binary.h"
17#include "thread_map.h"
18#include "trace-event.h"
19#include "mmap.h"
20#include "stat.h"
21#include "metricgroup.h"
22#include "util/bpf-filter.h"
23#include "util/env.h"
24#include "util/pmu.h"
25#include "util/pmus.h"
26#include <internal/lib.h>
27#include "util.h"
28
29#if PY_MAJOR_VERSION < 3
30#define _PyUnicode_FromString(arg) \
31 PyString_FromString(arg)
32#define _PyUnicode_AsString(arg) \
33 PyString_AsString(arg)
34#define _PyUnicode_FromFormat(...) \
35 PyString_FromFormat(__VA_ARGS__)
36#define _PyLong_FromLong(arg) \
37 PyInt_FromLong(arg)
38
39#else
40
41#define _PyUnicode_FromString(arg) \
42 PyUnicode_FromString(arg)
43#define _PyUnicode_FromFormat(...) \
44 PyUnicode_FromFormat(__VA_ARGS__)
45#define _PyLong_FromLong(arg) \
46 PyLong_FromLong(arg)
47#endif
48
49#ifndef Py_TYPE
50#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
51#endif
52
53/*
54 * Avoid bringing in event parsing.
55 */
56int parse_event(struct evlist *evlist __maybe_unused, const char *str __maybe_unused)
57{
58 return 0;
59}
60
61/*
62 * Provide these two so that we don't have to link against callchain.c and
63 * start dragging hist.c, etc.
64 */
65struct callchain_param callchain_param;
66
67int parse_callchain_record(const char *arg __maybe_unused,
68 struct callchain_param *param __maybe_unused)
69{
70 return 0;
71}
72
73/*
74 * Add these so as not to drag in util/env.c
75 */
76struct perf_env perf_env;
77
78const char *perf_env__cpuid(struct perf_env *env __maybe_unused)
79{
80 return NULL;
81}
82
83// This one is a bit easier, it wouldn't drag in too much, but leave it as a stub since we need it here
84const char *perf_env__arch(struct perf_env *env __maybe_unused)
85{
86 return NULL;
87}
88
89/*
90 * These ones are needed so as not to drag in the PMU bandwagon, the jevents
91 * generated pmu_sys_event_tables, etc. So far evsel__find_pmu() is used only
92 * for per-PMU perf_event_attr.exclude_guest handling, which is not really
93 * needed for the perf python binding's known use cases; revisit if this
94 * becomes necessary.
95 */
96struct perf_pmu *evsel__find_pmu(const struct evsel *evsel __maybe_unused)
97{
98 return NULL;
99}
100
101int perf_pmu__scan_file(const struct perf_pmu *pmu, const char *name, const char *fmt, ...)
102{
103 return EOF;
104}
105
106const char *perf_pmu__name_from_config(struct perf_pmu *pmu __maybe_unused, u64 config __maybe_unused)
107{
108 return NULL;
109}
110
111struct perf_pmu *perf_pmus__find_by_type(unsigned int type __maybe_unused)
112{
113 return NULL;
114}
115
116int perf_pmus__num_core_pmus(void)
117{
118 return 1;
119}
120
121bool evsel__is_aux_event(const struct evsel *evsel __maybe_unused)
122{
123 return false;
124}
125
126bool perf_pmus__supports_extended_type(void)
127{
128 return false;
129}
130
131/*
132 * Add this one here so as not to drag in util/metricgroup.c
133 */
134int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
135 struct rblist *new_metric_events,
136 struct rblist *old_metric_events)
137{
138 return 0;
139}
140
141/*
142 * Add this one here so as not to drag in util/trace-event-info.c
143 */
144char *tracepoint_id_to_name(u64 config)
145{
146 return NULL;
147}
148
149/*
150 * XXX: All these evsel destructors need some better mechanism, like a linked
151 * list of destructors registered when the relevant code indeed is used instead
152 * of having more and more calls in perf_evsel__delete(). -- acme
153 *
154 * For now, add some more:
155 *
156 * Not to drag the BPF bandwagon...
157 */
158void bpf_counter__destroy(struct evsel *evsel);
159int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
160int bpf_counter__disable(struct evsel *evsel);
161
162void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
163{
164}
165
166int bpf_counter__install_pe(struct evsel *evsel __maybe_unused, int cpu __maybe_unused, int fd __maybe_unused)
167{
168 return 0;
169}
170
171int bpf_counter__disable(struct evsel *evsel __maybe_unused)
172{
173 return 0;
174}
175
176// Stubs so as not to drag in util/bpf-filter.c
177#ifdef HAVE_BPF_SKEL
178int perf_bpf_filter__prepare(struct evsel *evsel __maybe_unused)
179{
180 return 0;
181}
182
183int perf_bpf_filter__destroy(struct evsel *evsel __maybe_unused)
184{
185 return 0;
186}
187#endif
188
189/*
190 * Support debug printing even though util/debug.c is not linked. That means
191 * implementing 'verbose' and 'eprintf'.
192 */
193int verbose;
194int debug_kmaps;
195int debug_peo_args;
196
197int eprintf(int level, int var, const char *fmt, ...);
198
199int eprintf(int level, int var, const char *fmt, ...)
200{
201 va_list args;
202 int ret = 0;
203
204 if (var >= level) {
205 va_start(args, fmt);
206 ret = vfprintf(stderr, fmt, args);
207 va_end(args);
208 }
209
210 return ret;
211}
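/*
 * For illustration: with the gate above and verbose = 1, a call like
 *
 *	eprintf(2, verbose, "won't be printed\n");
 *
 * is suppressed, while eprintf(1, verbose, "...") goes to stderr, which is
 * enough to keep the tools' pr_debug()-style output working without linking
 * util/debug.c.
 */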
212
213/* Define PyVarObject_HEAD_INIT for python 2.5 */
214#ifndef PyVarObject_HEAD_INIT
215# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
216#endif
217
218#if PY_MAJOR_VERSION < 3
219PyMODINIT_FUNC initperf(void);
220#else
221PyMODINIT_FUNC PyInit_perf(void);
222#endif
223
224#define member_def(type, member, ptype, help) \
225 { #member, ptype, \
226 offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
227 0, help }
228
229#define sample_member_def(name, member, ptype, help) \
230 { #name, ptype, \
231 offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \
232 0, help }
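/*
 * For illustration, member_def(perf_record_comm, pid, T_UINT, "event pid")
 * expands to roughly:
 *
 *	{ "pid", T_UINT,
 *	  offsetof(struct pyrf_event, event) + offsetof(struct perf_record_comm, pid),
 *	  0, "event pid" }
 *
 * i.e. Python attribute reads go straight into the copy of the record kept in
 * struct pyrf_event below, with no per-attribute glue code.
 */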
233
234struct pyrf_event {
235 PyObject_HEAD
236 struct evsel *evsel;
237 struct perf_sample sample;
238 union perf_event event;
239};
240
241#define sample_members \
242	sample_member_def(sample_ip, ip, T_ULONGLONG, "event ip"),		 \
243 sample_member_def(sample_pid, pid, T_INT, "event pid"), \
244 sample_member_def(sample_tid, tid, T_INT, "event tid"), \
245 sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \
246 sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"), \
247 sample_member_def(sample_id, id, T_ULONGLONG, "event id"), \
248 sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
249 sample_member_def(sample_period, period, T_ULONGLONG, "event period"), \
250 sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),
251
252static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");
253
254static PyMemberDef pyrf_mmap_event__members[] = {
255 sample_members
256 member_def(perf_event_header, type, T_UINT, "event type"),
257 member_def(perf_event_header, misc, T_UINT, "event misc"),
258 member_def(perf_record_mmap, pid, T_UINT, "event pid"),
259 member_def(perf_record_mmap, tid, T_UINT, "event tid"),
260 member_def(perf_record_mmap, start, T_ULONGLONG, "start of the map"),
261 member_def(perf_record_mmap, len, T_ULONGLONG, "map length"),
262 member_def(perf_record_mmap, pgoff, T_ULONGLONG, "page offset"),
263 member_def(perf_record_mmap, filename, T_STRING_INPLACE, "backing store"),
264 { .name = NULL, },
265};
266
267static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
268{
269 PyObject *ret;
270 char *s;
271
272 if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRI_lx64 ", "
273 "length: %#" PRI_lx64 ", offset: %#" PRI_lx64 ", "
274 "filename: %s }",
275 pevent->event.mmap.pid, pevent->event.mmap.tid,
276 pevent->event.mmap.start, pevent->event.mmap.len,
277 pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
278 ret = PyErr_NoMemory();
279 } else {
280 ret = _PyUnicode_FromString(s);
281 free(s);
282 }
283 return ret;
284}
285
286static PyTypeObject pyrf_mmap_event__type = {
287 PyVarObject_HEAD_INIT(NULL, 0)
288 .tp_name = "perf.mmap_event",
289 .tp_basicsize = sizeof(struct pyrf_event),
290 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
291 .tp_doc = pyrf_mmap_event__doc,
292 .tp_members = pyrf_mmap_event__members,
293 .tp_repr = (reprfunc)pyrf_mmap_event__repr,
294};
295
296static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");
297
298static PyMemberDef pyrf_task_event__members[] = {
299 sample_members
300 member_def(perf_event_header, type, T_UINT, "event type"),
301 member_def(perf_record_fork, pid, T_UINT, "event pid"),
302 member_def(perf_record_fork, ppid, T_UINT, "event ppid"),
303 member_def(perf_record_fork, tid, T_UINT, "event tid"),
304 member_def(perf_record_fork, ptid, T_UINT, "event ptid"),
305 member_def(perf_record_fork, time, T_ULONGLONG, "timestamp"),
306 { .name = NULL, },
307};
308
309static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
310{
311 return _PyUnicode_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
312 "ptid: %u, time: %" PRI_lu64 "}",
313 pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
314 pevent->event.fork.pid,
315 pevent->event.fork.ppid,
316 pevent->event.fork.tid,
317 pevent->event.fork.ptid,
318 pevent->event.fork.time);
319}
320
321static PyTypeObject pyrf_task_event__type = {
322 PyVarObject_HEAD_INIT(NULL, 0)
323 .tp_name = "perf.task_event",
324 .tp_basicsize = sizeof(struct pyrf_event),
325 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
326 .tp_doc = pyrf_task_event__doc,
327 .tp_members = pyrf_task_event__members,
328 .tp_repr = (reprfunc)pyrf_task_event__repr,
329};
330
331static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");
332
333static PyMemberDef pyrf_comm_event__members[] = {
334 sample_members
335 member_def(perf_event_header, type, T_UINT, "event type"),
336 member_def(perf_record_comm, pid, T_UINT, "event pid"),
337 member_def(perf_record_comm, tid, T_UINT, "event tid"),
338 member_def(perf_record_comm, comm, T_STRING_INPLACE, "process name"),
339 { .name = NULL, },
340};
341
342static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
343{
344 return _PyUnicode_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
345 pevent->event.comm.pid,
346 pevent->event.comm.tid,
347 pevent->event.comm.comm);
348}
349
350static PyTypeObject pyrf_comm_event__type = {
351 PyVarObject_HEAD_INIT(NULL, 0)
352 .tp_name = "perf.comm_event",
353 .tp_basicsize = sizeof(struct pyrf_event),
354 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
355 .tp_doc = pyrf_comm_event__doc,
356 .tp_members = pyrf_comm_event__members,
357 .tp_repr = (reprfunc)pyrf_comm_event__repr,
358};
359
360static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");
361
362static PyMemberDef pyrf_throttle_event__members[] = {
363 sample_members
364 member_def(perf_event_header, type, T_UINT, "event type"),
365 member_def(perf_record_throttle, time, T_ULONGLONG, "timestamp"),
366 member_def(perf_record_throttle, id, T_ULONGLONG, "event id"),
367 member_def(perf_record_throttle, stream_id, T_ULONGLONG, "event stream id"),
368 { .name = NULL, },
369};
370
371static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
372{
373 struct perf_record_throttle *te = (struct perf_record_throttle *)(&pevent->event.header + 1);
374
375 return _PyUnicode_FromFormat("{ type: %sthrottle, time: %" PRI_lu64 ", id: %" PRI_lu64
376 ", stream_id: %" PRI_lu64 " }",
377 pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
378 te->time, te->id, te->stream_id);
379}
380
381static PyTypeObject pyrf_throttle_event__type = {
382 PyVarObject_HEAD_INIT(NULL, 0)
383 .tp_name = "perf.throttle_event",
384 .tp_basicsize = sizeof(struct pyrf_event),
385 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
386 .tp_doc = pyrf_throttle_event__doc,
387 .tp_members = pyrf_throttle_event__members,
388 .tp_repr = (reprfunc)pyrf_throttle_event__repr,
389};
390
391static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");
392
393static PyMemberDef pyrf_lost_event__members[] = {
394 sample_members
395 member_def(perf_record_lost, id, T_ULONGLONG, "event id"),
396 member_def(perf_record_lost, lost, T_ULONGLONG, "number of lost events"),
397 { .name = NULL, },
398};
399
400static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent)
401{
402 PyObject *ret;
403 char *s;
404
405 if (asprintf(&s, "{ type: lost, id: %#" PRI_lx64 ", "
406 "lost: %#" PRI_lx64 " }",
407 pevent->event.lost.id, pevent->event.lost.lost) < 0) {
408 ret = PyErr_NoMemory();
409 } else {
410 ret = _PyUnicode_FromString(s);
411 free(s);
412 }
413 return ret;
414}
415
416static PyTypeObject pyrf_lost_event__type = {
417 PyVarObject_HEAD_INIT(NULL, 0)
418 .tp_name = "perf.lost_event",
419 .tp_basicsize = sizeof(struct pyrf_event),
420 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
421 .tp_doc = pyrf_lost_event__doc,
422 .tp_members = pyrf_lost_event__members,
423 .tp_repr = (reprfunc)pyrf_lost_event__repr,
424};
425
426static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");
427
428static PyMemberDef pyrf_read_event__members[] = {
429 sample_members
430 member_def(perf_record_read, pid, T_UINT, "event pid"),
431 member_def(perf_record_read, tid, T_UINT, "event tid"),
432 { .name = NULL, },
433};
434
435static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent)
436{
437 return _PyUnicode_FromFormat("{ type: read, pid: %u, tid: %u }",
438 pevent->event.read.pid,
439 pevent->event.read.tid);
440 /*
441 * FIXME: return the array of read values,
442 * making this method useful ;-)
443 */
444}
445
446static PyTypeObject pyrf_read_event__type = {
447 PyVarObject_HEAD_INIT(NULL, 0)
448 .tp_name = "perf.read_event",
449 .tp_basicsize = sizeof(struct pyrf_event),
450 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
451 .tp_doc = pyrf_read_event__doc,
452 .tp_members = pyrf_read_event__members,
453 .tp_repr = (reprfunc)pyrf_read_event__repr,
454};
455
456static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");
457
458static PyMemberDef pyrf_sample_event__members[] = {
459 sample_members
460 member_def(perf_event_header, type, T_UINT, "event type"),
461 { .name = NULL, },
462};
463
464static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent)
465{
466 PyObject *ret;
467 char *s;
468
469 if (asprintf(&s, "{ type: sample }") < 0) {
470 ret = PyErr_NoMemory();
471 } else {
472 ret = _PyUnicode_FromString(s);
473 free(s);
474 }
475 return ret;
476}
477
478#ifdef HAVE_LIBTRACEEVENT
479static bool is_tracepoint(struct pyrf_event *pevent)
480{
481 return pevent->evsel->core.attr.type == PERF_TYPE_TRACEPOINT;
482}
483
484static PyObject*
485tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field)
486{
487 struct tep_handle *pevent = field->event->tep;
488 void *data = pe->sample.raw_data;
489 PyObject *ret = NULL;
490 unsigned long long val;
491 unsigned int offset, len;
492
493 if (field->flags & TEP_FIELD_IS_ARRAY) {
494 offset = field->offset;
495 len = field->size;
496 if (field->flags & TEP_FIELD_IS_DYNAMIC) {
497 val = tep_read_number(pevent, data + offset, len);
498 offset = val;
499 len = offset >> 16;
500 offset &= 0xffff;
501 if (tep_field_is_relative(field->flags))
502 offset += field->offset + field->size;
503 }
504 if (field->flags & TEP_FIELD_IS_STRING &&
505 is_printable_array(data + offset, len)) {
506 ret = _PyUnicode_FromString((char *)data + offset);
507 } else {
508 ret = PyByteArray_FromStringAndSize((const char *) data + offset, len);
509 field->flags &= ~TEP_FIELD_IS_STRING;
510 }
511 } else {
512 val = tep_read_number(pevent, data + field->offset,
513 field->size);
514 if (field->flags & TEP_FIELD_IS_POINTER)
515 ret = PyLong_FromUnsignedLong((unsigned long) val);
516 else if (field->flags & TEP_FIELD_IS_SIGNED)
517 ret = PyLong_FromLong((long) val);
518 else
519 ret = PyLong_FromUnsignedLong((unsigned long) val);
520 }
521
522 return ret;
523}
524
525static PyObject*
526get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
527{
528 const char *str = _PyUnicode_AsString(PyObject_Str(attr_name));
529 struct evsel *evsel = pevent->evsel;
530 struct tep_format_field *field;
531
532 if (!evsel->tp_format) {
533 struct tep_event *tp_format;
534
535 tp_format = trace_event__tp_format_id(evsel->core.attr.config);
536 if (IS_ERR_OR_NULL(tp_format))
537 return NULL;
538
539 evsel->tp_format = tp_format;
540 }
541
542 field = tep_find_any_field(evsel->tp_format, str);
543 if (!field)
544 return NULL;
545
546 return tracepoint_field(pevent, field);
547}
548#endif /* HAVE_LIBTRACEEVENT */
549
550static PyObject*
551pyrf_sample_event__getattro(struct pyrf_event *pevent, PyObject *attr_name)
552{
553 PyObject *obj = NULL;
554
555#ifdef HAVE_LIBTRACEEVENT
556 if (is_tracepoint(pevent))
557 obj = get_tracepoint_field(pevent, attr_name);
558#endif
559
560 return obj ?: PyObject_GenericGetAttr((PyObject *) pevent, attr_name);
561}
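/*
 * A rough usage sketch, assuming a build with libtraceevent and an evsel
 * opened on a tracepoint such as sched:sched_switch (the field names are
 * examples taken from that tracepoint's format):
 *
 *	event = evlist.read_on_cpu(cpu)
 *	if isinstance(event, perf.sample_event):
 *		print(event.prev_comm, event.next_pid)
 *
 * Attribute lookups on a tracepoint sample first go through
 * get_tracepoint_field() above, which decodes the raw data via tep, and only
 * then fall back to the generic member lookup.
 */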
562
563static PyTypeObject pyrf_sample_event__type = {
564 PyVarObject_HEAD_INIT(NULL, 0)
565 .tp_name = "perf.sample_event",
566 .tp_basicsize = sizeof(struct pyrf_event),
567 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
568 .tp_doc = pyrf_sample_event__doc,
569 .tp_members = pyrf_sample_event__members,
570 .tp_repr = (reprfunc)pyrf_sample_event__repr,
571 .tp_getattro = (getattrofunc) pyrf_sample_event__getattro,
572};
573
574static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");
575
576static PyMemberDef pyrf_context_switch_event__members[] = {
577 sample_members
578 member_def(perf_event_header, type, T_UINT, "event type"),
579 member_def(perf_record_switch, next_prev_pid, T_UINT, "next/prev pid"),
580 member_def(perf_record_switch, next_prev_tid, T_UINT, "next/prev tid"),
581 { .name = NULL, },
582};
583
584static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent)
585{
586 PyObject *ret;
587 char *s;
588
589 if (asprintf(&s, "{ type: context_switch, next_prev_pid: %u, next_prev_tid: %u, switch_out: %u }",
590 pevent->event.context_switch.next_prev_pid,
591 pevent->event.context_switch.next_prev_tid,
592 !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) {
593 ret = PyErr_NoMemory();
594 } else {
595 ret = _PyUnicode_FromString(s);
596 free(s);
597 }
598 return ret;
599}
600
601static PyTypeObject pyrf_context_switch_event__type = {
602 PyVarObject_HEAD_INIT(NULL, 0)
603 .tp_name = "perf.context_switch_event",
604 .tp_basicsize = sizeof(struct pyrf_event),
605 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
606 .tp_doc = pyrf_context_switch_event__doc,
607 .tp_members = pyrf_context_switch_event__members,
608 .tp_repr = (reprfunc)pyrf_context_switch_event__repr,
609};
610
611static int pyrf_event__setup_types(void)
612{
613 int err;
614 pyrf_mmap_event__type.tp_new =
615 pyrf_task_event__type.tp_new =
616 pyrf_comm_event__type.tp_new =
617 pyrf_lost_event__type.tp_new =
618 pyrf_read_event__type.tp_new =
619 pyrf_sample_event__type.tp_new =
620 pyrf_context_switch_event__type.tp_new =
621 pyrf_throttle_event__type.tp_new = PyType_GenericNew;
622 err = PyType_Ready(&pyrf_mmap_event__type);
623 if (err < 0)
624 goto out;
625 err = PyType_Ready(&pyrf_lost_event__type);
626 if (err < 0)
627 goto out;
628 err = PyType_Ready(&pyrf_task_event__type);
629 if (err < 0)
630 goto out;
631 err = PyType_Ready(&pyrf_comm_event__type);
632 if (err < 0)
633 goto out;
634 err = PyType_Ready(&pyrf_throttle_event__type);
635 if (err < 0)
636 goto out;
637 err = PyType_Ready(&pyrf_read_event__type);
638 if (err < 0)
639 goto out;
640 err = PyType_Ready(&pyrf_sample_event__type);
641 if (err < 0)
642 goto out;
643 err = PyType_Ready(&pyrf_context_switch_event__type);
644 if (err < 0)
645 goto out;
646out:
647 return err;
648}
649
650static PyTypeObject *pyrf_event__type[] = {
651 [PERF_RECORD_MMAP] = &pyrf_mmap_event__type,
652 [PERF_RECORD_LOST] = &pyrf_lost_event__type,
653 [PERF_RECORD_COMM] = &pyrf_comm_event__type,
654 [PERF_RECORD_EXIT] = &pyrf_task_event__type,
655 [PERF_RECORD_THROTTLE] = &pyrf_throttle_event__type,
656 [PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type,
657 [PERF_RECORD_FORK] = &pyrf_task_event__type,
658 [PERF_RECORD_READ] = &pyrf_read_event__type,
659 [PERF_RECORD_SAMPLE] = &pyrf_sample_event__type,
660 [PERF_RECORD_SWITCH] = &pyrf_context_switch_event__type,
661 [PERF_RECORD_SWITCH_CPU_WIDE] = &pyrf_context_switch_event__type,
662};
663
664static PyObject *pyrf_event__new(union perf_event *event)
665{
666 struct pyrf_event *pevent;
667 PyTypeObject *ptype;
668
669 if ((event->header.type < PERF_RECORD_MMAP ||
670 event->header.type > PERF_RECORD_SAMPLE) &&
671 !(event->header.type == PERF_RECORD_SWITCH ||
672 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE))
673 return NULL;
674
675 ptype = pyrf_event__type[event->header.type];
676 pevent = PyObject_New(struct pyrf_event, ptype);
677 if (pevent != NULL)
678 memcpy(&pevent->event, event, event->header.size);
679 return (PyObject *)pevent;
680}
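/*
 * For illustration, a PERF_RECORD_COMM record read from the ring buffer comes
 * back as a perf.comm_event instance, and repr() on it goes through the
 * __repr__ methods above, producing something like (values made up):
 *
 *	{ type: comm, pid: 1234, tid: 1234, comm: bash }
 */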
681
682struct pyrf_cpu_map {
683 PyObject_HEAD
684
685 struct perf_cpu_map *cpus;
686};
687
688static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
689 PyObject *args, PyObject *kwargs)
690{
691 static char *kwlist[] = { "cpustr", NULL };
692 char *cpustr = NULL;
693
694 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
695 kwlist, &cpustr))
696 return -1;
697
698 pcpus->cpus = perf_cpu_map__new(cpustr);
699 if (pcpus->cpus == NULL)
700 return -1;
701 return 0;
702}
703
704static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
705{
706 perf_cpu_map__put(pcpus->cpus);
707 Py_TYPE(pcpus)->tp_free((PyObject*)pcpus);
708}
709
710static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
711{
712 struct pyrf_cpu_map *pcpus = (void *)obj;
713
714 return perf_cpu_map__nr(pcpus->cpus);
715}
716
717static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
718{
719 struct pyrf_cpu_map *pcpus = (void *)obj;
720
721 if (i >= perf_cpu_map__nr(pcpus->cpus))
722 return NULL;
723
724 return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu);
725}
726
727static PySequenceMethods pyrf_cpu_map__sequence_methods = {
728 .sq_length = pyrf_cpu_map__length,
729 .sq_item = pyrf_cpu_map__item,
730};
731
732static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");
733
734static PyTypeObject pyrf_cpu_map__type = {
735 PyVarObject_HEAD_INIT(NULL, 0)
736 .tp_name = "perf.cpu_map",
737 .tp_basicsize = sizeof(struct pyrf_cpu_map),
738 .tp_dealloc = (destructor)pyrf_cpu_map__delete,
739 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
740 .tp_doc = pyrf_cpu_map__doc,
741 .tp_as_sequence = &pyrf_cpu_map__sequence_methods,
742 .tp_init = (initproc)pyrf_cpu_map__init,
743};
744
745static int pyrf_cpu_map__setup_types(void)
746{
747 pyrf_cpu_map__type.tp_new = PyType_GenericNew;
748 return PyType_Ready(&pyrf_cpu_map__type);
749}
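/*
 * A short usage sketch, assuming the module is built and importable as
 * 'perf' (the "0-3" cpu list is an arbitrary example):
 *
 *	import perf
 *	cpus = perf.cpu_map("0-3")	# perf.cpu_map() gives all online CPUs
 *	print(len(cpus), cpus[0])	# sq_length / sq_item above
 */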
750
751struct pyrf_thread_map {
752 PyObject_HEAD
753
754 struct perf_thread_map *threads;
755};
756
757static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
758 PyObject *args, PyObject *kwargs)
759{
760 static char *kwlist[] = { "pid", "tid", "uid", NULL };
761 int pid = -1, tid = -1, uid = UINT_MAX;
762
763 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii",
764 kwlist, &pid, &tid, &uid))
765 return -1;
766
767 pthreads->threads = thread_map__new(pid, tid, uid);
768 if (pthreads->threads == NULL)
769 return -1;
770 return 0;
771}
772
773static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
774{
775 perf_thread_map__put(pthreads->threads);
776 Py_TYPE(pthreads)->tp_free((PyObject*)pthreads);
777}
778
779static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
780{
781 struct pyrf_thread_map *pthreads = (void *)obj;
782
783 return perf_thread_map__nr(pthreads->threads);
784}
785
786static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
787{
788 struct pyrf_thread_map *pthreads = (void *)obj;
789
790 if (i >= perf_thread_map__nr(pthreads->threads))
791 return NULL;
792
793 return Py_BuildValue("i", perf_thread_map__pid(pthreads->threads, i));
794}
795
796static PySequenceMethods pyrf_thread_map__sequence_methods = {
797 .sq_length = pyrf_thread_map__length,
798 .sq_item = pyrf_thread_map__item,
799};
800
801static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");
802
803static PyTypeObject pyrf_thread_map__type = {
804 PyVarObject_HEAD_INIT(NULL, 0)
805 .tp_name = "perf.thread_map",
806 .tp_basicsize = sizeof(struct pyrf_thread_map),
807 .tp_dealloc = (destructor)pyrf_thread_map__delete,
808 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
809 .tp_doc = pyrf_thread_map__doc,
810 .tp_as_sequence = &pyrf_thread_map__sequence_methods,
811 .tp_init = (initproc)pyrf_thread_map__init,
812};
813
814static int pyrf_thread_map__setup_types(void)
815{
816 pyrf_thread_map__type.tp_new = PyType_GenericNew;
817 return PyType_Ready(&pyrf_thread_map__type);
818}
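/*
 * A short usage sketch (the pid is an arbitrary example): thread maps are
 * built from /proc, or hold the single entry -1 meaning "any thread":
 *
 *	import perf
 *	threads = perf.thread_map(pid = 1234)	# all threads of pid 1234
 *	any_thread = perf.thread_map(-1)
 *	print(len(threads), threads[0])
 */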
819
820struct pyrf_evsel {
821 PyObject_HEAD
822
823 struct evsel evsel;
824};
825
826static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
827 PyObject *args, PyObject *kwargs)
828{
829 struct perf_event_attr attr = {
830 .type = PERF_TYPE_HARDWARE,
831 .config = PERF_COUNT_HW_CPU_CYCLES,
832 .sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
833 };
834 static char *kwlist[] = {
835 "type",
836 "config",
837 "sample_freq",
838 "sample_period",
839 "sample_type",
840 "read_format",
841 "disabled",
842 "inherit",
843 "pinned",
844 "exclusive",
845 "exclude_user",
846 "exclude_kernel",
847 "exclude_hv",
848 "exclude_idle",
849 "mmap",
850 "context_switch",
851 "comm",
852 "freq",
853 "inherit_stat",
854 "enable_on_exec",
855 "task",
856 "watermark",
857 "precise_ip",
858 "mmap_data",
859 "sample_id_all",
860 "wakeup_events",
861 "bp_type",
862 "bp_addr",
863 "bp_len",
864 NULL
865 };
866 u64 sample_period = 0;
867 u32 disabled = 0,
868 inherit = 0,
869 pinned = 0,
870 exclusive = 0,
871 exclude_user = 0,
872 exclude_kernel = 0,
873 exclude_hv = 0,
874 exclude_idle = 0,
875 mmap = 0,
876 context_switch = 0,
877 comm = 0,
878 freq = 1,
879 inherit_stat = 0,
880 enable_on_exec = 0,
881 task = 0,
882 watermark = 0,
883 precise_ip = 0,
884 mmap_data = 0,
885 sample_id_all = 1;
886 int idx = 0;
887
888 if (!PyArg_ParseTupleAndKeywords(args, kwargs,
889 "|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
890 &attr.type, &attr.config, &attr.sample_freq,
891 &sample_period, &attr.sample_type,
892 &attr.read_format, &disabled, &inherit,
893 &pinned, &exclusive, &exclude_user,
894 &exclude_kernel, &exclude_hv, &exclude_idle,
895 &mmap, &context_switch, &comm, &freq, &inherit_stat,
896 &enable_on_exec, &task, &watermark,
897 &precise_ip, &mmap_data, &sample_id_all,
898 &attr.wakeup_events, &attr.bp_type,
899 &attr.bp_addr, &attr.bp_len, &idx))
900 return -1;
901
902	/* sample_period and sample_freq share a union, so set only one of them */
903 if (sample_period != 0) {
904 if (attr.sample_freq != 0)
905 return -1; /* FIXME: throw right exception */
906 attr.sample_period = sample_period;
907 }
908
909 /* Bitfields */
910 attr.disabled = disabled;
911 attr.inherit = inherit;
912 attr.pinned = pinned;
913 attr.exclusive = exclusive;
914 attr.exclude_user = exclude_user;
915 attr.exclude_kernel = exclude_kernel;
916 attr.exclude_hv = exclude_hv;
917 attr.exclude_idle = exclude_idle;
918 attr.mmap = mmap;
919 attr.context_switch = context_switch;
920 attr.comm = comm;
921 attr.freq = freq;
922 attr.inherit_stat = inherit_stat;
923 attr.enable_on_exec = enable_on_exec;
924 attr.task = task;
925 attr.watermark = watermark;
926 attr.precise_ip = precise_ip;
927 attr.mmap_data = mmap_data;
928 attr.sample_id_all = sample_id_all;
929 attr.size = sizeof(attr);
930
931 evsel__init(&pevsel->evsel, &attr, idx);
932 return 0;
933}
934
935static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
936{
937 evsel__exit(&pevsel->evsel);
938 Py_TYPE(pevsel)->tp_free((PyObject*)pevsel);
939}
940
941static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
942 PyObject *args, PyObject *kwargs)
943{
944 struct evsel *evsel = &pevsel->evsel;
945 struct perf_cpu_map *cpus = NULL;
946 struct perf_thread_map *threads = NULL;
947 PyObject *pcpus = NULL, *pthreads = NULL;
948 int group = 0, inherit = 0;
949 static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL };
950
951 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
952 &pcpus, &pthreads, &group, &inherit))
953 return NULL;
954
955 if (pthreads != NULL)
956 threads = ((struct pyrf_thread_map *)pthreads)->threads;
957
958 if (pcpus != NULL)
959 cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
960
961 evsel->core.attr.inherit = inherit;
962 /*
963	 * This will group just the fds for this single evsel; to group
964	 * multiple events, use evlist.open().
965 */
966 if (evsel__open(evsel, cpus, threads) < 0) {
967 PyErr_SetFromErrno(PyExc_OSError);
968 return NULL;
969 }
970
971 Py_INCREF(Py_None);
972 return Py_None;
973}
974
975static PyMethodDef pyrf_evsel__methods[] = {
976 {
977 .ml_name = "open",
978 .ml_meth = (PyCFunction)pyrf_evsel__open,
979 .ml_flags = METH_VARARGS | METH_KEYWORDS,
980 .ml_doc = PyDoc_STR("open the event selector file descriptor table.")
981 },
982 { .ml_name = NULL, }
983};
984
985static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector object.");
986
987static PyTypeObject pyrf_evsel__type = {
988 PyVarObject_HEAD_INIT(NULL, 0)
989 .tp_name = "perf.evsel",
990 .tp_basicsize = sizeof(struct pyrf_evsel),
991 .tp_dealloc = (destructor)pyrf_evsel__delete,
992 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
993 .tp_doc = pyrf_evsel__doc,
994 .tp_methods = pyrf_evsel__methods,
995 .tp_init = (initproc)pyrf_evsel__init,
996};
997
998static int pyrf_evsel__setup_types(void)
999{
1000 pyrf_evsel__type.tp_new = PyType_GenericNew;
1001 return PyType_Ready(&pyrf_evsel__type);
1002}
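/*
 * A rough usage sketch: construct an event with keyword arguments matching
 * the kwlist above, then open it on explicit cpu/thread maps (the values are
 * examples only):
 *
 *	import perf
 *	evsel = perf.evsel(type = perf.TYPE_HARDWARE,
 *			   config = perf.COUNT_HW_CPU_CYCLES,
 *			   disabled = 1, sample_period = 100000)
 *	evsel.open(cpus = perf.cpu_map(), threads = perf.thread_map(-1))
 */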
1003
1004struct pyrf_evlist {
1005 PyObject_HEAD
1006
1007 struct evlist evlist;
1008};
1009
1010static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
1011 PyObject *args, PyObject *kwargs __maybe_unused)
1012{
1013 PyObject *pcpus = NULL, *pthreads = NULL;
1014 struct perf_cpu_map *cpus;
1015 struct perf_thread_map *threads;
1016
1017 if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
1018 return -1;
1019
1020 threads = ((struct pyrf_thread_map *)pthreads)->threads;
1021 cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
1022 evlist__init(&pevlist->evlist, cpus, threads);
1023 return 0;
1024}
1025
1026static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
1027{
1028 evlist__exit(&pevlist->evlist);
1029 Py_TYPE(pevlist)->tp_free((PyObject*)pevlist);
1030}
1031
1032static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
1033 PyObject *args, PyObject *kwargs)
1034{
1035 struct evlist *evlist = &pevlist->evlist;
1036 static char *kwlist[] = { "pages", "overwrite", NULL };
1037 int pages = 128, overwrite = false;
1038
1039 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
1040 &pages, &overwrite))
1041 return NULL;
1042
1043 if (evlist__mmap(evlist, pages) < 0) {
1044 PyErr_SetFromErrno(PyExc_OSError);
1045 return NULL;
1046 }
1047
1048 Py_INCREF(Py_None);
1049 return Py_None;
1050}
1051
1052static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
1053 PyObject *args, PyObject *kwargs)
1054{
1055 struct evlist *evlist = &pevlist->evlist;
1056 static char *kwlist[] = { "timeout", NULL };
1057 int timeout = -1, n;
1058
1059 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
1060 return NULL;
1061
1062 n = evlist__poll(evlist, timeout);
1063 if (n < 0) {
1064 PyErr_SetFromErrno(PyExc_OSError);
1065 return NULL;
1066 }
1067
1068 return Py_BuildValue("i", n);
1069}
1070
1071static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
1072 PyObject *args __maybe_unused,
1073 PyObject *kwargs __maybe_unused)
1074{
1075 struct evlist *evlist = &pevlist->evlist;
1076 PyObject *list = PyList_New(0);
1077 int i;
1078
1079 for (i = 0; i < evlist->core.pollfd.nr; ++i) {
1080 PyObject *file;
1081#if PY_MAJOR_VERSION < 3
1082 FILE *fp = fdopen(evlist->core.pollfd.entries[i].fd, "r");
1083
1084 if (fp == NULL)
1085 goto free_list;
1086
1087 file = PyFile_FromFile(fp, "perf", "r", NULL);
1088#else
1089 file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1,
1090 NULL, NULL, NULL, 0);
1091#endif
1092 if (file == NULL)
1093 goto free_list;
1094
1095 if (PyList_Append(list, file) != 0) {
1096 Py_DECREF(file);
1097 goto free_list;
1098 }
1099
1100 Py_DECREF(file);
1101 }
1102
1103 return list;
1104free_list:
1105 return PyErr_NoMemory();
1106}
1107
1108
1109static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
1110 PyObject *args,
1111 PyObject *kwargs __maybe_unused)
1112{
1113 struct evlist *evlist = &pevlist->evlist;
1114 PyObject *pevsel;
1115 struct evsel *evsel;
1116
1117 if (!PyArg_ParseTuple(args, "O", &pevsel))
1118 return NULL;
1119
1120 Py_INCREF(pevsel);
1121 evsel = &((struct pyrf_evsel *)pevsel)->evsel;
1122 evsel->core.idx = evlist->core.nr_entries;
1123 evlist__add(evlist, evsel);
1124
1125 return Py_BuildValue("i", evlist->core.nr_entries);
1126}
1127
1128static struct mmap *get_md(struct evlist *evlist, int cpu)
1129{
1130 int i;
1131
1132 for (i = 0; i < evlist->core.nr_mmaps; i++) {
1133 struct mmap *md = &evlist->mmap[i];
1134
1135 if (md->core.cpu.cpu == cpu)
1136 return md;
1137 }
1138
1139 return NULL;
1140}
1141
1142static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
1143 PyObject *args, PyObject *kwargs)
1144{
1145 struct evlist *evlist = &pevlist->evlist;
1146 union perf_event *event;
1147 int sample_id_all = 1, cpu;
1148 static char *kwlist[] = { "cpu", "sample_id_all", NULL };
1149 struct mmap *md;
1150 int err;
1151
1152 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
1153 &cpu, &sample_id_all))
1154 return NULL;
1155
1156 md = get_md(evlist, cpu);
1157 if (!md)
1158 return NULL;
1159
1160 if (perf_mmap__read_init(&md->core) < 0)
1161 goto end;
1162
1163 event = perf_mmap__read_event(&md->core);
1164 if (event != NULL) {
1165 PyObject *pyevent = pyrf_event__new(event);
1166 struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
1167 struct evsel *evsel;
1168
1169 if (pyevent == NULL)
1170 return PyErr_NoMemory();
1171
1172 evsel = evlist__event2evsel(evlist, event);
1173 if (!evsel) {
1174 Py_INCREF(Py_None);
1175 return Py_None;
1176 }
1177
1178 pevent->evsel = evsel;
1179
1180 err = evsel__parse_sample(evsel, event, &pevent->sample);
1181
1182		/* Consume the event only after we have parsed it out. */
1183 perf_mmap__consume(&md->core);
1184
1185 if (err)
1186 return PyErr_Format(PyExc_OSError,
1187 "perf: can't parse sample, err=%d", err);
1188 return pyevent;
1189 }
1190end:
1191 Py_INCREF(Py_None);
1192 return Py_None;
1193}
1194
1195static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
1196 PyObject *args, PyObject *kwargs)
1197{
1198 struct evlist *evlist = &pevlist->evlist;
1199
1200 if (evlist__open(evlist) < 0) {
1201 PyErr_SetFromErrno(PyExc_OSError);
1202 return NULL;
1203 }
1204
1205 Py_INCREF(Py_None);
1206 return Py_None;
1207}
1208
1209static PyMethodDef pyrf_evlist__methods[] = {
1210 {
1211 .ml_name = "mmap",
1212 .ml_meth = (PyCFunction)pyrf_evlist__mmap,
1213 .ml_flags = METH_VARARGS | METH_KEYWORDS,
1214 .ml_doc = PyDoc_STR("mmap the file descriptor table.")
1215 },
1216 {
1217 .ml_name = "open",
1218 .ml_meth = (PyCFunction)pyrf_evlist__open,
1219 .ml_flags = METH_VARARGS | METH_KEYWORDS,
1220 .ml_doc = PyDoc_STR("open the file descriptors.")
1221 },
1222 {
1223 .ml_name = "poll",
1224 .ml_meth = (PyCFunction)pyrf_evlist__poll,
1225 .ml_flags = METH_VARARGS | METH_KEYWORDS,
1226 .ml_doc = PyDoc_STR("poll the file descriptor table.")
1227 },
1228 {
1229 .ml_name = "get_pollfd",
1230 .ml_meth = (PyCFunction)pyrf_evlist__get_pollfd,
1231 .ml_flags = METH_VARARGS | METH_KEYWORDS,
1232 .ml_doc = PyDoc_STR("get the poll file descriptor table.")
1233 },
1234 {
1235 .ml_name = "add",
1236 .ml_meth = (PyCFunction)pyrf_evlist__add,
1237 .ml_flags = METH_VARARGS | METH_KEYWORDS,
1238 .ml_doc = PyDoc_STR("adds an event selector to the list.")
1239 },
1240 {
1241 .ml_name = "read_on_cpu",
1242 .ml_meth = (PyCFunction)pyrf_evlist__read_on_cpu,
1243 .ml_flags = METH_VARARGS | METH_KEYWORDS,
1244 .ml_doc = PyDoc_STR("reads an event.")
1245 },
1246 { .ml_name = NULL, }
1247};
1248
1249static Py_ssize_t pyrf_evlist__length(PyObject *obj)
1250{
1251 struct pyrf_evlist *pevlist = (void *)obj;
1252
1253 return pevlist->evlist.core.nr_entries;
1254}
1255
1256static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
1257{
1258 struct pyrf_evlist *pevlist = (void *)obj;
1259 struct evsel *pos;
1260
1261 if (i >= pevlist->evlist.core.nr_entries)
1262 return NULL;
1263
1264 evlist__for_each_entry(&pevlist->evlist, pos) {
1265 if (i-- == 0)
1266 break;
1267 }
1268
1269 return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
1270}
1271
1272static PySequenceMethods pyrf_evlist__sequence_methods = {
1273 .sq_length = pyrf_evlist__length,
1274 .sq_item = pyrf_evlist__item,
1275};
1276
1277static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");
1278
1279static PyTypeObject pyrf_evlist__type = {
1280 PyVarObject_HEAD_INIT(NULL, 0)
1281 .tp_name = "perf.evlist",
1282 .tp_basicsize = sizeof(struct pyrf_evlist),
1283 .tp_dealloc = (destructor)pyrf_evlist__delete,
1284 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
1285 .tp_as_sequence = &pyrf_evlist__sequence_methods,
1286 .tp_doc = pyrf_evlist__doc,
1287 .tp_methods = pyrf_evlist__methods,
1288 .tp_init = (initproc)pyrf_evlist__init,
1289};
1290
1291static int pyrf_evlist__setup_types(void)
1292{
1293 pyrf_evlist__type.tp_new = PyType_GenericNew;
1294 return PyType_Ready(&pyrf_evlist__type);
1295}
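/*
 * A rough end-to-end sketch in the spirit of tools/perf/python/twatch.py,
 * tying evlist, evsel, cpu_map and thread_map together (all values are
 * examples only):
 *
 *	cpus, threads = perf.cpu_map(), perf.thread_map(-1)
 *	evsel = perf.evsel(type = perf.TYPE_SOFTWARE,
 *			   config = perf.COUNT_SW_DUMMY,
 *			   task = 1, comm = 1, freq = 0,
 *			   wakeup_events = 1, watermark = 1, sample_id_all = 1,
 *			   sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
 *	evsel.open(cpus = cpus, threads = threads)
 *	evlist = perf.evlist(cpus, threads)
 *	evlist.add(evsel)
 *	evlist.mmap()
 *	while True:
 *		evlist.poll(timeout = -1)
 *		for cpu in cpus:
 *			event = evlist.read_on_cpu(cpu)
 *			if event:
 *				print(event)
 */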
1296
1297#define PERF_CONST(name) { #name, PERF_##name }
1298
1299static struct {
1300 const char *name;
1301 int value;
1302} perf__constants[] = {
1303 PERF_CONST(TYPE_HARDWARE),
1304 PERF_CONST(TYPE_SOFTWARE),
1305 PERF_CONST(TYPE_TRACEPOINT),
1306 PERF_CONST(TYPE_HW_CACHE),
1307 PERF_CONST(TYPE_RAW),
1308 PERF_CONST(TYPE_BREAKPOINT),
1309
1310 PERF_CONST(COUNT_HW_CPU_CYCLES),
1311 PERF_CONST(COUNT_HW_INSTRUCTIONS),
1312 PERF_CONST(COUNT_HW_CACHE_REFERENCES),
1313 PERF_CONST(COUNT_HW_CACHE_MISSES),
1314 PERF_CONST(COUNT_HW_BRANCH_INSTRUCTIONS),
1315 PERF_CONST(COUNT_HW_BRANCH_MISSES),
1316 PERF_CONST(COUNT_HW_BUS_CYCLES),
1317 PERF_CONST(COUNT_HW_CACHE_L1D),
1318 PERF_CONST(COUNT_HW_CACHE_L1I),
1319 PERF_CONST(COUNT_HW_CACHE_LL),
1320 PERF_CONST(COUNT_HW_CACHE_DTLB),
1321 PERF_CONST(COUNT_HW_CACHE_ITLB),
1322 PERF_CONST(COUNT_HW_CACHE_BPU),
1323 PERF_CONST(COUNT_HW_CACHE_OP_READ),
1324 PERF_CONST(COUNT_HW_CACHE_OP_WRITE),
1325 PERF_CONST(COUNT_HW_CACHE_OP_PREFETCH),
1326 PERF_CONST(COUNT_HW_CACHE_RESULT_ACCESS),
1327 PERF_CONST(COUNT_HW_CACHE_RESULT_MISS),
1328
1329 PERF_CONST(COUNT_HW_STALLED_CYCLES_FRONTEND),
1330 PERF_CONST(COUNT_HW_STALLED_CYCLES_BACKEND),
1331
1332 PERF_CONST(COUNT_SW_CPU_CLOCK),
1333 PERF_CONST(COUNT_SW_TASK_CLOCK),
1334 PERF_CONST(COUNT_SW_PAGE_FAULTS),
1335 PERF_CONST(COUNT_SW_CONTEXT_SWITCHES),
1336 PERF_CONST(COUNT_SW_CPU_MIGRATIONS),
1337 PERF_CONST(COUNT_SW_PAGE_FAULTS_MIN),
1338 PERF_CONST(COUNT_SW_PAGE_FAULTS_MAJ),
1339 PERF_CONST(COUNT_SW_ALIGNMENT_FAULTS),
1340 PERF_CONST(COUNT_SW_EMULATION_FAULTS),
1341 PERF_CONST(COUNT_SW_DUMMY),
1342
1343 PERF_CONST(SAMPLE_IP),
1344 PERF_CONST(SAMPLE_TID),
1345 PERF_CONST(SAMPLE_TIME),
1346 PERF_CONST(SAMPLE_ADDR),
1347 PERF_CONST(SAMPLE_READ),
1348 PERF_CONST(SAMPLE_CALLCHAIN),
1349 PERF_CONST(SAMPLE_ID),
1350 PERF_CONST(SAMPLE_CPU),
1351 PERF_CONST(SAMPLE_PERIOD),
1352 PERF_CONST(SAMPLE_STREAM_ID),
1353 PERF_CONST(SAMPLE_RAW),
1354
1355 PERF_CONST(FORMAT_TOTAL_TIME_ENABLED),
1356 PERF_CONST(FORMAT_TOTAL_TIME_RUNNING),
1357 PERF_CONST(FORMAT_ID),
1358 PERF_CONST(FORMAT_GROUP),
1359
1360 PERF_CONST(RECORD_MMAP),
1361 PERF_CONST(RECORD_LOST),
1362 PERF_CONST(RECORD_COMM),
1363 PERF_CONST(RECORD_EXIT),
1364 PERF_CONST(RECORD_THROTTLE),
1365 PERF_CONST(RECORD_UNTHROTTLE),
1366 PERF_CONST(RECORD_FORK),
1367 PERF_CONST(RECORD_READ),
1368 PERF_CONST(RECORD_SAMPLE),
1369 PERF_CONST(RECORD_MMAP2),
1370 PERF_CONST(RECORD_AUX),
1371 PERF_CONST(RECORD_ITRACE_START),
1372 PERF_CONST(RECORD_LOST_SAMPLES),
1373 PERF_CONST(RECORD_SWITCH),
1374 PERF_CONST(RECORD_SWITCH_CPU_WIDE),
1375
1376 PERF_CONST(RECORD_MISC_SWITCH_OUT),
1377 { .name = NULL, },
1378};
1379
1380static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
1381 PyObject *args, PyObject *kwargs)
1382{
1383#ifndef HAVE_LIBTRACEEVENT
1384 return NULL;
1385#else
1386 struct tep_event *tp_format;
1387 static char *kwlist[] = { "sys", "name", NULL };
1388 char *sys = NULL;
1389 char *name = NULL;
1390
1391 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ss", kwlist,
1392 &sys, &name))
1393 return NULL;
1394
1395 tp_format = trace_event__tp_format(sys, name);
1396 if (IS_ERR(tp_format))
1397 return _PyLong_FromLong(-1);
1398
1399 return _PyLong_FromLong(tp_format->id);
1400#endif // HAVE_LIBTRACEEVENT
1401}
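/*
 * A short usage sketch (the tracepoint name is an example only): the returned
 * id is what goes into perf_event_attr.config for a PERF_TYPE_TRACEPOINT
 * event:
 *
 *	import perf
 *	cfg = perf.tracepoint(sys = "sched", name = "sched_switch")
 *	evsel = perf.evsel(type = perf.TYPE_TRACEPOINT, config = cfg,
 *			   sample_type = perf.SAMPLE_RAW | perf.SAMPLE_TID)
 */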
1402
1403static PyMethodDef perf__methods[] = {
1404 {
1405 .ml_name = "tracepoint",
1406 .ml_meth = (PyCFunction) pyrf__tracepoint,
1407 .ml_flags = METH_VARARGS | METH_KEYWORDS,
1408 .ml_doc = PyDoc_STR("Get tracepoint config.")
1409 },
1410 { .ml_name = NULL, }
1411};
1412
1413#if PY_MAJOR_VERSION < 3
1414PyMODINIT_FUNC initperf(void)
1415#else
1416PyMODINIT_FUNC PyInit_perf(void)
1417#endif
1418{
1419 PyObject *obj;
1420 int i;
1421 PyObject *dict;
1422#if PY_MAJOR_VERSION < 3
1423 PyObject *module = Py_InitModule("perf", perf__methods);
1424#else
1425 static struct PyModuleDef moduledef = {
1426 PyModuleDef_HEAD_INIT,
1427 "perf", /* m_name */
1428 "", /* m_doc */
1429 -1, /* m_size */
1430 perf__methods, /* m_methods */
1431 NULL, /* m_reload */
1432 NULL, /* m_traverse */
1433 NULL, /* m_clear */
1434 NULL, /* m_free */
1435 };
1436 PyObject *module = PyModule_Create(&moduledef);
1437#endif
1438
1439 if (module == NULL ||
1440 pyrf_event__setup_types() < 0 ||
1441 pyrf_evlist__setup_types() < 0 ||
1442 pyrf_evsel__setup_types() < 0 ||
1443 pyrf_thread_map__setup_types() < 0 ||
1444 pyrf_cpu_map__setup_types() < 0)
1445#if PY_MAJOR_VERSION < 3
1446 return;
1447#else
1448 return module;
1449#endif
1450
1451	/* The page_size is placed in the util object. */
1452 page_size = sysconf(_SC_PAGE_SIZE);
1453
1454 Py_INCREF(&pyrf_evlist__type);
1455 PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);
1456
1457 Py_INCREF(&pyrf_evsel__type);
1458 PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);
1459
1460 Py_INCREF(&pyrf_mmap_event__type);
1461 PyModule_AddObject(module, "mmap_event", (PyObject *)&pyrf_mmap_event__type);
1462
1463 Py_INCREF(&pyrf_lost_event__type);
1464 PyModule_AddObject(module, "lost_event", (PyObject *)&pyrf_lost_event__type);
1465
1466 Py_INCREF(&pyrf_comm_event__type);
1467 PyModule_AddObject(module, "comm_event", (PyObject *)&pyrf_comm_event__type);
1468
1469 Py_INCREF(&pyrf_task_event__type);
1470 PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);
1471
1472 Py_INCREF(&pyrf_throttle_event__type);
1473 PyModule_AddObject(module, "throttle_event", (PyObject *)&pyrf_throttle_event__type);
1474
1475 Py_INCREF(&pyrf_task_event__type);
1476 PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);
1477
1478 Py_INCREF(&pyrf_read_event__type);
1479 PyModule_AddObject(module, "read_event", (PyObject *)&pyrf_read_event__type);
1480
1481 Py_INCREF(&pyrf_sample_event__type);
1482 PyModule_AddObject(module, "sample_event", (PyObject *)&pyrf_sample_event__type);
1483
1484 Py_INCREF(&pyrf_context_switch_event__type);
1485 PyModule_AddObject(module, "switch_event", (PyObject *)&pyrf_context_switch_event__type);
1486
1487 Py_INCREF(&pyrf_thread_map__type);
1488 PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);
1489
1490 Py_INCREF(&pyrf_cpu_map__type);
1491 PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);
1492
1493 dict = PyModule_GetDict(module);
1494 if (dict == NULL)
1495 goto error;
1496
1497 for (i = 0; perf__constants[i].name != NULL; i++) {
1498 obj = _PyLong_FromLong(perf__constants[i].value);
1499 if (obj == NULL)
1500 goto error;
1501 PyDict_SetItemString(dict, perf__constants[i].name, obj);
1502 Py_DECREF(obj);
1503 }
1504
1505error:
1506 if (PyErr_Occurred())
1507 PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
1508#if PY_MAJOR_VERSION >= 3
1509 return module;
1510#endif
1511}
1512
1513/*
1514 * Dummy, to avoid dragging all the test_attr infrastructure into the python
1515 * binding.
1516 */
1517void test_attr__open(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu,
1518 int fd, int group_fd, unsigned long flags)
1519{
1520}
1521
1522void evlist__free_stats(struct evlist *evlist)
1523{
1524}
509 }
510
511 return ret;
512}
513
514static PyObject*
515get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
516{
517 const char *str = _PyUnicode_AsString(PyObject_Str(attr_name));
518 struct evsel *evsel = pevent->evsel;
519 struct tep_format_field *field;
520
521 if (!evsel->tp_format) {
522 struct tep_event *tp_format;
523
524 tp_format = trace_event__tp_format_id(evsel->core.attr.config);
525 if (IS_ERR_OR_NULL(tp_format))
526 return NULL;
527
528 evsel->tp_format = tp_format;
529 }
530
531 field = tep_find_any_field(evsel->tp_format, str);
532 if (!field)
533 return NULL;
534
535 return tracepoint_field(pevent, field);
536}
537#endif /* HAVE_LIBTRACEEVENT */
538
539static PyObject*
540pyrf_sample_event__getattro(struct pyrf_event *pevent, PyObject *attr_name)
541{
542 PyObject *obj = NULL;
543
544#ifdef HAVE_LIBTRACEEVENT
545 if (is_tracepoint(pevent))
546 obj = get_tracepoint_field(pevent, attr_name);
547#endif
548
549 return obj ?: PyObject_GenericGetAttr((PyObject *) pevent, attr_name);
550}
551
552static PyTypeObject pyrf_sample_event__type = {
553 PyVarObject_HEAD_INIT(NULL, 0)
554 .tp_name = "perf.sample_event",
555 .tp_basicsize = sizeof(struct pyrf_event),
556 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
557 .tp_doc = pyrf_sample_event__doc,
558 .tp_members = pyrf_sample_event__members,
559 .tp_repr = (reprfunc)pyrf_sample_event__repr,
560 .tp_getattro = (getattrofunc) pyrf_sample_event__getattro,
561};
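
/*
 * For tracepoint samples the getattro hook above falls back to the event's
 * format fields, so unknown attribute names are resolved via libtraceevent.
 * A rough sketch, assuming a sched:sched_switch tracepoint (the field names
 * prev_comm/prev_pid/next_comm/next_pid belong to that event and will differ
 * for other tracepoints):
 *
 *   event = evlist.read_on_cpu(cpu)
 *   if event and event.type == perf.RECORD_SAMPLE:
 *       print(event.prev_comm, event.prev_pid, "->",
 *             event.next_comm, event.next_pid)
 */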
562
563static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");
564
565static PyMemberDef pyrf_context_switch_event__members[] = {
566 sample_members
567 member_def(perf_event_header, type, T_UINT, "event type"),
568 member_def(perf_record_switch, next_prev_pid, T_UINT, "next/prev pid"),
569 member_def(perf_record_switch, next_prev_tid, T_UINT, "next/prev tid"),
570 { .name = NULL, },
571};
572
573static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent)
574{
575 PyObject *ret;
576 char *s;
577
578 if (asprintf(&s, "{ type: context_switch, next_prev_pid: %u, next_prev_tid: %u, switch_out: %u }",
579 pevent->event.context_switch.next_prev_pid,
580 pevent->event.context_switch.next_prev_tid,
581 !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) {
582 ret = PyErr_NoMemory();
583 } else {
584 ret = _PyUnicode_FromString(s);
585 free(s);
586 }
587 return ret;
588}
589
590static PyTypeObject pyrf_context_switch_event__type = {
591 PyVarObject_HEAD_INIT(NULL, 0)
592 .tp_name = "perf.context_switch_event",
593 .tp_basicsize = sizeof(struct pyrf_event),
594 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
595 .tp_doc = pyrf_context_switch_event__doc,
596 .tp_members = pyrf_context_switch_event__members,
597 .tp_repr = (reprfunc)pyrf_context_switch_event__repr,
598};
599
600static int pyrf_event__setup_types(void)
601{
602 int err;
603 pyrf_mmap_event__type.tp_new =
604 pyrf_task_event__type.tp_new =
605 pyrf_comm_event__type.tp_new =
606 pyrf_lost_event__type.tp_new =
607 pyrf_read_event__type.tp_new =
608 pyrf_sample_event__type.tp_new =
609 pyrf_context_switch_event__type.tp_new =
610 pyrf_throttle_event__type.tp_new = PyType_GenericNew;
611 err = PyType_Ready(&pyrf_mmap_event__type);
612 if (err < 0)
613 goto out;
614 err = PyType_Ready(&pyrf_lost_event__type);
615 if (err < 0)
616 goto out;
617 err = PyType_Ready(&pyrf_task_event__type);
618 if (err < 0)
619 goto out;
620 err = PyType_Ready(&pyrf_comm_event__type);
621 if (err < 0)
622 goto out;
623 err = PyType_Ready(&pyrf_throttle_event__type);
624 if (err < 0)
625 goto out;
626 err = PyType_Ready(&pyrf_read_event__type);
627 if (err < 0)
628 goto out;
629 err = PyType_Ready(&pyrf_sample_event__type);
630 if (err < 0)
631 goto out;
632 err = PyType_Ready(&pyrf_context_switch_event__type);
633 if (err < 0)
634 goto out;
635out:
636 return err;
637}
638
639static PyTypeObject *pyrf_event__type[] = {
640 [PERF_RECORD_MMAP] = &pyrf_mmap_event__type,
641 [PERF_RECORD_LOST] = &pyrf_lost_event__type,
642 [PERF_RECORD_COMM] = &pyrf_comm_event__type,
643 [PERF_RECORD_EXIT] = &pyrf_task_event__type,
644 [PERF_RECORD_THROTTLE] = &pyrf_throttle_event__type,
645 [PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type,
646 [PERF_RECORD_FORK] = &pyrf_task_event__type,
647 [PERF_RECORD_READ] = &pyrf_read_event__type,
648 [PERF_RECORD_SAMPLE] = &pyrf_sample_event__type,
649 [PERF_RECORD_SWITCH] = &pyrf_context_switch_event__type,
650 [PERF_RECORD_SWITCH_CPU_WIDE] = &pyrf_context_switch_event__type,
651};
652
653static PyObject *pyrf_event__new(union perf_event *event)
654{
655 struct pyrf_event *pevent;
656 PyTypeObject *ptype;
657
658 if ((event->header.type < PERF_RECORD_MMAP ||
659 event->header.type > PERF_RECORD_SAMPLE) &&
660 !(event->header.type == PERF_RECORD_SWITCH ||
661 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE))
662 return NULL;
663
664 ptype = pyrf_event__type[event->header.type];
665 pevent = PyObject_New(struct pyrf_event, ptype);
666 if (pevent != NULL)
667 memcpy(&pevent->event, event, event->header.size);
668 return (PyObject *)pevent;
669}
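
/*
 * The object handed back to Python (e.g. by evlist.read_on_cpu() below) is
 * an instance of one of the wrapper types in the table above, so scripts can
 * dispatch either on the exported classes or on the header type.  A small
 * sketch, using only members defined in this file:
 *
 *   event = evlist.read_on_cpu(cpu)
 *   if isinstance(event, perf.comm_event):
 *       print("comm:", event.pid, event.tid, event.comm)
 *   elif event and event.type in (perf.RECORD_FORK, perf.RECORD_EXIT):
 *       print("task:", event.pid, event.ppid, event.time)
 */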
670
671struct pyrf_cpu_map {
672 PyObject_HEAD
673
674 struct perf_cpu_map *cpus;
675};
676
677static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
678 PyObject *args, PyObject *kwargs)
679{
680 static char *kwlist[] = { "cpustr", NULL };
681 char *cpustr = NULL;
682
683 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
684 kwlist, &cpustr))
685 return -1;
686
687 pcpus->cpus = perf_cpu_map__new(cpustr);
688 if (pcpus->cpus == NULL)
689 return -1;
690 return 0;
691}
692
693static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
694{
695 perf_cpu_map__put(pcpus->cpus);
696 Py_TYPE(pcpus)->tp_free((PyObject*)pcpus);
697}
698
699static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
700{
701 struct pyrf_cpu_map *pcpus = (void *)obj;
702
703 return perf_cpu_map__nr(pcpus->cpus);
704}
705
706static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
707{
708 struct pyrf_cpu_map *pcpus = (void *)obj;
709
710 if (i >= perf_cpu_map__nr(pcpus->cpus))
711 return NULL;
712
713 return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu);
714}
715
716static PySequenceMethods pyrf_cpu_map__sequence_methods = {
717 .sq_length = pyrf_cpu_map__length,
718 .sq_item = pyrf_cpu_map__item,
719};
720
721static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");
722
723static PyTypeObject pyrf_cpu_map__type = {
724 PyVarObject_HEAD_INIT(NULL, 0)
725 .tp_name = "perf.cpu_map",
726 .tp_basicsize = sizeof(struct pyrf_cpu_map),
727 .tp_dealloc = (destructor)pyrf_cpu_map__delete,
728 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
729 .tp_doc = pyrf_cpu_map__doc,
730 .tp_as_sequence = &pyrf_cpu_map__sequence_methods,
731 .tp_init = (initproc)pyrf_cpu_map__init,
732};
733
734static int pyrf_cpu_map__setup_types(void)
735{
736 pyrf_cpu_map__type.tp_new = PyType_GenericNew;
737 return PyType_Ready(&pyrf_cpu_map__type);
738}
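
/*
 * Sketch of the intended Python usage: the optional "cpustr" keyword is
 * handed to perf_cpu_map__new(), so omitting it should give all online CPUs
 * and a string like "0,2-3" (just an example) selects specific ones.  The
 * sequence methods above make the map iterable and indexable:
 *
 *   import perf
 *
 *   cpus = perf.cpu_map()          # all online CPUs
 *   some = perf.cpu_map("0,2-3")   # explicit CPU list
 *   print(len(some), list(some))
 */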
739
740struct pyrf_thread_map {
741 PyObject_HEAD
742
743 struct perf_thread_map *threads;
744};
745
746static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
747 PyObject *args, PyObject *kwargs)
748{
749 static char *kwlist[] = { "pid", "tid", "uid", NULL };
750 int pid = -1, tid = -1, uid = UINT_MAX;
751
752 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii",
753 kwlist, &pid, &tid, &uid))
754 return -1;
755
756 pthreads->threads = thread_map__new(pid, tid, uid);
757 if (pthreads->threads == NULL)
758 return -1;
759 return 0;
760}
761
762static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
763{
764 perf_thread_map__put(pthreads->threads);
765 Py_TYPE(pthreads)->tp_free((PyObject*)pthreads);
766}
767
768static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
769{
770 struct pyrf_thread_map *pthreads = (void *)obj;
771
772 return perf_thread_map__nr(pthreads->threads);
773}
774
775static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
776{
777 struct pyrf_thread_map *pthreads = (void *)obj;
778
779 if (i >= perf_thread_map__nr(pthreads->threads))
780 return NULL;
781
782 return Py_BuildValue("i", perf_thread_map__pid(pthreads->threads, i));
783}
784
785static PySequenceMethods pyrf_thread_map__sequence_methods = {
786 .sq_length = pyrf_thread_map__length,
787 .sq_item = pyrf_thread_map__item,
788};
789
790static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");
791
792static PyTypeObject pyrf_thread_map__type = {
793 PyVarObject_HEAD_INIT(NULL, 0)
794 .tp_name = "perf.thread_map",
795 .tp_basicsize = sizeof(struct pyrf_thread_map),
796 .tp_dealloc = (destructor)pyrf_thread_map__delete,
797 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
798 .tp_doc = pyrf_thread_map__doc,
799 .tp_as_sequence = &pyrf_thread_map__sequence_methods,
800 .tp_init = (initproc)pyrf_thread_map__init,
801};
802
803static int pyrf_thread_map__setup_types(void)
804{
805 pyrf_thread_map__type.tp_new = PyType_GenericNew;
806 return PyType_Ready(&pyrf_thread_map__type);
807}
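
/*
 * Sketch of the intended Python usage: pid/tid/uid are forwarded to
 * thread_map__new(), so the defaults (-1/-1/UINT_MAX) yield a single dummy
 * entry (useful together with a cpu_map for system-wide monitoring), while
 * e.g. pid=1234 (a made-up pid) maps every thread of that process:
 *
 *   import perf
 *
 *   dummy = perf.thread_map()
 *   procs = perf.thread_map(pid=1234)
 *   print(len(procs), list(procs))
 */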
808
809struct pyrf_evsel {
810 PyObject_HEAD
811
812 struct evsel evsel;
813};
814
815static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
816 PyObject *args, PyObject *kwargs)
817{
818 struct perf_event_attr attr = {
819 .type = PERF_TYPE_HARDWARE,
820 .config = PERF_COUNT_HW_CPU_CYCLES,
821 .sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
822 };
823 static char *kwlist[] = {
824 "type",
825 "config",
826 "sample_freq",
827 "sample_period",
828 "sample_type",
829 "read_format",
830 "disabled",
831 "inherit",
832 "pinned",
833 "exclusive",
834 "exclude_user",
835 "exclude_kernel",
836 "exclude_hv",
837 "exclude_idle",
838 "mmap",
839 "context_switch",
840 "comm",
841 "freq",
842 "inherit_stat",
843 "enable_on_exec",
844 "task",
845 "watermark",
846 "precise_ip",
847 "mmap_data",
848 "sample_id_all",
849 "wakeup_events",
850 "bp_type",
851 "bp_addr",
852 "bp_len",
853 NULL
854 };
855 u64 sample_period = 0;
856 u32 disabled = 0,
857 inherit = 0,
858 pinned = 0,
859 exclusive = 0,
860 exclude_user = 0,
861 exclude_kernel = 0,
862 exclude_hv = 0,
863 exclude_idle = 0,
864 mmap = 0,
865 context_switch = 0,
866 comm = 0,
867 freq = 1,
868 inherit_stat = 0,
869 enable_on_exec = 0,
870 task = 0,
871 watermark = 0,
872 precise_ip = 0,
873 mmap_data = 0,
874 sample_id_all = 1;
875 int idx = 0;
876
877 if (!PyArg_ParseTupleAndKeywords(args, kwargs,
878 "|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
879 &attr.type, &attr.config, &attr.sample_freq,
880 &sample_period, &attr.sample_type,
881 &attr.read_format, &disabled, &inherit,
882 &pinned, &exclusive, &exclude_user,
883 &exclude_kernel, &exclude_hv, &exclude_idle,
884 &mmap, &context_switch, &comm, &freq, &inherit_stat,
885 &enable_on_exec, &task, &watermark,
886 &precise_ip, &mmap_data, &sample_id_all,
887 &attr.wakeup_events, &attr.bp_type,
888 &attr.bp_addr, &attr.bp_len, &idx))
889 return -1;
890
891 /* union... */
892 if (sample_period != 0) {
893 if (attr.sample_freq != 0)
894 return -1; /* FIXME: throw right exception */
895 attr.sample_period = sample_period;
896 }
897
898 /* Bitfields */
899 attr.disabled = disabled;
900 attr.inherit = inherit;
901 attr.pinned = pinned;
902 attr.exclusive = exclusive;
903 attr.exclude_user = exclude_user;
904 attr.exclude_kernel = exclude_kernel;
905 attr.exclude_hv = exclude_hv;
906 attr.exclude_idle = exclude_idle;
907 attr.mmap = mmap;
908 attr.context_switch = context_switch;
909 attr.comm = comm;
910 attr.freq = freq;
911 attr.inherit_stat = inherit_stat;
912 attr.enable_on_exec = enable_on_exec;
913 attr.task = task;
914 attr.watermark = watermark;
915 attr.precise_ip = precise_ip;
916 attr.mmap_data = mmap_data;
917 attr.sample_id_all = sample_id_all;
918 attr.size = sizeof(attr);
919
920 evsel__init(&pevsel->evsel, &attr, idx);
921 return 0;
922}
923
924static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
925{
926 evsel__exit(&pevsel->evsel);
927 Py_TYPE(pevsel)->tp_free((PyObject*)pevsel);
928}
929
930static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
931 PyObject *args, PyObject *kwargs)
932{
933 struct evsel *evsel = &pevsel->evsel;
934 struct perf_cpu_map *cpus = NULL;
935 struct perf_thread_map *threads = NULL;
936 PyObject *pcpus = NULL, *pthreads = NULL;
937 int group = 0, inherit = 0;
938 static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL };
939
940 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
941 &pcpus, &pthreads, &group, &inherit))
942 return NULL;
943
944 if (pthreads != NULL)
945 threads = ((struct pyrf_thread_map *)pthreads)->threads;
946
947 if (pcpus != NULL)
948 cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
949
950 evsel->core.attr.inherit = inherit;
951 /*
952 * This will group just the fds for this single evsel; to group
953 * multiple events, use evlist.open().
954 */
955 if (evsel__open(evsel, cpus, threads) < 0) {
956 PyErr_SetFromErrno(PyExc_OSError);
957 return NULL;
958 }
959
960 Py_INCREF(Py_None);
961 return Py_None;
962}
963
964static PyMethodDef pyrf_evsel__methods[] = {
965 {
966 .ml_name = "open",
967 .ml_meth = (PyCFunction)pyrf_evsel__open,
968 .ml_flags = METH_VARARGS | METH_KEYWORDS,
969 .ml_doc = PyDoc_STR("open the event selector file descriptor table.")
970 },
971 { .ml_name = NULL, }
972};
973
974 static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector object.");
975
976static PyTypeObject pyrf_evsel__type = {
977 PyVarObject_HEAD_INIT(NULL, 0)
978 .tp_name = "perf.evsel",
979 .tp_basicsize = sizeof(struct pyrf_evsel),
980 .tp_dealloc = (destructor)pyrf_evsel__delete,
981 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
982 .tp_doc = pyrf_evsel__doc,
983 .tp_methods = pyrf_evsel__methods,
984 .tp_init = (initproc)pyrf_evsel__init,
985};
986
987static int pyrf_evsel__setup_types(void)
988{
989 pyrf_evsel__type.tp_new = PyType_GenericNew;
990 return PyType_Ready(&pyrf_evsel__type);
991}
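
/*
 * Sketch of constructing and opening an evsel from Python; the keywords
 * mirror kwlist[] in pyrf_evsel__init() and anything not passed keeps the
 * hardware-cycles defaults.  The particular counter and period below are
 * only an example:
 *
 *   import perf
 *
 *   evsel = perf.evsel(type=perf.TYPE_SOFTWARE,
 *                      config=perf.COUNT_SW_TASK_CLOCK,
 *                      freq=0, sample_period=1000, wakeup_events=1,
 *                      sample_type=perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
 *   evsel.open(cpus=perf.cpu_map(), threads=perf.thread_map())
 *
 * As the comment in pyrf_evsel__open() notes, this opens just this one
 * evsel; grouping several events goes through evlist.open().
 */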
992
993struct pyrf_evlist {
994 PyObject_HEAD
995
996 struct evlist evlist;
997};
998
999static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
1000 PyObject *args, PyObject *kwargs __maybe_unused)
1001{
1002 PyObject *pcpus = NULL, *pthreads = NULL;
1003 struct perf_cpu_map *cpus;
1004 struct perf_thread_map *threads;
1005
1006 if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
1007 return -1;
1008
1009 threads = ((struct pyrf_thread_map *)pthreads)->threads;
1010 cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
1011 evlist__init(&pevlist->evlist, cpus, threads);
1012 return 0;
1013}
1014
1015static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
1016{
1017 evlist__exit(&pevlist->evlist);
1018 Py_TYPE(pevlist)->tp_free((PyObject*)pevlist);
1019}
1020
1021static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
1022 PyObject *args, PyObject *kwargs)
1023{
1024 struct evlist *evlist = &pevlist->evlist;
1025 static char *kwlist[] = { "pages", "overwrite", NULL };
1026 int pages = 128, overwrite = false;
1027
1028 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
1029 &pages, &overwrite))
1030 return NULL;
1031
1032 if (evlist__mmap(evlist, pages) < 0) {
1033 PyErr_SetFromErrno(PyExc_OSError);
1034 return NULL;
1035 }
1036
1037 Py_INCREF(Py_None);
1038 return Py_None;
1039}
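
/*
 * Sketch of the Python side; "pages" is passed straight through to
 * evlist__mmap() as the per-mmap ring buffer size in pages (128 by default
 * here), while "overwrite" is parsed but not currently forwarded:
 *
 *   evlist.mmap()          # default 128 pages
 *   evlist.mmap(pages=16)  # smaller ring buffers
 */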
1040
1041static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
1042 PyObject *args, PyObject *kwargs)
1043{
1044 struct evlist *evlist = &pevlist->evlist;
1045 static char *kwlist[] = { "timeout", NULL };
1046 int timeout = -1, n;
1047
1048 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
1049 return NULL;
1050
1051 n = evlist__poll(evlist, timeout);
1052 if (n < 0) {
1053 PyErr_SetFromErrno(PyExc_OSError);
1054 return NULL;
1055 }
1056
1057 return Py_BuildValue("i", n);
1058}
1059
1060static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
1061 PyObject *args __maybe_unused,
1062 PyObject *kwargs __maybe_unused)
1063{
1064 struct evlist *evlist = &pevlist->evlist;
1065 PyObject *list = PyList_New(0);
1066 int i;
1067
1068 for (i = 0; i < evlist->core.pollfd.nr; ++i) {
1069 PyObject *file;
1070#if PY_MAJOR_VERSION < 3
1071 FILE *fp = fdopen(evlist->core.pollfd.entries[i].fd, "r");
1072
1073 if (fp == NULL)
1074 goto free_list;
1075
1076 file = PyFile_FromFile(fp, "perf", "r", NULL);
1077#else
1078 file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1,
1079 NULL, NULL, NULL, 0);
1080#endif
1081 if (file == NULL)
1082 goto free_list;
1083
1084 if (PyList_Append(list, file) != 0) {
1085 Py_DECREF(file);
1086 goto free_list;
1087 }
1088
1089 Py_DECREF(file);
1090 }
1091
1092 return list;
1093 free_list:
Py_XDECREF(list);
1094 return PyErr_NoMemory();
1095}
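
/*
 * The file objects returned above wrap the evlist's pollfd entries, so they
 * can also be fed to Python's stock polling machinery instead of
 * evlist.poll().  A sketch (select.poll() accepts anything with a fileno()):
 *
 *   import select
 *
 *   poller = select.poll()
 *   for f in evlist.get_pollfd():
 *       poller.register(f, select.POLLIN)
 *   ready = poller.poll(1000)   # timeout in milliseconds
 */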
1096
1097
1098static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
1099 PyObject *args,
1100 PyObject *kwargs __maybe_unused)
1101{
1102 struct evlist *evlist = &pevlist->evlist;
1103 PyObject *pevsel;
1104 struct evsel *evsel;
1105
1106 if (!PyArg_ParseTuple(args, "O", &pevsel))
1107 return NULL;
1108
1109 Py_INCREF(pevsel);
1110 evsel = &((struct pyrf_evsel *)pevsel)->evsel;
1111 evsel->core.idx = evlist->core.nr_entries;
1112 evlist__add(evlist, evsel);
1113
1114 return Py_BuildValue("i", evlist->core.nr_entries);
1115}
1116
1117static struct mmap *get_md(struct evlist *evlist, int cpu)
1118{
1119 int i;
1120
1121 for (i = 0; i < evlist->core.nr_mmaps; i++) {
1122 struct mmap *md = &evlist->mmap[i];
1123
1124 if (md->core.cpu.cpu == cpu)
1125 return md;
1126 }
1127
1128 return NULL;
1129}
1130
1131static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
1132 PyObject *args, PyObject *kwargs)
1133{
1134 struct evlist *evlist = &pevlist->evlist;
1135 union perf_event *event;
1136 int sample_id_all = 1, cpu;
1137 static char *kwlist[] = { "cpu", "sample_id_all", NULL };
1138 struct mmap *md;
1139 int err;
1140
1141 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
1142 &cpu, &sample_id_all))
1143 return NULL;
1144
1145 md = get_md(evlist, cpu);
1146 if (!md)
1147 return NULL;
1148
1149 if (perf_mmap__read_init(&md->core) < 0)
1150 goto end;
1151
1152 event = perf_mmap__read_event(&md->core);
1153 if (event != NULL) {
1154 PyObject *pyevent = pyrf_event__new(event);
1155 struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
1156 struct evsel *evsel;
1157
1158 if (pyevent == NULL)
1159 return PyErr_NoMemory();
1160
1161 evsel = evlist__event2evsel(evlist, event);
1162 if (!evsel) {
/* No evsel found for this event, drop our reference to the copy. */
Py_DECREF(pyevent);
1163 Py_INCREF(Py_None);
1164 return Py_None;
1165 }
1166
1167 pevent->evsel = evsel;
1168
1169 err = evsel__parse_sample(evsel, event, &pevent->sample);
1170
1171 /* Consume the event only after we parsed it out. */
1172 perf_mmap__consume(&md->core);
1173
1174 if (err) {
Py_DECREF(pyevent);
1175 return PyErr_Format(PyExc_OSError,
1176 "perf: can't parse sample, err=%d", err);
}
1177 return pyevent;
1178 }
1179end:
1180 Py_INCREF(Py_None);
1181 return Py_None;
1182}
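
/*
 * Sketch of the usual consumption loop from Python, in the spirit of
 * tools/perf/python/twatch.py: block in poll(), then ask each mmapped CPU
 * for one event; read_on_cpu() returns None when that ring buffer is empty:
 *
 *   while True:
 *       evlist.poll(timeout=-1)
 *       for cpu in cpus:
 *           event = evlist.read_on_cpu(cpu)
 *           if not event:
 *               continue
 *           print(event)   # goes through the per-type tp_repr above
 */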
1183
1184static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
1185 PyObject *args, PyObject *kwargs)
1186{
1187 struct evlist *evlist = &pevlist->evlist;
1188
1189 if (evlist__open(evlist) < 0) {
1190 PyErr_SetFromErrno(PyExc_OSError);
1191 return NULL;
1192 }
1193
1194 Py_INCREF(Py_None);
1195 return Py_None;
1196}
1197
1198static PyMethodDef pyrf_evlist__methods[] = {
1199 {
1200 .ml_name = "mmap",
1201 .ml_meth = (PyCFunction)pyrf_evlist__mmap,
1202 .ml_flags = METH_VARARGS | METH_KEYWORDS,
1203 .ml_doc = PyDoc_STR("mmap the file descriptor table.")
1204 },
1205 {
1206 .ml_name = "open",
1207 .ml_meth = (PyCFunction)pyrf_evlist__open,
1208 .ml_flags = METH_VARARGS | METH_KEYWORDS,
1209 .ml_doc = PyDoc_STR("open the file descriptors.")
1210 },
1211 {
1212 .ml_name = "poll",
1213 .ml_meth = (PyCFunction)pyrf_evlist__poll,
1214 .ml_flags = METH_VARARGS | METH_KEYWORDS,
1215 .ml_doc = PyDoc_STR("poll the file descriptor table.")
1216 },
1217 {
1218 .ml_name = "get_pollfd",
1219 .ml_meth = (PyCFunction)pyrf_evlist__get_pollfd,
1220 .ml_flags = METH_VARARGS | METH_KEYWORDS,
1221 .ml_doc = PyDoc_STR("get the poll file descriptor table.")
1222 },
1223 {
1224 .ml_name = "add",
1225 .ml_meth = (PyCFunction)pyrf_evlist__add,
1226 .ml_flags = METH_VARARGS | METH_KEYWORDS,
1227 .ml_doc = PyDoc_STR("adds an event selector to the list.")
1228 },
1229 {
1230 .ml_name = "read_on_cpu",
1231 .ml_meth = (PyCFunction)pyrf_evlist__read_on_cpu,
1232 .ml_flags = METH_VARARGS | METH_KEYWORDS,
1233 .ml_doc = PyDoc_STR("reads an event.")
1234 },
1235 { .ml_name = NULL, }
1236};
1237
1238static Py_ssize_t pyrf_evlist__length(PyObject *obj)
1239{
1240 struct pyrf_evlist *pevlist = (void *)obj;
1241
1242 return pevlist->evlist.core.nr_entries;
1243}
1244
1245static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
1246{
1247 struct pyrf_evlist *pevlist = (void *)obj;
1248 struct evsel *pos;
1249
1250 if (i >= pevlist->evlist.core.nr_entries)
1251 return NULL;
1252
1253 evlist__for_each_entry(&pevlist->evlist, pos) {
1254 if (i-- == 0)
1255 break;
1256 }
1257
1258 return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
1259}
1260
1261static PySequenceMethods pyrf_evlist__sequence_methods = {
1262 .sq_length = pyrf_evlist__length,
1263 .sq_item = pyrf_evlist__item,
1264};
1265
1266static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");
1267
1268static PyTypeObject pyrf_evlist__type = {
1269 PyVarObject_HEAD_INIT(NULL, 0)
1270 .tp_name = "perf.evlist",
1271 .tp_basicsize = sizeof(struct pyrf_evlist),
1272 .tp_dealloc = (destructor)pyrf_evlist__delete,
1273 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
1274 .tp_as_sequence = &pyrf_evlist__sequence_methods,
1275 .tp_doc = pyrf_evlist__doc,
1276 .tp_methods = pyrf_evlist__methods,
1277 .tp_init = (initproc)pyrf_evlist__init,
1278};
1279
1280static int pyrf_evlist__setup_types(void)
1281{
1282 pyrf_evlist__type.tp_new = PyType_GenericNew;
1283 return PyType_Ready(&pyrf_evlist__type);
1284}
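
/*
 * Sketch of wiring the pieces together from Python: the constructor takes a
 * (cpu_map, thread_map) pair, add() hands evsels over, and the evlist then
 * behaves like a sequence of its evsels.  The dummy software event below is
 * just an example choice:
 *
 *   import perf
 *
 *   cpus, threads = perf.cpu_map(), perf.thread_map()
 *   evlist = perf.evlist(cpus, threads)
 *   evlist.add(perf.evsel(type=perf.TYPE_SOFTWARE,
 *                         config=perf.COUNT_SW_DUMMY,
 *                         task=1, comm=1, freq=0, wakeup_events=1,
 *                         sample_id_all=1,
 *                         sample_type=perf.SAMPLE_TID | perf.SAMPLE_CPU))
 *   evlist.open()
 *   evlist.mmap()
 *   assert len(evlist) == 1
 */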
1285
1286#define PERF_CONST(name) { #name, PERF_##name }
1287
1288static struct {
1289 const char *name;
1290 int value;
1291} perf__constants[] = {
1292 PERF_CONST(TYPE_HARDWARE),
1293 PERF_CONST(TYPE_SOFTWARE),
1294 PERF_CONST(TYPE_TRACEPOINT),
1295 PERF_CONST(TYPE_HW_CACHE),
1296 PERF_CONST(TYPE_RAW),
1297 PERF_CONST(TYPE_BREAKPOINT),
1298
1299 PERF_CONST(COUNT_HW_CPU_CYCLES),
1300 PERF_CONST(COUNT_HW_INSTRUCTIONS),
1301 PERF_CONST(COUNT_HW_CACHE_REFERENCES),
1302 PERF_CONST(COUNT_HW_CACHE_MISSES),
1303 PERF_CONST(COUNT_HW_BRANCH_INSTRUCTIONS),
1304 PERF_CONST(COUNT_HW_BRANCH_MISSES),
1305 PERF_CONST(COUNT_HW_BUS_CYCLES),
1306 PERF_CONST(COUNT_HW_CACHE_L1D),
1307 PERF_CONST(COUNT_HW_CACHE_L1I),
1308 PERF_CONST(COUNT_HW_CACHE_LL),
1309 PERF_CONST(COUNT_HW_CACHE_DTLB),
1310 PERF_CONST(COUNT_HW_CACHE_ITLB),
1311 PERF_CONST(COUNT_HW_CACHE_BPU),
1312 PERF_CONST(COUNT_HW_CACHE_OP_READ),
1313 PERF_CONST(COUNT_HW_CACHE_OP_WRITE),
1314 PERF_CONST(COUNT_HW_CACHE_OP_PREFETCH),
1315 PERF_CONST(COUNT_HW_CACHE_RESULT_ACCESS),
1316 PERF_CONST(COUNT_HW_CACHE_RESULT_MISS),
1317
1318 PERF_CONST(COUNT_HW_STALLED_CYCLES_FRONTEND),
1319 PERF_CONST(COUNT_HW_STALLED_CYCLES_BACKEND),
1320
1321 PERF_CONST(COUNT_SW_CPU_CLOCK),
1322 PERF_CONST(COUNT_SW_TASK_CLOCK),
1323 PERF_CONST(COUNT_SW_PAGE_FAULTS),
1324 PERF_CONST(COUNT_SW_CONTEXT_SWITCHES),
1325 PERF_CONST(COUNT_SW_CPU_MIGRATIONS),
1326 PERF_CONST(COUNT_SW_PAGE_FAULTS_MIN),
1327 PERF_CONST(COUNT_SW_PAGE_FAULTS_MAJ),
1328 PERF_CONST(COUNT_SW_ALIGNMENT_FAULTS),
1329 PERF_CONST(COUNT_SW_EMULATION_FAULTS),
1330 PERF_CONST(COUNT_SW_DUMMY),
1331
1332 PERF_CONST(SAMPLE_IP),
1333 PERF_CONST(SAMPLE_TID),
1334 PERF_CONST(SAMPLE_TIME),
1335 PERF_CONST(SAMPLE_ADDR),
1336 PERF_CONST(SAMPLE_READ),
1337 PERF_CONST(SAMPLE_CALLCHAIN),
1338 PERF_CONST(SAMPLE_ID),
1339 PERF_CONST(SAMPLE_CPU),
1340 PERF_CONST(SAMPLE_PERIOD),
1341 PERF_CONST(SAMPLE_STREAM_ID),
1342 PERF_CONST(SAMPLE_RAW),
1343
1344 PERF_CONST(FORMAT_TOTAL_TIME_ENABLED),
1345 PERF_CONST(FORMAT_TOTAL_TIME_RUNNING),
1346 PERF_CONST(FORMAT_ID),
1347 PERF_CONST(FORMAT_GROUP),
1348
1349 PERF_CONST(RECORD_MMAP),
1350 PERF_CONST(RECORD_LOST),
1351 PERF_CONST(RECORD_COMM),
1352 PERF_CONST(RECORD_EXIT),
1353 PERF_CONST(RECORD_THROTTLE),
1354 PERF_CONST(RECORD_UNTHROTTLE),
1355 PERF_CONST(RECORD_FORK),
1356 PERF_CONST(RECORD_READ),
1357 PERF_CONST(RECORD_SAMPLE),
1358 PERF_CONST(RECORD_MMAP2),
1359 PERF_CONST(RECORD_AUX),
1360 PERF_CONST(RECORD_ITRACE_START),
1361 PERF_CONST(RECORD_LOST_SAMPLES),
1362 PERF_CONST(RECORD_SWITCH),
1363 PERF_CONST(RECORD_SWITCH_CPU_WIDE),
1364
1365 PERF_CONST(RECORD_MISC_SWITCH_OUT),
1366 { .name = NULL, },
1367};
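
/*
 * This table is what turns the PERF_* enums into module-level constants, so
 * Python callers can spell attributes and record types symbolically.  A few
 * illustrative uses:
 *
 *   evsel = perf.evsel(type=perf.TYPE_SOFTWARE,
 *                      config=perf.COUNT_SW_CONTEXT_SWITCHES)
 *   wanted = perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_RAW
 *   if event.type == perf.RECORD_EXIT:
 *       print("task exited:", event.pid)
 */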
1368
1369static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
1370 PyObject *args, PyObject *kwargs)
1371{
1372#ifndef HAVE_LIBTRACEEVENT
1373 return NULL;
1374#else
1375 struct tep_event *tp_format;
1376 static char *kwlist[] = { "sys", "name", NULL };
1377 char *sys = NULL;
1378 char *name = NULL;
1379
1380 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ss", kwlist,
1381 &sys, &name))
1382 return NULL;
1383
1384 tp_format = trace_event__tp_format(sys, name);
1385 if (IS_ERR(tp_format))
1386 return _PyLong_FromLong(-1);
1387
1388 return _PyLong_FromLong(tp_format->id);
1389#endif // HAVE_LIBTRACEEVENT
1390}
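
/*
 * Sketch of the Python side: the returned id is meant to be used as the
 * config of a PERF_TYPE_TRACEPOINT evsel.  "sched"/"sched_switch" is just an
 * example pair and must exist on the running kernel (and the binding must be
 * built with libtraceevent):
 *
 *   cfg = perf.tracepoint(sys="sched", name="sched_switch")
 *   evsel = perf.evsel(type=perf.TYPE_TRACEPOINT, config=cfg,
 *                      freq=0, sample_period=1, wakeup_events=1,
 *                      sample_type=perf.SAMPLE_RAW | perf.SAMPLE_TID)
 */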
1391
1392static PyMethodDef perf__methods[] = {
1393 {
1394 .ml_name = "tracepoint",
1395 .ml_meth = (PyCFunction) pyrf__tracepoint,
1396 .ml_flags = METH_VARARGS | METH_KEYWORDS,
1397 .ml_doc = PyDoc_STR("Get tracepoint config.")
1398 },
1399 { .ml_name = NULL, }
1400};
1401
1402#if PY_MAJOR_VERSION < 3
1403PyMODINIT_FUNC initperf(void)
1404#else
1405PyMODINIT_FUNC PyInit_perf(void)
1406#endif
1407{
1408 PyObject *obj;
1409 int i;
1410 PyObject *dict;
1411#if PY_MAJOR_VERSION < 3
1412 PyObject *module = Py_InitModule("perf", perf__methods);
1413#else
1414 static struct PyModuleDef moduledef = {
1415 PyModuleDef_HEAD_INIT,
1416 "perf", /* m_name */
1417 "", /* m_doc */
1418 -1, /* m_size */
1419 perf__methods, /* m_methods */
1420 NULL, /* m_reload */
1421 NULL, /* m_traverse */
1422 NULL, /* m_clear */
1423 NULL, /* m_free */
1424 };
1425 PyObject *module = PyModule_Create(&moduledef);
1426#endif
1427
1428 if (module == NULL ||
1429 pyrf_event__setup_types() < 0 ||
1430 pyrf_evlist__setup_types() < 0 ||
1431 pyrf_evsel__setup_types() < 0 ||
1432 pyrf_thread_map__setup_types() < 0 ||
1433 pyrf_cpu_map__setup_types() < 0)
1434#if PY_MAJOR_VERSION < 3
1435 return;
1436#else
1437 return module;
1438#endif
1439
1440 /* The page_size is placed in the util object. */
1441 page_size = sysconf(_SC_PAGE_SIZE);
1442
1443 Py_INCREF(&pyrf_evlist__type);
1444 PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);
1445
1446 Py_INCREF(&pyrf_evsel__type);
1447 PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);
1448
1449 Py_INCREF(&pyrf_mmap_event__type);
1450 PyModule_AddObject(module, "mmap_event", (PyObject *)&pyrf_mmap_event__type);
1451
1452 Py_INCREF(&pyrf_lost_event__type);
1453 PyModule_AddObject(module, "lost_event", (PyObject *)&pyrf_lost_event__type);
1454
1455 Py_INCREF(&pyrf_comm_event__type);
1456 PyModule_AddObject(module, "comm_event", (PyObject *)&pyrf_comm_event__type);
1457
1458 Py_INCREF(&pyrf_task_event__type);
1459 PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);
1460
1461 Py_INCREF(&pyrf_throttle_event__type);
1462 PyModule_AddObject(module, "throttle_event", (PyObject *)&pyrf_throttle_event__type);
1463
1467 Py_INCREF(&pyrf_read_event__type);
1468 PyModule_AddObject(module, "read_event", (PyObject *)&pyrf_read_event__type);
1469
1470 Py_INCREF(&pyrf_sample_event__type);
1471 PyModule_AddObject(module, "sample_event", (PyObject *)&pyrf_sample_event__type);
1472
1473 Py_INCREF(&pyrf_context_switch_event__type);
1474 PyModule_AddObject(module, "switch_event", (PyObject *)&pyrf_context_switch_event__type);
1475
1476 Py_INCREF(&pyrf_thread_map__type);
1477 PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);
1478
1479 Py_INCREF(&pyrf_cpu_map__type);
1480 PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);
1481
1482 dict = PyModule_GetDict(module);
1483 if (dict == NULL)
1484 goto error;
1485
1486 for (i = 0; perf__constants[i].name != NULL; i++) {
1487 obj = _PyLong_FromLong(perf__constants[i].value);
1488 if (obj == NULL)
1489 goto error;
1490 PyDict_SetItemString(dict, perf__constants[i].name, obj);
1491 Py_DECREF(obj);
1492 }
1493
1494error:
1495 if (PyErr_Occurred())
1496 PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
1497#if PY_MAJOR_VERSION >= 3
1498 return module;
1499#endif
1500}
1501
1502/*
1503 * Dummy, to avoid dragging all the test_attr infrastructure in the python
1504 * binding.
1505 */
1506void test_attr__open(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu,
1507 int fd, int group_fd, unsigned long flags)
1508{
1509}
1510
1511void evlist__free_stats(struct evlist *evlist)
1512{
1513}