// SPDX-License-Identifier: GPL-2.0-only
/*
 * CTF writing support via babeltrace.
 *
 * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
 * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 */

#include <errno.h>
#include <inttypes.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <babeltrace/ctf-writer/writer.h>
#include <babeltrace/ctf-writer/clock.h>
#include <babeltrace/ctf-writer/stream.h>
#include <babeltrace/ctf-writer/event.h>
#include <babeltrace/ctf-writer/event-types.h>
#include <babeltrace/ctf-writer/event-fields.h>
#include <babeltrace/ctf-ir/utils.h>
#include <babeltrace/ctf/events.h>
#include "asm/bug.h"
#include "data-convert.h"
#include "session.h"
#include "debug.h"
#include "tool.h"
#include "evlist.h"
#include "evsel.h"
#include "machine.h"
#include "config.h"
#include <linux/ctype.h>
#include <linux/err.h>
#include <linux/time64.h>
#include "util.h"
#include "clockid.h"
#include "util/sample.h"

#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif

#define pr_N(n, fmt, ...) \
	eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)

#define pr(fmt, ...)  pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
#define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)

#define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)

struct evsel_priv {
	struct bt_ctf_event_class *event_class;
};

#define MAX_CPUS	4096

struct ctf_stream {
	struct bt_ctf_stream *stream;
	int cpu;
	u32 count;
};

struct ctf_writer {
	/* writer primitives */
	struct bt_ctf_writer *writer;
	struct ctf_stream **stream;
	int stream_cnt;
	struct bt_ctf_stream_class *stream_class;
	struct bt_ctf_clock *clock;

	/* data types */
	union {
		struct {
			struct bt_ctf_field_type *s64;
			struct bt_ctf_field_type *u64;
			struct bt_ctf_field_type *s32;
			struct bt_ctf_field_type *u32;
			struct bt_ctf_field_type *string;
			struct bt_ctf_field_type *u32_hex;
			struct bt_ctf_field_type *u64_hex;
		};
		struct bt_ctf_field_type *array[6];
	} data;
	struct bt_ctf_event_class *comm_class;
	struct bt_ctf_event_class *exit_class;
	struct bt_ctf_event_class *fork_class;
	struct bt_ctf_event_class *mmap_class;
	struct bt_ctf_event_class *mmap2_class;
};

struct convert {
	struct perf_tool tool;
	struct ctf_writer writer;

	u64 events_size;
	u64 events_count;
	u64 non_sample_count;

	/* Ordered events configured queue size. */
	u64 queue_size;
};

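/*
 * Generic helper to add one integer payload field to a CTF event:
 * create a field of the given type, set its (signed or unsigned)
 * value and attach it to the event as the named payload. The field
 * reference is always dropped before returning.
 */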
static int value_set(struct bt_ctf_field_type *type,
		     struct bt_ctf_event *event,
		     const char *name, u64 val)
{
	struct bt_ctf_field *field;
	bool sign = bt_ctf_field_type_integer_get_signed(type);
	int ret;

	field = bt_ctf_field_create(type);
	if (!field) {
		pr_err("failed to create a field %s\n", name);
		return -1;
	}

	if (sign) {
		ret = bt_ctf_field_signed_integer_set_value(field, val);
		if (ret) {
			pr_err("failed to set field value %s\n", name);
			goto err;
		}
	} else {
		ret = bt_ctf_field_unsigned_integer_set_value(field, val);
		if (ret) {
			pr_err("failed to set field value %s\n", name);
			goto err;
		}
	}

	ret = bt_ctf_event_set_payload(event, name, field);
	if (ret) {
		pr_err("failed to set payload %s\n", name);
		goto err;
	}

	pr2("  SET [%s = %" PRIu64 "]\n", name, val);

err:
	bt_ctf_field_put(field);
	return ret;
}

#define __FUNC_VALUE_SET(_name, _val_type)				\
static __maybe_unused int value_set_##_name(struct ctf_writer *cw,	\
					    struct bt_ctf_event *event,	\
					    const char *name,		\
					    _val_type val)		\
{									\
	struct bt_ctf_field_type *type = cw->data._name;		\
	return value_set(type, event, name, (u64) val);			\
}

#define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)

FUNC_VALUE_SET(s32)
FUNC_VALUE_SET(u32)
FUNC_VALUE_SET(s64)
FUNC_VALUE_SET(u64)
__FUNC_VALUE_SET(u64_hex, u64)

static int string_set_value(struct bt_ctf_field *field, const char *string);
static __maybe_unused int
value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event,
		 const char *name, const char *string)
{
	struct bt_ctf_field_type *type = cw->data.string;
	struct bt_ctf_field *field;
	int ret = 0;

	field = bt_ctf_field_create(type);
	if (!field) {
		pr_err("failed to create a field %s\n", name);
		return -1;
	}

	ret = string_set_value(field, string);
	if (ret) {
		pr_err("failed to set value %s\n", name);
		goto err_put_field;
	}

	ret = bt_ctf_event_set_payload(event, name, field);
	if (ret)
		pr_err("failed to set payload %s\n", name);

err_put_field:
	bt_ctf_field_put(field);
	return ret;
}

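/*
 * Map a libtraceevent field description to one of the pre-created
 * CTF field types in cw->data: strings map to the string type,
 * unsigned long/pointer fields are emitted as hex u64, and the rest
 * pick a 32-bit or 64-bit (un)signed integer based on field->size.
 */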
static struct bt_ctf_field_type*
get_tracepoint_field_type(struct ctf_writer *cw, struct tep_format_field *field)
{
	unsigned long flags = field->flags;

	if (flags & TEP_FIELD_IS_STRING)
		return cw->data.string;

	if (!(flags & TEP_FIELD_IS_SIGNED)) {
		/* unsigned longs are mostly pointers */
		if (flags & TEP_FIELD_IS_LONG || flags & TEP_FIELD_IS_POINTER)
			return cw->data.u64_hex;
	}

	if (flags & TEP_FIELD_IS_SIGNED) {
		if (field->size == 8)
			return cw->data.s64;
		else
			return cw->data.s32;
	}

	if (field->size == 8)
		return cw->data.u64;
	else
		return cw->data.u32;
}

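/*
 * Sign-extend a raw tracepoint value that was read into an unsigned
 * long long. For example, a 1-byte field holding 0xff (-1) comes in
 * as value_int = 0xff; with value_mask = 0x7f the high bits get
 * filled and the result is 0xffffffffffffffff, i.e. -1 as a 64-bit
 * signed integer.
 */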
static unsigned long long adjust_signedness(unsigned long long value_int, int size)
{
	unsigned long long value_mask;

	/*
	 * value_mask = (1 << (size * 8 - 1)) - 1.
	 * Directly set value_mask for code readers.
	 */
	switch (size) {
	case 1:
		value_mask = 0x7fULL;
		break;
	case 2:
		value_mask = 0x7fffULL;
		break;
	case 4:
		value_mask = 0x7fffffffULL;
		break;
	case 8:
		/*
		 * For a 64 bit value, return it as is. There is
		 * no need to fill the high bits.
		 */
		/* Fall through */
	default:
		/* BUG! */
		return value_int;
	}

	/* If it is a positive value, don't adjust. */
	if ((value_int & (~0ULL - value_mask)) == 0)
		return value_int;

	/* Fill the upper part of value_int with 1s to make it a negative long long. */
	return (value_int & value_mask) | ~value_mask;
}

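/*
 * Set a CTF string field, escaping unprintable bytes as "\xXX". The
 * scratch buffer is only allocated lazily when the first unprintable
 * byte is found; its size (i + (len - i) * 4 + 2) covers the already
 * copied prefix plus a worst case of four output characters per
 * remaining input byte and the terminating NUL.
 */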
static int string_set_value(struct bt_ctf_field *field, const char *string)
{
	char *buffer = NULL;
	size_t len = strlen(string), i, p;
	int err;

	for (i = p = 0; i < len; i++, p++) {
		if (isprint(string[i])) {
			if (!buffer)
				continue;
			buffer[p] = string[i];
		} else {
			char numstr[5];

			snprintf(numstr, sizeof(numstr), "\\x%02x",
				 (unsigned int)(string[i]) & 0xff);

			if (!buffer) {
				buffer = zalloc(i + (len - i) * 4 + 2);
				if (!buffer) {
					pr_err("failed to set unprintable string '%s'\n", string);
					return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
				}
				if (i > 0)
					strncpy(buffer, string, i);
			}
			memcpy(buffer + p, numstr, 4);
			p += 3;
		}
	}

	if (!buffer)
		return bt_ctf_field_string_set_value(field, string);
	err = bt_ctf_field_string_set_value(field, buffer);
	free(buffer);
	return err;
}

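/*
 * Write one tracepoint format field into the CTF event payload.
 * Dynamic (__data_loc) fields store a 32-bit descriptor in the
 * record: the lower 16 bits are the real offset into the raw data
 * and the upper 16 bits the length, which is unpacked below before
 * the value(s) are read. Array fields are emitted element by element
 * into a CTF array field of the same name.
 */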
static int add_tracepoint_field_value(struct ctf_writer *cw,
				      struct bt_ctf_event_class *event_class,
				      struct bt_ctf_event *event,
				      struct perf_sample *sample,
				      struct tep_format_field *fmtf)
{
	struct bt_ctf_field_type *type;
	struct bt_ctf_field *array_field;
	struct bt_ctf_field *field;
	const char *name = fmtf->name;
	void *data = sample->raw_data;
	unsigned long flags = fmtf->flags;
	unsigned int n_items;
	unsigned int i;
	unsigned int offset;
	unsigned int len;
	int ret;

	name = fmtf->alias;
	offset = fmtf->offset;
	len = fmtf->size;
	if (flags & TEP_FIELD_IS_STRING)
		flags &= ~TEP_FIELD_IS_ARRAY;

	if (flags & TEP_FIELD_IS_DYNAMIC) {
		unsigned long long tmp_val;

		tmp_val = tep_read_number(fmtf->event->tep,
					  data + offset, len);
		offset = tmp_val;
		len = offset >> 16;
		offset &= 0xffff;
#ifdef HAVE_LIBTRACEEVENT_TEP_FIELD_IS_RELATIVE
		if (flags & TEP_FIELD_IS_RELATIVE)
			offset += fmtf->offset + fmtf->size;
#endif
	}

	if (flags & TEP_FIELD_IS_ARRAY) {

		type = bt_ctf_event_class_get_field_by_name(
				event_class, name);
		array_field = bt_ctf_field_create(type);
		bt_ctf_field_type_put(type);
		if (!array_field) {
			pr_err("Failed to create array type %s\n", name);
			return -1;
		}

		len = fmtf->size / fmtf->arraylen;
		n_items = fmtf->arraylen;
	} else {
		n_items = 1;
		array_field = NULL;
	}

	type = get_tracepoint_field_type(cw, fmtf);

	for (i = 0; i < n_items; i++) {
		if (flags & TEP_FIELD_IS_ARRAY)
			field = bt_ctf_field_array_get_field(array_field, i);
		else
			field = bt_ctf_field_create(type);

		if (!field) {
			pr_err("failed to create a field %s\n", name);
			return -1;
		}

		if (flags & TEP_FIELD_IS_STRING)
			ret = string_set_value(field, data + offset + i * len);
		else {
			unsigned long long value_int;

			value_int = tep_read_number(
					fmtf->event->tep,
					data + offset + i * len, len);

			if (!(flags & TEP_FIELD_IS_SIGNED))
				ret = bt_ctf_field_unsigned_integer_set_value(
						field, value_int);
			else
				ret = bt_ctf_field_signed_integer_set_value(
						field, adjust_signedness(value_int, len));
		}

		if (ret) {
			pr_err("failed to set field value %s\n", name);
			goto err_put_field;
		}
		if (!(flags & TEP_FIELD_IS_ARRAY)) {
			ret = bt_ctf_event_set_payload(event, name, field);
			if (ret) {
				pr_err("failed to set payload %s\n", name);
				goto err_put_field;
			}
		}
		bt_ctf_field_put(field);
	}
	if (flags & TEP_FIELD_IS_ARRAY) {
		ret = bt_ctf_event_set_payload(event, name, array_field);
		if (ret) {
			pr_err("Failed to add payload array %s\n", name);
			return -1;
		}
		bt_ctf_field_put(array_field);
	}
	return 0;

err_put_field:
	bt_ctf_field_put(field);
	return -1;
}

static int add_tracepoint_fields_values(struct ctf_writer *cw,
					struct bt_ctf_event_class *event_class,
					struct bt_ctf_event *event,
					struct tep_format_field *fields,
					struct perf_sample *sample)
{
	struct tep_format_field *field;
	int ret;

	for (field = fields; field; field = field->next) {
		ret = add_tracepoint_field_value(cw, event_class, event, sample,
						 field);
		if (ret)
			return -1;
	}
	return 0;
}

static int add_tracepoint_values(struct ctf_writer *cw,
				 struct bt_ctf_event_class *event_class,
				 struct bt_ctf_event *event,
				 struct evsel *evsel,
				 struct perf_sample *sample)
{
	struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
	struct tep_format_field *fields = evsel->tp_format->format.fields;
	int ret;

	ret = add_tracepoint_fields_values(cw, event_class, event,
					   common_fields, sample);
	if (!ret)
		ret = add_tracepoint_fields_values(cw, event_class, event,
						   fields, sample);

	return ret;
}

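/*
 * BPF output events carry an opaque byte blob in the sample raw
 * data. It is converted into the pair of payload fields declared in
 * add_bpf_output_types(): "raw_len", the number of u32 words, and
 * "raw_data", a CTF sequence whose length field is raw_len and whose
 * elements are the raw words emitted as hex u32 values.
 */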
static int
add_bpf_output_values(struct bt_ctf_event_class *event_class,
		      struct bt_ctf_event *event,
		      struct perf_sample *sample)
{
	struct bt_ctf_field_type *len_type, *seq_type;
	struct bt_ctf_field *len_field, *seq_field;
	unsigned int raw_size = sample->raw_size;
	unsigned int nr_elements = raw_size / sizeof(u32);
	unsigned int i;
	int ret;

	if (nr_elements * sizeof(u32) != raw_size)
		pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n",
			   raw_size, nr_elements * sizeof(u32) - raw_size);

	len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
	len_field = bt_ctf_field_create(len_type);
	if (!len_field) {
		pr_err("failed to create 'raw_len' for bpf output event\n");
		ret = -1;
		goto put_len_type;
	}

	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
	if (ret) {
		pr_err("failed to set field value for raw_len\n");
		goto put_len_field;
	}
	ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
	if (ret) {
		pr_err("failed to set payload to raw_len\n");
		goto put_len_field;
	}

	seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
	seq_field = bt_ctf_field_create(seq_type);
	if (!seq_field) {
		pr_err("failed to create 'raw_data' for bpf output event\n");
		ret = -1;
		goto put_seq_type;
	}

	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
	if (ret) {
		pr_err("failed to set length of 'raw_data'\n");
		goto put_seq_field;
	}

	for (i = 0; i < nr_elements; i++) {
		struct bt_ctf_field *elem_field =
			bt_ctf_field_sequence_get_field(seq_field, i);

		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
				((u32 *)(sample->raw_data))[i]);

		bt_ctf_field_put(elem_field);
		if (ret) {
			pr_err("failed to set raw_data[%d]\n", i);
			goto put_seq_field;
		}
	}

	ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
	if (ret)
		pr_err("failed to set payload for raw_data\n");

put_seq_field:
	bt_ctf_field_put(seq_field);
put_seq_type:
	bt_ctf_field_type_put(seq_type);
put_len_field:
	bt_ctf_field_put(len_field);
put_len_type:
	bt_ctf_field_type_put(len_type);
	return ret;
}

static int
add_callchain_output_values(struct bt_ctf_event_class *event_class,
			    struct bt_ctf_event *event,
			    struct ip_callchain *callchain)
{
	struct bt_ctf_field_type *len_type, *seq_type;
	struct bt_ctf_field *len_field, *seq_field;
	unsigned int nr_elements = callchain->nr;
	unsigned int i;
	int ret;

	len_type = bt_ctf_event_class_get_field_by_name(
			event_class, "perf_callchain_size");
	len_field = bt_ctf_field_create(len_type);
	if (!len_field) {
		pr_err("failed to create 'perf_callchain_size' for callchain output event\n");
		ret = -1;
		goto put_len_type;
	}

	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
	if (ret) {
		pr_err("failed to set field value for perf_callchain_size\n");
		goto put_len_field;
	}
	ret = bt_ctf_event_set_payload(event, "perf_callchain_size", len_field);
	if (ret) {
		pr_err("failed to set payload to perf_callchain_size\n");
		goto put_len_field;
	}

	seq_type = bt_ctf_event_class_get_field_by_name(
			event_class, "perf_callchain");
	seq_field = bt_ctf_field_create(seq_type);
	if (!seq_field) {
		pr_err("failed to create 'perf_callchain' for callchain output event\n");
		ret = -1;
		goto put_seq_type;
	}

	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
	if (ret) {
		pr_err("failed to set length of 'perf_callchain'\n");
		goto put_seq_field;
	}

	for (i = 0; i < nr_elements; i++) {
		struct bt_ctf_field *elem_field =
			bt_ctf_field_sequence_get_field(seq_field, i);

		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
				((u64 *)(callchain->ips))[i]);

		bt_ctf_field_put(elem_field);
		if (ret) {
			pr_err("failed to set callchain[%d]\n", i);
			goto put_seq_field;
		}
	}

	ret = bt_ctf_event_set_payload(event, "perf_callchain", seq_field);
	if (ret)
		pr_err("failed to set payload for perf_callchain\n");

put_seq_field:
	bt_ctf_field_put(seq_field);
put_seq_type:
	bt_ctf_field_type_put(seq_type);
put_len_field:
	bt_ctf_field_put(len_field);
put_len_type:
	bt_ctf_field_type_put(len_type);
	return ret;
}

static int add_generic_values(struct ctf_writer *cw,
			      struct bt_ctf_event *event,
			      struct evsel *evsel,
			      struct perf_sample *sample)
{
	u64 type = evsel->core.attr.sample_type;
	int ret;

	/*
	 * missing:
	 *   PERF_SAMPLE_TIME         - not needed as we have it in
	 *                              ctf event header
	 *   PERF_SAMPLE_READ         - TODO
	 *   PERF_SAMPLE_RAW          - tracepoint fields are handled separately
	 *   PERF_SAMPLE_BRANCH_STACK - TODO
	 *   PERF_SAMPLE_REGS_USER    - TODO
	 *   PERF_SAMPLE_STACK_USER   - TODO
	 */

	if (type & PERF_SAMPLE_IP) {
		ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_TID) {
		ret = value_set_s32(cw, event, "perf_tid", sample->tid);
		if (ret)
			return -1;

		ret = value_set_s32(cw, event, "perf_pid", sample->pid);
		if (ret)
			return -1;
	}

	if ((type & PERF_SAMPLE_ID) ||
	    (type & PERF_SAMPLE_IDENTIFIER)) {
		ret = value_set_u64(cw, event, "perf_id", sample->id);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		ret = value_set_u64(cw, event, "perf_period", sample->period);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		ret = value_set_u64(cw, event, "perf_weight", sample->weight);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		ret = value_set_u64(cw, event, "perf_data_src",
				    sample->data_src);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		ret = value_set_u64(cw, event, "perf_transaction",
				    sample->transaction);
		if (ret)
			return -1;
	}

	return 0;
}

static int ctf_stream__flush(struct ctf_stream *cs)
{
	int err = 0;

	if (cs) {
		err = bt_ctf_stream_flush(cs->stream);
		if (err)
			pr_err("CTF stream %d flush failed\n", cs->cpu);

		pr("Flush stream for cpu %d (%u samples)\n",
		   cs->cpu, cs->count);

		cs->count = 0;
	}

	return err;
}

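/*
 * Each CPU gets its own CTF stream. The stream's packet context
 * carries a "cpu_id" field (added to the stream class in
 * ctf_writer__init()), which is filled in here so trace readers can
 * tell the per-CPU packets apart.
 */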
static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
{
	struct ctf_stream *cs;
	struct bt_ctf_field *pkt_ctx = NULL;
	struct bt_ctf_field *cpu_field = NULL;
	struct bt_ctf_stream *stream = NULL;
	int ret;

	cs = zalloc(sizeof(*cs));
	if (!cs) {
		pr_err("Failed to allocate ctf stream\n");
		return NULL;
	}

	stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
	if (!stream) {
		pr_err("Failed to create CTF stream\n");
		goto out;
	}

	pkt_ctx = bt_ctf_stream_get_packet_context(stream);
	if (!pkt_ctx) {
		pr_err("Failed to obtain packet context\n");
		goto out;
	}

	cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
	bt_ctf_field_put(pkt_ctx);
	if (!cpu_field) {
		pr_err("Failed to obtain cpu field\n");
		goto out;
	}

	ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
	if (ret) {
		pr_err("Failed to update CPU number\n");
		goto out;
	}

	bt_ctf_field_put(cpu_field);

	cs->cpu = cpu;
	cs->stream = stream;
	return cs;

out:
	if (cpu_field)
		bt_ctf_field_put(cpu_field);
	if (stream)
		bt_ctf_stream_put(stream);

	free(cs);
	return NULL;
}

static void ctf_stream__delete(struct ctf_stream *cs)
{
	if (cs) {
		bt_ctf_stream_put(cs->stream);
		free(cs);
	}
}

static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
{
	struct ctf_stream *cs = cw->stream[cpu];

	if (!cs) {
		cs = ctf_stream__create(cw, cpu);
		cw->stream[cpu] = cs;
	}

	return cs;
}

static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
			  struct evsel *evsel)
{
	int cpu = 0;

	if (evsel->core.attr.sample_type & PERF_SAMPLE_CPU)
		cpu = sample->cpu;

	if (cpu > cw->stream_cnt) {
		pr_err("Event was recorded for CPU %d, limit is at %d.\n",
		       cpu, cw->stream_cnt);
		cpu = 0;
	}

	return cpu;
}

#define STREAM_FLUSH_COUNT 100000

/*
 * Currently we have no other way to determine the time for the
 * stream flush than to keep track of the number of events and
 * check it against a threshold.
 */
static bool is_flush_needed(struct ctf_stream *cs)
{
	return cs->count >= STREAM_FLUSH_COUNT;
}

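/*
 * Per-sample conversion path: look up the event class created in
 * add_event(), fill in the generic perf_* fields, then any
 * tracepoint, callchain or BPF output payload, and finally append
 * the CTF event to the per-CPU stream, flushing it every
 * STREAM_FLUSH_COUNT events.
 */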
static int process_sample_event(struct perf_tool *tool,
				union perf_event *_event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine __maybe_unused)
{
	struct convert *c = container_of(tool, struct convert, tool);
	struct evsel_priv *priv = evsel->priv;
	struct ctf_writer *cw = &c->writer;
	struct ctf_stream *cs;
	struct bt_ctf_event_class *event_class;
	struct bt_ctf_event *event;
	int ret;
	unsigned long type = evsel->core.attr.sample_type;

	if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
		return 0;

	event_class = priv->event_class;

	/* update stats */
	c->events_count++;
	c->events_size += _event->header.size;

	pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);

	event = bt_ctf_event_create(event_class);
	if (!event) {
		pr_err("Failed to create a CTF event\n");
		return -1;
	}

	bt_ctf_clock_set_time(cw->clock, sample->time);

	ret = add_generic_values(cw, event, evsel, sample);
	if (ret)
		return -1;

	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
		ret = add_tracepoint_values(cw, event_class, event,
					    evsel, sample);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		ret = add_callchain_output_values(event_class,
						  event, sample->callchain);
		if (ret)
			return -1;
	}

	if (evsel__is_bpf_output(evsel)) {
		ret = add_bpf_output_values(event_class, event, sample);
		if (ret)
			return -1;
	}

	cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
	if (cs) {
		if (is_flush_needed(cs))
			ctf_stream__flush(cs);

		cs->count++;
		bt_ctf_stream_append_event(cs->stream, event);
	}

	bt_ctf_event_put(event);
	return cs ? 0 : -1;
}

#define __NON_SAMPLE_SET_FIELD(_name, _type, _field)	\
do {							\
	ret = value_set_##_type(cw, event, #_field, _event->_name._field);\
	if (ret)					\
		return -1;				\
} while(0)

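/*
 * Generate the process_comm/fork/exit/mmap/mmap2_event() handlers
 * used with 'perf data convert --all'. Each one emits a
 * "perf_<name>" CTF event on stream 0 and then chains to the regular
 * perf_event__process_<name>() handler. Note that PERF_RECORD_EXIT
 * reuses the fork member of union perf_event, which is why the exit
 * instantiation below reads _event->fork.
 */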
#define __FUNC_PROCESS_NON_SAMPLE(_name, body)			\
static int process_##_name##_event(struct perf_tool *tool,	\
				   union perf_event *_event,	\
				   struct perf_sample *sample,	\
				   struct machine *machine)	\
{								\
	struct convert *c = container_of(tool, struct convert, tool);\
	struct ctf_writer *cw = &c->writer;			\
	struct bt_ctf_event_class *event_class = cw->_name##_class;\
	struct bt_ctf_event *event;				\
	struct ctf_stream *cs;					\
	int ret;						\
								\
	c->non_sample_count++;					\
	c->events_size += _event->header.size;			\
	event = bt_ctf_event_create(event_class);		\
	if (!event) {						\
		pr_err("Failed to create a CTF event\n");	\
		return -1;					\
	}							\
								\
	bt_ctf_clock_set_time(cw->clock, sample->time);		\
	body							\
	cs = ctf_stream(cw, 0);					\
	if (cs) {						\
		if (is_flush_needed(cs))			\
			ctf_stream__flush(cs);			\
								\
		cs->count++;					\
		bt_ctf_stream_append_event(cs->stream, event);	\
	}							\
	bt_ctf_event_put(event);				\
								\
	return perf_event__process_##_name(tool, _event, sample, machine);\
}

__FUNC_PROCESS_NON_SAMPLE(comm,
	__NON_SAMPLE_SET_FIELD(comm, u32, pid);
	__NON_SAMPLE_SET_FIELD(comm, u32, tid);
	__NON_SAMPLE_SET_FIELD(comm, string, comm);
)
__FUNC_PROCESS_NON_SAMPLE(fork,
	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
	__NON_SAMPLE_SET_FIELD(fork, u64, time);
)

__FUNC_PROCESS_NON_SAMPLE(exit,
	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
	__NON_SAMPLE_SET_FIELD(fork, u64, time);
)
__FUNC_PROCESS_NON_SAMPLE(mmap,
	__NON_SAMPLE_SET_FIELD(mmap, u32, pid);
	__NON_SAMPLE_SET_FIELD(mmap, u32, tid);
	__NON_SAMPLE_SET_FIELD(mmap, u64_hex, start);
	__NON_SAMPLE_SET_FIELD(mmap, string, filename);
)
__FUNC_PROCESS_NON_SAMPLE(mmap2,
	__NON_SAMPLE_SET_FIELD(mmap2, u32, pid);
	__NON_SAMPLE_SET_FIELD(mmap2, u32, tid);
	__NON_SAMPLE_SET_FIELD(mmap2, u64_hex, start);
	__NON_SAMPLE_SET_FIELD(mmap2, string, filename);
)
#undef __NON_SAMPLE_SET_FIELD
#undef __FUNC_PROCESS_NON_SAMPLE

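/*
 * Illustration: a field whose name fails bt_ctf_validate_identifier()
 * (for example because it collides with a CTF keyword) is renamed
 * with a '_' prefix, e.g. "event" -> "_event". A name that merely
 * duplicates one already in the event class gets a "_dupl_<n>"
 * suffix instead, e.g. "pid" -> "pid_dupl_1", and change_name()
 * gives up after 10 attempts.
 */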
/* If dup < 0, add a prefix. Else, add _dupl_X suffix. */
static char *change_name(char *name, char *orig_name, int dup)
{
	char *new_name = NULL;
	size_t len;

	if (!name)
		name = orig_name;

	if (dup >= 10)
		goto out;
	/*
	 * Add a '_' prefix to a potential keyword. According to
	 * Mathieu Desnoyers (https://lore.kernel.org/lkml/1074266107.40857.1422045946295.JavaMail.zimbra@efficios.com),
	 * further CTF spec updates may require us to use '$'.
	 */
	if (dup < 0)
		len = strlen(name) + sizeof("_");
	else
		len = strlen(orig_name) + sizeof("_dupl_X");

	new_name = malloc(len);
	if (!new_name)
		goto out;

	if (dup < 0)
		snprintf(new_name, len, "_%s", name);
	else
		snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);

out:
	if (name != orig_name)
		free(name);
	return new_name;
}

static int event_class_add_field(struct bt_ctf_event_class *event_class,
				 struct bt_ctf_field_type *type,
				 struct tep_format_field *field)
{
	struct bt_ctf_field_type *t = NULL;
	char *name;
	int dup = 1;
	int ret;

	/* alias was already assigned */
	if (field->alias != field->name)
		return bt_ctf_event_class_add_field(event_class, type,
				(char *)field->alias);

	name = field->name;

	/* If 'name' is a keyword, add a prefix. */
	if (bt_ctf_validate_identifier(name))
		name = change_name(name, field->name, -1);

	if (!name) {
		pr_err("Failed to fix invalid identifier.\n");
		return -1;
	}
	while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
		bt_ctf_field_type_put(t);
		name = change_name(name, field->name, dup++);
		if (!name) {
			pr_err("Failed to create dup name for '%s'\n", field->name);
			return -1;
		}
	}

	ret = bt_ctf_event_class_add_field(event_class, type, name);
	if (!ret)
		field->alias = name;

	return ret;
}

static int add_tracepoint_fields_types(struct ctf_writer *cw,
				       struct tep_format_field *fields,
				       struct bt_ctf_event_class *event_class)
{
	struct tep_format_field *field;
	int ret;

	for (field = fields; field; field = field->next) {
		struct bt_ctf_field_type *type;
		unsigned long flags = field->flags;

		pr2("  field '%s'\n", field->name);

		type = get_tracepoint_field_type(cw, field);
		if (!type)
			return -1;

		/*
		 * A string is an array of chars. For this we use the string
		 * type and don't care that it is an array. What we don't
		 * support is an array of strings.
		 */
		if (flags & TEP_FIELD_IS_STRING)
			flags &= ~TEP_FIELD_IS_ARRAY;

		if (flags & TEP_FIELD_IS_ARRAY)
			type = bt_ctf_field_type_array_create(type, field->arraylen);

		ret = event_class_add_field(event_class, type, field);

		if (flags & TEP_FIELD_IS_ARRAY)
			bt_ctf_field_type_put(type);

		if (ret) {
			pr_err("Failed to add field '%s': %d\n",
			       field->name, ret);
			return -1;
		}
	}

	return 0;
}

static int add_tracepoint_types(struct ctf_writer *cw,
				struct evsel *evsel,
				struct bt_ctf_event_class *class)
{
	struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
	struct tep_format_field *fields = evsel->tp_format->format.fields;
	int ret;

	ret = add_tracepoint_fields_types(cw, common_fields, class);
	if (!ret)
		ret = add_tracepoint_fields_types(cw, fields, class);

	return ret;
}

static int add_bpf_output_types(struct ctf_writer *cw,
				struct bt_ctf_event_class *class)
{
	struct bt_ctf_field_type *len_type = cw->data.u32;
	struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
	struct bt_ctf_field_type *seq_type;
	int ret;

	ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
	if (ret)
		return ret;

	seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
	if (!seq_type)
		return -1;

	return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
}

static int add_generic_types(struct ctf_writer *cw, struct evsel *evsel,
			     struct bt_ctf_event_class *event_class)
{
	u64 type = evsel->core.attr.sample_type;

	/*
	 * missing:
	 *   PERF_SAMPLE_TIME         - not needed as we have it in
	 *                              ctf event header
	 *   PERF_SAMPLE_READ         - TODO
	 *   PERF_SAMPLE_CALLCHAIN    - TODO
	 *   PERF_SAMPLE_RAW          - tracepoint fields and BPF output
	 *                              are handled separately
	 *   PERF_SAMPLE_BRANCH_STACK - TODO
	 *   PERF_SAMPLE_REGS_USER    - TODO
	 *   PERF_SAMPLE_STACK_USER   - TODO
	 */

#define ADD_FIELD(cl, t, n)						\
	do {								\
		pr2("  field '%s'\n", n);				\
		if (bt_ctf_event_class_add_field(cl, t, n)) {		\
			pr_err("Failed to add field '%s'\n", n);	\
			return -1;					\
		}							\
	} while (0)

	if (type & PERF_SAMPLE_IP)
		ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");

	if (type & PERF_SAMPLE_TID) {
		ADD_FIELD(event_class, cw->data.s32, "perf_tid");
		ADD_FIELD(event_class, cw->data.s32, "perf_pid");
	}

	if ((type & PERF_SAMPLE_ID) ||
	    (type & PERF_SAMPLE_IDENTIFIER))
		ADD_FIELD(event_class, cw->data.u64, "perf_id");

	if (type & PERF_SAMPLE_STREAM_ID)
		ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");

	if (type & PERF_SAMPLE_PERIOD)
		ADD_FIELD(event_class, cw->data.u64, "perf_period");

	if (type & PERF_SAMPLE_WEIGHT)
		ADD_FIELD(event_class, cw->data.u64, "perf_weight");

	if (type & PERF_SAMPLE_DATA_SRC)
		ADD_FIELD(event_class, cw->data.u64, "perf_data_src");

	if (type & PERF_SAMPLE_TRANSACTION)
		ADD_FIELD(event_class, cw->data.u64, "perf_transaction");

	if (type & PERF_SAMPLE_CALLCHAIN) {
		ADD_FIELD(event_class, cw->data.u32, "perf_callchain_size");
		ADD_FIELD(event_class,
			  bt_ctf_field_type_sequence_create(
				  cw->data.u64_hex, "perf_callchain_size"),
			  "perf_callchain");
	}

#undef ADD_FIELD
	return 0;
}

static int add_event(struct ctf_writer *cw, struct evsel *evsel)
{
	struct bt_ctf_event_class *event_class;
	struct evsel_priv *priv;
	const char *name = evsel__name(evsel);
	int ret;

	pr("Adding event '%s' (type %d)\n", name, evsel->core.attr.type);

	event_class = bt_ctf_event_class_create(name);
	if (!event_class)
		return -1;

	ret = add_generic_types(cw, evsel, event_class);
	if (ret)
		goto err;

	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
		ret = add_tracepoint_types(cw, evsel, event_class);
		if (ret)
			goto err;
	}

	if (evsel__is_bpf_output(evsel)) {
		ret = add_bpf_output_types(cw, event_class);
		if (ret)
			goto err;
	}

	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
	if (ret) {
		pr("Failed to add event class into stream.\n");
		goto err;
	}

	priv = malloc(sizeof(*priv));
	if (!priv)
		goto err;

	priv->event_class = event_class;
	evsel->priv = priv;
	return 0;

err:
	bt_ctf_event_class_put(event_class);
	pr_err("Failed to add event '%s'.\n", name);
	return -1;
}

static int setup_events(struct ctf_writer *cw, struct perf_session *session)
{
	struct evlist *evlist = session->evlist;
	struct evsel *evsel;
	int ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = add_event(cw, evsel);
		if (ret)
			return ret;
	}
	return 0;
}

#define __NON_SAMPLE_ADD_FIELD(t, n)						\
	do {									\
		pr2("  field '%s'\n", #n);					\
		if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\
			pr_err("Failed to add field '%s'\n", #n);		\
			return -1;						\
		}								\
	} while(0)

#define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body)		\
static int add_##_name##_event(struct ctf_writer *cw)		\
{								\
	struct bt_ctf_event_class *event_class;			\
	int ret;						\
								\
	pr("Adding "#_name" event\n");				\
	event_class = bt_ctf_event_class_create("perf_" #_name);\
	if (!event_class)					\
		return -1;					\
	body							\
								\
	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\
	if (ret) {						\
		pr("Failed to add event class '"#_name"' into stream.\n");\
		return ret;					\
	}							\
								\
	cw->_name##_class = event_class;			\
	bt_ctf_event_class_put(event_class);			\
	return 0;						\
}

__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(string, comm);
)

__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, ppid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(u32, ptid);
	__NON_SAMPLE_ADD_FIELD(u64, time);
)

__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, ppid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(u32, ptid);
	__NON_SAMPLE_ADD_FIELD(u64, time);
)

__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(u64_hex, start);
	__NON_SAMPLE_ADD_FIELD(string, filename);
)

__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap2,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(u64_hex, start);
	__NON_SAMPLE_ADD_FIELD(string, filename);
)
#undef __NON_SAMPLE_ADD_FIELD
#undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS

static int setup_non_sample_events(struct ctf_writer *cw,
				   struct perf_session *session __maybe_unused)
{
	int ret;

	ret = add_comm_event(cw);
	if (ret)
		return ret;
	ret = add_exit_event(cw);
	if (ret)
		return ret;
	ret = add_fork_event(cw);
	if (ret)
		return ret;
	ret = add_mmap_event(cw);
	if (ret)
		return ret;
	ret = add_mmap2_event(cw);
	if (ret)
		return ret;
	return 0;
}

static void cleanup_events(struct perf_session *session)
{
	struct evlist *evlist = session->evlist;
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		struct evsel_priv *priv;

		priv = evsel->priv;
		bt_ctf_event_class_put(priv->event_class);
		zfree(&evsel->priv);
	}

	evlist__delete(evlist);
	session->evlist = NULL;
}

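/*
 * The stream array is only allocated here; the per-CPU streams
 * themselves are created lazily by ctf_stream() the first time a
 * sample for that CPU shows up. The array is sized from the number
 * of available CPUs recorded in the perf.data header, falling back
 * to MAX_CPUS when that information is missing.
 */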
static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
{
	struct ctf_stream **stream;
	struct perf_header *ph = &session->header;
	int ncpus;

	/*
	 * Try to get the number of cpus used in the data file,
	 * if not present fall back to MAX_CPUS.
	 */
	ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;

	stream = zalloc(sizeof(*stream) * ncpus);
	if (!stream) {
		pr_err("Failed to allocate streams.\n");
		return -ENOMEM;
	}

	cw->stream = stream;
	cw->stream_cnt = ncpus;
	return 0;
}

static void free_streams(struct ctf_writer *cw)
{
	int cpu;

	for (cpu = 0; cpu < cw->stream_cnt; cpu++)
		ctf_stream__delete(cw->stream[cpu]);

	zfree(&cw->stream);
}

static int ctf_writer__setup_env(struct ctf_writer *cw,
				 struct perf_session *session)
{
	struct perf_header *header = &session->header;
	struct bt_ctf_writer *writer = cw->writer;

#define ADD(__n, __v)							\
do {									\
	if (bt_ctf_writer_add_environment_field(writer, __n, __v))	\
		return -1;						\
} while (0)

	ADD("host", header->env.hostname);
	ADD("sysname", "Linux");
	ADD("release", header->env.os_release);
	ADD("version", header->env.version);
	ADD("machine", header->env.arch);
	ADD("domain", "kernel");
	ADD("tracer_name", "perf");

#undef ADD
	return 0;
}

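/*
 * The CTF clock runs at 1 GHz so one tick equals one nanosecond of
 * perf time. With --tod the clock description becomes the recorded
 * clockid and the offset is set to (tod_ns - clockid_ns), so that
 * timestamps in the resulting trace read as wall-clock time.
 */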
static int ctf_writer__setup_clock(struct ctf_writer *cw,
				   struct perf_session *session,
				   bool tod)
{
	struct bt_ctf_clock *clock = cw->clock;
	const char *desc = "perf clock";
	int64_t offset = 0;

	if (tod) {
		struct perf_env *env = &session->header.env;

		if (!env->clock.enabled) {
			pr_err("Can't provide --tod time, missing clock data. "
			       "Please record with -k/--clockid option.\n");
			return -1;
		}

		desc = clockid_name(env->clock.clockid);
		offset = env->clock.tod_ns - env->clock.clockid_ns;
	}

#define SET(__n, __v)				\
do {						\
	if (bt_ctf_clock_set_##__n(clock, __v))	\
		return -1;			\
} while (0)

	SET(frequency, 1000000000);
	SET(offset, offset);
	SET(description, desc);
	SET(precision, 10);
	SET(is_absolute, 0);

#undef SET
	return 0;
}

static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
{
	struct bt_ctf_field_type *type;

	type = bt_ctf_field_type_integer_create(size);
	if (!type)
		return NULL;

	if (sign &&
	    bt_ctf_field_type_integer_set_signed(type, 1))
		goto err;

	if (hex &&
	    bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
		goto err;

#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
#else
	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
#endif

	pr2("Created type: INTEGER %d-bit %ssigned %s\n",
	    size, sign ? "" : "un", hex ? "hex" : "");
	return type;

err:
	bt_ctf_field_type_put(type);
	return NULL;
}

static void ctf_writer__cleanup_data(struct ctf_writer *cw)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
		bt_ctf_field_type_put(cw->data.array[i]);
}

static int ctf_writer__init_data(struct ctf_writer *cw)
{
#define CREATE_INT_TYPE(type, size, sign, hex)		\
do {							\
	(type) = create_int_type(size, sign, hex);	\
	if (!(type))					\
		goto err;				\
} while (0)

	CREATE_INT_TYPE(cw->data.s64, 64, true, false);
	CREATE_INT_TYPE(cw->data.u64, 64, false, false);
	CREATE_INT_TYPE(cw->data.s32, 32, true, false);
	CREATE_INT_TYPE(cw->data.u32, 32, false, false);
	CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
	CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);

	cw->data.string = bt_ctf_field_type_string_create();
	if (cw->data.string)
		return 0;

err:
	ctf_writer__cleanup_data(cw);
	pr_err("Failed to create data types.\n");
	return -1;
}

static void ctf_writer__cleanup(struct ctf_writer *cw)
{
	ctf_writer__cleanup_data(cw);

	bt_ctf_clock_put(cw->clock);
	free_streams(cw);
	bt_ctf_stream_class_put(cw->stream_class);
	bt_ctf_writer_put(cw->writer);

	/* and NULL all the pointers */
	memset(cw, 0, sizeof(*cw));
}

static int ctf_writer__init(struct ctf_writer *cw, const char *path,
			    struct perf_session *session, bool tod)
{
	struct bt_ctf_writer *writer;
	struct bt_ctf_stream_class *stream_class;
	struct bt_ctf_clock *clock;
	struct bt_ctf_field_type *pkt_ctx_type;
	int ret;

	/* CTF writer */
	writer = bt_ctf_writer_create(path);
	if (!writer)
		goto err;

	cw->writer = writer;

	/* CTF clock */
	clock = bt_ctf_clock_create("perf_clock");
	if (!clock) {
		pr("Failed to create CTF clock.\n");
		goto err_cleanup;
	}

	cw->clock = clock;

	if (ctf_writer__setup_clock(cw, session, tod)) {
		pr("Failed to setup CTF clock.\n");
		goto err_cleanup;
	}

	/* CTF stream class */
	stream_class = bt_ctf_stream_class_create("perf_stream");
	if (!stream_class) {
		pr("Failed to create CTF stream class.\n");
		goto err_cleanup;
	}

	cw->stream_class = stream_class;

	/* CTF clock stream setup */
	if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
		pr("Failed to assign CTF clock to stream class.\n");
		goto err_cleanup;
	}

	if (ctf_writer__init_data(cw))
		goto err_cleanup;

	/* Add cpu_id for packet context */
	pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
	if (!pkt_ctx_type)
		goto err_cleanup;

	ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
	bt_ctf_field_type_put(pkt_ctx_type);
	if (ret)
		goto err_cleanup;

	/* CTF clock writer setup */
	if (bt_ctf_writer_add_clock(writer, clock)) {
		pr("Failed to assign CTF clock to writer.\n");
		goto err_cleanup;
	}

	return 0;

err_cleanup:
	ctf_writer__cleanup(cw);
err:
	pr_err("Failed to setup CTF writer.\n");
	return -1;
}

static int ctf_writer__flush_streams(struct ctf_writer *cw)
{
	int cpu, ret = 0;

	for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
		ret = ctf_stream__flush(cw->stream[cpu]);

	return ret;
}

static int convert__config(const char *var, const char *value, void *cb)
{
	struct convert *c = cb;

	if (!strcmp(var, "convert.queue-size"))
		return perf_config_u64(&c->queue_size, var, value);

	return 0;
}

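/*
 * Entry point for 'perf data convert --to-ctf': set up a perf
 * session over the input file and a CTF writer for the output
 * directory, register the sample (and, with --all, the non-sample)
 * handlers above, process all events through the ordered-events
 * queue and flush every stream before reporting the totals.
 */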
int bt_convert__perf2ctf(const char *input, const char *path,
			 struct perf_data_convert_opts *opts)
{
	struct perf_session *session;
	struct perf_data data = {
		.path  = input,
		.mode  = PERF_DATA_MODE_READ,
		.force = opts->force,
	};
	struct convert c = {
		.tool = {
			.sample          = process_sample_event,
			.mmap            = perf_event__process_mmap,
			.mmap2           = perf_event__process_mmap2,
			.comm            = perf_event__process_comm,
			.exit            = perf_event__process_exit,
			.fork            = perf_event__process_fork,
			.lost            = perf_event__process_lost,
			.tracing_data    = perf_event__process_tracing_data,
			.build_id        = perf_event__process_build_id,
			.namespaces      = perf_event__process_namespaces,
			.ordered_events  = true,
			.ordering_requires_timestamps = true,
		},
	};
	struct ctf_writer *cw = &c.writer;
	int err;

	if (opts->all) {
		c.tool.comm = process_comm_event;
		c.tool.exit = process_exit_event;
		c.tool.fork = process_fork_event;
		c.tool.mmap = process_mmap_event;
		c.tool.mmap2 = process_mmap2_event;
	}

	err = perf_config(convert__config, &c);
	if (err)
		return err;

	err = -1;
	/* perf.data session */
	session = perf_session__new(&data, &c.tool);
	if (IS_ERR(session))
		return PTR_ERR(session);

	/* CTF writer */
	if (ctf_writer__init(cw, path, session, opts->tod))
		goto free_session;

	if (c.queue_size) {
		ordered_events__set_alloc_size(&session->ordered_events,
					       c.queue_size);
	}

	/* CTF writer env/clock setup */
	if (ctf_writer__setup_env(cw, session))
		goto free_writer;

	/* CTF events setup */
	if (setup_events(cw, session))
		goto free_writer;

	if (opts->all && setup_non_sample_events(cw, session))
		goto free_writer;

	if (setup_streams(cw, session))
		goto free_writer;

	err = perf_session__process_events(session);
	if (!err)
		err = ctf_writer__flush_streams(cw);
	else
		pr_err("Error during conversion.\n");

	fprintf(stderr,
		"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
		data.path, path);

	fprintf(stderr,
		"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
		(double) c.events_size / 1024.0 / 1024.0,
		c.events_count);

	if (!c.non_sample_count)
		fprintf(stderr, ") ]\n");
	else
		fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count);

	cleanup_events(session);
	perf_session__delete(session);
	ctf_writer__cleanup(cw);

	return err;

free_writer:
	ctf_writer__cleanup(cw);
free_session:
	perf_session__delete(session);
	pr_err("Error during conversion setup.\n");
	return err;
}
1/*
2 * CTF writing support via babeltrace.
3 *
4 * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
5 * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
6 *
7 * Released under the GPL v2. (and only v2, not any later version)
8 */
9
10#include <linux/compiler.h>
11#include <babeltrace/ctf-writer/writer.h>
12#include <babeltrace/ctf-writer/clock.h>
13#include <babeltrace/ctf-writer/stream.h>
14#include <babeltrace/ctf-writer/event.h>
15#include <babeltrace/ctf-writer/event-types.h>
16#include <babeltrace/ctf-writer/event-fields.h>
17#include <babeltrace/ctf-ir/utils.h>
18#include <babeltrace/ctf/events.h>
19#include <traceevent/event-parse.h>
20#include "asm/bug.h"
21#include "data-convert-bt.h"
22#include "session.h"
23#include "util.h"
24#include "debug.h"
25#include "tool.h"
26#include "evlist.h"
27#include "evsel.h"
28#include "machine.h"
29
30#define pr_N(n, fmt, ...) \
31 eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)
32
33#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
34#define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)
35
36#define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)
37
38struct evsel_priv {
39 struct bt_ctf_event_class *event_class;
40};
41
42#define MAX_CPUS 4096
43
44struct ctf_stream {
45 struct bt_ctf_stream *stream;
46 int cpu;
47 u32 count;
48};
49
50struct ctf_writer {
51 /* writer primitives */
52 struct bt_ctf_writer *writer;
53 struct ctf_stream **stream;
54 int stream_cnt;
55 struct bt_ctf_stream_class *stream_class;
56 struct bt_ctf_clock *clock;
57
58 /* data types */
59 union {
60 struct {
61 struct bt_ctf_field_type *s64;
62 struct bt_ctf_field_type *u64;
63 struct bt_ctf_field_type *s32;
64 struct bt_ctf_field_type *u32;
65 struct bt_ctf_field_type *string;
66 struct bt_ctf_field_type *u32_hex;
67 struct bt_ctf_field_type *u64_hex;
68 };
69 struct bt_ctf_field_type *array[6];
70 } data;
71};
72
73struct convert {
74 struct perf_tool tool;
75 struct ctf_writer writer;
76
77 u64 events_size;
78 u64 events_count;
79
80 /* Ordered events configured queue size. */
81 u64 queue_size;
82};
83
84static int value_set(struct bt_ctf_field_type *type,
85 struct bt_ctf_event *event,
86 const char *name, u64 val)
87{
88 struct bt_ctf_field *field;
89 bool sign = bt_ctf_field_type_integer_get_signed(type);
90 int ret;
91
92 field = bt_ctf_field_create(type);
93 if (!field) {
94 pr_err("failed to create a field %s\n", name);
95 return -1;
96 }
97
98 if (sign) {
99 ret = bt_ctf_field_signed_integer_set_value(field, val);
100 if (ret) {
101 pr_err("failed to set field value %s\n", name);
102 goto err;
103 }
104 } else {
105 ret = bt_ctf_field_unsigned_integer_set_value(field, val);
106 if (ret) {
107 pr_err("failed to set field value %s\n", name);
108 goto err;
109 }
110 }
111
112 ret = bt_ctf_event_set_payload(event, name, field);
113 if (ret) {
114 pr_err("failed to set payload %s\n", name);
115 goto err;
116 }
117
118 pr2(" SET [%s = %" PRIu64 "]\n", name, val);
119
120err:
121 bt_ctf_field_put(field);
122 return ret;
123}
124
125#define __FUNC_VALUE_SET(_name, _val_type) \
126static __maybe_unused int value_set_##_name(struct ctf_writer *cw, \
127 struct bt_ctf_event *event, \
128 const char *name, \
129 _val_type val) \
130{ \
131 struct bt_ctf_field_type *type = cw->data._name; \
132 return value_set(type, event, name, (u64) val); \
133}
134
135#define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)
136
137FUNC_VALUE_SET(s32)
138FUNC_VALUE_SET(u32)
139FUNC_VALUE_SET(s64)
140FUNC_VALUE_SET(u64)
141__FUNC_VALUE_SET(u64_hex, u64)
142
143static struct bt_ctf_field_type*
144get_tracepoint_field_type(struct ctf_writer *cw, struct format_field *field)
145{
146 unsigned long flags = field->flags;
147
148 if (flags & FIELD_IS_STRING)
149 return cw->data.string;
150
151 if (!(flags & FIELD_IS_SIGNED)) {
152 /* unsigned long are mostly pointers */
153 if (flags & FIELD_IS_LONG || flags & FIELD_IS_POINTER)
154 return cw->data.u64_hex;
155 }
156
157 if (flags & FIELD_IS_SIGNED) {
158 if (field->size == 8)
159 return cw->data.s64;
160 else
161 return cw->data.s32;
162 }
163
164 if (field->size == 8)
165 return cw->data.u64;
166 else
167 return cw->data.u32;
168}
169
170static unsigned long long adjust_signedness(unsigned long long value_int, int size)
171{
172 unsigned long long value_mask;
173
174 /*
175 * value_mask = (1 << (size * 8 - 1)) - 1.
176 * Directly set value_mask for code readers.
177 */
178 switch (size) {
179 case 1:
180 value_mask = 0x7fULL;
181 break;
182 case 2:
183 value_mask = 0x7fffULL;
184 break;
185 case 4:
186 value_mask = 0x7fffffffULL;
187 break;
188 case 8:
189 /*
190 * For 64 bit value, return it self. There is no need
191 * to fill high bit.
192 */
193 /* Fall through */
194 default:
195 /* BUG! */
196 return value_int;
197 }
198
199 /* If it is a positive value, don't adjust. */
200 if ((value_int & (~0ULL - value_mask)) == 0)
201 return value_int;
202
203 /* Fill upper part of value_int with 1 to make it a negative long long. */
204 return (value_int & value_mask) | ~value_mask;
205}
206
207static int add_tracepoint_field_value(struct ctf_writer *cw,
208 struct bt_ctf_event_class *event_class,
209 struct bt_ctf_event *event,
210 struct perf_sample *sample,
211 struct format_field *fmtf)
212{
213 struct bt_ctf_field_type *type;
214 struct bt_ctf_field *array_field;
215 struct bt_ctf_field *field;
216 const char *name = fmtf->name;
217 void *data = sample->raw_data;
218 unsigned long flags = fmtf->flags;
219 unsigned int n_items;
220 unsigned int i;
221 unsigned int offset;
222 unsigned int len;
223 int ret;
224
225 name = fmtf->alias;
226 offset = fmtf->offset;
227 len = fmtf->size;
228 if (flags & FIELD_IS_STRING)
229 flags &= ~FIELD_IS_ARRAY;
230
231 if (flags & FIELD_IS_DYNAMIC) {
232 unsigned long long tmp_val;
233
234 tmp_val = pevent_read_number(fmtf->event->pevent,
235 data + offset, len);
236 offset = tmp_val;
237 len = offset >> 16;
238 offset &= 0xffff;
239 }
240
241 if (flags & FIELD_IS_ARRAY) {
242
243 type = bt_ctf_event_class_get_field_by_name(
244 event_class, name);
245 array_field = bt_ctf_field_create(type);
246 bt_ctf_field_type_put(type);
247 if (!array_field) {
248 pr_err("Failed to create array type %s\n", name);
249 return -1;
250 }
251
252 len = fmtf->size / fmtf->arraylen;
253 n_items = fmtf->arraylen;
254 } else {
255 n_items = 1;
256 array_field = NULL;
257 }
258
259 type = get_tracepoint_field_type(cw, fmtf);
260
261 for (i = 0; i < n_items; i++) {
262 if (flags & FIELD_IS_ARRAY)
263 field = bt_ctf_field_array_get_field(array_field, i);
264 else
265 field = bt_ctf_field_create(type);
266
267 if (!field) {
268 pr_err("failed to create a field %s\n", name);
269 return -1;
270 }
271
272 if (flags & FIELD_IS_STRING)
273 ret = bt_ctf_field_string_set_value(field,
274 data + offset + i * len);
275 else {
276 unsigned long long value_int;
277
278 value_int = pevent_read_number(
279 fmtf->event->pevent,
280 data + offset + i * len, len);
281
282 if (!(flags & FIELD_IS_SIGNED))
283 ret = bt_ctf_field_unsigned_integer_set_value(
284 field, value_int);
285 else
286 ret = bt_ctf_field_signed_integer_set_value(
287 field, adjust_signedness(value_int, len));
288 }
289
290 if (ret) {
291 pr_err("failed to set file value %s\n", name);
292 goto err_put_field;
293 }
294 if (!(flags & FIELD_IS_ARRAY)) {
295 ret = bt_ctf_event_set_payload(event, name, field);
296 if (ret) {
297 pr_err("failed to set payload %s\n", name);
298 goto err_put_field;
299 }
300 }
301 bt_ctf_field_put(field);
302 }
303 if (flags & FIELD_IS_ARRAY) {
304 ret = bt_ctf_event_set_payload(event, name, array_field);
305 if (ret) {
306 pr_err("Failed add payload array %s\n", name);
307 return -1;
308 }
309 bt_ctf_field_put(array_field);
310 }
311 return 0;
312
313err_put_field:
314 bt_ctf_field_put(field);
315 return -1;
316}
317
318static int add_tracepoint_fields_values(struct ctf_writer *cw,
319 struct bt_ctf_event_class *event_class,
320 struct bt_ctf_event *event,
321 struct format_field *fields,
322 struct perf_sample *sample)
323{
324 struct format_field *field;
325 int ret;
326
327 for (field = fields; field; field = field->next) {
328 ret = add_tracepoint_field_value(cw, event_class, event, sample,
329 field);
330 if (ret)
331 return -1;
332 }
333 return 0;
334}
335
336static int add_tracepoint_values(struct ctf_writer *cw,
337 struct bt_ctf_event_class *event_class,
338 struct bt_ctf_event *event,
339 struct perf_evsel *evsel,
340 struct perf_sample *sample)
341{
342 struct format_field *common_fields = evsel->tp_format->format.common_fields;
343 struct format_field *fields = evsel->tp_format->format.fields;
344 int ret;
345
346 ret = add_tracepoint_fields_values(cw, event_class, event,
347 common_fields, sample);
348 if (!ret)
349 ret = add_tracepoint_fields_values(cw, event_class, event,
350 fields, sample);
351
352 return ret;
353}
354
355static int
356add_bpf_output_values(struct bt_ctf_event_class *event_class,
357 struct bt_ctf_event *event,
358 struct perf_sample *sample)
359{
360 struct bt_ctf_field_type *len_type, *seq_type;
361 struct bt_ctf_field *len_field, *seq_field;
362 unsigned int raw_size = sample->raw_size;
363 unsigned int nr_elements = raw_size / sizeof(u32);
364 unsigned int i;
365 int ret;
366
367 if (nr_elements * sizeof(u32) != raw_size)
368 pr_warning("Incorrect raw_size (%u) in bpf output event, skip %lu bytes\n",
369 raw_size, nr_elements * sizeof(u32) - raw_size);
370
371 len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
372 len_field = bt_ctf_field_create(len_type);
373 if (!len_field) {
374 pr_err("failed to create 'raw_len' for bpf output event\n");
375 ret = -1;
376 goto put_len_type;
377 }
378
379 ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
380 if (ret) {
381 pr_err("failed to set field value for raw_len\n");
382 goto put_len_field;
383 }
384 ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
385 if (ret) {
386 pr_err("failed to set payload to raw_len\n");
387 goto put_len_field;
388 }
389
390 seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
391 seq_field = bt_ctf_field_create(seq_type);
392 if (!seq_field) {
393 pr_err("failed to create 'raw_data' for bpf output event\n");
394 ret = -1;
395 goto put_seq_type;
396 }
397
398 ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
399 if (ret) {
400 pr_err("failed to set length of 'raw_data'\n");
401 goto put_seq_field;
402 }
403
404 for (i = 0; i < nr_elements; i++) {
405 struct bt_ctf_field *elem_field =
406 bt_ctf_field_sequence_get_field(seq_field, i);
407
408 ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
409 ((u32 *)(sample->raw_data))[i]);
410
411 bt_ctf_field_put(elem_field);
412 if (ret) {
413 pr_err("failed to set raw_data[%d]\n", i);
414 goto put_seq_field;
415 }
416 }
417
418 ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
419 if (ret)
420 pr_err("failed to set payload for raw_data\n");
421
422put_seq_field:
423 bt_ctf_field_put(seq_field);
424put_seq_type:
425 bt_ctf_field_type_put(seq_type);
426put_len_field:
427 bt_ctf_field_put(len_field);
428put_len_type:
429 bt_ctf_field_type_put(len_type);
430 return ret;
431}
432
static int add_generic_values(struct ctf_writer *cw,
			      struct bt_ctf_event *event,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	int ret;

	/*
	 * missing:
	 *   PERF_SAMPLE_TIME         - not needed as we have it in
	 *                              ctf event header
	 *   PERF_SAMPLE_READ         - TODO
	 *   PERF_SAMPLE_CALLCHAIN    - TODO
	 *   PERF_SAMPLE_RAW          - tracepoint fields are handled separately
	 *   PERF_SAMPLE_BRANCH_STACK - TODO
	 *   PERF_SAMPLE_REGS_USER    - TODO
	 *   PERF_SAMPLE_STACK_USER   - TODO
	 */

	if (type & PERF_SAMPLE_IP) {
		ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_TID) {
		ret = value_set_s32(cw, event, "perf_tid", sample->tid);
		if (ret)
			return -1;

		ret = value_set_s32(cw, event, "perf_pid", sample->pid);
		if (ret)
			return -1;
	}

	if ((type & PERF_SAMPLE_ID) ||
	    (type & PERF_SAMPLE_IDENTIFIER)) {
		ret = value_set_u64(cw, event, "perf_id", sample->id);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		ret = value_set_u64(cw, event, "perf_period", sample->period);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		ret = value_set_u64(cw, event, "perf_weight", sample->weight);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		ret = value_set_u64(cw, event, "perf_data_src",
				    sample->data_src);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		ret = value_set_u64(cw, event, "perf_transaction",
				    sample->transaction);
		if (ret)
			return -1;
	}

	return 0;
}

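/*
 * Flush the per-CPU CTF stream to disk and reset its buffered-event
 * counter.  Safe to call with a NULL stream.
 */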
static int ctf_stream__flush(struct ctf_stream *cs)
{
	int err = 0;

	if (cs) {
		err = bt_ctf_stream_flush(cs->stream);
		if (err)
			pr_err("CTF stream %d flush failed\n", cs->cpu);

		pr("Flush stream for cpu %d (%u samples)\n",
		   cs->cpu, cs->count);

		cs->count = 0;
	}

	return err;
}

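/*
 * Create a per-CPU stream and record the CPU number in its packet
 * context, so that trace readers can attribute packets to CPUs.
 */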
static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
{
	struct ctf_stream *cs;
	struct bt_ctf_field *pkt_ctx = NULL;
	struct bt_ctf_field *cpu_field = NULL;
	struct bt_ctf_stream *stream = NULL;
	int ret;

	cs = zalloc(sizeof(*cs));
	if (!cs) {
		pr_err("Failed to allocate ctf stream\n");
		return NULL;
	}

	stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
	if (!stream) {
		pr_err("Failed to create CTF stream\n");
		goto out;
	}

	pkt_ctx = bt_ctf_stream_get_packet_context(stream);
	if (!pkt_ctx) {
		pr_err("Failed to obtain packet context\n");
		goto out;
	}

	cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
	bt_ctf_field_put(pkt_ctx);
	if (!cpu_field) {
		pr_err("Failed to obtain cpu field\n");
		goto out;
	}

	ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
	if (ret) {
		pr_err("Failed to update CPU number\n");
		goto out;
	}

	bt_ctf_field_put(cpu_field);

	cs->cpu = cpu;
	cs->stream = stream;
	return cs;

out:
	if (cpu_field)
		bt_ctf_field_put(cpu_field);
	if (stream)
		bt_ctf_stream_put(stream);

	free(cs);
	return NULL;
}

static void ctf_stream__delete(struct ctf_stream *cs)
{
	if (cs) {
		bt_ctf_stream_put(cs->stream);
		free(cs);
	}
}

static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
{
	struct ctf_stream *cs = cw->stream[cpu];

	if (!cs) {
		cs = ctf_stream__create(cw, cpu);
		cw->stream[cpu] = cs;
	}

	return cs;
}

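/*
 * Map a sample to the index of its per-CPU stream.  Samples without
 * PERF_SAMPLE_CPU, or with a CPU number outside the allocated stream
 * range, are routed to stream 0.
 */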
static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
			  struct perf_evsel *evsel)
{
	int cpu = 0;

	if (evsel->attr.sample_type & PERF_SAMPLE_CPU)
		cpu = sample->cpu;

	if (cpu >= cw->stream_cnt) {
		pr_err("Event was recorded for CPU %d, limit is at %d.\n",
		       cpu, cw->stream_cnt);
		cpu = 0;
	}

	return cpu;
}

#define STREAM_FLUSH_COUNT 100000

/*
 * Currently we have no way to determine the time for the stream flush
 * other than to keep track of the number of events and check it
 * against a threshold.
 */
static bool is_flush_needed(struct ctf_stream *cs)
{
	return cs->count >= STREAM_FLUSH_COUNT;
}

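/*
 * Per-sample conversion callback: build a CTF event from the perf
 * sample, append it to the per-CPU stream and flush that stream once
 * STREAM_FLUSH_COUNT events have been buffered.
 */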
static int process_sample_event(struct perf_tool *tool,
				union perf_event *_event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine __maybe_unused)
{
	struct convert *c = container_of(tool, struct convert, tool);
	struct evsel_priv *priv = evsel->priv;
	struct ctf_writer *cw = &c->writer;
	struct ctf_stream *cs;
	struct bt_ctf_event_class *event_class;
	struct bt_ctf_event *event;
	int ret;

	if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
		return 0;

	event_class = priv->event_class;

	/* update stats */
	c->events_count++;
	c->events_size += _event->header.size;

	pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);

	event = bt_ctf_event_create(event_class);
	if (!event) {
		pr_err("Failed to create a CTF event\n");
		return -1;
	}

	bt_ctf_clock_set_time(cw->clock, sample->time);

	ret = add_generic_values(cw, event, evsel, sample);
	if (ret)
		return -1;

	if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
		ret = add_tracepoint_values(cw, event_class, event,
					    evsel, sample);
		if (ret)
			return -1;
	}

	if (perf_evsel__is_bpf_output(evsel)) {
		ret = add_bpf_output_values(event_class, event, sample);
		if (ret)
			return -1;
	}

	cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
	if (cs) {
		if (is_flush_needed(cs))
			ctf_stream__flush(cs);

		cs->count++;
		bt_ctf_stream_append_event(cs->stream, event);
	}

	bt_ctf_event_put(event);
	return cs ? 0 : -1;
}

/* If dup < 0, add a prefix. Else, add _dupl_X suffix. */
static char *change_name(char *name, char *orig_name, int dup)
{
	char *new_name = NULL;
	size_t len;

	if (!name)
		name = orig_name;

	if (dup >= 10)
		goto out;
	/*
	 * Add '_' prefix to potential keyword. According to
	 * Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652),
	 * further CTF spec updating may require us to use '$'.
	 */
	if (dup < 0)
		len = strlen(name) + sizeof("_");
	else
		len = strlen(orig_name) + sizeof("_dupl_X");

	new_name = malloc(len);
	if (!new_name)
		goto out;

	if (dup < 0)
		snprintf(new_name, len, "_%s", name);
	else
		snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);

out:
	if (name != orig_name)
		free(name);
	return new_name;
}

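/*
 * Add one tracepoint field to the event class.  Field names that are
 * not valid CTF identifiers get a '_' prefix, and names that collide
 * with an already-added field get a '_dupl_<n>' suffix; the final name
 * is remembered in field->alias for later value lookups.
 */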
static int event_class_add_field(struct bt_ctf_event_class *event_class,
				 struct bt_ctf_field_type *type,
				 struct format_field *field)
{
	struct bt_ctf_field_type *t = NULL;
	char *name;
	int dup = 1;
	int ret;

	/* alias was already assigned */
	if (field->alias != field->name)
		return bt_ctf_event_class_add_field(event_class, type,
						    (char *)field->alias);

	name = field->name;

	/* If 'name' is a keyword, add prefix. */
	if (bt_ctf_validate_identifier(name))
		name = change_name(name, field->name, -1);

	if (!name) {
		pr_err("Failed to fix invalid identifier.");
		return -1;
	}
	while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
		bt_ctf_field_type_put(t);
		name = change_name(name, field->name, dup++);
		if (!name) {
			pr_err("Failed to create dup name for '%s'\n", field->name);
			return -1;
		}
	}

	ret = bt_ctf_event_class_add_field(event_class, type, name);
	if (!ret)
		field->alias = name;

	return ret;
}

static int add_tracepoint_fields_types(struct ctf_writer *cw,
				       struct format_field *fields,
				       struct bt_ctf_event_class *event_class)
{
	struct format_field *field;
	int ret;

	for (field = fields; field; field = field->next) {
		struct bt_ctf_field_type *type;
		unsigned long flags = field->flags;

		pr2(" field '%s'\n", field->name);

		type = get_tracepoint_field_type(cw, field);
		if (!type)
			return -1;

		/*
		 * A string is an array of chars. For this we use the string
		 * type and don't care that it is an array. What we don't
		 * support is an array of strings.
		 */
		if (flags & FIELD_IS_STRING)
			flags &= ~FIELD_IS_ARRAY;

		if (flags & FIELD_IS_ARRAY)
			type = bt_ctf_field_type_array_create(type, field->arraylen);

		ret = event_class_add_field(event_class, type, field);

		if (flags & FIELD_IS_ARRAY)
			bt_ctf_field_type_put(type);

		if (ret) {
			pr_err("Failed to add field '%s': %d\n",
			       field->name, ret);
			return -1;
		}
	}

	return 0;
}

static int add_tracepoint_types(struct ctf_writer *cw,
				struct perf_evsel *evsel,
				struct bt_ctf_event_class *class)
{
	struct format_field *common_fields = evsel->tp_format->format.common_fields;
	struct format_field *fields = evsel->tp_format->format.fields;
	int ret;

	ret = add_tracepoint_fields_types(cw, common_fields, class);
	if (!ret)
		ret = add_tracepoint_fields_types(cw, fields, class);

	return ret;
}

static int add_bpf_output_types(struct ctf_writer *cw,
				struct bt_ctf_event_class *class)
{
	struct bt_ctf_field_type *len_type = cw->data.u32;
	struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
	struct bt_ctf_field_type *seq_type;
	int ret;

	ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
	if (ret)
		return ret;

	seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
	if (!seq_type)
		return -1;

	return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
}

static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel,
			     struct bt_ctf_event_class *event_class)
{
	u64 type = evsel->attr.sample_type;

	/*
	 * missing:
	 *   PERF_SAMPLE_TIME         - not needed as we have it in
	 *                              ctf event header
	 *   PERF_SAMPLE_READ         - TODO
	 *   PERF_SAMPLE_CALLCHAIN    - TODO
	 *   PERF_SAMPLE_RAW          - tracepoint fields and BPF output
	 *                              are handled separately
	 *   PERF_SAMPLE_BRANCH_STACK - TODO
	 *   PERF_SAMPLE_REGS_USER    - TODO
	 *   PERF_SAMPLE_STACK_USER   - TODO
	 */

#define ADD_FIELD(cl, t, n)						\
	do {								\
		pr2(" field '%s'\n", n);				\
		if (bt_ctf_event_class_add_field(cl, t, n)) {		\
			pr_err("Failed to add field '%s'\n", n);	\
			return -1;					\
		}							\
	} while (0)

	if (type & PERF_SAMPLE_IP)
		ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");

	if (type & PERF_SAMPLE_TID) {
		ADD_FIELD(event_class, cw->data.s32, "perf_tid");
		ADD_FIELD(event_class, cw->data.s32, "perf_pid");
	}

	if ((type & PERF_SAMPLE_ID) ||
	    (type & PERF_SAMPLE_IDENTIFIER))
		ADD_FIELD(event_class, cw->data.u64, "perf_id");

	if (type & PERF_SAMPLE_STREAM_ID)
		ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");

	if (type & PERF_SAMPLE_PERIOD)
		ADD_FIELD(event_class, cw->data.u64, "perf_period");

	if (type & PERF_SAMPLE_WEIGHT)
		ADD_FIELD(event_class, cw->data.u64, "perf_weight");

	if (type & PERF_SAMPLE_DATA_SRC)
		ADD_FIELD(event_class, cw->data.u64, "perf_data_src");

	if (type & PERF_SAMPLE_TRANSACTION)
		ADD_FIELD(event_class, cw->data.u64, "perf_transaction");

#undef ADD_FIELD
	return 0;
}

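/*
 * Create a CTF event class for the evsel: generic perf fields first,
 * then tracepoint or BPF output fields when applicable.  The class is
 * registered with the stream class and stashed in evsel->priv so that
 * process_sample_event() can find it.
 */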
static int add_event(struct ctf_writer *cw, struct perf_evsel *evsel)
{
	struct bt_ctf_event_class *event_class;
	struct evsel_priv *priv;
	const char *name = perf_evsel__name(evsel);
	int ret;

	pr("Adding event '%s' (type %d)\n", name, evsel->attr.type);

	event_class = bt_ctf_event_class_create(name);
	if (!event_class)
		return -1;

	ret = add_generic_types(cw, evsel, event_class);
	if (ret)
		goto err;

	if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
		ret = add_tracepoint_types(cw, evsel, event_class);
		if (ret)
			goto err;
	}

	if (perf_evsel__is_bpf_output(evsel)) {
		ret = add_bpf_output_types(cw, event_class);
		if (ret)
			goto err;
	}

	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
	if (ret) {
		pr("Failed to add event class into stream.\n");
		goto err;
	}

	priv = malloc(sizeof(*priv));
	if (!priv)
		goto err;

	priv->event_class = event_class;
	evsel->priv = priv;
	return 0;

err:
	bt_ctf_event_class_put(event_class);
	pr_err("Failed to add event '%s'.\n", name);
	return -1;
}

static int setup_events(struct ctf_writer *cw, struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;
	int ret;

	evlist__for_each(evlist, evsel) {
		ret = add_event(cw, evsel);
		if (ret)
			return ret;
	}
	return 0;
}

static void cleanup_events(struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		struct evsel_priv *priv;

		priv = evsel->priv;
		bt_ctf_event_class_put(priv->event_class);
		zfree(&evsel->priv);
	}

	perf_evlist__delete(evlist);
	session->evlist = NULL;
}

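/*
 * Allocate one stream slot per CPU seen in the perf.data header.  The
 * streams themselves are created lazily, on first use, by ctf_stream().
 */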
static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
{
	struct ctf_stream **stream;
	struct perf_header *ph = &session->header;
	int ncpus;

	/*
	 * Try to get the number of cpus used in the data file;
	 * if not present, fall back to MAX_CPUS.
	 */
	ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;

	stream = zalloc(sizeof(*stream) * ncpus);
	if (!stream) {
		pr_err("Failed to allocate streams.\n");
		return -ENOMEM;
	}

	cw->stream = stream;
	cw->stream_cnt = ncpus;
	return 0;
}

static void free_streams(struct ctf_writer *cw)
{
	int cpu;

	for (cpu = 0; cpu < cw->stream_cnt; cpu++)
		ctf_stream__delete(cw->stream[cpu]);

	free(cw->stream);
}

static int ctf_writer__setup_env(struct ctf_writer *cw,
				 struct perf_session *session)
{
	struct perf_header *header = &session->header;
	struct bt_ctf_writer *writer = cw->writer;

#define ADD(__n, __v)							\
do {									\
	if (bt_ctf_writer_add_environment_field(writer, __n, __v))	\
		return -1;						\
} while (0)

	ADD("host", header->env.hostname);
	ADD("sysname", "Linux");
	ADD("release", header->env.os_release);
	ADD("version", header->env.version);
	ADD("machine", header->env.arch);
	ADD("domain", "kernel");
	ADD("tracer_name", "perf");

#undef ADD
	return 0;
}

static int ctf_writer__setup_clock(struct ctf_writer *cw)
{
	struct bt_ctf_clock *clock = cw->clock;

	bt_ctf_clock_set_description(clock, "perf clock");

#define SET(__n, __v)					\
do {							\
	if (bt_ctf_clock_set_##__n(clock, __v))		\
		return -1;				\
} while (0)

	SET(frequency, 1000000000);
	SET(offset_s, 0);
	SET(offset, 0);
	SET(precision, 10);
	SET(is_absolute, 0);

#undef SET
	return 0;
}

static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
{
	struct bt_ctf_field_type *type;

	type = bt_ctf_field_type_integer_create(size);
	if (!type)
		return NULL;

	if (sign &&
	    bt_ctf_field_type_integer_set_signed(type, 1))
		goto err;

	if (hex &&
	    bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
		goto err;

#if __BYTE_ORDER == __BIG_ENDIAN
	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
#else
	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
#endif

	pr2("Created type: INTEGER %d-bit %ssigned %s\n",
	    size, sign ? "" : "un", hex ? "hex" : "");
	return type;

err:
	bt_ctf_field_type_put(type);
	return NULL;
}

static void ctf_writer__cleanup_data(struct ctf_writer *cw)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
		bt_ctf_field_type_put(cw->data.array[i]);
}

static int ctf_writer__init_data(struct ctf_writer *cw)
{
#define CREATE_INT_TYPE(type, size, sign, hex)		\
do {							\
	(type) = create_int_type(size, sign, hex);	\
	if (!(type))					\
		goto err;				\
} while (0)

	CREATE_INT_TYPE(cw->data.s64, 64, true, false);
	CREATE_INT_TYPE(cw->data.u64, 64, false, false);
	CREATE_INT_TYPE(cw->data.s32, 32, true, false);
	CREATE_INT_TYPE(cw->data.u32, 32, false, false);
	CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
	CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);

	cw->data.string = bt_ctf_field_type_string_create();
	if (cw->data.string)
		return 0;

err:
	ctf_writer__cleanup_data(cw);
	pr_err("Failed to create data types.\n");
	return -1;
}

static void ctf_writer__cleanup(struct ctf_writer *cw)
{
	ctf_writer__cleanup_data(cw);

	bt_ctf_clock_put(cw->clock);
	free_streams(cw);
	bt_ctf_stream_class_put(cw->stream_class);
	bt_ctf_writer_put(cw->writer);

	/* and NULL all the pointers */
	memset(cw, 0, sizeof(*cw));
}

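/*
 * Bring up the CTF writer for the output path: create the writer, the
 * perf_clock, the stream class with a cpu_id packet context field, and
 * the integer/string field types used for the generic perf fields.
 */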
static int ctf_writer__init(struct ctf_writer *cw, const char *path)
{
	struct bt_ctf_writer *writer;
	struct bt_ctf_stream_class *stream_class;
	struct bt_ctf_clock *clock;
	struct bt_ctf_field_type *pkt_ctx_type;
	int ret;

	/* CTF writer */
	writer = bt_ctf_writer_create(path);
	if (!writer)
		goto err;

	cw->writer = writer;

	/* CTF clock */
	clock = bt_ctf_clock_create("perf_clock");
	if (!clock) {
		pr("Failed to create CTF clock.\n");
		goto err_cleanup;
	}

	cw->clock = clock;

	if (ctf_writer__setup_clock(cw)) {
		pr("Failed to setup CTF clock.\n");
		goto err_cleanup;
	}

	/* CTF stream class */
	stream_class = bt_ctf_stream_class_create("perf_stream");
	if (!stream_class) {
		pr("Failed to create CTF stream class.\n");
		goto err_cleanup;
	}

	cw->stream_class = stream_class;

	/* CTF clock stream setup */
	if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
		pr("Failed to assign CTF clock to stream class.\n");
		goto err_cleanup;
	}

	if (ctf_writer__init_data(cw))
		goto err_cleanup;

	/* Add cpu_id for packet context */
	pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
	if (!pkt_ctx_type)
		goto err_cleanup;

	ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
	bt_ctf_field_type_put(pkt_ctx_type);
	if (ret)
		goto err_cleanup;

	/* CTF clock writer setup */
	if (bt_ctf_writer_add_clock(writer, clock)) {
		pr("Failed to assign CTF clock to writer.\n");
		goto err_cleanup;
	}

	return 0;

err_cleanup:
	ctf_writer__cleanup(cw);
err:
	pr_err("Failed to setup CTF writer.\n");
	return -1;
}

static int ctf_writer__flush_streams(struct ctf_writer *cw)
{
	int cpu, ret = 0;

	for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
		ret = ctf_stream__flush(cw->stream[cpu]);

	return ret;
}

static int convert__config(const char *var, const char *value, void *cb)
{
	struct convert *c = cb;

	if (!strcmp(var, "convert.queue-size")) {
		c->queue_size = perf_config_u64(var, value);
		return 0;
	}

	return 0;
}

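/*
 * Top-level conversion entry point: open the perf.data file at 'input',
 * replay its events through the tool callbacks above and write the
 * resulting CTF trace to 'path'.
 */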
int bt_convert__perf2ctf(const char *input, const char *path, bool force)
{
	struct perf_session *session;
	struct perf_data_file file = {
		.path = input,
		.mode = PERF_DATA_MODE_READ,
		.force = force,
	};
	struct convert c = {
		.tool = {
			.sample = process_sample_event,
			.mmap = perf_event__process_mmap,
			.mmap2 = perf_event__process_mmap2,
			.comm = perf_event__process_comm,
			.exit = perf_event__process_exit,
			.fork = perf_event__process_fork,
			.lost = perf_event__process_lost,
			.tracing_data = perf_event__process_tracing_data,
			.build_id = perf_event__process_build_id,
			.ordered_events = true,
			.ordering_requires_timestamps = true,
		},
	};
	struct ctf_writer *cw = &c.writer;
	int err = -1;

	perf_config(convert__config, &c);

	/* CTF writer */
	if (ctf_writer__init(cw, path))
		return -1;

	/* perf.data session */
	session = perf_session__new(&file, 0, &c.tool);
	if (!session)
		goto free_writer;

	if (c.queue_size) {
		ordered_events__set_alloc_size(&session->ordered_events,
					       c.queue_size);
	}

	/* CTF writer env/clock setup */
	if (ctf_writer__setup_env(cw, session))
		goto free_session;

	/* CTF events setup */
	if (setup_events(cw, session))
		goto free_session;

	if (setup_streams(cw, session))
		goto free_session;

	err = perf_session__process_events(session);
	if (!err)
		err = ctf_writer__flush_streams(cw);
	else
		pr_err("Error during conversion.\n");

	fprintf(stderr,
		"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
		file.path, path);

	fprintf(stderr,
		"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples) ]\n",
		(double) c.events_size / 1024.0 / 1024.0,
		c.events_count);

	cleanup_events(session);
	perf_session__delete(session);
	ctf_writer__cleanup(cw);

	return err;

free_session:
	perf_session__delete(session);
free_writer:
	ctf_writer__cleanup(cw);
	pr_err("Error during conversion setup.\n");
	return err;
}