v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * CTF writing support via babeltrace.
   4 *
   5 * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
   6 * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
   7 */
   8
   9#include <errno.h>
  10#include <inttypes.h>
  11#include <linux/compiler.h>
  12#include <linux/kernel.h>
  13#include <linux/zalloc.h>
  14#include <babeltrace/ctf-writer/writer.h>
  15#include <babeltrace/ctf-writer/clock.h>
  16#include <babeltrace/ctf-writer/stream.h>
  17#include <babeltrace/ctf-writer/event.h>
  18#include <babeltrace/ctf-writer/event-types.h>
  19#include <babeltrace/ctf-writer/event-fields.h>
  20#include <babeltrace/ctf-ir/utils.h>
  21#include <babeltrace/ctf/events.h>
  22#include "asm/bug.h"
  23#include "data-convert.h"
  24#include "session.h"
  25#include "debug.h"
  26#include "tool.h"
  27#include "evlist.h"
  28#include "evsel.h"
  29#include "machine.h"
  30#include "config.h"
  31#include <linux/ctype.h>
  32#include <linux/err.h>
  33#include <linux/time64.h>
  34#include "util.h"
  35#include "clockid.h"
  36#include "util/sample.h"
  37
  38#ifdef HAVE_LIBTRACEEVENT
  39#include <traceevent/event-parse.h>
  40#endif
  41
  42#define pr_N(n, fmt, ...) \
  43	eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)
  44
  45#define pr(fmt, ...)  pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
  46#define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)
  47
  48#define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)
  49
  50struct evsel_priv {
  51	struct bt_ctf_event_class *event_class;
  52};
  53
  54#define MAX_CPUS	4096
  55
  56struct ctf_stream {
  57	struct bt_ctf_stream *stream;
  58	int cpu;
  59	u32 count;
  60};
  61
  62struct ctf_writer {
  63	/* writer primitives */
  64	struct bt_ctf_writer		 *writer;
  65	struct ctf_stream		**stream;
  66	int				  stream_cnt;
  67	struct bt_ctf_stream_class	 *stream_class;
  68	struct bt_ctf_clock		 *clock;
  69
  70	/* data types */
  71	union {
  72		struct {
  73			struct bt_ctf_field_type	*s64;
  74			struct bt_ctf_field_type	*u64;
  75			struct bt_ctf_field_type	*s32;
  76			struct bt_ctf_field_type	*u32;
  77			struct bt_ctf_field_type	*string;
  78			struct bt_ctf_field_type	*u32_hex;
  79			struct bt_ctf_field_type	*u64_hex;
  80		};
  81		struct bt_ctf_field_type *array[6];
  82	} data;
  83	struct bt_ctf_event_class	*comm_class;
  84	struct bt_ctf_event_class	*exit_class;
  85	struct bt_ctf_event_class	*fork_class;
  86	struct bt_ctf_event_class	*mmap_class;
  87	struct bt_ctf_event_class	*mmap2_class;
  88};
  89
  90struct convert {
  91	struct perf_tool	tool;
  92	struct ctf_writer	writer;
  93
  94	u64			events_size;
  95	u64			events_count;
  96	u64			non_sample_count;
  97
  98	/* Ordered events configured queue size. */
  99	u64			queue_size;
 100};
 101
 102static int value_set(struct bt_ctf_field_type *type,
 103		     struct bt_ctf_event *event,
 104		     const char *name, u64 val)
 105{
 106	struct bt_ctf_field *field;
 107	bool sign = bt_ctf_field_type_integer_get_signed(type);
 108	int ret;
 109
 110	field = bt_ctf_field_create(type);
 111	if (!field) {
 112		pr_err("failed to create a field %s\n", name);
 113		return -1;
 114	}
 115
 116	if (sign) {
 117		ret = bt_ctf_field_signed_integer_set_value(field, val);
 118		if (ret) {
 119			pr_err("failed to set field value %s\n", name);
 120			goto err;
 121		}
 122	} else {
 123		ret = bt_ctf_field_unsigned_integer_set_value(field, val);
 124		if (ret) {
 125			pr_err("failed to set field value %s\n", name);
 126			goto err;
 127		}
 128	}
 129
 130	ret = bt_ctf_event_set_payload(event, name, field);
 131	if (ret) {
 132		pr_err("failed to set payload %s\n", name);
 133		goto err;
 134	}
 135
 136	pr2("  SET [%s = %" PRIu64 "]\n", name, val);
 137
 138err:
 139	bt_ctf_field_put(field);
 140	return ret;
 141}
 142
 143#define __FUNC_VALUE_SET(_name, _val_type)				\
 144static __maybe_unused int value_set_##_name(struct ctf_writer *cw,	\
 145			     struct bt_ctf_event *event,		\
 146			     const char *name,				\
 147			     _val_type val)				\
 148{									\
 149	struct bt_ctf_field_type *type = cw->data._name;		\
 150	return value_set(type, event, name, (u64) val);			\
 151}
 152
 153#define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)
 154
 155FUNC_VALUE_SET(s32)
 156FUNC_VALUE_SET(u32)
 157FUNC_VALUE_SET(s64)
 158FUNC_VALUE_SET(u64)
 159__FUNC_VALUE_SET(u64_hex, u64)
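/*
 * The FUNC_VALUE_SET() instantiations above expand to small typed wrappers
 * around value_set().  For instance, FUNC_VALUE_SET(s32) produces roughly:
 *
 *	static int value_set_s32(struct ctf_writer *cw,
 *				 struct bt_ctf_event *event,
 *				 const char *name, s32 val)
 *	{
 *		return value_set(cw->data.s32, event, name, (u64) val);
 *	}
 *
 * which lets callers such as add_generic_values() write
 * value_set_s32(cw, event, "perf_tid", sample->tid) without repeating the
 * field-type lookup.
 */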
 160
 161static int string_set_value(struct bt_ctf_field *field, const char *string);
 162static __maybe_unused int
 163value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event,
 164		 const char *name, const char *string)
 165{
 166	struct bt_ctf_field_type *type = cw->data.string;
 167	struct bt_ctf_field *field;
 168	int ret = 0;
 169
 170	field = bt_ctf_field_create(type);
 171	if (!field) {
 172		pr_err("failed to create a field %s\n", name);
 173		return -1;
 174	}
 175
 176	ret = string_set_value(field, string);
 177	if (ret) {
 178		pr_err("failed to set value %s\n", name);
 179		goto err_put_field;
 180	}
 181
 182	ret = bt_ctf_event_set_payload(event, name, field);
 183	if (ret)
 184		pr_err("failed to set payload %s\n", name);
 185
 186err_put_field:
 187	bt_ctf_field_put(field);
 188	return ret;
 189}
 190
 191static struct bt_ctf_field_type*
 192get_tracepoint_field_type(struct ctf_writer *cw, struct tep_format_field *field)
 193{
 194	unsigned long flags = field->flags;
 195
 196	if (flags & TEP_FIELD_IS_STRING)
 197		return cw->data.string;
 198
 199	if (!(flags & TEP_FIELD_IS_SIGNED)) {
 200		/* unsigned long are mostly pointers */
 201		if (flags & TEP_FIELD_IS_LONG || flags & TEP_FIELD_IS_POINTER)
 202			return cw->data.u64_hex;
 203	}
 204
 205	if (flags & TEP_FIELD_IS_SIGNED) {
 206		if (field->size == 8)
 207			return cw->data.s64;
 208		else
 209			return cw->data.s32;
 210	}
 211
 212	if (field->size == 8)
 213		return cw->data.u64;
 214	else
 215		return cw->data.u32;
 216}
 217
 218static unsigned long long adjust_signedness(unsigned long long value_int, int size)
 219{
 220	unsigned long long value_mask;
 221
 222	/*
 223	 * value_mask = (1 << (size * 8 - 1)) - 1.
 224	 * Directly set value_mask for code readers.
 225	 */
 226	switch (size) {
 227	case 1:
 228		value_mask = 0x7fULL;
 229		break;
 230	case 2:
 231		value_mask = 0x7fffULL;
 232		break;
 233	case 4:
 234		value_mask = 0x7fffffffULL;
 235		break;
 236	case 8:
 237		/*
  238		 * For a 64-bit value, return it as is. There is no need
  239		 * to fill the high bits.
 240		 */
 241		/* Fall through */
 242	default:
 243		/* BUG! */
 244		return value_int;
 245	}
 246
 247	/* If it is a positive value, don't adjust. */
 248	if ((value_int & (~0ULL - value_mask)) == 0)
 249		return value_int;
 250
 251	/* Fill upper part of value_int with 1 to make it a negative long long. */
 252	return (value_int & value_mask) | ~value_mask;
 253}
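/*
 * A worked example: adjust_signedness(0xfe, 1) keeps the low seven bits
 * (0x7e) and fills the remaining upper bits with ones, giving
 * 0xfffffffffffffffe, i.e. -2 once interpreted as a signed 64-bit value.
 * Non-negative inputs such as adjust_signedness(0x7f, 1) are returned
 * unchanged.
 */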
 254
 255static int string_set_value(struct bt_ctf_field *field, const char *string)
 256{
 257	char *buffer = NULL;
 258	size_t len = strlen(string), i, p;
 259	int err;
 260
 261	for (i = p = 0; i < len; i++, p++) {
 262		if (isprint(string[i])) {
 263			if (!buffer)
 264				continue;
 265			buffer[p] = string[i];
 266		} else {
 267			char numstr[5];
 268
 269			snprintf(numstr, sizeof(numstr), "\\x%02x",
 270				 (unsigned int)(string[i]) & 0xff);
 271
 272			if (!buffer) {
 273				buffer = zalloc(i + (len - i) * 4 + 2);
 274				if (!buffer) {
 275					pr_err("failed to set unprintable string '%s'\n", string);
 276					return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
 277				}
 278				if (i > 0)
 279					strncpy(buffer, string, i);
 280			}
 281			memcpy(buffer + p, numstr, 4);
 282			p += 3;
 283		}
 284	}
 285
 286	if (!buffer)
 287		return bt_ctf_field_string_set_value(field, string);
 288	err = bt_ctf_field_string_set_value(field, buffer);
 289	free(buffer);
 290	return err;
 291}
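/*
 * Example: a byte with value 0x01 embedded in "abc" is emitted as the four
 * literal characters "\x01", so non-printable bytes survive the conversion.
 * The buffer is sized as i + (len - i) * 4 + 2 because the printable prefix
 * of length i is copied verbatim, every remaining byte may expand to the
 * four-character "\xNN" form, and the rest covers the terminating NUL.
 */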
 292
 293static int add_tracepoint_field_value(struct ctf_writer *cw,
 294				      struct bt_ctf_event_class *event_class,
 295				      struct bt_ctf_event *event,
 296				      struct perf_sample *sample,
 297				      struct tep_format_field *fmtf)
 298{
 299	struct bt_ctf_field_type *type;
 300	struct bt_ctf_field *array_field;
 301	struct bt_ctf_field *field;
 302	const char *name = fmtf->name;
 303	void *data = sample->raw_data;
 304	unsigned long flags = fmtf->flags;
 305	unsigned int n_items;
 306	unsigned int i;
 307	unsigned int offset;
 308	unsigned int len;
 309	int ret;
 310
 311	name = fmtf->alias;
 312	offset = fmtf->offset;
 313	len = fmtf->size;
 314	if (flags & TEP_FIELD_IS_STRING)
 315		flags &= ~TEP_FIELD_IS_ARRAY;
 316
 317	if (flags & TEP_FIELD_IS_DYNAMIC) {
 318		unsigned long long tmp_val;
 319
 320		tmp_val = tep_read_number(fmtf->event->tep,
 321					  data + offset, len);
 322		offset = tmp_val;
 323		len = offset >> 16;
 324		offset &= 0xffff;
 325		if (tep_field_is_relative(flags))
 326			offset += fmtf->offset + fmtf->size;
 327	}
 328
 329	if (flags & TEP_FIELD_IS_ARRAY) {
 330
 331		type = bt_ctf_event_class_get_field_by_name(
 332				event_class, name);
 333		array_field = bt_ctf_field_create(type);
 334		bt_ctf_field_type_put(type);
 335		if (!array_field) {
 336			pr_err("Failed to create array type %s\n", name);
 337			return -1;
 338		}
 339
 340		len = fmtf->size / fmtf->arraylen;
 341		n_items = fmtf->arraylen;
 342	} else {
 343		n_items = 1;
 344		array_field = NULL;
 345	}
 346
 347	type = get_tracepoint_field_type(cw, fmtf);
 348
 349	for (i = 0; i < n_items; i++) {
 350		if (flags & TEP_FIELD_IS_ARRAY)
 351			field = bt_ctf_field_array_get_field(array_field, i);
 352		else
 353			field = bt_ctf_field_create(type);
 354
 355		if (!field) {
 356			pr_err("failed to create a field %s\n", name);
 357			return -1;
 358		}
 359
 360		if (flags & TEP_FIELD_IS_STRING)
 361			ret = string_set_value(field, data + offset + i * len);
 362		else {
 363			unsigned long long value_int;
 364
 365			value_int = tep_read_number(
 366					fmtf->event->tep,
 367					data + offset + i * len, len);
 368
 369			if (!(flags & TEP_FIELD_IS_SIGNED))
 370				ret = bt_ctf_field_unsigned_integer_set_value(
 371						field, value_int);
 372			else
 373				ret = bt_ctf_field_signed_integer_set_value(
 374						field, adjust_signedness(value_int, len));
 375		}
 376
 377		if (ret) {
  378			pr_err("failed to set field value %s\n", name);
 379			goto err_put_field;
 380		}
 381		if (!(flags & TEP_FIELD_IS_ARRAY)) {
 382			ret = bt_ctf_event_set_payload(event, name, field);
 383			if (ret) {
 384				pr_err("failed to set payload %s\n", name);
 385				goto err_put_field;
 386			}
 387		}
 388		bt_ctf_field_put(field);
 389	}
 390	if (flags & TEP_FIELD_IS_ARRAY) {
 391		ret = bt_ctf_event_set_payload(event, name, array_field);
 392		if (ret) {
  393			pr_err("Failed to add payload array %s\n", name);
 394			return -1;
 395		}
 396		bt_ctf_field_put(array_field);
 397	}
 398	return 0;
 399
 400err_put_field:
 401	bt_ctf_field_put(field);
 402	return -1;
 403}
 404
 405static int add_tracepoint_fields_values(struct ctf_writer *cw,
 406					struct bt_ctf_event_class *event_class,
 407					struct bt_ctf_event *event,
 408					struct tep_format_field *fields,
 409					struct perf_sample *sample)
 410{
 411	struct tep_format_field *field;
 412	int ret;
 413
 414	for (field = fields; field; field = field->next) {
 415		ret = add_tracepoint_field_value(cw, event_class, event, sample,
 416				field);
 417		if (ret)
 418			return -1;
 419	}
 420	return 0;
 421}
 422
 423static int add_tracepoint_values(struct ctf_writer *cw,
 424				 struct bt_ctf_event_class *event_class,
 425				 struct bt_ctf_event *event,
 426				 struct evsel *evsel,
 427				 struct perf_sample *sample)
 428{
 429	struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
 430	struct tep_format_field *fields        = evsel->tp_format->format.fields;
 431	int ret;
 432
 433	ret = add_tracepoint_fields_values(cw, event_class, event,
 434					   common_fields, sample);
 435	if (!ret)
 436		ret = add_tracepoint_fields_values(cw, event_class, event,
 437						   fields, sample);
 438
 439	return ret;
 440}
 441
 442static int
 443add_bpf_output_values(struct bt_ctf_event_class *event_class,
 444		      struct bt_ctf_event *event,
 445		      struct perf_sample *sample)
 446{
 447	struct bt_ctf_field_type *len_type, *seq_type;
 448	struct bt_ctf_field *len_field, *seq_field;
 449	unsigned int raw_size = sample->raw_size;
 450	unsigned int nr_elements = raw_size / sizeof(u32);
 451	unsigned int i;
 452	int ret;
 453
 454	if (nr_elements * sizeof(u32) != raw_size)
 455		pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n",
  456			   raw_size, raw_size - nr_elements * sizeof(u32));
 457
 458	len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
 459	len_field = bt_ctf_field_create(len_type);
 460	if (!len_field) {
 461		pr_err("failed to create 'raw_len' for bpf output event\n");
 462		ret = -1;
 463		goto put_len_type;
 464	}
 465
 466	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
 467	if (ret) {
 468		pr_err("failed to set field value for raw_len\n");
 469		goto put_len_field;
 470	}
 471	ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
 472	if (ret) {
 473		pr_err("failed to set payload to raw_len\n");
 474		goto put_len_field;
 475	}
 476
 477	seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
 478	seq_field = bt_ctf_field_create(seq_type);
 479	if (!seq_field) {
 480		pr_err("failed to create 'raw_data' for bpf output event\n");
 481		ret = -1;
 482		goto put_seq_type;
 483	}
 484
 485	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
 486	if (ret) {
 487		pr_err("failed to set length of 'raw_data'\n");
 488		goto put_seq_field;
 489	}
 490
 491	for (i = 0; i < nr_elements; i++) {
 492		struct bt_ctf_field *elem_field =
 493			bt_ctf_field_sequence_get_field(seq_field, i);
 494
 495		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
 496				((u32 *)(sample->raw_data))[i]);
 497
 498		bt_ctf_field_put(elem_field);
 499		if (ret) {
 500			pr_err("failed to set raw_data[%d]\n", i);
 501			goto put_seq_field;
 502		}
 503	}
 504
 505	ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
 506	if (ret)
 507		pr_err("failed to set payload for raw_data\n");
 508
 509put_seq_field:
 510	bt_ctf_field_put(seq_field);
 511put_seq_type:
 512	bt_ctf_field_type_put(seq_type);
 513put_len_field:
 514	bt_ctf_field_put(len_field);
 515put_len_type:
 516	bt_ctf_field_type_put(len_type);
 517	return ret;
 518}
 519
 520static int
 521add_callchain_output_values(struct bt_ctf_event_class *event_class,
 522		      struct bt_ctf_event *event,
 523		      struct ip_callchain *callchain)
 524{
 525	struct bt_ctf_field_type *len_type, *seq_type;
 526	struct bt_ctf_field *len_field, *seq_field;
 527	unsigned int nr_elements = callchain->nr;
 528	unsigned int i;
 529	int ret;
 530
 531	len_type = bt_ctf_event_class_get_field_by_name(
 532			event_class, "perf_callchain_size");
 533	len_field = bt_ctf_field_create(len_type);
 534	if (!len_field) {
 535		pr_err("failed to create 'perf_callchain_size' for callchain output event\n");
 536		ret = -1;
 537		goto put_len_type;
 538	}
 539
 540	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
 541	if (ret) {
 542		pr_err("failed to set field value for perf_callchain_size\n");
 543		goto put_len_field;
 544	}
 545	ret = bt_ctf_event_set_payload(event, "perf_callchain_size", len_field);
 546	if (ret) {
 547		pr_err("failed to set payload to perf_callchain_size\n");
 548		goto put_len_field;
 549	}
 550
 551	seq_type = bt_ctf_event_class_get_field_by_name(
 552			event_class, "perf_callchain");
 553	seq_field = bt_ctf_field_create(seq_type);
 554	if (!seq_field) {
 555		pr_err("failed to create 'perf_callchain' for callchain output event\n");
 556		ret = -1;
 557		goto put_seq_type;
 558	}
 559
 560	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
 561	if (ret) {
 562		pr_err("failed to set length of 'perf_callchain'\n");
 563		goto put_seq_field;
 564	}
 565
 566	for (i = 0; i < nr_elements; i++) {
 567		struct bt_ctf_field *elem_field =
 568			bt_ctf_field_sequence_get_field(seq_field, i);
 569
 570		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
 571				((u64 *)(callchain->ips))[i]);
 572
 573		bt_ctf_field_put(elem_field);
 574		if (ret) {
 575			pr_err("failed to set callchain[%d]\n", i);
 576			goto put_seq_field;
 577		}
 578	}
 579
 580	ret = bt_ctf_event_set_payload(event, "perf_callchain", seq_field);
 581	if (ret)
  582		pr_err("failed to set payload for perf_callchain\n");
 583
 584put_seq_field:
 585	bt_ctf_field_put(seq_field);
 586put_seq_type:
 587	bt_ctf_field_type_put(seq_type);
 588put_len_field:
 589	bt_ctf_field_put(len_field);
 590put_len_type:
 591	bt_ctf_field_type_put(len_type);
 592	return ret;
 593}
 594
 595static int add_generic_values(struct ctf_writer *cw,
 596			      struct bt_ctf_event *event,
 597			      struct evsel *evsel,
 598			      struct perf_sample *sample)
 599{
 600	u64 type = evsel->core.attr.sample_type;
 601	int ret;
 602
 603	/*
 604	 * missing:
 605	 *   PERF_SAMPLE_TIME         - not needed as we have it in
 606	 *                              ctf event header
 607	 *   PERF_SAMPLE_READ         - TODO
 608	 *   PERF_SAMPLE_RAW          - tracepoint fields are handled separately
 609	 *   PERF_SAMPLE_BRANCH_STACK - TODO
 610	 *   PERF_SAMPLE_REGS_USER    - TODO
 611	 *   PERF_SAMPLE_STACK_USER   - TODO
 612	 */
 613
 614	if (type & PERF_SAMPLE_IP) {
 615		ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
 616		if (ret)
 617			return -1;
 618	}
 619
 620	if (type & PERF_SAMPLE_TID) {
 621		ret = value_set_s32(cw, event, "perf_tid", sample->tid);
 622		if (ret)
 623			return -1;
 624
 625		ret = value_set_s32(cw, event, "perf_pid", sample->pid);
 626		if (ret)
 627			return -1;
 628	}
 629
 630	if ((type & PERF_SAMPLE_ID) ||
 631	    (type & PERF_SAMPLE_IDENTIFIER)) {
 632		ret = value_set_u64(cw, event, "perf_id", sample->id);
 633		if (ret)
 634			return -1;
 635	}
 636
 637	if (type & PERF_SAMPLE_STREAM_ID) {
 638		ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
 639		if (ret)
 640			return -1;
 641	}
 642
 643	if (type & PERF_SAMPLE_PERIOD) {
 644		ret = value_set_u64(cw, event, "perf_period", sample->period);
 645		if (ret)
 646			return -1;
 647	}
 648
 649	if (type & PERF_SAMPLE_WEIGHT) {
 650		ret = value_set_u64(cw, event, "perf_weight", sample->weight);
 651		if (ret)
 652			return -1;
 653	}
 654
 655	if (type & PERF_SAMPLE_DATA_SRC) {
 656		ret = value_set_u64(cw, event, "perf_data_src",
 657				sample->data_src);
 658		if (ret)
 659			return -1;
 660	}
 661
 662	if (type & PERF_SAMPLE_TRANSACTION) {
 663		ret = value_set_u64(cw, event, "perf_transaction",
 664				sample->transaction);
 665		if (ret)
 666			return -1;
 667	}
 668
 669	return 0;
 670}
 671
 672static int ctf_stream__flush(struct ctf_stream *cs)
 673{
 674	int err = 0;
 675
 676	if (cs) {
 677		err = bt_ctf_stream_flush(cs->stream);
 678		if (err)
 679			pr_err("CTF stream %d flush failed\n", cs->cpu);
 680
 681		pr("Flush stream for cpu %d (%u samples)\n",
 682		   cs->cpu, cs->count);
 683
 684		cs->count = 0;
 685	}
 686
 687	return err;
 688}
 689
 690static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
 691{
 692	struct ctf_stream *cs;
 693	struct bt_ctf_field *pkt_ctx   = NULL;
 694	struct bt_ctf_field *cpu_field = NULL;
 695	struct bt_ctf_stream *stream   = NULL;
 696	int ret;
 697
 698	cs = zalloc(sizeof(*cs));
 699	if (!cs) {
 700		pr_err("Failed to allocate ctf stream\n");
 701		return NULL;
 702	}
 703
 704	stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
 705	if (!stream) {
 706		pr_err("Failed to create CTF stream\n");
 707		goto out;
 708	}
 709
 710	pkt_ctx = bt_ctf_stream_get_packet_context(stream);
 711	if (!pkt_ctx) {
 712		pr_err("Failed to obtain packet context\n");
 713		goto out;
 714	}
 715
 716	cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
 717	bt_ctf_field_put(pkt_ctx);
 718	if (!cpu_field) {
 719		pr_err("Failed to obtain cpu field\n");
 720		goto out;
 721	}
 722
 723	ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
 724	if (ret) {
 725		pr_err("Failed to update CPU number\n");
 726		goto out;
 727	}
 728
 729	bt_ctf_field_put(cpu_field);
 730
 731	cs->cpu    = cpu;
 732	cs->stream = stream;
 733	return cs;
 734
 735out:
 736	if (cpu_field)
 737		bt_ctf_field_put(cpu_field);
 738	if (stream)
 739		bt_ctf_stream_put(stream);
 740
 741	free(cs);
 742	return NULL;
 743}
 744
 745static void ctf_stream__delete(struct ctf_stream *cs)
 746{
 747	if (cs) {
 748		bt_ctf_stream_put(cs->stream);
 749		free(cs);
 750	}
 751}
 752
 753static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
 754{
 755	struct ctf_stream *cs = cw->stream[cpu];
 756
 757	if (!cs) {
 758		cs = ctf_stream__create(cw, cpu);
 759		cw->stream[cpu] = cs;
 760	}
 761
 762	return cs;
 763}
 764
 765static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
 766			  struct evsel *evsel)
 767{
 768	int cpu = 0;
 769
 770	if (evsel->core.attr.sample_type & PERF_SAMPLE_CPU)
 771		cpu = sample->cpu;
 772
  773	if (cpu >= cw->stream_cnt) {
 774		pr_err("Event was recorded for CPU %d, limit is at %d.\n",
 775			cpu, cw->stream_cnt);
 776		cpu = 0;
 777	}
 778
 779	return cpu;
 780}
 781
 782#define STREAM_FLUSH_COUNT 100000
 783
 784/*
  785 * Currently we have no way to determine when to
  786 * flush the stream other than keeping track of the
  787 * number of events and checking it against a
  788 * threshold.
 789 */
 790static bool is_flush_needed(struct ctf_stream *cs)
 791{
 792	return cs->count >= STREAM_FLUSH_COUNT;
 793}
 794
 795static int process_sample_event(struct perf_tool *tool,
 796				union perf_event *_event,
 797				struct perf_sample *sample,
 798				struct evsel *evsel,
 799				struct machine *machine __maybe_unused)
 800{
 801	struct convert *c = container_of(tool, struct convert, tool);
 802	struct evsel_priv *priv = evsel->priv;
 803	struct ctf_writer *cw = &c->writer;
 804	struct ctf_stream *cs;
 805	struct bt_ctf_event_class *event_class;
 806	struct bt_ctf_event *event;
 807	int ret;
 808	unsigned long type = evsel->core.attr.sample_type;
 809
 810	if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
 811		return 0;
 812
 813	event_class = priv->event_class;
 814
 815	/* update stats */
 816	c->events_count++;
 817	c->events_size += _event->header.size;
 818
 819	pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);
 820
 821	event = bt_ctf_event_create(event_class);
 822	if (!event) {
  823		pr_err("Failed to create a CTF event\n");
 824		return -1;
 825	}
 826
 827	bt_ctf_clock_set_time(cw->clock, sample->time);
 828
 829	ret = add_generic_values(cw, event, evsel, sample);
 830	if (ret)
 831		return -1;
 832
 833	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
 834		ret = add_tracepoint_values(cw, event_class, event,
 835					    evsel, sample);
 836		if (ret)
 837			return -1;
 838	}
 839
 840	if (type & PERF_SAMPLE_CALLCHAIN) {
 841		ret = add_callchain_output_values(event_class,
 842				event, sample->callchain);
 843		if (ret)
 844			return -1;
 845	}
 846
 847	if (evsel__is_bpf_output(evsel)) {
 848		ret = add_bpf_output_values(event_class, event, sample);
 849		if (ret)
 850			return -1;
 851	}
 852
 853	cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
 854	if (cs) {
 855		if (is_flush_needed(cs))
 856			ctf_stream__flush(cs);
 857
 858		cs->count++;
 859		bt_ctf_stream_append_event(cs->stream, event);
 860	}
 861
 862	bt_ctf_event_put(event);
 863	return cs ? 0 : -1;
 864}
 865
 866#define __NON_SAMPLE_SET_FIELD(_name, _type, _field) 	\
 867do {							\
 868	ret = value_set_##_type(cw, event, #_field, _event->_name._field);\
 869	if (ret)					\
 870		return -1;				\
 871} while(0)
 872
 873#define __FUNC_PROCESS_NON_SAMPLE(_name, body) 	\
 874static int process_##_name##_event(struct perf_tool *tool,	\
 875				   union perf_event *_event,	\
 876				   struct perf_sample *sample,	\
 877				   struct machine *machine)	\
 878{								\
 879	struct convert *c = container_of(tool, struct convert, tool);\
 880	struct ctf_writer *cw = &c->writer;			\
 881	struct bt_ctf_event_class *event_class = cw->_name##_class;\
 882	struct bt_ctf_event *event;				\
 883	struct ctf_stream *cs;					\
 884	int ret;						\
 885								\
 886	c->non_sample_count++;					\
 887	c->events_size += _event->header.size;			\
 888	event = bt_ctf_event_create(event_class);		\
 889	if (!event) {						\
  890		pr_err("Failed to create a CTF event\n");	\
 891		return -1;					\
 892	}							\
 893								\
 894	bt_ctf_clock_set_time(cw->clock, sample->time);		\
 895	body							\
 896	cs = ctf_stream(cw, 0);					\
 897	if (cs) {						\
 898		if (is_flush_needed(cs))			\
 899			ctf_stream__flush(cs);			\
 900								\
 901		cs->count++;					\
 902		bt_ctf_stream_append_event(cs->stream, event);	\
 903	}							\
 904	bt_ctf_event_put(event);				\
 905								\
 906	return perf_event__process_##_name(tool, _event, sample, machine);\
 907}
 908
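/*
 * __FUNC_PROCESS_NON_SAMPLE(comm, ...) below expands, roughly, into a
 * process_comm_event() callback: it creates a "perf_comm" CTF event from
 * cw->comm_class, fills the payload via the __NON_SAMPLE_SET_FIELD()
 * statements, appends the event to stream 0, and finally chains to
 * perf_event__process_comm() so the usual thread accounting still runs.
 */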
 909__FUNC_PROCESS_NON_SAMPLE(comm,
 910	__NON_SAMPLE_SET_FIELD(comm, u32, pid);
 911	__NON_SAMPLE_SET_FIELD(comm, u32, tid);
 912	__NON_SAMPLE_SET_FIELD(comm, string, comm);
 913)
 914__FUNC_PROCESS_NON_SAMPLE(fork,
 915	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
 916	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
 917	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
 918	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
 919	__NON_SAMPLE_SET_FIELD(fork, u64, time);
 920)
 921
 922__FUNC_PROCESS_NON_SAMPLE(exit,
 923	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
 924	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
 925	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
 926	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
 927	__NON_SAMPLE_SET_FIELD(fork, u64, time);
 928)
 929__FUNC_PROCESS_NON_SAMPLE(mmap,
 930	__NON_SAMPLE_SET_FIELD(mmap, u32, pid);
 931	__NON_SAMPLE_SET_FIELD(mmap, u32, tid);
 932	__NON_SAMPLE_SET_FIELD(mmap, u64_hex, start);
 933	__NON_SAMPLE_SET_FIELD(mmap, string, filename);
 934)
 935__FUNC_PROCESS_NON_SAMPLE(mmap2,
 936	__NON_SAMPLE_SET_FIELD(mmap2, u32, pid);
 937	__NON_SAMPLE_SET_FIELD(mmap2, u32, tid);
 938	__NON_SAMPLE_SET_FIELD(mmap2, u64_hex, start);
 939	__NON_SAMPLE_SET_FIELD(mmap2, string, filename);
 940)
 941#undef __NON_SAMPLE_SET_FIELD
 942#undef __FUNC_PROCESS_NON_SAMPLE
 943
 944/* If dup < 0, add a prefix. Else, add _dupl_X suffix. */
 945static char *change_name(char *name, char *orig_name, int dup)
 946{
 947	char *new_name = NULL;
 948	size_t len;
 949
 950	if (!name)
 951		name = orig_name;
 952
 953	if (dup >= 10)
 954		goto out;
 955	/*
  956	 * Add '_' prefix to potential keyword.  According to
 957	 * Mathieu Desnoyers (https://lore.kernel.org/lkml/1074266107.40857.1422045946295.JavaMail.zimbra@efficios.com),
 958	 * further CTF spec updating may require us to use '$'.
 959	 */
 960	if (dup < 0)
 961		len = strlen(name) + sizeof("_");
 962	else
 963		len = strlen(orig_name) + sizeof("_dupl_X");
 964
 965	new_name = malloc(len);
 966	if (!new_name)
 967		goto out;
 968
 969	if (dup < 0)
 970		snprintf(new_name, len, "_%s", name);
 971	else
 972		snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);
 973
 974out:
 975	if (name != orig_name)
 976		free(name);
 977	return new_name;
 978}
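/*
 * Examples (with hypothetical field names): change_name(NULL, "in", -1)
 * returns "_in", prefixing a name that clashes with a CTF keyword, while
 * change_name(name, "flags", 1) returns "flags_dupl_1" to disambiguate a
 * duplicate field.  After ten duplicates the function gives up and returns
 * NULL.
 */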
 979
 980static int event_class_add_field(struct bt_ctf_event_class *event_class,
 981		struct bt_ctf_field_type *type,
 982		struct tep_format_field *field)
 983{
 984	struct bt_ctf_field_type *t = NULL;
 985	char *name;
 986	int dup = 1;
 987	int ret;
 988
 989	/* alias was already assigned */
 990	if (field->alias != field->name)
 991		return bt_ctf_event_class_add_field(event_class, type,
 992				(char *)field->alias);
 993
 994	name = field->name;
 995
  996	/* If 'name' is a keyword, add a prefix. */
 997	if (bt_ctf_validate_identifier(name))
 998		name = change_name(name, field->name, -1);
 999
1000	if (!name) {
1001		pr_err("Failed to fix invalid identifier.");
1002		return -1;
1003	}
1004	while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
1005		bt_ctf_field_type_put(t);
1006		name = change_name(name, field->name, dup++);
1007		if (!name) {
1008			pr_err("Failed to create dup name for '%s'\n", field->name);
1009			return -1;
1010		}
1011	}
1012
1013	ret = bt_ctf_event_class_add_field(event_class, type, name);
1014	if (!ret)
1015		field->alias = name;
1016
1017	return ret;
1018}
1019
1020static int add_tracepoint_fields_types(struct ctf_writer *cw,
1021				       struct tep_format_field *fields,
1022				       struct bt_ctf_event_class *event_class)
1023{
1024	struct tep_format_field *field;
1025	int ret;
1026
1027	for (field = fields; field; field = field->next) {
1028		struct bt_ctf_field_type *type;
1029		unsigned long flags = field->flags;
1030
1031		pr2("  field '%s'\n", field->name);
1032
1033		type = get_tracepoint_field_type(cw, field);
1034		if (!type)
1035			return -1;
1036
1037		/*
1038		 * A string is an array of chars. For this we use the string
1039		 * type and don't care that it is an array. What we don't
1040		 * support is an array of strings.
1041		 */
1042		if (flags & TEP_FIELD_IS_STRING)
1043			flags &= ~TEP_FIELD_IS_ARRAY;
1044
1045		if (flags & TEP_FIELD_IS_ARRAY)
1046			type = bt_ctf_field_type_array_create(type, field->arraylen);
1047
1048		ret = event_class_add_field(event_class, type, field);
1049
1050		if (flags & TEP_FIELD_IS_ARRAY)
1051			bt_ctf_field_type_put(type);
1052
1053		if (ret) {
1054			pr_err("Failed to add field '%s': %d\n",
1055					field->name, ret);
1056			return -1;
1057		}
1058	}
1059
1060	return 0;
1061}
1062
1063static int add_tracepoint_types(struct ctf_writer *cw,
1064				struct evsel *evsel,
1065				struct bt_ctf_event_class *class)
1066{
1067	struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
1068	struct tep_format_field *fields        = evsel->tp_format->format.fields;
1069	int ret;
1070
1071	ret = add_tracepoint_fields_types(cw, common_fields, class);
1072	if (!ret)
1073		ret = add_tracepoint_fields_types(cw, fields, class);
1074
1075	return ret;
1076}
1077
1078static int add_bpf_output_types(struct ctf_writer *cw,
1079				struct bt_ctf_event_class *class)
1080{
1081	struct bt_ctf_field_type *len_type = cw->data.u32;
1082	struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
1083	struct bt_ctf_field_type *seq_type;
1084	int ret;
1085
1086	ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
1087	if (ret)
1088		return ret;
1089
1090	seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
1091	if (!seq_type)
1092		return -1;
1093
1094	return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
1095}
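/*
 * In CTF terms the BPF output payload added here looks roughly like:
 *
 *	uint32_t raw_len;
 *	uint32_t raw_data[raw_len];	// variable-length sequence, hex
 *
 * i.e. raw_data is a sequence whose element count is carried by the
 * preceding raw_len field, matching what add_bpf_output_values() fills in.
 */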
1096
1097static int add_generic_types(struct ctf_writer *cw, struct evsel *evsel,
1098			     struct bt_ctf_event_class *event_class)
1099{
1100	u64 type = evsel->core.attr.sample_type;
1101
1102	/*
1103	 * missing:
1104	 *   PERF_SAMPLE_TIME         - not needed as we have it in
1105	 *                              ctf event header
1106	 *   PERF_SAMPLE_READ         - TODO
1107	 *   PERF_SAMPLE_CALLCHAIN    - TODO
1108	 *   PERF_SAMPLE_RAW          - tracepoint fields and BPF output
1109	 *                              are handled separately
1110	 *   PERF_SAMPLE_BRANCH_STACK - TODO
1111	 *   PERF_SAMPLE_REGS_USER    - TODO
1112	 *   PERF_SAMPLE_STACK_USER   - TODO
1113	 */
1114
1115#define ADD_FIELD(cl, t, n)						\
1116	do {								\
1117		pr2("  field '%s'\n", n);				\
1118		if (bt_ctf_event_class_add_field(cl, t, n)) {		\
1119			pr_err("Failed to add field '%s';\n", n);	\
1120			return -1;					\
1121		}							\
1122	} while (0)
1123
1124	if (type & PERF_SAMPLE_IP)
1125		ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");
1126
1127	if (type & PERF_SAMPLE_TID) {
1128		ADD_FIELD(event_class, cw->data.s32, "perf_tid");
1129		ADD_FIELD(event_class, cw->data.s32, "perf_pid");
1130	}
1131
1132	if ((type & PERF_SAMPLE_ID) ||
1133	    (type & PERF_SAMPLE_IDENTIFIER))
1134		ADD_FIELD(event_class, cw->data.u64, "perf_id");
1135
1136	if (type & PERF_SAMPLE_STREAM_ID)
1137		ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");
1138
1139	if (type & PERF_SAMPLE_PERIOD)
1140		ADD_FIELD(event_class, cw->data.u64, "perf_period");
1141
1142	if (type & PERF_SAMPLE_WEIGHT)
1143		ADD_FIELD(event_class, cw->data.u64, "perf_weight");
1144
1145	if (type & PERF_SAMPLE_DATA_SRC)
1146		ADD_FIELD(event_class, cw->data.u64, "perf_data_src");
1147
1148	if (type & PERF_SAMPLE_TRANSACTION)
1149		ADD_FIELD(event_class, cw->data.u64, "perf_transaction");
1150
1151	if (type & PERF_SAMPLE_CALLCHAIN) {
1152		ADD_FIELD(event_class, cw->data.u32, "perf_callchain_size");
1153		ADD_FIELD(event_class,
1154			bt_ctf_field_type_sequence_create(
1155				cw->data.u64_hex, "perf_callchain_size"),
1156			"perf_callchain");
1157	}
1158
1159#undef ADD_FIELD
1160	return 0;
1161}
1162
1163static int add_event(struct ctf_writer *cw, struct evsel *evsel)
1164{
1165	struct bt_ctf_event_class *event_class;
1166	struct evsel_priv *priv;
1167	const char *name = evsel__name(evsel);
1168	int ret;
1169
1170	pr("Adding event '%s' (type %d)\n", name, evsel->core.attr.type);
1171
1172	event_class = bt_ctf_event_class_create(name);
1173	if (!event_class)
1174		return -1;
1175
1176	ret = add_generic_types(cw, evsel, event_class);
1177	if (ret)
1178		goto err;
1179
1180	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
1181		ret = add_tracepoint_types(cw, evsel, event_class);
1182		if (ret)
1183			goto err;
1184	}
1185
1186	if (evsel__is_bpf_output(evsel)) {
1187		ret = add_bpf_output_types(cw, event_class);
1188		if (ret)
1189			goto err;
1190	}
1191
1192	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
1193	if (ret) {
1194		pr("Failed to add event class into stream.\n");
1195		goto err;
1196	}
1197
1198	priv = malloc(sizeof(*priv));
1199	if (!priv)
1200		goto err;
1201
1202	priv->event_class = event_class;
1203	evsel->priv       = priv;
1204	return 0;
1205
1206err:
1207	bt_ctf_event_class_put(event_class);
1208	pr_err("Failed to add event '%s'.\n", name);
1209	return -1;
1210}
1211
1212static int setup_events(struct ctf_writer *cw, struct perf_session *session)
1213{
1214	struct evlist *evlist = session->evlist;
1215	struct evsel *evsel;
1216	int ret;
1217
1218	evlist__for_each_entry(evlist, evsel) {
1219		ret = add_event(cw, evsel);
1220		if (ret)
1221			return ret;
1222	}
1223	return 0;
1224}
1225
1226#define __NON_SAMPLE_ADD_FIELD(t, n)						\
1227	do {							\
1228		pr2("  field '%s'\n", #n);			\
1229		if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\
1230			pr_err("Failed to add field '%s';\n", #n);\
1231			return -1;				\
1232		}						\
1233	} while(0)
1234
1235#define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body) 		\
1236static int add_##_name##_event(struct ctf_writer *cw)		\
1237{								\
1238	struct bt_ctf_event_class *event_class;			\
1239	int ret;						\
1240								\
1241	pr("Adding "#_name" event\n");				\
1242	event_class = bt_ctf_event_class_create("perf_" #_name);\
1243	if (!event_class)					\
1244		return -1;					\
1245	body							\
1246								\
1247	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\
1248	if (ret) {						\
1249		pr("Failed to add event class '"#_name"' into stream.\n");\
1250		return ret;					\
1251	}							\
1252								\
1253	cw->_name##_class = event_class;			\
1254	bt_ctf_event_class_put(event_class);			\
1255	return 0;						\
1256}
1257
1258__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm,
1259	__NON_SAMPLE_ADD_FIELD(u32, pid);
1260	__NON_SAMPLE_ADD_FIELD(u32, tid);
1261	__NON_SAMPLE_ADD_FIELD(string, comm);
1262)
1263
1264__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork,
1265	__NON_SAMPLE_ADD_FIELD(u32, pid);
1266	__NON_SAMPLE_ADD_FIELD(u32, ppid);
1267	__NON_SAMPLE_ADD_FIELD(u32, tid);
1268	__NON_SAMPLE_ADD_FIELD(u32, ptid);
1269	__NON_SAMPLE_ADD_FIELD(u64, time);
1270)
1271
1272__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit,
1273	__NON_SAMPLE_ADD_FIELD(u32, pid);
1274	__NON_SAMPLE_ADD_FIELD(u32, ppid);
1275	__NON_SAMPLE_ADD_FIELD(u32, tid);
1276	__NON_SAMPLE_ADD_FIELD(u32, ptid);
1277	__NON_SAMPLE_ADD_FIELD(u64, time);
1278)
1279
1280__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap,
1281	__NON_SAMPLE_ADD_FIELD(u32, pid);
1282	__NON_SAMPLE_ADD_FIELD(u32, tid);
1283	__NON_SAMPLE_ADD_FIELD(u64_hex, start);
1284	__NON_SAMPLE_ADD_FIELD(string, filename);
1285)
1286
1287__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap2,
1288	__NON_SAMPLE_ADD_FIELD(u32, pid);
1289	__NON_SAMPLE_ADD_FIELD(u32, tid);
1290	__NON_SAMPLE_ADD_FIELD(u64_hex, start);
1291	__NON_SAMPLE_ADD_FIELD(string, filename);
1292)
1293#undef __NON_SAMPLE_ADD_FIELD
1294#undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS
1295
1296static int setup_non_sample_events(struct ctf_writer *cw,
1297				   struct perf_session *session __maybe_unused)
1298{
1299	int ret;
1300
1301	ret = add_comm_event(cw);
1302	if (ret)
1303		return ret;
1304	ret = add_exit_event(cw);
1305	if (ret)
1306		return ret;
1307	ret = add_fork_event(cw);
1308	if (ret)
1309		return ret;
1310	ret = add_mmap_event(cw);
1311	if (ret)
1312		return ret;
1313	ret = add_mmap2_event(cw);
1314	if (ret)
1315		return ret;
1316	return 0;
1317}
1318
1319static void cleanup_events(struct perf_session *session)
1320{
1321	struct evlist *evlist = session->evlist;
1322	struct evsel *evsel;
1323
1324	evlist__for_each_entry(evlist, evsel) {
1325		struct evsel_priv *priv;
1326
1327		priv = evsel->priv;
1328		bt_ctf_event_class_put(priv->event_class);
1329		zfree(&evsel->priv);
1330	}
1331
1332	evlist__delete(evlist);
1333	session->evlist = NULL;
1334}
1335
1336static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
1337{
1338	struct ctf_stream **stream;
1339	struct perf_header *ph = &session->header;
1340	int ncpus;
1341
1342	/*
 1343	 * Try to get the number of CPUs used in the data file;
 1344	 * if not present, fall back to MAX_CPUS.
1345	 */
1346	ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;
1347
1348	stream = zalloc(sizeof(*stream) * ncpus);
1349	if (!stream) {
1350		pr_err("Failed to allocate streams.\n");
1351		return -ENOMEM;
1352	}
1353
1354	cw->stream     = stream;
1355	cw->stream_cnt = ncpus;
1356	return 0;
1357}
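/*
 * Note that only the array of per-CPU stream pointers is allocated here;
 * the streams themselves are created lazily in ctf_stream() the first time
 * an event for a given CPU is processed.
 */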
1358
1359static void free_streams(struct ctf_writer *cw)
1360{
1361	int cpu;
1362
1363	for (cpu = 0; cpu < cw->stream_cnt; cpu++)
1364		ctf_stream__delete(cw->stream[cpu]);
1365
1366	zfree(&cw->stream);
1367}
1368
1369static int ctf_writer__setup_env(struct ctf_writer *cw,
1370				 struct perf_session *session)
1371{
1372	struct perf_header *header = &session->header;
1373	struct bt_ctf_writer *writer = cw->writer;
1374
1375#define ADD(__n, __v)							\
1376do {									\
1377	if (bt_ctf_writer_add_environment_field(writer, __n, __v))	\
1378		return -1;						\
1379} while (0)
1380
1381	ADD("host",    header->env.hostname);
1382	ADD("sysname", "Linux");
1383	ADD("release", header->env.os_release);
1384	ADD("version", header->env.version);
1385	ADD("machine", header->env.arch);
1386	ADD("domain", "kernel");
1387	ADD("tracer_name", "perf");
1388
1389#undef ADD
1390	return 0;
1391}
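/*
 * These fields land in the env block of the generated CTF metadata, e.g.
 * (values illustrative):
 *
 *	env {
 *		host = "...";
 *		sysname = "Linux";
 *		domain = "kernel";
 *		tracer_name = "perf";
 *	};
 */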
1392
1393static int ctf_writer__setup_clock(struct ctf_writer *cw,
1394				   struct perf_session *session,
1395				   bool tod)
1396{
1397	struct bt_ctf_clock *clock = cw->clock;
1398	const char *desc = "perf clock";
1399	int64_t offset = 0;
1400
1401	if (tod) {
1402		struct perf_env *env = &session->header.env;
1403
1404		if (!env->clock.enabled) {
1405			pr_err("Can't provide --tod time, missing clock data. "
1406			       "Please record with -k/--clockid option.\n");
1407			return -1;
1408		}
1409
1410		desc   = clockid_name(env->clock.clockid);
1411		offset = env->clock.tod_ns - env->clock.clockid_ns;
1412	}
1413
1414#define SET(__n, __v)				\
1415do {						\
1416	if (bt_ctf_clock_set_##__n(clock, __v))	\
1417		return -1;			\
1418} while (0)
1419
1420	SET(frequency,   1000000000);
1421	SET(offset,      offset);
1422	SET(description, desc);
1423	SET(precision,   10);
1424	SET(is_absolute, 0);
1425
1426#undef SET
1427	return 0;
1428}
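/*
 * A frequency of 1000000000 makes the clock count nanoseconds, matching the
 * sample->time values later passed to bt_ctf_clock_set_time().  With --tod,
 * the offset is the wall-clock minus perf-clock delta captured at record
 * time, so trace readers can reconstruct absolute timestamps.
 */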
1429
1430static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
1431{
1432	struct bt_ctf_field_type *type;
1433
1434	type = bt_ctf_field_type_integer_create(size);
1435	if (!type)
1436		return NULL;
1437
1438	if (sign &&
1439	    bt_ctf_field_type_integer_set_signed(type, 1))
1440		goto err;
1441
1442	if (hex &&
1443	    bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
1444		goto err;
1445
1446#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1447	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
1448#else
1449	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
1450#endif
1451
1452	pr2("Created type: INTEGER %d-bit %ssigned %s\n",
 1453	    size, sign ? "" : "un", hex ? "hex" : "");
1454	return type;
1455
1456err:
1457	bt_ctf_field_type_put(type);
1458	return NULL;
1459}
1460
1461static void ctf_writer__cleanup_data(struct ctf_writer *cw)
1462{
1463	unsigned int i;
1464
1465	for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
1466		bt_ctf_field_type_put(cw->data.array[i]);
1467}
1468
1469static int ctf_writer__init_data(struct ctf_writer *cw)
1470{
1471#define CREATE_INT_TYPE(type, size, sign, hex)		\
1472do {							\
1473	(type) = create_int_type(size, sign, hex);	\
1474	if (!(type))					\
1475		goto err;				\
1476} while (0)
1477
1478	CREATE_INT_TYPE(cw->data.s64, 64, true,  false);
1479	CREATE_INT_TYPE(cw->data.u64, 64, false, false);
1480	CREATE_INT_TYPE(cw->data.s32, 32, true,  false);
1481	CREATE_INT_TYPE(cw->data.u32, 32, false, false);
1482	CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
1483	CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);
1484
1485	cw->data.string  = bt_ctf_field_type_string_create();
1486	if (cw->data.string)
1487		return 0;
1488
1489err:
1490	ctf_writer__cleanup_data(cw);
1491	pr_err("Failed to create data types.\n");
1492	return -1;
1493}
1494
1495static void ctf_writer__cleanup(struct ctf_writer *cw)
1496{
1497	ctf_writer__cleanup_data(cw);
1498
1499	bt_ctf_clock_put(cw->clock);
1500	free_streams(cw);
1501	bt_ctf_stream_class_put(cw->stream_class);
1502	bt_ctf_writer_put(cw->writer);
1503
1504	/* and NULL all the pointers */
1505	memset(cw, 0, sizeof(*cw));
1506}
1507
1508static int ctf_writer__init(struct ctf_writer *cw, const char *path,
1509			    struct perf_session *session, bool tod)
1510{
1511	struct bt_ctf_writer		*writer;
1512	struct bt_ctf_stream_class	*stream_class;
1513	struct bt_ctf_clock		*clock;
1514	struct bt_ctf_field_type	*pkt_ctx_type;
1515	int				ret;
1516
1517	/* CTF writer */
1518	writer = bt_ctf_writer_create(path);
1519	if (!writer)
1520		goto err;
1521
1522	cw->writer = writer;
1523
1524	/* CTF clock */
1525	clock = bt_ctf_clock_create("perf_clock");
1526	if (!clock) {
1527		pr("Failed to create CTF clock.\n");
1528		goto err_cleanup;
1529	}
1530
1531	cw->clock = clock;
1532
1533	if (ctf_writer__setup_clock(cw, session, tod)) {
1534		pr("Failed to setup CTF clock.\n");
1535		goto err_cleanup;
1536	}
1537
1538	/* CTF stream class */
1539	stream_class = bt_ctf_stream_class_create("perf_stream");
1540	if (!stream_class) {
1541		pr("Failed to create CTF stream class.\n");
1542		goto err_cleanup;
1543	}
1544
1545	cw->stream_class = stream_class;
1546
1547	/* CTF clock stream setup */
1548	if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
1549		pr("Failed to assign CTF clock to stream class.\n");
1550		goto err_cleanup;
1551	}
1552
1553	if (ctf_writer__init_data(cw))
1554		goto err_cleanup;
1555
1556	/* Add cpu_id for packet context */
1557	pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
1558	if (!pkt_ctx_type)
1559		goto err_cleanup;
1560
1561	ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
1562	bt_ctf_field_type_put(pkt_ctx_type);
1563	if (ret)
1564		goto err_cleanup;
1565
1566	/* CTF clock writer setup */
1567	if (bt_ctf_writer_add_clock(writer, clock)) {
1568		pr("Failed to assign CTF clock to writer.\n");
1569		goto err_cleanup;
1570	}
1571
1572	return 0;
1573
1574err_cleanup:
1575	ctf_writer__cleanup(cw);
1576err:
1577	pr_err("Failed to setup CTF writer.\n");
1578	return -1;
1579}
1580
1581static int ctf_writer__flush_streams(struct ctf_writer *cw)
1582{
1583	int cpu, ret = 0;
1584
1585	for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
1586		ret = ctf_stream__flush(cw->stream[cpu]);
1587
1588	return ret;
1589}
1590
1591static int convert__config(const char *var, const char *value, void *cb)
1592{
1593	struct convert *c = cb;
1594
1595	if (!strcmp(var, "convert.queue-size"))
1596		return perf_config_u64(&c->queue_size, var, value);
1597
1598	return 0;
1599}
1600
1601int bt_convert__perf2ctf(const char *input, const char *path,
1602			 struct perf_data_convert_opts *opts)
1603{
1604	struct perf_session *session;
1605	struct perf_data data = {
1606		.path	   = input,
1607		.mode      = PERF_DATA_MODE_READ,
1608		.force     = opts->force,
1609	};
1610	struct convert c = {
1611		.tool = {
1612			.sample          = process_sample_event,
1613			.mmap            = perf_event__process_mmap,
1614			.mmap2           = perf_event__process_mmap2,
1615			.comm            = perf_event__process_comm,
1616			.exit            = perf_event__process_exit,
1617			.fork            = perf_event__process_fork,
1618			.lost            = perf_event__process_lost,
1619			.tracing_data    = perf_event__process_tracing_data,
1620			.build_id        = perf_event__process_build_id,
1621			.namespaces      = perf_event__process_namespaces,
1622			.ordered_events  = true,
1623			.ordering_requires_timestamps = true,
1624		},
1625	};
1626	struct ctf_writer *cw = &c.writer;
1627	int err;
1628
1629	if (opts->all) {
1630		c.tool.comm = process_comm_event;
1631		c.tool.exit = process_exit_event;
1632		c.tool.fork = process_fork_event;
1633		c.tool.mmap = process_mmap_event;
1634		c.tool.mmap2 = process_mmap2_event;
1635	}
1636
1637	err = perf_config(convert__config, &c);
1638	if (err)
1639		return err;
1640
1641	err = -1;
1642	/* perf.data session */
1643	session = perf_session__new(&data, &c.tool);
1644	if (IS_ERR(session))
1645		return PTR_ERR(session);
1646
1647	/* CTF writer */
1648	if (ctf_writer__init(cw, path, session, opts->tod))
1649		goto free_session;
1650
1651	if (c.queue_size) {
1652		ordered_events__set_alloc_size(&session->ordered_events,
1653					       c.queue_size);
1654	}
1655
1656	/* CTF writer env/clock setup  */
1657	if (ctf_writer__setup_env(cw, session))
1658		goto free_writer;
1659
1660	/* CTF events setup */
1661	if (setup_events(cw, session))
1662		goto free_writer;
1663
1664	if (opts->all && setup_non_sample_events(cw, session))
1665		goto free_writer;
1666
1667	if (setup_streams(cw, session))
1668		goto free_writer;
1669
1670	err = perf_session__process_events(session);
1671	if (!err)
1672		err = ctf_writer__flush_streams(cw);
1673	else
1674		pr_err("Error during conversion.\n");
1675
1676	fprintf(stderr,
1677		"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
1678		data.path, path);
1679
1680	fprintf(stderr,
1681		"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
1682		(double) c.events_size / 1024.0 / 1024.0,
1683		c.events_count);
1684
1685	if (!c.non_sample_count)
1686		fprintf(stderr, ") ]\n");
1687	else
1688		fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count);
1689
1690	cleanup_events(session);
1691	perf_session__delete(session);
1692	ctf_writer__cleanup(cw);
1693
1694	return err;
1695
1696free_writer:
1697	ctf_writer__cleanup(cw);
1698free_session:
1699	perf_session__delete(session);
1700	pr_err("Error during conversion setup.\n");
1701	return err;
1702}
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * CTF writing support via babeltrace.
   4 *
   5 * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
   6 * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
   7 */
   8
   9#include <errno.h>
  10#include <inttypes.h>
  11#include <linux/compiler.h>
  12#include <linux/kernel.h>
  13#include <linux/zalloc.h>
  14#include <babeltrace/ctf-writer/writer.h>
  15#include <babeltrace/ctf-writer/clock.h>
  16#include <babeltrace/ctf-writer/stream.h>
  17#include <babeltrace/ctf-writer/event.h>
  18#include <babeltrace/ctf-writer/event-types.h>
  19#include <babeltrace/ctf-writer/event-fields.h>
  20#include <babeltrace/ctf-ir/utils.h>
  21#include <babeltrace/ctf/events.h>
  22#include <traceevent/event-parse.h>
  23#include "asm/bug.h"
  24#include "data-convert-bt.h"
  25#include "session.h"
  26#include "debug.h"
  27#include "tool.h"
  28#include "evlist.h"
  29#include "evsel.h"
  30#include "machine.h"
  31#include "config.h"
  32#include <linux/ctype.h>
  33#include <linux/err.h>
  34#include <linux/time64.h>
  35#include "util.h"
  36#include "clockid.h"
 
 
 
 
 
  37
  38#define pr_N(n, fmt, ...) \
  39	eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)
  40
  41#define pr(fmt, ...)  pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
  42#define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)
  43
  44#define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)
  45
  46struct evsel_priv {
  47	struct bt_ctf_event_class *event_class;
  48};
  49
  50#define MAX_CPUS	4096
  51
  52struct ctf_stream {
  53	struct bt_ctf_stream *stream;
  54	int cpu;
  55	u32 count;
  56};
  57
  58struct ctf_writer {
  59	/* writer primitives */
  60	struct bt_ctf_writer		 *writer;
  61	struct ctf_stream		**stream;
  62	int				  stream_cnt;
  63	struct bt_ctf_stream_class	 *stream_class;
  64	struct bt_ctf_clock		 *clock;
  65
  66	/* data types */
  67	union {
  68		struct {
  69			struct bt_ctf_field_type	*s64;
  70			struct bt_ctf_field_type	*u64;
  71			struct bt_ctf_field_type	*s32;
  72			struct bt_ctf_field_type	*u32;
  73			struct bt_ctf_field_type	*string;
  74			struct bt_ctf_field_type	*u32_hex;
  75			struct bt_ctf_field_type	*u64_hex;
  76		};
  77		struct bt_ctf_field_type *array[6];
  78	} data;
  79	struct bt_ctf_event_class	*comm_class;
  80	struct bt_ctf_event_class	*exit_class;
  81	struct bt_ctf_event_class	*fork_class;
  82	struct bt_ctf_event_class	*mmap_class;
  83	struct bt_ctf_event_class	*mmap2_class;
  84};
  85
  86struct convert {
  87	struct perf_tool	tool;
  88	struct ctf_writer	writer;
  89
  90	u64			events_size;
  91	u64			events_count;
  92	u64			non_sample_count;
  93
  94	/* Ordered events configured queue size. */
  95	u64			queue_size;
  96};
  97
  98static int value_set(struct bt_ctf_field_type *type,
  99		     struct bt_ctf_event *event,
 100		     const char *name, u64 val)
 101{
 102	struct bt_ctf_field *field;
 103	bool sign = bt_ctf_field_type_integer_get_signed(type);
 104	int ret;
 105
 106	field = bt_ctf_field_create(type);
 107	if (!field) {
 108		pr_err("failed to create a field %s\n", name);
 109		return -1;
 110	}
 111
 112	if (sign) {
 113		ret = bt_ctf_field_signed_integer_set_value(field, val);
 114		if (ret) {
 115			pr_err("failed to set field value %s\n", name);
 116			goto err;
 117		}
 118	} else {
 119		ret = bt_ctf_field_unsigned_integer_set_value(field, val);
 120		if (ret) {
 121			pr_err("failed to set field value %s\n", name);
 122			goto err;
 123		}
 124	}
 125
 126	ret = bt_ctf_event_set_payload(event, name, field);
 127	if (ret) {
 128		pr_err("failed to set payload %s\n", name);
 129		goto err;
 130	}
 131
 132	pr2("  SET [%s = %" PRIu64 "]\n", name, val);
 133
 134err:
 135	bt_ctf_field_put(field);
 136	return ret;
 137}
 138
 139#define __FUNC_VALUE_SET(_name, _val_type)				\
 140static __maybe_unused int value_set_##_name(struct ctf_writer *cw,	\
 141			     struct bt_ctf_event *event,		\
 142			     const char *name,				\
 143			     _val_type val)				\
 144{									\
 145	struct bt_ctf_field_type *type = cw->data._name;		\
 146	return value_set(type, event, name, (u64) val);			\
 147}
 148
 149#define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)
 150
 151FUNC_VALUE_SET(s32)
 152FUNC_VALUE_SET(u32)
 153FUNC_VALUE_SET(s64)
 154FUNC_VALUE_SET(u64)
 155__FUNC_VALUE_SET(u64_hex, u64)
 156
 157static int string_set_value(struct bt_ctf_field *field, const char *string);
 158static __maybe_unused int
 159value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event,
 160		 const char *name, const char *string)
 161{
 162	struct bt_ctf_field_type *type = cw->data.string;
 163	struct bt_ctf_field *field;
 164	int ret = 0;
 165
 166	field = bt_ctf_field_create(type);
 167	if (!field) {
 168		pr_err("failed to create a field %s\n", name);
 169		return -1;
 170	}
 171
 172	ret = string_set_value(field, string);
 173	if (ret) {
 174		pr_err("failed to set value %s\n", name);
 175		goto err_put_field;
 176	}
 177
 178	ret = bt_ctf_event_set_payload(event, name, field);
 179	if (ret)
 180		pr_err("failed to set payload %s\n", name);
 181
 182err_put_field:
 183	bt_ctf_field_put(field);
 184	return ret;
 185}
 186
 187static struct bt_ctf_field_type*
 188get_tracepoint_field_type(struct ctf_writer *cw, struct tep_format_field *field)
 189{
 190	unsigned long flags = field->flags;
 191
 192	if (flags & TEP_FIELD_IS_STRING)
 193		return cw->data.string;
 194
 195	if (!(flags & TEP_FIELD_IS_SIGNED)) {
 196		/* unsigned long are mostly pointers */
 197		if (flags & TEP_FIELD_IS_LONG || flags & TEP_FIELD_IS_POINTER)
 198			return cw->data.u64_hex;
 199	}
 200
 201	if (flags & TEP_FIELD_IS_SIGNED) {
 202		if (field->size == 8)
 203			return cw->data.s64;
 204		else
 205			return cw->data.s32;
 206	}
 207
 208	if (field->size == 8)
 209		return cw->data.u64;
 210	else
 211		return cw->data.u32;
 212}
 213
 214static unsigned long long adjust_signedness(unsigned long long value_int, int size)
 215{
 216	unsigned long long value_mask;
 217
 218	/*
 219	 * value_mask = (1 << (size * 8 - 1)) - 1.
 220	 * Directly set value_mask for code readers.
 221	 */
 222	switch (size) {
 223	case 1:
 224		value_mask = 0x7fULL;
 225		break;
 226	case 2:
 227		value_mask = 0x7fffULL;
 228		break;
 229	case 4:
 230		value_mask = 0x7fffffffULL;
 231		break;
 232	case 8:
 233		/*
 234		 * For 64 bit value, return it self. There is no need
 235		 * to fill high bit.
 236		 */
 237		/* Fall through */
 238	default:
 239		/* BUG! */
 240		return value_int;
 241	}
 242
 243	/* If it is a positive value, don't adjust. */
 244	if ((value_int & (~0ULL - value_mask)) == 0)
 245		return value_int;
 246
 247	/* Fill upper part of value_int with 1 to make it a negative long long. */
 248	return (value_int & value_mask) | ~value_mask;
 249}
 250
 251static int string_set_value(struct bt_ctf_field *field, const char *string)
 252{
 253	char *buffer = NULL;
 254	size_t len = strlen(string), i, p;
 255	int err;
 256
 257	for (i = p = 0; i < len; i++, p++) {
 258		if (isprint(string[i])) {
 259			if (!buffer)
 260				continue;
 261			buffer[p] = string[i];
 262		} else {
 263			char numstr[5];
 264
 265			snprintf(numstr, sizeof(numstr), "\\x%02x",
 266				 (unsigned int)(string[i]) & 0xff);
 267
 268			if (!buffer) {
 269				buffer = zalloc(i + (len - i) * 4 + 2);
 270				if (!buffer) {
 271					pr_err("failed to set unprintable string '%s'\n", string);
 272					return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
 273				}
 274				if (i > 0)
 275					strncpy(buffer, string, i);
 276			}
 277			memcpy(buffer + p, numstr, 4);
 278			p += 3;
 279		}
 280	}
 281
 282	if (!buffer)
 283		return bt_ctf_field_string_set_value(field, string);
 284	err = bt_ctf_field_string_set_value(field, buffer);
 285	free(buffer);
 286	return err;
 287}
 288
 289static int add_tracepoint_field_value(struct ctf_writer *cw,
 290				      struct bt_ctf_event_class *event_class,
 291				      struct bt_ctf_event *event,
 292				      struct perf_sample *sample,
 293				      struct tep_format_field *fmtf)
 294{
 295	struct bt_ctf_field_type *type;
 296	struct bt_ctf_field *array_field;
 297	struct bt_ctf_field *field;
 298	const char *name = fmtf->name;
 299	void *data = sample->raw_data;
 300	unsigned long flags = fmtf->flags;
 301	unsigned int n_items;
 302	unsigned int i;
 303	unsigned int offset;
 304	unsigned int len;
 305	int ret;
 306
 307	name = fmtf->alias;
 308	offset = fmtf->offset;
 309	len = fmtf->size;
 310	if (flags & TEP_FIELD_IS_STRING)
 311		flags &= ~TEP_FIELD_IS_ARRAY;
 312
 313	if (flags & TEP_FIELD_IS_DYNAMIC) {
 314		unsigned long long tmp_val;
 315
 316		tmp_val = tep_read_number(fmtf->event->tep,
 317					  data + offset, len);
 318		offset = tmp_val;
 319		len = offset >> 16;
 320		offset &= 0xffff;
 321	}
 322
 323	if (flags & TEP_FIELD_IS_ARRAY) {
 324
 325		type = bt_ctf_event_class_get_field_by_name(
 326				event_class, name);
 327		array_field = bt_ctf_field_create(type);
 328		bt_ctf_field_type_put(type);
 329		if (!array_field) {
 330			pr_err("Failed to create array type %s\n", name);
 331			return -1;
 332		}
 333
 334		len = fmtf->size / fmtf->arraylen;
 335		n_items = fmtf->arraylen;
 336	} else {
 337		n_items = 1;
 338		array_field = NULL;
 339	}
 340
 341	type = get_tracepoint_field_type(cw, fmtf);
 342
 343	for (i = 0; i < n_items; i++) {
 344		if (flags & TEP_FIELD_IS_ARRAY)
 345			field = bt_ctf_field_array_get_field(array_field, i);
 346		else
 347			field = bt_ctf_field_create(type);
 348
 349		if (!field) {
 350			pr_err("failed to create a field %s\n", name);
 351			return -1;
 352		}
 353
 354		if (flags & TEP_FIELD_IS_STRING)
 355			ret = string_set_value(field, data + offset + i * len);
 356		else {
 357			unsigned long long value_int;
 358
 359			value_int = tep_read_number(
 360					fmtf->event->tep,
 361					data + offset + i * len, len);
 362
 363			if (!(flags & TEP_FIELD_IS_SIGNED))
 364				ret = bt_ctf_field_unsigned_integer_set_value(
 365						field, value_int);
 366			else
 367				ret = bt_ctf_field_signed_integer_set_value(
 368						field, adjust_signedness(value_int, len));
 369		}
 370
 371		if (ret) {
  372		pr_err("failed to set field value %s\n", name);
 373			goto err_put_field;
 374		}
 375		if (!(flags & TEP_FIELD_IS_ARRAY)) {
 376			ret = bt_ctf_event_set_payload(event, name, field);
 377			if (ret) {
 378				pr_err("failed to set payload %s\n", name);
 379				goto err_put_field;
 380			}
 381		}
 382		bt_ctf_field_put(field);
 383	}
 384	if (flags & TEP_FIELD_IS_ARRAY) {
 385		ret = bt_ctf_event_set_payload(event, name, array_field);
 386		if (ret) {
  387			pr_err("Failed to add payload array %s\n", name);
 388			return -1;
 389		}
 390		bt_ctf_field_put(array_field);
 391	}
 392	return 0;
 393
 394err_put_field:
 395	bt_ctf_field_put(field);
 396	return -1;
 397}
 398
 399static int add_tracepoint_fields_values(struct ctf_writer *cw,
 400					struct bt_ctf_event_class *event_class,
 401					struct bt_ctf_event *event,
 402					struct tep_format_field *fields,
 403					struct perf_sample *sample)
 404{
 405	struct tep_format_field *field;
 406	int ret;
 407
 408	for (field = fields; field; field = field->next) {
 409		ret = add_tracepoint_field_value(cw, event_class, event, sample,
 410				field);
 411		if (ret)
 412			return -1;
 413	}
 414	return 0;
 415}
 416
 417static int add_tracepoint_values(struct ctf_writer *cw,
 418				 struct bt_ctf_event_class *event_class,
 419				 struct bt_ctf_event *event,
 420				 struct evsel *evsel,
 421				 struct perf_sample *sample)
 422{
 423	struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
 424	struct tep_format_field *fields        = evsel->tp_format->format.fields;
 425	int ret;
 426
 427	ret = add_tracepoint_fields_values(cw, event_class, event,
 428					   common_fields, sample);
 429	if (!ret)
 430		ret = add_tracepoint_fields_values(cw, event_class, event,
 431						   fields, sample);
 432
 433	return ret;
 434}
 435
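/*
 * BPF output samples carry opaque raw bytes. Emit them as a "raw_len"
 * count of u32 words followed by a "raw_data" CTF sequence of u32
 * values, matching the types declared in add_bpf_output_types().
 */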
 436static int
 437add_bpf_output_values(struct bt_ctf_event_class *event_class,
 438		      struct bt_ctf_event *event,
 439		      struct perf_sample *sample)
 440{
 441	struct bt_ctf_field_type *len_type, *seq_type;
 442	struct bt_ctf_field *len_field, *seq_field;
 443	unsigned int raw_size = sample->raw_size;
 444	unsigned int nr_elements = raw_size / sizeof(u32);
 445	unsigned int i;
 446	int ret;
 447
 448	if (nr_elements * sizeof(u32) != raw_size)
 449		pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n",
  450			   raw_size, raw_size - nr_elements * sizeof(u32));
 451
 452	len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
 453	len_field = bt_ctf_field_create(len_type);
 454	if (!len_field) {
 455		pr_err("failed to create 'raw_len' for bpf output event\n");
 456		ret = -1;
 457		goto put_len_type;
 458	}
 459
 460	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
 461	if (ret) {
 462		pr_err("failed to set field value for raw_len\n");
 463		goto put_len_field;
 464	}
 465	ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
 466	if (ret) {
 467		pr_err("failed to set payload to raw_len\n");
 468		goto put_len_field;
 469	}
 470
 471	seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
 472	seq_field = bt_ctf_field_create(seq_type);
 473	if (!seq_field) {
 474		pr_err("failed to create 'raw_data' for bpf output event\n");
 475		ret = -1;
 476		goto put_seq_type;
 477	}
 478
 479	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
 480	if (ret) {
 481		pr_err("failed to set length of 'raw_data'\n");
 482		goto put_seq_field;
 483	}
 484
 485	for (i = 0; i < nr_elements; i++) {
 486		struct bt_ctf_field *elem_field =
 487			bt_ctf_field_sequence_get_field(seq_field, i);
 488
 489		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
 490				((u32 *)(sample->raw_data))[i]);
 491
 492		bt_ctf_field_put(elem_field);
 493		if (ret) {
 494			pr_err("failed to set raw_data[%d]\n", i);
 495			goto put_seq_field;
 496		}
 497	}
 498
 499	ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
 500	if (ret)
 501		pr_err("failed to set payload for raw_data\n");
 502
 503put_seq_field:
 504	bt_ctf_field_put(seq_field);
 505put_seq_type:
 506	bt_ctf_field_type_put(seq_type);
 507put_len_field:
 508	bt_ctf_field_put(len_field);
 509put_len_type:
 510	bt_ctf_field_type_put(len_type);
 511	return ret;
 512}
 513
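/*
 * Callchains follow the same pattern as BPF output: a
 * "perf_callchain_size" length field plus a "perf_callchain" CTF
 * sequence with one u64 entry per callchain ip.
 */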
 514static int
 515add_callchain_output_values(struct bt_ctf_event_class *event_class,
 516		      struct bt_ctf_event *event,
 517		      struct ip_callchain *callchain)
 518{
 519	struct bt_ctf_field_type *len_type, *seq_type;
 520	struct bt_ctf_field *len_field, *seq_field;
 521	unsigned int nr_elements = callchain->nr;
 522	unsigned int i;
 523	int ret;
 524
 525	len_type = bt_ctf_event_class_get_field_by_name(
 526			event_class, "perf_callchain_size");
 527	len_field = bt_ctf_field_create(len_type);
 528	if (!len_field) {
 529		pr_err("failed to create 'perf_callchain_size' for callchain output event\n");
 530		ret = -1;
 531		goto put_len_type;
 532	}
 533
 534	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
 535	if (ret) {
 536		pr_err("failed to set field value for perf_callchain_size\n");
 537		goto put_len_field;
 538	}
 539	ret = bt_ctf_event_set_payload(event, "perf_callchain_size", len_field);
 540	if (ret) {
 541		pr_err("failed to set payload to perf_callchain_size\n");
 542		goto put_len_field;
 543	}
 544
 545	seq_type = bt_ctf_event_class_get_field_by_name(
 546			event_class, "perf_callchain");
 547	seq_field = bt_ctf_field_create(seq_type);
 548	if (!seq_field) {
 549		pr_err("failed to create 'perf_callchain' for callchain output event\n");
 550		ret = -1;
 551		goto put_seq_type;
 552	}
 553
 554	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
 555	if (ret) {
 556		pr_err("failed to set length of 'perf_callchain'\n");
 557		goto put_seq_field;
 558	}
 559
 560	for (i = 0; i < nr_elements; i++) {
 561		struct bt_ctf_field *elem_field =
 562			bt_ctf_field_sequence_get_field(seq_field, i);
 563
 564		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
 565				((u64 *)(callchain->ips))[i]);
 566
 567		bt_ctf_field_put(elem_field);
 568		if (ret) {
 569			pr_err("failed to set callchain[%d]\n", i);
 570			goto put_seq_field;
 571		}
 572	}
 573
 574	ret = bt_ctf_event_set_payload(event, "perf_callchain", seq_field);
 575	if (ret)
  576		pr_err("failed to set payload for perf_callchain\n");
 577
 578put_seq_field:
 579	bt_ctf_field_put(seq_field);
 580put_seq_type:
 581	bt_ctf_field_type_put(seq_type);
 582put_len_field:
 583	bt_ctf_field_put(len_field);
 584put_len_type:
 585	bt_ctf_field_type_put(len_type);
 586	return ret;
 587}
 588
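/*
 * Fill the generic "perf_*" payload fields (ip, tid/pid, id, period,
 * weight, ...) according to the event's sample_type bits, mirroring
 * the field layout created by add_generic_types().
 */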
 589static int add_generic_values(struct ctf_writer *cw,
 590			      struct bt_ctf_event *event,
 591			      struct evsel *evsel,
 592			      struct perf_sample *sample)
 593{
 594	u64 type = evsel->core.attr.sample_type;
 595	int ret;
 596
 597	/*
 598	 * missing:
 599	 *   PERF_SAMPLE_TIME         - not needed as we have it in
 600	 *                              ctf event header
 601	 *   PERF_SAMPLE_READ         - TODO
 602	 *   PERF_SAMPLE_RAW          - tracepoint fields are handled separately
 603	 *   PERF_SAMPLE_BRANCH_STACK - TODO
 604	 *   PERF_SAMPLE_REGS_USER    - TODO
 605	 *   PERF_SAMPLE_STACK_USER   - TODO
 606	 */
 607
 608	if (type & PERF_SAMPLE_IP) {
 609		ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
 610		if (ret)
 611			return -1;
 612	}
 613
 614	if (type & PERF_SAMPLE_TID) {
 615		ret = value_set_s32(cw, event, "perf_tid", sample->tid);
 616		if (ret)
 617			return -1;
 618
 619		ret = value_set_s32(cw, event, "perf_pid", sample->pid);
 620		if (ret)
 621			return -1;
 622	}
 623
 624	if ((type & PERF_SAMPLE_ID) ||
 625	    (type & PERF_SAMPLE_IDENTIFIER)) {
 626		ret = value_set_u64(cw, event, "perf_id", sample->id);
 627		if (ret)
 628			return -1;
 629	}
 630
 631	if (type & PERF_SAMPLE_STREAM_ID) {
 632		ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
 633		if (ret)
 634			return -1;
 635	}
 636
 637	if (type & PERF_SAMPLE_PERIOD) {
 638		ret = value_set_u64(cw, event, "perf_period", sample->period);
 639		if (ret)
 640			return -1;
 641	}
 642
 643	if (type & PERF_SAMPLE_WEIGHT) {
 644		ret = value_set_u64(cw, event, "perf_weight", sample->weight);
 645		if (ret)
 646			return -1;
 647	}
 648
 649	if (type & PERF_SAMPLE_DATA_SRC) {
 650		ret = value_set_u64(cw, event, "perf_data_src",
 651				sample->data_src);
 652		if (ret)
 653			return -1;
 654	}
 655
 656	if (type & PERF_SAMPLE_TRANSACTION) {
 657		ret = value_set_u64(cw, event, "perf_transaction",
 658				sample->transaction);
 659		if (ret)
 660			return -1;
 661	}
 662
 663	return 0;
 664}
 665
 666static int ctf_stream__flush(struct ctf_stream *cs)
 667{
 668	int err = 0;
 669
 670	if (cs) {
 671		err = bt_ctf_stream_flush(cs->stream);
 672		if (err)
 673			pr_err("CTF stream %d flush failed\n", cs->cpu);
 674
 675		pr("Flush stream for cpu %d (%u samples)\n",
 676		   cs->cpu, cs->count);
 677
 678		cs->count = 0;
 679	}
 680
 681	return err;
 682}
 683
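/*
 * Create the per-CPU CTF stream and store the CPU number in the
 * "cpu_id" member of its packet context (added to the stream class in
 * ctf_writer__init()).
 */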
 684static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
 685{
 686	struct ctf_stream *cs;
 687	struct bt_ctf_field *pkt_ctx   = NULL;
 688	struct bt_ctf_field *cpu_field = NULL;
 689	struct bt_ctf_stream *stream   = NULL;
 690	int ret;
 691
 692	cs = zalloc(sizeof(*cs));
 693	if (!cs) {
 694		pr_err("Failed to allocate ctf stream\n");
 695		return NULL;
 696	}
 697
 698	stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
 699	if (!stream) {
 700		pr_err("Failed to create CTF stream\n");
 701		goto out;
 702	}
 703
 704	pkt_ctx = bt_ctf_stream_get_packet_context(stream);
 705	if (!pkt_ctx) {
 706		pr_err("Failed to obtain packet context\n");
 707		goto out;
 708	}
 709
 710	cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
 711	bt_ctf_field_put(pkt_ctx);
 712	if (!cpu_field) {
 713		pr_err("Failed to obtain cpu field\n");
 714		goto out;
 715	}
 716
 717	ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
 718	if (ret) {
 719		pr_err("Failed to update CPU number\n");
 720		goto out;
 721	}
 722
 723	bt_ctf_field_put(cpu_field);
 724
 725	cs->cpu    = cpu;
 726	cs->stream = stream;
 727	return cs;
 728
 729out:
 730	if (cpu_field)
 731		bt_ctf_field_put(cpu_field);
 732	if (stream)
 733		bt_ctf_stream_put(stream);
 734
 735	free(cs);
 736	return NULL;
 737}
 738
 739static void ctf_stream__delete(struct ctf_stream *cs)
 740{
 741	if (cs) {
 742		bt_ctf_stream_put(cs->stream);
 743		free(cs);
 744	}
 745}
 746
 747static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
 748{
 749	struct ctf_stream *cs = cw->stream[cpu];
 750
 751	if (!cs) {
 752		cs = ctf_stream__create(cw, cpu);
 753		cw->stream[cpu] = cs;
 754	}
 755
 756	return cs;
 757}
 758
 759static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
 760			  struct evsel *evsel)
 761{
 762	int cpu = 0;
 763
 764	if (evsel->core.attr.sample_type & PERF_SAMPLE_CPU)
 765		cpu = sample->cpu;
 766
  767	if (cpu >= cw->stream_cnt) {
 768		pr_err("Event was recorded for CPU %d, limit is at %d.\n",
 769			cpu, cw->stream_cnt);
 770		cpu = 0;
 771	}
 772
 773	return cpu;
 774}
 775
 776#define STREAM_FLUSH_COUNT 100000
 777
  778/*
  779 * Currently we have no way to determine when to
  780 * flush a stream other than keeping track of the
  781 * number of events and checking it against a
  782 * threshold.
  783 */
 784static bool is_flush_needed(struct ctf_stream *cs)
 785{
 786	return cs->count >= STREAM_FLUSH_COUNT;
 787}
 788
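/*
 * Convert one PERF_RECORD_SAMPLE into a CTF event: update the
 * statistics, set the CTF clock to the sample time, fill the generic,
 * tracepoint, callchain and BPF output payloads as applicable, then
 * append the event to the per-CPU stream, flushing it first when it
 * reaches STREAM_FLUSH_COUNT events.
 */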
 789static int process_sample_event(struct perf_tool *tool,
 790				union perf_event *_event,
 791				struct perf_sample *sample,
 792				struct evsel *evsel,
 793				struct machine *machine __maybe_unused)
 794{
 795	struct convert *c = container_of(tool, struct convert, tool);
 796	struct evsel_priv *priv = evsel->priv;
 797	struct ctf_writer *cw = &c->writer;
 798	struct ctf_stream *cs;
 799	struct bt_ctf_event_class *event_class;
 800	struct bt_ctf_event *event;
 801	int ret;
 802	unsigned long type = evsel->core.attr.sample_type;
 803
 804	if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
 805		return 0;
 806
 807	event_class = priv->event_class;
 808
 809	/* update stats */
 810	c->events_count++;
 811	c->events_size += _event->header.size;
 812
 813	pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);
 814
 815	event = bt_ctf_event_create(event_class);
 816	if (!event) {
  817		pr_err("Failed to create a CTF event\n");
 818		return -1;
 819	}
 820
 821	bt_ctf_clock_set_time(cw->clock, sample->time);
 822
 823	ret = add_generic_values(cw, event, evsel, sample);
 824	if (ret)
 825		return -1;
 826
 827	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
 828		ret = add_tracepoint_values(cw, event_class, event,
 829					    evsel, sample);
 830		if (ret)
 831			return -1;
 832	}
 833
 834	if (type & PERF_SAMPLE_CALLCHAIN) {
 835		ret = add_callchain_output_values(event_class,
 836				event, sample->callchain);
 837		if (ret)
 838			return -1;
 839	}
 840
 841	if (evsel__is_bpf_output(evsel)) {
 842		ret = add_bpf_output_values(event_class, event, sample);
 843		if (ret)
 844			return -1;
 845	}
 846
 847	cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
 848	if (cs) {
 849		if (is_flush_needed(cs))
 850			ctf_stream__flush(cs);
 851
 852		cs->count++;
 853		bt_ctf_stream_append_event(cs->stream, event);
 854	}
 855
 856	bt_ctf_event_put(event);
 857	return cs ? 0 : -1;
 858}
 859
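/*
 * The two macros below generate process_comm_event(),
 * process_fork_event() and friends, used with --all: each one fills a
 * CTF event from the corresponding non-sample perf record, appends it
 * to stream 0 and then chains to the default perf_event__process_*()
 * handler.
 */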
 860#define __NON_SAMPLE_SET_FIELD(_name, _type, _field) 	\
 861do {							\
 862	ret = value_set_##_type(cw, event, #_field, _event->_name._field);\
 863	if (ret)					\
 864		return -1;				\
 865} while(0)
 866
 867#define __FUNC_PROCESS_NON_SAMPLE(_name, body) 	\
 868static int process_##_name##_event(struct perf_tool *tool,	\
 869				   union perf_event *_event,	\
 870				   struct perf_sample *sample,	\
 871				   struct machine *machine)	\
 872{								\
 873	struct convert *c = container_of(tool, struct convert, tool);\
 874	struct ctf_writer *cw = &c->writer;			\
 875	struct bt_ctf_event_class *event_class = cw->_name##_class;\
 876	struct bt_ctf_event *event;				\
 877	struct ctf_stream *cs;					\
 878	int ret;						\
 879								\
 880	c->non_sample_count++;					\
 881	c->events_size += _event->header.size;			\
 882	event = bt_ctf_event_create(event_class);		\
 883	if (!event) {						\
  884		pr_err("Failed to create a CTF event\n");	\
 885		return -1;					\
 886	}							\
 887								\
 888	bt_ctf_clock_set_time(cw->clock, sample->time);		\
 889	body							\
 890	cs = ctf_stream(cw, 0);					\
 891	if (cs) {						\
 892		if (is_flush_needed(cs))			\
 893			ctf_stream__flush(cs);			\
 894								\
 895		cs->count++;					\
 896		bt_ctf_stream_append_event(cs->stream, event);	\
 897	}							\
 898	bt_ctf_event_put(event);				\
 899								\
 900	return perf_event__process_##_name(tool, _event, sample, machine);\
 901}
 902
 903__FUNC_PROCESS_NON_SAMPLE(comm,
 904	__NON_SAMPLE_SET_FIELD(comm, u32, pid);
 905	__NON_SAMPLE_SET_FIELD(comm, u32, tid);
 906	__NON_SAMPLE_SET_FIELD(comm, string, comm);
 907)
 908__FUNC_PROCESS_NON_SAMPLE(fork,
 909	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
 910	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
 911	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
 912	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
 913	__NON_SAMPLE_SET_FIELD(fork, u64, time);
 914)
 915
 916__FUNC_PROCESS_NON_SAMPLE(exit,
 917	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
 918	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
 919	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
 920	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
 921	__NON_SAMPLE_SET_FIELD(fork, u64, time);
 922)
 923__FUNC_PROCESS_NON_SAMPLE(mmap,
 924	__NON_SAMPLE_SET_FIELD(mmap, u32, pid);
 925	__NON_SAMPLE_SET_FIELD(mmap, u32, tid);
 926	__NON_SAMPLE_SET_FIELD(mmap, u64_hex, start);
 927	__NON_SAMPLE_SET_FIELD(mmap, string, filename);
 928)
 929__FUNC_PROCESS_NON_SAMPLE(mmap2,
 930	__NON_SAMPLE_SET_FIELD(mmap2, u32, pid);
 931	__NON_SAMPLE_SET_FIELD(mmap2, u32, tid);
 932	__NON_SAMPLE_SET_FIELD(mmap2, u64_hex, start);
 933	__NON_SAMPLE_SET_FIELD(mmap2, string, filename);
 934)
 935#undef __NON_SAMPLE_SET_FIELD
 936#undef __FUNC_PROCESS_NON_SAMPLE
 937
  938/* If dup < 0, add a prefix. Else, add a _dupl_X suffix. */
 939static char *change_name(char *name, char *orig_name, int dup)
 940{
 941	char *new_name = NULL;
 942	size_t len;
 943
 944	if (!name)
 945		name = orig_name;
 946
 947	if (dup >= 10)
 948		goto out;
  949	/*
  950	 * Add a '_' prefix to a potential keyword.  According to
  951	 * Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652),
  952	 * further CTF spec updates may require us to use '$'.
  953	 */
 954	if (dup < 0)
 955		len = strlen(name) + sizeof("_");
 956	else
 957		len = strlen(orig_name) + sizeof("_dupl_X");
 958
 959	new_name = malloc(len);
 960	if (!new_name)
 961		goto out;
 962
 963	if (dup < 0)
 964		snprintf(new_name, len, "_%s", name);
 965	else
 966		snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);
 967
 968out:
 969	if (name != orig_name)
 970		free(name);
 971	return new_name;
 972}
 973
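/*
 * Add a tracepoint field to the event class under a CTF-safe name:
 * identifiers that clash with CTF keywords get a '_' prefix and names
 * already present in the class get a "_dupl_N" suffix, both via
 * change_name(). The final name is cached in field->alias so
 * add_tracepoint_field_value() uses the same name later.
 */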
 974static int event_class_add_field(struct bt_ctf_event_class *event_class,
 975		struct bt_ctf_field_type *type,
 976		struct tep_format_field *field)
 977{
 978	struct bt_ctf_field_type *t = NULL;
 979	char *name;
 980	int dup = 1;
 981	int ret;
 982
 983	/* alias was already assigned */
 984	if (field->alias != field->name)
 985		return bt_ctf_event_class_add_field(event_class, type,
 986				(char *)field->alias);
 987
 988	name = field->name;
 989
  990	/* If 'name' is a keyword, add a prefix. */
 991	if (bt_ctf_validate_identifier(name))
 992		name = change_name(name, field->name, -1);
 993
 994	if (!name) {
  995		pr_err("Failed to fix invalid identifier.\n");
 996		return -1;
 997	}
 998	while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
 999		bt_ctf_field_type_put(t);
1000		name = change_name(name, field->name, dup++);
1001		if (!name) {
1002			pr_err("Failed to create dup name for '%s'\n", field->name);
1003			return -1;
1004		}
1005	}
1006
1007	ret = bt_ctf_event_class_add_field(event_class, type, name);
1008	if (!ret)
1009		field->alias = name;
1010
1011	return ret;
1012}
1013
1014static int add_tracepoint_fields_types(struct ctf_writer *cw,
1015				       struct tep_format_field *fields,
1016				       struct bt_ctf_event_class *event_class)
1017{
1018	struct tep_format_field *field;
1019	int ret;
1020
1021	for (field = fields; field; field = field->next) {
1022		struct bt_ctf_field_type *type;
1023		unsigned long flags = field->flags;
1024
1025		pr2("  field '%s'\n", field->name);
1026
1027		type = get_tracepoint_field_type(cw, field);
1028		if (!type)
1029			return -1;
1030
1031		/*
1032		 * A string is an array of chars. For this we use the string
1033		 * type and don't care that it is an array. What we don't
1034		 * support is an array of strings.
1035		 */
1036		if (flags & TEP_FIELD_IS_STRING)
1037			flags &= ~TEP_FIELD_IS_ARRAY;
1038
1039		if (flags & TEP_FIELD_IS_ARRAY)
1040			type = bt_ctf_field_type_array_create(type, field->arraylen);
1041
1042		ret = event_class_add_field(event_class, type, field);
1043
1044		if (flags & TEP_FIELD_IS_ARRAY)
1045			bt_ctf_field_type_put(type);
1046
1047		if (ret) {
1048			pr_err("Failed to add field '%s': %d\n",
1049					field->name, ret);
1050			return -1;
1051		}
1052	}
1053
1054	return 0;
1055}
1056
1057static int add_tracepoint_types(struct ctf_writer *cw,
1058				struct evsel *evsel,
1059				struct bt_ctf_event_class *class)
1060{
1061	struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
1062	struct tep_format_field *fields        = evsel->tp_format->format.fields;
1063	int ret;
1064
1065	ret = add_tracepoint_fields_types(cw, common_fields, class);
1066	if (!ret)
1067		ret = add_tracepoint_fields_types(cw, fields, class);
1068
1069	return ret;
1070}
1071
1072static int add_bpf_output_types(struct ctf_writer *cw,
1073				struct bt_ctf_event_class *class)
1074{
1075	struct bt_ctf_field_type *len_type = cw->data.u32;
1076	struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
1077	struct bt_ctf_field_type *seq_type;
1078	int ret;
1079
1080	ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
1081	if (ret)
1082		return ret;
1083
1084	seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
1085	if (!seq_type)
1086		return -1;
1087
1088	return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
1089}
1090
1091static int add_generic_types(struct ctf_writer *cw, struct evsel *evsel,
1092			     struct bt_ctf_event_class *event_class)
1093{
1094	u64 type = evsel->core.attr.sample_type;
1095
1096	/*
1097	 * missing:
1098	 *   PERF_SAMPLE_TIME         - not needed as we have it in
1099	 *                              ctf event header
1100	 *   PERF_SAMPLE_READ         - TODO
1101	 *   PERF_SAMPLE_CALLCHAIN    - TODO
1102	 *   PERF_SAMPLE_RAW          - tracepoint fields and BPF output
1103	 *                              are handled separately
1104	 *   PERF_SAMPLE_BRANCH_STACK - TODO
1105	 *   PERF_SAMPLE_REGS_USER    - TODO
1106	 *   PERF_SAMPLE_STACK_USER   - TODO
1107	 */
1108
1109#define ADD_FIELD(cl, t, n)						\
1110	do {								\
1111		pr2("  field '%s'\n", n);				\
1112		if (bt_ctf_event_class_add_field(cl, t, n)) {		\
 1113			pr_err("Failed to add field '%s'\n", n);	\
1114			return -1;					\
1115		}							\
1116	} while (0)
1117
1118	if (type & PERF_SAMPLE_IP)
1119		ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");
1120
1121	if (type & PERF_SAMPLE_TID) {
1122		ADD_FIELD(event_class, cw->data.s32, "perf_tid");
1123		ADD_FIELD(event_class, cw->data.s32, "perf_pid");
1124	}
1125
1126	if ((type & PERF_SAMPLE_ID) ||
1127	    (type & PERF_SAMPLE_IDENTIFIER))
1128		ADD_FIELD(event_class, cw->data.u64, "perf_id");
1129
1130	if (type & PERF_SAMPLE_STREAM_ID)
1131		ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");
1132
1133	if (type & PERF_SAMPLE_PERIOD)
1134		ADD_FIELD(event_class, cw->data.u64, "perf_period");
1135
1136	if (type & PERF_SAMPLE_WEIGHT)
1137		ADD_FIELD(event_class, cw->data.u64, "perf_weight");
1138
1139	if (type & PERF_SAMPLE_DATA_SRC)
1140		ADD_FIELD(event_class, cw->data.u64, "perf_data_src");
1141
1142	if (type & PERF_SAMPLE_TRANSACTION)
1143		ADD_FIELD(event_class, cw->data.u64, "perf_transaction");
1144
1145	if (type & PERF_SAMPLE_CALLCHAIN) {
1146		ADD_FIELD(event_class, cw->data.u32, "perf_callchain_size");
1147		ADD_FIELD(event_class,
1148			bt_ctf_field_type_sequence_create(
1149				cw->data.u64_hex, "perf_callchain_size"),
1150			"perf_callchain");
1151	}
1152
1153#undef ADD_FIELD
1154	return 0;
1155}
1156
1157static int add_event(struct ctf_writer *cw, struct evsel *evsel)
1158{
1159	struct bt_ctf_event_class *event_class;
1160	struct evsel_priv *priv;
1161	const char *name = evsel__name(evsel);
1162	int ret;
1163
1164	pr("Adding event '%s' (type %d)\n", name, evsel->core.attr.type);
1165
1166	event_class = bt_ctf_event_class_create(name);
1167	if (!event_class)
1168		return -1;
1169
1170	ret = add_generic_types(cw, evsel, event_class);
1171	if (ret)
1172		goto err;
1173
1174	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
1175		ret = add_tracepoint_types(cw, evsel, event_class);
1176		if (ret)
1177			goto err;
1178	}
1179
1180	if (evsel__is_bpf_output(evsel)) {
1181		ret = add_bpf_output_types(cw, event_class);
1182		if (ret)
1183			goto err;
1184	}
1185
1186	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
1187	if (ret) {
1188		pr("Failed to add event class into stream.\n");
1189		goto err;
1190	}
1191
1192	priv = malloc(sizeof(*priv));
1193	if (!priv)
1194		goto err;
1195
1196	priv->event_class = event_class;
1197	evsel->priv       = priv;
1198	return 0;
1199
1200err:
1201	bt_ctf_event_class_put(event_class);
1202	pr_err("Failed to add event '%s'.\n", name);
1203	return -1;
1204}
1205
1206static int setup_events(struct ctf_writer *cw, struct perf_session *session)
1207{
1208	struct evlist *evlist = session->evlist;
1209	struct evsel *evsel;
1210	int ret;
1211
1212	evlist__for_each_entry(evlist, evsel) {
1213		ret = add_event(cw, evsel);
1214		if (ret)
1215			return ret;
1216	}
1217	return 0;
1218}
1219
1220#define __NON_SAMPLE_ADD_FIELD(t, n)						\
1221	do {							\
1222		pr2("  field '%s'\n", #n);			\
1223		if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\
 1224			pr_err("Failed to add field '%s'\n", #n);\
1225			return -1;				\
1226		}						\
1227	} while(0)
1228
1229#define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body) 		\
1230static int add_##_name##_event(struct ctf_writer *cw)		\
1231{								\
1232	struct bt_ctf_event_class *event_class;			\
1233	int ret;						\
1234								\
1235	pr("Adding "#_name" event\n");				\
1236	event_class = bt_ctf_event_class_create("perf_" #_name);\
1237	if (!event_class)					\
1238		return -1;					\
1239	body							\
1240								\
1241	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\
1242	if (ret) {						\
1243		pr("Failed to add event class '"#_name"' into stream.\n");\
1244		return ret;					\
1245	}							\
1246								\
1247	cw->_name##_class = event_class;			\
1248	bt_ctf_event_class_put(event_class);			\
1249	return 0;						\
1250}
1251
1252__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm,
1253	__NON_SAMPLE_ADD_FIELD(u32, pid);
1254	__NON_SAMPLE_ADD_FIELD(u32, tid);
1255	__NON_SAMPLE_ADD_FIELD(string, comm);
1256)
1257
1258__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork,
1259	__NON_SAMPLE_ADD_FIELD(u32, pid);
1260	__NON_SAMPLE_ADD_FIELD(u32, ppid);
1261	__NON_SAMPLE_ADD_FIELD(u32, tid);
1262	__NON_SAMPLE_ADD_FIELD(u32, ptid);
1263	__NON_SAMPLE_ADD_FIELD(u64, time);
1264)
1265
1266__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit,
1267	__NON_SAMPLE_ADD_FIELD(u32, pid);
1268	__NON_SAMPLE_ADD_FIELD(u32, ppid);
1269	__NON_SAMPLE_ADD_FIELD(u32, tid);
1270	__NON_SAMPLE_ADD_FIELD(u32, ptid);
1271	__NON_SAMPLE_ADD_FIELD(u64, time);
1272)
1273
1274__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap,
1275	__NON_SAMPLE_ADD_FIELD(u32, pid);
1276	__NON_SAMPLE_ADD_FIELD(u32, tid);
1277	__NON_SAMPLE_ADD_FIELD(u64_hex, start);
1278	__NON_SAMPLE_ADD_FIELD(string, filename);
1279)
1280
1281__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap2,
1282	__NON_SAMPLE_ADD_FIELD(u32, pid);
1283	__NON_SAMPLE_ADD_FIELD(u32, tid);
1284	__NON_SAMPLE_ADD_FIELD(u64_hex, start);
1285	__NON_SAMPLE_ADD_FIELD(string, filename);
1286)
1287#undef __NON_SAMPLE_ADD_FIELD
1288#undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS
1289
1290static int setup_non_sample_events(struct ctf_writer *cw,
1291				   struct perf_session *session __maybe_unused)
1292{
1293	int ret;
1294
1295	ret = add_comm_event(cw);
1296	if (ret)
1297		return ret;
1298	ret = add_exit_event(cw);
1299	if (ret)
1300		return ret;
1301	ret = add_fork_event(cw);
1302	if (ret)
1303		return ret;
1304	ret = add_mmap_event(cw);
1305	if (ret)
1306		return ret;
1307	ret = add_mmap2_event(cw);
1308	if (ret)
1309		return ret;
1310	return 0;
1311}
1312
1313static void cleanup_events(struct perf_session *session)
1314{
1315	struct evlist *evlist = session->evlist;
1316	struct evsel *evsel;
1317
1318	evlist__for_each_entry(evlist, evsel) {
1319		struct evsel_priv *priv;
1320
1321		priv = evsel->priv;
1322		bt_ctf_event_class_put(priv->event_class);
1323		zfree(&evsel->priv);
1324	}
1325
1326	evlist__delete(evlist);
1327	session->evlist = NULL;
1328}
1329
1330static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
1331{
1332	struct ctf_stream **stream;
1333	struct perf_header *ph = &session->header;
1334	int ncpus;
1335
 1336	/*
 1337	 * Try to get the number of cpus used in the data file;
 1338	 * if not present, fall back to MAX_CPUS.
 1339	 */
1340	ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;
1341
1342	stream = zalloc(sizeof(*stream) * ncpus);
1343	if (!stream) {
1344		pr_err("Failed to allocate streams.\n");
1345		return -ENOMEM;
1346	}
1347
1348	cw->stream     = stream;
1349	cw->stream_cnt = ncpus;
1350	return 0;
1351}
1352
1353static void free_streams(struct ctf_writer *cw)
1354{
1355	int cpu;
1356
1357	for (cpu = 0; cpu < cw->stream_cnt; cpu++)
1358		ctf_stream__delete(cw->stream[cpu]);
1359
1360	zfree(&cw->stream);
1361}
1362
1363static int ctf_writer__setup_env(struct ctf_writer *cw,
1364				 struct perf_session *session)
1365{
1366	struct perf_header *header = &session->header;
1367	struct bt_ctf_writer *writer = cw->writer;
1368
1369#define ADD(__n, __v)							\
1370do {									\
1371	if (bt_ctf_writer_add_environment_field(writer, __n, __v))	\
1372		return -1;						\
1373} while (0)
1374
1375	ADD("host",    header->env.hostname);
1376	ADD("sysname", "Linux");
1377	ADD("release", header->env.os_release);
1378	ADD("version", header->env.version);
1379	ADD("machine", header->env.arch);
1380	ADD("domain", "kernel");
1381	ADD("tracer_name", "perf");
1382
1383#undef ADD
1384	return 0;
1385}
1386
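/*
 * The CTF clock runs at 1 GHz since perf timestamps are in
 * nanoseconds. With --tod, the clock description is set to the
 * recorded clockid and the offset (tod_ns - clockid_ns) lets readers
 * map timestamps to wall-clock time.
 */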
1387static int ctf_writer__setup_clock(struct ctf_writer *cw,
1388				   struct perf_session *session,
1389				   bool tod)
1390{
1391	struct bt_ctf_clock *clock = cw->clock;
1392	const char *desc = "perf clock";
1393	int64_t offset = 0;
1394
1395	if (tod) {
1396		struct perf_env *env = &session->header.env;
1397
1398		if (!env->clock.enabled) {
1399			pr_err("Can't provide --tod time, missing clock data. "
1400			       "Please record with -k/--clockid option.\n");
1401			return -1;
1402		}
1403
1404		desc   = clockid_name(env->clock.clockid);
1405		offset = env->clock.tod_ns - env->clock.clockid_ns;
1406	}
1407
1408#define SET(__n, __v)				\
1409do {						\
1410	if (bt_ctf_clock_set_##__n(clock, __v))	\
1411		return -1;			\
1412} while (0)
1413
1414	SET(frequency,   1000000000);
1415	SET(offset,      offset);
1416	SET(description, desc);
1417	SET(precision,   10);
1418	SET(is_absolute, 0);
1419
1420#undef SET
1421	return 0;
1422}
1423
1424static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
1425{
1426	struct bt_ctf_field_type *type;
1427
1428	type = bt_ctf_field_type_integer_create(size);
1429	if (!type)
1430		return NULL;
1431
1432	if (sign &&
1433	    bt_ctf_field_type_integer_set_signed(type, 1))
1434		goto err;
1435
1436	if (hex &&
1437	    bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
1438		goto err;
1439
1440#if __BYTE_ORDER == __BIG_ENDIAN
1441	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
1442#else
1443	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
1444#endif
1445
1446	pr2("Created type: INTEGER %d-bit %ssigned %s\n",
 1447	    size, sign ? "" : "un", hex ? "hex" : "");
1448	return type;
1449
1450err:
1451	bt_ctf_field_type_put(type);
1452	return NULL;
1453}
1454
1455static void ctf_writer__cleanup_data(struct ctf_writer *cw)
1456{
1457	unsigned int i;
1458
1459	for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
1460		bt_ctf_field_type_put(cw->data.array[i]);
1461}
1462
1463static int ctf_writer__init_data(struct ctf_writer *cw)
1464{
1465#define CREATE_INT_TYPE(type, size, sign, hex)		\
1466do {							\
1467	(type) = create_int_type(size, sign, hex);	\
1468	if (!(type))					\
1469		goto err;				\
1470} while (0)
1471
1472	CREATE_INT_TYPE(cw->data.s64, 64, true,  false);
1473	CREATE_INT_TYPE(cw->data.u64, 64, false, false);
1474	CREATE_INT_TYPE(cw->data.s32, 32, true,  false);
1475	CREATE_INT_TYPE(cw->data.u32, 32, false, false);
1476	CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
1477	CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);
1478
1479	cw->data.string  = bt_ctf_field_type_string_create();
1480	if (cw->data.string)
1481		return 0;
1482
1483err:
1484	ctf_writer__cleanup_data(cw);
1485	pr_err("Failed to create data types.\n");
1486	return -1;
1487}
1488
1489static void ctf_writer__cleanup(struct ctf_writer *cw)
1490{
1491	ctf_writer__cleanup_data(cw);
1492
1493	bt_ctf_clock_put(cw->clock);
1494	free_streams(cw);
1495	bt_ctf_stream_class_put(cw->stream_class);
1496	bt_ctf_writer_put(cw->writer);
1497
1498	/* and NULL all the pointers */
1499	memset(cw, 0, sizeof(*cw));
1500}
1501
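/*
 * Set up the babeltrace writer for the output path: create the
 * "perf_clock" clock, the "perf_stream" stream class, the basic
 * integer/string field types and a packet context carrying the
 * "cpu_id" member used by ctf_stream__create().
 */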
1502static int ctf_writer__init(struct ctf_writer *cw, const char *path,
1503			    struct perf_session *session, bool tod)
1504{
1505	struct bt_ctf_writer		*writer;
1506	struct bt_ctf_stream_class	*stream_class;
1507	struct bt_ctf_clock		*clock;
1508	struct bt_ctf_field_type	*pkt_ctx_type;
1509	int				ret;
1510
1511	/* CTF writer */
1512	writer = bt_ctf_writer_create(path);
1513	if (!writer)
1514		goto err;
1515
1516	cw->writer = writer;
1517
1518	/* CTF clock */
1519	clock = bt_ctf_clock_create("perf_clock");
1520	if (!clock) {
1521		pr("Failed to create CTF clock.\n");
1522		goto err_cleanup;
1523	}
1524
1525	cw->clock = clock;
1526
1527	if (ctf_writer__setup_clock(cw, session, tod)) {
1528		pr("Failed to setup CTF clock.\n");
1529		goto err_cleanup;
1530	}
1531
1532	/* CTF stream class */
1533	stream_class = bt_ctf_stream_class_create("perf_stream");
1534	if (!stream_class) {
1535		pr("Failed to create CTF stream class.\n");
1536		goto err_cleanup;
1537	}
1538
1539	cw->stream_class = stream_class;
1540
1541	/* CTF clock stream setup */
1542	if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
1543		pr("Failed to assign CTF clock to stream class.\n");
1544		goto err_cleanup;
1545	}
1546
1547	if (ctf_writer__init_data(cw))
1548		goto err_cleanup;
1549
1550	/* Add cpu_id for packet context */
1551	pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
1552	if (!pkt_ctx_type)
1553		goto err_cleanup;
1554
1555	ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
1556	bt_ctf_field_type_put(pkt_ctx_type);
1557	if (ret)
1558		goto err_cleanup;
1559
1560	/* CTF clock writer setup */
1561	if (bt_ctf_writer_add_clock(writer, clock)) {
1562		pr("Failed to assign CTF clock to writer.\n");
1563		goto err_cleanup;
1564	}
1565
1566	return 0;
1567
1568err_cleanup:
1569	ctf_writer__cleanup(cw);
1570err:
1571	pr_err("Failed to setup CTF writer.\n");
1572	return -1;
1573}
1574
1575static int ctf_writer__flush_streams(struct ctf_writer *cw)
1576{
1577	int cpu, ret = 0;
1578
1579	for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
1580		ret = ctf_stream__flush(cw->stream[cpu]);
1581
1582	return ret;
1583}
1584
1585static int convert__config(const char *var, const char *value, void *cb)
1586{
1587	struct convert *c = cb;
1588
1589	if (!strcmp(var, "convert.queue-size"))
1590		return perf_config_u64(&c->queue_size, var, value);
1591
1592	return 0;
1593}
1594
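/*
 * Entry point for 'perf data convert --to-ctf': open the perf.data
 * session, set up the CTF writer, environment and event classes,
 * process all events through the tool callbacks above, then flush the
 * streams and print the conversion statistics.
 */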
1595int bt_convert__perf2ctf(const char *input, const char *path,
1596			 struct perf_data_convert_opts *opts)
1597{
1598	struct perf_session *session;
1599	struct perf_data data = {
1600		.path	   = input,
1601		.mode      = PERF_DATA_MODE_READ,
1602		.force     = opts->force,
1603	};
1604	struct convert c = {
1605		.tool = {
1606			.sample          = process_sample_event,
1607			.mmap            = perf_event__process_mmap,
1608			.mmap2           = perf_event__process_mmap2,
1609			.comm            = perf_event__process_comm,
1610			.exit            = perf_event__process_exit,
1611			.fork            = perf_event__process_fork,
1612			.lost            = perf_event__process_lost,
1613			.tracing_data    = perf_event__process_tracing_data,
1614			.build_id        = perf_event__process_build_id,
1615			.namespaces      = perf_event__process_namespaces,
1616			.ordered_events  = true,
1617			.ordering_requires_timestamps = true,
1618		},
1619	};
1620	struct ctf_writer *cw = &c.writer;
1621	int err;
1622
1623	if (opts->all) {
1624		c.tool.comm = process_comm_event;
1625		c.tool.exit = process_exit_event;
1626		c.tool.fork = process_fork_event;
1627		c.tool.mmap = process_mmap_event;
1628		c.tool.mmap2 = process_mmap2_event;
1629	}
1630
1631	err = perf_config(convert__config, &c);
1632	if (err)
1633		return err;
1634
1635	err = -1;
1636	/* perf.data session */
1637	session = perf_session__new(&data, 0, &c.tool);
1638	if (IS_ERR(session))
1639		return PTR_ERR(session);
1640
1641	/* CTF writer */
1642	if (ctf_writer__init(cw, path, session, opts->tod))
1643		goto free_session;
1644
1645	if (c.queue_size) {
1646		ordered_events__set_alloc_size(&session->ordered_events,
1647					       c.queue_size);
1648	}
1649
1650	/* CTF writer env/clock setup  */
1651	if (ctf_writer__setup_env(cw, session))
1652		goto free_writer;
1653
1654	/* CTF events setup */
1655	if (setup_events(cw, session))
1656		goto free_writer;
1657
1658	if (opts->all && setup_non_sample_events(cw, session))
1659		goto free_writer;
1660
1661	if (setup_streams(cw, session))
1662		goto free_writer;
1663
1664	err = perf_session__process_events(session);
1665	if (!err)
1666		err = ctf_writer__flush_streams(cw);
1667	else
1668		pr_err("Error during conversion.\n");
1669
1670	fprintf(stderr,
1671		"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
1672		data.path, path);
1673
1674	fprintf(stderr,
1675		"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
1676		(double) c.events_size / 1024.0 / 1024.0,
1677		c.events_count);
1678
1679	if (!c.non_sample_count)
1680		fprintf(stderr, ") ]\n");
1681	else
1682		fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count);
1683
1684	cleanup_events(session);
1685	perf_session__delete(session);
1686	ctf_writer__cleanup(cw);
1687
1688	return err;
1689
1690free_writer:
1691	ctf_writer__cleanup(cw);
1692free_session:
1693	perf_session__delete(session);
1694	pr_err("Error during conversion setup.\n");
1695	return err;
1696}