v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * CTF writing support via babeltrace.
   4 *
   5 * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
   6 * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
   7 */
   8
   9#include <errno.h>
  10#include <inttypes.h>
  11#include <linux/compiler.h>
  12#include <linux/kernel.h>
  13#include <linux/zalloc.h>
  14#include <babeltrace/ctf-writer/writer.h>
  15#include <babeltrace/ctf-writer/clock.h>
  16#include <babeltrace/ctf-writer/stream.h>
  17#include <babeltrace/ctf-writer/event.h>
  18#include <babeltrace/ctf-writer/event-types.h>
  19#include <babeltrace/ctf-writer/event-fields.h>
  20#include <babeltrace/ctf-ir/utils.h>
  21#include <babeltrace/ctf/events.h>
  22#include <traceevent/event-parse.h>
  23#include "asm/bug.h"
  24#include "data-convert-bt.h"
  25#include "session.h"
  26#include "debug.h"
  27#include "tool.h"
  28#include "evlist.h"
  29#include "evsel.h"
  30#include "machine.h"
  31#include "config.h"
  32#include <linux/ctype.h>
  33#include <linux/err.h>
  34
  35#define pr_N(n, fmt, ...) \
  36	eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)
  37
  38#define pr(fmt, ...)  pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
  39#define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)
  40
  41#define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)
  42
  43struct evsel_priv {
  44	struct bt_ctf_event_class *event_class;
  45};
  46
  47#define MAX_CPUS	4096
  48
  49struct ctf_stream {
  50	struct bt_ctf_stream *stream;
  51	int cpu;
  52	u32 count;
  53};
  54
  55struct ctf_writer {
  56	/* writer primitives */
  57	struct bt_ctf_writer		 *writer;
  58	struct ctf_stream		**stream;
  59	int				  stream_cnt;
  60	struct bt_ctf_stream_class	 *stream_class;
  61	struct bt_ctf_clock		 *clock;
  62
  63	/* data types */
  64	union {
  65		struct {
  66			struct bt_ctf_field_type	*s64;
  67			struct bt_ctf_field_type	*u64;
  68			struct bt_ctf_field_type	*s32;
  69			struct bt_ctf_field_type	*u32;
  70			struct bt_ctf_field_type	*string;
  71			struct bt_ctf_field_type	*u32_hex;
  72			struct bt_ctf_field_type	*u64_hex;
  73		};
  74		struct bt_ctf_field_type *array[6];
  75	} data;
  76	struct bt_ctf_event_class	*comm_class;
  77	struct bt_ctf_event_class	*exit_class;
  78	struct bt_ctf_event_class	*fork_class;
  79	struct bt_ctf_event_class	*mmap_class;
  80	struct bt_ctf_event_class	*mmap2_class;
  81};
  82
  83struct convert {
  84	struct perf_tool	tool;
  85	struct ctf_writer	writer;
  86
  87	u64			events_size;
  88	u64			events_count;
  89	u64			non_sample_count;
  90
  91	/* Ordered events configured queue size. */
  92	u64			queue_size;
  93};
  94
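/*
 * Create a standalone integer field of the given type, store 'val' in it
 * (as signed or unsigned, depending on the type) and attach it to the
 * event payload under 'name'.
 */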
  95static int value_set(struct bt_ctf_field_type *type,
  96		     struct bt_ctf_event *event,
  97		     const char *name, u64 val)
  98{
  99	struct bt_ctf_field *field;
 100	bool sign = bt_ctf_field_type_integer_get_signed(type);
 101	int ret;
 102
 103	field = bt_ctf_field_create(type);
 104	if (!field) {
 105		pr_err("failed to create a field %s\n", name);
 106		return -1;
 107	}
 108
 109	if (sign) {
 110		ret = bt_ctf_field_signed_integer_set_value(field, val);
 111		if (ret) {
 112			pr_err("failed to set field value %s\n", name);
 113			goto err;
 114		}
 115	} else {
 116		ret = bt_ctf_field_unsigned_integer_set_value(field, val);
 117		if (ret) {
 118			pr_err("failed to set field value %s\n", name);
 119			goto err;
 120		}
 121	}
 122
 123	ret = bt_ctf_event_set_payload(event, name, field);
 124	if (ret) {
 125		pr_err("failed to set payload %s\n", name);
 126		goto err;
 127	}
 128
 129	pr2("  SET [%s = %" PRIu64 "]\n", name, val);
 130
 131err:
 132	bt_ctf_field_put(field);
 133	return ret;
 134}
 135
 136#define __FUNC_VALUE_SET(_name, _val_type)				\
 137static __maybe_unused int value_set_##_name(struct ctf_writer *cw,	\
 138			     struct bt_ctf_event *event,		\
 139			     const char *name,				\
 140			     _val_type val)				\
 141{									\
 142	struct bt_ctf_field_type *type = cw->data._name;		\
 143	return value_set(type, event, name, (u64) val);			\
 144}
 145
 146#define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)
 147
 148FUNC_VALUE_SET(s32)
 149FUNC_VALUE_SET(u32)
 150FUNC_VALUE_SET(s64)
 151FUNC_VALUE_SET(u64)
 152__FUNC_VALUE_SET(u64_hex, u64)
 153
 154static int string_set_value(struct bt_ctf_field *field, const char *string);
 155static __maybe_unused int
 156value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event,
 157		 const char *name, const char *string)
 158{
 159	struct bt_ctf_field_type *type = cw->data.string;
 160	struct bt_ctf_field *field;
 161	int ret = 0;
 162
 163	field = bt_ctf_field_create(type);
 164	if (!field) {
 165		pr_err("failed to create a field %s\n", name);
 166		return -1;
 167	}
 168
 169	ret = string_set_value(field, string);
 170	if (ret) {
 171		pr_err("failed to set value %s\n", name);
 172		goto err_put_field;
 173	}
 174
 175	ret = bt_ctf_event_set_payload(event, name, field);
 176	if (ret)
 177		pr_err("failed to set payload %s\n", name);
 178
 179err_put_field:
 180	bt_ctf_field_put(field);
 181	return ret;
 182}
 183
 184static struct bt_ctf_field_type*
 185get_tracepoint_field_type(struct ctf_writer *cw, struct tep_format_field *field)
 186{
 187	unsigned long flags = field->flags;
 188
 189	if (flags & TEP_FIELD_IS_STRING)
 190		return cw->data.string;
 191
 192	if (!(flags & TEP_FIELD_IS_SIGNED)) {
 193		/* unsigned long are mostly pointers */
 194		if (flags & TEP_FIELD_IS_LONG || flags & TEP_FIELD_IS_POINTER)
 195			return cw->data.u64_hex;
 196	}
 197
 198	if (flags & TEP_FIELD_IS_SIGNED) {
 199		if (field->size == 8)
 200			return cw->data.s64;
 201		else
 202			return cw->data.s32;
 203	}
 204
 205	if (field->size == 8)
 206		return cw->data.u64;
 207	else
 208		return cw->data.u32;
 209}
 210
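/*
 * Sign-extend a 'size'-byte value to 64 bits, e.g.
 * adjust_signedness(0xfe, 1) == 0xfffffffffffffffeULL (i.e. -2).
 */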
 211static unsigned long long adjust_signedness(unsigned long long value_int, int size)
 212{
 213	unsigned long long value_mask;
 214
 215	/*
 216	 * value_mask = (1 << (size * 8 - 1)) - 1.
 217	 * Directly set value_mask for code readers.
 218	 */
 219	switch (size) {
 220	case 1:
 221		value_mask = 0x7fULL;
 222		break;
 223	case 2:
 224		value_mask = 0x7fffULL;
 225		break;
 226	case 4:
 227		value_mask = 0x7fffffffULL;
 228		break;
 229	case 8:
 230		/*
  231		 * For a 64-bit value, return it as-is. There is no need
  232		 * to fill the high bits.
 233		 */
 234		/* Fall through */
 235	default:
 236		/* BUG! */
 237		return value_int;
 238	}
 239
 240	/* If it is a positive value, don't adjust. */
 241	if ((value_int & (~0ULL - value_mask)) == 0)
 242		return value_int;
 243
  244	/* Fill the upper part of value_int with 1s to make it a negative long long. */
 245	return (value_int & value_mask) | ~value_mask;
 246}
 247
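/*
 * Store 'string' into a CTF string field, escaping any non-printable
 * byte as a "\xNN" sequence.
 */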
 248static int string_set_value(struct bt_ctf_field *field, const char *string)
 249{
 250	char *buffer = NULL;
 251	size_t len = strlen(string), i, p;
 252	int err;
 253
 254	for (i = p = 0; i < len; i++, p++) {
 255		if (isprint(string[i])) {
 256			if (!buffer)
 257				continue;
 258			buffer[p] = string[i];
 259		} else {
 260			char numstr[5];
 261
 262			snprintf(numstr, sizeof(numstr), "\\x%02x",
 263				 (unsigned int)(string[i]) & 0xff);
 264
 265			if (!buffer) {
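				/*
				 * Worst case: every remaining byte expands to a
				 * 4-char "\xNN" escape; +2 covers the terminating NUL.
				 */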
 266				buffer = zalloc(i + (len - i) * 4 + 2);
 267				if (!buffer) {
 268					pr_err("failed to set unprintable string '%s'\n", string);
 269					return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
 270				}
 271				if (i > 0)
 272					strncpy(buffer, string, i);
 273			}
 274			memcpy(buffer + p, numstr, 4);
 275			p += 3;
 276		}
 277	}
 278
 279	if (!buffer)
 280		return bt_ctf_field_string_set_value(field, string);
 281	err = bt_ctf_field_string_set_value(field, buffer);
 282	free(buffer);
 283	return err;
 284}
 285
 286static int add_tracepoint_field_value(struct ctf_writer *cw,
 287				      struct bt_ctf_event_class *event_class,
 288				      struct bt_ctf_event *event,
 289				      struct perf_sample *sample,
 290				      struct tep_format_field *fmtf)
 291{
 292	struct bt_ctf_field_type *type;
 293	struct bt_ctf_field *array_field;
 294	struct bt_ctf_field *field;
 295	const char *name = fmtf->name;
 296	void *data = sample->raw_data;
 297	unsigned long flags = fmtf->flags;
 298	unsigned int n_items;
 299	unsigned int i;
 300	unsigned int offset;
 301	unsigned int len;
 302	int ret;
 303
 304	name = fmtf->alias;
 305	offset = fmtf->offset;
 306	len = fmtf->size;
 307	if (flags & TEP_FIELD_IS_STRING)
 308		flags &= ~TEP_FIELD_IS_ARRAY;
 309
 310	if (flags & TEP_FIELD_IS_DYNAMIC) {
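		/* A __data_loc field stores (length << 16 | offset) at its static offset. */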
 311		unsigned long long tmp_val;
 312
 313		tmp_val = tep_read_number(fmtf->event->tep,
 314					  data + offset, len);
 315		offset = tmp_val;
 316		len = offset >> 16;
 317		offset &= 0xffff;
 318	}
 319
 320	if (flags & TEP_FIELD_IS_ARRAY) {
 321
 322		type = bt_ctf_event_class_get_field_by_name(
 323				event_class, name);
 324		array_field = bt_ctf_field_create(type);
 325		bt_ctf_field_type_put(type);
 326		if (!array_field) {
 327			pr_err("Failed to create array type %s\n", name);
 328			return -1;
 329		}
 330
 331		len = fmtf->size / fmtf->arraylen;
 332		n_items = fmtf->arraylen;
 333	} else {
 334		n_items = 1;
 335		array_field = NULL;
 336	}
 337
 338	type = get_tracepoint_field_type(cw, fmtf);
 339
 340	for (i = 0; i < n_items; i++) {
 341		if (flags & TEP_FIELD_IS_ARRAY)
 342			field = bt_ctf_field_array_get_field(array_field, i);
 343		else
 344			field = bt_ctf_field_create(type);
 345
 346		if (!field) {
 347			pr_err("failed to create a field %s\n", name);
 348			return -1;
 349		}
 350
 351		if (flags & TEP_FIELD_IS_STRING)
 352			ret = string_set_value(field, data + offset + i * len);
 353		else {
 354			unsigned long long value_int;
 355
 356			value_int = tep_read_number(
 357					fmtf->event->tep,
 358					data + offset + i * len, len);
 359
 360			if (!(flags & TEP_FIELD_IS_SIGNED))
 361				ret = bt_ctf_field_unsigned_integer_set_value(
 362						field, value_int);
 363			else
 364				ret = bt_ctf_field_signed_integer_set_value(
 365						field, adjust_signedness(value_int, len));
 366		}
 367
 368		if (ret) {
  369			pr_err("failed to set field value %s\n", name);
 370			goto err_put_field;
 371		}
 372		if (!(flags & TEP_FIELD_IS_ARRAY)) {
 373			ret = bt_ctf_event_set_payload(event, name, field);
 374			if (ret) {
 375				pr_err("failed to set payload %s\n", name);
 376				goto err_put_field;
 377			}
 378		}
 379		bt_ctf_field_put(field);
 380	}
 381	if (flags & TEP_FIELD_IS_ARRAY) {
 382		ret = bt_ctf_event_set_payload(event, name, array_field);
 383		if (ret) {
  384			pr_err("Failed to add payload array %s\n", name);
 385			return -1;
 386		}
 387		bt_ctf_field_put(array_field);
 388	}
 389	return 0;
 390
 391err_put_field:
 392	bt_ctf_field_put(field);
 393	return -1;
 394}
 395
 396static int add_tracepoint_fields_values(struct ctf_writer *cw,
 397					struct bt_ctf_event_class *event_class,
 398					struct bt_ctf_event *event,
 399					struct tep_format_field *fields,
 400					struct perf_sample *sample)
 401{
 402	struct tep_format_field *field;
 403	int ret;
 404
 405	for (field = fields; field; field = field->next) {
 406		ret = add_tracepoint_field_value(cw, event_class, event, sample,
 407				field);
 408		if (ret)
 409			return -1;
 410	}
 411	return 0;
 412}
 413
 414static int add_tracepoint_values(struct ctf_writer *cw,
 415				 struct bt_ctf_event_class *event_class,
 416				 struct bt_ctf_event *event,
 417				 struct evsel *evsel,
 418				 struct perf_sample *sample)
 419{
 420	struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
 421	struct tep_format_field *fields        = evsel->tp_format->format.fields;
 422	int ret;
 423
 424	ret = add_tracepoint_fields_values(cw, event_class, event,
 425					   common_fields, sample);
 426	if (!ret)
 427		ret = add_tracepoint_fields_values(cw, event_class, event,
 428						   fields, sample);
 429
 430	return ret;
 431}
 432
 433static int
 434add_bpf_output_values(struct bt_ctf_event_class *event_class,
 435		      struct bt_ctf_event *event,
 436		      struct perf_sample *sample)
 437{
 438	struct bt_ctf_field_type *len_type, *seq_type;
 439	struct bt_ctf_field *len_field, *seq_field;
 440	unsigned int raw_size = sample->raw_size;
 441	unsigned int nr_elements = raw_size / sizeof(u32);
 442	unsigned int i;
 443	int ret;
 444
 445	if (nr_elements * sizeof(u32) != raw_size)
 446		pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n",
 447			   raw_size, nr_elements * sizeof(u32) - raw_size);
 448
 449	len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
 450	len_field = bt_ctf_field_create(len_type);
 451	if (!len_field) {
 452		pr_err("failed to create 'raw_len' for bpf output event\n");
 453		ret = -1;
 454		goto put_len_type;
 455	}
 456
 457	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
 458	if (ret) {
 459		pr_err("failed to set field value for raw_len\n");
 460		goto put_len_field;
 461	}
 462	ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
 463	if (ret) {
 464		pr_err("failed to set payload to raw_len\n");
 465		goto put_len_field;
 466	}
 467
 468	seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
 469	seq_field = bt_ctf_field_create(seq_type);
 470	if (!seq_field) {
 471		pr_err("failed to create 'raw_data' for bpf output event\n");
 472		ret = -1;
 473		goto put_seq_type;
 474	}
 475
 476	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
 477	if (ret) {
 478		pr_err("failed to set length of 'raw_data'\n");
 479		goto put_seq_field;
 480	}
 481
 482	for (i = 0; i < nr_elements; i++) {
 483		struct bt_ctf_field *elem_field =
 484			bt_ctf_field_sequence_get_field(seq_field, i);
 485
 486		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
 487				((u32 *)(sample->raw_data))[i]);
 488
 489		bt_ctf_field_put(elem_field);
 490		if (ret) {
 491			pr_err("failed to set raw_data[%d]\n", i);
 492			goto put_seq_field;
 493		}
 494	}
 495
 496	ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
 497	if (ret)
 498		pr_err("failed to set payload for raw_data\n");
 499
 500put_seq_field:
 501	bt_ctf_field_put(seq_field);
 502put_seq_type:
 503	bt_ctf_field_type_put(seq_type);
 504put_len_field:
 505	bt_ctf_field_put(len_field);
 506put_len_type:
 507	bt_ctf_field_type_put(len_type);
 508	return ret;
 509}
 510
 511static int
 512add_callchain_output_values(struct bt_ctf_event_class *event_class,
 513		      struct bt_ctf_event *event,
 514		      struct ip_callchain *callchain)
 515{
 516	struct bt_ctf_field_type *len_type, *seq_type;
 517	struct bt_ctf_field *len_field, *seq_field;
 518	unsigned int nr_elements = callchain->nr;
 519	unsigned int i;
 520	int ret;
 521
 522	len_type = bt_ctf_event_class_get_field_by_name(
 523			event_class, "perf_callchain_size");
 524	len_field = bt_ctf_field_create(len_type);
 525	if (!len_field) {
 526		pr_err("failed to create 'perf_callchain_size' for callchain output event\n");
 527		ret = -1;
 528		goto put_len_type;
 529	}
 530
 531	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
 532	if (ret) {
 533		pr_err("failed to set field value for perf_callchain_size\n");
 534		goto put_len_field;
 535	}
 536	ret = bt_ctf_event_set_payload(event, "perf_callchain_size", len_field);
 537	if (ret) {
 538		pr_err("failed to set payload to perf_callchain_size\n");
 539		goto put_len_field;
 540	}
 541
 542	seq_type = bt_ctf_event_class_get_field_by_name(
 543			event_class, "perf_callchain");
 544	seq_field = bt_ctf_field_create(seq_type);
 545	if (!seq_field) {
 546		pr_err("failed to create 'perf_callchain' for callchain output event\n");
 547		ret = -1;
 548		goto put_seq_type;
 549	}
 550
 551	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
 552	if (ret) {
 553		pr_err("failed to set length of 'perf_callchain'\n");
 554		goto put_seq_field;
 555	}
 556
 557	for (i = 0; i < nr_elements; i++) {
 558		struct bt_ctf_field *elem_field =
 559			bt_ctf_field_sequence_get_field(seq_field, i);
 560
 561		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
 562				((u64 *)(callchain->ips))[i]);
 563
 564		bt_ctf_field_put(elem_field);
 565		if (ret) {
 566			pr_err("failed to set callchain[%d]\n", i);
 567			goto put_seq_field;
 568		}
 569	}
 570
 571	ret = bt_ctf_event_set_payload(event, "perf_callchain", seq_field);
 572	if (ret)
  573		pr_err("failed to set payload for perf_callchain\n");
 574
 575put_seq_field:
 576	bt_ctf_field_put(seq_field);
 577put_seq_type:
 578	bt_ctf_field_type_put(seq_type);
 579put_len_field:
 580	bt_ctf_field_put(len_field);
 581put_len_type:
 582	bt_ctf_field_type_put(len_type);
 583	return ret;
 584}
 585
 586static int add_generic_values(struct ctf_writer *cw,
 587			      struct bt_ctf_event *event,
 588			      struct evsel *evsel,
 589			      struct perf_sample *sample)
 590{
 591	u64 type = evsel->core.attr.sample_type;
 592	int ret;
 593
 594	/*
 595	 * missing:
 596	 *   PERF_SAMPLE_TIME         - not needed as we have it in
 597	 *                              ctf event header
 598	 *   PERF_SAMPLE_READ         - TODO
 599	 *   PERF_SAMPLE_RAW          - tracepoint fields are handled separately
 600	 *   PERF_SAMPLE_BRANCH_STACK - TODO
 601	 *   PERF_SAMPLE_REGS_USER    - TODO
 602	 *   PERF_SAMPLE_STACK_USER   - TODO
 603	 */
 604
 605	if (type & PERF_SAMPLE_IP) {
 606		ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
 607		if (ret)
 608			return -1;
 609	}
 610
 611	if (type & PERF_SAMPLE_TID) {
 612		ret = value_set_s32(cw, event, "perf_tid", sample->tid);
 613		if (ret)
 614			return -1;
 615
 616		ret = value_set_s32(cw, event, "perf_pid", sample->pid);
 617		if (ret)
 618			return -1;
 619	}
 620
 621	if ((type & PERF_SAMPLE_ID) ||
 622	    (type & PERF_SAMPLE_IDENTIFIER)) {
 623		ret = value_set_u64(cw, event, "perf_id", sample->id);
 624		if (ret)
 625			return -1;
 626	}
 627
 628	if (type & PERF_SAMPLE_STREAM_ID) {
 629		ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
 630		if (ret)
 631			return -1;
 632	}
 633
 634	if (type & PERF_SAMPLE_PERIOD) {
 635		ret = value_set_u64(cw, event, "perf_period", sample->period);
 636		if (ret)
 637			return -1;
 638	}
 639
 640	if (type & PERF_SAMPLE_WEIGHT) {
 641		ret = value_set_u64(cw, event, "perf_weight", sample->weight);
 642		if (ret)
 643			return -1;
 644	}
 645
 646	if (type & PERF_SAMPLE_DATA_SRC) {
 647		ret = value_set_u64(cw, event, "perf_data_src",
 648				sample->data_src);
 649		if (ret)
 650			return -1;
 651	}
 652
 653	if (type & PERF_SAMPLE_TRANSACTION) {
 654		ret = value_set_u64(cw, event, "perf_transaction",
 655				sample->transaction);
 656		if (ret)
 657			return -1;
 658	}
 659
 660	return 0;
 661}
 662
 663static int ctf_stream__flush(struct ctf_stream *cs)
 664{
 665	int err = 0;
 666
 667	if (cs) {
 668		err = bt_ctf_stream_flush(cs->stream);
 669		if (err)
 670			pr_err("CTF stream %d flush failed\n", cs->cpu);
 671
 672		pr("Flush stream for cpu %d (%u samples)\n",
 673		   cs->cpu, cs->count);
 674
 675		cs->count = 0;
 676	}
 677
 678	return err;
 679}
 680
 681static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
 682{
 683	struct ctf_stream *cs;
 684	struct bt_ctf_field *pkt_ctx   = NULL;
 685	struct bt_ctf_field *cpu_field = NULL;
 686	struct bt_ctf_stream *stream   = NULL;
 687	int ret;
 688
 689	cs = zalloc(sizeof(*cs));
 690	if (!cs) {
 691		pr_err("Failed to allocate ctf stream\n");
 692		return NULL;
 693	}
 694
 695	stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
 696	if (!stream) {
 697		pr_err("Failed to create CTF stream\n");
 698		goto out;
 699	}
 700
 701	pkt_ctx = bt_ctf_stream_get_packet_context(stream);
 702	if (!pkt_ctx) {
 703		pr_err("Failed to obtain packet context\n");
 704		goto out;
 705	}
 706
 707	cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
 708	bt_ctf_field_put(pkt_ctx);
 709	if (!cpu_field) {
 710		pr_err("Failed to obtain cpu field\n");
 711		goto out;
 712	}
 713
 714	ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
 715	if (ret) {
 716		pr_err("Failed to update CPU number\n");
 717		goto out;
 718	}
 719
 720	bt_ctf_field_put(cpu_field);
 721
 722	cs->cpu    = cpu;
 723	cs->stream = stream;
 724	return cs;
 725
 726out:
 727	if (cpu_field)
 728		bt_ctf_field_put(cpu_field);
 729	if (stream)
 730		bt_ctf_stream_put(stream);
 731
 732	free(cs);
 733	return NULL;
 734}
 735
 736static void ctf_stream__delete(struct ctf_stream *cs)
 737{
 738	if (cs) {
 739		bt_ctf_stream_put(cs->stream);
 740		free(cs);
 741	}
 742}
 743
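/* Return the per-CPU stream, creating it lazily on first use. */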
 744static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
 745{
 746	struct ctf_stream *cs = cw->stream[cpu];
 747
 748	if (!cs) {
 749		cs = ctf_stream__create(cw, cpu);
 750		cw->stream[cpu] = cs;
 751	}
 752
 753	return cs;
 754}
 755
 756static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
 757			  struct evsel *evsel)
 758{
 759	int cpu = 0;
 760
 761	if (evsel->core.attr.sample_type & PERF_SAMPLE_CPU)
 762		cpu = sample->cpu;
 763
 764	if (cpu > cw->stream_cnt) {
 765		pr_err("Event was recorded for CPU %d, limit is at %d.\n",
 766			cpu, cw->stream_cnt);
 767		cpu = 0;
 768	}
 769
 770	return cpu;
 771}
 772
 773#define STREAM_FLUSH_COUNT 100000
 774
 775/*
 776 * Currently we have no other way to determine the
  777 * time for the stream flush other than keeping track
  778 * of the number of events and checking it against a
  779 * threshold.
 780 */
 781static bool is_flush_needed(struct ctf_stream *cs)
 782{
 783	return cs->count >= STREAM_FLUSH_COUNT;
 784}
 785
 786static int process_sample_event(struct perf_tool *tool,
 787				union perf_event *_event,
 788				struct perf_sample *sample,
 789				struct evsel *evsel,
 790				struct machine *machine __maybe_unused)
 791{
 792	struct convert *c = container_of(tool, struct convert, tool);
 793	struct evsel_priv *priv = evsel->priv;
 794	struct ctf_writer *cw = &c->writer;
 795	struct ctf_stream *cs;
 796	struct bt_ctf_event_class *event_class;
 797	struct bt_ctf_event *event;
 798	int ret;
 799	unsigned long type = evsel->core.attr.sample_type;
 800
 801	if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
 802		return 0;
 803
 804	event_class = priv->event_class;
 805
 806	/* update stats */
 807	c->events_count++;
 808	c->events_size += _event->header.size;
 809
 810	pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);
 811
 812	event = bt_ctf_event_create(event_class);
 813	if (!event) {
  814		pr_err("Failed to create a CTF event\n");
 815		return -1;
 816	}
 817
 818	bt_ctf_clock_set_time(cw->clock, sample->time);
 819
 820	ret = add_generic_values(cw, event, evsel, sample);
 821	if (ret)
 822		return -1;
 823
 824	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
 825		ret = add_tracepoint_values(cw, event_class, event,
 826					    evsel, sample);
 827		if (ret)
 828			return -1;
 829	}
 830
 831	if (type & PERF_SAMPLE_CALLCHAIN) {
 832		ret = add_callchain_output_values(event_class,
 833				event, sample->callchain);
 834		if (ret)
 835			return -1;
 836	}
 837
 838	if (perf_evsel__is_bpf_output(evsel)) {
 839		ret = add_bpf_output_values(event_class, event, sample);
 840		if (ret)
 841			return -1;
 842	}
 843
 844	cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
 845	if (cs) {
 846		if (is_flush_needed(cs))
 847			ctf_stream__flush(cs);
 848
 849		cs->count++;
 850		bt_ctf_stream_append_event(cs->stream, event);
 851	}
 852
 853	bt_ctf_event_put(event);
 854	return cs ? 0 : -1;
 855}
 856
 857#define __NON_SAMPLE_SET_FIELD(_name, _type, _field) 	\
 858do {							\
 859	ret = value_set_##_type(cw, event, #_field, _event->_name._field);\
 860	if (ret)					\
 861		return -1;				\
 862} while(0)
 863
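/*
 * Generate process_<name>_event() handlers that mirror non-sample events
 * (comm, fork, exit, mmap, mmap2) into the CTF stream before chaining to
 * the default perf_event__process_<name>() handler.
 */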
 864#define __FUNC_PROCESS_NON_SAMPLE(_name, body) 	\
 865static int process_##_name##_event(struct perf_tool *tool,	\
 866				   union perf_event *_event,	\
 867				   struct perf_sample *sample,	\
 868				   struct machine *machine)	\
 869{								\
 870	struct convert *c = container_of(tool, struct convert, tool);\
 871	struct ctf_writer *cw = &c->writer;			\
 872	struct bt_ctf_event_class *event_class = cw->_name##_class;\
 873	struct bt_ctf_event *event;				\
 874	struct ctf_stream *cs;					\
 875	int ret;						\
 876								\
 877	c->non_sample_count++;					\
 878	c->events_size += _event->header.size;			\
 879	event = bt_ctf_event_create(event_class);		\
 880	if (!event) {						\
  881		pr_err("Failed to create a CTF event\n");	\
 882		return -1;					\
 883	}							\
 884								\
 885	bt_ctf_clock_set_time(cw->clock, sample->time);		\
 886	body							\
 887	cs = ctf_stream(cw, 0);					\
 888	if (cs) {						\
 889		if (is_flush_needed(cs))			\
 890			ctf_stream__flush(cs);			\
 891								\
 892		cs->count++;					\
 893		bt_ctf_stream_append_event(cs->stream, event);	\
 894	}							\
 895	bt_ctf_event_put(event);				\
 896								\
 897	return perf_event__process_##_name(tool, _event, sample, machine);\
 898}
 899
 900__FUNC_PROCESS_NON_SAMPLE(comm,
 901	__NON_SAMPLE_SET_FIELD(comm, u32, pid);
 902	__NON_SAMPLE_SET_FIELD(comm, u32, tid);
 903	__NON_SAMPLE_SET_FIELD(comm, string, comm);
 904)
 905__FUNC_PROCESS_NON_SAMPLE(fork,
 906	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
 907	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
 908	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
 909	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
 910	__NON_SAMPLE_SET_FIELD(fork, u64, time);
 911)
 912
 913__FUNC_PROCESS_NON_SAMPLE(exit,
 914	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
 915	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
 916	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
 917	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
 918	__NON_SAMPLE_SET_FIELD(fork, u64, time);
 919)
 920__FUNC_PROCESS_NON_SAMPLE(mmap,
 921	__NON_SAMPLE_SET_FIELD(mmap, u32, pid);
 922	__NON_SAMPLE_SET_FIELD(mmap, u32, tid);
 923	__NON_SAMPLE_SET_FIELD(mmap, u64_hex, start);
 924	__NON_SAMPLE_SET_FIELD(mmap, string, filename);
 925)
 926__FUNC_PROCESS_NON_SAMPLE(mmap2,
 927	__NON_SAMPLE_SET_FIELD(mmap2, u32, pid);
 928	__NON_SAMPLE_SET_FIELD(mmap2, u32, tid);
 929	__NON_SAMPLE_SET_FIELD(mmap2, u64_hex, start);
 930	__NON_SAMPLE_SET_FIELD(mmap2, string, filename);
 931)
 932#undef __NON_SAMPLE_SET_FIELD
 933#undef __FUNC_PROCESS_NON_SAMPLE
 934
 935/* If dup < 0, add a prefix. Else, add _dupl_X suffix. */
 936static char *change_name(char *name, char *orig_name, int dup)
 937{
 938	char *new_name = NULL;
 939	size_t len;
 940
 941	if (!name)
 942		name = orig_name;
 943
 944	if (dup >= 10)
 945		goto out;
 946	/*
  947	 * Add '_' prefix to a potential keyword.  According to
  948	 * Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652),
  949	 * further CTF spec updates may require us to use '$'.
 950	 */
 951	if (dup < 0)
 952		len = strlen(name) + sizeof("_");
 953	else
 954		len = strlen(orig_name) + sizeof("_dupl_X");
 955
 956	new_name = malloc(len);
 957	if (!new_name)
 958		goto out;
 959
 960	if (dup < 0)
 961		snprintf(new_name, len, "_%s", name);
 962	else
 963		snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);
 964
 965out:
 966	if (name != orig_name)
 967		free(name);
 968	return new_name;
 969}
 970
 971static int event_class_add_field(struct bt_ctf_event_class *event_class,
 972		struct bt_ctf_field_type *type,
 973		struct tep_format_field *field)
 974{
 975	struct bt_ctf_field_type *t = NULL;
 976	char *name;
 977	int dup = 1;
 978	int ret;
 979
 980	/* alias was already assigned */
 981	if (field->alias != field->name)
 982		return bt_ctf_event_class_add_field(event_class, type,
 983				(char *)field->alias);
 984
 985	name = field->name;
 986
  987	/* If 'name' is a keyword, add a prefix. */
 988	if (bt_ctf_validate_identifier(name))
 989		name = change_name(name, field->name, -1);
 990
 991	if (!name) {
 992		pr_err("Failed to fix invalid identifier.");
 993		return -1;
 994	}
 995	while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
 996		bt_ctf_field_type_put(t);
 997		name = change_name(name, field->name, dup++);
 998		if (!name) {
 999			pr_err("Failed to create dup name for '%s'\n", field->name);
1000			return -1;
1001		}
1002	}
1003
1004	ret = bt_ctf_event_class_add_field(event_class, type, name);
1005	if (!ret)
1006		field->alias = name;
1007
1008	return ret;
1009}
1010
1011static int add_tracepoint_fields_types(struct ctf_writer *cw,
1012				       struct tep_format_field *fields,
1013				       struct bt_ctf_event_class *event_class)
1014{
1015	struct tep_format_field *field;
1016	int ret;
1017
1018	for (field = fields; field; field = field->next) {
1019		struct bt_ctf_field_type *type;
1020		unsigned long flags = field->flags;
1021
1022		pr2("  field '%s'\n", field->name);
1023
1024		type = get_tracepoint_field_type(cw, field);
1025		if (!type)
1026			return -1;
1027
1028		/*
1029		 * A string is an array of chars. For this we use the string
1030		 * type and don't care that it is an array. What we don't
1031		 * support is an array of strings.
1032		 */
1033		if (flags & TEP_FIELD_IS_STRING)
1034			flags &= ~TEP_FIELD_IS_ARRAY;
1035
1036		if (flags & TEP_FIELD_IS_ARRAY)
1037			type = bt_ctf_field_type_array_create(type, field->arraylen);
1038
1039		ret = event_class_add_field(event_class, type, field);
1040
1041		if (flags & TEP_FIELD_IS_ARRAY)
1042			bt_ctf_field_type_put(type);
1043
1044		if (ret) {
1045			pr_err("Failed to add field '%s': %d\n",
1046					field->name, ret);
1047			return -1;
1048		}
1049	}
1050
1051	return 0;
1052}
1053
1054static int add_tracepoint_types(struct ctf_writer *cw,
1055				struct evsel *evsel,
1056				struct bt_ctf_event_class *class)
1057{
1058	struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
1059	struct tep_format_field *fields        = evsel->tp_format->format.fields;
1060	int ret;
1061
1062	ret = add_tracepoint_fields_types(cw, common_fields, class);
1063	if (!ret)
1064		ret = add_tracepoint_fields_types(cw, fields, class);
1065
1066	return ret;
1067}
1068
1069static int add_bpf_output_types(struct ctf_writer *cw,
1070				struct bt_ctf_event_class *class)
1071{
1072	struct bt_ctf_field_type *len_type = cw->data.u32;
1073	struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
1074	struct bt_ctf_field_type *seq_type;
1075	int ret;
1076
1077	ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
1078	if (ret)
1079		return ret;
1080
1081	seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
1082	if (!seq_type)
1083		return -1;
1084
1085	return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
1086}
1087
1088static int add_generic_types(struct ctf_writer *cw, struct evsel *evsel,
1089			     struct bt_ctf_event_class *event_class)
1090{
1091	u64 type = evsel->core.attr.sample_type;
1092
1093	/*
1094	 * missing:
1095	 *   PERF_SAMPLE_TIME         - not needed as we have it in
1096	 *                              ctf event header
1097	 *   PERF_SAMPLE_READ         - TODO
1098	 *   PERF_SAMPLE_CALLCHAIN    - TODO
1099	 *   PERF_SAMPLE_RAW          - tracepoint fields and BPF output
1100	 *                              are handled separately
1101	 *   PERF_SAMPLE_BRANCH_STACK - TODO
1102	 *   PERF_SAMPLE_REGS_USER    - TODO
1103	 *   PERF_SAMPLE_STACK_USER   - TODO
1104	 */
1105
1106#define ADD_FIELD(cl, t, n)						\
1107	do {								\
1108		pr2("  field '%s'\n", n);				\
1109		if (bt_ctf_event_class_add_field(cl, t, n)) {		\
1110			pr_err("Failed to add field '%s';\n", n);	\
1111			return -1;					\
1112		}							\
1113	} while (0)
1114
1115	if (type & PERF_SAMPLE_IP)
1116		ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");
1117
1118	if (type & PERF_SAMPLE_TID) {
1119		ADD_FIELD(event_class, cw->data.s32, "perf_tid");
1120		ADD_FIELD(event_class, cw->data.s32, "perf_pid");
1121	}
1122
1123	if ((type & PERF_SAMPLE_ID) ||
1124	    (type & PERF_SAMPLE_IDENTIFIER))
1125		ADD_FIELD(event_class, cw->data.u64, "perf_id");
1126
1127	if (type & PERF_SAMPLE_STREAM_ID)
1128		ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");
1129
1130	if (type & PERF_SAMPLE_PERIOD)
1131		ADD_FIELD(event_class, cw->data.u64, "perf_period");
1132
1133	if (type & PERF_SAMPLE_WEIGHT)
1134		ADD_FIELD(event_class, cw->data.u64, "perf_weight");
1135
1136	if (type & PERF_SAMPLE_DATA_SRC)
1137		ADD_FIELD(event_class, cw->data.u64, "perf_data_src");
1138
1139	if (type & PERF_SAMPLE_TRANSACTION)
1140		ADD_FIELD(event_class, cw->data.u64, "perf_transaction");
1141
1142	if (type & PERF_SAMPLE_CALLCHAIN) {
1143		ADD_FIELD(event_class, cw->data.u32, "perf_callchain_size");
1144		ADD_FIELD(event_class,
1145			bt_ctf_field_type_sequence_create(
1146				cw->data.u64_hex, "perf_callchain_size"),
1147			"perf_callchain");
1148	}
1149
1150#undef ADD_FIELD
1151	return 0;
1152}
1153
1154static int add_event(struct ctf_writer *cw, struct evsel *evsel)
1155{
1156	struct bt_ctf_event_class *event_class;
1157	struct evsel_priv *priv;
1158	const char *name = perf_evsel__name(evsel);
1159	int ret;
1160
1161	pr("Adding event '%s' (type %d)\n", name, evsel->core.attr.type);
1162
1163	event_class = bt_ctf_event_class_create(name);
1164	if (!event_class)
1165		return -1;
1166
1167	ret = add_generic_types(cw, evsel, event_class);
1168	if (ret)
1169		goto err;
1170
1171	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
1172		ret = add_tracepoint_types(cw, evsel, event_class);
1173		if (ret)
1174			goto err;
1175	}
1176
1177	if (perf_evsel__is_bpf_output(evsel)) {
1178		ret = add_bpf_output_types(cw, event_class);
1179		if (ret)
1180			goto err;
1181	}
1182
1183	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
1184	if (ret) {
1185		pr("Failed to add event class into stream.\n");
1186		goto err;
1187	}
1188
1189	priv = malloc(sizeof(*priv));
1190	if (!priv)
1191		goto err;
1192
1193	priv->event_class = event_class;
1194	evsel->priv       = priv;
1195	return 0;
1196
1197err:
1198	bt_ctf_event_class_put(event_class);
1199	pr_err("Failed to add event '%s'.\n", name);
1200	return -1;
1201}
1202
1203static int setup_events(struct ctf_writer *cw, struct perf_session *session)
1204{
1205	struct evlist *evlist = session->evlist;
1206	struct evsel *evsel;
1207	int ret;
1208
1209	evlist__for_each_entry(evlist, evsel) {
1210		ret = add_event(cw, evsel);
1211		if (ret)
1212			return ret;
1213	}
1214	return 0;
1215}
1216
1217#define __NON_SAMPLE_ADD_FIELD(t, n)						\
1218	do {							\
1219		pr2("  field '%s'\n", #n);			\
1220		if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\
1221			pr_err("Failed to add field '%s';\n", #n);\
1222			return -1;				\
1223		}						\
1224	} while(0)
1225
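/*
 * Generate add_<name>_event() helpers that create and register the CTF
 * event classes used for the non-sample (comm, fork, exit, mmap, mmap2) events.
 */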
1226#define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body) 		\
1227static int add_##_name##_event(struct ctf_writer *cw)		\
1228{								\
1229	struct bt_ctf_event_class *event_class;			\
1230	int ret;						\
1231								\
1232	pr("Adding "#_name" event\n");				\
1233	event_class = bt_ctf_event_class_create("perf_" #_name);\
1234	if (!event_class)					\
1235		return -1;					\
1236	body							\
1237								\
1238	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\
1239	if (ret) {						\
1240		pr("Failed to add event class '"#_name"' into stream.\n");\
1241		return ret;					\
1242	}							\
1243								\
1244	cw->_name##_class = event_class;			\
1245	bt_ctf_event_class_put(event_class);			\
1246	return 0;						\
1247}
1248
1249__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm,
1250	__NON_SAMPLE_ADD_FIELD(u32, pid);
1251	__NON_SAMPLE_ADD_FIELD(u32, tid);
1252	__NON_SAMPLE_ADD_FIELD(string, comm);
1253)
1254
1255__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork,
1256	__NON_SAMPLE_ADD_FIELD(u32, pid);
1257	__NON_SAMPLE_ADD_FIELD(u32, ppid);
1258	__NON_SAMPLE_ADD_FIELD(u32, tid);
1259	__NON_SAMPLE_ADD_FIELD(u32, ptid);
1260	__NON_SAMPLE_ADD_FIELD(u64, time);
1261)
1262
1263__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit,
1264	__NON_SAMPLE_ADD_FIELD(u32, pid);
1265	__NON_SAMPLE_ADD_FIELD(u32, ppid);
1266	__NON_SAMPLE_ADD_FIELD(u32, tid);
1267	__NON_SAMPLE_ADD_FIELD(u32, ptid);
1268	__NON_SAMPLE_ADD_FIELD(u64, time);
1269)
1270
1271__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap,
1272	__NON_SAMPLE_ADD_FIELD(u32, pid);
1273	__NON_SAMPLE_ADD_FIELD(u32, tid);
1274	__NON_SAMPLE_ADD_FIELD(u64_hex, start);
1275	__NON_SAMPLE_ADD_FIELD(string, filename);
1276)
1277
1278__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap2,
1279	__NON_SAMPLE_ADD_FIELD(u32, pid);
1280	__NON_SAMPLE_ADD_FIELD(u32, tid);
1281	__NON_SAMPLE_ADD_FIELD(u64_hex, start);
1282	__NON_SAMPLE_ADD_FIELD(string, filename);
1283)
1284#undef __NON_SAMPLE_ADD_FIELD
1285#undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS
1286
1287static int setup_non_sample_events(struct ctf_writer *cw,
1288				   struct perf_session *session __maybe_unused)
1289{
1290	int ret;
1291
1292	ret = add_comm_event(cw);
1293	if (ret)
1294		return ret;
1295	ret = add_exit_event(cw);
1296	if (ret)
1297		return ret;
1298	ret = add_fork_event(cw);
1299	if (ret)
1300		return ret;
1301	ret = add_mmap_event(cw);
1302	if (ret)
1303		return ret;
1304	ret = add_mmap2_event(cw);
1305	if (ret)
1306		return ret;
1307	return 0;
1308}
1309
1310static void cleanup_events(struct perf_session *session)
1311{
1312	struct evlist *evlist = session->evlist;
1313	struct evsel *evsel;
1314
1315	evlist__for_each_entry(evlist, evsel) {
1316		struct evsel_priv *priv;
1317
1318		priv = evsel->priv;
1319		bt_ctf_event_class_put(priv->event_class);
1320		zfree(&evsel->priv);
1321	}
1322
1323	evlist__delete(evlist);
1324	session->evlist = NULL;
1325}
1326
1327static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
1328{
1329	struct ctf_stream **stream;
1330	struct perf_header *ph = &session->header;
1331	int ncpus;
1332
1333	/*
1334	 * Try to get the number of cpus used in the data file,
 1335	 * if not present, fall back to MAX_CPUS.
1336	 */
1337	ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;
1338
1339	stream = zalloc(sizeof(*stream) * ncpus);
1340	if (!stream) {
1341		pr_err("Failed to allocate streams.\n");
1342		return -ENOMEM;
1343	}
1344
1345	cw->stream     = stream;
1346	cw->stream_cnt = ncpus;
1347	return 0;
1348}
1349
1350static void free_streams(struct ctf_writer *cw)
1351{
1352	int cpu;
1353
1354	for (cpu = 0; cpu < cw->stream_cnt; cpu++)
1355		ctf_stream__delete(cw->stream[cpu]);
1356
1357	zfree(&cw->stream);
1358}
1359
1360static int ctf_writer__setup_env(struct ctf_writer *cw,
1361				 struct perf_session *session)
1362{
1363	struct perf_header *header = &session->header;
1364	struct bt_ctf_writer *writer = cw->writer;
1365
1366#define ADD(__n, __v)							\
1367do {									\
1368	if (bt_ctf_writer_add_environment_field(writer, __n, __v))	\
1369		return -1;						\
1370} while (0)
1371
1372	ADD("host",    header->env.hostname);
1373	ADD("sysname", "Linux");
1374	ADD("release", header->env.os_release);
1375	ADD("version", header->env.version);
1376	ADD("machine", header->env.arch);
1377	ADD("domain", "kernel");
1378	ADD("tracer_name", "perf");
1379
1380#undef ADD
1381	return 0;
1382}
1383
1384static int ctf_writer__setup_clock(struct ctf_writer *cw)
1385{
1386	struct bt_ctf_clock *clock = cw->clock;
1387
1388	bt_ctf_clock_set_description(clock, "perf clock");
1389
1390#define SET(__n, __v)				\
1391do {						\
1392	if (bt_ctf_clock_set_##__n(clock, __v))	\
1393		return -1;			\
1394} while (0)
1395
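	/* perf timestamps are in nanoseconds, so the clock runs at 1 GHz. */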
1396	SET(frequency,   1000000000);
1397	SET(offset_s,    0);
1398	SET(offset,      0);
1399	SET(precision,   10);
1400	SET(is_absolute, 0);
1401
1402#undef SET
1403	return 0;
1404}
1405
1406static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
1407{
1408	struct bt_ctf_field_type *type;
1409
1410	type = bt_ctf_field_type_integer_create(size);
1411	if (!type)
1412		return NULL;
1413
1414	if (sign &&
1415	    bt_ctf_field_type_integer_set_signed(type, 1))
1416		goto err;
1417
1418	if (hex &&
1419	    bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
1420		goto err;
1421
1422#if __BYTE_ORDER == __BIG_ENDIAN
1423	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
1424#else
1425	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
1426#endif
1427
1428	pr2("Created type: INTEGER %d-bit %ssigned %s\n",
 1429	    size, sign ? "" : "un", hex ? "hex" : "");
1430	return type;
1431
1432err:
1433	bt_ctf_field_type_put(type);
1434	return NULL;
1435}
1436
1437static void ctf_writer__cleanup_data(struct ctf_writer *cw)
1438{
1439	unsigned int i;
1440
1441	for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
1442		bt_ctf_field_type_put(cw->data.array[i]);
1443}
1444
1445static int ctf_writer__init_data(struct ctf_writer *cw)
1446{
1447#define CREATE_INT_TYPE(type, size, sign, hex)		\
1448do {							\
1449	(type) = create_int_type(size, sign, hex);	\
1450	if (!(type))					\
1451		goto err;				\
1452} while (0)
1453
1454	CREATE_INT_TYPE(cw->data.s64, 64, true,  false);
1455	CREATE_INT_TYPE(cw->data.u64, 64, false, false);
1456	CREATE_INT_TYPE(cw->data.s32, 32, true,  false);
1457	CREATE_INT_TYPE(cw->data.u32, 32, false, false);
1458	CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
1459	CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);
1460
1461	cw->data.string  = bt_ctf_field_type_string_create();
1462	if (cw->data.string)
1463		return 0;
1464
1465err:
1466	ctf_writer__cleanup_data(cw);
1467	pr_err("Failed to create data types.\n");
1468	return -1;
1469}
1470
1471static void ctf_writer__cleanup(struct ctf_writer *cw)
1472{
1473	ctf_writer__cleanup_data(cw);
1474
1475	bt_ctf_clock_put(cw->clock);
1476	free_streams(cw);
1477	bt_ctf_stream_class_put(cw->stream_class);
1478	bt_ctf_writer_put(cw->writer);
1479
1480	/* and NULL all the pointers */
1481	memset(cw, 0, sizeof(*cw));
1482}
1483
1484static int ctf_writer__init(struct ctf_writer *cw, const char *path)
1485{
1486	struct bt_ctf_writer		*writer;
1487	struct bt_ctf_stream_class	*stream_class;
1488	struct bt_ctf_clock		*clock;
1489	struct bt_ctf_field_type	*pkt_ctx_type;
1490	int				ret;
1491
1492	/* CTF writer */
1493	writer = bt_ctf_writer_create(path);
1494	if (!writer)
1495		goto err;
1496
1497	cw->writer = writer;
1498
1499	/* CTF clock */
1500	clock = bt_ctf_clock_create("perf_clock");
1501	if (!clock) {
1502		pr("Failed to create CTF clock.\n");
1503		goto err_cleanup;
1504	}
1505
1506	cw->clock = clock;
1507
1508	if (ctf_writer__setup_clock(cw)) {
1509		pr("Failed to setup CTF clock.\n");
1510		goto err_cleanup;
1511	}
1512
1513	/* CTF stream class */
1514	stream_class = bt_ctf_stream_class_create("perf_stream");
1515	if (!stream_class) {
1516		pr("Failed to create CTF stream class.\n");
1517		goto err_cleanup;
1518	}
1519
1520	cw->stream_class = stream_class;
1521
1522	/* CTF clock stream setup */
1523	if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
1524		pr("Failed to assign CTF clock to stream class.\n");
1525		goto err_cleanup;
1526	}
1527
1528	if (ctf_writer__init_data(cw))
1529		goto err_cleanup;
1530
1531	/* Add cpu_id for packet context */
1532	pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
1533	if (!pkt_ctx_type)
1534		goto err_cleanup;
1535
1536	ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
1537	bt_ctf_field_type_put(pkt_ctx_type);
1538	if (ret)
1539		goto err_cleanup;
1540
1541	/* CTF clock writer setup */
1542	if (bt_ctf_writer_add_clock(writer, clock)) {
1543		pr("Failed to assign CTF clock to writer.\n");
1544		goto err_cleanup;
1545	}
1546
1547	return 0;
1548
1549err_cleanup:
1550	ctf_writer__cleanup(cw);
1551err:
1552	pr_err("Failed to setup CTF writer.\n");
1553	return -1;
1554}
1555
1556static int ctf_writer__flush_streams(struct ctf_writer *cw)
1557{
1558	int cpu, ret = 0;
1559
1560	for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
1561		ret = ctf_stream__flush(cw->stream[cpu]);
1562
1563	return ret;
1564}
1565
1566static int convert__config(const char *var, const char *value, void *cb)
1567{
1568	struct convert *c = cb;
1569
1570	if (!strcmp(var, "convert.queue-size"))
1571		return perf_config_u64(&c->queue_size, var, value);
1572
1573	return 0;
1574}
1575
1576int bt_convert__perf2ctf(const char *input, const char *path,
1577			 struct perf_data_convert_opts *opts)
1578{
1579	struct perf_session *session;
1580	struct perf_data data = {
1581		.path	   = input,
1582		.mode      = PERF_DATA_MODE_READ,
1583		.force     = opts->force,
1584	};
1585	struct convert c = {
1586		.tool = {
1587			.sample          = process_sample_event,
1588			.mmap            = perf_event__process_mmap,
1589			.mmap2           = perf_event__process_mmap2,
1590			.comm            = perf_event__process_comm,
1591			.exit            = perf_event__process_exit,
1592			.fork            = perf_event__process_fork,
1593			.lost            = perf_event__process_lost,
1594			.tracing_data    = perf_event__process_tracing_data,
1595			.build_id        = perf_event__process_build_id,
1596			.namespaces      = perf_event__process_namespaces,
1597			.ordered_events  = true,
1598			.ordering_requires_timestamps = true,
1599		},
1600	};
1601	struct ctf_writer *cw = &c.writer;
1602	int err;
1603
1604	if (opts->all) {
1605		c.tool.comm = process_comm_event;
1606		c.tool.exit = process_exit_event;
1607		c.tool.fork = process_fork_event;
1608		c.tool.mmap = process_mmap_event;
1609		c.tool.mmap2 = process_mmap2_event;
1610	}
1611
1612	err = perf_config(convert__config, &c);
1613	if (err)
1614		return err;
1615
1616	/* CTF writer */
1617	if (ctf_writer__init(cw, path))
1618		return -1;
1619
1620	err = -1;
1621	/* perf.data session */
1622	session = perf_session__new(&data, 0, &c.tool);
1623	if (IS_ERR(session)) {
1624		err = PTR_ERR(session);
1625		goto free_writer;
1626	}
1627
1628	if (c.queue_size) {
1629		ordered_events__set_alloc_size(&session->ordered_events,
1630					       c.queue_size);
1631	}
1632
1633	/* CTF writer env/clock setup  */
1634	if (ctf_writer__setup_env(cw, session))
1635		goto free_session;
1636
1637	/* CTF events setup */
1638	if (setup_events(cw, session))
1639		goto free_session;
1640
1641	if (opts->all && setup_non_sample_events(cw, session))
1642		goto free_session;
1643
1644	if (setup_streams(cw, session))
1645		goto free_session;
1646
1647	err = perf_session__process_events(session);
1648	if (!err)
1649		err = ctf_writer__flush_streams(cw);
1650	else
1651		pr_err("Error during conversion.\n");
1652
1653	fprintf(stderr,
1654		"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
1655		data.path, path);
1656
1657	fprintf(stderr,
1658		"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
1659		(double) c.events_size / 1024.0 / 1024.0,
1660		c.events_count);
1661
1662	if (!c.non_sample_count)
1663		fprintf(stderr, ") ]\n");
1664	else
1665		fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count);
1666
1667	cleanup_events(session);
1668	perf_session__delete(session);
1669	ctf_writer__cleanup(cw);
1670
1671	return err;
1672
1673free_session:
1674	perf_session__delete(session);
1675free_writer:
1676	ctf_writer__cleanup(cw);
1677	pr_err("Error during conversion setup.\n");
1678	return err;
1679}
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * CTF writing support via babeltrace.
   4 *
   5 * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
   6 * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
   7 */
   8
   9#include <errno.h>
  10#include <inttypes.h>
  11#include <linux/compiler.h>
  12#include <linux/kernel.h>
  13#include <linux/zalloc.h>
  14#include <babeltrace/ctf-writer/writer.h>
  15#include <babeltrace/ctf-writer/clock.h>
  16#include <babeltrace/ctf-writer/stream.h>
  17#include <babeltrace/ctf-writer/event.h>
  18#include <babeltrace/ctf-writer/event-types.h>
  19#include <babeltrace/ctf-writer/event-fields.h>
  20#include <babeltrace/ctf-ir/utils.h>
  21#include <babeltrace/ctf/events.h>
  22#include "asm/bug.h"
  23#include "data-convert.h"
  24#include "session.h"
  25#include "debug.h"
  26#include "tool.h"
  27#include "evlist.h"
  28#include "evsel.h"
  29#include "machine.h"
  30#include "config.h"
  31#include <linux/ctype.h>
  32#include <linux/err.h>
  33#include <linux/time64.h>
  34#include "util.h"
  35#include "clockid.h"
  36#include "util/sample.h"
  37
  38#ifdef HAVE_LIBTRACEEVENT
  39#include <traceevent/event-parse.h>
  40#endif
  41
  42#define pr_N(n, fmt, ...) \
  43	eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)
  44
  45#define pr(fmt, ...)  pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
  46#define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)
  47
  48#define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)
  49
  50struct evsel_priv {
  51	struct bt_ctf_event_class *event_class;
  52};
  53
  54#define MAX_CPUS	4096
  55
  56struct ctf_stream {
  57	struct bt_ctf_stream *stream;
  58	int cpu;
  59	u32 count;
  60};
  61
  62struct ctf_writer {
  63	/* writer primitives */
  64	struct bt_ctf_writer		 *writer;
  65	struct ctf_stream		**stream;
  66	int				  stream_cnt;
  67	struct bt_ctf_stream_class	 *stream_class;
  68	struct bt_ctf_clock		 *clock;
  69
  70	/* data types */
  71	union {
  72		struct {
  73			struct bt_ctf_field_type	*s64;
  74			struct bt_ctf_field_type	*u64;
  75			struct bt_ctf_field_type	*s32;
  76			struct bt_ctf_field_type	*u32;
  77			struct bt_ctf_field_type	*string;
  78			struct bt_ctf_field_type	*u32_hex;
  79			struct bt_ctf_field_type	*u64_hex;
  80		};
  81		struct bt_ctf_field_type *array[6];
  82	} data;
  83	struct bt_ctf_event_class	*comm_class;
  84	struct bt_ctf_event_class	*exit_class;
  85	struct bt_ctf_event_class	*fork_class;
  86	struct bt_ctf_event_class	*mmap_class;
  87	struct bt_ctf_event_class	*mmap2_class;
  88};
  89
  90struct convert {
  91	struct perf_tool	tool;
  92	struct ctf_writer	writer;
  93
  94	u64			events_size;
  95	u64			events_count;
  96	u64			non_sample_count;
  97
  98	/* Ordered events configured queue size. */
  99	u64			queue_size;
 100};
 101
 102static int value_set(struct bt_ctf_field_type *type,
 103		     struct bt_ctf_event *event,
 104		     const char *name, u64 val)
 105{
 106	struct bt_ctf_field *field;
 107	bool sign = bt_ctf_field_type_integer_get_signed(type);
 108	int ret;
 109
 110	field = bt_ctf_field_create(type);
 111	if (!field) {
 112		pr_err("failed to create a field %s\n", name);
 113		return -1;
 114	}
 115
 116	if (sign) {
 117		ret = bt_ctf_field_signed_integer_set_value(field, val);
 118		if (ret) {
 119			pr_err("failed to set field value %s\n", name);
 120			goto err;
 121		}
 122	} else {
 123		ret = bt_ctf_field_unsigned_integer_set_value(field, val);
 124		if (ret) {
 125			pr_err("failed to set field value %s\n", name);
 126			goto err;
 127		}
 128	}
 129
 130	ret = bt_ctf_event_set_payload(event, name, field);
 131	if (ret) {
 132		pr_err("failed to set payload %s\n", name);
 133		goto err;
 134	}
 135
 136	pr2("  SET [%s = %" PRIu64 "]\n", name, val);
 137
 138err:
 139	bt_ctf_field_put(field);
 140	return ret;
 141}
 142
 143#define __FUNC_VALUE_SET(_name, _val_type)				\
 144static __maybe_unused int value_set_##_name(struct ctf_writer *cw,	\
 145			     struct bt_ctf_event *event,		\
 146			     const char *name,				\
 147			     _val_type val)				\
 148{									\
 149	struct bt_ctf_field_type *type = cw->data._name;		\
 150	return value_set(type, event, name, (u64) val);			\
 151}
 152
 153#define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)
 154
 155FUNC_VALUE_SET(s32)
 156FUNC_VALUE_SET(u32)
 157FUNC_VALUE_SET(s64)
 158FUNC_VALUE_SET(u64)
 159__FUNC_VALUE_SET(u64_hex, u64)
 160
 161static int string_set_value(struct bt_ctf_field *field, const char *string);
 162static __maybe_unused int
 163value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event,
 164		 const char *name, const char *string)
 165{
 166	struct bt_ctf_field_type *type = cw->data.string;
 167	struct bt_ctf_field *field;
 168	int ret = 0;
 169
 170	field = bt_ctf_field_create(type);
 171	if (!field) {
 172		pr_err("failed to create a field %s\n", name);
 173		return -1;
 174	}
 175
 176	ret = string_set_value(field, string);
 177	if (ret) {
 178		pr_err("failed to set value %s\n", name);
 179		goto err_put_field;
 180	}
 181
 182	ret = bt_ctf_event_set_payload(event, name, field);
 183	if (ret)
 184		pr_err("failed to set payload %s\n", name);
 185
 186err_put_field:
 187	bt_ctf_field_put(field);
 188	return ret;
 189}
 190
 191static struct bt_ctf_field_type*
 192get_tracepoint_field_type(struct ctf_writer *cw, struct tep_format_field *field)
 193{
 194	unsigned long flags = field->flags;
 195
 196	if (flags & TEP_FIELD_IS_STRING)
 197		return cw->data.string;
 198
 199	if (!(flags & TEP_FIELD_IS_SIGNED)) {
 200		/* unsigned long are mostly pointers */
 201		if (flags & TEP_FIELD_IS_LONG || flags & TEP_FIELD_IS_POINTER)
 202			return cw->data.u64_hex;
 203	}
 204
 205	if (flags & TEP_FIELD_IS_SIGNED) {
 206		if (field->size == 8)
 207			return cw->data.s64;
 208		else
 209			return cw->data.s32;
 210	}
 211
 212	if (field->size == 8)
 213		return cw->data.u64;
 214	else
 215		return cw->data.u32;
 216}
 217
 218static unsigned long long adjust_signedness(unsigned long long value_int, int size)
 219{
 220	unsigned long long value_mask;
 221
 222	/*
 223	 * value_mask = (1 << (size * 8 - 1)) - 1.
 224	 * Directly set value_mask for code readers.
 225	 */
 226	switch (size) {
 227	case 1:
 228		value_mask = 0x7fULL;
 229		break;
 230	case 2:
 231		value_mask = 0x7fffULL;
 232		break;
 233	case 4:
 234		value_mask = 0x7fffffffULL;
 235		break;
 236	case 8:
 237		/*
  238		 * For a 64-bit value, return it as-is. There is no need
  239		 * to fill the high bits.
 240		 */
 241		/* Fall through */
 242	default:
 243		/* BUG! */
 244		return value_int;
 245	}
 246
 247	/* If it is a positive value, don't adjust. */
 248	if ((value_int & (~0ULL - value_mask)) == 0)
 249		return value_int;
 250
  251	/* Fill the upper part of value_int with 1s to make it a negative long long. */
 252	return (value_int & value_mask) | ~value_mask;
 253}
 254
 255static int string_set_value(struct bt_ctf_field *field, const char *string)
 256{
 257	char *buffer = NULL;
 258	size_t len = strlen(string), i, p;
 259	int err;
 260
 261	for (i = p = 0; i < len; i++, p++) {
 262		if (isprint(string[i])) {
 263			if (!buffer)
 264				continue;
 265			buffer[p] = string[i];
 266		} else {
 267			char numstr[5];
 268
 269			snprintf(numstr, sizeof(numstr), "\\x%02x",
 270				 (unsigned int)(string[i]) & 0xff);
 271
 272			if (!buffer) {
 273				buffer = zalloc(i + (len - i) * 4 + 2);
 274				if (!buffer) {
 275					pr_err("failed to set unprintable string '%s'\n", string);
 276					return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
 277				}
 278				if (i > 0)
 279					strncpy(buffer, string, i);
 280			}
 281			memcpy(buffer + p, numstr, 4);
 282			p += 3;
 283		}
 284	}
 285
 286	if (!buffer)
 287		return bt_ctf_field_string_set_value(field, string);
 288	err = bt_ctf_field_string_set_value(field, buffer);
 289	free(buffer);
 290	return err;
 291}
 292
 293static int add_tracepoint_field_value(struct ctf_writer *cw,
 294				      struct bt_ctf_event_class *event_class,
 295				      struct bt_ctf_event *event,
 296				      struct perf_sample *sample,
 297				      struct tep_format_field *fmtf)
 298{
 299	struct bt_ctf_field_type *type;
 300	struct bt_ctf_field *array_field;
 301	struct bt_ctf_field *field;
 302	const char *name = fmtf->name;
 303	void *data = sample->raw_data;
 304	unsigned long flags = fmtf->flags;
 305	unsigned int n_items;
 306	unsigned int i;
 307	unsigned int offset;
 308	unsigned int len;
 309	int ret;
 310
 311	name = fmtf->alias;
 312	offset = fmtf->offset;
 313	len = fmtf->size;
 314	if (flags & TEP_FIELD_IS_STRING)
 315		flags &= ~TEP_FIELD_IS_ARRAY;
 316
 317	if (flags & TEP_FIELD_IS_DYNAMIC) {
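		/*
		 * A __data_loc field stores (length << 16 | offset) at its static
		 * offset; for "rel_loc" fields the offset is relative to the end
		 * of this field.
		 */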
 318		unsigned long long tmp_val;
 319
 320		tmp_val = tep_read_number(fmtf->event->tep,
 321					  data + offset, len);
 322		offset = tmp_val;
 323		len = offset >> 16;
 324		offset &= 0xffff;
 325		if (tep_field_is_relative(flags))
 326			offset += fmtf->offset + fmtf->size;
 327	}
 328
 329	if (flags & TEP_FIELD_IS_ARRAY) {
 330
 331		type = bt_ctf_event_class_get_field_by_name(
 332				event_class, name);
 333		array_field = bt_ctf_field_create(type);
 334		bt_ctf_field_type_put(type);
 335		if (!array_field) {
 336			pr_err("Failed to create array type %s\n", name);
 337			return -1;
 338		}
 339
 340		len = fmtf->size / fmtf->arraylen;
 341		n_items = fmtf->arraylen;
 342	} else {
 343		n_items = 1;
 344		array_field = NULL;
 345	}
 346
 347	type = get_tracepoint_field_type(cw, fmtf);
 348
 349	for (i = 0; i < n_items; i++) {
 350		if (flags & TEP_FIELD_IS_ARRAY)
 351			field = bt_ctf_field_array_get_field(array_field, i);
 352		else
 353			field = bt_ctf_field_create(type);
 354
 355		if (!field) {
 356			pr_err("failed to create a field %s\n", name);
 357			return -1;
 358		}
 359
 360		if (flags & TEP_FIELD_IS_STRING)
 361			ret = string_set_value(field, data + offset + i * len);
 362		else {
 363			unsigned long long value_int;
 364
 365			value_int = tep_read_number(
 366					fmtf->event->tep,
 367					data + offset + i * len, len);
 368
 369			if (!(flags & TEP_FIELD_IS_SIGNED))
 370				ret = bt_ctf_field_unsigned_integer_set_value(
 371						field, value_int);
 372			else
 373				ret = bt_ctf_field_signed_integer_set_value(
 374						field, adjust_signedness(value_int, len));
 375		}
 376
 377		if (ret) {
  378		pr_err("failed to set field value %s\n", name);
 379			goto err_put_field;
 380		}
 381		if (!(flags & TEP_FIELD_IS_ARRAY)) {
 382			ret = bt_ctf_event_set_payload(event, name, field);
 383			if (ret) {
 384				pr_err("failed to set payload %s\n", name);
 385				goto err_put_field;
 386			}
 387		}
 388		bt_ctf_field_put(field);
 389	}
 390	if (flags & TEP_FIELD_IS_ARRAY) {
 391		ret = bt_ctf_event_set_payload(event, name, array_field);
 392		if (ret) {
  393			pr_err("Failed to add payload array %s\n", name);
 394			return -1;
 395		}
 396		bt_ctf_field_put(array_field);
 397	}
 398	return 0;
 399
 400err_put_field:
 401	bt_ctf_field_put(field);
 402	return -1;
 403}
 404
 405static int add_tracepoint_fields_values(struct ctf_writer *cw,
 406					struct bt_ctf_event_class *event_class,
 407					struct bt_ctf_event *event,
 408					struct tep_format_field *fields,
 409					struct perf_sample *sample)
 410{
 411	struct tep_format_field *field;
 412	int ret;
 413
 414	for (field = fields; field; field = field->next) {
 415		ret = add_tracepoint_field_value(cw, event_class, event, sample,
 416				field);
 417		if (ret)
 418			return -1;
 419	}
 420	return 0;
 421}
 422
 423static int add_tracepoint_values(struct ctf_writer *cw,
 424				 struct bt_ctf_event_class *event_class,
 425				 struct bt_ctf_event *event,
 426				 struct evsel *evsel,
 427				 struct perf_sample *sample)
 428{
 429	struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
 430	struct tep_format_field *fields        = evsel->tp_format->format.fields;
 431	int ret;
 432
 433	ret = add_tracepoint_fields_values(cw, event_class, event,
 434					   common_fields, sample);
 435	if (!ret)
 436		ret = add_tracepoint_fields_values(cw, event_class, event,
 437						   fields, sample);
 438
 439	return ret;
 440}
 441
 442static int
 443add_bpf_output_values(struct bt_ctf_event_class *event_class,
 444		      struct bt_ctf_event *event,
 445		      struct perf_sample *sample)
 446{
 447	struct bt_ctf_field_type *len_type, *seq_type;
 448	struct bt_ctf_field *len_field, *seq_field;
 449	unsigned int raw_size = sample->raw_size;
 450	unsigned int nr_elements = raw_size / sizeof(u32);
 451	unsigned int i;
 452	int ret;
 453
 454	if (nr_elements * sizeof(u32) != raw_size)
 455		pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n",
  456			   raw_size, raw_size - nr_elements * sizeof(u32));
 457
 458	len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
 459	len_field = bt_ctf_field_create(len_type);
 460	if (!len_field) {
 461		pr_err("failed to create 'raw_len' for bpf output event\n");
 462		ret = -1;
 463		goto put_len_type;
 464	}
 465
 466	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
 467	if (ret) {
 468		pr_err("failed to set field value for raw_len\n");
 469		goto put_len_field;
 470	}
 471	ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
 472	if (ret) {
 473		pr_err("failed to set payload to raw_len\n");
 474		goto put_len_field;
 475	}
 476
 477	seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
 478	seq_field = bt_ctf_field_create(seq_type);
 479	if (!seq_field) {
 480		pr_err("failed to create 'raw_data' for bpf output event\n");
 481		ret = -1;
 482		goto put_seq_type;
 483	}
 484
 485	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
 486	if (ret) {
 487		pr_err("failed to set length of 'raw_data'\n");
 488		goto put_seq_field;
 489	}
 490
 491	for (i = 0; i < nr_elements; i++) {
 492		struct bt_ctf_field *elem_field =
 493			bt_ctf_field_sequence_get_field(seq_field, i);
 494
 495		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
 496				((u32 *)(sample->raw_data))[i]);
 497
 498		bt_ctf_field_put(elem_field);
 499		if (ret) {
 500			pr_err("failed to set raw_data[%d]\n", i);
 501			goto put_seq_field;
 502		}
 503	}
 504
 505	ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
 506	if (ret)
 507		pr_err("failed to set payload for raw_data\n");
 508
 509put_seq_field:
 510	bt_ctf_field_put(seq_field);
 511put_seq_type:
 512	bt_ctf_field_type_put(seq_type);
 513put_len_field:
 514	bt_ctf_field_put(len_field);
 515put_len_type:
 516	bt_ctf_field_type_put(len_type);
 517	return ret;
 518}
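/*
 * Note on the encoding (informal): "raw_len" counts u32 words rather than
 * bytes, and "raw_data" is a CTF sequence of that many u32 values (declared
 * with a hex base in add_bpf_output_types() further below).  A 16-byte BPF
 * output record is therefore written as raw_len = 4 followed by four words.
 */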
 519
 520static int
 521add_callchain_output_values(struct bt_ctf_event_class *event_class,
 522		      struct bt_ctf_event *event,
 523		      struct ip_callchain *callchain)
 524{
 525	struct bt_ctf_field_type *len_type, *seq_type;
 526	struct bt_ctf_field *len_field, *seq_field;
 527	unsigned int nr_elements = callchain->nr;
 528	unsigned int i;
 529	int ret;
 530
 531	len_type = bt_ctf_event_class_get_field_by_name(
 532			event_class, "perf_callchain_size");
 533	len_field = bt_ctf_field_create(len_type);
 534	if (!len_field) {
 535		pr_err("failed to create 'perf_callchain_size' for callchain output event\n");
 536		ret = -1;
 537		goto put_len_type;
 538	}
 539
 540	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
 541	if (ret) {
 542		pr_err("failed to set field value for perf_callchain_size\n");
 543		goto put_len_field;
 544	}
 545	ret = bt_ctf_event_set_payload(event, "perf_callchain_size", len_field);
 546	if (ret) {
 547		pr_err("failed to set payload to perf_callchain_size\n");
 548		goto put_len_field;
 549	}
 550
 551	seq_type = bt_ctf_event_class_get_field_by_name(
 552			event_class, "perf_callchain");
 553	seq_field = bt_ctf_field_create(seq_type);
 554	if (!seq_field) {
 555		pr_err("failed to create 'perf_callchain' for callchain output event\n");
 556		ret = -1;
 557		goto put_seq_type;
 558	}
 559
 560	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
 561	if (ret) {
 562		pr_err("failed to set length of 'perf_callchain'\n");
 563		goto put_seq_field;
 564	}
 565
 566	for (i = 0; i < nr_elements; i++) {
 567		struct bt_ctf_field *elem_field =
 568			bt_ctf_field_sequence_get_field(seq_field, i);
 569
 570		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
 571				((u64 *)(callchain->ips))[i]);
 572
 573		bt_ctf_field_put(elem_field);
 574		if (ret) {
 575			pr_err("failed to set callchain[%d]\n", i);
 576			goto put_seq_field;
 577		}
 578	}
 579
 580	ret = bt_ctf_event_set_payload(event, "perf_callchain", seq_field);
 581	if (ret)
  582		pr_err("failed to set payload for perf_callchain\n");
 583
 584put_seq_field:
 585	bt_ctf_field_put(seq_field);
 586put_seq_type:
 587	bt_ctf_field_type_put(seq_type);
 588put_len_field:
 589	bt_ctf_field_put(len_field);
 590put_len_type:
 591	bt_ctf_field_type_put(len_type);
 592	return ret;
 593}
 594
 595static int add_generic_values(struct ctf_writer *cw,
 596			      struct bt_ctf_event *event,
 597			      struct evsel *evsel,
 598			      struct perf_sample *sample)
 599{
 600	u64 type = evsel->core.attr.sample_type;
 601	int ret;
 602
 603	/*
 604	 * missing:
 605	 *   PERF_SAMPLE_TIME         - not needed as we have it in
 606	 *                              ctf event header
 607	 *   PERF_SAMPLE_READ         - TODO
 608	 *   PERF_SAMPLE_RAW          - tracepoint fields are handled separately
 609	 *   PERF_SAMPLE_BRANCH_STACK - TODO
 610	 *   PERF_SAMPLE_REGS_USER    - TODO
 611	 *   PERF_SAMPLE_STACK_USER   - TODO
 612	 */
 613
 614	if (type & PERF_SAMPLE_IP) {
 615		ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
 616		if (ret)
 617			return -1;
 618	}
 619
 620	if (type & PERF_SAMPLE_TID) {
 621		ret = value_set_s32(cw, event, "perf_tid", sample->tid);
 622		if (ret)
 623			return -1;
 624
 625		ret = value_set_s32(cw, event, "perf_pid", sample->pid);
 626		if (ret)
 627			return -1;
 628	}
 629
 630	if ((type & PERF_SAMPLE_ID) ||
 631	    (type & PERF_SAMPLE_IDENTIFIER)) {
 632		ret = value_set_u64(cw, event, "perf_id", sample->id);
 633		if (ret)
 634			return -1;
 635	}
 636
 637	if (type & PERF_SAMPLE_STREAM_ID) {
 638		ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
 639		if (ret)
 640			return -1;
 641	}
 642
 643	if (type & PERF_SAMPLE_PERIOD) {
 644		ret = value_set_u64(cw, event, "perf_period", sample->period);
 645		if (ret)
 646			return -1;
 647	}
 648
 649	if (type & PERF_SAMPLE_WEIGHT) {
 650		ret = value_set_u64(cw, event, "perf_weight", sample->weight);
 651		if (ret)
 652			return -1;
 653	}
 654
 655	if (type & PERF_SAMPLE_DATA_SRC) {
 656		ret = value_set_u64(cw, event, "perf_data_src",
 657				sample->data_src);
 658		if (ret)
 659			return -1;
 660	}
 661
 662	if (type & PERF_SAMPLE_TRANSACTION) {
 663		ret = value_set_u64(cw, event, "perf_transaction",
 664				sample->transaction);
 665		if (ret)
 666			return -1;
 667	}
 668
 669	return 0;
 670}
 671
 672static int ctf_stream__flush(struct ctf_stream *cs)
 673{
 674	int err = 0;
 675
 676	if (cs) {
 677		err = bt_ctf_stream_flush(cs->stream);
 678		if (err)
 679			pr_err("CTF stream %d flush failed\n", cs->cpu);
 680
 681		pr("Flush stream for cpu %d (%u samples)\n",
 682		   cs->cpu, cs->count);
 683
 684		cs->count = 0;
 685	}
 686
 687	return err;
 688}
 689
 690static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
 691{
 692	struct ctf_stream *cs;
 693	struct bt_ctf_field *pkt_ctx   = NULL;
 694	struct bt_ctf_field *cpu_field = NULL;
 695	struct bt_ctf_stream *stream   = NULL;
 696	int ret;
 697
 698	cs = zalloc(sizeof(*cs));
 699	if (!cs) {
 700		pr_err("Failed to allocate ctf stream\n");
 701		return NULL;
 702	}
 703
 704	stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
 705	if (!stream) {
 706		pr_err("Failed to create CTF stream\n");
 707		goto out;
 708	}
 709
 710	pkt_ctx = bt_ctf_stream_get_packet_context(stream);
 711	if (!pkt_ctx) {
 712		pr_err("Failed to obtain packet context\n");
 713		goto out;
 714	}
 715
 716	cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
 717	bt_ctf_field_put(pkt_ctx);
 718	if (!cpu_field) {
 719		pr_err("Failed to obtain cpu field\n");
 720		goto out;
 721	}
 722
 723	ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
 724	if (ret) {
 725		pr_err("Failed to update CPU number\n");
 726		goto out;
 727	}
 728
 729	bt_ctf_field_put(cpu_field);
 730
 731	cs->cpu    = cpu;
 732	cs->stream = stream;
 733	return cs;
 734
 735out:
 736	if (cpu_field)
 737		bt_ctf_field_put(cpu_field);
 738	if (stream)
 739		bt_ctf_stream_put(stream);
 740
 741	free(cs);
 742	return NULL;
 743}
 744
 745static void ctf_stream__delete(struct ctf_stream *cs)
 746{
 747	if (cs) {
 748		bt_ctf_stream_put(cs->stream);
 749		free(cs);
 750	}
 751}
 752
 753static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
 754{
 755	struct ctf_stream *cs = cw->stream[cpu];
 756
 757	if (!cs) {
 758		cs = ctf_stream__create(cw, cpu);
 759		cw->stream[cpu] = cs;
 760	}
 761
 762	return cs;
 763}
 764
 765static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
 766			  struct evsel *evsel)
 767{
 768	int cpu = 0;
 769
 770	if (evsel->core.attr.sample_type & PERF_SAMPLE_CPU)
 771		cpu = sample->cpu;
 772
  773	if (cpu >= cw->stream_cnt) {
 774		pr_err("Event was recorded for CPU %d, limit is at %d.\n",
 775			cpu, cw->stream_cnt);
 776		cpu = 0;
 777	}
 778
 779	return cpu;
 780}
 781
 782#define STREAM_FLUSH_COUNT 100000
 783
  784/*
  785 * Currently we have no better way to determine when a
  786 * stream should be flushed than to keep track of the
  787 * number of appended events and check it against a
  788 * threshold.
  789 */
 790static bool is_flush_needed(struct ctf_stream *cs)
 791{
 792	return cs->count >= STREAM_FLUSH_COUNT;
 793}
 794
 795static int process_sample_event(struct perf_tool *tool,
 796				union perf_event *_event,
 797				struct perf_sample *sample,
 798				struct evsel *evsel,
 799				struct machine *machine __maybe_unused)
 800{
 801	struct convert *c = container_of(tool, struct convert, tool);
 802	struct evsel_priv *priv = evsel->priv;
 803	struct ctf_writer *cw = &c->writer;
 804	struct ctf_stream *cs;
 805	struct bt_ctf_event_class *event_class;
 806	struct bt_ctf_event *event;
 807	int ret;
 808	unsigned long type = evsel->core.attr.sample_type;
 809
 810	if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
 811		return 0;
 812
 813	event_class = priv->event_class;
 814
 815	/* update stats */
 816	c->events_count++;
 817	c->events_size += _event->header.size;
 818
 819	pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);
 820
 821	event = bt_ctf_event_create(event_class);
 822	if (!event) {
  823		pr_err("Failed to create a CTF event\n");
 824		return -1;
 825	}
 826
 827	bt_ctf_clock_set_time(cw->clock, sample->time);
 828
 829	ret = add_generic_values(cw, event, evsel, sample);
 830	if (ret)
 831		return -1;
 832
 833	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
 834		ret = add_tracepoint_values(cw, event_class, event,
 835					    evsel, sample);
 836		if (ret)
 837			return -1;
 838	}
 839
 840	if (type & PERF_SAMPLE_CALLCHAIN) {
 841		ret = add_callchain_output_values(event_class,
 842				event, sample->callchain);
 843		if (ret)
 844			return -1;
 845	}
 846
 847	if (evsel__is_bpf_output(evsel)) {
 848		ret = add_bpf_output_values(event_class, event, sample);
 849		if (ret)
 850			return -1;
 851	}
 852
 853	cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
 854	if (cs) {
 855		if (is_flush_needed(cs))
 856			ctf_stream__flush(cs);
 857
 858		cs->count++;
 859		bt_ctf_stream_append_event(cs->stream, event);
 860	}
 861
 862	bt_ctf_event_put(event);
 863	return cs ? 0 : -1;
 864}
 865
 866#define __NON_SAMPLE_SET_FIELD(_name, _type, _field) 	\
 867do {							\
 868	ret = value_set_##_type(cw, event, #_field, _event->_name._field);\
 869	if (ret)					\
 870		return -1;				\
 871} while(0)
 872
 873#define __FUNC_PROCESS_NON_SAMPLE(_name, body) 	\
 874static int process_##_name##_event(struct perf_tool *tool,	\
 875				   union perf_event *_event,	\
 876				   struct perf_sample *sample,	\
 877				   struct machine *machine)	\
 878{								\
 879	struct convert *c = container_of(tool, struct convert, tool);\
 880	struct ctf_writer *cw = &c->writer;			\
 881	struct bt_ctf_event_class *event_class = cw->_name##_class;\
 882	struct bt_ctf_event *event;				\
 883	struct ctf_stream *cs;					\
 884	int ret;						\
 885								\
 886	c->non_sample_count++;					\
 887	c->events_size += _event->header.size;			\
 888	event = bt_ctf_event_create(event_class);		\
 889	if (!event) {						\
  890		pr_err("Failed to create a CTF event\n");	\
 891		return -1;					\
 892	}							\
 893								\
 894	bt_ctf_clock_set_time(cw->clock, sample->time);		\
 895	body							\
 896	cs = ctf_stream(cw, 0);					\
 897	if (cs) {						\
 898		if (is_flush_needed(cs))			\
 899			ctf_stream__flush(cs);			\
 900								\
 901		cs->count++;					\
 902		bt_ctf_stream_append_event(cs->stream, event);	\
 903	}							\
 904	bt_ctf_event_put(event);				\
 905								\
 906	return perf_event__process_##_name(tool, _event, sample, machine);\
 907}
 908
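/*
 * Rough sketch of one expansion (for orientation only): the 'comm' instance
 * below generates process_comm_event(), whose body sets the event fields
 * with calls such as
 *
 *	value_set_u32(cw, event, "pid", _event->comm.pid);
 *	value_set_string(cw, event, "comm", _event->comm.comm);
 *
 * appends the event to stream 0 and then chains to
 * perf_event__process_comm() so the usual perf bookkeeping still runs.
 */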
 909__FUNC_PROCESS_NON_SAMPLE(comm,
 910	__NON_SAMPLE_SET_FIELD(comm, u32, pid);
 911	__NON_SAMPLE_SET_FIELD(comm, u32, tid);
 912	__NON_SAMPLE_SET_FIELD(comm, string, comm);
 913)
 914__FUNC_PROCESS_NON_SAMPLE(fork,
 915	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
 916	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
 917	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
 918	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
 919	__NON_SAMPLE_SET_FIELD(fork, u64, time);
 920)
 921
 922__FUNC_PROCESS_NON_SAMPLE(exit,
 923	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
 924	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
 925	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
 926	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
 927	__NON_SAMPLE_SET_FIELD(fork, u64, time);
 928)
 929__FUNC_PROCESS_NON_SAMPLE(mmap,
 930	__NON_SAMPLE_SET_FIELD(mmap, u32, pid);
 931	__NON_SAMPLE_SET_FIELD(mmap, u32, tid);
 932	__NON_SAMPLE_SET_FIELD(mmap, u64_hex, start);
 933	__NON_SAMPLE_SET_FIELD(mmap, string, filename);
 934)
 935__FUNC_PROCESS_NON_SAMPLE(mmap2,
 936	__NON_SAMPLE_SET_FIELD(mmap2, u32, pid);
 937	__NON_SAMPLE_SET_FIELD(mmap2, u32, tid);
 938	__NON_SAMPLE_SET_FIELD(mmap2, u64_hex, start);
 939	__NON_SAMPLE_SET_FIELD(mmap2, string, filename);
 940)
 941#undef __NON_SAMPLE_SET_FIELD
 942#undef __FUNC_PROCESS_NON_SAMPLE
 943
  944/* If dup < 0, add a '_' prefix. Otherwise, add a _dupl_X suffix. */
 945static char *change_name(char *name, char *orig_name, int dup)
 946{
 947	char *new_name = NULL;
 948	size_t len;
 949
 950	if (!name)
 951		name = orig_name;
 952
 953	if (dup >= 10)
 954		goto out;
  955	/*
  956	 * Add a '_' prefix to a potential keyword.  According to
  957	 * Mathieu Desnoyers (https://lore.kernel.org/lkml/1074266107.40857.1422045946295.JavaMail.zimbra@efficios.com),
  958	 * future CTF spec updates may require us to use '$' instead.
  959	 */
 960	if (dup < 0)
 961		len = strlen(name) + sizeof("_");
 962	else
 963		len = strlen(orig_name) + sizeof("_dupl_X");
 964
 965	new_name = malloc(len);
 966	if (!new_name)
 967		goto out;
 968
 969	if (dup < 0)
 970		snprintf(new_name, len, "_%s", name);
 971	else
 972		snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);
 973
 974out:
 975	if (name != orig_name)
 976		free(name);
 977	return new_name;
 978}
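/*
 * For instance (field names purely illustrative): a tracepoint field named
 * "event" collides with a CTF/TSDL identifier, so change_name(name, "event", -1)
 * returns "_event"; when two fields end up with the same identifier,
 * change_name(name, "flags", 1) returns "flags_dupl_1".
 */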
 979
 980static int event_class_add_field(struct bt_ctf_event_class *event_class,
 981		struct bt_ctf_field_type *type,
 982		struct tep_format_field *field)
 983{
 984	struct bt_ctf_field_type *t = NULL;
 985	char *name;
 986	int dup = 1;
 987	int ret;
 988
 989	/* alias was already assigned */
 990	if (field->alias != field->name)
 991		return bt_ctf_event_class_add_field(event_class, type,
 992				(char *)field->alias);
 993
 994	name = field->name;
 995
  996	/* If 'name' is a keyword, add a prefix. */
 997	if (bt_ctf_validate_identifier(name))
 998		name = change_name(name, field->name, -1);
 999
1000	if (!name) {
 1001		pr_err("Failed to fix invalid identifier.\n");
1002		return -1;
1003	}
1004	while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
1005		bt_ctf_field_type_put(t);
1006		name = change_name(name, field->name, dup++);
1007		if (!name) {
1008			pr_err("Failed to create dup name for '%s'\n", field->name);
1009			return -1;
1010		}
1011	}
1012
1013	ret = bt_ctf_event_class_add_field(event_class, type, name);
1014	if (!ret)
1015		field->alias = name;
1016
1017	return ret;
1018}
1019
1020static int add_tracepoint_fields_types(struct ctf_writer *cw,
1021				       struct tep_format_field *fields,
1022				       struct bt_ctf_event_class *event_class)
1023{
1024	struct tep_format_field *field;
1025	int ret;
1026
1027	for (field = fields; field; field = field->next) {
1028		struct bt_ctf_field_type *type;
1029		unsigned long flags = field->flags;
1030
1031		pr2("  field '%s'\n", field->name);
1032
1033		type = get_tracepoint_field_type(cw, field);
1034		if (!type)
1035			return -1;
1036
1037		/*
1038		 * A string is an array of chars. For this we use the string
1039		 * type and don't care that it is an array. What we don't
1040		 * support is an array of strings.
1041		 */
1042		if (flags & TEP_FIELD_IS_STRING)
1043			flags &= ~TEP_FIELD_IS_ARRAY;
1044
1045		if (flags & TEP_FIELD_IS_ARRAY)
1046			type = bt_ctf_field_type_array_create(type, field->arraylen);
1047
1048		ret = event_class_add_field(event_class, type, field);
1049
1050		if (flags & TEP_FIELD_IS_ARRAY)
1051			bt_ctf_field_type_put(type);
1052
1053		if (ret) {
1054			pr_err("Failed to add field '%s': %d\n",
1055					field->name, ret);
1056			return -1;
1057		}
1058	}
1059
1060	return 0;
1061}
1062
1063static int add_tracepoint_types(struct ctf_writer *cw,
1064				struct evsel *evsel,
1065				struct bt_ctf_event_class *class)
1066{
1067	struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
1068	struct tep_format_field *fields        = evsel->tp_format->format.fields;
1069	int ret;
1070
1071	ret = add_tracepoint_fields_types(cw, common_fields, class);
1072	if (!ret)
1073		ret = add_tracepoint_fields_types(cw, fields, class);
1074
1075	return ret;
1076}
1077
1078static int add_bpf_output_types(struct ctf_writer *cw,
1079				struct bt_ctf_event_class *class)
1080{
1081	struct bt_ctf_field_type *len_type = cw->data.u32;
1082	struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
1083	struct bt_ctf_field_type *seq_type;
1084	int ret;
1085
1086	ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
1087	if (ret)
1088		return ret;
1089
1090	seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
1091	if (!seq_type)
1092		return -1;
1093
1094	return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
1095}
1096
1097static int add_generic_types(struct ctf_writer *cw, struct evsel *evsel,
1098			     struct bt_ctf_event_class *event_class)
1099{
1100	u64 type = evsel->core.attr.sample_type;
1101
1102	/*
1103	 * missing:
1104	 *   PERF_SAMPLE_TIME         - not needed as we have it in
1105	 *                              ctf event header
1106	 *   PERF_SAMPLE_READ         - TODO
1107	 *   PERF_SAMPLE_CALLCHAIN    - TODO
1108	 *   PERF_SAMPLE_RAW          - tracepoint fields and BPF output
1109	 *                              are handled separately
1110	 *   PERF_SAMPLE_BRANCH_STACK - TODO
1111	 *   PERF_SAMPLE_REGS_USER    - TODO
1112	 *   PERF_SAMPLE_STACK_USER   - TODO
1113	 */
1114
1115#define ADD_FIELD(cl, t, n)						\
1116	do {								\
1117		pr2("  field '%s'\n", n);				\
1118		if (bt_ctf_event_class_add_field(cl, t, n)) {		\
 1119			pr_err("Failed to add field '%s'\n", n);	\
1120			return -1;					\
1121		}							\
1122	} while (0)
1123
1124	if (type & PERF_SAMPLE_IP)
1125		ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");
1126
1127	if (type & PERF_SAMPLE_TID) {
1128		ADD_FIELD(event_class, cw->data.s32, "perf_tid");
1129		ADD_FIELD(event_class, cw->data.s32, "perf_pid");
1130	}
1131
1132	if ((type & PERF_SAMPLE_ID) ||
1133	    (type & PERF_SAMPLE_IDENTIFIER))
1134		ADD_FIELD(event_class, cw->data.u64, "perf_id");
1135
1136	if (type & PERF_SAMPLE_STREAM_ID)
1137		ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");
1138
1139	if (type & PERF_SAMPLE_PERIOD)
1140		ADD_FIELD(event_class, cw->data.u64, "perf_period");
1141
1142	if (type & PERF_SAMPLE_WEIGHT)
1143		ADD_FIELD(event_class, cw->data.u64, "perf_weight");
1144
1145	if (type & PERF_SAMPLE_DATA_SRC)
1146		ADD_FIELD(event_class, cw->data.u64, "perf_data_src");
1147
1148	if (type & PERF_SAMPLE_TRANSACTION)
1149		ADD_FIELD(event_class, cw->data.u64, "perf_transaction");
1150
1151	if (type & PERF_SAMPLE_CALLCHAIN) {
1152		ADD_FIELD(event_class, cw->data.u32, "perf_callchain_size");
1153		ADD_FIELD(event_class,
1154			bt_ctf_field_type_sequence_create(
1155				cw->data.u64_hex, "perf_callchain_size"),
1156			"perf_callchain");
1157	}
1158
1159#undef ADD_FIELD
1160	return 0;
1161}
1162
1163static int add_event(struct ctf_writer *cw, struct evsel *evsel)
1164{
1165	struct bt_ctf_event_class *event_class;
1166	struct evsel_priv *priv;
1167	const char *name = evsel__name(evsel);
1168	int ret;
1169
1170	pr("Adding event '%s' (type %d)\n", name, evsel->core.attr.type);
1171
1172	event_class = bt_ctf_event_class_create(name);
1173	if (!event_class)
1174		return -1;
1175
1176	ret = add_generic_types(cw, evsel, event_class);
1177	if (ret)
1178		goto err;
1179
1180	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
1181		ret = add_tracepoint_types(cw, evsel, event_class);
1182		if (ret)
1183			goto err;
1184	}
1185
1186	if (evsel__is_bpf_output(evsel)) {
1187		ret = add_bpf_output_types(cw, event_class);
1188		if (ret)
1189			goto err;
1190	}
1191
1192	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
1193	if (ret) {
1194		pr("Failed to add event class into stream.\n");
1195		goto err;
1196	}
1197
1198	priv = malloc(sizeof(*priv));
1199	if (!priv)
1200		goto err;
1201
1202	priv->event_class = event_class;
1203	evsel->priv       = priv;
1204	return 0;
1205
1206err:
1207	bt_ctf_event_class_put(event_class);
1208	pr_err("Failed to add event '%s'.\n", name);
1209	return -1;
1210}
1211
1212static int setup_events(struct ctf_writer *cw, struct perf_session *session)
1213{
1214	struct evlist *evlist = session->evlist;
1215	struct evsel *evsel;
1216	int ret;
1217
1218	evlist__for_each_entry(evlist, evsel) {
1219		ret = add_event(cw, evsel);
1220		if (ret)
1221			return ret;
1222	}
1223	return 0;
1224}
1225
1226#define __NON_SAMPLE_ADD_FIELD(t, n)						\
1227	do {							\
1228		pr2("  field '%s'\n", #n);			\
1229		if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\
 1230			pr_err("Failed to add field '%s'\n", #n);\
1231			return -1;				\
1232		}						\
1233	} while(0)
1234
1235#define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body) 		\
1236static int add_##_name##_event(struct ctf_writer *cw)		\
1237{								\
1238	struct bt_ctf_event_class *event_class;			\
1239	int ret;						\
1240								\
1241	pr("Adding "#_name" event\n");				\
1242	event_class = bt_ctf_event_class_create("perf_" #_name);\
1243	if (!event_class)					\
1244		return -1;					\
1245	body							\
1246								\
1247	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\
1248	if (ret) {						\
1249		pr("Failed to add event class '"#_name"' into stream.\n");\
1250		return ret;					\
1251	}							\
1252								\
1253	cw->_name##_class = event_class;			\
1254	bt_ctf_event_class_put(event_class);			\
1255	return 0;						\
1256}
1257
1258__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm,
1259	__NON_SAMPLE_ADD_FIELD(u32, pid);
1260	__NON_SAMPLE_ADD_FIELD(u32, tid);
1261	__NON_SAMPLE_ADD_FIELD(string, comm);
1262)
1263
1264__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork,
1265	__NON_SAMPLE_ADD_FIELD(u32, pid);
1266	__NON_SAMPLE_ADD_FIELD(u32, ppid);
1267	__NON_SAMPLE_ADD_FIELD(u32, tid);
1268	__NON_SAMPLE_ADD_FIELD(u32, ptid);
1269	__NON_SAMPLE_ADD_FIELD(u64, time);
1270)
1271
1272__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit,
1273	__NON_SAMPLE_ADD_FIELD(u32, pid);
1274	__NON_SAMPLE_ADD_FIELD(u32, ppid);
1275	__NON_SAMPLE_ADD_FIELD(u32, tid);
1276	__NON_SAMPLE_ADD_FIELD(u32, ptid);
1277	__NON_SAMPLE_ADD_FIELD(u64, time);
1278)
1279
1280__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap,
1281	__NON_SAMPLE_ADD_FIELD(u32, pid);
1282	__NON_SAMPLE_ADD_FIELD(u32, tid);
1283	__NON_SAMPLE_ADD_FIELD(u64_hex, start);
1284	__NON_SAMPLE_ADD_FIELD(string, filename);
1285)
1286
1287__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap2,
1288	__NON_SAMPLE_ADD_FIELD(u32, pid);
1289	__NON_SAMPLE_ADD_FIELD(u32, tid);
1290	__NON_SAMPLE_ADD_FIELD(u64_hex, start);
1291	__NON_SAMPLE_ADD_FIELD(string, filename);
1292)
1293#undef __NON_SAMPLE_ADD_FIELD
1294#undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS
1295
1296static int setup_non_sample_events(struct ctf_writer *cw,
1297				   struct perf_session *session __maybe_unused)
1298{
1299	int ret;
1300
1301	ret = add_comm_event(cw);
1302	if (ret)
1303		return ret;
1304	ret = add_exit_event(cw);
1305	if (ret)
1306		return ret;
1307	ret = add_fork_event(cw);
1308	if (ret)
1309		return ret;
1310	ret = add_mmap_event(cw);
1311	if (ret)
1312		return ret;
1313	ret = add_mmap2_event(cw);
1314	if (ret)
1315		return ret;
1316	return 0;
1317}
1318
1319static void cleanup_events(struct perf_session *session)
1320{
1321	struct evlist *evlist = session->evlist;
1322	struct evsel *evsel;
1323
1324	evlist__for_each_entry(evlist, evsel) {
1325		struct evsel_priv *priv;
1326
1327		priv = evsel->priv;
1328		bt_ctf_event_class_put(priv->event_class);
1329		zfree(&evsel->priv);
1330	}
1331
1332	evlist__delete(evlist);
1333	session->evlist = NULL;
1334}
1335
1336static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
1337{
1338	struct ctf_stream **stream;
1339	struct perf_header *ph = &session->header;
1340	int ncpus;
1341
 1342	/*
 1343	 * Try to get the number of CPUs used in the data file;
 1344	 * if it is not present, fall back to MAX_CPUS.
 1345	 */
1346	ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;
1347
1348	stream = zalloc(sizeof(*stream) * ncpus);
1349	if (!stream) {
1350		pr_err("Failed to allocate streams.\n");
1351		return -ENOMEM;
1352	}
1353
1354	cw->stream     = stream;
1355	cw->stream_cnt = ncpus;
1356	return 0;
1357}
1358
1359static void free_streams(struct ctf_writer *cw)
1360{
1361	int cpu;
1362
1363	for (cpu = 0; cpu < cw->stream_cnt; cpu++)
1364		ctf_stream__delete(cw->stream[cpu]);
1365
1366	zfree(&cw->stream);
1367}
1368
1369static int ctf_writer__setup_env(struct ctf_writer *cw,
1370				 struct perf_session *session)
1371{
1372	struct perf_header *header = &session->header;
1373	struct bt_ctf_writer *writer = cw->writer;
1374
1375#define ADD(__n, __v)							\
1376do {									\
1377	if (bt_ctf_writer_add_environment_field(writer, __n, __v))	\
1378		return -1;						\
1379} while (0)
1380
1381	ADD("host",    header->env.hostname);
1382	ADD("sysname", "Linux");
1383	ADD("release", header->env.os_release);
1384	ADD("version", header->env.version);
1385	ADD("machine", header->env.arch);
1386	ADD("domain", "kernel");
1387	ADD("tracer_name", "perf");
1388
1389#undef ADD
1390	return 0;
1391}
1392
1393static int ctf_writer__setup_clock(struct ctf_writer *cw,
1394				   struct perf_session *session,
1395				   bool tod)
1396{
1397	struct bt_ctf_clock *clock = cw->clock;
1398	const char *desc = "perf clock";
1399	int64_t offset = 0;
1400
1401	if (tod) {
1402		struct perf_env *env = &session->header.env;
1403
1404		if (!env->clock.enabled) {
1405			pr_err("Can't provide --tod time, missing clock data. "
1406			       "Please record with -k/--clockid option.\n");
1407			return -1;
1408		}
1409
1410		desc   = clockid_name(env->clock.clockid);
1411		offset = env->clock.tod_ns - env->clock.clockid_ns;
1412	}
1413
1414#define SET(__n, __v)				\
1415do {						\
1416	if (bt_ctf_clock_set_##__n(clock, __v))	\
1417		return -1;			\
1418} while (0)
1419
1420	SET(frequency,   1000000000);
1421	SET(offset,      offset);
1422	SET(description, desc);
1423	SET(precision,   10);
1424	SET(is_absolute, 0);
1425
1426#undef SET
1427	return 0;
1428}
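/*
 * Informal note on the --tod arithmetic (assuming the usual CTF clock
 * semantics, where the clock offset is added when rendering timestamps):
 * with a frequency of 1000000000 the clock ticks are nanoseconds, so a
 * reader effectively computes
 *
 *	wall-clock time = perf timestamp + (tod_ns - clockid_ns)
 *
 * shifting the recorded clockid timestamps by the time-of-day delta
 * captured at record time.
 */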
1429
1430static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
1431{
1432	struct bt_ctf_field_type *type;
1433
1434	type = bt_ctf_field_type_integer_create(size);
1435	if (!type)
1436		return NULL;
1437
1438	if (sign &&
1439	    bt_ctf_field_type_integer_set_signed(type, 1))
1440		goto err;
1441
1442	if (hex &&
1443	    bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
1444		goto err;
1445
1446#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1447	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
1448#else
1449	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
1450#endif
1451
1452	pr2("Created type: INTEGER %d-bit %ssigned %s\n",
 1453	    size, sign ? "" : "un", hex ? "hex" : "");
1454	return type;
1455
1456err:
1457	bt_ctf_field_type_put(type);
1458	return NULL;
1459}
1460
1461static void ctf_writer__cleanup_data(struct ctf_writer *cw)
1462{
1463	unsigned int i;
1464
1465	for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
1466		bt_ctf_field_type_put(cw->data.array[i]);
1467}
1468
1469static int ctf_writer__init_data(struct ctf_writer *cw)
1470{
1471#define CREATE_INT_TYPE(type, size, sign, hex)		\
1472do {							\
1473	(type) = create_int_type(size, sign, hex);	\
1474	if (!(type))					\
1475		goto err;				\
1476} while (0)
1477
1478	CREATE_INT_TYPE(cw->data.s64, 64, true,  false);
1479	CREATE_INT_TYPE(cw->data.u64, 64, false, false);
1480	CREATE_INT_TYPE(cw->data.s32, 32, true,  false);
1481	CREATE_INT_TYPE(cw->data.u32, 32, false, false);
1482	CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
1483	CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);
1484
1485	cw->data.string  = bt_ctf_field_type_string_create();
1486	if (cw->data.string)
1487		return 0;
1488
1489err:
1490	ctf_writer__cleanup_data(cw);
1491	pr_err("Failed to create data types.\n");
1492	return -1;
1493}
1494
1495static void ctf_writer__cleanup(struct ctf_writer *cw)
1496{
1497	ctf_writer__cleanup_data(cw);
1498
1499	bt_ctf_clock_put(cw->clock);
1500	free_streams(cw);
1501	bt_ctf_stream_class_put(cw->stream_class);
1502	bt_ctf_writer_put(cw->writer);
1503
1504	/* and NULL all the pointers */
1505	memset(cw, 0, sizeof(*cw));
1506}
1507
1508static int ctf_writer__init(struct ctf_writer *cw, const char *path,
1509			    struct perf_session *session, bool tod)
1510{
1511	struct bt_ctf_writer		*writer;
1512	struct bt_ctf_stream_class	*stream_class;
1513	struct bt_ctf_clock		*clock;
1514	struct bt_ctf_field_type	*pkt_ctx_type;
1515	int				ret;
1516
1517	/* CTF writer */
1518	writer = bt_ctf_writer_create(path);
1519	if (!writer)
1520		goto err;
1521
1522	cw->writer = writer;
1523
1524	/* CTF clock */
1525	clock = bt_ctf_clock_create("perf_clock");
1526	if (!clock) {
1527		pr("Failed to create CTF clock.\n");
1528		goto err_cleanup;
1529	}
1530
1531	cw->clock = clock;
1532
1533	if (ctf_writer__setup_clock(cw, session, tod)) {
1534		pr("Failed to setup CTF clock.\n");
1535		goto err_cleanup;
1536	}
1537
1538	/* CTF stream class */
1539	stream_class = bt_ctf_stream_class_create("perf_stream");
1540	if (!stream_class) {
1541		pr("Failed to create CTF stream class.\n");
1542		goto err_cleanup;
1543	}
1544
1545	cw->stream_class = stream_class;
1546
1547	/* CTF clock stream setup */
1548	if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
1549		pr("Failed to assign CTF clock to stream class.\n");
1550		goto err_cleanup;
1551	}
1552
1553	if (ctf_writer__init_data(cw))
1554		goto err_cleanup;
1555
1556	/* Add cpu_id for packet context */
1557	pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
1558	if (!pkt_ctx_type)
1559		goto err_cleanup;
1560
1561	ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
1562	bt_ctf_field_type_put(pkt_ctx_type);
1563	if (ret)
1564		goto err_cleanup;
1565
1566	/* CTF clock writer setup */
1567	if (bt_ctf_writer_add_clock(writer, clock)) {
1568		pr("Failed to assign CTF clock to writer.\n");
1569		goto err_cleanup;
1570	}
1571
1572	return 0;
1573
1574err_cleanup:
1575	ctf_writer__cleanup(cw);
1576err:
1577	pr_err("Failed to setup CTF writer.\n");
1578	return -1;
1579}
1580
1581static int ctf_writer__flush_streams(struct ctf_writer *cw)
1582{
1583	int cpu, ret = 0;
1584
1585	for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
1586		ret = ctf_stream__flush(cw->stream[cpu]);
1587
1588	return ret;
1589}
1590
1591static int convert__config(const char *var, const char *value, void *cb)
1592{
1593	struct convert *c = cb;
1594
1595	if (!strcmp(var, "convert.queue-size"))
1596		return perf_config_u64(&c->queue_size, var, value);
1597
1598	return 0;
1599}
1600
1601int bt_convert__perf2ctf(const char *input, const char *path,
1602			 struct perf_data_convert_opts *opts)
1603{
1604	struct perf_session *session;
1605	struct perf_data data = {
1606		.path	   = input,
1607		.mode      = PERF_DATA_MODE_READ,
1608		.force     = opts->force,
1609	};
1610	struct convert c = {
1611		.tool = {
1612			.sample          = process_sample_event,
1613			.mmap            = perf_event__process_mmap,
1614			.mmap2           = perf_event__process_mmap2,
1615			.comm            = perf_event__process_comm,
1616			.exit            = perf_event__process_exit,
1617			.fork            = perf_event__process_fork,
1618			.lost            = perf_event__process_lost,
1619			.tracing_data    = perf_event__process_tracing_data,
1620			.build_id        = perf_event__process_build_id,
1621			.namespaces      = perf_event__process_namespaces,
1622			.ordered_events  = true,
1623			.ordering_requires_timestamps = true,
1624		},
1625	};
1626	struct ctf_writer *cw = &c.writer;
1627	int err;
1628
1629	if (opts->all) {
1630		c.tool.comm = process_comm_event;
1631		c.tool.exit = process_exit_event;
1632		c.tool.fork = process_fork_event;
1633		c.tool.mmap = process_mmap_event;
1634		c.tool.mmap2 = process_mmap2_event;
1635	}
1636
1637	err = perf_config(convert__config, &c);
1638	if (err)
1639		return err;
1640
1641	err = -1;
1642	/* perf.data session */
1643	session = perf_session__new(&data, &c.tool);
1644	if (IS_ERR(session))
1645		return PTR_ERR(session);
1646
1647	/* CTF writer */
1648	if (ctf_writer__init(cw, path, session, opts->tod))
1649		goto free_session;
1650
1651	if (c.queue_size) {
1652		ordered_events__set_alloc_size(&session->ordered_events,
1653					       c.queue_size);
1654	}
1655
1656	/* CTF writer env/clock setup  */
1657	if (ctf_writer__setup_env(cw, session))
1658		goto free_writer;
1659
1660	/* CTF events setup */
1661	if (setup_events(cw, session))
1662		goto free_writer;
1663
1664	if (opts->all && setup_non_sample_events(cw, session))
1665		goto free_writer;
1666
1667	if (setup_streams(cw, session))
1668		goto free_writer;
1669
1670	err = perf_session__process_events(session);
1671	if (!err)
1672		err = ctf_writer__flush_streams(cw);
1673	else
1674		pr_err("Error during conversion.\n");
1675
1676	fprintf(stderr,
1677		"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
1678		data.path, path);
1679
1680	fprintf(stderr,
1681		"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
1682		(double) c.events_size / 1024.0 / 1024.0,
1683		c.events_count);
1684
1685	if (!c.non_sample_count)
1686		fprintf(stderr, ") ]\n");
1687	else
1688		fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count);
1689
1690	cleanup_events(session);
1691	perf_session__delete(session);
1692	ctf_writer__cleanup(cw);
1693
1694	return err;
1695
1696free_writer:
1697	ctf_writer__cleanup(cw);
1698free_session:
1699	perf_session__delete(session);
1700	pr_err("Error during conversion setup.\n");
1701	return err;
1702}