/* Source: Linux kernel v6.2, kernel/trace/trace_events_synth.c */
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * trace_events_synth - synthetic trace events
   4 *
   5 * Copyright (C) 2015, 2020 Tom Zanussi <tom.zanussi@linux.intel.com>
   6 */
   7
   8#include <linux/module.h>
   9#include <linux/kallsyms.h>
  10#include <linux/security.h>
  11#include <linux/mutex.h>
  12#include <linux/slab.h>
  13#include <linux/stacktrace.h>
  14#include <linux/rculist.h>
  15#include <linux/tracefs.h>
  16
  17/* for gfp flag names */
  18#include <linux/trace_events.h>
  19#include <trace/events/mmflags.h>
  20#include "trace_probe.h"
  21#include "trace_probe_kernel.h"
  22
  23#include "trace_synth.h"
  24
/*
 * X-macro error table: each C(id, text) pair expands once into an enum
 * constant (SYNTH_ERR_<id>) and once into its message in err_text[],
 * keeping the two in sync by construction.
 */
#undef ERRORS
#define ERRORS	\
	C(BAD_NAME,		"Illegal name"),		\
	C(INVALID_CMD,		"Command must be of the form: <name> field[;field] ..."),\
	C(INVALID_DYN_CMD,	"Command must be of the form: s or -:[synthetic/]<name> field[;field] ..."),\
	C(EVENT_EXISTS,		"Event already exists"),	\
	C(TOO_MANY_FIELDS,	"Too many fields"),		\
	C(INCOMPLETE_TYPE,	"Incomplete type"),		\
	C(INVALID_TYPE,		"Invalid type"),		\
	C(INVALID_FIELD,        "Invalid field"),		\
	C(INVALID_ARRAY_SPEC,	"Invalid array specification"),

/* First expansion: the error enum values */
#undef C
#define C(a, b)		SYNTH_ERR_##a

enum { ERRORS };

/* Second expansion: the matching error message strings */
#undef C
#define C(a, b)		b

static const char *err_text[] = { ERRORS };
  46
 
  47static char *last_cmd;
  48
  49static int errpos(const char *str)
  50{
 
 
 
  51	if (!str || !last_cmd)
  52		return 0;
  53
  54	return err_pos(last_cmd, str);
 
 
 
  55}
  56
  57static void last_cmd_set(const char *str)
  58{
  59	if (!str)
  60		return;
  61
 
  62	kfree(last_cmd);
  63
  64	last_cmd = kstrdup(str, GFP_KERNEL);
 
  65}
  66
/*
 * Log a synthetic-event parse error against the saved command string.
 * @err_type indexes err_text[]; @err_pos is the caret position within
 * the command (typically from errpos()).
 */
static void synth_err(u8 err_type, u16 err_pos)
{
	/* Nothing to annotate if no command was saved */
	if (!last_cmd)
		return;

	tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
			err_type, err_pos);
}
  75
/* Forward declarations for the dyn_event operations table below */
static int create_synth_event(const char *raw_command);
static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
static int synth_event_release(struct dyn_event *ev);
static bool synth_event_is_busy(struct dyn_event *ev);
static bool synth_event_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev);

/* Hooks synthetic events into the dynamic-event (dyn_event) framework */
static struct dyn_event_operations synth_event_ops = {
	.create = create_synth_event,
	.show = synth_event_show,
	.is_busy = synth_event_is_busy,
	.free = synth_event_release,
	.match = synth_event_match,
};
  90
/* Does this dyn_event belong to the synthetic-event subsystem? */
static bool is_synth_event(struct dyn_event *ev)
{
	return ev->ops == &synth_event_ops;
}

/* Convert a generic dyn_event back to its containing synth_event */
static struct synth_event *to_synth_event(struct dyn_event *ev)
{
	return container_of(ev, struct synth_event, devent);
}

/* An event is busy while anything still holds a reference to it */
static bool synth_event_is_busy(struct dyn_event *ev)
{
	struct synth_event *event = to_synth_event(ev);

	return event->ref != 0;
}

/*
 * Match by event name; a NULL @system matches anything, otherwise it
 * must equal SYNTH_SYSTEM.  @argc/@argv are unused for synth events.
 */
static bool synth_event_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev)
{
	struct synth_event *sev = to_synth_event(ev);

	return strcmp(sev->name, event) == 0 &&
		(!system || strcmp(system, SYNTH_SYSTEM) == 0);
}
 116
/*
 * On-buffer record layout: the standard trace_entry header followed by
 * the event's payload packed into u64 slots (see the n_u64 accounting
 * in synth_event_define_fields() and trace_event_raw_event_synth()).
 */
struct synth_trace_event {
	struct trace_entry	ent;
	u64			fields[];
};
 121
/*
 * Register this event's fields with the trace event core (so filters
 * and the "format" file know their byte offset, size and signedness),
 * and record each field's position in u64 units for the write path.
 */
static int synth_event_define_fields(struct trace_event_call *call)
{
	struct synth_trace_event trace;
	int offset = offsetof(typeof(trace), fields);
	struct synth_event *event = call->data;
	unsigned int i, size, n_u64;
	char *name, *type;
	bool is_signed;
	int ret = 0;

	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
		size = event->fields[i]->size;
		is_signed = event->fields[i]->is_signed;
		type = event->fields[i]->type;
		name = event->fields[i]->name;
		ret = trace_define_field(call, type, name, offset, size,
					 is_signed, FILTER_OTHER);
		if (ret)
			break;

		/* offset counts bytes; fields[i]->offset counts u64 slots */
		event->fields[i]->offset = n_u64;

		if (event->fields[i]->is_string && !event->fields[i]->is_dynamic) {
			/* static strings occupy a fixed STR_VAR_LEN_MAX area */
			offset += STR_VAR_LEN_MAX;
			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
		} else {
			/* everything else (incl. dynamic strings) takes one u64 */
			offset += sizeof(u64);
			n_u64++;
		}
	}

	event->n_u64 = n_u64;

	return ret;
}
 157
 158static bool synth_field_signed(char *type)
 159{
 160	if (str_has_prefix(type, "u"))
 161		return false;
 162	if (strcmp(type, "gfp_t") == 0)
 163		return false;
 164
 165	return true;
 166}
 167
 168static int synth_field_is_string(char *type)
 169{
 170	if (strstr(type, "char[") != NULL)
 171		return true;
 172
 173	return false;
 174}
 175
 
 
 
 
 
 
 
 
 176static int synth_field_string_size(char *type)
 177{
 178	char buf[4], *end, *start;
 179	unsigned int len;
 180	int size, err;
 181
 182	start = strstr(type, "char[");
 183	if (start == NULL)
 184		return -EINVAL;
 185	start += sizeof("char[") - 1;
 186
 187	end = strchr(type, ']');
 188	if (!end || end < start || type + strlen(type) > end + 1)
 189		return -EINVAL;
 190
 191	len = end - start;
 192	if (len > 3)
 193		return -EINVAL;
 194
 195	if (len == 0)
 196		return 0; /* variable-length string */
 197
 198	strncpy(buf, start, len);
 199	buf[len] = '\0';
 200
 201	err = kstrtouint(buf, 0, &size);
 202	if (err)
 203		return err;
 204
 205	if (size > STR_VAR_LEN_MAX)
 206		return -EINVAL;
 207
 208	return size;
 209}
 210
 211static int synth_field_size(char *type)
 212{
 213	int size = 0;
 214
 215	if (strcmp(type, "s64") == 0)
 216		size = sizeof(s64);
 217	else if (strcmp(type, "u64") == 0)
 218		size = sizeof(u64);
 219	else if (strcmp(type, "s32") == 0)
 220		size = sizeof(s32);
 221	else if (strcmp(type, "u32") == 0)
 222		size = sizeof(u32);
 223	else if (strcmp(type, "s16") == 0)
 224		size = sizeof(s16);
 225	else if (strcmp(type, "u16") == 0)
 226		size = sizeof(u16);
 227	else if (strcmp(type, "s8") == 0)
 228		size = sizeof(s8);
 229	else if (strcmp(type, "u8") == 0)
 230		size = sizeof(u8);
 231	else if (strcmp(type, "char") == 0)
 232		size = sizeof(char);
 233	else if (strcmp(type, "unsigned char") == 0)
 234		size = sizeof(unsigned char);
 235	else if (strcmp(type, "int") == 0)
 236		size = sizeof(int);
 237	else if (strcmp(type, "unsigned int") == 0)
 238		size = sizeof(unsigned int);
 239	else if (strcmp(type, "long") == 0)
 240		size = sizeof(long);
 241	else if (strcmp(type, "unsigned long") == 0)
 242		size = sizeof(unsigned long);
 243	else if (strcmp(type, "bool") == 0)
 244		size = sizeof(bool);
 245	else if (strcmp(type, "pid_t") == 0)
 246		size = sizeof(pid_t);
 247	else if (strcmp(type, "gfp_t") == 0)
 248		size = sizeof(gfp_t);
 249	else if (synth_field_is_string(type))
 250		size = synth_field_string_size(type);
 
 
 251
 252	return size;
 253}
 254
 255static const char *synth_field_fmt(char *type)
 256{
 257	const char *fmt = "%llu";
 258
 259	if (strcmp(type, "s64") == 0)
 260		fmt = "%lld";
 261	else if (strcmp(type, "u64") == 0)
 262		fmt = "%llu";
 263	else if (strcmp(type, "s32") == 0)
 264		fmt = "%d";
 265	else if (strcmp(type, "u32") == 0)
 266		fmt = "%u";
 267	else if (strcmp(type, "s16") == 0)
 268		fmt = "%d";
 269	else if (strcmp(type, "u16") == 0)
 270		fmt = "%u";
 271	else if (strcmp(type, "s8") == 0)
 272		fmt = "%d";
 273	else if (strcmp(type, "u8") == 0)
 274		fmt = "%u";
 275	else if (strcmp(type, "char") == 0)
 276		fmt = "%d";
 277	else if (strcmp(type, "unsigned char") == 0)
 278		fmt = "%u";
 279	else if (strcmp(type, "int") == 0)
 280		fmt = "%d";
 281	else if (strcmp(type, "unsigned int") == 0)
 282		fmt = "%u";
 283	else if (strcmp(type, "long") == 0)
 284		fmt = "%ld";
 285	else if (strcmp(type, "unsigned long") == 0)
 286		fmt = "%lu";
 287	else if (strcmp(type, "bool") == 0)
 288		fmt = "%d";
 289	else if (strcmp(type, "pid_t") == 0)
 290		fmt = "%d";
 291	else if (strcmp(type, "gfp_t") == 0)
 292		fmt = "%x";
 293	else if (synth_field_is_string(type))
 294		fmt = "%.*s";
 
 
 295
 296	return fmt;
 297}
 298
 299static void print_synth_event_num_val(struct trace_seq *s,
 300				      char *print_fmt, char *name,
 301				      int size, u64 val, char *space)
 302{
 303	switch (size) {
 304	case 1:
 305		trace_seq_printf(s, print_fmt, name, (u8)val, space);
 306		break;
 307
 308	case 2:
 309		trace_seq_printf(s, print_fmt, name, (u16)val, space);
 310		break;
 311
 312	case 4:
 313		trace_seq_printf(s, print_fmt, name, (u32)val, space);
 314		break;
 315
 316	default:
 317		trace_seq_printf(s, print_fmt, name, val, space);
 318		break;
 319	}
 320}
 321
/*
 * Output callback: render one synthetic event record as
 * "name: field=value field=value ...".  Walks the u64-packed payload
 * with the same n_u64 accounting used when the record was written.
 */
static enum print_line_t print_synth_event(struct trace_iterator *iter,
					   int flags,
					   struct trace_event *event)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct synth_trace_event *entry;
	struct synth_event *se;
	unsigned int i, n_u64;
	char print_fmt[32];
	const char *fmt;

	entry = (struct synth_trace_event *)iter->ent;
	se = container_of(event, struct synth_event, call.event);

	trace_seq_printf(s, "%s: ", se->name);

	for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
		if (trace_seq_has_overflowed(s))
			goto end;

		fmt = synth_field_fmt(se->fields[i]->type);

		/* parameter types */
		if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
			trace_seq_printf(s, "%s ", fmt);

		/* build "%s=<fmt>%s": name, value, trailing space/empty */
		snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);

		/* parameter values */
		if (se->fields[i]->is_string) {
			if (se->fields[i]->is_dynamic) {
				u32 offset, data_offset;
				char *str_field;

				/* low 16 bits of the stored word hold the data offset */
				offset = (u32)entry->fields[n_u64];
				data_offset = offset & 0xffff;

				str_field = (char *)entry + data_offset;

				trace_seq_printf(s, print_fmt, se->fields[i]->name,
						 STR_VAR_LEN_MAX,
						 str_field,
						 i == se->n_fields - 1 ? "" : " ");
				n_u64++;
			} else {
				/* static string stored inline in the u64 area */
				trace_seq_printf(s, print_fmt, se->fields[i]->name,
						 STR_VAR_LEN_MAX,
						 (char *)&entry->fields[n_u64],
						 i == se->n_fields - 1 ? "" : " ");
				n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
			}
		} else {
			struct trace_print_flags __flags[] = {
			    __def_gfpflag_names, {-1, NULL} };
			char *space = (i == se->n_fields - 1 ? "" : " ");

			print_synth_event_num_val(s, print_fmt,
						  se->fields[i]->name,
						  se->fields[i]->size,
						  entry->fields[n_u64],
						  space);

			/* gfp_t values additionally get symbolic flag names */
			if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
				trace_seq_puts(s, " (");
				trace_print_flags_seq(s, "|",
						      entry->fields[n_u64],
						      __flags);
				trace_seq_putc(s, ')');
			}
			n_u64++;
		}
	}
end:
	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}
 400
/* Output callbacks used when a synthetic event record is printed */
static struct trace_event_functions synth_event_funcs = {
	.trace		= print_synth_event
};
 404
/*
 * Copy one string value into a reserved synthetic-event entry.
 *
 * Dynamic strings are appended after the fixed n_u64 * 8 byte area at
 * @data_size bytes in, and the field's u64 slot receives a packed
 * location word (length << 16 | offset within the entry); the length
 * is returned so the caller can advance its running data_size.  Static
 * strings are copied into the field's STR_VAR_LEN_MAX-byte slot and 0
 * is returned.
 */
static unsigned int trace_string(struct synth_trace_event *entry,
				 struct synth_event *event,
				 char *str_val,
				 bool is_dynamic,
				 unsigned int data_size,
				 unsigned int *n_u64)
{
	unsigned int len = 0;
	char *str_field;
	int ret;

	if (is_dynamic) {
		u32 data_offset;

		data_offset = offsetof(typeof(*entry), fields);
		data_offset += event->n_u64 * sizeof(u64);
		data_offset += data_size;

		len = kern_fetch_store_strlen((unsigned long)str_val);

		/* pack "length << 16 | offset" into the __data_loc word */
		data_offset |= len << 16;
		*(u32 *)&entry->fields[*n_u64] = data_offset;

		ret = kern_fetch_store_string((unsigned long)str_val, &entry->fields[*n_u64], entry);

		(*n_u64)++;
	} else {
		str_field = (char *)&entry->fields[*n_u64];

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		/* the source pointer may be a user-space address here */
		if ((unsigned long)str_val < TASK_SIZE)
			ret = strncpy_from_user_nofault(str_field, str_val, STR_VAR_LEN_MAX);
		else
#endif
			ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX);

		/* unreadable source: substitute a marker instead of garbage */
		if (ret < 0)
			strcpy(str_field, FAULT_STRING);

		(*n_u64) += STR_VAR_LEN_MAX / sizeof(u64);
	}

	return len;
}
 449
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Tracepoint probe for synthetic events: computes the record size
 * (fixed u64 area plus dynamic string data), reserves ring-buffer
 * space, and fills in each field from the resolved variable values
 * supplied by the histogram trigger (@var_ref_vals indexed through
 * @var_ref_idx).
 */
static notrace void trace_event_raw_event_synth(void *__data,
						u64 *var_ref_vals,
						unsigned int *var_ref_idx)
{
	unsigned int i, n_u64, val_idx, len, data_size = 0;
	struct trace_event_file *trace_file = __data;
	struct synth_trace_event *entry;
	struct trace_event_buffer fbuffer;
	struct trace_buffer *buffer;
	struct synth_event *event;
	int fields_size = 0;

	event = trace_file->event_call->data;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	fields_size = event->n_u64 * sizeof(u64);

	/* add the space each dynamic string will need */
	for (i = 0; i < event->n_dynamic_fields; i++) {
		unsigned int field_pos = event->dynamic_fields[i]->field_pos;
		char *str_val;

		val_idx = var_ref_idx[field_pos];
		str_val = (char *)(long)var_ref_vals[val_idx];

		len = kern_fetch_store_strlen((unsigned long)str_val);

		fields_size += len;
	}

	/*
	 * Avoid ring buffer recursion detection, as this event
	 * is being performed within another event.
	 */
	buffer = trace_file->tr->array_buffer.buffer;
	ring_buffer_nest_start(buffer);

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry) + fields_size);
	if (!entry)
		goto out;

	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
		val_idx = var_ref_idx[i];
		if (event->fields[i]->is_string) {
			char *str_val = (char *)(long)var_ref_vals[val_idx];

			len = trace_string(entry, event, str_val,
					   event->fields[i]->is_dynamic,
					   data_size, &n_u64);
			data_size += len; /* only dynamic string increments */
		} else {
			struct synth_field *field = event->fields[i];
			u64 val = var_ref_vals[val_idx];

			/* store the value at the field's natural width */
			switch (field->size) {
			case 1:
				*(u8 *)&entry->fields[n_u64] = (u8)val;
				break;

			case 2:
				*(u16 *)&entry->fields[n_u64] = (u16)val;
				break;

			case 4:
				*(u32 *)&entry->fields[n_u64] = (u32)val;
				break;

			default:
				entry->fields[n_u64] = val;
				break;
			}
			n_u64++;
		}
	}

	trace_event_buffer_commit(&fbuffer);
out:
	ring_buffer_nest_end(buffer);
}
 531
 532static void free_synth_event_print_fmt(struct trace_event_call *call)
 533{
 534	if (call) {
 535		kfree(call->print_fmt);
 536		call->print_fmt = NULL;
 537	}
 538}
 539
/*
 * Build the event's "print fmt:" string into @buf: first the quoted
 * format, then the argument list.  Called once with len == 0 to size
 * the buffer, then again to fill it.  Returns the number of characters
 * needed/written (excluding the terminating NUL).
 */
static int __set_synth_event_print_fmt(struct synth_event *event,
				       char *buf, int len)
{
	const char *fmt;
	int pos = 0;
	int i;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < event->n_fields; i++) {
		fmt = synth_field_fmt(event->fields[i]->type);
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
				event->fields[i]->name, fmt,
				i == event->n_fields - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	/* argument list: dynamic strings go through __get_str() */
	for (i = 0; i < event->n_fields; i++) {
		if (event->fields[i]->is_string &&
		    event->fields[i]->is_dynamic)
			pos += snprintf(buf + pos, LEN_OR_ZERO,
				", __get_str(%s)", event->fields[i]->name);
		else
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", REC->%s", event->fields[i]->name);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}
 574
 575static int set_synth_event_print_fmt(struct trace_event_call *call)
 576{
 577	struct synth_event *event = call->data;
 578	char *print_fmt;
 579	int len;
 580
 581	/* First: called with 0 length to calculate the needed length */
 582	len = __set_synth_event_print_fmt(event, NULL, 0);
 583
 584	print_fmt = kmalloc(len + 1, GFP_KERNEL);
 585	if (!print_fmt)
 586		return -ENOMEM;
 587
 588	/* Second: actually write the @print_fmt */
 589	__set_synth_event_print_fmt(event, print_fmt, len + 1);
 590	call->print_fmt = print_fmt;
 591
 592	return 0;
 593}
 594
/* Free a synth_field together with the strings it owns */
static void free_synth_field(struct synth_field *field)
{
	kfree(field->type);
	kfree(field->name);
	kfree(field);
}
 601
/*
 * Report the command-format "version" this field requires; currently
 * always 1 (the legacy, semicolon-optional format).
 */
static int check_field_version(const char *prefix, const char *field_type,
			       const char *field_name)
{
	/*
	 * For backward compatibility, the old synthetic event command
	 * format did not require semicolons, and in order to not
	 * break user space, that old format must still work. If a new
	 * feature is added, then the format that uses the new feature
	 * will be required to have semicolons, as nothing that uses
	 * the old format would be using the new, yet to be created,
	 * feature. When a new feature is added, this will detect it,
	 * and return a number greater than 1, and require the format
	 * to use semicolons.
	 */
	return 1;
}
 618
 619static struct synth_field *parse_synth_field(int argc, char **argv,
 620					     int *consumed, int *field_version)
 621{
 622	const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
 623	struct synth_field *field;
 624	int len, ret = -ENOMEM;
 625	struct seq_buf s;
 626	ssize_t size;
 627
 628	if (!strcmp(field_type, "unsigned")) {
 629		if (argc < 3) {
 630			synth_err(SYNTH_ERR_INCOMPLETE_TYPE, errpos(field_type));
 631			return ERR_PTR(-EINVAL);
 632		}
 633		prefix = "unsigned ";
 634		field_type = argv[1];
 635		field_name = argv[2];
 636		*consumed += 3;
 637	} else {
 638		field_name = argv[1];
 639		*consumed += 2;
 640	}
 641
 642	if (!field_name) {
 643		synth_err(SYNTH_ERR_INVALID_FIELD, errpos(field_type));
 644		return ERR_PTR(-EINVAL);
 645	}
 646
 647	*field_version = check_field_version(prefix, field_type, field_name);
 648
 649	field = kzalloc(sizeof(*field), GFP_KERNEL);
 650	if (!field)
 651		return ERR_PTR(-ENOMEM);
 652
 653	len = strlen(field_name);
 654	array = strchr(field_name, '[');
 655	if (array)
 656		len -= strlen(array);
 657
 658	field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
 659	if (!field->name)
 660		goto free;
 661
 662	if (!is_good_name(field->name)) {
 663		synth_err(SYNTH_ERR_BAD_NAME, errpos(field_name));
 664		ret = -EINVAL;
 665		goto free;
 666	}
 667
 668	len = strlen(field_type) + 1;
 669
 670	if (array)
 671		len += strlen(array);
 672
 673	if (prefix)
 674		len += strlen(prefix);
 675
 676	field->type = kzalloc(len, GFP_KERNEL);
 677	if (!field->type)
 678		goto free;
 679
 680	seq_buf_init(&s, field->type, len);
 681	if (prefix)
 682		seq_buf_puts(&s, prefix);
 683	seq_buf_puts(&s, field_type);
 684	if (array)
 685		seq_buf_puts(&s, array);
 686	if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
 687		goto free;
 688
 689	s.buffer[s.len] = '\0';
 690
 691	size = synth_field_size(field->type);
 692	if (size < 0) {
 693		if (array)
 694			synth_err(SYNTH_ERR_INVALID_ARRAY_SPEC, errpos(field_name));
 695		else
 696			synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
 697		ret = -EINVAL;
 698		goto free;
 699	} else if (size == 0) {
 700		if (synth_field_is_string(field->type)) {
 
 701			char *type;
 702
 703			len = sizeof("__data_loc ") + strlen(field->type) + 1;
 704			type = kzalloc(len, GFP_KERNEL);
 705			if (!type)
 706				goto free;
 707
 708			seq_buf_init(&s, type, len);
 709			seq_buf_puts(&s, "__data_loc ");
 710			seq_buf_puts(&s, field->type);
 711
 712			if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
 713				goto free;
 714			s.buffer[s.len] = '\0';
 715
 716			kfree(field->type);
 717			field->type = type;
 718
 719			field->is_dynamic = true;
 720			size = sizeof(u64);
 721		} else {
 722			synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
 723			ret = -EINVAL;
 724			goto free;
 725		}
 726	}
 727	field->size = size;
 728
 729	if (synth_field_is_string(field->type))
 730		field->is_string = true;
 
 
 731
 732	field->is_signed = synth_field_signed(field->type);
 733 out:
 734	return field;
 735 free:
 736	free_synth_field(field);
 737	field = ERR_PTR(ret);
 738	goto out;
 739}
 740
 741static void free_synth_tracepoint(struct tracepoint *tp)
 742{
 743	if (!tp)
 744		return;
 745
 746	kfree(tp->name);
 747	kfree(tp);
 748}
 749
 750static struct tracepoint *alloc_synth_tracepoint(char *name)
 751{
 752	struct tracepoint *tp;
 753
 754	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
 755	if (!tp)
 756		return ERR_PTR(-ENOMEM);
 757
 758	tp->name = kstrdup(name, GFP_KERNEL);
 759	if (!tp->name) {
 760		kfree(tp);
 761		return ERR_PTR(-ENOMEM);
 762	}
 763
 764	return tp;
 765}
 766
 767struct synth_event *find_synth_event(const char *name)
 768{
 769	struct dyn_event *pos;
 770	struct synth_event *event;
 771
 772	for_each_dyn_event(pos) {
 773		if (!is_synth_event(pos))
 774			continue;
 775		event = to_synth_event(pos);
 776		if (strcmp(event->name, name) == 0)
 777			return event;
 778	}
 779
 780	return NULL;
 781}
 782
/* Fields are defined at event-creation time via the callback below */
static struct trace_event_fields synth_event_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = synth_event_define_fields },
	{}
};
 788
/*
 * Wire a fully-constructed synth_event into the trace event core:
 * allocate its system name and tracepoint, register its trace_event,
 * then add the event call and install its print format.  On failure
 * the partially-registered state is unwound via the goto labels;
 * allocated members are left for free_synth_event() to reclaim.
 */
static int register_synth_event(struct synth_event *event)
{
	struct trace_event_call *call = &event->call;
	int ret = 0;

	event->call.class = &event->class;
	event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
	if (!event->class.system) {
		ret = -ENOMEM;
		goto out;
	}

	event->tp = alloc_synth_tracepoint(event->name);
	if (IS_ERR(event->tp)) {
		ret = PTR_ERR(event->tp);
		event->tp = NULL;
		goto out;
	}

	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &synth_event_funcs;
	call->class->fields_array = synth_event_fields_array;

	/* register_trace_event() returns the event type id, 0 on failure */
	ret = register_trace_event(&call->event);
	if (!ret) {
		ret = -ENODEV;
		goto out;
	}
	call->flags = TRACE_EVENT_FL_TRACEPOINT;
	call->class->reg = trace_event_reg;
	call->class->probe = trace_event_raw_event_synth;
	call->data = event;
	call->tp = event->tp;

	ret = trace_add_event_call(call);
	if (ret) {
		pr_warn("Failed to register synthetic event: %s\n",
			trace_event_name(call));
		goto err;
	}

	ret = set_synth_event_print_fmt(call);
	/* unregister_trace_event() will be called inside */
	if (ret < 0)
		trace_remove_event_call(call);
 out:
	return ret;
 err:
	unregister_trace_event(&call->event);
	goto out;
}
 840
 841static int unregister_synth_event(struct synth_event *event)
 842{
 843	struct trace_event_call *call = &event->call;
 844	int ret;
 845
 846	ret = trace_remove_event_call(call);
 847
 848	return ret;
 849}
 850
/* Release a synth_event and everything it owns; NULL is a no-op */
static void free_synth_event(struct synth_event *event)
{
	unsigned int i;

	if (!event)
		return;

	/* n_fields is 0 until alloc_synth_event() completes (see there) */
	for (i = 0; i < event->n_fields; i++)
		free_synth_field(event->fields[i]);

	kfree(event->fields);
	kfree(event->dynamic_fields);
	kfree(event->name);
	kfree(event->class.system);
	free_synth_tracepoint(event->tp);
	free_synth_event_print_fmt(&event->call);
	kfree(event);
}
 869
/*
 * Allocate a synth_event, adopt the entries of the @fields array, and
 * build the dynamic_fields index used by the write path.  Returns the
 * event or an ERR_PTR.  n_fields is only set after the field pointers
 * are installed, so free_synth_event() on an error path will not free
 * fields this function never adopted.
 */
static struct synth_event *alloc_synth_event(const char *name, int n_fields,
					     struct synth_field **fields)
{
	unsigned int i, j, n_dynamic_fields = 0;
	struct synth_event *event;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event) {
		event = ERR_PTR(-ENOMEM);
		goto out;
	}

	event->name = kstrdup(name, GFP_KERNEL);
	if (!event->name) {
		kfree(event);
		event = ERR_PTR(-ENOMEM);
		goto out;
	}

	event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
	if (!event->fields) {
		free_synth_event(event);
		event = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* count dynamic fields to size the dynamic_fields index */
	for (i = 0; i < n_fields; i++)
		if (fields[i]->is_dynamic)
			n_dynamic_fields++;

	if (n_dynamic_fields) {
		event->dynamic_fields = kcalloc(n_dynamic_fields,
						sizeof(*event->dynamic_fields),
						GFP_KERNEL);
		if (!event->dynamic_fields) {
			free_synth_event(event);
			event = ERR_PTR(-ENOMEM);
			goto out;
		}
	}

	dyn_event_init(&event->devent, &synth_event_ops);

	for (i = 0, j = 0; i < n_fields; i++) {
		fields[i]->field_pos = i;
		event->fields[i] = fields[i];

		if (fields[i]->is_dynamic)
			event->dynamic_fields[j++] = fields[i];
	}
	event->n_dynamic_fields = j;
	event->n_fields = n_fields;
 out:
	return event;
}
 925
 926static int synth_event_check_arg_fn(void *data)
 927{
 928	struct dynevent_arg_pair *arg_pair = data;
 929	int size;
 930
 931	size = synth_field_size((char *)arg_pair->lhs);
 932	if (size == 0) {
 933		if (strstr((char *)arg_pair->lhs, "["))
 934			return 0;
 935	}
 936
 937	return size ? 0 : -EINVAL;
 938}
 939
 940/**
 941 * synth_event_add_field - Add a new field to a synthetic event cmd
 942 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 943 * @type: The type of the new field to add
 944 * @name: The name of the new field to add
 945 *
 946 * Add a new field to a synthetic event cmd object.  Field ordering is in
 947 * the same order the fields are added.
 948 *
 949 * See synth_field_size() for available types. If field_name contains
 950 * [n] the field is considered to be an array.
 951 *
 952 * Return: 0 if successful, error otherwise.
 953 */
 954int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
 955			  const char *name)
 956{
 957	struct dynevent_arg_pair arg_pair;
 958	int ret;
 959
 960	if (cmd->type != DYNEVENT_TYPE_SYNTH)
 961		return -EINVAL;
 962
 963	if (!type || !name)
 964		return -EINVAL;
 965
 966	dynevent_arg_pair_init(&arg_pair, 0, ';');
 967
 968	arg_pair.lhs = type;
 969	arg_pair.rhs = name;
 970
 971	ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
 972	if (ret)
 973		return ret;
 974
 975	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
 976		ret = -EINVAL;
 977
 978	return ret;
 979}
 980EXPORT_SYMBOL_GPL(synth_event_add_field);
 981
 982/**
 983 * synth_event_add_field_str - Add a new field to a synthetic event cmd
 984 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 985 * @type_name: The type and name of the new field to add, as a single string
 986 *
 987 * Add a new field to a synthetic event cmd object, as a single
 988 * string.  The @type_name string is expected to be of the form 'type
 989 * name', which will be appended by ';'.  No sanity checking is done -
 990 * what's passed in is assumed to already be well-formed.  Field
 991 * ordering is in the same order the fields are added.
 992 *
 993 * See synth_field_size() for available types. If field_name contains
 994 * [n] the field is considered to be an array.
 995 *
 996 * Return: 0 if successful, error otherwise.
 997 */
 998int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
 999{
1000	struct dynevent_arg arg;
1001	int ret;
1002
1003	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1004		return -EINVAL;
1005
1006	if (!type_name)
1007		return -EINVAL;
1008
1009	dynevent_arg_init(&arg, ';');
1010
1011	arg.str = type_name;
1012
1013	ret = dynevent_arg_add(cmd, &arg, NULL);
1014	if (ret)
1015		return ret;
1016
1017	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1018		ret = -EINVAL;
1019
1020	return ret;
1021}
1022EXPORT_SYMBOL_GPL(synth_event_add_field_str);
1023
1024/**
1025 * synth_event_add_fields - Add multiple fields to a synthetic event cmd
1026 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1027 * @fields: An array of type/name field descriptions
1028 * @n_fields: The number of field descriptions contained in the fields array
1029 *
1030 * Add a new set of fields to a synthetic event cmd object.  The event
1031 * fields that will be defined for the event should be passed in as an
1032 * array of struct synth_field_desc, and the number of elements in the
1033 * array passed in as n_fields.  Field ordering will retain the
1034 * ordering given in the fields array.
1035 *
1036 * See synth_field_size() for available types. If field_name contains
1037 * [n] the field is considered to be an array.
1038 *
1039 * Return: 0 if successful, error otherwise.
1040 */
1041int synth_event_add_fields(struct dynevent_cmd *cmd,
1042			   struct synth_field_desc *fields,
1043			   unsigned int n_fields)
1044{
1045	unsigned int i;
1046	int ret = 0;
1047
1048	for (i = 0; i < n_fields; i++) {
1049		if (fields[i].type == NULL || fields[i].name == NULL) {
1050			ret = -EINVAL;
1051			break;
1052		}
1053
1054		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1055		if (ret)
1056			break;
1057	}
1058
1059	return ret;
1060}
1061EXPORT_SYMBOL_GPL(synth_event_add_fields);
1062
1063/**
1064 * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
1065 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1066 * @name: The name of the synthetic event
1067 * @mod: The module creating the event, NULL if not created from a module
1068 * @args: Variable number of arg (pairs), one pair for each field
1069 *
1070 * NOTE: Users normally won't want to call this function directly, but
1071 * rather use the synth_event_gen_cmd_start() wrapper, which
1072 * automatically adds a NULL to the end of the arg list.  If this
1073 * function is used directly, make sure the last arg in the variable
1074 * arg list is NULL.
1075 *
1076 * Generate a synthetic event command to be executed by
1077 * synth_event_gen_cmd_end().  This function can be used to generate
1078 * the complete command or only the first part of it; in the latter
1079 * case, synth_event_add_field(), synth_event_add_field_str(), or
1080 * synth_event_add_fields() can be used to add more fields following
1081 * this.
1082 *
1083 * There should be an even number variable args, each pair consisting
1084 * of a type followed by a field name.
1085 *
1086 * See synth_field_size() for available types. If field_name contains
1087 * [n] the field is considered to be an array.
1088 *
1089 * Return: 0 if successful, error otherwise.
1090 */
int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
				struct module *mod, ...)
{
	struct dynevent_arg arg;
	va_list args;
	int ret;

	cmd->event_name = name;
	cmd->private_data = mod;

	if (cmd->type != DYNEVENT_TYPE_SYNTH)
		return -EINVAL;

	/* the event name is the first token of the generated command */
	dynevent_arg_init(&arg, 0);
	arg.str = name;
	ret = dynevent_arg_add(cmd, &arg, NULL);
	if (ret)
		return ret;

	/* consume (type, name) pairs until a NULL terminates the list */
	va_start(args, mod);
	for (;;) {
		const char *type, *name;

		type = va_arg(args, const char *);
		if (!type)
			break;
		name = va_arg(args, const char *);
		if (!name)
			break;

		/*
		 * NOTE(review): synth_event_add_field() increments
		 * n_fields again, so each pair counts twice against
		 * SYNTH_FIELDS_MAX on this path -- confirm the
		 * intended limit.
		 */
		if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
			ret = -EINVAL;
			break;
		}

		ret = synth_event_add_field(cmd, type, name);
		if (ret)
			break;
	}
	va_end(args);

	return ret;
}
1134EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
1135
1136/**
1137 * synth_event_gen_cmd_array_start - Start synthetic event command from an array
1138 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @name: The name of the synthetic event
 * @mod: The module creating the event, NULL if not created from a module
 * @fields: An array of type/name field descriptions
1141 * @n_fields: The number of field descriptions contained in the fields array
1142 *
1143 * Generate a synthetic event command to be executed by
1144 * synth_event_gen_cmd_end().  This function can be used to generate
1145 * the complete command or only the first part of it; in the latter
1146 * case, synth_event_add_field(), synth_event_add_field_str(), or
1147 * synth_event_add_fields() can be used to add more fields following
1148 * this.
1149 *
1150 * The event fields that will be defined for the event should be
1151 * passed in as an array of struct synth_field_desc, and the number of
1152 * elements in the array passed in as n_fields.  Field ordering will
1153 * retain the ordering given in the fields array.
1154 *
1155 * See synth_field_size() for available types. If field_name contains
1156 * [n] the field is considered to be an array.
1157 *
1158 * Return: 0 if successful, error otherwise.
1159 */
1160int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
1161				    struct module *mod,
1162				    struct synth_field_desc *fields,
1163				    unsigned int n_fields)
1164{
1165	struct dynevent_arg arg;
1166	unsigned int i;
1167	int ret = 0;
1168
1169	cmd->event_name = name;
1170	cmd->private_data = mod;
1171
1172	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1173		return -EINVAL;
1174
1175	if (n_fields > SYNTH_FIELDS_MAX)
1176		return -EINVAL;
1177
1178	dynevent_arg_init(&arg, 0);
1179	arg.str = name;
1180	ret = dynevent_arg_add(cmd, &arg, NULL);
1181	if (ret)
1182		return ret;
1183
1184	for (i = 0; i < n_fields; i++) {
1185		if (fields[i].type == NULL || fields[i].name == NULL)
1186			return -EINVAL;
1187
1188		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1189		if (ret)
1190			break;
1191	}
1192
1193	return ret;
1194}
1195EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);
1196
/*
 * Parse a field list and register a new synthetic event named @name.
 * Called with no locks held; takes event_mutex for the lookup and
 * registration.  Reports parse errors via synth_err() so they show up
 * in the tracing error_log.
 */
static int __create_synth_event(const char *name, const char *raw_fields)
{
	char **argv, *field_str, *tmp_fields, *saved_fields = NULL;
	struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
	int consumed, cmd_version = 1, n_fields_this_loop;
	int i, argc, n_fields = 0, ret = 0;
	struct synth_event *event = NULL;

	/*
	 * Argument syntax:
	 *  - Add synthetic event: <event_name> field[;field] ...
	 *  - Remove synthetic event: !<event_name> field[;field] ...
	 *      where 'field' = type field_name
	 */

	if (name[0] == '\0') {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return -EINVAL;
	}

	if (!is_good_name(name)) {
		synth_err(SYNTH_ERR_BAD_NAME, errpos(name));
		return -EINVAL;
	}

	mutex_lock(&event_mutex);

	event = find_synth_event(name);
	if (event) {
		synth_err(SYNTH_ERR_EVENT_EXISTS, errpos(name));
		ret = -EEXIST;
		goto err;
	}

	/* strsep() below modifies the buffer, so parse a private copy. */
	tmp_fields = saved_fields = kstrdup(raw_fields, GFP_KERNEL);
	if (!tmp_fields) {
		ret = -ENOMEM;
		goto err;
	}

	/* Fields are separated by ';'; each chunk is tokenized on its own. */
	while ((field_str = strsep(&tmp_fields, ";")) != NULL) {
		argv = argv_split(GFP_KERNEL, field_str, &argc);
		if (!argv) {
			ret = -ENOMEM;
			goto err;
		}

		if (!argc) {
			argv_free(argv);
			continue;
		}

		n_fields_this_loop = 0;
		consumed = 0;
		while (argc > consumed) {
			int field_version;

			field = parse_synth_field(argc - consumed,
						  argv + consumed, &consumed,
						  &field_version);
			if (IS_ERR(field)) {
				ret = PTR_ERR(field);
				goto err_free_arg;
			}

			/*
			 * Track the highest version of any field we
			 * found in the command.
			 */
			if (field_version > cmd_version)
				cmd_version = field_version;

			/*
			 * Now sort out what is and isn't valid for
			 * each supported version.
			 *
			 * If we see more than 1 field per loop, it
			 * means we have multiple fields between
			 * semicolons, and that's something we no
			 * longer support in a version 2 or greater
			 * command.
			 */
			if (cmd_version > 1 && n_fields_this_loop >= 1) {
				synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str));
				ret = -EINVAL;
				goto err_free_arg;
			}

			if (n_fields == SYNTH_FIELDS_MAX) {
				synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
				ret = -EINVAL;
				goto err_free_arg;
			}
			fields[n_fields++] = field;

			n_fields_this_loop++;
		}
		argv_free(argv);

		/* Defensive: the parse loop should have consumed all tokens. */
		if (consumed < argc) {
			synth_err(SYNTH_ERR_INVALID_CMD, 0);
			ret = -EINVAL;
			goto err;
		}

	}

	if (n_fields == 0) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		ret = -EINVAL;
		goto err;
	}

	event = alloc_synth_event(name, n_fields, fields);
	if (IS_ERR(event)) {
		ret = PTR_ERR(event);
		event = NULL;
		goto err;
	}
	ret = register_synth_event(event);
	if (!ret)
		dyn_event_add(&event->devent, &event->call);
	else
		free_synth_event(event);
 out:
	mutex_unlock(&event_mutex);

	kfree(saved_fields);

	return ret;
 err_free_arg:
	argv_free(argv);
 err:
	/* On failure, release any fields parsed before the error. */
	for (i = 0; i < n_fields; i++)
		free_synth_field(fields[i]);

	goto out;
}
1335
1336/**
1337 * synth_event_create - Create a new synthetic event
1338 * @name: The name of the new synthetic event
1339 * @fields: An array of type/name field descriptions
1340 * @n_fields: The number of field descriptions contained in the fields array
1341 * @mod: The module creating the event, NULL if not created from a module
1342 *
1343 * Create a new synthetic event with the given name under the
1344 * trace/events/synthetic/ directory.  The event fields that will be
1345 * defined for the event should be passed in as an array of struct
1346 * synth_field_desc, and the number elements in the array passed in as
1347 * n_fields. Field ordering will retain the ordering given in the
1348 * fields array.
1349 *
1350 * If the new synthetic event is being created from a module, the mod
1351 * param must be non-NULL.  This will ensure that the trace buffer
1352 * won't contain unreadable events.
1353 *
1354 * The new synth event should be deleted using synth_event_delete()
1355 * function.  The new synthetic event can be generated from modules or
1356 * other kernel code using trace_synth_event() and related functions.
1357 *
1358 * Return: 0 if successful, error otherwise.
1359 */
1360int synth_event_create(const char *name, struct synth_field_desc *fields,
1361		       unsigned int n_fields, struct module *mod)
1362{
1363	struct dynevent_cmd cmd;
1364	char *buf;
1365	int ret;
1366
1367	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
1368	if (!buf)
1369		return -ENOMEM;
1370
1371	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
1372
1373	ret = synth_event_gen_cmd_array_start(&cmd, name, mod,
1374					      fields, n_fields);
1375	if (ret)
1376		goto out;
1377
1378	ret = synth_event_gen_cmd_end(&cmd);
1379 out:
1380	kfree(buf);
1381
1382	return ret;
1383}
1384EXPORT_SYMBOL_GPL(synth_event_create);
1385
1386static int destroy_synth_event(struct synth_event *se)
1387{
1388	int ret;
1389
1390	if (se->ref)
1391		return -EBUSY;
1392
1393	if (trace_event_dyn_busy(&se->call))
1394		return -EBUSY;
1395
1396	ret = unregister_synth_event(se);
1397	if (!ret) {
1398		dyn_event_remove(&se->devent);
1399		free_synth_event(se);
1400	}
1401
1402	return ret;
1403}
1404
/**
 * synth_event_delete - Delete a synthetic event
 * @event_name: The name of the synthetic event to delete
 *
 * Delete a synthetic event that was created with synth_event_create().
 *
 * Return: 0 if successful, error otherwise.
 */
int synth_event_delete(const char *event_name)
{
	struct synth_event *se = NULL;
	struct module *mod = NULL;
	int ret = -ENOENT;

	/* Lookup and teardown must both happen under event_mutex. */
	mutex_lock(&event_mutex);
	se = find_synth_event(event_name);
	if (se) {
		mod = se->mod;
		ret = destroy_synth_event(se);
	}
	mutex_unlock(&event_mutex);

	if (mod) {
		/*
		 * It is safest to reset the ring buffer if the module
		 * being unloaded registered any events that were
		 * used. The only worry is if a new module gets
		 * loaded, and takes on the same id as the events of
		 * this module. When printing out the buffer, traced
		 * events left over from this module may be passed to
		 * the new module events and unexpected results may
		 * occur.
		 */
		tracing_reset_all_online_cpus();
	}

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_delete);
1444
1445static int check_command(const char *raw_command)
1446{
1447	char **argv = NULL, *cmd, *saved_cmd, *name_and_field;
1448	int argc, ret = 0;
1449
1450	cmd = saved_cmd = kstrdup(raw_command, GFP_KERNEL);
1451	if (!cmd)
1452		return -ENOMEM;
1453
1454	name_and_field = strsep(&cmd, ";");
1455	if (!name_and_field) {
1456		ret = -EINVAL;
1457		goto free;
1458	}
1459
1460	if (name_and_field[0] == '!')
1461		goto free;
1462
1463	argv = argv_split(GFP_KERNEL, name_and_field, &argc);
1464	if (!argv) {
1465		ret = -ENOMEM;
1466		goto free;
1467	}
1468	argv_free(argv);
1469
1470	if (argc < 3)
1471		ret = -EINVAL;
1472free:
1473	kfree(saved_cmd);
1474
1475	return ret;
1476}
1477
/*
 * Handle one line written to "synthetic_events":
 * "<name> field[;field] ..." creates an event, "!<name>" deletes one.
 */
static int create_or_delete_synth_event(const char *raw_command)
{
	char *name = NULL, *fields, *p;
	int ret = 0;

	raw_command = skip_spaces(raw_command);
	if (raw_command[0] == '\0')
		return ret;

	/* Remember the command for error_log position reporting. */
	last_cmd_set(raw_command);

	ret = check_command(raw_command);
	if (ret) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return ret;
	}

	/* The event name ends at the first whitespace (if any). */
	p = strpbrk(raw_command, " \t");
	if (!p && raw_command[0] != '!') {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		ret = -EINVAL;
		goto free;
	}

	name = kmemdup_nul(raw_command, p ? p - raw_command : strlen(raw_command), GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	/* A '!' prefix requests deletion of the named event. */
	if (name[0] == '!') {
		ret = synth_event_delete(name + 1);
		goto free;
	}

	fields = skip_spaces(p);

	ret = __create_synth_event(name, fields);
free:
	kfree(name);

	return ret;
}
1519
1520static int synth_event_run_command(struct dynevent_cmd *cmd)
1521{
1522	struct synth_event *se;
1523	int ret;
1524
1525	ret = create_or_delete_synth_event(cmd->seq.buffer);
1526	if (ret)
1527		return ret;
1528
1529	se = find_synth_event(cmd->event_name);
1530	if (WARN_ON(!se))
1531		return -ENOENT;
1532
1533	se->mod = cmd->private_data;
1534
1535	return ret;
1536}
1537
/**
 * synth_event_cmd_init - Initialize a synthetic event command object
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @buf: A pointer to the buffer used to build the command
 * @maxlen: The length of the buffer passed in @buf
 *
 * Initialize a synthetic event command object.  Use this before
 * calling any of the other dynevent_cmd functions.
 */
void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
{
	/* Commands built on @cmd are executed via synth_event_run_command(). */
	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
			  synth_event_run_command);
}
EXPORT_SYMBOL_GPL(synth_event_cmd_init);
1553
1554static inline int
1555__synth_event_trace_init(struct trace_event_file *file,
1556			 struct synth_event_trace_state *trace_state)
1557{
1558	int ret = 0;
1559
1560	memset(trace_state, '\0', sizeof(*trace_state));
1561
1562	/*
1563	 * Normal event tracing doesn't get called at all unless the
1564	 * ENABLED bit is set (which attaches the probe thus allowing
1565	 * this code to be called, etc).  Because this is called
1566	 * directly by the user, we don't have that but we still need
1567	 * to honor not logging when disabled.  For the iterated
1568	 * trace case, we save the enabled state upon start and just
1569	 * ignore the following data calls.
1570	 */
1571	if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
1572	    trace_trigger_soft_disabled(file)) {
1573		trace_state->disabled = true;
1574		ret = -ENOENT;
1575		goto out;
1576	}
1577
1578	trace_state->event = file->event_call->data;
1579out:
1580	return ret;
1581}
1582
/* Reserve ring buffer space for one synthetic event entry. */
static inline int
__synth_event_trace_start(struct trace_event_file *file,
			  struct synth_event_trace_state *trace_state,
			  int dynamic_fields_size)
{
	int entry_size, fields_size = 0;
	int ret = 0;

	/* Fixed-size field area plus the caller-computed dynamic string data. */
	fields_size = trace_state->event->n_u64 * sizeof(u64);
	fields_size += dynamic_fields_size;

	/*
	 * Avoid ring buffer recursion detection, as this event
	 * is being performed within another event.
	 */
	trace_state->buffer = file->tr->array_buffer.buffer;
	ring_buffer_nest_start(trace_state->buffer);

	entry_size = sizeof(*trace_state->entry) + fields_size;
	trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
							file,
							entry_size);
	if (!trace_state->entry) {
		/* Reservation failed: undo the nesting before bailing out. */
		ring_buffer_nest_end(trace_state->buffer);
		ret = -EINVAL;
	}

	return ret;
}
1612
/* Commit the reserved entry and leave the nested ring buffer context. */
static inline void
__synth_event_trace_end(struct synth_event_trace_state *trace_state)
{
	trace_event_buffer_commit(&trace_state->fbuffer);

	ring_buffer_nest_end(trace_state->buffer);
}
1620
/**
 * synth_event_trace - Trace a synthetic event
 * @file: The trace_event_file representing the synthetic event
 * @n_vals: The number of values in vals
 * @args: Variable number of args containing the event values
 *
 * Trace a synthetic event using the values passed in the variable
 * argument list.
 *
 * The argument list should be a list 'n_vals' u64 values.  The number
 * of vals must match the number of field in the synthetic event, and
 * must be in the same order as the synthetic event fields.
 *
 * All vals should be cast to u64, and string vals are just pointers
 * to strings, cast to u64.  Strings will be copied into space
 * reserved in the event for the string, using these pointers.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
{
	unsigned int i, n_u64, len, data_size = 0;
	struct synth_event_trace_state state;
	va_list args;
	int ret;

	ret = __synth_event_trace_init(file, &state);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0; /* just disabled, not really an error */
		return ret;
	}

	/*
	 * First pass over the args: total up the dynamic string sizes
	 * so the ring buffer reservation below can include them.
	 */
	if (state.event->n_dynamic_fields) {
		va_start(args, n_vals);

		for (i = 0; i < state.event->n_fields; i++) {
			u64 val = va_arg(args, u64);

			if (state.event->fields[i]->is_string &&
			    state.event->fields[i]->is_dynamic) {
				char *str_val = (char *)(long)val;

				data_size += strlen(str_val) + 1;
			}
		}

		va_end(args);
	}

	ret = __synth_event_trace_start(file, &state, data_size);
	if (ret)
		return ret;

	if (n_vals != state.event->n_fields) {
		ret = -EINVAL;
		goto out;
	}

	data_size = 0;

	/* Second pass: copy each value into the reserved entry. */
	va_start(args, n_vals);
	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
		u64 val;

		val = va_arg(args, u64);

		if (state.event->fields[i]->is_string) {
			char *str_val = (char *)(long)val;

			len = trace_string(state.entry, state.event, str_val,
					   state.event->fields[i]->is_dynamic,
					   data_size, &n_u64);
			data_size += len; /* only dynamic string increments */
		} else {
			struct synth_field *field = state.event->fields[i];

			/* Store at the field's native width within the u64 slot. */
			switch (field->size) {
			case 1:
				*(u8 *)&state.entry->fields[n_u64] = (u8)val;
				break;

			case 2:
				*(u16 *)&state.entry->fields[n_u64] = (u16)val;
				break;

			case 4:
				*(u32 *)&state.entry->fields[n_u64] = (u32)val;
				break;

			default:
				state.entry->fields[n_u64] = val;
				break;
			}
			n_u64++;
		}
	}
	va_end(args);
out:
	__synth_event_trace_end(&state);

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_trace);
1725
/**
 * synth_event_trace_array - Trace a synthetic event from an array
 * @file: The trace_event_file representing the synthetic event
 * @vals: Array of values
 * @n_vals: The number of values in vals
 *
 * Trace a synthetic event using the values passed in as 'vals'.
 *
 * The 'vals' array is just an array of 'n_vals' u64.  The number of
 * vals must match the number of field in the synthetic event, and
 * must be in the same order as the synthetic event fields.
 *
 * All vals should be cast to u64, and string vals are just pointers
 * to strings, cast to u64.  Strings will be copied into space
 * reserved in the event for the string, using these pointers.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
			    unsigned int n_vals)
{
	unsigned int i, n_u64, field_pos, len, data_size = 0;
	struct synth_event_trace_state state;
	char *str_val;
	int ret;

	ret = __synth_event_trace_init(file, &state);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0; /* just disabled, not really an error */
		return ret;
	}

	/* Size the dynamic string payload so the reservation can include it. */
	if (state.event->n_dynamic_fields) {
		for (i = 0; i < state.event->n_dynamic_fields; i++) {
			field_pos = state.event->dynamic_fields[i]->field_pos;
			str_val = (char *)(long)vals[field_pos];
			len = strlen(str_val) + 1;
			data_size += len;
		}
	}

	ret = __synth_event_trace_start(file, &state, data_size);
	if (ret)
		return ret;

	if (n_vals != state.event->n_fields) {
		ret = -EINVAL;
		goto out;
	}

	data_size = 0;

	/* Copy each value into the reserved entry. */
	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
		if (state.event->fields[i]->is_string) {
			char *str_val = (char *)(long)vals[i];

			len = trace_string(state.entry, state.event, str_val,
					   state.event->fields[i]->is_dynamic,
					   data_size, &n_u64);
			data_size += len; /* only dynamic string increments */
		} else {
			struct synth_field *field = state.event->fields[i];
			u64 val = vals[i];

			/* Store at the field's native width within the u64 slot. */
			switch (field->size) {
			case 1:
				*(u8 *)&state.entry->fields[n_u64] = (u8)val;
				break;

			case 2:
				*(u16 *)&state.entry->fields[n_u64] = (u16)val;
				break;

			case 4:
				*(u32 *)&state.entry->fields[n_u64] = (u32)val;
				break;

			default:
				state.entry->fields[n_u64] = val;
				break;
			}
			n_u64++;
		}
	}
out:
	__synth_event_trace_end(&state);

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_trace_array);
1817
/**
 * synth_event_trace_start - Start piecewise synthetic event trace
 * @file: The trace_event_file representing the synthetic event
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * Start the trace of a synthetic event field-by-field rather than all
 * at once.
 *
 * This function 'opens' an event trace, which means space is reserved
 * for the event in the trace buffer, after which the event's
 * individual field values can be set through either
 * synth_event_add_next_val() or synth_event_add_val().
 *
 * A pointer to a trace_state object is passed in, which will keep
 * track of the current event trace state until the event trace is
 * closed (and the event finally traced) using
 * synth_event_trace_end().
 *
 * Note that synth_event_trace_end() must be called after all values
 * have been added for each event trace, regardless of whether adding
 * all field values succeeded or not.
 *
 * Note also that for a given event trace, all fields must be added
 * using either synth_event_add_next_val() or synth_event_add_val()
 * but not both together or interleaved.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_trace_start(struct trace_event_file *file,
			    struct synth_event_trace_state *trace_state)
{
	int ret;

	if (!trace_state)
		return -EINVAL;

	ret = __synth_event_trace_init(file, trace_state);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0; /* just disabled, not really an error */
		return ret;
	}

	/* The piecewise API can't size dynamic string fields up front. */
	if (trace_state->event->n_dynamic_fields)
		return -ENOTSUPP;

	ret = __synth_event_trace_start(file, trace_state, 0);

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_trace_start);
1869
/*
 * Common implementation for synth_event_add_next_val() (field_name ==
 * NULL, positional mode) and synth_event_add_val() (lookup by name).
 * The two modes are mutually exclusive for a given trace_state.
 */
static int __synth_event_add_val(const char *field_name, u64 val,
				 struct synth_event_trace_state *trace_state)
{
	struct synth_field *field = NULL;
	struct synth_trace_event *entry;
	struct synth_event *event;
	int i, ret = 0;

	if (!trace_state) {
		ret = -EINVAL;
		goto out;
	}

	/* can't mix add_next_synth_val() with add_synth_val() */
	if (field_name) {
		if (trace_state->add_next) {
			ret = -EINVAL;
			goto out;
		}
		trace_state->add_name = true;
	} else {
		if (trace_state->add_name) {
			ret = -EINVAL;
			goto out;
		}
		trace_state->add_next = true;
	}

	/* Event was disabled at trace_start; silently accept and drop. */
	if (trace_state->disabled)
		goto out;

	event = trace_state->event;
	if (trace_state->add_name) {
		/* Named mode: linear search for the matching field. */
		for (i = 0; i < event->n_fields; i++) {
			field = event->fields[i];
			if (strcmp(field->name, field_name) == 0)
				break;
		}
		if (!field) {
			ret = -EINVAL;
			goto out;
		}
	} else {
		/* Positional mode: take the next field in declaration order. */
		if (trace_state->cur_field >= event->n_fields) {
			ret = -EINVAL;
			goto out;
		}
		field = event->fields[trace_state->cur_field++];
	}

	entry = trace_state->entry;
	if (field->is_string) {
		char *str_val = (char *)(long)val;
		char *str_field;

		if (field->is_dynamic) { /* add_val can't do dynamic strings */
			ret = -EINVAL;
			goto out;
		}

		if (!str_val) {
			ret = -EINVAL;
			goto out;
		}

		str_field = (char *)&entry->fields[field->offset];
		strscpy(str_field, str_val, STR_VAR_LEN_MAX);
	} else {
		/* Store at the field's native width within its u64 slot. */
		switch (field->size) {
		case 1:
			*(u8 *)&trace_state->entry->fields[field->offset] = (u8)val;
			break;

		case 2:
			*(u16 *)&trace_state->entry->fields[field->offset] = (u16)val;
			break;

		case 4:
			*(u32 *)&trace_state->entry->fields[field->offset] = (u32)val;
			break;

		default:
			trace_state->entry->fields[field->offset] = val;
			break;
		}
	}
 out:
	return ret;
}
1959
/**
 * synth_event_add_next_val - Add the next field's value to an open synth trace
 * @val: The value to set the next field to
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * Set the value of the next field in an event that's been opened by
 * synth_event_trace_start().
 *
 * The val param should be the value cast to u64.  If the value points
 * to a string, the val param should be a char * cast to u64.
 *
 * This function assumes all the fields in an event are to be set one
 * after another - successive calls to this function are made, one for
 * each field, in the order of the fields in the event, until all
 * fields have been set.  If you'd rather set each field individually
 * without regard to ordering, synth_event_add_val() can be used
 * instead.
 *
 * Note however that synth_event_add_next_val() and
 * synth_event_add_val() can't be intermixed for a given event trace -
 * one or the other but not both can be used at the same time.
 *
 * Note also that synth_event_trace_end() must be called after all
 * values have been added for each event trace, regardless of whether
 * adding all field values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_add_next_val(u64 val,
			     struct synth_event_trace_state *trace_state)
{
	/* A NULL field name selects positional (in-order) mode. */
	return __synth_event_add_val(NULL, val, trace_state);
}
EXPORT_SYMBOL_GPL(synth_event_add_next_val);
1994
/**
 * synth_event_add_val - Add a named field's value to an open synth trace
 * @field_name: The name of the synthetic event field value to set
 * @val: The value to set the named field to
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * Set the value of the named field in an event that's been opened by
 * synth_event_trace_start().
 *
 * The val param should be the value cast to u64.  If the value points
 * to a string, the val param should be a char * cast to u64.
 *
 * This function looks up the field name, and if found, sets the field
 * to the specified value.  This lookup makes this function more
 * expensive than synth_event_add_next_val(), so use that or the
 * none-piecewise synth_event_trace() instead if efficiency is more
 * important.
 *
 * Note however that synth_event_add_next_val() and
 * synth_event_add_val() can't be intermixed for a given event trace -
 * one or the other but not both can be used at the same time.
 *
 * Note also that synth_event_trace_end() must be called after all
 * values have been added for each event trace, regardless of whether
 * adding all field values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_add_val(const char *field_name, u64 val,
			struct synth_event_trace_state *trace_state)
{
	/* A non-NULL field name selects lookup-by-name mode. */
	return __synth_event_add_val(field_name, val, trace_state);
}
EXPORT_SYMBOL_GPL(synth_event_add_val);
2029
/**
 * synth_event_trace_end - End piecewise synthetic event trace
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * End the trace of a synthetic event opened by
 * synth_event_trace_start().
 *
 * This function 'closes' an event trace, which basically means that
 * it commits the reserved event and cleans up other loose ends.
 *
 * A pointer to a trace_state object is passed in, which will keep
 * track of the current event trace state opened with
 * synth_event_trace_start().
 *
 * Note that this function must be called after all values have been
 * added for each event trace, regardless of whether adding all field
 * values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_trace_end(struct synth_event_trace_state *trace_state)
{
	if (!trace_state)
		return -EINVAL;

	__synth_event_trace_end(trace_state);

	return 0;
}
EXPORT_SYMBOL_GPL(synth_event_trace_end);
2060
/*
 * Handle a dynamic_events command of the form
 * "s:[synthetic/]<name> field[;field] ...".
 */
static int create_synth_event(const char *raw_command)
{
	char *fields, *p;
	const char *name;
	int len, ret = 0;

	raw_command = skip_spaces(raw_command);
	if (raw_command[0] == '\0')
		return ret;

	/* Remember the command for error_log position reporting. */
	last_cmd_set(raw_command);

	name = raw_command;

	/* Don't try to process if not our system */
	if (name[0] != 's' || name[1] != ':')
		return -ECANCELED;
	name += 2;

	/* The event name ends at the first whitespace. */
	p = strpbrk(raw_command, " \t");
	if (!p) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return -EINVAL;
	}

	fields = skip_spaces(p);

	/* This interface accepts group name prefix */
	if (strchr(name, '/')) {
		len = str_has_prefix(name, SYNTH_SYSTEM "/");
		if (len == 0) {
			synth_err(SYNTH_ERR_INVALID_DYN_CMD, 0);
			return -EINVAL;
		}
		name += len;
	}

	/* Offset of the bare event name within raw_command. */
	len = name - raw_command;

	ret = check_command(raw_command + len);
	if (ret) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return ret;
	}

	/* Duplicate just the event name, NUL-terminated. */
	name = kmemdup_nul(raw_command + len, p - raw_command - len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	ret = __create_synth_event(name, fields);

	kfree(name);

	return ret;
}
2116
2117static int synth_event_release(struct dyn_event *ev)
2118{
2119	struct synth_event *event = to_synth_event(ev);
2120	int ret;
2121
2122	if (event->ref)
2123		return -EBUSY;
2124
2125	if (trace_event_dyn_busy(&event->call))
2126		return -EBUSY;
2127
2128	ret = unregister_synth_event(event);
2129	if (ret)
2130		return ret;
2131
2132	dyn_event_remove(ev);
2133	free_synth_event(event);
2134	return 0;
2135}
2136
/* Print "<name>\t<type> <field>[; <type> <field>]..." for one event. */
static int __synth_event_show(struct seq_file *m, struct synth_event *event)
{
	struct synth_field *field;
	unsigned int i;
	char *type, *t;

	seq_printf(m, "%s\t", event->name);

	for (i = 0; i < event->n_fields; i++) {
		field = event->fields[i];

		type = field->type;
		t = strstr(type, "__data_loc");
		if (t) { /* __data_loc belongs in format but not event desc */
			/* sizeof() counts the NUL, so this skips one extra char past "__data_loc" */
			t += sizeof("__data_loc");
			type = t;
		}

		/* parameter values */
		seq_printf(m, "%s %s%s", type, field->name,
			   i == event->n_fields - 1 ? "" : "; ");
	}

	seq_putc(m, '\n');

	return 0;
}
2164
/* dynamic_events view: prefix with the "s:<system>/" creation syntax. */
static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
{
	struct synth_event *event = to_synth_event(ev);

	seq_printf(m, "s:%s/", event->class.system);

	return __synth_event_show(m, event);
}
2173
/* seq_file show callback: display only synthetic dynamic events. */
static int synth_events_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	return is_synth_event(ev) ? __synth_event_show(m, to_synth_event(ev)) : 0;
}
2183
/* Iterate the dynamic event list; ->show filters to synthetic events. */
static const struct seq_operations synth_events_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= synth_events_seq_show,
};
2190
/* Open handler for the tracefs "synthetic_events" file. */
static int synth_events_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	/* Opening for write with O_TRUNC releases all existing synthetic events. */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&synth_event_ops);
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &synth_events_seq_op);
}
2207
/* Each line written is handed to create_or_delete_synth_event(). */
static ssize_t synth_events_write(struct file *file,
				  const char __user *buffer,
				  size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_or_delete_synth_event);
}
2215
/* File operations for the tracefs "synthetic_events" control file. */
static const struct file_operations synth_events_fops = {
	.open           = synth_events_open,
	.write		= synth_events_write,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};
2223
2224/*
2225 * Register dynevent at core_initcall. This allows kernel to setup kprobe
2226 * events in postcore_initcall without tracefs.
2227 */
2228static __init int trace_events_synth_init_early(void)
2229{
2230	int err = 0;
2231
2232	err = dyn_event_register(&synth_event_ops);
2233	if (err)
2234		pr_warn("Could not register synth_event_ops\n");
2235
2236	return err;
2237}
2238core_initcall(trace_events_synth_init_early);
2239
2240static __init int trace_events_synth_init(void)
2241{
2242	struct dentry *entry = NULL;
2243	int err = 0;
2244	err = tracing_init_dentry();
2245	if (err)
2246		goto err;
2247
2248	entry = tracefs_create_file("synthetic_events", TRACE_MODE_WRITE,
2249				    NULL, NULL, &synth_events_fops);
2250	if (!entry) {
2251		err = -ENODEV;
2252		goto err;
2253	}
2254
2255	return err;
2256 err:
2257	pr_warn("Could not create tracefs 'synthetic_events' entry\n");
2258
2259	return err;
2260}
2261
2262fs_initcall(trace_events_synth_init);
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * trace_events_synth - synthetic trace events
   4 *
   5 * Copyright (C) 2015, 2020 Tom Zanussi <tom.zanussi@linux.intel.com>
   6 */
   7
   8#include <linux/module.h>
   9#include <linux/kallsyms.h>
  10#include <linux/security.h>
  11#include <linux/mutex.h>
  12#include <linux/slab.h>
  13#include <linux/stacktrace.h>
  14#include <linux/rculist.h>
  15#include <linux/tracefs.h>
  16
  17/* for gfp flag names */
  18#include <linux/trace_events.h>
  19#include <trace/events/mmflags.h>
  20#include "trace_probe.h"
  21#include "trace_probe_kernel.h"
  22
  23#include "trace_synth.h"
  24
  25#undef ERRORS
  26#define ERRORS	\
  27	C(BAD_NAME,		"Illegal name"),		\
  28	C(INVALID_CMD,		"Command must be of the form: <name> field[;field] ..."),\
  29	C(INVALID_DYN_CMD,	"Command must be of the form: s or -:[synthetic/]<name> field[;field] ..."),\
  30	C(EVENT_EXISTS,		"Event already exists"),	\
  31	C(TOO_MANY_FIELDS,	"Too many fields"),		\
  32	C(INCOMPLETE_TYPE,	"Incomplete type"),		\
  33	C(INVALID_TYPE,		"Invalid type"),		\
  34	C(INVALID_FIELD,        "Invalid field"),		\
  35	C(INVALID_ARRAY_SPEC,	"Invalid array specification"),
  36
  37#undef C
  38#define C(a, b)		SYNTH_ERR_##a
  39
  40enum { ERRORS };
  41
  42#undef C
  43#define C(a, b)		b
  44
  45static const char *err_text[] = { ERRORS };
  46
  47static DEFINE_MUTEX(lastcmd_mutex);
  48static char *last_cmd;
  49
  50static int errpos(const char *str)
  51{
  52	int ret = 0;
  53
  54	mutex_lock(&lastcmd_mutex);
  55	if (!str || !last_cmd)
  56		goto out;
  57
  58	ret = err_pos(last_cmd, str);
  59 out:
  60	mutex_unlock(&lastcmd_mutex);
  61	return ret;
  62}
  63
/*
 * Remember the command string currently being parsed so later errors
 * can be reported against it.  Replaces any previous copy; protected
 * by lastcmd_mutex.
 */
static void last_cmd_set(const char *str)
{
	if (!str)
		return;

	mutex_lock(&lastcmd_mutex);
	kfree(last_cmd);

	last_cmd = kstrdup(str, GFP_KERNEL);
	mutex_unlock(&lastcmd_mutex);
}
  74
  75static void synth_err(u8 err_type, u16 err_pos)
  76{
  77	mutex_lock(&lastcmd_mutex);
  78	if (!last_cmd)
  79		goto out;
  80
  81	tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
  82			err_type, err_pos);
  83 out:
  84	mutex_unlock(&lastcmd_mutex);
  85}
  86
/* Forward declarations for the dyn_event interface implemented below. */
static int create_synth_event(const char *raw_command);
static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
static int synth_event_release(struct dyn_event *ev);
static bool synth_event_is_busy(struct dyn_event *ev);
static bool synth_event_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev);

/* Hooks synthetic events into the generic dynamic-event framework. */
static struct dyn_event_operations synth_event_ops = {
	.create = create_synth_event,
	.show = synth_event_show,
	.is_busy = synth_event_is_busy,
	.free = synth_event_release,
	.match = synth_event_match,
};
 101
/* True iff @ev is a synthetic event, identified by its ops pointer. */
static bool is_synth_event(struct dyn_event *ev)
{
	return ev->ops == &synth_event_ops;
}
 106
/* Convert an embedded dyn_event back to its containing synth_event. */
static struct synth_event *to_synth_event(struct dyn_event *ev)
{
	return container_of(ev, struct synth_event, devent);
}
 111
 112static bool synth_event_is_busy(struct dyn_event *ev)
 113{
 114	struct synth_event *event = to_synth_event(ev);
 115
 116	return event->ref != 0;
 117}
 118
 119static bool synth_event_match(const char *system, const char *event,
 120			int argc, const char **argv, struct dyn_event *ev)
 121{
 122	struct synth_event *sev = to_synth_event(ev);
 123
 124	return strcmp(sev->name, event) == 0 &&
 125		(!system || strcmp(system, SYNTH_SYSTEM) == 0);
 126}
 127
/*
 * On-buffer layout of a synthetic event: the common trace header
 * followed by one u64-sized slot per field (statically-sized strings
 * span several consecutive slots; dynamic payloads follow the slots).
 */
struct synth_trace_event {
	struct trace_entry	ent;
	union trace_synth_field	fields[];
};
 132
/*
 * Define the trace fields for a synthetic event, one per synth field.
 * Numeric and dynamic fields occupy a single u64 slot each; static
 * strings occupy STR_VAR_LEN_MAX bytes.  Records each field's slot
 * offset and the event's total u64 slot count.
 */
static int synth_event_define_fields(struct trace_event_call *call)
{
	struct synth_trace_event trace;
	int offset = offsetof(typeof(trace), fields);
	struct synth_event *event = call->data;
	unsigned int i, size, n_u64;
	char *name, *type;
	bool is_signed;
	int ret = 0;

	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
		size = event->fields[i]->size;
		is_signed = event->fields[i]->is_signed;
		type = event->fields[i]->type;
		name = event->fields[i]->name;
		ret = trace_define_field(call, type, name, offset, size,
					 is_signed, FILTER_OTHER);
		if (ret)
			break;

		/* Field offset is recorded in u64 units, not bytes. */
		event->fields[i]->offset = n_u64;

		if (event->fields[i]->is_string && !event->fields[i]->is_dynamic) {
			offset += STR_VAR_LEN_MAX;
			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
		} else {
			offset += sizeof(u64);
			n_u64++;
		}
	}

	event->n_u64 = n_u64;

	return ret;
}
 168
 169static bool synth_field_signed(char *type)
 170{
 171	if (str_has_prefix(type, "u"))
 172		return false;
 173	if (strcmp(type, "gfp_t") == 0)
 174		return false;
 175
 176	return true;
 177}
 178
 179static int synth_field_is_string(char *type)
 180{
 181	if (strstr(type, "char[") != NULL)
 182		return true;
 183
 184	return false;
 185}
 186
 187static int synth_field_is_stack(char *type)
 188{
 189	if (strstr(type, "long[") != NULL)
 190		return true;
 191
 192	return false;
 193}
 194
/*
 * Parse the "[n]" suffix of a "char[n]" type and return n.
 *
 * Returns 0 for "char[]" (a dynamically-sized string).  Returns
 * -EINVAL for a malformed spec (no '[', characters after ']', more
 * than 3 digits) or for sizes above STR_VAR_LEN_MAX.
 */
static int synth_field_string_size(char *type)
{
	char buf[4], *end, *start;
	unsigned int len;
	int size, err;

	start = strstr(type, "char[");
	if (start == NULL)
		return -EINVAL;
	start += sizeof("char[") - 1;

	end = strchr(type, ']');
	/* ']' must exist, follow '[', and be the final character. */
	if (!end || end < start || type + strlen(type) > end + 1)
		return -EINVAL;

	len = end - start;
	if (len > 3)
		return -EINVAL;

	if (len == 0)
		return 0; /* variable-length string */

	/* len <= 3, so buf[4] always has room for the terminator. */
	strncpy(buf, start, len);
	buf[len] = '\0';

	/* NOTE(review): kstrtouint() expects an unsigned int *, but 'size' is int — confirm against upstream. */
	err = kstrtouint(buf, 0, &size);
	if (err)
		return err;

	if (size > STR_VAR_LEN_MAX)
		return -EINVAL;

	return size;
}
 229
 230static int synth_field_size(char *type)
 231{
 232	int size = 0;
 233
 234	if (strcmp(type, "s64") == 0)
 235		size = sizeof(s64);
 236	else if (strcmp(type, "u64") == 0)
 237		size = sizeof(u64);
 238	else if (strcmp(type, "s32") == 0)
 239		size = sizeof(s32);
 240	else if (strcmp(type, "u32") == 0)
 241		size = sizeof(u32);
 242	else if (strcmp(type, "s16") == 0)
 243		size = sizeof(s16);
 244	else if (strcmp(type, "u16") == 0)
 245		size = sizeof(u16);
 246	else if (strcmp(type, "s8") == 0)
 247		size = sizeof(s8);
 248	else if (strcmp(type, "u8") == 0)
 249		size = sizeof(u8);
 250	else if (strcmp(type, "char") == 0)
 251		size = sizeof(char);
 252	else if (strcmp(type, "unsigned char") == 0)
 253		size = sizeof(unsigned char);
 254	else if (strcmp(type, "int") == 0)
 255		size = sizeof(int);
 256	else if (strcmp(type, "unsigned int") == 0)
 257		size = sizeof(unsigned int);
 258	else if (strcmp(type, "long") == 0)
 259		size = sizeof(long);
 260	else if (strcmp(type, "unsigned long") == 0)
 261		size = sizeof(unsigned long);
 262	else if (strcmp(type, "bool") == 0)
 263		size = sizeof(bool);
 264	else if (strcmp(type, "pid_t") == 0)
 265		size = sizeof(pid_t);
 266	else if (strcmp(type, "gfp_t") == 0)
 267		size = sizeof(gfp_t);
 268	else if (synth_field_is_string(type))
 269		size = synth_field_string_size(type);
 270	else if (synth_field_is_stack(type))
 271		size = 0;
 272
 273	return size;
 274}
 275
 276static const char *synth_field_fmt(char *type)
 277{
 278	const char *fmt = "%llu";
 279
 280	if (strcmp(type, "s64") == 0)
 281		fmt = "%lld";
 282	else if (strcmp(type, "u64") == 0)
 283		fmt = "%llu";
 284	else if (strcmp(type, "s32") == 0)
 285		fmt = "%d";
 286	else if (strcmp(type, "u32") == 0)
 287		fmt = "%u";
 288	else if (strcmp(type, "s16") == 0)
 289		fmt = "%d";
 290	else if (strcmp(type, "u16") == 0)
 291		fmt = "%u";
 292	else if (strcmp(type, "s8") == 0)
 293		fmt = "%d";
 294	else if (strcmp(type, "u8") == 0)
 295		fmt = "%u";
 296	else if (strcmp(type, "char") == 0)
 297		fmt = "%d";
 298	else if (strcmp(type, "unsigned char") == 0)
 299		fmt = "%u";
 300	else if (strcmp(type, "int") == 0)
 301		fmt = "%d";
 302	else if (strcmp(type, "unsigned int") == 0)
 303		fmt = "%u";
 304	else if (strcmp(type, "long") == 0)
 305		fmt = "%ld";
 306	else if (strcmp(type, "unsigned long") == 0)
 307		fmt = "%lu";
 308	else if (strcmp(type, "bool") == 0)
 309		fmt = "%d";
 310	else if (strcmp(type, "pid_t") == 0)
 311		fmt = "%d";
 312	else if (strcmp(type, "gfp_t") == 0)
 313		fmt = "%x";
 314	else if (synth_field_is_string(type))
 315		fmt = "%.*s";
 316	else if (synth_field_is_stack(type))
 317		fmt = "%s";
 318
 319	return fmt;
 320}
 321
 322static void print_synth_event_num_val(struct trace_seq *s,
 323				      char *print_fmt, char *name,
 324				      int size, union trace_synth_field *val, char *space)
 325{
 326	switch (size) {
 327	case 1:
 328		trace_seq_printf(s, print_fmt, name, val->as_u8, space);
 329		break;
 330
 331	case 2:
 332		trace_seq_printf(s, print_fmt, name, val->as_u16, space);
 333		break;
 334
 335	case 4:
 336		trace_seq_printf(s, print_fmt, name, val->as_u32, space);
 337		break;
 338
 339	default:
 340		trace_seq_printf(s, print_fmt, name, val->as_u64, space);
 341		break;
 342	}
 343}
 344
/*
 * Render one synthetic event for trace output as
 * "name: field=val field=val ...".  Dynamic strings are read via
 * their offset/len descriptor, stack fields print one "=> symbol"
 * line per saved address, and gfp_t values are decoded into flag
 * names.
 */
static enum print_line_t print_synth_event(struct trace_iterator *iter,
					   int flags,
					   struct trace_event *event)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct synth_trace_event *entry;
	struct synth_event *se;
	unsigned int i, j, n_u64;
	char print_fmt[32];
	const char *fmt;

	entry = (struct synth_trace_event *)iter->ent;
	se = container_of(event, struct synth_event, call.event);

	trace_seq_printf(s, "%s: ", se->name);

	for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
		if (trace_seq_has_overflowed(s))
			goto end;

		fmt = synth_field_fmt(se->fields[i]->type);

		/* parameter types */
		if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
			trace_seq_printf(s, "%s ", fmt);

		snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);

		/* parameter values */
		if (se->fields[i]->is_string) {
			if (se->fields[i]->is_dynamic) {
				union trace_synth_field *data = &entry->fields[n_u64];

				/* Dynamic payload lives past the u64 area, at as_dynamic.offset. */
				trace_seq_printf(s, print_fmt, se->fields[i]->name,
						 STR_VAR_LEN_MAX,
						 (char *)entry + data->as_dynamic.offset,
						 i == se->n_fields - 1 ? "" : " ");
				n_u64++;
			} else {
				/* Static string occupies STR_VAR_LEN_MAX bytes of slots. */
				trace_seq_printf(s, print_fmt, se->fields[i]->name,
						 STR_VAR_LEN_MAX,
						 (char *)&entry->fields[n_u64].as_u64,
						 i == se->n_fields - 1 ? "" : " ");
				n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
			}
		} else if (se->fields[i]->is_stack) {
			union trace_synth_field *data = &entry->fields[n_u64];
			unsigned long *p = (void *)entry + data->as_dynamic.offset;

			trace_seq_printf(s, "%s=STACK:\n", se->fields[i]->name);
			/* Entry 0 holds the element count; addresses start at index 1. */
			for (j = 1; j < data->as_dynamic.len / sizeof(long); j++)
				trace_seq_printf(s, "=> %pS\n", (void *)p[j]);
			n_u64++;
		} else {
			struct trace_print_flags __flags[] = {
			    __def_gfpflag_names, {-1, NULL} };
			char *space = (i == se->n_fields - 1 ? "" : " ");

			print_synth_event_num_val(s, print_fmt,
						  se->fields[i]->name,
						  se->fields[i]->size,
						  &entry->fields[n_u64],
						  space);

			if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
				/* Decode gfp flags symbolically, e.g. " (GFP_KERNEL)". */
				trace_seq_puts(s, " (");
				trace_print_flags_seq(s, "|",
						      entry->fields[n_u64].as_u64,
						      __flags);
				trace_seq_putc(s, ')');
			}
			n_u64++;
		}
	}
end:
	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}
 425
/* Output callbacks used when rendering synthetic events. */
static struct trace_event_functions synth_event_funcs = {
	.trace		= print_synth_event
};
 429
/*
 * Copy one string field's payload into the ring-buffer entry.
 *
 * Dynamic strings are appended after the fixed u64 area, @data_size
 * bytes into the dynamic region, with an offset/len descriptor stored
 * in the field's u64 slot; the appended length is returned.  Static
 * strings are copied (from user or kernel memory) into their
 * STR_VAR_LEN_MAX-byte slot area and 0 is returned.
 */
static unsigned int trace_string(struct synth_trace_event *entry,
				 struct synth_event *event,
				 char *str_val,
				 bool is_dynamic,
				 unsigned int data_size,
				 unsigned int *n_u64)
{
	unsigned int len = 0;
	char *str_field;
	int ret;

	if (is_dynamic) {
		union trace_synth_field *data = &entry->fields[*n_u64];

		len = fetch_store_strlen((unsigned long)str_val);
		data->as_dynamic.offset = struct_size(entry, fields, event->n_u64) + data_size;
		data->as_dynamic.len = len;

		/* NOTE(review): ret is not checked on this path — confirm failure leaves a consistent offset/len pair. */
		ret = fetch_store_string((unsigned long)str_val, &entry->fields[*n_u64], entry);

		(*n_u64)++;
	} else {
		str_field = (char *)&entry->fields[*n_u64].as_u64;

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		/* Addresses below TASK_SIZE are userspace on these arches. */
		if ((unsigned long)str_val < TASK_SIZE)
			ret = strncpy_from_user_nofault(str_field, (const void __user *)str_val, STR_VAR_LEN_MAX);
		else
#endif
			ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX);

		if (ret < 0)
			strcpy(str_field, FAULT_STRING);

		(*n_u64) += STR_VAR_LEN_MAX / sizeof(u64);
	}

	return len;
}
 469
/*
 * Copy a stack-trace field into the dynamic area of the entry and
 * store its offset/len descriptor in the field's u64 slot.  @stack is
 * scanned up to HIST_STACKTRACE_DEPTH entries, stopping at the first
 * zero address.  Returns the number of bytes appended.
 */
static unsigned int trace_stack(struct synth_trace_event *entry,
				 struct synth_event *event,
				 long *stack,
				 unsigned int data_size,
				 unsigned int *n_u64)
{
	union trace_synth_field *data = &entry->fields[*n_u64];
	unsigned int len;
	u32 data_offset;
	void *data_loc;

	/* Dynamic data starts after the fixed u64 slots plus prior payloads. */
	data_offset = struct_size(entry, fields, event->n_u64);
	data_offset += data_size;

	for (len = 0; len < HIST_STACKTRACE_DEPTH; len++) {
		if (!stack[len])
			break;
	}

	len *= sizeof(long);

	/* Find the dynamic section to copy the stack into. */
	data_loc = (void *)entry + data_offset;
	memcpy(data_loc, stack, len);

	/* Fill in the field that holds the offset/len combo */

	data->as_dynamic.offset = data_offset;
	data->as_dynamic.len = len;

	(*n_u64)++;

	return len;
}
 504
/*
 * Tracepoint probe for synthetic events.  Computes the total entry
 * size (fixed u64 area plus dynamic string/stack payloads), reserves
 * ring-buffer space, and copies each field value from @var_ref_vals
 * (indexed through @var_ref_idx) into the entry.
 */
static notrace void trace_event_raw_event_synth(void *__data,
						u64 *var_ref_vals,
						unsigned int *var_ref_idx)
{
	unsigned int i, n_u64, val_idx, len, data_size = 0;
	struct trace_event_file *trace_file = __data;
	struct synth_trace_event *entry;
	struct trace_event_buffer fbuffer;
	struct trace_buffer *buffer;
	struct synth_event *event;
	int fields_size = 0;

	event = trace_file->event_call->data;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	fields_size = event->n_u64 * sizeof(u64);

	/* First pass: add the variable-length payload sizes. */
	for (i = 0; i < event->n_dynamic_fields; i++) {
		unsigned int field_pos = event->dynamic_fields[i]->field_pos;
		char *str_val;

		val_idx = var_ref_idx[field_pos];
		str_val = (char *)(long)var_ref_vals[val_idx];

		if (event->dynamic_fields[i]->is_stack) {
			/* reserve one extra element for size */
			len = *((unsigned long *)str_val) + 1;
			len *= sizeof(unsigned long);
		} else {
			len = fetch_store_strlen((unsigned long)str_val);
		}

		fields_size += len;
	}

	/*
	 * Avoid ring buffer recursion detection, as this event
	 * is being performed within another event.
	 */
	buffer = trace_file->tr->array_buffer.buffer;
	ring_buffer_nest_start(buffer);

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry) + fields_size);
	if (!entry)
		goto out;

	/* Second pass: copy each field value into the entry. */
	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
		val_idx = var_ref_idx[i];
		if (event->fields[i]->is_string) {
			char *str_val = (char *)(long)var_ref_vals[val_idx];

			len = trace_string(entry, event, str_val,
					   event->fields[i]->is_dynamic,
					   data_size, &n_u64);
			data_size += len; /* only dynamic string increments */
		} else if (event->fields[i]->is_stack) {
			long *stack = (long *)(long)var_ref_vals[val_idx];

			len = trace_stack(entry, event, stack,
					   data_size, &n_u64);
			data_size += len;
		} else {
			struct synth_field *field = event->fields[i];
			u64 val = var_ref_vals[val_idx];

			/* Store through the union member matching the field size. */
			switch (field->size) {
			case 1:
				entry->fields[n_u64].as_u8 = (u8)val;
				break;

			case 2:
				entry->fields[n_u64].as_u16 = (u16)val;
				break;

			case 4:
				entry->fields[n_u64].as_u32 = (u32)val;
				break;

			default:
				entry->fields[n_u64].as_u64 = val;
				break;
			}
			n_u64++;
		}
	}

	trace_event_buffer_commit(&fbuffer);
out:
	ring_buffer_nest_end(buffer);
}
 598
 599static void free_synth_event_print_fmt(struct trace_event_call *call)
 600{
 601	if (call) {
 602		kfree(call->print_fmt);
 603		call->print_fmt = NULL;
 604	}
 605}
 606
/*
 * Build the print_fmt string for a synthetic event: a quoted format
 * with one "name=%fmt" per field, followed by the matching argument
 * list.  When @len is 0 (buf may be NULL), only the needed length is
 * computed.  Returns the length of the print_fmt.
 */
static int __set_synth_event_print_fmt(struct synth_event *event,
				       char *buf, int len)
{
	const char *fmt;
	int pos = 0;
	int i;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < event->n_fields; i++) {
		fmt = synth_field_fmt(event->fields[i]->type);
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
				event->fields[i]->name, fmt,
				i == event->n_fields - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	/* Argument list: dynamic strings and stacks need accessor macros. */
	for (i = 0; i < event->n_fields; i++) {
		if (event->fields[i]->is_string &&
		    event->fields[i]->is_dynamic)
			pos += snprintf(buf + pos, LEN_OR_ZERO,
				", __get_str(%s)", event->fields[i]->name);
		else if (event->fields[i]->is_stack)
			pos += snprintf(buf + pos, LEN_OR_ZERO,
				", __get_stacktrace(%s)", event->fields[i]->name);
		else
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", REC->%s", event->fields[i]->name);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}
 644
 645static int set_synth_event_print_fmt(struct trace_event_call *call)
 646{
 647	struct synth_event *event = call->data;
 648	char *print_fmt;
 649	int len;
 650
 651	/* First: called with 0 length to calculate the needed length */
 652	len = __set_synth_event_print_fmt(event, NULL, 0);
 653
 654	print_fmt = kmalloc(len + 1, GFP_KERNEL);
 655	if (!print_fmt)
 656		return -ENOMEM;
 657
 658	/* Second: actually write the @print_fmt */
 659	__set_synth_event_print_fmt(event, print_fmt, len + 1);
 660	call->print_fmt = print_fmt;
 661
 662	return 0;
 663}
 664
/* Free a synth_field together with the type/name strings it owns. */
static void free_synth_field(struct synth_field *field)
{
	kfree(field->type);
	kfree(field->name);
	kfree(field);
}
 671
/* Return the command-format version required by this field (currently always 1). */
static int check_field_version(const char *prefix, const char *field_type,
			       const char *field_name)
{
	/*
	 * For backward compatibility, the old synthetic event command
	 * format did not require semicolons, and in order to not
	 * break user space, that old format must still work. If a new
	 * feature is added, then the format that uses the new feature
	 * will be required to have semicolons, as nothing that uses
	 * the old format would be using the new, yet to be created,
	 * feature. When a new feature is added, this will detect it,
	 * and return a number greater than 1, and require the format
	 * to use semicolons.
	 */
	return 1;
}
 688
 689static struct synth_field *parse_synth_field(int argc, char **argv,
 690					     int *consumed, int *field_version)
 691{
 692	const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
 693	struct synth_field *field;
 694	int len, ret = -ENOMEM;
 695	struct seq_buf s;
 696	ssize_t size;
 697
 698	if (!strcmp(field_type, "unsigned")) {
 699		if (argc < 3) {
 700			synth_err(SYNTH_ERR_INCOMPLETE_TYPE, errpos(field_type));
 701			return ERR_PTR(-EINVAL);
 702		}
 703		prefix = "unsigned ";
 704		field_type = argv[1];
 705		field_name = argv[2];
 706		*consumed += 3;
 707	} else {
 708		field_name = argv[1];
 709		*consumed += 2;
 710	}
 711
 712	if (!field_name) {
 713		synth_err(SYNTH_ERR_INVALID_FIELD, errpos(field_type));
 714		return ERR_PTR(-EINVAL);
 715	}
 716
 717	*field_version = check_field_version(prefix, field_type, field_name);
 718
 719	field = kzalloc(sizeof(*field), GFP_KERNEL);
 720	if (!field)
 721		return ERR_PTR(-ENOMEM);
 722
 723	len = strlen(field_name);
 724	array = strchr(field_name, '[');
 725	if (array)
 726		len -= strlen(array);
 727
 728	field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
 729	if (!field->name)
 730		goto free;
 731
 732	if (!is_good_name(field->name)) {
 733		synth_err(SYNTH_ERR_BAD_NAME, errpos(field_name));
 734		ret = -EINVAL;
 735		goto free;
 736	}
 737
 738	len = strlen(field_type) + 1;
 739
 740	if (array)
 741		len += strlen(array);
 742
 743	if (prefix)
 744		len += strlen(prefix);
 745
 746	field->type = kzalloc(len, GFP_KERNEL);
 747	if (!field->type)
 748		goto free;
 749
 750	seq_buf_init(&s, field->type, len);
 751	if (prefix)
 752		seq_buf_puts(&s, prefix);
 753	seq_buf_puts(&s, field_type);
 754	if (array)
 755		seq_buf_puts(&s, array);
 756	if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
 757		goto free;
 758
 759	s.buffer[s.len] = '\0';
 760
 761	size = synth_field_size(field->type);
 762	if (size < 0) {
 763		if (array)
 764			synth_err(SYNTH_ERR_INVALID_ARRAY_SPEC, errpos(field_name));
 765		else
 766			synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
 767		ret = -EINVAL;
 768		goto free;
 769	} else if (size == 0) {
 770		if (synth_field_is_string(field->type) ||
 771		    synth_field_is_stack(field->type)) {
 772			char *type;
 773
 774			len = sizeof("__data_loc ") + strlen(field->type) + 1;
 775			type = kzalloc(len, GFP_KERNEL);
 776			if (!type)
 777				goto free;
 778
 779			seq_buf_init(&s, type, len);
 780			seq_buf_puts(&s, "__data_loc ");
 781			seq_buf_puts(&s, field->type);
 782
 783			if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
 784				goto free;
 785			s.buffer[s.len] = '\0';
 786
 787			kfree(field->type);
 788			field->type = type;
 789
 790			field->is_dynamic = true;
 791			size = sizeof(u64);
 792		} else {
 793			synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
 794			ret = -EINVAL;
 795			goto free;
 796		}
 797	}
 798	field->size = size;
 799
 800	if (synth_field_is_string(field->type))
 801		field->is_string = true;
 802	else if (synth_field_is_stack(field->type))
 803		field->is_stack = true;
 804
 805	field->is_signed = synth_field_signed(field->type);
 806 out:
 807	return field;
 808 free:
 809	free_synth_field(field);
 810	field = ERR_PTR(ret);
 811	goto out;
 812}
 813
 814static void free_synth_tracepoint(struct tracepoint *tp)
 815{
 816	if (!tp)
 817		return;
 818
 819	kfree(tp->name);
 820	kfree(tp);
 821}
 822
 823static struct tracepoint *alloc_synth_tracepoint(char *name)
 824{
 825	struct tracepoint *tp;
 826
 827	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
 828	if (!tp)
 829		return ERR_PTR(-ENOMEM);
 830
 831	tp->name = kstrdup(name, GFP_KERNEL);
 832	if (!tp->name) {
 833		kfree(tp);
 834		return ERR_PTR(-ENOMEM);
 835	}
 836
 837	return tp;
 838}
 839
/*
 * Look up a registered synthetic event by name; returns NULL if not
 * found.  (Presumably the caller must hold the dyn_event list lock
 * while walking — confirm against dyn_event conventions.)
 */
struct synth_event *find_synth_event(const char *name)
{
	struct dyn_event *pos;
	struct synth_event *event;

	for_each_dyn_event(pos) {
		if (!is_synth_event(pos))
			continue;
		event = to_synth_event(pos);
		if (strcmp(event->name, name) == 0)
			return event;
	}

	return NULL;
}
 855
/* Fields are defined dynamically, via synth_event_define_fields(). */
static struct trace_event_fields synth_event_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = synth_event_define_fields },
	{}
};
 861
/*
 * Register @event with the trace-event core: set up its class and
 * tracepoint, register the output event, add the event call, and
 * build its print_fmt.  On failure, teardown of whatever was set up
 * here is handled via the goto paths (allocated members are left for
 * free_synth_event() to release).
 */
static int register_synth_event(struct synth_event *event)
{
	struct trace_event_call *call = &event->call;
	int ret = 0;

	event->call.class = &event->class;
	event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
	if (!event->class.system) {
		ret = -ENOMEM;
		goto out;
	}

	event->tp = alloc_synth_tracepoint(event->name);
	if (IS_ERR(event->tp)) {
		ret = PTR_ERR(event->tp);
		event->tp = NULL;
		goto out;
	}

	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &synth_event_funcs;
	call->class->fields_array = synth_event_fields_array;

	/* register_trace_event() returns the assigned type, 0 on failure. */
	ret = register_trace_event(&call->event);
	if (!ret) {
		ret = -ENODEV;
		goto out;
	}
	call->flags = TRACE_EVENT_FL_TRACEPOINT;
	call->class->reg = trace_event_reg;
	call->class->probe = trace_event_raw_event_synth;
	call->data = event;
	call->tp = event->tp;

	ret = trace_add_event_call(call);
	if (ret) {
		pr_warn("Failed to register synthetic event: %s\n",
			trace_event_name(call));
		goto err;
	}

	ret = set_synth_event_print_fmt(call);
	/* unregister_trace_event() will be called inside */
	if (ret < 0)
		trace_remove_event_call(call);
 out:
	return ret;
 err:
	unregister_trace_event(&call->event);
	goto out;
}
 913
 914static int unregister_synth_event(struct synth_event *event)
 915{
 916	struct trace_event_call *call = &event->call;
 917	int ret;
 918
 919	ret = trace_remove_event_call(call);
 920
 921	return ret;
 922}
 923
/*
 * Free a synth_event and everything it owns: fields, field arrays,
 * name, class system string, tracepoint, and print_fmt.  NULL-call
 * tolerant, and safe on partially constructed events (kfree/helpers
 * all accept NULL members).
 */
static void free_synth_event(struct synth_event *event)
{
	unsigned int i;

	if (!event)
		return;

	for (i = 0; i < event->n_fields; i++)
		free_synth_field(event->fields[i]);

	kfree(event->fields);
	kfree(event->dynamic_fields);
	kfree(event->name);
	kfree(event->class.system);
	free_synth_tracepoint(event->tp);
	free_synth_event_print_fmt(&event->call);
	kfree(event);
}
 942
/*
 * Allocate a synth_event named @name and take ownership of the
 * @n_fields parsed fields.  Also builds the dynamic_fields index
 * (pointers into fields[] for dynamically-sized entries) and assigns
 * each field its position.  Returns ERR_PTR(-ENOMEM) on failure; on
 * failure the caller retains ownership of @fields.
 */
static struct synth_event *alloc_synth_event(const char *name, int n_fields,
					     struct synth_field **fields)
{
	unsigned int i, j, n_dynamic_fields = 0;
	struct synth_event *event;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event) {
		event = ERR_PTR(-ENOMEM);
		goto out;
	}

	event->name = kstrdup(name, GFP_KERNEL);
	if (!event->name) {
		kfree(event);
		event = ERR_PTR(-ENOMEM);
		goto out;
	}

	event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
	if (!event->fields) {
		free_synth_event(event);
		event = ERR_PTR(-ENOMEM);
		goto out;
	}

	for (i = 0; i < n_fields; i++)
		if (fields[i]->is_dynamic)
			n_dynamic_fields++;

	if (n_dynamic_fields) {
		event->dynamic_fields = kcalloc(n_dynamic_fields,
						sizeof(*event->dynamic_fields),
						GFP_KERNEL);
		if (!event->dynamic_fields) {
			free_synth_event(event);
			event = ERR_PTR(-ENOMEM);
			goto out;
		}
	}

	dyn_event_init(&event->devent, &synth_event_ops);

	for (i = 0, j = 0; i < n_fields; i++) {
		fields[i]->field_pos = i;
		event->fields[i] = fields[i];

		if (fields[i]->is_dynamic)
			event->dynamic_fields[j++] = fields[i];
	}
	event->n_dynamic_fields = j;
	event->n_fields = n_fields;
 out:
	return event;
}
 998
 999static int synth_event_check_arg_fn(void *data)
1000{
1001	struct dynevent_arg_pair *arg_pair = data;
1002	int size;
1003
1004	size = synth_field_size((char *)arg_pair->lhs);
1005	if (size == 0) {
1006		if (strstr((char *)arg_pair->lhs, "["))
1007			return 0;
1008	}
1009
1010	return size ? 0 : -EINVAL;
1011}
1012
1013/**
1014 * synth_event_add_field - Add a new field to a synthetic event cmd
1015 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1016 * @type: The type of the new field to add
1017 * @name: The name of the new field to add
1018 *
1019 * Add a new field to a synthetic event cmd object.  Field ordering is in
1020 * the same order the fields are added.
1021 *
1022 * See synth_field_size() for available types. If field_name contains
1023 * [n] the field is considered to be an array.
1024 *
1025 * Return: 0 if successful, error otherwise.
1026 */
1027int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
1028			  const char *name)
1029{
1030	struct dynevent_arg_pair arg_pair;
1031	int ret;
1032
1033	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1034		return -EINVAL;
1035
1036	if (!type || !name)
1037		return -EINVAL;
1038
1039	dynevent_arg_pair_init(&arg_pair, 0, ';');
1040
1041	arg_pair.lhs = type;
1042	arg_pair.rhs = name;
1043
1044	ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
1045	if (ret)
1046		return ret;
1047
1048	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1049		ret = -EINVAL;
1050
1051	return ret;
1052}
1053EXPORT_SYMBOL_GPL(synth_event_add_field);
1054
1055/**
1056 * synth_event_add_field_str - Add a new field to a synthetic event cmd
1057 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1058 * @type_name: The type and name of the new field to add, as a single string
1059 *
1060 * Add a new field to a synthetic event cmd object, as a single
1061 * string.  The @type_name string is expected to be of the form 'type
1062 * name', which will be appended by ';'.  No sanity checking is done -
1063 * what's passed in is assumed to already be well-formed.  Field
1064 * ordering is in the same order the fields are added.
1065 *
1066 * See synth_field_size() for available types. If field_name contains
1067 * [n] the field is considered to be an array.
1068 *
1069 * Return: 0 if successful, error otherwise.
1070 */
1071int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
1072{
1073	struct dynevent_arg arg;
1074	int ret;
1075
1076	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1077		return -EINVAL;
1078
1079	if (!type_name)
1080		return -EINVAL;
1081
1082	dynevent_arg_init(&arg, ';');
1083
1084	arg.str = type_name;
1085
1086	ret = dynevent_arg_add(cmd, &arg, NULL);
1087	if (ret)
1088		return ret;
1089
1090	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1091		ret = -EINVAL;
1092
1093	return ret;
1094}
1095EXPORT_SYMBOL_GPL(synth_event_add_field_str);
1096
1097/**
1098 * synth_event_add_fields - Add multiple fields to a synthetic event cmd
1099 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1100 * @fields: An array of type/name field descriptions
1101 * @n_fields: The number of field descriptions contained in the fields array
1102 *
1103 * Add a new set of fields to a synthetic event cmd object.  The event
1104 * fields that will be defined for the event should be passed in as an
1105 * array of struct synth_field_desc, and the number of elements in the
1106 * array passed in as n_fields.  Field ordering will retain the
1107 * ordering given in the fields array.
1108 *
1109 * See synth_field_size() for available types. If field_name contains
1110 * [n] the field is considered to be an array.
1111 *
1112 * Return: 0 if successful, error otherwise.
1113 */
1114int synth_event_add_fields(struct dynevent_cmd *cmd,
1115			   struct synth_field_desc *fields,
1116			   unsigned int n_fields)
1117{
1118	unsigned int i;
1119	int ret = 0;
1120
1121	for (i = 0; i < n_fields; i++) {
1122		if (fields[i].type == NULL || fields[i].name == NULL) {
1123			ret = -EINVAL;
1124			break;
1125		}
1126
1127		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1128		if (ret)
1129			break;
1130	}
1131
1132	return ret;
1133}
1134EXPORT_SYMBOL_GPL(synth_event_add_fields);
1135
1136/**
1137 * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
1138 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1139 * @name: The name of the synthetic event
1140 * @mod: The module creating the event, NULL if not created from a module
1141 * @...: Variable number of arg (pairs), one pair for each field
1142 *
1143 * NOTE: Users normally won't want to call this function directly, but
1144 * rather use the synth_event_gen_cmd_start() wrapper, which
1145 * automatically adds a NULL to the end of the arg list.  If this
1146 * function is used directly, make sure the last arg in the variable
1147 * arg list is NULL.
1148 *
1149 * Generate a synthetic event command to be executed by
1150 * synth_event_gen_cmd_end().  This function can be used to generate
1151 * the complete command or only the first part of it; in the latter
1152 * case, synth_event_add_field(), synth_event_add_field_str(), or
1153 * synth_event_add_fields() can be used to add more fields following
1154 * this.
1155 *
1156 * There should be an even number variable args, each pair consisting
1157 * of a type followed by a field name.
1158 *
1159 * See synth_field_size() for available types. If field_name contains
1160 * [n] the field is considered to be an array.
1161 *
1162 * Return: 0 if successful, error otherwise.
1163 */
1164int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
1165				struct module *mod, ...)
1166{
1167	struct dynevent_arg arg;
1168	va_list args;
1169	int ret;
1170
1171	cmd->event_name = name;
1172	cmd->private_data = mod;
1173
1174	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1175		return -EINVAL;
1176
1177	dynevent_arg_init(&arg, 0);
1178	arg.str = name;
1179	ret = dynevent_arg_add(cmd, &arg, NULL);
1180	if (ret)
1181		return ret;
1182
1183	va_start(args, mod);
1184	for (;;) {
1185		const char *type, *name;
1186
1187		type = va_arg(args, const char *);
1188		if (!type)
1189			break;
1190		name = va_arg(args, const char *);
1191		if (!name)
1192			break;
1193
1194		if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
1195			ret = -EINVAL;
1196			break;
1197		}
1198
1199		ret = synth_event_add_field(cmd, type, name);
1200		if (ret)
1201			break;
1202	}
1203	va_end(args);
1204
1205	return ret;
1206}
1207EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
1208
1209/**
1210 * synth_event_gen_cmd_array_start - Start synthetic event command from an array
1211 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1212 * @name: The name of the synthetic event
1213 * @mod: The module creating the event, NULL if not created from a module
1214 * @fields: An array of type/name field descriptions
1215 * @n_fields: The number of field descriptions contained in the fields array
1216 *
1217 * Generate a synthetic event command to be executed by
1218 * synth_event_gen_cmd_end().  This function can be used to generate
1219 * the complete command or only the first part of it; in the latter
1220 * case, synth_event_add_field(), synth_event_add_field_str(), or
1221 * synth_event_add_fields() can be used to add more fields following
1222 * this.
1223 *
1224 * The event fields that will be defined for the event should be
1225 * passed in as an array of struct synth_field_desc, and the number of
1226 * elements in the array passed in as n_fields.  Field ordering will
1227 * retain the ordering given in the fields array.
1228 *
1229 * See synth_field_size() for available types. If field_name contains
1230 * [n] the field is considered to be an array.
1231 *
1232 * Return: 0 if successful, error otherwise.
1233 */
1234int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
1235				    struct module *mod,
1236				    struct synth_field_desc *fields,
1237				    unsigned int n_fields)
1238{
1239	struct dynevent_arg arg;
1240	unsigned int i;
1241	int ret = 0;
1242
1243	cmd->event_name = name;
1244	cmd->private_data = mod;
1245
1246	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1247		return -EINVAL;
1248
1249	if (n_fields > SYNTH_FIELDS_MAX)
1250		return -EINVAL;
1251
1252	dynevent_arg_init(&arg, 0);
1253	arg.str = name;
1254	ret = dynevent_arg_add(cmd, &arg, NULL);
1255	if (ret)
1256		return ret;
1257
1258	for (i = 0; i < n_fields; i++) {
1259		if (fields[i].type == NULL || fields[i].name == NULL)
1260			return -EINVAL;
1261
1262		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1263		if (ret)
1264			break;
1265	}
1266
1267	return ret;
1268}
1269EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);
1270
/*
 * Parse a synthetic event definition and register the event.
 *
 * @name: the (already-extracted) event name
 * @raw_fields: the remainder of the command, i.e. "type name[;type name]..."
 *
 * Called with no locks held; takes event_mutex itself.  On any error
 * after fields have been parsed, all parsed fields are freed before
 * returning.  Return: 0 on success, negative errno otherwise.
 */
static int __create_synth_event(const char *name, const char *raw_fields)
{
	char **argv, *field_str, *tmp_fields, *saved_fields = NULL;
	struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
	int consumed, cmd_version = 1, n_fields_this_loop;
	int i, argc, n_fields = 0, ret = 0;
	struct synth_event *event = NULL;

	/*
	 * Argument syntax:
	 *  - Add synthetic event: <event_name> field[;field] ...
	 *  - Remove synthetic event: !<event_name> field[;field] ...
	 *      where 'field' = type field_name
	 */

	if (name[0] == '\0') {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return -EINVAL;
	}

	if (!is_good_name(name)) {
		synth_err(SYNTH_ERR_BAD_NAME, errpos(name));
		return -EINVAL;
	}

	mutex_lock(&event_mutex);

	event = find_synth_event(name);
	if (event) {
		synth_err(SYNTH_ERR_EVENT_EXISTS, errpos(name));
		ret = -EEXIST;
		goto err;
	}

	/* strsep() consumes the string, so keep a copy to kfree() later. */
	tmp_fields = saved_fields = kstrdup(raw_fields, GFP_KERNEL);
	if (!tmp_fields) {
		ret = -ENOMEM;
		goto err;
	}

	/* Walk each ';'-separated field definition. */
	while ((field_str = strsep(&tmp_fields, ";")) != NULL) {
		argv = argv_split(GFP_KERNEL, field_str, &argc);
		if (!argv) {
			ret = -ENOMEM;
			goto err;
		}

		/* Empty segment (e.g. trailing ';') - nothing to parse. */
		if (!argc) {
			argv_free(argv);
			continue;
		}

		n_fields_this_loop = 0;
		consumed = 0;
		/* A v1 segment may hold several fields; parse them all. */
		while (argc > consumed) {
			int field_version;

			field = parse_synth_field(argc - consumed,
						  argv + consumed, &consumed,
						  &field_version);
			if (IS_ERR(field)) {
				ret = PTR_ERR(field);
				goto err_free_arg;
			}

			/*
			 * Track the highest version of any field we
			 * found in the command.
			 */
			if (field_version > cmd_version)
				cmd_version = field_version;

			/*
			 * Now sort out what is and isn't valid for
			 * each supported version.
			 *
			 * If we see more than 1 field per loop, it
			 * means we have multiple fields between
			 * semicolons, and that's something we no
			 * longer support in a version 2 or greater
			 * command.
			 */
			if (cmd_version > 1 && n_fields_this_loop >= 1) {
				synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str));
				ret = -EINVAL;
				goto err_free_arg;
			}

			if (n_fields == SYNTH_FIELDS_MAX) {
				synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
				ret = -EINVAL;
				goto err_free_arg;
			}
			fields[n_fields++] = field;

			n_fields_this_loop++;
		}
		argv_free(argv);

		/* Leftover tokens the parser didn't consume: bad command. */
		if (consumed < argc) {
			synth_err(SYNTH_ERR_INVALID_CMD, 0);
			ret = -EINVAL;
			goto err;
		}

	}

	if (n_fields == 0) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		ret = -EINVAL;
		goto err;
	}

	/* alloc_synth_event() takes ownership of the fields on success. */
	event = alloc_synth_event(name, n_fields, fields);
	if (IS_ERR(event)) {
		ret = PTR_ERR(event);
		event = NULL;
		goto err;
	}
	ret = register_synth_event(event);
	if (!ret)
		dyn_event_add(&event->devent, &event->call);
	else
		free_synth_event(event);
 out:
	mutex_unlock(&event_mutex);

	kfree(saved_fields);

	return ret;
 err_free_arg:
	argv_free(argv);
 err:
	/* Free any fields parsed so far; event (if any) was freed above. */
	for (i = 0; i < n_fields; i++)
		free_synth_field(fields[i]);

	goto out;
}
1409
1410/**
1411 * synth_event_create - Create a new synthetic event
1412 * @name: The name of the new synthetic event
1413 * @fields: An array of type/name field descriptions
1414 * @n_fields: The number of field descriptions contained in the fields array
1415 * @mod: The module creating the event, NULL if not created from a module
1416 *
1417 * Create a new synthetic event with the given name under the
1418 * trace/events/synthetic/ directory.  The event fields that will be
1419 * defined for the event should be passed in as an array of struct
1420 * synth_field_desc, and the number elements in the array passed in as
1421 * n_fields. Field ordering will retain the ordering given in the
1422 * fields array.
1423 *
1424 * If the new synthetic event is being created from a module, the mod
1425 * param must be non-NULL.  This will ensure that the trace buffer
1426 * won't contain unreadable events.
1427 *
1428 * The new synth event should be deleted using synth_event_delete()
1429 * function.  The new synthetic event can be generated from modules or
1430 * other kernel code using trace_synth_event() and related functions.
1431 *
1432 * Return: 0 if successful, error otherwise.
1433 */
1434int synth_event_create(const char *name, struct synth_field_desc *fields,
1435		       unsigned int n_fields, struct module *mod)
1436{
1437	struct dynevent_cmd cmd;
1438	char *buf;
1439	int ret;
1440
1441	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
1442	if (!buf)
1443		return -ENOMEM;
1444
1445	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
1446
1447	ret = synth_event_gen_cmd_array_start(&cmd, name, mod,
1448					      fields, n_fields);
1449	if (ret)
1450		goto out;
1451
1452	ret = synth_event_gen_cmd_end(&cmd);
1453 out:
1454	kfree(buf);
1455
1456	return ret;
1457}
1458EXPORT_SYMBOL_GPL(synth_event_create);
1459
1460static int destroy_synth_event(struct synth_event *se)
1461{
1462	int ret;
1463
1464	if (se->ref)
1465		return -EBUSY;
1466
1467	if (trace_event_dyn_busy(&se->call))
1468		return -EBUSY;
1469
1470	ret = unregister_synth_event(se);
1471	if (!ret) {
1472		dyn_event_remove(&se->devent);
1473		free_synth_event(se);
1474	}
1475
1476	return ret;
1477}
1478
1479/**
1480 * synth_event_delete - Delete a synthetic event
1481 * @event_name: The name of the new synthetic event
1482 *
1483 * Delete a synthetic event that was created with synth_event_create().
1484 *
1485 * Return: 0 if successful, error otherwise.
1486 */
1487int synth_event_delete(const char *event_name)
1488{
1489	struct synth_event *se = NULL;
1490	struct module *mod = NULL;
1491	int ret = -ENOENT;
1492
1493	mutex_lock(&event_mutex);
1494	se = find_synth_event(event_name);
1495	if (se) {
1496		mod = se->mod;
1497		ret = destroy_synth_event(se);
1498	}
1499	mutex_unlock(&event_mutex);
1500
1501	if (mod) {
1502		/*
1503		 * It is safest to reset the ring buffer if the module
1504		 * being unloaded registered any events that were
1505		 * used. The only worry is if a new module gets
1506		 * loaded, and takes on the same id as the events of
1507		 * this module. When printing out the buffer, traced
1508		 * events left over from this module may be passed to
1509		 * the new module events and unexpected results may
1510		 * occur.
1511		 */
1512		tracing_reset_all_online_cpus();
1513	}
1514
1515	return ret;
1516}
1517EXPORT_SYMBOL_GPL(synth_event_delete);
1518
1519static int check_command(const char *raw_command)
1520{
1521	char **argv = NULL, *cmd, *saved_cmd, *name_and_field;
1522	int argc, ret = 0;
1523
1524	cmd = saved_cmd = kstrdup(raw_command, GFP_KERNEL);
1525	if (!cmd)
1526		return -ENOMEM;
1527
1528	name_and_field = strsep(&cmd, ";");
1529	if (!name_and_field) {
1530		ret = -EINVAL;
1531		goto free;
1532	}
1533
1534	if (name_and_field[0] == '!')
1535		goto free;
1536
1537	argv = argv_split(GFP_KERNEL, name_and_field, &argc);
1538	if (!argv) {
1539		ret = -ENOMEM;
1540		goto free;
1541	}
1542	argv_free(argv);
1543
1544	if (argc < 3)
1545		ret = -EINVAL;
1546free:
1547	kfree(saved_cmd);
1548
1549	return ret;
1550}
1551
1552static int create_or_delete_synth_event(const char *raw_command)
1553{
1554	char *name = NULL, *fields, *p;
1555	int ret = 0;
1556
1557	raw_command = skip_spaces(raw_command);
1558	if (raw_command[0] == '\0')
1559		return ret;
1560
1561	last_cmd_set(raw_command);
1562
1563	ret = check_command(raw_command);
1564	if (ret) {
1565		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1566		return ret;
1567	}
1568
1569	p = strpbrk(raw_command, " \t");
1570	if (!p && raw_command[0] != '!') {
1571		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1572		ret = -EINVAL;
1573		goto free;
1574	}
1575
1576	name = kmemdup_nul(raw_command, p ? p - raw_command : strlen(raw_command), GFP_KERNEL);
1577	if (!name)
1578		return -ENOMEM;
1579
1580	if (name[0] == '!') {
1581		ret = synth_event_delete(name + 1);
1582		goto free;
1583	}
1584
1585	fields = skip_spaces(p);
1586
1587	ret = __create_synth_event(name, fields);
1588free:
1589	kfree(name);
1590
1591	return ret;
1592}
1593
1594static int synth_event_run_command(struct dynevent_cmd *cmd)
1595{
1596	struct synth_event *se;
1597	int ret;
1598
1599	ret = create_or_delete_synth_event(cmd->seq.buffer);
1600	if (ret)
1601		return ret;
1602
1603	se = find_synth_event(cmd->event_name);
1604	if (WARN_ON(!se))
1605		return -ENOENT;
1606
1607	se->mod = cmd->private_data;
1608
1609	return ret;
1610}
1611
1612/**
1613 * synth_event_cmd_init - Initialize a synthetic event command object
1614 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1615 * @buf: A pointer to the buffer used to build the command
1616 * @maxlen: The length of the buffer passed in @buf
1617 *
1618 * Initialize a synthetic event command object.  Use this before
1619 * calling any of the other dyenvent_cmd functions.
1620 */
1621void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
1622{
1623	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
1624			  synth_event_run_command);
1625}
1626EXPORT_SYMBOL_GPL(synth_event_cmd_init);
1627
1628static inline int
1629__synth_event_trace_init(struct trace_event_file *file,
1630			 struct synth_event_trace_state *trace_state)
1631{
1632	int ret = 0;
1633
1634	memset(trace_state, '\0', sizeof(*trace_state));
1635
1636	/*
1637	 * Normal event tracing doesn't get called at all unless the
1638	 * ENABLED bit is set (which attaches the probe thus allowing
1639	 * this code to be called, etc).  Because this is called
1640	 * directly by the user, we don't have that but we still need
1641	 * to honor not logging when disabled.  For the iterated
1642	 * trace case, we save the enabled state upon start and just
1643	 * ignore the following data calls.
1644	 */
1645	if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
1646	    trace_trigger_soft_disabled(file)) {
1647		trace_state->disabled = true;
1648		ret = -ENOENT;
1649		goto out;
1650	}
1651
1652	trace_state->event = file->event_call->data;
1653out:
1654	return ret;
1655}
1656
1657static inline int
1658__synth_event_trace_start(struct trace_event_file *file,
1659			  struct synth_event_trace_state *trace_state,
1660			  int dynamic_fields_size)
1661{
1662	int entry_size, fields_size = 0;
1663	int ret = 0;
1664
1665	fields_size = trace_state->event->n_u64 * sizeof(u64);
1666	fields_size += dynamic_fields_size;
1667
1668	/*
1669	 * Avoid ring buffer recursion detection, as this event
1670	 * is being performed within another event.
1671	 */
1672	trace_state->buffer = file->tr->array_buffer.buffer;
1673	ring_buffer_nest_start(trace_state->buffer);
1674
1675	entry_size = sizeof(*trace_state->entry) + fields_size;
1676	trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
1677							file,
1678							entry_size);
1679	if (!trace_state->entry) {
1680		ring_buffer_nest_end(trace_state->buffer);
1681		ret = -EINVAL;
1682	}
1683
1684	return ret;
1685}
1686
1687static inline void
1688__synth_event_trace_end(struct synth_event_trace_state *trace_state)
1689{
1690	trace_event_buffer_commit(&trace_state->fbuffer);
1691
1692	ring_buffer_nest_end(trace_state->buffer);
1693}
1694
1695/**
1696 * synth_event_trace - Trace a synthetic event
1697 * @file: The trace_event_file representing the synthetic event
1698 * @n_vals: The number of values in vals
1699 * @...: Variable number of args containing the event values
1700 *
1701 * Trace a synthetic event using the values passed in the variable
1702 * argument list.
1703 *
1704 * The argument list should be a list 'n_vals' u64 values.  The number
1705 * of vals must match the number of field in the synthetic event, and
1706 * must be in the same order as the synthetic event fields.
1707 *
1708 * All vals should be cast to u64, and string vals are just pointers
1709 * to strings, cast to u64.  Strings will be copied into space
1710 * reserved in the event for the string, using these pointers.
1711 *
1712 * Return: 0 on success, err otherwise.
1713 */
1714int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
1715{
1716	unsigned int i, n_u64, len, data_size = 0;
1717	struct synth_event_trace_state state;
1718	va_list args;
1719	int ret;
1720
1721	ret = __synth_event_trace_init(file, &state);
1722	if (ret) {
1723		if (ret == -ENOENT)
1724			ret = 0; /* just disabled, not really an error */
1725		return ret;
1726	}
1727
1728	if (state.event->n_dynamic_fields) {
1729		va_start(args, n_vals);
1730
1731		for (i = 0; i < state.event->n_fields; i++) {
1732			u64 val = va_arg(args, u64);
1733
1734			if (state.event->fields[i]->is_string &&
1735			    state.event->fields[i]->is_dynamic) {
1736				char *str_val = (char *)(long)val;
1737
1738				data_size += strlen(str_val) + 1;
1739			}
1740		}
1741
1742		va_end(args);
1743	}
1744
1745	ret = __synth_event_trace_start(file, &state, data_size);
1746	if (ret)
1747		return ret;
1748
1749	if (n_vals != state.event->n_fields) {
1750		ret = -EINVAL;
1751		goto out;
1752	}
1753
1754	data_size = 0;
1755
1756	va_start(args, n_vals);
1757	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1758		u64 val;
1759
1760		val = va_arg(args, u64);
1761
1762		if (state.event->fields[i]->is_string) {
1763			char *str_val = (char *)(long)val;
1764
1765			len = trace_string(state.entry, state.event, str_val,
1766					   state.event->fields[i]->is_dynamic,
1767					   data_size, &n_u64);
1768			data_size += len; /* only dynamic string increments */
1769		} else {
1770			struct synth_field *field = state.event->fields[i];
1771
1772			switch (field->size) {
1773			case 1:
1774				state.entry->fields[n_u64].as_u8 = (u8)val;
1775				break;
1776
1777			case 2:
1778				state.entry->fields[n_u64].as_u16 = (u16)val;
1779				break;
1780
1781			case 4:
1782				state.entry->fields[n_u64].as_u32 = (u32)val;
1783				break;
1784
1785			default:
1786				state.entry->fields[n_u64].as_u64 = val;
1787				break;
1788			}
1789			n_u64++;
1790		}
1791	}
1792	va_end(args);
1793out:
1794	__synth_event_trace_end(&state);
1795
1796	return ret;
1797}
1798EXPORT_SYMBOL_GPL(synth_event_trace);
1799
1800/**
1801 * synth_event_trace_array - Trace a synthetic event from an array
1802 * @file: The trace_event_file representing the synthetic event
1803 * @vals: Array of values
1804 * @n_vals: The number of values in vals
1805 *
1806 * Trace a synthetic event using the values passed in as 'vals'.
1807 *
1808 * The 'vals' array is just an array of 'n_vals' u64.  The number of
1809 * vals must match the number of field in the synthetic event, and
1810 * must be in the same order as the synthetic event fields.
1811 *
1812 * All vals should be cast to u64, and string vals are just pointers
1813 * to strings, cast to u64.  Strings will be copied into space
1814 * reserved in the event for the string, using these pointers.
1815 *
1816 * Return: 0 on success, err otherwise.
1817 */
1818int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
1819			    unsigned int n_vals)
1820{
1821	unsigned int i, n_u64, field_pos, len, data_size = 0;
1822	struct synth_event_trace_state state;
1823	char *str_val;
1824	int ret;
1825
1826	ret = __synth_event_trace_init(file, &state);
1827	if (ret) {
1828		if (ret == -ENOENT)
1829			ret = 0; /* just disabled, not really an error */
1830		return ret;
1831	}
1832
1833	if (state.event->n_dynamic_fields) {
1834		for (i = 0; i < state.event->n_dynamic_fields; i++) {
1835			field_pos = state.event->dynamic_fields[i]->field_pos;
1836			str_val = (char *)(long)vals[field_pos];
1837			len = strlen(str_val) + 1;
1838			data_size += len;
1839		}
1840	}
1841
1842	ret = __synth_event_trace_start(file, &state, data_size);
1843	if (ret)
1844		return ret;
1845
1846	if (n_vals != state.event->n_fields) {
1847		ret = -EINVAL;
1848		goto out;
1849	}
1850
1851	data_size = 0;
1852
1853	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1854		if (state.event->fields[i]->is_string) {
1855			char *str_val = (char *)(long)vals[i];
1856
1857			len = trace_string(state.entry, state.event, str_val,
1858					   state.event->fields[i]->is_dynamic,
1859					   data_size, &n_u64);
1860			data_size += len; /* only dynamic string increments */
1861		} else {
1862			struct synth_field *field = state.event->fields[i];
1863			u64 val = vals[i];
1864
1865			switch (field->size) {
1866			case 1:
1867				state.entry->fields[n_u64].as_u8 = (u8)val;
1868				break;
1869
1870			case 2:
1871				state.entry->fields[n_u64].as_u16 = (u16)val;
1872				break;
1873
1874			case 4:
1875				state.entry->fields[n_u64].as_u32 = (u32)val;
1876				break;
1877
1878			default:
1879				state.entry->fields[n_u64].as_u64 = val;
1880				break;
1881			}
1882			n_u64++;
1883		}
1884	}
1885out:
1886	__synth_event_trace_end(&state);
1887
1888	return ret;
1889}
1890EXPORT_SYMBOL_GPL(synth_event_trace_array);
1891
1892/**
1893 * synth_event_trace_start - Start piecewise synthetic event trace
1894 * @file: The trace_event_file representing the synthetic event
1895 * @trace_state: A pointer to object tracking the piecewise trace state
1896 *
1897 * Start the trace of a synthetic event field-by-field rather than all
1898 * at once.
1899 *
1900 * This function 'opens' an event trace, which means space is reserved
1901 * for the event in the trace buffer, after which the event's
1902 * individual field values can be set through either
1903 * synth_event_add_next_val() or synth_event_add_val().
1904 *
1905 * A pointer to a trace_state object is passed in, which will keep
1906 * track of the current event trace state until the event trace is
1907 * closed (and the event finally traced) using
1908 * synth_event_trace_end().
1909 *
1910 * Note that synth_event_trace_end() must be called after all values
1911 * have been added for each event trace, regardless of whether adding
1912 * all field values succeeded or not.
1913 *
1914 * Note also that for a given event trace, all fields must be added
1915 * using either synth_event_add_next_val() or synth_event_add_val()
1916 * but not both together or interleaved.
1917 *
1918 * Return: 0 on success, err otherwise.
1919 */
1920int synth_event_trace_start(struct trace_event_file *file,
1921			    struct synth_event_trace_state *trace_state)
1922{
1923	int ret;
1924
1925	if (!trace_state)
1926		return -EINVAL;
1927
1928	ret = __synth_event_trace_init(file, trace_state);
1929	if (ret) {
1930		if (ret == -ENOENT)
1931			ret = 0; /* just disabled, not really an error */
1932		return ret;
1933	}
1934
1935	if (trace_state->event->n_dynamic_fields)
1936		return -ENOTSUPP;
1937
1938	ret = __synth_event_trace_start(file, trace_state, 0);
1939
1940	return ret;
1941}
1942EXPORT_SYMBOL_GPL(synth_event_trace_start);
1943
1944static int __synth_event_add_val(const char *field_name, u64 val,
1945				 struct synth_event_trace_state *trace_state)
1946{
1947	struct synth_field *field = NULL;
1948	struct synth_trace_event *entry;
1949	struct synth_event *event;
1950	int i, ret = 0;
1951
1952	if (!trace_state) {
1953		ret = -EINVAL;
1954		goto out;
1955	}
1956
1957	/* can't mix add_next_synth_val() with add_synth_val() */
1958	if (field_name) {
1959		if (trace_state->add_next) {
1960			ret = -EINVAL;
1961			goto out;
1962		}
1963		trace_state->add_name = true;
1964	} else {
1965		if (trace_state->add_name) {
1966			ret = -EINVAL;
1967			goto out;
1968		}
1969		trace_state->add_next = true;
1970	}
1971
1972	if (trace_state->disabled)
1973		goto out;
1974
1975	event = trace_state->event;
1976	if (trace_state->add_name) {
1977		for (i = 0; i < event->n_fields; i++) {
1978			field = event->fields[i];
1979			if (strcmp(field->name, field_name) == 0)
1980				break;
1981		}
1982		if (!field) {
1983			ret = -EINVAL;
1984			goto out;
1985		}
1986	} else {
1987		if (trace_state->cur_field >= event->n_fields) {
1988			ret = -EINVAL;
1989			goto out;
1990		}
1991		field = event->fields[trace_state->cur_field++];
1992	}
1993
1994	entry = trace_state->entry;
1995	if (field->is_string) {
1996		char *str_val = (char *)(long)val;
1997		char *str_field;
1998
1999		if (field->is_dynamic) { /* add_val can't do dynamic strings */
2000			ret = -EINVAL;
2001			goto out;
2002		}
2003
2004		if (!str_val) {
2005			ret = -EINVAL;
2006			goto out;
2007		}
2008
2009		str_field = (char *)&entry->fields[field->offset];
2010		strscpy(str_field, str_val, STR_VAR_LEN_MAX);
2011	} else {
2012		switch (field->size) {
2013		case 1:
2014			trace_state->entry->fields[field->offset].as_u8 = (u8)val;
2015			break;
2016
2017		case 2:
2018			trace_state->entry->fields[field->offset].as_u16 = (u16)val;
2019			break;
2020
2021		case 4:
2022			trace_state->entry->fields[field->offset].as_u32 = (u32)val;
2023			break;
2024
2025		default:
2026			trace_state->entry->fields[field->offset].as_u64 = val;
2027			break;
2028		}
2029	}
2030 out:
2031	return ret;
2032}
2033
2034/**
2035 * synth_event_add_next_val - Add the next field's value to an open synth trace
2036 * @val: The value to set the next field to
2037 * @trace_state: A pointer to object tracking the piecewise trace state
2038 *
2039 * Set the value of the next field in an event that's been opened by
2040 * synth_event_trace_start().
2041 *
2042 * The val param should be the value cast to u64.  If the value points
2043 * to a string, the val param should be a char * cast to u64.
2044 *
2045 * This function assumes all the fields in an event are to be set one
2046 * after another - successive calls to this function are made, one for
2047 * each field, in the order of the fields in the event, until all
2048 * fields have been set.  If you'd rather set each field individually
2049 * without regard to ordering, synth_event_add_val() can be used
2050 * instead.
2051 *
2052 * Note however that synth_event_add_next_val() and
2053 * synth_event_add_val() can't be intermixed for a given event trace -
2054 * one or the other but not both can be used at the same time.
2055 *
2056 * Note also that synth_event_trace_end() must be called after all
2057 * values have been added for each event trace, regardless of whether
2058 * adding all field values succeeded or not.
2059 *
2060 * Return: 0 on success, err otherwise.
2061 */
2062int synth_event_add_next_val(u64 val,
2063			     struct synth_event_trace_state *trace_state)
2064{
2065	return __synth_event_add_val(NULL, val, trace_state);
2066}
2067EXPORT_SYMBOL_GPL(synth_event_add_next_val);
2068
2069/**
2070 * synth_event_add_val - Add a named field's value to an open synth trace
2071 * @field_name: The name of the synthetic event field value to set
2072 * @val: The value to set the named field to
2073 * @trace_state: A pointer to object tracking the piecewise trace state
2074 *
2075 * Set the value of the named field in an event that's been opened by
2076 * synth_event_trace_start().
2077 *
2078 * The val param should be the value cast to u64.  If the value points
2079 * to a string, the val param should be a char * cast to u64.
2080 *
2081 * This function looks up the field name, and if found, sets the field
2082 * to the specified value.  This lookup makes this function more
2083 * expensive than synth_event_add_next_val(), so use that or the
2084 * none-piecewise synth_event_trace() instead if efficiency is more
2085 * important.
2086 *
2087 * Note however that synth_event_add_next_val() and
2088 * synth_event_add_val() can't be intermixed for a given event trace -
2089 * one or the other but not both can be used at the same time.
2090 *
2091 * Note also that synth_event_trace_end() must be called after all
2092 * values have been added for each event trace, regardless of whether
2093 * adding all field values succeeded or not.
2094 *
2095 * Return: 0 on success, err otherwise.
2096 */
2097int synth_event_add_val(const char *field_name, u64 val,
2098			struct synth_event_trace_state *trace_state)
2099{
2100	return __synth_event_add_val(field_name, val, trace_state);
2101}
2102EXPORT_SYMBOL_GPL(synth_event_add_val);
2103
2104/**
2105 * synth_event_trace_end - End piecewise synthetic event trace
2106 * @trace_state: A pointer to object tracking the piecewise trace state
2107 *
2108 * End the trace of a synthetic event opened by
2109 * synth_event_trace__start().
2110 *
2111 * This function 'closes' an event trace, which basically means that
2112 * it commits the reserved event and cleans up other loose ends.
2113 *
2114 * A pointer to a trace_state object is passed in, which will keep
2115 * track of the current event trace state opened with
2116 * synth_event_trace_start().
2117 *
2118 * Note that this function must be called after all values have been
2119 * added for each event trace, regardless of whether adding all field
2120 * values succeeded or not.
2121 *
2122 * Return: 0 on success, err otherwise.
2123 */
2124int synth_event_trace_end(struct synth_event_trace_state *trace_state)
2125{
2126	if (!trace_state)
2127		return -EINVAL;
2128
2129	__synth_event_trace_end(trace_state);
2130
2131	return 0;
2132}
2133EXPORT_SYMBOL_GPL(synth_event_trace_end);
2134
/*
 * dyn_event create callback: handle "s:[synthetic/]<name> field[;field]..."
 * written to the dynamic_events file.  Returns -ECANCELED (not an
 * error) if the command isn't for the synthetic-event system, so other
 * dyn_event providers get a chance to parse it.
 */
static int create_synth_event(const char *raw_command)
{
	char *fields, *p;
	const char *name;
	int len, ret = 0;

	raw_command = skip_spaces(raw_command);
	if (raw_command[0] == '\0')
		return ret;

	last_cmd_set(raw_command);

	name = raw_command;

	/* Don't try to process if not our system */
	if (name[0] != 's' || name[1] != ':')
		return -ECANCELED;
	name += 2;

	/* Everything after the first whitespace is the field list. */
	p = strpbrk(raw_command, " \t");
	if (!p) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return -EINVAL;
	}

	fields = skip_spaces(p);

	/* This interface accepts group name prefix */
	if (strchr(name, '/')) {
		len = str_has_prefix(name, SYNTH_SYSTEM "/");
		if (len == 0) {
			synth_err(SYNTH_ERR_INVALID_CMD, 0);
			return -EINVAL;
		}
		name += len;
	}

	/* Offset of the bare event name within raw_command. */
	len = name - raw_command;

	ret = check_command(raw_command + len);
	if (ret) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return ret;
	}

	/* 'name' now switches from alias into raw_command to owned copy. */
	name = kmemdup_nul(raw_command + len, p - raw_command - len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	ret = __create_synth_event(name, fields);

	kfree(name);

	return ret;
}
2190
2191static int synth_event_release(struct dyn_event *ev)
2192{
2193	struct synth_event *event = to_synth_event(ev);
2194	int ret;
2195
2196	if (event->ref)
2197		return -EBUSY;
2198
2199	if (trace_event_dyn_busy(&event->call))
2200		return -EBUSY;
2201
2202	ret = unregister_synth_event(event);
2203	if (ret)
2204		return ret;
2205
2206	dyn_event_remove(ev);
2207	free_synth_event(event);
2208	return 0;
2209}
2210
2211static int __synth_event_show(struct seq_file *m, struct synth_event *event)
2212{
2213	struct synth_field *field;
2214	unsigned int i;
2215	char *type, *t;
2216
2217	seq_printf(m, "%s\t", event->name);
2218
2219	for (i = 0; i < event->n_fields; i++) {
2220		field = event->fields[i];
2221
2222		type = field->type;
2223		t = strstr(type, "__data_loc");
2224		if (t) { /* __data_loc belongs in format but not event desc */
2225			t += sizeof("__data_loc");
2226			type = t;
2227		}
2228
2229		/* parameter values */
2230		seq_printf(m, "%s %s%s", type, field->name,
2231			   i == event->n_fields - 1 ? "" : "; ");
2232	}
2233
2234	seq_putc(m, '\n');
2235
2236	return 0;
2237}
2238
2239static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
2240{
2241	struct synth_event *event = to_synth_event(ev);
2242
2243	seq_printf(m, "s:%s/", event->class.system);
2244
2245	return __synth_event_show(m, event);
2246}
2247
/*
 * synth_events_seq_show - seq_file ->show for the synthetic_events file.
 * The dyn_event list is shared by all dynamic-event types, so entries
 * owned by other subsystems are silently skipped.
 */
static int synth_events_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	return is_synth_event(ev) ? __synth_event_show(m, to_synth_event(ev)) : 0;
}
2257
/*
 * seq_file iterator for the synthetic_events file: walk the shared
 * dyn_event list; ->show filters down to synthetic events only.
 */
static const struct seq_operations synth_events_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= synth_events_seq_show,
};
2264
2265static int synth_events_open(struct inode *inode, struct file *file)
2266{
2267	int ret;
2268
2269	ret = security_locked_down(LOCKDOWN_TRACEFS);
2270	if (ret)
2271		return ret;
2272
2273	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
2274		ret = dyn_events_release_all(&synth_event_ops);
2275		if (ret < 0)
2276			return ret;
2277	}
2278
2279	return seq_open(file, &synth_events_seq_op);
2280}
2281
/*
 * synth_events_write - write handler for the synthetic_events file.
 * Hands the user buffer to the generic command parser, which invokes
 * create_or_delete_synth_event() once per newline/';'-terminated command.
 */
static ssize_t synth_events_write(struct file *file,
				  const char __user *buffer,
				  size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_or_delete_synth_event);
}
2289
/* File operations for <tracefs>/synthetic_events (seq_file backed) */
static const struct file_operations synth_events_fops = {
	.open           = synth_events_open,
	.write		= synth_events_write,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};
2297
2298/*
2299 * Register dynevent at core_initcall. This allows kernel to setup kprobe
2300 * events in postcore_initcall without tracefs.
2301 */
2302static __init int trace_events_synth_init_early(void)
2303{
2304	int err = 0;
2305
2306	err = dyn_event_register(&synth_event_ops);
2307	if (err)
2308		pr_warn("Could not register synth_event_ops\n");
2309
2310	return err;
2311}
2312core_initcall(trace_events_synth_init_early);
2313
2314static __init int trace_events_synth_init(void)
2315{
2316	struct dentry *entry = NULL;
2317	int err = 0;
2318	err = tracing_init_dentry();
2319	if (err)
2320		goto err;
2321
2322	entry = tracefs_create_file("synthetic_events", TRACE_MODE_WRITE,
2323				    NULL, NULL, &synth_events_fops);
2324	if (!entry) {
2325		err = -ENODEV;
2326		goto err;
2327	}
2328
2329	return err;
2330 err:
2331	pr_warn("Could not create tracefs 'synthetic_events' entry\n");
2332
2333	return err;
2334}
2335
2336fs_initcall(trace_events_synth_init);