v3.15
   1/*
   2 * event tracer
   3 *
   4 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
   5 *
   6 *  - Added format output of fields of the trace point.
   7 *    This was based on work by Tom Zanussi <tzanussi@gmail.com>.
   8 *
   9 */
  10
  11#include <linux/workqueue.h>
  12#include <linux/spinlock.h>
  13#include <linux/kthread.h>
  14#include <linux/debugfs.h>
  15#include <linux/uaccess.h>
  16#include <linux/module.h>
  17#include <linux/ctype.h>
  18#include <linux/slab.h>
  19#include <linux/delay.h>
  20
  21#include <asm/setup.h>
  22
  23#include "trace_output.h"
  24
  25#undef TRACE_SYSTEM
  26#define TRACE_SYSTEM "TRACE_SYSTEM"
  27
  28DEFINE_MUTEX(event_mutex);
  29
  30LIST_HEAD(ftrace_events);
  31static LIST_HEAD(ftrace_common_fields);
  32
  33#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
  34
  35static struct kmem_cache *field_cachep;
  36static struct kmem_cache *file_cachep;
  37
  38#define SYSTEM_FL_FREE_NAME		(1 << 31)
  39
  40static inline int system_refcount(struct event_subsystem *system)
  41{
  42	return system->ref_count & ~SYSTEM_FL_FREE_NAME;
  43}
  44
  45static int system_refcount_inc(struct event_subsystem *system)
  46{
  47	return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME;
  48}
  49
  50static int system_refcount_dec(struct event_subsystem *system)
  51{
  52	return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME;
  53}
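
/*
 * Note on the encoding used above: ref_count packs a flag and a counter
 * into one word.  Bit 31 (SYSTEM_FL_FREE_NAME) marks a dynamically
 * allocated name, and the remaining bits hold the actual reference
 * count, which is why each helper masks the flag off.
 */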
  54
  55/* Double loops, do not use break, only gotos work */
  56#define do_for_each_event_file(tr, file)			\
  57	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
  58		list_for_each_entry(file, &tr->events, list)
  59
  60#define do_for_each_event_file_safe(tr, file)			\
  61	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
  62		struct ftrace_event_file *___n;				\
  63		list_for_each_entry_safe(file, ___n, &tr->events, list)
  64
  65#define while_for_each_event_file()		\
  66	}
  67
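/*
 * Typical shape of the double-loop helpers above (a sketch; see
 * event_remove() further down for a real user):
 *
 *	do_for_each_event_file(tr, file) {
 *		if (file->event_call != call)
 *			continue;
 *		...
 *		break;	(moves on to the next trace_array)
 *	} while_for_each_event_file();
 */
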
  68static struct list_head *
  69trace_get_fields(struct ftrace_event_call *event_call)
  70{
  71	if (!event_call->class->get_fields)
  72		return &event_call->class->fields;
  73	return event_call->class->get_fields(event_call);
  74}
  75
  76static struct ftrace_event_field *
  77__find_event_field(struct list_head *head, char *name)
  78{
  79	struct ftrace_event_field *field;
  80
  81	list_for_each_entry(field, head, link) {
  82		if (!strcmp(field->name, name))
  83			return field;
  84	}
  85
  86	return NULL;
  87}
  88
  89struct ftrace_event_field *
  90trace_find_event_field(struct ftrace_event_call *call, char *name)
  91{
  92	struct ftrace_event_field *field;
  93	struct list_head *head;
  94
  95	field = __find_event_field(&ftrace_common_fields, name);
  96	if (field)
  97		return field;
  98
  99	head = trace_get_fields(call);
 100	return __find_event_field(head, name);
 101}
 102
 103static int __trace_define_field(struct list_head *head, const char *type,
 104				const char *name, int offset, int size,
 105				int is_signed, int filter_type)
 106{
 107	struct ftrace_event_field *field;
 108
 109	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
 110	if (!field)
 111		return -ENOMEM;
 112
 113	field->name = name;
 114	field->type = type;
 115
 116	if (filter_type == FILTER_OTHER)
 117		field->filter_type = filter_assign_type(type);
 118	else
 119		field->filter_type = filter_type;
 120
 121	field->offset = offset;
 122	field->size = size;
 123	field->is_signed = is_signed;
 124
 125	list_add(&field->link, head);
 126
 127	return 0;
 128}
 129
 130int trace_define_field(struct ftrace_event_call *call, const char *type,
 131		       const char *name, int offset, int size, int is_signed,
 132		       int filter_type)
 133{
 134	struct list_head *head;
 135
 136	if (WARN_ON(!call->class))
 137		return 0;
 138
 139	head = trace_get_fields(call);
 140	return __trace_define_field(head, type, name, offset, size,
 141				    is_signed, filter_type);
 142}
 143EXPORT_SYMBOL_GPL(trace_define_field);
 144
 145#define __common_field(type, item)					\
 146	ret = __trace_define_field(&ftrace_common_fields, #type,	\
 147				   "common_" #item,			\
 148				   offsetof(typeof(ent), item),		\
 149				   sizeof(ent.item),			\
 150				   is_signed_type(type), FILTER_OTHER);	\
 151	if (ret)							\
 152		return ret;
 153
 154static int trace_define_common_fields(void)
 155{
 156	int ret;
 157	struct trace_entry ent;
 158
 159	__common_field(unsigned short, type);
 160	__common_field(unsigned char, flags);
 161	__common_field(unsigned char, preempt_count);
 162	__common_field(int, pid);
 163
 164	return ret;
 165}
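
/*
 * For reference, __common_field(int, pid) above expands to roughly:
 *
 *	ret = __trace_define_field(&ftrace_common_fields, "int",
 *				   "common_pid",
 *				   offsetof(struct trace_entry, pid),
 *				   sizeof(ent.pid), is_signed_type(int),
 *				   FILTER_OTHER);
 *	if (ret)
 *		return ret;
 */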
 166
 167static void trace_destroy_fields(struct ftrace_event_call *call)
 168{
 169	struct ftrace_event_field *field, *next;
 170	struct list_head *head;
 171
 172	head = trace_get_fields(call);
 173	list_for_each_entry_safe(field, next, head, link) {
 174		list_del(&field->link);
 175		kmem_cache_free(field_cachep, field);
 176	}
 177}
 178
 179int trace_event_raw_init(struct ftrace_event_call *call)
 180{
 181	int id;
 182
 183	id = register_ftrace_event(&call->event);
 184	if (!id)
 185		return -ENODEV;
 186
 187	return 0;
 188}
 189EXPORT_SYMBOL_GPL(trace_event_raw_init);
 190
 191void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
 192				  struct ftrace_event_file *ftrace_file,
 193				  unsigned long len)
 194{
 195	struct ftrace_event_call *event_call = ftrace_file->event_call;
 196
 197	local_save_flags(fbuffer->flags);
 198	fbuffer->pc = preempt_count();
 199	fbuffer->ftrace_file = ftrace_file;
 200
 201	fbuffer->event =
 202		trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file,
 203						event_call->event.type, len,
 204						fbuffer->flags, fbuffer->pc);
 205	if (!fbuffer->event)
 206		return NULL;
 207
 208	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
 209	return fbuffer->entry;
 210}
 211EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve);
 212
 213void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer)
 214{
 215	event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer,
 216				    fbuffer->event, fbuffer->entry,
 217				    fbuffer->flags, fbuffer->pc);
 218}
 219EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit);
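
/*
 * Sketch of how a generated tracepoint probe is expected to drive the
 * reserve/commit pair above (the entry type and its field are
 * illustrative, not from this file):
 *
 *	struct ftrace_event_buffer fbuffer;
 *	struct ftrace_raw_foo *entry;
 *
 *	entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file,
 *					    sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->value = value;
 *	ftrace_event_buffer_commit(&fbuffer);
 */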
 220
 221int ftrace_event_reg(struct ftrace_event_call *call,
 222		     enum trace_reg type, void *data)
 223{
 224	struct ftrace_event_file *file = data;
 225
 226	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
 227	switch (type) {
 228	case TRACE_REG_REGISTER:
 229		return tracepoint_probe_register(call->tp,
 230						 call->class->probe,
 231						 file);
 232	case TRACE_REG_UNREGISTER:
 233		tracepoint_probe_unregister(call->tp,
 234					    call->class->probe,
 235					    file);
 236		return 0;
 237
 238#ifdef CONFIG_PERF_EVENTS
 239	case TRACE_REG_PERF_REGISTER:
 240		return tracepoint_probe_register(call->tp,
 241						 call->class->perf_probe,
 242						 call);
 243	case TRACE_REG_PERF_UNREGISTER:
 244		tracepoint_probe_unregister(call->tp,
 245					    call->class->perf_probe,
 246					    call);
 247		return 0;
 248	case TRACE_REG_PERF_OPEN:
 249	case TRACE_REG_PERF_CLOSE:
 250	case TRACE_REG_PERF_ADD:
 251	case TRACE_REG_PERF_DEL:
 252		return 0;
 253#endif
 254	}
 255	return 0;
 256}
 257EXPORT_SYMBOL_GPL(ftrace_event_reg);
 258
 259void trace_event_enable_cmd_record(bool enable)
 260{
 261	struct ftrace_event_file *file;
 262	struct trace_array *tr;
 263
 264	mutex_lock(&event_mutex);
 265	do_for_each_event_file(tr, file) {
 266
 267		if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
 268			continue;
 269
 270		if (enable) {
 271			tracing_start_cmdline_record();
 272			set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
 273		} else {
 274			tracing_stop_cmdline_record();
 275			clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
 276		}
 277	} while_for_each_event_file();
 278	mutex_unlock(&event_mutex);
 279}
 280
 281static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
 282					 int enable, int soft_disable)
 283{
 284	struct ftrace_event_call *call = file->event_call;
 285	int ret = 0;
 286	int disable;
 287
 288	switch (enable) {
 289	case 0:
 290		/*
 291		 * When soft_disable is set and enable is cleared, the sm_ref
 292		 * reference counter is decremented. If it reaches 0, we want
 293		 * to clear the SOFT_DISABLED flag but leave the event in the
 294		 * state that it was. That is, if the event was enabled and
 295		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
 296		 * is set we do not want the event to be enabled before we
 297		 * clear the bit.
 298		 *
 299		 * When soft_disable is not set but the SOFT_MODE flag is,
 300		 * we do nothing. Do not disable the tracepoint, otherwise
  301		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
 302		 */
 303		if (soft_disable) {
 304			if (atomic_dec_return(&file->sm_ref) > 0)
 305				break;
 306			disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
 307			clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
 308		} else
 309			disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);
 310
 311		if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
 312			clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
 313			if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
 314				tracing_stop_cmdline_record();
 315				clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
 316			}
 317			call->class->reg(call, TRACE_REG_UNREGISTER, file);
 318		}
 319		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
 320		if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
 321			set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
 322		else
 323			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
 324		break;
 325	case 1:
 326		/*
 327		 * When soft_disable is set and enable is set, we want to
 328		 * register the tracepoint for the event, but leave the event
 329		 * as is. That means, if the event was already enabled, we do
 330		 * nothing (but set SOFT_MODE). If the event is disabled, we
 331		 * set SOFT_DISABLED before enabling the event tracepoint, so
 332		 * it still seems to be disabled.
 333		 */
 334		if (!soft_disable)
 335			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
 336		else {
 337			if (atomic_inc_return(&file->sm_ref) > 1)
 338				break;
 339			set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
 340		}
 341
 342		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
 343
 344			/* Keep the event disabled, when going to SOFT_MODE. */
 345			if (soft_disable)
 346				set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
 347
 348			if (trace_flags & TRACE_ITER_RECORD_CMD) {
 349				tracing_start_cmdline_record();
 350				set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
 351			}
 352			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
 353			if (ret) {
 354				tracing_stop_cmdline_record();
 355				pr_info("event trace: Could not enable event "
 356					"%s\n", ftrace_event_name(call));
 357				break;
 358			}
 359			set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
 360
 361			/* WAS_ENABLED gets set but never cleared. */
 362			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
 363		}
 364		break;
 365	}
 366
 367	return ret;
 368}
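
/*
 * Summary of the flag interplay above: SOFT_MODE means "someone holds a
 * soft reference" (counted by sm_ref), and SOFT_DISABLED means the
 * tracepoint is registered but its output is suppressed until a soft
 * enable clears the bit.  WAS_ENABLED is sticky on purpose, so module
 * unload can tell whether the ring buffer may still hold its events.
 */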
 369
 370int trace_event_enable_disable(struct ftrace_event_file *file,
 371			       int enable, int soft_disable)
 372{
 373	return __ftrace_event_enable_disable(file, enable, soft_disable);
 374}
 375
 376static int ftrace_event_enable_disable(struct ftrace_event_file *file,
 377				       int enable)
 378{
 379	return __ftrace_event_enable_disable(file, enable, 0);
 380}
 381
 382static void ftrace_clear_events(struct trace_array *tr)
 383{
 384	struct ftrace_event_file *file;
 385
 386	mutex_lock(&event_mutex);
 387	list_for_each_entry(file, &tr->events, list) {
 388		ftrace_event_enable_disable(file, 0);
 389	}
 390	mutex_unlock(&event_mutex);
 391}
 392
 393static void __put_system(struct event_subsystem *system)
 394{
 395	struct event_filter *filter = system->filter;
 396
 397	WARN_ON_ONCE(system_refcount(system) == 0);
 398	if (system_refcount_dec(system))
 399		return;
 400
 401	list_del(&system->list);
 402
 403	if (filter) {
 404		kfree(filter->filter_string);
 405		kfree(filter);
 406	}
 407	if (system->ref_count & SYSTEM_FL_FREE_NAME)
 408		kfree(system->name);
 409	kfree(system);
 410}
 411
 412static void __get_system(struct event_subsystem *system)
 413{
 414	WARN_ON_ONCE(system_refcount(system) == 0);
 415	system_refcount_inc(system);
 416}
 417
 418static void __get_system_dir(struct ftrace_subsystem_dir *dir)
 419{
 420	WARN_ON_ONCE(dir->ref_count == 0);
 421	dir->ref_count++;
 422	__get_system(dir->subsystem);
 423}
 424
 425static void __put_system_dir(struct ftrace_subsystem_dir *dir)
 426{
 427	WARN_ON_ONCE(dir->ref_count == 0);
 428	/* If the subsystem is about to be freed, the dir must be too */
 429	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);
 430
 431	__put_system(dir->subsystem);
 432	if (!--dir->ref_count)
 433		kfree(dir);
 434}
 435
 436static void put_system(struct ftrace_subsystem_dir *dir)
 437{
 438	mutex_lock(&event_mutex);
 439	__put_system_dir(dir);
 440	mutex_unlock(&event_mutex);
 441}
 442
 443static void remove_subsystem(struct ftrace_subsystem_dir *dir)
 444{
 445	if (!dir)
 446		return;
 447
 448	if (!--dir->nr_events) {
 449		debugfs_remove_recursive(dir->entry);
 450		list_del(&dir->list);
 451		__put_system_dir(dir);
 452	}
 453}
 454
 455static void remove_event_file_dir(struct ftrace_event_file *file)
 456{
 457	struct dentry *dir = file->dir;
 458	struct dentry *child;
 459
 460	if (dir) {
 461		spin_lock(&dir->d_lock);	/* probably unneeded */
 462		list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
 463			if (child->d_inode)	/* probably unneeded */
 464				child->d_inode->i_private = NULL;
 465		}
 466		spin_unlock(&dir->d_lock);
 467
 468		debugfs_remove_recursive(dir);
 469	}
 470
 471	list_del(&file->list);
 472	remove_subsystem(file->system);
 473	kmem_cache_free(file_cachep, file);
 474}
 475
 476/*
  477 * __ftrace_set_clr_event(tr, NULL, NULL, NULL, set) will set/unset all events.
 478 */
 479static int
 480__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
 481			      const char *sub, const char *event, int set)
 482{
 483	struct ftrace_event_file *file;
 484	struct ftrace_event_call *call;
 485	const char *name;
 486	int ret = -EINVAL;
 487
 488	list_for_each_entry(file, &tr->events, list) {
 489
 490		call = file->event_call;
 491		name = ftrace_event_name(call);
 492
 493		if (!name || !call->class || !call->class->reg)
 494			continue;
 495
 496		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
 497			continue;
 498
 499		if (match &&
 500		    strcmp(match, name) != 0 &&
 501		    strcmp(match, call->class->system) != 0)
 502			continue;
 503
 504		if (sub && strcmp(sub, call->class->system) != 0)
 505			continue;
 506
 507		if (event && strcmp(event, name) != 0)
 508			continue;
 509
 510		ftrace_event_enable_disable(file, set);
 511
 512		ret = 0;
 513	}
 514
 515	return ret;
 516}
 517
 518static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
 519				  const char *sub, const char *event, int set)
 520{
 521	int ret;
 522
 523	mutex_lock(&event_mutex);
 524	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
 525	mutex_unlock(&event_mutex);
 526
 527	return ret;
 528}
 529
 530static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
 531{
 532	char *event = NULL, *sub = NULL, *match;
 533
 534	/*
 535	 * The buf format can be <subsystem>:<event-name>
 536	 *  *:<event-name> means any event by that name.
 537	 *  :<event-name> is the same.
 538	 *
 539	 *  <subsystem>:* means all events in that subsystem
 540	 *  <subsystem>: means the same.
 541	 *
 542	 *  <name> (no ':') means all events in a subsystem with
 543	 *  the name <name> or any event that matches <name>
 544	 */
 545
 546	match = strsep(&buf, ":");
 547	if (buf) {
 548		sub = match;
 549		event = buf;
 550		match = NULL;
 551
 552		if (!strlen(sub) || strcmp(sub, "*") == 0)
 553			sub = NULL;
 554		if (!strlen(event) || strcmp(event, "*") == 0)
 555			event = NULL;
 556	}
 557
 558	return __ftrace_set_clr_event(tr, match, sub, event, set);
 559}
 560
 561/**
 562 * trace_set_clr_event - enable or disable an event
 563 * @system: system name to match (NULL for any system)
 564 * @event: event name to match (NULL for all events, within system)
 565 * @set: 1 to enable, 0 to disable
 566 *
 567 * This is a way for other parts of the kernel to enable or disable
 568 * event recording.
 569 *
 570 * Returns 0 on success, -EINVAL if the parameters do not match any
 571 * registered events.
 572 */
 573int trace_set_clr_event(const char *system, const char *event, int set)
 574{
 575	struct trace_array *tr = top_trace_array();
 576
 577	return __ftrace_set_clr_event(tr, NULL, system, event, set);
 578}
 579EXPORT_SYMBOL_GPL(trace_set_clr_event);
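
/*
 * Usage sketches (the event names are illustrative): from user space,
 *
 *	echo sched:sched_switch >> /sys/kernel/debug/tracing/set_event
 *	echo '!sched:sched_switch' >> /sys/kernel/debug/tracing/set_event
 *
 * and from kernel code, through the export above:
 *
 *	if (trace_set_clr_event("sched", "sched_switch", 1))
 *		pr_warn("could not enable sched_switch\n");
 */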
 580
 581/* 128 should be much more than enough */
 582#define EVENT_BUF_SIZE		127
 583
 584static ssize_t
 585ftrace_event_write(struct file *file, const char __user *ubuf,
 586		   size_t cnt, loff_t *ppos)
 587{
 588	struct trace_parser parser;
 589	struct seq_file *m = file->private_data;
 590	struct trace_array *tr = m->private;
 591	ssize_t read, ret;
 592
 593	if (!cnt)
 594		return 0;
 595
 596	ret = tracing_update_buffers();
 597	if (ret < 0)
 598		return ret;
 599
 600	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
 601		return -ENOMEM;
 602
 603	read = trace_get_user(&parser, ubuf, cnt, ppos);
 604
 605	if (read >= 0 && trace_parser_loaded((&parser))) {
 606		int set = 1;
 607
 608		if (*parser.buffer == '!')
 609			set = 0;
 610
 611		parser.buffer[parser.idx] = 0;
 612
 613		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
 614		if (ret)
 615			goto out_put;
 616	}
 617
 618	ret = read;
 619
 620 out_put:
 621	trace_parser_put(&parser);
 622
 623	return ret;
 624}
 625
 626static void *
 627t_next(struct seq_file *m, void *v, loff_t *pos)
 628{
 629	struct ftrace_event_file *file = v;
 630	struct ftrace_event_call *call;
 631	struct trace_array *tr = m->private;
 632
 633	(*pos)++;
 634
 635	list_for_each_entry_continue(file, &tr->events, list) {
 636		call = file->event_call;
 637		/*
 638		 * The ftrace subsystem is for showing formats only.
  639		 * They cannot be enabled or disabled via the event files.
 640		 */
 641		if (call->class && call->class->reg)
 642			return file;
 643	}
 644
 645	return NULL;
 646}
 647
 648static void *t_start(struct seq_file *m, loff_t *pos)
 649{
 650	struct ftrace_event_file *file;
 651	struct trace_array *tr = m->private;
 652	loff_t l;
 653
 654	mutex_lock(&event_mutex);
 655
 656	file = list_entry(&tr->events, struct ftrace_event_file, list);
 657	for (l = 0; l <= *pos; ) {
 658		file = t_next(m, file, &l);
 659		if (!file)
 660			break;
 661	}
 662	return file;
 663}
 664
 665static void *
 666s_next(struct seq_file *m, void *v, loff_t *pos)
 667{
 668	struct ftrace_event_file *file = v;
 669	struct trace_array *tr = m->private;
 670
 671	(*pos)++;
 672
 673	list_for_each_entry_continue(file, &tr->events, list) {
 674		if (file->flags & FTRACE_EVENT_FL_ENABLED)
 675			return file;
 676	}
 677
 678	return NULL;
 679}
 680
 681static void *s_start(struct seq_file *m, loff_t *pos)
 682{
 683	struct ftrace_event_file *file;
 684	struct trace_array *tr = m->private;
 685	loff_t l;
 686
 687	mutex_lock(&event_mutex);
 688
 689	file = list_entry(&tr->events, struct ftrace_event_file, list);
 690	for (l = 0; l <= *pos; ) {
 691		file = s_next(m, file, &l);
 692		if (!file)
 693			break;
 694	}
 695	return file;
 696}
 697
 698static int t_show(struct seq_file *m, void *v)
 699{
 700	struct ftrace_event_file *file = v;
 701	struct ftrace_event_call *call = file->event_call;
 702
 703	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
 704		seq_printf(m, "%s:", call->class->system);
 705	seq_printf(m, "%s\n", ftrace_event_name(call));
 706
 707	return 0;
 708}
 709
 710static void t_stop(struct seq_file *m, void *p)
 711{
 712	mutex_unlock(&event_mutex);
 713}
 714
 715static ssize_t
 716event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
 717		  loff_t *ppos)
 718{
 719	struct ftrace_event_file *file;
 720	unsigned long flags;
 721	char buf[4] = "0";
 722
 723	mutex_lock(&event_mutex);
 724	file = event_file_data(filp);
 725	if (likely(file))
 726		flags = file->flags;
 727	mutex_unlock(&event_mutex);
 728
 729	if (!file)
 730		return -ENODEV;
 731
 732	if (flags & FTRACE_EVENT_FL_ENABLED &&
 733	    !(flags & FTRACE_EVENT_FL_SOFT_DISABLED))
 734		strcpy(buf, "1");
 735
 736	if (flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
 737	    flags & FTRACE_EVENT_FL_SOFT_MODE)
 738		strcat(buf, "*");
 739
 740	strcat(buf, "\n");
 741
 742	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
 743}
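
/*
 * The read above yields "0", "1", "0*" or "1*" (newline terminated):
 * the digit reflects ENABLED && !SOFT_DISABLED, and the '*' is appended
 * whenever the event is in soft mode or soft disabled.
 */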
 744
 745static ssize_t
 746event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
 747		   loff_t *ppos)
 748{
 749	struct ftrace_event_file *file;
 750	unsigned long val;
 751	int ret;
 752
 753	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 754	if (ret)
 755		return ret;
 756
 757	ret = tracing_update_buffers();
 758	if (ret < 0)
 759		return ret;
 760
 761	switch (val) {
 762	case 0:
 763	case 1:
 764		ret = -ENODEV;
 765		mutex_lock(&event_mutex);
 766		file = event_file_data(filp);
 767		if (likely(file))
 768			ret = ftrace_event_enable_disable(file, val);
 769		mutex_unlock(&event_mutex);
 770		break;
 771
 772	default:
 773		return -EINVAL;
 774	}
 775
 776	*ppos += cnt;
 777
 778	return ret ? ret : cnt;
 779}
 780
 781static ssize_t
 782system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
 783		   loff_t *ppos)
 784{
 785	const char set_to_char[4] = { '?', '0', '1', 'X' };
 786	struct ftrace_subsystem_dir *dir = filp->private_data;
 787	struct event_subsystem *system = dir->subsystem;
 788	struct ftrace_event_call *call;
 789	struct ftrace_event_file *file;
 790	struct trace_array *tr = dir->tr;
 791	char buf[2];
 792	int set = 0;
 793	int ret;
 794
 795	mutex_lock(&event_mutex);
 796	list_for_each_entry(file, &tr->events, list) {
 797		call = file->event_call;
 798		if (!ftrace_event_name(call) || !call->class || !call->class->reg)
 799			continue;
 800
 801		if (system && strcmp(call->class->system, system->name) != 0)
 802			continue;
 803
 804		/*
 805		 * We need to find out if all the events are set
  806		 * or if all events are cleared, or if we have
 807		 * a mixture.
 808		 */
 809		set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));
 810
 811		/*
 812		 * If we have a mixture, no need to look further.
 813		 */
 814		if (set == 3)
 815			break;
 816	}
 817	mutex_unlock(&event_mutex);
 818
 819	buf[0] = set_to_char[set];
 820	buf[1] = '\n';
 821
 822	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
 823
 824	return ret;
 825}
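
/*
 * set is a two-bit mask built above: bit 0 records "some event
 * disabled", bit 1 records "some event enabled".  set_to_char[] then
 * maps 1 to '0', 2 to '1' and 3 (a mixture) to 'X'; 0 ('?') means no
 * matching events were found.
 */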
 826
 827static ssize_t
 828system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
 829		    loff_t *ppos)
 830{
 831	struct ftrace_subsystem_dir *dir = filp->private_data;
 832	struct event_subsystem *system = dir->subsystem;
 833	const char *name = NULL;
 834	unsigned long val;
 835	ssize_t ret;
 836
 837	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 838	if (ret)
 839		return ret;
 840
 841	ret = tracing_update_buffers();
 842	if (ret < 0)
 843		return ret;
 844
 845	if (val != 0 && val != 1)
 846		return -EINVAL;
 847
 848	/*
 849	 * Opening of "enable" adds a ref count to system,
 850	 * so the name is safe to use.
 851	 */
 852	if (system)
 853		name = system->name;
 854
 855	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
 856	if (ret)
 857		goto out;
 858
 859	ret = cnt;
 860
 861out:
 862	*ppos += cnt;
 863
 864	return ret;
 865}
 866
 867enum {
 868	FORMAT_HEADER		= 1,
 869	FORMAT_FIELD_SEPERATOR	= 2,
 870	FORMAT_PRINTFMT		= 3,
 871};
 872
 873static void *f_next(struct seq_file *m, void *v, loff_t *pos)
 874{
 875	struct ftrace_event_call *call = event_file_data(m->private);
 876	struct list_head *common_head = &ftrace_common_fields;
 877	struct list_head *head = trace_get_fields(call);
 878	struct list_head *node = v;
 879
 880	(*pos)++;
 881
 882	switch ((unsigned long)v) {
 883	case FORMAT_HEADER:
 884		node = common_head;
 885		break;
 886
 887	case FORMAT_FIELD_SEPERATOR:
 888		node = head;
 889		break;
 890
 891	case FORMAT_PRINTFMT:
 892		/* all done */
 893		return NULL;
 894	}
 895
 896	node = node->prev;
 897	if (node == common_head)
 898		return (void *)FORMAT_FIELD_SEPERATOR;
 899	else if (node == head)
 900		return (void *)FORMAT_PRINTFMT;
 901	else
 902		return node;
 903}
 904
 905static int f_show(struct seq_file *m, void *v)
 906{
 907	struct ftrace_event_call *call = event_file_data(m->private);
 908	struct ftrace_event_field *field;
 909	const char *array_descriptor;
 910
 911	switch ((unsigned long)v) {
 912	case FORMAT_HEADER:
 913		seq_printf(m, "name: %s\n", ftrace_event_name(call));
 914		seq_printf(m, "ID: %d\n", call->event.type);
 915		seq_printf(m, "format:\n");
 916		return 0;
 917
 918	case FORMAT_FIELD_SEPERATOR:
 919		seq_putc(m, '\n');
 920		return 0;
 921
 922	case FORMAT_PRINTFMT:
 923		seq_printf(m, "\nprint fmt: %s\n",
 924			   call->print_fmt);
 925		return 0;
 926	}
 927
 928	field = list_entry(v, struct ftrace_event_field, link);
 929	/*
  930	 * Smartly shows the array type (except dynamic arrays).
 931	 * Normal:
 932	 *	field:TYPE VAR
 933	 * If TYPE := TYPE[LEN], it is shown:
 934	 *	field:TYPE VAR[LEN]
 935	 */
 936	array_descriptor = strchr(field->type, '[');
 937
 938	if (!strncmp(field->type, "__data_loc", 10))
 939		array_descriptor = NULL;
 940
 941	if (!array_descriptor)
 942		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
 943			   field->type, field->name, field->offset,
 944			   field->size, !!field->is_signed);
 945	else
 946		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
 947			   (int)(array_descriptor - field->type),
 948			   field->type, field->name,
 949			   array_descriptor, field->offset,
 950			   field->size, !!field->is_signed);
 951
 952	return 0;
 953}
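
/*
 * Sample "format" file output produced by the seq operations above
 * (field values are illustrative):
 *
 *	name: sched_switch
 *	ID: 123
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		...
 *
 *	print fmt: ...
 */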
 954
 955static void *f_start(struct seq_file *m, loff_t *pos)
 956{
 957	void *p = (void *)FORMAT_HEADER;
 958	loff_t l = 0;
 959
 960	/* ->stop() is called even if ->start() fails */
 961	mutex_lock(&event_mutex);
 962	if (!event_file_data(m->private))
 963		return ERR_PTR(-ENODEV);
 964
 965	while (l < *pos && p)
 966		p = f_next(m, p, &l);
 967
 968	return p;
 969}
 970
 971static void f_stop(struct seq_file *m, void *p)
 972{
 973	mutex_unlock(&event_mutex);
 974}
 975
 976static const struct seq_operations trace_format_seq_ops = {
 977	.start		= f_start,
 978	.next		= f_next,
 979	.stop		= f_stop,
 980	.show		= f_show,
 981};
 982
 983static int trace_format_open(struct inode *inode, struct file *file)
 984{
 985	struct seq_file *m;
 986	int ret;
 987
 988	ret = seq_open(file, &trace_format_seq_ops);
 989	if (ret < 0)
 990		return ret;
 991
 992	m = file->private_data;
 993	m->private = file;
 994
 995	return 0;
 996}
 997
 998static ssize_t
 999event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1000{
1001	int id = (long)event_file_data(filp);
1002	char buf[32];
1003	int len;
1004
1005	if (*ppos)
1006		return 0;
1007
1008	if (unlikely(!id))
1009		return -ENODEV;
1010
1011	len = sprintf(buf, "%d\n", id);
1012
1013	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
1014}
1015
1016static ssize_t
1017event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1018		  loff_t *ppos)
1019{
1020	struct ftrace_event_file *file;
1021	struct trace_seq *s;
1022	int r = -ENODEV;
1023
1024	if (*ppos)
1025		return 0;
1026
1027	s = kmalloc(sizeof(*s), GFP_KERNEL);
1028
1029	if (!s)
1030		return -ENOMEM;
1031
1032	trace_seq_init(s);
1033
1034	mutex_lock(&event_mutex);
1035	file = event_file_data(filp);
1036	if (file)
1037		print_event_filter(file, s);
1038	mutex_unlock(&event_mutex);
1039
1040	if (file)
1041		r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1042
1043	kfree(s);
1044
1045	return r;
1046}
1047
1048static ssize_t
1049event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1050		   loff_t *ppos)
1051{
1052	struct ftrace_event_file *file;
1053	char *buf;
1054	int err = -ENODEV;
1055
1056	if (cnt >= PAGE_SIZE)
1057		return -EINVAL;
1058
1059	buf = (char *)__get_free_page(GFP_TEMPORARY);
1060	if (!buf)
1061		return -ENOMEM;
1062
1063	if (copy_from_user(buf, ubuf, cnt)) {
1064		free_page((unsigned long) buf);
1065		return -EFAULT;
1066	}
1067	buf[cnt] = '\0';
1068
1069	mutex_lock(&event_mutex);
1070	file = event_file_data(filp);
1071	if (file)
1072		err = apply_event_filter(file, buf);
1073	mutex_unlock(&event_mutex);
1074
1075	free_page((unsigned long) buf);
1076	if (err < 0)
1077		return err;
1078
1079	*ppos += cnt;
1080
1081	return cnt;
1082}
1083
1084static LIST_HEAD(event_subsystems);
1085
1086static int subsystem_open(struct inode *inode, struct file *filp)
1087{
1088	struct event_subsystem *system = NULL;
1089	struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
1090	struct trace_array *tr;
1091	int ret;
1092
1093	if (tracing_is_disabled())
1094		return -ENODEV;
1095
1096	/* Make sure the system still exists */
1097	mutex_lock(&trace_types_lock);
1098	mutex_lock(&event_mutex);
1099	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1100		list_for_each_entry(dir, &tr->systems, list) {
1101			if (dir == inode->i_private) {
1102				/* Don't open systems with no events */
1103				if (dir->nr_events) {
1104					__get_system_dir(dir);
1105					system = dir->subsystem;
1106				}
1107				goto exit_loop;
1108			}
1109		}
1110	}
1111 exit_loop:
1112	mutex_unlock(&event_mutex);
1113	mutex_unlock(&trace_types_lock);
1114
1115	if (!system)
1116		return -ENODEV;
1117
1118	/* Some versions of gcc think dir can be uninitialized here */
1119	WARN_ON(!dir);
1120
1121	/* Still need to increment the ref count of the system */
1122	if (trace_array_get(tr) < 0) {
1123		put_system(dir);
1124		return -ENODEV;
1125	}
1126
1127	ret = tracing_open_generic(inode, filp);
1128	if (ret < 0) {
1129		trace_array_put(tr);
1130		put_system(dir);
1131	}
1132
1133	return ret;
1134}
1135
1136static int system_tr_open(struct inode *inode, struct file *filp)
1137{
1138	struct ftrace_subsystem_dir *dir;
1139	struct trace_array *tr = inode->i_private;
1140	int ret;
1141
1142	if (tracing_is_disabled())
1143		return -ENODEV;
1144
1145	if (trace_array_get(tr) < 0)
1146		return -ENODEV;
1147
1148	/* Make a temporary dir that has no system but points to tr */
1149	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
1150	if (!dir) {
1151		trace_array_put(tr);
1152		return -ENOMEM;
1153	}
1154
1155	dir->tr = tr;
1156
1157	ret = tracing_open_generic(inode, filp);
1158	if (ret < 0) {
1159		trace_array_put(tr);
1160		kfree(dir);
1161		return ret;
1162	}
1163
1164	filp->private_data = dir;
1165
1166	return 0;
1167}
1168
1169static int subsystem_release(struct inode *inode, struct file *file)
1170{
1171	struct ftrace_subsystem_dir *dir = file->private_data;
1172
1173	trace_array_put(dir->tr);
1174
1175	/*
1176	 * If dir->subsystem is NULL, then this is a temporary
1177	 * descriptor that was made for a trace_array to enable
1178	 * all subsystems.
1179	 */
1180	if (dir->subsystem)
1181		put_system(dir);
1182	else
1183		kfree(dir);
1184
1185	return 0;
1186}
1187
1188static ssize_t
1189subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1190		      loff_t *ppos)
1191{
1192	struct ftrace_subsystem_dir *dir = filp->private_data;
1193	struct event_subsystem *system = dir->subsystem;
1194	struct trace_seq *s;
1195	int r;
1196
1197	if (*ppos)
1198		return 0;
1199
1200	s = kmalloc(sizeof(*s), GFP_KERNEL);
1201	if (!s)
1202		return -ENOMEM;
1203
1204	trace_seq_init(s);
1205
1206	print_subsystem_event_filter(system, s);
1207	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1208
1209	kfree(s);
1210
1211	return r;
1212}
1213
1214static ssize_t
1215subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1216		       loff_t *ppos)
1217{
1218	struct ftrace_subsystem_dir *dir = filp->private_data;
1219	char *buf;
1220	int err;
1221
1222	if (cnt >= PAGE_SIZE)
1223		return -EINVAL;
1224
1225	buf = (char *)__get_free_page(GFP_TEMPORARY);
1226	if (!buf)
1227		return -ENOMEM;
1228
1229	if (copy_from_user(buf, ubuf, cnt)) {
1230		free_page((unsigned long) buf);
1231		return -EFAULT;
1232	}
1233	buf[cnt] = '\0';
1234
1235	err = apply_subsystem_event_filter(dir, buf);
1236	free_page((unsigned long) buf);
1237	if (err < 0)
1238		return err;
1239
1240	*ppos += cnt;
1241
1242	return cnt;
1243}
1244
1245static ssize_t
1246show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1247{
1248	int (*func)(struct trace_seq *s) = filp->private_data;
1249	struct trace_seq *s;
1250	int r;
1251
1252	if (*ppos)
1253		return 0;
1254
1255	s = kmalloc(sizeof(*s), GFP_KERNEL);
1256	if (!s)
1257		return -ENOMEM;
1258
1259	trace_seq_init(s);
1260
1261	func(s);
1262	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1263
1264	kfree(s);
1265
1266	return r;
1267}
1268
1269static int ftrace_event_avail_open(struct inode *inode, struct file *file);
1270static int ftrace_event_set_open(struct inode *inode, struct file *file);
1271static int ftrace_event_release(struct inode *inode, struct file *file);
1272
1273static const struct seq_operations show_event_seq_ops = {
1274	.start = t_start,
1275	.next = t_next,
1276	.show = t_show,
1277	.stop = t_stop,
1278};
1279
1280static const struct seq_operations show_set_event_seq_ops = {
1281	.start = s_start,
1282	.next = s_next,
1283	.show = t_show,
1284	.stop = t_stop,
1285};
1286
1287static const struct file_operations ftrace_avail_fops = {
1288	.open = ftrace_event_avail_open,
1289	.read = seq_read,
1290	.llseek = seq_lseek,
1291	.release = seq_release,
1292};
1293
1294static const struct file_operations ftrace_set_event_fops = {
1295	.open = ftrace_event_set_open,
1296	.read = seq_read,
1297	.write = ftrace_event_write,
1298	.llseek = seq_lseek,
1299	.release = ftrace_event_release,
1300};
1301
1302static const struct file_operations ftrace_enable_fops = {
1303	.open = tracing_open_generic,
1304	.read = event_enable_read,
1305	.write = event_enable_write,
1306	.llseek = default_llseek,
1307};
1308
1309static const struct file_operations ftrace_event_format_fops = {
1310	.open = trace_format_open,
1311	.read = seq_read,
1312	.llseek = seq_lseek,
1313	.release = seq_release,
1314};
1315
1316static const struct file_operations ftrace_event_id_fops = {
1317	.read = event_id_read,
1318	.llseek = default_llseek,
1319};
1320
1321static const struct file_operations ftrace_event_filter_fops = {
1322	.open = tracing_open_generic,
1323	.read = event_filter_read,
1324	.write = event_filter_write,
1325	.llseek = default_llseek,
1326};
1327
1328static const struct file_operations ftrace_subsystem_filter_fops = {
1329	.open = subsystem_open,
1330	.read = subsystem_filter_read,
1331	.write = subsystem_filter_write,
1332	.llseek = default_llseek,
1333	.release = subsystem_release,
1334};
1335
1336static const struct file_operations ftrace_system_enable_fops = {
1337	.open = subsystem_open,
1338	.read = system_enable_read,
1339	.write = system_enable_write,
1340	.llseek = default_llseek,
1341	.release = subsystem_release,
1342};
1343
1344static const struct file_operations ftrace_tr_enable_fops = {
1345	.open = system_tr_open,
1346	.read = system_enable_read,
1347	.write = system_enable_write,
1348	.llseek = default_llseek,
1349	.release = subsystem_release,
1350};
1351
1352static const struct file_operations ftrace_show_header_fops = {
1353	.open = tracing_open_generic,
1354	.read = show_header,
1355	.llseek = default_llseek,
1356};
1357
1358static int
1359ftrace_event_open(struct inode *inode, struct file *file,
1360		  const struct seq_operations *seq_ops)
1361{
1362	struct seq_file *m;
1363	int ret;
1364
1365	ret = seq_open(file, seq_ops);
1366	if (ret < 0)
1367		return ret;
1368	m = file->private_data;
1369	/* copy tr over to seq ops */
1370	m->private = inode->i_private;
1371
1372	return ret;
1373}
1374
1375static int ftrace_event_release(struct inode *inode, struct file *file)
1376{
1377	struct trace_array *tr = inode->i_private;
1378
1379	trace_array_put(tr);
1380
1381	return seq_release(inode, file);
1382}
1383
1384static int
1385ftrace_event_avail_open(struct inode *inode, struct file *file)
1386{
1387	const struct seq_operations *seq_ops = &show_event_seq_ops;
1388
1389	return ftrace_event_open(inode, file, seq_ops);
1390}
1391
1392static int
1393ftrace_event_set_open(struct inode *inode, struct file *file)
1394{
1395	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
1396	struct trace_array *tr = inode->i_private;
1397	int ret;
1398
1399	if (trace_array_get(tr) < 0)
1400		return -ENODEV;
1401
1402	if ((file->f_mode & FMODE_WRITE) &&
1403	    (file->f_flags & O_TRUNC))
1404		ftrace_clear_events(tr);
1405
1406	ret = ftrace_event_open(inode, file, seq_ops);
1407	if (ret < 0)
1408		trace_array_put(tr);
1409	return ret;
1410}
1411
1412static struct event_subsystem *
1413create_new_subsystem(const char *name)
1414{
1415	struct event_subsystem *system;
1416
1417	/* need to create new entry */
1418	system = kmalloc(sizeof(*system), GFP_KERNEL);
1419	if (!system)
1420		return NULL;
1421
1422	system->ref_count = 1;
1423
1424	/* Only allocate if dynamic (kprobes and modules) */
1425	if (!core_kernel_data((unsigned long)name)) {
1426		system->ref_count |= SYSTEM_FL_FREE_NAME;
1427		system->name = kstrdup(name, GFP_KERNEL);
1428		if (!system->name)
1429			goto out_free;
1430	} else
1431		system->name = name;
1432
1433	system->filter = NULL;
1434
1435	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
1436	if (!system->filter)
1437		goto out_free;
1438
1439	list_add(&system->list, &event_subsystems);
1440
1441	return system;
1442
1443 out_free:
1444	if (system->ref_count & SYSTEM_FL_FREE_NAME)
1445		kfree(system->name);
1446	kfree(system);
1447	return NULL;
1448}
1449
1450static struct dentry *
1451event_subsystem_dir(struct trace_array *tr, const char *name,
1452		    struct ftrace_event_file *file, struct dentry *parent)
1453{
1454	struct ftrace_subsystem_dir *dir;
1455	struct event_subsystem *system;
1456	struct dentry *entry;
1457
 1458	/* First see if we already created this dir */
1459	list_for_each_entry(dir, &tr->systems, list) {
1460		system = dir->subsystem;
1461		if (strcmp(system->name, name) == 0) {
1462			dir->nr_events++;
1463			file->system = dir;
1464			return dir->entry;
1465		}
1466	}
1467
1468	/* Now see if the system itself exists. */
1469	list_for_each_entry(system, &event_subsystems, list) {
1470		if (strcmp(system->name, name) == 0)
1471			break;
1472	}
1473	/* Reset system variable when not found */
1474	if (&system->list == &event_subsystems)
1475		system = NULL;
1476
1477	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
1478	if (!dir)
1479		goto out_fail;
1480
1481	if (!system) {
1482		system = create_new_subsystem(name);
1483		if (!system)
1484			goto out_free;
1485	} else
1486		__get_system(system);
1487
1488	dir->entry = debugfs_create_dir(name, parent);
1489	if (!dir->entry) {
1490		pr_warning("Failed to create system directory %s\n", name);
1491		__put_system(system);
1492		goto out_free;
1493	}
1494
1495	dir->tr = tr;
1496	dir->ref_count = 1;
1497	dir->nr_events = 1;
1498	dir->subsystem = system;
1499	file->system = dir;
1500
1501	entry = debugfs_create_file("filter", 0644, dir->entry, dir,
1502				    &ftrace_subsystem_filter_fops);
1503	if (!entry) {
1504		kfree(system->filter);
1505		system->filter = NULL;
1506		pr_warning("Could not create debugfs '%s/filter' entry\n", name);
1507	}
1508
1509	trace_create_file("enable", 0644, dir->entry, dir,
1510			  &ftrace_system_enable_fops);
1511
1512	list_add(&dir->list, &tr->systems);
1513
1514	return dir->entry;
1515
1516 out_free:
1517	kfree(dir);
1518 out_fail:
 1519	/* Only print this message if the failure was a memory allocation */
1520	if (!dir || !system)
1521		pr_warning("No memory to create event subsystem %s\n",
1522			   name);
1523	return NULL;
1524}
1525
1526static int
1527event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
1528{
1529	struct ftrace_event_call *call = file->event_call;
1530	struct trace_array *tr = file->tr;
1531	struct list_head *head;
1532	struct dentry *d_events;
1533	const char *name;
1534	int ret;
1535
1536	/*
1537	 * If the trace point header did not define TRACE_SYSTEM
1538	 * then the system would be called "TRACE_SYSTEM".
1539	 */
1540	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
1541		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
1542		if (!d_events)
1543			return -ENOMEM;
1544	} else
1545		d_events = parent;
1546
1547	name = ftrace_event_name(call);
1548	file->dir = debugfs_create_dir(name, d_events);
1549	if (!file->dir) {
1550		pr_warning("Could not create debugfs '%s' directory\n",
1551			   name);
1552		return -1;
1553	}
1554
1555	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
1556		trace_create_file("enable", 0644, file->dir, file,
1557				  &ftrace_enable_fops);
1558
1559#ifdef CONFIG_PERF_EVENTS
1560	if (call->event.type && call->class->reg)
1561		trace_create_file("id", 0444, file->dir,
1562				  (void *)(long)call->event.type,
1563				  &ftrace_event_id_fops);
1564#endif
1565
1566	/*
1567	 * Other events may have the same class. Only update
1568	 * the fields if they are not already defined.
1569	 */
1570	head = trace_get_fields(call);
1571	if (list_empty(head)) {
1572		ret = call->class->define_fields(call);
1573		if (ret < 0) {
1574			pr_warning("Could not initialize trace point"
1575				   " events/%s\n", name);
1576			return -1;
1577		}
1578	}
1579	trace_create_file("filter", 0644, file->dir, file,
1580			  &ftrace_event_filter_fops);
1581
1582	trace_create_file("trigger", 0644, file->dir, file,
1583			  &event_trigger_fops);
1584
1585	trace_create_file("format", 0444, file->dir, call,
1586			  &ftrace_event_format_fops);
1587
1588	return 0;
1589}
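
/*
 * Resulting debugfs layout for a typical event created above (sketch):
 *
 *	events/<system>/enable
 *	events/<system>/filter
 *	events/<system>/<event>/enable
 *	events/<system>/<event>/id	(CONFIG_PERF_EVENTS only)
 *	events/<system>/<event>/filter
 *	events/<system>/<event>/trigger
 *	events/<system>/<event>/format
 */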
1590
1591static void remove_event_from_tracers(struct ftrace_event_call *call)
1592{
1593	struct ftrace_event_file *file;
1594	struct trace_array *tr;
1595
1596	do_for_each_event_file_safe(tr, file) {
1597		if (file->event_call != call)
1598			continue;
1599
1600		remove_event_file_dir(file);
1601		/*
1602		 * The do_for_each_event_file_safe() is
1603		 * a double loop. After finding the call for this
1604		 * trace_array, we use break to jump to the next
1605		 * trace_array.
1606		 */
1607		break;
1608	} while_for_each_event_file();
1609}
1610
1611static void event_remove(struct ftrace_event_call *call)
1612{
1613	struct trace_array *tr;
1614	struct ftrace_event_file *file;
1615
1616	do_for_each_event_file(tr, file) {
1617		if (file->event_call != call)
1618			continue;
1619		ftrace_event_enable_disable(file, 0);
1620		destroy_preds(file);
1621		/*
1622		 * The do_for_each_event_file() is
1623		 * a double loop. After finding the call for this
1624		 * trace_array, we use break to jump to the next
1625		 * trace_array.
1626		 */
1627		break;
1628	} while_for_each_event_file();
1629
1630	if (call->event.funcs)
1631		__unregister_ftrace_event(&call->event);
1632	remove_event_from_tracers(call);
1633	list_del(&call->list);
1634}
1635
1636static int event_init(struct ftrace_event_call *call)
1637{
1638	int ret = 0;
1639	const char *name;
1640
1641	name = ftrace_event_name(call);
1642	if (WARN_ON(!name))
1643		return -EINVAL;
1644
1645	if (call->class->raw_init) {
1646		ret = call->class->raw_init(call);
1647		if (ret < 0 && ret != -ENOSYS)
1648			pr_warn("Could not initialize trace events/%s\n",
1649				name);
1650	}
1651
1652	return ret;
1653}
1654
1655static int
1656__register_event(struct ftrace_event_call *call, struct module *mod)
1657{
1658	int ret;
1659
1660	ret = event_init(call);
1661	if (ret < 0)
1662		return ret;
1663
1664	list_add(&call->list, &ftrace_events);
1665	call->mod = mod;
1666
1667	return 0;
1668}
1669
1670static struct ftrace_event_file *
1671trace_create_new_event(struct ftrace_event_call *call,
1672		       struct trace_array *tr)
1673{
1674	struct ftrace_event_file *file;
1675
1676	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
1677	if (!file)
1678		return NULL;
1679
1680	file->event_call = call;
1681	file->tr = tr;
1682	atomic_set(&file->sm_ref, 0);
1683	atomic_set(&file->tm_ref, 0);
1684	INIT_LIST_HEAD(&file->triggers);
1685	list_add(&file->list, &tr->events);
1686
1687	return file;
1688}
1689
1690/* Add an event to a trace directory */
1691static int
1692__trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr)
1693{
1694	struct ftrace_event_file *file;
1695
1696	file = trace_create_new_event(call, tr);
1697	if (!file)
1698		return -ENOMEM;
1699
1700	return event_create_dir(tr->event_dir, file);
1701}
1702
1703/*
 1704 * Just create a descriptor for early init. A descriptor is required
1705 * for enabling events at boot. We want to enable events before
1706 * the filesystem is initialized.
1707 */
1708static __init int
1709__trace_early_add_new_event(struct ftrace_event_call *call,
1710			    struct trace_array *tr)
1711{
1712	struct ftrace_event_file *file;
1713
1714	file = trace_create_new_event(call, tr);
1715	if (!file)
1716		return -ENOMEM;
1717
1718	return 0;
1719}
1720
1721struct ftrace_module_file_ops;
1722static void __add_event_to_tracers(struct ftrace_event_call *call);
1723
1724/* Add an additional event_call dynamically */
1725int trace_add_event_call(struct ftrace_event_call *call)
1726{
1727	int ret;
1728	mutex_lock(&trace_types_lock);
1729	mutex_lock(&event_mutex);
1730
1731	ret = __register_event(call, NULL);
1732	if (ret >= 0)
1733		__add_event_to_tracers(call);
1734
1735	mutex_unlock(&event_mutex);
1736	mutex_unlock(&trace_types_lock);
1737	return ret;
1738}
1739
1740/*
1741 * Must be called under locking of trace_types_lock, event_mutex and
1742 * trace_event_sem.
1743 */
1744static void __trace_remove_event_call(struct ftrace_event_call *call)
1745{
1746	event_remove(call);
1747	trace_destroy_fields(call);
1748	destroy_call_preds(call);
1749}
1750
1751static int probe_remove_event_call(struct ftrace_event_call *call)
1752{
1753	struct trace_array *tr;
1754	struct ftrace_event_file *file;
1755
1756#ifdef CONFIG_PERF_EVENTS
1757	if (call->perf_refcount)
1758		return -EBUSY;
1759#endif
1760	do_for_each_event_file(tr, file) {
1761		if (file->event_call != call)
1762			continue;
1763		/*
 1764		 * We can't rely on the ftrace_event_enable_disable(enable => 0)
 1765		 * call we are about to make; FTRACE_EVENT_FL_SOFT_MODE can
 1766		 * suppress TRACE_REG_UNREGISTER.
1767		 */
1768		if (file->flags & FTRACE_EVENT_FL_ENABLED)
1769			return -EBUSY;
1770		/*
 1771		 * The do_for_each_event_file() is
1772		 * a double loop. After finding the call for this
1773		 * trace_array, we use break to jump to the next
1774		 * trace_array.
1775		 */
1776		break;
1777	} while_for_each_event_file();
1778
1779	__trace_remove_event_call(call);
1780
1781	return 0;
1782}
1783
1784/* Remove an event_call */
1785int trace_remove_event_call(struct ftrace_event_call *call)
1786{
1787	int ret;
1788
1789	mutex_lock(&trace_types_lock);
1790	mutex_lock(&event_mutex);
1791	down_write(&trace_event_sem);
1792	ret = probe_remove_event_call(call);
1793	up_write(&trace_event_sem);
1794	mutex_unlock(&event_mutex);
1795	mutex_unlock(&trace_types_lock);
1796
1797	return ret;
1798}
1799
1800#define for_each_event(event, start, end)			\
1801	for (event = start;					\
1802	     (unsigned long)event < (unsigned long)end;		\
1803	     event++)
1804
1805#ifdef CONFIG_MODULES
1806
1807static void trace_module_add_events(struct module *mod)
1808{
1809	struct ftrace_event_call **call, **start, **end;
1810
1811	if (!mod->num_trace_events)
1812		return;
1813
1814	/* Don't add infrastructure for mods without tracepoints */
1815	if (trace_module_has_bad_taint(mod)) {
1816		pr_err("%s: module has bad taint, not creating trace events\n",
1817		       mod->name);
1818		return;
1819	}
1820
1821	start = mod->trace_events;
1822	end = mod->trace_events + mod->num_trace_events;
1823
1824	for_each_event(call, start, end) {
1825		__register_event(*call, mod);
1826		__add_event_to_tracers(*call);
1827	}
1828}
1829
1830static void trace_module_remove_events(struct module *mod)
1831{
1832	struct ftrace_event_call *call, *p;
1833	bool clear_trace = false;
1834
1835	down_write(&trace_event_sem);
1836	list_for_each_entry_safe(call, p, &ftrace_events, list) {
1837		if (call->mod == mod) {
1838			if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
1839				clear_trace = true;
1840			__trace_remove_event_call(call);
1841		}
1842	}
1843	up_write(&trace_event_sem);
1844
1845	/*
1846	 * It is safest to reset the ring buffer if the module being unloaded
1847	 * registered any events that were used. The only worry is if
1848	 * a new module gets loaded, and takes on the same id as the events
1849	 * of this module. When printing out the buffer, traced events left
1850	 * over from this module may be passed to the new module events and
1851	 * unexpected results may occur.
1852	 */
1853	if (clear_trace)
1854		tracing_reset_all_online_cpus();
1855}
1856
1857static int trace_module_notify(struct notifier_block *self,
1858			       unsigned long val, void *data)
1859{
1860	struct module *mod = data;
1861
1862	mutex_lock(&trace_types_lock);
1863	mutex_lock(&event_mutex);
1864	switch (val) {
1865	case MODULE_STATE_COMING:
1866		trace_module_add_events(mod);
1867		break;
1868	case MODULE_STATE_GOING:
1869		trace_module_remove_events(mod);
1870		break;
1871	}
1872	mutex_unlock(&event_mutex);
1873	mutex_unlock(&trace_types_lock);
1874
1875	return 0;
1876}
1877
1878static struct notifier_block trace_module_nb = {
1879	.notifier_call = trace_module_notify,
1880	.priority = 0,
1881};
1882#endif /* CONFIG_MODULES */
1883
1884/* Create a new event directory structure for a trace directory. */
1885static void
1886__trace_add_event_dirs(struct trace_array *tr)
1887{
1888	struct ftrace_event_call *call;
1889	int ret;
1890
1891	list_for_each_entry(call, &ftrace_events, list) {
1892		ret = __trace_add_new_event(call, tr);
1893		if (ret < 0)
1894			pr_warning("Could not create directory for event %s\n",
1895				   ftrace_event_name(call));
1896	}
1897}
1898
1899struct ftrace_event_file *
1900find_event_file(struct trace_array *tr, const char *system,  const char *event)
1901{
1902	struct ftrace_event_file *file;
1903	struct ftrace_event_call *call;
1904	const char *name;
1905
1906	list_for_each_entry(file, &tr->events, list) {
1907
1908		call = file->event_call;
1909		name = ftrace_event_name(call);
1910
1911		if (!name || !call->class || !call->class->reg)
1912			continue;
1913
1914		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
1915			continue;
1916
1917		if (strcmp(event, name) == 0 &&
1918		    strcmp(system, call->class->system) == 0)
1919			return file;
1920	}
1921	return NULL;
1922}
1923
1924#ifdef CONFIG_DYNAMIC_FTRACE
1925
1926/* Avoid typos */
1927#define ENABLE_EVENT_STR	"enable_event"
1928#define DISABLE_EVENT_STR	"disable_event"
1929
1930struct event_probe_data {
1931	struct ftrace_event_file	*file;
1932	unsigned long			count;
1933	int				ref;
1934	bool				enable;
1935};
1936
1937static void
1938event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
1939{
1940	struct event_probe_data **pdata = (struct event_probe_data **)_data;
1941	struct event_probe_data *data = *pdata;
1942
1943	if (!data)
1944		return;
1945
1946	if (data->enable)
1947		clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
1948	else
1949		set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
1950}
1951
1952static void
1953event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
1954{
1955	struct event_probe_data **pdata = (struct event_probe_data **)_data;
1956	struct event_probe_data *data = *pdata;
1957
1958	if (!data)
1959		return;
1960
1961	if (!data->count)
1962		return;
1963
1964	/* Skip if the event is in a state we want to switch to */
1965	if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
1966		return;
1967
1968	if (data->count != -1)
1969		(data->count)--;
1970
1971	event_enable_probe(ip, parent_ip, _data);
1972}
1973
1974static int
1975event_enable_print(struct seq_file *m, unsigned long ip,
1976		      struct ftrace_probe_ops *ops, void *_data)
1977{
1978	struct event_probe_data *data = _data;
1979
1980	seq_printf(m, "%ps:", (void *)ip);
1981
1982	seq_printf(m, "%s:%s:%s",
1983		   data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
1984		   data->file->event_call->class->system,
1985		   ftrace_event_name(data->file->event_call));
1986
1987	if (data->count == -1)
1988		seq_printf(m, ":unlimited\n");
1989	else
1990		seq_printf(m, ":count=%ld\n", data->count);
1991
1992	return 0;
1993}
1994
1995static int
1996event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
1997		  void **_data)
1998{
1999	struct event_probe_data **pdata = (struct event_probe_data **)_data;
2000	struct event_probe_data *data = *pdata;
2001
2002	data->ref++;
2003	return 0;
2004}
2005
2006static void
2007event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
2008		  void **_data)
2009{
2010	struct event_probe_data **pdata = (struct event_probe_data **)_data;
2011	struct event_probe_data *data = *pdata;
2012
2013	if (WARN_ON_ONCE(data->ref <= 0))
2014		return;
2015
2016	data->ref--;
2017	if (!data->ref) {
2018		/* Remove the SOFT_MODE flag */
2019		__ftrace_event_enable_disable(data->file, 0, 1);
2020		module_put(data->file->event_call->mod);
2021		kfree(data);
2022	}
2023	*pdata = NULL;
2024}
2025
2026static struct ftrace_probe_ops event_enable_probe_ops = {
2027	.func			= event_enable_probe,
2028	.print			= event_enable_print,
2029	.init			= event_enable_init,
2030	.free			= event_enable_free,
2031};
2032
2033static struct ftrace_probe_ops event_enable_count_probe_ops = {
2034	.func			= event_enable_count_probe,
2035	.print			= event_enable_print,
2036	.init			= event_enable_init,
2037	.free			= event_enable_free,
2038};
2039
2040static struct ftrace_probe_ops event_disable_probe_ops = {
2041	.func			= event_enable_probe,
2042	.print			= event_enable_print,
2043	.init			= event_enable_init,
2044	.free			= event_enable_free,
2045};
2046
2047static struct ftrace_probe_ops event_disable_count_probe_ops = {
2048	.func			= event_enable_count_probe,
2049	.print			= event_enable_print,
2050	.init			= event_enable_init,
2051	.free			= event_enable_free,
2052};
2053
2054static int
2055event_enable_func(struct ftrace_hash *hash,
2056		  char *glob, char *cmd, char *param, int enabled)
2057{
2058	struct trace_array *tr = top_trace_array();
2059	struct ftrace_event_file *file;
2060	struct ftrace_probe_ops *ops;
2061	struct event_probe_data *data;
2062	const char *system;
2063	const char *event;
2064	char *number;
2065	bool enable;
2066	int ret;
2067
2068	/* hash funcs only work with set_ftrace_filter */
2069	if (!enabled || !param)
2070		return -EINVAL;
2071
2072	system = strsep(&param, ":");
2073	if (!param)
2074		return -EINVAL;
2075
2076	event = strsep(&param, ":");
2077
2078	mutex_lock(&event_mutex);
2079
2080	ret = -EINVAL;
2081	file = find_event_file(tr, system, event);
2082	if (!file)
2083		goto out;
2084
2085	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
2086
2087	if (enable)
2088		ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
2089	else
2090		ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
2091
2092	if (glob[0] == '!') {
2093		unregister_ftrace_function_probe_func(glob+1, ops);
2094		ret = 0;
2095		goto out;
2096	}
2097
2098	ret = -ENOMEM;
2099	data = kzalloc(sizeof(*data), GFP_KERNEL);
2100	if (!data)
2101		goto out;
2102
2103	data->enable = enable;
2104	data->count = -1;
2105	data->file = file;
2106
2107	if (!param)
2108		goto out_reg;
2109
2110	number = strsep(&param, ":");
2111
2112	ret = -EINVAL;
2113	if (!strlen(number))
2114		goto out_free;
2115
2116	/*
2117	 * We use the callback data field (which is a pointer)
2118	 * as our counter.
2119	 */
2120	ret = kstrtoul(number, 0, &data->count);
2121	if (ret)
2122		goto out_free;
2123
2124 out_reg:
2125	/* Don't let event modules unload while probe registered */
2126	ret = try_module_get(file->event_call->mod);
2127	if (!ret) {
2128		ret = -EBUSY;
2129		goto out_free;
2130	}
2131
2132	ret = __ftrace_event_enable_disable(file, 1, 1);
2133	if (ret < 0)
2134		goto out_put;
2135	ret = register_ftrace_function_probe(glob, ops, data);
2136	/*
2137	 * The above returns on success the # of functions enabled,
2138	 * but if it didn't find any functions it returns zero.
2139	 * Consider no functions a failure too.
2140	 */
2141	if (!ret) {
2142		ret = -ENOENT;
2143		goto out_disable;
2144	} else if (ret < 0)
2145		goto out_disable;
2146	/* Just return zero, not the number of enabled functions */
2147	ret = 0;
2148 out:
2149	mutex_unlock(&event_mutex);
2150	return ret;
2151
2152 out_disable:
2153	__ftrace_event_enable_disable(file, 0, 1);
2154 out_put:
2155	module_put(file->event_call->mod);
2156 out_free:
2157	kfree(data);
2158	goto out;
2159}
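How the commands registered below are used: the glob and parameters come
in through the function filter file. A hedged sketch (debugfs path
assumed mounted at the usual place; function, event and count are purely
illustrative):

/*
 * Fire sched:sched_switch for the first two hits of schedule():
 *
 *   # echo 'schedule:enable_event:sched:sched_switch:2' > \
 *          /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * A leading '!' takes the probe back out again (the glob[0] == '!'
 * branch above):
 *
 *   # echo '!schedule:enable_event:sched:sched_switch:2' >> \
 *          /sys/kernel/debug/tracing/set_ftrace_filter
 */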
2160
2161static struct ftrace_func_command event_enable_cmd = {
2162	.name			= ENABLE_EVENT_STR,
2163	.func			= event_enable_func,
2164};
2165
2166static struct ftrace_func_command event_disable_cmd = {
2167	.name			= DISABLE_EVENT_STR,
2168	.func			= event_enable_func,
2169};
2170
2171static __init int register_event_cmds(void)
2172{
2173	int ret;
2174
2175	ret = register_ftrace_command(&event_enable_cmd);
2176	if (WARN_ON(ret < 0))
2177		return ret;
2178	ret = register_ftrace_command(&event_disable_cmd);
2179	if (WARN_ON(ret < 0))
2180		unregister_ftrace_command(&event_enable_cmd);
2181	return ret;
2182}
2183#else
2184static inline int register_event_cmds(void) { return 0; }
2185#endif /* CONFIG_DYNAMIC_FTRACE */
2186
2187/*
2188 * The top level array has already had its ftrace_event_file
2189 * descriptors created in order to allow for early events to
2190 * be recorded. This function is called after the debugfs has been
2191 * initialized, and we now have to create the files associated
2192 * to the events.
2193 */
2194static __init void
2195__trace_early_add_event_dirs(struct trace_array *tr)
2196{
2197	struct ftrace_event_file *file;
2198	int ret;
2199
2200
2201	list_for_each_entry(file, &tr->events, list) {
2202		ret = event_create_dir(tr->event_dir, file);
2203		if (ret < 0)
2204			pr_warning("Could not create directory for event %s\n",
2205				   ftrace_event_name(file->event_call));
2206	}
2207}
2208
2209/*
2210 * For early boot up, the top trace array needs to have
2211 * a list of events that can be enabled. This must be done before
2212 * the filesystem is set up in order to allow events to be traced
2213 * early.
2214 */
2215static __init void
2216__trace_early_add_events(struct trace_array *tr)
2217{
2218	struct ftrace_event_call *call;
2219	int ret;
2220
2221	list_for_each_entry(call, &ftrace_events, list) {
2222		/* Early boot up should not have any modules loaded */
2223		if (WARN_ON_ONCE(call->mod))
2224			continue;
2225
2226		ret = __trace_early_add_new_event(call, tr);
2227		if (ret < 0)
2228			pr_warning("Could not create early event %s\n",
2229				   ftrace_event_name(call));
2230	}
2231}
2232
2233/* Remove the event directory structure for a trace directory. */
2234static void
2235__trace_remove_event_dirs(struct trace_array *tr)
2236{
2237	struct ftrace_event_file *file, *next;
2238
2239	list_for_each_entry_safe(file, next, &tr->events, list)
2240		remove_event_file_dir(file);
2241}
2242
2243static void __add_event_to_tracers(struct ftrace_event_call *call)
2244{
2245	struct trace_array *tr;
2246
2247	list_for_each_entry(tr, &ftrace_trace_arrays, list)
2248		__trace_add_new_event(call, tr);
2249}
2250
2251extern struct ftrace_event_call *__start_ftrace_events[];
2252extern struct ftrace_event_call *__stop_ftrace_events[];
2253
2254static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2255
2256static __init int setup_trace_event(char *str)
2257{
2258	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
2259	ring_buffer_expanded = true;
2260	tracing_selftest_disabled = true;
2261
2262	return 1;
2263}
2264__setup("trace_event=", setup_trace_event);
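A boot-command-line sketch of the parameter registered above (the event
list is illustrative); the saved buffer is parsed later by
event_trace_enable():

/*
 *   trace_event=sched:sched_switch,irq:irq_handler_entry
 */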
2265
2266/* Expects to have event_mutex held when called */
2267static int
2268create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
2269{
2270	struct dentry *d_events;
2271	struct dentry *entry;
2272
2273	entry = debugfs_create_file("set_event", 0644, parent,
2274				    tr, &ftrace_set_event_fops);
2275	if (!entry) {
2276		pr_warning("Could not create debugfs 'set_event' entry\n");
2277		return -ENOMEM;
2278	}
2279
2280	d_events = debugfs_create_dir("events", parent);
2281	if (!d_events) {
2282		pr_warning("Could not create debugfs 'events' directory\n");
2283		return -ENOMEM;
2284	}
2285
2286	/* ring buffer internal formats */
2287	trace_create_file("header_page", 0444, d_events,
2288			  ring_buffer_print_page_header,
2289			  &ftrace_show_header_fops);
2290
2291	trace_create_file("header_event", 0444, d_events,
2292			  ring_buffer_print_entry_header,
2293			  &ftrace_show_header_fops);
2294
2295	trace_create_file("enable", 0644, d_events,
2296			  tr, &ftrace_tr_enable_fops);
2297
2298	tr->event_dir = d_events;
2299
2300	return 0;
2301}
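A sketch of what the function above leaves under an instance's directory
(file names taken from the calls above; layout illustrative):

/*
 *   <parent>/set_event
 *   <parent>/events/header_page
 *   <parent>/events/header_event
 *   <parent>/events/enable
 */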
2302
2303/**
2304 * event_trace_add_tracer - add an instance of a trace_array to events
2305 * @parent: The parent dentry to place the files/directories for events in
2306 * @tr: The trace array associated with these events
2307 *
2308 * When a new instance is created, it needs to set up its events
2309 * directory, as well as other files associated with events. It also
2310 * creates the event hierarchy in the @parent/events directory.
2311 *
2312 * Returns 0 on success.
2313 */
2314int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2315{
2316	int ret;
2317
2318	mutex_lock(&event_mutex);
2319
2320	ret = create_event_toplevel_files(parent, tr);
2321	if (ret)
2322		goto out_unlock;
2323
2324	down_write(&trace_event_sem);
2325	__trace_add_event_dirs(tr);
2326	up_write(&trace_event_sem);
2327
2328 out_unlock:
2329	mutex_unlock(&event_mutex);
2330
2331	return ret;
2332}
2333
2334/*
2335 * The top trace array already had its file descriptors created.
2336 * Now the files themselves need to be created.
2337 */
2338static __init int
2339early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2340{
2341	int ret;
2342
2343	mutex_lock(&event_mutex);
2344
2345	ret = create_event_toplevel_files(parent, tr);
2346	if (ret)
2347		goto out_unlock;
2348
2349	down_write(&trace_event_sem);
2350	__trace_early_add_event_dirs(tr);
2351	up_write(&trace_event_sem);
2352
2353 out_unlock:
2354	mutex_unlock(&event_mutex);
2355
2356	return ret;
2357}
2358
2359int event_trace_del_tracer(struct trace_array *tr)
2360{
2361	mutex_lock(&event_mutex);
2362
2363	/* Disable any event triggers and associated soft-disabled events */
2364	clear_event_triggers(tr);
2365
2366	/* Disable any running events */
2367	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
2368
2369	/* Access to events are within rcu_read_lock_sched() */
2370	synchronize_sched();
2371
2372	down_write(&trace_event_sem);
2373	__trace_remove_event_dirs(tr);
2374	debugfs_remove_recursive(tr->event_dir);
2375	up_write(&trace_event_sem);
2376
2377	tr->event_dir = NULL;
2378
2379	mutex_unlock(&event_mutex);
2380
2381	return 0;
2382}
2383
2384static __init int event_trace_memsetup(void)
2385{
2386	field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
2387	file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
2388	return 0;
2389}
2390
2391static __init int event_trace_enable(void)
2392{
2393	struct trace_array *tr = top_trace_array();
2394	struct ftrace_event_call **iter, *call;
2395	char *buf = bootup_event_buf;
2396	char *token;
2397	int ret;
2398
2399	for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
2400
2401		call = *iter;
2402		ret = event_init(call);
2403		if (!ret)
2404			list_add(&call->list, &ftrace_events);
2405	}
2406
2407	/*
2408	 * We need the top trace array to have a working set of trace
2409	 * points at early init, before the debug files and directories
2410	 * are created. Create the file entries now, and attach them
2411	 * to the actual file dentries later.
2412	 */
2413	__trace_early_add_events(tr);
2414
2415	while (true) {
2416		token = strsep(&buf, ",");
2417
2418		if (!token)
2419			break;
2420		if (!*token)
2421			continue;
2422
2423		ret = ftrace_set_clr_event(tr, token, 1);
2424		if (ret)
2425			pr_warn("Failed to enable trace event: %s\n", token);
2426	}
2427
2428	trace_printk_start_comm();
2429
2430	register_event_cmds();
2431
2432	register_trigger_cmds();
2433
2434	return 0;
2435}
2436
2437static __init int event_trace_init(void)
2438{
2439	struct trace_array *tr;
2440	struct dentry *d_tracer;
2441	struct dentry *entry;
2442	int ret;
2443
2444	tr = top_trace_array();
2445
2446	d_tracer = tracing_init_dentry();
2447	if (!d_tracer)
2448		return 0;
2449
2450	entry = debugfs_create_file("available_events", 0444, d_tracer,
2451				    tr, &ftrace_avail_fops);
2452	if (!entry)
2453		pr_warning("Could not create debugfs "
2454			   "'available_events' entry\n");
2455
2456	if (trace_define_common_fields())
2457		pr_warning("tracing: Failed to allocate common fields");
2458
2459	ret = early_event_add_tracer(d_tracer, tr);
2460	if (ret)
2461		return ret;
2462
2463#ifdef CONFIG_MODULES
2464	ret = register_module_notifier(&trace_module_nb);
2465	if (ret)
2466		pr_warning("Failed to register trace events module notifier\n");
2467#endif
2468	return 0;
2469}
2470early_initcall(event_trace_memsetup);
2471core_initcall(event_trace_enable);
2472fs_initcall(event_trace_init);
2473
2474#ifdef CONFIG_FTRACE_STARTUP_TEST
2475
2476static DEFINE_SPINLOCK(test_spinlock);
2477static DEFINE_SPINLOCK(test_spinlock_irq);
2478static DEFINE_MUTEX(test_mutex);
2479
2480static __init void test_work(struct work_struct *dummy)
2481{
2482	spin_lock(&test_spinlock);
2483	spin_lock_irq(&test_spinlock_irq);
2484	udelay(1);
2485	spin_unlock_irq(&test_spinlock_irq);
2486	spin_unlock(&test_spinlock);
2487
2488	mutex_lock(&test_mutex);
2489	msleep(1);
2490	mutex_unlock(&test_mutex);
2491}
2492
2493static __init int event_test_thread(void *unused)
2494{
2495	void *test_malloc;
2496
2497	test_malloc = kmalloc(1234, GFP_KERNEL);
2498	if (!test_malloc)
2499		pr_info("failed to kmalloc\n");
2500
2501	schedule_on_each_cpu(test_work);
2502
2503	kfree(test_malloc);
2504
2505	set_current_state(TASK_INTERRUPTIBLE);
2506	while (!kthread_should_stop())
2507		schedule();
2508
2509	return 0;
2510}
2511
2512/*
2513 * Do various things that may trigger events.
2514 */
2515static __init void event_test_stuff(void)
2516{
2517	struct task_struct *test_thread;
2518
2519	test_thread = kthread_run(event_test_thread, NULL, "test-events");
2520	msleep(1);
2521	kthread_stop(test_thread);
2522}
2523
2524/*
2525 * For every trace event defined, we will test each trace point separately,
2526 * and then by groups, and finally all trace points.
2527 */
2528static __init void event_trace_self_tests(void)
2529{
2530	struct ftrace_subsystem_dir *dir;
2531	struct ftrace_event_file *file;
2532	struct ftrace_event_call *call;
2533	struct event_subsystem *system;
2534	struct trace_array *tr;
2535	int ret;
2536
2537	tr = top_trace_array();
2538
2539	pr_info("Running tests on trace events:\n");
2540
2541	list_for_each_entry(file, &tr->events, list) {
2542
2543		call = file->event_call;
2544
2545		/* Only test those that have a probe */
2546		if (!call->class || !call->class->probe)
2547			continue;
2548
2549/*
2550 * Testing syscall events here is pretty useless, but
2551 * we still do it if configured, though it is time consuming.
2552 * What we really need is a user thread to perform the
2553 * syscalls as we test.
2554 */
2555#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
2556		if (call->class->system &&
2557		    strcmp(call->class->system, "syscalls") == 0)
2558			continue;
2559#endif
2560
2561		pr_info("Testing event %s: ", ftrace_event_name(call));
2562
2563		/*
2564		 * If an event is already enabled, someone is using
2565		 * it and the self test should not be on.
2566		 */
2567		if (file->flags & FTRACE_EVENT_FL_ENABLED) {
2568			pr_warning("Enabled event during self test!\n");
2569			WARN_ON_ONCE(1);
2570			continue;
2571		}
2572
2573		ftrace_event_enable_disable(file, 1);
2574		event_test_stuff();
2575		ftrace_event_enable_disable(file, 0);
2576
2577		pr_cont("OK\n");
2578	}
2579
2580	/* Now test at the sub system level */
2581
2582	pr_info("Running tests on trace event systems:\n");
2583
2584	list_for_each_entry(dir, &tr->systems, list) {
2585
2586		system = dir->subsystem;
2587
2588		/* the ftrace system is special, skip it */
2589		if (strcmp(system->name, "ftrace") == 0)
2590			continue;
2591
2592		pr_info("Testing event system %s: ", system->name);
2593
2594		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
2595		if (WARN_ON_ONCE(ret)) {
2596			pr_warning("error enabling system %s\n",
2597				   system->name);
2598			continue;
2599		}
2600
2601		event_test_stuff();
2602
2603		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
2604		if (WARN_ON_ONCE(ret)) {
2605			pr_warning("error disabling system %s\n",
2606				   system->name);
2607			continue;
2608		}
2609
2610		pr_cont("OK\n");
2611	}
2612
2613	/* Test with all events enabled */
2614
2615	pr_info("Running tests on all trace events:\n");
2616	pr_info("Testing all events: ");
2617
2618	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
2619	if (WARN_ON_ONCE(ret)) {
2620		pr_warning("error enabling all events\n");
2621		return;
2622	}
2623
2624	event_test_stuff();
2625
2626	/* reset sysname */
2627	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
2628	if (WARN_ON_ONCE(ret)) {
2629		pr_warning("error disabling all events\n");
2630		return;
2631	}
2632
2633	pr_cont("OK\n");
2634}
2635
2636#ifdef CONFIG_FUNCTION_TRACER
2637
2638static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
2639
2640static void
2641function_test_events_call(unsigned long ip, unsigned long parent_ip,
2642			  struct ftrace_ops *op, struct pt_regs *pt_regs)
2643{
2644	struct ring_buffer_event *event;
2645	struct ring_buffer *buffer;
2646	struct ftrace_entry *entry;
2647	unsigned long flags;
2648	long disabled;
2649	int cpu;
2650	int pc;
2651
2652	pc = preempt_count();
2653	preempt_disable_notrace();
2654	cpu = raw_smp_processor_id();
2655	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
2656
2657	if (disabled != 1)
2658		goto out;
2659
2660	local_save_flags(flags);
2661
2662	event = trace_current_buffer_lock_reserve(&buffer,
2663						  TRACE_FN, sizeof(*entry),
2664						  flags, pc);
2665	if (!event)
2666		goto out;
2667	entry	= ring_buffer_event_data(event);
2668	entry->ip			= ip;
2669	entry->parent_ip		= parent_ip;
2670
2671	trace_buffer_unlock_commit(buffer, event, flags, pc);
2672
2673 out:
2674	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
2675	preempt_enable_notrace();
2676}
2677
2678static struct ftrace_ops trace_ops __initdata  =
2679{
2680	.func = function_test_events_call,
2681	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
2682};
2683
2684static __init void event_trace_self_test_with_function(void)
2685{
2686	int ret;
2687	ret = register_ftrace_function(&trace_ops);
2688	if (WARN_ON(ret < 0)) {
2689		pr_info("Failed to enable function tracer for event tests\n");
2690		return;
2691	}
2692	pr_info("Running tests again, along with the function tracer\n");
2693	event_trace_self_tests();
2694	unregister_ftrace_function(&trace_ops);
2695}
2696#else
2697static __init void event_trace_self_test_with_function(void)
2698{
2699}
2700#endif
2701
2702static __init int event_trace_self_tests_init(void)
2703{
2704	if (!tracing_selftest_disabled) {
2705		event_trace_self_tests();
2706		event_trace_self_test_with_function();
2707	}
2708
2709	return 0;
2710}
2711
2712late_initcall(event_trace_self_tests_init);
2713
2714#endif
v3.1
   1/*
   2 * event tracer
   3 *
   4 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
   5 *
   6 *  - Added format output of fields of the trace point.
   7 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
   8 *
   9 */
  10
  11#include <linux/workqueue.h>
  12#include <linux/spinlock.h>
  13#include <linux/kthread.h>
  14#include <linux/debugfs.h>
  15#include <linux/uaccess.h>
  16#include <linux/module.h>
  17#include <linux/ctype.h>
  18#include <linux/slab.h>
  19#include <linux/delay.h>
  20
  21#include <asm/setup.h>
  22
  23#include "trace_output.h"
  24
  25#undef TRACE_SYSTEM
  26#define TRACE_SYSTEM "TRACE_SYSTEM"
  27
  28DEFINE_MUTEX(event_mutex);
  29
  30DEFINE_MUTEX(event_storage_mutex);
  31EXPORT_SYMBOL_GPL(event_storage_mutex);
  32
  33char event_storage[EVENT_STORAGE_SIZE];
  34EXPORT_SYMBOL_GPL(event_storage);
  35
  36LIST_HEAD(ftrace_events);
  37LIST_HEAD(ftrace_common_fields);
  38
  39struct list_head *
  40trace_get_fields(struct ftrace_event_call *event_call)
  41{
  42	if (!event_call->class->get_fields)
  43		return &event_call->class->fields;
  44	return event_call->class->get_fields(event_call);
  45}
  46
  47static int __trace_define_field(struct list_head *head, const char *type,
  48				const char *name, int offset, int size,
  49				int is_signed, int filter_type)
  50{
  51	struct ftrace_event_field *field;
  52
  53	field = kzalloc(sizeof(*field), GFP_KERNEL);
  54	if (!field)
  55		goto err;
  56
  57	field->name = kstrdup(name, GFP_KERNEL);
  58	if (!field->name)
  59		goto err;
  60
  61	field->type = kstrdup(type, GFP_KERNEL);
  62	if (!field->type)
  63		goto err;
  64
  65	if (filter_type == FILTER_OTHER)
  66		field->filter_type = filter_assign_type(type);
  67	else
  68		field->filter_type = filter_type;
  69
  70	field->offset = offset;
  71	field->size = size;
  72	field->is_signed = is_signed;
  73
  74	list_add(&field->link, head);
  75
  76	return 0;
  77
  78err:
  79	if (field)
  80		kfree(field->name);
  81	kfree(field);
  82
  83	return -ENOMEM;
  84}
  85
  86int trace_define_field(struct ftrace_event_call *call, const char *type,
  87		       const char *name, int offset, int size, int is_signed,
  88		       int filter_type)
  89{
  90	struct list_head *head;
  91
  92	if (WARN_ON(!call->class))
  93		return 0;
  94
  95	head = trace_get_fields(call);
  96	return __trace_define_field(head, type, name, offset, size,
  97				    is_signed, filter_type);
  98}
  99EXPORT_SYMBOL_GPL(trace_define_field);
 100
 101#define __common_field(type, item)					\
 102	ret = __trace_define_field(&ftrace_common_fields, #type,	\
 103				   "common_" #item,			\
 104				   offsetof(typeof(ent), item),		\
 105				   sizeof(ent.item),			\
 106				   is_signed_type(type), FILTER_OTHER);	\
 107	if (ret)							\
 108		return ret;
 109
 110static int trace_define_common_fields(void)
 111{
 112	int ret;
 113	struct trace_entry ent;
 114
 115	__common_field(unsigned short, type);
 116	__common_field(unsigned char, flags);
 117	__common_field(unsigned char, preempt_count);
 118	__common_field(int, pid);
 119	__common_field(int, padding);
 120
 121	return ret;
 122}
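These common fields are prepended to every event and show up at the top
of each event's "format" file. A hedged rendering (offsets follow struct
trace_entry and can vary by architecture and kernel version):

/*
 *	field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *	field:unsigned char common_flags;	offset:2;	size:1;	signed:0;
 *	field:unsigned char common_preempt_count;	offset:3;	size:1;	signed:0;
 *	field:int common_pid;	offset:4;	size:4;	signed:1;
 *	field:int common_padding;	offset:8;	size:4;	signed:1;
 */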
 123
 124void trace_destroy_fields(struct ftrace_event_call *call)
 125{
 126	struct ftrace_event_field *field, *next;
 127	struct list_head *head;
 128
 129	head = trace_get_fields(call);
 130	list_for_each_entry_safe(field, next, head, link) {
 131		list_del(&field->link);
 132		kfree(field->type);
 133		kfree(field->name);
 134		kfree(field);
 135	}
 136}
 137
 138int trace_event_raw_init(struct ftrace_event_call *call)
 139{
 140	int id;
 141
 142	id = register_ftrace_event(&call->event);
 143	if (!id)
 144		return -ENODEV;
 145
 146	return 0;
 147}
 148EXPORT_SYMBOL_GPL(trace_event_raw_init);
 149
 150int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
 151{
 152	switch (type) {
 153	case TRACE_REG_REGISTER:
 154		return tracepoint_probe_register(call->name,
 155						 call->class->probe,
 156						 call);
 157	case TRACE_REG_UNREGISTER:
 158		tracepoint_probe_unregister(call->name,
 159					    call->class->probe,
 160					    call);
 161		return 0;
 162
 163#ifdef CONFIG_PERF_EVENTS
 164	case TRACE_REG_PERF_REGISTER:
 165		return tracepoint_probe_register(call->name,
 166						 call->class->perf_probe,
 167						 call);
 168	case TRACE_REG_PERF_UNREGISTER:
 169		tracepoint_probe_unregister(call->name,
 170					    call->class->perf_probe,
 171					    call);
 172		return 0;
 173#endif
 174	}
 175	return 0;
 176}
 177EXPORT_SYMBOL_GPL(ftrace_event_reg);
 178
 179void trace_event_enable_cmd_record(bool enable)
 180{
 181	struct ftrace_event_call *call;
 182
 183	mutex_lock(&event_mutex);
 184	list_for_each_entry(call, &ftrace_events, list) {
 185		if (!(call->flags & TRACE_EVENT_FL_ENABLED))
 186			continue;
 187
 188		if (enable) {
 189			tracing_start_cmdline_record();
 190			call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
 191		} else {
 192			tracing_stop_cmdline_record();
 193			call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
 194		}
 195	}
 196	mutex_unlock(&event_mutex);
 197}
 198
 199static int ftrace_event_enable_disable(struct ftrace_event_call *call,
 200					int enable)
 201{
 202	int ret = 0;
 203
 204	switch (enable) {
 205	case 0:
 206		if (call->flags & TRACE_EVENT_FL_ENABLED) {
 207			call->flags &= ~TRACE_EVENT_FL_ENABLED;
 208			if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) {
 209				tracing_stop_cmdline_record();
 210				call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
 211			}
 212			call->class->reg(call, TRACE_REG_UNREGISTER);
 213		}
 214		break;
 215	case 1:
 216		if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
 217			if (trace_flags & TRACE_ITER_RECORD_CMD) {
 218				tracing_start_cmdline_record();
 219				call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
 220			}
 221			ret = call->class->reg(call, TRACE_REG_REGISTER);
 222			if (ret) {
 223				tracing_stop_cmdline_record();
 224				pr_info("event trace: Could not enable event "
 225					"%s\n", call->name);
 226				break;
 227			}
 228			call->flags |= TRACE_EVENT_FL_ENABLED;
 229		}
 230		break;
 231	}
 232
 233	return ret;
 234}
 235
 236static void ftrace_clear_events(void)
 237{
 238	struct ftrace_event_call *call;
 239
 240	mutex_lock(&event_mutex);
 241	list_for_each_entry(call, &ftrace_events, list) {
 242		ftrace_event_enable_disable(call, 0);
 243	}
 244	mutex_unlock(&event_mutex);
 245}
 246
 247static void __put_system(struct event_subsystem *system)
 248{
 249	struct event_filter *filter = system->filter;
 250
 251	WARN_ON_ONCE(system->ref_count == 0);
 252	if (--system->ref_count)
 253		return;
 254
 255	if (filter) {
 256		kfree(filter->filter_string);
 257		kfree(filter);
 258	}
 259	kfree(system->name);
 260	kfree(system);
 261}
 262
 263static void __get_system(struct event_subsystem *system)
 264{
 265	WARN_ON_ONCE(system->ref_count == 0);
 266	system->ref_count++;
 267}
 268
 269static void put_system(struct event_subsystem *system)
 270{
 271	mutex_lock(&event_mutex);
 272	__put_system(system);
 273	mutex_unlock(&event_mutex);
 274}
 275
 276/*
 277 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 278 */
 279static int __ftrace_set_clr_event(const char *match, const char *sub,
 280				  const char *event, int set)
 281{
 282	struct ftrace_event_call *call;
 283	int ret = -EINVAL;
 284
 285	mutex_lock(&event_mutex);
 286	list_for_each_entry(call, &ftrace_events, list) {
 287
 288		if (!call->name || !call->class || !call->class->reg)
 289			continue;
 290
 291		if (match &&
 292		    strcmp(match, call->name) != 0 &&
 293		    strcmp(match, call->class->system) != 0)
 294			continue;
 295
 296		if (sub && strcmp(sub, call->class->system) != 0)
 297			continue;
 298
 299		if (event && strcmp(event, call->name) != 0)
 300			continue;
 301
 302		ftrace_event_enable_disable(call, set);
 303
 304		ret = 0;
 305	}
 306	mutex_unlock(&event_mutex);
 307
 308	return ret;
 309}
 310
 311static int ftrace_set_clr_event(char *buf, int set)
 312{
 313	char *event = NULL, *sub = NULL, *match;
 314
 315	/*
 316	 * The buf format can be <subsystem>:<event-name>
 317	 *  *:<event-name> means any event by that name.
 318	 *  :<event-name> is the same.
 319	 *
 320	 *  <subsystem>:* means all events in that subsystem
 321	 *  <subsystem>: means the same.
 322	 *
 323	 *  <name> (no ':') means all events in a subsystem with
 324	 *  the name <name> or any event that matches <name>
 325	 */
 326
 327	match = strsep(&buf, ":");
 328	if (buf) {
 329		sub = match;
 330		event = buf;
 331		match = NULL;
 332
 333		if (!strlen(sub) || strcmp(sub, "*") == 0)
 334			sub = NULL;
 335		if (!strlen(event) || strcmp(event, "*") == 0)
 336			event = NULL;
 337	}
 338
 339	return __ftrace_set_clr_event(match, sub, event, set);
 340}
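The accepted formats are spelled out in the comment inside the function
above; a usage sketch against the set_event file that feeds this parser
(event names illustrative):

/*
 *   # echo 'sched:sched_switch' > /sys/kernel/debug/tracing/set_event
 *   # echo 'irq:' >> set_event		(every event in the irq system)
 *   # echo '!sched:sched_switch' >> set_event	(the '!', handled by
 *						 ftrace_event_write below,
 *						 disables it again)
 */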
 341
 342/**
 343 * trace_set_clr_event - enable or disable an event
 344 * @system: system name to match (NULL for any system)
 345 * @event: event name to match (NULL for all events, within system)
 346 * @set: 1 to enable, 0 to disable
 347 *
 348 * This is a way for other parts of the kernel to enable or disable
 349 * event recording.
 350 *
 351 * Returns 0 on success, -EINVAL if the parameters do not match any
 352 * registered events.
 353 */
 354int trace_set_clr_event(const char *system, const char *event, int set)
 355{
 356	return __ftrace_set_clr_event(NULL, system, event, set);
 357}
 358EXPORT_SYMBOL_GPL(trace_set_clr_event);
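A minimal in-kernel sketch of the helper just exported (the caller and
the chosen event are hypothetical):

/*
 *	if (trace_set_clr_event("sched", "sched_switch", 1))
 *		pr_warning("sched_switch not available\n");
 */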
 359
 360/* 128 should be much more than enough */
 361#define EVENT_BUF_SIZE		127
 362
 363static ssize_t
 364ftrace_event_write(struct file *file, const char __user *ubuf,
 365		   size_t cnt, loff_t *ppos)
 366{
 367	struct trace_parser parser;
 368	ssize_t read, ret;
 369
 370	if (!cnt)
 371		return 0;
 372
 373	ret = tracing_update_buffers();
 374	if (ret < 0)
 375		return ret;
 376
 377	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
 378		return -ENOMEM;
 379
 380	read = trace_get_user(&parser, ubuf, cnt, ppos);
 381
 382	if (read >= 0 && trace_parser_loaded((&parser))) {
 383		int set = 1;
 384
 385		if (*parser.buffer == '!')
 386			set = 0;
 387
 388		parser.buffer[parser.idx] = 0;
 389
 390		ret = ftrace_set_clr_event(parser.buffer + !set, set);
 391		if (ret)
 392			goto out_put;
 393	}
 394
 395	ret = read;
 396
 397 out_put:
 398	trace_parser_put(&parser);
 399
 400	return ret;
 401}
 402
 403static void *
 404t_next(struct seq_file *m, void *v, loff_t *pos)
 405{
 406	struct ftrace_event_call *call = v;
 407
 408	(*pos)++;
 409
 410	list_for_each_entry_continue(call, &ftrace_events, list) {
 411		/*
 412		 * The ftrace subsystem is for showing formats only.
 413		 * Its events cannot be enabled or disabled via the event files.
 414		 */
 415		if (call->class && call->class->reg)
 416			return call;
 417	}
 418
 419	return NULL;
 420}
 421
 422static void *t_start(struct seq_file *m, loff_t *pos)
 423{
 424	struct ftrace_event_call *call;
 425	loff_t l;
 426
 427	mutex_lock(&event_mutex);
 428
 429	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
 430	for (l = 0; l <= *pos; ) {
 431		call = t_next(m, call, &l);
 432		if (!call)
 433			break;
 434	}
 435	return call;
 436}
 437
 438static void *
 439s_next(struct seq_file *m, void *v, loff_t *pos)
 440{
 441	struct ftrace_event_call *call = v;
 442
 443	(*pos)++;
 444
 445	list_for_each_entry_continue(call, &ftrace_events, list) {
 446		if (call->flags & TRACE_EVENT_FL_ENABLED)
 447			return call;
 448	}
 449
 450	return NULL;
 451}
 452
 453static void *s_start(struct seq_file *m, loff_t *pos)
 454{
 455	struct ftrace_event_call *call;
 456	loff_t l;
 457
 458	mutex_lock(&event_mutex);
 459
 460	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
 461	for (l = 0; l <= *pos; ) {
 462		call = s_next(m, call, &l);
 463		if (!call)
 464			break;
 465	}
 466	return call;
 467}
 468
 469static int t_show(struct seq_file *m, void *v)
 470{
 471	struct ftrace_event_call *call = v;
 472
 473	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
 474		seq_printf(m, "%s:", call->class->system);
 475	seq_printf(m, "%s\n", call->name);
 476
 477	return 0;
 478}
 479
 480static void t_stop(struct seq_file *m, void *p)
 481{
 482	mutex_unlock(&event_mutex);
 483}
 484
 485static int
 486ftrace_event_seq_open(struct inode *inode, struct file *file)
 487{
 488	const struct seq_operations *seq_ops;
 489
 490	if ((file->f_mode & FMODE_WRITE) &&
 491	    (file->f_flags & O_TRUNC))
 492		ftrace_clear_events();
 493
 494	seq_ops = inode->i_private;
 495	return seq_open(file, seq_ops);
 496}
 497
 498static ssize_t
 499event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
 500		  loff_t *ppos)
 501{
 502	struct ftrace_event_call *call = filp->private_data;
 503	char *buf;
 504
 505	if (call->flags & TRACE_EVENT_FL_ENABLED)
 506		buf = "1\n";
 507	else
 508		buf = "0\n";
 509
 510	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
 511}
 512
 513static ssize_t
 514event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
 515		   loff_t *ppos)
 516{
 517	struct ftrace_event_call *call = filp->private_data;
 518	unsigned long val;
 519	int ret;
 520
 521	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 522	if (ret)
 523		return ret;
 524
 525	ret = tracing_update_buffers();
 526	if (ret < 0)
 527		return ret;
 528
 529	switch (val) {
 530	case 0:
 531	case 1:
 532		mutex_lock(&event_mutex);
 533		ret = ftrace_event_enable_disable(call, val);
 534		mutex_unlock(&event_mutex);
 535		break;
 536
 537	default:
 538		return -EINVAL;
 539	}
 540
 541	*ppos += cnt;
 542
 543	return ret ? ret : cnt;
 544}
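Per-event knob sketch, matching the read/write handlers above (paths
illustrative):

/*
 *   # echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 *   # cat /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 *   1
 */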
 545
 546static ssize_t
 547system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
 548		   loff_t *ppos)
 549{
 550	const char set_to_char[4] = { '?', '0', '1', 'X' };
 551	struct event_subsystem *system = filp->private_data;
 552	struct ftrace_event_call *call;
 553	char buf[2];
 554	int set = 0;
 555	int ret;
 556
 557	mutex_lock(&event_mutex);
 558	list_for_each_entry(call, &ftrace_events, list) {
 559		if (!call->name || !call->class || !call->class->reg)
 560			continue;
 561
 562		if (system && strcmp(call->class->system, system->name) != 0)
 563			continue;
 564
 565		/*
 566		 * We need to find out if all the events are set
 567		 * or if all events are cleared, or if we have
 568		 * a mixture.
 569		 */
 570		set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));
 571
 572		/*
 573		 * If we have a mixture, no need to look further.
 574		 */
 575		if (set == 3)
 576			break;
 577	}
 578	mutex_unlock(&event_mutex);
 579
 580	buf[0] = set_to_char[set];
 581	buf[1] = '\n';
 582
 583	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
 584
 585	return ret;
 586}
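The three readable states map straight to set_to_char above; a sketch:

/*
 *   # cat events/sched/enable
 *   X		(mixture: some sched events enabled, some not;
 *		 '0' would mean none enabled, '1' all enabled)
 */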
 587
 588static ssize_t
 589system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
 590		    loff_t *ppos)
 591{
 592	struct event_subsystem *system = filp->private_data;
 593	const char *name = NULL;
 594	unsigned long val;
 595	ssize_t ret;
 596
 597	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 598	if (ret)
 599		return ret;
 600
 601	ret = tracing_update_buffers();
 602	if (ret < 0)
 603		return ret;
 604
 605	if (val != 0 && val != 1)
 606		return -EINVAL;
 607
 608	/*
 609	 * Opening of "enable" adds a ref count to system,
 610	 * so the name is safe to use.
 611	 */
 612	if (system)
 613		name = system->name;
 614
 615	ret = __ftrace_set_clr_event(NULL, name, NULL, val);
 616	if (ret)
 617		goto out;
 618
 619	ret = cnt;
 620
 621out:
 622	*ppos += cnt;
 623
 624	return ret;
 625}
 626
 627enum {
 628	FORMAT_HEADER		= 1,
 629	FORMAT_FIELD_SEPERATOR	= 2,
 630	FORMAT_PRINTFMT		= 3,
 631};
 632
 633static void *f_next(struct seq_file *m, void *v, loff_t *pos)
 634{
 635	struct ftrace_event_call *call = m->private;
 636	struct ftrace_event_field *field;
 637	struct list_head *common_head = &ftrace_common_fields;
 638	struct list_head *head = trace_get_fields(call);
 639
 640	(*pos)++;
 641
 642	switch ((unsigned long)v) {
 643	case FORMAT_HEADER:
 644		if (unlikely(list_empty(common_head)))
 645			return NULL;
 646
 647		field = list_entry(common_head->prev,
 648				   struct ftrace_event_field, link);
 649		return field;
 650
 651	case FORMAT_FIELD_SEPERATOR:
 652		if (unlikely(list_empty(head)))
 653			return NULL;
 654
 655		field = list_entry(head->prev, struct ftrace_event_field, link);
 656		return field;
 657
 658	case FORMAT_PRINTFMT:
 659		/* all done */
 660		return NULL;
 661	}
 662
 663	field = v;
 664	if (field->link.prev == common_head)
 665		return (void *)FORMAT_FIELD_SEPERATOR;
 666	else if (field->link.prev == head)
 667		return (void *)FORMAT_PRINTFMT;
 668
 669	field = list_entry(field->link.prev, struct ftrace_event_field, link);
 670
 671	return field;
 672}
 673
 674static void *f_start(struct seq_file *m, loff_t *pos)
 675{
 676	loff_t l = 0;
 677	void *p;
 678
 679	/* Start by showing the header */
 680	if (!*pos)
 681		return (void *)FORMAT_HEADER;
 682
 683	p = (void *)FORMAT_HEADER;
 684	do {
 685		p = f_next(m, p, &l);
 686	} while (p && l < *pos);
 687
 688	return p;
 689}
 690
 691static int f_show(struct seq_file *m, void *v)
 692{
 693	struct ftrace_event_call *call = m->private;
 694	struct ftrace_event_field *field;
 695	const char *array_descriptor;
 696
 697	switch ((unsigned long)v) {
 698	case FORMAT_HEADER:
 699		seq_printf(m, "name: %s\n", call->name);
 700		seq_printf(m, "ID: %d\n", call->event.type);
 701		seq_printf(m, "format:\n");
 702		return 0;
 703
 704	case FORMAT_FIELD_SEPERATOR:
 705		seq_putc(m, '\n');
 706		return 0;
 707
 708	case FORMAT_PRINTFMT:
 709		seq_printf(m, "\nprint fmt: %s\n",
 710			   call->print_fmt);
 711		return 0;
 712	}
 713
 714	field = v;
 715
 716	/*
 717	 * Smartly shows the array type (except dynamic arrays).
 718	 * Normal:
 719	 *	field:TYPE VAR
 720	 * If TYPE := TYPE[LEN], it is shown:
 721	 *	field:TYPE VAR[LEN]
 722	 */
 723	array_descriptor = strchr(field->type, '[');
 724
 725	if (!strncmp(field->type, "__data_loc", 10))
 726		array_descriptor = NULL;
 727
 728	if (!array_descriptor)
 729		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
 730			   field->type, field->name, field->offset,
 731			   field->size, !!field->is_signed);
 732	else
 733		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
 734			   (int)(array_descriptor - field->type),
 735			   field->type, field->name,
 736			   array_descriptor, field->offset,
 737			   field->size, !!field->is_signed);
 738
 739	return 0;
 740}
 741
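An illustrative rendering of one event's "format" file as produced by
the f_start/f_next/f_show iterator above (event name, ID, offsets and
print fmt all vary per event and kernel build):

/*
 *   name: sched_switch
 *   ID: 51
 *   format:
 *	field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *	...
 *
 *	field:char prev_comm[16];	offset:12;	size:16;	signed:1;
 *	...
 *
 *   print fmt: "prev_comm=%s prev_pid=%d ...", REC->prev_comm, REC->prev_pid
 */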
 742static void f_stop(struct seq_file *m, void *p)
 743{
 744}
 745
 746static const struct seq_operations trace_format_seq_ops = {
 747	.start		= f_start,
 748	.next		= f_next,
 749	.stop		= f_stop,
 750	.show		= f_show,
 751};
 752
 753static int trace_format_open(struct inode *inode, struct file *file)
 754{
 755	struct ftrace_event_call *call = inode->i_private;
 756	struct seq_file *m;
 757	int ret;
 758
 759	ret = seq_open(file, &trace_format_seq_ops);
 760	if (ret < 0)
 761		return ret;
 762
 763	m = file->private_data;
 764	m->private = call;
 765
 766	return 0;
 767}
 768
 769static ssize_t
 770event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 771{
 772	struct ftrace_event_call *call = filp->private_data;
 773	struct trace_seq *s;
 774	int r;
 775
 776	if (*ppos)
 777		return 0;
 778
 779	s = kmalloc(sizeof(*s), GFP_KERNEL);
 780	if (!s)
 781		return -ENOMEM;
 782
 783	trace_seq_init(s);
 784	trace_seq_printf(s, "%d\n", call->event.type);
 785
 786	r = simple_read_from_buffer(ubuf, cnt, ppos,
 787				    s->buffer, s->len);
 788	kfree(s);
 789	return r;
 790}
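A sketch of the "id" file this handler backs; the number is what perf
expects in perf_event_attr.config for PERF_TYPE_TRACEPOINT (value
illustrative):

/*
 *   # cat events/sched/sched_switch/id
 *   51
 */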
 791
 792static ssize_t
 793event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
 794		  loff_t *ppos)
 795{
 796	struct ftrace_event_call *call = filp->private_data;
 797	struct trace_seq *s;
 798	int r;
 799
 800	if (*ppos)
 801		return 0;
 802
 803	s = kmalloc(sizeof(*s), GFP_KERNEL);
 804	if (!s)
 805		return -ENOMEM;
 806
 807	trace_seq_init(s);
 808
 809	print_event_filter(call, s);
 810	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
 811
 812	kfree(s);
 813
 814	return r;
 815}
 816
 817static ssize_t
 818event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 819		   loff_t *ppos)
 820{
 821	struct ftrace_event_call *call = filp->private_data;
 822	char *buf;
 823	int err;
 824
 825	if (cnt >= PAGE_SIZE)
 826		return -EINVAL;
 827
 828	buf = (char *)__get_free_page(GFP_TEMPORARY);
 829	if (!buf)
 830		return -ENOMEM;
 831
 832	if (copy_from_user(buf, ubuf, cnt)) {
 833		free_page((unsigned long) buf);
 834		return -EFAULT;
 835	}
 836	buf[cnt] = '\0';
 837
 838	err = apply_event_filter(call, buf);
 839	free_page((unsigned long) buf);
 840	if (err < 0)
 841		return err;
 842
 843	*ppos += cnt;
 844
 845	return cnt;
 846}
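Filter sketch for the handlers above (the field names belong to
sched_switch and are illustrative); writing "0" clears an existing
filter:

/*
 *   # echo 'prev_pid != 0 && next_comm ~ "bash"' > \
 *          events/sched/sched_switch/filter
 *   # echo 0 > events/sched/sched_switch/filter
 */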
 847
 848static LIST_HEAD(event_subsystems);
 849
 850static int subsystem_open(struct inode *inode, struct file *filp)
 851{
 852	struct event_subsystem *system = NULL;
 853	int ret;
 854
 855	if (!inode->i_private)
 856		goto skip_search;
 857
 858	/* Make sure the system still exists */
 859	mutex_lock(&event_mutex);
 860	list_for_each_entry(system, &event_subsystems, list) {
 861		if (system == inode->i_private) {
 862			/* Don't open systems with no events */
 863			if (!system->nr_events) {
 864				system = NULL;
 865				break;
 866			}
 867			__get_system(system);
 868			break;
 869		}
 870	}
 871	mutex_unlock(&event_mutex);
 872
 873	if (system != inode->i_private)
 874		return -ENODEV;
 875
 876 skip_search:
 877	ret = tracing_open_generic(inode, filp);
 878	if (ret < 0 && system)
 879		put_system(system);
 880
 881	return ret;
 882}
 883
 884static int subsystem_release(struct inode *inode, struct file *file)
 885{
 886	struct event_subsystem *system = inode->i_private;
 887
 888	if (system)
 889		put_system(system);
 890
 891	return 0;
 892}
 893
 894static ssize_t
 895subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
 896		      loff_t *ppos)
 897{
 898	struct event_subsystem *system = filp->private_data;
 899	struct trace_seq *s;
 900	int r;
 901
 902	if (*ppos)
 903		return 0;
 904
 905	s = kmalloc(sizeof(*s), GFP_KERNEL);
 906	if (!s)
 907		return -ENOMEM;
 908
 909	trace_seq_init(s);
 910
 911	print_subsystem_event_filter(system, s);
 912	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
 913
 914	kfree(s);
 915
 916	return r;
 917}
 918
 919static ssize_t
 920subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 921		       loff_t *ppos)
 922{
 923	struct event_subsystem *system = filp->private_data;
 924	char *buf;
 925	int err;
 926
 927	if (cnt >= PAGE_SIZE)
 928		return -EINVAL;
 929
 930	buf = (char *)__get_free_page(GFP_TEMPORARY);
 931	if (!buf)
 932		return -ENOMEM;
 933
 934	if (copy_from_user(buf, ubuf, cnt)) {
 935		free_page((unsigned long) buf);
 936		return -EFAULT;
 937	}
 938	buf[cnt] = '\0';
 939
 940	err = apply_subsystem_event_filter(system, buf);
 941	free_page((unsigned long) buf);
 942	if (err < 0)
 943		return err;
 944
 945	*ppos += cnt;
 946
 947	return cnt;
 948}
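Same syntax one level up: a subsystem filter is applied to every event
in the system (sketch; the predicate is illustrative):

/*
 *   # echo 'common_pid == 1' > events/sched/filter
 */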
 949
 950static ssize_t
 951show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 952{
 953	int (*func)(struct trace_seq *s) = filp->private_data;
 954	struct trace_seq *s;
 955	int r;
 956
 957	if (*ppos)
 958		return 0;
 959
 960	s = kmalloc(sizeof(*s), GFP_KERNEL);
 961	if (!s)
 962		return -ENOMEM;
 963
 964	trace_seq_init(s);
 965
 966	func(s);
 967	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
 968
 969	kfree(s);
 970
 971	return r;
 972}
 973
 974static const struct seq_operations show_event_seq_ops = {
 975	.start = t_start,
 976	.next = t_next,
 977	.show = t_show,
 978	.stop = t_stop,
 979};
 980
 981static const struct seq_operations show_set_event_seq_ops = {
 982	.start = s_start,
 983	.next = s_next,
 984	.show = t_show,
 985	.stop = t_stop,
 986};
 987
 988static const struct file_operations ftrace_avail_fops = {
 989	.open = ftrace_event_seq_open,
 990	.read = seq_read,
 991	.llseek = seq_lseek,
 992	.release = seq_release,
 993};
 994
 995static const struct file_operations ftrace_set_event_fops = {
 996	.open = ftrace_event_seq_open,
 997	.read = seq_read,
 998	.write = ftrace_event_write,
 999	.llseek = seq_lseek,
1000	.release = seq_release,
1001};
1002
1003static const struct file_operations ftrace_enable_fops = {
1004	.open = tracing_open_generic,
1005	.read = event_enable_read,
1006	.write = event_enable_write,
1007	.llseek = default_llseek,
1008};
1009
1010static const struct file_operations ftrace_event_format_fops = {
1011	.open = trace_format_open,
1012	.read = seq_read,
1013	.llseek = seq_lseek,
1014	.release = seq_release,
1015};
1016
1017static const struct file_operations ftrace_event_id_fops = {
1018	.open = tracing_open_generic,
1019	.read = event_id_read,
1020	.llseek = default_llseek,
1021};
1022
1023static const struct file_operations ftrace_event_filter_fops = {
1024	.open = tracing_open_generic,
1025	.read = event_filter_read,
1026	.write = event_filter_write,
1027	.llseek = default_llseek,
1028};
1029
1030static const struct file_operations ftrace_subsystem_filter_fops = {
1031	.open = subsystem_open,
1032	.read = subsystem_filter_read,
1033	.write = subsystem_filter_write,
1034	.llseek = default_llseek,
1035	.release = subsystem_release,
1036};
1037
1038static const struct file_operations ftrace_system_enable_fops = {
1039	.open = subsystem_open,
1040	.read = system_enable_read,
1041	.write = system_enable_write,
1042	.llseek = default_llseek,
1043	.release = subsystem_release,
1044};
1045
1046static const struct file_operations ftrace_show_header_fops = {
1047	.open = tracing_open_generic,
1048	.read = show_header,
1049	.llseek = default_llseek,
1050};
1051
1052static struct dentry *event_trace_events_dir(void)
1053{
1054	static struct dentry *d_tracer;
1055	static struct dentry *d_events;
1056
1057	if (d_events)
1058		return d_events;
1059
1060	d_tracer = tracing_init_dentry();
1061	if (!d_tracer)
1062		return NULL;
1063
1064	d_events = debugfs_create_dir("events", d_tracer);
1065	if (!d_events)
1066		pr_warning("Could not create debugfs "
1067			   "'events' directory\n");
1068
1069	return d_events;
1070}
1071
1072static struct dentry *
1073event_subsystem_dir(const char *name, struct dentry *d_events)
1074{
1075	struct event_subsystem *system;
1076	struct dentry *entry;
1077
1078	/* First see if we did not already create this dir */
1079	list_for_each_entry(system, &event_subsystems, list) {
1080		if (strcmp(system->name, name) == 0) {
1081			__get_system(system);
1082			system->nr_events++;
1083			return system->entry;
1084		}
1085	}
1086
1087	/* need to create new entry */
1088	system = kmalloc(sizeof(*system), GFP_KERNEL);
1089	if (!system) {
1090		pr_warning("No memory to create event subsystem %s\n",
1091			   name);
1092		return d_events;
1093	}
1094
1095	system->entry = debugfs_create_dir(name, d_events);
1096	if (!system->entry) {
1097		pr_warning("Could not create event subsystem %s\n",
1098			   name);
1099		kfree(system);
1100		return d_events;
1101	}
1102
1103	system->nr_events = 1;
1104	system->ref_count = 1;
1105	system->name = kstrdup(name, GFP_KERNEL);
1106	if (!system->name) {
1107		debugfs_remove(system->entry);
1108		kfree(system);
1109		return d_events;
1110	}
1111
1112	list_add(&system->list, &event_subsystems);
1113
1114	system->filter = NULL;
1115
1116	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
1117	if (!system->filter) {
1118		pr_warning("Could not allocate filter for subsystem "
1119			   "'%s'\n", name);
1120		return system->entry;
1121	}
1122
1123	entry = debugfs_create_file("filter", 0644, system->entry, system,
1124				    &ftrace_subsystem_filter_fops);
1125	if (!entry) {
1126		kfree(system->filter);
1127		system->filter = NULL;
1128		pr_warning("Could not create debugfs "
1129			   "'%s/filter' entry\n", name);
1130	}
1131
1132	trace_create_file("enable", 0644, system->entry, system,
1133			  &ftrace_system_enable_fops);
1134
1135	return system->entry;
1136}
1137
1138static int
1139event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
1140		 const struct file_operations *id,
1141		 const struct file_operations *enable,
1142		 const struct file_operations *filter,
1143		 const struct file_operations *format)
1144{
1145	struct list_head *head;
1146	int ret;
1147
1148	/*
1149	 * If the trace point header did not define TRACE_SYSTEM
1150	 * then the system would be called "TRACE_SYSTEM".
1151	 */
1152	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
1153		d_events = event_subsystem_dir(call->class->system, d_events);
1154
1155	call->dir = debugfs_create_dir(call->name, d_events);
1156	if (!call->dir) {
1157		pr_warning("Could not create debugfs "
1158			   "'%s' directory\n", call->name);
1159		return -1;
1160	}
1161
1162	if (call->class->reg)
1163		trace_create_file("enable", 0644, call->dir, call,
1164				  enable);
1165
1166#ifdef CONFIG_PERF_EVENTS
1167	if (call->event.type && call->class->reg)
1168		trace_create_file("id", 0444, call->dir, call,
1169		 		  id);
1170#endif
1171
1172	/*
1173	 * Other events may have the same class. Only update
1174	 * the fields if they are not already defined.
1175	 */
1176	head = trace_get_fields(call);
1177	if (list_empty(head)) {
1178		ret = call->class->define_fields(call);
1179		if (ret < 0) {
1180			pr_warning("Could not initialize trace point"
1181				   " events/%s\n", call->name);
1182			return ret;
1183		}
1184	}
1185	trace_create_file("filter", 0644, call->dir, call,
1186			  filter);
1187
1188	trace_create_file("format", 0444, call->dir, call,
1189			  format);
1190
1191	return 0;
1192}
1193
1194static int
1195__trace_add_event_call(struct ftrace_event_call *call, struct module *mod,
1196		       const struct file_operations *id,
1197		       const struct file_operations *enable,
1198		       const struct file_operations *filter,
1199		       const struct file_operations *format)
1200{
1201	struct dentry *d_events;
1202	int ret;
1203
1204	/* The linker may leave blanks */
1205	if (!call->name)
1206		return -EINVAL;
1207
1208	if (call->class->raw_init) {
1209		ret = call->class->raw_init(call);
1210		if (ret < 0) {
1211			if (ret != -ENOSYS)
1212				pr_warning("Could not initialize trace events/%s\n",
1213					   call->name);
1214			return ret;
1215		}
1216	}
1217
1218	d_events = event_trace_events_dir();
1219	if (!d_events)
1220		return -ENOENT;
1221
1222	ret = event_create_dir(call, d_events, id, enable, filter, format);
1223	if (!ret)
1224		list_add(&call->list, &ftrace_events);
1225	call->mod = mod;
1226
1227	return ret;
1228}
1229
1230/* Add an additional event_call dynamically */
1231int trace_add_event_call(struct ftrace_event_call *call)
1232{
1233	int ret;
1234	mutex_lock(&event_mutex);
1235	ret = __trace_add_event_call(call, NULL, &ftrace_event_id_fops,
1236				     &ftrace_enable_fops,
1237				     &ftrace_event_filter_fops,
1238				     &ftrace_event_format_fops);
1239	mutex_unlock(&event_mutex);
1240	return ret;
1241}
1242
1243static void remove_subsystem_dir(const char *name)
1244{
1245	struct event_subsystem *system;
1246
1247	if (strcmp(name, TRACE_SYSTEM) == 0)
1248		return;
1249
1250	list_for_each_entry(system, &event_subsystems, list) {
1251		if (strcmp(system->name, name) == 0) {
1252			if (!--system->nr_events) {
1253				debugfs_remove_recursive(system->entry);
1254				list_del(&system->list);
1255				__put_system(system);
1256			}
1257			break;
1258		}
1259	}
1260}
1261
1262/*
1263 * Must be called with both event_mutex and trace_event_mutex held.
1264 */
1265static void __trace_remove_event_call(struct ftrace_event_call *call)
1266{
1267	ftrace_event_enable_disable(call, 0);
1268	if (call->event.funcs)
1269		__unregister_ftrace_event(&call->event);
1270	debugfs_remove_recursive(call->dir);
1271	list_del(&call->list);
1272	trace_destroy_fields(call);
1273	destroy_preds(call);
1274	remove_subsystem_dir(call->class->system);
1275}
1276
1277/* Remove an event_call */
1278void trace_remove_event_call(struct ftrace_event_call *call)
1279{
1280	mutex_lock(&event_mutex);
1281	down_write(&trace_event_mutex);
1282	__trace_remove_event_call(call);
1283	up_write(&trace_event_mutex);
1284	mutex_unlock(&event_mutex);
1285}
1286
1287#define for_each_event(event, start, end)			\
1288	for (event = start;					\
1289	     (unsigned long)event < (unsigned long)end;		\
1290	     event++)
1291
1292#ifdef CONFIG_MODULES
1293
1294static LIST_HEAD(ftrace_module_file_list);
1295
1296/*
1297 * Modules must own their file_operations to keep up with
1298 * reference counting.
1299 */
1300struct ftrace_module_file_ops {
1301	struct list_head		list;
1302	struct module			*mod;
1303	struct file_operations		id;
1304	struct file_operations		enable;
1305	struct file_operations		format;
1306	struct file_operations		filter;
1307};
1308
1309static struct ftrace_module_file_ops *
1310trace_create_file_ops(struct module *mod)
1311{
1312	struct ftrace_module_file_ops *file_ops;
1313
1314	/*
1315	 * This is a bit of a PITA. To allow for correct reference
1316	 * counting, modules must "own" their file_operations.
1317	 * To do this, we allocate the file operations that will be
1318	 * used in the event directory.
1319	 */
1320
1321	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
1322	if (!file_ops)
1323		return NULL;
1324
1325	file_ops->mod = mod;
1326
1327	file_ops->id = ftrace_event_id_fops;
1328	file_ops->id.owner = mod;
1329
1330	file_ops->enable = ftrace_enable_fops;
1331	file_ops->enable.owner = mod;
1332
1333	file_ops->filter = ftrace_event_filter_fops;
1334	file_ops->filter.owner = mod;
1335
1336	file_ops->format = ftrace_event_format_fops;
1337	file_ops->format.owner = mod;
1338
1339	list_add(&file_ops->list, &ftrace_module_file_list);
1340
1341	return file_ops;
1342}
1343
1344static void trace_module_add_events(struct module *mod)
1345{
1346	struct ftrace_module_file_ops *file_ops = NULL;
1347	struct ftrace_event_call **call, **start, **end;
1348
1349	start = mod->trace_events;
1350	end = mod->trace_events + mod->num_trace_events;
1351
1352	if (start == end)
1353		return;
1354
1355	file_ops = trace_create_file_ops(mod);
1356	if (!file_ops)
1357		return;
1358
1359	for_each_event(call, start, end) {
1360		__trace_add_event_call(*call, mod,
1361				       &file_ops->id, &file_ops->enable,
1362				       &file_ops->filter, &file_ops->format);
1363	}
1364}
1365
1366static void trace_module_remove_events(struct module *mod)
1367{
1368	struct ftrace_module_file_ops *file_ops;
1369	struct ftrace_event_call *call, *p;
1370	bool found = false;
1371
1372	down_write(&trace_event_mutex);
1373	list_for_each_entry_safe(call, p, &ftrace_events, list) {
1374		if (call->mod == mod) {
1375			found = true;
1376			__trace_remove_event_call(call);
1377		}
1378	}
1379
1380	/* Now free the file_operations */
1381	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1382		if (file_ops->mod == mod)
1383			break;
1384	}
1385	if (&file_ops->list != &ftrace_module_file_list) {
1386		list_del(&file_ops->list);
1387		kfree(file_ops);
1388	}
1389
1390	/*
1391	 * It is safest to reset the ring buffer if the module being unloaded
1392	 * registered any events.
1393	 */
1394	if (found)
1395		tracing_reset_current_online_cpus();
1396	up_write(&trace_event_mutex);
1397}
1398
1399static int trace_module_notify(struct notifier_block *self,
1400			       unsigned long val, void *data)
1401{
1402	struct module *mod = data;
1403
1404	mutex_lock(&event_mutex);
1405	switch (val) {
1406	case MODULE_STATE_COMING:
1407		trace_module_add_events(mod);
1408		break;
1409	case MODULE_STATE_GOING:
1410		trace_module_remove_events(mod);
1411		break;
1412	}
1413	mutex_unlock(&event_mutex);
1414
1415	return 0;
1416}
1417#else
1418static int trace_module_notify(struct notifier_block *self,
1419			       unsigned long val, void *data)
1420{
1421	return 0;
1422}
1423#endif /* CONFIG_MODULES */
1424
1425static struct notifier_block trace_module_nb = {
1426	.notifier_call = trace_module_notify,
1427	.priority = 0,
1428};
1429
1430extern struct ftrace_event_call *__start_ftrace_events[];
1431extern struct ftrace_event_call *__stop_ftrace_events[];
1432
1433static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
1434
1435static __init int setup_trace_event(char *str)
1436{
1437	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
1438	ring_buffer_expanded = 1;
1439	tracing_selftest_disabled = 1;
1440
1441	return 1;
1442}
1443__setup("trace_event=", setup_trace_event);
1444
1445static __init int event_trace_init(void)
1446{
1447	struct ftrace_event_call **call;
1448	struct dentry *d_tracer;
1449	struct dentry *entry;
1450	struct dentry *d_events;
1451	int ret;
1452	char *buf = bootup_event_buf;
1453	char *token;
1454
1455	d_tracer = tracing_init_dentry();
1456	if (!d_tracer)
1457		return 0;
1458
1459	entry = debugfs_create_file("available_events", 0444, d_tracer,
1460				    (void *)&show_event_seq_ops,
1461				    &ftrace_avail_fops);
1462	if (!entry)
1463		pr_warning("Could not create debugfs "
1464			   "'available_events' entry\n");
1465
1466	entry = debugfs_create_file("set_event", 0644, d_tracer,
1467				    (void *)&show_set_event_seq_ops,
1468				    &ftrace_set_event_fops);
1469	if (!entry)
1470		pr_warning("Could not create debugfs "
1471			   "'set_event' entry\n");
1472
1473	d_events = event_trace_events_dir();
1474	if (!d_events)
1475		return 0;
1476
1477	/* ring buffer internal formats */
1478	trace_create_file("header_page", 0444, d_events,
1479			  ring_buffer_print_page_header,
1480			  &ftrace_show_header_fops);
1481
1482	trace_create_file("header_event", 0444, d_events,
1483			  ring_buffer_print_entry_header,
1484			  &ftrace_show_header_fops);
1485
1486	trace_create_file("enable", 0644, d_events,
1487			  NULL, &ftrace_system_enable_fops);
1488
1489	if (trace_define_common_fields())
1490		pr_warning("tracing: Failed to allocate common fields");
1491
1492	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
1493		__trace_add_event_call(*call, NULL, &ftrace_event_id_fops,
1494				       &ftrace_enable_fops,
1495				       &ftrace_event_filter_fops,
1496				       &ftrace_event_format_fops);
1497	}
1498
1499	while (true) {
1500		token = strsep(&buf, ",");
1501
1502		if (!token)
1503			break;
1504		if (!*token)
1505			continue;
1506
1507		ret = ftrace_set_clr_event(token, 1);
1508		if (ret)
1509			pr_warning("Failed to enable trace event: %s\n", token);
1510	}
1511
	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

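/*
 * Exercise spinlocks, irq-safe spinlocks, mutexes and delays so that
 * the corresponding trace events (if enabled) fire during the tests.
 */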
static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

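/*
 * Self test helper thread: do a kmalloc/kfree, run test_work() on
 * every CPU, then sleep until the self test asks us to stop.
 */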
static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		/* wakeup left us running; re-arm before checking again */
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	if (WARN_ON(IS_ERR(test_thread)))
		return;
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

/*
 * Testing syscall events here is pretty useless, but we still do it
 * if configured. It is time consuming, though. What we really need
 * is a user thread to perform the syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* disable all events again */
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

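/*
 * Function tracer callback: hand-craft a TRACE_FN entry in the ring
 * buffer. The per-CPU disable counter keeps the callback from
 * recursing into itself via the tracing it triggers.
 */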
static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __initdata = {
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	int ret;

	ret = register_ftrace_function(&trace_ops);
	if (WARN_ON(ret < 0)) {
		pr_info("Failed to enable function tracer for event tests\n");
		return;
	}
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif /* CONFIG_FUNCTION_TRACER */

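/* Run the self tests at late_initcall time unless disabled on boot */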
static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */