   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * trace_events_trigger - trace event triggers
   4 *
   5 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
   6 */
   7
   8#include <linux/security.h>
   9#include <linux/module.h>
  10#include <linux/ctype.h>
  11#include <linux/mutex.h>
  12#include <linux/slab.h>
  13#include <linux/rculist.h>
  14
  15#include "trace.h"
  16
  17static LIST_HEAD(trigger_commands);
  18static DEFINE_MUTEX(trigger_cmd_mutex);
  19
  20void trigger_data_free(struct event_trigger_data *data)
  21{
  22	if (data->cmd_ops->set_filter)
  23		data->cmd_ops->set_filter(NULL, data, NULL);
  24
  25	/* make sure current triggers exit before free */
  26	tracepoint_synchronize_unregister();
  27
  28	kfree(data);
  29}
  30
  31/**
  32 * event_triggers_call - Call triggers associated with a trace event
  33 * @file: The trace_event_file associated with the event
  34 * @buffer: The ring buffer that the event is being written to
  35 * @rec: The trace entry for the event, NULL for unconditional invocation
  36 * @event: The event meta data in the ring buffer
  37 *
  38 * For each trigger associated with an event, invoke the trigger
  39 * function registered with the associated trigger command.  If rec is
  40 * non-NULL, it means that the trigger requires further processing and
  41 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
   42 * trigger has a filter associated with it, rec will be checked against
  43 * the filter and if the record matches the trigger will be invoked.
  44 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
  45 * in any case until the current event is written, the trigger
  46 * function isn't invoked but the bit associated with the deferred
  47 * trigger is set in the return value.
  48 *
  49 * Returns an enum event_trigger_type value containing a set bit for
  50 * any trigger that should be deferred, ETT_NONE if nothing to defer.
  51 *
  52 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
  53 *
  54 * Return: an enum event_trigger_type value containing a set bit for
  55 * any trigger that should be deferred, ETT_NONE if nothing to defer.
  56 */
  57enum event_trigger_type
  58event_triggers_call(struct trace_event_file *file,
  59		    struct trace_buffer *buffer, void *rec,
  60		    struct ring_buffer_event *event)
  61{
  62	struct event_trigger_data *data;
  63	enum event_trigger_type tt = ETT_NONE;
  64	struct event_filter *filter;
  65
  66	if (list_empty(&file->triggers))
  67		return tt;
  68
  69	list_for_each_entry_rcu(data, &file->triggers, list) {
  70		if (data->paused)
  71			continue;
  72		if (!rec) {
  73			data->ops->trigger(data, buffer, rec, event);
  74			continue;
  75		}
  76		filter = rcu_dereference_sched(data->filter);
  77		if (filter && !filter_match_preds(filter, rec))
  78			continue;
  79		if (event_command_post_trigger(data->cmd_ops)) {
  80			tt |= data->cmd_ops->trigger_type;
  81			continue;
  82		}
  83		data->ops->trigger(data, buffer, rec, event);
  84	}
  85	return tt;
  86}
  87EXPORT_SYMBOL_GPL(event_triggers_call);
  88
  89bool __trace_trigger_soft_disabled(struct trace_event_file *file)
  90{
  91	unsigned long eflags = file->flags;
  92
  93	if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
  94		event_triggers_call(file, NULL, NULL, NULL);
  95	if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
  96		return true;
  97	if (eflags & EVENT_FILE_FL_PID_FILTER)
  98		return trace_event_ignore_this_pid(file);
  99	return false;
 100}
 101EXPORT_SYMBOL_GPL(__trace_trigger_soft_disabled);
 102
 103/**
 104 * event_triggers_post_call - Call 'post_triggers' for a trace event
 105 * @file: The trace_event_file associated with the event
 106 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 107 *
 108 * For each trigger associated with an event, invoke the trigger
 109 * function registered with the associated trigger command, if the
 110 * corresponding bit is set in the tt enum passed into this function.
 111 * See @event_triggers_call for details on how those bits are set.
 112 *
 113 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 114 */
 115void
 116event_triggers_post_call(struct trace_event_file *file,
 117			 enum event_trigger_type tt)
 118{
 119	struct event_trigger_data *data;
 120
 121	list_for_each_entry_rcu(data, &file->triggers, list) {
 122		if (data->paused)
 123			continue;
 124		if (data->cmd_ops->trigger_type & tt)
 125			data->ops->trigger(data, NULL, NULL, NULL);
 126	}
 127}
 128EXPORT_SYMBOL_GPL(event_triggers_post_call);
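/*
 * A rough sketch (simplified, based on the helpers in trace.h) of how the
 * two entry points above are typically paired by the event commit path:
 * event_triggers_call() runs before the event is written and returns the
 * deferred-trigger bits, and event_triggers_post_call() runs after the
 * commit with those same bits:
 *
 *	enum event_trigger_type tt = ETT_NONE;
 *
 *	if (file->flags & EVENT_FILE_FL_TRIGGER_COND)
 *		tt = event_triggers_call(file, buffer, entry, event);
 *	...				// write and commit the event
 *	if (tt)
 *		event_triggers_post_call(file, tt);
 */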
 129
 130#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)
 131
 132static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
 133{
 134	struct trace_event_file *event_file = event_file_data(m->private);
 135
 136	if (t == SHOW_AVAILABLE_TRIGGERS) {
 137		(*pos)++;
 138		return NULL;
 139	}
 140	return seq_list_next(t, &event_file->triggers, pos);
 141}
 142
 143static bool check_user_trigger(struct trace_event_file *file)
 144{
 145	struct event_trigger_data *data;
 146
 147	list_for_each_entry_rcu(data, &file->triggers, list,
 148				lockdep_is_held(&event_mutex)) {
 149		if (data->flags & EVENT_TRIGGER_FL_PROBE)
 150			continue;
 151		return true;
 152	}
 153	return false;
 154}
 155
 156static void *trigger_start(struct seq_file *m, loff_t *pos)
 157{
 158	struct trace_event_file *event_file;
 159
 160	/* ->stop() is called even if ->start() fails */
 161	mutex_lock(&event_mutex);
 162	event_file = event_file_data(m->private);
 163	if (unlikely(!event_file))
 164		return ERR_PTR(-ENODEV);
 165
 166	if (list_empty(&event_file->triggers) || !check_user_trigger(event_file))
 167		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
 168
 169	return seq_list_start(&event_file->triggers, *pos);
 170}
 171
 172static void trigger_stop(struct seq_file *m, void *t)
 173{
 174	mutex_unlock(&event_mutex);
 175}
 176
 177static int trigger_show(struct seq_file *m, void *v)
 178{
 179	struct event_trigger_data *data;
 180	struct event_command *p;
 181
 182	if (v == SHOW_AVAILABLE_TRIGGERS) {
 183		seq_puts(m, "# Available triggers:\n");
 184		seq_putc(m, '#');
 185		mutex_lock(&trigger_cmd_mutex);
 186		list_for_each_entry_reverse(p, &trigger_commands, list)
 187			seq_printf(m, " %s", p->name);
 188		seq_putc(m, '\n');
 189		mutex_unlock(&trigger_cmd_mutex);
 190		return 0;
 191	}
 192
 193	data = list_entry(v, struct event_trigger_data, list);
 194	data->ops->print(m, data);
 195
 196	return 0;
 197}
 198
 199static const struct seq_operations event_triggers_seq_ops = {
 200	.start = trigger_start,
 201	.next = trigger_next,
 202	.stop = trigger_stop,
 203	.show = trigger_show,
 204};
 205
 206static int event_trigger_regex_open(struct inode *inode, struct file *file)
 207{
 208	int ret;
 209
 210	ret = security_locked_down(LOCKDOWN_TRACEFS);
 211	if (ret)
 212		return ret;
 213
 214	mutex_lock(&event_mutex);
 215
 216	if (unlikely(!event_file_data(file))) {
 217		mutex_unlock(&event_mutex);
 218		return -ENODEV;
 219	}
 220
 221	if ((file->f_mode & FMODE_WRITE) &&
 222	    (file->f_flags & O_TRUNC)) {
 223		struct trace_event_file *event_file;
 224		struct event_command *p;
 225
 226		event_file = event_file_data(file);
 227
 228		list_for_each_entry(p, &trigger_commands, list) {
 229			if (p->unreg_all)
 230				p->unreg_all(event_file);
 231		}
 232	}
 233
 234	if (file->f_mode & FMODE_READ) {
 235		ret = seq_open(file, &event_triggers_seq_ops);
 236		if (!ret) {
 237			struct seq_file *m = file->private_data;
 238			m->private = file;
 239		}
 240	}
 241
 242	mutex_unlock(&event_mutex);
 243
 244	return ret;
 245}
 246
 247int trigger_process_regex(struct trace_event_file *file, char *buff)
 248{
 249	char *command, *next;
 250	struct event_command *p;
 251	int ret = -EINVAL;
 252
 253	next = buff = skip_spaces(buff);
 254	command = strsep(&next, ": \t");
 255	if (next) {
 256		next = skip_spaces(next);
 257		if (!*next)
 258			next = NULL;
 259	}
 260	command = (command[0] != '!') ? command : command + 1;
 261
 262	mutex_lock(&trigger_cmd_mutex);
 263	list_for_each_entry(p, &trigger_commands, list) {
 264		if (strcmp(p->name, command) == 0) {
 265			ret = p->parse(p, file, buff, command, next);
 266			goto out_unlock;
 267		}
 268	}
 269 out_unlock:
 270	mutex_unlock(&trigger_cmd_mutex);
 271
 272	return ret;
 273}
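/*
 * As a concrete illustration of the split above: for an input of
 * "traceon:5 if pid == 0", strsep() cuts at the ':' so the matching
 * command's parse() callback is invoked as
 * parse(p, file, "traceon", "traceon", "5 if pid == 0"); for
 * "!traceoff" there is no separator, so it is invoked as
 * parse(p, file, "!traceoff", "traceoff", NULL).
 */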
 274
 275static ssize_t event_trigger_regex_write(struct file *file,
 276					 const char __user *ubuf,
 277					 size_t cnt, loff_t *ppos)
 278{
 279	struct trace_event_file *event_file;
 280	ssize_t ret;
 281	char *buf;
 282
 283	if (!cnt)
 284		return 0;
 285
 286	if (cnt >= PAGE_SIZE)
 287		return -EINVAL;
 288
 289	buf = memdup_user_nul(ubuf, cnt);
 290	if (IS_ERR(buf))
 291		return PTR_ERR(buf);
 292
 293	strim(buf);
 294
 295	mutex_lock(&event_mutex);
 296	event_file = event_file_data(file);
 297	if (unlikely(!event_file)) {
 298		mutex_unlock(&event_mutex);
 299		kfree(buf);
 300		return -ENODEV;
 301	}
 302	ret = trigger_process_regex(event_file, buf);
 303	mutex_unlock(&event_mutex);
 304
 305	kfree(buf);
 306	if (ret < 0)
 307		goto out;
 308
 309	*ppos += cnt;
 310	ret = cnt;
 311 out:
 312	return ret;
 313}
 314
 315static int event_trigger_regex_release(struct inode *inode, struct file *file)
 316{
 317	mutex_lock(&event_mutex);
 318
 319	if (file->f_mode & FMODE_READ)
 320		seq_release(inode, file);
 321
 322	mutex_unlock(&event_mutex);
 323
 324	return 0;
 325}
 326
 327static ssize_t
 328event_trigger_write(struct file *filp, const char __user *ubuf,
 329		    size_t cnt, loff_t *ppos)
 330{
 331	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
 332}
 333
 334static int
 335event_trigger_open(struct inode *inode, struct file *filp)
 336{
 337	/* Checks for tracefs lockdown */
 338	return event_trigger_regex_open(inode, filp);
 339}
 340
 341static int
 342event_trigger_release(struct inode *inode, struct file *file)
 343{
 344	return event_trigger_regex_release(inode, file);
 345}
 346
 347const struct file_operations event_trigger_fops = {
 348	.open = event_trigger_open,
 349	.read = seq_read,
 350	.write = event_trigger_write,
 351	.llseek = tracing_lseek,
 352	.release = event_trigger_release,
 353};
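/*
 * These file_operations back the per-event 'trigger' file in tracefs.
 * For illustration, a trigger is typically installed, listed and removed
 * from user space with something like:
 *
 *   echo 'stacktrace:5 if prev_pid == 0' > \
 *	/sys/kernel/tracing/events/sched/sched_switch/trigger
 *   cat /sys/kernel/tracing/events/sched/sched_switch/trigger
 *   echo '!stacktrace' > \
 *	/sys/kernel/tracing/events/sched/sched_switch/trigger
 *
 * Writes are handled by event_trigger_regex_write() and routed to
 * trigger_process_regex(); reads go through the seq_file operations above.
 */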
 354
 355/*
 356 * Currently we only register event commands from __init, so mark this
 357 * __init too.
 358 */
 359__init int register_event_command(struct event_command *cmd)
 360{
 361	struct event_command *p;
 362	int ret = 0;
 363
 364	mutex_lock(&trigger_cmd_mutex);
 365	list_for_each_entry(p, &trigger_commands, list) {
 366		if (strcmp(cmd->name, p->name) == 0) {
 367			ret = -EBUSY;
 368			goto out_unlock;
 369		}
 370	}
 371	list_add(&cmd->list, &trigger_commands);
 372 out_unlock:
 373	mutex_unlock(&trigger_cmd_mutex);
 374
 375	return ret;
 376}
 377
 378/*
 379 * Currently we only unregister event commands from __init, so mark
 380 * this __init too.
 381 */
 382__init int unregister_event_command(struct event_command *cmd)
 383{
 384	struct event_command *p, *n;
 385	int ret = -ENODEV;
 386
 387	mutex_lock(&trigger_cmd_mutex);
 388	list_for_each_entry_safe(p, n, &trigger_commands, list) {
 389		if (strcmp(cmd->name, p->name) == 0) {
 390			ret = 0;
 391			list_del_init(&p->list);
 392			goto out_unlock;
 393		}
 394	}
 395 out_unlock:
 396	mutex_unlock(&trigger_cmd_mutex);
 397
 398	return ret;
 399}
 400
 401/**
 402 * event_trigger_print - Generic event_trigger_ops @print implementation
 403 * @name: The name of the event trigger
 404 * @m: The seq_file being printed to
 405 * @data: Trigger-specific data
 406 * @filter_str: filter_str to print, if present
 407 *
 408 * Common implementation for event triggers to print themselves.
 409 *
 410 * Usually wrapped by a function that simply sets the @name of the
 411 * trigger command and then invokes this.
 412 *
 413 * Return: 0 on success, errno otherwise
 414 */
 415static int
 416event_trigger_print(const char *name, struct seq_file *m,
 417		    void *data, char *filter_str)
 418{
 419	long count = (long)data;
 420
 421	seq_puts(m, name);
 422
 423	if (count == -1)
 424		seq_puts(m, ":unlimited");
 425	else
 426		seq_printf(m, ":count=%ld", count);
 427
 428	if (filter_str)
 429		seq_printf(m, " if %s\n", filter_str);
 430	else
 431		seq_putc(m, '\n');
 432
 433	return 0;
 434}
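/*
 * For example, a trigger registered as 'traceon:5 if pid == 0' is printed
 * by the above (while its remaining count is still 5) as
 * "traceon:count=5 if pid == 0", and one registered without a count
 * prints as "traceon:unlimited".
 */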
 435
 436/**
 437 * event_trigger_init - Generic event_trigger_ops @init implementation
 438 * @data: Trigger-specific data
 439 *
 440 * Common implementation of event trigger initialization.
 441 *
 442 * Usually used directly as the @init method in event trigger
 443 * implementations.
 444 *
 445 * Return: 0 on success, errno otherwise
 446 */
 447int event_trigger_init(struct event_trigger_data *data)
 448{
 449	data->ref++;
 450	return 0;
 451}
 452
 453/**
 454 * event_trigger_free - Generic event_trigger_ops @free implementation
 455 * @data: Trigger-specific data
 456 *
 457 * Common implementation of event trigger de-initialization.
 458 *
 459 * Usually used directly as the @free method in event trigger
 460 * implementations.
 461 */
 462static void
 463event_trigger_free(struct event_trigger_data *data)
 464{
 465	if (WARN_ON_ONCE(data->ref <= 0))
 466		return;
 467
 468	data->ref--;
 469	if (!data->ref)
 470		trigger_data_free(data);
 471}
 472
 473int trace_event_trigger_enable_disable(struct trace_event_file *file,
 474				       int trigger_enable)
 475{
 476	int ret = 0;
 477
 478	if (trigger_enable) {
 479		if (atomic_inc_return(&file->tm_ref) > 1)
 480			return ret;
 481		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
 482		ret = trace_event_enable_disable(file, 1, 1);
 483	} else {
 484		if (atomic_dec_return(&file->tm_ref) > 0)
 485			return ret;
 486		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
 487		ret = trace_event_enable_disable(file, 0, 1);
 488	}
 489
 490	return ret;
 491}
 492
 493/**
 494 * clear_event_triggers - Clear all triggers associated with a trace array
 495 * @tr: The trace array to clear
 496 *
 497 * For each trigger, the triggering event has its tm_ref decremented
 498 * via trace_event_trigger_enable_disable(), and any associated event
 499 * (in the case of enable/disable_event triggers) will have its sm_ref
 500 * decremented via free()->trace_event_enable_disable().  That
 501 * combination effectively reverses the soft-mode/trigger state added
 502 * by trigger registration.
 503 *
 504 * Must be called with event_mutex held.
 505 */
 506void
 507clear_event_triggers(struct trace_array *tr)
 508{
 509	struct trace_event_file *file;
 510
 511	list_for_each_entry(file, &tr->events, list) {
 512		struct event_trigger_data *data, *n;
 513		list_for_each_entry_safe(data, n, &file->triggers, list) {
 514			trace_event_trigger_enable_disable(file, 0);
 515			list_del_rcu(&data->list);
 516			if (data->ops->free)
 517				data->ops->free(data);
 518		}
 519	}
 520}
 521
 522/**
 523 * update_cond_flag - Set or reset the TRIGGER_COND bit
 524 * @file: The trace_event_file associated with the event
 525 *
 526 * If an event has triggers and any of those triggers has a filter or
 527 * a post_trigger, trigger invocation needs to be deferred until after
 528 * the current event has logged its data, and the event should have
 529 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 530 * cleared.
 531 */
 532void update_cond_flag(struct trace_event_file *file)
 533{
 534	struct event_trigger_data *data;
 535	bool set_cond = false;
 536
 537	lockdep_assert_held(&event_mutex);
 538
 539	list_for_each_entry(data, &file->triggers, list) {
 540		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
 541		    event_command_needs_rec(data->cmd_ops)) {
 542			set_cond = true;
 543			break;
 544		}
 545	}
 546
 547	if (set_cond)
 548		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
 549	else
 550		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
 551}
 552
 553/**
 554 * register_trigger - Generic event_command @reg implementation
 555 * @glob: The raw string used to register the trigger
 556 * @data: Trigger-specific data to associate with the trigger
 557 * @file: The trace_event_file associated with the event
 558 *
 559 * Common implementation for event trigger registration.
 560 *
 561 * Usually used directly as the @reg method in event command
 562 * implementations.
 563 *
 564 * Return: 0 on success, errno otherwise
 565 */
 566static int register_trigger(char *glob,
 567			    struct event_trigger_data *data,
 568			    struct trace_event_file *file)
 569{
 570	struct event_trigger_data *test;
 571	int ret = 0;
 572
 573	lockdep_assert_held(&event_mutex);
 574
 575	list_for_each_entry(test, &file->triggers, list) {
 576		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
 577			ret = -EEXIST;
 578			goto out;
 579		}
 580	}
 581
 582	if (data->ops->init) {
 583		ret = data->ops->init(data);
 584		if (ret < 0)
 585			goto out;
 586	}
 587
 588	list_add_rcu(&data->list, &file->triggers);
 589
 590	update_cond_flag(file);
 591	ret = trace_event_trigger_enable_disable(file, 1);
 592	if (ret < 0) {
 593		list_del_rcu(&data->list);
 594		update_cond_flag(file);
 595	}
 596out:
 597	return ret;
 598}
 599
 600/**
 601 * unregister_trigger - Generic event_command @unreg implementation
 602 * @glob: The raw string used to register the trigger
 603 * @test: Trigger-specific data used to find the trigger to remove
 604 * @file: The trace_event_file associated with the event
 605 *
 606 * Common implementation for event trigger unregistration.
 607 *
 608 * Usually used directly as the @unreg method in event command
 609 * implementations.
 610 */
 611static void unregister_trigger(char *glob,
 612			       struct event_trigger_data *test,
 613			       struct trace_event_file *file)
 614{
 615	struct event_trigger_data *data = NULL, *iter;
 616
 617	lockdep_assert_held(&event_mutex);
 618
 619	list_for_each_entry(iter, &file->triggers, list) {
 620		if (iter->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
 621			data = iter;
 622			list_del_rcu(&data->list);
 623			trace_event_trigger_enable_disable(file, 0);
 624			update_cond_flag(file);
 625			break;
 626		}
 627	}
 628
 629	if (data && data->ops->free)
 630		data->ops->free(data);
 631}
 632
 633/*
 634 * Event trigger parsing helper functions.
 635 *
 636 * These functions help make it easier to write an event trigger
 637 * parsing function i.e. the struct event_command.parse() callback
 638 * function responsible for parsing and registering a trigger command
 639 * written to the 'trigger' file.
 640 *
 641 * A trigger command (or just 'trigger' for short) takes the form:
 642 *   [trigger] [if filter]
 643 *
 644 * The struct event_command.parse() callback (and other struct
 645 * event_command functions) refer to several components of a trigger
 646 * command.  Those same components are referenced by the event trigger
 647 * parsing helper functions defined below.  These components are:
 648 *
 649 *   cmd               - the trigger command name
 650 *   glob              - the trigger command name optionally prefaced with '!'
 651 *   param_and_filter  - text following cmd and ':'
 652 *   param             - text following cmd and ':' and stripped of filter
 653 *   filter            - the optional filter text following (and including) 'if'
 654 *
  655 * To illustrate the use of these components, here are some concrete
 656 * examples. For the following triggers:
 657 *
 658 *   echo 'traceon:5 if pid == 0' > trigger
 659 *     - 'traceon' is both cmd and glob
 660 *     - '5 if pid == 0' is the param_and_filter
 661 *     - '5' is the param
 662 *     - 'if pid == 0' is the filter
 663 *
 664 *   echo 'enable_event:sys:event:n' > trigger
 665 *     - 'enable_event' is both cmd and glob
 666 *     - 'sys:event:n' is the param_and_filter
 667 *     - 'sys:event:n' is the param
 668 *     - there is no filter
 669 *
 670 *   echo 'hist:keys=pid if prio > 50' > trigger
 671 *     - 'hist' is both cmd and glob
 672 *     - 'keys=pid if prio > 50' is the param_and_filter
 673 *     - 'keys=pid' is the param
 674 *     - 'if prio > 50' is the filter
 675 *
 676 *   echo '!enable_event:sys:event:n' > trigger
 677 *     - 'enable_event' the cmd
 678 *     - '!enable_event' is the glob
 679 *     - 'sys:event:n' is the param_and_filter
 680 *     - 'sys:event:n' is the param
 681 *     - there is no filter
 682 *
 683 *   echo 'traceoff' > trigger
 684 *     - 'traceoff' is both cmd and glob
 685 *     - there is no param_and_filter
 686 *     - there is no param
 687 *     - there is no filter
 688 *
 689 * There are a few different categories of event trigger covered by
 690 * these helpers:
 691 *
 692 *  - triggers that don't require a parameter e.g. traceon
 693 *  - triggers that do require a parameter e.g. enable_event and hist
  694 *  - triggers that, though they may not require a param, may support an
 695 *    optional 'n' param (n = number of times the trigger should fire)
 696 *    e.g.: traceon:5 or enable_event:sys:event:n
 697 *  - triggers that do not support an 'n' param e.g. hist
 698 *
 699 * These functions can be used or ignored as necessary - it all
 700 * depends on the complexity of the trigger, and the granularity of
 701 * the functions supported reflects the fact that some implementations
 702 * may need to customize certain aspects of their implementations and
 703 * won't need certain functions.  For instance, the hist trigger
 704 * implementation doesn't use event_trigger_separate_filter() because
 705 * it has special requirements for handling the filter.
 706 */
 707
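/*
 * As an illustrative outline, a parse() callback for a simple trigger that
 * takes an optional :n count would chain these helpers roughly as follows
 * (error handling elided):
 *
 *	remove = event_trigger_check_remove(glob);
 *	event_trigger_separate_filter(param_and_filter, &param, &filter, false);
 *	trigger_data = event_trigger_alloc(cmd_ops, cmd, param, file);
 *	event_trigger_parse_num(param, trigger_data);
 *	event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
 *	event_trigger_register(cmd_ops, file, glob, trigger_data);
 *
 * event_trigger_parse() further below is the generic implementation of
 * exactly this sequence.
 */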
 708/**
 709 * event_trigger_check_remove - check whether an event trigger specifies remove
 710 * @glob: The trigger command string, with optional remove(!) operator
 711 *
 712 * The event trigger callback implementations pass in 'glob' as a
 713 * parameter.  This is the command name either with or without a
 714 * remove(!)  operator.  This function simply parses the glob and
 715 * determines whether the command corresponds to a trigger removal or
 716 * a trigger addition.
 717 *
 718 * Return: true if this is a remove command, false otherwise
 719 */
 720bool event_trigger_check_remove(const char *glob)
 721{
 722	return (glob && glob[0] == '!') ? true : false;
 723}
 724
 725/**
 726 * event_trigger_empty_param - check whether the param is empty
 727 * @param: The trigger param string
 728 *
 729 * The event trigger callback implementations pass in 'param' as a
 730 * parameter.  This corresponds to the string following the command
 731 * name minus the command name.  This function can be called by a
 732 * callback implementation for any command that requires a param; a
 733 * callback that doesn't require a param can ignore it.
 734 *
 735 * Return: true if this is an empty param, false otherwise
 736 */
 737bool event_trigger_empty_param(const char *param)
 738{
 739	return !param;
 740}
 741
 742/**
 743 * event_trigger_separate_filter - separate an event trigger from a filter
 744 * @param_and_filter: String containing trigger and possibly filter
 745 * @param: outparam, will be filled with a pointer to the trigger
 746 * @filter: outparam, will be filled with a pointer to the filter
 747 * @param_required: Specifies whether or not the param string is required
 748 *
 749 * Given a param string of the form '[trigger] [if filter]', this
 750 * function separates the filter from the trigger and returns the
 751 * trigger in @param and the filter in @filter.  Either the @param
 752 * or the @filter may be set to NULL by this function - if not set to
 753 * NULL, they will contain strings corresponding to the trigger and
 754 * filter.
 755 *
 756 * There are two cases that need to be handled with respect to the
 757 * passed-in param: either the param is required, or it is not
 758 * required.  If @param_required is set, and there's no param, it will
 759 * return -EINVAL.  If @param_required is not set and there's a param
 760 * that starts with a number, that corresponds to the case of a
 761 * trigger with :n (n = number of times the trigger should fire) and
 762 * the parsing continues normally; otherwise the function just returns
 763 * and assumes param just contains a filter and there's nothing else
 764 * to do.
 765 *
 766 * Return: 0 on success, errno otherwise
 767 */
 768int event_trigger_separate_filter(char *param_and_filter, char **param,
 769				  char **filter, bool param_required)
 770{
 771	int ret = 0;
 772
 773	*param = *filter = NULL;
 774
 775	if (!param_and_filter) {
 776		if (param_required)
 777			ret = -EINVAL;
 778		goto out;
 779	}
 780
 781	/*
 782	 * Here we check for an optional param. The only legal
 783	 * optional param is :n, and if that's the case, continue
 784	 * below. Otherwise we assume what's left is a filter and
 785	 * return it as the filter string for the caller to deal with.
 786	 */
 787	if (!param_required && param_and_filter && !isdigit(param_and_filter[0])) {
 788		*filter = param_and_filter;
 789		goto out;
 790	}
 791
 792	/*
 793	 * Separate the param from the filter (param [if filter]).
 794	 * Here we have either an optional :n param or a required
 795	 * param and an optional filter.
 796	 */
 797	*param = strsep(&param_and_filter, " \t");
 798
 799	/*
 800	 * Here we have a filter, though it may be empty.
 801	 */
 802	if (param_and_filter) {
 803		*filter = skip_spaces(param_and_filter);
 804		if (!**filter)
 805			*filter = NULL;
 806	}
 807out:
 808	return ret;
 809}
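/*
 * For example, with param_required == false:
 *
 *   "5 if pid == 0"  ->  *param = "5",  *filter = "if pid == 0"
 *   "if pid == 0"    ->  *param = NULL, *filter = "if pid == 0"
 *   NULL             ->  *param = NULL, *filter = NULL
 *
 * With param_required == true, a NULL param_and_filter instead returns
 * -EINVAL.
 */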
 810
 811/**
 812 * event_trigger_alloc - allocate and init event_trigger_data for a trigger
 813 * @cmd_ops: The event_command operations for the trigger
 814 * @cmd: The cmd string
 815 * @param: The param string
 816 * @private_data: User data to associate with the event trigger
 817 *
 818 * Allocate an event_trigger_data instance and initialize it.  The
 819 * @cmd_ops are used along with the @cmd and @param to get the
 820 * trigger_ops to assign to the event_trigger_data.  @private_data can
 821 * also be passed in and associated with the event_trigger_data.
 822 *
 823 * Use event_trigger_free() to free an event_trigger_data object.
 824 *
  825 * Return: The trigger_data object on success, NULL otherwise
 826 */
 827struct event_trigger_data *event_trigger_alloc(struct event_command *cmd_ops,
 828					       char *cmd,
 829					       char *param,
 830					       void *private_data)
 831{
 832	struct event_trigger_data *trigger_data;
 833	struct event_trigger_ops *trigger_ops;
 834
 835	trigger_ops = cmd_ops->get_trigger_ops(cmd, param);
 836
 837	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
 838	if (!trigger_data)
 839		return NULL;
 840
 841	trigger_data->count = -1;
 842	trigger_data->ops = trigger_ops;
 843	trigger_data->cmd_ops = cmd_ops;
 844	trigger_data->private_data = private_data;
 845
 846	INIT_LIST_HEAD(&trigger_data->list);
 847	INIT_LIST_HEAD(&trigger_data->named_list);
 848	RCU_INIT_POINTER(trigger_data->filter, NULL);
 849
 850	return trigger_data;
 851}
 852
 853/**
 854 * event_trigger_parse_num - parse and return the number param for a trigger
 855 * @param: The param string
 856 * @trigger_data: The trigger_data for the trigger
 857 *
 858 * Parse the :n (n = number of times the trigger should fire) param
 859 * and set the count variable in the trigger_data to the parsed count.
 860 *
 861 * Return: 0 on success, errno otherwise
 862 */
 863int event_trigger_parse_num(char *param,
 864			    struct event_trigger_data *trigger_data)
 865{
 866	char *number;
 867	int ret = 0;
 868
 869	if (param) {
 870		number = strsep(&param, ":");
 871
 872		if (!strlen(number))
 873			return -EINVAL;
 874
 875		/*
 876		 * We use the callback data field (which is a pointer)
 877		 * as our counter.
 878		 */
 879		ret = kstrtoul(number, 0, &trigger_data->count);
 880	}
 881
 882	return ret;
 883}
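/*
 * For example, for a trigger written as 'traceon:5' the param passed in
 * here is "5" and trigger_data->count becomes 5; with no count given,
 * param is NULL and the count keeps the default of -1 (unlimited) set by
 * event_trigger_alloc().
 */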
 884
 885/**
 886 * event_trigger_set_filter - set an event trigger's filter
 887 * @cmd_ops: The event_command operations for the trigger
 888 * @file: The event file for the trigger's event
 889 * @param: The string containing the filter
 890 * @trigger_data: The trigger_data for the trigger
 891 *
 892 * Set the filter for the trigger.  If the filter is NULL, just return
 893 * without error.
 894 *
 895 * Return: 0 on success, errno otherwise
 896 */
 897int event_trigger_set_filter(struct event_command *cmd_ops,
 898			     struct trace_event_file *file,
 899			     char *param,
 900			     struct event_trigger_data *trigger_data)
 901{
 902	if (param && cmd_ops->set_filter)
 903		return cmd_ops->set_filter(param, trigger_data, file);
 904
 905	return 0;
 906}
 907
 908/**
 909 * event_trigger_reset_filter - reset an event trigger's filter
 910 * @cmd_ops: The event_command operations for the trigger
 911 * @trigger_data: The trigger_data for the trigger
 912 *
 913 * Reset the filter for the trigger to no filter.
 914 */
 915void event_trigger_reset_filter(struct event_command *cmd_ops,
 916				struct event_trigger_data *trigger_data)
 917{
 918	if (cmd_ops->set_filter)
 919		cmd_ops->set_filter(NULL, trigger_data, NULL);
 920}
 921
 922/**
 923 * event_trigger_register - register an event trigger
 924 * @cmd_ops: The event_command operations for the trigger
 925 * @file: The event file for the trigger's event
 926 * @glob: The trigger command string, with optional remove(!) operator
 927 * @trigger_data: The trigger_data for the trigger
 928 *
 929 * Register an event trigger.  The @cmd_ops are used to call the
 930 * cmd_ops->reg() function which actually does the registration.
 931 *
 932 * Return: 0 on success, errno otherwise
 933 */
 934int event_trigger_register(struct event_command *cmd_ops,
 935			   struct trace_event_file *file,
 936			   char *glob,
 937			   struct event_trigger_data *trigger_data)
 938{
 939	return cmd_ops->reg(glob, trigger_data, file);
 940}
 941
 942/**
 943 * event_trigger_unregister - unregister an event trigger
 944 * @cmd_ops: The event_command operations for the trigger
 945 * @file: The event file for the trigger's event
 946 * @glob: The trigger command string, with optional remove(!) operator
 947 * @trigger_data: The trigger_data for the trigger
 948 *
 949 * Unregister an event trigger.  The @cmd_ops are used to call the
 950 * cmd_ops->unreg() function which actually does the unregistration.
 951 */
 952void event_trigger_unregister(struct event_command *cmd_ops,
 953			      struct trace_event_file *file,
 954			      char *glob,
 955			      struct event_trigger_data *trigger_data)
 956{
 957	cmd_ops->unreg(glob, trigger_data, file);
 958}
 959
 960/*
 961 * End event trigger parsing helper functions.
 962 */
 963
 964/**
 965 * event_trigger_parse - Generic event_command @parse implementation
 966 * @cmd_ops: The command ops, used for trigger registration
 967 * @file: The trace_event_file associated with the event
 968 * @glob: The raw string used to register the trigger
 969 * @cmd: The cmd portion of the string used to register the trigger
 970 * @param_and_filter: The param and filter portion of the string used to register the trigger
 971 *
 972 * Common implementation for event command parsing and trigger
 973 * instantiation.
 974 *
 975 * Usually used directly as the @parse method in event command
 976 * implementations.
 977 *
 978 * Return: 0 on success, errno otherwise
 979 */
 980static int
 981event_trigger_parse(struct event_command *cmd_ops,
 982		    struct trace_event_file *file,
 983		    char *glob, char *cmd, char *param_and_filter)
 984{
 985	struct event_trigger_data *trigger_data;
 986	char *param, *filter;
 987	bool remove;
 988	int ret;
 989
 990	remove = event_trigger_check_remove(glob);
 991
 992	ret = event_trigger_separate_filter(param_and_filter, &param, &filter, false);
 993	if (ret)
 994		return ret;
 995
 996	ret = -ENOMEM;
 997	trigger_data = event_trigger_alloc(cmd_ops, cmd, param, file);
 998	if (!trigger_data)
 999		goto out;
1000
1001	if (remove) {
1002		event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
1003		kfree(trigger_data);
1004		ret = 0;
1005		goto out;
1006	}
1007
1008	ret = event_trigger_parse_num(param, trigger_data);
1009	if (ret)
1010		goto out_free;
1011
1012	ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
1013	if (ret < 0)
1014		goto out_free;
1015
1016	/* Up the trigger_data count to make sure reg doesn't free it on failure */
1017	event_trigger_init(trigger_data);
1018
1019	ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
1020	if (ret)
1021		goto out_free;
1022
1023	/* Down the counter of trigger_data or free it if not used anymore */
1024	event_trigger_free(trigger_data);
1025 out:
1026	return ret;
1027
1028 out_free:
1029	event_trigger_reset_filter(cmd_ops, trigger_data);
1030	kfree(trigger_data);
1031	goto out;
1032}
1033
1034/**
1035 * set_trigger_filter - Generic event_command @set_filter implementation
1036 * @filter_str: The filter string for the trigger, NULL to remove filter
1037 * @trigger_data: Trigger-specific data
1038 * @file: The trace_event_file associated with the event
1039 *
1040 * Common implementation for event command filter parsing and filter
1041 * instantiation.
1042 *
1043 * Usually used directly as the @set_filter method in event command
1044 * implementations.
1045 *
1046 * Also used to remove a filter (if filter_str = NULL).
1047 *
1048 * Return: 0 on success, errno otherwise
1049 */
1050int set_trigger_filter(char *filter_str,
1051		       struct event_trigger_data *trigger_data,
1052		       struct trace_event_file *file)
1053{
1054	struct event_trigger_data *data = trigger_data;
1055	struct event_filter *filter = NULL, *tmp;
1056	int ret = -EINVAL;
1057	char *s;
1058
1059	if (!filter_str) /* clear the current filter */
1060		goto assign;
1061
1062	s = strsep(&filter_str, " \t");
1063
1064	if (!strlen(s) || strcmp(s, "if") != 0)
1065		goto out;
1066
1067	if (!filter_str)
1068		goto out;
1069
1070	/* The filter is for the 'trigger' event, not the triggered event */
1071	ret = create_event_filter(file->tr, file->event_call,
1072				  filter_str, true, &filter);
1073
 1074	/* We only enabled set_str above for error handling; drop it now */
1075	if (filter) {
1076		kfree(filter->filter_string);
1077		filter->filter_string = NULL;
1078	}
1079
1080	/*
 1081	 * If create_event_filter() fails, the filter still needs to be freed,
 1082	 * which the calling code will do via data->filter.
1083	 */
1084 assign:
1085	tmp = rcu_access_pointer(data->filter);
1086
1087	rcu_assign_pointer(data->filter, filter);
1088
1089	if (tmp) {
1090		/*
1091		 * Make sure the call is done with the filter.
1092		 * It is possible that a filter could fail at boot up,
1093		 * and then this path will be called. Avoid the synchronization
1094		 * in that case.
1095		 */
1096		if (system_state != SYSTEM_BOOTING)
1097			tracepoint_synchronize_unregister();
1098		free_event_filter(tmp);
1099	}
1100
1101	kfree(data->filter_str);
1102	data->filter_str = NULL;
1103
1104	if (filter_str) {
1105		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
1106		if (!data->filter_str) {
1107			free_event_filter(rcu_access_pointer(data->filter));
1108			data->filter = NULL;
1109			ret = -ENOMEM;
1110		}
1111	}
1112 out:
1113	return ret;
1114}
1115
1116static LIST_HEAD(named_triggers);
1117
1118/**
1119 * find_named_trigger - Find the common named trigger associated with @name
1120 * @name: The name of the set of named triggers to find the common data for
1121 *
1122 * Named triggers are sets of triggers that share a common set of
1123 * trigger data.  The first named trigger registered with a given name
1124 * owns the common trigger data that the others subsequently
1125 * registered with the same name will reference.  This function
1126 * returns the common trigger data associated with that first
1127 * registered instance.
1128 *
1129 * Return: the common trigger data for the given named trigger on
1130 * success, NULL otherwise.
1131 */
1132struct event_trigger_data *find_named_trigger(const char *name)
1133{
1134	struct event_trigger_data *data;
1135
1136	if (!name)
1137		return NULL;
1138
1139	list_for_each_entry(data, &named_triggers, named_list) {
1140		if (data->named_data)
1141			continue;
1142		if (strcmp(data->name, name) == 0)
1143			return data;
1144	}
1145
1146	return NULL;
1147}
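/*
 * In practice, named triggers are created by hist triggers given an
 * explicit name, e.g. (illustrative):
 *
 *   echo 'hist:name=foo:keys=pid' > events/sched/sched_switch/trigger
 *   echo 'hist:name=foo:keys=pid' > events/sched/sched_wakeup/trigger
 *
 * Both events then update the histogram owned by the first registration,
 * whose trigger data is what this function returns.
 */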
1148
1149/**
1150 * is_named_trigger - determine if a given trigger is a named trigger
1151 * @test: The trigger data to test
1152 *
1153 * Return: true if 'test' is a named trigger, false otherwise.
1154 */
1155bool is_named_trigger(struct event_trigger_data *test)
1156{
1157	struct event_trigger_data *data;
1158
1159	list_for_each_entry(data, &named_triggers, named_list) {
1160		if (test == data)
1161			return true;
1162	}
1163
1164	return false;
1165}
1166
1167/**
1168 * save_named_trigger - save the trigger in the named trigger list
1169 * @name: The name of the named trigger set
1170 * @data: The trigger data to save
1171 *
1172 * Return: 0 if successful, negative error otherwise.
1173 */
1174int save_named_trigger(const char *name, struct event_trigger_data *data)
1175{
1176	data->name = kstrdup(name, GFP_KERNEL);
1177	if (!data->name)
1178		return -ENOMEM;
1179
1180	list_add(&data->named_list, &named_triggers);
1181
1182	return 0;
1183}
1184
1185/**
1186 * del_named_trigger - delete a trigger from the named trigger list
1187 * @data: The trigger data to delete
1188 */
1189void del_named_trigger(struct event_trigger_data *data)
1190{
1191	kfree(data->name);
1192	data->name = NULL;
1193
1194	list_del(&data->named_list);
1195}
1196
1197static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
1198{
1199	struct event_trigger_data *test;
1200
1201	list_for_each_entry(test, &named_triggers, named_list) {
1202		if (strcmp(test->name, data->name) == 0) {
1203			if (pause) {
1204				test->paused_tmp = test->paused;
1205				test->paused = true;
1206			} else {
1207				test->paused = test->paused_tmp;
1208			}
1209		}
1210	}
1211}
1212
1213/**
1214 * pause_named_trigger - Pause all named triggers with the same name
1215 * @data: The trigger data of a named trigger to pause
1216 *
1217 * Pauses a named trigger along with all other triggers having the
1218 * same name.  Because named triggers share a common set of data,
1219 * pausing only one is meaningless, so pausing one named trigger needs
1220 * to pause all triggers with the same name.
1221 */
1222void pause_named_trigger(struct event_trigger_data *data)
1223{
1224	__pause_named_trigger(data, true);
1225}
1226
1227/**
1228 * unpause_named_trigger - Un-pause all named triggers with the same name
1229 * @data: The trigger data of a named trigger to unpause
1230 *
1231 * Un-pauses a named trigger along with all other triggers having the
1232 * same name.  Because named triggers share a common set of data,
1233 * unpausing only one is meaningless, so unpausing one named trigger
1234 * needs to unpause all triggers with the same name.
1235 */
1236void unpause_named_trigger(struct event_trigger_data *data)
1237{
1238	__pause_named_trigger(data, false);
1239}
1240
1241/**
1242 * set_named_trigger_data - Associate common named trigger data
1243 * @data: The trigger data to associate
1244 * @named_data: The common named trigger to be associated
1245 *
1246 * Named triggers are sets of triggers that share a common set of
1247 * trigger data.  The first named trigger registered with a given name
1248 * owns the common trigger data that the others subsequently
1249 * registered with the same name will reference.  This function
1250 * associates the common trigger data from the first trigger with the
1251 * given trigger.
1252 */
1253void set_named_trigger_data(struct event_trigger_data *data,
1254			    struct event_trigger_data *named_data)
1255{
1256	data->named_data = named_data;
1257}
1258
1259struct event_trigger_data *
1260get_named_trigger_data(struct event_trigger_data *data)
1261{
1262	return data->named_data;
1263}
1264
1265static void
1266traceon_trigger(struct event_trigger_data *data,
1267		struct trace_buffer *buffer, void *rec,
1268		struct ring_buffer_event *event)
1269{
1270	struct trace_event_file *file = data->private_data;
1271
1272	if (file) {
1273		if (tracer_tracing_is_on(file->tr))
1274			return;
1275
1276		tracer_tracing_on(file->tr);
1277		return;
1278	}
1279
1280	if (tracing_is_on())
1281		return;
1282
1283	tracing_on();
1284}
1285
1286static void
1287traceon_count_trigger(struct event_trigger_data *data,
1288		      struct trace_buffer *buffer, void *rec,
1289		      struct ring_buffer_event *event)
1290{
1291	struct trace_event_file *file = data->private_data;
1292
1293	if (file) {
1294		if (tracer_tracing_is_on(file->tr))
1295			return;
1296	} else {
1297		if (tracing_is_on())
1298			return;
1299	}
1300
1301	if (!data->count)
1302		return;
1303
1304	if (data->count != -1)
1305		(data->count)--;
1306
1307	if (file)
1308		tracer_tracing_on(file->tr);
1309	else
1310		tracing_on();
1311}
1312
1313static void
1314traceoff_trigger(struct event_trigger_data *data,
1315		 struct trace_buffer *buffer, void *rec,
1316		 struct ring_buffer_event *event)
1317{
1318	struct trace_event_file *file = data->private_data;
1319
1320	if (file) {
1321		if (!tracer_tracing_is_on(file->tr))
1322			return;
1323
1324		tracer_tracing_off(file->tr);
1325		return;
1326	}
1327
1328	if (!tracing_is_on())
1329		return;
1330
1331	tracing_off();
1332}
1333
1334static void
1335traceoff_count_trigger(struct event_trigger_data *data,
1336		       struct trace_buffer *buffer, void *rec,
1337		       struct ring_buffer_event *event)
1338{
1339	struct trace_event_file *file = data->private_data;
1340
1341	if (file) {
1342		if (!tracer_tracing_is_on(file->tr))
1343			return;
1344	} else {
1345		if (!tracing_is_on())
1346			return;
1347	}
1348
1349	if (!data->count)
1350		return;
1351
1352	if (data->count != -1)
1353		(data->count)--;
1354
1355	if (file)
1356		tracer_tracing_off(file->tr);
1357	else
1358		tracing_off();
1359}
1360
1361static int
1362traceon_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1363{
1364	return event_trigger_print("traceon", m, (void *)data->count,
1365				   data->filter_str);
1366}
1367
1368static int
1369traceoff_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1370{
1371	return event_trigger_print("traceoff", m, (void *)data->count,
1372				   data->filter_str);
1373}
1374
1375static struct event_trigger_ops traceon_trigger_ops = {
1376	.trigger		= traceon_trigger,
1377	.print			= traceon_trigger_print,
1378	.init			= event_trigger_init,
1379	.free			= event_trigger_free,
1380};
1381
1382static struct event_trigger_ops traceon_count_trigger_ops = {
1383	.trigger		= traceon_count_trigger,
1384	.print			= traceon_trigger_print,
1385	.init			= event_trigger_init,
1386	.free			= event_trigger_free,
1387};
1388
1389static struct event_trigger_ops traceoff_trigger_ops = {
1390	.trigger		= traceoff_trigger,
1391	.print			= traceoff_trigger_print,
1392	.init			= event_trigger_init,
1393	.free			= event_trigger_free,
1394};
1395
1396static struct event_trigger_ops traceoff_count_trigger_ops = {
1397	.trigger		= traceoff_count_trigger,
1398	.print			= traceoff_trigger_print,
1399	.init			= event_trigger_init,
1400	.free			= event_trigger_free,
1401};
1402
1403static struct event_trigger_ops *
1404onoff_get_trigger_ops(char *cmd, char *param)
1405{
1406	struct event_trigger_ops *ops;
1407
1408	/* we register both traceon and traceoff to this callback */
1409	if (strcmp(cmd, "traceon") == 0)
1410		ops = param ? &traceon_count_trigger_ops :
1411			&traceon_trigger_ops;
1412	else
1413		ops = param ? &traceoff_count_trigger_ops :
1414			&traceoff_trigger_ops;
1415
1416	return ops;
1417}
1418
1419static struct event_command trigger_traceon_cmd = {
1420	.name			= "traceon",
1421	.trigger_type		= ETT_TRACE_ONOFF,
1422	.parse			= event_trigger_parse,
1423	.reg			= register_trigger,
1424	.unreg			= unregister_trigger,
1425	.get_trigger_ops	= onoff_get_trigger_ops,
1426	.set_filter		= set_trigger_filter,
1427};
1428
1429static struct event_command trigger_traceoff_cmd = {
1430	.name			= "traceoff",
1431	.trigger_type		= ETT_TRACE_ONOFF,
1432	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1433	.parse			= event_trigger_parse,
1434	.reg			= register_trigger,
1435	.unreg			= unregister_trigger,
1436	.get_trigger_ops	= onoff_get_trigger_ops,
1437	.set_filter		= set_trigger_filter,
1438};
1439
1440#ifdef CONFIG_TRACER_SNAPSHOT
1441static void
1442snapshot_trigger(struct event_trigger_data *data,
1443		 struct trace_buffer *buffer, void *rec,
1444		 struct ring_buffer_event *event)
1445{
1446	struct trace_event_file *file = data->private_data;
1447
1448	if (file)
1449		tracing_snapshot_instance(file->tr);
1450	else
1451		tracing_snapshot();
1452}
1453
1454static void
1455snapshot_count_trigger(struct event_trigger_data *data,
1456		       struct trace_buffer *buffer, void *rec,
1457		       struct ring_buffer_event *event)
1458{
1459	if (!data->count)
1460		return;
1461
1462	if (data->count != -1)
1463		(data->count)--;
1464
1465	snapshot_trigger(data, buffer, rec, event);
1466}
1467
1468static int
1469register_snapshot_trigger(char *glob,
1470			  struct event_trigger_data *data,
1471			  struct trace_event_file *file)
1472{
1473	int ret = tracing_alloc_snapshot_instance(file->tr);
1474
1475	if (ret < 0)
1476		return ret;
1477
1478	return register_trigger(glob, data, file);
1479}
1480
1481static int
1482snapshot_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1483{
1484	return event_trigger_print("snapshot", m, (void *)data->count,
1485				   data->filter_str);
1486}
1487
1488static struct event_trigger_ops snapshot_trigger_ops = {
1489	.trigger		= snapshot_trigger,
1490	.print			= snapshot_trigger_print,
1491	.init			= event_trigger_init,
1492	.free			= event_trigger_free,
1493};
1494
1495static struct event_trigger_ops snapshot_count_trigger_ops = {
1496	.trigger		= snapshot_count_trigger,
1497	.print			= snapshot_trigger_print,
1498	.init			= event_trigger_init,
1499	.free			= event_trigger_free,
1500};
1501
1502static struct event_trigger_ops *
1503snapshot_get_trigger_ops(char *cmd, char *param)
1504{
1505	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1506}
1507
1508static struct event_command trigger_snapshot_cmd = {
1509	.name			= "snapshot",
1510	.trigger_type		= ETT_SNAPSHOT,
1511	.parse			= event_trigger_parse,
1512	.reg			= register_snapshot_trigger,
1513	.unreg			= unregister_trigger,
1514	.get_trigger_ops	= snapshot_get_trigger_ops,
1515	.set_filter		= set_trigger_filter,
1516};
1517
1518static __init int register_trigger_snapshot_cmd(void)
1519{
1520	int ret;
1521
1522	ret = register_event_command(&trigger_snapshot_cmd);
1523	WARN_ON(ret < 0);
1524
1525	return ret;
1526}
1527#else
1528static __init int register_trigger_snapshot_cmd(void) { return 0; }
1529#endif /* CONFIG_TRACER_SNAPSHOT */
1530
1531#ifdef CONFIG_STACKTRACE
1532#ifdef CONFIG_UNWINDER_ORC
1533/* Skip 2:
1534 *   event_triggers_post_call()
1535 *   trace_event_raw_event_xxx()
1536 */
1537# define STACK_SKIP 2
1538#else
1539/*
1540 * Skip 4:
1541 *   stacktrace_trigger()
1542 *   event_triggers_post_call()
1543 *   trace_event_buffer_commit()
1544 *   trace_event_raw_event_xxx()
1545 */
1546#define STACK_SKIP 4
1547#endif
1548
1549static void
1550stacktrace_trigger(struct event_trigger_data *data,
1551		   struct trace_buffer *buffer,  void *rec,
1552		   struct ring_buffer_event *event)
1553{
1554	struct trace_event_file *file = data->private_data;
1555
1556	if (file)
1557		__trace_stack(file->tr, tracing_gen_ctx(), STACK_SKIP);
1558	else
1559		trace_dump_stack(STACK_SKIP);
1560}
1561
1562static void
1563stacktrace_count_trigger(struct event_trigger_data *data,
1564			 struct trace_buffer *buffer, void *rec,
1565			 struct ring_buffer_event *event)
1566{
1567	if (!data->count)
1568		return;
1569
1570	if (data->count != -1)
1571		(data->count)--;
1572
1573	stacktrace_trigger(data, buffer, rec, event);
1574}
1575
1576static int
1577stacktrace_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1578{
1579	return event_trigger_print("stacktrace", m, (void *)data->count,
1580				   data->filter_str);
1581}
1582
1583static struct event_trigger_ops stacktrace_trigger_ops = {
1584	.trigger		= stacktrace_trigger,
1585	.print			= stacktrace_trigger_print,
1586	.init			= event_trigger_init,
1587	.free			= event_trigger_free,
1588};
1589
1590static struct event_trigger_ops stacktrace_count_trigger_ops = {
1591	.trigger		= stacktrace_count_trigger,
1592	.print			= stacktrace_trigger_print,
1593	.init			= event_trigger_init,
1594	.free			= event_trigger_free,
1595};
1596
1597static struct event_trigger_ops *
1598stacktrace_get_trigger_ops(char *cmd, char *param)
1599{
1600	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1601}
1602
1603static struct event_command trigger_stacktrace_cmd = {
1604	.name			= "stacktrace",
1605	.trigger_type		= ETT_STACKTRACE,
1606	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1607	.parse			= event_trigger_parse,
1608	.reg			= register_trigger,
1609	.unreg			= unregister_trigger,
1610	.get_trigger_ops	= stacktrace_get_trigger_ops,
1611	.set_filter		= set_trigger_filter,
1612};
1613
1614static __init int register_trigger_stacktrace_cmd(void)
1615{
1616	int ret;
1617
1618	ret = register_event_command(&trigger_stacktrace_cmd);
1619	WARN_ON(ret < 0);
1620
1621	return ret;
1622}
1623#else
1624static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1625#endif /* CONFIG_STACKTRACE */
1626
1627static __init void unregister_trigger_traceon_traceoff_cmds(void)
1628{
1629	unregister_event_command(&trigger_traceon_cmd);
1630	unregister_event_command(&trigger_traceoff_cmd);
1631}
1632
1633static void
1634event_enable_trigger(struct event_trigger_data *data,
1635		     struct trace_buffer *buffer,  void *rec,
1636		     struct ring_buffer_event *event)
1637{
1638	struct enable_trigger_data *enable_data = data->private_data;
1639
1640	if (enable_data->enable)
1641		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1642	else
1643		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1644}
1645
1646static void
1647event_enable_count_trigger(struct event_trigger_data *data,
1648			   struct trace_buffer *buffer,  void *rec,
1649			   struct ring_buffer_event *event)
1650{
1651	struct enable_trigger_data *enable_data = data->private_data;
1652
1653	if (!data->count)
1654		return;
1655
1656	/* Skip if the event is in a state we want to switch to */
1657	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1658		return;
1659
1660	if (data->count != -1)
1661		(data->count)--;
1662
1663	event_enable_trigger(data, buffer, rec, event);
1664}
1665
1666int event_enable_trigger_print(struct seq_file *m,
1667			       struct event_trigger_data *data)
1668{
1669	struct enable_trigger_data *enable_data = data->private_data;
1670
1671	seq_printf(m, "%s:%s:%s",
1672		   enable_data->hist ?
1673		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1674		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1675		   enable_data->file->event_call->class->system,
1676		   trace_event_name(enable_data->file->event_call));
1677
1678	if (data->count == -1)
1679		seq_puts(m, ":unlimited");
1680	else
1681		seq_printf(m, ":count=%ld", data->count);
1682
1683	if (data->filter_str)
1684		seq_printf(m, " if %s\n", data->filter_str);
1685	else
1686		seq_putc(m, '\n');
1687
1688	return 0;
1689}
1690
1691void event_enable_trigger_free(struct event_trigger_data *data)
1692{
1693	struct enable_trigger_data *enable_data = data->private_data;
1694
1695	if (WARN_ON_ONCE(data->ref <= 0))
1696		return;
1697
1698	data->ref--;
1699	if (!data->ref) {
1700		/* Remove the SOFT_MODE flag */
1701		trace_event_enable_disable(enable_data->file, 0, 1);
1702		trace_event_put_ref(enable_data->file->event_call);
1703		trigger_data_free(data);
1704		kfree(enable_data);
1705	}
1706}
1707
1708static struct event_trigger_ops event_enable_trigger_ops = {
1709	.trigger		= event_enable_trigger,
1710	.print			= event_enable_trigger_print,
1711	.init			= event_trigger_init,
1712	.free			= event_enable_trigger_free,
1713};
1714
1715static struct event_trigger_ops event_enable_count_trigger_ops = {
1716	.trigger		= event_enable_count_trigger,
1717	.print			= event_enable_trigger_print,
1718	.init			= event_trigger_init,
1719	.free			= event_enable_trigger_free,
1720};
1721
1722static struct event_trigger_ops event_disable_trigger_ops = {
1723	.trigger		= event_enable_trigger,
1724	.print			= event_enable_trigger_print,
1725	.init			= event_trigger_init,
1726	.free			= event_enable_trigger_free,
1727};
1728
1729static struct event_trigger_ops event_disable_count_trigger_ops = {
1730	.trigger		= event_enable_count_trigger,
1731	.print			= event_enable_trigger_print,
1732	.init			= event_trigger_init,
1733	.free			= event_enable_trigger_free,
1734};
1735
1736int event_enable_trigger_parse(struct event_command *cmd_ops,
1737			       struct trace_event_file *file,
1738			       char *glob, char *cmd, char *param_and_filter)
1739{
1740	struct trace_event_file *event_enable_file;
1741	struct enable_trigger_data *enable_data;
1742	struct event_trigger_data *trigger_data;
1743	struct trace_array *tr = file->tr;
1744	char *param, *filter;
1745	bool enable, remove;
1746	const char *system;
1747	const char *event;
1748	bool hist = false;
1749	int ret;
1750
1751	remove = event_trigger_check_remove(glob);
1752
1753	if (event_trigger_empty_param(param_and_filter))
1754		return -EINVAL;
1755
1756	ret = event_trigger_separate_filter(param_and_filter, &param, &filter, true);
1757	if (ret)
1758		return ret;
1759
1760	system = strsep(&param, ":");
1761	if (!param)
1762		return -EINVAL;
1763
1764	event = strsep(&param, ":");
1765
1766	ret = -EINVAL;
1767	event_enable_file = find_event_file(tr, system, event);
1768	if (!event_enable_file)
1769		goto out;
1770
1771#ifdef CONFIG_HIST_TRIGGERS
1772	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1773		(strcmp(cmd, DISABLE_HIST_STR) == 0));
1774
1775	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1776		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1777#else
1778	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1779#endif
1780	ret = -ENOMEM;
1781
1782	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1783	if (!enable_data)
1784		goto out;
1785
1786	enable_data->hist = hist;
1787	enable_data->enable = enable;
1788	enable_data->file = event_enable_file;
1789
1790	trigger_data = event_trigger_alloc(cmd_ops, cmd, param, enable_data);
1791	if (!trigger_data) {
1792		kfree(enable_data);
1793		goto out;
1794	}
1795
1796	if (remove) {
1797		event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
1798		kfree(trigger_data);
1799		kfree(enable_data);
1800		ret = 0;
1801		goto out;
1802	}
1803
1804	/* Up the trigger_data count to make sure nothing frees it on failure */
1805	event_trigger_init(trigger_data);
1806
1807	ret = event_trigger_parse_num(param, trigger_data);
1808	if (ret)
1809		goto out_free;
1810
1811	ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
1812	if (ret < 0)
1813		goto out_free;
1814
1815	/* Don't let event modules unload while probe registered */
1816	ret = trace_event_try_get_ref(event_enable_file->event_call);
1817	if (!ret) {
1818		ret = -EBUSY;
1819		goto out_free;
1820	}
1821
1822	ret = trace_event_enable_disable(event_enable_file, 1, 1);
1823	if (ret < 0)
1824		goto out_put;
1825
1826	ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
1827	if (ret)
1828		goto out_disable;
1829
1830	event_trigger_free(trigger_data);
1831 out:
1832	return ret;
1833 out_disable:
1834	trace_event_enable_disable(event_enable_file, 0, 1);
1835 out_put:
1836	trace_event_put_ref(event_enable_file->event_call);
1837 out_free:
1838	event_trigger_reset_filter(cmd_ops, trigger_data);
1839	event_trigger_free(trigger_data);
1840	kfree(enable_data);
1841
1842	goto out;
1843}
1844
1845int event_enable_register_trigger(char *glob,
1846				  struct event_trigger_data *data,
1847				  struct trace_event_file *file)
1848{
1849	struct enable_trigger_data *enable_data = data->private_data;
1850	struct enable_trigger_data *test_enable_data;
1851	struct event_trigger_data *test;
1852	int ret = 0;
1853
1854	lockdep_assert_held(&event_mutex);
1855
1856	list_for_each_entry(test, &file->triggers, list) {
1857		test_enable_data = test->private_data;
1858		if (test_enable_data &&
1859		    (test->cmd_ops->trigger_type ==
1860		     data->cmd_ops->trigger_type) &&
1861		    (test_enable_data->file == enable_data->file)) {
1862			ret = -EEXIST;
1863			goto out;
1864		}
1865	}
1866
1867	if (data->ops->init) {
1868		ret = data->ops->init(data);
1869		if (ret < 0)
1870			goto out;
1871	}
1872
1873	list_add_rcu(&data->list, &file->triggers);
1874
1875	update_cond_flag(file);
1876	ret = trace_event_trigger_enable_disable(file, 1);
1877	if (ret < 0) {
1878		list_del_rcu(&data->list);
1879		update_cond_flag(file);
1880	}
1881out:
1882	return ret;
1883}
1884
1885void event_enable_unregister_trigger(char *glob,
1886				     struct event_trigger_data *test,
1887				     struct trace_event_file *file)
1888{
1889	struct enable_trigger_data *test_enable_data = test->private_data;
1890	struct event_trigger_data *data = NULL, *iter;
1891	struct enable_trigger_data *enable_data;
1892
1893	lockdep_assert_held(&event_mutex);
1894
1895	list_for_each_entry(iter, &file->triggers, list) {
1896		enable_data = iter->private_data;
1897		if (enable_data &&
1898		    (iter->cmd_ops->trigger_type ==
1899		     test->cmd_ops->trigger_type) &&
1900		    (enable_data->file == test_enable_data->file)) {
1901			data = iter;
1902			list_del_rcu(&data->list);
1903			trace_event_trigger_enable_disable(file, 0);
1904			update_cond_flag(file);
1905			break;
1906		}
1907	}
1908
1909	if (data && data->ops->free)
1910		data->ops->free(data);
1911}
1912
1913static struct event_trigger_ops *
1914event_enable_get_trigger_ops(char *cmd, char *param)
1915{
1916	struct event_trigger_ops *ops;
1917	bool enable;
1918
1919#ifdef CONFIG_HIST_TRIGGERS
1920	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1921		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1922#else
1923	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1924#endif
1925	if (enable)
1926		ops = param ? &event_enable_count_trigger_ops :
1927			&event_enable_trigger_ops;
1928	else
1929		ops = param ? &event_disable_count_trigger_ops :
1930			&event_disable_trigger_ops;
1931
1932	return ops;
1933}
1934
1935static struct event_command trigger_enable_cmd = {
1936	.name			= ENABLE_EVENT_STR,
1937	.trigger_type		= ETT_EVENT_ENABLE,
1938	.parse			= event_enable_trigger_parse,
1939	.reg			= event_enable_register_trigger,
1940	.unreg			= event_enable_unregister_trigger,
1941	.get_trigger_ops	= event_enable_get_trigger_ops,
1942	.set_filter		= set_trigger_filter,
1943};
1944
1945static struct event_command trigger_disable_cmd = {
1946	.name			= DISABLE_EVENT_STR,
1947	.trigger_type		= ETT_EVENT_ENABLE,
1948	.parse			= event_enable_trigger_parse,
1949	.reg			= event_enable_register_trigger,
1950	.unreg			= event_enable_unregister_trigger,
1951	.get_trigger_ops	= event_enable_get_trigger_ops,
1952	.set_filter		= set_trigger_filter,
1953};
1954
1955static __init void unregister_trigger_enable_disable_cmds(void)
1956{
1957	unregister_event_command(&trigger_enable_cmd);
1958	unregister_event_command(&trigger_disable_cmd);
1959}
1960
1961static __init int register_trigger_enable_disable_cmds(void)
1962{
1963	int ret;
1964
1965	ret = register_event_command(&trigger_enable_cmd);
1966	if (WARN_ON(ret < 0))
1967		return ret;
1968	ret = register_event_command(&trigger_disable_cmd);
1969	if (WARN_ON(ret < 0))
1970		unregister_trigger_enable_disable_cmds();
1971
1972	return ret;
1973}
1974
1975static __init int register_trigger_traceon_traceoff_cmds(void)
1976{
1977	int ret;
1978
1979	ret = register_event_command(&trigger_traceon_cmd);
1980	if (WARN_ON(ret < 0))
1981		return ret;
1982	ret = register_event_command(&trigger_traceoff_cmd);
1983	if (WARN_ON(ret < 0))
1984		unregister_trigger_traceon_traceoff_cmds();
1985
1986	return ret;
1987}
1988
1989__init int register_trigger_cmds(void)
1990{
1991	register_trigger_traceon_traceoff_cmds();
1992	register_trigger_snapshot_cmd();
1993	register_trigger_stacktrace_cmd();
1994	register_trigger_enable_disable_cmds();
1995	register_trigger_hist_enable_disable_cmds();
1996	register_trigger_hist_cmd();
1997
1998	return 0;
1999}
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * trace_events_trigger - trace event triggers
   4 *
   5 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
   6 */
   7
   8#include <linux/security.h>
   9#include <linux/module.h>
  10#include <linux/ctype.h>
  11#include <linux/mutex.h>
  12#include <linux/slab.h>
  13#include <linux/rculist.h>
  14
  15#include "trace.h"
  16
  17static LIST_HEAD(trigger_commands);
  18static DEFINE_MUTEX(trigger_cmd_mutex);
  19
  20void trigger_data_free(struct event_trigger_data *data)
  21{
  22	if (data->cmd_ops->set_filter)
  23		data->cmd_ops->set_filter(NULL, data, NULL);
  24
  25	/* make sure current triggers exit before free */
  26	tracepoint_synchronize_unregister();
  27
  28	kfree(data);
  29}
  30
  31/**
  32 * event_triggers_call - Call triggers associated with a trace event
  33 * @file: The trace_event_file associated with the event
  34 * @rec: The trace entry for the event, NULL for unconditional invocation
  35 *
  36 * For each trigger associated with an event, invoke the trigger
  37 * function registered with the associated trigger command.  If rec is
  38 * non-NULL, it means that the trigger requires further processing and
  39 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
  40 * trigger has a filter associated with it, rec will be checked against
  41 * the filter and, if the record matches, the trigger will be invoked.
  42 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
  43 * in any case until the current event is written, the trigger
  44 * function isn't invoked but the bit associated with the deferred
  45 * trigger is set in the return value.
  46 *
  47 * Returns an enum event_trigger_type value containing a set bit for
  48 * any trigger that should be deferred, ETT_NONE if nothing to defer.
  49 *
  50 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
  51 *
  52 * Return: an enum event_trigger_type value containing a set bit for
  53 * any trigger that should be deferred, ETT_NONE if nothing to defer.
  54 */
  55enum event_trigger_type
  56event_triggers_call(struct trace_event_file *file, void *rec,
  57		    struct ring_buffer_event *event)
  58{
  59	struct event_trigger_data *data;
  60	enum event_trigger_type tt = ETT_NONE;
  61	struct event_filter *filter;
  62
  63	if (list_empty(&file->triggers))
  64		return tt;
  65
  66	list_for_each_entry_rcu(data, &file->triggers, list) {
  67		if (data->paused)
  68			continue;
  69		if (!rec) {
  70			data->ops->func(data, rec, event);
  71			continue;
  72		}
  73		filter = rcu_dereference_sched(data->filter);
  74		if (filter && !filter_match_preds(filter, rec))
  75			continue;
  76		if (event_command_post_trigger(data->cmd_ops)) {
  77			tt |= data->cmd_ops->trigger_type;
  78			continue;
  79		}
  80		data->ops->func(data, rec, event);
  81	}
  82	return tt;
  83}
  84EXPORT_SYMBOL_GPL(event_triggers_call);
  85
  86/**
  87 * event_triggers_post_call - Call 'post_triggers' for a trace event
  88 * @file: The trace_event_file associated with the event
  89 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
  90 *
  91 * For each trigger associated with an event, invoke the trigger
  92 * function registered with the associated trigger command, if the
  93 * corresponding bit is set in the tt enum passed into this function.
  94 * See @event_triggers_call for details on how those bits are set.
  95 *
  96 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
  97 */
  98void
  99event_triggers_post_call(struct trace_event_file *file,
 100			 enum event_trigger_type tt)
 101{
 102	struct event_trigger_data *data;
 103
 104	list_for_each_entry_rcu(data, &file->triggers, list) {
 105		if (data->paused)
 106			continue;
 107		if (data->cmd_ops->trigger_type & tt)
 108			data->ops->func(data, NULL, NULL);
 109	}
 110}
 111EXPORT_SYMBOL_GPL(event_triggers_post_call);
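/*
 * Editorial sketch (not part of this file): the intended pairing of the two
 * entry points above, as seen from an event commit path.  The surrounding
 * helper and variable names are assumptions for illustration only:
 *
 *   enum event_trigger_type tt = ETT_NONE;
 *
 *   if (trigger_cond)                        // TRIGGER_COND set on the file
 *           tt = event_triggers_call(file, entry, event);
 *   ...
 *   // the event record is committed to the ring buffer here
 *   ...
 *   if (tt)
 *           event_triggers_post_call(file, tt);
 */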
 112
 113#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)
 114
 115static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
 116{
 117	struct trace_event_file *event_file = event_file_data(m->private);
 118
 119	if (t == SHOW_AVAILABLE_TRIGGERS) {
 120		(*pos)++;
 121		return NULL;
 122	}
 123	return seq_list_next(t, &event_file->triggers, pos);
 124}
 125
 126static void *trigger_start(struct seq_file *m, loff_t *pos)
 127{
 128	struct trace_event_file *event_file;
 129
 130	/* ->stop() is called even if ->start() fails */
 131	mutex_lock(&event_mutex);
 132	event_file = event_file_data(m->private);
 133	if (unlikely(!event_file))
 134		return ERR_PTR(-ENODEV);
 135
 136	if (list_empty(&event_file->triggers))
 137		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
 138
 139	return seq_list_start(&event_file->triggers, *pos);
 140}
 141
 142static void trigger_stop(struct seq_file *m, void *t)
 143{
 144	mutex_unlock(&event_mutex);
 145}
 146
 147static int trigger_show(struct seq_file *m, void *v)
 148{
 149	struct event_trigger_data *data;
 150	struct event_command *p;
 151
 152	if (v == SHOW_AVAILABLE_TRIGGERS) {
 153		seq_puts(m, "# Available triggers:\n");
 154		seq_putc(m, '#');
 155		mutex_lock(&trigger_cmd_mutex);
 156		list_for_each_entry_reverse(p, &trigger_commands, list)
 157			seq_printf(m, " %s", p->name);
 158		seq_putc(m, '\n');
 159		mutex_unlock(&trigger_cmd_mutex);
 160		return 0;
 161	}
 162
 163	data = list_entry(v, struct event_trigger_data, list);
 164	data->ops->print(m, data->ops, data);
 165
 166	return 0;
 167}
 168
 169static const struct seq_operations event_triggers_seq_ops = {
 170	.start = trigger_start,
 171	.next = trigger_next,
 172	.stop = trigger_stop,
 173	.show = trigger_show,
 174};
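/*
 * Editorial illustration (not part of the kernel source): reading a
 * "trigger" file with no triggers set walks the sequence above and produces
 * something like the following (the exact command list depends on kernel
 * config and registration order):
 *
 *   # Available triggers:
 *   # traceon traceoff snapshot stacktrace enable_event disable_event
 *
 * Once triggers are registered, each one is printed on its own line via its
 * ops->print() callback instead of the banner above.
 */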
 175
 176static int event_trigger_regex_open(struct inode *inode, struct file *file)
 177{
 178	int ret;
 179
 180	ret = security_locked_down(LOCKDOWN_TRACEFS);
 181	if (ret)
 182		return ret;
 183
 184	mutex_lock(&event_mutex);
 185
 186	if (unlikely(!event_file_data(file))) {
 187		mutex_unlock(&event_mutex);
 188		return -ENODEV;
 189	}
 190
 191	if ((file->f_mode & FMODE_WRITE) &&
 192	    (file->f_flags & O_TRUNC)) {
 193		struct trace_event_file *event_file;
 194		struct event_command *p;
 195
 196		event_file = event_file_data(file);
 197
 198		list_for_each_entry(p, &trigger_commands, list) {
 199			if (p->unreg_all)
 200				p->unreg_all(event_file);
 201		}
 202	}
 203
 204	if (file->f_mode & FMODE_READ) {
 205		ret = seq_open(file, &event_triggers_seq_ops);
 206		if (!ret) {
 207			struct seq_file *m = file->private_data;
 208			m->private = file;
 209		}
 210	}
 211
 212	mutex_unlock(&event_mutex);
 213
 214	return ret;
 215}
 216
 217int trigger_process_regex(struct trace_event_file *file, char *buff)
 218{
 219	char *command, *next;
 220	struct event_command *p;
 221	int ret = -EINVAL;
 222
 223	next = buff = skip_spaces(buff);
 224	command = strsep(&next, ": \t");
 225	if (next) {
 226		next = skip_spaces(next);
 227		if (!*next)
 228			next = NULL;
 229	}
 230	command = (command[0] != '!') ? command : command + 1;
 231
 232	mutex_lock(&trigger_cmd_mutex);
 233	list_for_each_entry(p, &trigger_commands, list) {
 234		if (strcmp(p->name, command) == 0) {
 235			ret = p->func(p, file, buff, command, next);
 236			goto out_unlock;
 237		}
 238	}
 239 out_unlock:
 240	mutex_unlock(&trigger_cmd_mutex);
 241
 242	return ret;
 243}
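/*
 * Editorial illustration (not part of the kernel source): the buffer handed
 * to trigger_process_regex() is one command of the general form
 *
 *   [!]<command>[:<count>] [if <filter>]
 *
 * for example (the tracefs path is an assumption):
 *
 *   echo 'traceoff:5 if prev_prio < 100' > \
 *           /sys/kernel/tracing/events/sched/sched_switch/trigger
 *
 * strsep() above splits off "traceoff" as the command name and passes the
 * remainder ("5 if prev_prio < 100") to the matching event_command's
 * callback as 'next'.
 */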
 244
 245static ssize_t event_trigger_regex_write(struct file *file,
 246					 const char __user *ubuf,
 247					 size_t cnt, loff_t *ppos)
 248{
 249	struct trace_event_file *event_file;
 250	ssize_t ret;
 251	char *buf;
 252
 253	if (!cnt)
 254		return 0;
 255
 256	if (cnt >= PAGE_SIZE)
 257		return -EINVAL;
 258
 259	buf = memdup_user_nul(ubuf, cnt);
 260	if (IS_ERR(buf))
 261		return PTR_ERR(buf);
 262
 263	strim(buf);
 264
 265	mutex_lock(&event_mutex);
 266	event_file = event_file_data(file);
 267	if (unlikely(!event_file)) {
 268		mutex_unlock(&event_mutex);
 269		kfree(buf);
 270		return -ENODEV;
 271	}
 272	ret = trigger_process_regex(event_file, buf);
 273	mutex_unlock(&event_mutex);
 274
 275	kfree(buf);
 276	if (ret < 0)
 277		goto out;
 278
 279	*ppos += cnt;
 280	ret = cnt;
 281 out:
 282	return ret;
 283}
 284
 285static int event_trigger_regex_release(struct inode *inode, struct file *file)
 286{
 287	mutex_lock(&event_mutex);
 288
 289	if (file->f_mode & FMODE_READ)
 290		seq_release(inode, file);
 291
 292	mutex_unlock(&event_mutex);
 293
 294	return 0;
 295}
 296
 297static ssize_t
 298event_trigger_write(struct file *filp, const char __user *ubuf,
 299		    size_t cnt, loff_t *ppos)
 300{
 301	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
 302}
 303
 304static int
 305event_trigger_open(struct inode *inode, struct file *filp)
 306{
 307	/* Checks for tracefs lockdown */
 308	return event_trigger_regex_open(inode, filp);
 309}
 310
 311static int
 312event_trigger_release(struct inode *inode, struct file *file)
 313{
 314	return event_trigger_regex_release(inode, file);
 315}
 316
 317const struct file_operations event_trigger_fops = {
 318	.open = event_trigger_open,
 319	.read = seq_read,
 320	.write = event_trigger_write,
 321	.llseek = tracing_lseek,
 322	.release = event_trigger_release,
 323};
 324
 325/*
 326 * Currently we only register event commands from __init, so mark this
 327 * __init too.
 328 */
 329__init int register_event_command(struct event_command *cmd)
 330{
 331	struct event_command *p;
 332	int ret = 0;
 333
 334	mutex_lock(&trigger_cmd_mutex);
 335	list_for_each_entry(p, &trigger_commands, list) {
 336		if (strcmp(cmd->name, p->name) == 0) {
 337			ret = -EBUSY;
 338			goto out_unlock;
 339		}
 340	}
 341	list_add(&cmd->list, &trigger_commands);
 342 out_unlock:
 343	mutex_unlock(&trigger_cmd_mutex);
 344
 345	return ret;
 346}
 347
 348/*
 349 * Currently we only unregister event commands from __init, so mark
 350 * this __init too.
 351 */
 352__init int unregister_event_command(struct event_command *cmd)
 353{
 354	struct event_command *p, *n;
 355	int ret = -ENODEV;
 356
 357	mutex_lock(&trigger_cmd_mutex);
 358	list_for_each_entry_safe(p, n, &trigger_commands, list) {
 359		if (strcmp(cmd->name, p->name) == 0) {
 360			ret = 0;
 361			list_del_init(&p->list);
 362			goto out_unlock;
 363		}
 364	}
 365 out_unlock:
 366	mutex_unlock(&trigger_cmd_mutex);
 367
 368	return ret;
 369}
 370
 371/**
 372 * event_trigger_print - Generic event_trigger_ops @print implementation
 373 * @name: The name of the event trigger
 374 * @m: The seq_file being printed to
 375 * @data: Trigger-specific data
 376 * @filter_str: filter_str to print, if present
 377 *
 378 * Common implementation for event triggers to print themselves.
 379 *
 380 * Usually wrapped by a function that simply sets the @name of the
 381 * trigger command and then invokes this.
 382 *
 383 * Return: 0 on success, errno otherwise
 384 */
 385static int
 386event_trigger_print(const char *name, struct seq_file *m,
 387		    void *data, char *filter_str)
 388{
 389	long count = (long)data;
 390
 391	seq_puts(m, name);
 392
 393	if (count == -1)
 394		seq_puts(m, ":unlimited");
 395	else
 396		seq_printf(m, ":count=%ld", count);
 397
 398	if (filter_str)
 399		seq_printf(m, " if %s\n", filter_str);
 400	else
 401		seq_putc(m, '\n');
 402
 403	return 0;
 404}
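/*
 * Editorial illustration (not part of the kernel source): with @name set to
 * "traceon", the helper above emits lines such as
 *
 *   traceon:unlimited
 *   traceon:count=5 if prev_pid == 1
 *
 * where the count shown is the remaining number of firings (data->count).
 */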
 405
 406/**
 407 * event_trigger_init - Generic event_trigger_ops @init implementation
 408 * @ops: The trigger ops associated with the trigger
 409 * @data: Trigger-specific data
 410 *
 411 * Common implementation of event trigger initialization.
 412 *
 413 * Usually used directly as the @init method in event trigger
 414 * implementations.
 415 *
 416 * Return: 0 on success, errno otherwise
 417 */
 418int event_trigger_init(struct event_trigger_ops *ops,
 419		       struct event_trigger_data *data)
 420{
 421	data->ref++;
 422	return 0;
 423}
 424
 425/**
 426 * event_trigger_free - Generic event_trigger_ops @free implementation
 427 * @ops: The trigger ops associated with the trigger
 428 * @data: Trigger-specific data
 429 *
 430 * Common implementation of event trigger de-initialization.
 431 *
 432 * Usually used directly as the @free method in event trigger
 433 * implementations.
 434 */
 435static void
 436event_trigger_free(struct event_trigger_ops *ops,
 437		   struct event_trigger_data *data)
 438{
 439	if (WARN_ON_ONCE(data->ref <= 0))
 440		return;
 441
 442	data->ref--;
 443	if (!data->ref)
 444		trigger_data_free(data);
 445}
 446
 447int trace_event_trigger_enable_disable(struct trace_event_file *file,
 448				       int trigger_enable)
 449{
 450	int ret = 0;
 451
 452	if (trigger_enable) {
 453		if (atomic_inc_return(&file->tm_ref) > 1)
 454			return ret;
 455		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
 456		ret = trace_event_enable_disable(file, 1, 1);
 457	} else {
 458		if (atomic_dec_return(&file->tm_ref) > 0)
 459			return ret;
 460		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
 461		ret = trace_event_enable_disable(file, 0, 1);
 462	}
 463
 464	return ret;
 465}
 466
 467/**
 468 * clear_event_triggers - Clear all triggers associated with a trace array
 469 * @tr: The trace array to clear
 470 *
 471 * For each trigger, the triggering event has its tm_ref decremented
 472 * via trace_event_trigger_enable_disable(), and any associated event
 473 * (in the case of enable/disable_event triggers) will have its sm_ref
 474 * decremented via free()->trace_event_enable_disable().  That
 475 * combination effectively reverses the soft-mode/trigger state added
 476 * by trigger registration.
 477 *
 478 * Must be called with event_mutex held.
 479 */
 480void
 481clear_event_triggers(struct trace_array *tr)
 482{
 483	struct trace_event_file *file;
 484
 485	list_for_each_entry(file, &tr->events, list) {
 486		struct event_trigger_data *data, *n;
 487		list_for_each_entry_safe(data, n, &file->triggers, list) {
 488			trace_event_trigger_enable_disable(file, 0);
 489			list_del_rcu(&data->list);
 490			if (data->ops->free)
 491				data->ops->free(data->ops, data);
 492		}
 493	}
 494}
 495
 496/**
 497 * update_cond_flag - Set or reset the TRIGGER_COND bit
 498 * @file: The trace_event_file associated with the event
 499 *
 500 * If an event has triggers and any of those triggers has a filter or
 501 * a post_trigger, trigger invocation needs to be deferred until after
 502 * the current event has logged its data, and the event should have
 503 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 504 * cleared.
 505 */
 506void update_cond_flag(struct trace_event_file *file)
 507{
 508	struct event_trigger_data *data;
 509	bool set_cond = false;
 510
 511	lockdep_assert_held(&event_mutex);
 512
 513	list_for_each_entry(data, &file->triggers, list) {
 514		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
 515		    event_command_needs_rec(data->cmd_ops)) {
 516			set_cond = true;
 517			break;
 518		}
 519	}
 520
 521	if (set_cond)
 522		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
 523	else
 524		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
 525}
 526
 527/**
 528 * register_trigger - Generic event_command @reg implementation
 529 * @glob: The raw string used to register the trigger
 530 * @ops: The trigger ops associated with the trigger
 531 * @data: Trigger-specific data to associate with the trigger
 532 * @file: The trace_event_file associated with the event
 533 *
 534 * Common implementation for event trigger registration.
 535 *
 536 * Usually used directly as the @reg method in event command
 537 * implementations.
 538 *
 539 * Return: 0 on success, errno otherwise
 540 */
 541static int register_trigger(char *glob, struct event_trigger_ops *ops,
 542			    struct event_trigger_data *data,
 543			    struct trace_event_file *file)
 544{
 545	struct event_trigger_data *test;
 546	int ret = 0;
 547
 548	lockdep_assert_held(&event_mutex);
 549
 550	list_for_each_entry(test, &file->triggers, list) {
 551		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
 552			ret = -EEXIST;
 553			goto out;
 554		}
 555	}
 556
 557	if (data->ops->init) {
 558		ret = data->ops->init(data->ops, data);
 559		if (ret < 0)
 560			goto out;
 561	}
 562
 563	list_add_rcu(&data->list, &file->triggers);
 564	ret++;
 565
 566	update_cond_flag(file);
 567	if (trace_event_trigger_enable_disable(file, 1) < 0) {
 568		list_del_rcu(&data->list);
 569		update_cond_flag(file);
 570		ret--;
 571	}
 572out:
 573	return ret;
 574}
 575
 576/**
 577 * unregister_trigger - Generic event_command @unreg implementation
 578 * @glob: The raw string used to register the trigger
 579 * @ops: The trigger ops associated with the trigger
 580 * @test: Trigger-specific data used to find the trigger to remove
 581 * @file: The trace_event_file associated with the event
 582 *
 583 * Common implementation for event trigger unregistration.
 584 *
 585 * Usually used directly as the @unreg method in event command
 586 * implementations.
 587 */
 588static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
 589			       struct event_trigger_data *test,
 590			       struct trace_event_file *file)
 591{
 592	struct event_trigger_data *data;
 593	bool unregistered = false;
 594
 595	lockdep_assert_held(&event_mutex);
 596
 597	list_for_each_entry(data, &file->triggers, list) {
 598		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
 599			unregistered = true;
 600			list_del_rcu(&data->list);
 601			trace_event_trigger_enable_disable(file, 0);
 602			update_cond_flag(file);
 603			break;
 604		}
 605	}
 606
 607	if (unregistered && data->ops->free)
  608		data->ops->free(data->ops, data);
 609}
 610
 611/**
 612 * event_trigger_callback - Generic event_command @func implementation
 613 * @cmd_ops: The command ops, used for trigger registration
 614 * @file: The trace_event_file associated with the event
 615 * @glob: The raw string used to register the trigger
 616 * @cmd: The cmd portion of the string used to register the trigger
 617 * @param: The params portion of the string used to register the trigger
 618 *
 619 * Common implementation for event command parsing and trigger
 620 * instantiation.
 
 
 
 621 *
 622 * Usually used directly as the @func method in event command
  623 * implementations.
 624 *
 625 * Return: 0 on success, errno otherwise
 626 */
 627static int
 628event_trigger_callback(struct event_command *cmd_ops,
 629		       struct trace_event_file *file,
  630		       char *glob, char *cmd, char *param
 631{
 632	struct event_trigger_data *trigger_data;
 633	struct event_trigger_ops *trigger_ops;
 634	char *trigger = NULL;
 635	char *number;
 636	int ret;
 637
 638	/* separate the trigger from the filter (t:n [if filter]) */
 639	if (param && isdigit(param[0])) {
 640		trigger = strsep(&param, " \t");
 641		if (param) {
 642			param = skip_spaces(param);
 643			if (!*param)
 644				param = NULL;
 645		}
 646	}
 647
 648	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
 649
 650	ret = -ENOMEM;
 651	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
 652	if (!trigger_data)
 653		goto out;
 654
 655	trigger_data->count = -1;
 656	trigger_data->ops = trigger_ops;
 657	trigger_data->cmd_ops = cmd_ops;
 658	trigger_data->private_data = file;
 659	INIT_LIST_HEAD(&trigger_data->list);
 660	INIT_LIST_HEAD(&trigger_data->named_list);
 661
 662	if (glob[0] == '!') {
 663		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
 664		kfree(trigger_data);
 665		ret = 0;
 666		goto out;
  667	}
 668
 669	if (trigger) {
 670		number = strsep(&trigger, ":");
 671
 672		ret = -EINVAL;
 673		if (!strlen(number))
 674			goto out_free;
 675
 676		/*
 677		 * We use the callback data field (which is a pointer)
 678		 * as our counter.
 679		 */
 680		ret = kstrtoul(number, 0, &trigger_data->count);
 681		if (ret)
 682			goto out_free;
 683	}
 684
 685	if (!param) /* if param is non-empty, it's supposed to be a filter */
  686		goto out_reg;
 687
 688	if (!cmd_ops->set_filter)
 689		goto out_reg;
 690
 691	ret = cmd_ops->set_filter(param, trigger_data, file);
 692	if (ret < 0)
 693		goto out_free;
 694
 695 out_reg:
 696	/* Up the trigger_data count to make sure reg doesn't free it on failure */
 697	event_trigger_init(trigger_ops, trigger_data);
 698	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
 699	/*
 700	 * The above returns on success the # of functions enabled,
 701	 * but if it didn't find any functions it returns zero.
 702	 * Consider no functions a failure too.
 703	 */
 704	if (!ret) {
 705		cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
 706		ret = -ENOENT;
 707	} else if (ret > 0)
 708		ret = 0;
 709
 710	/* Down the counter of trigger_data or free it if not used anymore */
 711	event_trigger_free(trigger_ops, trigger_data);
 712 out:
 713	return ret;
 714
 715 out_free:
 716	if (cmd_ops->set_filter)
 717		cmd_ops->set_filter(NULL, trigger_data, NULL);
 718	kfree(trigger_data);
 719	goto out;
 720}
 721
 722/**
 723 * set_trigger_filter - Generic event_command @set_filter implementation
 724 * @filter_str: The filter string for the trigger, NULL to remove filter
 725 * @trigger_data: Trigger-specific data
 726 * @file: The trace_event_file associated with the event
 727 *
 728 * Common implementation for event command filter parsing and filter
 729 * instantiation.
 730 *
 731 * Usually used directly as the @set_filter method in event command
 732 * implementations.
 733 *
 734 * Also used to remove a filter (if filter_str = NULL).
 735 *
 736 * Return: 0 on success, errno otherwise
 737 */
 738int set_trigger_filter(char *filter_str,
 739		       struct event_trigger_data *trigger_data,
 740		       struct trace_event_file *file)
 741{
 742	struct event_trigger_data *data = trigger_data;
 743	struct event_filter *filter = NULL, *tmp;
 744	int ret = -EINVAL;
 745	char *s;
 746
 747	if (!filter_str) /* clear the current filter */
 748		goto assign;
 749
 750	s = strsep(&filter_str, " \t");
 751
 752	if (!strlen(s) || strcmp(s, "if") != 0)
 753		goto out;
 754
 755	if (!filter_str)
 756		goto out;
 757
 758	/* The filter is for the 'trigger' event, not the triggered event */
 759	ret = create_event_filter(file->tr, file->event_call,
 760				  filter_str, false, &filter);
 761	/*
 762	 * If create_event_filter() fails, filter still needs to be freed.
 763	 * Which the calling code will do with data->filter.
 764	 */
 765 assign:
 766	tmp = rcu_access_pointer(data->filter);
 767
 768	rcu_assign_pointer(data->filter, filter);
 769
 770	if (tmp) {
 771		/* Make sure the call is done with the filter */
 772		tracepoint_synchronize_unregister();
 773		free_event_filter(tmp);
 774	}
 775
 776	kfree(data->filter_str);
 777	data->filter_str = NULL;
 778
 779	if (filter_str) {
 780		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
 781		if (!data->filter_str) {
 782			free_event_filter(rcu_access_pointer(data->filter));
 783			data->filter = NULL;
 784			ret = -ENOMEM;
 785		}
 786	}
 787 out:
 788	return ret;
 789}
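/*
 * Editorial illustration (not part of the kernel source): the filter string
 * handed in here still carries the leading "if" keyword, which strsep()
 * strips before the predicate is compiled against the *triggering* event's
 * fields, e.g. for a trigger set on sched:sched_switch:
 *
 *   filter_str = "if prev_pid == 1 && prev_prio < 100"
 *
 * Passing filter_str == NULL removes the current filter instead.
 */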
 790
 791static LIST_HEAD(named_triggers);
 792
 793/**
 794 * find_named_trigger - Find the common named trigger associated with @name
 795 * @name: The name of the set of named triggers to find the common data for
 796 *
 797 * Named triggers are sets of triggers that share a common set of
 798 * trigger data.  The first named trigger registered with a given name
 799 * owns the common trigger data that the others subsequently
 800 * registered with the same name will reference.  This function
 801 * returns the common trigger data associated with that first
 802 * registered instance.
 803 *
 804 * Return: the common trigger data for the given named trigger on
 805 * success, NULL otherwise.
 806 */
 807struct event_trigger_data *find_named_trigger(const char *name)
 808{
 809	struct event_trigger_data *data;
 810
 811	if (!name)
 812		return NULL;
 813
 814	list_for_each_entry(data, &named_triggers, named_list) {
 815		if (data->named_data)
 816			continue;
 817		if (strcmp(data->name, name) == 0)
 818			return data;
 819	}
 820
 821	return NULL;
 822}
 823
 824/**
 825 * is_named_trigger - determine if a given trigger is a named trigger
 826 * @test: The trigger data to test
 827 *
 828 * Return: true if 'test' is a named trigger, false otherwise.
 829 */
 830bool is_named_trigger(struct event_trigger_data *test)
 831{
 832	struct event_trigger_data *data;
 833
 834	list_for_each_entry(data, &named_triggers, named_list) {
 835		if (test == data)
 836			return true;
 837	}
 838
 839	return false;
 840}
 841
 842/**
 843 * save_named_trigger - save the trigger in the named trigger list
 844 * @name: The name of the named trigger set
 845 * @data: The trigger data to save
 846 *
 847 * Return: 0 if successful, negative error otherwise.
 848 */
 849int save_named_trigger(const char *name, struct event_trigger_data *data)
 850{
 851	data->name = kstrdup(name, GFP_KERNEL);
 852	if (!data->name)
 853		return -ENOMEM;
 854
 855	list_add(&data->named_list, &named_triggers);
 856
 857	return 0;
 858}
 859
 860/**
 861 * del_named_trigger - delete a trigger from the named trigger list
 862 * @data: The trigger data to delete
 863 */
 864void del_named_trigger(struct event_trigger_data *data)
 865{
 866	kfree(data->name);
 867	data->name = NULL;
 868
 869	list_del(&data->named_list);
 870}
 871
 872static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
 873{
 874	struct event_trigger_data *test;
 875
 876	list_for_each_entry(test, &named_triggers, named_list) {
 877		if (strcmp(test->name, data->name) == 0) {
 878			if (pause) {
 879				test->paused_tmp = test->paused;
 880				test->paused = true;
 881			} else {
 882				test->paused = test->paused_tmp;
 883			}
 884		}
 885	}
 886}
 887
 888/**
 889 * pause_named_trigger - Pause all named triggers with the same name
 890 * @data: The trigger data of a named trigger to pause
 891 *
 892 * Pauses a named trigger along with all other triggers having the
 893 * same name.  Because named triggers share a common set of data,
 894 * pausing only one is meaningless, so pausing one named trigger needs
 895 * to pause all triggers with the same name.
 896 */
 897void pause_named_trigger(struct event_trigger_data *data)
 898{
 899	__pause_named_trigger(data, true);
 900}
 901
 902/**
 903 * unpause_named_trigger - Un-pause all named triggers with the same name
 904 * @data: The trigger data of a named trigger to unpause
 905 *
 906 * Un-pauses a named trigger along with all other triggers having the
 907 * same name.  Because named triggers share a common set of data,
 908 * unpausing only one is meaningless, so unpausing one named trigger
 909 * needs to unpause all triggers with the same name.
 910 */
 911void unpause_named_trigger(struct event_trigger_data *data)
 912{
 913	__pause_named_trigger(data, false);
 914}
 915
 916/**
 917 * set_named_trigger_data - Associate common named trigger data
  918 * @data: The trigger data to associate with the common named trigger data
 919 *
 920 * Named triggers are sets of triggers that share a common set of
 921 * trigger data.  The first named trigger registered with a given name
 922 * owns the common trigger data that the others subsequently
 923 * registered with the same name will reference.  This function
 924 * associates the common trigger data from the first trigger with the
 925 * given trigger.
 926 */
 927void set_named_trigger_data(struct event_trigger_data *data,
 928			    struct event_trigger_data *named_data)
 929{
 930	data->named_data = named_data;
 931}
 932
 933struct event_trigger_data *
 934get_named_trigger_data(struct event_trigger_data *data)
 935{
 936	return data->named_data;
 937}
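/*
 * Editorial note (not part of the kernel source): named triggers are mainly
 * used by hist triggers, whose 'name=' option lets several events feed one
 * shared histogram, e.g. (syntax per the histogram documentation, paths and
 * fields illustrative):
 *
 *   echo 'hist:keys=call_site:vals=bytes_req:name=foo' > \
 *           events/kmem/kmalloc/trigger
 *   echo 'hist:keys=call_site:vals=bytes_req:name=foo' > \
 *           events/kmem/kmem_cache_alloc/trigger
 *
 * The first registration owns the shared data; later ones find it via
 * find_named_trigger() and attach to it with set_named_trigger_data().
 */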
 938
 939static void
 940traceon_trigger(struct event_trigger_data *data, void *rec,
 941		struct ring_buffer_event *event)
 942{
 943	if (tracing_is_on())
 944		return;
 945
 946	tracing_on();
 947}
 948
 949static void
 950traceon_count_trigger(struct event_trigger_data *data, void *rec,
 951		      struct ring_buffer_event *event)
 952{
 953	if (tracing_is_on())
 954		return;
 955
 956	if (!data->count)
 957		return;
 958
 959	if (data->count != -1)
 960		(data->count)--;
 961
 962	tracing_on();
 963}
 964
 965static void
 966traceoff_trigger(struct event_trigger_data *data, void *rec,
 967		 struct ring_buffer_event *event)
 968{
 969	if (!tracing_is_on())
 970		return;
 971
 972	tracing_off();
 973}
 974
 975static void
 976traceoff_count_trigger(struct event_trigger_data *data, void *rec,
 977		       struct ring_buffer_event *event)
 978{
 979	if (!tracing_is_on())
 980		return;
 981
 982	if (!data->count)
 983		return;
 984
 985	if (data->count != -1)
 986		(data->count)--;
 987
 988	tracing_off();
 989}
 990
 991static int
 992traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
 993		      struct event_trigger_data *data)
 994{
 995	return event_trigger_print("traceon", m, (void *)data->count,
 996				   data->filter_str);
 997}
 998
 999static int
1000traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1001		       struct event_trigger_data *data)
1002{
1003	return event_trigger_print("traceoff", m, (void *)data->count,
1004				   data->filter_str);
1005}
1006
1007static struct event_trigger_ops traceon_trigger_ops = {
1008	.func			= traceon_trigger,
1009	.print			= traceon_trigger_print,
1010	.init			= event_trigger_init,
1011	.free			= event_trigger_free,
1012};
1013
1014static struct event_trigger_ops traceon_count_trigger_ops = {
1015	.func			= traceon_count_trigger,
1016	.print			= traceon_trigger_print,
1017	.init			= event_trigger_init,
1018	.free			= event_trigger_free,
1019};
1020
1021static struct event_trigger_ops traceoff_trigger_ops = {
1022	.func			= traceoff_trigger,
1023	.print			= traceoff_trigger_print,
1024	.init			= event_trigger_init,
1025	.free			= event_trigger_free,
1026};
1027
1028static struct event_trigger_ops traceoff_count_trigger_ops = {
1029	.func			= traceoff_count_trigger,
1030	.print			= traceoff_trigger_print,
1031	.init			= event_trigger_init,
1032	.free			= event_trigger_free,
1033};
1034
1035static struct event_trigger_ops *
1036onoff_get_trigger_ops(char *cmd, char *param)
1037{
1038	struct event_trigger_ops *ops;
1039
1040	/* we register both traceon and traceoff to this callback */
1041	if (strcmp(cmd, "traceon") == 0)
1042		ops = param ? &traceon_count_trigger_ops :
1043			&traceon_trigger_ops;
1044	else
1045		ops = param ? &traceoff_count_trigger_ops :
1046			&traceoff_trigger_ops;
1047
1048	return ops;
1049}
1050
1051static struct event_command trigger_traceon_cmd = {
1052	.name			= "traceon",
1053	.trigger_type		= ETT_TRACE_ONOFF,
1054	.func			= event_trigger_callback,
1055	.reg			= register_trigger,
1056	.unreg			= unregister_trigger,
1057	.get_trigger_ops	= onoff_get_trigger_ops,
1058	.set_filter		= set_trigger_filter,
1059};
1060
1061static struct event_command trigger_traceoff_cmd = {
1062	.name			= "traceoff",
1063	.trigger_type		= ETT_TRACE_ONOFF,
1064	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1065	.func			= event_trigger_callback,
1066	.reg			= register_trigger,
1067	.unreg			= unregister_trigger,
1068	.get_trigger_ops	= onoff_get_trigger_ops,
1069	.set_filter		= set_trigger_filter,
1070};
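/*
 * Editorial sketch (not part of the kernel source): a new trigger command
 * built from the generic pieces above would mirror the two structures just
 * defined.  Everything named "mycmd"/"mytrigger" below is hypothetical, and
 * a real command would need its own ETT_* type bit and trigger ops:
 *
 *   static struct event_trigger_ops *
 *   mycmd_get_trigger_ops(char *cmd, char *param)
 *   {
 *           return param ? &mycmd_count_trigger_ops : &mycmd_trigger_ops;
 *   }
 *
 *   static struct event_command trigger_mycmd = {
 *           .name            = "mytrigger",
 *           .trigger_type    = ETT_MYCMD,              // hypothetical bit
 *           .func            = event_trigger_callback, // generic parser above
 *           .reg             = register_trigger,
 *           .unreg           = unregister_trigger,
 *           .get_trigger_ops = mycmd_get_trigger_ops,
 *           .set_filter      = set_trigger_filter,
 *   };
 *
 *   // registered once at boot: register_event_command(&trigger_mycmd);
 */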
1071
1072#ifdef CONFIG_TRACER_SNAPSHOT
1073static void
1074snapshot_trigger(struct event_trigger_data *data, void *rec,
1075		 struct ring_buffer_event *event)
1076{
1077	struct trace_event_file *file = data->private_data;
1078
1079	if (file)
1080		tracing_snapshot_instance(file->tr);
1081	else
1082		tracing_snapshot();
1083}
1084
1085static void
1086snapshot_count_trigger(struct event_trigger_data *data, void *rec,
1087		       struct ring_buffer_event *event)
1088{
1089	if (!data->count)
1090		return;
1091
1092	if (data->count != -1)
1093		(data->count)--;
1094
1095	snapshot_trigger(data, rec, event);
1096}
1097
1098static int
1099register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
1100			  struct event_trigger_data *data,
1101			  struct trace_event_file *file)
1102{
1103	if (tracing_alloc_snapshot_instance(file->tr) != 0)
1104		return 0;
1105
1106	return register_trigger(glob, ops, data, file);
1107}
1108
1109static int
1110snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1111		       struct event_trigger_data *data)
1112{
1113	return event_trigger_print("snapshot", m, (void *)data->count,
1114				   data->filter_str);
1115}
1116
1117static struct event_trigger_ops snapshot_trigger_ops = {
1118	.func			= snapshot_trigger,
1119	.print			= snapshot_trigger_print,
1120	.init			= event_trigger_init,
1121	.free			= event_trigger_free,
1122};
1123
1124static struct event_trigger_ops snapshot_count_trigger_ops = {
1125	.func			= snapshot_count_trigger,
1126	.print			= snapshot_trigger_print,
1127	.init			= event_trigger_init,
1128	.free			= event_trigger_free,
1129};
1130
1131static struct event_trigger_ops *
1132snapshot_get_trigger_ops(char *cmd, char *param)
1133{
1134	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1135}
1136
1137static struct event_command trigger_snapshot_cmd = {
1138	.name			= "snapshot",
1139	.trigger_type		= ETT_SNAPSHOT,
1140	.func			= event_trigger_callback,
1141	.reg			= register_snapshot_trigger,
1142	.unreg			= unregister_trigger,
1143	.get_trigger_ops	= snapshot_get_trigger_ops,
1144	.set_filter		= set_trigger_filter,
1145};
1146
1147static __init int register_trigger_snapshot_cmd(void)
1148{
1149	int ret;
1150
1151	ret = register_event_command(&trigger_snapshot_cmd);
1152	WARN_ON(ret < 0);
1153
1154	return ret;
1155}
1156#else
1157static __init int register_trigger_snapshot_cmd(void) { return 0; }
1158#endif /* CONFIG_TRACER_SNAPSHOT */
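/*
 * Editorial illustration (not part of the kernel source): with
 * CONFIG_TRACER_SNAPSHOT enabled, a snapshot trigger is set like any other
 * command (paths and field names are assumptions):
 *
 *   echo 'snapshot:1 if prev_state == 2' > \
 *           /sys/kernel/tracing/events/sched/sched_switch/trigger
 *
 * and the captured buffer can then be read from the instance's "snapshot"
 * file.  register_snapshot_trigger() above pre-allocates that buffer so the
 * trigger never has to allocate from tracepoint context.
 */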
1159
1160#ifdef CONFIG_STACKTRACE
1161#ifdef CONFIG_UNWINDER_ORC
1162/* Skip 2:
1163 *   event_triggers_post_call()
1164 *   trace_event_raw_event_xxx()
1165 */
1166# define STACK_SKIP 2
1167#else
1168/*
1169 * Skip 4:
1170 *   stacktrace_trigger()
1171 *   event_triggers_post_call()
1172 *   trace_event_buffer_commit()
1173 *   trace_event_raw_event_xxx()
1174 */
1175#define STACK_SKIP 4
1176#endif
1177
1178static void
1179stacktrace_trigger(struct event_trigger_data *data, void *rec,
1180		   struct ring_buffer_event *event)
1181{
1182	trace_dump_stack(STACK_SKIP);
1183}
1184
1185static void
1186stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
1187			 struct ring_buffer_event *event)
1188{
1189	if (!data->count)
1190		return;
1191
1192	if (data->count != -1)
1193		(data->count)--;
1194
1195	stacktrace_trigger(data, rec, event);
1196}
1197
1198static int
1199stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1200			 struct event_trigger_data *data)
1201{
1202	return event_trigger_print("stacktrace", m, (void *)data->count,
1203				   data->filter_str);
1204}
1205
1206static struct event_trigger_ops stacktrace_trigger_ops = {
1207	.func			= stacktrace_trigger,
1208	.print			= stacktrace_trigger_print,
1209	.init			= event_trigger_init,
1210	.free			= event_trigger_free,
1211};
1212
1213static struct event_trigger_ops stacktrace_count_trigger_ops = {
1214	.func			= stacktrace_count_trigger,
1215	.print			= stacktrace_trigger_print,
1216	.init			= event_trigger_init,
1217	.free			= event_trigger_free,
1218};
1219
1220static struct event_trigger_ops *
1221stacktrace_get_trigger_ops(char *cmd, char *param)
1222{
1223	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1224}
1225
1226static struct event_command trigger_stacktrace_cmd = {
1227	.name			= "stacktrace",
1228	.trigger_type		= ETT_STACKTRACE,
1229	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1230	.func			= event_trigger_callback,
1231	.reg			= register_trigger,
1232	.unreg			= unregister_trigger,
1233	.get_trigger_ops	= stacktrace_get_trigger_ops,
1234	.set_filter		= set_trigger_filter,
1235};
1236
1237static __init int register_trigger_stacktrace_cmd(void)
1238{
1239	int ret;
1240
1241	ret = register_event_command(&trigger_stacktrace_cmd);
1242	WARN_ON(ret < 0);
1243
1244	return ret;
1245}
1246#else
1247static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1248#endif /* CONFIG_STACKTRACE */
1249
1250static __init void unregister_trigger_traceon_traceoff_cmds(void)
1251{
1252	unregister_event_command(&trigger_traceon_cmd);
1253	unregister_event_command(&trigger_traceoff_cmd);
1254}
1255
1256static void
1257event_enable_trigger(struct event_trigger_data *data, void *rec,
1258		     struct ring_buffer_event *event)
1259{
1260	struct enable_trigger_data *enable_data = data->private_data;
1261
1262	if (enable_data->enable)
1263		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1264	else
1265		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1266}
1267
1268static void
1269event_enable_count_trigger(struct event_trigger_data *data, void *rec,
1270			   struct ring_buffer_event *event)
1271{
1272	struct enable_trigger_data *enable_data = data->private_data;
1273
1274	if (!data->count)
1275		return;
1276
1277	/* Skip if the event is in a state we want to switch to */
1278	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1279		return;
1280
1281	if (data->count != -1)
1282		(data->count)--;
1283
1284	event_enable_trigger(data, rec, event);
1285}
1286
1287int event_enable_trigger_print(struct seq_file *m,
1288			       struct event_trigger_ops *ops,
1289			       struct event_trigger_data *data)
1290{
1291	struct enable_trigger_data *enable_data = data->private_data;
1292
1293	seq_printf(m, "%s:%s:%s",
1294		   enable_data->hist ?
1295		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1296		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1297		   enable_data->file->event_call->class->system,
1298		   trace_event_name(enable_data->file->event_call));
1299
1300	if (data->count == -1)
1301		seq_puts(m, ":unlimited");
1302	else
1303		seq_printf(m, ":count=%ld", data->count);
1304
1305	if (data->filter_str)
1306		seq_printf(m, " if %s\n", data->filter_str);
1307	else
1308		seq_putc(m, '\n');
1309
1310	return 0;
1311}
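/*
 * Editorial illustration (not part of the kernel source): reading back the
 * trigger file shows enable/disable triggers in the format printed above,
 * where the first field comes from ENABLE_EVENT_STR/DISABLE_EVENT_STR and
 * the next two name the event being switched, e.g.:
 *
 *   enable_event:sched:sched_switch:count=5 if prev_pid == 1
 *   disable_event:kmem:kmalloc:unlimited
 */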
1312
1313void event_enable_trigger_free(struct event_trigger_ops *ops,
1314			       struct event_trigger_data *data)
1315{
1316	struct enable_trigger_data *enable_data = data->private_data;
1317
1318	if (WARN_ON_ONCE(data->ref <= 0))
1319		return;
1320
1321	data->ref--;
1322	if (!data->ref) {
1323		/* Remove the SOFT_MODE flag */
1324		trace_event_enable_disable(enable_data->file, 0, 1);
1325		module_put(enable_data->file->event_call->mod);
1326		trigger_data_free(data);
1327		kfree(enable_data);
1328	}
1329}
1330
1331static struct event_trigger_ops event_enable_trigger_ops = {
1332	.func			= event_enable_trigger,
1333	.print			= event_enable_trigger_print,
1334	.init			= event_trigger_init,
1335	.free			= event_enable_trigger_free,
1336};
1337
1338static struct event_trigger_ops event_enable_count_trigger_ops = {
1339	.func			= event_enable_count_trigger,
1340	.print			= event_enable_trigger_print,
1341	.init			= event_trigger_init,
1342	.free			= event_enable_trigger_free,
1343};
1344
1345static struct event_trigger_ops event_disable_trigger_ops = {
1346	.func			= event_enable_trigger,
1347	.print			= event_enable_trigger_print,
1348	.init			= event_trigger_init,
1349	.free			= event_enable_trigger_free,
1350};
1351
1352static struct event_trigger_ops event_disable_count_trigger_ops = {
1353	.func			= event_enable_count_trigger,
1354	.print			= event_enable_trigger_print,
1355	.init			= event_trigger_init,
1356	.free			= event_enable_trigger_free,
1357};
1358
1359int event_enable_trigger_func(struct event_command *cmd_ops,
1360			      struct trace_event_file *file,
1361			      char *glob, char *cmd, char *param)
1362{
1363	struct trace_event_file *event_enable_file;
1364	struct enable_trigger_data *enable_data;
1365	struct event_trigger_data *trigger_data;
1366	struct event_trigger_ops *trigger_ops;
1367	struct trace_array *tr = file->tr;
1368	const char *system;
1369	const char *event;
1370	bool hist = false;
1371	char *trigger;
1372	char *number;
1373	bool enable;
1374	int ret;
1375
1376	if (!param)
1377		return -EINVAL;
1378
1379	/* separate the trigger from the filter (s:e:n [if filter]) */
1380	trigger = strsep(&param, " \t");
1381	if (!trigger)
1382		return -EINVAL;
1383	if (param) {
1384		param = skip_spaces(param);
1385		if (!*param)
1386			param = NULL;
1387	}
1388
1389	system = strsep(&trigger, ":");
1390	if (!trigger)
1391		return -EINVAL;
1392
1393	event = strsep(&trigger, ":");
1394
1395	ret = -EINVAL;
1396	event_enable_file = find_event_file(tr, system, event);
1397	if (!event_enable_file)
1398		goto out;
1399
1400#ifdef CONFIG_HIST_TRIGGERS
1401	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1402		(strcmp(cmd, DISABLE_HIST_STR) == 0));
1403
1404	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1405		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1406#else
1407	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1408#endif
1409	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1410
1411	ret = -ENOMEM;
1412	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1413	if (!trigger_data)
1414		goto out;
1415
1416	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1417	if (!enable_data) {
1418		kfree(trigger_data);
1419		goto out;
1420	}
1421
1422	trigger_data->count = -1;
1423	trigger_data->ops = trigger_ops;
1424	trigger_data->cmd_ops = cmd_ops;
1425	INIT_LIST_HEAD(&trigger_data->list);
1426	RCU_INIT_POINTER(trigger_data->filter, NULL);
1427
1428	enable_data->hist = hist;
1429	enable_data->enable = enable;
1430	enable_data->file = event_enable_file;
1431	trigger_data->private_data = enable_data;
1432
1433	if (glob[0] == '!') {
1434		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1435		kfree(trigger_data);
1436		kfree(enable_data);
1437		ret = 0;
1438		goto out;
1439	}
1440
1441	/* Up the trigger_data count to make sure nothing frees it on failure */
1442	event_trigger_init(trigger_ops, trigger_data);
1443
1444	if (trigger) {
1445		number = strsep(&trigger, ":");
1446
1447		ret = -EINVAL;
1448		if (!strlen(number))
1449			goto out_free;
1450
1451		/*
1452		 * We use the callback data field (which is a pointer)
1453		 * as our counter.
1454		 */
1455		ret = kstrtoul(number, 0, &trigger_data->count);
1456		if (ret)
1457			goto out_free;
1458	}
1459
1460	if (!param) /* if param is non-empty, it's supposed to be a filter */
1461		goto out_reg;
1462
1463	if (!cmd_ops->set_filter)
1464		goto out_reg;
1465
1466	ret = cmd_ops->set_filter(param, trigger_data, file);
1467	if (ret < 0)
1468		goto out_free;
1469
1470 out_reg:
1471	/* Don't let event modules unload while probe registered */
1472	ret = try_module_get(event_enable_file->event_call->mod);
1473	if (!ret) {
1474		ret = -EBUSY;
1475		goto out_free;
1476	}
1477
1478	ret = trace_event_enable_disable(event_enable_file, 1, 1);
1479	if (ret < 0)
1480		goto out_put;
1481	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1482	/*
1483	 * The above returns on success the # of functions enabled,
1484	 * but if it didn't find any functions it returns zero.
1485	 * Consider no functions a failure too.
1486	 */
1487	if (!ret) {
1488		ret = -ENOENT;
1489		goto out_disable;
1490	} else if (ret < 0)
1491		goto out_disable;
1492	/* Just return zero, not the number of enabled functions */
1493	ret = 0;
1494	event_trigger_free(trigger_ops, trigger_data);
1495 out:
1496	return ret;
1497
1498 out_disable:
1499	trace_event_enable_disable(event_enable_file, 0, 1);
1500 out_put:
1501	module_put(event_enable_file->event_call->mod);
1502 out_free:
1503	if (cmd_ops->set_filter)
1504		cmd_ops->set_filter(NULL, trigger_data, NULL);
1505	event_trigger_free(trigger_ops, trigger_data);
1506	kfree(enable_data);
1507	goto out;
1508}
1509
1510int event_enable_register_trigger(char *glob,
1511				  struct event_trigger_ops *ops,
1512				  struct event_trigger_data *data,
1513				  struct trace_event_file *file)
1514{
1515	struct enable_trigger_data *enable_data = data->private_data;
1516	struct enable_trigger_data *test_enable_data;
1517	struct event_trigger_data *test;
1518	int ret = 0;
1519
1520	lockdep_assert_held(&event_mutex);
1521
1522	list_for_each_entry(test, &file->triggers, list) {
1523		test_enable_data = test->private_data;
1524		if (test_enable_data &&
1525		    (test->cmd_ops->trigger_type ==
1526		     data->cmd_ops->trigger_type) &&
1527		    (test_enable_data->file == enable_data->file)) {
1528			ret = -EEXIST;
1529			goto out;
1530		}
1531	}
1532
1533	if (data->ops->init) {
1534		ret = data->ops->init(data->ops, data);
1535		if (ret < 0)
1536			goto out;
1537	}
1538
1539	list_add_rcu(&data->list, &file->triggers);
1540	ret++;
1541
1542	update_cond_flag(file);
1543	if (trace_event_trigger_enable_disable(file, 1) < 0) {
1544		list_del_rcu(&data->list);
1545		update_cond_flag(file);
1546		ret--;
1547	}
1548out:
1549	return ret;
1550}
1551
1552void event_enable_unregister_trigger(char *glob,
1553				     struct event_trigger_ops *ops,
1554				     struct event_trigger_data *test,
1555				     struct trace_event_file *file)
1556{
1557	struct enable_trigger_data *test_enable_data = test->private_data;
1558	struct enable_trigger_data *enable_data;
1559	struct event_trigger_data *data;
1560	bool unregistered = false;
1561
1562	lockdep_assert_held(&event_mutex);
1563
1564	list_for_each_entry(data, &file->triggers, list) {
1565		enable_data = data->private_data;
1566		if (enable_data &&
1567		    (data->cmd_ops->trigger_type ==
1568		     test->cmd_ops->trigger_type) &&
1569		    (enable_data->file == test_enable_data->file)) {
1570			unregistered = true;
1571			list_del_rcu(&data->list);
1572			trace_event_trigger_enable_disable(file, 0);
1573			update_cond_flag(file);
1574			break;
1575		}
1576	}
1577
1578	if (unregistered && data->ops->free)
1579		data->ops->free(data->ops, data);
1580}
1581
1582static struct event_trigger_ops *
1583event_enable_get_trigger_ops(char *cmd, char *param)
1584{
1585	struct event_trigger_ops *ops;
1586	bool enable;
1587
1588#ifdef CONFIG_HIST_TRIGGERS
1589	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1590		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1591#else
1592	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1593#endif
1594	if (enable)
1595		ops = param ? &event_enable_count_trigger_ops :
1596			&event_enable_trigger_ops;
1597	else
1598		ops = param ? &event_disable_count_trigger_ops :
1599			&event_disable_trigger_ops;
1600
1601	return ops;
1602}
1603
1604static struct event_command trigger_enable_cmd = {
1605	.name			= ENABLE_EVENT_STR,
1606	.trigger_type		= ETT_EVENT_ENABLE,
1607	.func			= event_enable_trigger_func,
1608	.reg			= event_enable_register_trigger,
1609	.unreg			= event_enable_unregister_trigger,
1610	.get_trigger_ops	= event_enable_get_trigger_ops,
1611	.set_filter		= set_trigger_filter,
1612};
1613
1614static struct event_command trigger_disable_cmd = {
1615	.name			= DISABLE_EVENT_STR,
1616	.trigger_type		= ETT_EVENT_ENABLE,
1617	.func			= event_enable_trigger_func,
1618	.reg			= event_enable_register_trigger,
1619	.unreg			= event_enable_unregister_trigger,
1620	.get_trigger_ops	= event_enable_get_trigger_ops,
1621	.set_filter		= set_trigger_filter,
1622};
1623
1624static __init void unregister_trigger_enable_disable_cmds(void)
1625{
1626	unregister_event_command(&trigger_enable_cmd);
1627	unregister_event_command(&trigger_disable_cmd);
1628}
1629
1630static __init int register_trigger_enable_disable_cmds(void)
1631{
1632	int ret;
1633
1634	ret = register_event_command(&trigger_enable_cmd);
1635	if (WARN_ON(ret < 0))
1636		return ret;
1637	ret = register_event_command(&trigger_disable_cmd);
1638	if (WARN_ON(ret < 0))
1639		unregister_trigger_enable_disable_cmds();
1640
1641	return ret;
1642}
1643
1644static __init int register_trigger_traceon_traceoff_cmds(void)
1645{
1646	int ret;
1647
1648	ret = register_event_command(&trigger_traceon_cmd);
1649	if (WARN_ON(ret < 0))
1650		return ret;
1651	ret = register_event_command(&trigger_traceoff_cmd);
1652	if (WARN_ON(ret < 0))
1653		unregister_trigger_traceon_traceoff_cmds();
1654
1655	return ret;
1656}
1657
1658__init int register_trigger_cmds(void)
1659{
1660	register_trigger_traceon_traceoff_cmds();
1661	register_trigger_snapshot_cmd();
1662	register_trigger_stacktrace_cmd();
1663	register_trigger_enable_disable_cmds();
1664	register_trigger_hist_enable_disable_cmds();
1665	register_trigger_hist_cmd();
1666
1667	return 0;
1668}