Linux Audio

Check our new training course

Loading...
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * trace_events_trigger - trace event triggers
   4 *
   5 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
   6 */
   7
   8#include <linux/security.h>
   9#include <linux/module.h>
  10#include <linux/ctype.h>
  11#include <linux/mutex.h>
  12#include <linux/slab.h>
  13#include <linux/rculist.h>
  14
  15#include "trace.h"
  16
/* All registered event_command implementations, protected by trigger_cmd_mutex */
static LIST_HEAD(trigger_commands);
static DEFINE_MUTEX(trigger_cmd_mutex);
  19
/*
 * Free an event_trigger_data object.  The command's filter (if any) is
 * detached first via the set_filter(NULL, ...) convention, and we wait
 * for all in-flight tracepoint handlers to finish before kfree() so no
 * handler can still be dereferencing @data.
 */
void trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	/* make sure current triggers exit before free */
	tracepoint_synchronize_unregister();

	kfree(data);
}
  30
  31/**
  32 * event_triggers_call - Call triggers associated with a trace event
  33 * @file: The trace_event_file associated with the event
  34 * @buffer: The ring buffer that the event is being written to
  35 * @rec: The trace entry for the event, NULL for unconditional invocation
  36 * @event: The event meta data in the ring buffer
  37 *
  38 * For each trigger associated with an event, invoke the trigger
  39 * function registered with the associated trigger command.  If rec is
  40 * non-NULL, it means that the trigger requires further processing and
  41 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
  42 * trigger has a filter associated with it, rec will checked against
  43 * the filter and if the record matches the trigger will be invoked.
  44 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
  45 * in any case until the current event is written, the trigger
  46 * function isn't invoked but the bit associated with the deferred
  47 * trigger is set in the return value.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 *
 * Return: an enum event_trigger_type value containing a set bit for
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
enum event_trigger_type
event_triggers_call(struct trace_event_file *file,
		    struct trace_buffer *buffer, void *rec,
		    struct ring_buffer_event *event)
{
	struct event_trigger_data *data;
	enum event_trigger_type tt = ETT_NONE;
	struct event_filter *filter;

	if (list_empty(&file->triggers))
		return tt;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (!rec) {
			/* Unconditional invocation: fire regardless of filter */
			data->ops->trigger(data, buffer, rec, event);
			continue;
		}
		/* sched-RCU read side; callers hold rcu_read_lock_sched() */
		filter = rcu_dereference_sched(data->filter);
		if (filter && !filter_match_preds(filter, rec))
			continue;
		if (event_command_post_trigger(data->cmd_ops)) {
			/* Defer: caller fires it via event_triggers_post_call() */
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data->ops->trigger(data, buffer, rec, event);
	}
	return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);
  88
  89bool __trace_trigger_soft_disabled(struct trace_event_file *file)
  90{
  91	unsigned long eflags = file->flags;
  92
  93	if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
  94		event_triggers_call(file, NULL, NULL, NULL);
  95	if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
  96		return true;
  97	if (eflags & EVENT_FILE_FL_PID_FILTER)
  98		return trace_event_ignore_this_pid(file);
  99	return false;
 100}
 101EXPORT_SYMBOL_GPL(__trace_trigger_soft_disabled);
 102
/**
 * event_triggers_post_call - Call 'post_triggers' for a trace event
 * @file: The trace_event_file associated with the event
 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command, if the
 * corresponding bit is set in the tt enum passed into this function.
 * See @event_triggers_call for details on how those bits are set.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 */
void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt)
{
	struct event_trigger_data *data;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		/* Only fire the triggers that were deferred by event_triggers_call() */
		if (data->cmd_ops->trigger_type & tt)
			data->ops->trigger(data, NULL, NULL, NULL);
	}
}
EXPORT_SYMBOL_GPL(event_triggers_post_call);
 129
/* Sentinel ->start() value meaning "show available triggers", not a list entry */
#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)

/* seq_file ->next(): the sentinel is a single pseudo-record; otherwise walk the list */
static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
	struct trace_event_file *event_file = event_file_data(m->private);

	if (t == SHOW_AVAILABLE_TRIGGERS) {
		(*pos)++;
		return NULL;
	}
	return seq_list_next(t, &event_file->triggers, pos);
}
 142
/*
 * Return true if the event file has at least one user-created trigger,
 * i.e. one not internally attached by a probe (marked with
 * EVENT_TRIGGER_FL_PROBE).  The lockdep_is_held() cookie asserts the
 * caller holds event_mutex for this list walk.
 */
static bool check_user_trigger(struct trace_event_file *file)
{
	struct event_trigger_data *data;

	list_for_each_entry_rcu(data, &file->triggers, list,
				lockdep_is_held(&event_mutex)) {
		if (data->flags & EVENT_TRIGGER_FL_PROBE)
			continue;
		return true;
	}
	return false;
}
 155
/*
 * seq_file ->start(): take event_mutex and position the iterator.  If
 * the event has no user-visible triggers, return the
 * SHOW_AVAILABLE_TRIGGERS sentinel on the first call so ->show()
 * prints the list of available trigger commands instead.
 */
static void *trigger_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *event_file;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	event_file = event_file_data(m->private);
	if (unlikely(!event_file))
		return ERR_PTR(-ENODEV);

	if (list_empty(&event_file->triggers) || !check_user_trigger(event_file))
		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;

	return seq_list_start(&event_file->triggers, *pos);
}
 171
/* seq_file ->stop(): drop the event_mutex taken in trigger_start() */
static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}
 176
/*
 * seq_file ->show(): print one registered trigger, or — for the
 * SHOW_AVAILABLE_TRIGGERS sentinel — the names of all available
 * trigger commands.
 */
static int trigger_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;
	struct event_command *p;

	if (v == SHOW_AVAILABLE_TRIGGERS) {
		seq_puts(m, "# Available triggers:\n");
		seq_putc(m, '#');
		mutex_lock(&trigger_cmd_mutex);
		/* Commands are list_add()ed at the head; reverse restores registration order */
		list_for_each_entry_reverse(p, &trigger_commands, list)
			seq_printf(m, " %s", p->name);
		seq_putc(m, '\n');
		mutex_unlock(&trigger_cmd_mutex);
		return 0;
	}

	data = list_entry(v, struct event_trigger_data, list);
	data->ops->print(m, data);

	return 0;
}
 198
/* seq_file iterator over an event's registered triggers */
static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};
 205
/*
 * Open handler for the per-event 'trigger' file.  Opening for write
 * with O_TRUNC removes all existing triggers (via each command's
 * unreg_all()); opening for read starts the seq_file iteration over
 * the event's triggers.
 */
static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	mutex_lock(&event_mutex);

	/* The event may have been removed while the tracefs file existed */
	if (unlikely(!event_file_data(file))) {
		mutex_unlock(&event_mutex);
		return -ENODEV;
	}

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		struct trace_event_file *event_file;
		struct event_command *p;

		event_file = event_file_data(file);

		/* trigger_cmd_mutex not taken: commands are only (un)registered from __init */
		list_for_each_entry(p, &trigger_commands, list) {
			if (p->unreg_all)
				p->unreg_all(event_file);
		}
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = file;
		}
	}

	mutex_unlock(&event_mutex);

	return ret;
}
 246
 247int trigger_process_regex(struct trace_event_file *file, char *buff)
 248{
 249	char *command, *next;
 250	struct event_command *p;
 251	int ret = -EINVAL;
 252
 253	next = buff = skip_spaces(buff);
 254	command = strsep(&next, ": \t");
 255	if (next) {
 256		next = skip_spaces(next);
 257		if (!*next)
 258			next = NULL;
 259	}
 260	command = (command[0] != '!') ? command : command + 1;
 261
 262	mutex_lock(&trigger_cmd_mutex);
 263	list_for_each_entry(p, &trigger_commands, list) {
 264		if (strcmp(p->name, command) == 0) {
 265			ret = p->parse(p, file, buff, command, next);
 266			goto out_unlock;
 267		}
 268	}
 269 out_unlock:
 270	mutex_unlock(&trigger_cmd_mutex);
 271
 272	return ret;
 273}
 274
/*
 * Write handler for the 'trigger' file: copy the user buffer, strip
 * surrounding whitespace, and feed it to trigger_process_regex()
 * under event_mutex.
 */
static ssize_t event_trigger_regex_write(struct file *file,
					 const char __user *ubuf,
					 size_t cnt, loff_t *ppos)
{
	struct trace_event_file *event_file;
	ssize_t ret;
	char *buf;

	if (!cnt)
		return 0;

	/* Trigger commands are limited to (just under) one page */
	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	strim(buf);

	mutex_lock(&event_mutex);
	event_file = event_file_data(file);
	if (unlikely(!event_file)) {
		mutex_unlock(&event_mutex);
		kfree(buf);
		return -ENODEV;
	}
	ret = trigger_process_regex(event_file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (ret < 0)
		goto out;

	/* Whole command consumed on success */
	*ppos += cnt;
	ret = cnt;
 out:
	return ret;
}
 314
/* Release handler: tear down the seq_file if the file was opened for read */
static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}
 326
/* write(2) entry point for the 'trigger' file */
static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}
 333
/* open(2) entry point for the 'trigger' file */
static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	/* Checks for tracefs lockdown */
	return event_trigger_regex_open(inode, filp);
}
 340
/* release entry point for the 'trigger' file */
static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}
 346
/* File operations for the per-event 'trigger' tracefs file */
const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};
 354
 355/*
 356 * Currently we only register event commands from __init, so mark this
 357 * __init too.
 358 */
 359__init int register_event_command(struct event_command *cmd)
 360{
 361	struct event_command *p;
 362	int ret = 0;
 363
 364	mutex_lock(&trigger_cmd_mutex);
 365	list_for_each_entry(p, &trigger_commands, list) {
 366		if (strcmp(cmd->name, p->name) == 0) {
 367			ret = -EBUSY;
 368			goto out_unlock;
 369		}
 370	}
 371	list_add(&cmd->list, &trigger_commands);
 372 out_unlock:
 373	mutex_unlock(&trigger_cmd_mutex);
 374
 375	return ret;
 376}
 377
 378/*
 379 * Currently we only unregister event commands from __init, so mark
 380 * this __init too.
 381 */
 382__init int unregister_event_command(struct event_command *cmd)
 383{
 384	struct event_command *p, *n;
 385	int ret = -ENODEV;
 386
 387	mutex_lock(&trigger_cmd_mutex);
 388	list_for_each_entry_safe(p, n, &trigger_commands, list) {
 389		if (strcmp(cmd->name, p->name) == 0) {
 390			ret = 0;
 391			list_del_init(&p->list);
 392			goto out_unlock;
 393		}
 394	}
 395 out_unlock:
 396	mutex_unlock(&trigger_cmd_mutex);
 397
 398	return ret;
 399}
 400
 401/**
 402 * event_trigger_print - Generic event_trigger_ops @print implementation
 403 * @name: The name of the event trigger
 404 * @m: The seq_file being printed to
 405 * @data: Trigger-specific data
 406 * @filter_str: filter_str to print, if present
 407 *
 408 * Common implementation for event triggers to print themselves.
 409 *
 410 * Usually wrapped by a function that simply sets the @name of the
 411 * trigger command and then invokes this.
 412 *
 413 * Return: 0 on success, errno otherwise
 414 */
 415static int
 416event_trigger_print(const char *name, struct seq_file *m,
 417		    void *data, char *filter_str)
 418{
 419	long count = (long)data;
 420
 421	seq_puts(m, name);
 422
 423	if (count == -1)
 424		seq_puts(m, ":unlimited");
 425	else
 426		seq_printf(m, ":count=%ld", count);
 427
 428	if (filter_str)
 429		seq_printf(m, " if %s\n", filter_str);
 430	else
 431		seq_putc(m, '\n');
 432
 433	return 0;
 434}
 435
/**
 * event_trigger_init - Generic event_trigger_ops @init implementation
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization.
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
int event_trigger_init(struct event_trigger_data *data)
{
	/* Take a reference; paired with event_trigger_free() */
	data->ref++;
	return 0;
}
 452
/**
 * event_trigger_free - Generic event_trigger_ops @free implementation
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger de-initialization.
 *
 * Usually used directly as the @free method in event trigger
 * implementations.
 */
static void
event_trigger_free(struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	/* Drop the reference taken by event_trigger_init(); free on last put */
	data->ref--;
	if (!data->ref)
		trigger_data_free(data);
}
 472
/*
 * Bump or drop the event file's trigger-mode refcount (tm_ref).  Only
 * the 0->1 and 1->0 transitions actually flip TRIGGER_MODE and
 * (soft-)enable or disable the event itself; intermediate counts just
 * return 0.
 */
int trace_event_trigger_enable_disable(struct trace_event_file *file,
				       int trigger_enable)
{
	int ret = 0;

	if (trigger_enable) {
		if (atomic_inc_return(&file->tm_ref) > 1)
			return ret;
		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 1, 1);
	} else {
		if (atomic_dec_return(&file->tm_ref) > 0)
			return ret;
		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 0, 1);
	}

	return ret;
}
 492
/**
 * clear_event_triggers - Clear all triggers associated with a trace array
 * @tr: The trace array to clear
 *
 * For each trigger, the triggering event has its tm_ref decremented
 * via trace_event_trigger_enable_disable(), and any associated event
 * (in the case of enable/disable_event triggers) will have its sm_ref
 * decremented via free()->trace_event_enable_disable().  That
 * combination effectively reverses the soft-mode/trigger state added
 * by trigger registration.
 *
 * Must be called with event_mutex held.
 */
void
clear_event_triggers(struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list) {
		struct event_trigger_data *data, *n;
		/* _safe: each trigger is unlinked (and possibly freed) as we go */
		list_for_each_entry_safe(data, n, &file->triggers, list) {
			trace_event_trigger_enable_disable(file, 0);
			list_del_rcu(&data->list);
			if (data->ops->free)
				data->ops->free(data);
		}
	}
}
 521
 522/**
 523 * update_cond_flag - Set or reset the TRIGGER_COND bit
 524 * @file: The trace_event_file associated with the event
 525 *
 526 * If an event has triggers and any of those triggers has a filter or
 527 * a post_trigger, trigger invocation needs to be deferred until after
 528 * the current event has logged its data, and the event should have
 529 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 530 * cleared.
 531 */
 532void update_cond_flag(struct trace_event_file *file)
 533{
 534	struct event_trigger_data *data;
 535	bool set_cond = false;
 536
 537	lockdep_assert_held(&event_mutex);
 538
 539	list_for_each_entry(data, &file->triggers, list) {
 540		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
 541		    event_command_needs_rec(data->cmd_ops)) {
 542			set_cond = true;
 543			break;
 544		}
 545	}
 546
 547	if (set_cond)
 548		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
 549	else
 550		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
 551}
 552
/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int register_trigger(char *glob,
			    struct event_trigger_data *data,
			    struct trace_event_file *file)
{
	struct event_trigger_data *test;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	/* Only one trigger of a given type may be attached to an event */
	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data);
		if (ret < 0)
			goto out;
	}

	/* Publish before enabling; readers traverse this list under RCU */
	list_add_rcu(&data->list, &file->triggers);

	update_cond_flag(file);
	ret = trace_event_trigger_enable_disable(file, 1);
	if (ret < 0) {
		/* Roll back the publication if enabling the event failed */
		list_del_rcu(&data->list);
		update_cond_flag(file);
	}
out:
	return ret;
}
 599
/**
 * unregister_trigger - Generic event_command @unreg implementation
 * @glob: The raw string used to register the trigger
 * @test: Trigger-specific data used to find the trigger to remove
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.
 *
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
static void unregister_trigger(char *glob,
			       struct event_trigger_data *test,
			       struct trace_event_file *file)
{
	struct event_trigger_data *data = NULL, *iter;

	lockdep_assert_held(&event_mutex);

	/* Match on trigger_type only; @test is just a search key */
	list_for_each_entry(iter, &file->triggers, list) {
		if (iter->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
			data = iter;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	/* Free outside the loop, after the trigger has been unlinked */
	if (data && data->ops->free)
		data->ops->free(data);
}
 632
 633/*
 634 * Event trigger parsing helper functions.
 635 *
 636 * These functions help make it easier to write an event trigger
 637 * parsing function i.e. the struct event_command.parse() callback
 638 * function responsible for parsing and registering a trigger command
 639 * written to the 'trigger' file.
 640 *
 641 * A trigger command (or just 'trigger' for short) takes the form:
 642 *   [trigger] [if filter]
 643 *
 644 * The struct event_command.parse() callback (and other struct
 645 * event_command functions) refer to several components of a trigger
 646 * command.  Those same components are referenced by the event trigger
 647 * parsing helper functions defined below.  These components are:
 648 *
 649 *   cmd               - the trigger command name
 650 *   glob              - the trigger command name optionally prefaced with '!'
 651 *   param_and_filter  - text following cmd and ':'
 652 *   param             - text following cmd and ':' and stripped of filter
 653 *   filter            - the optional filter text following (and including) 'if'
 654 *
 * To illustrate the use of these components, here are some concrete
 656 * examples. For the following triggers:
 657 *
 658 *   echo 'traceon:5 if pid == 0' > trigger
 659 *     - 'traceon' is both cmd and glob
 660 *     - '5 if pid == 0' is the param_and_filter
 661 *     - '5' is the param
 662 *     - 'if pid == 0' is the filter
 663 *
 664 *   echo 'enable_event:sys:event:n' > trigger
 665 *     - 'enable_event' is both cmd and glob
 666 *     - 'sys:event:n' is the param_and_filter
 667 *     - 'sys:event:n' is the param
 668 *     - there is no filter
 669 *
 670 *   echo 'hist:keys=pid if prio > 50' > trigger
 671 *     - 'hist' is both cmd and glob
 672 *     - 'keys=pid if prio > 50' is the param_and_filter
 673 *     - 'keys=pid' is the param
 674 *     - 'if prio > 50' is the filter
 675 *
 676 *   echo '!enable_event:sys:event:n' > trigger
 677 *     - 'enable_event' the cmd
 678 *     - '!enable_event' is the glob
 679 *     - 'sys:event:n' is the param_and_filter
 680 *     - 'sys:event:n' is the param
 681 *     - there is no filter
 682 *
 683 *   echo 'traceoff' > trigger
 684 *     - 'traceoff' is both cmd and glob
 685 *     - there is no param_and_filter
 686 *     - there is no param
 687 *     - there is no filter
 688 *
 689 * There are a few different categories of event trigger covered by
 690 * these helpers:
 691 *
 692 *  - triggers that don't require a parameter e.g. traceon
 693 *  - triggers that do require a parameter e.g. enable_event and hist
 694 *  - triggers that though they may not require a param may support an
 695 *    optional 'n' param (n = number of times the trigger should fire)
 696 *    e.g.: traceon:5 or enable_event:sys:event:n
 697 *  - triggers that do not support an 'n' param e.g. hist
 698 *
 699 * These functions can be used or ignored as necessary - it all
 700 * depends on the complexity of the trigger, and the granularity of
 701 * the functions supported reflects the fact that some implementations
 702 * may need to customize certain aspects of their implementations and
 703 * won't need certain functions.  For instance, the hist trigger
 704 * implementation doesn't use event_trigger_separate_filter() because
 705 * it has special requirements for handling the filter.
 706 */
 707
 708/**
 709 * event_trigger_check_remove - check whether an event trigger specifies remove
 710 * @glob: The trigger command string, with optional remove(!) operator
 711 *
 712 * The event trigger callback implementations pass in 'glob' as a
 713 * parameter.  This is the command name either with or without a
 714 * remove(!)  operator.  This function simply parses the glob and
 715 * determines whether the command corresponds to a trigger removal or
 716 * a trigger addition.
 717 *
 718 * Return: true if this is a remove command, false otherwise
 719 */
 720bool event_trigger_check_remove(const char *glob)
 721{
 722	return (glob && glob[0] == '!') ? true : false;
 723}
 724
 725/**
 726 * event_trigger_empty_param - check whether the param is empty
 727 * @param: The trigger param string
 
 
 
 
 728 *
 729 * The event trigger callback implementations pass in 'param' as a
 730 * parameter.  This corresponds to the string following the command
 731 * name minus the command name.  This function can be called by a
 732 * callback implementation for any command that requires a param; a
 733 * callback that doesn't require a param can ignore it.
 734 *
 735 * Return: true if this is an empty param, false otherwise
 736 */
 737bool event_trigger_empty_param(const char *param)
 738{
 739	return !param;
 740}
 741
 742/**
 743 * event_trigger_separate_filter - separate an event trigger from a filter
 744 * @param_and_filter: String containing trigger and possibly filter
 745 * @param: outparam, will be filled with a pointer to the trigger
 746 * @filter: outparam, will be filled with a pointer to the filter
 747 * @param_required: Specifies whether or not the param string is required
 748 *
 749 * Given a param string of the form '[trigger] [if filter]', this
 750 * function separates the filter from the trigger and returns the
 751 * trigger in @param and the filter in @filter.  Either the @param
 752 * or the @filter may be set to NULL by this function - if not set to
 753 * NULL, they will contain strings corresponding to the trigger and
 754 * filter.
 755 *
 756 * There are two cases that need to be handled with respect to the
 757 * passed-in param: either the param is required, or it is not
 758 * required.  If @param_required is set, and there's no param, it will
 759 * return -EINVAL.  If @param_required is not set and there's a param
 760 * that starts with a number, that corresponds to the case of a
 761 * trigger with :n (n = number of times the trigger should fire) and
 762 * the parsing continues normally; otherwise the function just returns
 763 * and assumes param just contains a filter and there's nothing else
 764 * to do.
 765 *
 766 * Return: 0 on success, errno otherwise
 767 */
 768int event_trigger_separate_filter(char *param_and_filter, char **param,
 769				  char **filter, bool param_required)
 770{
 771	int ret = 0;
 772
 773	*param = *filter = NULL;
 774
 775	if (!param_and_filter) {
 776		if (param_required)
 777			ret = -EINVAL;
 778		goto out;
 779	}
 780
 781	/*
 782	 * Here we check for an optional param. The only legal
 783	 * optional param is :n, and if that's the case, continue
 784	 * below. Otherwise we assume what's left is a filter and
 785	 * return it as the filter string for the caller to deal with.
 786	 */
 787	if (!param_required && param_and_filter && !isdigit(param_and_filter[0])) {
 788		*filter = param_and_filter;
 789		goto out;
 790	}
 791
 792	/*
 793	 * Separate the param from the filter (param [if filter]).
 794	 * Here we have either an optional :n param or a required
 795	 * param and an optional filter.
 796	 */
 797	*param = strsep(&param_and_filter, " \t");
 798
 799	/*
 800	 * Here we have a filter, though it may be empty.
 801	 */
 802	if (param_and_filter) {
 803		*filter = skip_spaces(param_and_filter);
 804		if (!**filter)
 805			*filter = NULL;
 806	}
 807out:
 808	return ret;
 809}
 810
/**
 * event_trigger_alloc - allocate and init event_trigger_data for a trigger
 * @cmd_ops: The event_command operations for the trigger
 * @cmd: The cmd string
 * @param: The param string
 * @private_data: User data to associate with the event trigger
 *
 * Allocate an event_trigger_data instance and initialize it.  The
 * @cmd_ops are used along with the @cmd and @param to get the
 * trigger_ops to assign to the event_trigger_data.  @private_data can
 * also be passed in and associated with the event_trigger_data.
 *
 * Use event_trigger_free() to free an event_trigger_data object.
 *
 * Return: The trigger_data object success, NULL otherwise
 */
struct event_trigger_data *event_trigger_alloc(struct event_command *cmd_ops,
					       char *cmd,
					       char *param,
					       void *private_data)
{
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;

	/* Let the command pick the trigger_ops variant for this cmd/param */
	trigger_ops = cmd_ops->get_trigger_ops(cmd, param);

	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		return NULL;

	/* count == -1 means "unlimited" unless event_trigger_parse_num() overrides it */
	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	trigger_data->private_data = private_data;

	INIT_LIST_HEAD(&trigger_data->list);
	INIT_LIST_HEAD(&trigger_data->named_list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	return trigger_data;
}
 852
/**
 * event_trigger_parse_num - parse and return the number param for a trigger
 * @param: The param string
 * @trigger_data: The trigger_data for the trigger
 *
 * Parse the :n (n = number of times the trigger should fire) param
 * and set the count variable in the trigger_data to the parsed count.
 *
 * Return: 0 on success, errno otherwise
 */
int event_trigger_parse_num(char *param,
			    struct event_trigger_data *trigger_data)
{
	char *number;
	int ret = 0;

	/* No param means the pre-set count of -1 (unlimited) stands */
	if (param) {
		number = strsep(&param, ":");

		if (!strlen(number))
			return -EINVAL;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
	}

	return ret;
}
 884
/**
 * event_trigger_set_filter - set an event trigger's filter
 * @cmd_ops: The event_command operations for the trigger
 * @file: The event file for the trigger's event
 * @param: The string containing the filter
 * @trigger_data: The trigger_data for the trigger
 *
 * Set the filter for the trigger.  If the filter is NULL, just return
 * without error.
 *
 * Return: 0 on success, errno otherwise
 */
int event_trigger_set_filter(struct event_command *cmd_ops,
			     struct trace_event_file *file,
			     char *param,
			     struct event_trigger_data *trigger_data)
{
	/* No-op when there is no filter string or the command takes none */
	if (param && cmd_ops->set_filter)
		return cmd_ops->set_filter(param, trigger_data, file);

	return 0;
}
 907
/**
 * event_trigger_reset_filter - reset an event trigger's filter
 * @cmd_ops: The event_command operations for the trigger
 * @trigger_data: The trigger_data for the trigger
 *
 * Reset the filter for the trigger to no filter.
 */
void event_trigger_reset_filter(struct event_command *cmd_ops,
				struct event_trigger_data *trigger_data)
{
	/* NULL filter string means "remove the filter" by convention */
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
}
 921
/**
 * event_trigger_register - register an event trigger
 * @cmd_ops: The event_command operations for the trigger
 * @file: The event file for the trigger's event
 * @glob: The trigger command string, with optional remove(!) operator
 * @trigger_data: The trigger_data for the trigger
 *
 * Register an event trigger.  The @cmd_ops are used to call the
 * cmd_ops->reg() function which actually does the registration.
 *
 * Return: 0 on success, errno otherwise
 */
int event_trigger_register(struct event_command *cmd_ops,
			   struct trace_event_file *file,
			   char *glob,
			   struct event_trigger_data *trigger_data)
{
	return cmd_ops->reg(glob, trigger_data, file);
}
 941
/**
 * event_trigger_unregister - unregister an event trigger
 * @cmd_ops: The event_command operations for the trigger
 * @file: The event file for the trigger's event
 * @glob: The trigger command string, with optional remove(!) operator
 * @trigger_data: The trigger_data for the trigger
 *
 * Unregister an event trigger.  The @cmd_ops are used to call the
 * cmd_ops->unreg() function which actually does the unregistration.
 */
void event_trigger_unregister(struct event_command *cmd_ops,
			      struct trace_event_file *file,
			      char *glob,
			      struct event_trigger_data *trigger_data)
{
	cmd_ops->unreg(glob, trigger_data, file);
}
 959
 960/*
 961 * End event trigger parsing helper functions.
 962 */
 963
/**
 * event_trigger_parse - Generic event_command @parse implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param_and_filter: The param and filter portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.
 *
 * Usually used directly as the @parse method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_parse(struct event_command *cmd_ops,
		    struct trace_event_file *file,
		    char *glob, char *cmd, char *param_and_filter)
{
	struct event_trigger_data *trigger_data;
	char *param, *filter;
	bool remove;
	int ret;

	remove = event_trigger_check_remove(glob);

	/* param not required here: a bare filter (no :n) is legal */
	ret = event_trigger_separate_filter(param_and_filter, &param, &filter, false);
	if (ret)
		return ret;

	ret = -ENOMEM;
	trigger_data = event_trigger_alloc(cmd_ops, cmd, param, file);
	if (!trigger_data)
		goto out;

	if (remove) {
		/* For removal, trigger_data is only a search key; free it afterwards */
		event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
		kfree(trigger_data);
		ret = 0;
		goto out;
	}

	ret = event_trigger_parse_num(param, trigger_data);
	if (ret)
		goto out_free;

	ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
	if (ret < 0)
		goto out_free;

	/* Up the trigger_data count to make sure reg doesn't free it on failure */
	event_trigger_init(trigger_data);

	ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
	if (ret)
		goto out_free;

	/* Down the counter of trigger_data or free it if not used anymore */
	event_trigger_free(trigger_data);
 out:
	return ret;

 out_free:
	event_trigger_reset_filter(cmd_ops, trigger_data);
	kfree(trigger_data);
	goto out;
}
1033
/**
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.
 *
 * Usually used directly as the @set_filter method in event command
 * implementations.
 *
 * Also used to remove a filter (if filter_str = NULL).
 *
 * NOTE(review): the clear-filter path (@filter_str == NULL) falls
 * through to 'assign' and returns the initial -EINVAL; callers that
 * clear the filter ignore the return value — confirm before relying
 * on it.
 *
 * Return: 0 on success, errno otherwise
 */
int set_trigger_filter(char *filter_str,
		       struct event_trigger_data *trigger_data,
		       struct trace_event_file *file)
{
	struct event_trigger_data *data = trigger_data;
	struct event_filter *filter = NULL, *tmp;
	int ret = -EINVAL;
	char *s;

	if (!filter_str) /* clear the current filter */
		goto assign;

	/* The filter clause must start with the "if" keyword */
	s = strsep(&filter_str, " \t");

	if (!strlen(s) || strcmp(s, "if") != 0)
		goto out;

	/* Nothing after "if" is an error */
	if (!filter_str)
		goto out;

	/* The filter is for the 'trigger' event, not the triggered event */
	ret = create_event_filter(file->tr, file->event_call,
				  filter_str, true, &filter);

	/* Only enabled set_str for error handling */
	if (filter) {
		kfree(filter->filter_string);
		filter->filter_string = NULL;
	}

	/*
	 * If create_event_filter() fails, filter still needs to be freed.
	 * Which the calling code will do with data->filter.
	 */
 assign:
	/* Swap in the new filter; old one is freed after readers are done */
	tmp = rcu_access_pointer(data->filter);

	rcu_assign_pointer(data->filter, filter);

	if (tmp) {
		/*
		 * Make sure the call is done with the filter.
		 * It is possible that a filter could fail at boot up,
		 * and then this path will be called. Avoid the synchronization
		 * in that case.
		 */
		if (system_state != SYSTEM_BOOTING)
			tracepoint_synchronize_unregister();
		free_event_filter(tmp);
	}

	kfree(data->filter_str);
	data->filter_str = NULL;

	if (filter_str) {
		/* Keep a copy of the original string for display purposes */
		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
		if (!data->filter_str) {
			free_event_filter(rcu_access_pointer(data->filter));
			data->filter = NULL;
			ret = -ENOMEM;
		}
	}
 out:
	return ret;
}
1115
1116static LIST_HEAD(named_triggers);
1117
1118/**
1119 * find_named_trigger - Find the common named trigger associated with @name
1120 * @name: The name of the set of named triggers to find the common data for
1121 *
1122 * Named triggers are sets of triggers that share a common set of
1123 * trigger data.  The first named trigger registered with a given name
1124 * owns the common trigger data that the others subsequently
1125 * registered with the same name will reference.  This function
1126 * returns the common trigger data associated with that first
1127 * registered instance.
1128 *
1129 * Return: the common trigger data for the given named trigger on
1130 * success, NULL otherwise.
1131 */
1132struct event_trigger_data *find_named_trigger(const char *name)
1133{
1134	struct event_trigger_data *data;
1135
1136	if (!name)
1137		return NULL;
1138
1139	list_for_each_entry(data, &named_triggers, named_list) {
1140		if (data->named_data)
1141			continue;
1142		if (strcmp(data->name, name) == 0)
1143			return data;
1144	}
1145
1146	return NULL;
1147}
1148
1149/**
1150 * is_named_trigger - determine if a given trigger is a named trigger
1151 * @test: The trigger data to test
1152 *
1153 * Return: true if 'test' is a named trigger, false otherwise.
1154 */
1155bool is_named_trigger(struct event_trigger_data *test)
1156{
1157	struct event_trigger_data *data;
1158
1159	list_for_each_entry(data, &named_triggers, named_list) {
1160		if (test == data)
1161			return true;
1162	}
1163
1164	return false;
1165}
1166
1167/**
1168 * save_named_trigger - save the trigger in the named trigger list
1169 * @name: The name of the named trigger set
1170 * @data: The trigger data to save
1171 *
1172 * Return: 0 if successful, negative error otherwise.
1173 */
1174int save_named_trigger(const char *name, struct event_trigger_data *data)
1175{
1176	data->name = kstrdup(name, GFP_KERNEL);
1177	if (!data->name)
1178		return -ENOMEM;
1179
1180	list_add(&data->named_list, &named_triggers);
1181
1182	return 0;
1183}
1184
1185/**
1186 * del_named_trigger - delete a trigger from the named trigger list
1187 * @data: The trigger data to delete
1188 */
1189void del_named_trigger(struct event_trigger_data *data)
1190{
1191	kfree(data->name);
1192	data->name = NULL;
1193
1194	list_del(&data->named_list);
1195}
1196
1197static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
1198{
1199	struct event_trigger_data *test;
1200
1201	list_for_each_entry(test, &named_triggers, named_list) {
1202		if (strcmp(test->name, data->name) == 0) {
1203			if (pause) {
1204				test->paused_tmp = test->paused;
1205				test->paused = true;
1206			} else {
1207				test->paused = test->paused_tmp;
1208			}
1209		}
1210	}
1211}
1212
/**
 * pause_named_trigger - Pause all named triggers with the same name
 * @data: The trigger data of a named trigger to pause
 *
 * Pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * pausing only one is meaningless, so pausing one named trigger needs
 * to pause all triggers with the same name.
 */
void pause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, true);	/* true => pause */
}
1226
/**
 * unpause_named_trigger - Un-pause all named triggers with the same name
 * @data: The trigger data of a named trigger to unpause
 *
 * Un-pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * unpausing only one is meaningless, so unpausing one named trigger
 * needs to unpause all triggers with the same name.
 */
void unpause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, false);	/* false => restore saved state */
}
1240
/**
 * set_named_trigger_data - Associate common named trigger data
 * @data: The trigger data to associate
 * @named_data: The common named trigger to be associated
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * associates the common trigger data from the first trigger with the
 * given trigger.
 */
void set_named_trigger_data(struct event_trigger_data *data,
			    struct event_trigger_data *named_data)
{
	/* Link @data to the owning trigger's shared data */
	data->named_data = named_data;
}
1258
/**
 * get_named_trigger_data - get the common named trigger data referenced
 * @data: The trigger data to query
 *
 * Return: @data's named_data pointer — the common data of the named
 * trigger set it references, if any.
 */
struct event_trigger_data *
get_named_trigger_data(struct event_trigger_data *data)
{
	return data->named_data;
}
1264
1265static void
1266traceon_trigger(struct event_trigger_data *data,
1267		struct trace_buffer *buffer, void *rec,
1268		struct ring_buffer_event *event)
1269{
1270	struct trace_event_file *file = data->private_data;
1271
1272	if (file) {
1273		if (tracer_tracing_is_on(file->tr))
1274			return;
1275
1276		tracer_tracing_on(file->tr);
1277		return;
1278	}
1279
1280	if (tracing_is_on())
1281		return;
1282
1283	tracing_on();
1284}
1285
1286static void
1287traceon_count_trigger(struct event_trigger_data *data,
1288		      struct trace_buffer *buffer, void *rec,
1289		      struct ring_buffer_event *event)
1290{
1291	struct trace_event_file *file = data->private_data;
1292
1293	if (file) {
1294		if (tracer_tracing_is_on(file->tr))
1295			return;
1296	} else {
1297		if (tracing_is_on())
1298			return;
1299	}
1300
1301	if (!data->count)
1302		return;
1303
1304	if (data->count != -1)
1305		(data->count)--;
1306
1307	if (file)
1308		tracer_tracing_on(file->tr);
1309	else
1310		tracing_on();
1311}
1312
1313static void
1314traceoff_trigger(struct event_trigger_data *data,
1315		 struct trace_buffer *buffer, void *rec,
1316		 struct ring_buffer_event *event)
1317{
1318	struct trace_event_file *file = data->private_data;
1319
1320	if (file) {
1321		if (!tracer_tracing_is_on(file->tr))
1322			return;
1323
1324		tracer_tracing_off(file->tr);
1325		return;
1326	}
1327
1328	if (!tracing_is_on())
1329		return;
1330
1331	tracing_off();
1332}
1333
1334static void
1335traceoff_count_trigger(struct event_trigger_data *data,
1336		       struct trace_buffer *buffer, void *rec,
1337		       struct ring_buffer_event *event)
1338{
1339	struct trace_event_file *file = data->private_data;
1340
1341	if (file) {
1342		if (!tracer_tracing_is_on(file->tr))
1343			return;
1344	} else {
1345		if (!tracing_is_on())
1346			return;
1347	}
1348
1349	if (!data->count)
1350		return;
1351
1352	if (data->count != -1)
1353		(data->count)--;
1354
1355	if (file)
1356		tracer_tracing_off(file->tr);
1357	else
1358		tracing_off();
1359}
1360
/* Display the traceon trigger entry in the per-event 'trigger' file */
static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_data *data)
{
	return event_trigger_print("traceon", m, (void *)data->count,
				   data->filter_str);
}
1367
/* Display the traceoff trigger entry in the per-event 'trigger' file */
static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_data *data)
{
	return event_trigger_print("traceoff", m, (void *)data->count,
				   data->filter_str);
}
1374
/* Unconditional traceon: fires on every matching event */
static struct event_trigger_ops traceon_trigger_ops = {
	.trigger		= traceon_trigger,
	.print			= traceon_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* traceon limited by a ":count=N" parameter */
static struct event_trigger_ops traceon_count_trigger_ops = {
	.trigger		= traceon_count_trigger,
	.print			= traceon_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* Unconditional traceoff: fires on every matching event */
static struct event_trigger_ops traceoff_trigger_ops = {
	.trigger		= traceoff_trigger,
	.print			= traceoff_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* traceoff limited by a ":count=N" parameter */
static struct event_trigger_ops traceoff_count_trigger_ops = {
	.trigger		= traceoff_count_trigger,
	.print			= traceoff_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
1402
1403static struct event_trigger_ops *
1404onoff_get_trigger_ops(char *cmd, char *param)
1405{
1406	struct event_trigger_ops *ops;
1407
1408	/* we register both traceon and traceoff to this callback */
1409	if (strcmp(cmd, "traceon") == 0)
1410		ops = param ? &traceon_count_trigger_ops :
1411			&traceon_trigger_ops;
1412	else
1413		ops = param ? &traceoff_count_trigger_ops :
1414			&traceoff_trigger_ops;
1415
1416	return ops;
1417}
1418
static struct event_command trigger_traceon_cmd = {
	.name			= "traceon",
	.trigger_type		= ETT_TRACE_ONOFF,
	.parse			= event_trigger_parse,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= onoff_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

/*
 * traceoff is a POST_TRIGGER command: it is deferred until the
 * current event has been written (see event_triggers_call()).
 */
static struct event_command trigger_traceoff_cmd = {
	.name			= "traceoff",
	.trigger_type		= ETT_TRACE_ONOFF,
	.flags			= EVENT_CMD_FL_POST_TRIGGER,
	.parse			= event_trigger_parse,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= onoff_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
1439
1440#ifdef CONFIG_TRACER_SNAPSHOT
1441static void
1442snapshot_trigger(struct event_trigger_data *data,
1443		 struct trace_buffer *buffer, void *rec,
1444		 struct ring_buffer_event *event)
1445{
1446	struct trace_event_file *file = data->private_data;
1447
1448	if (file)
1449		tracing_snapshot_instance(file->tr);
1450	else
1451		tracing_snapshot();
1452}
1453
/* Like snapshot_trigger() but honors the ":count=N" budget (-1 = unlimited) */
static void
snapshot_count_trigger(struct event_trigger_data *data,
		       struct trace_buffer *buffer, void *rec,
		       struct ring_buffer_event *event)
{
	if (!data->count)	/* budget exhausted */
		return;

	if (data->count != -1)	/* -1 means no limit */
		(data->count)--;

	snapshot_trigger(data, buffer, rec, event);
}
1467
1468static int
1469register_snapshot_trigger(char *glob,
1470			  struct event_trigger_data *data,
1471			  struct trace_event_file *file)
1472{
1473	int ret = tracing_alloc_snapshot_instance(file->tr);
1474
1475	if (ret < 0)
1476		return ret;
1477
1478	return register_trigger(glob, data, file);
1479}
1480
/* Display the snapshot trigger entry in the per-event 'trigger' file */
static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_data *data)
{
	return event_trigger_print("snapshot", m, (void *)data->count,
				   data->filter_str);
}
1487
/* Unconditional snapshot variant */
static struct event_trigger_ops snapshot_trigger_ops = {
	.trigger		= snapshot_trigger,
	.print			= snapshot_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* Count-limited snapshot variant */
static struct event_trigger_ops snapshot_count_trigger_ops = {
	.trigger		= snapshot_count_trigger,
	.print			= snapshot_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* A non-NULL @param (":count=N") selects the counting variant */
static struct event_trigger_ops *
snapshot_get_trigger_ops(char *cmd, char *param)
{
	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
}

static struct event_command trigger_snapshot_cmd = {
	.name			= "snapshot",
	.trigger_type		= ETT_SNAPSHOT,
	.parse			= event_trigger_parse,
	.reg			= register_snapshot_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= snapshot_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

/* Boot-time registration; failure is reported via WARN_ON */
static __init int register_trigger_snapshot_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_snapshot_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
/* No snapshot support configured: registration is a no-op */
static __init int register_trigger_snapshot_cmd(void) { return 0; }
#endif /* CONFIG_TRACER_SNAPSHOT */
1530
1531#ifdef CONFIG_STACKTRACE
1532#ifdef CONFIG_UNWINDER_ORC
1533/* Skip 2:
1534 *   event_triggers_post_call()
1535 *   trace_event_raw_event_xxx()
1536 */
1537# define STACK_SKIP 2
1538#else
1539/*
1540 * Skip 4:
1541 *   stacktrace_trigger()
1542 *   event_triggers_post_call()
1543 *   trace_event_buffer_commit()
1544 *   trace_event_raw_event_xxx()
1545 */
1546#define STACK_SKIP 4
1547#endif
1548
1549static void
1550stacktrace_trigger(struct event_trigger_data *data,
1551		   struct trace_buffer *buffer,  void *rec,
1552		   struct ring_buffer_event *event)
1553{
1554	struct trace_event_file *file = data->private_data;
1555
1556	if (file)
1557		__trace_stack(file->tr, tracing_gen_ctx(), STACK_SKIP);
1558	else
1559		trace_dump_stack(STACK_SKIP);
1560}
1561
/* Like stacktrace_trigger() but honors the ":count=N" budget (-1 = unlimited) */
static void
stacktrace_count_trigger(struct event_trigger_data *data,
			 struct trace_buffer *buffer, void *rec,
			 struct ring_buffer_event *event)
{
	if (!data->count)	/* budget exhausted */
		return;

	if (data->count != -1)	/* -1 means no limit */
		(data->count)--;

	stacktrace_trigger(data, buffer, rec, event);
}
1575
/* Display the stacktrace trigger entry in the per-event 'trigger' file */
static int
stacktrace_trigger_print(struct seq_file *m, struct event_trigger_data *data)
{
	return event_trigger_print("stacktrace", m, (void *)data->count,
				   data->filter_str);
}
1582
/* Unconditional stacktrace variant */
static struct event_trigger_ops stacktrace_trigger_ops = {
	.trigger		= stacktrace_trigger,
	.print			= stacktrace_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* Count-limited stacktrace variant */
static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.trigger		= stacktrace_count_trigger,
	.print			= stacktrace_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* A non-NULL @param (":count=N") selects the counting variant */
static struct event_trigger_ops *
stacktrace_get_trigger_ops(char *cmd, char *param)
{
	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
}

/* POST_TRIGGER: deferred until the current event is written */
static struct event_command trigger_stacktrace_cmd = {
	.name			= "stacktrace",
	.trigger_type		= ETT_STACKTRACE,
	.flags			= EVENT_CMD_FL_POST_TRIGGER,
	.parse			= event_trigger_parse,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= stacktrace_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

/* Boot-time registration; failure is reported via WARN_ON */
static __init int register_trigger_stacktrace_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_stacktrace_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
/* No stacktrace support configured: registration is a no-op */
static __init int register_trigger_stacktrace_cmd(void) { return 0; }
#endif /* CONFIG_STACKTRACE */
1626
/* Undo registration of the traceon and traceoff trigger commands */
static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}
1632
1633static void
1634event_enable_trigger(struct event_trigger_data *data,
1635		     struct trace_buffer *buffer,  void *rec,
1636		     struct ring_buffer_event *event)
1637{
1638	struct enable_trigger_data *enable_data = data->private_data;
1639
1640	if (enable_data->enable)
1641		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1642	else
1643		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1644}
1645
/* Count-limited enable/disable_event trigger (-1 count = unlimited) */
static void
event_enable_count_trigger(struct event_trigger_data *data,
			   struct trace_buffer *buffer,  void *rec,
			   struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (!data->count)	/* budget exhausted */
		return;

	/* Skip if the event is in a state we want to switch to */
	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_trigger(data, buffer, rec, event);
}
1665
/* Display an enable/disable_event trigger entry in the 'trigger' file */
int event_enable_trigger_print(struct seq_file *m,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	/* Command name depends on hist vs. event and enable vs. disable */
	seq_printf(m, "%s:%s:%s",
		   enable_data->hist ?
		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
		   enable_data->file->event_call->class->system,
		   trace_event_name(enable_data->file->event_call));

	if (data->count == -1)	/* -1 means no count limit was given */
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", data->count);

	if (data->filter_str)
		seq_printf(m, " if %s\n", data->filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}
1690
/*
 * Drop a reference to an enable/disable_event trigger.  When the last
 * reference is gone, undo the soft-mode enable taken at parse time,
 * release the ref on the target event, and free both the trigger data
 * and its enable_trigger_data.
 */
void event_enable_trigger_free(struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		trace_event_put_ref(enable_data->file->event_call);
		trigger_data_free(data);
		kfree(enable_data);
	}
}
1707
/*
 * The enable and disable command variants share the same trigger
 * functions; the direction is carried in enable_trigger_data->enable.
 */
static struct event_trigger_ops event_enable_trigger_ops = {
	.trigger		= event_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops event_enable_count_trigger_ops = {
	.trigger		= event_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_trigger_ops = {
	.trigger		= event_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_count_trigger_ops = {
	.trigger		= event_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
1735
/**
 * event_enable_trigger_parse - Parse an enable/disable_event trigger command
 * @cmd_ops: The event_command operations for the trigger
 * @file: The trace_event_file the trigger is attached to
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param_and_filter: "system:event[:count] [if filter]" remainder
 *
 * Looks up the target event named by @param_and_filter, allocates the
 * enable_trigger_data and trigger_data, then (unless removing) pins
 * the target event's module, puts the event in soft mode, and
 * registers the trigger.
 *
 * Return: 0 on success, errno otherwise
 */
int event_enable_trigger_parse(struct event_command *cmd_ops,
			       struct trace_event_file *file,
			       char *glob, char *cmd, char *param_and_filter)
{
	struct trace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct trace_array *tr = file->tr;
	char *param, *filter;
	bool enable, remove;
	const char *system;
	const char *event;
	bool hist = false;
	int ret;

	/* A leading '!' in @glob means "remove this trigger" */
	remove = event_trigger_check_remove(glob);

	if (event_trigger_empty_param(param_and_filter))
		return -EINVAL;

	ret = event_trigger_separate_filter(param_and_filter, &param, &filter, true);
	if (ret)
		return ret;

	/* @param is "system:event[:count]" — split off the target event */
	system = strsep(&param, ":");
	if (!param)
		return -EINVAL;

	event = strsep(&param, ":");

	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

	/* The same parse routine serves enable/disable_event and _hist_ variants */
#ifdef CONFIG_HIST_TRIGGERS
	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
		(strcmp(cmd, DISABLE_HIST_STR) == 0));

	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	ret = -ENOMEM;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data)
		goto out;

	enable_data->hist = hist;
	enable_data->enable = enable;
	enable_data->file = event_enable_file;

	trigger_data = event_trigger_alloc(cmd_ops, cmd, param, enable_data);
	if (!trigger_data) {
		kfree(enable_data);
		goto out;
	}

	if (remove) {
		/* trigger_data only serves to match the registered trigger */
		event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		goto out;
	}

	/* Up the trigger_data count to make sure nothing frees it on failure */
	event_trigger_init(trigger_data);

	ret = event_trigger_parse_num(param, trigger_data);
	if (ret)
		goto out_free;

	ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
	if (ret < 0)
		goto out_free;

	/* Don't let event modules unload while probe registered */
	ret = trace_event_try_get_ref(event_enable_file->event_call);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	/* Put the target event in soft mode so the trigger can flip it */
	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;

	ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
	if (ret)
		goto out_disable;

	/* Drop the extra count taken above (registration holds its own) */
	event_trigger_free(trigger_data);
 out:
	return ret;
 out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
	trace_event_put_ref(event_enable_file->event_call);
 out_free:
	event_trigger_reset_filter(cmd_ops, trigger_data);
	event_trigger_free(trigger_data);
	kfree(enable_data);

	goto out;
}
1844
/**
 * event_enable_register_trigger - @reg implementation for enable/disable_event
 * @glob: The raw string used to register the trigger
 * @data: The trigger data to register
 * @file: The event file the trigger is attached to
 *
 * Rejects a duplicate trigger of the same type targeting the same
 * event file with -EEXIST; otherwise adds @data to @file's trigger
 * list and enables triggering on the file.
 *
 * Return: 0 on success, errno otherwise
 */
int event_enable_register_trigger(char *glob,
				  struct event_trigger_data *data,
				  struct trace_event_file *file)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct enable_trigger_data *test_enable_data;
	struct event_trigger_data *test;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	/* Refuse a second trigger of the same type for the same target file */
	list_for_each_entry(test, &file->triggers, list) {
		test_enable_data = test->private_data;
		if (test_enable_data &&
		    (test->cmd_ops->trigger_type ==
		     data->cmd_ops->trigger_type) &&
		    (test_enable_data->file == enable_data->file)) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);

	update_cond_flag(file);
	ret = trace_event_trigger_enable_disable(file, 1);
	if (ret < 0) {
		/* Roll back the list insertion on failure */
		list_del_rcu(&data->list);
		update_cond_flag(file);
	}
out:
	return ret;
}
1884
/**
 * event_enable_unregister_trigger - @unreg implementation for enable/disable_event
 * @glob: The raw string used to register the trigger
 * @test: Trigger data describing the trigger to remove (used as a match key)
 * @file: The event file the trigger is attached to
 *
 * Finds the registered trigger matching @test's type and target event
 * file, removes it from @file's trigger list, and frees it via its
 * ->free() method if present.
 */
void event_enable_unregister_trigger(char *glob,
				     struct event_trigger_data *test,
				     struct trace_event_file *file)
{
	struct enable_trigger_data *test_enable_data = test->private_data;
	struct event_trigger_data *data = NULL, *iter;
	struct enable_trigger_data *enable_data;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(iter, &file->triggers, list) {
		enable_data = iter->private_data;
		if (enable_data &&
		    (iter->cmd_ops->trigger_type ==
		     test->cmd_ops->trigger_type) &&
		    (enable_data->file == test_enable_data->file)) {
			data = iter;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (data && data->ops->free)
		data->ops->free(data);
}
1912
1913static struct event_trigger_ops *
1914event_enable_get_trigger_ops(char *cmd, char *param)
1915{
1916	struct event_trigger_ops *ops;
1917	bool enable;
1918
1919#ifdef CONFIG_HIST_TRIGGERS
1920	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1921		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1922#else
1923	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1924#endif
1925	if (enable)
1926		ops = param ? &event_enable_count_trigger_ops :
1927			&event_enable_trigger_ops;
1928	else
1929		ops = param ? &event_disable_count_trigger_ops :
1930			&event_disable_trigger_ops;
1931
1932	return ops;
1933}
1934
/* Command tables for enable_event and disable_event; they differ only in name */
static struct event_command trigger_enable_cmd = {
	.name			= ENABLE_EVENT_STR,
	.trigger_type		= ETT_EVENT_ENABLE,
	.parse			= event_enable_trigger_parse,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.get_trigger_ops	= event_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

static struct event_command trigger_disable_cmd = {
	.name			= DISABLE_EVENT_STR,
	.trigger_type		= ETT_EVENT_ENABLE,
	.parse			= event_enable_trigger_parse,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.get_trigger_ops	= event_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
1954
/* Undo registration of the enable_event and disable_event commands */
static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}
1960
1961static __init int register_trigger_enable_disable_cmds(void)
1962{
1963	int ret;
1964
1965	ret = register_event_command(&trigger_enable_cmd);
1966	if (WARN_ON(ret < 0))
1967		return ret;
1968	ret = register_event_command(&trigger_disable_cmd);
1969	if (WARN_ON(ret < 0))
1970		unregister_trigger_enable_disable_cmds();
1971
1972	return ret;
1973}
1974
1975static __init int register_trigger_traceon_traceoff_cmds(void)
1976{
1977	int ret;
1978
1979	ret = register_event_command(&trigger_traceon_cmd);
1980	if (WARN_ON(ret < 0))
1981		return ret;
1982	ret = register_event_command(&trigger_traceoff_cmd);
1983	if (WARN_ON(ret < 0))
1984		unregister_trigger_traceon_traceoff_cmds();
1985
1986	return ret;
1987}
1988
/*
 * Boot-time registration of all built-in trigger commands.  Always
 * returns 0; the helpers defined above report failure via WARN_ON.
 */
__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();
	register_trigger_hist_enable_disable_cmds();
	register_trigger_hist_cmd();

	return 0;
}
/* ==== Scraped duplicate: an older revision (v5.14.15) of this same file follows ==== */
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * trace_events_trigger - trace event triggers
   4 *
   5 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
   6 */
   7
   8#include <linux/security.h>
   9#include <linux/module.h>
  10#include <linux/ctype.h>
  11#include <linux/mutex.h>
  12#include <linux/slab.h>
  13#include <linux/rculist.h>
  14
  15#include "trace.h"
  16
  17static LIST_HEAD(trigger_commands);
  18static DEFINE_MUTEX(trigger_cmd_mutex);
  19
/*
 * Free an event_trigger_data: clear any attached filter first (via the
 * command's ->set_filter(NULL, ...)), then wait for in-flight trigger
 * callers to finish before freeing the data.
 */
void trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	/* make sure current triggers exit before free */
	tracepoint_synchronize_unregister();

	kfree(data);
}
  30
/**
 * event_triggers_call - Call triggers associated with a trace event
 * @file: The trace_event_file associated with the event
 * @buffer: The ring buffer the event is being written to
 * @rec: The trace entry for the event, NULL for unconditional invocation
 * @event: The ring buffer event for the event record
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command.  If rec is
 * non-NULL, it means that the trigger requires further processing and
 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
 * trigger has a filter associated with it, rec will checked against
 * the filter and if the record matches the trigger will be invoked.
 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
 * in any case until the current event is written, the trigger
 * function isn't invoked but the bit associated with the deferred
 * trigger is set in the return value.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 *
 * Return: an enum event_trigger_type value containing a set bit for
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
enum event_trigger_type
event_triggers_call(struct trace_event_file *file,
		    struct trace_buffer *buffer, void *rec,
		    struct ring_buffer_event *event)
{
	struct event_trigger_data *data;
	enum event_trigger_type tt = ETT_NONE;
	struct event_filter *filter;

	if (list_empty(&file->triggers))
		return tt;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)	/* paused named triggers are skipped */
			continue;
		if (!rec) {
			/* No record: invoke unconditionally */
			data->ops->func(data, buffer, rec, event);
			continue;
		}
		filter = rcu_dereference_sched(data->filter);
		if (filter && !filter_match_preds(filter, rec))
			continue;
		if (event_command_post_trigger(data->cmd_ops)) {
			/* Defer until the event is written; flag it for the caller */
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data->ops->func(data, buffer, rec, event);
	}
	return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);
  86
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  87/**
  88 * event_triggers_post_call - Call 'post_triggers' for a trace event
  89 * @file: The trace_event_file associated with the event
  90 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
  91 *
  92 * For each trigger associated with an event, invoke the trigger
  93 * function registered with the associated trigger command, if the
  94 * corresponding bit is set in the tt enum passed into this function.
  95 * See @event_triggers_call for details on how those bits are set.
  96 *
  97 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
  98 */
  99void
 100event_triggers_post_call(struct trace_event_file *file,
 101			 enum event_trigger_type tt)
 102{
 103	struct event_trigger_data *data;
 104
 105	list_for_each_entry_rcu(data, &file->triggers, list) {
 106		if (data->paused)
 107			continue;
 108		if (data->cmd_ops->trigger_type & tt)
 109			data->ops->func(data, NULL, NULL, NULL);
 110	}
 111}
 112EXPORT_SYMBOL_GPL(event_triggers_post_call);
 113
 114#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)
 115
 116static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
 117{
 118	struct trace_event_file *event_file = event_file_data(m->private);
 119
 120	if (t == SHOW_AVAILABLE_TRIGGERS) {
 121		(*pos)++;
 122		return NULL;
 123	}
 124	return seq_list_next(t, &event_file->triggers, pos);
 125}
 126
 
 
 
 
 
 
 
 
 
 
 
 
 
 127static void *trigger_start(struct seq_file *m, loff_t *pos)
 128{
 129	struct trace_event_file *event_file;
 130
 131	/* ->stop() is called even if ->start() fails */
 132	mutex_lock(&event_mutex);
 133	event_file = event_file_data(m->private);
 134	if (unlikely(!event_file))
 135		return ERR_PTR(-ENODEV);
 136
 137	if (list_empty(&event_file->triggers))
 138		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
 139
 140	return seq_list_start(&event_file->triggers, *pos);
 141}
 142
 143static void trigger_stop(struct seq_file *m, void *t)
 144{
 145	mutex_unlock(&event_mutex);
 146}
 147
 148static int trigger_show(struct seq_file *m, void *v)
 149{
 150	struct event_trigger_data *data;
 151	struct event_command *p;
 152
 153	if (v == SHOW_AVAILABLE_TRIGGERS) {
 154		seq_puts(m, "# Available triggers:\n");
 155		seq_putc(m, '#');
 156		mutex_lock(&trigger_cmd_mutex);
 157		list_for_each_entry_reverse(p, &trigger_commands, list)
 158			seq_printf(m, " %s", p->name);
 159		seq_putc(m, '\n');
 160		mutex_unlock(&trigger_cmd_mutex);
 161		return 0;
 162	}
 163
 164	data = list_entry(v, struct event_trigger_data, list);
 165	data->ops->print(m, data->ops, data);
 166
 167	return 0;
 168}
 169
 170static const struct seq_operations event_triggers_seq_ops = {
 171	.start = trigger_start,
 172	.next = trigger_next,
 173	.stop = trigger_stop,
 174	.show = trigger_show,
 175};
 176
 177static int event_trigger_regex_open(struct inode *inode, struct file *file)
 178{
 179	int ret;
 180
 181	ret = security_locked_down(LOCKDOWN_TRACEFS);
 182	if (ret)
 183		return ret;
 184
 185	mutex_lock(&event_mutex);
 186
 187	if (unlikely(!event_file_data(file))) {
 188		mutex_unlock(&event_mutex);
 189		return -ENODEV;
 190	}
 191
 192	if ((file->f_mode & FMODE_WRITE) &&
 193	    (file->f_flags & O_TRUNC)) {
 194		struct trace_event_file *event_file;
 195		struct event_command *p;
 196
 197		event_file = event_file_data(file);
 198
 199		list_for_each_entry(p, &trigger_commands, list) {
 200			if (p->unreg_all)
 201				p->unreg_all(event_file);
 202		}
 203	}
 204
 205	if (file->f_mode & FMODE_READ) {
 206		ret = seq_open(file, &event_triggers_seq_ops);
 207		if (!ret) {
 208			struct seq_file *m = file->private_data;
 209			m->private = file;
 210		}
 211	}
 212
 213	mutex_unlock(&event_mutex);
 214
 215	return ret;
 216}
 217
 218int trigger_process_regex(struct trace_event_file *file, char *buff)
 219{
 220	char *command, *next;
 221	struct event_command *p;
 222	int ret = -EINVAL;
 223
 224	next = buff = skip_spaces(buff);
 225	command = strsep(&next, ": \t");
 226	if (next) {
 227		next = skip_spaces(next);
 228		if (!*next)
 229			next = NULL;
 230	}
 231	command = (command[0] != '!') ? command : command + 1;
 232
 233	mutex_lock(&trigger_cmd_mutex);
 234	list_for_each_entry(p, &trigger_commands, list) {
 235		if (strcmp(p->name, command) == 0) {
 236			ret = p->func(p, file, buff, command, next);
 237			goto out_unlock;
 238		}
 239	}
 240 out_unlock:
 241	mutex_unlock(&trigger_cmd_mutex);
 242
 243	return ret;
 244}
 245
 246static ssize_t event_trigger_regex_write(struct file *file,
 247					 const char __user *ubuf,
 248					 size_t cnt, loff_t *ppos)
 249{
 250	struct trace_event_file *event_file;
 251	ssize_t ret;
 252	char *buf;
 253
 254	if (!cnt)
 255		return 0;
 256
 257	if (cnt >= PAGE_SIZE)
 258		return -EINVAL;
 259
 260	buf = memdup_user_nul(ubuf, cnt);
 261	if (IS_ERR(buf))
 262		return PTR_ERR(buf);
 263
 264	strim(buf);
 265
 266	mutex_lock(&event_mutex);
 267	event_file = event_file_data(file);
 268	if (unlikely(!event_file)) {
 269		mutex_unlock(&event_mutex);
 270		kfree(buf);
 271		return -ENODEV;
 272	}
 273	ret = trigger_process_regex(event_file, buf);
 274	mutex_unlock(&event_mutex);
 275
 276	kfree(buf);
 277	if (ret < 0)
 278		goto out;
 279
 280	*ppos += cnt;
 281	ret = cnt;
 282 out:
 283	return ret;
 284}
 285
 286static int event_trigger_regex_release(struct inode *inode, struct file *file)
 287{
 288	mutex_lock(&event_mutex);
 289
 290	if (file->f_mode & FMODE_READ)
 291		seq_release(inode, file);
 292
 293	mutex_unlock(&event_mutex);
 294
 295	return 0;
 296}
 297
 298static ssize_t
 299event_trigger_write(struct file *filp, const char __user *ubuf,
 300		    size_t cnt, loff_t *ppos)
 301{
 302	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
 303}
 304
 305static int
 306event_trigger_open(struct inode *inode, struct file *filp)
 307{
 308	/* Checks for tracefs lockdown */
 309	return event_trigger_regex_open(inode, filp);
 310}
 311
 312static int
 313event_trigger_release(struct inode *inode, struct file *file)
 314{
 315	return event_trigger_regex_release(inode, file);
 316}
 317
 318const struct file_operations event_trigger_fops = {
 319	.open = event_trigger_open,
 320	.read = seq_read,
 321	.write = event_trigger_write,
 322	.llseek = tracing_lseek,
 323	.release = event_trigger_release,
 324};
 325
 326/*
 327 * Currently we only register event commands from __init, so mark this
 328 * __init too.
 329 */
 330__init int register_event_command(struct event_command *cmd)
 331{
 332	struct event_command *p;
 333	int ret = 0;
 334
 335	mutex_lock(&trigger_cmd_mutex);
 336	list_for_each_entry(p, &trigger_commands, list) {
 337		if (strcmp(cmd->name, p->name) == 0) {
 338			ret = -EBUSY;
 339			goto out_unlock;
 340		}
 341	}
 342	list_add(&cmd->list, &trigger_commands);
 343 out_unlock:
 344	mutex_unlock(&trigger_cmd_mutex);
 345
 346	return ret;
 347}
 348
 349/*
 350 * Currently we only unregister event commands from __init, so mark
 351 * this __init too.
 352 */
 353__init int unregister_event_command(struct event_command *cmd)
 354{
 355	struct event_command *p, *n;
 356	int ret = -ENODEV;
 357
 358	mutex_lock(&trigger_cmd_mutex);
 359	list_for_each_entry_safe(p, n, &trigger_commands, list) {
 360		if (strcmp(cmd->name, p->name) == 0) {
 361			ret = 0;
 362			list_del_init(&p->list);
 363			goto out_unlock;
 364		}
 365	}
 366 out_unlock:
 367	mutex_unlock(&trigger_cmd_mutex);
 368
 369	return ret;
 370}
 371
 372/**
 373 * event_trigger_print - Generic event_trigger_ops @print implementation
 374 * @name: The name of the event trigger
 375 * @m: The seq_file being printed to
 376 * @data: Trigger-specific data
 377 * @filter_str: filter_str to print, if present
 378 *
 379 * Common implementation for event triggers to print themselves.
 380 *
 381 * Usually wrapped by a function that simply sets the @name of the
 382 * trigger command and then invokes this.
 383 *
 384 * Return: 0 on success, errno otherwise
 385 */
 386static int
 387event_trigger_print(const char *name, struct seq_file *m,
 388		    void *data, char *filter_str)
 389{
 390	long count = (long)data;
 391
 392	seq_puts(m, name);
 393
 394	if (count == -1)
 395		seq_puts(m, ":unlimited");
 396	else
 397		seq_printf(m, ":count=%ld", count);
 398
 399	if (filter_str)
 400		seq_printf(m, " if %s\n", filter_str);
 401	else
 402		seq_putc(m, '\n');
 403
 404	return 0;
 405}
 406
 407/**
 408 * event_trigger_init - Generic event_trigger_ops @init implementation
 409 * @ops: The trigger ops associated with the trigger
 410 * @data: Trigger-specific data
 411 *
 412 * Common implementation of event trigger initialization.
 413 *
 414 * Usually used directly as the @init method in event trigger
 415 * implementations.
 416 *
 417 * Return: 0 on success, errno otherwise
 418 */
 419int event_trigger_init(struct event_trigger_ops *ops,
 420		       struct event_trigger_data *data)
 421{
 422	data->ref++;
 423	return 0;
 424}
 425
 426/**
 427 * event_trigger_free - Generic event_trigger_ops @free implementation
 428 * @ops: The trigger ops associated with the trigger
 429 * @data: Trigger-specific data
 430 *
 431 * Common implementation of event trigger de-initialization.
 432 *
 433 * Usually used directly as the @free method in event trigger
 434 * implementations.
 435 */
 436static void
 437event_trigger_free(struct event_trigger_ops *ops,
 438		   struct event_trigger_data *data)
 439{
 440	if (WARN_ON_ONCE(data->ref <= 0))
 441		return;
 442
 443	data->ref--;
 444	if (!data->ref)
 445		trigger_data_free(data);
 446}
 447
 448int trace_event_trigger_enable_disable(struct trace_event_file *file,
 449				       int trigger_enable)
 450{
 451	int ret = 0;
 452
 453	if (trigger_enable) {
 454		if (atomic_inc_return(&file->tm_ref) > 1)
 455			return ret;
 456		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
 457		ret = trace_event_enable_disable(file, 1, 1);
 458	} else {
 459		if (atomic_dec_return(&file->tm_ref) > 0)
 460			return ret;
 461		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
 462		ret = trace_event_enable_disable(file, 0, 1);
 463	}
 464
 465	return ret;
 466}
 467
 468/**
 469 * clear_event_triggers - Clear all triggers associated with a trace array
 470 * @tr: The trace array to clear
 471 *
 472 * For each trigger, the triggering event has its tm_ref decremented
 473 * via trace_event_trigger_enable_disable(), and any associated event
 474 * (in the case of enable/disable_event triggers) will have its sm_ref
 475 * decremented via free()->trace_event_enable_disable().  That
 476 * combination effectively reverses the soft-mode/trigger state added
 477 * by trigger registration.
 478 *
 479 * Must be called with event_mutex held.
 480 */
 481void
 482clear_event_triggers(struct trace_array *tr)
 483{
 484	struct trace_event_file *file;
 485
 486	list_for_each_entry(file, &tr->events, list) {
 487		struct event_trigger_data *data, *n;
 488		list_for_each_entry_safe(data, n, &file->triggers, list) {
 489			trace_event_trigger_enable_disable(file, 0);
 490			list_del_rcu(&data->list);
 491			if (data->ops->free)
 492				data->ops->free(data->ops, data);
 493		}
 494	}
 495}
 496
 497/**
 498 * update_cond_flag - Set or reset the TRIGGER_COND bit
 499 * @file: The trace_event_file associated with the event
 500 *
 501 * If an event has triggers and any of those triggers has a filter or
 502 * a post_trigger, trigger invocation needs to be deferred until after
 503 * the current event has logged its data, and the event should have
 504 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 505 * cleared.
 506 */
 507void update_cond_flag(struct trace_event_file *file)
 508{
 509	struct event_trigger_data *data;
 510	bool set_cond = false;
 511
 512	lockdep_assert_held(&event_mutex);
 513
 514	list_for_each_entry(data, &file->triggers, list) {
 515		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
 516		    event_command_needs_rec(data->cmd_ops)) {
 517			set_cond = true;
 518			break;
 519		}
 520	}
 521
 522	if (set_cond)
 523		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
 524	else
 525		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
 526}
 527
 528/**
 529 * register_trigger - Generic event_command @reg implementation
 530 * @glob: The raw string used to register the trigger
 531 * @ops: The trigger ops associated with the trigger
 532 * @data: Trigger-specific data to associate with the trigger
 533 * @file: The trace_event_file associated with the event
 534 *
 535 * Common implementation for event trigger registration.
 536 *
 537 * Usually used directly as the @reg method in event command
 538 * implementations.
 539 *
 540 * Return: 0 on success, errno otherwise
 541 */
 542static int register_trigger(char *glob, struct event_trigger_ops *ops,
 543			    struct event_trigger_data *data,
 544			    struct trace_event_file *file)
 545{
 546	struct event_trigger_data *test;
 547	int ret = 0;
 548
 549	lockdep_assert_held(&event_mutex);
 550
 551	list_for_each_entry(test, &file->triggers, list) {
 552		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
 553			ret = -EEXIST;
 554			goto out;
 555		}
 556	}
 557
 558	if (data->ops->init) {
 559		ret = data->ops->init(data->ops, data);
 560		if (ret < 0)
 561			goto out;
 562	}
 563
 564	list_add_rcu(&data->list, &file->triggers);
 565	ret++;
 566
 567	update_cond_flag(file);
 568	if (trace_event_trigger_enable_disable(file, 1) < 0) {
 
 569		list_del_rcu(&data->list);
 570		update_cond_flag(file);
 571		ret--;
 572	}
 573out:
 574	return ret;
 575}
 576
 577/**
 578 * unregister_trigger - Generic event_command @unreg implementation
 579 * @glob: The raw string used to register the trigger
 580 * @ops: The trigger ops associated with the trigger
 581 * @test: Trigger-specific data used to find the trigger to remove
 582 * @file: The trace_event_file associated with the event
 583 *
 584 * Common implementation for event trigger unregistration.
 585 *
 586 * Usually used directly as the @unreg method in event command
 587 * implementations.
 588 */
 589static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
 590			       struct event_trigger_data *test,
 591			       struct trace_event_file *file)
 592{
 593	struct event_trigger_data *data;
 594	bool unregistered = false;
 595
 596	lockdep_assert_held(&event_mutex);
 597
 598	list_for_each_entry(data, &file->triggers, list) {
 599		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
 600			unregistered = true;
 601			list_del_rcu(&data->list);
 602			trace_event_trigger_enable_disable(file, 0);
 603			update_cond_flag(file);
 604			break;
 605		}
 606	}
 607
 608	if (unregistered && data->ops->free)
 609		data->ops->free(data->ops, data);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 610}
 611
 612/**
 613 * event_trigger_callback - Generic event_command @func implementation
 614 * @cmd_ops: The command ops, used for trigger registration
 615 * @file: The trace_event_file associated with the event
 616 * @glob: The raw string used to register the trigger
 617 * @cmd: The cmd portion of the string used to register the trigger
 618 * @param: The params portion of the string used to register the trigger
 619 *
 620 * Common implementation for event command parsing and trigger
 621 * instantiation.
 
 
 
 622 *
 623 * Usually used directly as the @func method in event command
 624 * implementations.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 625 *
 626 * Return: 0 on success, errno otherwise
 627 */
 628static int
 629event_trigger_callback(struct event_command *cmd_ops,
 630		       struct trace_event_file *file,
 631		       char *glob, char *cmd, char *param)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 632{
 633	struct event_trigger_data *trigger_data;
 634	struct event_trigger_ops *trigger_ops;
 635	char *trigger = NULL;
 636	char *number;
 637	int ret;
 638
 639	/* separate the trigger from the filter (t:n [if filter]) */
 640	if (param && isdigit(param[0])) {
 641		trigger = strsep(&param, " \t");
 642		if (param) {
 643			param = skip_spaces(param);
 644			if (!*param)
 645				param = NULL;
 646		}
 647	}
 648
 649	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
 650
 651	ret = -ENOMEM;
 652	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
 653	if (!trigger_data)
 654		goto out;
 655
 656	trigger_data->count = -1;
 657	trigger_data->ops = trigger_ops;
 658	trigger_data->cmd_ops = cmd_ops;
 659	trigger_data->private_data = file;
 
 660	INIT_LIST_HEAD(&trigger_data->list);
 661	INIT_LIST_HEAD(&trigger_data->named_list);
 
 662
 663	if (glob[0] == '!') {
 664		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
 665		kfree(trigger_data);
 666		ret = 0;
 667		goto out;
 668	}
 
 
 
 
 
 
 
 
 
 
 
 
 669
 670	if (trigger) {
 671		number = strsep(&trigger, ":");
 672
 673		ret = -EINVAL;
 674		if (!strlen(number))
 675			goto out_free;
 676
 677		/*
 678		 * We use the callback data field (which is a pointer)
 679		 * as our counter.
 680		 */
 681		ret = kstrtoul(number, 0, &trigger_data->count);
 682		if (ret)
 683			goto out_free;
 684	}
 685
 686	if (!param) /* if param is non-empty, it's supposed to be a filter */
 687		goto out_reg;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 688
 689	if (!cmd_ops->set_filter)
 690		goto out_reg;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 691
 692	ret = cmd_ops->set_filter(param, trigger_data, file);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 693	if (ret < 0)
 694		goto out_free;
 695
 696 out_reg:
 697	/* Up the trigger_data count to make sure reg doesn't free it on failure */
 698	event_trigger_init(trigger_ops, trigger_data);
 699	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
 700	/*
 701	 * The above returns on success the # of functions enabled,
 702	 * but if it didn't find any functions it returns zero.
 703	 * Consider no functions a failure too.
 704	 */
 705	if (!ret) {
 706		cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
 707		ret = -ENOENT;
 708	} else if (ret > 0)
 709		ret = 0;
 710
 711	/* Down the counter of trigger_data or free it if not used anymore */
 712	event_trigger_free(trigger_ops, trigger_data);
 713 out:
 714	return ret;
 715
 716 out_free:
 717	if (cmd_ops->set_filter)
 718		cmd_ops->set_filter(NULL, trigger_data, NULL);
 719	kfree(trigger_data);
 720	goto out;
 721}
 722
 723/**
 724 * set_trigger_filter - Generic event_command @set_filter implementation
 725 * @filter_str: The filter string for the trigger, NULL to remove filter
 726 * @trigger_data: Trigger-specific data
 727 * @file: The trace_event_file associated with the event
 728 *
 729 * Common implementation for event command filter parsing and filter
 730 * instantiation.
 731 *
 732 * Usually used directly as the @set_filter method in event command
 733 * implementations.
 734 *
 735 * Also used to remove a filter (if filter_str = NULL).
 736 *
 737 * Return: 0 on success, errno otherwise
 738 */
 739int set_trigger_filter(char *filter_str,
 740		       struct event_trigger_data *trigger_data,
 741		       struct trace_event_file *file)
 742{
 743	struct event_trigger_data *data = trigger_data;
 744	struct event_filter *filter = NULL, *tmp;
 745	int ret = -EINVAL;
 746	char *s;
 747
 748	if (!filter_str) /* clear the current filter */
 749		goto assign;
 750
 751	s = strsep(&filter_str, " \t");
 752
 753	if (!strlen(s) || strcmp(s, "if") != 0)
 754		goto out;
 755
 756	if (!filter_str)
 757		goto out;
 758
 759	/* The filter is for the 'trigger' event, not the triggered event */
 760	ret = create_event_filter(file->tr, file->event_call,
 761				  filter_str, false, &filter);
 
 
 
 
 
 
 
 762	/*
 763	 * If create_event_filter() fails, filter still needs to be freed.
 764	 * Which the calling code will do with data->filter.
 765	 */
 766 assign:
 767	tmp = rcu_access_pointer(data->filter);
 768
 769	rcu_assign_pointer(data->filter, filter);
 770
 771	if (tmp) {
 772		/* Make sure the call is done with the filter */
 773		tracepoint_synchronize_unregister();
 
 
 
 
 
 
 774		free_event_filter(tmp);
 775	}
 776
 777	kfree(data->filter_str);
 778	data->filter_str = NULL;
 779
 780	if (filter_str) {
 781		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
 782		if (!data->filter_str) {
 783			free_event_filter(rcu_access_pointer(data->filter));
 784			data->filter = NULL;
 785			ret = -ENOMEM;
 786		}
 787	}
 788 out:
 789	return ret;
 790}
 791
 792static LIST_HEAD(named_triggers);
 793
 794/**
 795 * find_named_trigger - Find the common named trigger associated with @name
 796 * @name: The name of the set of named triggers to find the common data for
 797 *
 798 * Named triggers are sets of triggers that share a common set of
 799 * trigger data.  The first named trigger registered with a given name
 800 * owns the common trigger data that the others subsequently
 801 * registered with the same name will reference.  This function
 802 * returns the common trigger data associated with that first
 803 * registered instance.
 804 *
 805 * Return: the common trigger data for the given named trigger on
 806 * success, NULL otherwise.
 807 */
 808struct event_trigger_data *find_named_trigger(const char *name)
 809{
 810	struct event_trigger_data *data;
 811
 812	if (!name)
 813		return NULL;
 814
 815	list_for_each_entry(data, &named_triggers, named_list) {
 816		if (data->named_data)
 817			continue;
 818		if (strcmp(data->name, name) == 0)
 819			return data;
 820	}
 821
 822	return NULL;
 823}
 824
 825/**
 826 * is_named_trigger - determine if a given trigger is a named trigger
 827 * @test: The trigger data to test
 828 *
 829 * Return: true if 'test' is a named trigger, false otherwise.
 830 */
 831bool is_named_trigger(struct event_trigger_data *test)
 832{
 833	struct event_trigger_data *data;
 834
 835	list_for_each_entry(data, &named_triggers, named_list) {
 836		if (test == data)
 837			return true;
 838	}
 839
 840	return false;
 841}
 842
 843/**
 844 * save_named_trigger - save the trigger in the named trigger list
 845 * @name: The name of the named trigger set
 846 * @data: The trigger data to save
 847 *
 848 * Return: 0 if successful, negative error otherwise.
 849 */
 850int save_named_trigger(const char *name, struct event_trigger_data *data)
 851{
 852	data->name = kstrdup(name, GFP_KERNEL);
 853	if (!data->name)
 854		return -ENOMEM;
 855
 856	list_add(&data->named_list, &named_triggers);
 857
 858	return 0;
 859}
 860
 861/**
 862 * del_named_trigger - delete a trigger from the named trigger list
 863 * @data: The trigger data to delete
 864 */
 865void del_named_trigger(struct event_trigger_data *data)
 866{
 867	kfree(data->name);
 868	data->name = NULL;
 869
 870	list_del(&data->named_list);
 871}
 872
 873static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
 874{
 875	struct event_trigger_data *test;
 876
 877	list_for_each_entry(test, &named_triggers, named_list) {
 878		if (strcmp(test->name, data->name) == 0) {
 879			if (pause) {
 880				test->paused_tmp = test->paused;
 881				test->paused = true;
 882			} else {
 883				test->paused = test->paused_tmp;
 884			}
 885		}
 886	}
 887}
 888
 889/**
 890 * pause_named_trigger - Pause all named triggers with the same name
 891 * @data: The trigger data of a named trigger to pause
 892 *
 893 * Pauses a named trigger along with all other triggers having the
 894 * same name.  Because named triggers share a common set of data,
 895 * pausing only one is meaningless, so pausing one named trigger needs
 896 * to pause all triggers with the same name.
 897 */
 898void pause_named_trigger(struct event_trigger_data *data)
 899{
 900	__pause_named_trigger(data, true);
 901}
 902
 903/**
 904 * unpause_named_trigger - Un-pause all named triggers with the same name
 905 * @data: The trigger data of a named trigger to unpause
 906 *
 907 * Un-pauses a named trigger along with all other triggers having the
 908 * same name.  Because named triggers share a common set of data,
 909 * unpausing only one is meaningless, so unpausing one named trigger
 910 * needs to unpause all triggers with the same name.
 911 */
 912void unpause_named_trigger(struct event_trigger_data *data)
 913{
 914	__pause_named_trigger(data, false);
 915}
 916
 917/**
 918 * set_named_trigger_data - Associate common named trigger data
 919 * @data: The trigger data to associate
 920 * @named_data: The common named trigger to be associated
 921 *
 922 * Named triggers are sets of triggers that share a common set of
 923 * trigger data.  The first named trigger registered with a given name
 924 * owns the common trigger data that the others subsequently
 925 * registered with the same name will reference.  This function
 926 * associates the common trigger data from the first trigger with the
 927 * given trigger.
 928 */
 929void set_named_trigger_data(struct event_trigger_data *data,
 930			    struct event_trigger_data *named_data)
 931{
 932	data->named_data = named_data;
 933}
 934
 935struct event_trigger_data *
 936get_named_trigger_data(struct event_trigger_data *data)
 937{
 938	return data->named_data;
 939}
 940
 941static void
 942traceon_trigger(struct event_trigger_data *data,
 943		struct trace_buffer *buffer, void *rec,
 944		struct ring_buffer_event *event)
 945{
 
 
 
 
 
 
 
 
 
 
 946	if (tracing_is_on())
 947		return;
 948
 949	tracing_on();
 950}
 951
 952static void
 953traceon_count_trigger(struct event_trigger_data *data,
 954		      struct trace_buffer *buffer, void *rec,
 955		      struct ring_buffer_event *event)
 956{
 957	if (tracing_is_on())
 958		return;
 
 
 
 
 
 
 
 959
 960	if (!data->count)
 961		return;
 962
 963	if (data->count != -1)
 964		(data->count)--;
 965
 966	tracing_on();
 
 
 
 967}
 968
 969static void
 970traceoff_trigger(struct event_trigger_data *data,
 971		 struct trace_buffer *buffer, void *rec,
 972		 struct ring_buffer_event *event)
 973{
 
 
 
 
 
 
 
 
 
 
 974	if (!tracing_is_on())
 975		return;
 976
 977	tracing_off();
 978}
 979
 980static void
 981traceoff_count_trigger(struct event_trigger_data *data,
 982		       struct trace_buffer *buffer, void *rec,
 983		       struct ring_buffer_event *event)
 984{
 985	if (!tracing_is_on())
 986		return;
 
 
 
 
 
 
 
 987
 988	if (!data->count)
 989		return;
 990
 991	if (data->count != -1)
 992		(data->count)--;
 993
 994	tracing_off();
 
 
 
 995}
 996
 997static int
 998traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
 999		      struct event_trigger_data *data)
1000{
1001	return event_trigger_print("traceon", m, (void *)data->count,
1002				   data->filter_str);
1003}
1004
1005static int
1006traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1007		       struct event_trigger_data *data)
1008{
1009	return event_trigger_print("traceoff", m, (void *)data->count,
1010				   data->filter_str);
1011}
1012
1013static struct event_trigger_ops traceon_trigger_ops = {
1014	.func			= traceon_trigger,
1015	.print			= traceon_trigger_print,
1016	.init			= event_trigger_init,
1017	.free			= event_trigger_free,
1018};
1019
1020static struct event_trigger_ops traceon_count_trigger_ops = {
1021	.func			= traceon_count_trigger,
1022	.print			= traceon_trigger_print,
1023	.init			= event_trigger_init,
1024	.free			= event_trigger_free,
1025};
1026
1027static struct event_trigger_ops traceoff_trigger_ops = {
1028	.func			= traceoff_trigger,
1029	.print			= traceoff_trigger_print,
1030	.init			= event_trigger_init,
1031	.free			= event_trigger_free,
1032};
1033
1034static struct event_trigger_ops traceoff_count_trigger_ops = {
1035	.func			= traceoff_count_trigger,
1036	.print			= traceoff_trigger_print,
1037	.init			= event_trigger_init,
1038	.free			= event_trigger_free,
1039};
1040
1041static struct event_trigger_ops *
1042onoff_get_trigger_ops(char *cmd, char *param)
1043{
1044	struct event_trigger_ops *ops;
1045
1046	/* we register both traceon and traceoff to this callback */
1047	if (strcmp(cmd, "traceon") == 0)
1048		ops = param ? &traceon_count_trigger_ops :
1049			&traceon_trigger_ops;
1050	else
1051		ops = param ? &traceoff_count_trigger_ops :
1052			&traceoff_trigger_ops;
1053
1054	return ops;
1055}
1056
1057static struct event_command trigger_traceon_cmd = {
1058	.name			= "traceon",
1059	.trigger_type		= ETT_TRACE_ONOFF,
1060	.func			= event_trigger_callback,
1061	.reg			= register_trigger,
1062	.unreg			= unregister_trigger,
1063	.get_trigger_ops	= onoff_get_trigger_ops,
1064	.set_filter		= set_trigger_filter,
1065};
1066
1067static struct event_command trigger_traceoff_cmd = {
1068	.name			= "traceoff",
1069	.trigger_type		= ETT_TRACE_ONOFF,
1070	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1071	.func			= event_trigger_callback,
1072	.reg			= register_trigger,
1073	.unreg			= unregister_trigger,
1074	.get_trigger_ops	= onoff_get_trigger_ops,
1075	.set_filter		= set_trigger_filter,
1076};
1077
1078#ifdef CONFIG_TRACER_SNAPSHOT
1079static void
1080snapshot_trigger(struct event_trigger_data *data,
1081		 struct trace_buffer *buffer, void *rec,
1082		 struct ring_buffer_event *event)
1083{
1084	struct trace_event_file *file = data->private_data;
1085
1086	if (file)
1087		tracing_snapshot_instance(file->tr);
1088	else
1089		tracing_snapshot();
1090}
1091
1092static void
1093snapshot_count_trigger(struct event_trigger_data *data,
1094		       struct trace_buffer *buffer, void *rec,
1095		       struct ring_buffer_event *event)
1096{
1097	if (!data->count)
1098		return;
1099
1100	if (data->count != -1)
1101		(data->count)--;
1102
1103	snapshot_trigger(data, buffer, rec, event);
1104}
1105
1106static int
1107register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
1108			  struct event_trigger_data *data,
1109			  struct trace_event_file *file)
1110{
1111	if (tracing_alloc_snapshot_instance(file->tr) != 0)
1112		return 0;
 
 
1113
1114	return register_trigger(glob, ops, data, file);
1115}
1116
1117static int
1118snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1119		       struct event_trigger_data *data)
1120{
1121	return event_trigger_print("snapshot", m, (void *)data->count,
1122				   data->filter_str);
1123}
1124
1125static struct event_trigger_ops snapshot_trigger_ops = {
1126	.func			= snapshot_trigger,
1127	.print			= snapshot_trigger_print,
1128	.init			= event_trigger_init,
1129	.free			= event_trigger_free,
1130};
1131
1132static struct event_trigger_ops snapshot_count_trigger_ops = {
1133	.func			= snapshot_count_trigger,
1134	.print			= snapshot_trigger_print,
1135	.init			= event_trigger_init,
1136	.free			= event_trigger_free,
1137};
1138
1139static struct event_trigger_ops *
1140snapshot_get_trigger_ops(char *cmd, char *param)
1141{
1142	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1143}
1144
1145static struct event_command trigger_snapshot_cmd = {
1146	.name			= "snapshot",
1147	.trigger_type		= ETT_SNAPSHOT,
1148	.func			= event_trigger_callback,
1149	.reg			= register_snapshot_trigger,
1150	.unreg			= unregister_trigger,
1151	.get_trigger_ops	= snapshot_get_trigger_ops,
1152	.set_filter		= set_trigger_filter,
1153};
1154
1155static __init int register_trigger_snapshot_cmd(void)
1156{
1157	int ret;
1158
1159	ret = register_event_command(&trigger_snapshot_cmd);
1160	WARN_ON(ret < 0);
1161
1162	return ret;
1163}
1164#else
1165static __init int register_trigger_snapshot_cmd(void) { return 0; }
1166#endif /* CONFIG_TRACER_SNAPSHOT */
1167
1168#ifdef CONFIG_STACKTRACE
1169#ifdef CONFIG_UNWINDER_ORC
1170/* Skip 2:
1171 *   event_triggers_post_call()
1172 *   trace_event_raw_event_xxx()
1173 */
1174# define STACK_SKIP 2
1175#else
1176/*
1177 * Skip 4:
1178 *   stacktrace_trigger()
1179 *   event_triggers_post_call()
1180 *   trace_event_buffer_commit()
1181 *   trace_event_raw_event_xxx()
1182 */
1183#define STACK_SKIP 4
1184#endif
1185
1186static void
1187stacktrace_trigger(struct event_trigger_data *data,
1188		   struct trace_buffer *buffer,  void *rec,
1189		   struct ring_buffer_event *event)
1190{
1191	trace_dump_stack(STACK_SKIP);
 
 
 
 
 
1192}
1193
1194static void
1195stacktrace_count_trigger(struct event_trigger_data *data,
1196			 struct trace_buffer *buffer, void *rec,
1197			 struct ring_buffer_event *event)
1198{
1199	if (!data->count)
1200		return;
1201
1202	if (data->count != -1)
1203		(data->count)--;
1204
1205	stacktrace_trigger(data, buffer, rec, event);
1206}
1207
1208static int
1209stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1210			 struct event_trigger_data *data)
1211{
1212	return event_trigger_print("stacktrace", m, (void *)data->count,
1213				   data->filter_str);
1214}
1215
/* Ops for an unconditional "stacktrace" trigger (no :count modifier). */
static struct event_trigger_ops stacktrace_trigger_ops = {
	.func			= stacktrace_trigger,
	.print			= stacktrace_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
1222
/* Ops for a counted "stacktrace:N" trigger; fires at most N times. */
static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func			= stacktrace_count_trigger,
	.print			= stacktrace_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
1229
1230static struct event_trigger_ops *
1231stacktrace_get_trigger_ops(char *cmd, char *param)
1232{
1233	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1234}
1235
/*
 * The "stacktrace" event trigger command.  Runs as a post trigger
 * (EVENT_CMD_FL_POST_TRIGGER), i.e. after the event has been written --
 * see the STACK_SKIP frame accounting above.
 */
static struct event_command trigger_stacktrace_cmd = {
	.name			= "stacktrace",
	.trigger_type		= ETT_STACKTRACE,
	.flags			= EVENT_CMD_FL_POST_TRIGGER,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= stacktrace_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
1246
1247static __init int register_trigger_stacktrace_cmd(void)
1248{
1249	int ret;
1250
1251	ret = register_event_command(&trigger_stacktrace_cmd);
1252	WARN_ON(ret < 0);
1253
1254	return ret;
1255}
#else
/* CONFIG_STACKTRACE is off: registration is a successful no-op. */
static __init int register_trigger_stacktrace_cmd(void) { return 0; }
#endif /* CONFIG_STACKTRACE */
1259
/*
 * Undo register_trigger_traceon_traceoff_cmds(); used as the error
 * unwind when registering the traceoff command fails after traceon
 * already succeeded.
 */
static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}
1265
1266static void
1267event_enable_trigger(struct event_trigger_data *data,
1268		     struct trace_buffer *buffer,  void *rec,
1269		     struct ring_buffer_event *event)
1270{
1271	struct enable_trigger_data *enable_data = data->private_data;
1272
1273	if (enable_data->enable)
1274		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1275	else
1276		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1277}
1278
1279static void
1280event_enable_count_trigger(struct event_trigger_data *data,
1281			   struct trace_buffer *buffer,  void *rec,
1282			   struct ring_buffer_event *event)
1283{
1284	struct enable_trigger_data *enable_data = data->private_data;
1285
1286	if (!data->count)
1287		return;
1288
1289	/* Skip if the event is in a state we want to switch to */
1290	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1291		return;
1292
1293	if (data->count != -1)
1294		(data->count)--;
1295
1296	event_enable_trigger(data, buffer, rec, event);
1297}
1298
1299int event_enable_trigger_print(struct seq_file *m,
1300			       struct event_trigger_ops *ops,
1301			       struct event_trigger_data *data)
1302{
1303	struct enable_trigger_data *enable_data = data->private_data;
1304
1305	seq_printf(m, "%s:%s:%s",
1306		   enable_data->hist ?
1307		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1308		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1309		   enable_data->file->event_call->class->system,
1310		   trace_event_name(enable_data->file->event_call));
1311
1312	if (data->count == -1)
1313		seq_puts(m, ":unlimited");
1314	else
1315		seq_printf(m, ":count=%ld", data->count);
1316
1317	if (data->filter_str)
1318		seq_printf(m, " if %s\n", data->filter_str);
1319	else
1320		seq_putc(m, '\n');
1321
1322	return 0;
1323}
1324
1325void event_enable_trigger_free(struct event_trigger_ops *ops,
1326			       struct event_trigger_data *data)
1327{
1328	struct enable_trigger_data *enable_data = data->private_data;
1329
1330	if (WARN_ON_ONCE(data->ref <= 0))
1331		return;
1332
1333	data->ref--;
1334	if (!data->ref) {
1335		/* Remove the SOFT_MODE flag */
1336		trace_event_enable_disable(enable_data->file, 0, 1);
1337		module_put(enable_data->file->event_call->mod);
1338		trigger_data_free(data);
1339		kfree(enable_data);
1340	}
1341}
1342
/* Ops for an unconditional enable_event trigger. */
static struct event_trigger_ops event_enable_trigger_ops = {
	.func			= event_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
1349
/* Ops for a counted enable_event:...:N trigger. */
static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func			= event_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
1356
/*
 * Ops for an unconditional disable_event trigger; shares the enable
 * trigger functions, which check enable_trigger_data->enable.
 */
static struct event_trigger_ops event_disable_trigger_ops = {
	.func			= event_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
1363
/* Ops for a counted disable_event:...:N trigger. */
static struct event_trigger_ops event_disable_count_trigger_ops = {
	.func			= event_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
1370
/**
 * event_enable_trigger_func - parse and attach an enable/disable_event trigger
 * @cmd_ops: the event_command that matched (enable_event or disable_event)
 * @file: the trace_event_file the trigger is written to
 * @glob: the raw command; a leading '!' means remove an existing trigger
 * @cmd: the command name that matched (used to pick enable vs disable,
 *       and hist vs event variant when CONFIG_HIST_TRIGGERS is set)
 * @param: the trigger body: "system:event[:count] [if filter]"
 *
 * Looks up the target event, allocates trigger_data plus the
 * enable_trigger_data payload, optionally parses a count and filter,
 * pins the target event's module and soft-enables the event, then
 * registers the trigger.  Errors unwind through the goto labels in
 * reverse order of acquisition.
 *
 * Returns 0 on success (including removal), negative errno on failure.
 */
int event_enable_trigger_func(struct event_command *cmd_ops,
			      struct trace_event_file *file,
			      char *glob, char *cmd, char *param)
{
	struct trace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	struct trace_array *tr = file->tr;


	const char *system;
	const char *event;
	bool hist = false;
	char *trigger;
	char *number;
	bool enable;
	int ret;

	if (!param)


		return -EINVAL;

	/* separate the trigger from the filter (s:e:n [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;
	if (param) {
		param = skip_spaces(param);
		if (!*param)
			param = NULL;
	}

	/* The trigger body names the event to toggle: system:event[:count] */
	system = strsep(&trigger, ":");
	if (!trigger)
		return -EINVAL;

	event = strsep(&trigger, ":");

	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

#ifdef CONFIG_HIST_TRIGGERS
	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
		(strcmp(cmd, DISABLE_HIST_STR) == 0));

	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data) {
		kfree(trigger_data);
		goto out;
	}

	/* count defaults to -1 == unlimited */
	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	enable_data->hist = hist;
	enable_data->enable = enable;
	enable_data->file = event_enable_file;
	trigger_data->private_data = enable_data;

	/* '!' prefix: remove a matching trigger instead of adding one */
	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);






		/* this trigger_data was only a lookup key; free it */
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		goto out;
	}

	/* Up the trigger_data count to make sure nothing frees it on failure */
	event_trigger_init(trigger_ops, trigger_data);

	if (trigger) {
		number = strsep(&trigger, ":");


		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(event_enable_file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	/* Put the target event into SOFT_MODE so the trigger can toggle it */
	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
	/* Drop the extra ref taken by event_trigger_init() above */
	event_trigger_free(trigger_ops, trigger_data);
 out:
	return ret;

	/* Error unwind: release resources in reverse acquisition order. */
 out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
	module_put(event_enable_file->event_call->mod);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	event_trigger_free(trigger_ops, trigger_data);
	kfree(enable_data);

	goto out;
}
1521
/**
 * event_enable_register_trigger - event_command @reg for enable triggers
 * @glob: the raw command string (unused here)
 * @ops: the trigger ops (unused here; @data->ops is used instead)
 * @data: the trigger to add to @file's trigger list
 * @file: the trace_event_file to attach the trigger to
 *
 * Rejects a duplicate: another trigger of the same type already
 * targeting the same event file returns -EEXIST.  On success returns
 * the number of triggers registered (1), mirroring register_trigger().
 */
int event_enable_register_trigger(char *glob,
				  struct event_trigger_ops *ops,
				  struct event_trigger_data *data,
				  struct trace_event_file *file)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct enable_trigger_data *test_enable_data;
	struct event_trigger_data *test;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	/* Refuse two triggers of the same type aimed at the same event */
	list_for_each_entry(test, &file->triggers, list) {
		test_enable_data = test->private_data;
		if (test_enable_data &&
		    (test->cmd_ops->trigger_type ==
		     data->cmd_ops->trigger_type) &&
		    (test_enable_data->file == enable_data->file)) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	/* Roll back the list insertion if enabling the trigger fails */
	if (trace_event_trigger_enable_disable(file, 1) < 0) {

		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}
1563
/**
 * event_enable_unregister_trigger - event_command @unreg for enable triggers
 * @glob: the raw command string (unused here)
 * @ops: the trigger ops (unused here)
 * @test: a template trigger describing what to remove (type + target file)
 * @file: the trace_event_file to remove the trigger from
 *
 * Finds the registered trigger matching @test's type and target event
 * file, unlinks it from @file's trigger list and drops its reference.
 * Silently does nothing if no match is found.
 */
void event_enable_unregister_trigger(char *glob,
				     struct event_trigger_ops *ops,
				     struct event_trigger_data *test,
				     struct trace_event_file *file)
{
	struct enable_trigger_data *test_enable_data = test->private_data;

	struct enable_trigger_data *enable_data;
	struct event_trigger_data *data;
	bool unregistered = false;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(data, &file->triggers, list) {
		enable_data = data->private_data;
		if (enable_data &&
		    (data->cmd_ops->trigger_type ==
		     test->cmd_ops->trigger_type) &&
		    (enable_data->file == test_enable_data->file)) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	/* 'data' is only valid here if the loop broke (unregistered == true) */
	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}
1593
1594static struct event_trigger_ops *
1595event_enable_get_trigger_ops(char *cmd, char *param)
1596{
1597	struct event_trigger_ops *ops;
1598	bool enable;
1599
1600#ifdef CONFIG_HIST_TRIGGERS
1601	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1602		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1603#else
1604	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1605#endif
1606	if (enable)
1607		ops = param ? &event_enable_count_trigger_ops :
1608			&event_enable_trigger_ops;
1609	else
1610		ops = param ? &event_disable_count_trigger_ops :
1611			&event_disable_trigger_ops;
1612
1613	return ops;
1614}
1615
/* The "enable_event" (and enable_hist) trigger command. */
static struct event_command trigger_enable_cmd = {
	.name			= ENABLE_EVENT_STR,
	.trigger_type		= ETT_EVENT_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.get_trigger_ops	= event_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
1625
/* The "disable_event" (and disable_hist) trigger command. */
static struct event_command trigger_disable_cmd = {
	.name			= DISABLE_EVENT_STR,
	.trigger_type		= ETT_EVENT_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.get_trigger_ops	= event_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
1635
/*
 * Undo register_trigger_enable_disable_cmds(); used as the error
 * unwind when registering the disable command fails after enable
 * already succeeded.
 */
static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}
1641
1642static __init int register_trigger_enable_disable_cmds(void)
1643{
1644	int ret;
1645
1646	ret = register_event_command(&trigger_enable_cmd);
1647	if (WARN_ON(ret < 0))
1648		return ret;
1649	ret = register_event_command(&trigger_disable_cmd);
1650	if (WARN_ON(ret < 0))
1651		unregister_trigger_enable_disable_cmds();
1652
1653	return ret;
1654}
1655
1656static __init int register_trigger_traceon_traceoff_cmds(void)
1657{
1658	int ret;
1659
1660	ret = register_event_command(&trigger_traceon_cmd);
1661	if (WARN_ON(ret < 0))
1662		return ret;
1663	ret = register_event_command(&trigger_traceoff_cmd);
1664	if (WARN_ON(ret < 0))
1665		unregister_trigger_traceon_traceoff_cmds();
1666
1667	return ret;
1668}
1669
/*
 * Register all built-in event trigger commands at boot.  Each helper
 * WARNs on failure itself; registration order determines the order the
 * commands appear on the trigger_commands list.
 */
__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();
	register_trigger_hist_enable_disable_cmds();
	register_trigger_hist_cmd();

	return 0;
}