v3.15
 
   1/*
   2 * trace_events_trigger - trace event triggers
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License as published by
   6 * the Free Software Foundation; either version 2 of the License, or
   7 * (at your option) any later version.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write to the Free Software
  16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  17 *
  18 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
  19 */
  20
  21#include <linux/module.h>
  22#include <linux/ctype.h>
  23#include <linux/mutex.h>
  24#include <linux/slab.h>
  25
  26#include "trace.h"
  27
  28static LIST_HEAD(trigger_commands);
  29static DEFINE_MUTEX(trigger_cmd_mutex);
  30
  31static void
  32trigger_data_free(struct event_trigger_data *data)
  33{
  34	if (data->cmd_ops->set_filter)
  35		data->cmd_ops->set_filter(NULL, data, NULL);
  36
  37	synchronize_sched(); /* make sure current triggers exit before free */
  38	kfree(data);
  39}
  40
  41/**
  42 * event_triggers_call - Call triggers associated with a trace event
  43 * @file: The ftrace_event_file associated with the event
  44 * @rec: The trace entry for the event, NULL for unconditional invocation
  45 *
  46 * For each trigger associated with an event, invoke the trigger
  47 * function registered with the associated trigger command.  If rec is
  48 * non-NULL, it means that the trigger requires further processing and
  49 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
  50 * trigger has a filter associated with it, rec will be checked against
  51 * the filter and, if the record matches, the trigger will be invoked.
  52 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
  53 * in any case until the current event is written, the trigger
  54 * function isn't invoked but the bit associated with the deferred
  55 * trigger is set in the return value.
  56 *
  57 * Returns an enum event_trigger_type value containing a set bit for
  58 * any trigger that should be deferred, ETT_NONE if nothing to defer.
  59 *
  60 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
  61 *
  62 * Return: an enum event_trigger_type value containing a set bit for
  63 * any trigger that should be deferred, ETT_NONE if nothing to defer.
  64 */
  65enum event_trigger_type
  66event_triggers_call(struct ftrace_event_file *file, void *rec)
  67{
  68	struct event_trigger_data *data;
  69	enum event_trigger_type tt = ETT_NONE;
  70	struct event_filter *filter;
  71
  72	if (list_empty(&file->triggers))
  73		return tt;
  74
  75	list_for_each_entry_rcu(data, &file->triggers, list) {
  76		if (!rec) {
  77			data->ops->func(data);
  78			continue;
  79		}
  80		filter = rcu_dereference_sched(data->filter);
  81		if (filter && !filter_match_preds(filter, rec))
  82			continue;
  83		if (data->cmd_ops->post_trigger) {
  84			tt |= data->cmd_ops->trigger_type;
  85			continue;
  86		}
  87		data->ops->func(data);
  88	}
  89	return tt;
  90}
  91EXPORT_SYMBOL_GPL(event_triggers_call);
  92
  93/**
  94 * event_triggers_post_call - Call 'post_triggers' for a trace event
  95 * @file: The ftrace_event_file associated with the event
  96 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
  97 *
  98 * For each trigger associated with an event, invoke the trigger
  99 * function registered with the associated trigger command, if the
 100 * corresponding bit is set in the tt enum passed into this function.
 101 * See @event_triggers_call for details on how those bits are set.
 102 *
 103 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 104 */
 105void
 106event_triggers_post_call(struct ftrace_event_file *file,
 107			 enum event_trigger_type tt)
 108{
 109	struct event_trigger_data *data;
 110
 111	list_for_each_entry_rcu(data, &file->triggers, list) {
 112		if (data->cmd_ops->trigger_type & tt)
 113			data->ops->func(data);
 114	}
 115}
 116EXPORT_SYMBOL_GPL(event_triggers_post_call);
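/*
 * Illustrative sketch (simplified, not verbatim kernel code): a caller
 * that emits a trace event pairs the two entry points above roughly
 * like this; in the kernel the equivalent logic lives in the event
 * commit path (e.g. event_trigger_unlock_commit() in trace.h), and the
 * variable names below are placeholders.
 *
 *	enum event_trigger_type tt;
 *
 *	tt = event_triggers_call(file, entry);      // immediate triggers fire now
 *	trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 *	if (tt != ETT_NONE)
 *		event_triggers_post_call(file, tt); // deferred 'post' triggers
 */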
 117
 118#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)
 119
 120static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
 121{
 122	struct ftrace_event_file *event_file = event_file_data(m->private);
 123
 124	if (t == SHOW_AVAILABLE_TRIGGERS)
 125		return NULL;
 126
 127	return seq_list_next(t, &event_file->triggers, pos);
 128}
 129
 130static void *trigger_start(struct seq_file *m, loff_t *pos)
 131{
 132	struct ftrace_event_file *event_file;
 133
 134	/* ->stop() is called even if ->start() fails */
 135	mutex_lock(&event_mutex);
 136	event_file = event_file_data(m->private);
 137	if (unlikely(!event_file))
 138		return ERR_PTR(-ENODEV);
 139
 140	if (list_empty(&event_file->triggers))
 141		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
 142
 143	return seq_list_start(&event_file->triggers, *pos);
 144}
 145
 146static void trigger_stop(struct seq_file *m, void *t)
 147{
 148	mutex_unlock(&event_mutex);
 149}
 150
 151static int trigger_show(struct seq_file *m, void *v)
 152{
 153	struct event_trigger_data *data;
 154	struct event_command *p;
 155
 156	if (v == SHOW_AVAILABLE_TRIGGERS) {
 157		seq_puts(m, "# Available triggers:\n");
 158		seq_putc(m, '#');
 159		mutex_lock(&trigger_cmd_mutex);
 160		list_for_each_entry_reverse(p, &trigger_commands, list)
 161			seq_printf(m, " %s", p->name);
 162		seq_putc(m, '\n');
 163		mutex_unlock(&trigger_cmd_mutex);
 164		return 0;
 165	}
 166
 167	data = list_entry(v, struct event_trigger_data, list);
 168	data->ops->print(m, data->ops, data);
 169
 170	return 0;
 171}
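/*
 * Illustrative output: when no triggers are set, reading a per-event
 * 'trigger' file takes the SHOW_AVAILABLE_TRIGGERS branch above and
 * prints something like
 *
 *   # Available triggers:
 *   # traceon traceoff snapshot stacktrace enable_event disable_event
 *
 * (the exact list depends on which commands were registered at boot
 * and on the kernel configuration).
 */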
 172
 173static const struct seq_operations event_triggers_seq_ops = {
 174	.start = trigger_start,
 175	.next = trigger_next,
 176	.stop = trigger_stop,
 177	.show = trigger_show,
 178};
 179
 180static int event_trigger_regex_open(struct inode *inode, struct file *file)
 181{
 182	int ret = 0;
 183
 184	mutex_lock(&event_mutex);
 185
 186	if (unlikely(!event_file_data(file))) {
 187		mutex_unlock(&event_mutex);
 188		return -ENODEV;
 189	}
 190
 191	if (file->f_mode & FMODE_READ) {
 192		ret = seq_open(file, &event_triggers_seq_ops);
 193		if (!ret) {
 194			struct seq_file *m = file->private_data;
 195			m->private = file;
 196		}
 197	}
 198
 199	mutex_unlock(&event_mutex);
 200
 201	return ret;
 202}
 203
 204static int trigger_process_regex(struct ftrace_event_file *file, char *buff)
 205{
 206	char *command, *next = buff;
 207	struct event_command *p;
 208	int ret = -EINVAL;
 209
 210	command = strsep(&next, ": \t");
 211	command = (command[0] != '!') ? command : command + 1;
 212
 213	mutex_lock(&trigger_cmd_mutex);
 214	list_for_each_entry(p, &trigger_commands, list) {
 215		if (strcmp(p->name, command) == 0) {
 216			ret = p->func(p, file, buff, command, next);
 217			goto out_unlock;
 218		}
 219	}
 220 out_unlock:
 221	mutex_unlock(&trigger_cmd_mutex);
 222
 223	return ret;
 224}
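/*
 * Example usage (cf. Documentation/trace/events.txt): the strings
 * parsed here were written to a per-event 'trigger' file, for example:
 *
 *   echo 'traceoff' > /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 *   echo 'snapshot:5 if prev_pid == 0' > .../events/sched/sched_switch/trigger
 *   echo '!traceoff' > .../events/sched/sched_switch/trigger    # remove it again
 *
 * strsep() above peels off the leading command name ("traceoff",
 * "snapshot", ...); a leading '!' selects removal of an existing trigger.
 */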
 225
 226static ssize_t event_trigger_regex_write(struct file *file,
 227					 const char __user *ubuf,
 228					 size_t cnt, loff_t *ppos)
 229{
 230	struct ftrace_event_file *event_file;
 231	ssize_t ret;
 232	char *buf;
 233
 234	if (!cnt)
 235		return 0;
 236
 237	if (cnt >= PAGE_SIZE)
 238		return -EINVAL;
 239
 240	buf = (char *)__get_free_page(GFP_TEMPORARY);
 241	if (!buf)
 242		return -ENOMEM;
 243
 244	if (copy_from_user(buf, ubuf, cnt)) {
 245		free_page((unsigned long)buf);
 246		return -EFAULT;
 247	}
 248	buf[cnt] = '\0';
 249	strim(buf);
 250
 251	mutex_lock(&event_mutex);
 252	event_file = event_file_data(file);
 253	if (unlikely(!event_file)) {
 254		mutex_unlock(&event_mutex);
 255		free_page((unsigned long)buf);
 256		return -ENODEV;
 257	}
 258	ret = trigger_process_regex(event_file, buf);
 259	mutex_unlock(&event_mutex);
 260
 261	free_page((unsigned long)buf);
 262	if (ret < 0)
 263		goto out;
 264
 265	*ppos += cnt;
 266	ret = cnt;
 267 out:
 268	return ret;
 269}
 270
 271static int event_trigger_regex_release(struct inode *inode, struct file *file)
 272{
 273	mutex_lock(&event_mutex);
 274
 275	if (file->f_mode & FMODE_READ)
 276		seq_release(inode, file);
 277
 278	mutex_unlock(&event_mutex);
 279
 280	return 0;
 281}
 282
 283static ssize_t
 284event_trigger_write(struct file *filp, const char __user *ubuf,
 285		    size_t cnt, loff_t *ppos)
 286{
 287	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
 288}
 289
 290static int
 291event_trigger_open(struct inode *inode, struct file *filp)
 292{
 293	return event_trigger_regex_open(inode, filp);
 294}
 295
 296static int
 297event_trigger_release(struct inode *inode, struct file *file)
 298{
 299	return event_trigger_regex_release(inode, file);
 300}
 301
 302const struct file_operations event_trigger_fops = {
 303	.open = event_trigger_open,
 304	.read = seq_read,
 305	.write = event_trigger_write,
 306	.llseek = tracing_lseek,
 307	.release = event_trigger_release,
 308};
 309
 310/*
 311 * Currently we only register event commands from __init, so mark this
 312 * __init too.
 313 */
 314static __init int register_event_command(struct event_command *cmd)
 315{
 316	struct event_command *p;
 317	int ret = 0;
 318
 319	mutex_lock(&trigger_cmd_mutex);
 320	list_for_each_entry(p, &trigger_commands, list) {
 321		if (strcmp(cmd->name, p->name) == 0) {
 322			ret = -EBUSY;
 323			goto out_unlock;
 324		}
 325	}
 326	list_add(&cmd->list, &trigger_commands);
 327 out_unlock:
 328	mutex_unlock(&trigger_cmd_mutex);
 329
 330	return ret;
 331}
 332
 333/*
 334 * Currently we only unregister event commands from __init, so mark
 335 * this __init too.
 336 */
 337static __init int unregister_event_command(struct event_command *cmd)
 338{
 339	struct event_command *p, *n;
 340	int ret = -ENODEV;
 341
 342	mutex_lock(&trigger_cmd_mutex);
 343	list_for_each_entry_safe(p, n, &trigger_commands, list) {
 344		if (strcmp(cmd->name, p->name) == 0) {
 345			ret = 0;
 346			list_del_init(&p->list);
 347			goto out_unlock;
 348		}
 349	}
 350 out_unlock:
 351	mutex_unlock(&trigger_cmd_mutex);
 352
 353	return ret;
 354}
 355
 356/**
 357 * event_trigger_print - Generic event_trigger_ops @print implementation
 358 * @name: The name of the event trigger
 359 * @m: The seq_file being printed to
 360 * @data: Trigger-specific data
 361 * @filter_str: filter_str to print, if present
 362 *
 363 * Common implementation for event triggers to print themselves.
 364 *
 365 * Usually wrapped by a function that simply sets the @name of the
 366 * trigger command and then invokes this.
 367 *
 368 * Return: 0 on success, errno otherwise
 369 */
 370static int
 371event_trigger_print(const char *name, struct seq_file *m,
 372		    void *data, char *filter_str)
 373{
 374	long count = (long)data;
 375
 376	seq_printf(m, "%s", name);
 377
 378	if (count == -1)
 379		seq_puts(m, ":unlimited");
 380	else
 381		seq_printf(m, ":count=%ld", count);
 382
 383	if (filter_str)
 384		seq_printf(m, " if %s\n", filter_str);
 385	else
 386		seq_puts(m, "\n");
 387
 388	return 0;
 389}
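/*
 * Illustrative output: for a trigger registered as
 * 'traceoff:5 if pid == 1234' the function above prints
 *
 *   traceoff:count=5 if pid == 1234
 *
 * while a trigger registered without a count prints as
 * 'traceoff:unlimited'.
 */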
 390
 391/**
 392 * event_trigger_init - Generic event_trigger_ops @init implementation
 393 * @ops: The trigger ops associated with the trigger
 394 * @data: Trigger-specific data
 395 *
 396 * Common implementation of event trigger initialization.
 397 *
 398 * Usually used directly as the @init method in event trigger
 399 * implementations.
 400 *
 401 * Return: 0 on success, errno otherwise
 402 */
 403static int
 404event_trigger_init(struct event_trigger_ops *ops,
 405		   struct event_trigger_data *data)
 406{
 407	data->ref++;
 408	return 0;
 409}
 410
 411/**
 412 * event_trigger_free - Generic event_trigger_ops @free implementation
 413 * @ops: The trigger ops associated with the trigger
 414 * @data: Trigger-specific data
 415 *
 416 * Common implementation of event trigger de-initialization.
 417 *
 418 * Usually used directly as the @free method in event trigger
 419 * implementations.
 420 */
 421static void
 422event_trigger_free(struct event_trigger_ops *ops,
 423		   struct event_trigger_data *data)
 424{
 425	if (WARN_ON_ONCE(data->ref <= 0))
 426		return;
 427
 428	data->ref--;
 429	if (!data->ref)
 430		trigger_data_free(data);
 431}
 432
 433static int trace_event_trigger_enable_disable(struct ftrace_event_file *file,
 434					      int trigger_enable)
 435{
 436	int ret = 0;
 437
 438	if (trigger_enable) {
 439		if (atomic_inc_return(&file->tm_ref) > 1)
 440			return ret;
 441		set_bit(FTRACE_EVENT_FL_TRIGGER_MODE_BIT, &file->flags);
 442		ret = trace_event_enable_disable(file, 1, 1);
 443	} else {
 444		if (atomic_dec_return(&file->tm_ref) > 0)
 445			return ret;
 446		clear_bit(FTRACE_EVENT_FL_TRIGGER_MODE_BIT, &file->flags);
 447		ret = trace_event_enable_disable(file, 0, 1);
 448	}
 449
 450	return ret;
 451}
 452
 453/**
 454 * clear_event_triggers - Clear all triggers associated with a trace array
 455 * @tr: The trace array to clear
 456 *
 457 * For each trigger, the triggering event has its tm_ref decremented
 458 * via trace_event_trigger_enable_disable(), and any associated event
 459 * (in the case of enable/disable_event triggers) will have its sm_ref
 460 * decremented via free()->trace_event_enable_disable().  That
 461 * combination effectively reverses the soft-mode/trigger state added
 462 * by trigger registration.
 463 *
 464 * Must be called with event_mutex held.
 465 */
 466void
 467clear_event_triggers(struct trace_array *tr)
 468{
 469	struct ftrace_event_file *file;
 470
 471	list_for_each_entry(file, &tr->events, list) {
 472		struct event_trigger_data *data;
 473		list_for_each_entry_rcu(data, &file->triggers, list) {
 474			trace_event_trigger_enable_disable(file, 0);
 475			if (data->ops->free)
 476				data->ops->free(data->ops, data);
 477		}
 478	}
 479}
 480
 481/**
 482 * update_cond_flag - Set or reset the TRIGGER_COND bit
 483 * @file: The ftrace_event_file associated with the event
 484 *
 485 * If an event has triggers and any of those triggers has a filter or
 486 * a post_trigger, trigger invocation needs to be deferred until after
 487 * the current event has logged its data, and the event should have
 488 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 489 * cleared.
 490 */
 491static void update_cond_flag(struct ftrace_event_file *file)
 492{
 493	struct event_trigger_data *data;
 494	bool set_cond = false;
 495
 496	list_for_each_entry_rcu(data, &file->triggers, list) {
 497		if (data->filter || data->cmd_ops->post_trigger) {
 498			set_cond = true;
 499			break;
 500		}
 501	}
 502
 503	if (set_cond)
 504		set_bit(FTRACE_EVENT_FL_TRIGGER_COND_BIT, &file->flags);
 505	else
 506		clear_bit(FTRACE_EVENT_FL_TRIGGER_COND_BIT, &file->flags);
 507}
 508
 509/**
 510 * register_trigger - Generic event_command @reg implementation
 511 * @glob: The raw string used to register the trigger
 512 * @ops: The trigger ops associated with the trigger
 513 * @data: Trigger-specific data to associate with the trigger
 514 * @file: The ftrace_event_file associated with the event
 515 *
 516 * Common implementation for event trigger registration.
 517 *
 518 * Usually used directly as the @reg method in event command
 519 * implementations.
 520 *
 521 * Return: the number of triggers registered on success, negative errno otherwise
 522 */
 523static int register_trigger(char *glob, struct event_trigger_ops *ops,
 524			    struct event_trigger_data *data,
 525			    struct ftrace_event_file *file)
 526{
 527	struct event_trigger_data *test;
 528	int ret = 0;
 529
 530	list_for_each_entry_rcu(test, &file->triggers, list) {
 531		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
 532			ret = -EEXIST;
 533			goto out;
 534		}
 535	}
 536
 537	if (data->ops->init) {
 538		ret = data->ops->init(data->ops, data);
 539		if (ret < 0)
 540			goto out;
 541	}
 542
 543	list_add_rcu(&data->list, &file->triggers);
 544	ret++;
 545
 546	if (trace_event_trigger_enable_disable(file, 1) < 0) {
 547		list_del_rcu(&data->list);
 548		ret--;
 549	}
 550	update_cond_flag(file);
 551out:
 552	return ret;
 553}
 554
 555/**
 556 * unregister_trigger - Generic event_command @unreg implementation
 557 * @glob: The raw string used to register the trigger
 558 * @ops: The trigger ops associated with the trigger
 559 * @test: Trigger-specific data used to find the trigger to remove
 560 * @file: The ftrace_event_file associated with the event
 561 *
 562 * Common implementation for event trigger unregistration.
 563 *
 564 * Usually used directly as the @unreg method in event command
 565 * implementations.
 566 */
 567static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
 568			       struct event_trigger_data *test,
 569			       struct ftrace_event_file *file)
 570{
 571	struct event_trigger_data *data;
 572	bool unregistered = false;
 573
 574	list_for_each_entry_rcu(data, &file->triggers, list) {
 575		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
 576			unregistered = true;
 577			list_del_rcu(&data->list);
 578			update_cond_flag(file);
 579			trace_event_trigger_enable_disable(file, 0);
 580			break;
 581		}
 582	}
 583
 584	if (unregistered && data->ops->free)
 585		data->ops->free(data->ops, data);
 586}
 587
 588/**
 589 * event_trigger_callback - Generic event_command @func implementation
 590 * @cmd_ops: The command ops, used for trigger registration
 591 * @file: The ftrace_event_file associated with the event
 592 * @glob: The raw string used to register the trigger
 593 * @cmd: The cmd portion of the string used to register the trigger
 594 * @param: The params portion of the string used to register the trigger
 595 *
 596 * Common implementation for event command parsing and trigger
 597 * instantiation.
 598 *
 599 * Usually used directly as the @func method in event command
 600 * implementations.
 601 *
 602 * Return: 0 on success, errno otherwise
 603 */
 604static int
 605event_trigger_callback(struct event_command *cmd_ops,
 606		       struct ftrace_event_file *file,
 607		       char *glob, char *cmd, char *param)
 608{
 609	struct event_trigger_data *trigger_data;
 610	struct event_trigger_ops *trigger_ops;
 611	char *trigger = NULL;
 612	char *number;
 613	int ret;
 614
 615	/* separate the trigger from the filter (t:n [if filter]) */
 616	if (param && isdigit(param[0]))
 617		trigger = strsep(&param, " \t");
 618
 619	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
 620
 621	ret = -ENOMEM;
 622	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
 623	if (!trigger_data)
 624		goto out;
 625
 626	trigger_data->count = -1;
 627	trigger_data->ops = trigger_ops;
 628	trigger_data->cmd_ops = cmd_ops;
 629	INIT_LIST_HEAD(&trigger_data->list);
 630
 631	if (glob[0] == '!') {
 632		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
 633		kfree(trigger_data);
 634		ret = 0;
 635		goto out;
 636	}
 637
 638	if (trigger) {
 639		number = strsep(&trigger, ":");
 640
 641		ret = -EINVAL;
 642		if (!strlen(number))
 643			goto out_free;
 644
 645		/*
 646		 * We use the callback data field (which is a pointer)
 647		 * as our counter.
 648		 */
 649		ret = kstrtoul(number, 0, &trigger_data->count);
 650		if (ret)
 651			goto out_free;
 652	}
 653
 654	if (!param) /* if param is non-empty, it's supposed to be a filter */
 655		goto out_reg;
 656
 657	if (!cmd_ops->set_filter)
 658		goto out_reg;
 659
 660	ret = cmd_ops->set_filter(param, trigger_data, file);
 661	if (ret < 0)
 662		goto out_free;
 663
 664 out_reg:
 665	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
 666	/*
 667	 * The above returns the number of triggers it registered on
 668	 * success, but returns zero if it didn't register any.
 669	 * Consider registering none a failure too.
 670	 */
 671	if (!ret) {
 672		ret = -ENOENT;
 673		goto out_free;
 674	} else if (ret < 0)
 675		goto out_free;
 676	ret = 0;
 677 out:
 678	return ret;
 679
 680 out_free:
 681	if (cmd_ops->set_filter)
 682		cmd_ops->set_filter(NULL, trigger_data, NULL);
 683	kfree(trigger_data);
 684	goto out;
 685}
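/*
 * Worked example (hypothetical input): for 'traceoff:5 if pid == 1234'
 * written to a trigger file, this callback is invoked with
 * glob == cmd == "traceoff" (glob keeps a leading '!' when the trigger
 * is being removed) and param == "5 if pid == 1234".  The code above
 * then splits param into trigger == "5" (the count stored in
 * trigger_data->count) and param == "if pid == 1234", which is handed
 * to cmd_ops->set_filter().
 */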
 686
 687/**
 688 * set_trigger_filter - Generic event_command @set_filter implementation
 689 * @filter_str: The filter string for the trigger, NULL to remove filter
 690 * @trigger_data: Trigger-specific data
 691 * @file: The ftrace_event_file associated with the event
 692 *
 693 * Common implementation for event command filter parsing and filter
 694 * instantiation.
 695 *
 696 * Usually used directly as the @set_filter method in event command
 697 * implementations.
 698 *
 699 * Also used to remove a filter (if filter_str = NULL).
 700 *
 701 * Return: 0 on success, errno otherwise
 702 */
 703static int set_trigger_filter(char *filter_str,
 704			      struct event_trigger_data *trigger_data,
 705			      struct ftrace_event_file *file)
 706{
 707	struct event_trigger_data *data = trigger_data;
 708	struct event_filter *filter = NULL, *tmp;
 709	int ret = -EINVAL;
 710	char *s;
 711
 712	if (!filter_str) /* clear the current filter */
 713		goto assign;
 714
 715	s = strsep(&filter_str, " \t");
 716
 717	if (!strlen(s) || strcmp(s, "if") != 0)
 718		goto out;
 719
 720	if (!filter_str)
 721		goto out;
 722
 723	/* The filter is for the 'trigger' event, not the triggered event */
 724	ret = create_event_filter(file->event_call, filter_str, false, &filter);
 725	if (ret)
 726		goto out;
 727 assign:
 728	tmp = rcu_access_pointer(data->filter);
 729
 730	rcu_assign_pointer(data->filter, filter);
 731
 732	if (tmp) {
 733		/* Make sure the call is done with the filter */
 734		synchronize_sched();
 735		free_event_filter(tmp);
 736	}
 737
 738	kfree(data->filter_str);
 739	data->filter_str = NULL;
 740
 741	if (filter_str) {
 742		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
 743		if (!data->filter_str) {
 744			free_event_filter(rcu_access_pointer(data->filter));
 745			data->filter = NULL;
 746			ret = -ENOMEM;
 747		}
 748	}
 749 out:
 750	return ret;
 751}
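/*
 * Illustrative: continuing the example above, the filter_str received
 * here is "if pid == 1234"; strsep() strips the leading "if" and
 * create_event_filter() compiles "pid == 1234" against the fields of
 * the event the trigger is attached to.
 */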
 752
 753static void
 754traceon_trigger(struct event_trigger_data *data)
 755{
 756	if (tracing_is_on())
 757		return;
 758
 759	tracing_on();
 760}
 761
 762static void
 763traceon_count_trigger(struct event_trigger_data *data)
 764{
 765	if (tracing_is_on())
 766		return;
 767
 768	if (!data->count)
 769		return;
 770
 771	if (data->count != -1)
 772		(data->count)--;
 773
 774	tracing_on();
 775}
 776
 777static void
 778traceoff_trigger(struct event_trigger_data *data)
 779{
 780	if (!tracing_is_on())
 781		return;
 782
 783	tracing_off();
 784}
 785
 786static void
 787traceoff_count_trigger(struct event_trigger_data *data)
 788{
 789	if (!tracing_is_on())
 790		return;
 791
 792	if (!data->count)
 793		return;
 794
 795	if (data->count != -1)
 796		(data->count)--;
 797
 798	tracing_off();
 799}
 800
 801static int
 802traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
 803		      struct event_trigger_data *data)
 804{
 805	return event_trigger_print("traceon", m, (void *)data->count,
 806				   data->filter_str);
 807}
 808
 809static int
 810traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
 811		       struct event_trigger_data *data)
 812{
 813	return event_trigger_print("traceoff", m, (void *)data->count,
 814				   data->filter_str);
 815}
 816
 817static struct event_trigger_ops traceon_trigger_ops = {
 818	.func			= traceon_trigger,
 819	.print			= traceon_trigger_print,
 820	.init			= event_trigger_init,
 821	.free			= event_trigger_free,
 822};
 823
 824static struct event_trigger_ops traceon_count_trigger_ops = {
 825	.func			= traceon_count_trigger,
 826	.print			= traceon_trigger_print,
 827	.init			= event_trigger_init,
 828	.free			= event_trigger_free,
 829};
 830
 831static struct event_trigger_ops traceoff_trigger_ops = {
 832	.func			= traceoff_trigger,
 833	.print			= traceoff_trigger_print,
 834	.init			= event_trigger_init,
 835	.free			= event_trigger_free,
 836};
 837
 838static struct event_trigger_ops traceoff_count_trigger_ops = {
 839	.func			= traceoff_count_trigger,
 840	.print			= traceoff_trigger_print,
 841	.init			= event_trigger_init,
 842	.free			= event_trigger_free,
 843};
 844
 845static struct event_trigger_ops *
 846onoff_get_trigger_ops(char *cmd, char *param)
 847{
 848	struct event_trigger_ops *ops;
 849
 850	/* we register both traceon and traceoff to this callback */
 851	if (strcmp(cmd, "traceon") == 0)
 852		ops = param ? &traceon_count_trigger_ops :
 853			&traceon_trigger_ops;
 854	else
 855		ops = param ? &traceoff_count_trigger_ops :
 856			&traceoff_trigger_ops;
 857
 858	return ops;
 859}
 860
 861static struct event_command trigger_traceon_cmd = {
 862	.name			= "traceon",
 863	.trigger_type		= ETT_TRACE_ONOFF,
 864	.func			= event_trigger_callback,
 865	.reg			= register_trigger,
 866	.unreg			= unregister_trigger,
 867	.get_trigger_ops	= onoff_get_trigger_ops,
 868	.set_filter		= set_trigger_filter,
 869};
 870
 871static struct event_command trigger_traceoff_cmd = {
 872	.name			= "traceoff",
 873	.trigger_type		= ETT_TRACE_ONOFF,
 874	.func			= event_trigger_callback,
 875	.reg			= register_trigger,
 876	.unreg			= unregister_trigger,
 877	.get_trigger_ops	= onoff_get_trigger_ops,
 878	.set_filter		= set_trigger_filter,
 879};
 880
 881#ifdef CONFIG_TRACER_SNAPSHOT
 882static void
 883snapshot_trigger(struct event_trigger_data *data)
 884{
 885	tracing_snapshot();
 886}
 887
 888static void
 889snapshot_count_trigger(struct event_trigger_data *data)
 890{
 891	if (!data->count)
 892		return;
 893
 894	if (data->count != -1)
 895		(data->count)--;
 896
 897	snapshot_trigger(data);
 898}
 899
 900static int
 901register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
 902			  struct event_trigger_data *data,
 903			  struct ftrace_event_file *file)
 904{
 905	int ret = register_trigger(glob, ops, data, file);
 906
 907	if (ret > 0 && tracing_alloc_snapshot() != 0) {
 908		unregister_trigger(glob, ops, data, file);
 909		ret = 0;
 910	}
 911
 912	return ret;
 913}
 914
 915static int
 916snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
 917		       struct event_trigger_data *data)
 918{
 919	return event_trigger_print("snapshot", m, (void *)data->count,
 920				   data->filter_str);
 921}
 922
 923static struct event_trigger_ops snapshot_trigger_ops = {
 924	.func			= snapshot_trigger,
 925	.print			= snapshot_trigger_print,
 926	.init			= event_trigger_init,
 927	.free			= event_trigger_free,
 928};
 929
 930static struct event_trigger_ops snapshot_count_trigger_ops = {
 931	.func			= snapshot_count_trigger,
 932	.print			= snapshot_trigger_print,
 933	.init			= event_trigger_init,
 934	.free			= event_trigger_free,
 935};
 936
 937static struct event_trigger_ops *
 938snapshot_get_trigger_ops(char *cmd, char *param)
 939{
 940	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
 941}
 942
 943static struct event_command trigger_snapshot_cmd = {
 944	.name			= "snapshot",
 945	.trigger_type		= ETT_SNAPSHOT,
 946	.func			= event_trigger_callback,
 947	.reg			= register_snapshot_trigger,
 948	.unreg			= unregister_trigger,
 949	.get_trigger_ops	= snapshot_get_trigger_ops,
 950	.set_filter		= set_trigger_filter,
 951};
 952
 953static __init int register_trigger_snapshot_cmd(void)
 954{
 955	int ret;
 956
 957	ret = register_event_command(&trigger_snapshot_cmd);
 958	WARN_ON(ret < 0);
 959
 960	return ret;
 961}
 962#else
 963static __init int register_trigger_snapshot_cmd(void) { return 0; }
 964#endif /* CONFIG_TRACER_SNAPSHOT */
 965
 966#ifdef CONFIG_STACKTRACE
 967/*
 968 * Skip 3:
 969 *   stacktrace_trigger()
 970 *   event_triggers_post_call()
 971 *   ftrace_raw_event_xxx()
 972 */
 973#define STACK_SKIP 3
 974
 975static void
 976stacktrace_trigger(struct event_trigger_data *data)
 977{
 978	trace_dump_stack(STACK_SKIP);
 979}
 980
 981static void
 982stacktrace_count_trigger(struct event_trigger_data *data)
 983{
 984	if (!data->count)
 985		return;
 986
 987	if (data->count != -1)
 988		(data->count)--;
 989
 990	stacktrace_trigger(data);
 991}
 992
 993static int
 994stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
 995			 struct event_trigger_data *data)
 996{
 997	return event_trigger_print("stacktrace", m, (void *)data->count,
 998				   data->filter_str);
 999}
1000
1001static struct event_trigger_ops stacktrace_trigger_ops = {
1002	.func			= stacktrace_trigger,
1003	.print			= stacktrace_trigger_print,
1004	.init			= event_trigger_init,
1005	.free			= event_trigger_free,
1006};
1007
1008static struct event_trigger_ops stacktrace_count_trigger_ops = {
1009	.func			= stacktrace_count_trigger,
1010	.print			= stacktrace_trigger_print,
1011	.init			= event_trigger_init,
1012	.free			= event_trigger_free,
1013};
1014
1015static struct event_trigger_ops *
1016stacktrace_get_trigger_ops(char *cmd, char *param)
1017{
1018	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1019}
1020
1021static struct event_command trigger_stacktrace_cmd = {
1022	.name			= "stacktrace",
1023	.trigger_type		= ETT_STACKTRACE,
1024	.post_trigger		= true,
1025	.func			= event_trigger_callback,
1026	.reg			= register_trigger,
1027	.unreg			= unregister_trigger,
1028	.get_trigger_ops	= stacktrace_get_trigger_ops,
1029	.set_filter		= set_trigger_filter,
1030};
1031
1032static __init int register_trigger_stacktrace_cmd(void)
1033{
1034	int ret;
1035
1036	ret = register_event_command(&trigger_stacktrace_cmd);
1037	WARN_ON(ret < 0);
1038
1039	return ret;
1040}
1041#else
1042static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1043#endif /* CONFIG_STACKTRACE */
1044
1045static __init void unregister_trigger_traceon_traceoff_cmds(void)
1046{
1047	unregister_event_command(&trigger_traceon_cmd);
1048	unregister_event_command(&trigger_traceoff_cmd);
1049}
1050
1051/* Avoid typos */
1052#define ENABLE_EVENT_STR	"enable_event"
1053#define DISABLE_EVENT_STR	"disable_event"
1054
1055struct enable_trigger_data {
1056	struct ftrace_event_file	*file;
1057	bool				enable;
1058};
1059
1060static void
1061event_enable_trigger(struct event_trigger_data *data)
1062{
1063	struct enable_trigger_data *enable_data = data->private_data;
1064
1065	if (enable_data->enable)
1066		clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1067	else
1068		set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1069}
1070
1071static void
1072event_enable_count_trigger(struct event_trigger_data *data)
1073{
1074	struct enable_trigger_data *enable_data = data->private_data;
1075
1076	if (!data->count)
1077		return;
1078
1079	/* Skip if the event is in a state we want to switch to */
1080	if (enable_data->enable == !(enable_data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
1081		return;
1082
1083	if (data->count != -1)
1084		(data->count)--;
1085
1086	event_enable_trigger(data);
1087}
1088
1089static int
1090event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1091			   struct event_trigger_data *data)
1092{
1093	struct enable_trigger_data *enable_data = data->private_data;
1094
1095	seq_printf(m, "%s:%s:%s",
1096		   enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
1097		   enable_data->file->event_call->class->system,
1098		   ftrace_event_name(enable_data->file->event_call));
1099
1100	if (data->count == -1)
1101		seq_puts(m, ":unlimited");
1102	else
1103		seq_printf(m, ":count=%ld", data->count);
1104
1105	if (data->filter_str)
1106		seq_printf(m, " if %s\n", data->filter_str);
1107	else
1108		seq_puts(m, "\n");
1109
1110	return 0;
1111}
1112
1113static void
1114event_enable_trigger_free(struct event_trigger_ops *ops,
1115			  struct event_trigger_data *data)
1116{
1117	struct enable_trigger_data *enable_data = data->private_data;
1118
1119	if (WARN_ON_ONCE(data->ref <= 0))
1120		return;
1121
1122	data->ref--;
1123	if (!data->ref) {
1124		/* Remove the SOFT_MODE flag */
1125		trace_event_enable_disable(enable_data->file, 0, 1);
1126		module_put(enable_data->file->event_call->mod);
1127		trigger_data_free(data);
1128		kfree(enable_data);
1129	}
1130}
1131
1132static struct event_trigger_ops event_enable_trigger_ops = {
1133	.func			= event_enable_trigger,
1134	.print			= event_enable_trigger_print,
1135	.init			= event_trigger_init,
1136	.free			= event_enable_trigger_free,
1137};
1138
1139static struct event_trigger_ops event_enable_count_trigger_ops = {
1140	.func			= event_enable_count_trigger,
1141	.print			= event_enable_trigger_print,
1142	.init			= event_trigger_init,
1143	.free			= event_enable_trigger_free,
1144};
1145
1146static struct event_trigger_ops event_disable_trigger_ops = {
1147	.func			= event_enable_trigger,
1148	.print			= event_enable_trigger_print,
1149	.init			= event_trigger_init,
1150	.free			= event_enable_trigger_free,
1151};
1152
1153static struct event_trigger_ops event_disable_count_trigger_ops = {
1154	.func			= event_enable_count_trigger,
1155	.print			= event_enable_trigger_print,
1156	.init			= event_trigger_init,
1157	.free			= event_enable_trigger_free,
1158};
1159
1160static int
1161event_enable_trigger_func(struct event_command *cmd_ops,
1162			  struct ftrace_event_file *file,
1163			  char *glob, char *cmd, char *param)
1164{
1165	struct ftrace_event_file *event_enable_file;
1166	struct enable_trigger_data *enable_data;
1167	struct event_trigger_data *trigger_data;
1168	struct event_trigger_ops *trigger_ops;
1169	struct trace_array *tr = file->tr;
1170	const char *system;
1171	const char *event;
1172	char *trigger;
1173	char *number;
1174	bool enable;
1175	int ret;
1176
1177	if (!param)
1178		return -EINVAL;
1179
1180	/* separate the trigger from the filter (s:e:n [if filter]) */
1181	trigger = strsep(&param, " \t");
1182	if (!trigger)
1183		return -EINVAL;
1184
1185	system = strsep(&trigger, ":");
1186	if (!trigger)
1187		return -EINVAL;
1188
1189	event = strsep(&trigger, ":");
1190
1191	ret = -EINVAL;
1192	event_enable_file = find_event_file(tr, system, event);
1193	if (!event_enable_file)
1194		goto out;
1195
1196	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1197
1198	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1199
1200	ret = -ENOMEM;
1201	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1202	if (!trigger_data)
1203		goto out;
1204
1205	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1206	if (!enable_data) {
1207		kfree(trigger_data);
1208		goto out;
1209	}
1210
1211	trigger_data->count = -1;
1212	trigger_data->ops = trigger_ops;
1213	trigger_data->cmd_ops = cmd_ops;
1214	INIT_LIST_HEAD(&trigger_data->list);
1215	RCU_INIT_POINTER(trigger_data->filter, NULL);
1216
1217	enable_data->enable = enable;
1218	enable_data->file = event_enable_file;
1219	trigger_data->private_data = enable_data;
1220
1221	if (glob[0] == '!') {
1222		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1223		kfree(trigger_data);
1224		kfree(enable_data);
1225		ret = 0;
1226		goto out;
1227	}
1228
1229	if (trigger) {
1230		number = strsep(&trigger, ":");
1231
1232		ret = -EINVAL;
1233		if (!strlen(number))
1234			goto out_free;
1235
1236		/*
1237		 * We use the callback data field (which is a pointer)
1238		 * as our counter.
1239		 */
1240		ret = kstrtoul(number, 0, &trigger_data->count);
1241		if (ret)
1242			goto out_free;
1243	}
1244
1245	if (!param) /* if param is non-empty, it's supposed to be a filter */
1246		goto out_reg;
1247
1248	if (!cmd_ops->set_filter)
1249		goto out_reg;
1250
1251	ret = cmd_ops->set_filter(param, trigger_data, file);
1252	if (ret < 0)
1253		goto out_free;
1254
1255 out_reg:
1256	/* Don't let event modules unload while probe registered */
1257	ret = try_module_get(event_enable_file->event_call->mod);
1258	if (!ret) {
1259		ret = -EBUSY;
1260		goto out_free;
1261	}
1262
1263	ret = trace_event_enable_disable(event_enable_file, 1, 1);
1264	if (ret < 0)
1265		goto out_put;
1266	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1267	/*
1268	 * The above returns the number of triggers it registered on
1269	 * success, but returns zero if it didn't register any.
1270	 * Consider registering none a failure too.
1271	 */
1272	if (!ret) {
1273		ret = -ENOENT;
1274		goto out_disable;
1275	} else if (ret < 0)
1276		goto out_disable;
1277	/* Just return zero, not the number of enabled functions */
1278	ret = 0;
1279 out:
1280	return ret;
1281
1282 out_disable:
1283	trace_event_enable_disable(event_enable_file, 0, 1);
1284 out_put:
1285	module_put(event_enable_file->event_call->mod);
1286 out_free:
1287	if (cmd_ops->set_filter)
1288		cmd_ops->set_filter(NULL, trigger_data, NULL);
1289	kfree(trigger_data);
1290	kfree(enable_data);
1291	goto out;
1292}
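/*
 * Example usage (cf. Documentation/trace/events.txt): enable_event and
 * disable_event triggers name another event to flip when this one
 * fires, e.g.
 *
 *   echo 'enable_event:kmem:kmalloc:5' > \
 *       /sys/kernel/debug/tracing/events/syscalls/sys_enter_read/trigger
 *
 * Here system == "kmem", event == "kmalloc", and the trailing ":5" is
 * the count parsed above into trigger_data->count.
 */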
1293
1294static int event_enable_register_trigger(char *glob,
1295					 struct event_trigger_ops *ops,
1296					 struct event_trigger_data *data,
1297					 struct ftrace_event_file *file)
1298{
1299	struct enable_trigger_data *enable_data = data->private_data;
1300	struct enable_trigger_data *test_enable_data;
1301	struct event_trigger_data *test;
1302	int ret = 0;
1303
1304	list_for_each_entry_rcu(test, &file->triggers, list) {
1305		test_enable_data = test->private_data;
1306		if (test_enable_data &&
1307		    (test_enable_data->file == enable_data->file)) {
1308			ret = -EEXIST;
1309			goto out;
1310		}
1311	}
1312
1313	if (data->ops->init) {
1314		ret = data->ops->init(data->ops, data);
1315		if (ret < 0)
1316			goto out;
1317	}
1318
1319	list_add_rcu(&data->list, &file->triggers);
1320	ret++;
1321
1322	if (trace_event_trigger_enable_disable(file, 1) < 0) {
1323		list_del_rcu(&data->list);
1324		ret--;
1325	}
1326	update_cond_flag(file);
1327out:
1328	return ret;
1329}
1330
1331static void event_enable_unregister_trigger(char *glob,
1332					    struct event_trigger_ops *ops,
1333					    struct event_trigger_data *test,
1334					    struct ftrace_event_file *file)
1335{
1336	struct enable_trigger_data *test_enable_data = test->private_data;
1337	struct enable_trigger_data *enable_data;
1338	struct event_trigger_data *data;
1339	bool unregistered = false;
1340
1341	list_for_each_entry_rcu(data, &file->triggers, list) {
1342		enable_data = data->private_data;
1343		if (enable_data &&
1344		    (enable_data->file == test_enable_data->file)) {
1345			unregistered = true;
1346			list_del_rcu(&data->list);
1347			update_cond_flag(file);
1348			trace_event_trigger_enable_disable(file, 0);
1349			break;
1350		}
1351	}
1352
1353	if (unregistered && data->ops->free)
1354		data->ops->free(data->ops, data);
1355}
1356
1357static struct event_trigger_ops *
1358event_enable_get_trigger_ops(char *cmd, char *param)
1359{
1360	struct event_trigger_ops *ops;
1361	bool enable;
1362
1363	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1364
1365	if (enable)
1366		ops = param ? &event_enable_count_trigger_ops :
1367			&event_enable_trigger_ops;
1368	else
1369		ops = param ? &event_disable_count_trigger_ops :
1370			&event_disable_trigger_ops;
1371
1372	return ops;
1373}
1374
1375static struct event_command trigger_enable_cmd = {
1376	.name			= ENABLE_EVENT_STR,
1377	.trigger_type		= ETT_EVENT_ENABLE,
1378	.func			= event_enable_trigger_func,
1379	.reg			= event_enable_register_trigger,
1380	.unreg			= event_enable_unregister_trigger,
1381	.get_trigger_ops	= event_enable_get_trigger_ops,
1382	.set_filter		= set_trigger_filter,
1383};
1384
1385static struct event_command trigger_disable_cmd = {
1386	.name			= DISABLE_EVENT_STR,
1387	.trigger_type		= ETT_EVENT_ENABLE,
1388	.func			= event_enable_trigger_func,
1389	.reg			= event_enable_register_trigger,
1390	.unreg			= event_enable_unregister_trigger,
1391	.get_trigger_ops	= event_enable_get_trigger_ops,
1392	.set_filter		= set_trigger_filter,
1393};
1394
1395static __init void unregister_trigger_enable_disable_cmds(void)
1396{
1397	unregister_event_command(&trigger_enable_cmd);
1398	unregister_event_command(&trigger_disable_cmd);
1399}
1400
1401static __init int register_trigger_enable_disable_cmds(void)
1402{
1403	int ret;
1404
1405	ret = register_event_command(&trigger_enable_cmd);
1406	if (WARN_ON(ret < 0))
1407		return ret;
1408	ret = register_event_command(&trigger_disable_cmd);
1409	if (WARN_ON(ret < 0))
1410		unregister_trigger_enable_disable_cmds();
1411
1412	return ret;
1413}
1414
1415static __init int register_trigger_traceon_traceoff_cmds(void)
1416{
1417	int ret;
1418
1419	ret = register_event_command(&trigger_traceon_cmd);
1420	if (WARN_ON(ret < 0))
1421		return ret;
1422	ret = register_event_command(&trigger_traceoff_cmd);
1423	if (WARN_ON(ret < 0))
1424		unregister_trigger_traceon_traceoff_cmds();
1425
1426	return ret;
1427}
1428
1429__init int register_trigger_cmds(void)
1430{
1431	register_trigger_traceon_traceoff_cmds();
1432	register_trigger_snapshot_cmd();
1433	register_trigger_stacktrace_cmd();
1434	register_trigger_enable_disable_cmds();
1435
1436	return 0;
1437}
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * trace_events_trigger - trace event triggers
   4 *
   5 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
   6 */
   7
   8#include <linux/security.h>
   9#include <linux/module.h>
  10#include <linux/ctype.h>
  11#include <linux/mutex.h>
  12#include <linux/slab.h>
  13#include <linux/rculist.h>
  14
  15#include "trace.h"
  16
  17static LIST_HEAD(trigger_commands);
  18static DEFINE_MUTEX(trigger_cmd_mutex);
  19
  20void trigger_data_free(struct event_trigger_data *data)
  21{
  22	if (data->cmd_ops->set_filter)
  23		data->cmd_ops->set_filter(NULL, data, NULL);
  24
  25	/* make sure current triggers exit before free */
  26	tracepoint_synchronize_unregister();
  27
  28	kfree(data);
  29}
  30
  31/**
  32 * event_triggers_call - Call triggers associated with a trace event
  33 * @file: The trace_event_file associated with the event
  34 * @rec: The trace entry for the event, NULL for unconditional invocation
  35 *
  36 * For each trigger associated with an event, invoke the trigger
  37 * function registered with the associated trigger command.  If rec is
  38 * non-NULL, it means that the trigger requires further processing and
  39 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
  40 * trigger has a filter associated with it, rec will be checked against
  41 * the filter and, if the record matches, the trigger will be invoked.
  42 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
  43 * in any case until the current event is written, the trigger
  44 * function isn't invoked but the bit associated with the deferred
  45 * trigger is set in the return value.
  46 *
  47 * Returns an enum event_trigger_type value containing a set bit for
  48 * any trigger that should be deferred, ETT_NONE if nothing to defer.
  49 *
  50 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
  51 *
  52 * Return: an enum event_trigger_type value containing a set bit for
  53 * any trigger that should be deferred, ETT_NONE if nothing to defer.
  54 */
  55enum event_trigger_type
  56event_triggers_call(struct trace_event_file *file, void *rec,
  57		    struct ring_buffer_event *event)
  58{
  59	struct event_trigger_data *data;
  60	enum event_trigger_type tt = ETT_NONE;
  61	struct event_filter *filter;
  62
  63	if (list_empty(&file->triggers))
  64		return tt;
  65
  66	list_for_each_entry_rcu(data, &file->triggers, list) {
  67		if (data->paused)
  68			continue;
  69		if (!rec) {
  70			data->ops->func(data, rec, event);
  71			continue;
  72		}
  73		filter = rcu_dereference_sched(data->filter);
  74		if (filter && !filter_match_preds(filter, rec))
  75			continue;
  76		if (event_command_post_trigger(data->cmd_ops)) {
  77			tt |= data->cmd_ops->trigger_type;
  78			continue;
  79		}
  80		data->ops->func(data, rec, event);
  81	}
  82	return tt;
  83}
  84EXPORT_SYMBOL_GPL(event_triggers_call);
  85
  86/**
  87 * event_triggers_post_call - Call 'post_triggers' for a trace event
  88 * @file: The trace_event_file associated with the event
  89 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
  90 *
  91 * For each trigger associated with an event, invoke the trigger
  92 * function registered with the associated trigger command, if the
  93 * corresponding bit is set in the tt enum passed into this function.
  94 * See @event_triggers_call for details on how those bits are set.
  95 *
  96 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
  97 */
  98void
  99event_triggers_post_call(struct trace_event_file *file,
 100			 enum event_trigger_type tt)
 101{
 102	struct event_trigger_data *data;
 103
 104	list_for_each_entry_rcu(data, &file->triggers, list) {
 105		if (data->paused)
 106			continue;
 107		if (data->cmd_ops->trigger_type & tt)
 108			data->ops->func(data, NULL, NULL);
 109	}
 110}
 111EXPORT_SYMBOL_GPL(event_triggers_post_call);
 112
 113#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)
 114
 115static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
 116{
 117	struct trace_event_file *event_file = event_file_data(m->private);
 118
 119	if (t == SHOW_AVAILABLE_TRIGGERS)
 120		return NULL;
 121
 122	return seq_list_next(t, &event_file->triggers, pos);
 123}
 124
 125static void *trigger_start(struct seq_file *m, loff_t *pos)
 126{
 127	struct trace_event_file *event_file;
 128
 129	/* ->stop() is called even if ->start() fails */
 130	mutex_lock(&event_mutex);
 131	event_file = event_file_data(m->private);
 132	if (unlikely(!event_file))
 133		return ERR_PTR(-ENODEV);
 134
 135	if (list_empty(&event_file->triggers))
 136		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
 137
 138	return seq_list_start(&event_file->triggers, *pos);
 139}
 140
 141static void trigger_stop(struct seq_file *m, void *t)
 142{
 143	mutex_unlock(&event_mutex);
 144}
 145
 146static int trigger_show(struct seq_file *m, void *v)
 147{
 148	struct event_trigger_data *data;
 149	struct event_command *p;
 150
 151	if (v == SHOW_AVAILABLE_TRIGGERS) {
 152		seq_puts(m, "# Available triggers:\n");
 153		seq_putc(m, '#');
 154		mutex_lock(&trigger_cmd_mutex);
 155		list_for_each_entry_reverse(p, &trigger_commands, list)
 156			seq_printf(m, " %s", p->name);
 157		seq_putc(m, '\n');
 158		mutex_unlock(&trigger_cmd_mutex);
 159		return 0;
 160	}
 161
 162	data = list_entry(v, struct event_trigger_data, list);
 163	data->ops->print(m, data->ops, data);
 164
 165	return 0;
 166}
 167
 168static const struct seq_operations event_triggers_seq_ops = {
 169	.start = trigger_start,
 170	.next = trigger_next,
 171	.stop = trigger_stop,
 172	.show = trigger_show,
 173};
 174
 175static int event_trigger_regex_open(struct inode *inode, struct file *file)
 176{
 177	int ret;
 178
 179	ret = security_locked_down(LOCKDOWN_TRACEFS);
 180	if (ret)
 181		return ret;
 182
 183	mutex_lock(&event_mutex);
 184
 185	if (unlikely(!event_file_data(file))) {
 186		mutex_unlock(&event_mutex);
 187		return -ENODEV;
 188	}
 189
 190	if ((file->f_mode & FMODE_WRITE) &&
 191	    (file->f_flags & O_TRUNC)) {
 192		struct trace_event_file *event_file;
 193		struct event_command *p;
 194
 195		event_file = event_file_data(file);
 196
 197		list_for_each_entry(p, &trigger_commands, list) {
 198			if (p->unreg_all)
 199				p->unreg_all(event_file);
 200		}
 201	}
 202
 203	if (file->f_mode & FMODE_READ) {
 204		ret = seq_open(file, &event_triggers_seq_ops);
 205		if (!ret) {
 206			struct seq_file *m = file->private_data;
 207			m->private = file;
 208		}
 209	}
 210
 211	mutex_unlock(&event_mutex);
 212
 213	return ret;
 214}
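/*
 * Illustrative: the O_TRUNC branch above is what lets truncating the
 * file, e.g.
 *
 *   echo > /sys/kernel/tracing/events/<subsystem>/<event>/trigger
 *
 * remove triggers in bulk: every registered command that implements
 * ->unreg_all() is given the chance to drop its instances for the event.
 */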
 215
 216static int trigger_process_regex(struct trace_event_file *file, char *buff)
 217{
 218	char *command, *next = buff;
 219	struct event_command *p;
 220	int ret = -EINVAL;
 221
 222	command = strsep(&next, ": \t");
 223	command = (command[0] != '!') ? command : command + 1;
 224
 225	mutex_lock(&trigger_cmd_mutex);
 226	list_for_each_entry(p, &trigger_commands, list) {
 227		if (strcmp(p->name, command) == 0) {
 228			ret = p->func(p, file, buff, command, next);
 229			goto out_unlock;
 230		}
 231	}
 232 out_unlock:
 233	mutex_unlock(&trigger_cmd_mutex);
 234
 235	return ret;
 236}
 237
 238static ssize_t event_trigger_regex_write(struct file *file,
 239					 const char __user *ubuf,
 240					 size_t cnt, loff_t *ppos)
 241{
 242	struct trace_event_file *event_file;
 243	ssize_t ret;
 244	char *buf;
 245
 246	if (!cnt)
 247		return 0;
 248
 249	if (cnt >= PAGE_SIZE)
 250		return -EINVAL;
 251
 252	buf = memdup_user_nul(ubuf, cnt);
 253	if (IS_ERR(buf))
 254		return PTR_ERR(buf);
 255
 256	strim(buf);
 257
 258	mutex_lock(&event_mutex);
 259	event_file = event_file_data(file);
 260	if (unlikely(!event_file)) {
 261		mutex_unlock(&event_mutex);
 262		kfree(buf);
 263		return -ENODEV;
 264	}
 265	ret = trigger_process_regex(event_file, buf);
 266	mutex_unlock(&event_mutex);
 267
 268	kfree(buf);
 269	if (ret < 0)
 270		goto out;
 271
 272	*ppos += cnt;
 273	ret = cnt;
 274 out:
 275	return ret;
 276}
 277
 278static int event_trigger_regex_release(struct inode *inode, struct file *file)
 279{
 280	mutex_lock(&event_mutex);
 281
 282	if (file->f_mode & FMODE_READ)
 283		seq_release(inode, file);
 284
 285	mutex_unlock(&event_mutex);
 286
 287	return 0;
 288}
 289
 290static ssize_t
 291event_trigger_write(struct file *filp, const char __user *ubuf,
 292		    size_t cnt, loff_t *ppos)
 293{
 294	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
 295}
 296
 297static int
 298event_trigger_open(struct inode *inode, struct file *filp)
 299{
 300	/* Checks for tracefs lockdown */
 301	return event_trigger_regex_open(inode, filp);
 302}
 303
 304static int
 305event_trigger_release(struct inode *inode, struct file *file)
 306{
 307	return event_trigger_regex_release(inode, file);
 308}
 309
 310const struct file_operations event_trigger_fops = {
 311	.open = event_trigger_open,
 312	.read = seq_read,
 313	.write = event_trigger_write,
 314	.llseek = tracing_lseek,
 315	.release = event_trigger_release,
 316};
 317
 318/*
 319 * Currently we only register event commands from __init, so mark this
 320 * __init too.
 321 */
 322__init int register_event_command(struct event_command *cmd)
 323{
 324	struct event_command *p;
 325	int ret = 0;
 326
 327	mutex_lock(&trigger_cmd_mutex);
 328	list_for_each_entry(p, &trigger_commands, list) {
 329		if (strcmp(cmd->name, p->name) == 0) {
 330			ret = -EBUSY;
 331			goto out_unlock;
 332		}
 333	}
 334	list_add(&cmd->list, &trigger_commands);
 335 out_unlock:
 336	mutex_unlock(&trigger_cmd_mutex);
 337
 338	return ret;
 339}
 340
 341/*
 342 * Currently we only unregister event commands from __init, so mark
 343 * this __init too.
 344 */
 345__init int unregister_event_command(struct event_command *cmd)
 346{
 347	struct event_command *p, *n;
 348	int ret = -ENODEV;
 349
 350	mutex_lock(&trigger_cmd_mutex);
 351	list_for_each_entry_safe(p, n, &trigger_commands, list) {
 352		if (strcmp(cmd->name, p->name) == 0) {
 353			ret = 0;
 354			list_del_init(&p->list);
 355			goto out_unlock;
 356		}
 357	}
 358 out_unlock:
 359	mutex_unlock(&trigger_cmd_mutex);
 360
 361	return ret;
 362}
 363
 364/**
 365 * event_trigger_print - Generic event_trigger_ops @print implementation
 366 * @name: The name of the event trigger
 367 * @m: The seq_file being printed to
 368 * @data: Trigger-specific data
 369 * @filter_str: filter_str to print, if present
 370 *
 371 * Common implementation for event triggers to print themselves.
 372 *
 373 * Usually wrapped by a function that simply sets the @name of the
 374 * trigger command and then invokes this.
 375 *
 376 * Return: 0 on success, errno otherwise
 377 */
 378static int
 379event_trigger_print(const char *name, struct seq_file *m,
 380		    void *data, char *filter_str)
 381{
 382	long count = (long)data;
 383
 384	seq_puts(m, name);
 385
 386	if (count == -1)
 387		seq_puts(m, ":unlimited");
 388	else
 389		seq_printf(m, ":count=%ld", count);
 390
 391	if (filter_str)
 392		seq_printf(m, " if %s\n", filter_str);
 393	else
 394		seq_putc(m, '\n');
 395
 396	return 0;
 397}
 398
 399/**
 400 * event_trigger_init - Generic event_trigger_ops @init implementation
 401 * @ops: The trigger ops associated with the trigger
 402 * @data: Trigger-specific data
 403 *
 404 * Common implementation of event trigger initialization.
 405 *
 406 * Usually used directly as the @init method in event trigger
 407 * implementations.
 408 *
 409 * Return: 0 on success, errno otherwise
 410 */
 411int event_trigger_init(struct event_trigger_ops *ops,
 412		       struct event_trigger_data *data)
 413{
 414	data->ref++;
 415	return 0;
 416}
 417
 418/**
 419 * event_trigger_free - Generic event_trigger_ops @free implementation
 420 * @ops: The trigger ops associated with the trigger
 421 * @data: Trigger-specific data
 422 *
 423 * Common implementation of event trigger de-initialization.
 424 *
 425 * Usually used directly as the @free method in event trigger
 426 * implementations.
 427 */
 428static void
 429event_trigger_free(struct event_trigger_ops *ops,
 430		   struct event_trigger_data *data)
 431{
 432	if (WARN_ON_ONCE(data->ref <= 0))
 433		return;
 434
 435	data->ref--;
 436	if (!data->ref)
 437		trigger_data_free(data);
 438}
 439
 440int trace_event_trigger_enable_disable(struct trace_event_file *file,
 441				       int trigger_enable)
 442{
 443	int ret = 0;
 444
 445	if (trigger_enable) {
 446		if (atomic_inc_return(&file->tm_ref) > 1)
 447			return ret;
 448		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
 449		ret = trace_event_enable_disable(file, 1, 1);
 450	} else {
 451		if (atomic_dec_return(&file->tm_ref) > 0)
 452			return ret;
 453		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
 454		ret = trace_event_enable_disable(file, 0, 1);
 455	}
 456
 457	return ret;
 458}
 459
 460/**
 461 * clear_event_triggers - Clear all triggers associated with a trace array
 462 * @tr: The trace array to clear
 463 *
 464 * For each trigger, the triggering event has its tm_ref decremented
 465 * via trace_event_trigger_enable_disable(), and any associated event
 466 * (in the case of enable/disable_event triggers) will have its sm_ref
 467 * decremented via free()->trace_event_enable_disable().  That
 468 * combination effectively reverses the soft-mode/trigger state added
 469 * by trigger registration.
 470 *
 471 * Must be called with event_mutex held.
 472 */
 473void
 474clear_event_triggers(struct trace_array *tr)
 475{
 476	struct trace_event_file *file;
 477
 478	list_for_each_entry(file, &tr->events, list) {
 479		struct event_trigger_data *data, *n;
 480		list_for_each_entry_safe(data, n, &file->triggers, list) {
 481			trace_event_trigger_enable_disable(file, 0);
 482			list_del_rcu(&data->list);
 483			if (data->ops->free)
 484				data->ops->free(data->ops, data);
 485		}
 486	}
 487}
 488
 489/**
 490 * update_cond_flag - Set or reset the TRIGGER_COND bit
 491 * @file: The trace_event_file associated with the event
 492 *
 493 * If an event has triggers and any of those triggers has a filter or
 494 * a post_trigger, trigger invocation needs to be deferred until after
 495 * the current event has logged its data, and the event should have
 496 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 497 * cleared.
 498 */
 499void update_cond_flag(struct trace_event_file *file)
 500{
 501	struct event_trigger_data *data;
 502	bool set_cond = false;
 503
 504	list_for_each_entry_rcu(data, &file->triggers, list) {
 505		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
 506		    event_command_needs_rec(data->cmd_ops)) {
 507			set_cond = true;
 508			break;
 509		}
 510	}
 511
 512	if (set_cond)
 513		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
 514	else
 515		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
 516}
 517
 518/**
 519 * register_trigger - Generic event_command @reg implementation
 520 * @glob: The raw string used to register the trigger
 521 * @ops: The trigger ops associated with the trigger
 522 * @data: Trigger-specific data to associate with the trigger
 523 * @file: The trace_event_file associated with the event
 524 *
 525 * Common implementation for event trigger registration.
 526 *
 527 * Usually used directly as the @reg method in event command
 528 * implementations.
 529 *
 530 * Return: 0 on success, errno otherwise
 531 */
 532static int register_trigger(char *glob, struct event_trigger_ops *ops,
 533			    struct event_trigger_data *data,
 534			    struct trace_event_file *file)
 535{
 536	struct event_trigger_data *test;
 537	int ret = 0;
 538
 539	list_for_each_entry_rcu(test, &file->triggers, list) {
 540		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
 541			ret = -EEXIST;
 542			goto out;
 543		}
 544	}
 545
 546	if (data->ops->init) {
 547		ret = data->ops->init(data->ops, data);
 548		if (ret < 0)
 549			goto out;
 550	}
 551
 552	list_add_rcu(&data->list, &file->triggers);
 553	ret++;
 554
 555	update_cond_flag(file);
 556	if (trace_event_trigger_enable_disable(file, 1) < 0) {
 557		list_del_rcu(&data->list);
 558		update_cond_flag(file);
 559		ret--;
 560	}
 561out:
 562	return ret;
 563}
 564
 565/**
 566 * unregister_trigger - Generic event_command @unreg implementation
 567 * @glob: The raw string used to register the trigger
 568 * @ops: The trigger ops associated with the trigger
 569 * @test: Trigger-specific data used to find the trigger to remove
 570 * @file: The trace_event_file associated with the event
 571 *
 572 * Common implementation for event trigger unregistration.
 573 *
 574 * Usually used directly as the @unreg method in event command
 575 * implementations.
 576 */
 577static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
 578			       struct event_trigger_data *test,
 579			       struct trace_event_file *file)
 580{
 581	struct event_trigger_data *data;
 582	bool unregistered = false;
 583
 584	list_for_each_entry_rcu(data, &file->triggers, list) {
 585		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
 586			unregistered = true;
 587			list_del_rcu(&data->list);
 588			trace_event_trigger_enable_disable(file, 0);
 589			update_cond_flag(file);
 590			break;
 591		}
 592	}
 593
 594	if (unregistered && data->ops->free)
 595		data->ops->free(data->ops, data);
 596}
 597
 598/**
 599 * event_trigger_callback - Generic event_command @func implementation
 600 * @cmd_ops: The command ops, used for trigger registration
 601 * @file: The trace_event_file associated with the event
 602 * @glob: The raw string used to register the trigger
 603 * @cmd: The cmd portion of the string used to register the trigger
 604 * @param: The params portion of the string used to register the trigger
 605 *
 606 * Common implementation for event command parsing and trigger
 607 * instantiation.
 608 *
 609 * Usually used directly as the @func method in event command
 610 * implementations.
 611 *
 612 * Return: 0 on success, errno otherwise
 613 */
 614static int
 615event_trigger_callback(struct event_command *cmd_ops,
 616		       struct trace_event_file *file,
 617		       char *glob, char *cmd, char *param)
 618{
 619	struct event_trigger_data *trigger_data;
 620	struct event_trigger_ops *trigger_ops;
 621	char *trigger = NULL;
 622	char *number;
 623	int ret;
 624
 625	/* separate the trigger from the filter (t:n [if filter]) */
 626	if (param && isdigit(param[0]))
 627		trigger = strsep(&param, " \t");
 628
 629	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
 630
 631	ret = -ENOMEM;
 632	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
 633	if (!trigger_data)
 634		goto out;
 635
 636	trigger_data->count = -1;
 637	trigger_data->ops = trigger_ops;
 638	trigger_data->cmd_ops = cmd_ops;
 639	trigger_data->private_data = file;
 640	INIT_LIST_HEAD(&trigger_data->list);
 641	INIT_LIST_HEAD(&trigger_data->named_list);
 642
 643	if (glob[0] == '!') {
 644		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
 645		kfree(trigger_data);
 646		ret = 0;
 647		goto out;
 648	}
 649
 650	if (trigger) {
 651		number = strsep(&trigger, ":");
 652
 653		ret = -EINVAL;
 654		if (!strlen(number))
 655			goto out_free;
 656
 657		/*
 658		 * Parse the number following the ':' as the trigger's
 659		 * invocation count.
 660		 */
 661		ret = kstrtoul(number, 0, &trigger_data->count);
 662		if (ret)
 663			goto out_free;
 664	}
 665
 666	if (!param) /* if param is non-empty, it's supposed to be a filter */
 667		goto out_reg;
 668
 669	if (!cmd_ops->set_filter)
 670		goto out_reg;
 671
 672	ret = cmd_ops->set_filter(param, trigger_data, file);
 673	if (ret < 0)
 674		goto out_free;
 675
 676 out_reg:
 677	/* Take a trigger_data reference so a failing reg can't free it */
 678	event_trigger_init(trigger_ops, trigger_data);
 679	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
 680	/*
 681	 * On success, reg() returns the number of triggers registered,
 682	 * but it returns zero if it registered nothing.
 683	 * Consider registering nothing a failure too.
 684	 */
 685	if (!ret) {
 686		cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
 687		ret = -ENOENT;
 688	} else if (ret > 0)
 689		ret = 0;
 690
 691	/* Drop our trigger_data reference; free it if no longer used */
 692	event_trigger_free(trigger_ops, trigger_data);
 693 out:
 694	return ret;
 695
 696 out_free:
 697	if (cmd_ops->set_filter)
 698		cmd_ops->set_filter(NULL, trigger_data, NULL);
 699	kfree(trigger_data);
 700	goto out;
 701}
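/*
 * Worked example (hypothetical input): writing 'traceon:5 if prev_pid == 0'
 * to an event's 'trigger' file reaches this callback with cmd == "traceon"
 * and param == "5 if prev_pid == 0".  The isdigit() check peels off
 * trigger == "5", which kstrtoul() stores in trigger_data->count, leaving
 * param == "if prev_pid == 0" for ->set_filter().  A leading '!' in glob
 * ('!traceon') takes the ->unreg() path and removes the trigger instead.
 */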
 702
 703/**
 704 * set_trigger_filter - Generic event_command @set_filter implementation
 705 * @filter_str: The filter string for the trigger, NULL to remove filter
 706 * @trigger_data: Trigger-specific data
 707 * @file: The trace_event_file associated with the event
 708 *
 709 * Common implementation for event command filter parsing and filter
 710 * instantiation.
 711 *
 712 * Usually used directly as the @set_filter method in event command
 713 * implementations.
 714 *
 715 * Also used to remove a filter (if filter_str = NULL).
 716 *
 717 * Return: 0 on success, errno otherwise
 718 */
 719int set_trigger_filter(char *filter_str,
 720		       struct event_trigger_data *trigger_data,
 721		       struct trace_event_file *file)
 722{
 723	struct event_trigger_data *data = trigger_data;
 724	struct event_filter *filter = NULL, *tmp;
 725	int ret = -EINVAL;
 726	char *s;
 727
 728	if (!filter_str) /* clear the current filter */
 729		goto assign;
 730
 731	s = strsep(&filter_str, " \t");
 732
 733	if (!strlen(s) || strcmp(s, "if") != 0)
 734		goto out;
 735
 736	if (!filter_str)
 737		goto out;
 738
 739	/* The filter is for the 'trigger' event, not the triggered event */
 740	ret = create_event_filter(file->tr, file->event_call,
 741				  filter_str, false, &filter);
 742	/*
 743	 * If create_event_filter() fails, the partially built filter still
 744	 * needs to be freed; the calling code does that via data->filter.
 745	 */
 746 assign:
 747	tmp = rcu_access_pointer(data->filter);
 748
 749	rcu_assign_pointer(data->filter, filter);
 750
 751	if (tmp) {
 752		/* Make sure the call is done with the filter */
 753		tracepoint_synchronize_unregister();
 754		free_event_filter(tmp);
 755	}
 756
 757	kfree(data->filter_str);
 758	data->filter_str = NULL;
 759
 760	if (filter_str) {
 761		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
 762		if (!data->filter_str) {
 763			free_event_filter(rcu_access_pointer(data->filter));
 764			data->filter = NULL;
 765			ret = -ENOMEM;
 766		}
 767	}
 768 out:
 769	return ret;
 770}
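/*
 * Example (hypothetical trigger): for 'stacktrace:3 if bytes_req > 256' on
 * kmem:kmalloc, set_trigger_filter() is handed "if bytes_req > 256", strips
 * the leading "if", and passes "bytes_req > 256" to create_event_filter()
 * against the triggering event's fields.  The resulting filter is published
 * to data->filter under RCU and the string is duplicated into
 * data->filter_str so ->print() can show it later.
 */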
 771
 772static LIST_HEAD(named_triggers);
 773
 774/**
 775 * find_named_trigger - Find the common named trigger associated with @name
 776 * @name: The name of the set of named triggers to find the common data for
 777 *
 778 * Named triggers are sets of triggers that share a common set of
 779 * trigger data.  The first named trigger registered with a given name
 780 * owns the common trigger data that the others subsequently
 781 * registered with the same name will reference.  This function
 782 * returns the common trigger data associated with that first
 783 * registered instance.
 784 *
 785 * Return: the common trigger data for the given named trigger on
 786 * success, NULL otherwise.
 787 */
 788struct event_trigger_data *find_named_trigger(const char *name)
 789{
 790	struct event_trigger_data *data;
 791
 792	if (!name)
 793		return NULL;
 794
 795	list_for_each_entry(data, &named_triggers, named_list) {
 796		if (data->named_data)
 797			continue;
 798		if (strcmp(data->name, name) == 0)
 799			return data;
 800	}
 801
 802	return NULL;
 803}
 804
 805/**
 806 * is_named_trigger - determine if a given trigger is a named trigger
 807 * @test: The trigger data to test
 808 *
 809 * Return: true if 'test' is a named trigger, false otherwise.
 810 */
 811bool is_named_trigger(struct event_trigger_data *test)
 812{
 813	struct event_trigger_data *data;
 814
 815	list_for_each_entry(data, &named_triggers, named_list) {
 816		if (test == data)
 817			return true;
 818	}
 819
 820	return false;
 821}
 822
 823/**
 824 * save_named_trigger - save the trigger in the named trigger list
 825 * @name: The name of the named trigger set
 826 * @data: The trigger data to save
 827 *
 828 * Return: 0 if successful, negative error otherwise.
 829 */
 830int save_named_trigger(const char *name, struct event_trigger_data *data)
 831{
 832	data->name = kstrdup(name, GFP_KERNEL);
 833	if (!data->name)
 834		return -ENOMEM;
 835
 836	list_add(&data->named_list, &named_triggers);
 837
 838	return 0;
 839}
 840
 841/**
 842 * del_named_trigger - delete a trigger from the named trigger list
 843 * @data: The trigger data to delete
 844 */
 845void del_named_trigger(struct event_trigger_data *data)
 846{
 847	kfree(data->name);
 848	data->name = NULL;
 849
 850	list_del(&data->named_list);
 851}
 852
 853static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
 854{
 855	struct event_trigger_data *test;
 856
 857	list_for_each_entry(test, &named_triggers, named_list) {
 858		if (strcmp(test->name, data->name) == 0) {
 859			if (pause) {
 860				test->paused_tmp = test->paused;
 861				test->paused = true;
 862			} else {
 863				test->paused = test->paused_tmp;
 864			}
 865		}
 866	}
 867}
 868
 869/**
 870 * pause_named_trigger - Pause all named triggers with the same name
 871 * @data: The trigger data of a named trigger to pause
 872 *
 873 * Pauses a named trigger along with all other triggers having the
 874 * same name.  Because named triggers share a common set of data,
 875 * pausing only one is meaningless, so pausing one named trigger needs
 876 * to pause all triggers with the same name.
 877 */
 878void pause_named_trigger(struct event_trigger_data *data)
 879{
 880	__pause_named_trigger(data, true);
 881}
 882
 883/**
 884 * unpause_named_trigger - Un-pause all named triggers with the same name
 885 * @data: The trigger data of a named trigger to unpause
 886 *
 887 * Un-pauses a named trigger along with all other triggers having the
 888 * same name.  Because named triggers share a common set of data,
 889 * unpausing only one is meaningless, so unpausing one named trigger
 890 * needs to unpause all triggers with the same name.
 891 */
 892void unpause_named_trigger(struct event_trigger_data *data)
 893{
 894	__pause_named_trigger(data, false);
 895}
 896
 897/**
 898 * set_named_trigger_data - Associate common named trigger data
 899 * @data: The trigger data to associate with @named_data
 * @named_data: The common trigger data owned by the first named trigger
 900 *
 901 * Named triggers are sets of triggers that share a common set of
 902 * trigger data.  The first named trigger registered with a given name
 903 * owns the common trigger data that the others subsequently
 904 * registered with the same name will reference.  This function
 905 * associates the common trigger data from the first trigger with the
 906 * given trigger.
 907 */
 908void set_named_trigger_data(struct event_trigger_data *data,
 909			    struct event_trigger_data *named_data)
 910{
 911	data->named_data = named_data;
 912}
 913
 914struct event_trigger_data *
 915get_named_trigger_data(struct event_trigger_data *data)
 916{
 917	return data->named_data;
 918}
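/*
 * Typical flow (a sketch; named triggers are used by hist triggers with a
 * 'name=foo' parameter): a registration path first calls
 * find_named_trigger("foo").  If an owner already exists, the new instance
 * points at the shared data with set_named_trigger_data() and can be
 * paused/unpaused as a group; otherwise the new trigger becomes the owner
 * via save_named_trigger("foo", data) and is removed again with
 * del_named_trigger() when it goes away.
 */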
 919
 920static void
 921traceon_trigger(struct event_trigger_data *data, void *rec,
 922		struct ring_buffer_event *event)
 923{
 924	if (tracing_is_on())
 925		return;
 926
 927	tracing_on();
 928}
 929
 930static void
 931traceon_count_trigger(struct event_trigger_data *data, void *rec,
 932		      struct ring_buffer_event *event)
 933{
 934	if (tracing_is_on())
 935		return;
 936
 937	if (!data->count)
 938		return;
 939
 940	if (data->count != -1)
 941		(data->count)--;
 942
 943	tracing_on();
 944}
 945
 946static void
 947traceoff_trigger(struct event_trigger_data *data, void *rec,
 948		 struct ring_buffer_event *event)
 949{
 950	if (!tracing_is_on())
 951		return;
 952
 953	tracing_off();
 954}
 955
 956static void
 957traceoff_count_trigger(struct event_trigger_data *data, void *rec,
 958		       struct ring_buffer_event *event)
 959{
 960	if (!tracing_is_on())
 961		return;
 962
 963	if (!data->count)
 964		return;
 965
 966	if (data->count != -1)
 967		(data->count)--;
 968
 969	tracing_off();
 970}
 971
 972static int
 973traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
 974		      struct event_trigger_data *data)
 975{
 976	return event_trigger_print("traceon", m, (void *)data->count,
 977				   data->filter_str);
 978}
 979
 980static int
 981traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
 982		       struct event_trigger_data *data)
 983{
 984	return event_trigger_print("traceoff", m, (void *)data->count,
 985				   data->filter_str);
 986}
 987
 988static struct event_trigger_ops traceon_trigger_ops = {
 989	.func			= traceon_trigger,
 990	.print			= traceon_trigger_print,
 991	.init			= event_trigger_init,
 992	.free			= event_trigger_free,
 993};
 994
 995static struct event_trigger_ops traceon_count_trigger_ops = {
 996	.func			= traceon_count_trigger,
 997	.print			= traceon_trigger_print,
 998	.init			= event_trigger_init,
 999	.free			= event_trigger_free,
1000};
1001
1002static struct event_trigger_ops traceoff_trigger_ops = {
1003	.func			= traceoff_trigger,
1004	.print			= traceoff_trigger_print,
1005	.init			= event_trigger_init,
1006	.free			= event_trigger_free,
1007};
1008
1009static struct event_trigger_ops traceoff_count_trigger_ops = {
1010	.func			= traceoff_count_trigger,
1011	.print			= traceoff_trigger_print,
1012	.init			= event_trigger_init,
1013	.free			= event_trigger_free,
1014};
1015
1016static struct event_trigger_ops *
1017onoff_get_trigger_ops(char *cmd, char *param)
1018{
1019	struct event_trigger_ops *ops;
1020
1021	/* we register both traceon and traceoff to this callback */
1022	if (strcmp(cmd, "traceon") == 0)
1023		ops = param ? &traceon_count_trigger_ops :
1024			&traceon_trigger_ops;
1025	else
1026		ops = param ? &traceoff_count_trigger_ops :
1027			&traceoff_trigger_ops;
1028
1029	return ops;
1030}
1031
1032static struct event_command trigger_traceon_cmd = {
1033	.name			= "traceon",
1034	.trigger_type		= ETT_TRACE_ONOFF,
1035	.func			= event_trigger_callback,
1036	.reg			= register_trigger,
1037	.unreg			= unregister_trigger,
1038	.get_trigger_ops	= onoff_get_trigger_ops,
1039	.set_filter		= set_trigger_filter,
1040};
1041
1042static struct event_command trigger_traceoff_cmd = {
1043	.name			= "traceoff",
1044	.trigger_type		= ETT_TRACE_ONOFF,
1045	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1046	.func			= event_trigger_callback,
1047	.reg			= register_trigger,
1048	.unreg			= unregister_trigger,
1049	.get_trigger_ops	= onoff_get_trigger_ops,
1050	.set_filter		= set_trigger_filter,
1051};
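/*
 * Usage example (hypothetical paths and event): with the two commands above
 * registered, tracing can be stopped the first five times a signal is
 * generated for pid 1 with:
 *
 *   # echo 'traceoff:5 if pid == 1' > \
 *		/sys/kernel/tracing/events/signal/signal_generate/trigger
 *
 * and the trigger removed again with a leading '!':
 *
 *   # echo '!traceoff' > \
 *		/sys/kernel/tracing/events/signal/signal_generate/trigger
 *
 * A minimal sketch of a new command built from the same generic helpers;
 * only ETT_EXAMPLE, the "example" name and example_trigger() are invented
 * here, everything else is the existing machinery used by traceon/traceoff:
 */
#if 0	/* illustration only, never compiled */
static void
example_trigger(struct event_trigger_data *data, void *rec,
		struct ring_buffer_event *event)
{
	/* do something cheap and non-sleeping here */
}

static int
example_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		      struct event_trigger_data *data)
{
	return event_trigger_print("example", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops example_trigger_ops = {
	.func			= example_trigger,
	.print			= example_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops *
example_get_trigger_ops(char *cmd, char *param)
{
	return &example_trigger_ops;
}

static struct event_command trigger_example_cmd = {
	.name			= "example",
	.trigger_type		= ETT_EXAMPLE,	/* hypothetical type bit */
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= example_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
#endif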
1052
1053#ifdef CONFIG_TRACER_SNAPSHOT
1054static void
1055snapshot_trigger(struct event_trigger_data *data, void *rec,
1056		 struct ring_buffer_event *event)
1057{
1058	struct trace_event_file *file = data->private_data;
1059
1060	if (file)
1061		tracing_snapshot_instance(file->tr);
1062	else
1063		tracing_snapshot();
1064}
1065
1066static void
1067snapshot_count_trigger(struct event_trigger_data *data, void *rec,
1068		       struct ring_buffer_event *event)
1069{
1070	if (!data->count)
1071		return;
1072
1073	if (data->count != -1)
1074		(data->count)--;
1075
1076	snapshot_trigger(data, rec, event);
1077}
1078
1079static int
1080register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
1081			  struct event_trigger_data *data,
1082			  struct trace_event_file *file)
1083{
1084	int ret = register_trigger(glob, ops, data, file);
1085
1086	if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
1087		unregister_trigger(glob, ops, data, file);
1088		ret = 0;
1089	}
1090
1091	return ret;
1092}
1093
1094static int
1095snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1096		       struct event_trigger_data *data)
1097{
1098	return event_trigger_print("snapshot", m, (void *)data->count,
1099				   data->filter_str);
1100}
1101
1102static struct event_trigger_ops snapshot_trigger_ops = {
1103	.func			= snapshot_trigger,
1104	.print			= snapshot_trigger_print,
1105	.init			= event_trigger_init,
1106	.free			= event_trigger_free,
1107};
1108
1109static struct event_trigger_ops snapshot_count_trigger_ops = {
1110	.func			= snapshot_count_trigger,
1111	.print			= snapshot_trigger_print,
1112	.init			= event_trigger_init,
1113	.free			= event_trigger_free,
1114};
1115
1116static struct event_trigger_ops *
1117snapshot_get_trigger_ops(char *cmd, char *param)
1118{
1119	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1120}
1121
1122static struct event_command trigger_snapshot_cmd = {
1123	.name			= "snapshot",
1124	.trigger_type		= ETT_SNAPSHOT,
1125	.func			= event_trigger_callback,
1126	.reg			= register_snapshot_trigger,
1127	.unreg			= unregister_trigger,
1128	.get_trigger_ops	= snapshot_get_trigger_ops,
1129	.set_filter		= set_trigger_filter,
1130};
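/*
 * Usage example (hypothetical path): 'echo snapshot:1 > .../trigger' on an
 * event captures the ring buffer into the snapshot buffer the first time
 * that event fires.  register_snapshot_trigger() above allocates the
 * snapshot buffer at registration time so the trigger itself never has to
 * allocate memory from the tracing hot path.
 */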
1131
1132static __init int register_trigger_snapshot_cmd(void)
1133{
1134	int ret;
1135
1136	ret = register_event_command(&trigger_snapshot_cmd);
1137	WARN_ON(ret < 0);
1138
1139	return ret;
1140}
1141#else
1142static __init int register_trigger_snapshot_cmd(void) { return 0; }
1143#endif /* CONFIG_TRACER_SNAPSHOT */
1144
1145#ifdef CONFIG_STACKTRACE
1146#ifdef CONFIG_UNWINDER_ORC
1147/* Skip 2:
1148 *   event_triggers_post_call()
1149 *   trace_event_raw_event_xxx()
1150 */
1151#define STACK_SKIP 2
1152#else
1153/*
1154 * Skip 4:
1155 *   stacktrace_trigger()
1156 *   event_triggers_post_call()
1157 *   trace_event_buffer_commit()
1158 *   trace_event_raw_event_xxx()
1159 */
1160#define STACK_SKIP 4
1161#endif
1162
1163static void
1164stacktrace_trigger(struct event_trigger_data *data, void *rec,
1165		   struct ring_buffer_event *event)
1166{
1167	trace_dump_stack(STACK_SKIP);
1168}
1169
1170static void
1171stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
1172			 struct ring_buffer_event *event)
1173{
1174	if (!data->count)
1175		return;
1176
1177	if (data->count != -1)
1178		(data->count)--;
1179
1180	stacktrace_trigger(data, rec, event);
1181}
1182
1183static int
1184stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1185			 struct event_trigger_data *data)
1186{
1187	return event_trigger_print("stacktrace", m, (void *)data->count,
1188				   data->filter_str);
1189}
1190
1191static struct event_trigger_ops stacktrace_trigger_ops = {
1192	.func			= stacktrace_trigger,
1193	.print			= stacktrace_trigger_print,
1194	.init			= event_trigger_init,
1195	.free			= event_trigger_free,
1196};
1197
1198static struct event_trigger_ops stacktrace_count_trigger_ops = {
1199	.func			= stacktrace_count_trigger,
1200	.print			= stacktrace_trigger_print,
1201	.init			= event_trigger_init,
1202	.free			= event_trigger_free,
1203};
1204
1205static struct event_trigger_ops *
1206stacktrace_get_trigger_ops(char *cmd, char *param)
1207{
1208	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1209}
1210
1211static struct event_command trigger_stacktrace_cmd = {
1212	.name			= "stacktrace",
1213	.trigger_type		= ETT_STACKTRACE,
1214	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1215	.func			= event_trigger_callback,
1216	.reg			= register_trigger,
1217	.unreg			= unregister_trigger,
1218	.get_trigger_ops	= stacktrace_get_trigger_ops,
1219	.set_filter		= set_trigger_filter,
1220};
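/*
 * Usage example (hypothetical trigger): 'echo "stacktrace:5 if
 * bytes_req >= 512" > .../events/kmem/kmalloc/trigger' dumps a kernel
 * stack trace into the trace buffer for the first five large allocations;
 * STACK_SKIP above hides the trigger plumbing itself from the reported
 * stack.
 */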
1221
1222static __init int register_trigger_stacktrace_cmd(void)
1223{
1224	int ret;
1225
1226	ret = register_event_command(&trigger_stacktrace_cmd);
1227	WARN_ON(ret < 0);
1228
1229	return ret;
1230}
1231#else
1232static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1233#endif /* CONFIG_STACKTRACE */
1234
1235static __init void unregister_trigger_traceon_traceoff_cmds(void)
1236{
1237	unregister_event_command(&trigger_traceon_cmd);
1238	unregister_event_command(&trigger_traceoff_cmd);
1239}
1240
1241static void
1242event_enable_trigger(struct event_trigger_data *data, void *rec,
1243		     struct ring_buffer_event *event)
1244{
1245	struct enable_trigger_data *enable_data = data->private_data;
1246
1247	if (enable_data->enable)
1248		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1249	else
1250		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1251}
1252
1253static void
1254event_enable_count_trigger(struct event_trigger_data *data, void *rec,
1255			   struct ring_buffer_event *event)
1256{
1257	struct enable_trigger_data *enable_data = data->private_data;
1258
1259	if (!data->count)
1260		return;
1261
1262	/* Skip if the event is already in the state we want to switch it to */
1263	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1264		return;
1265
1266	if (data->count != -1)
1267		(data->count)--;
1268
1269	event_enable_trigger(data, rec, event);
1270}
1271
1272int event_enable_trigger_print(struct seq_file *m,
1273			       struct event_trigger_ops *ops,
1274			       struct event_trigger_data *data)
1275{
1276	struct enable_trigger_data *enable_data = data->private_data;
1277
1278	seq_printf(m, "%s:%s:%s",
1279		   enable_data->hist ?
1280		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1281		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1282		   enable_data->file->event_call->class->system,
1283		   trace_event_name(enable_data->file->event_call));
1284
1285	if (data->count == -1)
1286		seq_puts(m, ":unlimited");
1287	else
1288		seq_printf(m, ":count=%ld", data->count);
1289
1290	if (data->filter_str)
1291		seq_printf(m, " if %s\n", data->filter_str);
1292	else
1293		seq_putc(m, '\n');
1294
1295	return 0;
1296}
1297
1298void event_enable_trigger_free(struct event_trigger_ops *ops,
1299			       struct event_trigger_data *data)
1300{
1301	struct enable_trigger_data *enable_data = data->private_data;
1302
1303	if (WARN_ON_ONCE(data->ref <= 0))
1304		return;
1305
1306	data->ref--;
1307	if (!data->ref) {
1308		/* Remove the SOFT_MODE flag */
1309		trace_event_enable_disable(enable_data->file, 0, 1);
1310		module_put(enable_data->file->event_call->mod);
1311		trigger_data_free(data);
1312		kfree(enable_data);
1313	}
1314}
1315
1316static struct event_trigger_ops event_enable_trigger_ops = {
1317	.func			= event_enable_trigger,
1318	.print			= event_enable_trigger_print,
1319	.init			= event_trigger_init,
1320	.free			= event_enable_trigger_free,
1321};
1322
1323static struct event_trigger_ops event_enable_count_trigger_ops = {
1324	.func			= event_enable_count_trigger,
1325	.print			= event_enable_trigger_print,
1326	.init			= event_trigger_init,
1327	.free			= event_enable_trigger_free,
1328};
1329
1330static struct event_trigger_ops event_disable_trigger_ops = {
1331	.func			= event_enable_trigger,
1332	.print			= event_enable_trigger_print,
1333	.init			= event_trigger_init,
1334	.free			= event_enable_trigger_free,
1335};
1336
1337static struct event_trigger_ops event_disable_count_trigger_ops = {
1338	.func			= event_enable_count_trigger,
1339	.print			= event_enable_trigger_print,
1340	.init			= event_trigger_init,
1341	.free			= event_enable_trigger_free,
1342};
1343
1344int event_enable_trigger_func(struct event_command *cmd_ops,
1345			      struct trace_event_file *file,
1346			      char *glob, char *cmd, char *param)
1347{
1348	struct trace_event_file *event_enable_file;
1349	struct enable_trigger_data *enable_data;
1350	struct event_trigger_data *trigger_data;
1351	struct event_trigger_ops *trigger_ops;
1352	struct trace_array *tr = file->tr;
1353	const char *system;
1354	const char *event;
1355	bool hist = false;
1356	char *trigger;
1357	char *number;
1358	bool enable;
1359	int ret;
1360
1361	if (!param)
1362		return -EINVAL;
1363
1364	/* separate the trigger from the filter (s:e:n [if filter]) */
1365	trigger = strsep(&param, " \t");
1366	if (!trigger)
1367		return -EINVAL;
1368
1369	system = strsep(&trigger, ":");
1370	if (!trigger)
1371		return -EINVAL;
1372
1373	event = strsep(&trigger, ":");
1374
1375	ret = -EINVAL;
1376	event_enable_file = find_event_file(tr, system, event);
1377	if (!event_enable_file)
1378		goto out;
1379
1380#ifdef CONFIG_HIST_TRIGGERS
1381	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1382		(strcmp(cmd, DISABLE_HIST_STR) == 0));
1383
1384	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1385		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1386#else
1387	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1388#endif
1389	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1390
1391	ret = -ENOMEM;
1392	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1393	if (!trigger_data)
1394		goto out;
1395
1396	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1397	if (!enable_data) {
1398		kfree(trigger_data);
1399		goto out;
1400	}
1401
1402	trigger_data->count = -1;
1403	trigger_data->ops = trigger_ops;
1404	trigger_data->cmd_ops = cmd_ops;
1405	INIT_LIST_HEAD(&trigger_data->list);
1406	RCU_INIT_POINTER(trigger_data->filter, NULL);
1407
1408	enable_data->hist = hist;
1409	enable_data->enable = enable;
1410	enable_data->file = event_enable_file;
1411	trigger_data->private_data = enable_data;
1412
1413	if (glob[0] == '!') {
1414		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1415		kfree(trigger_data);
1416		kfree(enable_data);
1417		ret = 0;
1418		goto out;
1419	}
1420
1421	/* Take a trigger_data reference so nothing frees it on failure */
1422	event_trigger_init(trigger_ops, trigger_data);
1423
1424	if (trigger) {
1425		number = strsep(&trigger, ":");
1426
1427		ret = -EINVAL;
1428		if (!strlen(number))
1429			goto out_free;
1430
1431		/*
1432		 * Parse the number following the ':' as the trigger's
1433		 * invocation count.
1434		 */
1435		ret = kstrtoul(number, 0, &trigger_data->count);
1436		if (ret)
1437			goto out_free;
1438	}
1439
1440	if (!param) /* if param is non-empty, it's supposed to be a filter */
1441		goto out_reg;
1442
1443	if (!cmd_ops->set_filter)
1444		goto out_reg;
1445
1446	ret = cmd_ops->set_filter(param, trigger_data, file);
1447	if (ret < 0)
1448		goto out_free;
1449
1450 out_reg:
1451	/* Don't let event modules unload while probe registered */
1452	ret = try_module_get(event_enable_file->event_call->mod);
1453	if (!ret) {
1454		ret = -EBUSY;
1455		goto out_free;
1456	}
1457
1458	ret = trace_event_enable_disable(event_enable_file, 1, 1);
1459	if (ret < 0)
1460		goto out_put;
1461	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1462	/*
1463	 * On success, reg() returns the number of triggers registered,
1464	 * but it returns zero if it registered nothing.
1465	 * Consider registering nothing a failure too.
1466	 */
1467	if (!ret) {
1468		ret = -ENOENT;
1469		goto out_disable;
1470	} else if (ret < 0)
1471		goto out_disable;
1472	/* Just return zero, not the number of registered triggers */
1473	ret = 0;
1474	event_trigger_free(trigger_ops, trigger_data);
1475 out:
1476	return ret;
1477
1478 out_disable:
1479	trace_event_enable_disable(event_enable_file, 0, 1);
1480 out_put:
1481	module_put(event_enable_file->event_call->mod);
1482 out_free:
1483	if (cmd_ops->set_filter)
1484		cmd_ops->set_filter(NULL, trigger_data, NULL);
1485	event_trigger_free(trigger_ops, trigger_data);
1486	kfree(enable_data);
1487	goto out;
1488}
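/*
 * Worked example (hypothetical input): for
 * 'enable_event:sched:sched_switch:2' written to some other event's
 * 'trigger' file, the parsing above yields system == "sched",
 * event == "sched_switch" and a count of 2.  An optional 'if ...' clause
 * (matched against the triggering event, not sched:sched_switch) is handed
 * to ->set_filter().  The target event is then soft-enabled and its module
 * pinned for as long as the trigger exists.
 */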
1489
1490int event_enable_register_trigger(char *glob,
1491				  struct event_trigger_ops *ops,
1492				  struct event_trigger_data *data,
1493				  struct trace_event_file *file)
1494{
1495	struct enable_trigger_data *enable_data = data->private_data;
1496	struct enable_trigger_data *test_enable_data;
1497	struct event_trigger_data *test;
1498	int ret = 0;
1499
1500	list_for_each_entry_rcu(test, &file->triggers, list) {
1501		test_enable_data = test->private_data;
1502		if (test_enable_data &&
1503		    (test->cmd_ops->trigger_type ==
1504		     data->cmd_ops->trigger_type) &&
1505		    (test_enable_data->file == enable_data->file)) {
1506			ret = -EEXIST;
1507			goto out;
1508		}
1509	}
1510
1511	if (data->ops->init) {
1512		ret = data->ops->init(data->ops, data);
1513		if (ret < 0)
1514			goto out;
1515	}
1516
1517	list_add_rcu(&data->list, &file->triggers);
1518	ret++;
1519
1520	update_cond_flag(file);
1521	if (trace_event_trigger_enable_disable(file, 1) < 0) {
1522		list_del_rcu(&data->list);
1523		update_cond_flag(file);
1524		ret--;
1525	}
1526out:
1527	return ret;
1528}
1529
1530void event_enable_unregister_trigger(char *glob,
1531				     struct event_trigger_ops *ops,
1532				     struct event_trigger_data *test,
1533				     struct trace_event_file *file)
1534{
1535	struct enable_trigger_data *test_enable_data = test->private_data;
1536	struct enable_trigger_data *enable_data;
1537	struct event_trigger_data *data;
1538	bool unregistered = false;
1539
1540	list_for_each_entry_rcu(data, &file->triggers, list) {
1541		enable_data = data->private_data;
1542		if (enable_data &&
1543		    (data->cmd_ops->trigger_type ==
1544		     test->cmd_ops->trigger_type) &&
1545		    (enable_data->file == test_enable_data->file)) {
1546			unregistered = true;
1547			list_del_rcu(&data->list);
1548			trace_event_trigger_enable_disable(file, 0);
1549			update_cond_flag(file);
1550			break;
1551		}
1552	}
1553
1554	if (unregistered && data->ops->free)
1555		data->ops->free(data->ops, data);
1556}
1557
1558static struct event_trigger_ops *
1559event_enable_get_trigger_ops(char *cmd, char *param)
1560{
1561	struct event_trigger_ops *ops;
1562	bool enable;
1563
1564#ifdef CONFIG_HIST_TRIGGERS
1565	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1566		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1567#else
1568	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1569#endif
1570	if (enable)
1571		ops = param ? &event_enable_count_trigger_ops :
1572			&event_enable_trigger_ops;
1573	else
1574		ops = param ? &event_disable_count_trigger_ops :
1575			&event_disable_trigger_ops;
1576
1577	return ops;
1578}
1579
1580static struct event_command trigger_enable_cmd = {
1581	.name			= ENABLE_EVENT_STR,
1582	.trigger_type		= ETT_EVENT_ENABLE,
1583	.func			= event_enable_trigger_func,
1584	.reg			= event_enable_register_trigger,
1585	.unreg			= event_enable_unregister_trigger,
1586	.get_trigger_ops	= event_enable_get_trigger_ops,
1587	.set_filter		= set_trigger_filter,
1588};
1589
1590static struct event_command trigger_disable_cmd = {
1591	.name			= DISABLE_EVENT_STR,
1592	.trigger_type		= ETT_EVENT_ENABLE,
1593	.func			= event_enable_trigger_func,
1594	.reg			= event_enable_register_trigger,
1595	.unreg			= event_enable_unregister_trigger,
1596	.get_trigger_ops	= event_enable_get_trigger_ops,
1597	.set_filter		= set_trigger_filter,
1598};
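/*
 * Usage example (hypothetical paths): the pair of commands above lets one
 * event switch another on or off, e.g.
 *
 *   # echo 'enable_event:kmem:kmalloc:1' > \
 *		/sys/kernel/tracing/events/syscalls/sys_enter_read/trigger
 *
 * soft-enables kmem:kmalloc the first time sys_enter_read is hit, and
 *
 *   # echo '!enable_event:kmem:kmalloc' > \
 *		/sys/kernel/tracing/events/syscalls/sys_enter_read/trigger
 *
 * tears the trigger down again.
 */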
1599
1600static __init void unregister_trigger_enable_disable_cmds(void)
1601{
1602	unregister_event_command(&trigger_enable_cmd);
1603	unregister_event_command(&trigger_disable_cmd);
1604}
1605
1606static __init int register_trigger_enable_disable_cmds(void)
1607{
1608	int ret;
1609
1610	ret = register_event_command(&trigger_enable_cmd);
1611	if (WARN_ON(ret < 0))
1612		return ret;
1613	ret = register_event_command(&trigger_disable_cmd);
1614	if (WARN_ON(ret < 0))
1615		unregister_trigger_enable_disable_cmds();
1616
1617	return ret;
1618}
1619
1620static __init int register_trigger_traceon_traceoff_cmds(void)
1621{
1622	int ret;
1623
1624	ret = register_event_command(&trigger_traceon_cmd);
1625	if (WARN_ON(ret < 0))
1626		return ret;
1627	ret = register_event_command(&trigger_traceoff_cmd);
1628	if (WARN_ON(ret < 0))
1629		unregister_trigger_traceon_traceoff_cmds();
1630
1631	return ret;
1632}
1633
1634__init int register_trigger_cmds(void)
1635{
1636	register_trigger_traceon_traceoff_cmds();
1637	register_trigger_snapshot_cmd();
1638	register_trigger_stacktrace_cmd();
1639	register_trigger_enable_disable_cmds();
1640	register_trigger_hist_enable_disable_cmds();
1641	register_trigger_hist_cmd();
1642
1643	return 0;
1644}