v4.6
   1/*
   2 * trace_events_trigger - trace event triggers
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License as published by
   6 * the Free Software Foundation; either version 2 of the License, or
   7 * (at your option) any later version.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write to the Free Software
  16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  17 *
  18 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
  19 */
  20
  21#include <linux/module.h>
  22#include <linux/ctype.h>
  23#include <linux/mutex.h>
  24#include <linux/slab.h>
  25
  26#include "trace.h"
  27
  28static LIST_HEAD(trigger_commands);
  29static DEFINE_MUTEX(trigger_cmd_mutex);
  30
  31void trigger_data_free(struct event_trigger_data *data)
  32{
  33	if (data->cmd_ops->set_filter)
  34		data->cmd_ops->set_filter(NULL, data, NULL);
  35
  36	synchronize_sched(); /* make sure current triggers exit before free */
  37	kfree(data);
  38}
  39
  40/**
  41 * event_triggers_call - Call triggers associated with a trace event
  42 * @file: The trace_event_file associated with the event
  43 * @rec: The trace entry for the event, NULL for unconditional invocation
  44 *
  45 * For each trigger associated with an event, invoke the trigger
  46 * function registered with the associated trigger command.  If rec is
  47 * non-NULL, it means that the trigger requires further processing and
  48 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
  49 * trigger has a filter associated with it, rec will be checked against
  50 * the filter and, if the record matches, the trigger will be invoked.
  51 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
  52 * in any case until the current event is written, the trigger
  53 * function isn't invoked but the bit associated with the deferred
  54 * trigger is set in the return value.
  55 *
  56 * Returns an enum event_trigger_type value containing a set bit for
  57 * any trigger that should be deferred, ETT_NONE if nothing to defer.
  58 *
  59 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
  60 *
  61 * Return: an enum event_trigger_type value containing a set bit for
  62 * any trigger that should be deferred, ETT_NONE if nothing to defer.
  63 */
  64enum event_trigger_type
  65event_triggers_call(struct trace_event_file *file, void *rec)
  66{
  67	struct event_trigger_data *data;
  68	enum event_trigger_type tt = ETT_NONE;
  69	struct event_filter *filter;
  70
  71	if (list_empty(&file->triggers))
  72		return tt;
  73
  74	list_for_each_entry_rcu(data, &file->triggers, list) {
  75		if (data->paused)
  76			continue;
  77		if (!rec) {
  78			data->ops->func(data, rec);
  79			continue;
  80		}
  81		filter = rcu_dereference_sched(data->filter);
  82		if (filter && !filter_match_preds(filter, rec))
  83			continue;
  84		if (event_command_post_trigger(data->cmd_ops)) {
  85			tt |= data->cmd_ops->trigger_type;
  86			continue;
  87		}
  88		data->ops->func(data, rec);
  89	}
  90	return tt;
  91}
  92EXPORT_SYMBOL_GPL(event_triggers_call);
  93
  94/**
  95 * event_triggers_post_call - Call 'post_triggers' for a trace event
  96 * @file: The trace_event_file associated with the event
  97 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
  98 * @rec: The trace entry for the event
  99 *
 100 * For each trigger associated with an event, invoke the trigger
 101 * function registered with the associated trigger command, if the
 102 * corresponding bit is set in the tt enum passed into this function.
 103 * See @event_triggers_call for details on how those bits are set.
 104 *
 105 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 106 */
 107void
 108event_triggers_post_call(struct trace_event_file *file,
 109			 enum event_trigger_type tt,
 110			 void *rec)
 111{
 112	struct event_trigger_data *data;
 113
 114	list_for_each_entry_rcu(data, &file->triggers, list) {
 115		if (data->paused)
 116			continue;
 117		if (data->cmd_ops->trigger_type & tt)
 118			data->ops->func(data, rec);
 119	}
 120}
 121EXPORT_SYMBOL_GPL(event_triggers_post_call);
 122
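
The deferred-trigger handshake documented above is easiest to see from the caller's side. Below is a minimal, hypothetical sketch of how a commit path pairs the two entry points around the ring-buffer write; example_commit is made up for illustration (the in-tree logic lives along the lines of the commit helpers in trace.h), but the calls match the v4.6 listing:

static void example_commit(struct trace_event_file *file,
			   struct ring_buffer_event *event, void *entry)
{
	enum event_trigger_type tt = ETT_NONE;

	/* run unconditional/filtered triggers, collect deferred bits */
	if (unlikely(file->flags & EVENT_FILE_FL_TRIGGER_COND))
		tt = event_triggers_call(file, entry);

	/* ... commit 'event' to the ring buffer here ... */

	/* the event is now written, so run the deferred 'post' triggers */
	if (tt)
		event_triggers_post_call(file, tt, entry);
}
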
 123#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)
 124
 125static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
 126{
 127	struct trace_event_file *event_file = event_file_data(m->private);
 128
 129	if (t == SHOW_AVAILABLE_TRIGGERS)
 130		return NULL;
 131
 132	return seq_list_next(t, &event_file->triggers, pos);
 133}
 134
 135static void *trigger_start(struct seq_file *m, loff_t *pos)
 136{
 137	struct trace_event_file *event_file;
 138
 139	/* ->stop() is called even if ->start() fails */
 140	mutex_lock(&event_mutex);
 141	event_file = event_file_data(m->private);
 142	if (unlikely(!event_file))
 143		return ERR_PTR(-ENODEV);
 144
 145	if (list_empty(&event_file->triggers))
 146		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
 147
 148	return seq_list_start(&event_file->triggers, *pos);
 149}
 150
 151static void trigger_stop(struct seq_file *m, void *t)
 152{
 153	mutex_unlock(&event_mutex);
 154}
 155
 156static int trigger_show(struct seq_file *m, void *v)
 157{
 158	struct event_trigger_data *data;
 159	struct event_command *p;
 160
 161	if (v == SHOW_AVAILABLE_TRIGGERS) {
 162		seq_puts(m, "# Available triggers:\n");
 163		seq_putc(m, '#');
 164		mutex_lock(&trigger_cmd_mutex);
 165		list_for_each_entry_reverse(p, &trigger_commands, list)
 166			seq_printf(m, " %s", p->name);
 167		seq_putc(m, '\n');
 168		mutex_unlock(&trigger_cmd_mutex);
 169		return 0;
 170	}
 171
 172	data = list_entry(v, struct event_trigger_data, list);
 173	data->ops->print(m, data->ops, data);
 174
 175	return 0;
 176}
 177
 178static const struct seq_operations event_triggers_seq_ops = {
 179	.start = trigger_start,
 180	.next = trigger_next,
 181	.stop = trigger_stop,
 182	.show = trigger_show,
 183};
 184
 185static int event_trigger_regex_open(struct inode *inode, struct file *file)
 186{
 187	int ret = 0;
 188
 189	mutex_lock(&event_mutex);
 190
 191	if (unlikely(!event_file_data(file))) {
 192		mutex_unlock(&event_mutex);
 193		return -ENODEV;
 194	}
 195
 196	if ((file->f_mode & FMODE_WRITE) &&
 197	    (file->f_flags & O_TRUNC)) {
 198		struct trace_event_file *event_file;
 199		struct event_command *p;
 200
 201		event_file = event_file_data(file);
 202
 203		list_for_each_entry(p, &trigger_commands, list) {
 204			if (p->unreg_all)
 205				p->unreg_all(event_file);
 206		}
 207	}
 208
 209	if (file->f_mode & FMODE_READ) {
 210		ret = seq_open(file, &event_triggers_seq_ops);
 211		if (!ret) {
 212			struct seq_file *m = file->private_data;
 213			m->private = file;
 214		}
 215	}
 216
 217	mutex_unlock(&event_mutex);
 218
 219	return ret;
 220}
 221
 222static int trigger_process_regex(struct trace_event_file *file, char *buff)
 223{
 224	char *command, *next = buff;
 225	struct event_command *p;
 226	int ret = -EINVAL;
 227
 228	command = strsep(&next, ": \t");
 229	command = (command[0] != '!') ? command : command + 1;
 230
 231	mutex_lock(&trigger_cmd_mutex);
 232	list_for_each_entry(p, &trigger_commands, list) {
 233		if (strcmp(p->name, command) == 0) {
 234			ret = p->func(p, file, buff, command, next);
 235			goto out_unlock;
 236		}
 237	}
 238 out_unlock:
 239	mutex_unlock(&trigger_cmd_mutex);
 240
 241	return ret;
 242}
 243
 244static ssize_t event_trigger_regex_write(struct file *file,
 245					 const char __user *ubuf,
 246					 size_t cnt, loff_t *ppos)
 247{
 248	struct trace_event_file *event_file;
 249	ssize_t ret;
 250	char *buf;
 251
 252	if (!cnt)
 253		return 0;
 254
 255	if (cnt >= PAGE_SIZE)
 256		return -EINVAL;
 257
 258	buf = memdup_user_nul(ubuf, cnt);
 259	if (IS_ERR(buf))
 260		return PTR_ERR(buf);
 261
 262	strim(buf);
 263
 264	mutex_lock(&event_mutex);
 265	event_file = event_file_data(file);
 266	if (unlikely(!event_file)) {
 267		mutex_unlock(&event_mutex);
 268		kfree(buf);
 269		return -ENODEV;
 270	}
 271	ret = trigger_process_regex(event_file, buf);
 272	mutex_unlock(&event_mutex);
 273
 274	kfree(buf);
 275	if (ret < 0)
 276		goto out;
 277
 278	*ppos += cnt;
 279	ret = cnt;
 280 out:
 281	return ret;
 282}
 283
 284static int event_trigger_regex_release(struct inode *inode, struct file *file)
 285{
 286	mutex_lock(&event_mutex);
 287
 288	if (file->f_mode & FMODE_READ)
 289		seq_release(inode, file);
 290
 291	mutex_unlock(&event_mutex);
 292
 293	return 0;
 294}
 295
 296static ssize_t
 297event_trigger_write(struct file *filp, const char __user *ubuf,
 298		    size_t cnt, loff_t *ppos)
 299{
 300	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
 301}
 302
 303static int
 304event_trigger_open(struct inode *inode, struct file *filp)
 305{
 306	return event_trigger_regex_open(inode, filp);
 307}
 308
 309static int
 310event_trigger_release(struct inode *inode, struct file *file)
 311{
 312	return event_trigger_regex_release(inode, file);
 313}
 314
 315const struct file_operations event_trigger_fops = {
 316	.open = event_trigger_open,
 317	.read = seq_read,
 318	.write = event_trigger_write,
 319	.llseek = tracing_lseek,
 320	.release = event_trigger_release,
 321};
 322
 323/*
 324 * Currently we only register event commands from __init, so mark this
 325 * __init too.
 326 */
 327__init int register_event_command(struct event_command *cmd)
 328{
 329	struct event_command *p;
 330	int ret = 0;
 331
 332	mutex_lock(&trigger_cmd_mutex);
 333	list_for_each_entry(p, &trigger_commands, list) {
 334		if (strcmp(cmd->name, p->name) == 0) {
 335			ret = -EBUSY;
 336			goto out_unlock;
 337		}
 338	}
 339	list_add(&cmd->list, &trigger_commands);
 340 out_unlock:
 341	mutex_unlock(&trigger_cmd_mutex);
 342
 343	return ret;
 344}
 345
 346/*
 347 * Currently we only unregister event commands from __init, so mark
 348 * this __init too.
 349 */
 350static __init int unregister_event_command(struct event_command *cmd)
 351{
 352	struct event_command *p, *n;
 353	int ret = -ENODEV;
 354
 355	mutex_lock(&trigger_cmd_mutex);
 356	list_for_each_entry_safe(p, n, &trigger_commands, list) {
 357		if (strcmp(cmd->name, p->name) == 0) {
 358			ret = 0;
 359			list_del_init(&p->list);
 360			goto out_unlock;
 361		}
 362	}
 363 out_unlock:
 364	mutex_unlock(&trigger_cmd_mutex);
 365
 366	return ret;
 367}
 368
 369/**
 370 * event_trigger_print - Generic event_trigger_ops @print implementation
 371 * @name: The name of the event trigger
 372 * @m: The seq_file being printed to
 373 * @data: Trigger-specific data
 374 * @filter_str: filter_str to print, if present
 375 *
 376 * Common implementation for event triggers to print themselves.
 377 *
 378 * Usually wrapped by a function that simply sets the @name of the
 379 * trigger command and then invokes this.
 380 *
 381 * Return: 0 on success, errno otherwise
 382 */
 383static int
 384event_trigger_print(const char *name, struct seq_file *m,
 385		    void *data, char *filter_str)
 386{
 387	long count = (long)data;
 388
 389	seq_puts(m, name);
 390
 391	if (count == -1)
 392		seq_puts(m, ":unlimited");
 393	else
 394		seq_printf(m, ":count=%ld", count);
 395
 396	if (filter_str)
 397		seq_printf(m, " if %s\n", filter_str);
 398	else
 399		seq_putc(m, '\n');
 400
 401	return 0;
 402}
 403
 404/**
 405 * event_trigger_init - Generic event_trigger_ops @init implementation
 406 * @ops: The trigger ops associated with the trigger
 407 * @data: Trigger-specific data
 408 *
 409 * Common implementation of event trigger initialization.
 410 *
 411 * Usually used directly as the @init method in event trigger
 412 * implementations.
 413 *
 414 * Return: 0 on success, errno otherwise
 415 */
 416int event_trigger_init(struct event_trigger_ops *ops,
 417		       struct event_trigger_data *data)
 418{
 419	data->ref++;
 420	return 0;
 421}
 422
 423/**
 424 * event_trigger_free - Generic event_trigger_ops @free implementation
 425 * @ops: The trigger ops associated with the trigger
 426 * @data: Trigger-specific data
 427 *
 428 * Common implementation of event trigger de-initialization.
 429 *
 430 * Usually used directly as the @free method in event trigger
 431 * implementations.
 432 */
 433static void
 434event_trigger_free(struct event_trigger_ops *ops,
 435		   struct event_trigger_data *data)
 436{
 437	if (WARN_ON_ONCE(data->ref <= 0))
 438		return;
 439
 440	data->ref--;
 441	if (!data->ref)
 442		trigger_data_free(data);
 443}
 444
 445int trace_event_trigger_enable_disable(struct trace_event_file *file,
 446				       int trigger_enable)
 447{
 448	int ret = 0;
 449
 450	if (trigger_enable) {
 451		if (atomic_inc_return(&file->tm_ref) > 1)
 452			return ret;
 453		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
 454		ret = trace_event_enable_disable(file, 1, 1);
 455	} else {
 456		if (atomic_dec_return(&file->tm_ref) > 0)
 457			return ret;
 458		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
 459		ret = trace_event_enable_disable(file, 0, 1);
 460	}
 461
 462	return ret;
 463}
 464
 465/**
 466 * clear_event_triggers - Clear all triggers associated with a trace array
 467 * @tr: The trace array to clear
 468 *
 469 * For each trigger, the triggering event has its tm_ref decremented
 470 * via trace_event_trigger_enable_disable(), and any associated event
 471 * (in the case of enable/disable_event triggers) will have its sm_ref
 472 * decremented via free()->trace_event_enable_disable().  That
 473 * combination effectively reverses the soft-mode/trigger state added
 474 * by trigger registration.
 475 *
 476 * Must be called with event_mutex held.
 477 */
 478void
 479clear_event_triggers(struct trace_array *tr)
 480{
 481	struct trace_event_file *file;
 482
 483	list_for_each_entry(file, &tr->events, list) {
 484		struct event_trigger_data *data;
 485		list_for_each_entry_rcu(data, &file->triggers, list) {
 486			trace_event_trigger_enable_disable(file, 0);
 487			if (data->ops->free)
 488				data->ops->free(data->ops, data);
 489		}
 490	}
 491}
 492
 493/**
 494 * update_cond_flag - Set or reset the TRIGGER_COND bit
 495 * @file: The trace_event_file associated with the event
 496 *
 497 * If an event has triggers and any of those triggers has a filter or
 498 * a post_trigger, trigger invocation needs to be deferred until after
 499 * the current event has logged its data, and the event should have
 500 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 501 * cleared.
 502 */
 503void update_cond_flag(struct trace_event_file *file)
 504{
 505	struct event_trigger_data *data;
 506	bool set_cond = false;
 507
 508	list_for_each_entry_rcu(data, &file->triggers, list) {
 509		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
 510		    event_command_needs_rec(data->cmd_ops)) {
 511			set_cond = true;
 512			break;
 513		}
 514	}
 515
 516	if (set_cond)
 517		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
 518	else
 519		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
 520}
 521
 522/**
 523 * register_trigger - Generic event_command @reg implementation
 524 * @glob: The raw string used to register the trigger
 525 * @ops: The trigger ops associated with the trigger
 526 * @data: Trigger-specific data to associate with the trigger
 527 * @file: The trace_event_file associated with the event
 528 *
 529 * Common implementation for event trigger registration.
 530 *
 531 * Usually used directly as the @reg method in event command
 532 * implementations.
 533 *
 534 * Return: 0 on success, errno otherwise
 535 */
 536static int register_trigger(char *glob, struct event_trigger_ops *ops,
 537			    struct event_trigger_data *data,
 538			    struct trace_event_file *file)
 539{
 540	struct event_trigger_data *test;
 541	int ret = 0;
 542
 543	list_for_each_entry_rcu(test, &file->triggers, list) {
 544		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
 545			ret = -EEXIST;
 546			goto out;
 547		}
 548	}
 549
 550	if (data->ops->init) {
 551		ret = data->ops->init(data->ops, data);
 552		if (ret < 0)
 553			goto out;
 554	}
 555
 556	list_add_rcu(&data->list, &file->triggers);
 557	ret++;
 558
 559	update_cond_flag(file);
 560	if (trace_event_trigger_enable_disable(file, 1) < 0) {
 561		list_del_rcu(&data->list);
 562		update_cond_flag(file);
 563		ret--;
 564	}
 565out:
 566	return ret;
 567}
 568
 569/**
 570 * unregister_trigger - Generic event_command @unreg implementation
 571 * @glob: The raw string used to register the trigger
 572 * @ops: The trigger ops associated with the trigger
 573 * @test: Trigger-specific data used to find the trigger to remove
 574 * @file: The trace_event_file associated with the event
 575 *
 576 * Common implementation for event trigger unregistration.
 577 *
 578 * Usually used directly as the @unreg method in event command
 579 * implementations.
 580 */
 581void unregister_trigger(char *glob, struct event_trigger_ops *ops,
 582			struct event_trigger_data *test,
 583			struct trace_event_file *file)
 584{
 585	struct event_trigger_data *data;
 586	bool unregistered = false;
 587
 588	list_for_each_entry_rcu(data, &file->triggers, list) {
 589		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
 590			unregistered = true;
 591			list_del_rcu(&data->list);
 592			trace_event_trigger_enable_disable(file, 0);
 593			update_cond_flag(file);
 594			break;
 595		}
 596	}
 597
 598	if (unregistered && data->ops->free)
 599		data->ops->free(data->ops, data);
 600}
 601
 602/**
 603 * event_trigger_callback - Generic event_command @func implementation
 604 * @cmd_ops: The command ops, used for trigger registration
 605 * @file: The trace_event_file associated with the event
 606 * @glob: The raw string used to register the trigger
 607 * @cmd: The cmd portion of the string used to register the trigger
 608 * @param: The params portion of the string used to register the trigger
 609 *
 610 * Common implementation for event command parsing and trigger
 611 * instantiation.
 612 *
 613 * Usually used directly as the @func method in event command
 614 * implementations.
 615 *
 616 * Return: 0 on success, errno otherwise
 617 */
 618static int
 619event_trigger_callback(struct event_command *cmd_ops,
 620		       struct trace_event_file *file,
 621		       char *glob, char *cmd, char *param)
 622{
 623	struct event_trigger_data *trigger_data;
 624	struct event_trigger_ops *trigger_ops;
 625	char *trigger = NULL;
 626	char *number;
 627	int ret;
 628
 629	/* separate the trigger from the filter (t:n [if filter]) */
 630	if (param && isdigit(param[0]))
 631		trigger = strsep(&param, " \t");
 632
 633	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
 634
 635	ret = -ENOMEM;
 636	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
 637	if (!trigger_data)
 638		goto out;
 639
 640	trigger_data->count = -1;
 641	trigger_data->ops = trigger_ops;
 642	trigger_data->cmd_ops = cmd_ops;
 643	INIT_LIST_HEAD(&trigger_data->list);
 644
 645	if (glob[0] == '!') {
 646		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
 647		kfree(trigger_data);
 648		ret = 0;
 649		goto out;
 650	}
 651
 652	if (trigger) {
 653		number = strsep(&trigger, ":");
 654
 655		ret = -EINVAL;
 656		if (!strlen(number))
 657			goto out_free;
 658
 659		/*
 660		 * We use the callback data field (which is a pointer)
 661		 * as our counter.
 662		 */
 663		ret = kstrtoul(number, 0, &trigger_data->count);
 664		if (ret)
 665			goto out_free;
 666	}
 667
 668	if (!param) /* if param is non-empty, it's supposed to be a filter */
 669		goto out_reg;
 670
 671	if (!cmd_ops->set_filter)
 672		goto out_reg;
 673
 674	ret = cmd_ops->set_filter(param, trigger_data, file);
 675	if (ret < 0)
 676		goto out_free;
 677
 678 out_reg:
 679	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
 680	/*
 681	 * The above returns on success the # of functions enabled,
 682	 * but if it didn't find any functions it returns zero.
 683	 * Consider no functions a failure too.
 684	 */
 685	if (!ret) {
 686		ret = -ENOENT;
 687		goto out_free;
 688	} else if (ret < 0)
 689		goto out_free;
 690	ret = 0;
 691 out:
 692	return ret;
 693
 694 out_free:
 695	if (cmd_ops->set_filter)
 696		cmd_ops->set_filter(NULL, trigger_data, NULL);
 697	kfree(trigger_data);
 698	goto out;
 699}
 700
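
To make the parsing above concrete, a hedged walk-through with a made-up trigger string (the command, count, and filter are only examples):

/*
 * Illustrative only.  For a write of
 *
 *     traceoff:5 if prev_pid == 0
 *
 * trigger_process_regex() splits off the command name:
 *     command = "traceoff",  next = "5 if prev_pid == 0"
 * and event_trigger_callback() then peels the rest apart:
 *     trigger = "5"                 -> trigger_data->count = 5
 *     param   = "if prev_pid == 0"  -> handed to cmd_ops->set_filter()
 * A leading '!' (e.g. "!traceoff") takes the cmd_ops->unreg() path
 * instead and removes the existing trigger.
 */
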
 701/**
 702 * set_trigger_filter - Generic event_command @set_filter implementation
 703 * @filter_str: The filter string for the trigger, NULL to remove filter
 704 * @trigger_data: Trigger-specific data
 705 * @file: The trace_event_file associated with the event
 706 *
 707 * Common implementation for event command filter parsing and filter
 708 * instantiation.
 709 *
 710 * Usually used directly as the @set_filter method in event command
 711 * implementations.
 712 *
 713 * Also used to remove a filter (if filter_str = NULL).
 714 *
 715 * Return: 0 on success, errno otherwise
 716 */
 717int set_trigger_filter(char *filter_str,
 718		       struct event_trigger_data *trigger_data,
 719		       struct trace_event_file *file)
 720{
 721	struct event_trigger_data *data = trigger_data;
 722	struct event_filter *filter = NULL, *tmp;
 723	int ret = -EINVAL;
 724	char *s;
 725
 726	if (!filter_str) /* clear the current filter */
 727		goto assign;
 728
 729	s = strsep(&filter_str, " \t");
 730
 731	if (!strlen(s) || strcmp(s, "if") != 0)
 732		goto out;
 733
 734	if (!filter_str)
 735		goto out;
 736
 737	/* The filter is for the 'trigger' event, not the triggered event */
 738	ret = create_event_filter(file->event_call, filter_str, false, &filter);
 739	if (ret)
 740		goto out;
 741 assign:
 742	tmp = rcu_access_pointer(data->filter);
 743
 744	rcu_assign_pointer(data->filter, filter);
 745
 746	if (tmp) {
 747		/* Make sure the call is done with the filter */
 748		synchronize_sched();
 749		free_event_filter(tmp);
 750	}
 751
 752	kfree(data->filter_str);
 753	data->filter_str = NULL;
 754
 755	if (filter_str) {
 756		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
 757		if (!data->filter_str) {
 758			free_event_filter(rcu_access_pointer(data->filter));
 759			data->filter = NULL;
 760			ret = -ENOMEM;
 761		}
 762	}
 763 out:
 764	return ret;
 765}
 766
 767static void
 768traceon_trigger(struct event_trigger_data *data, void *rec)
 769{
 770	if (tracing_is_on())
 771		return;
 772
 773	tracing_on();
 774}
 775
 776static void
 777traceon_count_trigger(struct event_trigger_data *data, void *rec)
 778{
 779	if (tracing_is_on())
 780		return;
 781
 782	if (!data->count)
 783		return;
 784
 785	if (data->count != -1)
 786		(data->count)--;
 787
 788	tracing_on();
 789}
 790
 791static void
 792traceoff_trigger(struct event_trigger_data *data, void *rec)
 793{
 794	if (!tracing_is_on())
 795		return;
 796
 797	tracing_off();
 798}
 799
 800static void
 801traceoff_count_trigger(struct event_trigger_data *data, void *rec)
 802{
 803	if (!tracing_is_on())
 804		return;
 805
 806	if (!data->count)
 807		return;
 808
 809	if (data->count != -1)
 810		(data->count)--;
 811
 812	tracing_off();
 813}
 814
 815static int
 816traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
 817		      struct event_trigger_data *data)
 818{
 819	return event_trigger_print("traceon", m, (void *)data->count,
 820				   data->filter_str);
 821}
 822
 823static int
 824traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
 825		       struct event_trigger_data *data)
 826{
 827	return event_trigger_print("traceoff", m, (void *)data->count,
 828				   data->filter_str);
 829}
 830
 831static struct event_trigger_ops traceon_trigger_ops = {
 832	.func			= traceon_trigger,
 833	.print			= traceon_trigger_print,
 834	.init			= event_trigger_init,
 835	.free			= event_trigger_free,
 836};
 837
 838static struct event_trigger_ops traceon_count_trigger_ops = {
 839	.func			= traceon_count_trigger,
 840	.print			= traceon_trigger_print,
 841	.init			= event_trigger_init,
 842	.free			= event_trigger_free,
 843};
 844
 845static struct event_trigger_ops traceoff_trigger_ops = {
 846	.func			= traceoff_trigger,
 847	.print			= traceoff_trigger_print,
 848	.init			= event_trigger_init,
 849	.free			= event_trigger_free,
 850};
 851
 852static struct event_trigger_ops traceoff_count_trigger_ops = {
 853	.func			= traceoff_count_trigger,
 854	.print			= traceoff_trigger_print,
 855	.init			= event_trigger_init,
 856	.free			= event_trigger_free,
 857};
 858
 859static struct event_trigger_ops *
 860onoff_get_trigger_ops(char *cmd, char *param)
 861{
 862	struct event_trigger_ops *ops;
 863
 864	/* we register both traceon and traceoff to this callback */
 865	if (strcmp(cmd, "traceon") == 0)
 866		ops = param ? &traceon_count_trigger_ops :
 867			&traceon_trigger_ops;
 868	else
 869		ops = param ? &traceoff_count_trigger_ops :
 870			&traceoff_trigger_ops;
 871
 872	return ops;
 873}
 874
 875static struct event_command trigger_traceon_cmd = {
 876	.name			= "traceon",
 877	.trigger_type		= ETT_TRACE_ONOFF,
 878	.func			= event_trigger_callback,
 879	.reg			= register_trigger,
 880	.unreg			= unregister_trigger,
 881	.get_trigger_ops	= onoff_get_trigger_ops,
 882	.set_filter		= set_trigger_filter,
 883};
 884
 885static struct event_command trigger_traceoff_cmd = {
 886	.name			= "traceoff",
 887	.trigger_type		= ETT_TRACE_ONOFF,
 888	.func			= event_trigger_callback,
 889	.reg			= register_trigger,
 890	.unreg			= unregister_trigger,
 891	.get_trigger_ops	= onoff_get_trigger_ops,
 892	.set_filter		= set_trigger_filter,
 893};
 894
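
With these two event_commands registered, the triggers become writable through each event's 'trigger' file in tracefs. A hedged usage sketch (paths shown relative to the tracing directory, event chosen arbitrarily):

/*
 * Example only:
 *   # echo 'traceoff'                      > events/sched/sched_switch/trigger
 *   # echo 'traceoff:1 if prev_prio < 100' > events/sched/sched_switch/trigger
 *   # echo '!traceoff'                     > events/sched/sched_switch/trigger
 * The first form fires on every hit of the event, the ":1" form at most
 * once (a non-NULL param makes onoff_get_trigger_ops() pick the
 * *_count_trigger_ops), and the '!' form removes the trigger again.
 */
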
 895#ifdef CONFIG_TRACER_SNAPSHOT
 896static void
 897snapshot_trigger(struct event_trigger_data *data, void *rec)
 898{
 899	tracing_snapshot();
 900}
 901
 902static void
 903snapshot_count_trigger(struct event_trigger_data *data, void *rec)
 904{
 905	if (!data->count)
 906		return;
 907
 908	if (data->count != -1)
 909		(data->count)--;
 910
 911	snapshot_trigger(data, rec);
 912}
 913
 914static int
 915register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
 916			  struct event_trigger_data *data,
 917			  struct trace_event_file *file)
 918{
 919	int ret = register_trigger(glob, ops, data, file);
 920
 921	if (ret > 0 && tracing_alloc_snapshot() != 0) {
 922		unregister_trigger(glob, ops, data, file);
 923		ret = 0;
 924	}
 925
 926	return ret;
 927}
 928
 929static int
 930snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
 931		       struct event_trigger_data *data)
 932{
 933	return event_trigger_print("snapshot", m, (void *)data->count,
 934				   data->filter_str);
 935}
 936
 937static struct event_trigger_ops snapshot_trigger_ops = {
 938	.func			= snapshot_trigger,
 939	.print			= snapshot_trigger_print,
 940	.init			= event_trigger_init,
 941	.free			= event_trigger_free,
 942};
 943
 944static struct event_trigger_ops snapshot_count_trigger_ops = {
 945	.func			= snapshot_count_trigger,
 946	.print			= snapshot_trigger_print,
 947	.init			= event_trigger_init,
 948	.free			= event_trigger_free,
 949};
 950
 951static struct event_trigger_ops *
 952snapshot_get_trigger_ops(char *cmd, char *param)
 953{
 954	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
 955}
 956
 957static struct event_command trigger_snapshot_cmd = {
 958	.name			= "snapshot",
 959	.trigger_type		= ETT_SNAPSHOT,
 960	.func			= event_trigger_callback,
 961	.reg			= register_snapshot_trigger,
 962	.unreg			= unregister_trigger,
 963	.get_trigger_ops	= snapshot_get_trigger_ops,
 964	.set_filter		= set_trigger_filter,
 965};
 966
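
A similar hedged usage sketch for the snapshot command (event chosen arbitrarily):

/*
 * Example only:
 *   # echo 'snapshot:1' > events/sched/sched_switch/trigger
 * register_snapshot_trigger() also makes sure the snapshot buffer is
 * allocated (tracing_alloc_snapshot()) and backs the registration out
 * again if that allocation fails.
 */
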
 967static __init int register_trigger_snapshot_cmd(void)
 968{
 969	int ret;
 970
 971	ret = register_event_command(&trigger_snapshot_cmd);
 972	WARN_ON(ret < 0);
 973
 974	return ret;
 975}
 976#else
 977static __init int register_trigger_snapshot_cmd(void) { return 0; }
 978#endif /* CONFIG_TRACER_SNAPSHOT */
 979
 980#ifdef CONFIG_STACKTRACE
 981/*
 982 * Skip 3:
 983 *   stacktrace_trigger()
 984 *   event_triggers_post_call()
 985 *   trace_event_raw_event_xxx()
 986 */
 987#define STACK_SKIP 3
 988
 989static void
 990stacktrace_trigger(struct event_trigger_data *data, void *rec)
 991{
 992	trace_dump_stack(STACK_SKIP);
 993}
 994
 995static void
 996stacktrace_count_trigger(struct event_trigger_data *data, void *rec)
 997{
 998	if (!data->count)
 999		return;
1000
1001	if (data->count != -1)
1002		(data->count)--;
1003
1004	stacktrace_trigger(data, rec);
1005}
1006
1007static int
1008stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1009			 struct event_trigger_data *data)
1010{
1011	return event_trigger_print("stacktrace", m, (void *)data->count,
1012				   data->filter_str);
1013}
1014
1015static struct event_trigger_ops stacktrace_trigger_ops = {
1016	.func			= stacktrace_trigger,
1017	.print			= stacktrace_trigger_print,
1018	.init			= event_trigger_init,
1019	.free			= event_trigger_free,
1020};
1021
1022static struct event_trigger_ops stacktrace_count_trigger_ops = {
1023	.func			= stacktrace_count_trigger,
1024	.print			= stacktrace_trigger_print,
1025	.init			= event_trigger_init,
1026	.free			= event_trigger_free,
1027};
1028
1029static struct event_trigger_ops *
1030stacktrace_get_trigger_ops(char *cmd, char *param)
1031{
1032	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1033}
1034
1035static struct event_command trigger_stacktrace_cmd = {
1036	.name			= "stacktrace",
1037	.trigger_type		= ETT_STACKTRACE,
1038	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1039	.func			= event_trigger_callback,
1040	.reg			= register_trigger,
1041	.unreg			= unregister_trigger,
1042	.get_trigger_ops	= stacktrace_get_trigger_ops,
1043	.set_filter		= set_trigger_filter,
1044};
1045
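
And a hedged usage sketch for the stacktrace command (event and count are only examples):

/*
 * Example only:
 *   # echo 'stacktrace:5' > events/sched/sched_switch/trigger
 * Because trigger_stacktrace_cmd sets EVENT_CMD_FL_POST_TRIGGER, the
 * dump runs from event_triggers_post_call(), i.e. after the triggering
 * event has been written; that is why STACK_SKIP above counts
 * event_triggers_post_call() among the frames to skip.
 */
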
1046static __init int register_trigger_stacktrace_cmd(void)
1047{
1048	int ret;
1049
1050	ret = register_event_command(&trigger_stacktrace_cmd);
1051	WARN_ON(ret < 0);
1052
1053	return ret;
1054}
1055#else
1056static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1057#endif /* CONFIG_STACKTRACE */
1058
1059static __init void unregister_trigger_traceon_traceoff_cmds(void)
1060{
1061	unregister_event_command(&trigger_traceon_cmd);
1062	unregister_event_command(&trigger_traceoff_cmd);
1063}
1064
1065/* Avoid typos */
1066#define ENABLE_EVENT_STR	"enable_event"
1067#define DISABLE_EVENT_STR	"disable_event"
1068
1069struct enable_trigger_data {
1070	struct trace_event_file		*file;
1071	bool				enable;
1072};
1073
1074static void
1075event_enable_trigger(struct event_trigger_data *data, void *rec)
1076{
1077	struct enable_trigger_data *enable_data = data->private_data;
1078
1079	if (enable_data->enable)
1080		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1081	else
1082		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1083}
1084
1085static void
1086event_enable_count_trigger(struct event_trigger_data *data, void *rec)
1087{
1088	struct enable_trigger_data *enable_data = data->private_data;
1089
1090	if (!data->count)
1091		return;
1092
1093	/* Skip if the event is in a state we want to switch to */
1094	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1095		return;
1096
1097	if (data->count != -1)
1098		(data->count)--;
1099
1100	event_enable_trigger(data, rec);
1101}
1102
1103static int
1104event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1105			   struct event_trigger_data *data)
1106{
1107	struct enable_trigger_data *enable_data = data->private_data;
1108
1109	seq_printf(m, "%s:%s:%s",
1110		   enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
1111		   enable_data->file->event_call->class->system,
1112		   trace_event_name(enable_data->file->event_call));
1113
1114	if (data->count == -1)
1115		seq_puts(m, ":unlimited");
1116	else
1117		seq_printf(m, ":count=%ld", data->count);
1118
1119	if (data->filter_str)
1120		seq_printf(m, " if %s\n", data->filter_str);
1121	else
1122		seq_putc(m, '\n');
1123
1124	return 0;
1125}
1126
1127static void
1128event_enable_trigger_free(struct event_trigger_ops *ops,
1129			  struct event_trigger_data *data)
1130{
1131	struct enable_trigger_data *enable_data = data->private_data;
1132
1133	if (WARN_ON_ONCE(data->ref <= 0))
1134		return;
1135
1136	data->ref--;
1137	if (!data->ref) {
1138		/* Remove the SOFT_MODE flag */
1139		trace_event_enable_disable(enable_data->file, 0, 1);
1140		module_put(enable_data->file->event_call->mod);
1141		trigger_data_free(data);
1142		kfree(enable_data);
1143	}
1144}
1145
1146static struct event_trigger_ops event_enable_trigger_ops = {
1147	.func			= event_enable_trigger,
1148	.print			= event_enable_trigger_print,
1149	.init			= event_trigger_init,
1150	.free			= event_enable_trigger_free,
1151};
1152
1153static struct event_trigger_ops event_enable_count_trigger_ops = {
1154	.func			= event_enable_count_trigger,
1155	.print			= event_enable_trigger_print,
1156	.init			= event_trigger_init,
1157	.free			= event_enable_trigger_free,
1158};
1159
1160static struct event_trigger_ops event_disable_trigger_ops = {
1161	.func			= event_enable_trigger,
1162	.print			= event_enable_trigger_print,
1163	.init			= event_trigger_init,
1164	.free			= event_enable_trigger_free,
1165};
1166
1167static struct event_trigger_ops event_disable_count_trigger_ops = {
1168	.func			= event_enable_count_trigger,
1169	.print			= event_enable_trigger_print,
1170	.init			= event_trigger_init,
1171	.free			= event_enable_trigger_free,
1172};
1173
1174static int
1175event_enable_trigger_func(struct event_command *cmd_ops,
1176			  struct trace_event_file *file,
1177			  char *glob, char *cmd, char *param)
1178{
1179	struct trace_event_file *event_enable_file;
1180	struct enable_trigger_data *enable_data;
1181	struct event_trigger_data *trigger_data;
1182	struct event_trigger_ops *trigger_ops;
1183	struct trace_array *tr = file->tr;
1184	const char *system;
1185	const char *event;
1186	char *trigger;
1187	char *number;
1188	bool enable;
1189	int ret;
1190
1191	if (!param)
1192		return -EINVAL;
1193
1194	/* separate the trigger from the filter (s:e:n [if filter]) */
1195	trigger = strsep(&param, " \t");
1196	if (!trigger)
1197		return -EINVAL;
1198
1199	system = strsep(&trigger, ":");
1200	if (!trigger)
1201		return -EINVAL;
1202
1203	event = strsep(&trigger, ":");
1204
1205	ret = -EINVAL;
1206	event_enable_file = find_event_file(tr, system, event);
1207	if (!event_enable_file)
1208		goto out;
1209
1210	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1211
1212	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1213
1214	ret = -ENOMEM;
1215	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1216	if (!trigger_data)
1217		goto out;
1218
1219	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1220	if (!enable_data) {
1221		kfree(trigger_data);
1222		goto out;
1223	}
1224
1225	trigger_data->count = -1;
1226	trigger_data->ops = trigger_ops;
1227	trigger_data->cmd_ops = cmd_ops;
1228	INIT_LIST_HEAD(&trigger_data->list);
1229	RCU_INIT_POINTER(trigger_data->filter, NULL);
1230
1231	enable_data->enable = enable;
1232	enable_data->file = event_enable_file;
1233	trigger_data->private_data = enable_data;
1234
1235	if (glob[0] == '!') {
1236		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1237		kfree(trigger_data);
1238		kfree(enable_data);
1239		ret = 0;
1240		goto out;
1241	}
1242
1243	if (trigger) {
1244		number = strsep(&trigger, ":");
1245
1246		ret = -EINVAL;
1247		if (!strlen(number))
1248			goto out_free;
1249
1250		/*
1251		 * We use the callback data field (which is a pointer)
1252		 * as our counter.
1253		 */
1254		ret = kstrtoul(number, 0, &trigger_data->count);
1255		if (ret)
1256			goto out_free;
1257	}
1258
1259	if (!param) /* if param is non-empty, it's supposed to be a filter */
1260		goto out_reg;
1261
1262	if (!cmd_ops->set_filter)
1263		goto out_reg;
1264
1265	ret = cmd_ops->set_filter(param, trigger_data, file);
1266	if (ret < 0)
1267		goto out_free;
1268
1269 out_reg:
1270	/* Don't let event modules unload while probe registered */
1271	ret = try_module_get(event_enable_file->event_call->mod);
1272	if (!ret) {
1273		ret = -EBUSY;
1274		goto out_free;
1275	}
1276
1277	ret = trace_event_enable_disable(event_enable_file, 1, 1);
1278	if (ret < 0)
1279		goto out_put;
1280	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1281	/*
1282	 * The above returns on success the # of functions enabled,
1283	 * but if it didn't find any functions it returns zero.
1284	 * Consider no functions a failure too.
1285	 */
1286	if (!ret) {
1287		ret = -ENOENT;
1288		goto out_disable;
1289	} else if (ret < 0)
1290		goto out_disable;
1291	/* Just return zero, not the number of enabled functions */
1292	ret = 0;
1293 out:
1294	return ret;
1295
1296 out_disable:
1297	trace_event_enable_disable(event_enable_file, 0, 1);
1298 out_put:
1299	module_put(event_enable_file->event_call->mod);
1300 out_free:
1301	if (cmd_ops->set_filter)
1302		cmd_ops->set_filter(NULL, trigger_data, NULL);
1303	kfree(trigger_data);
1304	kfree(enable_data);
1305	goto out;
1306}
1307
1308static int event_enable_register_trigger(char *glob,
1309					 struct event_trigger_ops *ops,
1310					 struct event_trigger_data *data,
1311					 struct trace_event_file *file)
1312{
1313	struct enable_trigger_data *enable_data = data->private_data;
1314	struct enable_trigger_data *test_enable_data;
1315	struct event_trigger_data *test;
1316	int ret = 0;
1317
1318	list_for_each_entry_rcu(test, &file->triggers, list) {
1319		test_enable_data = test->private_data;
1320		if (test_enable_data &&
1321		    (test_enable_data->file == enable_data->file)) {
1322			ret = -EEXIST;
1323			goto out;
1324		}
1325	}
1326
1327	if (data->ops->init) {
1328		ret = data->ops->init(data->ops, data);
1329		if (ret < 0)
1330			goto out;
1331	}
1332
1333	list_add_rcu(&data->list, &file->triggers);
1334	ret++;
1335
1336	update_cond_flag(file);
1337	if (trace_event_trigger_enable_disable(file, 1) < 0) {
1338		list_del_rcu(&data->list);
1339		update_cond_flag(file);
1340		ret--;
1341	}
1342out:
1343	return ret;
1344}
1345
1346static void event_enable_unregister_trigger(char *glob,
1347					    struct event_trigger_ops *ops,
1348					    struct event_trigger_data *test,
1349					    struct trace_event_file *file)
1350{
1351	struct enable_trigger_data *test_enable_data = test->private_data;
1352	struct enable_trigger_data *enable_data;
1353	struct event_trigger_data *data;
1354	bool unregistered = false;
1355
1356	list_for_each_entry_rcu(data, &file->triggers, list) {
1357		enable_data = data->private_data;
1358		if (enable_data &&
1359		    (enable_data->file == test_enable_data->file)) {
1360			unregistered = true;
1361			list_del_rcu(&data->list);
1362			trace_event_trigger_enable_disable(file, 0);
1363			update_cond_flag(file);
1364			break;
1365		}
1366	}
1367
1368	if (unregistered && data->ops->free)
1369		data->ops->free(data->ops, data);
1370}
1371
1372static struct event_trigger_ops *
1373event_enable_get_trigger_ops(char *cmd, char *param)
1374{
1375	struct event_trigger_ops *ops;
1376	bool enable;
1377
1378	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1379
1380	if (enable)
1381		ops = param ? &event_enable_count_trigger_ops :
1382			&event_enable_trigger_ops;
1383	else
1384		ops = param ? &event_disable_count_trigger_ops :
1385			&event_disable_trigger_ops;
1386
1387	return ops;
1388}
1389
1390static struct event_command trigger_enable_cmd = {
1391	.name			= ENABLE_EVENT_STR,
1392	.trigger_type		= ETT_EVENT_ENABLE,
1393	.func			= event_enable_trigger_func,
1394	.reg			= event_enable_register_trigger,
1395	.unreg			= event_enable_unregister_trigger,
1396	.get_trigger_ops	= event_enable_get_trigger_ops,
1397	.set_filter		= set_trigger_filter,
1398};
1399
1400static struct event_command trigger_disable_cmd = {
1401	.name			= DISABLE_EVENT_STR,
1402	.trigger_type		= ETT_EVENT_ENABLE,
1403	.func			= event_enable_trigger_func,
1404	.reg			= event_enable_register_trigger,
1405	.unreg			= event_enable_unregister_trigger,
1406	.get_trigger_ops	= event_enable_get_trigger_ops,
1407	.set_filter		= set_trigger_filter,
1408};
1409
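
A hedged usage sketch for enable_event/disable_event (both event names are only examples):

/*
 * Example only:
 *   # echo 'enable_event:kmem:kmalloc:3' > events/sched/sched_wakeup/trigger
 * When sched_wakeup fires, the kmem:kmalloc event is soft-enabled by
 * clearing its SOFT_DISABLED bit, for at most 3 hits that actually
 * change its state (see event_enable_count_trigger() above);
 * 'disable_event:...' sets the bit instead, and a leading '!' removes
 * the trigger.
 */
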
1410static __init void unregister_trigger_enable_disable_cmds(void)
1411{
1412	unregister_event_command(&trigger_enable_cmd);
1413	unregister_event_command(&trigger_disable_cmd);
1414}
1415
1416static __init int register_trigger_enable_disable_cmds(void)
1417{
1418	int ret;
1419
1420	ret = register_event_command(&trigger_enable_cmd);
1421	if (WARN_ON(ret < 0))
1422		return ret;
1423	ret = register_event_command(&trigger_disable_cmd);
1424	if (WARN_ON(ret < 0))
1425		unregister_trigger_enable_disable_cmds();
1426
1427	return ret;
1428}
1429
1430static __init int register_trigger_traceon_traceoff_cmds(void)
1431{
1432	int ret;
1433
1434	ret = register_event_command(&trigger_traceon_cmd);
1435	if (WARN_ON(ret < 0))
1436		return ret;
1437	ret = register_event_command(&trigger_traceoff_cmd);
1438	if (WARN_ON(ret < 0))
1439		unregister_trigger_traceon_traceoff_cmds();
1440
1441	return ret;
1442}
1443
1444__init int register_trigger_cmds(void)
1445{
1446	register_trigger_traceon_traceoff_cmds();
1447	register_trigger_snapshot_cmd();
1448	register_trigger_stacktrace_cmd();
1449	register_trigger_enable_disable_cmds();
1450
1451	return 0;
1452}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * trace_events_trigger - trace event triggers
   4 *
   5 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
   6 */
   7
   8#include <linux/security.h>
   9#include <linux/module.h>
  10#include <linux/ctype.h>
  11#include <linux/mutex.h>
  12#include <linux/slab.h>
  13#include <linux/rculist.h>
  14
  15#include "trace.h"
  16
  17static LIST_HEAD(trigger_commands);
  18static DEFINE_MUTEX(trigger_cmd_mutex);
  19
  20void trigger_data_free(struct event_trigger_data *data)
  21{
  22	if (data->cmd_ops->set_filter)
  23		data->cmd_ops->set_filter(NULL, data, NULL);
  24
  25	/* make sure current triggers exit before free */
  26	tracepoint_synchronize_unregister();
  27
  28	kfree(data);
  29}
  30
  31/**
  32 * event_triggers_call - Call triggers associated with a trace event
  33 * @file: The trace_event_file associated with the event
  34 * @rec: The trace entry for the event, NULL for unconditional invocation
  35 *
  36 * For each trigger associated with an event, invoke the trigger
  37 * function registered with the associated trigger command.  If rec is
  38 * non-NULL, it means that the trigger requires further processing and
  39 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
  40 * trigger has a filter associated with it, rec will be checked against
  41 * the filter and, if the record matches, the trigger will be invoked.
  42 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
  43 * in any case until the current event is written, the trigger
  44 * function isn't invoked but the bit associated with the deferred
  45 * trigger is set in the return value.
  46 *
  47 * Returns an enum event_trigger_type value containing a set bit for
  48 * any trigger that should be deferred, ETT_NONE if nothing to defer.
  49 *
  50 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
  51 *
  52 * Return: an enum event_trigger_type value containing a set bit for
  53 * any trigger that should be deferred, ETT_NONE if nothing to defer.
  54 */
  55enum event_trigger_type
  56event_triggers_call(struct trace_event_file *file,
  57		    struct trace_buffer *buffer, void *rec,
  58		    struct ring_buffer_event *event)
  59{
  60	struct event_trigger_data *data;
  61	enum event_trigger_type tt = ETT_NONE;
  62	struct event_filter *filter;
  63
  64	if (list_empty(&file->triggers))
  65		return tt;
  66
  67	list_for_each_entry_rcu(data, &file->triggers, list) {
  68		if (data->paused)
  69			continue;
  70		if (!rec) {
  71			data->ops->func(data, buffer, rec, event);
  72			continue;
  73		}
  74		filter = rcu_dereference_sched(data->filter);
  75		if (filter && !filter_match_preds(filter, rec))
  76			continue;
  77		if (event_command_post_trigger(data->cmd_ops)) {
  78			tt |= data->cmd_ops->trigger_type;
  79			continue;
  80		}
  81		data->ops->func(data, buffer, rec, event);
  82	}
  83	return tt;
  84}
  85EXPORT_SYMBOL_GPL(event_triggers_call);
  86
  87/**
  88 * event_triggers_post_call - Call 'post_triggers' for a trace event
  89 * @file: The trace_event_file associated with the event
  90 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
  91 *
  92 * For each trigger associated with an event, invoke the trigger
  93 * function registered with the associated trigger command, if the
  94 * corresponding bit is set in the tt enum passed into this function.
  95 * See @event_triggers_call for details on how those bits are set.
  96 *
  97 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
  98 */
  99void
 100event_triggers_post_call(struct trace_event_file *file,
 101			 enum event_trigger_type tt)
 102{
 103	struct event_trigger_data *data;
 104
 105	list_for_each_entry_rcu(data, &file->triggers, list) {
 106		if (data->paused)
 107			continue;
 108		if (data->cmd_ops->trigger_type & tt)
 109			data->ops->func(data, NULL, NULL, NULL);
 110	}
 111}
 112EXPORT_SYMBOL_GPL(event_triggers_post_call);
 113
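
By v5.14.15 both entry points have grown ring-buffer parameters and event_triggers_post_call() has lost its @rec argument, so the hypothetical caller sketch given alongside the v4.6 listing would now look roughly like this (example_commit is still made up for illustration):

static void example_commit(struct trace_event_file *file,
			   struct trace_buffer *buffer,
			   struct ring_buffer_event *event, void *entry)
{
	enum event_trigger_type tt = ETT_NONE;

	if (unlikely(file->flags & EVENT_FILE_FL_TRIGGER_COND))
		tt = event_triggers_call(file, buffer, entry, event);

	/* ... commit 'event' to 'buffer' here ... */

	if (tt)
		event_triggers_post_call(file, tt);
}
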
 114#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)
 115
 116static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
 117{
 118	struct trace_event_file *event_file = event_file_data(m->private);
 119
 120	if (t == SHOW_AVAILABLE_TRIGGERS) {
 121		(*pos)++;
 122		return NULL;
 123	}
 124	return seq_list_next(t, &event_file->triggers, pos);
 125}
 126
 127static void *trigger_start(struct seq_file *m, loff_t *pos)
 128{
 129	struct trace_event_file *event_file;
 130
 131	/* ->stop() is called even if ->start() fails */
 132	mutex_lock(&event_mutex);
 133	event_file = event_file_data(m->private);
 134	if (unlikely(!event_file))
 135		return ERR_PTR(-ENODEV);
 136
 137	if (list_empty(&event_file->triggers))
 138		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
 139
 140	return seq_list_start(&event_file->triggers, *pos);
 141}
 142
 143static void trigger_stop(struct seq_file *m, void *t)
 144{
 145	mutex_unlock(&event_mutex);
 146}
 147
 148static int trigger_show(struct seq_file *m, void *v)
 149{
 150	struct event_trigger_data *data;
 151	struct event_command *p;
 152
 153	if (v == SHOW_AVAILABLE_TRIGGERS) {
 154		seq_puts(m, "# Available triggers:\n");
 155		seq_putc(m, '#');
 156		mutex_lock(&trigger_cmd_mutex);
 157		list_for_each_entry_reverse(p, &trigger_commands, list)
 158			seq_printf(m, " %s", p->name);
 159		seq_putc(m, '\n');
 160		mutex_unlock(&trigger_cmd_mutex);
 161		return 0;
 162	}
 163
 164	data = list_entry(v, struct event_trigger_data, list);
 165	data->ops->print(m, data->ops, data);
 166
 167	return 0;
 168}
 169
 170static const struct seq_operations event_triggers_seq_ops = {
 171	.start = trigger_start,
 172	.next = trigger_next,
 173	.stop = trigger_stop,
 174	.show = trigger_show,
 175};
 176
 177static int event_trigger_regex_open(struct inode *inode, struct file *file)
 178{
 179	int ret;
 180
 181	ret = security_locked_down(LOCKDOWN_TRACEFS);
 182	if (ret)
 183		return ret;
 184
 185	mutex_lock(&event_mutex);
 186
 187	if (unlikely(!event_file_data(file))) {
 188		mutex_unlock(&event_mutex);
 189		return -ENODEV;
 190	}
 191
 192	if ((file->f_mode & FMODE_WRITE) &&
 193	    (file->f_flags & O_TRUNC)) {
 194		struct trace_event_file *event_file;
 195		struct event_command *p;
 196
 197		event_file = event_file_data(file);
 198
 199		list_for_each_entry(p, &trigger_commands, list) {
 200			if (p->unreg_all)
 201				p->unreg_all(event_file);
 202		}
 203	}
 204
 205	if (file->f_mode & FMODE_READ) {
 206		ret = seq_open(file, &event_triggers_seq_ops);
 207		if (!ret) {
 208			struct seq_file *m = file->private_data;
 209			m->private = file;
 210		}
 211	}
 212
 213	mutex_unlock(&event_mutex);
 214
 215	return ret;
 216}
 217
 218int trigger_process_regex(struct trace_event_file *file, char *buff)
 219{
 220	char *command, *next;
 221	struct event_command *p;
 222	int ret = -EINVAL;
 223
 224	next = buff = skip_spaces(buff);
 225	command = strsep(&next, ": \t");
 226	if (next) {
 227		next = skip_spaces(next);
 228		if (!*next)
 229			next = NULL;
 230	}
 231	command = (command[0] != '!') ? command : command + 1;
 232
 233	mutex_lock(&trigger_cmd_mutex);
 234	list_for_each_entry(p, &trigger_commands, list) {
 235		if (strcmp(p->name, command) == 0) {
 236			ret = p->func(p, file, buff, command, next);
 237			goto out_unlock;
 238		}
 239	}
 240 out_unlock:
 241	mutex_unlock(&trigger_cmd_mutex);
 242
 243	return ret;
 244}
 245
 246static ssize_t event_trigger_regex_write(struct file *file,
 247					 const char __user *ubuf,
 248					 size_t cnt, loff_t *ppos)
 249{
 250	struct trace_event_file *event_file;
 251	ssize_t ret;
 252	char *buf;
 253
 254	if (!cnt)
 255		return 0;
 256
 257	if (cnt >= PAGE_SIZE)
 258		return -EINVAL;
 259
 260	buf = memdup_user_nul(ubuf, cnt);
 261	if (IS_ERR(buf))
 262		return PTR_ERR(buf);
 263
 264	strim(buf);
 265
 266	mutex_lock(&event_mutex);
 267	event_file = event_file_data(file);
 268	if (unlikely(!event_file)) {
 269		mutex_unlock(&event_mutex);
 270		kfree(buf);
 271		return -ENODEV;
 272	}
 273	ret = trigger_process_regex(event_file, buf);
 274	mutex_unlock(&event_mutex);
 275
 276	kfree(buf);
 277	if (ret < 0)
 278		goto out;
 279
 280	*ppos += cnt;
 281	ret = cnt;
 282 out:
 283	return ret;
 284}
 285
 286static int event_trigger_regex_release(struct inode *inode, struct file *file)
 287{
 288	mutex_lock(&event_mutex);
 289
 290	if (file->f_mode & FMODE_READ)
 291		seq_release(inode, file);
 292
 293	mutex_unlock(&event_mutex);
 294
 295	return 0;
 296}
 297
 298static ssize_t
 299event_trigger_write(struct file *filp, const char __user *ubuf,
 300		    size_t cnt, loff_t *ppos)
 301{
 302	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
 303}
 304
 305static int
 306event_trigger_open(struct inode *inode, struct file *filp)
 307{
 308	/* Checks for tracefs lockdown */
 309	return event_trigger_regex_open(inode, filp);
 310}
 311
 312static int
 313event_trigger_release(struct inode *inode, struct file *file)
 314{
 315	return event_trigger_regex_release(inode, file);
 316}
 317
 318const struct file_operations event_trigger_fops = {
 319	.open = event_trigger_open,
 320	.read = seq_read,
 321	.write = event_trigger_write,
 322	.llseek = tracing_lseek,
 323	.release = event_trigger_release,
 324};
 325
 326/*
 327 * Currently we only register event commands from __init, so mark this
 328 * __init too.
 329 */
 330__init int register_event_command(struct event_command *cmd)
 331{
 332	struct event_command *p;
 333	int ret = 0;
 334
 335	mutex_lock(&trigger_cmd_mutex);
 336	list_for_each_entry(p, &trigger_commands, list) {
 337		if (strcmp(cmd->name, p->name) == 0) {
 338			ret = -EBUSY;
 339			goto out_unlock;
 340		}
 341	}
 342	list_add(&cmd->list, &trigger_commands);
 343 out_unlock:
 344	mutex_unlock(&trigger_cmd_mutex);
 345
 346	return ret;
 347}
 348
 349/*
 350 * Currently we only unregister event commands from __init, so mark
 351 * this __init too.
 352 */
 353__init int unregister_event_command(struct event_command *cmd)
 354{
 355	struct event_command *p, *n;
 356	int ret = -ENODEV;
 357
 358	mutex_lock(&trigger_cmd_mutex);
 359	list_for_each_entry_safe(p, n, &trigger_commands, list) {
 360		if (strcmp(cmd->name, p->name) == 0) {
 361			ret = 0;
 362			list_del_init(&p->list);
 363			goto out_unlock;
 364		}
 365	}
 366 out_unlock:
 367	mutex_unlock(&trigger_cmd_mutex);
 368
 369	return ret;
 370}
 371
 372/**
 373 * event_trigger_print - Generic event_trigger_ops @print implementation
 374 * @name: The name of the event trigger
 375 * @m: The seq_file being printed to
 376 * @data: Trigger-specific data
 377 * @filter_str: filter_str to print, if present
 378 *
 379 * Common implementation for event triggers to print themselves.
 380 *
 381 * Usually wrapped by a function that simply sets the @name of the
 382 * trigger command and then invokes this.
 383 *
 384 * Return: 0 on success, errno otherwise
 385 */
 386static int
 387event_trigger_print(const char *name, struct seq_file *m,
 388		    void *data, char *filter_str)
 389{
 390	long count = (long)data;
 391
 392	seq_puts(m, name);
 393
 394	if (count == -1)
 395		seq_puts(m, ":unlimited");
 396	else
 397		seq_printf(m, ":count=%ld", count);
 398
 399	if (filter_str)
 400		seq_printf(m, " if %s\n", filter_str);
 401	else
 402		seq_putc(m, '\n');
 403
 404	return 0;
 405}
 406
 407/**
 408 * event_trigger_init - Generic event_trigger_ops @init implementation
 409 * @ops: The trigger ops associated with the trigger
 410 * @data: Trigger-specific data
 411 *
 412 * Common implementation of event trigger initialization.
 413 *
 414 * Usually used directly as the @init method in event trigger
 415 * implementations.
 416 *
 417 * Return: 0 on success, errno otherwise
 418 */
 419int event_trigger_init(struct event_trigger_ops *ops,
 420		       struct event_trigger_data *data)
 421{
 422	data->ref++;
 423	return 0;
 424}
 425
 426/**
 427 * event_trigger_free - Generic event_trigger_ops @free implementation
 428 * @ops: The trigger ops associated with the trigger
 429 * @data: Trigger-specific data
 430 *
 431 * Common implementation of event trigger de-initialization.
 432 *
 433 * Usually used directly as the @free method in event trigger
 434 * implementations.
 435 */
 436static void
 437event_trigger_free(struct event_trigger_ops *ops,
 438		   struct event_trigger_data *data)
 439{
 440	if (WARN_ON_ONCE(data->ref <= 0))
 441		return;
 442
 443	data->ref--;
 444	if (!data->ref)
 445		trigger_data_free(data);
 446}
 447
 448int trace_event_trigger_enable_disable(struct trace_event_file *file,
 449				       int trigger_enable)
 450{
 451	int ret = 0;
 452
 453	if (trigger_enable) {
 454		if (atomic_inc_return(&file->tm_ref) > 1)
 455			return ret;
 456		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
 457		ret = trace_event_enable_disable(file, 1, 1);
 458	} else {
 459		if (atomic_dec_return(&file->tm_ref) > 0)
 460			return ret;
 461		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
 462		ret = trace_event_enable_disable(file, 0, 1);
 463	}
 464
 465	return ret;
 466}
 467
 468/**
 469 * clear_event_triggers - Clear all triggers associated with a trace array
 470 * @tr: The trace array to clear
 471 *
 472 * For each trigger, the triggering event has its tm_ref decremented
 473 * via trace_event_trigger_enable_disable(), and any associated event
 474 * (in the case of enable/disable_event triggers) will have its sm_ref
 475 * decremented via free()->trace_event_enable_disable().  That
 476 * combination effectively reverses the soft-mode/trigger state added
 477 * by trigger registration.
 478 *
 479 * Must be called with event_mutex held.
 480 */
 481void
 482clear_event_triggers(struct trace_array *tr)
 483{
 484	struct trace_event_file *file;
 485
 486	list_for_each_entry(file, &tr->events, list) {
 487		struct event_trigger_data *data, *n;
 488		list_for_each_entry_safe(data, n, &file->triggers, list) {
 489			trace_event_trigger_enable_disable(file, 0);
 490			list_del_rcu(&data->list);
 491			if (data->ops->free)
 492				data->ops->free(data->ops, data);
 493		}
 494	}
 495}
 496
 497/**
 498 * update_cond_flag - Set or reset the TRIGGER_COND bit
 499 * @file: The trace_event_file associated with the event
 500 *
 501 * If an event has triggers and any of those triggers has a filter or
 502 * a post_trigger, trigger invocation needs to be deferred until after
 503 * the current event has logged its data, and the event should have
 504 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 505 * cleared.
 506 */
 507void update_cond_flag(struct trace_event_file *file)
 508{
 509	struct event_trigger_data *data;
 510	bool set_cond = false;
 511
 512	lockdep_assert_held(&event_mutex);
 513
 514	list_for_each_entry(data, &file->triggers, list) {
 515		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
 516		    event_command_needs_rec(data->cmd_ops)) {
 517			set_cond = true;
 518			break;
 519		}
 520	}
 521
 522	if (set_cond)
 523		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
 524	else
 525		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
 526}
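     /*
      * Concrete example of the rule above, using the commands defined
      * later in this file: a bare 'traceon' trigger has no filter and no
      * EVENT_CMD_FL_POST_TRIGGER flag, so it leaves TRIGGER_COND clear
      * and is invoked with a NULL record; adding an 'if' filter to it,
      * or attaching a 'traceoff' or 'stacktrace' trigger (both post
      * triggers), sets TRIGGER_COND so the record is passed along and
      * checked.
      */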
 527
 528/**
 529 * register_trigger - Generic event_command @reg implementation
 530 * @glob: The raw string used to register the trigger
 531 * @ops: The trigger ops associated with the trigger
 532 * @data: Trigger-specific data to associate with the trigger
 533 * @file: The trace_event_file associated with the event
 534 *
 535 * Common implementation for event trigger registration.
 536 *
 537 * Usually used directly as the @reg method in event command
 538 * implementations.
 539 *
 540 * Return: 0 on success, errno otherwise
 541 */
 542static int register_trigger(char *glob, struct event_trigger_ops *ops,
 543			    struct event_trigger_data *data,
 544			    struct trace_event_file *file)
 545{
 546	struct event_trigger_data *test;
 547	int ret = 0;
 548
 549	lockdep_assert_held(&event_mutex);
 550
 551	list_for_each_entry(test, &file->triggers, list) {
 552		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
 553			ret = -EEXIST;
 554			goto out;
 555		}
 556	}
 557
 558	if (data->ops->init) {
 559		ret = data->ops->init(data->ops, data);
 560		if (ret < 0)
 561			goto out;
 562	}
 563
 564	list_add_rcu(&data->list, &file->triggers);
 565	ret++;
 566
 567	update_cond_flag(file);
 568	if (trace_event_trigger_enable_disable(file, 1) < 0) {
 569		list_del_rcu(&data->list);
 570		update_cond_flag(file);
 571		ret--;
 572	}
 573out:
 574	return ret;
 575}
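     /*
      * Return convention of register_trigger(): on success it returns
      * the number of triggers registered (i.e. 1); it returns 0 if
      * enabling the event failed, -EEXIST if a trigger of the same type
      * is already attached to the file, and any errno from the trigger's
      * init() method.  event_trigger_callback() below relies on this to
      * tell "registered nothing" apart from success.
      */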
 576
 577/**
 578 * unregister_trigger - Generic event_command @unreg implementation
 579 * @glob: The raw string used to register the trigger
 580 * @ops: The trigger ops associated with the trigger
 581 * @test: Trigger-specific data used to find the trigger to remove
 582 * @file: The trace_event_file associated with the event
 583 *
 584 * Common implementation for event trigger unregistration.
 585 *
 586 * Usually used directly as the @unreg method in event command
 587 * implementations.
 588 */
 589static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
 590			       struct event_trigger_data *test,
 591			       struct trace_event_file *file)
 592{
 593	struct event_trigger_data *data;
 594	bool unregistered = false;
 595
 596	lockdep_assert_held(&event_mutex);
 597
 598	list_for_each_entry(data, &file->triggers, list) {
 599		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
 600			unregistered = true;
 601			list_del_rcu(&data->list);
 602			trace_event_trigger_enable_disable(file, 0);
 603			update_cond_flag(file);
 604			break;
 605		}
 606	}
 607
 608	if (unregistered && data->ops->free)
 609		data->ops->free(data->ops, data);
 610}
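     /*
      * For illustration, this unregister path is reached when a trigger
      * command is written with a leading '!' to an event's trigger file,
      * e.g. writing '!traceoff'; event_trigger_callback() strips the '!'
      * and calls cmd_ops->unreg() with the remainder.
      */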
 611
 612/**
 613 * event_trigger_callback - Generic event_command @func implementation
 614 * @cmd_ops: The command ops, used for trigger registration
 615 * @file: The trace_event_file associated with the event
 616 * @glob: The raw string used to register the trigger
 617 * @cmd: The cmd portion of the string used to register the trigger
 618 * @param: The params portion of the string used to register the trigger
 619 *
 620 * Common implementation for event command parsing and trigger
 621 * instantiation.
 622 *
 623 * Usually used directly as the @func method in event command
 624 * implementations.
 625 *
 626 * Return: 0 on success, errno otherwise
 627 */
 628static int
 629event_trigger_callback(struct event_command *cmd_ops,
 630		       struct trace_event_file *file,
 631		       char *glob, char *cmd, char *param)
 632{
 633	struct event_trigger_data *trigger_data;
 634	struct event_trigger_ops *trigger_ops;
 635	char *trigger = NULL;
 636	char *number;
 637	int ret;
 638
 639	/* separate the trigger from the filter (t:n [if filter]) */
 640	if (param && isdigit(param[0])) {
 641		trigger = strsep(&param, " \t");
 642		if (param) {
 643			param = skip_spaces(param);
 644			if (!*param)
 645				param = NULL;
 646		}
 647	}
 648
 649	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
 650
 651	ret = -ENOMEM;
 652	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
 653	if (!trigger_data)
 654		goto out;
 655
 656	trigger_data->count = -1;
 657	trigger_data->ops = trigger_ops;
 658	trigger_data->cmd_ops = cmd_ops;
 659	trigger_data->private_data = file;
 660	INIT_LIST_HEAD(&trigger_data->list);
 661	INIT_LIST_HEAD(&trigger_data->named_list);
 662
 663	if (glob[0] == '!') {
 664		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
 665		kfree(trigger_data);
 666		ret = 0;
 667		goto out;
 668	}
 669
 670	if (trigger) {
 671		number = strsep(&trigger, ":");
 672
 673		ret = -EINVAL;
 674		if (!strlen(number))
 675			goto out_free;
 676
  677		/*
  678		 * Parse the trigger count into trigger_data->count; it limits
  679		 * how many times the trigger fires (the default -1 = unlimited).
  680		 */
 681		ret = kstrtoul(number, 0, &trigger_data->count);
 682		if (ret)
 683			goto out_free;
 684	}
 685
 686	if (!param) /* if param is non-empty, it's supposed to be a filter */
 687		goto out_reg;
 688
 689	if (!cmd_ops->set_filter)
 690		goto out_reg;
 691
 692	ret = cmd_ops->set_filter(param, trigger_data, file);
 693	if (ret < 0)
 694		goto out_free;
 695
 696 out_reg:
 697	/* Up the trigger_data count to make sure reg doesn't free it on failure */
 698	event_trigger_init(trigger_ops, trigger_data);
 699	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
  700	/*
  701	 * On success, cmd_ops->reg() returns the number of triggers it
  702	 * registered (normally 1); it returns zero if it registered
  703	 * nothing.  Consider registering nothing a failure too.
  704	 */
 705	if (!ret) {
 706		cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
 707		ret = -ENOENT;
 708	} else if (ret > 0)
 709		ret = 0;
 710
  711	/* Drop the reference taken above; frees trigger_data if it is now unused */
 712	event_trigger_free(trigger_ops, trigger_data);
 713 out:
 714	return ret;
 715
 716 out_free:
 717	if (cmd_ops->set_filter)
 718		cmd_ops->set_filter(NULL, trigger_data, NULL);
 719	kfree(trigger_data);
 720	goto out;
 721}
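     /*
      * Worked example of the parsing above (event and field names are
      * placeholders only): for a command such as
      * 'traceoff:5 if prev_pid == 0', @param typically arrives here as
      * "5 if prev_pid == 0"; the leading digit makes strsep() split it
      * into trigger = "5" and param = "if prev_pid == 0", the count 5 is
      * parsed into trigger_data->count, and the remaining string is
      * handed to cmd_ops->set_filter().
      */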
 722
 723/**
 724 * set_trigger_filter - Generic event_command @set_filter implementation
 725 * @filter_str: The filter string for the trigger, NULL to remove filter
 726 * @trigger_data: Trigger-specific data
 727 * @file: The trace_event_file associated with the event
 728 *
 729 * Common implementation for event command filter parsing and filter
 730 * instantiation.
 731 *
 732 * Usually used directly as the @set_filter method in event command
 733 * implementations.
 734 *
  735 * Also used to remove a filter (if filter_str == NULL).
 736 *
 737 * Return: 0 on success, errno otherwise
 738 */
 739int set_trigger_filter(char *filter_str,
 740		       struct event_trigger_data *trigger_data,
 741		       struct trace_event_file *file)
 742{
 743	struct event_trigger_data *data = trigger_data;
 744	struct event_filter *filter = NULL, *tmp;
 745	int ret = -EINVAL;
 746	char *s;
 747
 748	if (!filter_str) /* clear the current filter */
 749		goto assign;
 750
 751	s = strsep(&filter_str, " \t");
 752
 753	if (!strlen(s) || strcmp(s, "if") != 0)
 754		goto out;
 755
 756	if (!filter_str)
 757		goto out;
 758
 759	/* The filter is for the 'trigger' event, not the triggered event */
 760	ret = create_event_filter(file->tr, file->event_call,
 761				  filter_str, false, &filter);
  762	/*
  763	 * If create_event_filter() fails, the partially built filter is
  764	 * still assigned below, so the caller can free it via data->filter.
  765	 */
 766 assign:
 767	tmp = rcu_access_pointer(data->filter);
 768
 769	rcu_assign_pointer(data->filter, filter);
 770
 771	if (tmp) {
 772		/* Make sure the call is done with the filter */
 773		tracepoint_synchronize_unregister();
 774		free_event_filter(tmp);
 775	}
 776
 777	kfree(data->filter_str);
 778	data->filter_str = NULL;
 779
 780	if (filter_str) {
 781		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
 782		if (!data->filter_str) {
 783			free_event_filter(rcu_access_pointer(data->filter));
 784			data->filter = NULL;
 785			ret = -ENOMEM;
 786		}
 787	}
 788 out:
 789	return ret;
 790}
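     /*
      * The filter swap above follows the usual RCU pattern: the new
      * filter is published with rcu_assign_pointer(), and the old one is
      * only freed after tracepoint_synchronize_unregister() has waited
      * out any event_triggers_call() invocation that might still be
      * dereferencing it.
      */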
 791
 792static LIST_HEAD(named_triggers);
 793
 794/**
 795 * find_named_trigger - Find the common named trigger associated with @name
 796 * @name: The name of the set of named triggers to find the common data for
 797 *
 798 * Named triggers are sets of triggers that share a common set of
 799 * trigger data.  The first named trigger registered with a given name
 800 * owns the common trigger data that the others subsequently
 801 * registered with the same name will reference.  This function
 802 * returns the common trigger data associated with that first
 803 * registered instance.
 804 *
 805 * Return: the common trigger data for the given named trigger on
 806 * success, NULL otherwise.
 807 */
 808struct event_trigger_data *find_named_trigger(const char *name)
 809{
 810	struct event_trigger_data *data;
 811
 812	if (!name)
 813		return NULL;
 814
 815	list_for_each_entry(data, &named_triggers, named_list) {
 816		if (data->named_data)
 817			continue;
 818		if (strcmp(data->name, name) == 0)
 819			return data;
 820	}
 821
 822	return NULL;
 823}
 824
 825/**
 826 * is_named_trigger - determine if a given trigger is a named trigger
 827 * @test: The trigger data to test
 828 *
 829 * Return: true if 'test' is a named trigger, false otherwise.
 830 */
 831bool is_named_trigger(struct event_trigger_data *test)
 832{
 833	struct event_trigger_data *data;
 834
 835	list_for_each_entry(data, &named_triggers, named_list) {
 836		if (test == data)
 837			return true;
 838	}
 839
 840	return false;
 841}
 842
 843/**
 844 * save_named_trigger - save the trigger in the named trigger list
 845 * @name: The name of the named trigger set
 846 * @data: The trigger data to save
 847 *
 848 * Return: 0 if successful, negative error otherwise.
 849 */
 850int save_named_trigger(const char *name, struct event_trigger_data *data)
 851{
 852	data->name = kstrdup(name, GFP_KERNEL);
 853	if (!data->name)
 854		return -ENOMEM;
 855
 856	list_add(&data->named_list, &named_triggers);
 857
 858	return 0;
 859}
 860
 861/**
 862 * del_named_trigger - delete a trigger from the named trigger list
 863 * @data: The trigger data to delete
 864 */
 865void del_named_trigger(struct event_trigger_data *data)
 866{
 867	kfree(data->name);
 868	data->name = NULL;
 869
 870	list_del(&data->named_list);
 871}
 872
 873static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
 874{
 875	struct event_trigger_data *test;
 876
 877	list_for_each_entry(test, &named_triggers, named_list) {
 878		if (strcmp(test->name, data->name) == 0) {
 879			if (pause) {
 880				test->paused_tmp = test->paused;
 881				test->paused = true;
 882			} else {
 883				test->paused = test->paused_tmp;
 884			}
 885		}
 886	}
 887}
 888
 889/**
 890 * pause_named_trigger - Pause all named triggers with the same name
 891 * @data: The trigger data of a named trigger to pause
 892 *
 893 * Pauses a named trigger along with all other triggers having the
 894 * same name.  Because named triggers share a common set of data,
 895 * pausing only one is meaningless, so pausing one named trigger needs
 896 * to pause all triggers with the same name.
 897 */
 898void pause_named_trigger(struct event_trigger_data *data)
 899{
 900	__pause_named_trigger(data, true);
 901}
 902
 903/**
 904 * unpause_named_trigger - Un-pause all named triggers with the same name
 905 * @data: The trigger data of a named trigger to unpause
 906 *
 907 * Un-pauses a named trigger along with all other triggers having the
 908 * same name.  Because named triggers share a common set of data,
 909 * unpausing only one is meaningless, so unpausing one named trigger
 910 * needs to unpause all triggers with the same name.
 911 */
 912void unpause_named_trigger(struct event_trigger_data *data)
 913{
 914	__pause_named_trigger(data, false);
 915}
 916
 917/**
 918 * set_named_trigger_data - Associate common named trigger data
 919 * @data: The trigger data to associate
 920 * @named_data: The common named trigger to be associated
 921 *
 922 * Named triggers are sets of triggers that share a common set of
 923 * trigger data.  The first named trigger registered with a given name
 924 * owns the common trigger data that the others subsequently
 925 * registered with the same name will reference.  This function
 926 * associates the common trigger data from the first trigger with the
 927 * given trigger.
 928 */
 929void set_named_trigger_data(struct event_trigger_data *data,
 930			    struct event_trigger_data *named_data)
 931{
 932	data->named_data = named_data;
 933}
 934
 935struct event_trigger_data *
 936get_named_trigger_data(struct event_trigger_data *data)
 937{
 938	return data->named_data;
 939}
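     /*
      * For illustration, named triggers are what let several events
      * share one set of trigger data, as hist triggers do with a
      * ':name=' suffix (event names below are examples only):
      *
      *   echo 'hist:keys=common_pid:name=foo' > events/sched/sched_switch/trigger
      *   echo 'hist:keys=common_pid:name=foo' > events/sched/sched_waking/trigger
      *
      * The second registration finds the first via find_named_trigger()
      * and attaches to its data via set_named_trigger_data().
      */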
 940
 941static void
 942traceon_trigger(struct event_trigger_data *data,
 943		struct trace_buffer *buffer, void *rec,
 944		struct ring_buffer_event *event)
 945{
 946	if (tracing_is_on())
 947		return;
 948
 949	tracing_on();
 950}
 951
 952static void
 953traceon_count_trigger(struct event_trigger_data *data,
 954		      struct trace_buffer *buffer, void *rec,
 955		      struct ring_buffer_event *event)
 956{
 957	if (tracing_is_on())
 958		return;
 959
 960	if (!data->count)
 961		return;
 962
 963	if (data->count != -1)
 964		(data->count)--;
 965
 966	tracing_on();
 967}
 968
 969static void
 970traceoff_trigger(struct event_trigger_data *data,
 971		 struct trace_buffer *buffer, void *rec,
 972		 struct ring_buffer_event *event)
 973{
 974	if (!tracing_is_on())
 975		return;
 976
 977	tracing_off();
 978}
 979
 980static void
 981traceoff_count_trigger(struct event_trigger_data *data,
 982		       struct trace_buffer *buffer, void *rec,
 983		       struct ring_buffer_event *event)
 984{
 985	if (!tracing_is_on())
 986		return;
 987
 988	if (!data->count)
 989		return;
 990
 991	if (data->count != -1)
 992		(data->count)--;
 993
 994	tracing_off();
 995}
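     /*
      * Count handling in the *_count_trigger() variants above (and in
      * the snapshot, stacktrace and enable variants below) follows one
      * pattern: -1 means unlimited, 0 means the trigger is exhausted and
      * becomes a no-op, and any other value is decremented each time the
      * trigger actually fires.  So 'traceoff:3' turns tracing off at
      * most three times.
      */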
 996
 997static int
 998traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
 999		      struct event_trigger_data *data)
1000{
1001	return event_trigger_print("traceon", m, (void *)data->count,
1002				   data->filter_str);
1003}
1004
1005static int
1006traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1007		       struct event_trigger_data *data)
1008{
1009	return event_trigger_print("traceoff", m, (void *)data->count,
1010				   data->filter_str);
1011}
1012
1013static struct event_trigger_ops traceon_trigger_ops = {
1014	.func			= traceon_trigger,
1015	.print			= traceon_trigger_print,
1016	.init			= event_trigger_init,
1017	.free			= event_trigger_free,
1018};
1019
1020static struct event_trigger_ops traceon_count_trigger_ops = {
1021	.func			= traceon_count_trigger,
1022	.print			= traceon_trigger_print,
1023	.init			= event_trigger_init,
1024	.free			= event_trigger_free,
1025};
1026
1027static struct event_trigger_ops traceoff_trigger_ops = {
1028	.func			= traceoff_trigger,
1029	.print			= traceoff_trigger_print,
1030	.init			= event_trigger_init,
1031	.free			= event_trigger_free,
1032};
1033
1034static struct event_trigger_ops traceoff_count_trigger_ops = {
1035	.func			= traceoff_count_trigger,
1036	.print			= traceoff_trigger_print,
1037	.init			= event_trigger_init,
1038	.free			= event_trigger_free,
1039};
1040
1041static struct event_trigger_ops *
1042onoff_get_trigger_ops(char *cmd, char *param)
1043{
1044	struct event_trigger_ops *ops;
1045
1046	/* we register both traceon and traceoff to this callback */
1047	if (strcmp(cmd, "traceon") == 0)
1048		ops = param ? &traceon_count_trigger_ops :
1049			&traceon_trigger_ops;
1050	else
1051		ops = param ? &traceoff_count_trigger_ops :
1052			&traceoff_trigger_ops;
1053
1054	return ops;
1055}
1056
1057static struct event_command trigger_traceon_cmd = {
1058	.name			= "traceon",
1059	.trigger_type		= ETT_TRACE_ONOFF,
1060	.func			= event_trigger_callback,
1061	.reg			= register_trigger,
1062	.unreg			= unregister_trigger,
1063	.get_trigger_ops	= onoff_get_trigger_ops,
1064	.set_filter		= set_trigger_filter,
1065};
1066
1067static struct event_command trigger_traceoff_cmd = {
1068	.name			= "traceoff",
1069	.trigger_type		= ETT_TRACE_ONOFF,
1070	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1071	.func			= event_trigger_callback,
1072	.reg			= register_trigger,
1073	.unreg			= unregister_trigger,
1074	.get_trigger_ops	= onoff_get_trigger_ops,
1075	.set_filter		= set_trigger_filter,
1076};
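     /*
      * Of the two commands above, only traceoff sets
      * EVENT_CMD_FL_POST_TRIGGER: deferring it until after the current
      * event has been written means the event that tripped the trigger
      * is itself recorded before tracing stops, whereas traceon can
      * safely run immediately.
      */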
1077
1078#ifdef CONFIG_TRACER_SNAPSHOT
1079static void
1080snapshot_trigger(struct event_trigger_data *data,
1081		 struct trace_buffer *buffer, void *rec,
1082		 struct ring_buffer_event *event)
1083{
1084	struct trace_event_file *file = data->private_data;
1085
1086	if (file)
1087		tracing_snapshot_instance(file->tr);
1088	else
1089		tracing_snapshot();
1090}
1091
1092static void
1093snapshot_count_trigger(struct event_trigger_data *data,
1094		       struct trace_buffer *buffer, void *rec,
1095		       struct ring_buffer_event *event)
1096{
1097	if (!data->count)
1098		return;
1099
1100	if (data->count != -1)
1101		(data->count)--;
1102
1103	snapshot_trigger(data, buffer, rec, event);
1104}
1105
1106static int
1107register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
1108			  struct event_trigger_data *data,
1109			  struct trace_event_file *file)
1110{
1111	if (tracing_alloc_snapshot_instance(file->tr) != 0)
1112		return 0;
1113
1114	return register_trigger(glob, ops, data, file);
1115}
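     /*
      * register_snapshot_trigger() allocates the snapshot buffer for the
      * instance up front, so the potentially large allocation happens at
      * registration time rather than in the trigger path; on allocation
      * failure it returns 0, which the caller treats as a registration
      * failure.  Typical use (illustrative only):
      *
      *   echo 'snapshot:1' > events/sched/sched_switch/trigger
      */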
1116
1117static int
1118snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1119		       struct event_trigger_data *data)
1120{
1121	return event_trigger_print("snapshot", m, (void *)data->count,
1122				   data->filter_str);
1123}
1124
1125static struct event_trigger_ops snapshot_trigger_ops = {
1126	.func			= snapshot_trigger,
1127	.print			= snapshot_trigger_print,
1128	.init			= event_trigger_init,
1129	.free			= event_trigger_free,
1130};
1131
1132static struct event_trigger_ops snapshot_count_trigger_ops = {
1133	.func			= snapshot_count_trigger,
1134	.print			= snapshot_trigger_print,
1135	.init			= event_trigger_init,
1136	.free			= event_trigger_free,
1137};
1138
1139static struct event_trigger_ops *
1140snapshot_get_trigger_ops(char *cmd, char *param)
1141{
1142	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1143}
1144
1145static struct event_command trigger_snapshot_cmd = {
1146	.name			= "snapshot",
1147	.trigger_type		= ETT_SNAPSHOT,
1148	.func			= event_trigger_callback,
1149	.reg			= register_snapshot_trigger,
1150	.unreg			= unregister_trigger,
1151	.get_trigger_ops	= snapshot_get_trigger_ops,
1152	.set_filter		= set_trigger_filter,
1153};
1154
1155static __init int register_trigger_snapshot_cmd(void)
1156{
1157	int ret;
1158
1159	ret = register_event_command(&trigger_snapshot_cmd);
1160	WARN_ON(ret < 0);
1161
1162	return ret;
1163}
1164#else
1165static __init int register_trigger_snapshot_cmd(void) { return 0; }
1166#endif /* CONFIG_TRACER_SNAPSHOT */
1167
1168#ifdef CONFIG_STACKTRACE
1169#ifdef CONFIG_UNWINDER_ORC
1170/* Skip 2:
1171 *   event_triggers_post_call()
1172 *   trace_event_raw_event_xxx()
1173 */
1174# define STACK_SKIP 2
1175#else
1176/*
1177 * Skip 4:
1178 *   stacktrace_trigger()
1179 *   event_triggers_post_call()
1180 *   trace_event_buffer_commit()
1181 *   trace_event_raw_event_xxx()
1182 */
1183#define STACK_SKIP 4
1184#endif
1185
1186static void
1187stacktrace_trigger(struct event_trigger_data *data,
1188		   struct trace_buffer *buffer,  void *rec,
1189		   struct ring_buffer_event *event)
1190{
1191	trace_dump_stack(STACK_SKIP);
1192}
1193
1194static void
1195stacktrace_count_trigger(struct event_trigger_data *data,
1196			 struct trace_buffer *buffer, void *rec,
1197			 struct ring_buffer_event *event)
1198{
1199	if (!data->count)
1200		return;
1201
1202	if (data->count != -1)
1203		(data->count)--;
1204
1205	stacktrace_trigger(data, buffer, rec, event);
1206}
1207
1208static int
1209stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1210			 struct event_trigger_data *data)
1211{
1212	return event_trigger_print("stacktrace", m, (void *)data->count,
1213				   data->filter_str);
1214}
1215
1216static struct event_trigger_ops stacktrace_trigger_ops = {
1217	.func			= stacktrace_trigger,
1218	.print			= stacktrace_trigger_print,
1219	.init			= event_trigger_init,
1220	.free			= event_trigger_free,
1221};
1222
1223static struct event_trigger_ops stacktrace_count_trigger_ops = {
1224	.func			= stacktrace_count_trigger,
1225	.print			= stacktrace_trigger_print,
1226	.init			= event_trigger_init,
1227	.free			= event_trigger_free,
1228};
1229
1230static struct event_trigger_ops *
1231stacktrace_get_trigger_ops(char *cmd, char *param)
1232{
1233	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1234}
1235
1236static struct event_command trigger_stacktrace_cmd = {
1237	.name			= "stacktrace",
1238	.trigger_type		= ETT_STACKTRACE,
1239	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1240	.func			= event_trigger_callback,
1241	.reg			= register_trigger,
1242	.unreg			= unregister_trigger,
1243	.get_trigger_ops	= stacktrace_get_trigger_ops,
1244	.set_filter		= set_trigger_filter,
1245};
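     /*
      * Usage sketch (path, event and field names are examples only):
      *
      *   echo 'stacktrace:5 if prev_pid == 0' > events/sched/sched_switch/trigger
      *
      * dumps at most five kernel stack traces into the trace buffer, one
      * per matching sched_switch event, skipping the STACK_SKIP internal
      * frames computed above.
      */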
1246
1247static __init int register_trigger_stacktrace_cmd(void)
1248{
1249	int ret;
1250
1251	ret = register_event_command(&trigger_stacktrace_cmd);
1252	WARN_ON(ret < 0);
1253
1254	return ret;
1255}
1256#else
1257static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1258#endif /* CONFIG_STACKTRACE */
1259
1260static __init void unregister_trigger_traceon_traceoff_cmds(void)
1261{
1262	unregister_event_command(&trigger_traceon_cmd);
1263	unregister_event_command(&trigger_traceoff_cmd);
1264}
1265
1266static void
1267event_enable_trigger(struct event_trigger_data *data,
1268		     struct trace_buffer *buffer,  void *rec,
1269		     struct ring_buffer_event *event)
1270{
1271	struct enable_trigger_data *enable_data = data->private_data;
1272
1273	if (enable_data->enable)
1274		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1275	else
1276		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1277}
1278
1279static void
1280event_enable_count_trigger(struct event_trigger_data *data,
1281			   struct trace_buffer *buffer,  void *rec,
1282			   struct ring_buffer_event *event)
1283{
1284	struct enable_trigger_data *enable_data = data->private_data;
1285
1286	if (!data->count)
1287		return;
1288
 1289	/* Skip if the event is already in the state we want to switch it to */
1290	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1291		return;
1292
1293	if (data->count != -1)
1294		(data->count)--;
1295
1296	event_enable_trigger(data, buffer, rec, event);
1297}
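     /*
      * Note the extra check above compared with the other
      * *_count_trigger() helpers: if the target event is already in the
      * requested state, the trigger neither flips the SOFT_DISABLED bit
      * nor consumes the count, so 'enable_event:...:N' only counts
      * transitions that actually happen.
      */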
1298
1299int event_enable_trigger_print(struct seq_file *m,
1300			       struct event_trigger_ops *ops,
1301			       struct event_trigger_data *data)
1302{
1303	struct enable_trigger_data *enable_data = data->private_data;
1304
1305	seq_printf(m, "%s:%s:%s",
1306		   enable_data->hist ?
1307		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1308		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1309		   enable_data->file->event_call->class->system,
1310		   trace_event_name(enable_data->file->event_call));
1311
1312	if (data->count == -1)
1313		seq_puts(m, ":unlimited");
1314	else
1315		seq_printf(m, ":count=%ld", data->count);
1316
1317	if (data->filter_str)
1318		seq_printf(m, " if %s\n", data->filter_str);
1319	else
1320		seq_putc(m, '\n');
1321
1322	return 0;
1323}
1324
1325void event_enable_trigger_free(struct event_trigger_ops *ops,
1326			       struct event_trigger_data *data)
1327{
1328	struct enable_trigger_data *enable_data = data->private_data;
1329
1330	if (WARN_ON_ONCE(data->ref <= 0))
1331		return;
1332
1333	data->ref--;
1334	if (!data->ref) {
1335		/* Remove the SOFT_MODE flag */
1336		trace_event_enable_disable(enable_data->file, 0, 1);
1337		module_put(enable_data->file->event_call->mod);
1338		trigger_data_free(data);
1339		kfree(enable_data);
1340	}
1341}
1342
1343static struct event_trigger_ops event_enable_trigger_ops = {
1344	.func			= event_enable_trigger,
1345	.print			= event_enable_trigger_print,
1346	.init			= event_trigger_init,
1347	.free			= event_enable_trigger_free,
1348};
1349
1350static struct event_trigger_ops event_enable_count_trigger_ops = {
1351	.func			= event_enable_count_trigger,
1352	.print			= event_enable_trigger_print,
1353	.init			= event_trigger_init,
1354	.free			= event_enable_trigger_free,
1355};
1356
1357static struct event_trigger_ops event_disable_trigger_ops = {
1358	.func			= event_enable_trigger,
1359	.print			= event_enable_trigger_print,
1360	.init			= event_trigger_init,
1361	.free			= event_enable_trigger_free,
1362};
1363
1364static struct event_trigger_ops event_disable_count_trigger_ops = {
1365	.func			= event_enable_count_trigger,
1366	.print			= event_enable_trigger_print,
1367	.init			= event_trigger_init,
1368	.free			= event_enable_trigger_free,
1369};
1370
1371int event_enable_trigger_func(struct event_command *cmd_ops,
1372			      struct trace_event_file *file,
1373			      char *glob, char *cmd, char *param)
1374{
1375	struct trace_event_file *event_enable_file;
1376	struct enable_trigger_data *enable_data;
1377	struct event_trigger_data *trigger_data;
1378	struct event_trigger_ops *trigger_ops;
1379	struct trace_array *tr = file->tr;
1380	const char *system;
1381	const char *event;
1382	bool hist = false;
1383	char *trigger;
1384	char *number;
1385	bool enable;
1386	int ret;
1387
1388	if (!param)
1389		return -EINVAL;
1390
1391	/* separate the trigger from the filter (s:e:n [if filter]) */
1392	trigger = strsep(&param, " \t");
1393	if (!trigger)
1394		return -EINVAL;
1395	if (param) {
1396		param = skip_spaces(param);
1397		if (!*param)
1398			param = NULL;
1399	}
1400
1401	system = strsep(&trigger, ":");
1402	if (!trigger)
1403		return -EINVAL;
1404
1405	event = strsep(&trigger, ":");
1406
1407	ret = -EINVAL;
1408	event_enable_file = find_event_file(tr, system, event);
1409	if (!event_enable_file)
1410		goto out;
1411
1412#ifdef CONFIG_HIST_TRIGGERS
1413	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1414		(strcmp(cmd, DISABLE_HIST_STR) == 0));
1415
1416	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1417		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1418#else
1419	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1420#endif
1421	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1422
1423	ret = -ENOMEM;
1424	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1425	if (!trigger_data)
1426		goto out;
1427
1428	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1429	if (!enable_data) {
1430		kfree(trigger_data);
1431		goto out;
1432	}
1433
1434	trigger_data->count = -1;
1435	trigger_data->ops = trigger_ops;
1436	trigger_data->cmd_ops = cmd_ops;
1437	INIT_LIST_HEAD(&trigger_data->list);
1438	RCU_INIT_POINTER(trigger_data->filter, NULL);
1439
1440	enable_data->hist = hist;
1441	enable_data->enable = enable;
1442	enable_data->file = event_enable_file;
1443	trigger_data->private_data = enable_data;
1444
1445	if (glob[0] == '!') {
1446		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1447		kfree(trigger_data);
1448		kfree(enable_data);
1449		ret = 0;
1450		goto out;
1451	}
1452
1453	/* Up the trigger_data count to make sure nothing frees it on failure */
1454	event_trigger_init(trigger_ops, trigger_data);
1455
1456	if (trigger) {
1457		number = strsep(&trigger, ":");
1458
1459		ret = -EINVAL;
1460		if (!strlen(number))
1461			goto out_free;
1462
 1463		/*
 1464		 * Parse the trigger count into trigger_data->count; it limits
 1465		 * how many times the trigger fires (the default -1 = unlimited).
 1466		 */
1467		ret = kstrtoul(number, 0, &trigger_data->count);
1468		if (ret)
1469			goto out_free;
1470	}
1471
1472	if (!param) /* if param is non-empty, it's supposed to be a filter */
1473		goto out_reg;
1474
1475	if (!cmd_ops->set_filter)
1476		goto out_reg;
1477
1478	ret = cmd_ops->set_filter(param, trigger_data, file);
1479	if (ret < 0)
1480		goto out_free;
1481
1482 out_reg:
1483	/* Don't let event modules unload while probe registered */
1484	ret = try_module_get(event_enable_file->event_call->mod);
1485	if (!ret) {
1486		ret = -EBUSY;
1487		goto out_free;
1488	}
1489
1490	ret = trace_event_enable_disable(event_enable_file, 1, 1);
1491	if (ret < 0)
1492		goto out_put;
1493	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
 1494	/*
 1495	 * On success, cmd_ops->reg() returns the number of triggers it
 1496	 * registered (normally 1); it returns zero if it registered
 1497	 * nothing.  Consider registering nothing a failure too.
 1498	 */
1499	if (!ret) {
1500		ret = -ENOENT;
1501		goto out_disable;
1502	} else if (ret < 0)
1503		goto out_disable;
 1504	/* Just return zero, not the number of registered triggers */
1505	ret = 0;
1506	event_trigger_free(trigger_ops, trigger_data);
1507 out:
1508	return ret;
1509
1510 out_disable:
1511	trace_event_enable_disable(event_enable_file, 0, 1);
1512 out_put:
1513	module_put(event_enable_file->event_call->mod);
1514 out_free:
1515	if (cmd_ops->set_filter)
1516		cmd_ops->set_filter(NULL, trigger_data, NULL);
1517	event_trigger_free(trigger_ops, trigger_data);
1518	kfree(enable_data);
1519	goto out;
1520}
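     /*
      * Worked example for the function above (system, event and field
      * names are placeholders only):
      *
      *   echo 'enable_event:sched:sched_switch:2 if common_pid == 1' > \
      *           events/syscalls/sys_enter_read/trigger
      *
      * parses system = "sched", event = "sched_switch", a count of 2 and
      * a filter on the triggering sys_enter_read event; sched_switch is
      * enabled in soft mode here so that at run time the trigger only
      * has to clear its SOFT_DISABLED bit.
      */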
1521
1522int event_enable_register_trigger(char *glob,
1523				  struct event_trigger_ops *ops,
1524				  struct event_trigger_data *data,
1525				  struct trace_event_file *file)
1526{
1527	struct enable_trigger_data *enable_data = data->private_data;
1528	struct enable_trigger_data *test_enable_data;
1529	struct event_trigger_data *test;
1530	int ret = 0;
1531
1532	lockdep_assert_held(&event_mutex);
1533
1534	list_for_each_entry(test, &file->triggers, list) {
1535		test_enable_data = test->private_data;
1536		if (test_enable_data &&
1537		    (test->cmd_ops->trigger_type ==
1538		     data->cmd_ops->trigger_type) &&
1539		    (test_enable_data->file == enable_data->file)) {
1540			ret = -EEXIST;
1541			goto out;
1542		}
1543	}
1544
1545	if (data->ops->init) {
1546		ret = data->ops->init(data->ops, data);
1547		if (ret < 0)
1548			goto out;
1549	}
1550
1551	list_add_rcu(&data->list, &file->triggers);
1552	ret++;
1553
1554	update_cond_flag(file);
1555	if (trace_event_trigger_enable_disable(file, 1) < 0) {
1556		list_del_rcu(&data->list);
1557		update_cond_flag(file);
1558		ret--;
1559	}
1560out:
1561	return ret;
1562}
1563
1564void event_enable_unregister_trigger(char *glob,
1565				     struct event_trigger_ops *ops,
1566				     struct event_trigger_data *test,
1567				     struct trace_event_file *file)
1568{
1569	struct enable_trigger_data *test_enable_data = test->private_data;
1570	struct enable_trigger_data *enable_data;
1571	struct event_trigger_data *data;
1572	bool unregistered = false;
1573
1574	lockdep_assert_held(&event_mutex);
1575
1576	list_for_each_entry(data, &file->triggers, list) {
1577		enable_data = data->private_data;
1578		if (enable_data &&
1579		    (data->cmd_ops->trigger_type ==
1580		     test->cmd_ops->trigger_type) &&
1581		    (enable_data->file == test_enable_data->file)) {
1582			unregistered = true;
1583			list_del_rcu(&data->list);
1584			trace_event_trigger_enable_disable(file, 0);
1585			update_cond_flag(file);
1586			break;
1587		}
1588	}
1589
1590	if (unregistered && data->ops->free)
1591		data->ops->free(data->ops, data);
1592}
1593
1594static struct event_trigger_ops *
1595event_enable_get_trigger_ops(char *cmd, char *param)
1596{
1597	struct event_trigger_ops *ops;
1598	bool enable;
1599
1600#ifdef CONFIG_HIST_TRIGGERS
1601	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1602		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1603#else
1604	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1605#endif
1606	if (enable)
1607		ops = param ? &event_enable_count_trigger_ops :
1608			&event_enable_trigger_ops;
1609	else
1610		ops = param ? &event_disable_count_trigger_ops :
1611			&event_disable_trigger_ops;
1612
1613	return ops;
1614}
1615
1616static struct event_command trigger_enable_cmd = {
1617	.name			= ENABLE_EVENT_STR,
1618	.trigger_type		= ETT_EVENT_ENABLE,
1619	.func			= event_enable_trigger_func,
1620	.reg			= event_enable_register_trigger,
1621	.unreg			= event_enable_unregister_trigger,
1622	.get_trigger_ops	= event_enable_get_trigger_ops,
1623	.set_filter		= set_trigger_filter,
1624};
1625
1626static struct event_command trigger_disable_cmd = {
1627	.name			= DISABLE_EVENT_STR,
1628	.trigger_type		= ETT_EVENT_ENABLE,
1629	.func			= event_enable_trigger_func,
1630	.reg			= event_enable_register_trigger,
1631	.unreg			= event_enable_unregister_trigger,
1632	.get_trigger_ops	= event_enable_get_trigger_ops,
1633	.set_filter		= set_trigger_filter,
1634};
1635
1636static __init void unregister_trigger_enable_disable_cmds(void)
1637{
1638	unregister_event_command(&trigger_enable_cmd);
1639	unregister_event_command(&trigger_disable_cmd);
1640}
1641
1642static __init int register_trigger_enable_disable_cmds(void)
1643{
1644	int ret;
1645
1646	ret = register_event_command(&trigger_enable_cmd);
1647	if (WARN_ON(ret < 0))
1648		return ret;
1649	ret = register_event_command(&trigger_disable_cmd);
1650	if (WARN_ON(ret < 0))
1651		unregister_trigger_enable_disable_cmds();
1652
1653	return ret;
1654}
1655
1656static __init int register_trigger_traceon_traceoff_cmds(void)
1657{
1658	int ret;
1659
1660	ret = register_event_command(&trigger_traceon_cmd);
1661	if (WARN_ON(ret < 0))
1662		return ret;
1663	ret = register_event_command(&trigger_traceoff_cmd);
1664	if (WARN_ON(ret < 0))
1665		unregister_trigger_traceon_traceoff_cmds();
1666
1667	return ret;
1668}
1669
1670__init int register_trigger_cmds(void)
1671{
1672	register_trigger_traceon_traceoff_cmds();
1673	register_trigger_snapshot_cmd();
1674	register_trigger_stacktrace_cmd();
1675	register_trigger_enable_disable_cmds();
1676	register_trigger_hist_enable_disable_cmds();
1677	register_trigger_hist_cmd();
1678
1679	return 0;
1680}