   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * trace_events_trigger - trace event triggers
   4 *
   5 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
   6 */
   7
   8#include <linux/security.h>
   9#include <linux/module.h>
  10#include <linux/ctype.h>
  11#include <linux/mutex.h>
  12#include <linux/slab.h>
  13#include <linux/rculist.h>
  14
  15#include "trace.h"
  16
  17static LIST_HEAD(trigger_commands);
  18static DEFINE_MUTEX(trigger_cmd_mutex);
  19
  20void trigger_data_free(struct event_trigger_data *data)
  21{
  22	if (data->cmd_ops->set_filter)
  23		data->cmd_ops->set_filter(NULL, data, NULL);
  24
  25	/* make sure current triggers exit before free */
  26	tracepoint_synchronize_unregister();
  27
  28	kfree(data);
  29}
  30
  31/**
  32 * event_triggers_call - Call triggers associated with a trace event
  33 * @file: The trace_event_file associated with the event
  34 * @rec: The trace entry for the event, NULL for unconditional invocation
  35 *
  36 * For each trigger associated with an event, invoke the trigger
  37 * function registered with the associated trigger command.  If rec is
  38 * non-NULL, it means that the trigger requires further processing and
  39 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
   40 * trigger has a filter associated with it, rec will be checked against
   41 * the filter and, if the record matches, the trigger will be invoked.
  42 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
  43 * in any case until the current event is written, the trigger
  44 * function isn't invoked but the bit associated with the deferred
  45 * trigger is set in the return value.
  46 *
  47 * Returns an enum event_trigger_type value containing a set bit for
  48 * any trigger that should be deferred, ETT_NONE if nothing to defer.
  49 *
  50 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
  51 *
  52 * Return: an enum event_trigger_type value containing a set bit for
  53 * any trigger that should be deferred, ETT_NONE if nothing to defer.
  54 */
  55enum event_trigger_type
  56event_triggers_call(struct trace_event_file *file, void *rec,
  57		    struct ring_buffer_event *event)
  58{
  59	struct event_trigger_data *data;
  60	enum event_trigger_type tt = ETT_NONE;
  61	struct event_filter *filter;
  62
  63	if (list_empty(&file->triggers))
  64		return tt;
  65
  66	list_for_each_entry_rcu(data, &file->triggers, list) {
  67		if (data->paused)
  68			continue;
  69		if (!rec) {
  70			data->ops->func(data, rec, event);
  71			continue;
  72		}
  73		filter = rcu_dereference_sched(data->filter);
  74		if (filter && !filter_match_preds(filter, rec))
  75			continue;
  76		if (event_command_post_trigger(data->cmd_ops)) {
  77			tt |= data->cmd_ops->trigger_type;
  78			continue;
  79		}
  80		data->ops->func(data, rec, event);
  81	}
  82	return tt;
  83}
  84EXPORT_SYMBOL_GPL(event_triggers_call);
  85
  86/**
  87 * event_triggers_post_call - Call 'post_triggers' for a trace event
  88 * @file: The trace_event_file associated with the event
  89 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
  90 *
  91 * For each trigger associated with an event, invoke the trigger
  92 * function registered with the associated trigger command, if the
  93 * corresponding bit is set in the tt enum passed into this function.
  94 * See @event_triggers_call for details on how those bits are set.
  95 *
  96 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
  97 */
  98void
  99event_triggers_post_call(struct trace_event_file *file,
 100			 enum event_trigger_type tt)
 101{
 102	struct event_trigger_data *data;
 103
 104	list_for_each_entry_rcu(data, &file->triggers, list) {
 105		if (data->paused)
 106			continue;
 107		if (data->cmd_ops->trigger_type & tt)
 108			data->ops->func(data, NULL, NULL);
 109	}
 110}
 111EXPORT_SYMBOL_GPL(event_triggers_post_call);
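
/*
 * Typical call pattern (a sketch of how tracepoint handlers use the two
 * entry points above, not code from this file): event_triggers_call() is
 * invoked before the event is committed, to decide which triggers fire
 * immediately and which must be deferred, and event_triggers_post_call()
 * runs the deferred ones after the commit:
 *
 *	if (file->flags & EVENT_FILE_FL_TRIGGER_COND)
 *		tt = event_triggers_call(file, entry, event);
 *	... commit the event ...
 *	if (tt)
 *		event_triggers_post_call(file, tt);
 */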
 112
 113#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)
 114
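/*
 * seq_file callbacks backing reads of a per-event 'trigger' file.  When
 * the event has no triggers, trigger_start() returns the sentinel
 * SHOW_AVAILABLE_TRIGGERS so that trigger_show() lists the registered
 * trigger commands instead of showing an empty file.
 */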
 115static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
 116{
 117	struct trace_event_file *event_file = event_file_data(m->private);
 118
 119	if (t == SHOW_AVAILABLE_TRIGGERS)
 120		return NULL;
 121
 122	return seq_list_next(t, &event_file->triggers, pos);
 123}
 124
 125static void *trigger_start(struct seq_file *m, loff_t *pos)
 126{
 127	struct trace_event_file *event_file;
 128
 129	/* ->stop() is called even if ->start() fails */
 130	mutex_lock(&event_mutex);
 131	event_file = event_file_data(m->private);
 132	if (unlikely(!event_file))
 133		return ERR_PTR(-ENODEV);
 134
 135	if (list_empty(&event_file->triggers))
 136		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
 137
 138	return seq_list_start(&event_file->triggers, *pos);
 139}
 140
 141static void trigger_stop(struct seq_file *m, void *t)
 142{
 143	mutex_unlock(&event_mutex);
 144}
 145
 146static int trigger_show(struct seq_file *m, void *v)
 147{
 148	struct event_trigger_data *data;
 149	struct event_command *p;
 150
 151	if (v == SHOW_AVAILABLE_TRIGGERS) {
 152		seq_puts(m, "# Available triggers:\n");
 153		seq_putc(m, '#');
 154		mutex_lock(&trigger_cmd_mutex);
 155		list_for_each_entry_reverse(p, &trigger_commands, list)
 156			seq_printf(m, " %s", p->name);
 157		seq_putc(m, '\n');
 158		mutex_unlock(&trigger_cmd_mutex);
 159		return 0;
 160	}
 161
 162	data = list_entry(v, struct event_trigger_data, list);
 163	data->ops->print(m, data->ops, data);
 164
 165	return 0;
 166}
 167
 168static const struct seq_operations event_triggers_seq_ops = {
 169	.start = trigger_start,
 170	.next = trigger_next,
 171	.stop = trigger_stop,
 172	.show = trigger_show,
 173};
 174
 175static int event_trigger_regex_open(struct inode *inode, struct file *file)
 176{
 177	int ret;
 178
 179	ret = security_locked_down(LOCKDOWN_TRACEFS);
 180	if (ret)
 181		return ret;
 182
 183	mutex_lock(&event_mutex);
 184
 185	if (unlikely(!event_file_data(file))) {
 186		mutex_unlock(&event_mutex);
 187		return -ENODEV;
 188	}
 189
 190	if ((file->f_mode & FMODE_WRITE) &&
 191	    (file->f_flags & O_TRUNC)) {
 192		struct trace_event_file *event_file;
 193		struct event_command *p;
 194
 195		event_file = event_file_data(file);
 196
 197		list_for_each_entry(p, &trigger_commands, list) {
 198			if (p->unreg_all)
 199				p->unreg_all(event_file);
 200		}
 201	}
 202
 203	if (file->f_mode & FMODE_READ) {
 204		ret = seq_open(file, &event_triggers_seq_ops);
 205		if (!ret) {
 206			struct seq_file *m = file->private_data;
 207			m->private = file;
 208		}
 209	}
 210
 211	mutex_unlock(&event_mutex);
 212
 213	return ret;
 214}
 215
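/*
 * Parse a single command written to a 'trigger' file.  The general form
 * is '[!]<command>[:count] [if filter]'; a leading '!' removes an
 * existing trigger.  For example (hypothetical shell usage, relative to
 * the tracefs mount point):
 *
 *	echo 'traceoff:5 if prev_pid == 0' > events/sched/sched_switch/trigger
 */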
 216static int trigger_process_regex(struct trace_event_file *file, char *buff)
 217{
 218	char *command, *next = buff;
 219	struct event_command *p;
 220	int ret = -EINVAL;
 221
 222	command = strsep(&next, ": \t");
 223	command = (command[0] != '!') ? command : command + 1;
 224
 225	mutex_lock(&trigger_cmd_mutex);
 226	list_for_each_entry(p, &trigger_commands, list) {
 227		if (strcmp(p->name, command) == 0) {
 228			ret = p->func(p, file, buff, command, next);
 229			goto out_unlock;
 230		}
 231	}
 232 out_unlock:
 233	mutex_unlock(&trigger_cmd_mutex);
 234
 235	return ret;
 236}
 237
 238static ssize_t event_trigger_regex_write(struct file *file,
 239					 const char __user *ubuf,
 240					 size_t cnt, loff_t *ppos)
 241{
 242	struct trace_event_file *event_file;
 243	ssize_t ret;
 244	char *buf;
 245
 246	if (!cnt)
 247		return 0;
 248
 249	if (cnt >= PAGE_SIZE)
 250		return -EINVAL;
 251
 252	buf = memdup_user_nul(ubuf, cnt);
 253	if (IS_ERR(buf))
 254		return PTR_ERR(buf);
 255
 256	strim(buf);
 257
 258	mutex_lock(&event_mutex);
 259	event_file = event_file_data(file);
 260	if (unlikely(!event_file)) {
 261		mutex_unlock(&event_mutex);
 262		kfree(buf);
 263		return -ENODEV;
 264	}
 265	ret = trigger_process_regex(event_file, buf);
 266	mutex_unlock(&event_mutex);
 267
 268	kfree(buf);
 269	if (ret < 0)
 270		goto out;
 271
 272	*ppos += cnt;
 273	ret = cnt;
 274 out:
 275	return ret;
 276}
 277
 278static int event_trigger_regex_release(struct inode *inode, struct file *file)
 279{
 280	mutex_lock(&event_mutex);
 281
 282	if (file->f_mode & FMODE_READ)
 283		seq_release(inode, file);
 284
 285	mutex_unlock(&event_mutex);
 286
 287	return 0;
 288}
 289
 290static ssize_t
 291event_trigger_write(struct file *filp, const char __user *ubuf,
 292		    size_t cnt, loff_t *ppos)
 293{
 294	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
 295}
 296
 297static int
 298event_trigger_open(struct inode *inode, struct file *filp)
 299{
 300	/* Checks for tracefs lockdown */
 301	return event_trigger_regex_open(inode, filp);
 302}
 303
 304static int
 305event_trigger_release(struct inode *inode, struct file *file)
 306{
 307	return event_trigger_regex_release(inode, file);
 308}
 309
 310const struct file_operations event_trigger_fops = {
 311	.open = event_trigger_open,
 312	.read = seq_read,
 313	.write = event_trigger_write,
 314	.llseek = tracing_lseek,
 315	.release = event_trigger_release,
 316};
 317
 318/*
 319 * Currently we only register event commands from __init, so mark this
 320 * __init too.
 321 */
 322__init int register_event_command(struct event_command *cmd)
 323{
 324	struct event_command *p;
 325	int ret = 0;
 326
 327	mutex_lock(&trigger_cmd_mutex);
 328	list_for_each_entry(p, &trigger_commands, list) {
 329		if (strcmp(cmd->name, p->name) == 0) {
 330			ret = -EBUSY;
 331			goto out_unlock;
 332		}
 333	}
 334	list_add(&cmd->list, &trigger_commands);
 335 out_unlock:
 336	mutex_unlock(&trigger_cmd_mutex);
 337
 338	return ret;
 339}
 340
 341/*
 342 * Currently we only unregister event commands from __init, so mark
 343 * this __init too.
 344 */
 345__init int unregister_event_command(struct event_command *cmd)
 346{
 347	struct event_command *p, *n;
 348	int ret = -ENODEV;
 349
 350	mutex_lock(&trigger_cmd_mutex);
 351	list_for_each_entry_safe(p, n, &trigger_commands, list) {
 352		if (strcmp(cmd->name, p->name) == 0) {
 353			ret = 0;
 354			list_del_init(&p->list);
 355			goto out_unlock;
 356		}
 357	}
 358 out_unlock:
 359	mutex_unlock(&trigger_cmd_mutex);
 360
 361	return ret;
 362}
 363
 364/**
 365 * event_trigger_print - Generic event_trigger_ops @print implementation
 366 * @name: The name of the event trigger
 367 * @m: The seq_file being printed to
 368 * @data: Trigger-specific data
 369 * @filter_str: filter_str to print, if present
 370 *
 371 * Common implementation for event triggers to print themselves.
 372 *
 373 * Usually wrapped by a function that simply sets the @name of the
 374 * trigger command and then invokes this.
 375 *
 376 * Return: 0 on success, errno otherwise
 377 */
 378static int
 379event_trigger_print(const char *name, struct seq_file *m,
 380		    void *data, char *filter_str)
 381{
 382	long count = (long)data;
 383
 384	seq_puts(m, name);
 385
 386	if (count == -1)
 387		seq_puts(m, ":unlimited");
 388	else
 389		seq_printf(m, ":count=%ld", count);
 390
 391	if (filter_str)
 392		seq_printf(m, " if %s\n", filter_str);
 393	else
 394		seq_putc(m, '\n');
 395
 396	return 0;
 397}
 398
 399/**
 400 * event_trigger_init - Generic event_trigger_ops @init implementation
 401 * @ops: The trigger ops associated with the trigger
 402 * @data: Trigger-specific data
 403 *
 404 * Common implementation of event trigger initialization.
 405 *
 406 * Usually used directly as the @init method in event trigger
 407 * implementations.
 408 *
 409 * Return: 0 on success, errno otherwise
 410 */
 411int event_trigger_init(struct event_trigger_ops *ops,
 412		       struct event_trigger_data *data)
 413{
 414	data->ref++;
 415	return 0;
 416}
 417
 418/**
 419 * event_trigger_free - Generic event_trigger_ops @free implementation
 420 * @ops: The trigger ops associated with the trigger
 421 * @data: Trigger-specific data
 422 *
 423 * Common implementation of event trigger de-initialization.
 424 *
 425 * Usually used directly as the @free method in event trigger
 426 * implementations.
 427 */
 428static void
 429event_trigger_free(struct event_trigger_ops *ops,
 430		   struct event_trigger_data *data)
 431{
 432	if (WARN_ON_ONCE(data->ref <= 0))
 433		return;
 434
 435	data->ref--;
 436	if (!data->ref)
 437		trigger_data_free(data);
 438}
 439
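/*
 * Refcount "trigger mode" on @file: the first trigger added sets
 * EVENT_FILE_FL_TRIGGER_MODE_BIT and soft-enables the event, and the
 * last trigger removed clears the bit and drops that enable again.
 */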
 440int trace_event_trigger_enable_disable(struct trace_event_file *file,
 441				       int trigger_enable)
 442{
 443	int ret = 0;
 444
 445	if (trigger_enable) {
 446		if (atomic_inc_return(&file->tm_ref) > 1)
 447			return ret;
 448		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
 449		ret = trace_event_enable_disable(file, 1, 1);
 450	} else {
 451		if (atomic_dec_return(&file->tm_ref) > 0)
 452			return ret;
 453		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
 454		ret = trace_event_enable_disable(file, 0, 1);
 455	}
 456
 457	return ret;
 458}
 459
 460/**
 461 * clear_event_triggers - Clear all triggers associated with a trace array
 462 * @tr: The trace array to clear
 463 *
 464 * For each trigger, the triggering event has its tm_ref decremented
 465 * via trace_event_trigger_enable_disable(), and any associated event
 466 * (in the case of enable/disable_event triggers) will have its sm_ref
 467 * decremented via free()->trace_event_enable_disable().  That
 468 * combination effectively reverses the soft-mode/trigger state added
 469 * by trigger registration.
 470 *
 471 * Must be called with event_mutex held.
 472 */
 473void
 474clear_event_triggers(struct trace_array *tr)
 475{
 476	struct trace_event_file *file;
 477
 478	list_for_each_entry(file, &tr->events, list) {
 479		struct event_trigger_data *data, *n;
 480		list_for_each_entry_safe(data, n, &file->triggers, list) {
 481			trace_event_trigger_enable_disable(file, 0);
 482			list_del_rcu(&data->list);
 483			if (data->ops->free)
 484				data->ops->free(data->ops, data);
 485		}
 486	}
 487}
 488
 489/**
 490 * update_cond_flag - Set or reset the TRIGGER_COND bit
 491 * @file: The trace_event_file associated with the event
 492 *
 493 * If an event has triggers and any of those triggers has a filter or
 494 * a post_trigger, trigger invocation needs to be deferred until after
 495 * the current event has logged its data, and the event should have
 496 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 497 * cleared.
 498 */
 499void update_cond_flag(struct trace_event_file *file)
 500{
 501	struct event_trigger_data *data;
 502	bool set_cond = false;
 503
 504	list_for_each_entry_rcu(data, &file->triggers, list) {
 505		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
 506		    event_command_needs_rec(data->cmd_ops)) {
 507			set_cond = true;
 508			break;
 509		}
 510	}
 511
 512	if (set_cond)
 513		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
 514	else
 515		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
 516}
 517
 518/**
 519 * register_trigger - Generic event_command @reg implementation
 520 * @glob: The raw string used to register the trigger
 521 * @ops: The trigger ops associated with the trigger
 522 * @data: Trigger-specific data to associate with the trigger
 523 * @file: The trace_event_file associated with the event
 524 *
 525 * Common implementation for event trigger registration.
 526 *
 527 * Usually used directly as the @reg method in event command
 528 * implementations.
 529 *
 530 * Return: 0 on success, errno otherwise
 531 */
 532static int register_trigger(char *glob, struct event_trigger_ops *ops,
 533			    struct event_trigger_data *data,
 534			    struct trace_event_file *file)
 535{
 536	struct event_trigger_data *test;
 537	int ret = 0;
 538
 539	list_for_each_entry_rcu(test, &file->triggers, list) {
 540		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
 541			ret = -EEXIST;
 542			goto out;
 543		}
 544	}
 545
 546	if (data->ops->init) {
 547		ret = data->ops->init(data->ops, data);
 548		if (ret < 0)
 549			goto out;
 550	}
 551
 552	list_add_rcu(&data->list, &file->triggers);
 553	ret++;
 554
 555	update_cond_flag(file);
 556	if (trace_event_trigger_enable_disable(file, 1) < 0) {
 557		list_del_rcu(&data->list);
 558		update_cond_flag(file);
 559		ret--;
 560	}
 561out:
 562	return ret;
 563}
 564
 565/**
 566 * unregister_trigger - Generic event_command @unreg implementation
 567 * @glob: The raw string used to register the trigger
 568 * @ops: The trigger ops associated with the trigger
 569 * @test: Trigger-specific data used to find the trigger to remove
 570 * @file: The trace_event_file associated with the event
 571 *
 572 * Common implementation for event trigger unregistration.
 573 *
 574 * Usually used directly as the @unreg method in event command
 575 * implementations.
 576 */
 577static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
 578			       struct event_trigger_data *test,
 579			       struct trace_event_file *file)
 580{
 581	struct event_trigger_data *data;
 582	bool unregistered = false;
 583
 584	list_for_each_entry_rcu(data, &file->triggers, list) {
 585		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
 586			unregistered = true;
 587			list_del_rcu(&data->list);
 588			trace_event_trigger_enable_disable(file, 0);
 589			update_cond_flag(file);
 590			break;
 591		}
 592	}
 593
 594	if (unregistered && data->ops->free)
 595		data->ops->free(data->ops, data);
 596}
 597
 598/**
 599 * event_trigger_callback - Generic event_command @func implementation
 600 * @cmd_ops: The command ops, used for trigger registration
 601 * @file: The trace_event_file associated with the event
 602 * @glob: The raw string used to register the trigger
 603 * @cmd: The cmd portion of the string used to register the trigger
 604 * @param: The params portion of the string used to register the trigger
 605 *
 606 * Common implementation for event command parsing and trigger
 607 * instantiation.
 608 *
 609 * Usually used directly as the @func method in event command
 610 * implementations.
 611 *
 612 * Return: 0 on success, errno otherwise
 613 */
 614static int
 615event_trigger_callback(struct event_command *cmd_ops,
 616		       struct trace_event_file *file,
 617		       char *glob, char *cmd, char *param)
 618{
 619	struct event_trigger_data *trigger_data;
 620	struct event_trigger_ops *trigger_ops;
 621	char *trigger = NULL;
 622	char *number;
 623	int ret;
 624
 625	/* separate the trigger from the filter (t:n [if filter]) */
 626	if (param && isdigit(param[0]))
 627		trigger = strsep(&param, " \t");
 628
 629	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
 630
 631	ret = -ENOMEM;
 632	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
 633	if (!trigger_data)
 634		goto out;
 635
 636	trigger_data->count = -1;
 637	trigger_data->ops = trigger_ops;
 638	trigger_data->cmd_ops = cmd_ops;
 639	trigger_data->private_data = file;
 640	INIT_LIST_HEAD(&trigger_data->list);
 641	INIT_LIST_HEAD(&trigger_data->named_list);
 642
 643	if (glob[0] == '!') {
 644		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
 645		kfree(trigger_data);
 646		ret = 0;
 647		goto out;
 648	}
 649
 650	if (trigger) {
 651		number = strsep(&trigger, ":");
 652
 653		ret = -EINVAL;
 654		if (!strlen(number))
 655			goto out_free;
 656
 657		/*
 658		 * We use the callback data field (which is a pointer)
 659		 * as our counter.
 660		 */
 661		ret = kstrtoul(number, 0, &trigger_data->count);
 662		if (ret)
 663			goto out_free;
 664	}
 665
 666	if (!param) /* if param is non-empty, it's supposed to be a filter */
 667		goto out_reg;
 668
 669	if (!cmd_ops->set_filter)
 670		goto out_reg;
 671
 672	ret = cmd_ops->set_filter(param, trigger_data, file);
 673	if (ret < 0)
 674		goto out_free;
 675
 676 out_reg:
 677	/* Up the trigger_data count to make sure reg doesn't free it on failure */
 678	event_trigger_init(trigger_ops, trigger_data);
 679	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
 680	/*
 681	 * The above returns on success the # of functions enabled,
 682	 * but if it didn't find any functions it returns zero.
 683	 * Consider no functions a failure too.
 684	 */
 685	if (!ret) {
 686		cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
 687		ret = -ENOENT;
 688	} else if (ret > 0)
 689		ret = 0;
 690
 691	/* Down the counter of trigger_data or free it if not used anymore */
 692	event_trigger_free(trigger_ops, trigger_data);
 693 out:
 694	return ret;
 695
 696 out_free:
 697	if (cmd_ops->set_filter)
 698		cmd_ops->set_filter(NULL, trigger_data, NULL);
 699	kfree(trigger_data);
 700	goto out;
 701}
 702
 703/**
 704 * set_trigger_filter - Generic event_command @set_filter implementation
 705 * @filter_str: The filter string for the trigger, NULL to remove filter
 706 * @trigger_data: Trigger-specific data
 707 * @file: The trace_event_file associated with the event
 708 *
 709 * Common implementation for event command filter parsing and filter
 710 * instantiation.
 711 *
 712 * Usually used directly as the @set_filter method in event command
 713 * implementations.
 714 *
 715 * Also used to remove a filter (if filter_str = NULL).
 716 *
 717 * Return: 0 on success, errno otherwise
 718 */
 719int set_trigger_filter(char *filter_str,
 720		       struct event_trigger_data *trigger_data,
 721		       struct trace_event_file *file)
 722{
 723	struct event_trigger_data *data = trigger_data;
 724	struct event_filter *filter = NULL, *tmp;
 725	int ret = -EINVAL;
 726	char *s;
 727
 728	if (!filter_str) /* clear the current filter */
 729		goto assign;
 730
 731	s = strsep(&filter_str, " \t");
 732
 733	if (!strlen(s) || strcmp(s, "if") != 0)
 734		goto out;
 735
 736	if (!filter_str)
 737		goto out;
 738
 739	/* The filter is for the 'trigger' event, not the triggered event */
 740	ret = create_event_filter(file->tr, file->event_call,
 741				  filter_str, false, &filter);
 742	/*
 743	 * If create_event_filter() fails, filter still needs to be freed.
 744	 * Which the calling code will do with data->filter.
 745	 */
 746 assign:
 747	tmp = rcu_access_pointer(data->filter);
 748
 749	rcu_assign_pointer(data->filter, filter);
 750
 751	if (tmp) {
 752		/* Make sure the call is done with the filter */
 753		tracepoint_synchronize_unregister();
 754		free_event_filter(tmp);
 755	}
 756
 757	kfree(data->filter_str);
 758	data->filter_str = NULL;
 759
 760	if (filter_str) {
 761		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
 762		if (!data->filter_str) {
 763			free_event_filter(rcu_access_pointer(data->filter));
 764			data->filter = NULL;
 765			ret = -ENOMEM;
 766		}
 767	}
 768 out:
 769	return ret;
 770}
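
/*
 * For example (a sketch): with the command 'traceoff:5 if prev_pid == 0',
 * the @filter_str passed in is 'if prev_pid == 0'; the leading 'if' token
 * is stripped and the remainder is compiled by create_event_filter()
 * against the fields of the event the trigger is attached to.
 */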
 771
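/*
 * Named triggers (in this kernel, used by 'hist' triggers created with a
 * ':name=...' parameter) share a single instance of trigger data; the
 * list below tracks every named trigger currently registered.
 */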
 772static LIST_HEAD(named_triggers);
 773
 774/**
 775 * find_named_trigger - Find the common named trigger associated with @name
 776 * @name: The name of the set of named triggers to find the common data for
 777 *
 778 * Named triggers are sets of triggers that share a common set of
 779 * trigger data.  The first named trigger registered with a given name
 780 * owns the common trigger data that the others subsequently
 781 * registered with the same name will reference.  This function
 782 * returns the common trigger data associated with that first
 783 * registered instance.
 784 *
 785 * Return: the common trigger data for the given named trigger on
 786 * success, NULL otherwise.
 787 */
 788struct event_trigger_data *find_named_trigger(const char *name)
 789{
 790	struct event_trigger_data *data;
 791
 792	if (!name)
 793		return NULL;
 794
 795	list_for_each_entry(data, &named_triggers, named_list) {
 796		if (data->named_data)
 797			continue;
 798		if (strcmp(data->name, name) == 0)
 799			return data;
 800	}
 801
 802	return NULL;
 803}
 804
 805/**
 806 * is_named_trigger - determine if a given trigger is a named trigger
 807 * @test: The trigger data to test
 808 *
 809 * Return: true if 'test' is a named trigger, false otherwise.
 810 */
 811bool is_named_trigger(struct event_trigger_data *test)
 812{
 813	struct event_trigger_data *data;
 814
 815	list_for_each_entry(data, &named_triggers, named_list) {
 816		if (test == data)
 817			return true;
 818	}
 819
 820	return false;
 821}
 822
 823/**
 824 * save_named_trigger - save the trigger in the named trigger list
 825 * @name: The name of the named trigger set
 826 * @data: The trigger data to save
 827 *
 828 * Return: 0 if successful, negative error otherwise.
 829 */
 830int save_named_trigger(const char *name, struct event_trigger_data *data)
 831{
 832	data->name = kstrdup(name, GFP_KERNEL);
 833	if (!data->name)
 834		return -ENOMEM;
 835
 836	list_add(&data->named_list, &named_triggers);
 837
 838	return 0;
 839}
 840
 841/**
 842 * del_named_trigger - delete a trigger from the named trigger list
 843 * @data: The trigger data to delete
 844 */
 845void del_named_trigger(struct event_trigger_data *data)
 846{
 847	kfree(data->name);
 848	data->name = NULL;
 849
 850	list_del(&data->named_list);
 851}
 852
 853static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
 854{
 855	struct event_trigger_data *test;
 856
 857	list_for_each_entry(test, &named_triggers, named_list) {
 858		if (strcmp(test->name, data->name) == 0) {
 859			if (pause) {
 860				test->paused_tmp = test->paused;
 861				test->paused = true;
 862			} else {
 863				test->paused = test->paused_tmp;
 864			}
 865		}
 866	}
 867}
 868
 869/**
 870 * pause_named_trigger - Pause all named triggers with the same name
 871 * @data: The trigger data of a named trigger to pause
 872 *
 873 * Pauses a named trigger along with all other triggers having the
 874 * same name.  Because named triggers share a common set of data,
 875 * pausing only one is meaningless, so pausing one named trigger needs
 876 * to pause all triggers with the same name.
 877 */
 878void pause_named_trigger(struct event_trigger_data *data)
 879{
 880	__pause_named_trigger(data, true);
 881}
 882
 883/**
 884 * unpause_named_trigger - Un-pause all named triggers with the same name
 885 * @data: The trigger data of a named trigger to unpause
 886 *
 887 * Un-pauses a named trigger along with all other triggers having the
 888 * same name.  Because named triggers share a common set of data,
 889 * unpausing only one is meaningless, so unpausing one named trigger
 890 * needs to unpause all triggers with the same name.
 891 */
 892void unpause_named_trigger(struct event_trigger_data *data)
 893{
 894	__pause_named_trigger(data, false);
 895}
 896
 897/**
 898 * set_named_trigger_data - Associate common named trigger data
  899 * @data: The trigger data to associate the common data with
  900 * @named_data: The first-registered trigger data owning the common data
 901 * Named triggers are sets of triggers that share a common set of
 902 * trigger data.  The first named trigger registered with a given name
 903 * owns the common trigger data that the others subsequently
 904 * registered with the same name will reference.  This function
 905 * associates the common trigger data from the first trigger with the
 906 * given trigger.
 907 */
 908void set_named_trigger_data(struct event_trigger_data *data,
 909			    struct event_trigger_data *named_data)
 910{
 911	data->named_data = named_data;
 912}
 913
 914struct event_trigger_data *
 915get_named_trigger_data(struct event_trigger_data *data)
 916{
 917	return data->named_data;
 918}
 919
 920static void
 921traceon_trigger(struct event_trigger_data *data, void *rec,
 922		struct ring_buffer_event *event)
 923{
 924	if (tracing_is_on())
 925		return;
 926
 927	tracing_on();
 928}
 929
 930static void
 931traceon_count_trigger(struct event_trigger_data *data, void *rec,
 932		      struct ring_buffer_event *event)
 933{
 934	if (tracing_is_on())
 935		return;
 936
 937	if (!data->count)
 938		return;
 939
 940	if (data->count != -1)
 941		(data->count)--;
 942
 943	tracing_on();
 944}
 945
 946static void
 947traceoff_trigger(struct event_trigger_data *data, void *rec,
 948		 struct ring_buffer_event *event)
 949{
 950	if (!tracing_is_on())
 951		return;
 952
 953	tracing_off();
 954}
 955
 956static void
 957traceoff_count_trigger(struct event_trigger_data *data, void *rec,
 958		       struct ring_buffer_event *event)
 959{
 960	if (!tracing_is_on())
 961		return;
 962
 963	if (!data->count)
 964		return;
 965
 966	if (data->count != -1)
 967		(data->count)--;
 968
 969	tracing_off();
 970}
 971
 972static int
 973traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
 974		      struct event_trigger_data *data)
 975{
 976	return event_trigger_print("traceon", m, (void *)data->count,
 977				   data->filter_str);
 978}
 979
 980static int
 981traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
 982		       struct event_trigger_data *data)
 983{
 984	return event_trigger_print("traceoff", m, (void *)data->count,
 985				   data->filter_str);
 986}
 987
 988static struct event_trigger_ops traceon_trigger_ops = {
 989	.func			= traceon_trigger,
 990	.print			= traceon_trigger_print,
 991	.init			= event_trigger_init,
 992	.free			= event_trigger_free,
 993};
 994
 995static struct event_trigger_ops traceon_count_trigger_ops = {
 996	.func			= traceon_count_trigger,
 997	.print			= traceon_trigger_print,
 998	.init			= event_trigger_init,
 999	.free			= event_trigger_free,
1000};
1001
1002static struct event_trigger_ops traceoff_trigger_ops = {
1003	.func			= traceoff_trigger,
1004	.print			= traceoff_trigger_print,
1005	.init			= event_trigger_init,
1006	.free			= event_trigger_free,
1007};
1008
1009static struct event_trigger_ops traceoff_count_trigger_ops = {
1010	.func			= traceoff_count_trigger,
1011	.print			= traceoff_trigger_print,
1012	.init			= event_trigger_init,
1013	.free			= event_trigger_free,
1014};
1015
1016static struct event_trigger_ops *
1017onoff_get_trigger_ops(char *cmd, char *param)
1018{
1019	struct event_trigger_ops *ops;
1020
1021	/* we register both traceon and traceoff to this callback */
1022	if (strcmp(cmd, "traceon") == 0)
1023		ops = param ? &traceon_count_trigger_ops :
1024			&traceon_trigger_ops;
1025	else
1026		ops = param ? &traceoff_count_trigger_ops :
1027			&traceoff_trigger_ops;
1028
1029	return ops;
1030}
1031
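/*
 * The 'traceon' and 'traceoff' trigger commands, e.g. (hypothetical
 * usage, relative to the tracefs mount point):
 *
 *	echo 'traceoff' > events/signal/signal_deliver/trigger
 *
 * An optional ':count' limits how many times the trigger can fire.
 */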
1032static struct event_command trigger_traceon_cmd = {
1033	.name			= "traceon",
1034	.trigger_type		= ETT_TRACE_ONOFF,
1035	.func			= event_trigger_callback,
1036	.reg			= register_trigger,
1037	.unreg			= unregister_trigger,
1038	.get_trigger_ops	= onoff_get_trigger_ops,
1039	.set_filter		= set_trigger_filter,
1040};
1041
1042static struct event_command trigger_traceoff_cmd = {
1043	.name			= "traceoff",
1044	.trigger_type		= ETT_TRACE_ONOFF,
1045	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1046	.func			= event_trigger_callback,
1047	.reg			= register_trigger,
1048	.unreg			= unregister_trigger,
1049	.get_trigger_ops	= onoff_get_trigger_ops,
1050	.set_filter		= set_trigger_filter,
1051};
1052
1053#ifdef CONFIG_TRACER_SNAPSHOT
1054static void
1055snapshot_trigger(struct event_trigger_data *data, void *rec,
1056		 struct ring_buffer_event *event)
1057{
1058	struct trace_event_file *file = data->private_data;
1059
1060	if (file)
1061		tracing_snapshot_instance(file->tr);
1062	else
1063		tracing_snapshot();
1064}
1065
1066static void
1067snapshot_count_trigger(struct event_trigger_data *data, void *rec,
1068		       struct ring_buffer_event *event)
1069{
1070	if (!data->count)
1071		return;
1072
1073	if (data->count != -1)
1074		(data->count)--;
1075
1076	snapshot_trigger(data, rec, event);
1077}
1078
1079static int
1080register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
1081			  struct event_trigger_data *data,
1082			  struct trace_event_file *file)
1083{
1084	int ret = register_trigger(glob, ops, data, file);
1085
1086	if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
1087		unregister_trigger(glob, ops, data, file);
1088		ret = 0;
1089	}
1090
1091	return ret;
1092}
1093
1094static int
1095snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1096		       struct event_trigger_data *data)
1097{
1098	return event_trigger_print("snapshot", m, (void *)data->count,
1099				   data->filter_str);
1100}
1101
1102static struct event_trigger_ops snapshot_trigger_ops = {
1103	.func			= snapshot_trigger,
1104	.print			= snapshot_trigger_print,
1105	.init			= event_trigger_init,
1106	.free			= event_trigger_free,
1107};
1108
1109static struct event_trigger_ops snapshot_count_trigger_ops = {
1110	.func			= snapshot_count_trigger,
1111	.print			= snapshot_trigger_print,
1112	.init			= event_trigger_init,
1113	.free			= event_trigger_free,
1114};
1115
1116static struct event_trigger_ops *
1117snapshot_get_trigger_ops(char *cmd, char *param)
1118{
1119	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1120}
1121
1122static struct event_command trigger_snapshot_cmd = {
1123	.name			= "snapshot",
1124	.trigger_type		= ETT_SNAPSHOT,
1125	.func			= event_trigger_callback,
1126	.reg			= register_snapshot_trigger,
1127	.unreg			= unregister_trigger,
1128	.get_trigger_ops	= snapshot_get_trigger_ops,
1129	.set_filter		= set_trigger_filter,
1130};
1131
1132static __init int register_trigger_snapshot_cmd(void)
1133{
1134	int ret;
1135
1136	ret = register_event_command(&trigger_snapshot_cmd);
1137	WARN_ON(ret < 0);
1138
1139	return ret;
1140}
1141#else
1142static __init int register_trigger_snapshot_cmd(void) { return 0; }
1143#endif /* CONFIG_TRACER_SNAPSHOT */
1144
1145#ifdef CONFIG_STACKTRACE
1146#ifdef CONFIG_UNWINDER_ORC
1147/* Skip 2:
1148 *   event_triggers_post_call()
1149 *   trace_event_raw_event_xxx()
1150 */
1151# define STACK_SKIP 2
1152#else
1153/*
1154 * Skip 4:
1155 *   stacktrace_trigger()
1156 *   event_triggers_post_call()
1157 *   trace_event_buffer_commit()
1158 *   trace_event_raw_event_xxx()
1159 */
1160#define STACK_SKIP 4
1161#endif
1162
1163static void
1164stacktrace_trigger(struct event_trigger_data *data, void *rec,
1165		   struct ring_buffer_event *event)
1166{
1167	trace_dump_stack(STACK_SKIP);
1168}
1169
1170static void
1171stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
1172			 struct ring_buffer_event *event)
1173{
1174	if (!data->count)
1175		return;
1176
1177	if (data->count != -1)
1178		(data->count)--;
1179
1180	stacktrace_trigger(data, rec, event);
1181}
1182
1183static int
1184stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1185			 struct event_trigger_data *data)
1186{
1187	return event_trigger_print("stacktrace", m, (void *)data->count,
1188				   data->filter_str);
1189}
1190
1191static struct event_trigger_ops stacktrace_trigger_ops = {
1192	.func			= stacktrace_trigger,
1193	.print			= stacktrace_trigger_print,
1194	.init			= event_trigger_init,
1195	.free			= event_trigger_free,
1196};
1197
1198static struct event_trigger_ops stacktrace_count_trigger_ops = {
1199	.func			= stacktrace_count_trigger,
1200	.print			= stacktrace_trigger_print,
1201	.init			= event_trigger_init,
1202	.free			= event_trigger_free,
1203};
1204
1205static struct event_trigger_ops *
1206stacktrace_get_trigger_ops(char *cmd, char *param)
1207{
1208	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1209}
1210
1211static struct event_command trigger_stacktrace_cmd = {
1212	.name			= "stacktrace",
1213	.trigger_type		= ETT_STACKTRACE,
1214	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1215	.func			= event_trigger_callback,
1216	.reg			= register_trigger,
1217	.unreg			= unregister_trigger,
1218	.get_trigger_ops	= stacktrace_get_trigger_ops,
1219	.set_filter		= set_trigger_filter,
1220};
1221
1222static __init int register_trigger_stacktrace_cmd(void)
1223{
1224	int ret;
1225
1226	ret = register_event_command(&trigger_stacktrace_cmd);
1227	WARN_ON(ret < 0);
1228
1229	return ret;
1230}
1231#else
1232static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1233#endif /* CONFIG_STACKTRACE */
1234
1235static __init void unregister_trigger_traceon_traceoff_cmds(void)
1236{
1237	unregister_event_command(&trigger_traceon_cmd);
1238	unregister_event_command(&trigger_traceoff_cmd);
1239}
1240
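/*
 * An enable_event/disable_event trigger fires by flipping the target
 * event's SOFT_DISABLED bit; the target event stays registered, so this
 * is safe to do from the tracepoint context these triggers run in.
 */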
1241static void
1242event_enable_trigger(struct event_trigger_data *data, void *rec,
1243		     struct ring_buffer_event *event)
1244{
1245	struct enable_trigger_data *enable_data = data->private_data;
1246
1247	if (enable_data->enable)
1248		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1249	else
1250		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1251}
1252
1253static void
1254event_enable_count_trigger(struct event_trigger_data *data, void *rec,
1255			   struct ring_buffer_event *event)
1256{
1257	struct enable_trigger_data *enable_data = data->private_data;
1258
1259	if (!data->count)
1260		return;
1261
1262	/* Skip if the event is in a state we want to switch to */
1263	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1264		return;
1265
1266	if (data->count != -1)
1267		(data->count)--;
1268
1269	event_enable_trigger(data, rec, event);
1270}
1271
1272int event_enable_trigger_print(struct seq_file *m,
1273			       struct event_trigger_ops *ops,
1274			       struct event_trigger_data *data)
1275{
1276	struct enable_trigger_data *enable_data = data->private_data;
1277
1278	seq_printf(m, "%s:%s:%s",
1279		   enable_data->hist ?
1280		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1281		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1282		   enable_data->file->event_call->class->system,
1283		   trace_event_name(enable_data->file->event_call));
1284
1285	if (data->count == -1)
1286		seq_puts(m, ":unlimited");
1287	else
1288		seq_printf(m, ":count=%ld", data->count);
1289
1290	if (data->filter_str)
1291		seq_printf(m, " if %s\n", data->filter_str);
1292	else
1293		seq_putc(m, '\n');
1294
1295	return 0;
1296}
1297
1298void event_enable_trigger_free(struct event_trigger_ops *ops,
1299			       struct event_trigger_data *data)
1300{
1301	struct enable_trigger_data *enable_data = data->private_data;
1302
1303	if (WARN_ON_ONCE(data->ref <= 0))
1304		return;
1305
1306	data->ref--;
1307	if (!data->ref) {
1308		/* Remove the SOFT_MODE flag */
1309		trace_event_enable_disable(enable_data->file, 0, 1);
1310		module_put(enable_data->file->event_call->mod);
1311		trigger_data_free(data);
1312		kfree(enable_data);
1313	}
1314}
1315
1316static struct event_trigger_ops event_enable_trigger_ops = {
1317	.func			= event_enable_trigger,
1318	.print			= event_enable_trigger_print,
1319	.init			= event_trigger_init,
1320	.free			= event_enable_trigger_free,
1321};
1322
1323static struct event_trigger_ops event_enable_count_trigger_ops = {
1324	.func			= event_enable_count_trigger,
1325	.print			= event_enable_trigger_print,
1326	.init			= event_trigger_init,
1327	.free			= event_enable_trigger_free,
1328};
1329
1330static struct event_trigger_ops event_disable_trigger_ops = {
1331	.func			= event_enable_trigger,
1332	.print			= event_enable_trigger_print,
1333	.init			= event_trigger_init,
1334	.free			= event_enable_trigger_free,
1335};
1336
1337static struct event_trigger_ops event_disable_count_trigger_ops = {
1338	.func			= event_enable_count_trigger,
1339	.print			= event_enable_trigger_print,
1340	.init			= event_trigger_init,
1341	.free			= event_enable_trigger_free,
1342};
1343
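/*
 * Parse an 'enable_event'/'disable_event' (or hist enable/disable)
 * trigger, whose parameter names the event to toggle, e.g.
 * (hypothetical usage, relative to the tracefs mount point):
 *
 *	echo 'enable_event:kmem:kmalloc:3' > events/syscalls/sys_enter_read/trigger
 */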
1344int event_enable_trigger_func(struct event_command *cmd_ops,
1345			      struct trace_event_file *file,
1346			      char *glob, char *cmd, char *param)
1347{
1348	struct trace_event_file *event_enable_file;
1349	struct enable_trigger_data *enable_data;
1350	struct event_trigger_data *trigger_data;
1351	struct event_trigger_ops *trigger_ops;
1352	struct trace_array *tr = file->tr;
1353	const char *system;
1354	const char *event;
1355	bool hist = false;
1356	char *trigger;
1357	char *number;
1358	bool enable;
1359	int ret;
1360
1361	if (!param)
1362		return -EINVAL;
1363
1364	/* separate the trigger from the filter (s:e:n [if filter]) */
1365	trigger = strsep(&param, " \t");
1366	if (!trigger)
1367		return -EINVAL;
1368
1369	system = strsep(&trigger, ":");
1370	if (!trigger)
1371		return -EINVAL;
1372
1373	event = strsep(&trigger, ":");
1374
1375	ret = -EINVAL;
1376	event_enable_file = find_event_file(tr, system, event);
1377	if (!event_enable_file)
1378		goto out;
1379
1380#ifdef CONFIG_HIST_TRIGGERS
1381	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1382		(strcmp(cmd, DISABLE_HIST_STR) == 0));
1383
1384	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1385		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1386#else
1387	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1388#endif
1389	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1390
1391	ret = -ENOMEM;
1392	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1393	if (!trigger_data)
1394		goto out;
1395
1396	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1397	if (!enable_data) {
1398		kfree(trigger_data);
1399		goto out;
1400	}
1401
1402	trigger_data->count = -1;
1403	trigger_data->ops = trigger_ops;
1404	trigger_data->cmd_ops = cmd_ops;
1405	INIT_LIST_HEAD(&trigger_data->list);
1406	RCU_INIT_POINTER(trigger_data->filter, NULL);
1407
1408	enable_data->hist = hist;
1409	enable_data->enable = enable;
1410	enable_data->file = event_enable_file;
1411	trigger_data->private_data = enable_data;
1412
1413	if (glob[0] == '!') {
1414		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1415		kfree(trigger_data);
1416		kfree(enable_data);
1417		ret = 0;
1418		goto out;
1419	}
1420
1421	/* Up the trigger_data count to make sure nothing frees it on failure */
1422	event_trigger_init(trigger_ops, trigger_data);
1423
1424	if (trigger) {
1425		number = strsep(&trigger, ":");
1426
1427		ret = -EINVAL;
1428		if (!strlen(number))
1429			goto out_free;
1430
1431		/*
1432		 * We use the callback data field (which is a pointer)
1433		 * as our counter.
1434		 */
1435		ret = kstrtoul(number, 0, &trigger_data->count);
1436		if (ret)
1437			goto out_free;
1438	}
1439
1440	if (!param) /* if param is non-empty, it's supposed to be a filter */
1441		goto out_reg;
1442
1443	if (!cmd_ops->set_filter)
1444		goto out_reg;
1445
1446	ret = cmd_ops->set_filter(param, trigger_data, file);
1447	if (ret < 0)
1448		goto out_free;
1449
1450 out_reg:
1451	/* Don't let event modules unload while probe registered */
1452	ret = try_module_get(event_enable_file->event_call->mod);
1453	if (!ret) {
1454		ret = -EBUSY;
1455		goto out_free;
1456	}
1457
1458	ret = trace_event_enable_disable(event_enable_file, 1, 1);
1459	if (ret < 0)
1460		goto out_put;
1461	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1462	/*
1463	 * The above returns on success the # of functions enabled,
1464	 * but if it didn't find any functions it returns zero.
1465	 * Consider no functions a failure too.
1466	 */
1467	if (!ret) {
1468		ret = -ENOENT;
1469		goto out_disable;
1470	} else if (ret < 0)
1471		goto out_disable;
1472	/* Just return zero, not the number of enabled functions */
1473	ret = 0;
1474	event_trigger_free(trigger_ops, trigger_data);
1475 out:
1476	return ret;
1477
1478 out_disable:
1479	trace_event_enable_disable(event_enable_file, 0, 1);
1480 out_put:
1481	module_put(event_enable_file->event_call->mod);
1482 out_free:
1483	if (cmd_ops->set_filter)
1484		cmd_ops->set_filter(NULL, trigger_data, NULL);
1485	event_trigger_free(trigger_ops, trigger_data);
1486	kfree(enable_data);
1487	goto out;
1488}
1489
1490int event_enable_register_trigger(char *glob,
1491				  struct event_trigger_ops *ops,
1492				  struct event_trigger_data *data,
1493				  struct trace_event_file *file)
1494{
1495	struct enable_trigger_data *enable_data = data->private_data;
1496	struct enable_trigger_data *test_enable_data;
1497	struct event_trigger_data *test;
1498	int ret = 0;
1499
1500	list_for_each_entry_rcu(test, &file->triggers, list) {
1501		test_enable_data = test->private_data;
1502		if (test_enable_data &&
1503		    (test->cmd_ops->trigger_type ==
1504		     data->cmd_ops->trigger_type) &&
1505		    (test_enable_data->file == enable_data->file)) {
1506			ret = -EEXIST;
1507			goto out;
1508		}
1509	}
1510
1511	if (data->ops->init) {
1512		ret = data->ops->init(data->ops, data);
1513		if (ret < 0)
1514			goto out;
1515	}
1516
1517	list_add_rcu(&data->list, &file->triggers);
1518	ret++;
1519
1520	update_cond_flag(file);
1521	if (trace_event_trigger_enable_disable(file, 1) < 0) {
1522		list_del_rcu(&data->list);
1523		update_cond_flag(file);
1524		ret--;
1525	}
1526out:
1527	return ret;
1528}
1529
1530void event_enable_unregister_trigger(char *glob,
1531				     struct event_trigger_ops *ops,
1532				     struct event_trigger_data *test,
1533				     struct trace_event_file *file)
1534{
1535	struct enable_trigger_data *test_enable_data = test->private_data;
1536	struct enable_trigger_data *enable_data;
1537	struct event_trigger_data *data;
1538	bool unregistered = false;
1539
1540	list_for_each_entry_rcu(data, &file->triggers, list) {
1541		enable_data = data->private_data;
1542		if (enable_data &&
1543		    (data->cmd_ops->trigger_type ==
1544		     test->cmd_ops->trigger_type) &&
1545		    (enable_data->file == test_enable_data->file)) {
1546			unregistered = true;
1547			list_del_rcu(&data->list);
1548			trace_event_trigger_enable_disable(file, 0);
1549			update_cond_flag(file);
1550			break;
1551		}
1552	}
1553
1554	if (unregistered && data->ops->free)
1555		data->ops->free(data->ops, data);
1556}
1557
1558static struct event_trigger_ops *
1559event_enable_get_trigger_ops(char *cmd, char *param)
1560{
1561	struct event_trigger_ops *ops;
1562	bool enable;
1563
1564#ifdef CONFIG_HIST_TRIGGERS
1565	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1566		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1567#else
1568	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1569#endif
1570	if (enable)
1571		ops = param ? &event_enable_count_trigger_ops :
1572			&event_enable_trigger_ops;
1573	else
1574		ops = param ? &event_disable_count_trigger_ops :
1575			&event_disable_trigger_ops;
1576
1577	return ops;
1578}
1579
1580static struct event_command trigger_enable_cmd = {
1581	.name			= ENABLE_EVENT_STR,
1582	.trigger_type		= ETT_EVENT_ENABLE,
1583	.func			= event_enable_trigger_func,
1584	.reg			= event_enable_register_trigger,
1585	.unreg			= event_enable_unregister_trigger,
1586	.get_trigger_ops	= event_enable_get_trigger_ops,
1587	.set_filter		= set_trigger_filter,
1588};
1589
1590static struct event_command trigger_disable_cmd = {
1591	.name			= DISABLE_EVENT_STR,
1592	.trigger_type		= ETT_EVENT_ENABLE,
1593	.func			= event_enable_trigger_func,
1594	.reg			= event_enable_register_trigger,
1595	.unreg			= event_enable_unregister_trigger,
1596	.get_trigger_ops	= event_enable_get_trigger_ops,
1597	.set_filter		= set_trigger_filter,
1598};
1599
1600static __init void unregister_trigger_enable_disable_cmds(void)
1601{
1602	unregister_event_command(&trigger_enable_cmd);
1603	unregister_event_command(&trigger_disable_cmd);
1604}
1605
1606static __init int register_trigger_enable_disable_cmds(void)
1607{
1608	int ret;
1609
1610	ret = register_event_command(&trigger_enable_cmd);
1611	if (WARN_ON(ret < 0))
1612		return ret;
1613	ret = register_event_command(&trigger_disable_cmd);
1614	if (WARN_ON(ret < 0))
1615		unregister_trigger_enable_disable_cmds();
1616
1617	return ret;
1618}
1619
1620static __init int register_trigger_traceon_traceoff_cmds(void)
1621{
1622	int ret;
1623
1624	ret = register_event_command(&trigger_traceon_cmd);
1625	if (WARN_ON(ret < 0))
1626		return ret;
1627	ret = register_event_command(&trigger_traceoff_cmd);
1628	if (WARN_ON(ret < 0))
1629		unregister_trigger_traceon_traceoff_cmds();
1630
1631	return ret;
1632}
1633
1634__init int register_trigger_cmds(void)
1635{
1636	register_trigger_traceon_traceoff_cmds();
1637	register_trigger_snapshot_cmd();
1638	register_trigger_stacktrace_cmd();
1639	register_trigger_enable_disable_cmds();
1640	register_trigger_hist_enable_disable_cmds();
1641	register_trigger_hist_cmd();
1642
1643	return 0;
1644}
v4.10.11
 
   1/*
   2 * trace_events_trigger - trace event triggers
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License as published by
   6 * the Free Software Foundation; either version 2 of the License, or
   7 * (at your option) any later version.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write to the Free Software
  16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  17 *
  18 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
  19 */
  20
 
  21#include <linux/module.h>
  22#include <linux/ctype.h>
  23#include <linux/mutex.h>
  24#include <linux/slab.h>
 
  25
  26#include "trace.h"
  27
  28static LIST_HEAD(trigger_commands);
  29static DEFINE_MUTEX(trigger_cmd_mutex);
  30
  31void trigger_data_free(struct event_trigger_data *data)
  32{
  33	if (data->cmd_ops->set_filter)
  34		data->cmd_ops->set_filter(NULL, data, NULL);
  35
  36	synchronize_sched(); /* make sure current triggers exit before free */
 
 
  37	kfree(data);
  38}
  39
  40/**
  41 * event_triggers_call - Call triggers associated with a trace event
  42 * @file: The trace_event_file associated with the event
  43 * @rec: The trace entry for the event, NULL for unconditional invocation
  44 *
  45 * For each trigger associated with an event, invoke the trigger
  46 * function registered with the associated trigger command.  If rec is
  47 * non-NULL, it means that the trigger requires further processing and
  48 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
  49 * trigger has a filter associated with it, rec will checked against
  50 * the filter and if the record matches the trigger will be invoked.
  51 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
  52 * in any case until the current event is written, the trigger
  53 * function isn't invoked but the bit associated with the deferred
  54 * trigger is set in the return value.
  55 *
  56 * Returns an enum event_trigger_type value containing a set bit for
  57 * any trigger that should be deferred, ETT_NONE if nothing to defer.
  58 *
  59 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
  60 *
  61 * Return: an enum event_trigger_type value containing a set bit for
  62 * any trigger that should be deferred, ETT_NONE if nothing to defer.
  63 */
  64enum event_trigger_type
  65event_triggers_call(struct trace_event_file *file, void *rec)
 
  66{
  67	struct event_trigger_data *data;
  68	enum event_trigger_type tt = ETT_NONE;
  69	struct event_filter *filter;
  70
  71	if (list_empty(&file->triggers))
  72		return tt;
  73
  74	list_for_each_entry_rcu(data, &file->triggers, list) {
  75		if (data->paused)
  76			continue;
  77		if (!rec) {
  78			data->ops->func(data, rec);
  79			continue;
  80		}
  81		filter = rcu_dereference_sched(data->filter);
  82		if (filter && !filter_match_preds(filter, rec))
  83			continue;
  84		if (event_command_post_trigger(data->cmd_ops)) {
  85			tt |= data->cmd_ops->trigger_type;
  86			continue;
  87		}
  88		data->ops->func(data, rec);
  89	}
  90	return tt;
  91}
  92EXPORT_SYMBOL_GPL(event_triggers_call);
  93
  94/**
  95 * event_triggers_post_call - Call 'post_triggers' for a trace event
  96 * @file: The trace_event_file associated with the event
  97 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
  98 * @rec: The trace entry for the event
  99 *
 100 * For each trigger associated with an event, invoke the trigger
 101 * function registered with the associated trigger command, if the
 102 * corresponding bit is set in the tt enum passed into this function.
 103 * See @event_triggers_call for details on how those bits are set.
 104 *
 105 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 106 */
 107void
 108event_triggers_post_call(struct trace_event_file *file,
 109			 enum event_trigger_type tt,
 110			 void *rec)
 111{
 112	struct event_trigger_data *data;
 113
 114	list_for_each_entry_rcu(data, &file->triggers, list) {
 115		if (data->paused)
 116			continue;
 117		if (data->cmd_ops->trigger_type & tt)
 118			data->ops->func(data, rec);
 119	}
 120}
 121EXPORT_SYMBOL_GPL(event_triggers_post_call);
 122
 123#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)
 124
 125static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
 126{
 127	struct trace_event_file *event_file = event_file_data(m->private);
 128
 129	if (t == SHOW_AVAILABLE_TRIGGERS)
 130		return NULL;
 131
 132	return seq_list_next(t, &event_file->triggers, pos);
 133}
 134
 135static void *trigger_start(struct seq_file *m, loff_t *pos)
 136{
 137	struct trace_event_file *event_file;
 138
 139	/* ->stop() is called even if ->start() fails */
 140	mutex_lock(&event_mutex);
 141	event_file = event_file_data(m->private);
 142	if (unlikely(!event_file))
 143		return ERR_PTR(-ENODEV);
 144
 145	if (list_empty(&event_file->triggers))
 146		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
 147
 148	return seq_list_start(&event_file->triggers, *pos);
 149}
 150
 151static void trigger_stop(struct seq_file *m, void *t)
 152{
 153	mutex_unlock(&event_mutex);
 154}
 155
 156static int trigger_show(struct seq_file *m, void *v)
 157{
 158	struct event_trigger_data *data;
 159	struct event_command *p;
 160
 161	if (v == SHOW_AVAILABLE_TRIGGERS) {
 162		seq_puts(m, "# Available triggers:\n");
 163		seq_putc(m, '#');
 164		mutex_lock(&trigger_cmd_mutex);
 165		list_for_each_entry_reverse(p, &trigger_commands, list)
 166			seq_printf(m, " %s", p->name);
 167		seq_putc(m, '\n');
 168		mutex_unlock(&trigger_cmd_mutex);
 169		return 0;
 170	}
 171
 172	data = list_entry(v, struct event_trigger_data, list);
 173	data->ops->print(m, data->ops, data);
 174
 175	return 0;
 176}
 177
 178static const struct seq_operations event_triggers_seq_ops = {
 179	.start = trigger_start,
 180	.next = trigger_next,
 181	.stop = trigger_stop,
 182	.show = trigger_show,
 183};
 184
 185static int event_trigger_regex_open(struct inode *inode, struct file *file)
 186{
 187	int ret = 0;
 
 
 
 
 188
 189	mutex_lock(&event_mutex);
 190
 191	if (unlikely(!event_file_data(file))) {
 192		mutex_unlock(&event_mutex);
 193		return -ENODEV;
 194	}
 195
 196	if ((file->f_mode & FMODE_WRITE) &&
 197	    (file->f_flags & O_TRUNC)) {
 198		struct trace_event_file *event_file;
 199		struct event_command *p;
 200
 201		event_file = event_file_data(file);
 202
 203		list_for_each_entry(p, &trigger_commands, list) {
 204			if (p->unreg_all)
 205				p->unreg_all(event_file);
 206		}
 207	}
 208
 209	if (file->f_mode & FMODE_READ) {
 210		ret = seq_open(file, &event_triggers_seq_ops);
 211		if (!ret) {
 212			struct seq_file *m = file->private_data;
 213			m->private = file;
 214		}
 215	}
 216
 217	mutex_unlock(&event_mutex);
 218
 219	return ret;
 220}
 221
 222static int trigger_process_regex(struct trace_event_file *file, char *buff)
 223{
 224	char *command, *next = buff;
 225	struct event_command *p;
 226	int ret = -EINVAL;
 227
 228	command = strsep(&next, ": \t");
 229	command = (command[0] != '!') ? command : command + 1;
 230
 231	mutex_lock(&trigger_cmd_mutex);
 232	list_for_each_entry(p, &trigger_commands, list) {
 233		if (strcmp(p->name, command) == 0) {
 234			ret = p->func(p, file, buff, command, next);
 235			goto out_unlock;
 236		}
 237	}
 238 out_unlock:
 239	mutex_unlock(&trigger_cmd_mutex);
 240
 241	return ret;
 242}
 243
 244static ssize_t event_trigger_regex_write(struct file *file,
 245					 const char __user *ubuf,
 246					 size_t cnt, loff_t *ppos)
 247{
 248	struct trace_event_file *event_file;
 249	ssize_t ret;
 250	char *buf;
 251
 252	if (!cnt)
 253		return 0;
 254
 255	if (cnt >= PAGE_SIZE)
 256		return -EINVAL;
 257
 258	buf = memdup_user_nul(ubuf, cnt);
 259	if (IS_ERR(buf))
 260		return PTR_ERR(buf);
 261
 262	strim(buf);
 263
 264	mutex_lock(&event_mutex);
 265	event_file = event_file_data(file);
 266	if (unlikely(!event_file)) {
 267		mutex_unlock(&event_mutex);
 268		kfree(buf);
 269		return -ENODEV;
 270	}
 271	ret = trigger_process_regex(event_file, buf);
 272	mutex_unlock(&event_mutex);
 273
 274	kfree(buf);
 275	if (ret < 0)
 276		goto out;
 277
 278	*ppos += cnt;
 279	ret = cnt;
 280 out:
 281	return ret;
 282}
 283
 284static int event_trigger_regex_release(struct inode *inode, struct file *file)
 285{
 286	mutex_lock(&event_mutex);
 287
 288	if (file->f_mode & FMODE_READ)
 289		seq_release(inode, file);
 290
 291	mutex_unlock(&event_mutex);
 292
 293	return 0;
 294}
 295
 296static ssize_t
 297event_trigger_write(struct file *filp, const char __user *ubuf,
 298		    size_t cnt, loff_t *ppos)
 299{
 300	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
 301}
 302
 303static int
 304event_trigger_open(struct inode *inode, struct file *filp)
  305{
 306	return event_trigger_regex_open(inode, filp);
 307}
 308
 309static int
 310event_trigger_release(struct inode *inode, struct file *file)
 311{
 312	return event_trigger_regex_release(inode, file);
 313}
 314
 315const struct file_operations event_trigger_fops = {
 316	.open = event_trigger_open,
 317	.read = seq_read,
 318	.write = event_trigger_write,
 319	.llseek = tracing_lseek,
 320	.release = event_trigger_release,
 321};
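/*
 * For illustration: these file_operations back the per-event "trigger"
 * file in tracefs (events/<system>/<event>/trigger).  Reading it lists
 * the active triggers, or the available commands if none are set;
 * writing a string such as
 *
 *	traceoff:5 if pid == 1
 *
 * goes through event_trigger_regex_write() and trigger_process_regex()
 * above, and a leading '!' removes an existing trigger.
 */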
 322
 323/*
 324 * Currently we only register event commands from __init, so mark this
 325 * __init too.
 326 */
 327__init int register_event_command(struct event_command *cmd)
 328{
 329	struct event_command *p;
 330	int ret = 0;
 331
 332	mutex_lock(&trigger_cmd_mutex);
 333	list_for_each_entry(p, &trigger_commands, list) {
 334		if (strcmp(cmd->name, p->name) == 0) {
 335			ret = -EBUSY;
 336			goto out_unlock;
 337		}
 338	}
 339	list_add(&cmd->list, &trigger_commands);
 340 out_unlock:
 341	mutex_unlock(&trigger_cmd_mutex);
 342
 343	return ret;
 344}
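/*
 * Illustrative sketch, not taken from the kernel itself: a new trigger
 * command would typically reuse the generic helpers in this file and
 * register itself from an __init function.  The my_* names and the
 * ETT_MY_TRIGGER bit below are hypothetical.
 *
 *	static struct event_command my_trigger_cmd = {
 *		.name			= "mytrigger",
 *		.trigger_type		= ETT_MY_TRIGGER,
 *		.func			= event_trigger_callback,
 *		.reg			= register_trigger,
 *		.unreg			= unregister_trigger,
 *		.get_trigger_ops	= my_get_trigger_ops,
 *		.set_filter		= set_trigger_filter,
 *	};
 *
 *	static __init int register_my_trigger_cmd(void)
 *	{
 *		return register_event_command(&my_trigger_cmd);
 *	}
 */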
 345
 346/*
 347 * Currently we only unregister event commands from __init, so mark
 348 * this __init too.
 349 */
 350__init int unregister_event_command(struct event_command *cmd)
 351{
 352	struct event_command *p, *n;
 353	int ret = -ENODEV;
 354
 355	mutex_lock(&trigger_cmd_mutex);
 356	list_for_each_entry_safe(p, n, &trigger_commands, list) {
 357		if (strcmp(cmd->name, p->name) == 0) {
 358			ret = 0;
 359			list_del_init(&p->list);
 360			goto out_unlock;
 361		}
 362	}
 363 out_unlock:
 364	mutex_unlock(&trigger_cmd_mutex);
 365
 366	return ret;
 367}
 368
 369/**
 370 * event_trigger_print - Generic event_trigger_ops @print implementation
 371 * @name: The name of the event trigger
 372 * @m: The seq_file being printed to
 373 * @data: Trigger-specific data
 374 * @filter_str: filter_str to print, if present
 375 *
 376 * Common implementation for event triggers to print themselves.
 377 *
 378 * Usually wrapped by a function that simply sets the @name of the
 379 * trigger command and then invokes this.
 380 *
 381 * Return: 0 on success, errno otherwise
 382 */
 383static int
 384event_trigger_print(const char *name, struct seq_file *m,
 385		    void *data, char *filter_str)
 386{
 387	long count = (long)data;
 388
 389	seq_puts(m, name);
 390
 391	if (count == -1)
 392		seq_puts(m, ":unlimited");
 393	else
 394		seq_printf(m, ":count=%ld", count);
 395
 396	if (filter_str)
 397		seq_printf(m, " if %s\n", filter_str);
 398	else
 399		seq_putc(m, '\n');
 400
 401	return 0;
 402}
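/*
 * For illustration: with name "traceoff" this prints, depending on the
 * count and filter,
 *
 *	traceoff:unlimited
 *	traceoff:count=5 if pid == 1
 *
 * mirroring the format that was accepted when the trigger was written.
 */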
 403
 404/**
 405 * event_trigger_init - Generic event_trigger_ops @init implementation
 406 * @ops: The trigger ops associated with the trigger
 407 * @data: Trigger-specific data
 408 *
 409 * Common implementation of event trigger initialization.
 410 *
 411 * Usually used directly as the @init method in event trigger
 412 * implementations.
 413 *
 414 * Return: 0 on success, errno otherwise
 415 */
 416int event_trigger_init(struct event_trigger_ops *ops,
 417		       struct event_trigger_data *data)
 418{
 419	data->ref++;
 420	return 0;
 421}
 422
 423/**
 424 * event_trigger_free - Generic event_trigger_ops @free implementation
 425 * @ops: The trigger ops associated with the trigger
 426 * @data: Trigger-specific data
 427 *
 428 * Common implementation of event trigger de-initialization.
 429 *
 430 * Usually used directly as the @free method in event trigger
 431 * implementations.
 432 */
 433static void
 434event_trigger_free(struct event_trigger_ops *ops,
 435		   struct event_trigger_data *data)
 436{
 437	if (WARN_ON_ONCE(data->ref <= 0))
 438		return;
 439
 440	data->ref--;
 441	if (!data->ref)
 442		trigger_data_free(data);
 443}
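/*
 * Taken together, event_trigger_init() and event_trigger_free()
 * implement a simple reference count on the shared event_trigger_data:
 * registration paths take a reference (ref++), removal paths drop one
 * (ref--), and trigger_data_free() only runs when the last reference
 * goes away.
 */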
 444
 445int trace_event_trigger_enable_disable(struct trace_event_file *file,
 446				       int trigger_enable)
 447{
 448	int ret = 0;
 449
 450	if (trigger_enable) {
 451		if (atomic_inc_return(&file->tm_ref) > 1)
 452			return ret;
 453		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
 454		ret = trace_event_enable_disable(file, 1, 1);
 455	} else {
 456		if (atomic_dec_return(&file->tm_ref) > 0)
 457			return ret;
 458		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
 459		ret = trace_event_enable_disable(file, 0, 1);
 460	}
 461
 462	return ret;
 463}
 464
 465/**
 466 * clear_event_triggers - Clear all triggers associated with a trace array
 467 * @tr: The trace array to clear
 468 *
 469 * For each trigger, the triggering event has its tm_ref decremented
 470 * via trace_event_trigger_enable_disable(), and any associated event
 471 * (in the case of enable/disable_event triggers) will have its sm_ref
 472 * decremented via free()->trace_event_enable_disable().  That
 473 * combination effectively reverses the soft-mode/trigger state added
 474 * by trigger registration.
 475 *
 476 * Must be called with event_mutex held.
 477 */
 478void
 479clear_event_triggers(struct trace_array *tr)
 480{
 481	struct trace_event_file *file;
 482
 483	list_for_each_entry(file, &tr->events, list) {
 484		struct event_trigger_data *data;
 485		list_for_each_entry_rcu(data, &file->triggers, list) {
  486			trace_event_trigger_enable_disable(file, 0);
 487			if (data->ops->free)
 488				data->ops->free(data->ops, data);
 489		}
 490	}
 491}
 492
 493/**
 494 * update_cond_flag - Set or reset the TRIGGER_COND bit
 495 * @file: The trace_event_file associated with the event
 496 *
 497 * If an event has triggers and any of those triggers has a filter or
 498 * a post_trigger, trigger invocation needs to be deferred until after
 499 * the current event has logged its data, and the event should have
 500 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 501 * cleared.
 502 */
 503void update_cond_flag(struct trace_event_file *file)
 504{
 505	struct event_trigger_data *data;
 506	bool set_cond = false;
 507
 508	list_for_each_entry_rcu(data, &file->triggers, list) {
 509		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
 510		    event_command_needs_rec(data->cmd_ops)) {
 511			set_cond = true;
 512			break;
 513		}
 514	}
 515
 516	if (set_cond)
 517		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
 518	else
 519		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
 520}
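/*
 * For illustration: a bare "traceon" trigger leaves TRIGGER_COND clear
 * and fires directly from event_triggers_call(), while "traceon if
 * pid == 1" (a filter) or "stacktrace" (a post-trigger command) sets
 * the bit so the record is checked against the filter and/or the call
 * is deferred until after the event has been logged.
 */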
 521
 522/**
 523 * register_trigger - Generic event_command @reg implementation
 524 * @glob: The raw string used to register the trigger
 525 * @ops: The trigger ops associated with the trigger
 526 * @data: Trigger-specific data to associate with the trigger
 527 * @file: The trace_event_file associated with the event
 528 *
 529 * Common implementation for event trigger registration.
 530 *
 531 * Usually used directly as the @reg method in event command
 532 * implementations.
 533 *
 534 * Return: 0 on success, errno otherwise
 535 */
 536static int register_trigger(char *glob, struct event_trigger_ops *ops,
 537			    struct event_trigger_data *data,
 538			    struct trace_event_file *file)
 539{
 540	struct event_trigger_data *test;
 541	int ret = 0;
 542
 543	list_for_each_entry_rcu(test, &file->triggers, list) {
 544		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
 545			ret = -EEXIST;
 546			goto out;
 547		}
 548	}
 549
 550	if (data->ops->init) {
 551		ret = data->ops->init(data->ops, data);
 552		if (ret < 0)
 553			goto out;
 554	}
 555
 556	list_add_rcu(&data->list, &file->triggers);
 557	ret++;
 558
 559	update_cond_flag(file);
 560	if (trace_event_trigger_enable_disable(file, 1) < 0) {
 561		list_del_rcu(&data->list);
 562		update_cond_flag(file);
 563		ret--;
 564	}
 565out:
 566	return ret;
 567}
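/*
 * Note on the return value: on success this returns 1, the number of
 * triggers registered, which event_trigger_callback() below folds back
 * to 0.  The ret++/ret-- pair keeps "nothing registered" (0)
 * distinguishable from a hard error (negative).
 */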
 568
 569/**
 570 * unregister_trigger - Generic event_command @unreg implementation
 571 * @glob: The raw string used to register the trigger
 572 * @ops: The trigger ops associated with the trigger
 573 * @test: Trigger-specific data used to find the trigger to remove
 574 * @file: The trace_event_file associated with the event
 575 *
 576 * Common implementation for event trigger unregistration.
 577 *
 578 * Usually used directly as the @unreg method in event command
 579 * implementations.
 580 */
 581void unregister_trigger(char *glob, struct event_trigger_ops *ops,
 582			struct event_trigger_data *test,
 583			struct trace_event_file *file)
 584{
 585	struct event_trigger_data *data;
 586	bool unregistered = false;
 587
 588	list_for_each_entry_rcu(data, &file->triggers, list) {
 589		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
 590			unregistered = true;
 591			list_del_rcu(&data->list);
 592			trace_event_trigger_enable_disable(file, 0);
 593			update_cond_flag(file);
 594			break;
 595		}
 596	}
 597
 598	if (unregistered && data->ops->free)
 599		data->ops->free(data->ops, data);
 600}
 601
 602/**
 603 * event_trigger_callback - Generic event_command @func implementation
 604 * @cmd_ops: The command ops, used for trigger registration
 605 * @file: The trace_event_file associated with the event
 606 * @glob: The raw string used to register the trigger
 607 * @cmd: The cmd portion of the string used to register the trigger
 608 * @param: The params portion of the string used to register the trigger
 609 *
 610 * Common implementation for event command parsing and trigger
 611 * instantiation.
 612 *
 613 * Usually used directly as the @func method in event command
 614 * implementations.
 615 *
 616 * Return: 0 on success, errno otherwise
 617 */
 618static int
 619event_trigger_callback(struct event_command *cmd_ops,
 620		       struct trace_event_file *file,
 621		       char *glob, char *cmd, char *param)
 622{
 623	struct event_trigger_data *trigger_data;
 624	struct event_trigger_ops *trigger_ops;
 625	char *trigger = NULL;
 626	char *number;
 627	int ret;
 628
 629	/* separate the trigger from the filter (t:n [if filter]) */
 630	if (param && isdigit(param[0]))
 631		trigger = strsep(&param, " \t");
 632
 633	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
 634
 635	ret = -ENOMEM;
 636	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
 637	if (!trigger_data)
 638		goto out;
 639
 640	trigger_data->count = -1;
 641	trigger_data->ops = trigger_ops;
  642	trigger_data->cmd_ops = cmd_ops;
 643	INIT_LIST_HEAD(&trigger_data->list);
 644	INIT_LIST_HEAD(&trigger_data->named_list);
 645
 646	if (glob[0] == '!') {
 647		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
 648		kfree(trigger_data);
 649		ret = 0;
 650		goto out;
 651	}
 652
 653	if (trigger) {
 654		number = strsep(&trigger, ":");
 655
 656		ret = -EINVAL;
 657		if (!strlen(number))
 658			goto out_free;
 659
  660		/*
  661		 * Parse the optional ":N" count (e.g. the 5 in
  662		 * "traceoff:5") into trigger_data->count.
  663		 */
 664		ret = kstrtoul(number, 0, &trigger_data->count);
 665		if (ret)
 666			goto out_free;
 667	}
 668
 669	if (!param) /* if param is non-empty, it's supposed to be a filter */
 670		goto out_reg;
 671
 672	if (!cmd_ops->set_filter)
 673		goto out_reg;
 674
 675	ret = cmd_ops->set_filter(param, trigger_data, file);
 676	if (ret < 0)
 677		goto out_free;
 678
  679 out_reg:
 680	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
  681	/*
  682	 * The above returns the number of triggers registered on success,
  683	 * but returns zero if nothing was registered.  Consider that a
  684	 * failure too.
  685	 */
  686	if (!ret) {
 687		ret = -ENOENT;
 688		goto out_free;
 689	} else if (ret < 0)
 690		goto out_free;
  691	ret = 0;
 692 out:
 693	return ret;
 694
 695 out_free:
 696	if (cmd_ops->set_filter)
 697		cmd_ops->set_filter(NULL, trigger_data, NULL);
 698	kfree(trigger_data);
 699	goto out;
 700}
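/*
 * Worked example: for glob = "traceon:5 if pid == 1" the caller has
 * already split off cmd = "traceon" and param = "5 if pid == 1", so
 * above
 *
 *	trigger = "5"			(strsep() on " \t")
 *	param   = "if pid == 1"
 *	number  = "5"   ->  trigger_data->count = 5
 *
 * and the remaining param is passed to ->set_filter() before the
 * trigger is registered through ->reg().
 */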
 701
 702/**
 703 * set_trigger_filter - Generic event_command @set_filter implementation
 704 * @filter_str: The filter string for the trigger, NULL to remove filter
 705 * @trigger_data: Trigger-specific data
 706 * @file: The trace_event_file associated with the event
 707 *
 708 * Common implementation for event command filter parsing and filter
 709 * instantiation.
 710 *
 711 * Usually used directly as the @set_filter method in event command
 712 * implementations.
 713 *
 714 * Also used to remove a filter (if filter_str = NULL).
 715 *
 716 * Return: 0 on success, errno otherwise
 717 */
 718int set_trigger_filter(char *filter_str,
 719		       struct event_trigger_data *trigger_data,
 720		       struct trace_event_file *file)
 721{
 722	struct event_trigger_data *data = trigger_data;
 723	struct event_filter *filter = NULL, *tmp;
 724	int ret = -EINVAL;
 725	char *s;
 726
 727	if (!filter_str) /* clear the current filter */
 728		goto assign;
 729
 730	s = strsep(&filter_str, " \t");
 731
 732	if (!strlen(s) || strcmp(s, "if") != 0)
 733		goto out;
 734
 735	if (!filter_str)
 736		goto out;
 737
 738	/* The filter is for the 'trigger' event, not the triggered event */
 739	ret = create_event_filter(file->event_call, filter_str, false, &filter);
 740	if (ret)
  741		goto out;
 742 assign:
 743	tmp = rcu_access_pointer(data->filter);
 744
 745	rcu_assign_pointer(data->filter, filter);
 746
 747	if (tmp) {
 748		/* Make sure the call is done with the filter */
  749		tracepoint_synchronize_unregister();
 750		free_event_filter(tmp);
 751	}
 752
 753	kfree(data->filter_str);
 754	data->filter_str = NULL;
 755
 756	if (filter_str) {
 757		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
 758		if (!data->filter_str) {
 759			free_event_filter(rcu_access_pointer(data->filter));
 760			data->filter = NULL;
 761			ret = -ENOMEM;
 762		}
 763	}
 764 out:
 765	return ret;
 766}
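/*
 * For illustration: with a trailing filter the flow above is
 *
 *	filter_str = "if pid == 1"
 *	s          = "if"		(must literally be "if")
 *	filter_str = "pid == 1"   ->  create_event_filter()
 *
 * and data->filter is switched over under RCU so a concurrent
 * event_triggers_call() never sees a freed filter.
 */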
 767
 768static LIST_HEAD(named_triggers);
 769
 770/**
 771 * find_named_trigger - Find the common named trigger associated with @name
 772 * @name: The name of the set of named triggers to find the common data for
 773 *
 774 * Named triggers are sets of triggers that share a common set of
 775 * trigger data.  The first named trigger registered with a given name
 776 * owns the common trigger data that the others subsequently
 777 * registered with the same name will reference.  This function
 778 * returns the common trigger data associated with that first
 779 * registered instance.
 780 *
 781 * Return: the common trigger data for the given named trigger on
 782 * success, NULL otherwise.
 783 */
 784struct event_trigger_data *find_named_trigger(const char *name)
 785{
 786	struct event_trigger_data *data;
 787
 788	if (!name)
 789		return NULL;
 790
 791	list_for_each_entry(data, &named_triggers, named_list) {
 792		if (data->named_data)
 793			continue;
 794		if (strcmp(data->name, name) == 0)
 795			return data;
 796	}
 797
 798	return NULL;
 799}
 800
 801/**
 802 * is_named_trigger - determine if a given trigger is a named trigger
 803 * @test: The trigger data to test
 804 *
 805 * Return: true if 'test' is a named trigger, false otherwise.
 806 */
 807bool is_named_trigger(struct event_trigger_data *test)
 808{
 809	struct event_trigger_data *data;
 810
 811	list_for_each_entry(data, &named_triggers, named_list) {
 812		if (test == data)
 813			return true;
 814	}
 815
 816	return false;
 817}
 818
 819/**
 820 * save_named_trigger - save the trigger in the named trigger list
 821 * @name: The name of the named trigger set
 822 * @data: The trigger data to save
 823 *
 824 * Return: 0 if successful, negative error otherwise.
 825 */
 826int save_named_trigger(const char *name, struct event_trigger_data *data)
 827{
 828	data->name = kstrdup(name, GFP_KERNEL);
 829	if (!data->name)
 830		return -ENOMEM;
 831
 832	list_add(&data->named_list, &named_triggers);
 833
 834	return 0;
 835}
 836
 837/**
 838 * del_named_trigger - delete a trigger from the named trigger list
 839 * @data: The trigger data to delete
 840 */
 841void del_named_trigger(struct event_trigger_data *data)
 842{
 843	kfree(data->name);
 844	data->name = NULL;
 845
 846	list_del(&data->named_list);
 847}
 848
 849static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
 850{
 851	struct event_trigger_data *test;
 852
 853	list_for_each_entry(test, &named_triggers, named_list) {
 854		if (strcmp(test->name, data->name) == 0) {
 855			if (pause) {
 856				test->paused_tmp = test->paused;
 857				test->paused = true;
 858			} else {
 859				test->paused = test->paused_tmp;
 860			}
 861		}
 862	}
 863}
 864
 865/**
 866 * pause_named_trigger - Pause all named triggers with the same name
 867 * @data: The trigger data of a named trigger to pause
 868 *
 869 * Pauses a named trigger along with all other triggers having the
 870 * same name.  Because named triggers share a common set of data,
 871 * pausing only one is meaningless, so pausing one named trigger needs
 872 * to pause all triggers with the same name.
 873 */
 874void pause_named_trigger(struct event_trigger_data *data)
 875{
 876	__pause_named_trigger(data, true);
 877}
 878
 879/**
 880 * unpause_named_trigger - Un-pause all named triggers with the same name
 881 * @data: The trigger data of a named trigger to unpause
 882 *
 883 * Un-pauses a named trigger along with all other triggers having the
 884 * same name.  Because named triggers share a common set of data,
 885 * unpausing only one is meaningless, so unpausing one named trigger
 886 * needs to unpause all triggers with the same name.
 887 */
 888void unpause_named_trigger(struct event_trigger_data *data)
 889{
 890	__pause_named_trigger(data, false);
 891}
 892
 893/**
 894 * set_named_trigger_data - Associate common named trigger data
  895 * @data: The trigger data to associate
 * @named_data: The common named trigger data to be associated
 896 *
 897 * Named triggers are sets of triggers that share a common set of
 898 * trigger data.  The first named trigger registered with a given name
 899 * owns the common trigger data that the others subsequently
 900 * registered with the same name will reference.  This function
 901 * associates the common trigger data from the first trigger with the
 902 * given trigger.
 903 */
 904void set_named_trigger_data(struct event_trigger_data *data,
 905			    struct event_trigger_data *named_data)
 906{
 907	data->named_data = named_data;
 908}
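/*
 * Rough sketch of how a caller (e.g. a hist trigger created with
 * ":name=foo") is expected to use the named-trigger helpers; "foo",
 * data and ret are placeholders:
 *
 *	named_data = find_named_trigger("foo");
 *	if (named_data)
 *		set_named_trigger_data(data, named_data);
 *	else
 *		ret = save_named_trigger("foo", data);
 *
 * after which pause_named_trigger()/unpause_named_trigger() act on
 * every trigger sharing that name.
 */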
  909
 910static void
  911	traceon_trigger(struct event_trigger_data *data, void *rec, struct ring_buffer_event *event)
 912{
 913	if (tracing_is_on())
 914		return;
 915
 916	tracing_on();
 917}
 918
 919static void
  920	traceon_count_trigger(struct event_trigger_data *data, void *rec, struct ring_buffer_event *event)
 921{
 922	if (tracing_is_on())
 923		return;
 924
 925	if (!data->count)
 926		return;
 927
 928	if (data->count != -1)
 929		(data->count)--;
 930
 931	tracing_on();
 932}
 933
 934static void
  935	traceoff_trigger(struct event_trigger_data *data, void *rec, struct ring_buffer_event *event)
 936{
 937	if (!tracing_is_on())
 938		return;
 939
 940	tracing_off();
 941}
 942
 943static void
  944	traceoff_count_trigger(struct event_trigger_data *data, void *rec, struct ring_buffer_event *event)
 945{
 946	if (!tracing_is_on())
 947		return;
 948
 949	if (!data->count)
 950		return;
 951
 952	if (data->count != -1)
 953		(data->count)--;
 954
 955	tracing_off();
 956}
 957
 958static int
 959traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
 960		      struct event_trigger_data *data)
 961{
 962	return event_trigger_print("traceon", m, (void *)data->count,
 963				   data->filter_str);
 964}
 965
 966static int
 967traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
 968		       struct event_trigger_data *data)
 969{
 970	return event_trigger_print("traceoff", m, (void *)data->count,
 971				   data->filter_str);
 972}
 973
 974static struct event_trigger_ops traceon_trigger_ops = {
 975	.func			= traceon_trigger,
 976	.print			= traceon_trigger_print,
 977	.init			= event_trigger_init,
 978	.free			= event_trigger_free,
 979};
 980
 981static struct event_trigger_ops traceon_count_trigger_ops = {
 982	.func			= traceon_count_trigger,
 983	.print			= traceon_trigger_print,
 984	.init			= event_trigger_init,
 985	.free			= event_trigger_free,
 986};
 987
 988static struct event_trigger_ops traceoff_trigger_ops = {
 989	.func			= traceoff_trigger,
 990	.print			= traceoff_trigger_print,
 991	.init			= event_trigger_init,
 992	.free			= event_trigger_free,
 993};
 994
 995static struct event_trigger_ops traceoff_count_trigger_ops = {
 996	.func			= traceoff_count_trigger,
 997	.print			= traceoff_trigger_print,
 998	.init			= event_trigger_init,
 999	.free			= event_trigger_free,
1000};
1001
1002static struct event_trigger_ops *
1003onoff_get_trigger_ops(char *cmd, char *param)
1004{
1005	struct event_trigger_ops *ops;
1006
1007	/* we register both traceon and traceoff to this callback */
1008	if (strcmp(cmd, "traceon") == 0)
1009		ops = param ? &traceon_count_trigger_ops :
1010			&traceon_trigger_ops;
1011	else
1012		ops = param ? &traceoff_count_trigger_ops :
1013			&traceoff_trigger_ops;
1014
1015	return ops;
1016}
1017
1018static struct event_command trigger_traceon_cmd = {
1019	.name			= "traceon",
1020	.trigger_type		= ETT_TRACE_ONOFF,
1021	.func			= event_trigger_callback,
1022	.reg			= register_trigger,
1023	.unreg			= unregister_trigger,
1024	.get_trigger_ops	= onoff_get_trigger_ops,
1025	.set_filter		= set_trigger_filter,
1026};
1027
1028static struct event_command trigger_traceoff_cmd = {
1029	.name			= "traceoff",
1030	.trigger_type		= ETT_TRACE_ONOFF,
1031	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1032	.func			= event_trigger_callback,
1033	.reg			= register_trigger,
1034	.unreg			= unregister_trigger,
1035	.get_trigger_ops	= onoff_get_trigger_ops,
1036	.set_filter		= set_trigger_filter,
1037};
1038
1039#ifdef CONFIG_TRACER_SNAPSHOT
1040static void
 1041	snapshot_trigger(struct event_trigger_data *data, void *rec, struct ring_buffer_event *event)
1042{
 1043	tracing_snapshot();
1044}
1045
1046static void
 1047	snapshot_count_trigger(struct event_trigger_data *data, void *rec, struct ring_buffer_event *event)
1048{
1049	if (!data->count)
1050		return;
1051
1052	if (data->count != -1)
1053		(data->count)--;
1054
 1055	snapshot_trigger(data, rec, event);
1056}
1057
1058static int
1059register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
1060			  struct event_trigger_data *data,
1061			  struct trace_event_file *file)
1062{
1063	int ret = register_trigger(glob, ops, data, file);
1064
1065	if (ret > 0 && tracing_alloc_snapshot() != 0) {
1066		unregister_trigger(glob, ops, data, file);
1067		ret = 0;
1068	}
1069
1070	return ret;
1071}
1072
1073static int
1074snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1075		       struct event_trigger_data *data)
1076{
1077	return event_trigger_print("snapshot", m, (void *)data->count,
1078				   data->filter_str);
1079}
1080
1081static struct event_trigger_ops snapshot_trigger_ops = {
1082	.func			= snapshot_trigger,
1083	.print			= snapshot_trigger_print,
1084	.init			= event_trigger_init,
1085	.free			= event_trigger_free,
1086};
1087
1088static struct event_trigger_ops snapshot_count_trigger_ops = {
1089	.func			= snapshot_count_trigger,
1090	.print			= snapshot_trigger_print,
1091	.init			= event_trigger_init,
1092	.free			= event_trigger_free,
1093};
1094
1095static struct event_trigger_ops *
1096snapshot_get_trigger_ops(char *cmd, char *param)
1097{
1098	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1099}
1100
1101static struct event_command trigger_snapshot_cmd = {
1102	.name			= "snapshot",
1103	.trigger_type		= ETT_SNAPSHOT,
1104	.func			= event_trigger_callback,
1105	.reg			= register_snapshot_trigger,
1106	.unreg			= unregister_trigger,
1107	.get_trigger_ops	= snapshot_get_trigger_ops,
1108	.set_filter		= set_trigger_filter,
1109};
1110
1111static __init int register_trigger_snapshot_cmd(void)
1112{
1113	int ret;
1114
1115	ret = register_event_command(&trigger_snapshot_cmd);
1116	WARN_ON(ret < 0);
1117
1118	return ret;
1119}
1120#else
1121static __init int register_trigger_snapshot_cmd(void) { return 0; }
1122#endif /* CONFIG_TRACER_SNAPSHOT */
1123
 1124#ifdef CONFIG_STACKTRACE
1125/*
1126 * Skip 3:
1127 *   stacktrace_trigger()
 1128 *   event_triggers_post_call()
1129 *   trace_event_raw_event_xxx()
1130 */
 1131#define STACK_SKIP 3
1132
1133static void
 1134	stacktrace_trigger(struct event_trigger_data *data, void *rec, struct ring_buffer_event *event)
1135{
1136	trace_dump_stack(STACK_SKIP);
1137}
1138
1139static void
 1140	stacktrace_count_trigger(struct event_trigger_data *data, void *rec, struct ring_buffer_event *event)
1141{
1142	if (!data->count)
1143		return;
1144
1145	if (data->count != -1)
1146		(data->count)--;
1147
 1148	stacktrace_trigger(data, rec, event);
1149}
1150
1151static int
1152stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1153			 struct event_trigger_data *data)
1154{
1155	return event_trigger_print("stacktrace", m, (void *)data->count,
1156				   data->filter_str);
1157}
1158
1159static struct event_trigger_ops stacktrace_trigger_ops = {
1160	.func			= stacktrace_trigger,
1161	.print			= stacktrace_trigger_print,
1162	.init			= event_trigger_init,
1163	.free			= event_trigger_free,
1164};
1165
1166static struct event_trigger_ops stacktrace_count_trigger_ops = {
1167	.func			= stacktrace_count_trigger,
1168	.print			= stacktrace_trigger_print,
1169	.init			= event_trigger_init,
1170	.free			= event_trigger_free,
1171};
1172
1173static struct event_trigger_ops *
1174stacktrace_get_trigger_ops(char *cmd, char *param)
1175{
1176	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1177}
1178
1179static struct event_command trigger_stacktrace_cmd = {
1180	.name			= "stacktrace",
1181	.trigger_type		= ETT_STACKTRACE,
1182	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1183	.func			= event_trigger_callback,
1184	.reg			= register_trigger,
1185	.unreg			= unregister_trigger,
1186	.get_trigger_ops	= stacktrace_get_trigger_ops,
1187	.set_filter		= set_trigger_filter,
1188};
1189
1190static __init int register_trigger_stacktrace_cmd(void)
1191{
1192	int ret;
1193
1194	ret = register_event_command(&trigger_stacktrace_cmd);
1195	WARN_ON(ret < 0);
1196
1197	return ret;
1198}
1199#else
1200static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1201#endif /* CONFIG_STACKTRACE */
1202
1203static __init void unregister_trigger_traceon_traceoff_cmds(void)
1204{
1205	unregister_event_command(&trigger_traceon_cmd);
1206	unregister_event_command(&trigger_traceoff_cmd);
1207}
1208
1209static void
 1210	event_enable_trigger(struct event_trigger_data *data, void *rec, struct ring_buffer_event *event)
1211{
1212	struct enable_trigger_data *enable_data = data->private_data;
1213
1214	if (enable_data->enable)
1215		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1216	else
1217		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1218}
1219
1220static void
 1221	event_enable_count_trigger(struct event_trigger_data *data, void *rec, struct ring_buffer_event *event)
1222{
1223	struct enable_trigger_data *enable_data = data->private_data;
1224
1225	if (!data->count)
1226		return;
1227
1228	/* Skip if the event is in a state we want to switch to */
1229	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1230		return;
1231
1232	if (data->count != -1)
1233		(data->count)--;
1234
 1235	event_enable_trigger(data, rec, event);
1236}
1237
1238int event_enable_trigger_print(struct seq_file *m,
1239			       struct event_trigger_ops *ops,
1240			       struct event_trigger_data *data)
1241{
1242	struct enable_trigger_data *enable_data = data->private_data;
1243
1244	seq_printf(m, "%s:%s:%s",
1245		   enable_data->hist ?
1246		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1247		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1248		   enable_data->file->event_call->class->system,
1249		   trace_event_name(enable_data->file->event_call));
1250
1251	if (data->count == -1)
1252		seq_puts(m, ":unlimited");
1253	else
1254		seq_printf(m, ":count=%ld", data->count);
1255
1256	if (data->filter_str)
1257		seq_printf(m, " if %s\n", data->filter_str);
1258	else
1259		seq_putc(m, '\n');
1260
1261	return 0;
1262}
1263
1264void event_enable_trigger_free(struct event_trigger_ops *ops,
1265			       struct event_trigger_data *data)
1266{
1267	struct enable_trigger_data *enable_data = data->private_data;
1268
1269	if (WARN_ON_ONCE(data->ref <= 0))
1270		return;
1271
1272	data->ref--;
1273	if (!data->ref) {
1274		/* Remove the SOFT_MODE flag */
1275		trace_event_enable_disable(enable_data->file, 0, 1);
1276		module_put(enable_data->file->event_call->mod);
1277		trigger_data_free(data);
1278		kfree(enable_data);
1279	}
1280}
1281
1282static struct event_trigger_ops event_enable_trigger_ops = {
1283	.func			= event_enable_trigger,
1284	.print			= event_enable_trigger_print,
1285	.init			= event_trigger_init,
1286	.free			= event_enable_trigger_free,
1287};
1288
1289static struct event_trigger_ops event_enable_count_trigger_ops = {
1290	.func			= event_enable_count_trigger,
1291	.print			= event_enable_trigger_print,
1292	.init			= event_trigger_init,
1293	.free			= event_enable_trigger_free,
1294};
1295
1296static struct event_trigger_ops event_disable_trigger_ops = {
1297	.func			= event_enable_trigger,
1298	.print			= event_enable_trigger_print,
1299	.init			= event_trigger_init,
1300	.free			= event_enable_trigger_free,
1301};
1302
1303static struct event_trigger_ops event_disable_count_trigger_ops = {
1304	.func			= event_enable_count_trigger,
1305	.print			= event_enable_trigger_print,
1306	.init			= event_trigger_init,
1307	.free			= event_enable_trigger_free,
1308};
1309
1310int event_enable_trigger_func(struct event_command *cmd_ops,
1311			      struct trace_event_file *file,
1312			      char *glob, char *cmd, char *param)
1313{
1314	struct trace_event_file *event_enable_file;
1315	struct enable_trigger_data *enable_data;
1316	struct event_trigger_data *trigger_data;
1317	struct event_trigger_ops *trigger_ops;
1318	struct trace_array *tr = file->tr;
1319	const char *system;
1320	const char *event;
1321	bool hist = false;
1322	char *trigger;
1323	char *number;
1324	bool enable;
1325	int ret;
1326
1327	if (!param)
1328		return -EINVAL;
1329
1330	/* separate the trigger from the filter (s:e:n [if filter]) */
1331	trigger = strsep(&param, " \t");
1332	if (!trigger)
1333		return -EINVAL;
1334
1335	system = strsep(&trigger, ":");
1336	if (!trigger)
1337		return -EINVAL;
1338
1339	event = strsep(&trigger, ":");
1340
1341	ret = -EINVAL;
1342	event_enable_file = find_event_file(tr, system, event);
1343	if (!event_enable_file)
1344		goto out;
1345
1346#ifdef CONFIG_HIST_TRIGGERS
1347	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1348		(strcmp(cmd, DISABLE_HIST_STR) == 0));
1349
1350	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1351		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1352#else
1353	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1354#endif
1355	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1356
1357	ret = -ENOMEM;
1358	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1359	if (!trigger_data)
1360		goto out;
1361
1362	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1363	if (!enable_data) {
1364		kfree(trigger_data);
1365		goto out;
1366	}
1367
1368	trigger_data->count = -1;
1369	trigger_data->ops = trigger_ops;
1370	trigger_data->cmd_ops = cmd_ops;
1371	INIT_LIST_HEAD(&trigger_data->list);
1372	RCU_INIT_POINTER(trigger_data->filter, NULL);
1373
1374	enable_data->hist = hist;
1375	enable_data->enable = enable;
1376	enable_data->file = event_enable_file;
1377	trigger_data->private_data = enable_data;
1378
1379	if (glob[0] == '!') {
1380		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1381		kfree(trigger_data);
1382		kfree(enable_data);
1383		ret = 0;
1384		goto out;
1385	}
 1386
1387	if (trigger) {
1388		number = strsep(&trigger, ":");
1389
1390		ret = -EINVAL;
1391		if (!strlen(number))
1392			goto out_free;
1393
 1394		/*
 1395		 * Parse the optional ":N" count (e.g. the 5 in
 1396		 * "enable_event:sys:event:5") into trigger_data->count.
 1397		 */
1398		ret = kstrtoul(number, 0, &trigger_data->count);
1399		if (ret)
1400			goto out_free;
1401	}
1402
1403	if (!param) /* if param is non-empty, it's supposed to be a filter */
1404		goto out_reg;
1405
1406	if (!cmd_ops->set_filter)
1407		goto out_reg;
1408
1409	ret = cmd_ops->set_filter(param, trigger_data, file);
1410	if (ret < 0)
1411		goto out_free;
1412
1413 out_reg:
1414	/* Don't let event modules unload while probe registered */
1415	ret = try_module_get(event_enable_file->event_call->mod);
1416	if (!ret) {
1417		ret = -EBUSY;
1418		goto out_free;
1419	}
1420
1421	ret = trace_event_enable_disable(event_enable_file, 1, 1);
1422	if (ret < 0)
1423		goto out_put;
1424	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
 1425	/*
 1426	 * The above returns the number of triggers registered on success,
 1427	 * but returns zero if nothing was registered.  Consider that a
 1428	 * failure too.
 1429	 */
1430	if (!ret) {
1431		ret = -ENOENT;
1432		goto out_disable;
1433	} else if (ret < 0)
1434		goto out_disable;
1435	/* Just return zero, not the number of enabled functions */
 1436	ret = 0;
1437 out:
1438	return ret;
1439
1440 out_disable:
1441	trace_event_enable_disable(event_enable_file, 0, 1);
1442 out_put:
1443	module_put(event_enable_file->event_call->mod);
1444 out_free:
1445	if (cmd_ops->set_filter)
1446		cmd_ops->set_filter(NULL, trigger_data, NULL);
1447	kfree(trigger_data);
1448	kfree(enable_data);
1449	goto out;
1450}
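/*
 * For illustration: this handles writes such as
 *
 *	enable_event:sched:sched_switch:5
 *	disable_event:sched:sched_switch
 *
 * where "sched:sched_switch" names the event to be soft-enabled or
 * soft-disabled when the triggering event fires, and an optional ":N"
 * count and "if <filter>" clause behave exactly as for the other
 * trigger commands.
 */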
1451
1452int event_enable_register_trigger(char *glob,
1453				  struct event_trigger_ops *ops,
1454				  struct event_trigger_data *data,
1455				  struct trace_event_file *file)
1456{
1457	struct enable_trigger_data *enable_data = data->private_data;
1458	struct enable_trigger_data *test_enable_data;
1459	struct event_trigger_data *test;
1460	int ret = 0;
1461
1462	list_for_each_entry_rcu(test, &file->triggers, list) {
1463		test_enable_data = test->private_data;
1464		if (test_enable_data &&
1465		    (test->cmd_ops->trigger_type ==
1466		     data->cmd_ops->trigger_type) &&
1467		    (test_enable_data->file == enable_data->file)) {
1468			ret = -EEXIST;
1469			goto out;
1470		}
1471	}
1472
1473	if (data->ops->init) {
1474		ret = data->ops->init(data->ops, data);
1475		if (ret < 0)
1476			goto out;
1477	}
1478
1479	list_add_rcu(&data->list, &file->triggers);
1480	ret++;
1481
1482	update_cond_flag(file);
1483	if (trace_event_trigger_enable_disable(file, 1) < 0) {
1484		list_del_rcu(&data->list);
1485		update_cond_flag(file);
1486		ret--;
1487	}
1488out:
1489	return ret;
1490}
1491
1492void event_enable_unregister_trigger(char *glob,
1493				     struct event_trigger_ops *ops,
1494				     struct event_trigger_data *test,
1495				     struct trace_event_file *file)
1496{
1497	struct enable_trigger_data *test_enable_data = test->private_data;
1498	struct enable_trigger_data *enable_data;
1499	struct event_trigger_data *data;
1500	bool unregistered = false;
1501
1502	list_for_each_entry_rcu(data, &file->triggers, list) {
1503		enable_data = data->private_data;
1504		if (enable_data &&
1505		    (data->cmd_ops->trigger_type ==
1506		     test->cmd_ops->trigger_type) &&
1507		    (enable_data->file == test_enable_data->file)) {
1508			unregistered = true;
1509			list_del_rcu(&data->list);
1510			trace_event_trigger_enable_disable(file, 0);
1511			update_cond_flag(file);
1512			break;
1513		}
1514	}
1515
1516	if (unregistered && data->ops->free)
1517		data->ops->free(data->ops, data);
1518}
1519
1520static struct event_trigger_ops *
1521event_enable_get_trigger_ops(char *cmd, char *param)
1522{
1523	struct event_trigger_ops *ops;
1524	bool enable;
1525
1526#ifdef CONFIG_HIST_TRIGGERS
1527	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1528		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1529#else
1530	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1531#endif
1532	if (enable)
1533		ops = param ? &event_enable_count_trigger_ops :
1534			&event_enable_trigger_ops;
1535	else
1536		ops = param ? &event_disable_count_trigger_ops :
1537			&event_disable_trigger_ops;
1538
1539	return ops;
1540}
1541
1542static struct event_command trigger_enable_cmd = {
1543	.name			= ENABLE_EVENT_STR,
1544	.trigger_type		= ETT_EVENT_ENABLE,
1545	.func			= event_enable_trigger_func,
1546	.reg			= event_enable_register_trigger,
1547	.unreg			= event_enable_unregister_trigger,
1548	.get_trigger_ops	= event_enable_get_trigger_ops,
1549	.set_filter		= set_trigger_filter,
1550};
1551
1552static struct event_command trigger_disable_cmd = {
1553	.name			= DISABLE_EVENT_STR,
1554	.trigger_type		= ETT_EVENT_ENABLE,
1555	.func			= event_enable_trigger_func,
1556	.reg			= event_enable_register_trigger,
1557	.unreg			= event_enable_unregister_trigger,
1558	.get_trigger_ops	= event_enable_get_trigger_ops,
1559	.set_filter		= set_trigger_filter,
1560};
1561
1562static __init void unregister_trigger_enable_disable_cmds(void)
1563{
1564	unregister_event_command(&trigger_enable_cmd);
1565	unregister_event_command(&trigger_disable_cmd);
1566}
1567
1568static __init int register_trigger_enable_disable_cmds(void)
1569{
1570	int ret;
1571
1572	ret = register_event_command(&trigger_enable_cmd);
1573	if (WARN_ON(ret < 0))
1574		return ret;
1575	ret = register_event_command(&trigger_disable_cmd);
1576	if (WARN_ON(ret < 0))
1577		unregister_trigger_enable_disable_cmds();
1578
1579	return ret;
1580}
1581
1582static __init int register_trigger_traceon_traceoff_cmds(void)
1583{
1584	int ret;
1585
1586	ret = register_event_command(&trigger_traceon_cmd);
1587	if (WARN_ON(ret < 0))
1588		return ret;
1589	ret = register_event_command(&trigger_traceoff_cmd);
1590	if (WARN_ON(ret < 0))
1591		unregister_trigger_traceon_traceoff_cmds();
1592
1593	return ret;
1594}
1595
1596__init int register_trigger_cmds(void)
1597{
1598	register_trigger_traceon_traceoff_cmds();
1599	register_trigger_snapshot_cmd();
1600	register_trigger_stacktrace_cmd();
1601	register_trigger_enable_disable_cmds();
1602	register_trigger_hist_enable_disable_cmds();
1603	register_trigger_hist_cmd();
1604
1605	return 0;
1606}