v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * trace_events_trigger - trace event triggers
   4 *
   5 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
   6 */
   7
   8#include <linux/security.h>
   9#include <linux/module.h>
  10#include <linux/ctype.h>
  11#include <linux/mutex.h>
  12#include <linux/slab.h>
  13#include <linux/rculist.h>
  14
  15#include "trace.h"
  16
  17static LIST_HEAD(trigger_commands);
  18static DEFINE_MUTEX(trigger_cmd_mutex);
  19
  20void trigger_data_free(struct event_trigger_data *data)
  21{
  22	if (data->cmd_ops->set_filter)
  23		data->cmd_ops->set_filter(NULL, data, NULL);
  24
  25	/* make sure current triggers exit before free */
  26	tracepoint_synchronize_unregister();
  27
  28	kfree(data);
  29}
  30
  31/**
  32 * event_triggers_call - Call triggers associated with a trace event
  33 * @file: The trace_event_file associated with the event
  34 * @rec: The trace entry for the event, NULL for unconditional invocation
  35 *
  36 * For each trigger associated with an event, invoke the trigger
  37 * function registered with the associated trigger command.  If rec is
  38 * non-NULL, it means that the trigger requires further processing and
  39 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
   40 * trigger has a filter associated with it, rec will be checked against
  41 * the filter and if the record matches the trigger will be invoked.
  42 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
  43 * in any case until the current event is written, the trigger
  44 * function isn't invoked but the bit associated with the deferred
  45 * trigger is set in the return value.
  46 *
  47 * Returns an enum event_trigger_type value containing a set bit for
  48 * any trigger that should be deferred, ETT_NONE if nothing to defer.
  49 *
  50 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
  51 *
  52 * Return: an enum event_trigger_type value containing a set bit for
  53 * any trigger that should be deferred, ETT_NONE if nothing to defer.
  54 */
  55enum event_trigger_type
  56event_triggers_call(struct trace_event_file *file, void *rec,
  57		    struct ring_buffer_event *event)
  58{
  59	struct event_trigger_data *data;
  60	enum event_trigger_type tt = ETT_NONE;
  61	struct event_filter *filter;
  62
  63	if (list_empty(&file->triggers))
  64		return tt;
  65
  66	list_for_each_entry_rcu(data, &file->triggers, list) {
  67		if (data->paused)
  68			continue;
  69		if (!rec) {
  70			data->ops->func(data, rec, event);
  71			continue;
  72		}
  73		filter = rcu_dereference_sched(data->filter);
  74		if (filter && !filter_match_preds(filter, rec))
  75			continue;
  76		if (event_command_post_trigger(data->cmd_ops)) {
  77			tt |= data->cmd_ops->trigger_type;
  78			continue;
  79		}
  80		data->ops->func(data, rec, event);
  81	}
  82	return tt;
  83}
  84EXPORT_SYMBOL_GPL(event_triggers_call);
  85
  86/**
  87 * event_triggers_post_call - Call 'post_triggers' for a trace event
  88 * @file: The trace_event_file associated with the event
  89 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
  90 *
  91 * For each trigger associated with an event, invoke the trigger
  92 * function registered with the associated trigger command, if the
  93 * corresponding bit is set in the tt enum passed into this function.
  94 * See @event_triggers_call for details on how those bits are set.
  95 *
  96 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
  97 */
  98void
  99event_triggers_post_call(struct trace_event_file *file,
 100			 enum event_trigger_type tt)
 101{
 102	struct event_trigger_data *data;
 103
 104	list_for_each_entry_rcu(data, &file->triggers, list) {
 105		if (data->paused)
 106			continue;
 107		if (data->cmd_ops->trigger_type & tt)
 108			data->ops->func(data, NULL, NULL);
 109	}
 110}
 111EXPORT_SYMBOL_GPL(event_triggers_post_call);
 112
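/*
 * Illustrative sketch, not part of the kernel source: the caller
 * pattern the two functions above assume.  A tracepoint handler calls
 * event_triggers_call() before committing the event and, once the
 * event has been written, runs any deferred 'post_trigger' bits via
 * event_triggers_post_call().  The function and parameter names here
 * are hypothetical placeholders.
 */
static inline void example_commit_with_triggers(struct trace_event_file *file,
						void *entry,
						struct ring_buffer_event *event)
{
	enum event_trigger_type tt;

	tt = event_triggers_call(file, entry, event);

	/* ... write and commit the event to the ring buffer here ... */

	if (tt != ETT_NONE)
		event_triggers_post_call(file, tt);
}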
 113#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)
 114
 115static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
 116{
 117	struct trace_event_file *event_file = event_file_data(m->private);
 118
 119	if (t == SHOW_AVAILABLE_TRIGGERS)
 120		return NULL;
 121
 122	return seq_list_next(t, &event_file->triggers, pos);
 123}
 124
 125static void *trigger_start(struct seq_file *m, loff_t *pos)
 126{
 127	struct trace_event_file *event_file;
 128
 129	/* ->stop() is called even if ->start() fails */
 130	mutex_lock(&event_mutex);
 131	event_file = event_file_data(m->private);
 132	if (unlikely(!event_file))
 133		return ERR_PTR(-ENODEV);
 134
 135	if (list_empty(&event_file->triggers))
 136		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
 137
 138	return seq_list_start(&event_file->triggers, *pos);
 139}
 140
 141static void trigger_stop(struct seq_file *m, void *t)
 142{
 143	mutex_unlock(&event_mutex);
 144}
 145
 146static int trigger_show(struct seq_file *m, void *v)
 147{
 148	struct event_trigger_data *data;
 149	struct event_command *p;
 150
 151	if (v == SHOW_AVAILABLE_TRIGGERS) {
 152		seq_puts(m, "# Available triggers:\n");
 153		seq_putc(m, '#');
 154		mutex_lock(&trigger_cmd_mutex);
 155		list_for_each_entry_reverse(p, &trigger_commands, list)
 156			seq_printf(m, " %s", p->name);
 157		seq_putc(m, '\n');
 158		mutex_unlock(&trigger_cmd_mutex);
 159		return 0;
 160	}
 161
 162	data = list_entry(v, struct event_trigger_data, list);
 163	data->ops->print(m, data->ops, data);
 164
 165	return 0;
 166}
 167
 168static const struct seq_operations event_triggers_seq_ops = {
 169	.start = trigger_start,
 170	.next = trigger_next,
 171	.stop = trigger_stop,
 172	.show = trigger_show,
 173};
 174
 175static int event_trigger_regex_open(struct inode *inode, struct file *file)
 176{
 177	int ret;
 178
 179	ret = security_locked_down(LOCKDOWN_TRACEFS);
 180	if (ret)
 181		return ret;
 182
 183	mutex_lock(&event_mutex);
 184
 185	if (unlikely(!event_file_data(file))) {
 186		mutex_unlock(&event_mutex);
 187		return -ENODEV;
 188	}
 189
 190	if ((file->f_mode & FMODE_WRITE) &&
 191	    (file->f_flags & O_TRUNC)) {
 192		struct trace_event_file *event_file;
 193		struct event_command *p;
 194
 195		event_file = event_file_data(file);
 196
 197		list_for_each_entry(p, &trigger_commands, list) {
 198			if (p->unreg_all)
 199				p->unreg_all(event_file);
 200		}
 201	}
 202
 203	if (file->f_mode & FMODE_READ) {
 204		ret = seq_open(file, &event_triggers_seq_ops);
 205		if (!ret) {
 206			struct seq_file *m = file->private_data;
 207			m->private = file;
 208		}
 209	}
 210
 211	mutex_unlock(&event_mutex);
 212
 213	return ret;
 214}
 215
 216static int trigger_process_regex(struct trace_event_file *file, char *buff)
 217{
 218	char *command, *next = buff;
 219	struct event_command *p;
 220	int ret = -EINVAL;
 221
 222	command = strsep(&next, ": \t");
 223	command = (command[0] != '!') ? command : command + 1;
 224
 225	mutex_lock(&trigger_cmd_mutex);
 226	list_for_each_entry(p, &trigger_commands, list) {
 227		if (strcmp(p->name, command) == 0) {
 228			ret = p->func(p, file, buff, command, next);
 229			goto out_unlock;
 230		}
 231	}
 232 out_unlock:
 233	mutex_unlock(&trigger_cmd_mutex);
 234
 235	return ret;
 236}
 237
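/*
 * Worked example for trigger_process_regex() (illustrative, not part of
 * the kernel source): for a write of "traceoff:5 if common_pid != 0",
 * strsep() splits on the first ':', ' ' or '\t', so command becomes
 * "traceoff" and next becomes "5 if common_pid != 0"; the matching
 * event_command's func() (here event_trigger_callback()) then parses
 * the count and filter from that remainder.
 */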
 238static ssize_t event_trigger_regex_write(struct file *file,
 239					 const char __user *ubuf,
 240					 size_t cnt, loff_t *ppos)
 241{
 242	struct trace_event_file *event_file;
 243	ssize_t ret;
 244	char *buf;
 245
 246	if (!cnt)
 247		return 0;
 248
 249	if (cnt >= PAGE_SIZE)
 250		return -EINVAL;
 251
 252	buf = memdup_user_nul(ubuf, cnt);
 253	if (IS_ERR(buf))
 254		return PTR_ERR(buf);
 255
 256	strim(buf);
 257
 258	mutex_lock(&event_mutex);
 259	event_file = event_file_data(file);
 260	if (unlikely(!event_file)) {
 261		mutex_unlock(&event_mutex);
 262		kfree(buf);
 263		return -ENODEV;
 264	}
 265	ret = trigger_process_regex(event_file, buf);
 266	mutex_unlock(&event_mutex);
 267
 268	kfree(buf);
 269	if (ret < 0)
 270		goto out;
 271
 272	*ppos += cnt;
 273	ret = cnt;
 274 out:
 275	return ret;
 276}
 277
 278static int event_trigger_regex_release(struct inode *inode, struct file *file)
 279{
 280	mutex_lock(&event_mutex);
 281
 282	if (file->f_mode & FMODE_READ)
 283		seq_release(inode, file);
 284
 285	mutex_unlock(&event_mutex);
 286
 287	return 0;
 288}
 289
 290static ssize_t
 291event_trigger_write(struct file *filp, const char __user *ubuf,
 292		    size_t cnt, loff_t *ppos)
 293{
 294	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
 295}
 296
 297static int
 298event_trigger_open(struct inode *inode, struct file *filp)
 299{
 300	/* Checks for tracefs lockdown */
 301	return event_trigger_regex_open(inode, filp);
 302}
 303
 304static int
 305event_trigger_release(struct inode *inode, struct file *file)
 306{
 307	return event_trigger_regex_release(inode, file);
 308}
 309
 310const struct file_operations event_trigger_fops = {
 311	.open = event_trigger_open,
 312	.read = seq_read,
 313	.write = event_trigger_write,
 314	.llseek = tracing_lseek,
 315	.release = event_trigger_release,
 316};
 317
 318/*
 319 * Currently we only register event commands from __init, so mark this
 320 * __init too.
 321 */
 322__init int register_event_command(struct event_command *cmd)
 323{
 324	struct event_command *p;
 325	int ret = 0;
 326
 327	mutex_lock(&trigger_cmd_mutex);
 328	list_for_each_entry(p, &trigger_commands, list) {
 329		if (strcmp(cmd->name, p->name) == 0) {
 330			ret = -EBUSY;
 331			goto out_unlock;
 332		}
 333	}
 334	list_add(&cmd->list, &trigger_commands);
 335 out_unlock:
 336	mutex_unlock(&trigger_cmd_mutex);
 337
 338	return ret;
 339}
 340
 341/*
 342 * Currently we only unregister event commands from __init, so mark
 343 * this __init too.
 344 */
 345__init int unregister_event_command(struct event_command *cmd)
 346{
 347	struct event_command *p, *n;
 348	int ret = -ENODEV;
 349
 350	mutex_lock(&trigger_cmd_mutex);
 351	list_for_each_entry_safe(p, n, &trigger_commands, list) {
 352		if (strcmp(cmd->name, p->name) == 0) {
 353			ret = 0;
 354			list_del_init(&p->list);
 355			goto out_unlock;
 356		}
 357	}
 358 out_unlock:
 359	mutex_unlock(&trigger_cmd_mutex);
 360
 361	return ret;
 362}
 363
 364/**
 365 * event_trigger_print - Generic event_trigger_ops @print implementation
 366 * @name: The name of the event trigger
 367 * @m: The seq_file being printed to
 368 * @data: Trigger-specific data
 369 * @filter_str: filter_str to print, if present
 370 *
 371 * Common implementation for event triggers to print themselves.
 372 *
 373 * Usually wrapped by a function that simply sets the @name of the
 374 * trigger command and then invokes this.
 375 *
 376 * Return: 0 on success, errno otherwise
 377 */
 378static int
 379event_trigger_print(const char *name, struct seq_file *m,
 380		    void *data, char *filter_str)
 381{
 382	long count = (long)data;
 383
 384	seq_puts(m, name);
 385
 386	if (count == -1)
 387		seq_puts(m, ":unlimited");
 388	else
 389		seq_printf(m, ":count=%ld", count);
 390
 391	if (filter_str)
 392		seq_printf(m, " if %s\n", filter_str);
 393	else
 394		seq_putc(m, '\n');
 395
 396	return 0;
 397}
 398
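/*
 * Illustrative output of event_trigger_print(), not part of the kernel
 * source: with count == -1 and no filter it emits "traceon:unlimited";
 * with count 5 and a filter it emits e.g.
 * "traceoff:count=5 if common_pid != 0", which is what reading the
 * event's 'trigger' file shows.
 */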
 399/**
 400 * event_trigger_init - Generic event_trigger_ops @init implementation
 401 * @ops: The trigger ops associated with the trigger
 402 * @data: Trigger-specific data
 403 *
 404 * Common implementation of event trigger initialization.
 405 *
 406 * Usually used directly as the @init method in event trigger
 407 * implementations.
 408 *
 409 * Return: 0 on success, errno otherwise
 410 */
 411int event_trigger_init(struct event_trigger_ops *ops,
 412		       struct event_trigger_data *data)
 413{
 414	data->ref++;
 415	return 0;
 416}
 417
 418/**
 419 * event_trigger_free - Generic event_trigger_ops @free implementation
 420 * @ops: The trigger ops associated with the trigger
 421 * @data: Trigger-specific data
 422 *
 423 * Common implementation of event trigger de-initialization.
 424 *
 425 * Usually used directly as the @free method in event trigger
 426 * implementations.
 427 */
 428static void
 429event_trigger_free(struct event_trigger_ops *ops,
 430		   struct event_trigger_data *data)
 431{
 432	if (WARN_ON_ONCE(data->ref <= 0))
 433		return;
 434
 435	data->ref--;
 436	if (!data->ref)
 437		trigger_data_free(data);
 438}
 439
 440int trace_event_trigger_enable_disable(struct trace_event_file *file,
 441				       int trigger_enable)
 442{
 443	int ret = 0;
 444
 445	if (trigger_enable) {
 446		if (atomic_inc_return(&file->tm_ref) > 1)
 447			return ret;
 448		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
 449		ret = trace_event_enable_disable(file, 1, 1);
 450	} else {
 451		if (atomic_dec_return(&file->tm_ref) > 0)
 452			return ret;
 453		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
 454		ret = trace_event_enable_disable(file, 0, 1);
 455	}
 456
 457	return ret;
 458}
 459
 460/**
 461 * clear_event_triggers - Clear all triggers associated with a trace array
 462 * @tr: The trace array to clear
 463 *
 464 * For each trigger, the triggering event has its tm_ref decremented
 465 * via trace_event_trigger_enable_disable(), and any associated event
 466 * (in the case of enable/disable_event triggers) will have its sm_ref
 467 * decremented via free()->trace_event_enable_disable().  That
 468 * combination effectively reverses the soft-mode/trigger state added
 469 * by trigger registration.
 470 *
 471 * Must be called with event_mutex held.
 472 */
 473void
 474clear_event_triggers(struct trace_array *tr)
 475{
 476	struct trace_event_file *file;
 477
 478	list_for_each_entry(file, &tr->events, list) {
 479		struct event_trigger_data *data, *n;
 480		list_for_each_entry_safe(data, n, &file->triggers, list) {
 481			trace_event_trigger_enable_disable(file, 0);
 482			list_del_rcu(&data->list);
 483			if (data->ops->free)
 484				data->ops->free(data->ops, data);
 485		}
 486	}
 487}
 488
 489/**
 490 * update_cond_flag - Set or reset the TRIGGER_COND bit
 491 * @file: The trace_event_file associated with the event
 492 *
 493 * If an event has triggers and any of those triggers has a filter or
 494 * a post_trigger, trigger invocation needs to be deferred until after
 495 * the current event has logged its data, and the event should have
 496 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 497 * cleared.
 498 */
 499void update_cond_flag(struct trace_event_file *file)
 500{
 501	struct event_trigger_data *data;
 502	bool set_cond = false;
 503
 504	list_for_each_entry_rcu(data, &file->triggers, list) {
 505		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
 506		    event_command_needs_rec(data->cmd_ops)) {
 507			set_cond = true;
 508			break;
 509		}
 510	}
 511
 512	if (set_cond)
 513		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
 514	else
 515		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
 516}
 517
 518/**
 519 * register_trigger - Generic event_command @reg implementation
 520 * @glob: The raw string used to register the trigger
 521 * @ops: The trigger ops associated with the trigger
 522 * @data: Trigger-specific data to associate with the trigger
 523 * @file: The trace_event_file associated with the event
 524 *
 525 * Common implementation for event trigger registration.
 526 *
 527 * Usually used directly as the @reg method in event command
 528 * implementations.
 529 *
 530 * Return: 0 on success, errno otherwise
 531 */
 532static int register_trigger(char *glob, struct event_trigger_ops *ops,
 533			    struct event_trigger_data *data,
 534			    struct trace_event_file *file)
 535{
 536	struct event_trigger_data *test;
 537	int ret = 0;
 538
 539	list_for_each_entry_rcu(test, &file->triggers, list) {
 540		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
 541			ret = -EEXIST;
 542			goto out;
 543		}
 544	}
 545
 546	if (data->ops->init) {
 547		ret = data->ops->init(data->ops, data);
 548		if (ret < 0)
 549			goto out;
 550	}
 551
 552	list_add_rcu(&data->list, &file->triggers);
 553	ret++;
 554
 555	update_cond_flag(file);
 556	if (trace_event_trigger_enable_disable(file, 1) < 0) {
 557		list_del_rcu(&data->list);
 558		update_cond_flag(file);
 559		ret--;
 560	}
 561out:
 562	return ret;
 563}
 564
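/*
 * Note on the return convention above (illustrative, not part of the
 * kernel source): register_trigger() returns the number of triggers it
 * registered, i.e. 1 on success, 0 if enabling the event failed and the
 * registration was undone, or a negative errno for a duplicate trigger
 * type or a failed init().  Callers such as event_trigger_callback()
 * below map a 0 return to -ENOENT and a positive return to success.
 */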
 565/**
 566 * unregister_trigger - Generic event_command @unreg implementation
 567 * @glob: The raw string used to register the trigger
 568 * @ops: The trigger ops associated with the trigger
 569 * @test: Trigger-specific data used to find the trigger to remove
 570 * @file: The trace_event_file associated with the event
 571 *
 572 * Common implementation for event trigger unregistration.
 573 *
 574 * Usually used directly as the @unreg method in event command
 575 * implementations.
 576 */
 577static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
 578			       struct event_trigger_data *test,
 579			       struct trace_event_file *file)
 580{
 581	struct event_trigger_data *data;
 582	bool unregistered = false;
 583
 584	list_for_each_entry_rcu(data, &file->triggers, list) {
 585		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
 586			unregistered = true;
 587			list_del_rcu(&data->list);
 588			trace_event_trigger_enable_disable(file, 0);
 589			update_cond_flag(file);
 590			break;
 591		}
 592	}
 593
 594	if (unregistered && data->ops->free)
 595		data->ops->free(data->ops, data);
 596}
 597
 598/**
 599 * event_trigger_callback - Generic event_command @func implementation
 600 * @cmd_ops: The command ops, used for trigger registration
 601 * @file: The trace_event_file associated with the event
 602 * @glob: The raw string used to register the trigger
 603 * @cmd: The cmd portion of the string used to register the trigger
 604 * @param: The params portion of the string used to register the trigger
 605 *
 606 * Common implementation for event command parsing and trigger
 607 * instantiation.
 608 *
 609 * Usually used directly as the @func method in event command
 610 * implementations.
 611 *
 612 * Return: 0 on success, errno otherwise
 613 */
 614static int
 615event_trigger_callback(struct event_command *cmd_ops,
 616		       struct trace_event_file *file,
 617		       char *glob, char *cmd, char *param)
 618{
 619	struct event_trigger_data *trigger_data;
 620	struct event_trigger_ops *trigger_ops;
 621	char *trigger = NULL;
 622	char *number;
 623	int ret;
 624
 625	/* separate the trigger from the filter (t:n [if filter]) */
 626	if (param && isdigit(param[0]))
 627		trigger = strsep(&param, " \t");
 628
 629	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
 630
 631	ret = -ENOMEM;
 632	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
 633	if (!trigger_data)
 634		goto out;
 635
 636	trigger_data->count = -1;
 637	trigger_data->ops = trigger_ops;
 638	trigger_data->cmd_ops = cmd_ops;
 639	trigger_data->private_data = file;
 640	INIT_LIST_HEAD(&trigger_data->list);
 641	INIT_LIST_HEAD(&trigger_data->named_list);
 642
 643	if (glob[0] == '!') {
 644		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
 645		kfree(trigger_data);
 646		ret = 0;
 647		goto out;
 648	}
 649
 650	if (trigger) {
 651		number = strsep(&trigger, ":");
 652
 653		ret = -EINVAL;
 654		if (!strlen(number))
 655			goto out_free;
 656
 657		/*
 658		 * We use the callback data field (which is a pointer)
 659		 * as our counter.
 660		 */
 661		ret = kstrtoul(number, 0, &trigger_data->count);
 662		if (ret)
 663			goto out_free;
 664	}
 665
 666	if (!param) /* if param is non-empty, it's supposed to be a filter */
 667		goto out_reg;
 668
 669	if (!cmd_ops->set_filter)
 670		goto out_reg;
 671
 672	ret = cmd_ops->set_filter(param, trigger_data, file);
 673	if (ret < 0)
 674		goto out_free;
 675
 676 out_reg:
 677	/* Up the trigger_data count to make sure reg doesn't free it on failure */
 678	event_trigger_init(trigger_ops, trigger_data);
 679	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
 680	/*
 681	 * The above returns on success the # of functions enabled,
 682	 * but if it didn't find any functions it returns zero.
 683	 * Consider no functions a failure too.
 684	 */
 685	if (!ret) {
 686		cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
 687		ret = -ENOENT;
 688	} else if (ret > 0)
 689		ret = 0;
 690
 691	/* Down the counter of trigger_data or free it if not used anymore */
 692	event_trigger_free(trigger_ops, trigger_data);
 693 out:
 694	return ret;
 695
 696 out_free:
 697	if (cmd_ops->set_filter)
 698		cmd_ops->set_filter(NULL, trigger_data, NULL);
 699	kfree(trigger_data);
 700	goto out;
 701}
 702
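/*
 * Worked example for event_trigger_callback() (illustrative, not part
 * of the kernel source): continuing the "traceoff:5 if common_pid != 0"
 * example, cmd is "traceoff" and param is "5 if common_pid != 0".
 * Because param starts with a digit, trigger becomes "5" and param
 * becomes "if common_pid != 0"; kstrtoul() sets trigger_data->count to
 * 5 and set_trigger_filter() consumes the "if ..." remainder.  Without
 * a count, count stays -1 and prints as ":unlimited".  A leading '!',
 * as in "!traceoff", takes the unregistration path instead.
 */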
 703/**
 704 * set_trigger_filter - Generic event_command @set_filter implementation
 705 * @filter_str: The filter string for the trigger, NULL to remove filter
 706 * @trigger_data: Trigger-specific data
 707 * @file: The trace_event_file associated with the event
 708 *
 709 * Common implementation for event command filter parsing and filter
 710 * instantiation.
 711 *
 712 * Usually used directly as the @set_filter method in event command
 713 * implementations.
 714 *
 715 * Also used to remove a filter (if filter_str = NULL).
 716 *
 717 * Return: 0 on success, errno otherwise
 718 */
 719int set_trigger_filter(char *filter_str,
 720		       struct event_trigger_data *trigger_data,
 721		       struct trace_event_file *file)
 722{
 723	struct event_trigger_data *data = trigger_data;
 724	struct event_filter *filter = NULL, *tmp;
 725	int ret = -EINVAL;
 726	char *s;
 727
 728	if (!filter_str) /* clear the current filter */
 729		goto assign;
 730
 731	s = strsep(&filter_str, " \t");
 732
 733	if (!strlen(s) || strcmp(s, "if") != 0)
 734		goto out;
 735
 736	if (!filter_str)
 737		goto out;
 738
 739	/* The filter is for the 'trigger' event, not the triggered event */
 740	ret = create_event_filter(file->tr, file->event_call,
 741				  filter_str, false, &filter);
 742	/*
 743	 * If create_event_filter() fails, filter still needs to be freed.
 744	 * Which the calling code will do with data->filter.
 745	 */
 746 assign:
 747	tmp = rcu_access_pointer(data->filter);
 748
 749	rcu_assign_pointer(data->filter, filter);
 750
 751	if (tmp) {
 752		/* Make sure the call is done with the filter */
 753		tracepoint_synchronize_unregister();
 754		free_event_filter(tmp);
 755	}
 756
 757	kfree(data->filter_str);
 758	data->filter_str = NULL;
 759
 760	if (filter_str) {
 761		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
 762		if (!data->filter_str) {
 763			free_event_filter(rcu_access_pointer(data->filter));
 764			data->filter = NULL;
 765			ret = -ENOMEM;
 766		}
 767	}
 768 out:
 769	return ret;
 770}
 771
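/*
 * Illustrative note for set_trigger_filter(), not part of the kernel
 * source: for a filter_str of "if common_pid != 0", strsep() peels off
 * the leading "if", the rest is handed to create_event_filter() against
 * the triggering event, the resulting filter is published with
 * rcu_assign_pointer(), and the remaining string is duplicated into
 * data->filter_str so it can be reproduced when the 'trigger' file is
 * read.
 */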
 772static LIST_HEAD(named_triggers);
 773
 774/**
 775 * find_named_trigger - Find the common named trigger associated with @name
 776 * @name: The name of the set of named triggers to find the common data for
 777 *
 778 * Named triggers are sets of triggers that share a common set of
 779 * trigger data.  The first named trigger registered with a given name
 780 * owns the common trigger data that the others subsequently
 781 * registered with the same name will reference.  This function
 782 * returns the common trigger data associated with that first
 783 * registered instance.
 784 *
 785 * Return: the common trigger data for the given named trigger on
 786 * success, NULL otherwise.
 787 */
 788struct event_trigger_data *find_named_trigger(const char *name)
 789{
 790	struct event_trigger_data *data;
 791
 792	if (!name)
 793		return NULL;
 794
 795	list_for_each_entry(data, &named_triggers, named_list) {
 796		if (data->named_data)
 797			continue;
 798		if (strcmp(data->name, name) == 0)
 799			return data;
 800	}
 801
 802	return NULL;
 803}
 804
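/*
 * Illustrative note, not part of the kernel source: named triggers are
 * used by hist triggers such as 'hist:name=foo:...' attached to several
 * events.  The first registration with name "foo" owns the
 * event_trigger_data returned by find_named_trigger("foo"); later
 * registrations point at it via set_named_trigger_data(), so they all
 * update one shared set of trigger data.
 */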
 805/**
 806 * is_named_trigger - determine if a given trigger is a named trigger
 807 * @test: The trigger data to test
 808 *
 809 * Return: true if 'test' is a named trigger, false otherwise.
 810 */
 811bool is_named_trigger(struct event_trigger_data *test)
 812{
 813	struct event_trigger_data *data;
 814
 815	list_for_each_entry(data, &named_triggers, named_list) {
 816		if (test == data)
 817			return true;
 818	}
 819
 820	return false;
 821}
 822
 823/**
 824 * save_named_trigger - save the trigger in the named trigger list
 825 * @name: The name of the named trigger set
 826 * @data: The trigger data to save
 827 *
 828 * Return: 0 if successful, negative error otherwise.
 829 */
 830int save_named_trigger(const char *name, struct event_trigger_data *data)
 831{
 832	data->name = kstrdup(name, GFP_KERNEL);
 833	if (!data->name)
 834		return -ENOMEM;
 835
 836	list_add(&data->named_list, &named_triggers);
 837
 838	return 0;
 839}
 840
 841/**
 842 * del_named_trigger - delete a trigger from the named trigger list
 843 * @data: The trigger data to delete
 844 */
 845void del_named_trigger(struct event_trigger_data *data)
 846{
 847	kfree(data->name);
 848	data->name = NULL;
 849
 850	list_del(&data->named_list);
 851}
 852
 853static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
 854{
 855	struct event_trigger_data *test;
 856
 857	list_for_each_entry(test, &named_triggers, named_list) {
 858		if (strcmp(test->name, data->name) == 0) {
 859			if (pause) {
 860				test->paused_tmp = test->paused;
 861				test->paused = true;
 862			} else {
 863				test->paused = test->paused_tmp;
 864			}
 865		}
 866	}
 867}
 868
 869/**
 870 * pause_named_trigger - Pause all named triggers with the same name
 871 * @data: The trigger data of a named trigger to pause
 872 *
 873 * Pauses a named trigger along with all other triggers having the
 874 * same name.  Because named triggers share a common set of data,
 875 * pausing only one is meaningless, so pausing one named trigger needs
 876 * to pause all triggers with the same name.
 877 */
 878void pause_named_trigger(struct event_trigger_data *data)
 879{
 880	__pause_named_trigger(data, true);
 881}
 882
 883/**
 884 * unpause_named_trigger - Un-pause all named triggers with the same name
 885 * @data: The trigger data of a named trigger to unpause
 886 *
 887 * Un-pauses a named trigger along with all other triggers having the
 888 * same name.  Because named triggers share a common set of data,
 889 * unpausing only one is meaningless, so unpausing one named trigger
 890 * needs to unpause all triggers with the same name.
 891 */
 892void unpause_named_trigger(struct event_trigger_data *data)
 893{
 894	__pause_named_trigger(data, false);
 895}
 896
 897/**
 898 * set_named_trigger_data - Associate common named trigger data
  899 * @data: The trigger data to associate with @named_data
 900 *
 901 * Named triggers are sets of triggers that share a common set of
 902 * trigger data.  The first named trigger registered with a given name
 903 * owns the common trigger data that the others subsequently
 904 * registered with the same name will reference.  This function
 905 * associates the common trigger data from the first trigger with the
 906 * given trigger.
 907 */
 908void set_named_trigger_data(struct event_trigger_data *data,
 909			    struct event_trigger_data *named_data)
 910{
 911	data->named_data = named_data;
 912}
 913
 914struct event_trigger_data *
 915get_named_trigger_data(struct event_trigger_data *data)
 916{
 917	return data->named_data;
 918}
 919
 920static void
 921traceon_trigger(struct event_trigger_data *data, void *rec,
 922		struct ring_buffer_event *event)
 923{
 924	if (tracing_is_on())
 925		return;
 926
 927	tracing_on();
 928}
 929
 930static void
 931traceon_count_trigger(struct event_trigger_data *data, void *rec,
 932		      struct ring_buffer_event *event)
 933{
 934	if (tracing_is_on())
 935		return;
 936
 937	if (!data->count)
 938		return;
 939
 940	if (data->count != -1)
 941		(data->count)--;
 942
 943	tracing_on();
 944}
 945
 946static void
 947traceoff_trigger(struct event_trigger_data *data, void *rec,
 948		 struct ring_buffer_event *event)
 949{
 950	if (!tracing_is_on())
 951		return;
 952
 953	tracing_off();
 954}
 955
 956static void
 957traceoff_count_trigger(struct event_trigger_data *data, void *rec,
 958		       struct ring_buffer_event *event)
 959{
 960	if (!tracing_is_on())
 961		return;
 962
 963	if (!data->count)
 964		return;
 965
 966	if (data->count != -1)
 967		(data->count)--;
 968
 969	tracing_off();
 970}
 971
 972static int
 973traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
 974		      struct event_trigger_data *data)
 975{
 976	return event_trigger_print("traceon", m, (void *)data->count,
 977				   data->filter_str);
 978}
 979
 980static int
 981traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
 982		       struct event_trigger_data *data)
 983{
 984	return event_trigger_print("traceoff", m, (void *)data->count,
 985				   data->filter_str);
 986}
 987
 988static struct event_trigger_ops traceon_trigger_ops = {
 989	.func			= traceon_trigger,
 990	.print			= traceon_trigger_print,
 991	.init			= event_trigger_init,
 992	.free			= event_trigger_free,
 993};
 994
 995static struct event_trigger_ops traceon_count_trigger_ops = {
 996	.func			= traceon_count_trigger,
 997	.print			= traceon_trigger_print,
 998	.init			= event_trigger_init,
 999	.free			= event_trigger_free,
1000};
1001
1002static struct event_trigger_ops traceoff_trigger_ops = {
1003	.func			= traceoff_trigger,
1004	.print			= traceoff_trigger_print,
1005	.init			= event_trigger_init,
1006	.free			= event_trigger_free,
1007};
1008
1009static struct event_trigger_ops traceoff_count_trigger_ops = {
1010	.func			= traceoff_count_trigger,
1011	.print			= traceoff_trigger_print,
1012	.init			= event_trigger_init,
1013	.free			= event_trigger_free,
1014};
1015
1016static struct event_trigger_ops *
1017onoff_get_trigger_ops(char *cmd, char *param)
1018{
1019	struct event_trigger_ops *ops;
1020
1021	/* we register both traceon and traceoff to this callback */
1022	if (strcmp(cmd, "traceon") == 0)
1023		ops = param ? &traceon_count_trigger_ops :
1024			&traceon_trigger_ops;
1025	else
1026		ops = param ? &traceoff_count_trigger_ops :
1027			&traceoff_trigger_ops;
1028
1029	return ops;
1030}
1031
1032static struct event_command trigger_traceon_cmd = {
1033	.name			= "traceon",
1034	.trigger_type		= ETT_TRACE_ONOFF,
1035	.func			= event_trigger_callback,
1036	.reg			= register_trigger,
1037	.unreg			= unregister_trigger,
1038	.get_trigger_ops	= onoff_get_trigger_ops,
1039	.set_filter		= set_trigger_filter,
1040};
1041
1042static struct event_command trigger_traceoff_cmd = {
1043	.name			= "traceoff",
1044	.trigger_type		= ETT_TRACE_ONOFF,
1045	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1046	.func			= event_trigger_callback,
1047	.reg			= register_trigger,
1048	.unreg			= unregister_trigger,
1049	.get_trigger_ops	= onoff_get_trigger_ops,
1050	.set_filter		= set_trigger_filter,
1051};
1052
1053#ifdef CONFIG_TRACER_SNAPSHOT
1054static void
1055snapshot_trigger(struct event_trigger_data *data, void *rec,
1056		 struct ring_buffer_event *event)
1057{
1058	struct trace_event_file *file = data->private_data;
1059
1060	if (file)
1061		tracing_snapshot_instance(file->tr);
1062	else
1063		tracing_snapshot();
1064}
1065
1066static void
1067snapshot_count_trigger(struct event_trigger_data *data, void *rec,
1068		       struct ring_buffer_event *event)
1069{
1070	if (!data->count)
1071		return;
1072
1073	if (data->count != -1)
1074		(data->count)--;
1075
1076	snapshot_trigger(data, rec, event);
1077}
1078
1079static int
1080register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
1081			  struct event_trigger_data *data,
1082			  struct trace_event_file *file)
1083{
1084	int ret = register_trigger(glob, ops, data, file);
1085
1086	if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
1087		unregister_trigger(glob, ops, data, file);
1088		ret = 0;
1089	}
1090
1091	return ret;
1092}
1093
1094static int
1095snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1096		       struct event_trigger_data *data)
1097{
1098	return event_trigger_print("snapshot", m, (void *)data->count,
1099				   data->filter_str);
1100}
1101
1102static struct event_trigger_ops snapshot_trigger_ops = {
1103	.func			= snapshot_trigger,
1104	.print			= snapshot_trigger_print,
1105	.init			= event_trigger_init,
1106	.free			= event_trigger_free,
1107};
1108
1109static struct event_trigger_ops snapshot_count_trigger_ops = {
1110	.func			= snapshot_count_trigger,
1111	.print			= snapshot_trigger_print,
1112	.init			= event_trigger_init,
1113	.free			= event_trigger_free,
1114};
1115
1116static struct event_trigger_ops *
1117snapshot_get_trigger_ops(char *cmd, char *param)
1118{
1119	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1120}
1121
1122static struct event_command trigger_snapshot_cmd = {
1123	.name			= "snapshot",
1124	.trigger_type		= ETT_SNAPSHOT,
1125	.func			= event_trigger_callback,
1126	.reg			= register_snapshot_trigger,
1127	.unreg			= unregister_trigger,
1128	.get_trigger_ops	= snapshot_get_trigger_ops,
1129	.set_filter		= set_trigger_filter,
1130};
1131
1132static __init int register_trigger_snapshot_cmd(void)
1133{
1134	int ret;
1135
1136	ret = register_event_command(&trigger_snapshot_cmd);
1137	WARN_ON(ret < 0);
1138
1139	return ret;
1140}
1141#else
1142static __init int register_trigger_snapshot_cmd(void) { return 0; }
1143#endif /* CONFIG_TRACER_SNAPSHOT */
1144
1145#ifdef CONFIG_STACKTRACE
1146#ifdef CONFIG_UNWINDER_ORC
1147/* Skip 2:
1148 *   event_triggers_post_call()
1149 *   trace_event_raw_event_xxx()
1150 */
1151# define STACK_SKIP 2
1152#else
1153/*
1154 * Skip 4:
1155 *   stacktrace_trigger()
1156 *   event_triggers_post_call()
1157 *   trace_event_buffer_commit()
1158 *   trace_event_raw_event_xxx()
1159 */
1160#define STACK_SKIP 4
1161#endif
1162
1163static void
1164stacktrace_trigger(struct event_trigger_data *data, void *rec,
1165		   struct ring_buffer_event *event)
1166{
1167	trace_dump_stack(STACK_SKIP);
1168}
1169
1170static void
1171stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
1172			 struct ring_buffer_event *event)
1173{
1174	if (!data->count)
1175		return;
1176
1177	if (data->count != -1)
1178		(data->count)--;
1179
1180	stacktrace_trigger(data, rec, event);
1181}
1182
1183static int
1184stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1185			 struct event_trigger_data *data)
1186{
1187	return event_trigger_print("stacktrace", m, (void *)data->count,
1188				   data->filter_str);
1189}
1190
1191static struct event_trigger_ops stacktrace_trigger_ops = {
1192	.func			= stacktrace_trigger,
1193	.print			= stacktrace_trigger_print,
1194	.init			= event_trigger_init,
1195	.free			= event_trigger_free,
1196};
1197
1198static struct event_trigger_ops stacktrace_count_trigger_ops = {
1199	.func			= stacktrace_count_trigger,
1200	.print			= stacktrace_trigger_print,
1201	.init			= event_trigger_init,
1202	.free			= event_trigger_free,
1203};
1204
1205static struct event_trigger_ops *
1206stacktrace_get_trigger_ops(char *cmd, char *param)
1207{
1208	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1209}
1210
1211static struct event_command trigger_stacktrace_cmd = {
1212	.name			= "stacktrace",
1213	.trigger_type		= ETT_STACKTRACE,
1214	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1215	.func			= event_trigger_callback,
1216	.reg			= register_trigger,
1217	.unreg			= unregister_trigger,
1218	.get_trigger_ops	= stacktrace_get_trigger_ops,
1219	.set_filter		= set_trigger_filter,
1220};
1221
1222static __init int register_trigger_stacktrace_cmd(void)
1223{
1224	int ret;
1225
1226	ret = register_event_command(&trigger_stacktrace_cmd);
1227	WARN_ON(ret < 0);
1228
1229	return ret;
1230}
1231#else
1232static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1233#endif /* CONFIG_STACKTRACE */
1234
1235static __init void unregister_trigger_traceon_traceoff_cmds(void)
1236{
1237	unregister_event_command(&trigger_traceon_cmd);
1238	unregister_event_command(&trigger_traceoff_cmd);
1239}
1240
1241static void
1242event_enable_trigger(struct event_trigger_data *data, void *rec,
1243		     struct ring_buffer_event *event)
1244{
1245	struct enable_trigger_data *enable_data = data->private_data;
1246
1247	if (enable_data->enable)
1248		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1249	else
1250		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1251}
1252
1253static void
1254event_enable_count_trigger(struct event_trigger_data *data, void *rec,
1255			   struct ring_buffer_event *event)
1256{
1257	struct enable_trigger_data *enable_data = data->private_data;
1258
1259	if (!data->count)
1260		return;
1261
1262	/* Skip if the event is in a state we want to switch to */
1263	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1264		return;
1265
1266	if (data->count != -1)
1267		(data->count)--;
1268
1269	event_enable_trigger(data, rec, event);
1270}
1271
1272int event_enable_trigger_print(struct seq_file *m,
1273			       struct event_trigger_ops *ops,
1274			       struct event_trigger_data *data)
1275{
1276	struct enable_trigger_data *enable_data = data->private_data;
1277
1278	seq_printf(m, "%s:%s:%s",
1279		   enable_data->hist ?
1280		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1281		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1282		   enable_data->file->event_call->class->system,
1283		   trace_event_name(enable_data->file->event_call));
1284
1285	if (data->count == -1)
1286		seq_puts(m, ":unlimited");
1287	else
1288		seq_printf(m, ":count=%ld", data->count);
1289
1290	if (data->filter_str)
1291		seq_printf(m, " if %s\n", data->filter_str);
1292	else
1293		seq_putc(m, '\n');
1294
1295	return 0;
1296}
1297
1298void event_enable_trigger_free(struct event_trigger_ops *ops,
1299			       struct event_trigger_data *data)
1300{
1301	struct enable_trigger_data *enable_data = data->private_data;
1302
1303	if (WARN_ON_ONCE(data->ref <= 0))
1304		return;
1305
1306	data->ref--;
1307	if (!data->ref) {
1308		/* Remove the SOFT_MODE flag */
1309		trace_event_enable_disable(enable_data->file, 0, 1);
1310		module_put(enable_data->file->event_call->mod);
1311		trigger_data_free(data);
1312		kfree(enable_data);
1313	}
1314}
1315
1316static struct event_trigger_ops event_enable_trigger_ops = {
1317	.func			= event_enable_trigger,
1318	.print			= event_enable_trigger_print,
1319	.init			= event_trigger_init,
1320	.free			= event_enable_trigger_free,
1321};
1322
1323static struct event_trigger_ops event_enable_count_trigger_ops = {
1324	.func			= event_enable_count_trigger,
1325	.print			= event_enable_trigger_print,
1326	.init			= event_trigger_init,
1327	.free			= event_enable_trigger_free,
1328};
1329
1330static struct event_trigger_ops event_disable_trigger_ops = {
1331	.func			= event_enable_trigger,
1332	.print			= event_enable_trigger_print,
1333	.init			= event_trigger_init,
1334	.free			= event_enable_trigger_free,
1335};
1336
1337static struct event_trigger_ops event_disable_count_trigger_ops = {
1338	.func			= event_enable_count_trigger,
1339	.print			= event_enable_trigger_print,
1340	.init			= event_trigger_init,
1341	.free			= event_enable_trigger_free,
1342};
1343
1344int event_enable_trigger_func(struct event_command *cmd_ops,
1345			      struct trace_event_file *file,
1346			      char *glob, char *cmd, char *param)
1347{
1348	struct trace_event_file *event_enable_file;
1349	struct enable_trigger_data *enable_data;
1350	struct event_trigger_data *trigger_data;
1351	struct event_trigger_ops *trigger_ops;
1352	struct trace_array *tr = file->tr;
1353	const char *system;
1354	const char *event;
1355	bool hist = false;
1356	char *trigger;
1357	char *number;
1358	bool enable;
1359	int ret;
1360
1361	if (!param)
1362		return -EINVAL;
1363
1364	/* separate the trigger from the filter (s:e:n [if filter]) */
1365	trigger = strsep(&param, " \t");
1366	if (!trigger)
1367		return -EINVAL;
1368
1369	system = strsep(&trigger, ":");
1370	if (!trigger)
1371		return -EINVAL;
1372
1373	event = strsep(&trigger, ":");
1374
1375	ret = -EINVAL;
1376	event_enable_file = find_event_file(tr, system, event);
1377	if (!event_enable_file)
1378		goto out;
1379
1380#ifdef CONFIG_HIST_TRIGGERS
1381	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1382		(strcmp(cmd, DISABLE_HIST_STR) == 0));
1383
1384	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1385		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1386#else
1387	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1388#endif
1389	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1390
1391	ret = -ENOMEM;
1392	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1393	if (!trigger_data)
1394		goto out;
1395
1396	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1397	if (!enable_data) {
1398		kfree(trigger_data);
1399		goto out;
1400	}
1401
1402	trigger_data->count = -1;
1403	trigger_data->ops = trigger_ops;
1404	trigger_data->cmd_ops = cmd_ops;
1405	INIT_LIST_HEAD(&trigger_data->list);
1406	RCU_INIT_POINTER(trigger_data->filter, NULL);
1407
1408	enable_data->hist = hist;
1409	enable_data->enable = enable;
1410	enable_data->file = event_enable_file;
1411	trigger_data->private_data = enable_data;
1412
1413	if (glob[0] == '!') {
1414		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1415		kfree(trigger_data);
1416		kfree(enable_data);
1417		ret = 0;
1418		goto out;
1419	}
1420
1421	/* Up the trigger_data count to make sure nothing frees it on failure */
1422	event_trigger_init(trigger_ops, trigger_data);
1423
1424	if (trigger) {
1425		number = strsep(&trigger, ":");
1426
1427		ret = -EINVAL;
1428		if (!strlen(number))
1429			goto out_free;
1430
1431		/*
1432		 * We use the callback data field (which is a pointer)
1433		 * as our counter.
1434		 */
1435		ret = kstrtoul(number, 0, &trigger_data->count);
1436		if (ret)
1437			goto out_free;
1438	}
1439
1440	if (!param) /* if param is non-empty, it's supposed to be a filter */
1441		goto out_reg;
1442
1443	if (!cmd_ops->set_filter)
1444		goto out_reg;
1445
1446	ret = cmd_ops->set_filter(param, trigger_data, file);
1447	if (ret < 0)
1448		goto out_free;
1449
1450 out_reg:
1451	/* Don't let event modules unload while probe registered */
1452	ret = try_module_get(event_enable_file->event_call->mod);
1453	if (!ret) {
1454		ret = -EBUSY;
1455		goto out_free;
1456	}
1457
1458	ret = trace_event_enable_disable(event_enable_file, 1, 1);
1459	if (ret < 0)
1460		goto out_put;
1461	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1462	/*
1463	 * The above returns on success the # of functions enabled,
1464	 * but if it didn't find any functions it returns zero.
1465	 * Consider no functions a failure too.
1466	 */
1467	if (!ret) {
1468		ret = -ENOENT;
1469		goto out_disable;
1470	} else if (ret < 0)
1471		goto out_disable;
1472	/* Just return zero, not the number of enabled functions */
1473	ret = 0;
1474	event_trigger_free(trigger_ops, trigger_data);
1475 out:
1476	return ret;
1477
1478 out_disable:
1479	trace_event_enable_disable(event_enable_file, 0, 1);
1480 out_put:
1481	module_put(event_enable_file->event_call->mod);
1482 out_free:
1483	if (cmd_ops->set_filter)
1484		cmd_ops->set_filter(NULL, trigger_data, NULL);
1485	event_trigger_free(trigger_ops, trigger_data);
1486	kfree(enable_data);
1487	goto out;
1488}
1489
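/*
 * Worked example for event_enable_trigger_func() (illustrative, not
 * part of the kernel source): for the command
 * "enable_event:sched:sched_switch:5 if common_pid != 0", trigger is
 * "sched:sched_switch:5", which splits into system "sched", event
 * "sched_switch" and a count of 5; find_event_file() locates the
 * sched_switch file, try_module_get() pins its module, and the
 * remaining "if ..." clause is handled by set_trigger_filter().
 */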
1490int event_enable_register_trigger(char *glob,
1491				  struct event_trigger_ops *ops,
1492				  struct event_trigger_data *data,
1493				  struct trace_event_file *file)
1494{
1495	struct enable_trigger_data *enable_data = data->private_data;
1496	struct enable_trigger_data *test_enable_data;
1497	struct event_trigger_data *test;
1498	int ret = 0;
1499
1500	list_for_each_entry_rcu(test, &file->triggers, list) {
1501		test_enable_data = test->private_data;
1502		if (test_enable_data &&
1503		    (test->cmd_ops->trigger_type ==
1504		     data->cmd_ops->trigger_type) &&
1505		    (test_enable_data->file == enable_data->file)) {
1506			ret = -EEXIST;
1507			goto out;
1508		}
1509	}
1510
1511	if (data->ops->init) {
1512		ret = data->ops->init(data->ops, data);
1513		if (ret < 0)
1514			goto out;
1515	}
1516
1517	list_add_rcu(&data->list, &file->triggers);
1518	ret++;
1519
1520	update_cond_flag(file);
1521	if (trace_event_trigger_enable_disable(file, 1) < 0) {
1522		list_del_rcu(&data->list);
1523		update_cond_flag(file);
1524		ret--;
1525	}
1526out:
1527	return ret;
1528}
1529
1530void event_enable_unregister_trigger(char *glob,
1531				     struct event_trigger_ops *ops,
1532				     struct event_trigger_data *test,
1533				     struct trace_event_file *file)
1534{
1535	struct enable_trigger_data *test_enable_data = test->private_data;
1536	struct enable_trigger_data *enable_data;
1537	struct event_trigger_data *data;
1538	bool unregistered = false;
1539
1540	list_for_each_entry_rcu(data, &file->triggers, list) {
1541		enable_data = data->private_data;
1542		if (enable_data &&
1543		    (data->cmd_ops->trigger_type ==
1544		     test->cmd_ops->trigger_type) &&
1545		    (enable_data->file == test_enable_data->file)) {
1546			unregistered = true;
1547			list_del_rcu(&data->list);
1548			trace_event_trigger_enable_disable(file, 0);
1549			update_cond_flag(file);
1550			break;
1551		}
1552	}
1553
1554	if (unregistered && data->ops->free)
1555		data->ops->free(data->ops, data);
1556}
1557
1558static struct event_trigger_ops *
1559event_enable_get_trigger_ops(char *cmd, char *param)
1560{
1561	struct event_trigger_ops *ops;
1562	bool enable;
1563
1564#ifdef CONFIG_HIST_TRIGGERS
1565	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1566		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1567#else
1568	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1569#endif
1570	if (enable)
1571		ops = param ? &event_enable_count_trigger_ops :
1572			&event_enable_trigger_ops;
1573	else
1574		ops = param ? &event_disable_count_trigger_ops :
1575			&event_disable_trigger_ops;
1576
1577	return ops;
1578}
1579
1580static struct event_command trigger_enable_cmd = {
1581	.name			= ENABLE_EVENT_STR,
1582	.trigger_type		= ETT_EVENT_ENABLE,
1583	.func			= event_enable_trigger_func,
1584	.reg			= event_enable_register_trigger,
1585	.unreg			= event_enable_unregister_trigger,
1586	.get_trigger_ops	= event_enable_get_trigger_ops,
1587	.set_filter		= set_trigger_filter,
1588};
1589
1590static struct event_command trigger_disable_cmd = {
1591	.name			= DISABLE_EVENT_STR,
1592	.trigger_type		= ETT_EVENT_ENABLE,
1593	.func			= event_enable_trigger_func,
1594	.reg			= event_enable_register_trigger,
1595	.unreg			= event_enable_unregister_trigger,
1596	.get_trigger_ops	= event_enable_get_trigger_ops,
1597	.set_filter		= set_trigger_filter,
1598};
1599
1600static __init void unregister_trigger_enable_disable_cmds(void)
1601{
1602	unregister_event_command(&trigger_enable_cmd);
1603	unregister_event_command(&trigger_disable_cmd);
1604}
1605
1606static __init int register_trigger_enable_disable_cmds(void)
1607{
1608	int ret;
1609
1610	ret = register_event_command(&trigger_enable_cmd);
1611	if (WARN_ON(ret < 0))
1612		return ret;
1613	ret = register_event_command(&trigger_disable_cmd);
1614	if (WARN_ON(ret < 0))
1615		unregister_trigger_enable_disable_cmds();
1616
1617	return ret;
1618}
1619
1620static __init int register_trigger_traceon_traceoff_cmds(void)
1621{
1622	int ret;
1623
1624	ret = register_event_command(&trigger_traceon_cmd);
1625	if (WARN_ON(ret < 0))
1626		return ret;
1627	ret = register_event_command(&trigger_traceoff_cmd);
1628	if (WARN_ON(ret < 0))
1629		unregister_trigger_traceon_traceoff_cmds();
1630
1631	return ret;
1632}
1633
1634__init int register_trigger_cmds(void)
1635{
1636	register_trigger_traceon_traceoff_cmds();
1637	register_trigger_snapshot_cmd();
1638	register_trigger_stacktrace_cmd();
1639	register_trigger_enable_disable_cmds();
1640	register_trigger_hist_enable_disable_cmds();
1641	register_trigger_hist_cmd();
1642
1643	return 0;
1644}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * trace_events_trigger - trace event triggers
   4 *
   5 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
   6 */
   7
   8#include <linux/security.h>
   9#include <linux/module.h>
  10#include <linux/ctype.h>
  11#include <linux/mutex.h>
  12#include <linux/slab.h>
  13#include <linux/rculist.h>
  14
  15#include "trace.h"
  16
  17static LIST_HEAD(trigger_commands);
  18static DEFINE_MUTEX(trigger_cmd_mutex);
  19
  20void trigger_data_free(struct event_trigger_data *data)
  21{
  22	if (data->cmd_ops->set_filter)
  23		data->cmd_ops->set_filter(NULL, data, NULL);
  24
  25	/* make sure current triggers exit before free */
  26	tracepoint_synchronize_unregister();
  27
  28	kfree(data);
  29}
  30
  31/**
  32 * event_triggers_call - Call triggers associated with a trace event
  33 * @file: The trace_event_file associated with the event
  34 * @buffer: The ring buffer that the event is being written to
  35 * @rec: The trace entry for the event, NULL for unconditional invocation
  36 * @event: The event meta data in the ring buffer
  37 *
  38 * For each trigger associated with an event, invoke the trigger
  39 * function registered with the associated trigger command.  If rec is
  40 * non-NULL, it means that the trigger requires further processing and
  41 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
   42 * trigger has a filter associated with it, rec will be checked against
  43 * the filter and if the record matches the trigger will be invoked.
  44 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
  45 * in any case until the current event is written, the trigger
  46 * function isn't invoked but the bit associated with the deferred
  47 * trigger is set in the return value.
  48 *
  49 * Returns an enum event_trigger_type value containing a set bit for
  50 * any trigger that should be deferred, ETT_NONE if nothing to defer.
  51 *
  52 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
  53 *
  54 * Return: an enum event_trigger_type value containing a set bit for
  55 * any trigger that should be deferred, ETT_NONE if nothing to defer.
  56 */
  57enum event_trigger_type
  58event_triggers_call(struct trace_event_file *file,
  59		    struct trace_buffer *buffer, void *rec,
  60		    struct ring_buffer_event *event)
  61{
  62	struct event_trigger_data *data;
  63	enum event_trigger_type tt = ETT_NONE;
  64	struct event_filter *filter;
  65
  66	if (list_empty(&file->triggers))
  67		return tt;
  68
  69	list_for_each_entry_rcu(data, &file->triggers, list) {
  70		if (data->paused)
  71			continue;
  72		if (!rec) {
  73			data->ops->trigger(data, buffer, rec, event);
  74			continue;
  75		}
  76		filter = rcu_dereference_sched(data->filter);
  77		if (filter && !filter_match_preds(filter, rec))
  78			continue;
  79		if (event_command_post_trigger(data->cmd_ops)) {
  80			tt |= data->cmd_ops->trigger_type;
  81			continue;
  82		}
  83		data->ops->trigger(data, buffer, rec, event);
  84	}
  85	return tt;
  86}
  87EXPORT_SYMBOL_GPL(event_triggers_call);
  88
  89bool __trace_trigger_soft_disabled(struct trace_event_file *file)
  90{
  91	unsigned long eflags = file->flags;
  92
  93	if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
  94		event_triggers_call(file, NULL, NULL, NULL);
  95	if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
  96		return true;
  97	if (eflags & EVENT_FILE_FL_PID_FILTER)
  98		return trace_event_ignore_this_pid(file);
  99	return false;
 100}
 101EXPORT_SYMBOL_GPL(__trace_trigger_soft_disabled);
 102
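/*
 * Illustrative note, not part of the kernel source: this slow path is
 * intended to back the trace_trigger_soft_disabled() helper declared in
 * linux/trace_events.h.  When an event is in TRIGGER_MODE its
 * unconditional triggers still fire here even though the event itself
 * may be soft-disabled or filtered out by the PID filter.
 */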
 103/**
 104 * event_triggers_post_call - Call 'post_triggers' for a trace event
 105 * @file: The trace_event_file associated with the event
 106 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 107 *
 108 * For each trigger associated with an event, invoke the trigger
 109 * function registered with the associated trigger command, if the
 110 * corresponding bit is set in the tt enum passed into this function.
 111 * See @event_triggers_call for details on how those bits are set.
 112 *
 113 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 114 */
 115void
 116event_triggers_post_call(struct trace_event_file *file,
 117			 enum event_trigger_type tt)
 118{
 119	struct event_trigger_data *data;
 120
 121	list_for_each_entry_rcu(data, &file->triggers, list) {
 122		if (data->paused)
 123			continue;
 124		if (data->cmd_ops->trigger_type & tt)
 125			data->ops->trigger(data, NULL, NULL, NULL);
 126	}
 127}
 128EXPORT_SYMBOL_GPL(event_triggers_post_call);
 129
 130#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)
 131
 132static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
 133{
 134	struct trace_event_file *event_file = event_file_data(m->private);
 135
 136	if (t == SHOW_AVAILABLE_TRIGGERS) {
 137		(*pos)++;
 138		return NULL;
 139	}
 140	return seq_list_next(t, &event_file->triggers, pos);
 141}
 142
 143static bool check_user_trigger(struct trace_event_file *file)
 144{
 145	struct event_trigger_data *data;
 146
 147	list_for_each_entry_rcu(data, &file->triggers, list,
 148				lockdep_is_held(&event_mutex)) {
 149		if (data->flags & EVENT_TRIGGER_FL_PROBE)
 150			continue;
 151		return true;
 152	}
 153	return false;
 154}
 155
 156static void *trigger_start(struct seq_file *m, loff_t *pos)
 157{
 158	struct trace_event_file *event_file;
 159
 160	/* ->stop() is called even if ->start() fails */
 161	mutex_lock(&event_mutex);
 162	event_file = event_file_file(m->private);
 163	if (unlikely(!event_file))
 164		return ERR_PTR(-ENODEV);
 165
 166	if (list_empty(&event_file->triggers) || !check_user_trigger(event_file))
 167		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
 168
 169	return seq_list_start(&event_file->triggers, *pos);
 170}
 171
 172static void trigger_stop(struct seq_file *m, void *t)
 173{
 174	mutex_unlock(&event_mutex);
 175}
 176
 177static int trigger_show(struct seq_file *m, void *v)
 178{
 179	struct event_trigger_data *data;
 180	struct event_command *p;
 181
 182	if (v == SHOW_AVAILABLE_TRIGGERS) {
 183		seq_puts(m, "# Available triggers:\n");
 184		seq_putc(m, '#');
 185		mutex_lock(&trigger_cmd_mutex);
 186		list_for_each_entry_reverse(p, &trigger_commands, list)
 187			seq_printf(m, " %s", p->name);
 188		seq_putc(m, '\n');
 189		mutex_unlock(&trigger_cmd_mutex);
 190		return 0;
 191	}
 192
 193	data = list_entry(v, struct event_trigger_data, list);
 194	data->ops->print(m, data);
 195
 196	return 0;
 197}
 198
 199static const struct seq_operations event_triggers_seq_ops = {
 200	.start = trigger_start,
 201	.next = trigger_next,
 202	.stop = trigger_stop,
 203	.show = trigger_show,
 204};
 205
 206static int event_trigger_regex_open(struct inode *inode, struct file *file)
 207{
 208	int ret;
 209
 210	ret = security_locked_down(LOCKDOWN_TRACEFS);
 211	if (ret)
 212		return ret;
 213
 214	mutex_lock(&event_mutex);
 215
 216	if (unlikely(!event_file_file(file))) {
 217		mutex_unlock(&event_mutex);
 218		return -ENODEV;
 219	}
 220
 221	if ((file->f_mode & FMODE_WRITE) &&
 222	    (file->f_flags & O_TRUNC)) {
 223		struct trace_event_file *event_file;
 224		struct event_command *p;
 225
 226		event_file = event_file_data(file);
 227
 228		list_for_each_entry(p, &trigger_commands, list) {
 229			if (p->unreg_all)
 230				p->unreg_all(event_file);
 231		}
 232	}
 233
 234	if (file->f_mode & FMODE_READ) {
 235		ret = seq_open(file, &event_triggers_seq_ops);
 236		if (!ret) {
 237			struct seq_file *m = file->private_data;
 238			m->private = file;
 239		}
 240	}
 241
 242	mutex_unlock(&event_mutex);
 243
 244	return ret;
 245}
 246
 247int trigger_process_regex(struct trace_event_file *file, char *buff)
 248{
 249	char *command, *next;
 250	struct event_command *p;
 251	int ret = -EINVAL;
 252
 253	next = buff = skip_spaces(buff);
 254	command = strsep(&next, ": \t");
 255	if (next) {
 256		next = skip_spaces(next);
 257		if (!*next)
 258			next = NULL;
 259	}
 260	command = (command[0] != '!') ? command : command + 1;
 261
 262	mutex_lock(&trigger_cmd_mutex);
 263	list_for_each_entry(p, &trigger_commands, list) {
 264		if (strcmp(p->name, command) == 0) {
 265			ret = p->parse(p, file, buff, command, next);
 266			goto out_unlock;
 267		}
 268	}
 269 out_unlock:
 270	mutex_unlock(&trigger_cmd_mutex);
 271
 272	return ret;
 273}
 274
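/*
 * Illustrative: "echo 'traceoff:3 if pid == 1' > events/.../trigger"
 * arrives here as buff.  strsep() terminates the command name in place,
 * so the matching command's parse() callback sees glob == "traceoff",
 * cmd == "traceoff" and param_and_filter == "3 if pid == 1".  With a
 * leading '!', only cmd has the '!' skipped; glob keeps it so parse()
 * can tell this is a removal.
 */
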
 275static ssize_t event_trigger_regex_write(struct file *file,
 276					 const char __user *ubuf,
 277					 size_t cnt, loff_t *ppos)
 278{
 279	struct trace_event_file *event_file;
 280	ssize_t ret;
 281	char *buf;
 282
 283	if (!cnt)
 284		return 0;
 285
 286	if (cnt >= PAGE_SIZE)
 287		return -EINVAL;
 288
 289	buf = memdup_user_nul(ubuf, cnt);
 290	if (IS_ERR(buf))
 291		return PTR_ERR(buf);
 292
 293	strim(buf);
 294
 295	mutex_lock(&event_mutex);
 296	event_file = event_file_file(file);
 297	if (unlikely(!event_file)) {
 298		mutex_unlock(&event_mutex);
 299		kfree(buf);
 300		return -ENODEV;
 301	}
 302	ret = trigger_process_regex(event_file, buf);
 303	mutex_unlock(&event_mutex);
 304
 305	kfree(buf);
 306	if (ret < 0)
 307		goto out;
 308
 309	*ppos += cnt;
 310	ret = cnt;
 311 out:
 312	return ret;
 313}
 314
 315static int event_trigger_regex_release(struct inode *inode, struct file *file)
 316{
 317	mutex_lock(&event_mutex);
 318
 319	if (file->f_mode & FMODE_READ)
 320		seq_release(inode, file);
 321
 322	mutex_unlock(&event_mutex);
 323
 324	return 0;
 325}
 326
 327static ssize_t
 328event_trigger_write(struct file *filp, const char __user *ubuf,
 329		    size_t cnt, loff_t *ppos)
 330{
 331	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
 332}
 333
 334static int
 335event_trigger_open(struct inode *inode, struct file *filp)
 336{
 337	/* Checks for tracefs lockdown */
 338	return event_trigger_regex_open(inode, filp);
 339}
 340
 341static int
 342event_trigger_release(struct inode *inode, struct file *file)
 343{
 344	return event_trigger_regex_release(inode, file);
 345}
 346
 347const struct file_operations event_trigger_fops = {
 348	.open = event_trigger_open,
 349	.read = seq_read,
 350	.write = event_trigger_write,
 351	.llseek = tracing_lseek,
 352	.release = event_trigger_release,
 353};
 354
 355/*
 356 * Currently we only register event commands from __init, so mark this
 357 * __init too.
 358 */
 359__init int register_event_command(struct event_command *cmd)
 360{
 361	struct event_command *p;
 362	int ret = 0;
 363
 364	mutex_lock(&trigger_cmd_mutex);
 365	list_for_each_entry(p, &trigger_commands, list) {
 366		if (strcmp(cmd->name, p->name) == 0) {
 367			ret = -EBUSY;
 368			goto out_unlock;
 369		}
 370	}
 371	list_add(&cmd->list, &trigger_commands);
 372 out_unlock:
 373	mutex_unlock(&trigger_cmd_mutex);
 374
 375	return ret;
 376}
 377
 378/*
 379 * Currently we only unregister event commands from __init, so mark
 380 * this __init too.
 381 */
 382__init int unregister_event_command(struct event_command *cmd)
 383{
 384	struct event_command *p, *n;
 385	int ret = -ENODEV;
 386
 387	mutex_lock(&trigger_cmd_mutex);
 388	list_for_each_entry_safe(p, n, &trigger_commands, list) {
 389		if (strcmp(cmd->name, p->name) == 0) {
 390			ret = 0;
 391			list_del_init(&p->list);
 392			goto out_unlock;
 393		}
 394	}
 395 out_unlock:
 396	mutex_unlock(&trigger_cmd_mutex);
 397
 398	return ret;
 399}
 400
 401/**
 402 * event_trigger_print - Generic event_trigger_ops @print implementation
 403 * @name: The name of the event trigger
 404 * @m: The seq_file being printed to
 405 * @data: Trigger-specific data
 406 * @filter_str: filter_str to print, if present
 407 *
 408 * Common implementation for event triggers to print themselves.
 409 *
 410 * Usually wrapped by a function that simply sets the @name of the
 411 * trigger command and then invokes this.
 412 *
 413 * Return: 0 on success, errno otherwise
 414 */
 415static int
 416event_trigger_print(const char *name, struct seq_file *m,
 417		    void *data, char *filter_str)
 418{
 419	long count = (long)data;
 420
 421	seq_puts(m, name);
 422
 423	if (count == -1)
 424		seq_puts(m, ":unlimited");
 425	else
 426		seq_printf(m, ":count=%ld", count);
 427
 428	if (filter_str)
 429		seq_printf(m, " if %s\n", filter_str);
 430	else
 431		seq_putc(m, '\n');
 432
 433	return 0;
 434}
 435
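/*
 * Illustrative output: a trigger registered as "traceon:5" with a pid
 * filter prints as
 *
 *   traceon:count=5 if pid == 0
 *
 * while one registered without a count prints as "traceon:unlimited".
 */
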
 436/**
 437 * event_trigger_init - Generic event_trigger_ops @init implementation
 438 * @data: Trigger-specific data
 439 *
 440 * Common implementation of event trigger initialization.
 441 *
 442 * Usually used directly as the @init method in event trigger
 443 * implementations.
 444 *
 445 * Return: 0 on success, errno otherwise
 446 */
 447int event_trigger_init(struct event_trigger_data *data)
 448{
 449	data->ref++;
 450	return 0;
 451}
 452
 453/**
 454 * event_trigger_free - Generic event_trigger_ops @free implementation
 455 * @data: Trigger-specific data
 456 *
 457 * Common implementation of event trigger de-initialization.
 458 *
 459 * Usually used directly as the @free method in event trigger
 460 * implementations.
 461 */
 462static void
 463event_trigger_free(struct event_trigger_data *data)
 464{
 465	if (WARN_ON_ONCE(data->ref <= 0))
 466		return;
 467
 468	data->ref--;
 469	if (!data->ref)
 470		trigger_data_free(data);
 471}
 472
 473int trace_event_trigger_enable_disable(struct trace_event_file *file,
 474				       int trigger_enable)
 475{
 476	int ret = 0;
 477
 478	if (trigger_enable) {
 479		if (atomic_inc_return(&file->tm_ref) > 1)
 480			return ret;
 481		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
 482		ret = trace_event_enable_disable(file, 1, 1);
 483	} else {
 484		if (atomic_dec_return(&file->tm_ref) > 0)
 485			return ret;
 486		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
 487		ret = trace_event_enable_disable(file, 0, 1);
 488	}
 489
 490	return ret;
 491}
 492
 493/**
 494 * clear_event_triggers - Clear all triggers associated with a trace array
 495 * @tr: The trace array to clear
 496 *
 497 * For each trigger, the triggering event has its tm_ref decremented
 498 * via trace_event_trigger_enable_disable(), and any associated event
 499 * (in the case of enable/disable_event triggers) will have its sm_ref
 500 * decremented via free()->trace_event_enable_disable().  That
 501 * combination effectively reverses the soft-mode/trigger state added
 502 * by trigger registration.
 503 *
 504 * Must be called with event_mutex held.
 505 */
 506void
 507clear_event_triggers(struct trace_array *tr)
 508{
 509	struct trace_event_file *file;
 510
 511	list_for_each_entry(file, &tr->events, list) {
 512		struct event_trigger_data *data, *n;
 513		list_for_each_entry_safe(data, n, &file->triggers, list) {
 514			trace_event_trigger_enable_disable(file, 0);
 515			list_del_rcu(&data->list);
 516			if (data->ops->free)
 517				data->ops->free(data);
 518		}
 519	}
 520}
 521
 522/**
 523 * update_cond_flag - Set or reset the TRIGGER_COND bit
 524 * @file: The trace_event_file associated with the event
 525 *
 526 * If an event has triggers and any of those triggers has a filter or
 527 * a post_trigger, trigger invocation needs to be deferred until after
 528 * the current event has logged its data, and the event should have
 529 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 530 * cleared.
 531 */
 532void update_cond_flag(struct trace_event_file *file)
 533{
 534	struct event_trigger_data *data;
 535	bool set_cond = false;
 536
 537	lockdep_assert_held(&event_mutex);
 538
 539	list_for_each_entry(data, &file->triggers, list) {
 540		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
 541		    event_command_needs_rec(data->cmd_ops)) {
 542			set_cond = true;
 543			break;
 544		}
 545	}
 546
 547	if (set_cond)
 548		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
 549	else
 550		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
 551}
 552
 553/**
 554 * register_trigger - Generic event_command @reg implementation
 555 * @glob: The raw string used to register the trigger
 556 * @data: Trigger-specific data to associate with the trigger
 557 * @file: The trace_event_file associated with the event
 558 *
 559 * Common implementation for event trigger registration.
 560 *
 561 * Usually used directly as the @reg method in event command
 562 * implementations.
 563 *
 564 * Return: 0 on success, errno otherwise
 565 */
 566static int register_trigger(char *glob,
 567			    struct event_trigger_data *data,
 568			    struct trace_event_file *file)
 569{
 570	struct event_trigger_data *test;
 571	int ret = 0;
 572
 573	lockdep_assert_held(&event_mutex);
 574
 575	list_for_each_entry(test, &file->triggers, list) {
 576		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
 577			ret = -EEXIST;
 578			goto out;
 579		}
 580	}
 581
 582	if (data->ops->init) {
 583		ret = data->ops->init(data);
 584		if (ret < 0)
 585			goto out;
 586	}
 587
 588	list_add_rcu(&data->list, &file->triggers);
 589
 590	update_cond_flag(file);
 591	ret = trace_event_trigger_enable_disable(file, 1);
 592	if (ret < 0) {
 593		list_del_rcu(&data->list);
 594		update_cond_flag(file);
 595	}
 596out:
 597	return ret;
 598}
 599
 600/*
 601 * True if the trigger was found and unregistered, else false.
 602 */
 603static bool try_unregister_trigger(char *glob,
 604				   struct event_trigger_data *test,
 605				   struct trace_event_file *file)
 606{
 607	struct event_trigger_data *data = NULL, *iter;
 608
 609	lockdep_assert_held(&event_mutex);
 610
 611	list_for_each_entry(iter, &file->triggers, list) {
 612		if (iter->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
 613			data = iter;
 614			list_del_rcu(&data->list);
 615			trace_event_trigger_enable_disable(file, 0);
 616			update_cond_flag(file);
 617			break;
 618		}
 619	}
 620
 621	if (data) {
 622		if (data->ops->free)
 623			data->ops->free(data);
 624
 625		return true;
 626	}
 627
 628	return false;
 629}
 630
 631/**
 632 * unregister_trigger - Generic event_command @unreg implementation
 633 * @glob: The raw string used to register the trigger
 634 * @test: Trigger-specific data used to find the trigger to remove
 635 * @file: The trace_event_file associated with the event
 636 *
 637 * Common implementation for event trigger unregistration.
 638 *
 639 * Usually used directly as the @unreg method in event command
 640 * implementations.
 641 */
 642static void unregister_trigger(char *glob,
 643			       struct event_trigger_data *test,
 644			       struct trace_event_file *file)
 645{
 646	try_unregister_trigger(glob, test, file);
 647}
 648
 649/*
 650 * Event trigger parsing helper functions.
 651 *
 652 * These functions help make it easier to write an event trigger
 653 * parsing function i.e. the struct event_command.parse() callback
 654 * function responsible for parsing and registering a trigger command
 655 * written to the 'trigger' file.
 656 *
 657 * A trigger command (or just 'trigger' for short) takes the form:
 658 *   [trigger] [if filter]
 659 *
 660 * The struct event_command.parse() callback (and other struct
 661 * event_command functions) refer to several components of a trigger
 662 * command.  Those same components are referenced by the event trigger
 663 * parsing helper functions defined below.  These components are:
 664 *
 665 *   cmd               - the trigger command name
 666 *   glob              - the trigger command name optionally prefaced with '!'
 667 *   param_and_filter  - text following cmd and ':'
 668 *   param             - text following cmd and ':' and stripped of filter
 669 *   filter            - the optional filter text following (and including) 'if'
 670 *
 671 * To illustrate the use of these components, here are some concrete
 672 * examples. For the following triggers:
 673 *
 674 *   echo 'traceon:5 if pid == 0' > trigger
 675 *     - 'traceon' is both cmd and glob
 676 *     - '5 if pid == 0' is the param_and_filter
 677 *     - '5' is the param
 678 *     - 'if pid == 0' is the filter
 679 *
 680 *   echo 'enable_event:sys:event:n' > trigger
 681 *     - 'enable_event' is both cmd and glob
 682 *     - 'sys:event:n' is the param_and_filter
 683 *     - 'sys:event:n' is the param
 684 *     - there is no filter
 685 *
 686 *   echo 'hist:keys=pid if prio > 50' > trigger
 687 *     - 'hist' is both cmd and glob
 688 *     - 'keys=pid if prio > 50' is the param_and_filter
 689 *     - 'keys=pid' is the param
 690 *     - 'if prio > 50' is the filter
 691 *
 692 *   echo '!enable_event:sys:event:n' > trigger
 693 *     - 'enable_event' the cmd
 694 *     - '!enable_event' is the glob
 695 *     - 'sys:event:n' is the param_and_filter
 696 *     - 'sys:event:n' is the param
 697 *     - there is no filter
 698 *
 699 *   echo 'traceoff' > trigger
 700 *     - 'traceoff' is both cmd and glob
 701 *     - there is no param_and_filter
 702 *     - there is no param
 703 *     - there is no filter
 704 *
 705 * There are a few different categories of event trigger covered by
 706 * these helpers:
 707 *
 708 *  - triggers that don't require a parameter e.g. traceon
 709 *  - triggers that do require a parameter e.g. enable_event and hist
 710 *  - triggers that though they may not require a param may support an
 711 *    optional 'n' param (n = number of times the trigger should fire)
 712 *    e.g.: traceon:5 or enable_event:sys:event:n
 713 *  - triggers that do not support an 'n' param e.g. hist
 714 *
 715 * These functions can be used or ignored as necessary - it all
 716 * depends on the complexity of the trigger, and the granularity of
 717 * the helpers reflects the fact that some implementations need to
 718 * customize certain aspects of their parsing and therefore won't use
 719 * every function.  For instance, the hist trigger
 720 * implementation doesn't use event_trigger_separate_filter() because
 721 * it has special requirements for handling the filter.
 722 */
 723
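/*
 * As a rough sketch (a hypothetical command; the generic
 * event_trigger_parse() further below is the canonical user, and error
 * handling is elided here), a parse() callback typically chains the
 * helpers like this:
 *
 *	remove = event_trigger_check_remove(glob);
 *	ret = event_trigger_separate_filter(param_and_filter, &param,
 *					    &filter, false);
 *	trigger_data = event_trigger_alloc(cmd_ops, cmd, param, file);
 *	ret = event_trigger_parse_num(param, trigger_data);
 *	ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
 *	ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
 */
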
 724/**
 725 * event_trigger_check_remove - check whether an event trigger specifies remove
 726 * @glob: The trigger command string, with optional remove(!) operator
 727 *
 728 * The event trigger callback implementations pass in 'glob' as a
 729 * parameter.  This is the command name either with or without a
 730 * remove(!)  operator.  This function simply parses the glob and
 731 * determines whether the command corresponds to a trigger removal or
 732 * a trigger addition.
 733 *
 734 * Return: true if this is a remove command, false otherwise
 735 */
 736bool event_trigger_check_remove(const char *glob)
 737{
 738	return glob && glob[0] == '!';
 739}
 740
 741/**
 742 * event_trigger_empty_param - check whether the param is empty
 743 * @param: The trigger param string
 744 *
 745 * The event trigger callback implementations pass in 'param' as a
 746 * parameter.  This corresponds to the text remaining after the
 747 * command name has been stripped off.  This function can be called by a
 748 * callback implementation for any command that requires a param; a
 749 * callback that doesn't require a param can ignore it.
 750 *
 751 * Return: true if this is an empty param, false otherwise
 752 */
 753bool event_trigger_empty_param(const char *param)
 754{
 755	return !param;
 756}
 757
 758/**
 759 * event_trigger_separate_filter - separate an event trigger from a filter
 760 * @param_and_filter: String containing trigger and possibly filter
 761 * @param: outparam, will be filled with a pointer to the trigger
 762 * @filter: outparam, will be filled with a pointer to the filter
 763 * @param_required: Specifies whether or not the param string is required
 764 *
 765 * Given a param string of the form '[trigger] [if filter]', this
 766 * function separates the filter from the trigger and returns the
 767 * trigger in @param and the filter in @filter.  Either the @param
 768 * or the @filter may be set to NULL by this function - if not set to
 769 * NULL, they will contain strings corresponding to the trigger and
 770 * filter.
 771 *
 772 * There are two cases that need to be handled with respect to the
 773 * passed-in param: either the param is required, or it is not
 774 * required.  If @param_required is set, and there's no param, it will
 775 * return -EINVAL.  If @param_required is not set and there's a param
 776 * that starts with a number, that corresponds to the case of a
 777 * trigger with :n (n = number of times the trigger should fire) and
 778 * the parsing continues normally; otherwise the function simply
 779 * returns, assuming the remaining text contains only a filter and
 780 * there's nothing else to do.
 781 *
 782 * Return: 0 on success, errno otherwise
 783 */
 784int event_trigger_separate_filter(char *param_and_filter, char **param,
 785				  char **filter, bool param_required)
 786{
 787	int ret = 0;
 788
 789	*param = *filter = NULL;
 790
 791	if (!param_and_filter) {
 792		if (param_required)
 793			ret = -EINVAL;
 794		goto out;
 795	}
 796
 797	/*
 798	 * Here we check for an optional param. The only legal
 799	 * optional param is :n, and if that's the case, continue
 800	 * below. Otherwise we assume what's left is a filter and
 801	 * return it as the filter string for the caller to deal with.
 802	 */
 803	if (!param_required && param_and_filter && !isdigit(param_and_filter[0])) {
 804		*filter = param_and_filter;
 805		goto out;
 806	}
 807
 808	/*
 809	 * Separate the param from the filter (param [if filter]).
 810	 * Here we have either an optional :n param or a required
 811	 * param and an optional filter.
 812	 */
 813	*param = strsep(&param_and_filter, " \t");
 814
 815	/*
 816	 * Here we have a filter, though it may be empty.
 817	 */
 818	if (param_and_filter) {
 819		*filter = skip_spaces(param_and_filter);
 820		if (!**filter)
 821			*filter = NULL;
 822	}
 823out:
 824	return ret;
 825}
 826
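/*
 * Illustrative inputs/outputs (with param_required == false):
 *
 *   "5 if pid == 0"  -> *param == "5",  *filter == "if pid == 0"
 *   "if pid == 0"    -> *param == NULL, *filter == "if pid == 0"
 *   NULL             -> *param == NULL, *filter == NULL
 *                       (or -EINVAL if the param had been required)
 */
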
 827/**
 828 * event_trigger_alloc - allocate and init event_trigger_data for a trigger
 829 * @cmd_ops: The event_command operations for the trigger
 830 * @cmd: The cmd string
 831 * @param: The param string
 832 * @private_data: User data to associate with the event trigger
 833 *
 834 * Allocate an event_trigger_data instance and initialize it.  The
 835 * @cmd_ops are used along with the @cmd and @param to get the
 836 * trigger_ops to assign to the event_trigger_data.  @private_data can
 837 * also be passed in and associated with the event_trigger_data.
 838 *
 839 * Use event_trigger_free() to free an event_trigger_data object.
 840 *
 841 * Return: The trigger_data object on success, NULL otherwise
 842 */
 843struct event_trigger_data *event_trigger_alloc(struct event_command *cmd_ops,
 844					       char *cmd,
 845					       char *param,
 846					       void *private_data)
 847{
 848	struct event_trigger_data *trigger_data;
 849	struct event_trigger_ops *trigger_ops;
 850
 851	trigger_ops = cmd_ops->get_trigger_ops(cmd, param);
 852
 853	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
 854	if (!trigger_data)
 855		return NULL;
 856
 857	trigger_data->count = -1;
 858	trigger_data->ops = trigger_ops;
 859	trigger_data->cmd_ops = cmd_ops;
 860	trigger_data->private_data = private_data;
 861
 862	INIT_LIST_HEAD(&trigger_data->list);
 863	INIT_LIST_HEAD(&trigger_data->named_list);
 864	RCU_INIT_POINTER(trigger_data->filter, NULL);
 865
 866	return trigger_data;
 867}
 868
 869/**
 870 * event_trigger_parse_num - parse and return the number param for a trigger
 871 * @param: The param string
 872 * @trigger_data: The trigger_data for the trigger
 873 *
 874 * Parse the :n (n = number of times the trigger should fire) param
 875 * and set the count variable in the trigger_data to the parsed count.
 876 *
 877 * Return: 0 on success, errno otherwise
 878 */
 879int event_trigger_parse_num(char *param,
 880			    struct event_trigger_data *trigger_data)
 881{
 882	char *number;
 883	int ret = 0;
 884
 885	if (param) {
 886		number = strsep(&param, ":");
 887
 888		if (!strlen(number))
 889			return -EINVAL;
 890
 891		/*
 892		 * Parse the count into the trigger_data count field;
 893		 * -1 (the default) means unlimited.
 894		 */
 895		ret = kstrtoul(number, 0, &trigger_data->count);
 896	}
 897
 898	return ret;
 899}
 900
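/*
 * Illustrative: a param of "5" sets trigger_data->count to 5, while a
 * NULL param leaves the count at -1 (unlimited), as initialized by
 * event_trigger_alloc().
 */
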
 901/**
 902 * event_trigger_set_filter - set an event trigger's filter
 903 * @cmd_ops: The event_command operations for the trigger
 904 * @file: The event file for the trigger's event
 905 * @param: The string containing the filter
 906 * @trigger_data: The trigger_data for the trigger
 907 *
 908 * Set the filter for the trigger.  If the filter is NULL, just return
 909 * without error.
 910 *
 911 * Return: 0 on success, errno otherwise
 912 */
 913int event_trigger_set_filter(struct event_command *cmd_ops,
 914			     struct trace_event_file *file,
 915			     char *param,
 916			     struct event_trigger_data *trigger_data)
 917{
 918	if (param && cmd_ops->set_filter)
 919		return cmd_ops->set_filter(param, trigger_data, file);
 920
 921	return 0;
 922}
 923
 924/**
 925 * event_trigger_reset_filter - reset an event trigger's filter
 926 * @cmd_ops: The event_command operations for the trigger
 927 * @trigger_data: The trigger_data for the trigger
 928 *
 929 * Reset the filter for the trigger to no filter.
 930 */
 931void event_trigger_reset_filter(struct event_command *cmd_ops,
 932				struct event_trigger_data *trigger_data)
 933{
 934	if (cmd_ops->set_filter)
 935		cmd_ops->set_filter(NULL, trigger_data, NULL);
 936}
 937
 938/**
 939 * event_trigger_register - register an event trigger
 940 * @cmd_ops: The event_command operations for the trigger
 941 * @file: The event file for the trigger's event
 942 * @glob: The trigger command string, with optional remove(!) operator
 943 * @trigger_data: The trigger_data for the trigger
 944 *
 945 * Register an event trigger.  The @cmd_ops are used to call the
 946 * cmd_ops->reg() function which actually does the registration.
 947 *
 948 * Return: 0 on success, errno otherwise
 949 */
 950int event_trigger_register(struct event_command *cmd_ops,
 951			   struct trace_event_file *file,
 952			   char *glob,
 953			   struct event_trigger_data *trigger_data)
 954{
 955	return cmd_ops->reg(glob, trigger_data, file);
 956}
 957
 958/**
 959 * event_trigger_unregister - unregister an event trigger
 960 * @cmd_ops: The event_command operations for the trigger
 961 * @file: The event file for the trigger's event
 962 * @glob: The trigger command string, with optional remove(!) operator
 963 * @trigger_data: The trigger_data for the trigger
 964 *
 965 * Unregister an event trigger.  The @cmd_ops are used to call the
 966 * cmd_ops->unreg() function which actually does the unregistration.
 967 */
 968void event_trigger_unregister(struct event_command *cmd_ops,
 969			      struct trace_event_file *file,
 970			      char *glob,
 971			      struct event_trigger_data *trigger_data)
 972{
 973	cmd_ops->unreg(glob, trigger_data, file);
 974}
 975
 976/*
 977 * End event trigger parsing helper functions.
 978 */
 979
 980/**
 981 * event_trigger_parse - Generic event_command @parse implementation
 982 * @cmd_ops: The command ops, used for trigger registration
 983 * @file: The trace_event_file associated with the event
 984 * @glob: The raw string used to register the trigger
 985 * @cmd: The cmd portion of the string used to register the trigger
 986 * @param_and_filter: The param and filter portion of the string used to register the trigger
 987 *
 988 * Common implementation for event command parsing and trigger
 989 * instantiation.
 990 *
 991 * Usually used directly as the @parse method in event command
 992 * implementations.
 993 *
 994 * Return: 0 on success, errno otherwise
 995 */
 996static int
 997event_trigger_parse(struct event_command *cmd_ops,
 998		    struct trace_event_file *file,
 999		    char *glob, char *cmd, char *param_and_filter)
1000{
1001	struct event_trigger_data *trigger_data;
1002	char *param, *filter;
1003	bool remove;
1004	int ret;
1005
1006	remove = event_trigger_check_remove(glob);
1007
1008	ret = event_trigger_separate_filter(param_and_filter, &param, &filter, false);
1009	if (ret)
1010		return ret;
1011
1012	ret = -ENOMEM;
1013	trigger_data = event_trigger_alloc(cmd_ops, cmd, param, file);
1014	if (!trigger_data)
1015		goto out;
1016
1017	if (remove) {
1018		event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
1019		kfree(trigger_data);
1020		ret = 0;
1021		goto out;
1022	}
1023
1024	ret = event_trigger_parse_num(param, trigger_data);
1025	if (ret)
1026		goto out_free;
1027
1028	ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
1029	if (ret < 0)
1030		goto out_free;
1031
1032	/* Up the trigger_data count to make sure reg doesn't free it on failure */
1033	event_trigger_init(trigger_data);
1034
1035	ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
1036	if (ret)
1037		goto out_free;
1038
1039	/* Drop the trigger_data reference; it is freed if no longer used */
1040	event_trigger_free(trigger_data);
1041 out:
1042	return ret;
1043
1044 out_free:
1045	event_trigger_reset_filter(cmd_ops, trigger_data);
1046	kfree(trigger_data);
1047	goto out;
1048}
1049
1050/**
1051 * set_trigger_filter - Generic event_command @set_filter implementation
1052 * @filter_str: The filter string for the trigger, NULL to remove filter
1053 * @trigger_data: Trigger-specific data
1054 * @file: The trace_event_file associated with the event
1055 *
1056 * Common implementation for event command filter parsing and filter
1057 * instantiation.
1058 *
1059 * Usually used directly as the @set_filter method in event command
1060 * implementations.
1061 *
1062 * Also used to remove a filter (if filter_str = NULL).
1063 *
1064 * Return: 0 on success, errno otherwise
1065 */
1066int set_trigger_filter(char *filter_str,
1067		       struct event_trigger_data *trigger_data,
1068		       struct trace_event_file *file)
1069{
1070	struct event_trigger_data *data = trigger_data;
1071	struct event_filter *filter = NULL, *tmp;
1072	int ret = -EINVAL;
1073	char *s;
1074
1075	if (!filter_str) /* clear the current filter */
1076		goto assign;
1077
1078	s = strsep(&filter_str, " \t");
1079
1080	if (!strlen(s) || strcmp(s, "if") != 0)
1081		goto out;
1082
1083	if (!filter_str)
1084		goto out;
1085
1086	/* The filter is for the 'trigger' event, not the triggered event */
1087	ret = create_event_filter(file->tr, file->event_call,
1088				  filter_str, true, &filter);
1089
1090	/* set_str was only enabled for error handling; free the string now */
1091	if (filter) {
1092		kfree(filter->filter_string);
1093		filter->filter_string = NULL;
1094	}
1095
1096	/*
1097	 * If create_event_filter() fails, the filter still needs to be freed;
1098	 * the calling code will do that via data->filter.
1099	 */
1100 assign:
1101	tmp = rcu_access_pointer(data->filter);
1102
1103	rcu_assign_pointer(data->filter, filter);
1104
1105	if (tmp) {
1106		/*
1107		 * Make sure the call is done with the filter.
1108		 * It is possible that a filter could fail at boot up,
1109		 * and then this path will be called. Avoid the synchronization
1110		 * in that case.
1111		 */
1112		if (system_state != SYSTEM_BOOTING)
1113			tracepoint_synchronize_unregister();
1114		free_event_filter(tmp);
1115	}
1116
1117	kfree(data->filter_str);
1118	data->filter_str = NULL;
1119
1120	if (filter_str) {
1121		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
1122		if (!data->filter_str) {
1123			free_event_filter(rcu_access_pointer(data->filter));
1124			data->filter = NULL;
1125			ret = -ENOMEM;
1126		}
1127	}
1128 out:
1129	return ret;
1130}
1131
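/*
 * Illustrative: for a filter_str of "if pid == 0", set_trigger_filter()
 * consumes the leading "if" and hands "pid == 0" to
 * create_event_filter(); calling it again with a NULL filter_str swaps
 * in a NULL filter and frees the old one.
 */
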
1132static LIST_HEAD(named_triggers);
1133
1134/**
1135 * find_named_trigger - Find the common named trigger associated with @name
1136 * @name: The name of the set of named triggers to find the common data for
1137 *
1138 * Named triggers are sets of triggers that share a common set of
1139 * trigger data.  The first named trigger registered with a given name
1140 * owns the common trigger data that the others subsequently
1141 * registered with the same name will reference.  This function
1142 * returns the common trigger data associated with that first
1143 * registered instance.
1144 *
1145 * Return: the common trigger data for the given named trigger on
1146 * success, NULL otherwise.
1147 */
1148struct event_trigger_data *find_named_trigger(const char *name)
1149{
1150	struct event_trigger_data *data;
1151
1152	if (!name)
1153		return NULL;
1154
1155	list_for_each_entry(data, &named_triggers, named_list) {
1156		if (data->named_data)
1157			continue;
1158		if (strcmp(data->name, name) == 0)
1159			return data;
1160	}
1161
1162	return NULL;
1163}
1164
1165/**
1166 * is_named_trigger - determine if a given trigger is a named trigger
1167 * @test: The trigger data to test
1168 *
1169 * Return: true if 'test' is a named trigger, false otherwise.
1170 */
1171bool is_named_trigger(struct event_trigger_data *test)
1172{
1173	struct event_trigger_data *data;
1174
1175	list_for_each_entry(data, &named_triggers, named_list) {
1176		if (test == data)
1177			return true;
1178	}
1179
1180	return false;
1181}
1182
1183/**
1184 * save_named_trigger - save the trigger in the named trigger list
1185 * @name: The name of the named trigger set
1186 * @data: The trigger data to save
1187 *
1188 * Return: 0 if successful, negative error otherwise.
1189 */
1190int save_named_trigger(const char *name, struct event_trigger_data *data)
1191{
1192	data->name = kstrdup(name, GFP_KERNEL);
1193	if (!data->name)
1194		return -ENOMEM;
1195
1196	list_add(&data->named_list, &named_triggers);
1197
1198	return 0;
1199}
1200
1201/**
1202 * del_named_trigger - delete a trigger from the named trigger list
1203 * @data: The trigger data to delete
1204 */
1205void del_named_trigger(struct event_trigger_data *data)
1206{
1207	kfree(data->name);
1208	data->name = NULL;
1209
1210	list_del(&data->named_list);
1211}
1212
1213static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
1214{
1215	struct event_trigger_data *test;
1216
1217	list_for_each_entry(test, &named_triggers, named_list) {
1218		if (strcmp(test->name, data->name) == 0) {
1219			if (pause) {
1220				test->paused_tmp = test->paused;
1221				test->paused = true;
1222			} else {
1223				test->paused = test->paused_tmp;
1224			}
1225		}
1226	}
1227}
1228
1229/**
1230 * pause_named_trigger - Pause all named triggers with the same name
1231 * @data: The trigger data of a named trigger to pause
1232 *
1233 * Pauses a named trigger along with all other triggers having the
1234 * same name.  Because named triggers share a common set of data,
1235 * pausing only one is meaningless, so pausing one named trigger needs
1236 * to pause all triggers with the same name.
1237 */
1238void pause_named_trigger(struct event_trigger_data *data)
1239{
1240	__pause_named_trigger(data, true);
1241}
1242
1243/**
1244 * unpause_named_trigger - Un-pause all named triggers with the same name
1245 * @data: The trigger data of a named trigger to unpause
1246 *
1247 * Un-pauses a named trigger along with all other triggers having the
1248 * same name.  Because named triggers share a common set of data,
1249 * unpausing only one is meaningless, so unpausing one named trigger
1250 * needs to unpause all triggers with the same name.
1251 */
1252void unpause_named_trigger(struct event_trigger_data *data)
1253{
1254	__pause_named_trigger(data, false);
1255}
1256
1257/**
1258 * set_named_trigger_data - Associate common named trigger data
1259 * @data: The trigger data to associate
1260 * @named_data: The common named trigger to be associated
1261 *
1262 * Named triggers are sets of triggers that share a common set of
1263 * trigger data.  The first named trigger registered with a given name
1264 * owns the common trigger data that the others subsequently
1265 * registered with the same name will reference.  This function
1266 * associates the common trigger data from the first trigger with the
1267 * given trigger.
1268 */
1269void set_named_trigger_data(struct event_trigger_data *data,
1270			    struct event_trigger_data *named_data)
1271{
1272	data->named_data = named_data;
1273}
1274
1275struct event_trigger_data *
1276get_named_trigger_data(struct event_trigger_data *data)
1277{
1278	return data->named_data;
1279}
1280
1281static void
1282traceon_trigger(struct event_trigger_data *data,
1283		struct trace_buffer *buffer, void *rec,
1284		struct ring_buffer_event *event)
1285{
1286	struct trace_event_file *file = data->private_data;
1287
1288	if (file) {
1289		if (tracer_tracing_is_on(file->tr))
1290			return;
1291
1292		tracer_tracing_on(file->tr);
1293		return;
1294	}
1295
1296	if (tracing_is_on())
1297		return;
1298
1299	tracing_on();
1300}
1301
1302static void
1303traceon_count_trigger(struct event_trigger_data *data,
1304		      struct trace_buffer *buffer, void *rec,
1305		      struct ring_buffer_event *event)
1306{
1307	struct trace_event_file *file = data->private_data;
1308
1309	if (file) {
1310		if (tracer_tracing_is_on(file->tr))
1311			return;
1312	} else {
1313		if (tracing_is_on())
1314			return;
1315	}
1316
1317	if (!data->count)
1318		return;
1319
1320	if (data->count != -1)
1321		(data->count)--;
1322
1323	if (file)
1324		tracer_tracing_on(file->tr);
1325	else
1326		tracing_on();
1327}
1328
1329static void
1330traceoff_trigger(struct event_trigger_data *data,
1331		 struct trace_buffer *buffer, void *rec,
1332		 struct ring_buffer_event *event)
1333{
1334	struct trace_event_file *file = data->private_data;
1335
1336	if (file) {
1337		if (!tracer_tracing_is_on(file->tr))
1338			return;
1339
1340		tracer_tracing_off(file->tr);
1341		return;
1342	}
1343
1344	if (!tracing_is_on())
1345		return;
1346
1347	tracing_off();
1348}
1349
1350static void
1351traceoff_count_trigger(struct event_trigger_data *data,
1352		       struct trace_buffer *buffer, void *rec,
1353		       struct ring_buffer_event *event)
1354{
1355	struct trace_event_file *file = data->private_data;
1356
1357	if (file) {
1358		if (!tracer_tracing_is_on(file->tr))
1359			return;
1360	} else {
1361		if (!tracing_is_on())
1362			return;
1363	}
1364
1365	if (!data->count)
1366		return;
1367
1368	if (data->count != -1)
1369		(data->count)--;
1370
1371	if (file)
1372		tracer_tracing_off(file->tr);
1373	else
1374		tracing_off();
1375}
1376
1377static int
1378traceon_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1379{
1380	return event_trigger_print("traceon", m, (void *)data->count,
1381				   data->filter_str);
1382}
1383
1384static int
1385traceoff_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1386{
1387	return event_trigger_print("traceoff", m, (void *)data->count,
1388				   data->filter_str);
1389}
1390
1391static struct event_trigger_ops traceon_trigger_ops = {
1392	.trigger		= traceon_trigger,
1393	.print			= traceon_trigger_print,
1394	.init			= event_trigger_init,
1395	.free			= event_trigger_free,
1396};
1397
1398static struct event_trigger_ops traceon_count_trigger_ops = {
1399	.trigger		= traceon_count_trigger,
1400	.print			= traceon_trigger_print,
1401	.init			= event_trigger_init,
1402	.free			= event_trigger_free,
1403};
1404
1405static struct event_trigger_ops traceoff_trigger_ops = {
1406	.trigger		= traceoff_trigger,
1407	.print			= traceoff_trigger_print,
1408	.init			= event_trigger_init,
1409	.free			= event_trigger_free,
1410};
1411
1412static struct event_trigger_ops traceoff_count_trigger_ops = {
1413	.trigger		= traceoff_count_trigger,
1414	.print			= traceoff_trigger_print,
1415	.init			= event_trigger_init,
1416	.free			= event_trigger_free,
1417};
1418
1419static struct event_trigger_ops *
1420onoff_get_trigger_ops(char *cmd, char *param)
1421{
1422	struct event_trigger_ops *ops;
1423
1424	/* we register both traceon and traceoff to this callback */
1425	if (strcmp(cmd, "traceon") == 0)
1426		ops = param ? &traceon_count_trigger_ops :
1427			&traceon_trigger_ops;
1428	else
1429		ops = param ? &traceoff_count_trigger_ops :
1430			&traceoff_trigger_ops;
1431
1432	return ops;
1433}
1434
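/*
 * Illustrative: "echo traceon > trigger" carries no param and picks
 * traceon_trigger_ops, while "echo traceon:5 > trigger" has a param and
 * picks traceon_count_trigger_ops; the same applies to traceoff.
 */
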
1435static struct event_command trigger_traceon_cmd = {
1436	.name			= "traceon",
1437	.trigger_type		= ETT_TRACE_ONOFF,
1438	.parse			= event_trigger_parse,
1439	.reg			= register_trigger,
1440	.unreg			= unregister_trigger,
1441	.get_trigger_ops	= onoff_get_trigger_ops,
1442	.set_filter		= set_trigger_filter,
1443};
1444
1445static struct event_command trigger_traceoff_cmd = {
1446	.name			= "traceoff",
1447	.trigger_type		= ETT_TRACE_ONOFF,
1448	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1449	.parse			= event_trigger_parse,
1450	.reg			= register_trigger,
1451	.unreg			= unregister_trigger,
1452	.get_trigger_ops	= onoff_get_trigger_ops,
1453	.set_filter		= set_trigger_filter,
1454};
1455
1456#ifdef CONFIG_TRACER_SNAPSHOT
1457static void
1458snapshot_trigger(struct event_trigger_data *data,
1459		 struct trace_buffer *buffer, void *rec,
1460		 struct ring_buffer_event *event)
1461{
1462	struct trace_event_file *file = data->private_data;
1463
1464	if (file)
1465		tracing_snapshot_instance(file->tr);
1466	else
1467		tracing_snapshot();
1468}
1469
1470static void
1471snapshot_count_trigger(struct event_trigger_data *data,
1472		       struct trace_buffer *buffer, void *rec,
1473		       struct ring_buffer_event *event)
1474{
1475	if (!data->count)
1476		return;
1477
1478	if (data->count != -1)
1479		(data->count)--;
1480
1481	snapshot_trigger(data, buffer, rec, event);
1482}
1483
1484static int
1485register_snapshot_trigger(char *glob,
1486			  struct event_trigger_data *data,
1487			  struct trace_event_file *file)
1488{
1489	int ret = tracing_arm_snapshot(file->tr);
1490
1491	if (ret < 0)
1492		return ret;
1493
1494	ret = register_trigger(glob, data, file);
1495	if (ret < 0)
1496		tracing_disarm_snapshot(file->tr);
1497	return ret;
1498}
1499
1500static void unregister_snapshot_trigger(char *glob,
1501					struct event_trigger_data *data,
1502					struct trace_event_file *file)
1503{
1504	if (try_unregister_trigger(glob, data, file))
1505		tracing_disarm_snapshot(file->tr);
1506}
1507
1508static int
1509snapshot_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1510{
1511	return event_trigger_print("snapshot", m, (void *)data->count,
1512				   data->filter_str);
1513}
1514
1515static struct event_trigger_ops snapshot_trigger_ops = {
1516	.trigger		= snapshot_trigger,
1517	.print			= snapshot_trigger_print,
1518	.init			= event_trigger_init,
1519	.free			= event_trigger_free,
1520};
1521
1522static struct event_trigger_ops snapshot_count_trigger_ops = {
1523	.trigger		= snapshot_count_trigger,
1524	.print			= snapshot_trigger_print,
1525	.init			= event_trigger_init,
1526	.free			= event_trigger_free,
1527};
1528
1529static struct event_trigger_ops *
1530snapshot_get_trigger_ops(char *cmd, char *param)
1531{
1532	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1533}
1534
1535static struct event_command trigger_snapshot_cmd = {
1536	.name			= "snapshot",
1537	.trigger_type		= ETT_SNAPSHOT,
1538	.parse			= event_trigger_parse,
1539	.reg			= register_snapshot_trigger,
1540	.unreg			= unregister_snapshot_trigger,
1541	.get_trigger_ops	= snapshot_get_trigger_ops,
1542	.set_filter		= set_trigger_filter,
1543};
1544
1545static __init int register_trigger_snapshot_cmd(void)
1546{
1547	int ret;
1548
1549	ret = register_event_command(&trigger_snapshot_cmd);
1550	WARN_ON(ret < 0);
1551
1552	return ret;
1553}
1554#else
1555static __init int register_trigger_snapshot_cmd(void) { return 0; }
1556#endif /* CONFIG_TRACER_SNAPSHOT */
1557
1558#ifdef CONFIG_STACKTRACE
1559#ifdef CONFIG_UNWINDER_ORC
1560/* Skip 2:
1561 *   event_triggers_post_call()
1562 *   trace_event_raw_event_xxx()
1563 */
1564# define STACK_SKIP 2
1565#else
1566/*
1567 * Skip 4:
1568 *   stacktrace_trigger()
1569 *   event_triggers_post_call()
1570 *   trace_event_buffer_commit()
1571 *   trace_event_raw_event_xxx()
1572 */
1573#define STACK_SKIP 4
1574#endif
1575
1576static void
1577stacktrace_trigger(struct event_trigger_data *data,
1578		   struct trace_buffer *buffer,  void *rec,
1579		   struct ring_buffer_event *event)
1580{
1581	struct trace_event_file *file = data->private_data;
1582
1583	if (file)
1584		__trace_stack(file->tr, tracing_gen_ctx(), STACK_SKIP);
1585	else
1586		trace_dump_stack(STACK_SKIP);
1587}
1588
1589static void
1590stacktrace_count_trigger(struct event_trigger_data *data,
1591			 struct trace_buffer *buffer, void *rec,
1592			 struct ring_buffer_event *event)
1593{
1594	if (!data->count)
1595		return;
1596
1597	if (data->count != -1)
1598		(data->count)--;
1599
1600	stacktrace_trigger(data, buffer, rec, event);
1601}
1602
1603static int
1604stacktrace_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1605{
1606	return event_trigger_print("stacktrace", m, (void *)data->count,
1607				   data->filter_str);
1608}
1609
1610static struct event_trigger_ops stacktrace_trigger_ops = {
1611	.trigger		= stacktrace_trigger,
1612	.print			= stacktrace_trigger_print,
1613	.init			= event_trigger_init,
1614	.free			= event_trigger_free,
1615};
1616
1617static struct event_trigger_ops stacktrace_count_trigger_ops = {
1618	.trigger		= stacktrace_count_trigger,
1619	.print			= stacktrace_trigger_print,
1620	.init			= event_trigger_init,
1621	.free			= event_trigger_free,
1622};
1623
1624static struct event_trigger_ops *
1625stacktrace_get_trigger_ops(char *cmd, char *param)
1626{
1627	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1628}
1629
1630static struct event_command trigger_stacktrace_cmd = {
1631	.name			= "stacktrace",
1632	.trigger_type		= ETT_STACKTRACE,
1633	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1634	.parse			= event_trigger_parse,
1635	.reg			= register_trigger,
1636	.unreg			= unregister_trigger,
1637	.get_trigger_ops	= stacktrace_get_trigger_ops,
1638	.set_filter		= set_trigger_filter,
1639};
1640
1641static __init int register_trigger_stacktrace_cmd(void)
1642{
1643	int ret;
1644
1645	ret = register_event_command(&trigger_stacktrace_cmd);
1646	WARN_ON(ret < 0);
1647
1648	return ret;
1649}
1650#else
1651static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1652#endif /* CONFIG_STACKTRACE */
1653
1654static __init void unregister_trigger_traceon_traceoff_cmds(void)
1655{
1656	unregister_event_command(&trigger_traceon_cmd);
1657	unregister_event_command(&trigger_traceoff_cmd);
1658}
1659
1660static void
1661event_enable_trigger(struct event_trigger_data *data,
1662		     struct trace_buffer *buffer,  void *rec,
1663		     struct ring_buffer_event *event)
1664{
1665	struct enable_trigger_data *enable_data = data->private_data;
1666
1667	if (enable_data->enable)
1668		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1669	else
1670		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1671}
1672
1673static void
1674event_enable_count_trigger(struct event_trigger_data *data,
1675			   struct trace_buffer *buffer,  void *rec,
1676			   struct ring_buffer_event *event)
1677{
1678	struct enable_trigger_data *enable_data = data->private_data;
1679
1680	if (!data->count)
1681		return;
1682
1683	/* Skip if the event is in a state we want to switch to */
1684	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1685		return;
1686
1687	if (data->count != -1)
1688		(data->count)--;
1689
1690	event_enable_trigger(data, buffer, rec, event);
1691}
1692
1693int event_enable_trigger_print(struct seq_file *m,
1694			       struct event_trigger_data *data)
1695{
1696	struct enable_trigger_data *enable_data = data->private_data;
1697
1698	seq_printf(m, "%s:%s:%s",
1699		   enable_data->hist ?
1700		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1701		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1702		   enable_data->file->event_call->class->system,
1703		   trace_event_name(enable_data->file->event_call));
1704
1705	if (data->count == -1)
1706		seq_puts(m, ":unlimited");
1707	else
1708		seq_printf(m, ":count=%ld", data->count);
1709
1710	if (data->filter_str)
1711		seq_printf(m, " if %s\n", data->filter_str);
1712	else
1713		seq_putc(m, '\n');
1714
1715	return 0;
1716}
1717
1718void event_enable_trigger_free(struct event_trigger_data *data)
1719{
1720	struct enable_trigger_data *enable_data = data->private_data;
1721
1722	if (WARN_ON_ONCE(data->ref <= 0))
1723		return;
1724
1725	data->ref--;
1726	if (!data->ref) {
1727		/* Remove the SOFT_MODE flag */
1728		trace_event_enable_disable(enable_data->file, 0, 1);
1729		trace_event_put_ref(enable_data->file->event_call);
1730		trigger_data_free(data);
1731		kfree(enable_data);
1732	}
1733}
1734
1735static struct event_trigger_ops event_enable_trigger_ops = {
1736	.trigger		= event_enable_trigger,
1737	.print			= event_enable_trigger_print,
1738	.init			= event_trigger_init,
1739	.free			= event_enable_trigger_free,
1740};
1741
1742static struct event_trigger_ops event_enable_count_trigger_ops = {
1743	.trigger		= event_enable_count_trigger,
1744	.print			= event_enable_trigger_print,
1745	.init			= event_trigger_init,
1746	.free			= event_enable_trigger_free,
1747};
1748
1749static struct event_trigger_ops event_disable_trigger_ops = {
1750	.trigger		= event_enable_trigger,
1751	.print			= event_enable_trigger_print,
1752	.init			= event_trigger_init,
1753	.free			= event_enable_trigger_free,
1754};
1755
1756static struct event_trigger_ops event_disable_count_trigger_ops = {
1757	.trigger		= event_enable_count_trigger,
1758	.print			= event_enable_trigger_print,
1759	.init			= event_trigger_init,
1760	.free			= event_enable_trigger_free,
1761};
1762
1763int event_enable_trigger_parse(struct event_command *cmd_ops,
1764			       struct trace_event_file *file,
1765			       char *glob, char *cmd, char *param_and_filter)
1766{
1767	struct trace_event_file *event_enable_file;
1768	struct enable_trigger_data *enable_data;
1769	struct event_trigger_data *trigger_data;
1770	struct trace_array *tr = file->tr;
1771	char *param, *filter;
1772	bool enable, remove;
1773	const char *system;
1774	const char *event;
1775	bool hist = false;
1776	int ret;
1777
1778	remove = event_trigger_check_remove(glob);
1779
1780	if (event_trigger_empty_param(param_and_filter))
1781		return -EINVAL;
1782
1783	ret = event_trigger_separate_filter(param_and_filter, &param, &filter, true);
1784	if (ret)
1785		return ret;
1786
1787	system = strsep(&param, ":");
1788	if (!param)
1789		return -EINVAL;
1790
1791	event = strsep(&param, ":");
1792
1793	ret = -EINVAL;
1794	event_enable_file = find_event_file(tr, system, event);
1795	if (!event_enable_file)
1796		goto out;
1797
1798#ifdef CONFIG_HIST_TRIGGERS
1799	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1800		(strcmp(cmd, DISABLE_HIST_STR) == 0));
1801
1802	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1803		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1804#else
1805	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1806#endif
1807	ret = -ENOMEM;
1808
1809	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1810	if (!enable_data)
1811		goto out;
1812
1813	enable_data->hist = hist;
1814	enable_data->enable = enable;
1815	enable_data->file = event_enable_file;
1816
1817	trigger_data = event_trigger_alloc(cmd_ops, cmd, param, enable_data);
1818	if (!trigger_data) {
1819		kfree(enable_data);
1820		goto out;
1821	}
1822
1823	if (remove) {
1824		event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
1825		kfree(trigger_data);
1826		kfree(enable_data);
1827		ret = 0;
1828		goto out;
1829	}
1830
1831	/* Up the trigger_data count to make sure nothing frees it on failure */
1832	event_trigger_init(trigger_data);
1833
1834	ret = event_trigger_parse_num(param, trigger_data);
1835	if (ret)
1836		goto out_free;
1837
1838	ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
1839	if (ret < 0)
1840		goto out_free;
1841
1842	/* Don't let event modules unload while probe registered */
1843	ret = trace_event_try_get_ref(event_enable_file->event_call);
1844	if (!ret) {
1845		ret = -EBUSY;
1846		goto out_free;
1847	}
1848
1849	ret = trace_event_enable_disable(event_enable_file, 1, 1);
1850	if (ret < 0)
1851		goto out_put;
1852
1853	ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
1854	if (ret)
1855		goto out_disable;
1856
1857	event_trigger_free(trigger_data);
1858 out:
1859	return ret;
1860 out_disable:
1861	trace_event_enable_disable(event_enable_file, 0, 1);
1862 out_put:
1863	trace_event_put_ref(event_enable_file->event_call);
1864 out_free:
1865	event_trigger_reset_filter(cmd_ops, trigger_data);
1866	event_trigger_free(trigger_data);
1867	kfree(enable_data);
1868
1869	goto out;
1870}
1871
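/*
 * Illustrative (the field name in the filter is hypothetical):
 * "echo 'enable_event:sched:sched_switch:2 if prio < 100' > trigger"
 * arrives with param_and_filter "sched:sched_switch:2 if prio < 100";
 * "sched" and "sched_switch" are split off to look up the target event
 * file, the remaining "2" becomes the count and "if prio < 100" becomes
 * the filter on the triggering event.
 */
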
1872int event_enable_register_trigger(char *glob,
1873				  struct event_trigger_data *data,
1874				  struct trace_event_file *file)
1875{
1876	struct enable_trigger_data *enable_data = data->private_data;
1877	struct enable_trigger_data *test_enable_data;
1878	struct event_trigger_data *test;
1879	int ret = 0;
1880
1881	lockdep_assert_held(&event_mutex);
1882
1883	list_for_each_entry(test, &file->triggers, list) {
1884		test_enable_data = test->private_data;
1885		if (test_enable_data &&
1886		    (test->cmd_ops->trigger_type ==
1887		     data->cmd_ops->trigger_type) &&
1888		    (test_enable_data->file == enable_data->file)) {
1889			ret = -EEXIST;
1890			goto out;
1891		}
1892	}
1893
1894	if (data->ops->init) {
1895		ret = data->ops->init(data);
1896		if (ret < 0)
1897			goto out;
1898	}
1899
1900	list_add_rcu(&data->list, &file->triggers);
1901
1902	update_cond_flag(file);
1903	ret = trace_event_trigger_enable_disable(file, 1);
1904	if (ret < 0) {
1905		list_del_rcu(&data->list);
1906		update_cond_flag(file);
1907	}
1908out:
1909	return ret;
1910}
1911
1912void event_enable_unregister_trigger(char *glob,
1913				     struct event_trigger_data *test,
1914				     struct trace_event_file *file)
1915{
1916	struct enable_trigger_data *test_enable_data = test->private_data;
1917	struct event_trigger_data *data = NULL, *iter;
1918	struct enable_trigger_data *enable_data;
1919
1920	lockdep_assert_held(&event_mutex);
1921
1922	list_for_each_entry(iter, &file->triggers, list) {
1923		enable_data = iter->private_data;
1924		if (enable_data &&
1925		    (iter->cmd_ops->trigger_type ==
1926		     test->cmd_ops->trigger_type) &&
1927		    (enable_data->file == test_enable_data->file)) {
1928			data = iter;
1929			list_del_rcu(&data->list);
1930			trace_event_trigger_enable_disable(file, 0);
1931			update_cond_flag(file);
1932			break;
1933		}
1934	}
1935
1936	if (data && data->ops->free)
1937		data->ops->free(data);
1938}
1939
1940static struct event_trigger_ops *
1941event_enable_get_trigger_ops(char *cmd, char *param)
1942{
1943	struct event_trigger_ops *ops;
1944	bool enable;
1945
1946#ifdef CONFIG_HIST_TRIGGERS
1947	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1948		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1949#else
1950	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1951#endif
1952	if (enable)
1953		ops = param ? &event_enable_count_trigger_ops :
1954			&event_enable_trigger_ops;
1955	else
1956		ops = param ? &event_disable_count_trigger_ops :
1957			&event_disable_trigger_ops;
1958
1959	return ops;
1960}
1961
1962static struct event_command trigger_enable_cmd = {
1963	.name			= ENABLE_EVENT_STR,
1964	.trigger_type		= ETT_EVENT_ENABLE,
1965	.parse			= event_enable_trigger_parse,
1966	.reg			= event_enable_register_trigger,
1967	.unreg			= event_enable_unregister_trigger,
1968	.get_trigger_ops	= event_enable_get_trigger_ops,
1969	.set_filter		= set_trigger_filter,
1970};
1971
1972static struct event_command trigger_disable_cmd = {
1973	.name			= DISABLE_EVENT_STR,
1974	.trigger_type		= ETT_EVENT_ENABLE,
1975	.parse			= event_enable_trigger_parse,
1976	.reg			= event_enable_register_trigger,
1977	.unreg			= event_enable_unregister_trigger,
1978	.get_trigger_ops	= event_enable_get_trigger_ops,
1979	.set_filter		= set_trigger_filter,
1980};
1981
1982static __init void unregister_trigger_enable_disable_cmds(void)
1983{
1984	unregister_event_command(&trigger_enable_cmd);
1985	unregister_event_command(&trigger_disable_cmd);
1986}
1987
1988static __init int register_trigger_enable_disable_cmds(void)
1989{
1990	int ret;
1991
1992	ret = register_event_command(&trigger_enable_cmd);
1993	if (WARN_ON(ret < 0))
1994		return ret;
1995	ret = register_event_command(&trigger_disable_cmd);
1996	if (WARN_ON(ret < 0))
1997		unregister_trigger_enable_disable_cmds();
1998
1999	return ret;
2000}
2001
2002static __init int register_trigger_traceon_traceoff_cmds(void)
2003{
2004	int ret;
2005
2006	ret = register_event_command(&trigger_traceon_cmd);
2007	if (WARN_ON(ret < 0))
2008		return ret;
2009	ret = register_event_command(&trigger_traceoff_cmd);
2010	if (WARN_ON(ret < 0))
2011		unregister_trigger_traceon_traceoff_cmds();
2012
2013	return ret;
2014}
2015
2016__init int register_trigger_cmds(void)
2017{
2018	register_trigger_traceon_traceoff_cmds();
2019	register_trigger_snapshot_cmd();
2020	register_trigger_stacktrace_cmd();
2021	register_trigger_enable_disable_cmds();
2022	register_trigger_hist_enable_disable_cmds();
2023	register_trigger_hist_cmd();
2024
2025	return 0;
2026}