// SPDX-License-Identifier: GPL-2.0
/*
 * trace_events_trigger - trace event triggers
 *
 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/security.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "trace.h"

static LIST_HEAD(trigger_commands);
static DEFINE_MUTEX(trigger_cmd_mutex);

void trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	/* make sure current triggers exit before free */
	tracepoint_synchronize_unregister();

	kfree(data);
}

/**
 * event_triggers_call - Call triggers associated with a trace event
 * @file: The trace_event_file associated with the event
 * @buffer: The trace buffer the event is being written to
 * @rec: The trace entry for the event, NULL for unconditional invocation
 * @event: The ring buffer event associated with @rec
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command. If rec is
 * non-NULL, it means that the trigger requires further processing and
 * shouldn't be unconditionally invoked. If rec is non-NULL and the
 * trigger has a filter associated with it, rec will be checked against
 * the filter, and the trigger will be invoked only if the record matches.
 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
 * in any case until the current event is written, the trigger
 * function isn't invoked but the bit associated with the deferred
 * trigger is set in the return value.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 *
 * Return: an enum event_trigger_type value containing a set bit for
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
enum event_trigger_type
event_triggers_call(struct trace_event_file *file,
		    struct trace_buffer *buffer, void *rec,
		    struct ring_buffer_event *event)
{
	struct event_trigger_data *data;
	enum event_trigger_type tt = ETT_NONE;
	struct event_filter *filter;

	if (list_empty(&file->triggers))
		return tt;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (!rec) {
			data->ops->func(data, buffer, rec, event);
			continue;
		}
		filter = rcu_dereference_sched(data->filter);
		if (filter && !filter_match_preds(filter, rec))
			continue;
		if (event_command_post_trigger(data->cmd_ops)) {
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data->ops->func(data, buffer, rec, event);
	}
	return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);

/**
 * event_triggers_post_call - Call 'post_triggers' for a trace event
 * @file: The trace_event_file associated with the event
 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command, if the
 * corresponding bit is set in the tt enum passed into this function.
 * See @event_triggers_call for details on how those bits are set.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 */
void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt)
{
	struct event_trigger_data *data;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (data->cmd_ops->trigger_type & tt)
			data->ops->func(data, NULL, NULL, NULL);
	}
}
EXPORT_SYMBOL_GPL(event_triggers_post_call);
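
/*
 * Illustrative sketch (not part of the original file): how the two entry
 * points above are typically paired around writing an event.  The commit
 * helper named here is a hypothetical stand-in for the real ring-buffer
 * reserve/commit path.
 *
 *	enum event_trigger_type tt;
 *
 *	tt = event_triggers_call(file, buffer, entry, event);
 *	my_ring_buffer_commit(buffer, event);		// write the event first
 *	if (tt)
 *		event_triggers_post_call(file, tt);	// then run deferred triggers
 */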

#define SHOW_AVAILABLE_TRIGGERS (void *)(1UL)

static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
	struct trace_event_file *event_file = event_file_data(m->private);

	if (t == SHOW_AVAILABLE_TRIGGERS) {
		(*pos)++;
		return NULL;
	}
	return seq_list_next(t, &event_file->triggers, pos);
}

static void *trigger_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *event_file;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	event_file = event_file_data(m->private);
	if (unlikely(!event_file))
		return ERR_PTR(-ENODEV);

	if (list_empty(&event_file->triggers))
		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;

	return seq_list_start(&event_file->triggers, *pos);
}

static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}

static int trigger_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;
	struct event_command *p;

	if (v == SHOW_AVAILABLE_TRIGGERS) {
		seq_puts(m, "# Available triggers:\n");
		seq_putc(m, '#');
		mutex_lock(&trigger_cmd_mutex);
		list_for_each_entry_reverse(p, &trigger_commands, list)
			seq_printf(m, " %s", p->name);
		seq_putc(m, '\n');
		mutex_unlock(&trigger_cmd_mutex);
		return 0;
	}

	data = list_entry(v, struct event_trigger_data, list);
	data->ops->print(m, data->ops, data);

	return 0;
}

static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};

static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	mutex_lock(&event_mutex);

	if (unlikely(!event_file_data(file))) {
		mutex_unlock(&event_mutex);
		return -ENODEV;
	}

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		struct trace_event_file *event_file;
		struct event_command *p;

		event_file = event_file_data(file);

		list_for_each_entry(p, &trigger_commands, list) {
			if (p->unreg_all)
				p->unreg_all(event_file);
		}
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = file;
		}
	}

	mutex_unlock(&event_mutex);

	return ret;
}

int trigger_process_regex(struct trace_event_file *file, char *buff)
{
	char *command, *next;
	struct event_command *p;
	int ret = -EINVAL;

	next = buff = skip_spaces(buff);
	command = strsep(&next, ": \t");
	if (next) {
		next = skip_spaces(next);
		if (!*next)
			next = NULL;
	}
	command = (command[0] != '!') ? command : command + 1;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(p, file, buff, command, next);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}
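
/*
 * Example command strings handled by trigger_process_regex(), as written
 * to a per-event 'trigger' file under tracefs (the event and field names
 * below are only illustrations):
 *
 *	echo 'traceoff:5 if prev_pid == 1' > \
 *		/sys/kernel/tracing/events/sched/sched_switch/trigger
 *	echo '!traceoff' > \
 *		/sys/kernel/tracing/events/sched/sched_switch/trigger
 *
 * A leading '!' selects the command's unregister path; everything after
 * the first separator ('5 if prev_pid == 1' above) is passed to the
 * command's func() as @param.
 */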

static ssize_t event_trigger_regex_write(struct file *file,
					 const char __user *ubuf,
					 size_t cnt, loff_t *ppos)
{
	struct trace_event_file *event_file;
	ssize_t ret;
	char *buf;

	if (!cnt)
		return 0;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	strim(buf);

	mutex_lock(&event_mutex);
	event_file = event_file_data(file);
	if (unlikely(!event_file)) {
		mutex_unlock(&event_mutex);
		kfree(buf);
		return -ENODEV;
	}
	ret = trigger_process_regex(event_file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (ret < 0)
		goto out;

	*ppos += cnt;
	ret = cnt;
 out:
	return ret;
}

static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}

static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}

static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	/* Checks for tracefs lockdown */
	return event_trigger_regex_open(inode, filp);
}

static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}

const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};

/*
 * Currently we only register event commands from __init, so mark this
 * __init too.
 */
__init int register_event_command(struct event_command *cmd)
{
	struct event_command *p;
	int ret = 0;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &trigger_commands);
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}
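
/*
 * Minimal sketch of how a new trigger command could be wired up with
 * register_event_command().  The 'mytrigger' name and its get_trigger_ops
 * callback are hypothetical; the generic helpers referenced
 * (event_trigger_callback, register_trigger, unregister_trigger,
 * set_trigger_filter) are the ones defined in this file, and a real
 * command would use its own ETT_* bit from enum event_trigger_type
 * rather than reusing ETT_TRACE_ONOFF as shown here.
 *
 *	static struct event_command mytrigger_cmd = {
 *		.name			= "mytrigger",
 *		.trigger_type		= ETT_TRACE_ONOFF,
 *		.func			= event_trigger_callback,
 *		.reg			= register_trigger,
 *		.unreg			= unregister_trigger,
 *		.get_trigger_ops	= mytrigger_get_trigger_ops,
 *		.set_filter		= set_trigger_filter,
 *	};
 *
 *	static __init int register_mytrigger_cmd(void)
 *	{
 *		return register_event_command(&mytrigger_cmd);
 *	}
 */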

/*
 * Currently we only unregister event commands from __init, so mark
 * this __init too.
 */
__init int unregister_event_command(struct event_command *cmd)
{
	struct event_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry_safe(p, n, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/**
 * event_trigger_print - Generic event_trigger_ops @print implementation
 * @name: The name of the event trigger
 * @m: The seq_file being printed to
 * @data: Trigger-specific data
 * @filter_str: filter_str to print, if present
 *
 * Common implementation for event triggers to print themselves.
 *
 * Usually wrapped by a function that simply sets the @name of the
 * trigger command and then invokes this.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_print(const char *name, struct seq_file *m,
		    void *data, char *filter_str)
{
	long count = (long)data;

	seq_puts(m, name);

	if (count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", count);

	if (filter_str)
		seq_printf(m, " if %s\n", filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}
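
/*
 * For example, a trigger registered as 'traceoff:5 if prev_pid == 1'
 * (names purely illustrative) would be shown by this helper as:
 *
 *	traceoff:count=5 if prev_pid == 1
 *
 * and one registered without a count as:
 *
 *	traceoff:unlimited
 */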

/**
 * event_trigger_init - Generic event_trigger_ops @init implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization.
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
int event_trigger_init(struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	data->ref++;
	return 0;
}

/**
 * event_trigger_free - Generic event_trigger_ops @free implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger de-initialization.
 *
 * Usually used directly as the @free method in event trigger
 * implementations.
 */
static void
event_trigger_free(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref)
		trigger_data_free(data);
}

int trace_event_trigger_enable_disable(struct trace_event_file *file,
				       int trigger_enable)
{
	int ret = 0;

	if (trigger_enable) {
		if (atomic_inc_return(&file->tm_ref) > 1)
			return ret;
		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 1, 1);
	} else {
		if (atomic_dec_return(&file->tm_ref) > 0)
			return ret;
		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 0, 1);
	}

	return ret;
}

/**
 * clear_event_triggers - Clear all triggers associated with a trace array
 * @tr: The trace array to clear
 *
 * For each trigger, the triggering event has its tm_ref decremented
 * via trace_event_trigger_enable_disable(), and any associated event
 * (in the case of enable/disable_event triggers) will have its sm_ref
 * decremented via free()->trace_event_enable_disable(). That
 * combination effectively reverses the soft-mode/trigger state added
 * by trigger registration.
 *
 * Must be called with event_mutex held.
 */
void
clear_event_triggers(struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list) {
		struct event_trigger_data *data, *n;
		list_for_each_entry_safe(data, n, &file->triggers, list) {
			trace_event_trigger_enable_disable(file, 0);
			list_del_rcu(&data->list);
			if (data->ops->free)
				data->ops->free(data->ops, data);
		}
	}
}

/**
 * update_cond_flag - Set or reset the TRIGGER_COND bit
 * @file: The trace_event_file associated with the event
 *
501 * If an event has triggers and any of those triggers has a filter or
502 * a post_trigger, trigger invocation needs to be deferred until after
503 * the current event has logged its data, and the event should have
504 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
505 * cleared.
506 */
507void update_cond_flag(struct trace_event_file *file)
508{
509 struct event_trigger_data *data;
510 bool set_cond = false;
511
512 lockdep_assert_held(&event_mutex);
513
514 list_for_each_entry(data, &file->triggers, list) {
515 if (data->filter || event_command_post_trigger(data->cmd_ops) ||
516 event_command_needs_rec(data->cmd_ops)) {
517 set_cond = true;
518 break;
519 }
520 }
521
522 if (set_cond)
523 set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
524 else
525 clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
526}
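
/*
 * Example, derived from the commands in this file: a bare 'traceon'
 * trigger needs neither the trace record nor deferral, so TRIGGER_COND
 * stays clear and it can be fired directly from event_triggers_call().
 * A 'traceon if common_pid == 1234' trigger has a filter, and a
 * 'stacktrace' trigger is flagged EVENT_CMD_FL_POST_TRIGGER; either one
 * turns TRIGGER_COND on so invocation waits until the record is
 * available.
 */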

/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int register_trigger(char *glob, struct event_trigger_ops *ops,
			    struct event_trigger_data *data,
			    struct trace_event_file *file)
{
	struct event_trigger_data *test;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}

/**
 * unregister_trigger - Generic event_command @unreg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @test: Trigger-specific data used to find the trigger to remove
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.
 *
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			       struct event_trigger_data *test,
			       struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool unregistered = false;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

/**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.
 *
 * Usually used directly as the @func method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_callback(struct event_command *cmd_ops,
		       struct trace_event_file *file,
		       char *glob, char *cmd, char *param)
{
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	char *trigger = NULL;
	char *number;
	int ret;

	/* separate the trigger from the filter (t:n [if filter]) */
	if (param && isdigit(param[0])) {
		trigger = strsep(&param, " \t");
		if (param) {
			param = skip_spaces(param);
			if (!*param)
				param = NULL;
		}
	}

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	trigger_data->private_data = file;
	INIT_LIST_HEAD(&trigger_data->list);
	INIT_LIST_HEAD(&trigger_data->named_list);

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Up the trigger_data count to make sure reg doesn't free it on failure */
	event_trigger_init(trigger_ops, trigger_data);
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
		ret = -ENOENT;
	} else if (ret > 0)
		ret = 0;

	/* Down the counter of trigger_data or free it if not used anymore */
	event_trigger_free(trigger_ops, trigger_data);
 out:
	return ret;

 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	goto out;
}
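
/*
 * Worked example (illustrative event and filter) of what the callback
 * above parses.  Writing the string
 *
 *	traceoff:5 if prev_prio < 100
 *
 * to an event's "trigger" file, e.g. under
 * /sys/kernel/tracing/events/sched/sched_switch/, roughly arrives here
 * with @cmd "traceoff" and @param "5 if prev_prio < 100": "5" is parsed
 * into trigger_data->count and the rest is handed to ->set_filter().
 * Writing "!traceoff" to the same file takes the glob[0] == '!' path and
 * unregisters the trigger instead.
 */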

/**
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.
 *
 * Usually used directly as the @set_filter method in event command
 * implementations.
 *
 * Also used to remove a filter (if filter_str = NULL).
 *
 * Return: 0 on success, errno otherwise
 */
int set_trigger_filter(char *filter_str,
		       struct event_trigger_data *trigger_data,
		       struct trace_event_file *file)
{
	struct event_trigger_data *data = trigger_data;
	struct event_filter *filter = NULL, *tmp;
	int ret = -EINVAL;
	char *s;

	if (!filter_str) /* clear the current filter */
		goto assign;

	s = strsep(&filter_str, " \t");

	if (!strlen(s) || strcmp(s, "if") != 0)
		goto out;

	if (!filter_str)
		goto out;

	/* The filter is for the 'trigger' event, not the triggered event */
	ret = create_event_filter(file->tr, file->event_call,
				  filter_str, false, &filter);
	/*
	 * If create_event_filter() fails, filter still needs to be freed.
	 * Which the calling code will do with data->filter.
	 */
 assign:
	tmp = rcu_access_pointer(data->filter);

	rcu_assign_pointer(data->filter, filter);

	if (tmp) {
		/* Make sure the call is done with the filter */
		tracepoint_synchronize_unregister();
		free_event_filter(tmp);
	}

	kfree(data->filter_str);
	data->filter_str = NULL;

	if (filter_str) {
		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
		if (!data->filter_str) {
			free_event_filter(rcu_access_pointer(data->filter));
			data->filter = NULL;
			ret = -ENOMEM;
		}
	}
 out:
	return ret;
}
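
/*
 * Example of the filter handling above: for a trigger registered as
 * 'traceoff if common_pid == 1234' on sched:sched_switch, @filter_str
 * arrives as "if common_pid == 1234"; the leading "if" is consumed and
 * "common_pid == 1234" is compiled against sched_switch's fields, i.e.
 * against the triggering event, not whatever the trigger acts on.
 */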

static LIST_HEAD(named_triggers);

/**
 * find_named_trigger - Find the common named trigger associated with @name
 * @name: The name of the set of named triggers to find the common data for
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data. The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference. This function
 * returns the common trigger data associated with that first
 * registered instance.
 *
 * Return: the common trigger data for the given named trigger on
 * success, NULL otherwise.
 */
struct event_trigger_data *find_named_trigger(const char *name)
{
	struct event_trigger_data *data;

	if (!name)
		return NULL;

	list_for_each_entry(data, &named_triggers, named_list) {
		if (data->named_data)
			continue;
		if (strcmp(data->name, name) == 0)
			return data;
	}

	return NULL;
}
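
/*
 * Example (hist triggers are the main user of named triggers): attaching
 *
 *	hist:name=foo:keys=common_pid
 *
 * to two different events is meant to produce one shared histogram; the
 * second registration looks up the first via find_named_trigger("foo")
 * and points its named_data at that common data.
 */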

/**
 * is_named_trigger - determine if a given trigger is a named trigger
 * @test: The trigger data to test
 *
 * Return: true if 'test' is a named trigger, false otherwise.
 */
bool is_named_trigger(struct event_trigger_data *test)
{
	struct event_trigger_data *data;

	list_for_each_entry(data, &named_triggers, named_list) {
		if (test == data)
			return true;
	}

	return false;
}

/**
 * save_named_trigger - save the trigger in the named trigger list
 * @name: The name of the named trigger set
 * @data: The trigger data to save
 *
 * Return: 0 if successful, negative error otherwise.
 */
int save_named_trigger(const char *name, struct event_trigger_data *data)
{
	data->name = kstrdup(name, GFP_KERNEL);
	if (!data->name)
		return -ENOMEM;

	list_add(&data->named_list, &named_triggers);

	return 0;
}

/**
 * del_named_trigger - delete a trigger from the named trigger list
 * @data: The trigger data to delete
 */
void del_named_trigger(struct event_trigger_data *data)
{
	kfree(data->name);
	data->name = NULL;

	list_del(&data->named_list);
}

static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
{
	struct event_trigger_data *test;

	list_for_each_entry(test, &named_triggers, named_list) {
		if (strcmp(test->name, data->name) == 0) {
			if (pause) {
				test->paused_tmp = test->paused;
				test->paused = true;
			} else {
				test->paused = test->paused_tmp;
			}
		}
	}
}

/**
 * pause_named_trigger - Pause all named triggers with the same name
 * @data: The trigger data of a named trigger to pause
 *
 * Pauses a named trigger along with all other triggers having the
 * same name. Because named triggers share a common set of data,
 * pausing only one is meaningless, so pausing one named trigger needs
 * to pause all triggers with the same name.
 */
void pause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, true);
}

/**
 * unpause_named_trigger - Un-pause all named triggers with the same name
 * @data: The trigger data of a named trigger to unpause
 *
 * Un-pauses a named trigger along with all other triggers having the
 * same name. Because named triggers share a common set of data,
 * unpausing only one is meaningless, so unpausing one named trigger
 * needs to unpause all triggers with the same name.
 */
void unpause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, false);
}

/**
 * set_named_trigger_data - Associate common named trigger data
 * @data: The trigger data to associate
 * @named_data: The common named trigger to be associated
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data. The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference. This function
 * associates the common trigger data from the first trigger with the
 * given trigger.
 */
void set_named_trigger_data(struct event_trigger_data *data,
			    struct event_trigger_data *named_data)
{
	data->named_data = named_data;
}

struct event_trigger_data *
get_named_trigger_data(struct event_trigger_data *data)
{
	return data->named_data;
}

static void
traceon_trigger(struct event_trigger_data *data,
		struct trace_buffer *buffer, void *rec,
		struct ring_buffer_event *event)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
traceon_count_trigger(struct event_trigger_data *data,
		      struct trace_buffer *buffer, void *rec,
		      struct ring_buffer_event *event)
{
	if (tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_on();
}

static void
traceoff_trigger(struct event_trigger_data *data,
		 struct trace_buffer *buffer, void *rec,
		 struct ring_buffer_event *event)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

static void
traceoff_count_trigger(struct event_trigger_data *data,
		       struct trace_buffer *buffer, void *rec,
		       struct ring_buffer_event *event)
{
	if (!tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_off();
}

static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		      struct event_trigger_data *data)
{
	return event_trigger_print("traceon", m, (void *)data->count,
				   data->filter_str);
}

static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("traceoff", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops traceon_trigger_ops = {
	.func = traceon_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceon_count_trigger_ops = {
	.func = traceon_count_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_trigger_ops = {
	.func = traceoff_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_count_trigger_ops = {
	.func = traceoff_count_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
onoff_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_trigger_ops :
			&traceon_trigger_ops;
	else
		ops = param ? &traceoff_count_trigger_ops :
			&traceoff_trigger_ops;

	return ops;
}

static struct event_command trigger_traceon_cmd = {
	.name = "traceon",
	.trigger_type = ETT_TRACE_ONOFF,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_traceoff_cmd = {
	.name = "traceoff",
	.trigger_type = ETT_TRACE_ONOFF,
	.flags = EVENT_CMD_FL_POST_TRIGGER,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
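
/*
 * Usage sketch for the two commands above: writing "traceon", "traceoff",
 * or a counted/conditional form such as
 *
 *	traceoff:1 if common_pid == 1234
 *
 * to an event's "trigger" file turns tracing on or off when that event
 * fires.  trigger_traceoff_cmd is flagged EVENT_CMD_FL_POST_TRIGGER so
 * the triggering event itself is still recorded before tracing stops.
 */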

#ifdef CONFIG_TRACER_SNAPSHOT
static void
snapshot_trigger(struct event_trigger_data *data,
		 struct trace_buffer *buffer, void *rec,
		 struct ring_buffer_event *event)
{
	struct trace_event_file *file = data->private_data;

	if (file)
		tracing_snapshot_instance(file->tr);
	else
		tracing_snapshot();
}

static void
snapshot_count_trigger(struct event_trigger_data *data,
		       struct trace_buffer *buffer, void *rec,
		       struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	snapshot_trigger(data, buffer, rec, event);
}

static int
register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
			  struct event_trigger_data *data,
			  struct trace_event_file *file)
{
	if (tracing_alloc_snapshot_instance(file->tr) != 0)
		return 0;

	return register_trigger(glob, ops, data, file);
}

static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("snapshot", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops snapshot_trigger_ops = {
	.func = snapshot_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops snapshot_count_trigger_ops = {
	.func = snapshot_count_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
snapshot_get_trigger_ops(char *cmd, char *param)
{
	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
}

static struct event_command trigger_snapshot_cmd = {
	.name = "snapshot",
	.trigger_type = ETT_SNAPSHOT,
	.func = event_trigger_callback,
	.reg = register_snapshot_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = snapshot_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init int register_trigger_snapshot_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_snapshot_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_snapshot_cmd(void) { return 0; }
#endif /* CONFIG_TRACER_SNAPSHOT */
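
/*
 * Usage sketch: writing "snapshot" (or e.g. "snapshot:1 if ...") to an
 * event's "trigger" file snapshots the instance's ring buffer when the
 * event fires; register_snapshot_trigger() allocates the snapshot buffer
 * at registration time so it doesn't have to happen in the trigger path.
 */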

#ifdef CONFIG_STACKTRACE
#ifdef CONFIG_UNWINDER_ORC
/* Skip 2:
 * event_triggers_post_call()
 * trace_event_raw_event_xxx()
 */
# define STACK_SKIP 2
#else
/*
 * Skip 4:
 * stacktrace_trigger()
 * event_triggers_post_call()
 * trace_event_buffer_commit()
 * trace_event_raw_event_xxx()
 */
#define STACK_SKIP 4
#endif

static void
stacktrace_trigger(struct event_trigger_data *data,
		   struct trace_buffer *buffer, void *rec,
		   struct ring_buffer_event *event)
{
	trace_dump_stack(STACK_SKIP);
}

static void
stacktrace_count_trigger(struct event_trigger_data *data,
			 struct trace_buffer *buffer, void *rec,
			 struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	stacktrace_trigger(data, buffer, rec, event);
}

static int
stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			 struct event_trigger_data *data)
{
	return event_trigger_print("stacktrace", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops stacktrace_trigger_ops = {
	.func = stacktrace_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func = stacktrace_count_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
stacktrace_get_trigger_ops(char *cmd, char *param)
{
	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
}

static struct event_command trigger_stacktrace_cmd = {
	.name = "stacktrace",
	.trigger_type = ETT_STACKTRACE,
	.flags = EVENT_CMD_FL_POST_TRIGGER,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = stacktrace_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init int register_trigger_stacktrace_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_stacktrace_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_stacktrace_cmd(void) { return 0; }
#endif /* CONFIG_STACKTRACE */
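
/*
 * Usage sketch: writing "stacktrace" or "stacktrace:5" to an event's
 * "trigger" file dumps a kernel stack trace into the trace buffer when
 * the event fires (at most five times in the counted form); STACK_SKIP
 * keeps the trigger/tracing internals out of the dumped trace.
 */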

static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}

static void
event_enable_trigger(struct event_trigger_data *data,
		     struct trace_buffer *buffer, void *rec,
		     struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (enable_data->enable)
		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
	else
		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
}

static void
event_enable_count_trigger(struct event_trigger_data *data,
			   struct trace_buffer *buffer, void *rec,
			   struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_trigger(data, buffer, rec, event);
}

int event_enable_trigger_print(struct seq_file *m,
			       struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	seq_printf(m, "%s:%s:%s",
		   enable_data->hist ?
		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
		   enable_data->file->event_call->class->system,
		   trace_event_name(enable_data->file->event_call));

	if (data->count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", data->count);

	if (data->filter_str)
		seq_printf(m, " if %s\n", data->filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}

void event_enable_trigger_free(struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		module_put(enable_data->file->event_call->mod);
		trigger_data_free(data);
		kfree(enable_data);
	}
}

static struct event_trigger_ops event_enable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

int event_enable_trigger_func(struct event_command *cmd_ops,
			      struct trace_event_file *file,
			      char *glob, char *cmd, char *param)
{
	struct trace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	struct trace_array *tr = file->tr;
	const char *system;
	const char *event;
	bool hist = false;
	char *trigger;
	char *number;
	bool enable;
	int ret;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (s:e:n [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;
	if (param) {
		param = skip_spaces(param);
		if (!*param)
			param = NULL;
	}

	system = strsep(&trigger, ":");
	if (!trigger)
		return -EINVAL;

	event = strsep(&trigger, ":");

	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

#ifdef CONFIG_HIST_TRIGGERS
	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
		(strcmp(cmd, DISABLE_HIST_STR) == 0));

	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data) {
		kfree(trigger_data);
		goto out;
	}

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	enable_data->hist = hist;
	enable_data->enable = enable;
	enable_data->file = event_enable_file;
	trigger_data->private_data = enable_data;

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		goto out;
	}

	/* Up the trigger_data count to make sure nothing frees it on failure */
	event_trigger_init(trigger_ops, trigger_data);

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(event_enable_file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
	event_trigger_free(trigger_ops, trigger_data);
 out:
	return ret;

 out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
	module_put(event_enable_file->event_call->mod);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	event_trigger_free(trigger_ops, trigger_data);
	kfree(enable_data);
	goto out;
}
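
/*
 * Worked example of the "s:e:n [if filter]" form parsed above: writing
 *
 *	enable_event:kmem:kmalloc:3 if nr_rq > 1
 *
 * to block:block_unplug's "trigger" file (an illustrative pairing) makes
 * block_unplug hits that satisfy "nr_rq > 1" soft-enable kmem:kmalloc, at
 * most three times.  Here @cmd is "enable_event" and @param is
 * "kmem:kmalloc:3 if nr_rq > 1", so system/event/count come from the
 * colon-separated fields and the filter applies to block_unplug itself.
 */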

int event_enable_register_trigger(char *glob,
				  struct event_trigger_ops *ops,
				  struct event_trigger_data *data,
				  struct trace_event_file *file)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct enable_trigger_data *test_enable_data;
	struct event_trigger_data *test;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(test, &file->triggers, list) {
		test_enable_data = test->private_data;
		if (test_enable_data &&
		    (test->cmd_ops->trigger_type ==
		     data->cmd_ops->trigger_type) &&
		    (test_enable_data->file == enable_data->file)) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}

void event_enable_unregister_trigger(char *glob,
				     struct event_trigger_ops *ops,
				     struct event_trigger_data *test,
				     struct trace_event_file *file)
{
	struct enable_trigger_data *test_enable_data = test->private_data;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *data;
	bool unregistered = false;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(data, &file->triggers, list) {
		enable_data = data->private_data;
		if (enable_data &&
		    (data->cmd_ops->trigger_type ==
		     test->cmd_ops->trigger_type) &&
		    (enable_data->file == test_enable_data->file)) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

static struct event_trigger_ops *
event_enable_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;
	bool enable;

#ifdef CONFIG_HIST_TRIGGERS
	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	if (enable)
		ops = param ? &event_enable_count_trigger_ops :
			&event_enable_trigger_ops;
	else
		ops = param ? &event_disable_count_trigger_ops :
			&event_disable_trigger_ops;

	return ops;
}

static struct event_command trigger_enable_cmd = {
	.name = ENABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_disable_cmd = {
	.name = DISABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}

static __init int register_trigger_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_enable_disable_cmds();

	return ret;
}

static __init int register_trigger_traceon_traceoff_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_traceon_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_traceoff_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_traceon_traceoff_cmds();

	return ret;
}

__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();
	register_trigger_hist_enable_disable_cmds();
	register_trigger_hist_cmd();

	return 0;
}