1/*
2 * trace_events_trigger - trace event triggers
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
19 */
20
21#include <linux/module.h>
22#include <linux/ctype.h>
23#include <linux/mutex.h>
24#include <linux/slab.h>
25
26#include "trace.h"
27
28static LIST_HEAD(trigger_commands);
29static DEFINE_MUTEX(trigger_cmd_mutex);
30
31void trigger_data_free(struct event_trigger_data *data)
32{
33 if (data->cmd_ops->set_filter)
34 data->cmd_ops->set_filter(NULL, data, NULL);
35
36 synchronize_sched(); /* make sure current triggers exit before free */
37 kfree(data);
38}
39
40/**
41 * event_triggers_call - Call triggers associated with a trace event
42 * @file: The trace_event_file associated with the event
43 * @rec: The trace entry for the event, NULL for unconditional invocation
44 *
45 * For each trigger associated with an event, invoke the trigger
46 * function registered with the associated trigger command. If rec is
47 * non-NULL, it means that the trigger requires further processing and
48 * shouldn't be unconditionally invoked. If rec is non-NULL and the
49 * trigger has a filter associated with it, rec will be checked against
50 * the filter and, if the record matches, the trigger will be invoked.
51 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
52 * in any case until the current event is written, the trigger
53 * function isn't invoked but the bit associated with the deferred
54 * trigger is set in the return value.
55 *
59 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
60 *
61 * Return: an enum event_trigger_type value containing a set bit for
62 * any trigger that should be deferred, ETT_NONE if nothing to defer.
63 */
64enum event_trigger_type
65event_triggers_call(struct trace_event_file *file, void *rec)
66{
67 struct event_trigger_data *data;
68 enum event_trigger_type tt = ETT_NONE;
69 struct event_filter *filter;
70
71 if (list_empty(&file->triggers))
72 return tt;
73
74 list_for_each_entry_rcu(data, &file->triggers, list) {
75 if (data->paused)
76 continue;
77 if (!rec) {
78 data->ops->func(data, rec);
79 continue;
80 }
81 filter = rcu_dereference_sched(data->filter);
82 if (filter && !filter_match_preds(filter, rec))
83 continue;
84 if (event_command_post_trigger(data->cmd_ops)) {
85 tt |= data->cmd_ops->trigger_type;
86 continue;
87 }
88 data->ops->func(data, rec);
89 }
90 return tt;
91}
92EXPORT_SYMBOL_GPL(event_triggers_call);
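/*
 * Illustrative sketch (assumed caller, not part of this file): code on
 * the event commit path is expected to pair event_triggers_call() with
 * event_triggers_post_call() below, writing the record in between so
 * that deferred 'post' triggers only run once the event is complete:
 *
 *	enum event_trigger_type tt;
 *
 *	tt = event_triggers_call(file, entry);
 *	... write entry to the ring buffer (elided) ...
 *	if (tt != ETT_NONE)
 *		event_triggers_post_call(file, tt, entry);
 */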
93
94/**
95 * event_triggers_post_call - Call 'post_triggers' for a trace event
96 * @file: The trace_event_file associated with the event
97 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
98 * @rec: The trace entry for the event
99 *
100 * For each trigger associated with an event, invoke the trigger
101 * function registered with the associated trigger command, if the
102 * corresponding bit is set in the tt enum passed into this function.
103 * See @event_triggers_call for details on how those bits are set.
104 *
105 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
106 */
107void
108event_triggers_post_call(struct trace_event_file *file,
109 enum event_trigger_type tt,
110 void *rec)
111{
112 struct event_trigger_data *data;
113
114 list_for_each_entry_rcu(data, &file->triggers, list) {
115 if (data->paused)
116 continue;
117 if (data->cmd_ops->trigger_type & tt)
118 data->ops->func(data, rec);
119 }
120}
121EXPORT_SYMBOL_GPL(event_triggers_post_call);
122
123#define SHOW_AVAILABLE_TRIGGERS (void *)(1UL)
124
125static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
126{
127 struct trace_event_file *event_file = event_file_data(m->private);
128
129 if (t == SHOW_AVAILABLE_TRIGGERS)
130 return NULL;
131
132 return seq_list_next(t, &event_file->triggers, pos);
133}
134
135static void *trigger_start(struct seq_file *m, loff_t *pos)
136{
137 struct trace_event_file *event_file;
138
139 /* ->stop() is called even if ->start() fails */
140 mutex_lock(&event_mutex);
141 event_file = event_file_data(m->private);
142 if (unlikely(!event_file))
143 return ERR_PTR(-ENODEV);
144
145 if (list_empty(&event_file->triggers))
146 return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
147
148 return seq_list_start(&event_file->triggers, *pos);
149}
150
151static void trigger_stop(struct seq_file *m, void *t)
152{
153 mutex_unlock(&event_mutex);
154}
155
156static int trigger_show(struct seq_file *m, void *v)
157{
158 struct event_trigger_data *data;
159 struct event_command *p;
160
161 if (v == SHOW_AVAILABLE_TRIGGERS) {
162 seq_puts(m, "# Available triggers:\n");
163 seq_putc(m, '#');
164 mutex_lock(&trigger_cmd_mutex);
165 list_for_each_entry_reverse(p, &trigger_commands, list)
166 seq_printf(m, " %s", p->name);
167 seq_putc(m, '\n');
168 mutex_unlock(&trigger_cmd_mutex);
169 return 0;
170 }
171
172 data = list_entry(v, struct event_trigger_data, list);
173 data->ops->print(m, data->ops, data);
174
175 return 0;
176}
177
178static const struct seq_operations event_triggers_seq_ops = {
179 .start = trigger_start,
180 .next = trigger_next,
181 .stop = trigger_stop,
182 .show = trigger_show,
183};
184
185static int event_trigger_regex_open(struct inode *inode, struct file *file)
186{
187 int ret = 0;
188
189 mutex_lock(&event_mutex);
190
191 if (unlikely(!event_file_data(file))) {
192 mutex_unlock(&event_mutex);
193 return -ENODEV;
194 }
195
196 if ((file->f_mode & FMODE_WRITE) &&
197 (file->f_flags & O_TRUNC)) {
198 struct trace_event_file *event_file;
199 struct event_command *p;
200
201 event_file = event_file_data(file);
202
203 list_for_each_entry(p, &trigger_commands, list) {
204 if (p->unreg_all)
205 p->unreg_all(event_file);
206 }
207 }
208
209 if (file->f_mode & FMODE_READ) {
210 ret = seq_open(file, &event_triggers_seq_ops);
211 if (!ret) {
212 struct seq_file *m = file->private_data;
213 m->private = file;
214 }
215 }
216
217 mutex_unlock(&event_mutex);
218
219 return ret;
220}
221
222static int trigger_process_regex(struct trace_event_file *file, char *buff)
223{
224 char *command, *next = buff;
225 struct event_command *p;
226 int ret = -EINVAL;
227
228 command = strsep(&next, ": \t");
229 command = (command[0] != '!') ? command : command + 1;
230
231 mutex_lock(&trigger_cmd_mutex);
232 list_for_each_entry(p, &trigger_commands, list) {
233 if (strcmp(p->name, command) == 0) {
234 ret = p->func(p, file, buff, command, next);
235 goto out_unlock;
236 }
237 }
238 out_unlock:
239 mutex_unlock(&trigger_cmd_mutex);
240
241 return ret;
242}
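/*
 * Example of the parsing above (illustrative input): for a line written
 * as "traceoff:5 if prev_prio < 50", strsep() leaves command pointing
 * at "traceoff" and next pointing at "5 if prev_prio < 50", which is
 * handed to the matching event_command's func().  A leading '!', as in
 * "!traceoff", selects the same command but requests removal of the
 * trigger.
 */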
243
244static ssize_t event_trigger_regex_write(struct file *file,
245 const char __user *ubuf,
246 size_t cnt, loff_t *ppos)
247{
248 struct trace_event_file *event_file;
249 ssize_t ret;
250 char *buf;
251
252 if (!cnt)
253 return 0;
254
255 if (cnt >= PAGE_SIZE)
256 return -EINVAL;
257
258 buf = memdup_user_nul(ubuf, cnt);
259 if (IS_ERR(buf))
260 return PTR_ERR(buf);
261
262 strim(buf);
263
264 mutex_lock(&event_mutex);
265 event_file = event_file_data(file);
266 if (unlikely(!event_file)) {
267 mutex_unlock(&event_mutex);
268 kfree(buf);
269 return -ENODEV;
270 }
271 ret = trigger_process_regex(event_file, buf);
272 mutex_unlock(&event_mutex);
273
274 kfree(buf);
275 if (ret < 0)
276 goto out;
277
278 *ppos += cnt;
279 ret = cnt;
280 out:
281 return ret;
282}
283
284static int event_trigger_regex_release(struct inode *inode, struct file *file)
285{
286 mutex_lock(&event_mutex);
287
288 if (file->f_mode & FMODE_READ)
289 seq_release(inode, file);
290
291 mutex_unlock(&event_mutex);
292
293 return 0;
294}
295
296static ssize_t
297event_trigger_write(struct file *filp, const char __user *ubuf,
298 size_t cnt, loff_t *ppos)
299{
300 return event_trigger_regex_write(filp, ubuf, cnt, ppos);
301}
302
303static int
304event_trigger_open(struct inode *inode, struct file *filp)
305{
306 return event_trigger_regex_open(inode, filp);
307}
308
309static int
310event_trigger_release(struct inode *inode, struct file *file)
311{
312 return event_trigger_regex_release(inode, file);
313}
314
315const struct file_operations event_trigger_fops = {
316 .open = event_trigger_open,
317 .read = seq_read,
318 .write = event_trigger_write,
319 .llseek = tracing_lseek,
320 .release = event_trigger_release,
321};
322
323/*
324 * Currently we only register event commands from __init, so mark this
325 * __init too.
326 */
327__init int register_event_command(struct event_command *cmd)
328{
329 struct event_command *p;
330 int ret = 0;
331
332 mutex_lock(&trigger_cmd_mutex);
333 list_for_each_entry(p, &trigger_commands, list) {
334 if (strcmp(cmd->name, p->name) == 0) {
335 ret = -EBUSY;
336 goto out_unlock;
337 }
338 }
339 list_add(&cmd->list, &trigger_commands);
340 out_unlock:
341 mutex_unlock(&trigger_cmd_mutex);
342
343 return ret;
344}
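/*
 * A minimal sketch of how a new command would be wired up, assuming a
 * hypothetical "foo" trigger (the name, ETT_FOO bit and
 * foo_get_trigger_ops() are placeholders, not defined in this file):
 *
 *	static struct event_command trigger_foo_cmd = {
 *		.name			= "foo",
 *		.trigger_type		= ETT_FOO,
 *		.func			= event_trigger_callback,
 *		.reg			= register_trigger,
 *		.unreg			= unregister_trigger,
 *		.get_trigger_ops	= foo_get_trigger_ops,
 *		.set_filter		= set_trigger_filter,
 *	};
 *
 *	ret = register_event_command(&trigger_foo_cmd);
 *
 * register_event_command() above returns -EBUSY if a command with the
 * same name is already on the trigger_commands list.
 */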
345
346/*
347 * Currently we only unregister event commands from __init, so mark
348 * this __init too.
349 */
350static __init int unregister_event_command(struct event_command *cmd)
351{
352 struct event_command *p, *n;
353 int ret = -ENODEV;
354
355 mutex_lock(&trigger_cmd_mutex);
356 list_for_each_entry_safe(p, n, &trigger_commands, list) {
357 if (strcmp(cmd->name, p->name) == 0) {
358 ret = 0;
359 list_del_init(&p->list);
360 goto out_unlock;
361 }
362 }
363 out_unlock:
364 mutex_unlock(&trigger_cmd_mutex);
365
366 return ret;
367}
368
369/**
370 * event_trigger_print - Generic event_trigger_ops @print implementation
371 * @name: The name of the event trigger
372 * @m: The seq_file being printed to
373 * @data: Trigger-specific data
374 * @filter_str: filter_str to print, if present
375 *
376 * Common implementation for event triggers to print themselves.
377 *
378 * Usually wrapped by a function that simply sets the @name of the
379 * trigger command and then invokes this.
380 *
381 * Return: 0 on success, errno otherwise
382 */
383static int
384event_trigger_print(const char *name, struct seq_file *m,
385 void *data, char *filter_str)
386{
387 long count = (long)data;
388
389 seq_puts(m, name);
390
391 if (count == -1)
392 seq_puts(m, ":unlimited");
393 else
394 seq_printf(m, ":count=%ld", count);
395
396 if (filter_str)
397 seq_printf(m, " if %s\n", filter_str);
398 else
399 seq_putc(m, '\n');
400
401 return 0;
402}
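/*
 * Example of what the above produces when an event's 'trigger' file is
 * read (illustrative values):
 *
 *	traceoff:count=5 if prev_prio < 50
 *	traceon:unlimited
 */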
403
404/**
405 * event_trigger_init - Generic event_trigger_ops @init implementation
406 * @ops: The trigger ops associated with the trigger
407 * @data: Trigger-specific data
408 *
409 * Common implementation of event trigger initialization.
410 *
411 * Usually used directly as the @init method in event trigger
412 * implementations.
413 *
414 * Return: 0 on success, errno otherwise
415 */
416int event_trigger_init(struct event_trigger_ops *ops,
417 struct event_trigger_data *data)
418{
419 data->ref++;
420 return 0;
421}
422
423/**
424 * event_trigger_free - Generic event_trigger_ops @free implementation
425 * @ops: The trigger ops associated with the trigger
426 * @data: Trigger-specific data
427 *
428 * Common implementation of event trigger de-initialization.
429 *
430 * Usually used directly as the @free method in event trigger
431 * implementations.
432 */
433static void
434event_trigger_free(struct event_trigger_ops *ops,
435 struct event_trigger_data *data)
436{
437 if (WARN_ON_ONCE(data->ref <= 0))
438 return;
439
440 data->ref--;
441 if (!data->ref)
442 trigger_data_free(data);
443}
444
445int trace_event_trigger_enable_disable(struct trace_event_file *file,
446 int trigger_enable)
447{
448 int ret = 0;
449
450 if (trigger_enable) {
451 if (atomic_inc_return(&file->tm_ref) > 1)
452 return ret;
453 set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
454 ret = trace_event_enable_disable(file, 1, 1);
455 } else {
456 if (atomic_dec_return(&file->tm_ref) > 0)
457 return ret;
458 clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
459 ret = trace_event_enable_disable(file, 0, 1);
460 }
461
462 return ret;
463}
464
465/**
466 * clear_event_triggers - Clear all triggers associated with a trace array
467 * @tr: The trace array to clear
468 *
469 * For each trigger, the triggering event has its tm_ref decremented
470 * via trace_event_trigger_enable_disable(), and any associated event
471 * (in the case of enable/disable_event triggers) will have its sm_ref
472 * decremented via free()->trace_event_enable_disable(). That
473 * combination effectively reverses the soft-mode/trigger state added
474 * by trigger registration.
475 *
476 * Must be called with event_mutex held.
477 */
478void
479clear_event_triggers(struct trace_array *tr)
480{
481 struct trace_event_file *file;
482
483 list_for_each_entry(file, &tr->events, list) {
484 struct event_trigger_data *data;
485 list_for_each_entry_rcu(data, &file->triggers, list) {
486 trace_event_trigger_enable_disable(file, 0);
487 if (data->ops->free)
488 data->ops->free(data->ops, data);
489 }
490 }
491}
492
493/**
494 * update_cond_flag - Set or reset the TRIGGER_COND bit
495 * @file: The trace_event_file associated with the event
496 *
497 * If an event has triggers and any of those triggers has a filter or
498 * a post_trigger, trigger invocation needs to be deferred until after
499 * the current event has logged its data, and the event should have
500 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
501 * cleared.
502 */
503void update_cond_flag(struct trace_event_file *file)
504{
505 struct event_trigger_data *data;
506 bool set_cond = false;
507
508 list_for_each_entry_rcu(data, &file->triggers, list) {
509 if (data->filter || event_command_post_trigger(data->cmd_ops) ||
510 event_command_needs_rec(data->cmd_ops)) {
511 set_cond = true;
512 break;
513 }
514 }
515
516 if (set_cond)
517 set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
518 else
519 clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
520}
521
522/**
523 * register_trigger - Generic event_command @reg implementation
524 * @glob: The raw string used to register the trigger
525 * @ops: The trigger ops associated with the trigger
526 * @data: Trigger-specific data to associate with the trigger
527 * @file: The trace_event_file associated with the event
528 *
529 * Common implementation for event trigger registration.
530 *
531 * Usually used directly as the @reg method in event command
532 * implementations.
533 *
534 * Return: 0 on success, errno otherwise
535 */
536static int register_trigger(char *glob, struct event_trigger_ops *ops,
537 struct event_trigger_data *data,
538 struct trace_event_file *file)
539{
540 struct event_trigger_data *test;
541 int ret = 0;
542
543 list_for_each_entry_rcu(test, &file->triggers, list) {
544 if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
545 ret = -EEXIST;
546 goto out;
547 }
548 }
549
550 if (data->ops->init) {
551 ret = data->ops->init(data->ops, data);
552 if (ret < 0)
553 goto out;
554 }
555
556 list_add_rcu(&data->list, &file->triggers);
557 ret++;
558
559 update_cond_flag(file);
560 if (trace_event_trigger_enable_disable(file, 1) < 0) {
561 list_del_rcu(&data->list);
562 update_cond_flag(file);
563 ret--;
564 }
565out:
566 return ret;
567}
568
569/**
570 * unregister_trigger - Generic event_command @unreg implementation
571 * @glob: The raw string used to register the trigger
572 * @ops: The trigger ops associated with the trigger
573 * @test: Trigger-specific data used to find the trigger to remove
574 * @file: The trace_event_file associated with the event
575 *
576 * Common implementation for event trigger unregistration.
577 *
578 * Usually used directly as the @unreg method in event command
579 * implementations.
580 */
581void unregister_trigger(char *glob, struct event_trigger_ops *ops,
582 struct event_trigger_data *test,
583 struct trace_event_file *file)
584{
585 struct event_trigger_data *data;
586 bool unregistered = false;
587
588 list_for_each_entry_rcu(data, &file->triggers, list) {
589 if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
590 unregistered = true;
591 list_del_rcu(&data->list);
592 trace_event_trigger_enable_disable(file, 0);
593 update_cond_flag(file);
594 break;
595 }
596 }
597
598 if (unregistered && data->ops->free)
599 data->ops->free(data->ops, data);
600}
601
602/**
603 * event_trigger_callback - Generic event_command @func implementation
604 * @cmd_ops: The command ops, used for trigger registration
605 * @file: The trace_event_file associated with the event
606 * @glob: The raw string used to register the trigger
607 * @cmd: The cmd portion of the string used to register the trigger
608 * @param: The params portion of the string used to register the trigger
609 *
610 * Common implementation for event command parsing and trigger
611 * instantiation.
612 *
613 * Usually used directly as the @func method in event command
614 * implementations.
615 *
616 * Return: 0 on success, errno otherwise
617 */
618static int
619event_trigger_callback(struct event_command *cmd_ops,
620 struct trace_event_file *file,
621 char *glob, char *cmd, char *param)
622{
623 struct event_trigger_data *trigger_data;
624 struct event_trigger_ops *trigger_ops;
625 char *trigger = NULL;
626 char *number;
627 int ret;
628
629 /* separate the trigger from the filter (t:n [if filter]) */
630 if (param && isdigit(param[0]))
631 trigger = strsep(&param, " \t");
632
633 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
634
635 ret = -ENOMEM;
636 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
637 if (!trigger_data)
638 goto out;
639
640 trigger_data->count = -1;
641 trigger_data->ops = trigger_ops;
642 trigger_data->cmd_ops = cmd_ops;
643 INIT_LIST_HEAD(&trigger_data->list);
644
645 if (glob[0] == '!') {
646 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
647 kfree(trigger_data);
648 ret = 0;
649 goto out;
650 }
651
652 if (trigger) {
653 number = strsep(&trigger, ":");
654
655 ret = -EINVAL;
656 if (!strlen(number))
657 goto out_free;
658
659 /*
660 * We use the callback data field (which is a pointer)
661 * as our counter.
662 */
663 ret = kstrtoul(number, 0, &trigger_data->count);
664 if (ret)
665 goto out_free;
666 }
667
668 if (!param) /* if param is non-empty, it's supposed to be a filter */
669 goto out_reg;
670
671 if (!cmd_ops->set_filter)
672 goto out_reg;
673
674 ret = cmd_ops->set_filter(param, trigger_data, file);
675 if (ret < 0)
676 goto out_free;
677
678 out_reg:
679 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
680 /*
681 * On success, the above returns the number of functions enabled,
682 * but if it didn't find any functions it returns zero.
683 * Consider no functions a failure too.
684 */
685 if (!ret) {
686 ret = -ENOENT;
687 goto out_free;
688 } else if (ret < 0)
689 goto out_free;
690 ret = 0;
691 out:
692 return ret;
693
694 out_free:
695 if (cmd_ops->set_filter)
696 cmd_ops->set_filter(NULL, trigger_data, NULL);
697 kfree(trigger_data);
698 goto out;
699}
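/*
 * Worked example for the callback above (illustrative input): for
 * "traceoff:5 if prev_prio < 50", trigger_process_regex() passes in
 * cmd = "traceoff" and param = "5 if prev_prio < 50"; the code above
 * then peels off trigger = "5" (so count becomes 5) and hands
 * param = "if prev_prio < 50" to set_filter().  With a leading '!' the
 * existing trigger is unregistered instead and any count or filter is
 * ignored.
 */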
700
701/**
702 * set_trigger_filter - Generic event_command @set_filter implementation
703 * @filter_str: The filter string for the trigger, NULL to remove filter
704 * @trigger_data: Trigger-specific data
705 * @file: The trace_event_file associated with the event
706 *
707 * Common implementation for event command filter parsing and filter
708 * instantiation.
709 *
710 * Usually used directly as the @set_filter method in event command
711 * implementations.
712 *
713 * Also used to remove a filter (if filter_str = NULL).
714 *
715 * Return: 0 on success, errno otherwise
716 */
717int set_trigger_filter(char *filter_str,
718 struct event_trigger_data *trigger_data,
719 struct trace_event_file *file)
720{
721 struct event_trigger_data *data = trigger_data;
722 struct event_filter *filter = NULL, *tmp;
723 int ret = -EINVAL;
724 char *s;
725
726 if (!filter_str) /* clear the current filter */
727 goto assign;
728
729 s = strsep(&filter_str, " \t");
730
731 if (!strlen(s) || strcmp(s, "if") != 0)
732 goto out;
733
734 if (!filter_str)
735 goto out;
736
737 /* The filter is for the 'trigger' event, not the triggered event */
738 ret = create_event_filter(file->event_call, filter_str, false, &filter);
739 if (ret)
740 goto out;
741 assign:
742 tmp = rcu_access_pointer(data->filter);
743
744 rcu_assign_pointer(data->filter, filter);
745
746 if (tmp) {
747 /* Make sure the call is done with the filter */
748 synchronize_sched();
749 free_event_filter(tmp);
750 }
751
752 kfree(data->filter_str);
753 data->filter_str = NULL;
754
755 if (filter_str) {
756 data->filter_str = kstrdup(filter_str, GFP_KERNEL);
757 if (!data->filter_str) {
758 free_event_filter(rcu_access_pointer(data->filter));
759 data->filter = NULL;
760 ret = -ENOMEM;
761 }
762 }
763 out:
764 return ret;
765}
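/*
 * Note on the pointer swap above: the old filter is only freed after
 * synchronize_sched(), so a handler that already fetched it with
 * rcu_dereference_sched() in event_triggers_call() is guaranteed to
 * have finished with it before free_event_filter() runs.
 */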
766
767static void
768traceon_trigger(struct event_trigger_data *data, void *rec)
769{
770 if (tracing_is_on())
771 return;
772
773 tracing_on();
774}
775
776static void
777traceon_count_trigger(struct event_trigger_data *data, void *rec)
778{
779 if (tracing_is_on())
780 return;
781
782 if (!data->count)
783 return;
784
785 if (data->count != -1)
786 (data->count)--;
787
788 tracing_on();
789}
790
791static void
792traceoff_trigger(struct event_trigger_data *data, void *rec)
793{
794 if (!tracing_is_on())
795 return;
796
797 tracing_off();
798}
799
800static void
801traceoff_count_trigger(struct event_trigger_data *data, void *rec)
802{
803 if (!tracing_is_on())
804 return;
805
806 if (!data->count)
807 return;
808
809 if (data->count != -1)
810 (data->count)--;
811
812 tracing_off();
813}
814
815static int
816traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
817 struct event_trigger_data *data)
818{
819 return event_trigger_print("traceon", m, (void *)data->count,
820 data->filter_str);
821}
822
823static int
824traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
825 struct event_trigger_data *data)
826{
827 return event_trigger_print("traceoff", m, (void *)data->count,
828 data->filter_str);
829}
830
831static struct event_trigger_ops traceon_trigger_ops = {
832 .func = traceon_trigger,
833 .print = traceon_trigger_print,
834 .init = event_trigger_init,
835 .free = event_trigger_free,
836};
837
838static struct event_trigger_ops traceon_count_trigger_ops = {
839 .func = traceon_count_trigger,
840 .print = traceon_trigger_print,
841 .init = event_trigger_init,
842 .free = event_trigger_free,
843};
844
845static struct event_trigger_ops traceoff_trigger_ops = {
846 .func = traceoff_trigger,
847 .print = traceoff_trigger_print,
848 .init = event_trigger_init,
849 .free = event_trigger_free,
850};
851
852static struct event_trigger_ops traceoff_count_trigger_ops = {
853 .func = traceoff_count_trigger,
854 .print = traceoff_trigger_print,
855 .init = event_trigger_init,
856 .free = event_trigger_free,
857};
858
859static struct event_trigger_ops *
860onoff_get_trigger_ops(char *cmd, char *param)
861{
862 struct event_trigger_ops *ops;
863
864 /* we register both traceon and traceoff to this callback */
865 if (strcmp(cmd, "traceon") == 0)
866 ops = param ? &traceon_count_trigger_ops :
867 &traceon_trigger_ops;
868 else
869 ops = param ? &traceoff_count_trigger_ops :
870 &traceoff_trigger_ops;
871
872 return ops;
873}
874
875static struct event_command trigger_traceon_cmd = {
876 .name = "traceon",
877 .trigger_type = ETT_TRACE_ONOFF,
878 .func = event_trigger_callback,
879 .reg = register_trigger,
880 .unreg = unregister_trigger,
881 .get_trigger_ops = onoff_get_trigger_ops,
882 .set_filter = set_trigger_filter,
883};
884
885static struct event_command trigger_traceoff_cmd = {
886 .name = "traceoff",
887 .trigger_type = ETT_TRACE_ONOFF,
888 .func = event_trigger_callback,
889 .reg = register_trigger,
890 .unreg = unregister_trigger,
891 .get_trigger_ops = onoff_get_trigger_ops,
892 .set_filter = set_trigger_filter,
893};
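/*
 * Typical usage of the two commands above from user space, assuming
 * tracefs is mounted at /sys/kernel/tracing (illustrative event and
 * filter):
 *
 *	echo 'traceoff:1 if prev_prio < 50' > \
 *		/sys/kernel/tracing/events/sched/sched_switch/trigger
 *	echo '!traceoff' > \
 *		/sys/kernel/tracing/events/sched/sched_switch/trigger
 *
 * The ':1' suffix selects the counted variant via
 * onoff_get_trigger_ops(), arming a one-shot traceoff; the '!' form
 * removes the trigger again.
 */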
894
895#ifdef CONFIG_TRACER_SNAPSHOT
896static void
897snapshot_trigger(struct event_trigger_data *data, void *rec)
898{
899 tracing_snapshot();
900}
901
902static void
903snapshot_count_trigger(struct event_trigger_data *data, void *rec)
904{
905 if (!data->count)
906 return;
907
908 if (data->count != -1)
909 (data->count)--;
910
911 snapshot_trigger(data, rec);
912}
913
914static int
915register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
916 struct event_trigger_data *data,
917 struct trace_event_file *file)
918{
919 int ret = register_trigger(glob, ops, data, file);
920
921 if (ret > 0 && tracing_alloc_snapshot() != 0) {
922 unregister_trigger(glob, ops, data, file);
923 ret = 0;
924 }
925
926 return ret;
927}
928
929static int
930snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
931 struct event_trigger_data *data)
932{
933 return event_trigger_print("snapshot", m, (void *)data->count,
934 data->filter_str);
935}
936
937static struct event_trigger_ops snapshot_trigger_ops = {
938 .func = snapshot_trigger,
939 .print = snapshot_trigger_print,
940 .init = event_trigger_init,
941 .free = event_trigger_free,
942};
943
944static struct event_trigger_ops snapshot_count_trigger_ops = {
945 .func = snapshot_count_trigger,
946 .print = snapshot_trigger_print,
947 .init = event_trigger_init,
948 .free = event_trigger_free,
949};
950
951static struct event_trigger_ops *
952snapshot_get_trigger_ops(char *cmd, char *param)
953{
954 return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
955}
956
957static struct event_command trigger_snapshot_cmd = {
958 .name = "snapshot",
959 .trigger_type = ETT_SNAPSHOT,
960 .func = event_trigger_callback,
961 .reg = register_snapshot_trigger,
962 .unreg = unregister_trigger,
963 .get_trigger_ops = snapshot_get_trigger_ops,
964 .set_filter = set_trigger_filter,
965};
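/*
 * Usage sketch for the snapshot command (illustrative event):
 *
 *	echo 'snapshot:1' > \
 *		/sys/kernel/tracing/events/block/block_unplug/trigger
 *
 * register_snapshot_trigger() above also allocates the snapshot buffer
 * the first time such a trigger is registered; if that allocation
 * fails the trigger is unregistered again.
 */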
966
967static __init int register_trigger_snapshot_cmd(void)
968{
969 int ret;
970
971 ret = register_event_command(&trigger_snapshot_cmd);
972 WARN_ON(ret < 0);
973
974 return ret;
975}
976#else
977static __init int register_trigger_snapshot_cmd(void) { return 0; }
978#endif /* CONFIG_TRACER_SNAPSHOT */
979
980#ifdef CONFIG_STACKTRACE
981/*
982 * Skip 3:
983 * stacktrace_trigger()
984 * event_triggers_post_call()
985 * trace_event_raw_event_xxx()
986 */
987#define STACK_SKIP 3
988
989static void
990stacktrace_trigger(struct event_trigger_data *data, void *rec)
991{
992 trace_dump_stack(STACK_SKIP);
993}
994
995static void
996stacktrace_count_trigger(struct event_trigger_data *data, void *rec)
997{
998 if (!data->count)
999 return;
1000
1001 if (data->count != -1)
1002 (data->count)--;
1003
1004 stacktrace_trigger(data, rec);
1005}
1006
1007static int
1008stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1009 struct event_trigger_data *data)
1010{
1011 return event_trigger_print("stacktrace", m, (void *)data->count,
1012 data->filter_str);
1013}
1014
1015static struct event_trigger_ops stacktrace_trigger_ops = {
1016 .func = stacktrace_trigger,
1017 .print = stacktrace_trigger_print,
1018 .init = event_trigger_init,
1019 .free = event_trigger_free,
1020};
1021
1022static struct event_trigger_ops stacktrace_count_trigger_ops = {
1023 .func = stacktrace_count_trigger,
1024 .print = stacktrace_trigger_print,
1025 .init = event_trigger_init,
1026 .free = event_trigger_free,
1027};
1028
1029static struct event_trigger_ops *
1030stacktrace_get_trigger_ops(char *cmd, char *param)
1031{
1032 return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1033}
1034
1035static struct event_command trigger_stacktrace_cmd = {
1036 .name = "stacktrace",
1037 .trigger_type = ETT_STACKTRACE,
1038 .flags = EVENT_CMD_FL_POST_TRIGGER,
1039 .func = event_trigger_callback,
1040 .reg = register_trigger,
1041 .unreg = unregister_trigger,
1042 .get_trigger_ops = stacktrace_get_trigger_ops,
1043 .set_filter = set_trigger_filter,
1044};
1045
1046static __init int register_trigger_stacktrace_cmd(void)
1047{
1048 int ret;
1049
1050 ret = register_event_command(&trigger_stacktrace_cmd);
1051 WARN_ON(ret < 0);
1052
1053 return ret;
1054}
1055#else
1056static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1057#endif /* CONFIG_STACKTRACE */
1058
1059static __init void unregister_trigger_traceon_traceoff_cmds(void)
1060{
1061 unregister_event_command(&trigger_traceon_cmd);
1062 unregister_event_command(&trigger_traceoff_cmd);
1063}
1064
1065/* Avoid typos */
1066#define ENABLE_EVENT_STR "enable_event"
1067#define DISABLE_EVENT_STR "disable_event"
1068
1069struct enable_trigger_data {
1070 struct trace_event_file *file;
1071 bool enable;
1072};
1073
1074static void
1075event_enable_trigger(struct event_trigger_data *data, void *rec)
1076{
1077 struct enable_trigger_data *enable_data = data->private_data;
1078
1079 if (enable_data->enable)
1080 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1081 else
1082 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1083}
1084
1085static void
1086event_enable_count_trigger(struct event_trigger_data *data, void *rec)
1087{
1088 struct enable_trigger_data *enable_data = data->private_data;
1089
1090 if (!data->count)
1091 return;
1092
1093 /* Skip if the event is in a state we want to switch to */
1094 if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1095 return;
1096
1097 if (data->count != -1)
1098 (data->count)--;
1099
1100 event_enable_trigger(data, rec);
1101}
1102
1103static int
1104event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1105 struct event_trigger_data *data)
1106{
1107 struct enable_trigger_data *enable_data = data->private_data;
1108
1109 seq_printf(m, "%s:%s:%s",
1110 enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
1111 enable_data->file->event_call->class->system,
1112 trace_event_name(enable_data->file->event_call));
1113
1114 if (data->count == -1)
1115 seq_puts(m, ":unlimited");
1116 else
1117 seq_printf(m, ":count=%ld", data->count);
1118
1119 if (data->filter_str)
1120 seq_printf(m, " if %s\n", data->filter_str);
1121 else
1122 seq_putc(m, '\n');
1123
1124 return 0;
1125}
1126
1127static void
1128event_enable_trigger_free(struct event_trigger_ops *ops,
1129 struct event_trigger_data *data)
1130{
1131 struct enable_trigger_data *enable_data = data->private_data;
1132
1133 if (WARN_ON_ONCE(data->ref <= 0))
1134 return;
1135
1136 data->ref--;
1137 if (!data->ref) {
1138 /* Remove the SOFT_MODE flag */
1139 trace_event_enable_disable(enable_data->file, 0, 1);
1140 module_put(enable_data->file->event_call->mod);
1141 trigger_data_free(data);
1142 kfree(enable_data);
1143 }
1144}
1145
1146static struct event_trigger_ops event_enable_trigger_ops = {
1147 .func = event_enable_trigger,
1148 .print = event_enable_trigger_print,
1149 .init = event_trigger_init,
1150 .free = event_enable_trigger_free,
1151};
1152
1153static struct event_trigger_ops event_enable_count_trigger_ops = {
1154 .func = event_enable_count_trigger,
1155 .print = event_enable_trigger_print,
1156 .init = event_trigger_init,
1157 .free = event_enable_trigger_free,
1158};
1159
1160static struct event_trigger_ops event_disable_trigger_ops = {
1161 .func = event_enable_trigger,
1162 .print = event_enable_trigger_print,
1163 .init = event_trigger_init,
1164 .free = event_enable_trigger_free,
1165};
1166
1167static struct event_trigger_ops event_disable_count_trigger_ops = {
1168 .func = event_enable_count_trigger,
1169 .print = event_enable_trigger_print,
1170 .init = event_trigger_init,
1171 .free = event_enable_trigger_free,
1172};
1173
1174static int
1175event_enable_trigger_func(struct event_command *cmd_ops,
1176 struct trace_event_file *file,
1177 char *glob, char *cmd, char *param)
1178{
1179 struct trace_event_file *event_enable_file;
1180 struct enable_trigger_data *enable_data;
1181 struct event_trigger_data *trigger_data;
1182 struct event_trigger_ops *trigger_ops;
1183 struct trace_array *tr = file->tr;
1184 const char *system;
1185 const char *event;
1186 char *trigger;
1187 char *number;
1188 bool enable;
1189 int ret;
1190
1191 if (!param)
1192 return -EINVAL;
1193
1194 /* separate the trigger from the filter (s:e:n [if filter]) */
1195 trigger = strsep(&param, " \t");
1196 if (!trigger)
1197 return -EINVAL;
1198
1199 system = strsep(&trigger, ":");
1200 if (!trigger)
1201 return -EINVAL;
1202
1203 event = strsep(&trigger, ":");
1204
1205 ret = -EINVAL;
1206 event_enable_file = find_event_file(tr, system, event);
1207 if (!event_enable_file)
1208 goto out;
1209
1210 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1211
1212 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1213
1214 ret = -ENOMEM;
1215 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1216 if (!trigger_data)
1217 goto out;
1218
1219 enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1220 if (!enable_data) {
1221 kfree(trigger_data);
1222 goto out;
1223 }
1224
1225 trigger_data->count = -1;
1226 trigger_data->ops = trigger_ops;
1227 trigger_data->cmd_ops = cmd_ops;
1228 INIT_LIST_HEAD(&trigger_data->list);
1229 RCU_INIT_POINTER(trigger_data->filter, NULL);
1230
1231 enable_data->enable = enable;
1232 enable_data->file = event_enable_file;
1233 trigger_data->private_data = enable_data;
1234
1235 if (glob[0] == '!') {
1236 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1237 kfree(trigger_data);
1238 kfree(enable_data);
1239 ret = 0;
1240 goto out;
1241 }
1242
1243 if (trigger) {
1244 number = strsep(&trigger, ":");
1245
1246 ret = -EINVAL;
1247 if (!strlen(number))
1248 goto out_free;
1249
1250 /*
1251 * We use the callback data field (which is a pointer)
1252 * as our counter.
1253 */
1254 ret = kstrtoul(number, 0, &trigger_data->count);
1255 if (ret)
1256 goto out_free;
1257 }
1258
1259 if (!param) /* if param is non-empty, it's supposed to be a filter */
1260 goto out_reg;
1261
1262 if (!cmd_ops->set_filter)
1263 goto out_reg;
1264
1265 ret = cmd_ops->set_filter(param, trigger_data, file);
1266 if (ret < 0)
1267 goto out_free;
1268
1269 out_reg:
1270 /* Don't let event modules unload while probe registered */
1271 ret = try_module_get(event_enable_file->event_call->mod);
1272 if (!ret) {
1273 ret = -EBUSY;
1274 goto out_free;
1275 }
1276
1277 ret = trace_event_enable_disable(event_enable_file, 1, 1);
1278 if (ret < 0)
1279 goto out_put;
1280 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1281 /*
1282 * On success, the above returns the number of functions enabled,
1283 * but if it didn't find any functions it returns zero.
1284 * Consider no functions a failure too.
1285 */
1286 if (!ret) {
1287 ret = -ENOENT;
1288 goto out_disable;
1289 } else if (ret < 0)
1290 goto out_disable;
1291 /* Just return zero, not the number of enabled functions */
1292 ret = 0;
1293 out:
1294 return ret;
1295
1296 out_disable:
1297 trace_event_enable_disable(event_enable_file, 0, 1);
1298 out_put:
1299 module_put(event_enable_file->event_call->mod);
1300 out_free:
1301 if (cmd_ops->set_filter)
1302 cmd_ops->set_filter(NULL, trigger_data, NULL);
1303 kfree(trigger_data);
1304 kfree(enable_data);
1305 goto out;
1306}
1307
1308static int event_enable_register_trigger(char *glob,
1309 struct event_trigger_ops *ops,
1310 struct event_trigger_data *data,
1311 struct trace_event_file *file)
1312{
1313 struct enable_trigger_data *enable_data = data->private_data;
1314 struct enable_trigger_data *test_enable_data;
1315 struct event_trigger_data *test;
1316 int ret = 0;
1317
1318 list_for_each_entry_rcu(test, &file->triggers, list) {
1319 test_enable_data = test->private_data;
1320 if (test_enable_data &&
1321 (test_enable_data->file == enable_data->file)) {
1322 ret = -EEXIST;
1323 goto out;
1324 }
1325 }
1326
1327 if (data->ops->init) {
1328 ret = data->ops->init(data->ops, data);
1329 if (ret < 0)
1330 goto out;
1331 }
1332
1333 list_add_rcu(&data->list, &file->triggers);
1334 ret++;
1335
1336 update_cond_flag(file);
1337 if (trace_event_trigger_enable_disable(file, 1) < 0) {
1338 list_del_rcu(&data->list);
1339 update_cond_flag(file);
1340 ret--;
1341 }
1342out:
1343 return ret;
1344}
1345
1346static void event_enable_unregister_trigger(char *glob,
1347 struct event_trigger_ops *ops,
1348 struct event_trigger_data *test,
1349 struct trace_event_file *file)
1350{
1351 struct enable_trigger_data *test_enable_data = test->private_data;
1352 struct enable_trigger_data *enable_data;
1353 struct event_trigger_data *data;
1354 bool unregistered = false;
1355
1356 list_for_each_entry_rcu(data, &file->triggers, list) {
1357 enable_data = data->private_data;
1358 if (enable_data &&
1359 (enable_data->file == test_enable_data->file)) {
1360 unregistered = true;
1361 list_del_rcu(&data->list);
1362 trace_event_trigger_enable_disable(file, 0);
1363 update_cond_flag(file);
1364 break;
1365 }
1366 }
1367
1368 if (unregistered && data->ops->free)
1369 data->ops->free(data->ops, data);
1370}
1371
1372static struct event_trigger_ops *
1373event_enable_get_trigger_ops(char *cmd, char *param)
1374{
1375 struct event_trigger_ops *ops;
1376 bool enable;
1377
1378 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1379
1380 if (enable)
1381 ops = param ? &event_enable_count_trigger_ops :
1382 &event_enable_trigger_ops;
1383 else
1384 ops = param ? &event_disable_count_trigger_ops :
1385 &event_disable_trigger_ops;
1386
1387 return ops;
1388}
1389
1390static struct event_command trigger_enable_cmd = {
1391 .name = ENABLE_EVENT_STR,
1392 .trigger_type = ETT_EVENT_ENABLE,
1393 .func = event_enable_trigger_func,
1394 .reg = event_enable_register_trigger,
1395 .unreg = event_enable_unregister_trigger,
1396 .get_trigger_ops = event_enable_get_trigger_ops,
1397 .set_filter = set_trigger_filter,
1398};
1399
1400static struct event_command trigger_disable_cmd = {
1401 .name = DISABLE_EVENT_STR,
1402 .trigger_type = ETT_EVENT_ENABLE,
1403 .func = event_enable_trigger_func,
1404 .reg = event_enable_register_trigger,
1405 .unreg = event_enable_unregister_trigger,
1406 .get_trigger_ops = event_enable_get_trigger_ops,
1407 .set_filter = set_trigger_filter,
1408};
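/*
 * Usage sketch for the two commands above (illustrative events): the
 * parameter is parsed by event_enable_trigger_func() as
 * <system>:<event>[:count], optionally followed by 'if <filter>':
 *
 *	echo 'enable_event:kmem:kmalloc:3' > \
 *		/sys/kernel/tracing/events/sched/sched_wakeup/trigger
 *	echo 'disable_event:kmem:kmalloc' > \
 *		/sys/kernel/tracing/events/sched/sched_wakeup/trigger
 *
 * Each time sched_wakeup fires, the kmem:kmalloc event is soft-enabled
 * (or soft-disabled) by clearing (or setting) its SOFT_DISABLED flag.
 */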
1409
1410static __init void unregister_trigger_enable_disable_cmds(void)
1411{
1412 unregister_event_command(&trigger_enable_cmd);
1413 unregister_event_command(&trigger_disable_cmd);
1414}
1415
1416static __init int register_trigger_enable_disable_cmds(void)
1417{
1418 int ret;
1419
1420 ret = register_event_command(&trigger_enable_cmd);
1421 if (WARN_ON(ret < 0))
1422 return ret;
1423 ret = register_event_command(&trigger_disable_cmd);
1424 if (WARN_ON(ret < 0))
1425 unregister_trigger_enable_disable_cmds();
1426
1427 return ret;
1428}
1429
1430static __init int register_trigger_traceon_traceoff_cmds(void)
1431{
1432 int ret;
1433
1434 ret = register_event_command(&trigger_traceon_cmd);
1435 if (WARN_ON(ret < 0))
1436 return ret;
1437 ret = register_event_command(&trigger_traceoff_cmd);
1438 if (WARN_ON(ret < 0))
1439 unregister_trigger_traceon_traceoff_cmds();
1440
1441 return ret;
1442}
1443
1444__init int register_trigger_cmds(void)
1445{
1446 register_trigger_traceon_traceoff_cmds();
1447 register_trigger_snapshot_cmd();
1448 register_trigger_stacktrace_cmd();
1449 register_trigger_enable_disable_cmds();
1450
1451 return 0;
1452}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * trace_events_trigger - trace event triggers
4 *
5 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
6 */
7
8#include <linux/security.h>
9#include <linux/module.h>
10#include <linux/ctype.h>
11#include <linux/mutex.h>
12#include <linux/slab.h>
13#include <linux/rculist.h>
14
15#include "trace.h"
16
17static LIST_HEAD(trigger_commands);
18static DEFINE_MUTEX(trigger_cmd_mutex);
19
20void trigger_data_free(struct event_trigger_data *data)
21{
22 if (data->cmd_ops->set_filter)
23 data->cmd_ops->set_filter(NULL, data, NULL);
24
25 /* make sure current triggers exit before free */
26 tracepoint_synchronize_unregister();
27
28 kfree(data);
29}
30
31/**
32 * event_triggers_call - Call triggers associated with a trace event
33 * @file: The trace_event_file associated with the event
34 * @rec: The trace entry for the event, NULL for unconditional invocation
35 *
36 * For each trigger associated with an event, invoke the trigger
37 * function registered with the associated trigger command. If rec is
38 * non-NULL, it means that the trigger requires further processing and
39 * shouldn't be unconditionally invoked. If rec is non-NULL and the
40 * trigger has a filter associated with it, rec will be checked against
41 * the filter and, if the record matches, the trigger will be invoked.
42 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
43 * in any case until the current event is written, the trigger
44 * function isn't invoked but the bit associated with the deferred
45 * trigger is set in the return value.
46 *
50 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
51 *
52 * Return: an enum event_trigger_type value containing a set bit for
53 * any trigger that should be deferred, ETT_NONE if nothing to defer.
54 */
55enum event_trigger_type
56event_triggers_call(struct trace_event_file *file, void *rec,
57 struct ring_buffer_event *event)
58{
59 struct event_trigger_data *data;
60 enum event_trigger_type tt = ETT_NONE;
61 struct event_filter *filter;
62
63 if (list_empty(&file->triggers))
64 return tt;
65
66 list_for_each_entry_rcu(data, &file->triggers, list) {
67 if (data->paused)
68 continue;
69 if (!rec) {
70 data->ops->func(data, rec, event);
71 continue;
72 }
73 filter = rcu_dereference_sched(data->filter);
74 if (filter && !filter_match_preds(filter, rec))
75 continue;
76 if (event_command_post_trigger(data->cmd_ops)) {
77 tt |= data->cmd_ops->trigger_type;
78 continue;
79 }
80 data->ops->func(data, rec, event);
81 }
82 return tt;
83}
84EXPORT_SYMBOL_GPL(event_triggers_call);
85
86/**
87 * event_triggers_post_call - Call 'post_triggers' for a trace event
88 * @file: The trace_event_file associated with the event
89 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
90 *
91 * For each trigger associated with an event, invoke the trigger
92 * function registered with the associated trigger command, if the
93 * corresponding bit is set in the tt enum passed into this function.
94 * See @event_triggers_call for details on how those bits are set.
95 *
96 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
97 */
98void
99event_triggers_post_call(struct trace_event_file *file,
100 enum event_trigger_type tt)
101{
102 struct event_trigger_data *data;
103
104 list_for_each_entry_rcu(data, &file->triggers, list) {
105 if (data->paused)
106 continue;
107 if (data->cmd_ops->trigger_type & tt)
108 data->ops->func(data, NULL, NULL);
109 }
110}
111EXPORT_SYMBOL_GPL(event_triggers_post_call);
112
113#define SHOW_AVAILABLE_TRIGGERS (void *)(1UL)
114
115static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
116{
117 struct trace_event_file *event_file = event_file_data(m->private);
118
119 if (t == SHOW_AVAILABLE_TRIGGERS) {
120 (*pos)++;
121 return NULL;
122 }
123 return seq_list_next(t, &event_file->triggers, pos);
124}
125
126static void *trigger_start(struct seq_file *m, loff_t *pos)
127{
128 struct trace_event_file *event_file;
129
130 /* ->stop() is called even if ->start() fails */
131 mutex_lock(&event_mutex);
132 event_file = event_file_data(m->private);
133 if (unlikely(!event_file))
134 return ERR_PTR(-ENODEV);
135
136 if (list_empty(&event_file->triggers))
137 return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
138
139 return seq_list_start(&event_file->triggers, *pos);
140}
141
142static void trigger_stop(struct seq_file *m, void *t)
143{
144 mutex_unlock(&event_mutex);
145}
146
147static int trigger_show(struct seq_file *m, void *v)
148{
149 struct event_trigger_data *data;
150 struct event_command *p;
151
152 if (v == SHOW_AVAILABLE_TRIGGERS) {
153 seq_puts(m, "# Available triggers:\n");
154 seq_putc(m, '#');
155 mutex_lock(&trigger_cmd_mutex);
156 list_for_each_entry_reverse(p, &trigger_commands, list)
157 seq_printf(m, " %s", p->name);
158 seq_putc(m, '\n');
159 mutex_unlock(&trigger_cmd_mutex);
160 return 0;
161 }
162
163 data = list_entry(v, struct event_trigger_data, list);
164 data->ops->print(m, data->ops, data);
165
166 return 0;
167}
168
169static const struct seq_operations event_triggers_seq_ops = {
170 .start = trigger_start,
171 .next = trigger_next,
172 .stop = trigger_stop,
173 .show = trigger_show,
174};
175
176static int event_trigger_regex_open(struct inode *inode, struct file *file)
177{
178 int ret;
179
180 ret = security_locked_down(LOCKDOWN_TRACEFS);
181 if (ret)
182 return ret;
183
184 mutex_lock(&event_mutex);
185
186 if (unlikely(!event_file_data(file))) {
187 mutex_unlock(&event_mutex);
188 return -ENODEV;
189 }
190
191 if ((file->f_mode & FMODE_WRITE) &&
192 (file->f_flags & O_TRUNC)) {
193 struct trace_event_file *event_file;
194 struct event_command *p;
195
196 event_file = event_file_data(file);
197
198 list_for_each_entry(p, &trigger_commands, list) {
199 if (p->unreg_all)
200 p->unreg_all(event_file);
201 }
202 }
203
204 if (file->f_mode & FMODE_READ) {
205 ret = seq_open(file, &event_triggers_seq_ops);
206 if (!ret) {
207 struct seq_file *m = file->private_data;
208 m->private = file;
209 }
210 }
211
212 mutex_unlock(&event_mutex);
213
214 return ret;
215}
216
217int trigger_process_regex(struct trace_event_file *file, char *buff)
218{
219 char *command, *next;
220 struct event_command *p;
221 int ret = -EINVAL;
222
223 next = buff = skip_spaces(buff);
224 command = strsep(&next, ": \t");
225 if (next) {
226 next = skip_spaces(next);
227 if (!*next)
228 next = NULL;
229 }
230 command = (command[0] != '!') ? command : command + 1;
231
232 mutex_lock(&trigger_cmd_mutex);
233 list_for_each_entry(p, &trigger_commands, list) {
234 if (strcmp(p->name, command) == 0) {
235 ret = p->func(p, file, buff, command, next);
236 goto out_unlock;
237 }
238 }
239 out_unlock:
240 mutex_unlock(&trigger_cmd_mutex);
241
242 return ret;
243}
244
245static ssize_t event_trigger_regex_write(struct file *file,
246 const char __user *ubuf,
247 size_t cnt, loff_t *ppos)
248{
249 struct trace_event_file *event_file;
250 ssize_t ret;
251 char *buf;
252
253 if (!cnt)
254 return 0;
255
256 if (cnt >= PAGE_SIZE)
257 return -EINVAL;
258
259 buf = memdup_user_nul(ubuf, cnt);
260 if (IS_ERR(buf))
261 return PTR_ERR(buf);
262
263 strim(buf);
264
265 mutex_lock(&event_mutex);
266 event_file = event_file_data(file);
267 if (unlikely(!event_file)) {
268 mutex_unlock(&event_mutex);
269 kfree(buf);
270 return -ENODEV;
271 }
272 ret = trigger_process_regex(event_file, buf);
273 mutex_unlock(&event_mutex);
274
275 kfree(buf);
276 if (ret < 0)
277 goto out;
278
279 *ppos += cnt;
280 ret = cnt;
281 out:
282 return ret;
283}
284
285static int event_trigger_regex_release(struct inode *inode, struct file *file)
286{
287 mutex_lock(&event_mutex);
288
289 if (file->f_mode & FMODE_READ)
290 seq_release(inode, file);
291
292 mutex_unlock(&event_mutex);
293
294 return 0;
295}
296
297static ssize_t
298event_trigger_write(struct file *filp, const char __user *ubuf,
299 size_t cnt, loff_t *ppos)
300{
301 return event_trigger_regex_write(filp, ubuf, cnt, ppos);
302}
303
304static int
305event_trigger_open(struct inode *inode, struct file *filp)
306{
307 /* Checks for tracefs lockdown */
308 return event_trigger_regex_open(inode, filp);
309}
310
311static int
312event_trigger_release(struct inode *inode, struct file *file)
313{
314 return event_trigger_regex_release(inode, file);
315}
316
317const struct file_operations event_trigger_fops = {
318 .open = event_trigger_open,
319 .read = seq_read,
320 .write = event_trigger_write,
321 .llseek = tracing_lseek,
322 .release = event_trigger_release,
323};
324
325/*
326 * Currently we only register event commands from __init, so mark this
327 * __init too.
328 */
329__init int register_event_command(struct event_command *cmd)
330{
331 struct event_command *p;
332 int ret = 0;
333
334 mutex_lock(&trigger_cmd_mutex);
335 list_for_each_entry(p, &trigger_commands, list) {
336 if (strcmp(cmd->name, p->name) == 0) {
337 ret = -EBUSY;
338 goto out_unlock;
339 }
340 }
341 list_add(&cmd->list, &trigger_commands);
342 out_unlock:
343 mutex_unlock(&trigger_cmd_mutex);
344
345 return ret;
346}
347
348/*
349 * Currently we only unregister event commands from __init, so mark
350 * this __init too.
351 */
352__init int unregister_event_command(struct event_command *cmd)
353{
354 struct event_command *p, *n;
355 int ret = -ENODEV;
356
357 mutex_lock(&trigger_cmd_mutex);
358 list_for_each_entry_safe(p, n, &trigger_commands, list) {
359 if (strcmp(cmd->name, p->name) == 0) {
360 ret = 0;
361 list_del_init(&p->list);
362 goto out_unlock;
363 }
364 }
365 out_unlock:
366 mutex_unlock(&trigger_cmd_mutex);
367
368 return ret;
369}
370
371/**
372 * event_trigger_print - Generic event_trigger_ops @print implementation
373 * @name: The name of the event trigger
374 * @m: The seq_file being printed to
375 * @data: Trigger-specific data
376 * @filter_str: filter_str to print, if present
377 *
378 * Common implementation for event triggers to print themselves.
379 *
380 * Usually wrapped by a function that simply sets the @name of the
381 * trigger command and then invokes this.
382 *
383 * Return: 0 on success, errno otherwise
384 */
385static int
386event_trigger_print(const char *name, struct seq_file *m,
387 void *data, char *filter_str)
388{
389 long count = (long)data;
390
391 seq_puts(m, name);
392
393 if (count == -1)
394 seq_puts(m, ":unlimited");
395 else
396 seq_printf(m, ":count=%ld", count);
397
398 if (filter_str)
399 seq_printf(m, " if %s\n", filter_str);
400 else
401 seq_putc(m, '\n');
402
403 return 0;
404}
405
406/**
407 * event_trigger_init - Generic event_trigger_ops @init implementation
408 * @ops: The trigger ops associated with the trigger
409 * @data: Trigger-specific data
410 *
411 * Common implementation of event trigger initialization.
412 *
413 * Usually used directly as the @init method in event trigger
414 * implementations.
415 *
416 * Return: 0 on success, errno otherwise
417 */
418int event_trigger_init(struct event_trigger_ops *ops,
419 struct event_trigger_data *data)
420{
421 data->ref++;
422 return 0;
423}
424
425/**
426 * event_trigger_free - Generic event_trigger_ops @free implementation
427 * @ops: The trigger ops associated with the trigger
428 * @data: Trigger-specific data
429 *
430 * Common implementation of event trigger de-initialization.
431 *
432 * Usually used directly as the @free method in event trigger
433 * implementations.
434 */
435static void
436event_trigger_free(struct event_trigger_ops *ops,
437 struct event_trigger_data *data)
438{
439 if (WARN_ON_ONCE(data->ref <= 0))
440 return;
441
442 data->ref--;
443 if (!data->ref)
444 trigger_data_free(data);
445}
446
447int trace_event_trigger_enable_disable(struct trace_event_file *file,
448 int trigger_enable)
449{
450 int ret = 0;
451
452 if (trigger_enable) {
453 if (atomic_inc_return(&file->tm_ref) > 1)
454 return ret;
455 set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
456 ret = trace_event_enable_disable(file, 1, 1);
457 } else {
458 if (atomic_dec_return(&file->tm_ref) > 0)
459 return ret;
460 clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
461 ret = trace_event_enable_disable(file, 0, 1);
462 }
463
464 return ret;
465}
466
467/**
468 * clear_event_triggers - Clear all triggers associated with a trace array
469 * @tr: The trace array to clear
470 *
471 * For each trigger, the triggering event has its tm_ref decremented
472 * via trace_event_trigger_enable_disable(), and any associated event
473 * (in the case of enable/disable_event triggers) will have its sm_ref
474 * decremented via free()->trace_event_enable_disable(). That
475 * combination effectively reverses the soft-mode/trigger state added
476 * by trigger registration.
477 *
478 * Must be called with event_mutex held.
479 */
480void
481clear_event_triggers(struct trace_array *tr)
482{
483 struct trace_event_file *file;
484
485 list_for_each_entry(file, &tr->events, list) {
486 struct event_trigger_data *data, *n;
487 list_for_each_entry_safe(data, n, &file->triggers, list) {
488 trace_event_trigger_enable_disable(file, 0);
489 list_del_rcu(&data->list);
490 if (data->ops->free)
491 data->ops->free(data->ops, data);
492 }
493 }
494}
495
496/**
497 * update_cond_flag - Set or reset the TRIGGER_COND bit
498 * @file: The trace_event_file associated with the event
499 *
500 * If an event has triggers and any of those triggers has a filter or
501 * a post_trigger, trigger invocation needs to be deferred until after
502 * the current event has logged its data, and the event should have
503 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
504 * cleared.
505 */
506void update_cond_flag(struct trace_event_file *file)
507{
508 struct event_trigger_data *data;
509 bool set_cond = false;
510
511 lockdep_assert_held(&event_mutex);
512
513 list_for_each_entry(data, &file->triggers, list) {
514 if (data->filter || event_command_post_trigger(data->cmd_ops) ||
515 event_command_needs_rec(data->cmd_ops)) {
516 set_cond = true;
517 break;
518 }
519 }
520
521 if (set_cond)
522 set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
523 else
524 clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
525}
526
527/**
528 * register_trigger - Generic event_command @reg implementation
529 * @glob: The raw string used to register the trigger
530 * @ops: The trigger ops associated with the trigger
531 * @data: Trigger-specific data to associate with the trigger
532 * @file: The trace_event_file associated with the event
533 *
534 * Common implementation for event trigger registration.
535 *
536 * Usually used directly as the @reg method in event command
537 * implementations.
538 *
539 * Return: 0 on success, errno otherwise
540 */
541static int register_trigger(char *glob, struct event_trigger_ops *ops,
542 struct event_trigger_data *data,
543 struct trace_event_file *file)
544{
545 struct event_trigger_data *test;
546 int ret = 0;
547
548 lockdep_assert_held(&event_mutex);
549
550 list_for_each_entry(test, &file->triggers, list) {
551 if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
552 ret = -EEXIST;
553 goto out;
554 }
555 }
556
557 if (data->ops->init) {
558 ret = data->ops->init(data->ops, data);
559 if (ret < 0)
560 goto out;
561 }
562
563 list_add_rcu(&data->list, &file->triggers);
564 ret++;
565
566 update_cond_flag(file);
567 if (trace_event_trigger_enable_disable(file, 1) < 0) {
568 list_del_rcu(&data->list);
569 update_cond_flag(file);
570 ret--;
571 }
572out:
573 return ret;
574}
575
576/**
577 * unregister_trigger - Generic event_command @unreg implementation
578 * @glob: The raw string used to register the trigger
579 * @ops: The trigger ops associated with the trigger
580 * @test: Trigger-specific data used to find the trigger to remove
581 * @file: The trace_event_file associated with the event
582 *
583 * Common implementation for event trigger unregistration.
584 *
585 * Usually used directly as the @unreg method in event command
586 * implementations.
587 */
588static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
589 struct event_trigger_data *test,
590 struct trace_event_file *file)
591{
592 struct event_trigger_data *data;
593 bool unregistered = false;
594
595 lockdep_assert_held(&event_mutex);
596
597 list_for_each_entry(data, &file->triggers, list) {
598 if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
599 unregistered = true;
600 list_del_rcu(&data->list);
601 trace_event_trigger_enable_disable(file, 0);
602 update_cond_flag(file);
603 break;
604 }
605 }
606
607 if (unregistered && data->ops->free)
608 data->ops->free(data->ops, data);
609}
610
611/**
612 * event_trigger_callback - Generic event_command @func implementation
613 * @cmd_ops: The command ops, used for trigger registration
614 * @file: The trace_event_file associated with the event
615 * @glob: The raw string used to register the trigger
616 * @cmd: The cmd portion of the string used to register the trigger
617 * @param: The params portion of the string used to register the trigger
618 *
619 * Common implementation for event command parsing and trigger
620 * instantiation.
621 *
622 * Usually used directly as the @func method in event command
623 * implementations.
624 *
625 * Return: 0 on success, errno otherwise
626 */
627static int
628event_trigger_callback(struct event_command *cmd_ops,
629 struct trace_event_file *file,
630 char *glob, char *cmd, char *param)
631{
632 struct event_trigger_data *trigger_data;
633 struct event_trigger_ops *trigger_ops;
634 char *trigger = NULL;
635 char *number;
636 int ret;
637
638 /* separate the trigger from the filter (t:n [if filter]) */
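	/*
	 * Illustrative example (not from the original source): for
	 * 'traceoff:5 if common_pid != 0' written to a trigger file,
	 * cmd is "traceoff" and param is "5 if common_pid != 0"; the
	 * strsep() below leaves trigger = "5" (the count, parsed
	 * further down) and param = "if common_pid != 0" (the filter).
	 * Removal is requested by prefixing the command with '!', which
	 * the glob[0] == '!' check handles.
	 */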
639 if (param && isdigit(param[0])) {
640 trigger = strsep(&param, " \t");
641 if (param) {
642 param = skip_spaces(param);
643 if (!*param)
644 param = NULL;
645 }
646 }
647
648 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
649
650 ret = -ENOMEM;
651 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
652 if (!trigger_data)
653 goto out;
654
655 trigger_data->count = -1;
656 trigger_data->ops = trigger_ops;
657 trigger_data->cmd_ops = cmd_ops;
658 trigger_data->private_data = file;
659 INIT_LIST_HEAD(&trigger_data->list);
660 INIT_LIST_HEAD(&trigger_data->named_list);
661
662 if (glob[0] == '!') {
663 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
664 kfree(trigger_data);
665 ret = 0;
666 goto out;
667 }
668
669 if (trigger) {
670 number = strsep(&trigger, ":");
671
672 ret = -EINVAL;
673 if (!strlen(number))
674 goto out_free;
675
676 /*
677 * We use the callback data field (which is a pointer)
678 * as our counter.
679 */
680 ret = kstrtoul(number, 0, &trigger_data->count);
681 if (ret)
682 goto out_free;
683 }
684
685 if (!param) /* if param is non-empty, it's supposed to be a filter */
686 goto out_reg;
687
688 if (!cmd_ops->set_filter)
689 goto out_reg;
690
691 ret = cmd_ops->set_filter(param, trigger_data, file);
692 if (ret < 0)
693 goto out_free;
694
695 out_reg:
696 /* Up the trigger_data count to make sure reg doesn't free it on failure */
697 event_trigger_init(trigger_ops, trigger_data);
698 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
699 /*
700 * On success, the above returns the number of triggers registered,
701 * but it returns zero if nothing was actually registered.
702 * Treat that as a failure too.
703 */
704 if (!ret) {
705 cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
706 ret = -ENOENT;
707 } else if (ret > 0)
708 ret = 0;
709
710 /* Drop the trigger_data reference and free it if no longer used */
711 event_trigger_free(trigger_ops, trigger_data);
712 out:
713 return ret;
714
715 out_free:
716 if (cmd_ops->set_filter)
717 cmd_ops->set_filter(NULL, trigger_data, NULL);
718 kfree(trigger_data);
719 goto out;
720}
721
722/**
723 * set_trigger_filter - Generic event_command @set_filter implementation
724 * @filter_str: The filter string for the trigger, NULL to remove filter
725 * @trigger_data: Trigger-specific data
726 * @file: The trace_event_file associated with the event
727 *
728 * Common implementation for event command filter parsing and filter
729 * instantiation.
730 *
731 * Usually used directly as the @set_filter method in event command
732 * implementations.
733 *
734 * Also used to remove a filter (if filter_str = NULL).
735 *
736 * Return: 0 on success, errno otherwise
737 */
738int set_trigger_filter(char *filter_str,
739 struct event_trigger_data *trigger_data,
740 struct trace_event_file *file)
741{
742 struct event_trigger_data *data = trigger_data;
743 struct event_filter *filter = NULL, *tmp;
744 int ret = -EINVAL;
745 char *s;
746
747 if (!filter_str) /* clear the current filter */
748 goto assign;
749
750 s = strsep(&filter_str, " \t");
751
752 if (!strlen(s) || strcmp(s, "if") != 0)
753 goto out;
754
755 if (!filter_str)
756 goto out;
757
758 /* The filter is for the 'trigger' event, not the triggered event */
759 ret = create_event_filter(file->tr, file->event_call,
760 filter_str, false, &filter);
761 /*
762 * If create_event_filter() fails, the partially created filter may
763 * still need to be freed; the calling code does that via data->filter.
764 */
765 assign:
766 tmp = rcu_access_pointer(data->filter);
767
768 rcu_assign_pointer(data->filter, filter);
769
770 if (tmp) {
771 /* Make sure the call is done with the filter */
772 tracepoint_synchronize_unregister();
773 free_event_filter(tmp);
774 }
775
776 kfree(data->filter_str);
777 data->filter_str = NULL;
778
779 if (filter_str) {
780 data->filter_str = kstrdup(filter_str, GFP_KERNEL);
781 if (!data->filter_str) {
782 free_event_filter(rcu_access_pointer(data->filter));
783 data->filter = NULL;
784 ret = -ENOMEM;
785 }
786 }
787 out:
788 return ret;
789}
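
/*
 * Illustrative example (not part of the original source): for the
 * trigger 'traceoff if common_pid != 0', set_trigger_filter() is handed
 * "if common_pid != 0"; the leading "if" is stripped and the remainder
 * is compiled by create_event_filter() against the fields of the event
 * the trigger is attached to, not the event(s) the trigger acts on.
 */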
790
791static LIST_HEAD(named_triggers);
792
793/**
794 * find_named_trigger - Find the common named trigger associated with @name
795 * @name: The name of the set of named triggers to find the common data for
796 *
797 * Named triggers are sets of triggers that share a common set of
798 * trigger data. The first named trigger registered with a given name
799 * owns the common trigger data that the others subsequently
800 * registered with the same name will reference. This function
801 * returns the common trigger data associated with that first
802 * registered instance.
803 *
804 * Return: the common trigger data for the given named trigger on
805 * success, NULL otherwise.
806 */
807struct event_trigger_data *find_named_trigger(const char *name)
808{
809 struct event_trigger_data *data;
810
811 if (!name)
812 return NULL;
813
814 list_for_each_entry(data, &named_triggers, named_list) {
815 if (data->named_data)
816 continue;
817 if (strcmp(data->name, name) == 0)
818 return data;
819 }
820
821 return NULL;
822}
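
/*
 * Illustrative example (not part of the original source): hist triggers
 * use named triggers to share one histogram between events, e.g. a
 * trigger spec containing something like 'name=foo' attached to two
 * different events.  The instance registered first owns the
 * event_trigger_data; find_named_trigger("foo") then returns that
 * owning instance for the later ones to reference.
 */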
823
824/**
825 * is_named_trigger - determine if a given trigger is a named trigger
826 * @test: The trigger data to test
827 *
828 * Return: true if 'test' is a named trigger, false otherwise.
829 */
830bool is_named_trigger(struct event_trigger_data *test)
831{
832 struct event_trigger_data *data;
833
834 list_for_each_entry(data, &named_triggers, named_list) {
835 if (test == data)
836 return true;
837 }
838
839 return false;
840}
841
842/**
843 * save_named_trigger - save the trigger in the named trigger list
844 * @name: The name of the named trigger set
845 * @data: The trigger data to save
846 *
847 * Return: 0 if successful, negative error otherwise.
848 */
849int save_named_trigger(const char *name, struct event_trigger_data *data)
850{
851 data->name = kstrdup(name, GFP_KERNEL);
852 if (!data->name)
853 return -ENOMEM;
854
855 list_add(&data->named_list, &named_triggers);
856
857 return 0;
858}
859
860/**
861 * del_named_trigger - delete a trigger from the named trigger list
862 * @data: The trigger data to delete
863 */
864void del_named_trigger(struct event_trigger_data *data)
865{
866 kfree(data->name);
867 data->name = NULL;
868
869 list_del(&data->named_list);
870}
871
872static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
873{
874 struct event_trigger_data *test;
875
876 list_for_each_entry(test, &named_triggers, named_list) {
877 if (strcmp(test->name, data->name) == 0) {
878 if (pause) {
879 test->paused_tmp = test->paused;
880 test->paused = true;
881 } else {
882 test->paused = test->paused_tmp;
883 }
884 }
885 }
886}
887
888/**
889 * pause_named_trigger - Pause all named triggers with the same name
890 * @data: The trigger data of a named trigger to pause
891 *
892 * Pauses a named trigger along with all other triggers having the
893 * same name. Because named triggers share a common set of data,
894 * pausing only one is meaningless, so pausing one named trigger needs
895 * to pause all triggers with the same name.
896 */
897void pause_named_trigger(struct event_trigger_data *data)
898{
899 __pause_named_trigger(data, true);
900}
901
902/**
903 * unpause_named_trigger - Un-pause all named triggers with the same name
904 * @data: The trigger data of a named trigger to unpause
905 *
906 * Un-pauses a named trigger along with all other triggers having the
907 * same name. Because named triggers share a common set of data,
908 * unpausing only one is meaningless, so unpausing one named trigger
909 * needs to unpause all triggers with the same name.
910 */
911void unpause_named_trigger(struct event_trigger_data *data)
912{
913 __pause_named_trigger(data, false);
914}
915
916/**
917 * set_named_trigger_data - Associate common named trigger data
918 * @data: The trigger data to associate
919 * @named_data: The common named trigger data @data will reference
919 *
920 * Named triggers are sets of triggers that share a common set of
921 * trigger data. The first named trigger registered with a given name
922 * owns the common trigger data that the others subsequently
923 * registered with the same name will reference. This function
924 * associates the common trigger data from the first trigger with the
925 * given trigger.
926 */
927void set_named_trigger_data(struct event_trigger_data *data,
928 struct event_trigger_data *named_data)
929{
930 data->named_data = named_data;
931}
932
933struct event_trigger_data *
934get_named_trigger_data(struct event_trigger_data *data)
935{
936 return data->named_data;
937}
938
939static void
940traceon_trigger(struct event_trigger_data *data, void *rec,
941 struct ring_buffer_event *event)
942{
943 if (tracing_is_on())
944 return;
945
946 tracing_on();
947}
948
949static void
950traceon_count_trigger(struct event_trigger_data *data, void *rec,
951 struct ring_buffer_event *event)
952{
953 if (tracing_is_on())
954 return;
955
956 if (!data->count)
957 return;
958
959 if (data->count != -1)
960 (data->count)--;
961
962 tracing_on();
963}
964
965static void
966traceoff_trigger(struct event_trigger_data *data, void *rec,
967 struct ring_buffer_event *event)
968{
969 if (!tracing_is_on())
970 return;
971
972 tracing_off();
973}
974
975static void
976traceoff_count_trigger(struct event_trigger_data *data, void *rec,
977 struct ring_buffer_event *event)
978{
979 if (!tracing_is_on())
980 return;
981
982 if (!data->count)
983 return;
984
985 if (data->count != -1)
986 (data->count)--;
987
988 tracing_off();
989}
990
991static int
992traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
993 struct event_trigger_data *data)
994{
995 return event_trigger_print("traceon", m, (void *)data->count,
996 data->filter_str);
997}
998
999static int
1000traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1001 struct event_trigger_data *data)
1002{
1003 return event_trigger_print("traceoff", m, (void *)data->count,
1004 data->filter_str);
1005}
1006
1007static struct event_trigger_ops traceon_trigger_ops = {
1008 .func = traceon_trigger,
1009 .print = traceon_trigger_print,
1010 .init = event_trigger_init,
1011 .free = event_trigger_free,
1012};
1013
1014static struct event_trigger_ops traceon_count_trigger_ops = {
1015 .func = traceon_count_trigger,
1016 .print = traceon_trigger_print,
1017 .init = event_trigger_init,
1018 .free = event_trigger_free,
1019};
1020
1021static struct event_trigger_ops traceoff_trigger_ops = {
1022 .func = traceoff_trigger,
1023 .print = traceoff_trigger_print,
1024 .init = event_trigger_init,
1025 .free = event_trigger_free,
1026};
1027
1028static struct event_trigger_ops traceoff_count_trigger_ops = {
1029 .func = traceoff_count_trigger,
1030 .print = traceoff_trigger_print,
1031 .init = event_trigger_init,
1032 .free = event_trigger_free,
1033};
1034
1035static struct event_trigger_ops *
1036onoff_get_trigger_ops(char *cmd, char *param)
1037{
1038 struct event_trigger_ops *ops;
1039
1040 /* we register both traceon and traceoff to this callback */
1041 if (strcmp(cmd, "traceon") == 0)
1042 ops = param ? &traceon_count_trigger_ops :
1043 &traceon_trigger_ops;
1044 else
1045 ops = param ? &traceoff_count_trigger_ops :
1046 &traceoff_trigger_ops;
1047
1048 return ops;
1049}
1050
1051static struct event_command trigger_traceon_cmd = {
1052 .name = "traceon",
1053 .trigger_type = ETT_TRACE_ONOFF,
1054 .func = event_trigger_callback,
1055 .reg = register_trigger,
1056 .unreg = unregister_trigger,
1057 .get_trigger_ops = onoff_get_trigger_ops,
1058 .set_filter = set_trigger_filter,
1059};
1060
1061static struct event_command trigger_traceoff_cmd = {
1062 .name = "traceoff",
1063 .trigger_type = ETT_TRACE_ONOFF,
1064 .flags = EVENT_CMD_FL_POST_TRIGGER,
1065 .func = event_trigger_callback,
1066 .reg = register_trigger,
1067 .unreg = unregister_trigger,
1068 .get_trigger_ops = onoff_get_trigger_ops,
1069 .set_filter = set_trigger_filter,
1070};
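
/*
 * Usage sketch (illustrative, not part of the original source):
 *
 *   echo 'traceoff:1 if common_pid != 0' > \
 *       /sys/kernel/tracing/events/sched/sched_switch/trigger
 *
 * stops tracing the first time a matching sched_switch event fires.
 * traceoff is marked EVENT_CMD_FL_POST_TRIGGER, so it runs after the
 * triggering event has been written, letting that final event make it
 * into the ring buffer before tracing is turned off.
 */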
1071
1072#ifdef CONFIG_TRACER_SNAPSHOT
1073static void
1074snapshot_trigger(struct event_trigger_data *data, void *rec,
1075 struct ring_buffer_event *event)
1076{
1077 struct trace_event_file *file = data->private_data;
1078
1079 if (file)
1080 tracing_snapshot_instance(file->tr);
1081 else
1082 tracing_snapshot();
1083}
1084
1085static void
1086snapshot_count_trigger(struct event_trigger_data *data, void *rec,
1087 struct ring_buffer_event *event)
1088{
1089 if (!data->count)
1090 return;
1091
1092 if (data->count != -1)
1093 (data->count)--;
1094
1095 snapshot_trigger(data, rec, event);
1096}
1097
1098static int
1099register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
1100 struct event_trigger_data *data,
1101 struct trace_event_file *file)
1102{
1103 if (tracing_alloc_snapshot_instance(file->tr) != 0)
1104 return 0;
1105
1106 return register_trigger(glob, ops, data, file);
1107}
1108
1109static int
1110snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1111 struct event_trigger_data *data)
1112{
1113 return event_trigger_print("snapshot", m, (void *)data->count,
1114 data->filter_str);
1115}
1116
1117static struct event_trigger_ops snapshot_trigger_ops = {
1118 .func = snapshot_trigger,
1119 .print = snapshot_trigger_print,
1120 .init = event_trigger_init,
1121 .free = event_trigger_free,
1122};
1123
1124static struct event_trigger_ops snapshot_count_trigger_ops = {
1125 .func = snapshot_count_trigger,
1126 .print = snapshot_trigger_print,
1127 .init = event_trigger_init,
1128 .free = event_trigger_free,
1129};
1130
1131static struct event_trigger_ops *
1132snapshot_get_trigger_ops(char *cmd, char *param)
1133{
1134 return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1135}
1136
1137static struct event_command trigger_snapshot_cmd = {
1138 .name = "snapshot",
1139 .trigger_type = ETT_SNAPSHOT,
1140 .func = event_trigger_callback,
1141 .reg = register_snapshot_trigger,
1142 .unreg = unregister_trigger,
1143 .get_trigger_ops = snapshot_get_trigger_ops,
1144 .set_filter = set_trigger_filter,
1145};
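
/*
 * Usage sketch (illustrative, not part of the original source):
 *
 *   echo 'snapshot' > /sys/kernel/tracing/events/block/block_unplug/trigger
 *
 * swaps the live buffer into the snapshot buffer each time the event
 * fires.  register_snapshot_trigger() pre-allocates the snapshot buffer
 * at registration time; if that allocation fails it returns 0, which
 * event_trigger_callback() treats as a registration failure.
 */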
1146
1147static __init int register_trigger_snapshot_cmd(void)
1148{
1149 int ret;
1150
1151 ret = register_event_command(&trigger_snapshot_cmd);
1152 WARN_ON(ret < 0);
1153
1154 return ret;
1155}
1156#else
1157static __init int register_trigger_snapshot_cmd(void) { return 0; }
1158#endif /* CONFIG_TRACER_SNAPSHOT */
1159
1160#ifdef CONFIG_STACKTRACE
1161#ifdef CONFIG_UNWINDER_ORC
1162/* Skip 2:
1163 * event_triggers_post_call()
1164 * trace_event_raw_event_xxx()
1165 */
1166# define STACK_SKIP 2
1167#else
1168/*
1169 * Skip 4:
1170 * stacktrace_trigger()
1171 * event_triggers_post_call()
1172 * trace_event_buffer_commit()
1173 * trace_event_raw_event_xxx()
1174 */
1175#define STACK_SKIP 4
1176#endif
1177
1178static void
1179stacktrace_trigger(struct event_trigger_data *data, void *rec,
1180 struct ring_buffer_event *event)
1181{
1182 trace_dump_stack(STACK_SKIP);
1183}
1184
1185static void
1186stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
1187 struct ring_buffer_event *event)
1188{
1189 if (!data->count)
1190 return;
1191
1192 if (data->count != -1)
1193 (data->count)--;
1194
1195 stacktrace_trigger(data, rec, event);
1196}
1197
1198static int
1199stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1200 struct event_trigger_data *data)
1201{
1202 return event_trigger_print("stacktrace", m, (void *)data->count,
1203 data->filter_str);
1204}
1205
1206static struct event_trigger_ops stacktrace_trigger_ops = {
1207 .func = stacktrace_trigger,
1208 .print = stacktrace_trigger_print,
1209 .init = event_trigger_init,
1210 .free = event_trigger_free,
1211};
1212
1213static struct event_trigger_ops stacktrace_count_trigger_ops = {
1214 .func = stacktrace_count_trigger,
1215 .print = stacktrace_trigger_print,
1216 .init = event_trigger_init,
1217 .free = event_trigger_free,
1218};
1219
1220static struct event_trigger_ops *
1221stacktrace_get_trigger_ops(char *cmd, char *param)
1222{
1223 return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1224}
1225
1226static struct event_command trigger_stacktrace_cmd = {
1227 .name = "stacktrace",
1228 .trigger_type = ETT_STACKTRACE,
1229 .flags = EVENT_CMD_FL_POST_TRIGGER,
1230 .func = event_trigger_callback,
1231 .reg = register_trigger,
1232 .unreg = unregister_trigger,
1233 .get_trigger_ops = stacktrace_get_trigger_ops,
1234 .set_filter = set_trigger_filter,
1235};
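
/*
 * Usage sketch (illustrative, not part of the original source):
 *
 *   echo 'stacktrace:5' > /sys/kernel/tracing/events/kmem/kmalloc/trigger
 *
 * dumps a kernel stack trace into the ring buffer for the first five
 * kmalloc events.  STACK_SKIP hides the trigger machinery itself from
 * the reported stack, and EVENT_CMD_FL_POST_TRIGGER defers the dump
 * until after the triggering event has been recorded.
 */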
1236
1237static __init int register_trigger_stacktrace_cmd(void)
1238{
1239 int ret;
1240
1241 ret = register_event_command(&trigger_stacktrace_cmd);
1242 WARN_ON(ret < 0);
1243
1244 return ret;
1245}
1246#else
1247static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1248#endif /* CONFIG_STACKTRACE */
1249
1250static __init void unregister_trigger_traceon_traceoff_cmds(void)
1251{
1252 unregister_event_command(&trigger_traceon_cmd);
1253 unregister_event_command(&trigger_traceoff_cmd);
1254}
1255
1256static void
1257event_enable_trigger(struct event_trigger_data *data, void *rec,
1258 struct ring_buffer_event *event)
1259{
1260 struct enable_trigger_data *enable_data = data->private_data;
1261
1262 if (enable_data->enable)
1263 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1264 else
1265 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1266}
1267
1268static void
1269event_enable_count_trigger(struct event_trigger_data *data, void *rec,
1270 struct ring_buffer_event *event)
1271{
1272 struct enable_trigger_data *enable_data = data->private_data;
1273
1274 if (!data->count)
1275 return;
1276
1277 /* Skip if the event is in a state we want to switch to */
1278 if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1279 return;
1280
1281 if (data->count != -1)
1282 (data->count)--;
1283
1284 event_enable_trigger(data, rec, event);
1285}
1286
1287int event_enable_trigger_print(struct seq_file *m,
1288 struct event_trigger_ops *ops,
1289 struct event_trigger_data *data)
1290{
1291 struct enable_trigger_data *enable_data = data->private_data;
1292
1293 seq_printf(m, "%s:%s:%s",
1294 enable_data->hist ?
1295 (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1296 (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1297 enable_data->file->event_call->class->system,
1298 trace_event_name(enable_data->file->event_call));
1299
1300 if (data->count == -1)
1301 seq_puts(m, ":unlimited");
1302 else
1303 seq_printf(m, ":count=%ld", data->count);
1304
1305 if (data->filter_str)
1306 seq_printf(m, " if %s\n", data->filter_str);
1307 else
1308 seq_putc(m, '\n');
1309
1310 return 0;
1311}
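
/*
 * For illustration (not part of the original source): with the format
 * above, reading the trigger file for an enable_event trigger prints
 * something like
 *
 *   enable_event:kmem:kmalloc:count=2 if common_pid != 0
 *
 * with ':unlimited' in place of the count when no count was given.
 */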
1312
1313void event_enable_trigger_free(struct event_trigger_ops *ops,
1314 struct event_trigger_data *data)
1315{
1316 struct enable_trigger_data *enable_data = data->private_data;
1317
1318 if (WARN_ON_ONCE(data->ref <= 0))
1319 return;
1320
1321 data->ref--;
1322 if (!data->ref) {
1323 /* Remove the SOFT_MODE flag */
1324 trace_event_enable_disable(enable_data->file, 0, 1);
1325 module_put(enable_data->file->event_call->mod);
1326 trigger_data_free(data);
1327 kfree(enable_data);
1328 }
1329}
1330
1331static struct event_trigger_ops event_enable_trigger_ops = {
1332 .func = event_enable_trigger,
1333 .print = event_enable_trigger_print,
1334 .init = event_trigger_init,
1335 .free = event_enable_trigger_free,
1336};
1337
1338static struct event_trigger_ops event_enable_count_trigger_ops = {
1339 .func = event_enable_count_trigger,
1340 .print = event_enable_trigger_print,
1341 .init = event_trigger_init,
1342 .free = event_enable_trigger_free,
1343};
1344
1345static struct event_trigger_ops event_disable_trigger_ops = {
1346 .func = event_enable_trigger,
1347 .print = event_enable_trigger_print,
1348 .init = event_trigger_init,
1349 .free = event_enable_trigger_free,
1350};
1351
1352static struct event_trigger_ops event_disable_count_trigger_ops = {
1353 .func = event_enable_count_trigger,
1354 .print = event_enable_trigger_print,
1355 .init = event_trigger_init,
1356 .free = event_enable_trigger_free,
1357};
1358
1359int event_enable_trigger_func(struct event_command *cmd_ops,
1360 struct trace_event_file *file,
1361 char *glob, char *cmd, char *param)
1362{
1363 struct trace_event_file *event_enable_file;
1364 struct enable_trigger_data *enable_data;
1365 struct event_trigger_data *trigger_data;
1366 struct event_trigger_ops *trigger_ops;
1367 struct trace_array *tr = file->tr;
1368 const char *system;
1369 const char *event;
1370 bool hist = false;
1371 char *trigger;
1372 char *number;
1373 bool enable;
1374 int ret;
1375
1376 if (!param)
1377 return -EINVAL;
1378
1379 /* separate the trigger from the filter (s:e:n [if filter]) */
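	/*
	 * Illustrative example (not from the original source): for
	 * 'enable_event:kmem:kmalloc:2 if common_pid != 0', param is
	 * "kmem:kmalloc:2 if common_pid != 0"; the strsep() below
	 * leaves trigger = "kmem:kmalloc:2" and param = "if ...", and
	 * the subsequent strsep(&trigger, ":") calls peel off
	 * system = "kmem", event = "kmalloc" and the count "2".
	 */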
1380 trigger = strsep(&param, " \t");
1381 if (!trigger)
1382 return -EINVAL;
1383 if (param) {
1384 param = skip_spaces(param);
1385 if (!*param)
1386 param = NULL;
1387 }
1388
1389 system = strsep(&trigger, ":");
1390 if (!trigger)
1391 return -EINVAL;
1392
1393 event = strsep(&trigger, ":");
1394
1395 ret = -EINVAL;
1396 event_enable_file = find_event_file(tr, system, event);
1397 if (!event_enable_file)
1398 goto out;
1399
1400#ifdef CONFIG_HIST_TRIGGERS
1401 hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1402 (strcmp(cmd, DISABLE_HIST_STR) == 0));
1403
1404 enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1405 (strcmp(cmd, ENABLE_HIST_STR) == 0));
1406#else
1407 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1408#endif
1409 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1410
1411 ret = -ENOMEM;
1412 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1413 if (!trigger_data)
1414 goto out;
1415
1416 enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1417 if (!enable_data) {
1418 kfree(trigger_data);
1419 goto out;
1420 }
1421
1422 trigger_data->count = -1;
1423 trigger_data->ops = trigger_ops;
1424 trigger_data->cmd_ops = cmd_ops;
1425 INIT_LIST_HEAD(&trigger_data->list);
1426 RCU_INIT_POINTER(trigger_data->filter, NULL);
1427
1428 enable_data->hist = hist;
1429 enable_data->enable = enable;
1430 enable_data->file = event_enable_file;
1431 trigger_data->private_data = enable_data;
1432
1433 if (glob[0] == '!') {
1434 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1435 kfree(trigger_data);
1436 kfree(enable_data);
1437 ret = 0;
1438 goto out;
1439 }
1440
1441 /* Up the trigger_data count to make sure nothing frees it on failure */
1442 event_trigger_init(trigger_ops, trigger_data);
1443
1444 if (trigger) {
1445 number = strsep(&trigger, ":");
1446
1447 ret = -EINVAL;
1448 if (!strlen(number))
1449 goto out_free;
1450
1451 /*
1452 * We use the callback data field (which is a pointer)
1453 * as our counter.
1454 */
1455 ret = kstrtoul(number, 0, &trigger_data->count);
1456 if (ret)
1457 goto out_free;
1458 }
1459
1460 if (!param) /* if param is non-empty, it's supposed to be a filter */
1461 goto out_reg;
1462
1463 if (!cmd_ops->set_filter)
1464 goto out_reg;
1465
1466 ret = cmd_ops->set_filter(param, trigger_data, file);
1467 if (ret < 0)
1468 goto out_free;
1469
1470 out_reg:
1471 /* Don't let event modules unload while probe registered */
1472 ret = try_module_get(event_enable_file->event_call->mod);
1473 if (!ret) {
1474 ret = -EBUSY;
1475 goto out_free;
1476 }
1477
1478 ret = trace_event_enable_disable(event_enable_file, 1, 1);
1479 if (ret < 0)
1480 goto out_put;
1481 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1482 /*
1483 * On success, the above returns the number of triggers registered,
1484 * but it returns zero if nothing was actually registered.
1485 * Treat that as a failure too.
1486 */
1487 if (!ret) {
1488 ret = -ENOENT;
1489 goto out_disable;
1490 } else if (ret < 0)
1491 goto out_disable;
1492 /* Just return zero, not the number of triggers registered */
1493 ret = 0;
1494 event_trigger_free(trigger_ops, trigger_data);
1495 out:
1496 return ret;
1497
1498 out_disable:
1499 trace_event_enable_disable(event_enable_file, 0, 1);
1500 out_put:
1501 module_put(event_enable_file->event_call->mod);
1502 out_free:
1503 if (cmd_ops->set_filter)
1504 cmd_ops->set_filter(NULL, trigger_data, NULL);
1505 event_trigger_free(trigger_ops, trigger_data);
1506 kfree(enable_data);
1507 goto out;
1508}
1509
1510int event_enable_register_trigger(char *glob,
1511 struct event_trigger_ops *ops,
1512 struct event_trigger_data *data,
1513 struct trace_event_file *file)
1514{
1515 struct enable_trigger_data *enable_data = data->private_data;
1516 struct enable_trigger_data *test_enable_data;
1517 struct event_trigger_data *test;
1518 int ret = 0;
1519
1520 lockdep_assert_held(&event_mutex);
1521
1522 list_for_each_entry(test, &file->triggers, list) {
1523 test_enable_data = test->private_data;
1524 if (test_enable_data &&
1525 (test->cmd_ops->trigger_type ==
1526 data->cmd_ops->trigger_type) &&
1527 (test_enable_data->file == enable_data->file)) {
1528 ret = -EEXIST;
1529 goto out;
1530 }
1531 }
1532
1533 if (data->ops->init) {
1534 ret = data->ops->init(data->ops, data);
1535 if (ret < 0)
1536 goto out;
1537 }
1538
1539 list_add_rcu(&data->list, &file->triggers);
1540 ret++;
1541
1542 update_cond_flag(file);
1543 if (trace_event_trigger_enable_disable(file, 1) < 0) {
1544 list_del_rcu(&data->list);
1545 update_cond_flag(file);
1546 ret--;
1547 }
1548out:
1549 return ret;
1550}
1551
1552void event_enable_unregister_trigger(char *glob,
1553 struct event_trigger_ops *ops,
1554 struct event_trigger_data *test,
1555 struct trace_event_file *file)
1556{
1557 struct enable_trigger_data *test_enable_data = test->private_data;
1558 struct enable_trigger_data *enable_data;
1559 struct event_trigger_data *data;
1560 bool unregistered = false;
1561
1562 lockdep_assert_held(&event_mutex);
1563
1564 list_for_each_entry(data, &file->triggers, list) {
1565 enable_data = data->private_data;
1566 if (enable_data &&
1567 (data->cmd_ops->trigger_type ==
1568 test->cmd_ops->trigger_type) &&
1569 (enable_data->file == test_enable_data->file)) {
1570 unregistered = true;
1571 list_del_rcu(&data->list);
1572 trace_event_trigger_enable_disable(file, 0);
1573 update_cond_flag(file);
1574 break;
1575 }
1576 }
1577
1578 if (unregistered && data->ops->free)
1579 data->ops->free(data->ops, data);
1580}
1581
1582static struct event_trigger_ops *
1583event_enable_get_trigger_ops(char *cmd, char *param)
1584{
1585 struct event_trigger_ops *ops;
1586 bool enable;
1587
1588#ifdef CONFIG_HIST_TRIGGERS
1589 enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1590 (strcmp(cmd, ENABLE_HIST_STR) == 0));
1591#else
1592 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1593#endif
1594 if (enable)
1595 ops = param ? &event_enable_count_trigger_ops :
1596 &event_enable_trigger_ops;
1597 else
1598 ops = param ? &event_disable_count_trigger_ops :
1599 &event_disable_trigger_ops;
1600
1601 return ops;
1602}
1603
1604static struct event_command trigger_enable_cmd = {
1605 .name = ENABLE_EVENT_STR,
1606 .trigger_type = ETT_EVENT_ENABLE,
1607 .func = event_enable_trigger_func,
1608 .reg = event_enable_register_trigger,
1609 .unreg = event_enable_unregister_trigger,
1610 .get_trigger_ops = event_enable_get_trigger_ops,
1611 .set_filter = set_trigger_filter,
1612};
1613
1614static struct event_command trigger_disable_cmd = {
1615 .name = DISABLE_EVENT_STR,
1616 .trigger_type = ETT_EVENT_ENABLE,
1617 .func = event_enable_trigger_func,
1618 .reg = event_enable_register_trigger,
1619 .unreg = event_enable_unregister_trigger,
1620 .get_trigger_ops = event_enable_get_trigger_ops,
1621 .set_filter = set_trigger_filter,
1622};
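
/*
 * Usage sketch (illustrative, not part of the original source):
 *
 *   echo 'enable_event:kmem:kmalloc:1' > \
 *       /sys/kernel/tracing/events/sched/sched_process_exec/trigger
 *
 * soft-enables the kmem:kmalloc event the first time an exec event
 * fires, by clearing EVENT_FILE_FL_SOFT_DISABLED on the target file;
 * 'disable_event:...' sets the bit instead.  '!enable_event:...'
 * removes the trigger again.
 */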
1623
1624static __init void unregister_trigger_enable_disable_cmds(void)
1625{
1626 unregister_event_command(&trigger_enable_cmd);
1627 unregister_event_command(&trigger_disable_cmd);
1628}
1629
1630static __init int register_trigger_enable_disable_cmds(void)
1631{
1632 int ret;
1633
1634 ret = register_event_command(&trigger_enable_cmd);
1635 if (WARN_ON(ret < 0))
1636 return ret;
1637 ret = register_event_command(&trigger_disable_cmd);
1638 if (WARN_ON(ret < 0))
1639 unregister_trigger_enable_disable_cmds();
1640
1641 return ret;
1642}
1643
1644static __init int register_trigger_traceon_traceoff_cmds(void)
1645{
1646 int ret;
1647
1648 ret = register_event_command(&trigger_traceon_cmd);
1649 if (WARN_ON(ret < 0))
1650 return ret;
1651 ret = register_event_command(&trigger_traceoff_cmd);
1652 if (WARN_ON(ret < 0))
1653 unregister_trigger_traceon_traceoff_cmds();
1654
1655 return ret;
1656}
1657
1658__init int register_trigger_cmds(void)
1659{
1660 register_trigger_traceon_traceoff_cmds();
1661 register_trigger_snapshot_cmd();
1662 register_trigger_stacktrace_cmd();
1663 register_trigger_enable_disable_cmds();
1664 register_trigger_hist_enable_disable_cmds();
1665 register_trigger_hist_cmd();
1666
1667 return 0;
1668}