1/*
2 * trace_events_trigger - trace event triggers
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
19 */
20
21#include <linux/module.h>
22#include <linux/ctype.h>
23#include <linux/mutex.h>
24#include <linux/slab.h>
25
26#include "trace.h"
27
28static LIST_HEAD(trigger_commands);
29static DEFINE_MUTEX(trigger_cmd_mutex);
30
31void trigger_data_free(struct event_trigger_data *data)
32{
33 if (data->cmd_ops->set_filter)
34 data->cmd_ops->set_filter(NULL, data, NULL);
35
36 synchronize_sched(); /* make sure current triggers exit before free */
37 kfree(data);
38}
39
40/**
41 * event_triggers_call - Call triggers associated with a trace event
42 * @file: The trace_event_file associated with the event
43 * @rec: The trace entry for the event, NULL for unconditional invocation
44 *
45 * For each trigger associated with an event, invoke the trigger
46 * function registered with the associated trigger command. If rec is
47 * non-NULL, it means that the trigger requires further processing and
48 * shouldn't be unconditionally invoked. If rec is non-NULL and the
49 * trigger has a filter associated with it, rec will be checked against
50 * the filter and, if the record matches, the trigger will be invoked.
51 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
52 * in any case until the current event is written, the trigger
53 * function isn't invoked but the bit associated with the deferred
54 * trigger is set in the return value.
55 *
59 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
60 *
61 * Return: an enum event_trigger_type value containing a set bit for
62 * any trigger that should be deferred, ETT_NONE if nothing to defer.
63 */
64enum event_trigger_type
65event_triggers_call(struct trace_event_file *file, void *rec)
66{
67 struct event_trigger_data *data;
68 enum event_trigger_type tt = ETT_NONE;
69 struct event_filter *filter;
70
71 if (list_empty(&file->triggers))
72 return tt;
73
74 list_for_each_entry_rcu(data, &file->triggers, list) {
75 if (data->paused)
76 continue;
77 if (!rec) {
78 data->ops->func(data, rec);
79 continue;
80 }
81 filter = rcu_dereference_sched(data->filter);
82 if (filter && !filter_match_preds(filter, rec))
83 continue;
84 if (event_command_post_trigger(data->cmd_ops)) {
85 tt |= data->cmd_ops->trigger_type;
86 continue;
87 }
88 data->ops->func(data, rec);
89 }
90 return tt;
91}
92EXPORT_SYMBOL_GPL(event_triggers_call);
93
94/**
95 * event_triggers_post_call - Call 'post_triggers' for a trace event
96 * @file: The trace_event_file associated with the event
97 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
98 * @rec: The trace entry for the event
99 *
100 * For each trigger associated with an event, invoke the trigger
101 * function registered with the associated trigger command, if the
102 * corresponding bit is set in the tt enum passed into this function.
103 * See @event_triggers_call for details on how those bits are set.
104 *
105 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
106 */
107void
108event_triggers_post_call(struct trace_event_file *file,
109 enum event_trigger_type tt,
110 void *rec)
111{
112 struct event_trigger_data *data;
113
114 list_for_each_entry_rcu(data, &file->triggers, list) {
115 if (data->paused)
116 continue;
117 if (data->cmd_ops->trigger_type & tt)
118 data->ops->func(data, rec);
119 }
120}
121EXPORT_SYMBOL_GPL(event_triggers_post_call);
122
123#define SHOW_AVAILABLE_TRIGGERS (void *)(1UL)
124
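/*
 * seq_file implementation for an event's 'trigger' file: iterate over
 * the event's registered triggers, or, if none are set, emit the
 * SHOW_AVAILABLE_TRIGGERS marker so ->show() lists the available
 * trigger commands instead.
 */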
125static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
126{
127 struct trace_event_file *event_file = event_file_data(m->private);
128
129 if (t == SHOW_AVAILABLE_TRIGGERS)
130 return NULL;
131
132 return seq_list_next(t, &event_file->triggers, pos);
133}
134
135static void *trigger_start(struct seq_file *m, loff_t *pos)
136{
137 struct trace_event_file *event_file;
138
139 /* ->stop() is called even if ->start() fails */
140 mutex_lock(&event_mutex);
141 event_file = event_file_data(m->private);
142 if (unlikely(!event_file))
143 return ERR_PTR(-ENODEV);
144
145 if (list_empty(&event_file->triggers))
146 return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
147
148 return seq_list_start(&event_file->triggers, *pos);
149}
150
151static void trigger_stop(struct seq_file *m, void *t)
152{
153 mutex_unlock(&event_mutex);
154}
155
156static int trigger_show(struct seq_file *m, void *v)
157{
158 struct event_trigger_data *data;
159 struct event_command *p;
160
161 if (v == SHOW_AVAILABLE_TRIGGERS) {
162 seq_puts(m, "# Available triggers:\n");
163 seq_putc(m, '#');
164 mutex_lock(&trigger_cmd_mutex);
165 list_for_each_entry_reverse(p, &trigger_commands, list)
166 seq_printf(m, " %s", p->name);
167 seq_putc(m, '\n');
168 mutex_unlock(&trigger_cmd_mutex);
169 return 0;
170 }
171
172 data = list_entry(v, struct event_trigger_data, list);
173 data->ops->print(m, data->ops, data);
174
175 return 0;
176}
177
178static const struct seq_operations event_triggers_seq_ops = {
179 .start = trigger_start,
180 .next = trigger_next,
181 .stop = trigger_stop,
182 .show = trigger_show,
183};
184
185static int event_trigger_regex_open(struct inode *inode, struct file *file)
186{
187 int ret = 0;
188
189 mutex_lock(&event_mutex);
190
191 if (unlikely(!event_file_data(file))) {
192 mutex_unlock(&event_mutex);
193 return -ENODEV;
194 }
195
196 if ((file->f_mode & FMODE_WRITE) &&
197 (file->f_flags & O_TRUNC)) {
198 struct trace_event_file *event_file;
199 struct event_command *p;
200
201 event_file = event_file_data(file);
202
203 list_for_each_entry(p, &trigger_commands, list) {
204 if (p->unreg_all)
205 p->unreg_all(event_file);
206 }
207 }
208
209 if (file->f_mode & FMODE_READ) {
210 ret = seq_open(file, &event_triggers_seq_ops);
211 if (!ret) {
212 struct seq_file *m = file->private_data;
213 m->private = file;
214 }
215 }
216
217 mutex_unlock(&event_mutex);
218
219 return ret;
220}
221
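/*
 * Parse and dispatch one trigger command written to an event's
 * 'trigger' file.  The expected form is
 *
 *	command[:params] [if filter]
 *
 * and a leading '!' removes a previously set trigger.  For example
 * (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *	echo 'traceoff:5 if common_pid == 1234' > \
 *		/sys/kernel/tracing/events/sched/sched_switch/trigger
 */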
222static int trigger_process_regex(struct trace_event_file *file, char *buff)
223{
224 char *command, *next = buff;
225 struct event_command *p;
226 int ret = -EINVAL;
227
228 command = strsep(&next, ": \t");
229 command = (command[0] != '!') ? command : command + 1;
230
231 mutex_lock(&trigger_cmd_mutex);
232 list_for_each_entry(p, &trigger_commands, list) {
233 if (strcmp(p->name, command) == 0) {
234 ret = p->func(p, file, buff, command, next);
235 goto out_unlock;
236 }
237 }
238 out_unlock:
239 mutex_unlock(&trigger_cmd_mutex);
240
241 return ret;
242}
243
244static ssize_t event_trigger_regex_write(struct file *file,
245 const char __user *ubuf,
246 size_t cnt, loff_t *ppos)
247{
248 struct trace_event_file *event_file;
249 ssize_t ret;
250 char *buf;
251
252 if (!cnt)
253 return 0;
254
255 if (cnt >= PAGE_SIZE)
256 return -EINVAL;
257
258 buf = memdup_user_nul(ubuf, cnt);
259 if (IS_ERR(buf))
260 return PTR_ERR(buf);
261
262 strim(buf);
263
264 mutex_lock(&event_mutex);
265 event_file = event_file_data(file);
266 if (unlikely(!event_file)) {
267 mutex_unlock(&event_mutex);
268 kfree(buf);
269 return -ENODEV;
270 }
271 ret = trigger_process_regex(event_file, buf);
272 mutex_unlock(&event_mutex);
273
274 kfree(buf);
275 if (ret < 0)
276 goto out;
277
278 *ppos += cnt;
279 ret = cnt;
280 out:
281 return ret;
282}
283
284static int event_trigger_regex_release(struct inode *inode, struct file *file)
285{
286 mutex_lock(&event_mutex);
287
288 if (file->f_mode & FMODE_READ)
289 seq_release(inode, file);
290
291 mutex_unlock(&event_mutex);
292
293 return 0;
294}
295
296static ssize_t
297event_trigger_write(struct file *filp, const char __user *ubuf,
298 size_t cnt, loff_t *ppos)
299{
300 return event_trigger_regex_write(filp, ubuf, cnt, ppos);
301}
302
303static int
304event_trigger_open(struct inode *inode, struct file *filp)
305{
306 return event_trigger_regex_open(inode, filp);
307}
308
309static int
310event_trigger_release(struct inode *inode, struct file *file)
311{
312 return event_trigger_regex_release(inode, file);
313}
314
315const struct file_operations event_trigger_fops = {
316 .open = event_trigger_open,
317 .read = seq_read,
318 .write = event_trigger_write,
319 .llseek = tracing_lseek,
320 .release = event_trigger_release,
321};
322
323/*
324 * Currently we only register event commands from __init, so mark this
325 * __init too.
326 */
327__init int register_event_command(struct event_command *cmd)
328{
329 struct event_command *p;
330 int ret = 0;
331
332 mutex_lock(&trigger_cmd_mutex);
333 list_for_each_entry(p, &trigger_commands, list) {
334 if (strcmp(cmd->name, p->name) == 0) {
335 ret = -EBUSY;
336 goto out_unlock;
337 }
338 }
339 list_add(&cmd->list, &trigger_commands);
340 out_unlock:
341 mutex_unlock(&trigger_cmd_mutex);
342
343 return ret;
344}
345
346/*
347 * Currently we only unregister event commands from __init, so mark
348 * this __init too.
349 */
350static __init int unregister_event_command(struct event_command *cmd)
351{
352 struct event_command *p, *n;
353 int ret = -ENODEV;
354
355 mutex_lock(&trigger_cmd_mutex);
356 list_for_each_entry_safe(p, n, &trigger_commands, list) {
357 if (strcmp(cmd->name, p->name) == 0) {
358 ret = 0;
359 list_del_init(&p->list);
360 goto out_unlock;
361 }
362 }
363 out_unlock:
364 mutex_unlock(&trigger_cmd_mutex);
365
366 return ret;
367}
368
369/**
370 * event_trigger_print - Generic event_trigger_ops @print implementation
371 * @name: The name of the event trigger
372 * @m: The seq_file being printed to
373 * @data: Trigger-specific data
374 * @filter_str: filter_str to print, if present
375 *
376 * Common implementation for event triggers to print themselves.
377 *
378 * Usually wrapped by a function that simply sets the @name of the
379 * trigger command and then invokes this.
380 *
381 * Return: 0 on success, errno otherwise
382 */
383static int
384event_trigger_print(const char *name, struct seq_file *m,
385 void *data, char *filter_str)
386{
387 long count = (long)data;
388
389 seq_puts(m, name);
390
391 if (count == -1)
392 seq_puts(m, ":unlimited");
393 else
394 seq_printf(m, ":count=%ld", count);
395
396 if (filter_str)
397 seq_printf(m, " if %s\n", filter_str);
398 else
399 seq_putc(m, '\n');
400
401 return 0;
402}
403
404/**
405 * event_trigger_init - Generic event_trigger_ops @init implementation
406 * @ops: The trigger ops associated with the trigger
407 * @data: Trigger-specific data
408 *
409 * Common implementation of event trigger initialization.
410 *
411 * Usually used directly as the @init method in event trigger
412 * implementations.
413 *
414 * Return: 0 on success, errno otherwise
415 */
416int event_trigger_init(struct event_trigger_ops *ops,
417 struct event_trigger_data *data)
418{
419 data->ref++;
420 return 0;
421}
422
423/**
424 * event_trigger_free - Generic event_trigger_ops @free implementation
425 * @ops: The trigger ops associated with the trigger
426 * @data: Trigger-specific data
427 *
428 * Common implementation of event trigger de-initialization.
429 *
430 * Usually used directly as the @free method in event trigger
431 * implementations.
432 */
433static void
434event_trigger_free(struct event_trigger_ops *ops,
435 struct event_trigger_data *data)
436{
437 if (WARN_ON_ONCE(data->ref <= 0))
438 return;
439
440 data->ref--;
441 if (!data->ref)
442 trigger_data_free(data);
443}
444
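/*
 * Enable or disable "trigger mode" for an event file.  tm_ref counts
 * the triggers attached to the event: the first attached trigger sets
 * TRIGGER_MODE and soft-enables the event so its triggers are invoked
 * even if the event itself isn't being recorded; removing the last
 * trigger clears the mode again.
 */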
445int trace_event_trigger_enable_disable(struct trace_event_file *file,
446 int trigger_enable)
447{
448 int ret = 0;
449
450 if (trigger_enable) {
451 if (atomic_inc_return(&file->tm_ref) > 1)
452 return ret;
453 set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
454 ret = trace_event_enable_disable(file, 1, 1);
455 } else {
456 if (atomic_dec_return(&file->tm_ref) > 0)
457 return ret;
458 clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
459 ret = trace_event_enable_disable(file, 0, 1);
460 }
461
462 return ret;
463}
464
465/**
466 * clear_event_triggers - Clear all triggers associated with a trace array
467 * @tr: The trace array to clear
468 *
469 * For each trigger, the triggering event has its tm_ref decremented
470 * via trace_event_trigger_enable_disable(), and any associated event
471 * (in the case of enable/disable_event triggers) will have its sm_ref
472 * decremented via free()->trace_event_enable_disable(). That
473 * combination effectively reverses the soft-mode/trigger state added
474 * by trigger registration.
475 *
476 * Must be called with event_mutex held.
477 */
478void
479clear_event_triggers(struct trace_array *tr)
480{
481 struct trace_event_file *file;
482
483 list_for_each_entry(file, &tr->events, list) {
484 struct event_trigger_data *data;
485 list_for_each_entry_rcu(data, &file->triggers, list) {
486 trace_event_trigger_enable_disable(file, 0);
487 if (data->ops->free)
488 data->ops->free(data->ops, data);
489 }
490 }
491}
492
493/**
494 * update_cond_flag - Set or reset the TRIGGER_COND bit
495 * @file: The trace_event_file associated with the event
496 *
497 * If an event has triggers and any of those triggers has a filter or
498 * a post_trigger, trigger invocation needs to be deferred until after
499 * the current event has logged its data, and the event should have
500 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
501 * cleared.
502 */
503void update_cond_flag(struct trace_event_file *file)
504{
505 struct event_trigger_data *data;
506 bool set_cond = false;
507
508 list_for_each_entry_rcu(data, &file->triggers, list) {
509 if (data->filter || event_command_post_trigger(data->cmd_ops) ||
510 event_command_needs_rec(data->cmd_ops)) {
511 set_cond = true;
512 break;
513 }
514 }
515
516 if (set_cond)
517 set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
518 else
519 clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
520}
521
522/**
523 * register_trigger - Generic event_command @reg implementation
524 * @glob: The raw string used to register the trigger
525 * @ops: The trigger ops associated with the trigger
526 * @data: Trigger-specific data to associate with the trigger
527 * @file: The trace_event_file associated with the event
528 *
529 * Common implementation for event trigger registration.
530 *
531 * Usually used directly as the @reg method in event command
532 * implementations.
533 *
534 * Return: 0 on success, errno otherwise
535 */
536static int register_trigger(char *glob, struct event_trigger_ops *ops,
537 struct event_trigger_data *data,
538 struct trace_event_file *file)
539{
540 struct event_trigger_data *test;
541 int ret = 0;
542
543 list_for_each_entry_rcu(test, &file->triggers, list) {
544 if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
545 ret = -EEXIST;
546 goto out;
547 }
548 }
549
550 if (data->ops->init) {
551 ret = data->ops->init(data->ops, data);
552 if (ret < 0)
553 goto out;
554 }
555
556 list_add_rcu(&data->list, &file->triggers);
557 ret++;
558
559 update_cond_flag(file);
560 if (trace_event_trigger_enable_disable(file, 1) < 0) {
561 list_del_rcu(&data->list);
562 update_cond_flag(file);
563 ret--;
564 }
565out:
566 return ret;
567}
568
569/**
570 * unregister_trigger - Generic event_command @unreg implementation
571 * @glob: The raw string used to register the trigger
572 * @ops: The trigger ops associated with the trigger
573 * @test: Trigger-specific data used to find the trigger to remove
574 * @file: The trace_event_file associated with the event
575 *
576 * Common implementation for event trigger unregistration.
577 *
578 * Usually used directly as the @unreg method in event command
579 * implementations.
580 */
581void unregister_trigger(char *glob, struct event_trigger_ops *ops,
582 struct event_trigger_data *test,
583 struct trace_event_file *file)
584{
585 struct event_trigger_data *data;
586 bool unregistered = false;
587
588 list_for_each_entry_rcu(data, &file->triggers, list) {
589 if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
590 unregistered = true;
591 list_del_rcu(&data->list);
592 trace_event_trigger_enable_disable(file, 0);
593 update_cond_flag(file);
594 break;
595 }
596 }
597
598 if (unregistered && data->ops->free)
599 data->ops->free(data->ops, data);
600}
601
602/**
603 * event_trigger_callback - Generic event_command @func implementation
604 * @cmd_ops: The command ops, used for trigger registration
605 * @file: The trace_event_file associated with the event
606 * @glob: The raw string used to register the trigger
607 * @cmd: The cmd portion of the string used to register the trigger
608 * @param: The params portion of the string used to register the trigger
609 *
610 * Common implementation for event command parsing and trigger
611 * instantiation.
612 *
613 * Usually used directly as the @func method in event command
614 * implementations.
615 *
616 * Return: 0 on success, errno otherwise
617 */
618static int
619event_trigger_callback(struct event_command *cmd_ops,
620 struct trace_event_file *file,
621 char *glob, char *cmd, char *param)
622{
623 struct event_trigger_data *trigger_data;
624 struct event_trigger_ops *trigger_ops;
625 char *trigger = NULL;
626 char *number;
627 int ret;
628
629 /* separate the trigger from the filter (t:n [if filter]) */
630 if (param && isdigit(param[0]))
631 trigger = strsep(&param, " \t");
632
633 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
634
635 ret = -ENOMEM;
636 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
637 if (!trigger_data)
638 goto out;
639
640 trigger_data->count = -1;
641 trigger_data->ops = trigger_ops;
642 trigger_data->cmd_ops = cmd_ops;
643 INIT_LIST_HEAD(&trigger_data->list);
644
645 if (glob[0] == '!') {
646 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
647 kfree(trigger_data);
648 ret = 0;
649 goto out;
650 }
651
652 if (trigger) {
653 number = strsep(&trigger, ":");
654
655 ret = -EINVAL;
656 if (!strlen(number))
657 goto out_free;
658
659 /*
660 * We use the callback data field (which is a pointer)
661 * as our counter.
662 */
663 ret = kstrtoul(number, 0, &trigger_data->count);
664 if (ret)
665 goto out_free;
666 }
667
668 if (!param) /* if param is non-empty, it's supposed to be a filter */
669 goto out_reg;
670
671 if (!cmd_ops->set_filter)
672 goto out_reg;
673
674 ret = cmd_ops->set_filter(param, trigger_data, file);
675 if (ret < 0)
676 goto out_free;
677
678 out_reg:
679 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
680 /*
681 * The above returns on success the # of functions enabled,
682 * but if it didn't find any functions it returns zero.
683 * Consider no functions a failure too.
684 */
685 if (!ret) {
686 ret = -ENOENT;
687 goto out_free;
688 } else if (ret < 0)
689 goto out_free;
690 ret = 0;
691 out:
692 return ret;
693
694 out_free:
695 if (cmd_ops->set_filter)
696 cmd_ops->set_filter(NULL, trigger_data, NULL);
697 kfree(trigger_data);
698 goto out;
699}
700
701/**
702 * set_trigger_filter - Generic event_command @set_filter implementation
703 * @filter_str: The filter string for the trigger, NULL to remove filter
704 * @trigger_data: Trigger-specific data
705 * @file: The trace_event_file associated with the event
706 *
707 * Common implementation for event command filter parsing and filter
708 * instantiation.
709 *
710 * Usually used directly as the @set_filter method in event command
711 * implementations.
712 *
713 * Also used to remove a filter (if filter_str = NULL).
714 *
715 * Return: 0 on success, errno otherwise
716 */
717int set_trigger_filter(char *filter_str,
718 struct event_trigger_data *trigger_data,
719 struct trace_event_file *file)
720{
721 struct event_trigger_data *data = trigger_data;
722 struct event_filter *filter = NULL, *tmp;
723 int ret = -EINVAL;
724 char *s;
725
726 if (!filter_str) /* clear the current filter */
727 goto assign;
728
729 s = strsep(&filter_str, " \t");
730
731 if (!strlen(s) || strcmp(s, "if") != 0)
732 goto out;
733
734 if (!filter_str)
735 goto out;
736
737 /* The filter is for the 'trigger' event, not the triggered event */
738 ret = create_event_filter(file->event_call, filter_str, false, &filter);
739 if (ret)
740 goto out;
741 assign:
742 tmp = rcu_access_pointer(data->filter);
743
744 rcu_assign_pointer(data->filter, filter);
745
746 if (tmp) {
747 /* Make sure the call is done with the filter */
748 synchronize_sched();
749 free_event_filter(tmp);
750 }
751
752 kfree(data->filter_str);
753 data->filter_str = NULL;
754
755 if (filter_str) {
756 data->filter_str = kstrdup(filter_str, GFP_KERNEL);
757 if (!data->filter_str) {
758 free_event_filter(rcu_access_pointer(data->filter));
759 data->filter = NULL;
760 ret = -ENOMEM;
761 }
762 }
763 out:
764 return ret;
765}
766
767static void
768traceon_trigger(struct event_trigger_data *data, void *rec)
769{
770 if (tracing_is_on())
771 return;
772
773 tracing_on();
774}
775
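/*
 * Count-limited variant (the same convention is used by the other
 * *_count_trigger helpers below): data->count of -1 means "unlimited";
 * otherwise the count is decremented each time the trigger fires and
 * the trigger stops firing once it reaches zero.
 */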
776static void
777traceon_count_trigger(struct event_trigger_data *data, void *rec)
778{
779 if (tracing_is_on())
780 return;
781
782 if (!data->count)
783 return;
784
785 if (data->count != -1)
786 (data->count)--;
787
788 tracing_on();
789}
790
791static void
792traceoff_trigger(struct event_trigger_data *data, void *rec)
793{
794 if (!tracing_is_on())
795 return;
796
797 tracing_off();
798}
799
800static void
801traceoff_count_trigger(struct event_trigger_data *data, void *rec)
802{
803 if (!tracing_is_on())
804 return;
805
806 if (!data->count)
807 return;
808
809 if (data->count != -1)
810 (data->count)--;
811
812 tracing_off();
813}
814
815static int
816traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
817 struct event_trigger_data *data)
818{
819 return event_trigger_print("traceon", m, (void *)data->count,
820 data->filter_str);
821}
822
823static int
824traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
825 struct event_trigger_data *data)
826{
827 return event_trigger_print("traceoff", m, (void *)data->count,
828 data->filter_str);
829}
830
831static struct event_trigger_ops traceon_trigger_ops = {
832 .func = traceon_trigger,
833 .print = traceon_trigger_print,
834 .init = event_trigger_init,
835 .free = event_trigger_free,
836};
837
838static struct event_trigger_ops traceon_count_trigger_ops = {
839 .func = traceon_count_trigger,
840 .print = traceon_trigger_print,
841 .init = event_trigger_init,
842 .free = event_trigger_free,
843};
844
845static struct event_trigger_ops traceoff_trigger_ops = {
846 .func = traceoff_trigger,
847 .print = traceoff_trigger_print,
848 .init = event_trigger_init,
849 .free = event_trigger_free,
850};
851
852static struct event_trigger_ops traceoff_count_trigger_ops = {
853 .func = traceoff_count_trigger,
854 .print = traceoff_trigger_print,
855 .init = event_trigger_init,
856 .free = event_trigger_free,
857};
858
859static struct event_trigger_ops *
860onoff_get_trigger_ops(char *cmd, char *param)
861{
862 struct event_trigger_ops *ops;
863
864 /* we register both traceon and traceoff to this callback */
865 if (strcmp(cmd, "traceon") == 0)
866 ops = param ? &traceon_count_trigger_ops :
867 &traceon_trigger_ops;
868 else
869 ops = param ? &traceoff_count_trigger_ops :
870 &traceoff_trigger_ops;
871
872 return ops;
873}
874
875static struct event_command trigger_traceon_cmd = {
876 .name = "traceon",
877 .trigger_type = ETT_TRACE_ONOFF,
878 .func = event_trigger_callback,
879 .reg = register_trigger,
880 .unreg = unregister_trigger,
881 .get_trigger_ops = onoff_get_trigger_ops,
882 .set_filter = set_trigger_filter,
883};
884
885static struct event_command trigger_traceoff_cmd = {
886 .name = "traceoff",
887 .trigger_type = ETT_TRACE_ONOFF,
888 .func = event_trigger_callback,
889 .reg = register_trigger,
890 .unreg = unregister_trigger,
891 .get_trigger_ops = onoff_get_trigger_ops,
892 .set_filter = set_trigger_filter,
893};
894
895#ifdef CONFIG_TRACER_SNAPSHOT
896static void
897snapshot_trigger(struct event_trigger_data *data, void *rec)
898{
899 tracing_snapshot();
900}
901
902static void
903snapshot_count_trigger(struct event_trigger_data *data, void *rec)
904{
905 if (!data->count)
906 return;
907
908 if (data->count != -1)
909 (data->count)--;
910
911 snapshot_trigger(data, rec);
912}
913
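/*
 * Like register_trigger(), but also make sure the snapshot buffer is
 * allocated; if the allocation fails, back the registration out again.
 */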
914static int
915register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
916 struct event_trigger_data *data,
917 struct trace_event_file *file)
918{
919 int ret = register_trigger(glob, ops, data, file);
920
921 if (ret > 0 && tracing_alloc_snapshot() != 0) {
922 unregister_trigger(glob, ops, data, file);
923 ret = 0;
924 }
925
926 return ret;
927}
928
929static int
930snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
931 struct event_trigger_data *data)
932{
933 return event_trigger_print("snapshot", m, (void *)data->count,
934 data->filter_str);
935}
936
937static struct event_trigger_ops snapshot_trigger_ops = {
938 .func = snapshot_trigger,
939 .print = snapshot_trigger_print,
940 .init = event_trigger_init,
941 .free = event_trigger_free,
942};
943
944static struct event_trigger_ops snapshot_count_trigger_ops = {
945 .func = snapshot_count_trigger,
946 .print = snapshot_trigger_print,
947 .init = event_trigger_init,
948 .free = event_trigger_free,
949};
950
951static struct event_trigger_ops *
952snapshot_get_trigger_ops(char *cmd, char *param)
953{
954 return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
955}
956
957static struct event_command trigger_snapshot_cmd = {
958 .name = "snapshot",
959 .trigger_type = ETT_SNAPSHOT,
960 .func = event_trigger_callback,
961 .reg = register_snapshot_trigger,
962 .unreg = unregister_trigger,
963 .get_trigger_ops = snapshot_get_trigger_ops,
964 .set_filter = set_trigger_filter,
965};
966
967static __init int register_trigger_snapshot_cmd(void)
968{
969 int ret;
970
971 ret = register_event_command(&trigger_snapshot_cmd);
972 WARN_ON(ret < 0);
973
974 return ret;
975}
976#else
977static __init int register_trigger_snapshot_cmd(void) { return 0; }
978#endif /* CONFIG_TRACER_SNAPSHOT */
979
980#ifdef CONFIG_STACKTRACE
981/*
982 * Skip 3:
983 * stacktrace_trigger()
984 * event_triggers_post_call()
985 * trace_event_raw_event_xxx()
986 */
987#define STACK_SKIP 3
988
989static void
990stacktrace_trigger(struct event_trigger_data *data, void *rec)
991{
992 trace_dump_stack(STACK_SKIP);
993}
994
995static void
996stacktrace_count_trigger(struct event_trigger_data *data, void *rec)
997{
998 if (!data->count)
999 return;
1000
1001 if (data->count != -1)
1002 (data->count)--;
1003
1004 stacktrace_trigger(data, rec);
1005}
1006
1007static int
1008stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1009 struct event_trigger_data *data)
1010{
1011 return event_trigger_print("stacktrace", m, (void *)data->count,
1012 data->filter_str);
1013}
1014
1015static struct event_trigger_ops stacktrace_trigger_ops = {
1016 .func = stacktrace_trigger,
1017 .print = stacktrace_trigger_print,
1018 .init = event_trigger_init,
1019 .free = event_trigger_free,
1020};
1021
1022static struct event_trigger_ops stacktrace_count_trigger_ops = {
1023 .func = stacktrace_count_trigger,
1024 .print = stacktrace_trigger_print,
1025 .init = event_trigger_init,
1026 .free = event_trigger_free,
1027};
1028
1029static struct event_trigger_ops *
1030stacktrace_get_trigger_ops(char *cmd, char *param)
1031{
1032 return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1033}
1034
1035static struct event_command trigger_stacktrace_cmd = {
1036 .name = "stacktrace",
1037 .trigger_type = ETT_STACKTRACE,
1038 .flags = EVENT_CMD_FL_POST_TRIGGER,
1039 .func = event_trigger_callback,
1040 .reg = register_trigger,
1041 .unreg = unregister_trigger,
1042 .get_trigger_ops = stacktrace_get_trigger_ops,
1043 .set_filter = set_trigger_filter,
1044};
1045
1046static __init int register_trigger_stacktrace_cmd(void)
1047{
1048 int ret;
1049
1050 ret = register_event_command(&trigger_stacktrace_cmd);
1051 WARN_ON(ret < 0);
1052
1053 return ret;
1054}
1055#else
1056static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1057#endif /* CONFIG_STACKTRACE */
1058
1059static __init void unregister_trigger_traceon_traceoff_cmds(void)
1060{
1061 unregister_event_command(&trigger_traceon_cmd);
1062 unregister_event_command(&trigger_traceoff_cmd);
1063}
1064
1065/* Avoid typos */
1066#define ENABLE_EVENT_STR "enable_event"
1067#define DISABLE_EVENT_STR "disable_event"
1068
1069struct enable_trigger_data {
1070 struct trace_event_file *file;
1071 bool enable;
1072};
1073
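/*
 * enable_event/disable_event trigger: flip the SOFT_DISABLED bit of
 * the *target* event file, turning recording of that event on or off
 * from the context of the triggering event.
 */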
1074static void
1075event_enable_trigger(struct event_trigger_data *data, void *rec)
1076{
1077 struct enable_trigger_data *enable_data = data->private_data;
1078
1079 if (enable_data->enable)
1080 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1081 else
1082 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1083}
1084
1085static void
1086event_enable_count_trigger(struct event_trigger_data *data, void *rec)
1087{
1088 struct enable_trigger_data *enable_data = data->private_data;
1089
1090 if (!data->count)
1091 return;
1092
1093 /* Skip if the event is in a state we want to switch to */
1094 if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1095 return;
1096
1097 if (data->count != -1)
1098 (data->count)--;
1099
1100 event_enable_trigger(data, rec);
1101}
1102
1103static int
1104event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1105 struct event_trigger_data *data)
1106{
1107 struct enable_trigger_data *enable_data = data->private_data;
1108
1109 seq_printf(m, "%s:%s:%s",
1110 enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
1111 enable_data->file->event_call->class->system,
1112 trace_event_name(enable_data->file->event_call));
1113
1114 if (data->count == -1)
1115 seq_puts(m, ":unlimited");
1116 else
1117 seq_printf(m, ":count=%ld", data->count);
1118
1119 if (data->filter_str)
1120 seq_printf(m, " if %s\n", data->filter_str);
1121 else
1122 seq_putc(m, '\n');
1123
1124 return 0;
1125}
1126
1127static void
1128event_enable_trigger_free(struct event_trigger_ops *ops,
1129 struct event_trigger_data *data)
1130{
1131 struct enable_trigger_data *enable_data = data->private_data;
1132
1133 if (WARN_ON_ONCE(data->ref <= 0))
1134 return;
1135
1136 data->ref--;
1137 if (!data->ref) {
1138 /* Remove the SOFT_MODE flag */
1139 trace_event_enable_disable(enable_data->file, 0, 1);
1140 module_put(enable_data->file->event_call->mod);
1141 trigger_data_free(data);
1142 kfree(enable_data);
1143 }
1144}
1145
1146static struct event_trigger_ops event_enable_trigger_ops = {
1147 .func = event_enable_trigger,
1148 .print = event_enable_trigger_print,
1149 .init = event_trigger_init,
1150 .free = event_enable_trigger_free,
1151};
1152
1153static struct event_trigger_ops event_enable_count_trigger_ops = {
1154 .func = event_enable_count_trigger,
1155 .print = event_enable_trigger_print,
1156 .init = event_trigger_init,
1157 .free = event_enable_trigger_free,
1158};
1159
1160static struct event_trigger_ops event_disable_trigger_ops = {
1161 .func = event_enable_trigger,
1162 .print = event_enable_trigger_print,
1163 .init = event_trigger_init,
1164 .free = event_enable_trigger_free,
1165};
1166
1167static struct event_trigger_ops event_disable_count_trigger_ops = {
1168 .func = event_enable_count_trigger,
1169 .print = event_enable_trigger_print,
1170 .init = event_trigger_init,
1171 .free = event_enable_trigger_free,
1172};
1173
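/*
 * Parse an enable_event/disable_event trigger: the param has the form
 * system:event[:count] [if filter].  Look up the target event, pin its
 * module, put the target into "soft mode" and then register the
 * trigger on the triggering event.
 */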
1174static int
1175event_enable_trigger_func(struct event_command *cmd_ops,
1176 struct trace_event_file *file,
1177 char *glob, char *cmd, char *param)
1178{
1179 struct trace_event_file *event_enable_file;
1180 struct enable_trigger_data *enable_data;
1181 struct event_trigger_data *trigger_data;
1182 struct event_trigger_ops *trigger_ops;
1183 struct trace_array *tr = file->tr;
1184 const char *system;
1185 const char *event;
1186 char *trigger;
1187 char *number;
1188 bool enable;
1189 int ret;
1190
1191 if (!param)
1192 return -EINVAL;
1193
1194 /* separate the trigger from the filter (s:e:n [if filter]) */
1195 trigger = strsep(&param, " \t");
1196 if (!trigger)
1197 return -EINVAL;
1198
1199 system = strsep(&trigger, ":");
1200 if (!trigger)
1201 return -EINVAL;
1202
1203 event = strsep(&trigger, ":");
1204
1205 ret = -EINVAL;
1206 event_enable_file = find_event_file(tr, system, event);
1207 if (!event_enable_file)
1208 goto out;
1209
1210 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1211
1212 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1213
1214 ret = -ENOMEM;
1215 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1216 if (!trigger_data)
1217 goto out;
1218
1219 enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1220 if (!enable_data) {
1221 kfree(trigger_data);
1222 goto out;
1223 }
1224
1225 trigger_data->count = -1;
1226 trigger_data->ops = trigger_ops;
1227 trigger_data->cmd_ops = cmd_ops;
1228 INIT_LIST_HEAD(&trigger_data->list);
1229 RCU_INIT_POINTER(trigger_data->filter, NULL);
1230
1231 enable_data->enable = enable;
1232 enable_data->file = event_enable_file;
1233 trigger_data->private_data = enable_data;
1234
1235 if (glob[0] == '!') {
1236 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1237 kfree(trigger_data);
1238 kfree(enable_data);
1239 ret = 0;
1240 goto out;
1241 }
1242
1243 if (trigger) {
1244 number = strsep(&trigger, ":");
1245
1246 ret = -EINVAL;
1247 if (!strlen(number))
1248 goto out_free;
1249
1250 /*
1251 * We use the callback data field (which is a pointer)
1252 * as our counter.
1253 */
1254 ret = kstrtoul(number, 0, &trigger_data->count);
1255 if (ret)
1256 goto out_free;
1257 }
1258
1259 if (!param) /* if param is non-empty, it's supposed to be a filter */
1260 goto out_reg;
1261
1262 if (!cmd_ops->set_filter)
1263 goto out_reg;
1264
1265 ret = cmd_ops->set_filter(param, trigger_data, file);
1266 if (ret < 0)
1267 goto out_free;
1268
1269 out_reg:
1270 /* Don't let event modules unload while probe registered */
1271 ret = try_module_get(event_enable_file->event_call->mod);
1272 if (!ret) {
1273 ret = -EBUSY;
1274 goto out_free;
1275 }
1276
1277 ret = trace_event_enable_disable(event_enable_file, 1, 1);
1278 if (ret < 0)
1279 goto out_put;
1280 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1281 /*
1282 * The above returns on success the # of functions enabled,
1283 * but if it didn't find any functions it returns zero.
1284 * Consider no functions a failure too.
1285 */
1286 if (!ret) {
1287 ret = -ENOENT;
1288 goto out_disable;
1289 } else if (ret < 0)
1290 goto out_disable;
1291 /* Just return zero, not the number of enabled functions */
1292 ret = 0;
1293 out:
1294 return ret;
1295
1296 out_disable:
1297 trace_event_enable_disable(event_enable_file, 0, 1);
1298 out_put:
1299 module_put(event_enable_file->event_call->mod);
1300 out_free:
1301 if (cmd_ops->set_filter)
1302 cmd_ops->set_filter(NULL, trigger_data, NULL);
1303 kfree(trigger_data);
1304 kfree(enable_data);
1305 goto out;
1306}
1307
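/*
 * Like register_trigger(), but two enable/disable_event triggers only
 * conflict if they act on the same target event file, so several of
 * them can coexist on one triggering event.
 */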
1308static int event_enable_register_trigger(char *glob,
1309 struct event_trigger_ops *ops,
1310 struct event_trigger_data *data,
1311 struct trace_event_file *file)
1312{
1313 struct enable_trigger_data *enable_data = data->private_data;
1314 struct enable_trigger_data *test_enable_data;
1315 struct event_trigger_data *test;
1316 int ret = 0;
1317
1318 list_for_each_entry_rcu(test, &file->triggers, list) {
1319 test_enable_data = test->private_data;
1320 if (test_enable_data &&
1321 (test_enable_data->file == enable_data->file)) {
1322 ret = -EEXIST;
1323 goto out;
1324 }
1325 }
1326
1327 if (data->ops->init) {
1328 ret = data->ops->init(data->ops, data);
1329 if (ret < 0)
1330 goto out;
1331 }
1332
1333 list_add_rcu(&data->list, &file->triggers);
1334 ret++;
1335
1336 update_cond_flag(file);
1337 if (trace_event_trigger_enable_disable(file, 1) < 0) {
1338 list_del_rcu(&data->list);
1339 update_cond_flag(file);
1340 ret--;
1341 }
1342out:
1343 return ret;
1344}
1345
1346static void event_enable_unregister_trigger(char *glob,
1347 struct event_trigger_ops *ops,
1348 struct event_trigger_data *test,
1349 struct trace_event_file *file)
1350{
1351 struct enable_trigger_data *test_enable_data = test->private_data;
1352 struct enable_trigger_data *enable_data;
1353 struct event_trigger_data *data;
1354 bool unregistered = false;
1355
1356 list_for_each_entry_rcu(data, &file->triggers, list) {
1357 enable_data = data->private_data;
1358 if (enable_data &&
1359 (enable_data->file == test_enable_data->file)) {
1360 unregistered = true;
1361 list_del_rcu(&data->list);
1362 trace_event_trigger_enable_disable(file, 0);
1363 update_cond_flag(file);
1364 break;
1365 }
1366 }
1367
1368 if (unregistered && data->ops->free)
1369 data->ops->free(data->ops, data);
1370}
1371
1372static struct event_trigger_ops *
1373event_enable_get_trigger_ops(char *cmd, char *param)
1374{
1375 struct event_trigger_ops *ops;
1376 bool enable;
1377
1378 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1379
1380 if (enable)
1381 ops = param ? &event_enable_count_trigger_ops :
1382 &event_enable_trigger_ops;
1383 else
1384 ops = param ? &event_disable_count_trigger_ops :
1385 &event_disable_trigger_ops;
1386
1387 return ops;
1388}
1389
1390static struct event_command trigger_enable_cmd = {
1391 .name = ENABLE_EVENT_STR,
1392 .trigger_type = ETT_EVENT_ENABLE,
1393 .func = event_enable_trigger_func,
1394 .reg = event_enable_register_trigger,
1395 .unreg = event_enable_unregister_trigger,
1396 .get_trigger_ops = event_enable_get_trigger_ops,
1397 .set_filter = set_trigger_filter,
1398};
1399
1400static struct event_command trigger_disable_cmd = {
1401 .name = DISABLE_EVENT_STR,
1402 .trigger_type = ETT_EVENT_ENABLE,
1403 .func = event_enable_trigger_func,
1404 .reg = event_enable_register_trigger,
1405 .unreg = event_enable_unregister_trigger,
1406 .get_trigger_ops = event_enable_get_trigger_ops,
1407 .set_filter = set_trigger_filter,
1408};
1409
1410static __init void unregister_trigger_enable_disable_cmds(void)
1411{
1412 unregister_event_command(&trigger_enable_cmd);
1413 unregister_event_command(&trigger_disable_cmd);
1414}
1415
1416static __init int register_trigger_enable_disable_cmds(void)
1417{
1418 int ret;
1419
1420 ret = register_event_command(&trigger_enable_cmd);
1421 if (WARN_ON(ret < 0))
1422 return ret;
1423 ret = register_event_command(&trigger_disable_cmd);
1424 if (WARN_ON(ret < 0))
1425 unregister_trigger_enable_disable_cmds();
1426
1427 return ret;
1428}
1429
1430static __init int register_trigger_traceon_traceoff_cmds(void)
1431{
1432 int ret;
1433
1434 ret = register_event_command(&trigger_traceon_cmd);
1435 if (WARN_ON(ret < 0))
1436 return ret;
1437 ret = register_event_command(&trigger_traceoff_cmd);
1438 if (WARN_ON(ret < 0))
1439 unregister_trigger_traceon_traceoff_cmds();
1440
1441 return ret;
1442}
1443
1444__init int register_trigger_cmds(void)
1445{
1446 register_trigger_traceon_traceoff_cmds();
1447 register_trigger_snapshot_cmd();
1448 register_trigger_stacktrace_cmd();
1449 register_trigger_enable_disable_cmds();
1450
1451 return 0;
1452}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * trace_events_trigger - trace event triggers
4 *
5 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
6 */
7
8#include <linux/security.h>
9#include <linux/module.h>
10#include <linux/ctype.h>
11#include <linux/mutex.h>
12#include <linux/slab.h>
13#include <linux/rculist.h>
14
15#include "trace.h"
16
17static LIST_HEAD(trigger_commands);
18static DEFINE_MUTEX(trigger_cmd_mutex);
19
20void trigger_data_free(struct event_trigger_data *data)
21{
22 if (data->cmd_ops->set_filter)
23 data->cmd_ops->set_filter(NULL, data, NULL);
24
25 /* make sure current triggers exit before free */
26 tracepoint_synchronize_unregister();
27
28 kfree(data);
29}
30
31/**
32 * event_triggers_call - Call triggers associated with a trace event
33 * @file: The trace_event_file associated with the event
34 * @buffer: The ring buffer that the event is being written to
35 * @rec: The trace entry for the event, NULL for unconditional invocation
36 * @event: The event meta data in the ring buffer
37 *
38 * For each trigger associated with an event, invoke the trigger
39 * function registered with the associated trigger command. If rec is
40 * non-NULL, it means that the trigger requires further processing and
41 * shouldn't be unconditionally invoked. If rec is non-NULL and the
42 * trigger has a filter associated with it, rec will be checked against
43 * the filter and, if the record matches, the trigger will be invoked.
44 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
45 * in any case until the current event is written, the trigger
46 * function isn't invoked but the bit associated with the deferred
47 * trigger is set in the return value.
48 *
52 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
53 *
54 * Return: an enum event_trigger_type value containing a set bit for
55 * any trigger that should be deferred, ETT_NONE if nothing to defer.
56 */
57enum event_trigger_type
58event_triggers_call(struct trace_event_file *file,
59 struct trace_buffer *buffer, void *rec,
60 struct ring_buffer_event *event)
61{
62 struct event_trigger_data *data;
63 enum event_trigger_type tt = ETT_NONE;
64 struct event_filter *filter;
65
66 if (list_empty(&file->triggers))
67 return tt;
68
69 list_for_each_entry_rcu(data, &file->triggers, list) {
70 if (data->paused)
71 continue;
72 if (!rec) {
73 data->ops->trigger(data, buffer, rec, event);
74 continue;
75 }
76 filter = rcu_dereference_sched(data->filter);
77 if (filter && !filter_match_preds(filter, rec))
78 continue;
79 if (event_command_post_trigger(data->cmd_ops)) {
80 tt |= data->cmd_ops->trigger_type;
81 continue;
82 }
83 data->ops->trigger(data, buffer, rec, event);
84 }
85 return tt;
86}
87EXPORT_SYMBOL_GPL(event_triggers_call);
88
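/*
 * Helper for the tracing fast path: if the event file is in trigger
 * mode, run its unconditional triggers, then report whether the event
 * itself should be skipped (soft-disabled or filtered out by PID).
 */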
89bool __trace_trigger_soft_disabled(struct trace_event_file *file)
90{
91 unsigned long eflags = file->flags;
92
93 if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
94 event_triggers_call(file, NULL, NULL, NULL);
95 if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
96 return true;
97 if (eflags & EVENT_FILE_FL_PID_FILTER)
98 return trace_event_ignore_this_pid(file);
99 return false;
100}
101EXPORT_SYMBOL_GPL(__trace_trigger_soft_disabled);
102
103/**
104 * event_triggers_post_call - Call 'post_triggers' for a trace event
105 * @file: The trace_event_file associated with the event
106 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
107 *
108 * For each trigger associated with an event, invoke the trigger
109 * function registered with the associated trigger command, if the
110 * corresponding bit is set in the tt enum passed into this function.
111 * See @event_triggers_call for details on how those bits are set.
112 *
113 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
114 */
115void
116event_triggers_post_call(struct trace_event_file *file,
117 enum event_trigger_type tt)
118{
119 struct event_trigger_data *data;
120
121 list_for_each_entry_rcu(data, &file->triggers, list) {
122 if (data->paused)
123 continue;
124 if (data->cmd_ops->trigger_type & tt)
125 data->ops->trigger(data, NULL, NULL, NULL);
126 }
127}
128EXPORT_SYMBOL_GPL(event_triggers_post_call);
129
130#define SHOW_AVAILABLE_TRIGGERS (void *)(1UL)
131
132static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
133{
134 struct trace_event_file *event_file = event_file_data(m->private);
135
136 if (t == SHOW_AVAILABLE_TRIGGERS) {
137 (*pos)++;
138 return NULL;
139 }
140 return seq_list_next(t, &event_file->triggers, pos);
141}
142
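/*
 * Return true if the event has at least one trigger that was set up by
 * a user (i.e. not an internal EVENT_TRIGGER_FL_PROBE trigger); used by
 * trigger_start() to decide whether to list the event's triggers or the
 * available trigger commands.
 */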
143static bool check_user_trigger(struct trace_event_file *file)
144{
145 struct event_trigger_data *data;
146
147 list_for_each_entry_rcu(data, &file->triggers, list,
148 lockdep_is_held(&event_mutex)) {
149 if (data->flags & EVENT_TRIGGER_FL_PROBE)
150 continue;
151 return true;
152 }
153 return false;
154}
155
156static void *trigger_start(struct seq_file *m, loff_t *pos)
157{
158 struct trace_event_file *event_file;
159
160 /* ->stop() is called even if ->start() fails */
161 mutex_lock(&event_mutex);
162 event_file = event_file_data(m->private);
163 if (unlikely(!event_file))
164 return ERR_PTR(-ENODEV);
165
166 if (list_empty(&event_file->triggers) || !check_user_trigger(event_file))
167 return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
168
169 return seq_list_start(&event_file->triggers, *pos);
170}
171
172static void trigger_stop(struct seq_file *m, void *t)
173{
174 mutex_unlock(&event_mutex);
175}
176
177static int trigger_show(struct seq_file *m, void *v)
178{
179 struct event_trigger_data *data;
180 struct event_command *p;
181
182 if (v == SHOW_AVAILABLE_TRIGGERS) {
183 seq_puts(m, "# Available triggers:\n");
184 seq_putc(m, '#');
185 mutex_lock(&trigger_cmd_mutex);
186 list_for_each_entry_reverse(p, &trigger_commands, list)
187 seq_printf(m, " %s", p->name);
188 seq_putc(m, '\n');
189 mutex_unlock(&trigger_cmd_mutex);
190 return 0;
191 }
192
193 data = list_entry(v, struct event_trigger_data, list);
194 data->ops->print(m, data);
195
196 return 0;
197}
198
199static const struct seq_operations event_triggers_seq_ops = {
200 .start = trigger_start,
201 .next = trigger_next,
202 .stop = trigger_stop,
203 .show = trigger_show,
204};
205
206static int event_trigger_regex_open(struct inode *inode, struct file *file)
207{
208 int ret;
209
210 ret = security_locked_down(LOCKDOWN_TRACEFS);
211 if (ret)
212 return ret;
213
214 mutex_lock(&event_mutex);
215
216 if (unlikely(!event_file_data(file))) {
217 mutex_unlock(&event_mutex);
218 return -ENODEV;
219 }
220
221 if ((file->f_mode & FMODE_WRITE) &&
222 (file->f_flags & O_TRUNC)) {
223 struct trace_event_file *event_file;
224 struct event_command *p;
225
226 event_file = event_file_data(file);
227
228 list_for_each_entry(p, &trigger_commands, list) {
229 if (p->unreg_all)
230 p->unreg_all(event_file);
231 }
232 }
233
234 if (file->f_mode & FMODE_READ) {
235 ret = seq_open(file, &event_triggers_seq_ops);
236 if (!ret) {
237 struct seq_file *m = file->private_data;
238 m->private = file;
239 }
240 }
241
242 mutex_unlock(&event_mutex);
243
244 return ret;
245}
246
247int trigger_process_regex(struct trace_event_file *file, char *buff)
248{
249 char *command, *next;
250 struct event_command *p;
251 int ret = -EINVAL;
252
253 next = buff = skip_spaces(buff);
254 command = strsep(&next, ": \t");
255 if (next) {
256 next = skip_spaces(next);
257 if (!*next)
258 next = NULL;
259 }
260 command = (command[0] != '!') ? command : command + 1;
261
262 mutex_lock(&trigger_cmd_mutex);
263 list_for_each_entry(p, &trigger_commands, list) {
264 if (strcmp(p->name, command) == 0) {
265 ret = p->parse(p, file, buff, command, next);
266 goto out_unlock;
267 }
268 }
269 out_unlock:
270 mutex_unlock(&trigger_cmd_mutex);
271
272 return ret;
273}
274
275static ssize_t event_trigger_regex_write(struct file *file,
276 const char __user *ubuf,
277 size_t cnt, loff_t *ppos)
278{
279 struct trace_event_file *event_file;
280 ssize_t ret;
281 char *buf;
282
283 if (!cnt)
284 return 0;
285
286 if (cnt >= PAGE_SIZE)
287 return -EINVAL;
288
289 buf = memdup_user_nul(ubuf, cnt);
290 if (IS_ERR(buf))
291 return PTR_ERR(buf);
292
293 strim(buf);
294
295 mutex_lock(&event_mutex);
296 event_file = event_file_data(file);
297 if (unlikely(!event_file)) {
298 mutex_unlock(&event_mutex);
299 kfree(buf);
300 return -ENODEV;
301 }
302 ret = trigger_process_regex(event_file, buf);
303 mutex_unlock(&event_mutex);
304
305 kfree(buf);
306 if (ret < 0)
307 goto out;
308
309 *ppos += cnt;
310 ret = cnt;
311 out:
312 return ret;
313}
314
315static int event_trigger_regex_release(struct inode *inode, struct file *file)
316{
317 mutex_lock(&event_mutex);
318
319 if (file->f_mode & FMODE_READ)
320 seq_release(inode, file);
321
322 mutex_unlock(&event_mutex);
323
324 return 0;
325}
326
327static ssize_t
328event_trigger_write(struct file *filp, const char __user *ubuf,
329 size_t cnt, loff_t *ppos)
330{
331 return event_trigger_regex_write(filp, ubuf, cnt, ppos);
332}
333
334static int
335event_trigger_open(struct inode *inode, struct file *filp)
336{
337 /* Checks for tracefs lockdown */
338 return event_trigger_regex_open(inode, filp);
339}
340
341static int
342event_trigger_release(struct inode *inode, struct file *file)
343{
344 return event_trigger_regex_release(inode, file);
345}
346
347const struct file_operations event_trigger_fops = {
348 .open = event_trigger_open,
349 .read = seq_read,
350 .write = event_trigger_write,
351 .llseek = tracing_lseek,
352 .release = event_trigger_release,
353};
354
355/*
356 * Currently we only register event commands from __init, so mark this
357 * __init too.
358 */
359__init int register_event_command(struct event_command *cmd)
360{
361 struct event_command *p;
362 int ret = 0;
363
364 mutex_lock(&trigger_cmd_mutex);
365 list_for_each_entry(p, &trigger_commands, list) {
366 if (strcmp(cmd->name, p->name) == 0) {
367 ret = -EBUSY;
368 goto out_unlock;
369 }
370 }
371 list_add(&cmd->list, &trigger_commands);
372 out_unlock:
373 mutex_unlock(&trigger_cmd_mutex);
374
375 return ret;
376}
377
378/*
379 * Currently we only unregister event commands from __init, so mark
380 * this __init too.
381 */
382__init int unregister_event_command(struct event_command *cmd)
383{
384 struct event_command *p, *n;
385 int ret = -ENODEV;
386
387 mutex_lock(&trigger_cmd_mutex);
388 list_for_each_entry_safe(p, n, &trigger_commands, list) {
389 if (strcmp(cmd->name, p->name) == 0) {
390 ret = 0;
391 list_del_init(&p->list);
392 goto out_unlock;
393 }
394 }
395 out_unlock:
396 mutex_unlock(&trigger_cmd_mutex);
397
398 return ret;
399}
400
401/**
402 * event_trigger_print - Generic event_trigger_ops @print implementation
403 * @name: The name of the event trigger
404 * @m: The seq_file being printed to
405 * @data: Trigger-specific data
406 * @filter_str: filter_str to print, if present
407 *
408 * Common implementation for event triggers to print themselves.
409 *
410 * Usually wrapped by a function that simply sets the @name of the
411 * trigger command and then invokes this.
412 *
413 * Return: 0 on success, errno otherwise
414 */
415static int
416event_trigger_print(const char *name, struct seq_file *m,
417 void *data, char *filter_str)
418{
419 long count = (long)data;
420
421 seq_puts(m, name);
422
423 if (count == -1)
424 seq_puts(m, ":unlimited");
425 else
426 seq_printf(m, ":count=%ld", count);
427
428 if (filter_str)
429 seq_printf(m, " if %s\n", filter_str);
430 else
431 seq_putc(m, '\n');
432
433 return 0;
434}
435
436/**
437 * event_trigger_init - Generic event_trigger_ops @init implementation
438 * @data: Trigger-specific data
439 *
440 * Common implementation of event trigger initialization.
441 *
442 * Usually used directly as the @init method in event trigger
443 * implementations.
444 *
445 * Return: 0 on success, errno otherwise
446 */
447int event_trigger_init(struct event_trigger_data *data)
448{
449 data->ref++;
450 return 0;
451}
452
453/**
454 * event_trigger_free - Generic event_trigger_ops @free implementation
455 * @data: Trigger-specific data
456 *
457 * Common implementation of event trigger de-initialization.
458 *
459 * Usually used directly as the @free method in event trigger
460 * implementations.
461 */
462static void
463event_trigger_free(struct event_trigger_data *data)
464{
465 if (WARN_ON_ONCE(data->ref <= 0))
466 return;
467
468 data->ref--;
469 if (!data->ref)
470 trigger_data_free(data);
471}
472
473int trace_event_trigger_enable_disable(struct trace_event_file *file,
474 int trigger_enable)
475{
476 int ret = 0;
477
478 if (trigger_enable) {
479 if (atomic_inc_return(&file->tm_ref) > 1)
480 return ret;
481 set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
482 ret = trace_event_enable_disable(file, 1, 1);
483 } else {
484 if (atomic_dec_return(&file->tm_ref) > 0)
485 return ret;
486 clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
487 ret = trace_event_enable_disable(file, 0, 1);
488 }
489
490 return ret;
491}
492
493/**
494 * clear_event_triggers - Clear all triggers associated with a trace array
495 * @tr: The trace array to clear
496 *
497 * For each trigger, the triggering event has its tm_ref decremented
498 * via trace_event_trigger_enable_disable(), and any associated event
499 * (in the case of enable/disable_event triggers) will have its sm_ref
500 * decremented via free()->trace_event_enable_disable(). That
501 * combination effectively reverses the soft-mode/trigger state added
502 * by trigger registration.
503 *
504 * Must be called with event_mutex held.
505 */
506void
507clear_event_triggers(struct trace_array *tr)
508{
509 struct trace_event_file *file;
510
511 list_for_each_entry(file, &tr->events, list) {
512 struct event_trigger_data *data, *n;
513 list_for_each_entry_safe(data, n, &file->triggers, list) {
514 trace_event_trigger_enable_disable(file, 0);
515 list_del_rcu(&data->list);
516 if (data->ops->free)
517 data->ops->free(data);
518 }
519 }
520}
521
522/**
523 * update_cond_flag - Set or reset the TRIGGER_COND bit
524 * @file: The trace_event_file associated with the event
525 *
526 * If an event has triggers and any of those triggers has a filter or
527 * a post_trigger, trigger invocation needs to be deferred until after
528 * the current event has logged its data, and the event should have
529 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
530 * cleared.
531 */
532void update_cond_flag(struct trace_event_file *file)
533{
534 struct event_trigger_data *data;
535 bool set_cond = false;
536
537 lockdep_assert_held(&event_mutex);
538
539 list_for_each_entry(data, &file->triggers, list) {
540 if (data->filter || event_command_post_trigger(data->cmd_ops) ||
541 event_command_needs_rec(data->cmd_ops)) {
542 set_cond = true;
543 break;
544 }
545 }
546
547 if (set_cond)
548 set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
549 else
550 clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
551}
552
553/**
554 * register_trigger - Generic event_command @reg implementation
555 * @glob: The raw string used to register the trigger
556 * @data: Trigger-specific data to associate with the trigger
557 * @file: The trace_event_file associated with the event
558 *
559 * Common implementation for event trigger registration.
560 *
561 * Usually used directly as the @reg method in event command
562 * implementations.
563 *
564 * Return: 0 on success, errno otherwise
565 */
566static int register_trigger(char *glob,
567 struct event_trigger_data *data,
568 struct trace_event_file *file)
569{
570 struct event_trigger_data *test;
571 int ret = 0;
572
573 lockdep_assert_held(&event_mutex);
574
575 list_for_each_entry(test, &file->triggers, list) {
576 if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
577 ret = -EEXIST;
578 goto out;
579 }
580 }
581
582 if (data->ops->init) {
583 ret = data->ops->init(data);
584 if (ret < 0)
585 goto out;
586 }
587
588 list_add_rcu(&data->list, &file->triggers);
589
590 update_cond_flag(file);
591 ret = trace_event_trigger_enable_disable(file, 1);
592 if (ret < 0) {
593 list_del_rcu(&data->list);
594 update_cond_flag(file);
595 }
596out:
597 return ret;
598}
599
600/*
601 * True if the trigger was found and unregistered, else false.
602 */
603static bool try_unregister_trigger(char *glob,
604 struct event_trigger_data *test,
605 struct trace_event_file *file)
606{
607 struct event_trigger_data *data = NULL, *iter;
608
609 lockdep_assert_held(&event_mutex);
610
611 list_for_each_entry(iter, &file->triggers, list) {
612 if (iter->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
613 data = iter;
614 list_del_rcu(&data->list);
615 trace_event_trigger_enable_disable(file, 0);
616 update_cond_flag(file);
617 break;
618 }
619 }
620
621 if (data) {
622 if (data->ops->free)
623 data->ops->free(data);
624
625 return true;
626 }
627
628 return false;
629}
630
631/**
632 * unregister_trigger - Generic event_command @unreg implementation
633 * @glob: The raw string used to register the trigger
634 * @test: Trigger-specific data used to find the trigger to remove
635 * @file: The trace_event_file associated with the event
636 *
637 * Common implementation for event trigger unregistration.
638 *
639 * Usually used directly as the @unreg method in event command
640 * implementations.
641 */
642static void unregister_trigger(char *glob,
643 struct event_trigger_data *test,
644 struct trace_event_file *file)
645{
646 try_unregister_trigger(glob, test, file);
647}
648
649/*
650 * Event trigger parsing helper functions.
651 *
652 * These functions make it easier to write an event trigger parsing
653 * function, i.e. the struct event_command.parse() callback function
654 * responsible for parsing and registering a trigger command written
655 * to the 'trigger' file.
656 *
657 * A trigger command (or just 'trigger' for short) takes the form:
658 * [trigger] [if filter]
659 *
660 * The struct event_command.parse() callback (and other struct
661 * event_command functions) refers to several components of a trigger
662 * command. Those same components are referenced by the event trigger
663 * parsing helper functions defined below. These components are:
664 *
665 * cmd - the trigger command name
666 * glob - the trigger command name optionally prefaced with '!'
667 * param_and_filter - text following cmd and ':'
668 * param - text following cmd and ':' and stripped of filter
669 * filter - the optional filter text following (and including) 'if'
670 *
671 * To illustrate the use of these components, here are some concrete
672 * examples. For the following triggers:
673 *
674 * echo 'traceon:5 if pid == 0' > trigger
675 * - 'traceon' is both cmd and glob
676 * - '5 if pid == 0' is the param_and_filter
677 * - '5' is the param
678 * - 'if pid == 0' is the filter
679 *
680 * echo 'enable_event:sys:event:n' > trigger
681 * - 'enable_event' is both cmd and glob
682 * - 'sys:event:n' is the param_and_filter
683 * - 'sys:event:n' is the param
684 * - there is no filter
685 *
686 * echo 'hist:keys=pid if prio > 50' > trigger
687 * - 'hist' is both cmd and glob
688 * - 'keys=pid if prio > 50' is the param_and_filter
689 * - 'keys=pid' is the param
690 * - 'if prio > 50' is the filter
691 *
692 * echo '!enable_event:sys:event:n' > trigger
693 * - 'enable_event' is the cmd
694 * - '!enable_event' is the glob
695 * - 'sys:event:n' is the param_and_filter
696 * - 'sys:event:n' is the param
697 * - there is no filter
698 *
699 * echo 'traceoff' > trigger
700 * - 'traceoff' is both cmd and glob
701 * - there is no param_and_filter
702 * - there is no param
703 * - there is no filter
704 *
705 * There are a few different categories of event trigger covered by
706 * these helpers:
707 *
708 * - triggers that don't require a parameter e.g. traceon
709 * - triggers that do require a parameter e.g. enable_event and hist
710 * - triggers that, though they may not require a param, may support an
711 * optional 'n' param (n = number of times the trigger should fire)
712 * e.g. traceon:5 or enable_event:sys:event:n
713 * - triggers that do not support an 'n' param e.g. hist
714 *
715 * These functions can be used or ignored as necessary - it all
716 * depends on the complexity of the trigger. The granularity of the
717 * helpers reflects the fact that some implementations need to
718 * customize certain aspects of their parsing and therefore won't
719 * use every function. For instance, the hist trigger implementation
720 * doesn't use event_trigger_separate_filter() because it has special
721 * requirements for handling the filter.
722 */
723
724/**
725 * event_trigger_check_remove - check whether an event trigger specifies remove
726 * @glob: The trigger command string, with optional remove(!) operator
727 *
728 * The event trigger callback implementations pass in 'glob' as a
729 * parameter. This is the command name either with or without a
730 * remove(!) operator. This function simply parses the glob and
731 * determines whether the command corresponds to a trigger removal or
732 * a trigger addition.
733 *
734 * Return: true if this is a remove command, false otherwise
735 */
736bool event_trigger_check_remove(const char *glob)
737{
738 return (glob && glob[0] == '!') ? true : false;
739}
740
741/**
742 * event_trigger_empty_param - check whether the param is empty
743 * @param: The trigger param string
744 *
745 * The event trigger callback implementations pass in 'param' as a
746 * parameter. This corresponds to the portion of the command string
747 * following the command name. This function can be called by a
748 * callback implementation for any command that requires a param; a
749 * callback that doesn't require a param can ignore it.
750 *
751 * Return: true if this is an empty param, false otherwise
752 */
753bool event_trigger_empty_param(const char *param)
754{
755 return !param;
756}
757
758/**
759 * event_trigger_separate_filter - separate an event trigger from a filter
760 * @param_and_filter: String containing trigger and possibly filter
761 * @param: outparam, will be filled with a pointer to the trigger
762 * @filter: outparam, will be filled with a pointer to the filter
763 * @param_required: Specifies whether or not the param string is required
764 *
765 * Given a param string of the form '[trigger] [if filter]', this
766 * function separates the filter from the trigger and returns the
767 * trigger in @param and the filter in @filter. Either the @param
768 * or the @filter may be set to NULL by this function - if not set to
769 * NULL, they will contain strings corresponding to the trigger and
770 * filter.
771 *
772 * There are two cases that need to be handled with respect to the
773 * passed-in param: either the param is required, or it is not
774 * required. If @param_required is set, and there's no param, it will
775 * return -EINVAL. If @param_required is not set and there's a param
776 * that starts with a number, that corresponds to the case of a
777 * trigger with :n (n = number of times the trigger should fire) and
778 * the parsing continues normally; otherwise the function just returns
779 * and assumes param just contains a filter and there's nothing else
780 * to do.
781 *
782 * Return: 0 on success, errno otherwise
783 */
784int event_trigger_separate_filter(char *param_and_filter, char **param,
785 char **filter, bool param_required)
786{
787 int ret = 0;
788
789 *param = *filter = NULL;
790
791 if (!param_and_filter) {
792 if (param_required)
793 ret = -EINVAL;
794 goto out;
795 }
796
797 /*
798 * Here we check for an optional param. The only legal
799 * optional param is :n, and if that's the case, continue
800 * below. Otherwise we assume what's left is a filter and
801 * return it as the filter string for the caller to deal with.
802 */
803 if (!param_required && param_and_filter && !isdigit(param_and_filter[0])) {
804 *filter = param_and_filter;
805 goto out;
806 }
807
808 /*
809 * Separate the param from the filter (param [if filter]).
810 * Here we have either an optional :n param or a required
811 * param and an optional filter.
812 */
813	*param = strsep(&param_and_filter, " \t");
814
815 /*
816 * Here we have a filter, though it may be empty.
817 */
818 if (param_and_filter) {
819 *filter = skip_spaces(param_and_filter);
820 if (!**filter)
821 *filter = NULL;
822 }
823out:
824 return ret;
825}
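/*
 * A few illustrative separations (param_required == false), following
 * the logic above:
 *
 *	"5 if pid == 0"	-> *param = "5",  *filter = "if pid == 0"
 *	"if pid == 0"	-> *param = NULL, *filter = "if pid == 0"
 *	NULL		-> *param = NULL, *filter = NULL, return 0
 *
 * With param_required == true, a NULL param_and_filter instead
 * returns -EINVAL.
 */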
826
827/**
828 * event_trigger_alloc - allocate and init event_trigger_data for a trigger
829 * @cmd_ops: The event_command operations for the trigger
830 * @cmd: The cmd string
831 * @param: The param string
832 * @private_data: User data to associate with the event trigger
833 *
834 * Allocate an event_trigger_data instance and initialize it. The
835 * @cmd_ops are used along with the @cmd and @param to get the
836 * trigger_ops to assign to the event_trigger_data. @private_data can
837 * also be passed in and associated with the event_trigger_data.
838 *
839 * Use event_trigger_free() to free an event_trigger_data object.
840 *
841 * Return: The trigger_data object on success, NULL otherwise
842 */
843struct event_trigger_data *event_trigger_alloc(struct event_command *cmd_ops,
844 char *cmd,
845 char *param,
846 void *private_data)
847{
848 struct event_trigger_data *trigger_data;
849 struct event_trigger_ops *trigger_ops;
850
851 trigger_ops = cmd_ops->get_trigger_ops(cmd, param);
852
853 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
854 if (!trigger_data)
855 return NULL;
856
857 trigger_data->count = -1;
858 trigger_data->ops = trigger_ops;
859 trigger_data->cmd_ops = cmd_ops;
860 trigger_data->private_data = private_data;
861
862 INIT_LIST_HEAD(&trigger_data->list);
863 INIT_LIST_HEAD(&trigger_data->named_list);
864 RCU_INIT_POINTER(trigger_data->filter, NULL);
865
866 return trigger_data;
867}
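/*
 * A typical parse callback strings these helpers together, roughly as
 * follows (error handling omitted; see event_trigger_parse() below for
 * the complete sequence):
 *
 *	data = event_trigger_alloc(cmd_ops, cmd, param, file);
 *	event_trigger_parse_num(param, data);
 *	event_trigger_set_filter(cmd_ops, file, filter, data);
 *	event_trigger_init(data);
 *	event_trigger_register(cmd_ops, file, glob, data);
 *	event_trigger_free(data);
 */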
868
869/**
870 * event_trigger_parse_num - parse and return the number param for a trigger
871 * @param: The param string
872 * @trigger_data: The trigger_data for the trigger
873 *
874 * Parse the :n (n = number of times the trigger should fire) param
875 * and set the count variable in the trigger_data to the parsed count.
876 *
877 * Return: 0 on success, errno otherwise
878 */
879int event_trigger_parse_num(char *param,
880 struct event_trigger_data *trigger_data)
881{
882 char *number;
883 int ret = 0;
884
885 if (param) {
886		number = strsep(&param, ":");
887
888 if (!strlen(number))
889 return -EINVAL;
890
891		/*
892		 * Store the parsed count directly in trigger_data->count;
893		 * -1 (the default) means the trigger fires without limit.
894		 */
895 ret = kstrtoul(number, 0, &trigger_data->count);
896 }
897
898 return ret;
899}
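/*
 * For instance, for 'traceon:5' the param passed in here is "5" and
 * trigger_data->count becomes 5; with no param the count stays at the
 * -1 it was initialized to in event_trigger_alloc(), which the
 * counting trigger functions below treat as 'unlimited'.
 */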
900
901/**
902 * event_trigger_set_filter - set an event trigger's filter
903 * @cmd_ops: The event_command operations for the trigger
904 * @file: The event file for the trigger's event
905 * @param: The string containing the filter
906 * @trigger_data: The trigger_data for the trigger
907 *
908 * Set the filter for the trigger. If the filter is NULL, just return
909 * without error.
910 *
911 * Return: 0 on success, errno otherwise
912 */
913int event_trigger_set_filter(struct event_command *cmd_ops,
914 struct trace_event_file *file,
915 char *param,
916 struct event_trigger_data *trigger_data)
917{
918 if (param && cmd_ops->set_filter)
919 return cmd_ops->set_filter(param, trigger_data, file);
920
921 return 0;
922}
923
924/**
925 * event_trigger_reset_filter - reset an event trigger's filter
926 * @cmd_ops: The event_command operations for the trigger
927 * @trigger_data: The trigger_data for the trigger
928 *
929 * Reset the filter for the trigger to no filter.
930 */
931void event_trigger_reset_filter(struct event_command *cmd_ops,
932 struct event_trigger_data *trigger_data)
933{
934 if (cmd_ops->set_filter)
935 cmd_ops->set_filter(NULL, trigger_data, NULL);
936}
937
938/**
939 * event_trigger_register - register an event trigger
940 * @cmd_ops: The event_command operations for the trigger
941 * @file: The event file for the trigger's event
942 * @glob: The trigger command string, with optional remove(!) operator
943 * @trigger_data: The trigger_data for the trigger
944 *
945 * Register an event trigger. The @cmd_ops are used to call the
946 * cmd_ops->reg() function which actually does the registration.
947 *
948 * Return: 0 on success, errno otherwise
949 */
950int event_trigger_register(struct event_command *cmd_ops,
951 struct trace_event_file *file,
952 char *glob,
953 struct event_trigger_data *trigger_data)
954{
955 return cmd_ops->reg(glob, trigger_data, file);
956}
957
958/**
959 * event_trigger_unregister - unregister an event trigger
960 * @cmd_ops: The event_command operations for the trigger
961 * @file: The event file for the trigger's event
962 * @glob: The trigger command string, with optional remove(!) operator
963 * @trigger_data: The trigger_data for the trigger
964 *
965 * Unregister an event trigger. The @cmd_ops are used to call the
966 * cmd_ops->unreg() function which actually does the unregistration.
967 */
968void event_trigger_unregister(struct event_command *cmd_ops,
969 struct trace_event_file *file,
970 char *glob,
971 struct event_trigger_data *trigger_data)
972{
973 cmd_ops->unreg(glob, trigger_data, file);
974}
975
976/*
977 * End event trigger parsing helper functions.
978 */
979
980/**
981 * event_trigger_parse - Generic event_command @parse implementation
982 * @cmd_ops: The command ops, used for trigger registration
983 * @file: The trace_event_file associated with the event
984 * @glob: The raw string used to register the trigger
985 * @cmd: The cmd portion of the string used to register the trigger
986 * @param_and_filter: The param and filter portion of the string used to register the trigger
987 *
988 * Common implementation for event command parsing and trigger
989 * instantiation.
990 *
991 * Usually used directly as the @parse method in event command
992 * implementations.
993 *
994 * Return: 0 on success, errno otherwise
995 */
996static int
997event_trigger_parse(struct event_command *cmd_ops,
998 struct trace_event_file *file,
999 char *glob, char *cmd, char *param_and_filter)
1000{
1001 struct event_trigger_data *trigger_data;
1002 char *param, *filter;
1003 bool remove;
1004 int ret;
1005
1006 remove = event_trigger_check_remove(glob);
1007
1008	ret = event_trigger_separate_filter(param_and_filter, &param, &filter, false);
1009 if (ret)
1010 return ret;
1011
1012 ret = -ENOMEM;
1013 trigger_data = event_trigger_alloc(cmd_ops, cmd, param, file);
1014 if (!trigger_data)
1015 goto out;
1016
1017 if (remove) {
1018 event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
1019 kfree(trigger_data);
1020 ret = 0;
1021 goto out;
1022 }
1023
1024 ret = event_trigger_parse_num(param, trigger_data);
1025 if (ret)
1026 goto out_free;
1027
1028 ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
1029 if (ret < 0)
1030 goto out_free;
1031
1032 /* Up the trigger_data count to make sure reg doesn't free it on failure */
1033 event_trigger_init(trigger_data);
1034
1035 ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
1036 if (ret)
1037 goto out_free;
1038
1039	/* Drop the trigger_data reference, freeing it if no longer used */
1040 event_trigger_free(trigger_data);
1041 out:
1042 return ret;
1043
1044 out_free:
1045 event_trigger_reset_filter(cmd_ops, trigger_data);
1046 kfree(trigger_data);
1047 goto out;
1048}
1049
1050/**
1051 * set_trigger_filter - Generic event_command @set_filter implementation
1052 * @filter_str: The filter string for the trigger, NULL to remove filter
1053 * @trigger_data: Trigger-specific data
1054 * @file: The trace_event_file associated with the event
1055 *
1056 * Common implementation for event command filter parsing and filter
1057 * instantiation.
1058 *
1059 * Usually used directly as the @set_filter method in event command
1060 * implementations.
1061 *
1062 * Also used to remove a filter (if filter_str = NULL).
1063 *
1064 * Return: 0 on success, errno otherwise
1065 */
1066int set_trigger_filter(char *filter_str,
1067 struct event_trigger_data *trigger_data,
1068 struct trace_event_file *file)
1069{
1070 struct event_trigger_data *data = trigger_data;
1071 struct event_filter *filter = NULL, *tmp;
1072 int ret = -EINVAL;
1073 char *s;
1074
1075 if (!filter_str) /* clear the current filter */
1076 goto assign;
1077
1078 s = strsep(&filter_str, " \t");
1079
1080 if (!strlen(s) || strcmp(s, "if") != 0)
1081 goto out;
1082
1083 if (!filter_str)
1084 goto out;
1085
1086 /* The filter is for the 'trigger' event, not the triggered event */
1087 ret = create_event_filter(file->tr, file->event_call,
1088 filter_str, true, &filter);
1089
1090	/* set_str was enabled only for error reporting; drop the saved string */
1091 if (filter) {
1092 kfree(filter->filter_string);
1093 filter->filter_string = NULL;
1094 }
1095
1096	/*
1097	 * If create_event_filter() fails, the filter still needs to be
1098	 * freed; the calling code does that via data->filter.
1099	 */
1100 assign:
1101 tmp = rcu_access_pointer(data->filter);
1102
1103 rcu_assign_pointer(data->filter, filter);
1104
1105 if (tmp) {
1106		/*
1107		 * Make sure no caller is still using the old filter before
1108		 * freeing it. It is possible that a filter could fail at
1109		 * boot up, and then this path will be called. Avoid the
1110		 * synchronization in that case.
1111		 */
1112 if (system_state != SYSTEM_BOOTING)
1113 tracepoint_synchronize_unregister();
1114 free_event_filter(tmp);
1115 }
1116
1117 kfree(data->filter_str);
1118 data->filter_str = NULL;
1119
1120 if (filter_str) {
1121 data->filter_str = kstrdup(filter_str, GFP_KERNEL);
1122 if (!data->filter_str) {
1123 free_event_filter(rcu_access_pointer(data->filter));
1124 data->filter = NULL;
1125 ret = -ENOMEM;
1126 }
1127 }
1128 out:
1129 return ret;
1130}
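/*
 * For example, given the trigger 'traceoff if pid == 0', the string
 * passed in here is "if pid == 0": the leading "if" is consumed by the
 * strsep() above and the remaining "pid == 0" is compiled by
 * create_event_filter() against the event the trigger file belongs to.
 */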
1131
1132static LIST_HEAD(named_triggers);
1133
1134/**
1135 * find_named_trigger - Find the common named trigger associated with @name
1136 * @name: The name of the set of named triggers to find the common data for
1137 *
1138 * Named triggers are sets of triggers that share a common set of
1139 * trigger data. The first named trigger registered with a given name
1140 * owns the common trigger data that the others subsequently
1141 * registered with the same name will reference. This function
1142 * returns the common trigger data associated with that first
1143 * registered instance.
1144 *
1145 * Return: the common trigger data for the given named trigger on
1146 * success, NULL otherwise.
1147 */
1148struct event_trigger_data *find_named_trigger(const char *name)
1149{
1150 struct event_trigger_data *data;
1151
1152 if (!name)
1153 return NULL;
1154
1155 list_for_each_entry(data, &named_triggers, named_list) {
1156 if (data->named_data)
1157 continue;
1158 if (strcmp(data->name, name) == 0)
1159 return data;
1160 }
1161
1162 return NULL;
1163}
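/*
 * Named triggers are used by commands such as the hist trigger, e.g.
 * (assuming the hist trigger's 'name=' syntax, paths abbreviated):
 *
 *	echo 'hist:name=foo:keys=pid' > sched/sched_waking/trigger
 *	echo 'hist:name=foo:keys=pid' > sched/sched_switch/trigger
 *
 * attaches two triggers that share the trigger data owned by the first
 * 'foo' registered; find_named_trigger("foo") returns that owning
 * instance.
 */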
1164
1165/**
1166 * is_named_trigger - determine if a given trigger is a named trigger
1167 * @test: The trigger data to test
1168 *
1169 * Return: true if 'test' is a named trigger, false otherwise.
1170 */
1171bool is_named_trigger(struct event_trigger_data *test)
1172{
1173 struct event_trigger_data *data;
1174
1175 list_for_each_entry(data, &named_triggers, named_list) {
1176 if (test == data)
1177 return true;
1178 }
1179
1180 return false;
1181}
1182
1183/**
1184 * save_named_trigger - save the trigger in the named trigger list
1185 * @name: The name of the named trigger set
1186 * @data: The trigger data to save
1187 *
1188 * Return: 0 if successful, negative error otherwise.
1189 */
1190int save_named_trigger(const char *name, struct event_trigger_data *data)
1191{
1192 data->name = kstrdup(name, GFP_KERNEL);
1193 if (!data->name)
1194 return -ENOMEM;
1195
1196 list_add(&data->named_list, &named_triggers);
1197
1198 return 0;
1199}
1200
1201/**
1202 * del_named_trigger - delete a trigger from the named trigger list
1203 * @data: The trigger data to delete
1204 */
1205void del_named_trigger(struct event_trigger_data *data)
1206{
1207 kfree(data->name);
1208 data->name = NULL;
1209
1210 list_del(&data->named_list);
1211}
1212
1213static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
1214{
1215 struct event_trigger_data *test;
1216
1217 list_for_each_entry(test, &named_triggers, named_list) {
1218 if (strcmp(test->name, data->name) == 0) {
1219 if (pause) {
1220 test->paused_tmp = test->paused;
1221 test->paused = true;
1222 } else {
1223 test->paused = test->paused_tmp;
1224 }
1225 }
1226 }
1227}
1228
1229/**
1230 * pause_named_trigger - Pause all named triggers with the same name
1231 * @data: The trigger data of a named trigger to pause
1232 *
1233 * Pauses a named trigger along with all other triggers having the
1234 * same name. Because named triggers share a common set of data,
1235 * pausing only one is meaningless, so pausing one named trigger needs
1236 * to pause all triggers with the same name.
1237 */
1238void pause_named_trigger(struct event_trigger_data *data)
1239{
1240 __pause_named_trigger(data, true);
1241}
1242
1243/**
1244 * unpause_named_trigger - Un-pause all named triggers with the same name
1245 * @data: The trigger data of a named trigger to unpause
1246 *
1247 * Un-pauses a named trigger along with all other triggers having the
1248 * same name. Because named triggers share a common set of data,
1249 * unpausing only one is meaningless, so unpausing one named trigger
1250 * needs to unpause all triggers with the same name.
1251 */
1252void unpause_named_trigger(struct event_trigger_data *data)
1253{
1254 __pause_named_trigger(data, false);
1255}
1256
1257/**
1258 * set_named_trigger_data - Associate common named trigger data
1259 * @data: The trigger data to associate
1260 * @named_data: The common named trigger to be associated
1261 *
1262 * Named triggers are sets of triggers that share a common set of
1263 * trigger data. The first named trigger registered with a given name
1264 * owns the common trigger data that the others subsequently
1265 * registered with the same name will reference. This function
1266 * associates the common trigger data from the first trigger with the
1267 * given trigger.
1268 */
1269void set_named_trigger_data(struct event_trigger_data *data,
1270 struct event_trigger_data *named_data)
1271{
1272 data->named_data = named_data;
1273}
1274
1275struct event_trigger_data *
1276get_named_trigger_data(struct event_trigger_data *data)
1277{
1278 return data->named_data;
1279}
1280
1281static void
1282traceon_trigger(struct event_trigger_data *data,
1283 struct trace_buffer *buffer, void *rec,
1284 struct ring_buffer_event *event)
1285{
1286 struct trace_event_file *file = data->private_data;
1287
1288 if (file) {
1289 if (tracer_tracing_is_on(file->tr))
1290 return;
1291
1292 tracer_tracing_on(file->tr);
1293 return;
1294 }
1295
1296 if (tracing_is_on())
1297 return;
1298
1299 tracing_on();
1300}
1301
1302static void
1303traceon_count_trigger(struct event_trigger_data *data,
1304 struct trace_buffer *buffer, void *rec,
1305 struct ring_buffer_event *event)
1306{
1307 struct trace_event_file *file = data->private_data;
1308
1309 if (file) {
1310 if (tracer_tracing_is_on(file->tr))
1311 return;
1312 } else {
1313 if (tracing_is_on())
1314 return;
1315 }
1316
1317 if (!data->count)
1318 return;
1319
1320 if (data->count != -1)
1321 (data->count)--;
1322
1323 if (file)
1324 tracer_tracing_on(file->tr);
1325 else
1326 tracing_on();
1327}
1328
1329static void
1330traceoff_trigger(struct event_trigger_data *data,
1331 struct trace_buffer *buffer, void *rec,
1332 struct ring_buffer_event *event)
1333{
1334 struct trace_event_file *file = data->private_data;
1335
1336 if (file) {
1337 if (!tracer_tracing_is_on(file->tr))
1338 return;
1339
1340 tracer_tracing_off(file->tr);
1341 return;
1342 }
1343
1344 if (!tracing_is_on())
1345 return;
1346
1347 tracing_off();
1348}
1349
1350static void
1351traceoff_count_trigger(struct event_trigger_data *data,
1352 struct trace_buffer *buffer, void *rec,
1353 struct ring_buffer_event *event)
1354{
1355 struct trace_event_file *file = data->private_data;
1356
1357 if (file) {
1358 if (!tracer_tracing_is_on(file->tr))
1359 return;
1360 } else {
1361 if (!tracing_is_on())
1362 return;
1363 }
1364
1365 if (!data->count)
1366 return;
1367
1368 if (data->count != -1)
1369 (data->count)--;
1370
1371 if (file)
1372 tracer_tracing_off(file->tr);
1373 else
1374 tracing_off();
1375}
1376
1377static int
1378traceon_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1379{
1380 return event_trigger_print("traceon", m, (void *)data->count,
1381 data->filter_str);
1382}
1383
1384static int
1385traceoff_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1386{
1387 return event_trigger_print("traceoff", m, (void *)data->count,
1388 data->filter_str);
1389}
1390
1391static struct event_trigger_ops traceon_trigger_ops = {
1392 .trigger = traceon_trigger,
1393 .print = traceon_trigger_print,
1394 .init = event_trigger_init,
1395 .free = event_trigger_free,
1396};
1397
1398static struct event_trigger_ops traceon_count_trigger_ops = {
1399 .trigger = traceon_count_trigger,
1400 .print = traceon_trigger_print,
1401 .init = event_trigger_init,
1402 .free = event_trigger_free,
1403};
1404
1405static struct event_trigger_ops traceoff_trigger_ops = {
1406 .trigger = traceoff_trigger,
1407 .print = traceoff_trigger_print,
1408 .init = event_trigger_init,
1409 .free = event_trigger_free,
1410};
1411
1412static struct event_trigger_ops traceoff_count_trigger_ops = {
1413 .trigger = traceoff_count_trigger,
1414 .print = traceoff_trigger_print,
1415 .init = event_trigger_init,
1416 .free = event_trigger_free,
1417};
1418
1419static struct event_trigger_ops *
1420onoff_get_trigger_ops(char *cmd, char *param)
1421{
1422 struct event_trigger_ops *ops;
1423
1424 /* we register both traceon and traceoff to this callback */
1425 if (strcmp(cmd, "traceon") == 0)
1426 ops = param ? &traceon_count_trigger_ops :
1427 &traceon_trigger_ops;
1428 else
1429 ops = param ? &traceoff_count_trigger_ops :
1430 &traceoff_trigger_ops;
1431
1432 return ops;
1433}
1434
1435static struct event_command trigger_traceon_cmd = {
1436 .name = "traceon",
1437 .trigger_type = ETT_TRACE_ONOFF,
1438 .parse = event_trigger_parse,
1439 .reg = register_trigger,
1440 .unreg = unregister_trigger,
1441 .get_trigger_ops = onoff_get_trigger_ops,
1442 .set_filter = set_trigger_filter,
1443};
1444
1445static struct event_command trigger_traceoff_cmd = {
1446 .name = "traceoff",
1447 .trigger_type = ETT_TRACE_ONOFF,
1448 .flags = EVENT_CMD_FL_POST_TRIGGER,
1449 .parse = event_trigger_parse,
1450 .reg = register_trigger,
1451 .unreg = unregister_trigger,
1452 .get_trigger_ops = onoff_get_trigger_ops,
1453 .set_filter = set_trigger_filter,
1454};
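/*
 * Example usage of the two commands above, written to an event's
 * 'trigger' file (paths abbreviated and illustrative):
 *
 *	echo 'traceon:5 if pid == 0' > events/sched/sched_switch/trigger
 *	echo '!traceon' > events/sched/sched_switch/trigger
 *	echo 'traceoff' > events/sched/sched_switch/trigger
 *
 * Note that trigger_traceoff_cmd sets EVENT_CMD_FL_POST_TRIGGER while
 * trigger_traceon_cmd does not.
 */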
1455
1456#ifdef CONFIG_TRACER_SNAPSHOT
1457static void
1458snapshot_trigger(struct event_trigger_data *data,
1459 struct trace_buffer *buffer, void *rec,
1460 struct ring_buffer_event *event)
1461{
1462 struct trace_event_file *file = data->private_data;
1463
1464 if (file)
1465 tracing_snapshot_instance(file->tr);
1466 else
1467 tracing_snapshot();
1468}
1469
1470static void
1471snapshot_count_trigger(struct event_trigger_data *data,
1472 struct trace_buffer *buffer, void *rec,
1473 struct ring_buffer_event *event)
1474{
1475 if (!data->count)
1476 return;
1477
1478 if (data->count != -1)
1479 (data->count)--;
1480
1481 snapshot_trigger(data, buffer, rec, event);
1482}
1483
1484static int
1485register_snapshot_trigger(char *glob,
1486 struct event_trigger_data *data,
1487 struct trace_event_file *file)
1488{
1489 int ret = tracing_arm_snapshot(file->tr);
1490
1491 if (ret < 0)
1492 return ret;
1493
1494 ret = register_trigger(glob, data, file);
1495 if (ret < 0)
1496 tracing_disarm_snapshot(file->tr);
1497 return ret;
1498}
1499
1500static void unregister_snapshot_trigger(char *glob,
1501 struct event_trigger_data *data,
1502 struct trace_event_file *file)
1503{
1504 if (try_unregister_trigger(glob, data, file))
1505 tracing_disarm_snapshot(file->tr);
1506}
1507
1508static int
1509snapshot_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1510{
1511 return event_trigger_print("snapshot", m, (void *)data->count,
1512 data->filter_str);
1513}
1514
1515static struct event_trigger_ops snapshot_trigger_ops = {
1516 .trigger = snapshot_trigger,
1517 .print = snapshot_trigger_print,
1518 .init = event_trigger_init,
1519 .free = event_trigger_free,
1520};
1521
1522static struct event_trigger_ops snapshot_count_trigger_ops = {
1523 .trigger = snapshot_count_trigger,
1524 .print = snapshot_trigger_print,
1525 .init = event_trigger_init,
1526 .free = event_trigger_free,
1527};
1528
1529static struct event_trigger_ops *
1530snapshot_get_trigger_ops(char *cmd, char *param)
1531{
1532 return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1533}
1534
1535static struct event_command trigger_snapshot_cmd = {
1536 .name = "snapshot",
1537 .trigger_type = ETT_SNAPSHOT,
1538 .parse = event_trigger_parse,
1539 .reg = register_snapshot_trigger,
1540 .unreg = unregister_snapshot_trigger,
1541 .get_trigger_ops = snapshot_get_trigger_ops,
1542 .set_filter = set_trigger_filter,
1543};
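/*
 * Example usage (illustrative): take a snapshot the first five times a
 * matching event fires:
 *
 *	echo 'snapshot:5 if prev_prio < 100' > events/sched/sched_switch/trigger
 *
 * register_snapshot_trigger() above arms the snapshot buffer via
 * tracing_arm_snapshot() before adding the trigger, and disarms it
 * again if registration fails or when the trigger is unregistered.
 */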
1544
1545static __init int register_trigger_snapshot_cmd(void)
1546{
1547 int ret;
1548
1549 ret = register_event_command(&trigger_snapshot_cmd);
1550 WARN_ON(ret < 0);
1551
1552 return ret;
1553}
1554#else
1555static __init int register_trigger_snapshot_cmd(void) { return 0; }
1556#endif /* CONFIG_TRACER_SNAPSHOT */
1557
1558#ifdef CONFIG_STACKTRACE
1559#ifdef CONFIG_UNWINDER_ORC
1560/* Skip 2:
1561 * event_triggers_post_call()
1562 * trace_event_raw_event_xxx()
1563 */
1564# define STACK_SKIP 2
1565#else
1566/*
1567 * Skip 4:
1568 * stacktrace_trigger()
1569 * event_triggers_post_call()
1570 * trace_event_buffer_commit()
1571 * trace_event_raw_event_xxx()
1572 */
1573#define STACK_SKIP 4
1574#endif
1575
1576static void
1577stacktrace_trigger(struct event_trigger_data *data,
1578 struct trace_buffer *buffer, void *rec,
1579 struct ring_buffer_event *event)
1580{
1581 struct trace_event_file *file = data->private_data;
1582
1583 if (file)
1584 __trace_stack(file->tr, tracing_gen_ctx(), STACK_SKIP);
1585 else
1586 trace_dump_stack(STACK_SKIP);
1587}
1588
1589static void
1590stacktrace_count_trigger(struct event_trigger_data *data,
1591 struct trace_buffer *buffer, void *rec,
1592 struct ring_buffer_event *event)
1593{
1594 if (!data->count)
1595 return;
1596
1597 if (data->count != -1)
1598 (data->count)--;
1599
1600 stacktrace_trigger(data, buffer, rec, event);
1601}
1602
1603static int
1604stacktrace_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1605{
1606 return event_trigger_print("stacktrace", m, (void *)data->count,
1607 data->filter_str);
1608}
1609
1610static struct event_trigger_ops stacktrace_trigger_ops = {
1611 .trigger = stacktrace_trigger,
1612 .print = stacktrace_trigger_print,
1613 .init = event_trigger_init,
1614 .free = event_trigger_free,
1615};
1616
1617static struct event_trigger_ops stacktrace_count_trigger_ops = {
1618 .trigger = stacktrace_count_trigger,
1619 .print = stacktrace_trigger_print,
1620 .init = event_trigger_init,
1621 .free = event_trigger_free,
1622};
1623
1624static struct event_trigger_ops *
1625stacktrace_get_trigger_ops(char *cmd, char *param)
1626{
1627 return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1628}
1629
1630static struct event_command trigger_stacktrace_cmd = {
1631 .name = "stacktrace",
1632 .trigger_type = ETT_STACKTRACE,
1633 .flags = EVENT_CMD_FL_POST_TRIGGER,
1634 .parse = event_trigger_parse,
1635 .reg = register_trigger,
1636 .unreg = unregister_trigger,
1637 .get_trigger_ops = stacktrace_get_trigger_ops,
1638 .set_filter = set_trigger_filter,
1639};
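/*
 * Example usage (illustrative): record a kernel stack trace in the
 * trace buffer the first three times the event fires:
 *
 *	echo 'stacktrace:3' > events/kmem/kmalloc/trigger
 *
 * Like trigger_traceoff_cmd above, trigger_stacktrace_cmd sets
 * EVENT_CMD_FL_POST_TRIGGER.
 */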
1640
1641static __init int register_trigger_stacktrace_cmd(void)
1642{
1643 int ret;
1644
1645 ret = register_event_command(&trigger_stacktrace_cmd);
1646 WARN_ON(ret < 0);
1647
1648 return ret;
1649}
1650#else
1651static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1652#endif /* CONFIG_STACKTRACE */
1653
1654static __init void unregister_trigger_traceon_traceoff_cmds(void)
1655{
1656 unregister_event_command(&trigger_traceon_cmd);
1657 unregister_event_command(&trigger_traceoff_cmd);
1658}
1659
1660static void
1661event_enable_trigger(struct event_trigger_data *data,
1662 struct trace_buffer *buffer, void *rec,
1663 struct ring_buffer_event *event)
1664{
1665 struct enable_trigger_data *enable_data = data->private_data;
1666
1667 if (enable_data->enable)
1668 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1669 else
1670 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1671}
1672
1673static void
1674event_enable_count_trigger(struct event_trigger_data *data,
1675 struct trace_buffer *buffer, void *rec,
1676 struct ring_buffer_event *event)
1677{
1678 struct enable_trigger_data *enable_data = data->private_data;
1679
1680 if (!data->count)
1681 return;
1682
1683 /* Skip if the event is in a state we want to switch to */
1684 if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1685 return;
1686
1687 if (data->count != -1)
1688 (data->count)--;
1689
1690 event_enable_trigger(data, buffer, rec, event);
1691}
1692
1693int event_enable_trigger_print(struct seq_file *m,
1694 struct event_trigger_data *data)
1695{
1696 struct enable_trigger_data *enable_data = data->private_data;
1697
1698 seq_printf(m, "%s:%s:%s",
1699 enable_data->hist ?
1700 (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1701 (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1702 enable_data->file->event_call->class->system,
1703 trace_event_name(enable_data->file->event_call));
1704
1705 if (data->count == -1)
1706 seq_puts(m, ":unlimited");
1707 else
1708 seq_printf(m, ":count=%ld", data->count);
1709
1710 if (data->filter_str)
1711 seq_printf(m, " if %s\n", data->filter_str);
1712 else
1713 seq_putc(m, '\n');
1714
1715 return 0;
1716}
1717
1718void event_enable_trigger_free(struct event_trigger_data *data)
1719{
1720 struct enable_trigger_data *enable_data = data->private_data;
1721
1722 if (WARN_ON_ONCE(data->ref <= 0))
1723 return;
1724
1725 data->ref--;
1726 if (!data->ref) {
1727 /* Remove the SOFT_MODE flag */
1728 trace_event_enable_disable(enable_data->file, 0, 1);
1729 trace_event_put_ref(enable_data->file->event_call);
1730 trigger_data_free(data);
1731 kfree(enable_data);
1732 }
1733}
1734
1735static struct event_trigger_ops event_enable_trigger_ops = {
1736 .trigger = event_enable_trigger,
1737 .print = event_enable_trigger_print,
1738 .init = event_trigger_init,
1739 .free = event_enable_trigger_free,
1740};
1741
1742static struct event_trigger_ops event_enable_count_trigger_ops = {
1743 .trigger = event_enable_count_trigger,
1744 .print = event_enable_trigger_print,
1745 .init = event_trigger_init,
1746 .free = event_enable_trigger_free,
1747};
1748
1749static struct event_trigger_ops event_disable_trigger_ops = {
1750 .trigger = event_enable_trigger,
1751 .print = event_enable_trigger_print,
1752 .init = event_trigger_init,
1753 .free = event_enable_trigger_free,
1754};
1755
1756static struct event_trigger_ops event_disable_count_trigger_ops = {
1757 .trigger = event_enable_count_trigger,
1758 .print = event_enable_trigger_print,
1759 .init = event_trigger_init,
1760 .free = event_enable_trigger_free,
1761};
1762
1763int event_enable_trigger_parse(struct event_command *cmd_ops,
1764 struct trace_event_file *file,
1765 char *glob, char *cmd, char *param_and_filter)
1766{
1767 struct trace_event_file *event_enable_file;
1768 struct enable_trigger_data *enable_data;
1769 struct event_trigger_data *trigger_data;
1770 struct trace_array *tr = file->tr;
1771 char *param, *filter;
1772 bool enable, remove;
1773 const char *system;
1774 const char *event;
1775 bool hist = false;
1776 int ret;
1777
1778 remove = event_trigger_check_remove(glob);
1779
1780 if (event_trigger_empty_param(param_and_filter))
1781 return -EINVAL;
1782
1783	ret = event_trigger_separate_filter(param_and_filter, &param, &filter, true);
1784 if (ret)
1785 return ret;
1786
1787	system = strsep(&param, ":");
1788 if (!param)
1789 return -EINVAL;
1790
1791	event = strsep(&param, ":");
1792
1793 ret = -EINVAL;
1794 event_enable_file = find_event_file(tr, system, event);
1795 if (!event_enable_file)
1796 goto out;
1797
1798#ifdef CONFIG_HIST_TRIGGERS
1799 hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1800 (strcmp(cmd, DISABLE_HIST_STR) == 0));
1801
1802 enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1803 (strcmp(cmd, ENABLE_HIST_STR) == 0));
1804#else
1805 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1806#endif
1807 ret = -ENOMEM;
1808
1809 enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1810 if (!enable_data)
1811 goto out;
1812
1813 enable_data->hist = hist;
1814 enable_data->enable = enable;
1815 enable_data->file = event_enable_file;
1816
1817 trigger_data = event_trigger_alloc(cmd_ops, cmd, param, enable_data);
1818 if (!trigger_data) {
1819 kfree(enable_data);
1820 goto out;
1821 }
1822
1823 if (remove) {
1824 event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
1825 kfree(trigger_data);
1826 kfree(enable_data);
1827 ret = 0;
1828 goto out;
1829 }
1830
1831 /* Up the trigger_data count to make sure nothing frees it on failure */
1832 event_trigger_init(trigger_data);
1833
1834 ret = event_trigger_parse_num(param, trigger_data);
1835 if (ret)
1836 goto out_free;
1837
1838 ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
1839 if (ret < 0)
1840 goto out_free;
1841
1842 /* Don't let event modules unload while probe registered */
1843 ret = trace_event_try_get_ref(event_enable_file->event_call);
1844 if (!ret) {
1845 ret = -EBUSY;
1846 goto out_free;
1847 }
1848
1849 ret = trace_event_enable_disable(event_enable_file, 1, 1);
1850 if (ret < 0)
1851 goto out_put;
1852
1853 ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
1854 if (ret)
1855 goto out_disable;
1856
1857 event_trigger_free(trigger_data);
1858 out:
1859 return ret;
1860 out_disable:
1861 trace_event_enable_disable(event_enable_file, 0, 1);
1862 out_put:
1863 trace_event_put_ref(event_enable_file->event_call);
1864 out_free:
1865 event_trigger_reset_filter(cmd_ops, trigger_data);
1866 event_trigger_free(trigger_data);
1867 kfree(enable_data);
1868
1869 goto out;
1870}
1871
1872int event_enable_register_trigger(char *glob,
1873 struct event_trigger_data *data,
1874 struct trace_event_file *file)
1875{
1876 struct enable_trigger_data *enable_data = data->private_data;
1877 struct enable_trigger_data *test_enable_data;
1878 struct event_trigger_data *test;
1879 int ret = 0;
1880
1881 lockdep_assert_held(&event_mutex);
1882
1883 list_for_each_entry(test, &file->triggers, list) {
1884 test_enable_data = test->private_data;
1885 if (test_enable_data &&
1886 (test->cmd_ops->trigger_type ==
1887 data->cmd_ops->trigger_type) &&
1888 (test_enable_data->file == enable_data->file)) {
1889 ret = -EEXIST;
1890 goto out;
1891 }
1892 }
1893
1894 if (data->ops->init) {
1895 ret = data->ops->init(data);
1896 if (ret < 0)
1897 goto out;
1898 }
1899
1900 list_add_rcu(&data->list, &file->triggers);
1901
1902 update_cond_flag(file);
1903 ret = trace_event_trigger_enable_disable(file, 1);
1904 if (ret < 0) {
1905 list_del_rcu(&data->list);
1906 update_cond_flag(file);
1907 }
1908out:
1909 return ret;
1910}
1911
1912void event_enable_unregister_trigger(char *glob,
1913 struct event_trigger_data *test,
1914 struct trace_event_file *file)
1915{
1916 struct enable_trigger_data *test_enable_data = test->private_data;
1917 struct event_trigger_data *data = NULL, *iter;
1918 struct enable_trigger_data *enable_data;
1919
1920 lockdep_assert_held(&event_mutex);
1921
1922 list_for_each_entry(iter, &file->triggers, list) {
1923 enable_data = iter->private_data;
1924 if (enable_data &&
1925 (iter->cmd_ops->trigger_type ==
1926 test->cmd_ops->trigger_type) &&
1927 (enable_data->file == test_enable_data->file)) {
1928 data = iter;
1929 list_del_rcu(&data->list);
1930 trace_event_trigger_enable_disable(file, 0);
1931 update_cond_flag(file);
1932 break;
1933 }
1934 }
1935
1936 if (data && data->ops->free)
1937 data->ops->free(data);
1938}
1939
1940static struct event_trigger_ops *
1941event_enable_get_trigger_ops(char *cmd, char *param)
1942{
1943 struct event_trigger_ops *ops;
1944 bool enable;
1945
1946#ifdef CONFIG_HIST_TRIGGERS
1947 enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1948 (strcmp(cmd, ENABLE_HIST_STR) == 0));
1949#else
1950 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1951#endif
1952 if (enable)
1953 ops = param ? &event_enable_count_trigger_ops :
1954 &event_enable_trigger_ops;
1955 else
1956 ops = param ? &event_disable_count_trigger_ops :
1957 &event_disable_trigger_ops;
1958
1959 return ops;
1960}
1961
1962static struct event_command trigger_enable_cmd = {
1963 .name = ENABLE_EVENT_STR,
1964 .trigger_type = ETT_EVENT_ENABLE,
1965 .parse = event_enable_trigger_parse,
1966 .reg = event_enable_register_trigger,
1967 .unreg = event_enable_unregister_trigger,
1968 .get_trigger_ops = event_enable_get_trigger_ops,
1969 .set_filter = set_trigger_filter,
1970};
1971
1972static struct event_command trigger_disable_cmd = {
1973 .name = DISABLE_EVENT_STR,
1974 .trigger_type = ETT_EVENT_ENABLE,
1975 .parse = event_enable_trigger_parse,
1976 .reg = event_enable_register_trigger,
1977 .unreg = event_enable_unregister_trigger,
1978 .get_trigger_ops = event_enable_get_trigger_ops,
1979 .set_filter = set_trigger_filter,
1980};
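/*
 * Example usage of the enable/disable_event commands above, written to
 * some other event's 'trigger' file (paths abbreviated and
 * illustrative):
 *
 *	echo 'enable_event:kmem:kmalloc:5' > events/sched/sched_switch/trigger
 *	echo '!enable_event:kmem:kmalloc' > events/sched/sched_switch/trigger
 *
 * The first clears kmem:kmalloc's SOFT_DISABLED flag when sched_switch
 * fires, at most five times per the count; the '!' form removes the
 * trigger again.
 */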
1981
1982static __init void unregister_trigger_enable_disable_cmds(void)
1983{
1984 unregister_event_command(&trigger_enable_cmd);
1985 unregister_event_command(&trigger_disable_cmd);
1986}
1987
1988static __init int register_trigger_enable_disable_cmds(void)
1989{
1990 int ret;
1991
1992 ret = register_event_command(&trigger_enable_cmd);
1993 if (WARN_ON(ret < 0))
1994 return ret;
1995 ret = register_event_command(&trigger_disable_cmd);
1996 if (WARN_ON(ret < 0))
1997 unregister_trigger_enable_disable_cmds();
1998
1999 return ret;
2000}
2001
2002static __init int register_trigger_traceon_traceoff_cmds(void)
2003{
2004 int ret;
2005
2006 ret = register_event_command(&trigger_traceon_cmd);
2007 if (WARN_ON(ret < 0))
2008 return ret;
2009 ret = register_event_command(&trigger_traceoff_cmd);
2010 if (WARN_ON(ret < 0))
2011 unregister_trigger_traceon_traceoff_cmds();
2012
2013 return ret;
2014}
2015
2016__init int register_trigger_cmds(void)
2017{
2018 register_trigger_traceon_traceoff_cmds();
2019 register_trigger_snapshot_cmd();
2020 register_trigger_stacktrace_cmd();
2021 register_trigger_enable_disable_cmds();
2022 register_trigger_hist_enable_disable_cmds();
2023 register_trigger_hist_cmd();
2024
2025 return 0;
2026}