1/*
2 * trace_events_trigger - trace event triggers
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
19 */
20
21#include <linux/module.h>
22#include <linux/ctype.h>
23#include <linux/mutex.h>
24#include <linux/slab.h>
25
26#include "trace.h"
27
28static LIST_HEAD(trigger_commands);
29static DEFINE_MUTEX(trigger_cmd_mutex);
30
31static void
32trigger_data_free(struct event_trigger_data *data)
33{
34 if (data->cmd_ops->set_filter)
35 data->cmd_ops->set_filter(NULL, data, NULL);
36
37 synchronize_sched(); /* make sure current triggers exit before free */
38 kfree(data);
39}
40
41/**
42 * event_triggers_call - Call triggers associated with a trace event
43 * @file: The ftrace_event_file associated with the event
44 * @rec: The trace entry for the event, NULL for unconditional invocation
45 *
46 * For each trigger associated with an event, invoke the trigger
47 * function registered with the associated trigger command. If rec is
48 * non-NULL, it means that the trigger requires further processing and
49 * shouldn't be unconditionally invoked. If rec is non-NULL and the
50 * trigger has a filter associated with it, rec will be checked against
51 * the filter and, if the record matches, the trigger will be invoked.
52 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
53 * in any case until the current event is written, the trigger
54 * function isn't invoked but the bit associated with the deferred
55 * trigger is set in the return value.
56 *
57 * Returns an enum event_trigger_type value containing a set bit for
58 * any trigger that should be deferred, ETT_NONE if nothing to defer.
59 *
60 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
61 *
62 * Return: an enum event_trigger_type value containing a set bit for
63 * any trigger that should be deferred, ETT_NONE if nothing to defer.
64 */
65enum event_trigger_type
66event_triggers_call(struct ftrace_event_file *file, void *rec)
67{
68 struct event_trigger_data *data;
69 enum event_trigger_type tt = ETT_NONE;
70 struct event_filter *filter;
71
72 if (list_empty(&file->triggers))
73 return tt;
74
75 list_for_each_entry_rcu(data, &file->triggers, list) {
76 if (!rec) {
77 data->ops->func(data);
78 continue;
79 }
80 filter = rcu_dereference_sched(data->filter);
81 if (filter && !filter_match_preds(filter, rec))
82 continue;
83 if (data->cmd_ops->post_trigger) {
84 tt |= data->cmd_ops->trigger_type;
85 continue;
86 }
87 data->ops->func(data);
88 }
89 return tt;
90}
91EXPORT_SYMBOL_GPL(event_triggers_call);
92
93/**
94 * event_triggers_post_call - Call 'post_triggers' for a trace event
95 * @file: The ftrace_event_file associated with the event
96 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
97 *
98 * For each trigger associated with an event, invoke the trigger
99 * function registered with the associated trigger command, if the
100 * corresponding bit is set in the tt enum passed into this function.
101 * See @event_triggers_call for details on how those bits are set.
102 *
103 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
104 */
105void
106event_triggers_post_call(struct ftrace_event_file *file,
107 enum event_trigger_type tt)
108{
109 struct event_trigger_data *data;
110
111 list_for_each_entry_rcu(data, &file->triggers, list) {
112 if (data->cmd_ops->trigger_type & tt)
113 data->ops->func(data);
114 }
115}
116EXPORT_SYMBOL_GPL(event_triggers_post_call);
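
/*
 * A minimal sketch (hypothetical helper, for illustration only; the real
 * callers live in the ftrace event macros) of how a tracepoint handler
 * pairs the two calls above: event_triggers_call() runs unconditional
 * triggers and collects deferred 'post_trigger' bits before the record
 * is committed, and event_triggers_post_call() fires those afterwards.
 */
#if 0	/* illustrative only, not built */
static void example_event_handler(struct ftrace_event_file *file, void *entry)
{
	enum event_trigger_type tt;

	tt = event_triggers_call(file, entry);

	/* ... reserve and commit the trace entry to the ring buffer ... */

	if (tt != ETT_NONE)
		event_triggers_post_call(file, tt);
}
#endif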
117
118#define SHOW_AVAILABLE_TRIGGERS (void *)(1UL)
119
120static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
121{
122 struct ftrace_event_file *event_file = event_file_data(m->private);
123
124 if (t == SHOW_AVAILABLE_TRIGGERS)
125 return NULL;
126
127 return seq_list_next(t, &event_file->triggers, pos);
128}
129
130static void *trigger_start(struct seq_file *m, loff_t *pos)
131{
132 struct ftrace_event_file *event_file;
133
134 /* ->stop() is called even if ->start() fails */
135 mutex_lock(&event_mutex);
136 event_file = event_file_data(m->private);
137 if (unlikely(!event_file))
138 return ERR_PTR(-ENODEV);
139
140 if (list_empty(&event_file->triggers))
141 return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
142
143 return seq_list_start(&event_file->triggers, *pos);
144}
145
146static void trigger_stop(struct seq_file *m, void *t)
147{
148 mutex_unlock(&event_mutex);
149}
150
151static int trigger_show(struct seq_file *m, void *v)
152{
153 struct event_trigger_data *data;
154 struct event_command *p;
155
156 if (v == SHOW_AVAILABLE_TRIGGERS) {
157 seq_puts(m, "# Available triggers:\n");
158 seq_putc(m, '#');
159 mutex_lock(&trigger_cmd_mutex);
160 list_for_each_entry_reverse(p, &trigger_commands, list)
161 seq_printf(m, " %s", p->name);
162 seq_putc(m, '\n');
163 mutex_unlock(&trigger_cmd_mutex);
164 return 0;
165 }
166
167 data = list_entry(v, struct event_trigger_data, list);
168 data->ops->print(m, data->ops, data);
169
170 return 0;
171}
172
173static const struct seq_operations event_triggers_seq_ops = {
174 .start = trigger_start,
175 .next = trigger_next,
176 .stop = trigger_stop,
177 .show = trigger_show,
178};
179
180static int event_trigger_regex_open(struct inode *inode, struct file *file)
181{
182 int ret = 0;
183
184 mutex_lock(&event_mutex);
185
186 if (unlikely(!event_file_data(file))) {
187 mutex_unlock(&event_mutex);
188 return -ENODEV;
189 }
190
191 if (file->f_mode & FMODE_READ) {
192 ret = seq_open(file, &event_triggers_seq_ops);
193 if (!ret) {
194 struct seq_file *m = file->private_data;
195 m->private = file;
196 }
197 }
198
199 mutex_unlock(&event_mutex);
200
201 return ret;
202}
203
204static int trigger_process_regex(struct ftrace_event_file *file, char *buff)
205{
206 char *command, *next = buff;
207 struct event_command *p;
208 int ret = -EINVAL;
209
210 command = strsep(&next, ": \t");
211 command = (command[0] != '!') ? command : command + 1;
212
213 mutex_lock(&trigger_cmd_mutex);
214 list_for_each_entry(p, &trigger_commands, list) {
215 if (strcmp(p->name, command) == 0) {
216 ret = p->func(p, file, buff, command, next);
217 goto out_unlock;
218 }
219 }
220 out_unlock:
221 mutex_unlock(&trigger_cmd_mutex);
222
223 return ret;
224}
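
/*
 * For illustration, a few example lines as they arrive in 'buff' above
 * (written by user space to an event's 'trigger' file) and how they are
 * split before dispatch:
 *
 *   "traceoff"                       command = "traceoff", next = NULL
 *   "stacktrace:5 if prev_pid == 0"  command = "stacktrace", next = "5 if prev_pid == 0"
 *   "!snapshot"                      the '!' is skipped when matching the
 *                                    command name, but stays in 'buff' so
 *                                    the command's func() sees the removal
 *                                    request via its glob argument.
 */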
225
226static ssize_t event_trigger_regex_write(struct file *file,
227 const char __user *ubuf,
228 size_t cnt, loff_t *ppos)
229{
230 struct ftrace_event_file *event_file;
231 ssize_t ret;
232 char *buf;
233
234 if (!cnt)
235 return 0;
236
237 if (cnt >= PAGE_SIZE)
238 return -EINVAL;
239
240 buf = (char *)__get_free_page(GFP_TEMPORARY);
241 if (!buf)
242 return -ENOMEM;
243
244 if (copy_from_user(buf, ubuf, cnt)) {
245 free_page((unsigned long)buf);
246 return -EFAULT;
247 }
248 buf[cnt] = '\0';
249 strim(buf);
250
251 mutex_lock(&event_mutex);
252 event_file = event_file_data(file);
253 if (unlikely(!event_file)) {
254 mutex_unlock(&event_mutex);
255 free_page((unsigned long)buf);
256 return -ENODEV;
257 }
258 ret = trigger_process_regex(event_file, buf);
259 mutex_unlock(&event_mutex);
260
261 free_page((unsigned long)buf);
262 if (ret < 0)
263 goto out;
264
265 *ppos += cnt;
266 ret = cnt;
267 out:
268 return ret;
269}
270
271static int event_trigger_regex_release(struct inode *inode, struct file *file)
272{
273 mutex_lock(&event_mutex);
274
275 if (file->f_mode & FMODE_READ)
276 seq_release(inode, file);
277
278 mutex_unlock(&event_mutex);
279
280 return 0;
281}
282
283static ssize_t
284event_trigger_write(struct file *filp, const char __user *ubuf,
285 size_t cnt, loff_t *ppos)
286{
287 return event_trigger_regex_write(filp, ubuf, cnt, ppos);
288}
289
290static int
291event_trigger_open(struct inode *inode, struct file *filp)
292{
293 return event_trigger_regex_open(inode, filp);
294}
295
296static int
297event_trigger_release(struct inode *inode, struct file *file)
298{
299 return event_trigger_regex_release(inode, file);
300}
301
302const struct file_operations event_trigger_fops = {
303 .open = event_trigger_open,
304 .read = seq_read,
305 .write = event_trigger_write,
306 .llseek = tracing_lseek,
307 .release = event_trigger_release,
308};
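
/*
 * event_trigger_fops backs the per-event 'trigger' file.  Example usage
 * from user space (paths assume tracefs/debugfs mounted under
 * /sys/kernel/debug/tracing; adjust for your system):
 *
 *   # list installed triggers, or the available commands if none are set
 *   cat /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 *
 *   # install a trigger
 *   echo 'traceoff' > /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 *
 *   # remove it again
 *   echo '!traceoff' > /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 */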
309
310/*
311 * Currently we only register event commands from __init, so mark this
312 * __init too.
313 */
314static __init int register_event_command(struct event_command *cmd)
315{
316 struct event_command *p;
317 int ret = 0;
318
319 mutex_lock(&trigger_cmd_mutex);
320 list_for_each_entry(p, &trigger_commands, list) {
321 if (strcmp(cmd->name, p->name) == 0) {
322 ret = -EBUSY;
323 goto out_unlock;
324 }
325 }
326 list_add(&cmd->list, &trigger_commands);
327 out_unlock:
328 mutex_unlock(&trigger_cmd_mutex);
329
330 return ret;
331}
332
333/*
334 * Currently we only unregister event commands from __init, so mark
335 * this __init too.
336 */
337static __init int unregister_event_command(struct event_command *cmd)
338{
339 struct event_command *p, *n;
340 int ret = -ENODEV;
341
342 mutex_lock(&trigger_cmd_mutex);
343 list_for_each_entry_safe(p, n, &trigger_commands, list) {
344 if (strcmp(cmd->name, p->name) == 0) {
345 ret = 0;
346 list_del_init(&p->list);
347 goto out_unlock;
348 }
349 }
350 out_unlock:
351 mutex_unlock(&trigger_cmd_mutex);
352
353 return ret;
354}
355
356/**
357 * event_trigger_print - Generic event_trigger_ops @print implementation
358 * @name: The name of the event trigger
359 * @m: The seq_file being printed to
360 * @data: Trigger-specific data
361 * @filter_str: filter_str to print, if present
362 *
363 * Common implementation for event triggers to print themselves.
364 *
365 * Usually wrapped by a function that simply sets the @name of the
366 * trigger command and then invokes this.
367 *
368 * Return: 0 on success, errno otherwise
369 */
370static int
371event_trigger_print(const char *name, struct seq_file *m,
372 void *data, char *filter_str)
373{
374 long count = (long)data;
375
376 seq_printf(m, "%s", name);
377
378 if (count == -1)
379 seq_puts(m, ":unlimited");
380 else
381 seq_printf(m, ":count=%ld", count);
382
383 if (filter_str)
384 seq_printf(m, " if %s\n", filter_str);
385 else
386 seq_puts(m, "\n");
387
388 return 0;
389}
390
391/**
392 * event_trigger_init - Generic event_trigger_ops @init implementation
393 * @ops: The trigger ops associated with the trigger
394 * @data: Trigger-specific data
395 *
396 * Common implementation of event trigger initialization.
397 *
398 * Usually used directly as the @init method in event trigger
399 * implementations.
400 *
401 * Return: 0 on success, errno otherwise
402 */
403static int
404event_trigger_init(struct event_trigger_ops *ops,
405 struct event_trigger_data *data)
406{
407 data->ref++;
408 return 0;
409}
410
411/**
412 * event_trigger_free - Generic event_trigger_ops @free implementation
413 * @ops: The trigger ops associated with the trigger
414 * @data: Trigger-specific data
415 *
416 * Common implementation of event trigger de-initialization.
417 *
418 * Usually used directly as the @free method in event trigger
419 * implementations.
420 */
421static void
422event_trigger_free(struct event_trigger_ops *ops,
423 struct event_trigger_data *data)
424{
425 if (WARN_ON_ONCE(data->ref <= 0))
426 return;
427
428 data->ref--;
429 if (!data->ref)
430 trigger_data_free(data);
431}
432
433static int trace_event_trigger_enable_disable(struct ftrace_event_file *file,
434 int trigger_enable)
435{
436 int ret = 0;
437
438 if (trigger_enable) {
439 if (atomic_inc_return(&file->tm_ref) > 1)
440 return ret;
441 set_bit(FTRACE_EVENT_FL_TRIGGER_MODE_BIT, &file->flags);
442 ret = trace_event_enable_disable(file, 1, 1);
443 } else {
444 if (atomic_dec_return(&file->tm_ref) > 0)
445 return ret;
446 clear_bit(FTRACE_EVENT_FL_TRIGGER_MODE_BIT, &file->flags);
447 ret = trace_event_enable_disable(file, 0, 1);
448 }
449
450 return ret;
451}
452
453/**
454 * clear_event_triggers - Clear all triggers associated with a trace array
455 * @tr: The trace array to clear
456 *
457 * For each trigger, the triggering event has its tm_ref decremented
458 * via trace_event_trigger_enable_disable(), and any associated event
459 * (in the case of enable/disable_event triggers) will have its sm_ref
460 * decremented via free()->trace_event_enable_disable(). That
461 * combination effectively reverses the soft-mode/trigger state added
462 * by trigger registration.
463 *
464 * Must be called with event_mutex held.
465 */
466void
467clear_event_triggers(struct trace_array *tr)
468{
469 struct ftrace_event_file *file;
470
471 list_for_each_entry(file, &tr->events, list) {
472 struct event_trigger_data *data;
473 list_for_each_entry_rcu(data, &file->triggers, list) {
474 trace_event_trigger_enable_disable(file, 0);
475 if (data->ops->free)
476 data->ops->free(data->ops, data);
477 }
478 }
479}
480
481/**
482 * update_cond_flag - Set or reset the TRIGGER_COND bit
483 * @file: The ftrace_event_file associated with the event
484 *
485 * If an event has triggers and any of those triggers has a filter or
486 * a post_trigger, trigger invocation needs to be deferred until after
487 * the current event has logged its data, and the event should have
488 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
489 * cleared.
490 */
491static void update_cond_flag(struct ftrace_event_file *file)
492{
493 struct event_trigger_data *data;
494 bool set_cond = false;
495
496 list_for_each_entry_rcu(data, &file->triggers, list) {
497 if (data->filter || data->cmd_ops->post_trigger) {
498 set_cond = true;
499 break;
500 }
501 }
502
503 if (set_cond)
504 set_bit(FTRACE_EVENT_FL_TRIGGER_COND_BIT, &file->flags);
505 else
506 clear_bit(FTRACE_EVENT_FL_TRIGGER_COND_BIT, &file->flags);
507}
508
509/**
510 * register_trigger - Generic event_command @reg implementation
511 * @glob: The raw string used to register the trigger
512 * @ops: The trigger ops associated with the trigger
513 * @data: Trigger-specific data to associate with the trigger
514 * @file: The ftrace_event_file associated with the event
515 *
516 * Common implementation for event trigger registration.
517 *
518 * Usually used directly as the @reg method in event command
519 * implementations.
520 *
521 * Return: 0 on success, errno otherwise
522 */
523static int register_trigger(char *glob, struct event_trigger_ops *ops,
524 struct event_trigger_data *data,
525 struct ftrace_event_file *file)
526{
527 struct event_trigger_data *test;
528 int ret = 0;
529
530 list_for_each_entry_rcu(test, &file->triggers, list) {
531 if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
532 ret = -EEXIST;
533 goto out;
534 }
535 }
536
537 if (data->ops->init) {
538 ret = data->ops->init(data->ops, data);
539 if (ret < 0)
540 goto out;
541 }
542
543 list_add_rcu(&data->list, &file->triggers);
544 ret++;
545
546 if (trace_event_trigger_enable_disable(file, 1) < 0) {
547 list_del_rcu(&data->list);
548 ret--;
549 }
550 update_cond_flag(file);
551out:
552 return ret;
553}
554
555/**
556 * unregister_trigger - Generic event_command @unreg implementation
557 * @glob: The raw string used to register the trigger
558 * @ops: The trigger ops associated with the trigger
559 * @test: Trigger-specific data used to find the trigger to remove
560 * @file: The ftrace_event_file associated with the event
561 *
562 * Common implementation for event trigger unregistration.
563 *
564 * Usually used directly as the @unreg method in event command
565 * implementations.
566 */
567static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
568 struct event_trigger_data *test,
569 struct ftrace_event_file *file)
570{
571 struct event_trigger_data *data;
572 bool unregistered = false;
573
574 list_for_each_entry_rcu(data, &file->triggers, list) {
575 if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
576 unregistered = true;
577 list_del_rcu(&data->list);
578 update_cond_flag(file);
579 trace_event_trigger_enable_disable(file, 0);
580 break;
581 }
582 }
583
584 if (unregistered && data->ops->free)
585 data->ops->free(data->ops, data);
586}
587
588/**
589 * event_trigger_callback - Generic event_command @func implementation
590 * @cmd_ops: The command ops, used for trigger registration
591 * @file: The ftrace_event_file associated with the event
592 * @glob: The raw string used to register the trigger
593 * @cmd: The cmd portion of the string used to register the trigger
594 * @param: The params portion of the string used to register the trigger
595 *
596 * Common implementation for event command parsing and trigger
597 * instantiation.
598 *
599 * Usually used directly as the @func method in event command
600 * implementations.
601 *
602 * Return: 0 on success, errno otherwise
603 */
604static int
605event_trigger_callback(struct event_command *cmd_ops,
606 struct ftrace_event_file *file,
607 char *glob, char *cmd, char *param)
608{
609 struct event_trigger_data *trigger_data;
610 struct event_trigger_ops *trigger_ops;
611 char *trigger = NULL;
612 char *number;
613 int ret;
614
615 /* separate the trigger from the filter (t:n [if filter]) */
616 if (param && isdigit(param[0]))
617 trigger = strsep(¶m, " \t");
618
619 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
620
621 ret = -ENOMEM;
622 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
623 if (!trigger_data)
624 goto out;
625
626 trigger_data->count = -1;
627 trigger_data->ops = trigger_ops;
628 trigger_data->cmd_ops = cmd_ops;
629 INIT_LIST_HEAD(&trigger_data->list);
630
631 if (glob[0] == '!') {
632 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
633 kfree(trigger_data);
634 ret = 0;
635 goto out;
636 }
637
638 if (trigger) {
639 number = strsep(&trigger, ":");
640
641 ret = -EINVAL;
642 if (!strlen(number))
643 goto out_free;
644
645 /*
646 * We use the callback data field (which is a pointer)
647 * as our counter.
648 */
649 ret = kstrtoul(number, 0, &trigger_data->count);
650 if (ret)
651 goto out_free;
652 }
653
654 if (!param) /* if param is non-empty, it's supposed to be a filter */
655 goto out_reg;
656
657 if (!cmd_ops->set_filter)
658 goto out_reg;
659
660 ret = cmd_ops->set_filter(param, trigger_data, file);
661 if (ret < 0)
662 goto out_free;
663
664 out_reg:
665 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
666 /*
667 * The above returns on success the # of functions enabled,
668 * but if it didn't find any functions it returns zero.
669 * Consider no functions a failure too.
670 */
671 if (!ret) {
672 ret = -ENOENT;
673 goto out_free;
674 } else if (ret < 0)
675 goto out_free;
676 ret = 0;
677 out:
678 return ret;
679
680 out_free:
681 if (cmd_ops->set_filter)
682 cmd_ops->set_filter(NULL, trigger_data, NULL);
683 kfree(trigger_data);
684 goto out;
685}
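
/*
 * As a worked (hypothetical) example, writing "traceoff:5 if common_pid == 1234"
 * reaches event_trigger_callback() as:
 *
 *   glob  = "traceoff"   (same buffer as cmd; "!traceoff" for a removal)
 *   cmd   = "traceoff"
 *   param = "5 if common_pid == 1234"
 *
 * Since param starts with a digit, the leading "5" is split off and parsed
 * into trigger_data->count, and the remaining "if common_pid == 1234" is
 * handed to ->set_filter() before the trigger is registered.
 */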
686
687/**
688 * set_trigger_filter - Generic event_command @set_filter implementation
689 * @filter_str: The filter string for the trigger, NULL to remove filter
690 * @trigger_data: Trigger-specific data
691 * @file: The ftrace_event_file associated with the event
692 *
693 * Common implementation for event command filter parsing and filter
694 * instantiation.
695 *
696 * Usually used directly as the @set_filter method in event command
697 * implementations.
698 *
699 * Also used to remove a filter (if filter_str = NULL).
700 *
701 * Return: 0 on success, errno otherwise
702 */
703static int set_trigger_filter(char *filter_str,
704 struct event_trigger_data *trigger_data,
705 struct ftrace_event_file *file)
706{
707 struct event_trigger_data *data = trigger_data;
708 struct event_filter *filter = NULL, *tmp;
709 int ret = -EINVAL;
710 char *s;
711
712 if (!filter_str) /* clear the current filter */
713 goto assign;
714
715 s = strsep(&filter_str, " \t");
716
717 if (!strlen(s) || strcmp(s, "if") != 0)
718 goto out;
719
720 if (!filter_str)
721 goto out;
722
723 /* The filter is for the 'trigger' event, not the triggered event */
724 ret = create_event_filter(file->event_call, filter_str, false, &filter);
725 if (ret)
726 goto out;
727 assign:
728 tmp = rcu_access_pointer(data->filter);
729
730 rcu_assign_pointer(data->filter, filter);
731
732 if (tmp) {
733 /* Make sure the call is done with the filter */
734 synchronize_sched();
735 free_event_filter(tmp);
736 }
737
738 kfree(data->filter_str);
739 data->filter_str = NULL;
740
741 if (filter_str) {
742 data->filter_str = kstrdup(filter_str, GFP_KERNEL);
743 if (!data->filter_str) {
744 free_event_filter(rcu_access_pointer(data->filter));
745 data->filter = NULL;
746 ret = -ENOMEM;
747 }
748 }
749 out:
750 return ret;
751}
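
/*
 * For example, a hypothetical write of "traceon:1 if common_pid == 1234"
 * reaches set_trigger_filter() with filter_str = "if common_pid == 1234":
 * the "if" keyword is stripped and "common_pid == 1234" is compiled with
 * create_event_filter() against the fields of the *triggering* event.
 * Calling it with filter_str == NULL (as trigger_data_free() does) just
 * removes and frees any previously attached filter.
 */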
752
753static void
754traceon_trigger(struct event_trigger_data *data)
755{
756 if (tracing_is_on())
757 return;
758
759 tracing_on();
760}
761
762static void
763traceon_count_trigger(struct event_trigger_data *data)
764{
765 if (tracing_is_on())
766 return;
767
768 if (!data->count)
769 return;
770
771 if (data->count != -1)
772 (data->count)--;
773
774 tracing_on();
775}
776
777static void
778traceoff_trigger(struct event_trigger_data *data)
779{
780 if (!tracing_is_on())
781 return;
782
783 tracing_off();
784}
785
786static void
787traceoff_count_trigger(struct event_trigger_data *data)
788{
789 if (!tracing_is_on())
790 return;
791
792 if (!data->count)
793 return;
794
795 if (data->count != -1)
796 (data->count)--;
797
798 tracing_off();
799}
800
801static int
802traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
803 struct event_trigger_data *data)
804{
805 return event_trigger_print("traceon", m, (void *)data->count,
806 data->filter_str);
807}
808
809static int
810traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
811 struct event_trigger_data *data)
812{
813 return event_trigger_print("traceoff", m, (void *)data->count,
814 data->filter_str);
815}
816
817static struct event_trigger_ops traceon_trigger_ops = {
818 .func = traceon_trigger,
819 .print = traceon_trigger_print,
820 .init = event_trigger_init,
821 .free = event_trigger_free,
822};
823
824static struct event_trigger_ops traceon_count_trigger_ops = {
825 .func = traceon_count_trigger,
826 .print = traceon_trigger_print,
827 .init = event_trigger_init,
828 .free = event_trigger_free,
829};
830
831static struct event_trigger_ops traceoff_trigger_ops = {
832 .func = traceoff_trigger,
833 .print = traceoff_trigger_print,
834 .init = event_trigger_init,
835 .free = event_trigger_free,
836};
837
838static struct event_trigger_ops traceoff_count_trigger_ops = {
839 .func = traceoff_count_trigger,
840 .print = traceoff_trigger_print,
841 .init = event_trigger_init,
842 .free = event_trigger_free,
843};
844
845static struct event_trigger_ops *
846onoff_get_trigger_ops(char *cmd, char *param)
847{
848 struct event_trigger_ops *ops;
849
850 /* we register both traceon and traceoff to this callback */
851 if (strcmp(cmd, "traceon") == 0)
852 ops = param ? &traceon_count_trigger_ops :
853 &traceon_trigger_ops;
854 else
855 ops = param ? &traceoff_count_trigger_ops :
856 &traceoff_trigger_ops;
857
858 return ops;
859}
860
861static struct event_command trigger_traceon_cmd = {
862 .name = "traceon",
863 .trigger_type = ETT_TRACE_ONOFF,
864 .func = event_trigger_callback,
865 .reg = register_trigger,
866 .unreg = unregister_trigger,
867 .get_trigger_ops = onoff_get_trigger_ops,
868 .set_filter = set_trigger_filter,
869};
870
871static struct event_command trigger_traceoff_cmd = {
872 .name = "traceoff",
873 .trigger_type = ETT_TRACE_ONOFF,
874 .func = event_trigger_callback,
875 .reg = register_trigger,
876 .unreg = unregister_trigger,
877 .get_trigger_ops = onoff_get_trigger_ops,
878 .set_filter = set_trigger_filter,
879};
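
/*
 * Example (assumed tracefs path): freeze the trace buffer the first time
 * a particular event fires, for post-mortem inspection:
 *
 *   echo 'traceoff:1' > /sys/kernel/debug/tracing/events/block/block_rq_complete/trigger
 *
 * Without the ':1' count the trigger fires on every event; 'traceon' is
 * the symmetric command for turning tracing back on.
 */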
880
881#ifdef CONFIG_TRACER_SNAPSHOT
882static void
883snapshot_trigger(struct event_trigger_data *data)
884{
885 tracing_snapshot();
886}
887
888static void
889snapshot_count_trigger(struct event_trigger_data *data)
890{
891 if (!data->count)
892 return;
893
894 if (data->count != -1)
895 (data->count)--;
896
897 snapshot_trigger(data);
898}
899
900static int
901register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
902 struct event_trigger_data *data,
903 struct ftrace_event_file *file)
904{
905 int ret = register_trigger(glob, ops, data, file);
906
907 if (ret > 0 && tracing_alloc_snapshot() != 0) {
908 unregister_trigger(glob, ops, data, file);
909 ret = 0;
910 }
911
912 return ret;
913}
914
915static int
916snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
917 struct event_trigger_data *data)
918{
919 return event_trigger_print("snapshot", m, (void *)data->count,
920 data->filter_str);
921}
922
923static struct event_trigger_ops snapshot_trigger_ops = {
924 .func = snapshot_trigger,
925 .print = snapshot_trigger_print,
926 .init = event_trigger_init,
927 .free = event_trigger_free,
928};
929
930static struct event_trigger_ops snapshot_count_trigger_ops = {
931 .func = snapshot_count_trigger,
932 .print = snapshot_trigger_print,
933 .init = event_trigger_init,
934 .free = event_trigger_free,
935};
936
937static struct event_trigger_ops *
938snapshot_get_trigger_ops(char *cmd, char *param)
939{
940 return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
941}
942
943static struct event_command trigger_snapshot_cmd = {
944 .name = "snapshot",
945 .trigger_type = ETT_SNAPSHOT,
946 .func = event_trigger_callback,
947 .reg = register_snapshot_trigger,
948 .unreg = unregister_trigger,
949 .get_trigger_ops = snapshot_get_trigger_ops,
950 .set_filter = set_trigger_filter,
951};
952
953static __init int register_trigger_snapshot_cmd(void)
954{
955 int ret;
956
957 ret = register_event_command(&trigger_snapshot_cmd);
958 WARN_ON(ret < 0);
959
960 return ret;
961}
962#else
963static __init int register_trigger_snapshot_cmd(void) { return 0; }
964#endif /* CONFIG_TRACER_SNAPSHOT */
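
/*
 * Example (assumed tracefs path): take a snapshot of the trace buffer the
 * first five times the event fires:
 *
 *   echo 'snapshot:5' > /sys/kernel/debug/tracing/events/sched/sched_process_exit/trigger
 *
 * register_snapshot_trigger() above allocates the snapshot buffer at
 * registration time, so no allocation ever happens in event context.
 */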
965
966#ifdef CONFIG_STACKTRACE
967/*
968 * Skip 3:
969 * stacktrace_trigger()
970 * event_triggers_post_call()
971 * ftrace_raw_event_xxx()
972 */
973#define STACK_SKIP 3
974
975static void
976stacktrace_trigger(struct event_trigger_data *data)
977{
978 trace_dump_stack(STACK_SKIP);
979}
980
981static void
982stacktrace_count_trigger(struct event_trigger_data *data)
983{
984 if (!data->count)
985 return;
986
987 if (data->count != -1)
988 (data->count)--;
989
990 stacktrace_trigger(data);
991}
992
993static int
994stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
995 struct event_trigger_data *data)
996{
997 return event_trigger_print("stacktrace", m, (void *)data->count,
998 data->filter_str);
999}
1000
1001static struct event_trigger_ops stacktrace_trigger_ops = {
1002 .func = stacktrace_trigger,
1003 .print = stacktrace_trigger_print,
1004 .init = event_trigger_init,
1005 .free = event_trigger_free,
1006};
1007
1008static struct event_trigger_ops stacktrace_count_trigger_ops = {
1009 .func = stacktrace_count_trigger,
1010 .print = stacktrace_trigger_print,
1011 .init = event_trigger_init,
1012 .free = event_trigger_free,
1013};
1014
1015static struct event_trigger_ops *
1016stacktrace_get_trigger_ops(char *cmd, char *param)
1017{
1018 return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1019}
1020
1021static struct event_command trigger_stacktrace_cmd = {
1022 .name = "stacktrace",
1023 .trigger_type = ETT_STACKTRACE,
1024 .post_trigger = true,
1025 .func = event_trigger_callback,
1026 .reg = register_trigger,
1027 .unreg = unregister_trigger,
1028 .get_trigger_ops = stacktrace_get_trigger_ops,
1029 .set_filter = set_trigger_filter,
1030};
1031
1032static __init int register_trigger_stacktrace_cmd(void)
1033{
1034 int ret;
1035
1036 ret = register_event_command(&trigger_stacktrace_cmd);
1037 WARN_ON(ret < 0);
1038
1039 return ret;
1040}
1041#else
1042static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1043#endif /* CONFIG_STACKTRACE */
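
/*
 * Example (assumed tracefs path): record a kernel stack trace in the ring
 * buffer whenever the event fires for pid 1:
 *
 *   echo 'stacktrace if common_pid == 1' > /sys/kernel/debug/tracing/events/irq/irq_handler_entry/trigger
 *
 * Because trigger_stacktrace_cmd sets .post_trigger, the dump is deferred
 * to event_triggers_post_call() and lands after the event record itself.
 */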
1044
1045static __init void unregister_trigger_traceon_traceoff_cmds(void)
1046{
1047 unregister_event_command(&trigger_traceon_cmd);
1048 unregister_event_command(&trigger_traceoff_cmd);
1049}
1050
1051/* Avoid typos */
1052#define ENABLE_EVENT_STR "enable_event"
1053#define DISABLE_EVENT_STR "disable_event"
1054
1055struct enable_trigger_data {
1056 struct ftrace_event_file *file;
1057 bool enable;
1058};
1059
1060static void
1061event_enable_trigger(struct event_trigger_data *data)
1062{
1063 struct enable_trigger_data *enable_data = data->private_data;
1064
1065 if (enable_data->enable)
1066 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1067 else
1068 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1069}
1070
1071static void
1072event_enable_count_trigger(struct event_trigger_data *data)
1073{
1074 struct enable_trigger_data *enable_data = data->private_data;
1075
1076 if (!data->count)
1077 return;
1078
1079 /* Skip if the event is in a state we want to switch to */
1080 if (enable_data->enable == !(enable_data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
1081 return;
1082
1083 if (data->count != -1)
1084 (data->count)--;
1085
1086 event_enable_trigger(data);
1087}
1088
1089static int
1090event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1091 struct event_trigger_data *data)
1092{
1093 struct enable_trigger_data *enable_data = data->private_data;
1094
1095 seq_printf(m, "%s:%s:%s",
1096 enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
1097 enable_data->file->event_call->class->system,
1098 ftrace_event_name(enable_data->file->event_call));
1099
1100 if (data->count == -1)
1101 seq_puts(m, ":unlimited");
1102 else
1103 seq_printf(m, ":count=%ld", data->count);
1104
1105 if (data->filter_str)
1106 seq_printf(m, " if %s\n", data->filter_str);
1107 else
1108 seq_puts(m, "\n");
1109
1110 return 0;
1111}
1112
1113static void
1114event_enable_trigger_free(struct event_trigger_ops *ops,
1115 struct event_trigger_data *data)
1116{
1117 struct enable_trigger_data *enable_data = data->private_data;
1118
1119 if (WARN_ON_ONCE(data->ref <= 0))
1120 return;
1121
1122 data->ref--;
1123 if (!data->ref) {
1124 /* Remove the SOFT_MODE flag */
1125 trace_event_enable_disable(enable_data->file, 0, 1);
1126 module_put(enable_data->file->event_call->mod);
1127 trigger_data_free(data);
1128 kfree(enable_data);
1129 }
1130}
1131
1132static struct event_trigger_ops event_enable_trigger_ops = {
1133 .func = event_enable_trigger,
1134 .print = event_enable_trigger_print,
1135 .init = event_trigger_init,
1136 .free = event_enable_trigger_free,
1137};
1138
1139static struct event_trigger_ops event_enable_count_trigger_ops = {
1140 .func = event_enable_count_trigger,
1141 .print = event_enable_trigger_print,
1142 .init = event_trigger_init,
1143 .free = event_enable_trigger_free,
1144};
1145
1146static struct event_trigger_ops event_disable_trigger_ops = {
1147 .func = event_enable_trigger,
1148 .print = event_enable_trigger_print,
1149 .init = event_trigger_init,
1150 .free = event_enable_trigger_free,
1151};
1152
1153static struct event_trigger_ops event_disable_count_trigger_ops = {
1154 .func = event_enable_count_trigger,
1155 .print = event_enable_trigger_print,
1156 .init = event_trigger_init,
1157 .free = event_enable_trigger_free,
1158};
1159
1160static int
1161event_enable_trigger_func(struct event_command *cmd_ops,
1162 struct ftrace_event_file *file,
1163 char *glob, char *cmd, char *param)
1164{
1165 struct ftrace_event_file *event_enable_file;
1166 struct enable_trigger_data *enable_data;
1167 struct event_trigger_data *trigger_data;
1168 struct event_trigger_ops *trigger_ops;
1169 struct trace_array *tr = file->tr;
1170 const char *system;
1171 const char *event;
1172 char *trigger;
1173 char *number;
1174 bool enable;
1175 int ret;
1176
1177 if (!param)
1178 return -EINVAL;
1179
1180 /* separate the trigger from the filter (s:e:n [if filter]) */
1181 trigger = strsep(¶m, " \t");
1182 if (!trigger)
1183 return -EINVAL;
1184
1185 system = strsep(&trigger, ":");
1186 if (!trigger)
1187 return -EINVAL;
1188
1189 event = strsep(&trigger, ":");
1190
1191 ret = -EINVAL;
1192 event_enable_file = find_event_file(tr, system, event);
1193 if (!event_enable_file)
1194 goto out;
1195
1196 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1197
1198 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1199
1200 ret = -ENOMEM;
1201 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1202 if (!trigger_data)
1203 goto out;
1204
1205 enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1206 if (!enable_data) {
1207 kfree(trigger_data);
1208 goto out;
1209 }
1210
1211 trigger_data->count = -1;
1212 trigger_data->ops = trigger_ops;
1213 trigger_data->cmd_ops = cmd_ops;
1214 INIT_LIST_HEAD(&trigger_data->list);
1215 RCU_INIT_POINTER(trigger_data->filter, NULL);
1216
1217 enable_data->enable = enable;
1218 enable_data->file = event_enable_file;
1219 trigger_data->private_data = enable_data;
1220
1221 if (glob[0] == '!') {
1222 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1223 kfree(trigger_data);
1224 kfree(enable_data);
1225 ret = 0;
1226 goto out;
1227 }
1228
1229 if (trigger) {
1230 number = strsep(&trigger, ":");
1231
1232 ret = -EINVAL;
1233 if (!strlen(number))
1234 goto out_free;
1235
1236 /*
1237 * We use the callback data field (which is a pointer)
1238 * as our counter.
1239 */
1240 ret = kstrtoul(number, 0, &trigger_data->count);
1241 if (ret)
1242 goto out_free;
1243 }
1244
1245 if (!param) /* if param is non-empty, it's supposed to be a filter */
1246 goto out_reg;
1247
1248 if (!cmd_ops->set_filter)
1249 goto out_reg;
1250
1251 ret = cmd_ops->set_filter(param, trigger_data, file);
1252 if (ret < 0)
1253 goto out_free;
1254
1255 out_reg:
1256 /* Don't let event modules unload while probe registered */
1257 ret = try_module_get(event_enable_file->event_call->mod);
1258 if (!ret) {
1259 ret = -EBUSY;
1260 goto out_free;
1261 }
1262
1263 ret = trace_event_enable_disable(event_enable_file, 1, 1);
1264 if (ret < 0)
1265 goto out_put;
1266 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1267 /*
1268 * The above returns on success the # of functions enabled,
1269 * but if it didn't find any functions it returns zero.
1270 * Consider no functions a failure too.
1271 */
1272 if (!ret) {
1273 ret = -ENOENT;
1274 goto out_disable;
1275 } else if (ret < 0)
1276 goto out_disable;
1277 /* Just return zero, not the number of enabled functions */
1278 ret = 0;
1279 out:
1280 return ret;
1281
1282 out_disable:
1283 trace_event_enable_disable(event_enable_file, 0, 1);
1284 out_put:
1285 module_put(event_enable_file->event_call->mod);
1286 out_free:
1287 if (cmd_ops->set_filter)
1288 cmd_ops->set_filter(NULL, trigger_data, NULL);
1289 kfree(trigger_data);
1290 kfree(enable_data);
1291 goto out;
1292}
1293
1294static int event_enable_register_trigger(char *glob,
1295 struct event_trigger_ops *ops,
1296 struct event_trigger_data *data,
1297 struct ftrace_event_file *file)
1298{
1299 struct enable_trigger_data *enable_data = data->private_data;
1300 struct enable_trigger_data *test_enable_data;
1301 struct event_trigger_data *test;
1302 int ret = 0;
1303
1304 list_for_each_entry_rcu(test, &file->triggers, list) {
1305 test_enable_data = test->private_data;
1306 if (test_enable_data &&
1307 (test_enable_data->file == enable_data->file)) {
1308 ret = -EEXIST;
1309 goto out;
1310 }
1311 }
1312
1313 if (data->ops->init) {
1314 ret = data->ops->init(data->ops, data);
1315 if (ret < 0)
1316 goto out;
1317 }
1318
1319 list_add_rcu(&data->list, &file->triggers);
1320 ret++;
1321
1322 if (trace_event_trigger_enable_disable(file, 1) < 0) {
1323 list_del_rcu(&data->list);
1324 ret--;
1325 }
1326 update_cond_flag(file);
1327out:
1328 return ret;
1329}
1330
1331static void event_enable_unregister_trigger(char *glob,
1332 struct event_trigger_ops *ops,
1333 struct event_trigger_data *test,
1334 struct ftrace_event_file *file)
1335{
1336 struct enable_trigger_data *test_enable_data = test->private_data;
1337 struct enable_trigger_data *enable_data;
1338 struct event_trigger_data *data;
1339 bool unregistered = false;
1340
1341 list_for_each_entry_rcu(data, &file->triggers, list) {
1342 enable_data = data->private_data;
1343 if (enable_data &&
1344 (enable_data->file == test_enable_data->file)) {
1345 unregistered = true;
1346 list_del_rcu(&data->list);
1347 update_cond_flag(file);
1348 trace_event_trigger_enable_disable(file, 0);
1349 break;
1350 }
1351 }
1352
1353 if (unregistered && data->ops->free)
1354 data->ops->free(data->ops, data);
1355}
1356
1357static struct event_trigger_ops *
1358event_enable_get_trigger_ops(char *cmd, char *param)
1359{
1360 struct event_trigger_ops *ops;
1361 bool enable;
1362
1363 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1364
1365 if (enable)
1366 ops = param ? &event_enable_count_trigger_ops :
1367 &event_enable_trigger_ops;
1368 else
1369 ops = param ? &event_disable_count_trigger_ops :
1370 &event_disable_trigger_ops;
1371
1372 return ops;
1373}
1374
1375static struct event_command trigger_enable_cmd = {
1376 .name = ENABLE_EVENT_STR,
1377 .trigger_type = ETT_EVENT_ENABLE,
1378 .func = event_enable_trigger_func,
1379 .reg = event_enable_register_trigger,
1380 .unreg = event_enable_unregister_trigger,
1381 .get_trigger_ops = event_enable_get_trigger_ops,
1382 .set_filter = set_trigger_filter,
1383};
1384
1385static struct event_command trigger_disable_cmd = {
1386 .name = DISABLE_EVENT_STR,
1387 .trigger_type = ETT_EVENT_ENABLE,
1388 .func = event_enable_trigger_func,
1389 .reg = event_enable_register_trigger,
1390 .unreg = event_enable_unregister_trigger,
1391 .get_trigger_ops = event_enable_get_trigger_ops,
1392 .set_filter = set_trigger_filter,
1393};
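
/*
 * Example (assumed tracefs path): enable the kmem:kmalloc event for the
 * next three occurrences of sched:sched_switch, then stop:
 *
 *   echo 'enable_event:kmem:kmalloc:3' > /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 *
 * 'disable_event' uses the same syntax to soft-disable the target event.
 * The target is held in SOFT_MODE for as long as the trigger exists, so
 * it can be flipped on and off without re-registering its callbacks.
 */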
1394
1395static __init void unregister_trigger_enable_disable_cmds(void)
1396{
1397 unregister_event_command(&trigger_enable_cmd);
1398 unregister_event_command(&trigger_disable_cmd);
1399}
1400
1401static __init int register_trigger_enable_disable_cmds(void)
1402{
1403 int ret;
1404
1405 ret = register_event_command(&trigger_enable_cmd);
1406 if (WARN_ON(ret < 0))
1407 return ret;
1408 ret = register_event_command(&trigger_disable_cmd);
1409 if (WARN_ON(ret < 0))
1410 unregister_trigger_enable_disable_cmds();
1411
1412 return ret;
1413}
1414
1415static __init int register_trigger_traceon_traceoff_cmds(void)
1416{
1417 int ret;
1418
1419 ret = register_event_command(&trigger_traceon_cmd);
1420 if (WARN_ON(ret < 0))
1421 return ret;
1422 ret = register_event_command(&trigger_traceoff_cmd);
1423 if (WARN_ON(ret < 0))
1424 unregister_trigger_traceon_traceoff_cmds();
1425
1426 return ret;
1427}
1428
1429__init int register_trigger_cmds(void)
1430{
1431 register_trigger_traceon_traceoff_cmds();
1432 register_trigger_snapshot_cmd();
1433 register_trigger_stacktrace_cmd();
1434 register_trigger_enable_disable_cmds();
1435
1436 return 0;
1437}
1/*
2 * trace_events_trigger - trace event triggers
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
19 */
20
21#include <linux/module.h>
22#include <linux/ctype.h>
23#include <linux/mutex.h>
24#include <linux/slab.h>
25#include <linux/rculist.h>
26
27#include "trace.h"
28
29static LIST_HEAD(trigger_commands);
30static DEFINE_MUTEX(trigger_cmd_mutex);
31
32void trigger_data_free(struct event_trigger_data *data)
33{
34 if (data->cmd_ops->set_filter)
35 data->cmd_ops->set_filter(NULL, data, NULL);
36
37 synchronize_sched(); /* make sure current triggers exit before free */
38 kfree(data);
39}
40
41/**
42 * event_triggers_call - Call triggers associated with a trace event
43 * @file: The trace_event_file associated with the event
44 * @rec: The trace entry for the event, NULL for unconditional invocation
45 *
46 * For each trigger associated with an event, invoke the trigger
47 * function registered with the associated trigger command. If rec is
48 * non-NULL, it means that the trigger requires further processing and
49 * shouldn't be unconditionally invoked. If rec is non-NULL and the
50 * trigger has a filter associated with it, rec will checked against
51 * the filter and if the record matches the trigger will be invoked.
52 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
53 * in any case until the current event is written, the trigger
54 * function isn't invoked but the bit associated with the deferred
55 * trigger is set in the return value.
56 *
57 * Returns an enum event_trigger_type value containing a set bit for
58 * any trigger that should be deferred, ETT_NONE if nothing to defer.
59 *
60 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
61 *
62 * Return: an enum event_trigger_type value containing a set bit for
63 * any trigger that should be deferred, ETT_NONE if nothing to defer.
64 */
65enum event_trigger_type
66event_triggers_call(struct trace_event_file *file, void *rec,
67 struct ring_buffer_event *event)
68{
69 struct event_trigger_data *data;
70 enum event_trigger_type tt = ETT_NONE;
71 struct event_filter *filter;
72
73 if (list_empty(&file->triggers))
74 return tt;
75
76 list_for_each_entry_rcu(data, &file->triggers, list) {
77 if (data->paused)
78 continue;
79 if (!rec) {
80 data->ops->func(data, rec, event);
81 continue;
82 }
83 filter = rcu_dereference_sched(data->filter);
84 if (filter && !filter_match_preds(filter, rec))
85 continue;
86 if (event_command_post_trigger(data->cmd_ops)) {
87 tt |= data->cmd_ops->trigger_type;
88 continue;
89 }
90 data->ops->func(data, rec, event);
91 }
92 return tt;
93}
94EXPORT_SYMBOL_GPL(event_triggers_call);
95
96/**
97 * event_triggers_post_call - Call 'post_triggers' for a trace event
98 * @file: The trace_event_file associated with the event
99 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
100 * @rec: The trace entry for the event
101 *
102 * For each trigger associated with an event, invoke the trigger
103 * function registered with the associated trigger command, if the
104 * corresponding bit is set in the tt enum passed into this function.
105 * See @event_triggers_call for details on how those bits are set.
106 *
107 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
108 */
109void
110event_triggers_post_call(struct trace_event_file *file,
111 enum event_trigger_type tt,
112 void *rec, struct ring_buffer_event *event)
113{
114 struct event_trigger_data *data;
115
116 list_for_each_entry_rcu(data, &file->triggers, list) {
117 if (data->paused)
118 continue;
119 if (data->cmd_ops->trigger_type & tt)
120 data->ops->func(data, rec, event);
121 }
122}
123EXPORT_SYMBOL_GPL(event_triggers_post_call);
124
125#define SHOW_AVAILABLE_TRIGGERS (void *)(1UL)
126
127static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
128{
129 struct trace_event_file *event_file = event_file_data(m->private);
130
131 if (t == SHOW_AVAILABLE_TRIGGERS)
132 return NULL;
133
134 return seq_list_next(t, &event_file->triggers, pos);
135}
136
137static void *trigger_start(struct seq_file *m, loff_t *pos)
138{
139 struct trace_event_file *event_file;
140
141 /* ->stop() is called even if ->start() fails */
142 mutex_lock(&event_mutex);
143 event_file = event_file_data(m->private);
144 if (unlikely(!event_file))
145 return ERR_PTR(-ENODEV);
146
147 if (list_empty(&event_file->triggers))
148 return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
149
150 return seq_list_start(&event_file->triggers, *pos);
151}
152
153static void trigger_stop(struct seq_file *m, void *t)
154{
155 mutex_unlock(&event_mutex);
156}
157
158static int trigger_show(struct seq_file *m, void *v)
159{
160 struct event_trigger_data *data;
161 struct event_command *p;
162
163 if (v == SHOW_AVAILABLE_TRIGGERS) {
164 seq_puts(m, "# Available triggers:\n");
165 seq_putc(m, '#');
166 mutex_lock(&trigger_cmd_mutex);
167 list_for_each_entry_reverse(p, &trigger_commands, list)
168 seq_printf(m, " %s", p->name);
169 seq_putc(m, '\n');
170 mutex_unlock(&trigger_cmd_mutex);
171 return 0;
172 }
173
174 data = list_entry(v, struct event_trigger_data, list);
175 data->ops->print(m, data->ops, data);
176
177 return 0;
178}
179
180static const struct seq_operations event_triggers_seq_ops = {
181 .start = trigger_start,
182 .next = trigger_next,
183 .stop = trigger_stop,
184 .show = trigger_show,
185};
186
187static int event_trigger_regex_open(struct inode *inode, struct file *file)
188{
189 int ret = 0;
190
191 mutex_lock(&event_mutex);
192
193 if (unlikely(!event_file_data(file))) {
194 mutex_unlock(&event_mutex);
195 return -ENODEV;
196 }
197
198 if ((file->f_mode & FMODE_WRITE) &&
199 (file->f_flags & O_TRUNC)) {
200 struct trace_event_file *event_file;
201 struct event_command *p;
202
203 event_file = event_file_data(file);
204
205 list_for_each_entry(p, &trigger_commands, list) {
206 if (p->unreg_all)
207 p->unreg_all(event_file);
208 }
209 }
210
211 if (file->f_mode & FMODE_READ) {
212 ret = seq_open(file, &event_triggers_seq_ops);
213 if (!ret) {
214 struct seq_file *m = file->private_data;
215 m->private = file;
216 }
217 }
218
219 mutex_unlock(&event_mutex);
220
221 return ret;
222}
223
224static int trigger_process_regex(struct trace_event_file *file, char *buff)
225{
226 char *command, *next = buff;
227 struct event_command *p;
228 int ret = -EINVAL;
229
230 command = strsep(&next, ": \t");
231 command = (command[0] != '!') ? command : command + 1;
232
233 mutex_lock(&trigger_cmd_mutex);
234 list_for_each_entry(p, &trigger_commands, list) {
235 if (strcmp(p->name, command) == 0) {
236 ret = p->func(p, file, buff, command, next);
237 goto out_unlock;
238 }
239 }
240 out_unlock:
241 mutex_unlock(&trigger_cmd_mutex);
242
243 return ret;
244}
245
246static ssize_t event_trigger_regex_write(struct file *file,
247 const char __user *ubuf,
248 size_t cnt, loff_t *ppos)
249{
250 struct trace_event_file *event_file;
251 ssize_t ret;
252 char *buf;
253
254 if (!cnt)
255 return 0;
256
257 if (cnt >= PAGE_SIZE)
258 return -EINVAL;
259
260 buf = memdup_user_nul(ubuf, cnt);
261 if (IS_ERR(buf))
262 return PTR_ERR(buf);
263
264 strim(buf);
265
266 mutex_lock(&event_mutex);
267 event_file = event_file_data(file);
268 if (unlikely(!event_file)) {
269 mutex_unlock(&event_mutex);
270 kfree(buf);
271 return -ENODEV;
272 }
273 ret = trigger_process_regex(event_file, buf);
274 mutex_unlock(&event_mutex);
275
276 kfree(buf);
277 if (ret < 0)
278 goto out;
279
280 *ppos += cnt;
281 ret = cnt;
282 out:
283 return ret;
284}
285
286static int event_trigger_regex_release(struct inode *inode, struct file *file)
287{
288 mutex_lock(&event_mutex);
289
290 if (file->f_mode & FMODE_READ)
291 seq_release(inode, file);
292
293 mutex_unlock(&event_mutex);
294
295 return 0;
296}
297
298static ssize_t
299event_trigger_write(struct file *filp, const char __user *ubuf,
300 size_t cnt, loff_t *ppos)
301{
302 return event_trigger_regex_write(filp, ubuf, cnt, ppos);
303}
304
305static int
306event_trigger_open(struct inode *inode, struct file *filp)
307{
308 return event_trigger_regex_open(inode, filp);
309}
310
311static int
312event_trigger_release(struct inode *inode, struct file *file)
313{
314 return event_trigger_regex_release(inode, file);
315}
316
317const struct file_operations event_trigger_fops = {
318 .open = event_trigger_open,
319 .read = seq_read,
320 .write = event_trigger_write,
321 .llseek = tracing_lseek,
322 .release = event_trigger_release,
323};
324
325/*
326 * Currently we only register event commands from __init, so mark this
327 * __init too.
328 */
329__init int register_event_command(struct event_command *cmd)
330{
331 struct event_command *p;
332 int ret = 0;
333
334 mutex_lock(&trigger_cmd_mutex);
335 list_for_each_entry(p, &trigger_commands, list) {
336 if (strcmp(cmd->name, p->name) == 0) {
337 ret = -EBUSY;
338 goto out_unlock;
339 }
340 }
341 list_add(&cmd->list, &trigger_commands);
342 out_unlock:
343 mutex_unlock(&trigger_cmd_mutex);
344
345 return ret;
346}
347
348/*
349 * Currently we only unregister event commands from __init, so mark
350 * this __init too.
351 */
352__init int unregister_event_command(struct event_command *cmd)
353{
354 struct event_command *p, *n;
355 int ret = -ENODEV;
356
357 mutex_lock(&trigger_cmd_mutex);
358 list_for_each_entry_safe(p, n, &trigger_commands, list) {
359 if (strcmp(cmd->name, p->name) == 0) {
360 ret = 0;
361 list_del_init(&p->list);
362 goto out_unlock;
363 }
364 }
365 out_unlock:
366 mutex_unlock(&trigger_cmd_mutex);
367
368 return ret;
369}
370
371/**
372 * event_trigger_print - Generic event_trigger_ops @print implementation
373 * @name: The name of the event trigger
374 * @m: The seq_file being printed to
375 * @data: Trigger-specific data
376 * @filter_str: filter_str to print, if present
377 *
378 * Common implementation for event triggers to print themselves.
379 *
380 * Usually wrapped by a function that simply sets the @name of the
381 * trigger command and then invokes this.
382 *
383 * Return: 0 on success, errno otherwise
384 */
385static int
386event_trigger_print(const char *name, struct seq_file *m,
387 void *data, char *filter_str)
388{
389 long count = (long)data;
390
391 seq_puts(m, name);
392
393 if (count == -1)
394 seq_puts(m, ":unlimited");
395 else
396 seq_printf(m, ":count=%ld", count);
397
398 if (filter_str)
399 seq_printf(m, " if %s\n", filter_str);
400 else
401 seq_putc(m, '\n');
402
403 return 0;
404}
405
406/**
407 * event_trigger_init - Generic event_trigger_ops @init implementation
408 * @ops: The trigger ops associated with the trigger
409 * @data: Trigger-specific data
410 *
411 * Common implementation of event trigger initialization.
412 *
413 * Usually used directly as the @init method in event trigger
414 * implementations.
415 *
416 * Return: 0 on success, errno otherwise
417 */
418int event_trigger_init(struct event_trigger_ops *ops,
419 struct event_trigger_data *data)
420{
421 data->ref++;
422 return 0;
423}
424
425/**
426 * event_trigger_free - Generic event_trigger_ops @free implementation
427 * @ops: The trigger ops associated with the trigger
428 * @data: Trigger-specific data
429 *
430 * Common implementation of event trigger de-initialization.
431 *
432 * Usually used directly as the @free method in event trigger
433 * implementations.
434 */
435static void
436event_trigger_free(struct event_trigger_ops *ops,
437 struct event_trigger_data *data)
438{
439 if (WARN_ON_ONCE(data->ref <= 0))
440 return;
441
442 data->ref--;
443 if (!data->ref)
444 trigger_data_free(data);
445}
446
447int trace_event_trigger_enable_disable(struct trace_event_file *file,
448 int trigger_enable)
449{
450 int ret = 0;
451
452 if (trigger_enable) {
453 if (atomic_inc_return(&file->tm_ref) > 1)
454 return ret;
455 set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
456 ret = trace_event_enable_disable(file, 1, 1);
457 } else {
458 if (atomic_dec_return(&file->tm_ref) > 0)
459 return ret;
460 clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
461 ret = trace_event_enable_disable(file, 0, 1);
462 }
463
464 return ret;
465}
466
467/**
468 * clear_event_triggers - Clear all triggers associated with a trace array
469 * @tr: The trace array to clear
470 *
471 * For each trigger, the triggering event has its tm_ref decremented
472 * via trace_event_trigger_enable_disable(), and any associated event
473 * (in the case of enable/disable_event triggers) will have its sm_ref
474 * decremented via free()->trace_event_enable_disable(). That
475 * combination effectively reverses the soft-mode/trigger state added
476 * by trigger registration.
477 *
478 * Must be called with event_mutex held.
479 */
480void
481clear_event_triggers(struct trace_array *tr)
482{
483 struct trace_event_file *file;
484
485 list_for_each_entry(file, &tr->events, list) {
486 struct event_trigger_data *data, *n;
487 list_for_each_entry_safe(data, n, &file->triggers, list) {
488 trace_event_trigger_enable_disable(file, 0);
489 list_del_rcu(&data->list);
490 if (data->ops->free)
491 data->ops->free(data->ops, data);
492 }
493 }
494}
495
496/**
497 * update_cond_flag - Set or reset the TRIGGER_COND bit
498 * @file: The trace_event_file associated with the event
499 *
500 * If an event has triggers and any of those triggers has a filter or
501 * a post_trigger, trigger invocation needs to be deferred until after
502 * the current event has logged its data, and the event should have
503 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
504 * cleared.
505 */
506void update_cond_flag(struct trace_event_file *file)
507{
508 struct event_trigger_data *data;
509 bool set_cond = false;
510
511 list_for_each_entry_rcu(data, &file->triggers, list) {
512 if (data->filter || event_command_post_trigger(data->cmd_ops) ||
513 event_command_needs_rec(data->cmd_ops)) {
514 set_cond = true;
515 break;
516 }
517 }
518
519 if (set_cond)
520 set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
521 else
522 clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
523}
524
525/**
526 * register_trigger - Generic event_command @reg implementation
527 * @glob: The raw string used to register the trigger
528 * @ops: The trigger ops associated with the trigger
529 * @data: Trigger-specific data to associate with the trigger
530 * @file: The trace_event_file associated with the event
531 *
532 * Common implementation for event trigger registration.
533 *
534 * Usually used directly as the @reg method in event command
535 * implementations.
536 *
537 * Return: 0 on success, errno otherwise
538 */
539static int register_trigger(char *glob, struct event_trigger_ops *ops,
540 struct event_trigger_data *data,
541 struct trace_event_file *file)
542{
543 struct event_trigger_data *test;
544 int ret = 0;
545
546 list_for_each_entry_rcu(test, &file->triggers, list) {
547 if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
548 ret = -EEXIST;
549 goto out;
550 }
551 }
552
553 if (data->ops->init) {
554 ret = data->ops->init(data->ops, data);
555 if (ret < 0)
556 goto out;
557 }
558
559 list_add_rcu(&data->list, &file->triggers);
560 ret++;
561
562 update_cond_flag(file);
563 if (trace_event_trigger_enable_disable(file, 1) < 0) {
564 list_del_rcu(&data->list);
565 update_cond_flag(file);
566 ret--;
567 }
568out:
569 return ret;
570}
571
572/**
573 * unregister_trigger - Generic event_command @unreg implementation
574 * @glob: The raw string used to register the trigger
575 * @ops: The trigger ops associated with the trigger
576 * @test: Trigger-specific data used to find the trigger to remove
577 * @file: The trace_event_file associated with the event
578 *
579 * Common implementation for event trigger unregistration.
580 *
581 * Usually used directly as the @unreg method in event command
582 * implementations.
583 */
584void unregister_trigger(char *glob, struct event_trigger_ops *ops,
585 struct event_trigger_data *test,
586 struct trace_event_file *file)
587{
588 struct event_trigger_data *data;
589 bool unregistered = false;
590
591 list_for_each_entry_rcu(data, &file->triggers, list) {
592 if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
593 unregistered = true;
594 list_del_rcu(&data->list);
595 trace_event_trigger_enable_disable(file, 0);
596 update_cond_flag(file);
597 break;
598 }
599 }
600
601 if (unregistered && data->ops->free)
602 data->ops->free(data->ops, data);
603}
604
605/**
606 * event_trigger_callback - Generic event_command @func implementation
607 * @cmd_ops: The command ops, used for trigger registration
608 * @file: The trace_event_file associated with the event
609 * @glob: The raw string used to register the trigger
610 * @cmd: The cmd portion of the string used to register the trigger
611 * @param: The params portion of the string used to register the trigger
612 *
613 * Common implementation for event command parsing and trigger
614 * instantiation.
615 *
616 * Usually used directly as the @func method in event command
617 * implementations.
618 *
619 * Return: 0 on success, errno otherwise
620 */
621static int
622event_trigger_callback(struct event_command *cmd_ops,
623 struct trace_event_file *file,
624 char *glob, char *cmd, char *param)
625{
626 struct event_trigger_data *trigger_data;
627 struct event_trigger_ops *trigger_ops;
628 char *trigger = NULL;
629 char *number;
630 int ret;
631
632 /* separate the trigger from the filter (t:n [if filter]) */
633 if (param && isdigit(param[0]))
634 trigger = strsep(&param, " \t");
635
636 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
637
638 ret = -ENOMEM;
639 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
640 if (!trigger_data)
641 goto out;
642
643 trigger_data->count = -1;
644 trigger_data->ops = trigger_ops;
645 trigger_data->cmd_ops = cmd_ops;
646 trigger_data->private_data = file;
647 INIT_LIST_HEAD(&trigger_data->list);
648 INIT_LIST_HEAD(&trigger_data->named_list);
649
650 if (glob[0] == '!') {
651 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
652 kfree(trigger_data);
653 ret = 0;
654 goto out;
655 }
656
657 if (trigger) {
658 number = strsep(&trigger, ":");
659
660 ret = -EINVAL;
661 if (!strlen(number))
662 goto out_free;
663
664 /*
665 * The optional ':n' portion of the command gives the
666 * trigger count; -1 (the default) means unlimited.
667 */
668 ret = kstrtoul(number, 0, &trigger_data->count);
669 if (ret)
670 goto out_free;
671 }
672
673 if (!param) /* if param is non-empty, it's supposed to be a filter */
674 goto out_reg;
675
676 if (!cmd_ops->set_filter)
677 goto out_reg;
678
679 ret = cmd_ops->set_filter(param, trigger_data, file);
680 if (ret < 0)
681 goto out_free;
682
683 out_reg:
684 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
685 /*
686 * The above returns the number of triggers registered,
687 * but if it registered none it returns zero.  Consider
688 * registering no triggers a failure too.
689 */
690 if (!ret) {
691 ret = -ENOENT;
692 goto out_free;
693 } else if (ret < 0)
694 goto out_free;
695 ret = 0;
696 out:
697 return ret;
698
699 out_free:
700 if (cmd_ops->set_filter)
701 cmd_ops->set_filter(NULL, trigger_data, NULL);
702 kfree(trigger_data);
703 goto out;
704}
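
/*
 * Illustrative walk-through of the parsing above: for a command such
 * as 'traceoff:5 if prev_pid == 0' written to an event's 'trigger'
 * file, @cmd is "traceoff" and @param is "5 if prev_pid == 0".  The
 * leading "5" is split off and parsed into trigger_data->count, and
 * the remaining "if prev_pid == 0" is handed to cmd_ops->set_filter().
 * Prefixing the command with '!' unregisters a previously set trigger
 * instead.
 */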
705
706/**
707 * set_trigger_filter - Generic event_command @set_filter implementation
708 * @filter_str: The filter string for the trigger, NULL to remove filter
709 * @trigger_data: Trigger-specific data
710 * @file: The trace_event_file associated with the event
711 *
712 * Common implementation for event command filter parsing and filter
713 * instantiation.
714 *
715 * Usually used directly as the @set_filter method in event command
716 * implementations.
717 *
718 * Also used to remove a filter (if filter_str is NULL).
719 *
720 * Return: 0 on success, errno otherwise
721 */
722int set_trigger_filter(char *filter_str,
723 struct event_trigger_data *trigger_data,
724 struct trace_event_file *file)
725{
726 struct event_trigger_data *data = trigger_data;
727 struct event_filter *filter = NULL, *tmp;
728 int ret = -EINVAL;
729 char *s;
730
731 if (!filter_str) /* clear the current filter */
732 goto assign;
733
734 s = strsep(&filter_str, " \t");
735
736 if (!strlen(s) || strcmp(s, "if") != 0)
737 goto out;
738
739 if (!filter_str)
740 goto out;
741
742 /* The filter is for the 'trigger' event, not the triggered event */
743 ret = create_event_filter(file->event_call, filter_str, false, &filter);
744 if (ret)
745 goto out;
746 assign:
747 tmp = rcu_access_pointer(data->filter);
748
749 rcu_assign_pointer(data->filter, filter);
750
751 if (tmp) {
752 /* Make sure the call is done with the filter */
753 synchronize_sched();
754 free_event_filter(tmp);
755 }
756
757 kfree(data->filter_str);
758 data->filter_str = NULL;
759
760 if (filter_str) {
761 data->filter_str = kstrdup(filter_str, GFP_KERNEL);
762 if (!data->filter_str) {
763 free_event_filter(rcu_access_pointer(data->filter));
764 data->filter = NULL;
765 ret = -ENOMEM;
766 }
767 }
768 out:
769 return ret;
770}
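
/*
 * Illustrative example: given filter_str = "if common_pid != 0", the
 * leading "if" is stripped, "common_pid != 0" is compiled with
 * create_event_filter() against the triggering event, the result is
 * RCU-assigned to data->filter, and the raw string is duplicated into
 * data->filter_str so the trigger can later be printed back to user
 * space.
 */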
771
772static LIST_HEAD(named_triggers);
773
774/**
775 * find_named_trigger - Find the common named trigger associated with @name
776 * @name: The name of the set of named triggers to find the common data for
777 *
778 * Named triggers are sets of triggers that share a common set of
779 * trigger data. The first named trigger registered with a given name
780 * owns the common trigger data that the others subsequently
781 * registered with the same name will reference. This function
782 * returns the common trigger data associated with that first
783 * registered instance.
784 *
785 * Return: the common trigger data for the given named trigger on
786 * success, NULL otherwise.
787 */
788struct event_trigger_data *find_named_trigger(const char *name)
789{
790 struct event_trigger_data *data;
791
792 if (!name)
793 return NULL;
794
795 list_for_each_entry(data, &named_triggers, named_list) {
796 if (data->named_data)
797 continue;
798 if (strcmp(data->name, name) == 0)
799 return data;
800 }
801
802 return NULL;
803}
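
/*
 * Illustrative example (hist triggers are the main user of named
 * triggers): registering something like 'hist:keys=pid:name=foo' on
 * one event and then another 'name=foo' hist trigger on a second
 * event makes the second trigger share the first one's trigger data;
 * find_named_trigger() is what locates that first, data-owning
 * instance for "foo".
 */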
804
805/**
806 * is_named_trigger - determine if a given trigger is a named trigger
807 * @test: The trigger data to test
808 *
809 * Return: true if 'test' is a named trigger, false otherwise.
810 */
811bool is_named_trigger(struct event_trigger_data *test)
812{
813 struct event_trigger_data *data;
814
815 list_for_each_entry(data, &named_triggers, named_list) {
816 if (test == data)
817 return true;
818 }
819
820 return false;
821}
822
823/**
824 * save_named_trigger - save the trigger in the named trigger list
825 * @name: The name of the named trigger set
826 * @data: The trigger data to save
827 *
828 * Return: 0 if successful, negative error otherwise.
829 */
830int save_named_trigger(const char *name, struct event_trigger_data *data)
831{
832 data->name = kstrdup(name, GFP_KERNEL);
833 if (!data->name)
834 return -ENOMEM;
835
836 list_add(&data->named_list, &named_triggers);
837
838 return 0;
839}
840
841/**
842 * del_named_trigger - delete a trigger from the named trigger list
843 * @data: The trigger data to delete
844 */
845void del_named_trigger(struct event_trigger_data *data)
846{
847 kfree(data->name);
848 data->name = NULL;
849
850 list_del(&data->named_list);
851}
852
853static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
854{
855 struct event_trigger_data *test;
856
857 list_for_each_entry(test, &named_triggers, named_list) {
858 if (strcmp(test->name, data->name) == 0) {
859 if (pause) {
860 test->paused_tmp = test->paused;
861 test->paused = true;
862 } else {
863 test->paused = test->paused_tmp;
864 }
865 }
866 }
867}
868
869/**
870 * pause_named_trigger - Pause all named triggers with the same name
871 * @data: The trigger data of a named trigger to pause
872 *
873 * Pauses a named trigger along with all other triggers having the
874 * same name. Because named triggers share a common set of data,
875 * pausing only one is meaningless, so pausing one named trigger needs
876 * to pause all triggers with the same name.
877 */
878void pause_named_trigger(struct event_trigger_data *data)
879{
880 __pause_named_trigger(data, true);
881}
882
883/**
884 * unpause_named_trigger - Un-pause all named triggers with the same name
885 * @data: The trigger data of a named trigger to unpause
886 *
887 * Un-pauses a named trigger along with all other triggers having the
888 * same name. Because named triggers share a common set of data,
889 * unpausing only one is meaningless, so unpausing one named trigger
890 * needs to unpause all triggers with the same name.
891 */
892void unpause_named_trigger(struct event_trigger_data *data)
893{
894 __pause_named_trigger(data, false);
895}
896
897/**
898 * set_named_trigger_data - Associate common named trigger data
899 * @data: The trigger data to associate
 * @named_data: The common named trigger to be associated
900 *
901 * Named triggers are sets of triggers that share a common set of
902 * trigger data. The first named trigger registered with a given name
903 * owns the common trigger data that the others subsequently
904 * registered with the same name will reference. This function
905 * associates the common trigger data from the first trigger with the
906 * given trigger.
907 */
908void set_named_trigger_data(struct event_trigger_data *data,
909 struct event_trigger_data *named_data)
910{
911 data->named_data = named_data;
912}
913
914struct event_trigger_data *
915get_named_trigger_data(struct event_trigger_data *data)
916{
917 return data->named_data;
918}
919
920static void
921traceon_trigger(struct event_trigger_data *data, void *rec,
922 struct ring_buffer_event *event)
923{
924 if (tracing_is_on())
925 return;
926
927 tracing_on();
928}
929
930static void
931traceon_count_trigger(struct event_trigger_data *data, void *rec,
932 struct ring_buffer_event *event)
933{
934 if (tracing_is_on())
935 return;
936
937 if (!data->count)
938 return;
939
940 if (data->count != -1)
941 (data->count)--;
942
943 tracing_on();
944}
945
946static void
947traceoff_trigger(struct event_trigger_data *data, void *rec,
948 struct ring_buffer_event *event)
949{
950 if (!tracing_is_on())
951 return;
952
953 tracing_off();
954}
955
956static void
957traceoff_count_trigger(struct event_trigger_data *data, void *rec,
958 struct ring_buffer_event *event)
959{
960 if (!tracing_is_on())
961 return;
962
963 if (!data->count)
964 return;
965
966 if (data->count != -1)
967 (data->count)--;
968
969 tracing_off();
970}
971
972static int
973traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
974 struct event_trigger_data *data)
975{
976 return event_trigger_print("traceon", m, (void *)data->count,
977 data->filter_str);
978}
979
980static int
981traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
982 struct event_trigger_data *data)
983{
984 return event_trigger_print("traceoff", m, (void *)data->count,
985 data->filter_str);
986}
987
988static struct event_trigger_ops traceon_trigger_ops = {
989 .func = traceon_trigger,
990 .print = traceon_trigger_print,
991 .init = event_trigger_init,
992 .free = event_trigger_free,
993};
994
995static struct event_trigger_ops traceon_count_trigger_ops = {
996 .func = traceon_count_trigger,
997 .print = traceon_trigger_print,
998 .init = event_trigger_init,
999 .free = event_trigger_free,
1000};
1001
1002static struct event_trigger_ops traceoff_trigger_ops = {
1003 .func = traceoff_trigger,
1004 .print = traceoff_trigger_print,
1005 .init = event_trigger_init,
1006 .free = event_trigger_free,
1007};
1008
1009static struct event_trigger_ops traceoff_count_trigger_ops = {
1010 .func = traceoff_count_trigger,
1011 .print = traceoff_trigger_print,
1012 .init = event_trigger_init,
1013 .free = event_trigger_free,
1014};
1015
1016static struct event_trigger_ops *
1017onoff_get_trigger_ops(char *cmd, char *param)
1018{
1019 struct event_trigger_ops *ops;
1020
1021 /* we register both traceon and traceoff to this callback */
1022 if (strcmp(cmd, "traceon") == 0)
1023 ops = param ? &traceon_count_trigger_ops :
1024 &traceon_trigger_ops;
1025 else
1026 ops = param ? &traceoff_count_trigger_ops :
1027 &traceoff_trigger_ops;
1028
1029 return ops;
1030}
1031
1032static struct event_command trigger_traceon_cmd = {
1033 .name = "traceon",
1034 .trigger_type = ETT_TRACE_ONOFF,
1035 .func = event_trigger_callback,
1036 .reg = register_trigger,
1037 .unreg = unregister_trigger,
1038 .get_trigger_ops = onoff_get_trigger_ops,
1039 .set_filter = set_trigger_filter,
1040};
1041
1042static struct event_command trigger_traceoff_cmd = {
1043 .name = "traceoff",
1044 .trigger_type = ETT_TRACE_ONOFF,
1045 .flags = EVENT_CMD_FL_POST_TRIGGER,
1046 .func = event_trigger_callback,
1047 .reg = register_trigger,
1048 .unreg = unregister_trigger,
1049 .get_trigger_ops = onoff_get_trigger_ops,
1050 .set_filter = set_trigger_filter,
1051};
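
/*
 * Typical usage from user space (paths are illustrative; tracefs is
 * commonly mounted at /sys/kernel/tracing or /sys/kernel/debug/tracing):
 *
 *   echo 'traceoff' > events/sched/sched_switch/trigger
 *   echo 'traceon:1 if prev_pid == 0' > events/sched/sched_switch/trigger
 *   echo '!traceoff' > events/sched/sched_switch/trigger
 *
 * The first turns tracing off whenever sched_switch fires, the second
 * turns it back on at most once when the filter matches, and the '!'
 * form removes the traceoff trigger again.
 */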
1052
1053#ifdef CONFIG_TRACER_SNAPSHOT
1054static void
1055snapshot_trigger(struct event_trigger_data *data, void *rec,
1056 struct ring_buffer_event *event)
1057{
1058 struct trace_event_file *file = data->private_data;
1059
1060 if (file)
1061 tracing_snapshot_instance(file->tr);
1062 else
1063 tracing_snapshot();
1064}
1065
1066static void
1067snapshot_count_trigger(struct event_trigger_data *data, void *rec,
1068 struct ring_buffer_event *event)
1069{
1070 if (!data->count)
1071 return;
1072
1073 if (data->count != -1)
1074 (data->count)--;
1075
1076 snapshot_trigger(data, rec, event);
1077}
1078
1079static int
1080register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
1081 struct event_trigger_data *data,
1082 struct trace_event_file *file)
1083{
1084 int ret = register_trigger(glob, ops, data, file);
1085
1086 if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
1087 unregister_trigger(glob, ops, data, file);
1088 ret = 0;
1089 }
1090
1091 return ret;
1092}
1093
1094static int
1095snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1096 struct event_trigger_data *data)
1097{
1098 return event_trigger_print("snapshot", m, (void *)data->count,
1099 data->filter_str);
1100}
1101
1102static struct event_trigger_ops snapshot_trigger_ops = {
1103 .func = snapshot_trigger,
1104 .print = snapshot_trigger_print,
1105 .init = event_trigger_init,
1106 .free = event_trigger_free,
1107};
1108
1109static struct event_trigger_ops snapshot_count_trigger_ops = {
1110 .func = snapshot_count_trigger,
1111 .print = snapshot_trigger_print,
1112 .init = event_trigger_init,
1113 .free = event_trigger_free,
1114};
1115
1116static struct event_trigger_ops *
1117snapshot_get_trigger_ops(char *cmd, char *param)
1118{
1119 return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1120}
1121
1122static struct event_command trigger_snapshot_cmd = {
1123 .name = "snapshot",
1124 .trigger_type = ETT_SNAPSHOT,
1125 .func = event_trigger_callback,
1126 .reg = register_snapshot_trigger,
1127 .unreg = unregister_trigger,
1128 .get_trigger_ops = snapshot_get_trigger_ops,
1129 .set_filter = set_trigger_filter,
1130};
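
/*
 * Typical usage (illustrative): echo 'snapshot:1' > .../events/<system>/
 * <event>/trigger takes a single snapshot of the trace buffer the first
 * time the event fires.  register_snapshot_trigger() above allocates the
 * snapshot buffer at registration time so the trigger itself never has
 * to allocate from the tracepoint path.
 */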
1131
1132static __init int register_trigger_snapshot_cmd(void)
1133{
1134 int ret;
1135
1136 ret = register_event_command(&trigger_snapshot_cmd);
1137 WARN_ON(ret < 0);
1138
1139 return ret;
1140}
1141#else
1142static __init int register_trigger_snapshot_cmd(void) { return 0; }
1143#endif /* CONFIG_TRACER_SNAPSHOT */
1144
1145#ifdef CONFIG_STACKTRACE
1146#ifdef CONFIG_UNWINDER_ORC
1147/* Skip 2:
1148 * event_triggers_post_call()
1149 * trace_event_raw_event_xxx()
1150 */
1151# define STACK_SKIP 2
1152#else
1153/*
1154 * Skip 4:
1155 * stacktrace_trigger()
1156 * event_triggers_post_call()
1157 * trace_event_buffer_commit()
1158 * trace_event_raw_event_xxx()
1159 */
1160#define STACK_SKIP 4
1161#endif
1162
1163static void
1164stacktrace_trigger(struct event_trigger_data *data, void *rec,
1165 struct ring_buffer_event *event)
1166{
1167 trace_dump_stack(STACK_SKIP);
1168}
1169
1170static void
1171stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
1172 struct ring_buffer_event *event)
1173{
1174 if (!data->count)
1175 return;
1176
1177 if (data->count != -1)
1178 (data->count)--;
1179
1180 stacktrace_trigger(data, rec, event);
1181}
1182
1183static int
1184stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1185 struct event_trigger_data *data)
1186{
1187 return event_trigger_print("stacktrace", m, (void *)data->count,
1188 data->filter_str);
1189}
1190
1191static struct event_trigger_ops stacktrace_trigger_ops = {
1192 .func = stacktrace_trigger,
1193 .print = stacktrace_trigger_print,
1194 .init = event_trigger_init,
1195 .free = event_trigger_free,
1196};
1197
1198static struct event_trigger_ops stacktrace_count_trigger_ops = {
1199 .func = stacktrace_count_trigger,
1200 .print = stacktrace_trigger_print,
1201 .init = event_trigger_init,
1202 .free = event_trigger_free,
1203};
1204
1205static struct event_trigger_ops *
1206stacktrace_get_trigger_ops(char *cmd, char *param)
1207{
1208 return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1209}
1210
1211static struct event_command trigger_stacktrace_cmd = {
1212 .name = "stacktrace",
1213 .trigger_type = ETT_STACKTRACE,
1214 .flags = EVENT_CMD_FL_POST_TRIGGER,
1215 .func = event_trigger_callback,
1216 .reg = register_trigger,
1217 .unreg = unregister_trigger,
1218 .get_trigger_ops = stacktrace_get_trigger_ops,
1219 .set_filter = set_trigger_filter,
1220};
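
/*
 * Typical usage (illustrative): echo 'stacktrace:5 if prev_pid == 0' >
 * .../trigger dumps at most five kernel stack traces into the trace
 * buffer when the event fires and the filter matches; STACK_SKIP above
 * keeps the trigger machinery's own frames out of the dump.
 */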
1221
1222static __init int register_trigger_stacktrace_cmd(void)
1223{
1224 int ret;
1225
1226 ret = register_event_command(&trigger_stacktrace_cmd);
1227 WARN_ON(ret < 0);
1228
1229 return ret;
1230}
1231#else
1232static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1233#endif /* CONFIG_STACKTRACE */
1234
1235static __init void unregister_trigger_traceon_traceoff_cmds(void)
1236{
1237 unregister_event_command(&trigger_traceon_cmd);
1238 unregister_event_command(&trigger_traceoff_cmd);
1239}
1240
1241static void
1242event_enable_trigger(struct event_trigger_data *data, void *rec,
1243 struct ring_buffer_event *event)
1244{
1245 struct enable_trigger_data *enable_data = data->private_data;
1246
1247 if (enable_data->enable)
1248 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1249 else
1250 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1251}
1252
1253static void
1254event_enable_count_trigger(struct event_trigger_data *data, void *rec,
1255 struct ring_buffer_event *event)
1256{
1257 struct enable_trigger_data *enable_data = data->private_data;
1258
1259 if (!data->count)
1260 return;
1261
1262 /* Skip if the event is in a state we want to switch to */
1263 if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1264 return;
1265
1266 if (data->count != -1)
1267 (data->count)--;
1268
1269 event_enable_trigger(data, rec, event);
1270}
1271
1272int event_enable_trigger_print(struct seq_file *m,
1273 struct event_trigger_ops *ops,
1274 struct event_trigger_data *data)
1275{
1276 struct enable_trigger_data *enable_data = data->private_data;
1277
1278 seq_printf(m, "%s:%s:%s",
1279 enable_data->hist ?
1280 (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1281 (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1282 enable_data->file->event_call->class->system,
1283 trace_event_name(enable_data->file->event_call));
1284
1285 if (data->count == -1)
1286 seq_puts(m, ":unlimited");
1287 else
1288 seq_printf(m, ":count=%ld", data->count);
1289
1290 if (data->filter_str)
1291 seq_printf(m, " if %s\n", data->filter_str);
1292 else
1293 seq_putc(m, '\n');
1294
1295 return 0;
1296}
1297
1298void event_enable_trigger_free(struct event_trigger_ops *ops,
1299 struct event_trigger_data *data)
1300{
1301 struct enable_trigger_data *enable_data = data->private_data;
1302
1303 if (WARN_ON_ONCE(data->ref <= 0))
1304 return;
1305
1306 data->ref--;
1307 if (!data->ref) {
1308 /* Remove the SOFT_MODE flag */
1309 trace_event_enable_disable(enable_data->file, 0, 1);
1310 module_put(enable_data->file->event_call->mod);
1311 trigger_data_free(data);
1312 kfree(enable_data);
1313 }
1314}
1315
1316static struct event_trigger_ops event_enable_trigger_ops = {
1317 .func = event_enable_trigger,
1318 .print = event_enable_trigger_print,
1319 .init = event_trigger_init,
1320 .free = event_enable_trigger_free,
1321};
1322
1323static struct event_trigger_ops event_enable_count_trigger_ops = {
1324 .func = event_enable_count_trigger,
1325 .print = event_enable_trigger_print,
1326 .init = event_trigger_init,
1327 .free = event_enable_trigger_free,
1328};
1329
1330static struct event_trigger_ops event_disable_trigger_ops = {
1331 .func = event_enable_trigger,
1332 .print = event_enable_trigger_print,
1333 .init = event_trigger_init,
1334 .free = event_enable_trigger_free,
1335};
1336
1337static struct event_trigger_ops event_disable_count_trigger_ops = {
1338 .func = event_enable_count_trigger,
1339 .print = event_enable_trigger_print,
1340 .init = event_trigger_init,
1341 .free = event_enable_trigger_free,
1342};
1343
1344int event_enable_trigger_func(struct event_command *cmd_ops,
1345 struct trace_event_file *file,
1346 char *glob, char *cmd, char *param)
1347{
1348 struct trace_event_file *event_enable_file;
1349 struct enable_trigger_data *enable_data;
1350 struct event_trigger_data *trigger_data;
1351 struct event_trigger_ops *trigger_ops;
1352 struct trace_array *tr = file->tr;
1353 const char *system;
1354 const char *event;
1355 bool hist = false;
1356 char *trigger;
1357 char *number;
1358 bool enable;
1359 int ret;
1360
1361 if (!param)
1362 return -EINVAL;
1363
1364 /* separate the trigger from the filter (s:e:n [if filter]) */
1365 trigger = strsep(&param, " \t");
1366 if (!trigger)
1367 return -EINVAL;
1368
1369 system = strsep(&trigger, ":");
1370 if (!trigger)
1371 return -EINVAL;
1372
1373 event = strsep(&trigger, ":");
1374
1375 ret = -EINVAL;
1376 event_enable_file = find_event_file(tr, system, event);
1377 if (!event_enable_file)
1378 goto out;
1379
1380#ifdef CONFIG_HIST_TRIGGERS
1381 hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1382 (strcmp(cmd, DISABLE_HIST_STR) == 0));
1383
1384 enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1385 (strcmp(cmd, ENABLE_HIST_STR) == 0));
1386#else
1387 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1388#endif
1389 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1390
1391 ret = -ENOMEM;
1392 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1393 if (!trigger_data)
1394 goto out;
1395
1396 enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1397 if (!enable_data) {
1398 kfree(trigger_data);
1399 goto out;
1400 }
1401
1402 trigger_data->count = -1;
1403 trigger_data->ops = trigger_ops;
1404 trigger_data->cmd_ops = cmd_ops;
1405 INIT_LIST_HEAD(&trigger_data->list);
1406 RCU_INIT_POINTER(trigger_data->filter, NULL);
1407
1408 enable_data->hist = hist;
1409 enable_data->enable = enable;
1410 enable_data->file = event_enable_file;
1411 trigger_data->private_data = enable_data;
1412
1413 if (glob[0] == '!') {
1414 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1415 kfree(trigger_data);
1416 kfree(enable_data);
1417 ret = 0;
1418 goto out;
1419 }
1420
1421 if (trigger) {
1422 number = strsep(&trigger, ":");
1423
1424 ret = -EINVAL;
1425 if (!strlen(number))
1426 goto out_free;
1427
1428 /*
1429 * The optional ':n' portion of the command gives the
1430 * trigger count; -1 (the default) means unlimited.
1431 */
1432 ret = kstrtoul(number, 0, &trigger_data->count);
1433 if (ret)
1434 goto out_free;
1435 }
1436
1437 if (!param) /* if param is non-empty, it's supposed to be a filter */
1438 goto out_reg;
1439
1440 if (!cmd_ops->set_filter)
1441 goto out_reg;
1442
1443 ret = cmd_ops->set_filter(param, trigger_data, file);
1444 if (ret < 0)
1445 goto out_free;
1446
1447 out_reg:
1448 /* Don't let event modules unload while probe registered */
1449 ret = try_module_get(event_enable_file->event_call->mod);
1450 if (!ret) {
1451 ret = -EBUSY;
1452 goto out_free;
1453 }
1454
1455 ret = trace_event_enable_disable(event_enable_file, 1, 1);
1456 if (ret < 0)
1457 goto out_put;
1458 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1459 /*
1460 * The above returns the number of triggers registered,
1461 * but if it registered none it returns zero.  Consider
1462 * registering no triggers a failure too.
1463 */
1464 if (!ret) {
1465 ret = -ENOENT;
1466 goto out_disable;
1467 } else if (ret < 0)
1468 goto out_disable;
1469 /* Just return zero, not the number of registered triggers */
1470 ret = 0;
1471 out:
1472 return ret;
1473
1474 out_disable:
1475 trace_event_enable_disable(event_enable_file, 0, 1);
1476 out_put:
1477 module_put(event_enable_file->event_call->mod);
1478 out_free:
1479 if (cmd_ops->set_filter)
1480 cmd_ops->set_filter(NULL, trigger_data, NULL);
1481 kfree(trigger_data);
1482 kfree(enable_data);
1483 goto out;
1484}
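
/*
 * Illustrative walk-through of the parsing above: writing
 * 'enable_event:sched:sched_wakeup:2 if common_pid != 0' to some other
 * event's 'trigger' file gives @param = "sched:sched_wakeup:2 ...";
 * "sched" and "sched_wakeup" name the target event looked up with
 * find_event_file(), "2" becomes trigger_data->count, and the
 * remainder goes to cmd_ops->set_filter().  The target event is put
 * into soft mode (and its module pinned) for as long as the trigger
 * stays registered, so firing the trigger only has to flip the
 * target's SOFT_DISABLED bit.
 */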
1485
1486int event_enable_register_trigger(char *glob,
1487 struct event_trigger_ops *ops,
1488 struct event_trigger_data *data,
1489 struct trace_event_file *file)
1490{
1491 struct enable_trigger_data *enable_data = data->private_data;
1492 struct enable_trigger_data *test_enable_data;
1493 struct event_trigger_data *test;
1494 int ret = 0;
1495
1496 list_for_each_entry_rcu(test, &file->triggers, list) {
1497 test_enable_data = test->private_data;
1498 if (test_enable_data &&
1499 (test->cmd_ops->trigger_type ==
1500 data->cmd_ops->trigger_type) &&
1501 (test_enable_data->file == enable_data->file)) {
1502 ret = -EEXIST;
1503 goto out;
1504 }
1505 }
1506
1507 if (data->ops->init) {
1508 ret = data->ops->init(data->ops, data);
1509 if (ret < 0)
1510 goto out;
1511 }
1512
1513 list_add_rcu(&data->list, &file->triggers);
1514 ret++;
1515
1516 update_cond_flag(file);
1517 if (trace_event_trigger_enable_disable(file, 1) < 0) {
1518 list_del_rcu(&data->list);
1519 update_cond_flag(file);
1520 ret--;
1521 }
1522out:
1523 return ret;
1524}
1525
1526void event_enable_unregister_trigger(char *glob,
1527 struct event_trigger_ops *ops,
1528 struct event_trigger_data *test,
1529 struct trace_event_file *file)
1530{
1531 struct enable_trigger_data *test_enable_data = test->private_data;
1532 struct enable_trigger_data *enable_data;
1533 struct event_trigger_data *data;
1534 bool unregistered = false;
1535
1536 list_for_each_entry_rcu(data, &file->triggers, list) {
1537 enable_data = data->private_data;
1538 if (enable_data &&
1539 (data->cmd_ops->trigger_type ==
1540 test->cmd_ops->trigger_type) &&
1541 (enable_data->file == test_enable_data->file)) {
1542 unregistered = true;
1543 list_del_rcu(&data->list);
1544 trace_event_trigger_enable_disable(file, 0);
1545 update_cond_flag(file);
1546 break;
1547 }
1548 }
1549
1550 if (unregistered && data->ops->free)
1551 data->ops->free(data->ops, data);
1552}
1553
1554static struct event_trigger_ops *
1555event_enable_get_trigger_ops(char *cmd, char *param)
1556{
1557 struct event_trigger_ops *ops;
1558 bool enable;
1559
1560#ifdef CONFIG_HIST_TRIGGERS
1561 enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1562 (strcmp(cmd, ENABLE_HIST_STR) == 0));
1563#else
1564 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1565#endif
1566 if (enable)
1567 ops = param ? &event_enable_count_trigger_ops :
1568 &event_enable_trigger_ops;
1569 else
1570 ops = param ? &event_disable_count_trigger_ops :
1571 &event_disable_trigger_ops;
1572
1573 return ops;
1574}
1575
1576static struct event_command trigger_enable_cmd = {
1577 .name = ENABLE_EVENT_STR,
1578 .trigger_type = ETT_EVENT_ENABLE,
1579 .func = event_enable_trigger_func,
1580 .reg = event_enable_register_trigger,
1581 .unreg = event_enable_unregister_trigger,
1582 .get_trigger_ops = event_enable_get_trigger_ops,
1583 .set_filter = set_trigger_filter,
1584};
1585
1586static struct event_command trigger_disable_cmd = {
1587 .name = DISABLE_EVENT_STR,
1588 .trigger_type = ETT_EVENT_ENABLE,
1589 .func = event_enable_trigger_func,
1590 .reg = event_enable_register_trigger,
1591 .unreg = event_enable_unregister_trigger,
1592 .get_trigger_ops = event_enable_get_trigger_ops,
1593 .set_filter = set_trigger_filter,
1594};
1595
1596static __init void unregister_trigger_enable_disable_cmds(void)
1597{
1598 unregister_event_command(&trigger_enable_cmd);
1599 unregister_event_command(&trigger_disable_cmd);
1600}
1601
1602static __init int register_trigger_enable_disable_cmds(void)
1603{
1604 int ret;
1605
1606 ret = register_event_command(&trigger_enable_cmd);
1607 if (WARN_ON(ret < 0))
1608 return ret;
1609 ret = register_event_command(&trigger_disable_cmd);
1610 if (WARN_ON(ret < 0))
1611 unregister_trigger_enable_disable_cmds();
1612
1613 return ret;
1614}
1615
1616static __init int register_trigger_traceon_traceoff_cmds(void)
1617{
1618 int ret;
1619
1620 ret = register_event_command(&trigger_traceon_cmd);
1621 if (WARN_ON(ret < 0))
1622 return ret;
1623 ret = register_event_command(&trigger_traceoff_cmd);
1624 if (WARN_ON(ret < 0))
1625 unregister_trigger_traceon_traceoff_cmds();
1626
1627 return ret;
1628}
1629
1630__init int register_trigger_cmds(void)
1631{
1632 register_trigger_traceon_traceoff_cmds();
1633 register_trigger_snapshot_cmd();
1634 register_trigger_stacktrace_cmd();
1635 register_trigger_enable_disable_cmds();
1636 register_trigger_hist_enable_disable_cmds();
1637 register_trigger_hist_cmd();
1638
1639 return 0;
1640}