1// SPDX-License-Identifier: GPL-2.0
2/*
3 * trace_events_trigger - trace event triggers
4 *
5 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
6 */
7
8#include <linux/security.h>
9#include <linux/module.h>
10#include <linux/ctype.h>
11#include <linux/mutex.h>
12#include <linux/slab.h>
13#include <linux/rculist.h>
14
15#include "trace.h"
16
17static LIST_HEAD(trigger_commands);
18static DEFINE_MUTEX(trigger_cmd_mutex);
19
20void trigger_data_free(struct event_trigger_data *data)
21{
22 if (data->cmd_ops->set_filter)
23 data->cmd_ops->set_filter(NULL, data, NULL);
24
25 /* make sure current triggers exit before free */
26 tracepoint_synchronize_unregister();
27
28 kfree(data);
29}
30
31/**
32 * event_triggers_call - Call triggers associated with a trace event
33 * @file: The trace_event_file associated with the event
34 * @rec: The trace entry for the event, NULL for unconditional invocation
 * @event: The ring buffer event associated with @rec, if any
35 *
36 * For each trigger associated with an event, invoke the trigger
37 * function registered with the associated trigger command. If rec is
38 * non-NULL, it means that the trigger requires further processing and
39 * shouldn't be unconditionally invoked. If rec is non-NULL and the
40 * trigger has a filter associated with it, rec will be checked against
41 * the filter and, if the record matches, the trigger will be invoked.
42 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
43 * in any case until the current event is written, the trigger
44 * function isn't invoked but the bit associated with the deferred
45 * trigger is set in the return value.
46 *
50 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
51 *
52 * Return: an enum event_trigger_type value containing a set bit for
53 * any trigger that should be deferred, ETT_NONE if nothing to defer.
54 */
55enum event_trigger_type
56event_triggers_call(struct trace_event_file *file, void *rec,
57 struct ring_buffer_event *event)
58{
59 struct event_trigger_data *data;
60 enum event_trigger_type tt = ETT_NONE;
61 struct event_filter *filter;
62
63 if (list_empty(&file->triggers))
64 return tt;
65
66 list_for_each_entry_rcu(data, &file->triggers, list) {
67 if (data->paused)
68 continue;
69 if (!rec) {
70 data->ops->func(data, rec, event);
71 continue;
72 }
73 filter = rcu_dereference_sched(data->filter);
74 if (filter && !filter_match_preds(filter, rec))
75 continue;
76 if (event_command_post_trigger(data->cmd_ops)) {
77 tt |= data->cmd_ops->trigger_type;
78 continue;
79 }
80 data->ops->func(data, rec, event);
81 }
82 return tt;
83}
84EXPORT_SYMBOL_GPL(event_triggers_call);
85
86/**
87 * event_triggers_post_call - Call 'post_triggers' for a trace event
88 * @file: The trace_event_file associated with the event
89 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
90 *
91 * For each trigger associated with an event, invoke the trigger
92 * function registered with the associated trigger command, if the
93 * corresponding bit is set in the tt enum passed into this function.
94 * See event_triggers_call() for details on how those bits are set.
95 *
96 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
97 */
98void
99event_triggers_post_call(struct trace_event_file *file,
100 enum event_trigger_type tt)
101{
102 struct event_trigger_data *data;
103
104 list_for_each_entry_rcu(data, &file->triggers, list) {
105 if (data->paused)
106 continue;
107 if (data->cmd_ops->trigger_type & tt)
108 data->ops->func(data, NULL, NULL);
109 }
110}
111EXPORT_SYMBOL_GPL(event_triggers_post_call);
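/*
 * Illustrative sketch (hypothetical helper, loosely modeled on the event
 * commit path in the tracing core): conditional triggers run through
 * event_triggers_call() before the event is committed, and any deferred
 * bits it returns are replayed through event_triggers_post_call() after
 * the commit.
 */
static void example_commit_with_triggers(struct trace_event_file *file,
					 void *entry,
					 struct ring_buffer_event *event)
{
	enum event_trigger_type tt = ETT_NONE;

	if (unlikely(file->flags & EVENT_FILE_FL_TRIGGER_COND))
		tt = event_triggers_call(file, entry, event);

	/* ... commit the event to the ring buffer here ... */

	if (tt)
		event_triggers_post_call(file, tt);
}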
112
113#define SHOW_AVAILABLE_TRIGGERS (void *)(1UL)
114
115static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
116{
117 struct trace_event_file *event_file = event_file_data(m->private);
118
119 if (t == SHOW_AVAILABLE_TRIGGERS)
120 return NULL;
121
122 return seq_list_next(t, &event_file->triggers, pos);
123}
124
125static void *trigger_start(struct seq_file *m, loff_t *pos)
126{
127 struct trace_event_file *event_file;
128
129 /* ->stop() is called even if ->start() fails */
130 mutex_lock(&event_mutex);
131 event_file = event_file_data(m->private);
132 if (unlikely(!event_file))
133 return ERR_PTR(-ENODEV);
134
135 if (list_empty(&event_file->triggers))
136 return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
137
138 return seq_list_start(&event_file->triggers, *pos);
139}
140
141static void trigger_stop(struct seq_file *m, void *t)
142{
143 mutex_unlock(&event_mutex);
144}
145
146static int trigger_show(struct seq_file *m, void *v)
147{
148 struct event_trigger_data *data;
149 struct event_command *p;
150
151 if (v == SHOW_AVAILABLE_TRIGGERS) {
152 seq_puts(m, "# Available triggers:\n");
153 seq_putc(m, '#');
154 mutex_lock(&trigger_cmd_mutex);
155 list_for_each_entry_reverse(p, &trigger_commands, list)
156 seq_printf(m, " %s", p->name);
157 seq_putc(m, '\n');
158 mutex_unlock(&trigger_cmd_mutex);
159 return 0;
160 }
161
162 data = list_entry(v, struct event_trigger_data, list);
163 data->ops->print(m, data->ops, data);
164
165 return 0;
166}
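/*
 * For example, reading an event's 'trigger' file with no triggers set
 * shows the registered commands (the exact list depends on the kernel
 * configuration); otherwise each registered trigger is printed on its
 * own line via its ->print() method:
 *
 *	# Available triggers:
 *	# traceon traceoff snapshot stacktrace enable_event disable_event
 */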
167
168static const struct seq_operations event_triggers_seq_ops = {
169 .start = trigger_start,
170 .next = trigger_next,
171 .stop = trigger_stop,
172 .show = trigger_show,
173};
174
175static int event_trigger_regex_open(struct inode *inode, struct file *file)
176{
177 int ret;
178
179 ret = security_locked_down(LOCKDOWN_TRACEFS);
180 if (ret)
181 return ret;
182
183 mutex_lock(&event_mutex);
184
185 if (unlikely(!event_file_data(file))) {
186 mutex_unlock(&event_mutex);
187 return -ENODEV;
188 }
189
190 if ((file->f_mode & FMODE_WRITE) &&
191 (file->f_flags & O_TRUNC)) {
192 struct trace_event_file *event_file;
193 struct event_command *p;
194
195 event_file = event_file_data(file);
196
197 list_for_each_entry(p, &trigger_commands, list) {
198 if (p->unreg_all)
199 p->unreg_all(event_file);
200 }
201 }
202
203 if (file->f_mode & FMODE_READ) {
204 ret = seq_open(file, &event_triggers_seq_ops);
205 if (!ret) {
206 struct seq_file *m = file->private_data;
207 m->private = file;
208 }
209 }
210
211 mutex_unlock(&event_mutex);
212
213 return ret;
214}
215
216static int trigger_process_regex(struct trace_event_file *file, char *buff)
217{
218 char *command, *next = buff;
219 struct event_command *p;
220 int ret = -EINVAL;
221
222 command = strsep(&next, ": \t");
223 command = (command[0] != '!') ? command : command + 1;
224
225 mutex_lock(&trigger_cmd_mutex);
226 list_for_each_entry(p, &trigger_commands, list) {
227 if (strcmp(p->name, command) == 0) {
228 ret = p->func(p, file, buff, command, next);
229 goto out_unlock;
230 }
231 }
232 out_unlock:
233 mutex_unlock(&trigger_cmd_mutex);
234
235 return ret;
236}
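/*
 * Illustrative user-space sketch (the event and path below are only
 * examples): the command strings parsed above are what get written to a
 * per-event 'trigger' file in tracefs.
 */
#include <fcntl.h>
#include <unistd.h>

static int example_set_trigger(void)
{
	static const char cmd[] = "traceoff:5 if prev_pid == 1";
	ssize_t ret;
	int fd;

	fd = open("/sys/kernel/tracing/events/sched/sched_switch/trigger",
		  O_WRONLY);
	if (fd < 0)
		return -1;

	ret = write(fd, cmd, sizeof(cmd) - 1);
	close(fd);

	return ret < 0 ? -1 : 0;
}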
237
238static ssize_t event_trigger_regex_write(struct file *file,
239 const char __user *ubuf,
240 size_t cnt, loff_t *ppos)
241{
242 struct trace_event_file *event_file;
243 ssize_t ret;
244 char *buf;
245
246 if (!cnt)
247 return 0;
248
249 if (cnt >= PAGE_SIZE)
250 return -EINVAL;
251
252 buf = memdup_user_nul(ubuf, cnt);
253 if (IS_ERR(buf))
254 return PTR_ERR(buf);
255
256 strim(buf);
257
258 mutex_lock(&event_mutex);
259 event_file = event_file_data(file);
260 if (unlikely(!event_file)) {
261 mutex_unlock(&event_mutex);
262 kfree(buf);
263 return -ENODEV;
264 }
265 ret = trigger_process_regex(event_file, buf);
266 mutex_unlock(&event_mutex);
267
268 kfree(buf);
269 if (ret < 0)
270 goto out;
271
272 *ppos += cnt;
273 ret = cnt;
274 out:
275 return ret;
276}
277
278static int event_trigger_regex_release(struct inode *inode, struct file *file)
279{
280 mutex_lock(&event_mutex);
281
282 if (file->f_mode & FMODE_READ)
283 seq_release(inode, file);
284
285 mutex_unlock(&event_mutex);
286
287 return 0;
288}
289
290static ssize_t
291event_trigger_write(struct file *filp, const char __user *ubuf,
292 size_t cnt, loff_t *ppos)
293{
294 return event_trigger_regex_write(filp, ubuf, cnt, ppos);
295}
296
297static int
298event_trigger_open(struct inode *inode, struct file *filp)
299{
300 /* Checks for tracefs lockdown */
301 return event_trigger_regex_open(inode, filp);
302}
303
304static int
305event_trigger_release(struct inode *inode, struct file *file)
306{
307 return event_trigger_regex_release(inode, file);
308}
309
310const struct file_operations event_trigger_fops = {
311 .open = event_trigger_open,
312 .read = seq_read,
313 .write = event_trigger_write,
314 .llseek = tracing_lseek,
315 .release = event_trigger_release,
316};
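/*
 * These fops back each event's 'trigger' file.  As a rough sketch, the
 * event directory setup code (event_create_dir() in trace_events.c)
 * creates that file along the lines of:
 *
 *	trace_create_file("trigger", 0644, file->dir, file,
 *			  &event_trigger_fops);
 */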
317
318/*
319 * Currently we only register event commands from __init, so mark this
320 * __init too.
321 */
322__init int register_event_command(struct event_command *cmd)
323{
324 struct event_command *p;
325 int ret = 0;
326
327 mutex_lock(&trigger_cmd_mutex);
328 list_for_each_entry(p, &trigger_commands, list) {
329 if (strcmp(cmd->name, p->name) == 0) {
330 ret = -EBUSY;
331 goto out_unlock;
332 }
333 }
334 list_add(&cmd->list, &trigger_commands);
335 out_unlock:
336 mutex_unlock(&trigger_cmd_mutex);
337
338 return ret;
339}
340
341/*
342 * Currently we only unregister event commands from __init, so mark
343 * this __init too.
344 */
345__init int unregister_event_command(struct event_command *cmd)
346{
347 struct event_command *p, *n;
348 int ret = -ENODEV;
349
350 mutex_lock(&trigger_cmd_mutex);
351 list_for_each_entry_safe(p, n, &trigger_commands, list) {
352 if (strcmp(cmd->name, p->name) == 0) {
353 ret = 0;
354 list_del_init(&p->list);
355 goto out_unlock;
356 }
357 }
358 out_unlock:
359 mutex_unlock(&trigger_cmd_mutex);
360
361 return ret;
362}
363
364/**
365 * event_trigger_print - Generic event_trigger_ops @print implementation
366 * @name: The name of the event trigger
367 * @m: The seq_file being printed to
368 * @data: Trigger-specific data
369 * @filter_str: filter_str to print, if present
370 *
371 * Common implementation for event triggers to print themselves.
372 *
373 * Usually wrapped by a function that simply sets the @name of the
374 * trigger command and then invokes this.
375 *
376 * Return: 0 on success, errno otherwise
377 */
378static int
379event_trigger_print(const char *name, struct seq_file *m,
380 void *data, char *filter_str)
381{
382 long count = (long)data;
383
384 seq_puts(m, name);
385
386 if (count == -1)
387 seq_puts(m, ":unlimited");
388 else
389 seq_printf(m, ":count=%ld", count);
390
391 if (filter_str)
392 seq_printf(m, " if %s\n", filter_str);
393 else
394 seq_putc(m, '\n');
395
396 return 0;
397}
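/*
 * For example, a trigger registered as 'traceon:5 if pid == 128' is
 * printed by the code above as:
 *
 *	traceon:count=5 if pid == 128
 *
 * while one registered without a count prints as:
 *
 *	traceon:unlimited
 */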
398
399/**
400 * event_trigger_init - Generic event_trigger_ops @init implementation
401 * @ops: The trigger ops associated with the trigger
402 * @data: Trigger-specific data
403 *
404 * Common implementation of event trigger initialization.
405 *
406 * Usually used directly as the @init method in event trigger
407 * implementations.
408 *
409 * Return: 0 on success, errno otherwise
410 */
411int event_trigger_init(struct event_trigger_ops *ops,
412 struct event_trigger_data *data)
413{
414 data->ref++;
415 return 0;
416}
417
418/**
419 * event_trigger_free - Generic event_trigger_ops @free implementation
420 * @ops: The trigger ops associated with the trigger
421 * @data: Trigger-specific data
422 *
423 * Common implementation of event trigger de-initialization.
424 *
425 * Usually used directly as the @free method in event trigger
426 * implementations.
427 */
428static void
429event_trigger_free(struct event_trigger_ops *ops,
430 struct event_trigger_data *data)
431{
432 if (WARN_ON_ONCE(data->ref <= 0))
433 return;
434
435 data->ref--;
436 if (!data->ref)
437 trigger_data_free(data);
438}
439
440int trace_event_trigger_enable_disable(struct trace_event_file *file,
441 int trigger_enable)
442{
443 int ret = 0;
444
445 if (trigger_enable) {
446 if (atomic_inc_return(&file->tm_ref) > 1)
447 return ret;
448 set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
449 ret = trace_event_enable_disable(file, 1, 1);
450 } else {
451 if (atomic_dec_return(&file->tm_ref) > 0)
452 return ret;
453 clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
454 ret = trace_event_enable_disable(file, 0, 1);
455 }
456
457 return ret;
458}
459
460/**
461 * clear_event_triggers - Clear all triggers associated with a trace array
462 * @tr: The trace array to clear
463 *
464 * For each trigger, the triggering event has its tm_ref decremented
465 * via trace_event_trigger_enable_disable(), and any associated event
466 * (in the case of enable/disable_event triggers) will have its sm_ref
467 * decremented via free()->trace_event_enable_disable(). That
468 * combination effectively reverses the soft-mode/trigger state added
469 * by trigger registration.
470 *
471 * Must be called with event_mutex held.
472 */
473void
474clear_event_triggers(struct trace_array *tr)
475{
476 struct trace_event_file *file;
477
478 list_for_each_entry(file, &tr->events, list) {
479 struct event_trigger_data *data, *n;
480 list_for_each_entry_safe(data, n, &file->triggers, list) {
481 trace_event_trigger_enable_disable(file, 0);
482 list_del_rcu(&data->list);
483 if (data->ops->free)
484 data->ops->free(data->ops, data);
485 }
486 }
487}
488
489/**
490 * update_cond_flag - Set or reset the TRIGGER_COND bit
491 * @file: The trace_event_file associated with the event
492 *
493 * If an event has triggers and any of those triggers has a filter or
494 * a post_trigger, trigger invocation needs to be deferred until after
495 * the current event has logged its data, and the event should have
496 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
497 * cleared.
498 */
499void update_cond_flag(struct trace_event_file *file)
500{
501 struct event_trigger_data *data;
502 bool set_cond = false;
503
504 list_for_each_entry_rcu(data, &file->triggers, list) {
505 if (data->filter || event_command_post_trigger(data->cmd_ops) ||
506 event_command_needs_rec(data->cmd_ops)) {
507 set_cond = true;
508 break;
509 }
510 }
511
512 if (set_cond)
513 set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
514 else
515 clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
516}
517
518/**
519 * register_trigger - Generic event_command @reg implementation
520 * @glob: The raw string used to register the trigger
521 * @ops: The trigger ops associated with the trigger
522 * @data: Trigger-specific data to associate with the trigger
523 * @file: The trace_event_file associated with the event
524 *
525 * Common implementation for event trigger registration.
526 *
527 * Usually used directly as the @reg method in event command
528 * implementations.
529 *
530 * Return: 0 on success, errno otherwise
531 */
532static int register_trigger(char *glob, struct event_trigger_ops *ops,
533 struct event_trigger_data *data,
534 struct trace_event_file *file)
535{
536 struct event_trigger_data *test;
537 int ret = 0;
538
539 list_for_each_entry_rcu(test, &file->triggers, list) {
540 if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
541 ret = -EEXIST;
542 goto out;
543 }
544 }
545
546 if (data->ops->init) {
547 ret = data->ops->init(data->ops, data);
548 if (ret < 0)
549 goto out;
550 }
551
552 list_add_rcu(&data->list, &file->triggers);
553 ret++;
554
555 update_cond_flag(file);
556 if (trace_event_trigger_enable_disable(file, 1) < 0) {
557 list_del_rcu(&data->list);
558 update_cond_flag(file);
559 ret--;
560 }
561out:
562 return ret;
563}
564
565/**
566 * unregister_trigger - Generic event_command @unreg implementation
567 * @glob: The raw string used to register the trigger
568 * @ops: The trigger ops associated with the trigger
569 * @test: Trigger-specific data used to find the trigger to remove
570 * @file: The trace_event_file associated with the event
571 *
572 * Common implementation for event trigger unregistration.
573 *
574 * Usually used directly as the @unreg method in event command
575 * implementations.
576 */
577static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
578 struct event_trigger_data *test,
579 struct trace_event_file *file)
580{
581 struct event_trigger_data *data;
582 bool unregistered = false;
583
584 list_for_each_entry_rcu(data, &file->triggers, list) {
585 if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
586 unregistered = true;
587 list_del_rcu(&data->list);
588 trace_event_trigger_enable_disable(file, 0);
589 update_cond_flag(file);
590 break;
591 }
592 }
593
594 if (unregistered && data->ops->free)
595 data->ops->free(data->ops, data);
596}
597
598/**
599 * event_trigger_callback - Generic event_command @func implementation
600 * @cmd_ops: The command ops, used for trigger registration
601 * @file: The trace_event_file associated with the event
602 * @glob: The raw string used to register the trigger
603 * @cmd: The cmd portion of the string used to register the trigger
604 * @param: The params portion of the string used to register the trigger
605 *
606 * Common implementation for event command parsing and trigger
607 * instantiation.
608 *
609 * Usually used directly as the @func method in event command
610 * implementations.
611 *
612 * Return: 0 on success, errno otherwise
613 */
614static int
615event_trigger_callback(struct event_command *cmd_ops,
616 struct trace_event_file *file,
617 char *glob, char *cmd, char *param)
618{
619 struct event_trigger_data *trigger_data;
620 struct event_trigger_ops *trigger_ops;
621 char *trigger = NULL;
622 char *number;
623 int ret;
624
625 /* separate the trigger from the filter (t:n [if filter]) */
626 if (param && isdigit(param[0]))
627 trigger = strsep(¶m, " \t");
628
629 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
630
631 ret = -ENOMEM;
632 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
633 if (!trigger_data)
634 goto out;
635
636 trigger_data->count = -1;
637 trigger_data->ops = trigger_ops;
638 trigger_data->cmd_ops = cmd_ops;
639 trigger_data->private_data = file;
640 INIT_LIST_HEAD(&trigger_data->list);
641 INIT_LIST_HEAD(&trigger_data->named_list);
642
643 if (glob[0] == '!') {
644 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
645 kfree(trigger_data);
646 ret = 0;
647 goto out;
648 }
649
650 if (trigger) {
651 number = strsep(&trigger, ":");
652
653 ret = -EINVAL;
654 if (!strlen(number))
655 goto out_free;
656
657 /*
658 * We use the callback data field (which is a pointer)
659 * as our counter.
660 */
661 ret = kstrtoul(number, 0, &trigger_data->count);
662 if (ret)
663 goto out_free;
664 }
665
666 if (!param) /* if param is non-empty, it's supposed to be a filter */
667 goto out_reg;
668
669 if (!cmd_ops->set_filter)
670 goto out_reg;
671
672 ret = cmd_ops->set_filter(param, trigger_data, file);
673 if (ret < 0)
674 goto out_free;
675
676 out_reg:
677 /* Up the trigger_data count to make sure reg doesn't free it on failure */
678 event_trigger_init(trigger_ops, trigger_data);
679 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
680 /*
681 * The above returns on success the # of functions enabled,
682 * but if it didn't find any functions it returns zero.
683 * Consider no functions a failure too.
684 */
685 if (!ret) {
686 cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
687 ret = -ENOENT;
688 } else if (ret > 0)
689 ret = 0;
690
691 /* Down the counter of trigger_data or free it if not used anymore */
692 event_trigger_free(trigger_ops, trigger_data);
693 out:
694 return ret;
695
696 out_free:
697 if (cmd_ops->set_filter)
698 cmd_ops->set_filter(NULL, trigger_data, NULL);
699 kfree(trigger_data);
700 goto out;
701}
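/*
 * For example, writing 'traceoff:5 if prev_pid == 0' to an event's
 * trigger file arrives here with cmd == "traceoff" and
 * param == "5 if prev_pid == 0" (glob additionally carries a leading '!'
 * when the trigger is being removed).  The leading digits are split off
 * into 'trigger' ("5") and converted by kstrtoul() into
 * trigger_data->count, and the remainder ("if prev_pid == 0") is handed
 * to the command's set_filter() method.
 */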
702
703/**
704 * set_trigger_filter - Generic event_command @set_filter implementation
705 * @filter_str: The filter string for the trigger, NULL to remove filter
706 * @trigger_data: Trigger-specific data
707 * @file: The trace_event_file associated with the event
708 *
709 * Common implementation for event command filter parsing and filter
710 * instantiation.
711 *
712 * Usually used directly as the @set_filter method in event command
713 * implementations.
714 *
715 * Also used to remove a filter (if filter_str = NULL).
716 *
717 * Return: 0 on success, errno otherwise
718 */
719int set_trigger_filter(char *filter_str,
720 struct event_trigger_data *trigger_data,
721 struct trace_event_file *file)
722{
723 struct event_trigger_data *data = trigger_data;
724 struct event_filter *filter = NULL, *tmp;
725 int ret = -EINVAL;
726 char *s;
727
728 if (!filter_str) /* clear the current filter */
729 goto assign;
730
731 s = strsep(&filter_str, " \t");
732
733 if (!strlen(s) || strcmp(s, "if") != 0)
734 goto out;
735
736 if (!filter_str)
737 goto out;
738
739 /* The filter is for the 'trigger' event, not the triggered event */
740 ret = create_event_filter(file->tr, file->event_call,
741 filter_str, false, &filter);
742 /*
743 * If create_event_filter() fails, filter still needs to be freed.
744 * Which the calling code will do with data->filter.
745 */
746 assign:
747 tmp = rcu_access_pointer(data->filter);
748
749 rcu_assign_pointer(data->filter, filter);
750
751 if (tmp) {
752 /* Make sure the call is done with the filter */
753 tracepoint_synchronize_unregister();
754 free_event_filter(tmp);
755 }
756
757 kfree(data->filter_str);
758 data->filter_str = NULL;
759
760 if (filter_str) {
761 data->filter_str = kstrdup(filter_str, GFP_KERNEL);
762 if (!data->filter_str) {
763 free_event_filter(rcu_access_pointer(data->filter));
764 data->filter = NULL;
765 ret = -ENOMEM;
766 }
767 }
768 out:
769 return ret;
770}
771
772static LIST_HEAD(named_triggers);
773
774/**
775 * find_named_trigger - Find the common named trigger associated with @name
776 * @name: The name of the set of named triggers to find the common data for
777 *
778 * Named triggers are sets of triggers that share a common set of
779 * trigger data. The first named trigger registered with a given name
780 * owns the common trigger data that the others subsequently
781 * registered with the same name will reference. This function
782 * returns the common trigger data associated with that first
783 * registered instance.
784 *
785 * Return: the common trigger data for the given named trigger on
786 * success, NULL otherwise.
787 */
788struct event_trigger_data *find_named_trigger(const char *name)
789{
790 struct event_trigger_data *data;
791
792 if (!name)
793 return NULL;
794
795 list_for_each_entry(data, &named_triggers, named_list) {
796 if (data->named_data)
797 continue;
798 if (strcmp(data->name, name) == 0)
799 return data;
800 }
801
802 return NULL;
803}
804
805/**
806 * is_named_trigger - determine if a given trigger is a named trigger
807 * @test: The trigger data to test
808 *
809 * Return: true if 'test' is a named trigger, false otherwise.
810 */
811bool is_named_trigger(struct event_trigger_data *test)
812{
813 struct event_trigger_data *data;
814
815 list_for_each_entry(data, &named_triggers, named_list) {
816 if (test == data)
817 return true;
818 }
819
820 return false;
821}
822
823/**
824 * save_named_trigger - save the trigger in the named trigger list
825 * @name: The name of the named trigger set
826 * @data: The trigger data to save
827 *
828 * Return: 0 if successful, negative error otherwise.
829 */
830int save_named_trigger(const char *name, struct event_trigger_data *data)
831{
832 data->name = kstrdup(name, GFP_KERNEL);
833 if (!data->name)
834 return -ENOMEM;
835
836 list_add(&data->named_list, &named_triggers);
837
838 return 0;
839}
840
841/**
842 * del_named_trigger - delete a trigger from the named trigger list
843 * @data: The trigger data to delete
844 */
845void del_named_trigger(struct event_trigger_data *data)
846{
847 kfree(data->name);
848 data->name = NULL;
849
850 list_del(&data->named_list);
851}
852
853static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
854{
855 struct event_trigger_data *test;
856
857 list_for_each_entry(test, &named_triggers, named_list) {
858 if (strcmp(test->name, data->name) == 0) {
859 if (pause) {
860 test->paused_tmp = test->paused;
861 test->paused = true;
862 } else {
863 test->paused = test->paused_tmp;
864 }
865 }
866 }
867}
868
869/**
870 * pause_named_trigger - Pause all named triggers with the same name
871 * @data: The trigger data of a named trigger to pause
872 *
873 * Pauses a named trigger along with all other triggers having the
874 * same name. Because named triggers share a common set of data,
875 * pausing only one is meaningless, so pausing one named trigger needs
876 * to pause all triggers with the same name.
877 */
878void pause_named_trigger(struct event_trigger_data *data)
879{
880 __pause_named_trigger(data, true);
881}
882
883/**
884 * unpause_named_trigger - Un-pause all named triggers with the same name
885 * @data: The trigger data of a named trigger to unpause
886 *
887 * Un-pauses a named trigger along with all other triggers having the
888 * same name. Because named triggers share a common set of data,
889 * unpausing only one is meaningless, so unpausing one named trigger
890 * needs to unpause all triggers with the same name.
891 */
892void unpause_named_trigger(struct event_trigger_data *data)
893{
894 __pause_named_trigger(data, false);
895}
896
897/**
898 * set_named_trigger_data - Associate common named trigger data
899 * @data: The trigger data to associate with @named_data
 * @named_data: The common trigger data owned by the first registrant
900 *
901 * Named triggers are sets of triggers that share a common set of
902 * trigger data. The first named trigger registered with a given name
903 * owns the common trigger data that the others subsequently
904 * registered with the same name will reference. This function
905 * associates the common trigger data from the first trigger with the
906 * given trigger.
907 */
908void set_named_trigger_data(struct event_trigger_data *data,
909 struct event_trigger_data *named_data)
910{
911 data->named_data = named_data;
912}
913
914struct event_trigger_data *
915get_named_trigger_data(struct event_trigger_data *data)
916{
917 return data->named_data;
918}
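/*
 * Illustrative sketch (simplified; hist triggers are the in-tree user of
 * these helpers): a command supporting named triggers either shares the
 * common data of the first trigger registered under a given name, or
 * becomes that first registrant itself.
 */
static int example_attach_named(const char *name,
				struct event_trigger_data *data)
{
	struct event_trigger_data *named_data;

	named_data = find_named_trigger(name);
	if (named_data) {
		/* share the common data owned by the first registrant */
		set_named_trigger_data(data, named_data);
		return 0;
	}

	/* first trigger with this name: own the common data */
	return save_named_trigger(name, data);
}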
919
920static void
921traceon_trigger(struct event_trigger_data *data, void *rec,
922 struct ring_buffer_event *event)
923{
924 if (tracing_is_on())
925 return;
926
927 tracing_on();
928}
929
930static void
931traceon_count_trigger(struct event_trigger_data *data, void *rec,
932 struct ring_buffer_event *event)
933{
934 if (tracing_is_on())
935 return;
936
937 if (!data->count)
938 return;
939
940 if (data->count != -1)
941 (data->count)--;
942
943 tracing_on();
944}
945
946static void
947traceoff_trigger(struct event_trigger_data *data, void *rec,
948 struct ring_buffer_event *event)
949{
950 if (!tracing_is_on())
951 return;
952
953 tracing_off();
954}
955
956static void
957traceoff_count_trigger(struct event_trigger_data *data, void *rec,
958 struct ring_buffer_event *event)
959{
960 if (!tracing_is_on())
961 return;
962
963 if (!data->count)
964 return;
965
966 if (data->count != -1)
967 (data->count)--;
968
969 tracing_off();
970}
971
972static int
973traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
974 struct event_trigger_data *data)
975{
976 return event_trigger_print("traceon", m, (void *)data->count,
977 data->filter_str);
978}
979
980static int
981traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
982 struct event_trigger_data *data)
983{
984 return event_trigger_print("traceoff", m, (void *)data->count,
985 data->filter_str);
986}
987
988static struct event_trigger_ops traceon_trigger_ops = {
989 .func = traceon_trigger,
990 .print = traceon_trigger_print,
991 .init = event_trigger_init,
992 .free = event_trigger_free,
993};
994
995static struct event_trigger_ops traceon_count_trigger_ops = {
996 .func = traceon_count_trigger,
997 .print = traceon_trigger_print,
998 .init = event_trigger_init,
999 .free = event_trigger_free,
1000};
1001
1002static struct event_trigger_ops traceoff_trigger_ops = {
1003 .func = traceoff_trigger,
1004 .print = traceoff_trigger_print,
1005 .init = event_trigger_init,
1006 .free = event_trigger_free,
1007};
1008
1009static struct event_trigger_ops traceoff_count_trigger_ops = {
1010 .func = traceoff_count_trigger,
1011 .print = traceoff_trigger_print,
1012 .init = event_trigger_init,
1013 .free = event_trigger_free,
1014};
1015
1016static struct event_trigger_ops *
1017onoff_get_trigger_ops(char *cmd, char *param)
1018{
1019 struct event_trigger_ops *ops;
1020
1021 /* we register both traceon and traceoff to this callback */
1022 if (strcmp(cmd, "traceon") == 0)
1023 ops = param ? &traceon_count_trigger_ops :
1024 &traceon_trigger_ops;
1025 else
1026 ops = param ? &traceoff_count_trigger_ops :
1027 &traceoff_trigger_ops;
1028
1029 return ops;
1030}
1031
1032static struct event_command trigger_traceon_cmd = {
1033 .name = "traceon",
1034 .trigger_type = ETT_TRACE_ONOFF,
1035 .func = event_trigger_callback,
1036 .reg = register_trigger,
1037 .unreg = unregister_trigger,
1038 .get_trigger_ops = onoff_get_trigger_ops,
1039 .set_filter = set_trigger_filter,
1040};
1041
1042static struct event_command trigger_traceoff_cmd = {
1043 .name = "traceoff",
1044 .trigger_type = ETT_TRACE_ONOFF,
1045 .flags = EVENT_CMD_FL_POST_TRIGGER,
1046 .func = event_trigger_callback,
1047 .reg = register_trigger,
1048 .unreg = unregister_trigger,
1049 .get_trigger_ops = onoff_get_trigger_ops,
1050 .set_filter = set_trigger_filter,
1051};
1052
1053#ifdef CONFIG_TRACER_SNAPSHOT
1054static void
1055snapshot_trigger(struct event_trigger_data *data, void *rec,
1056 struct ring_buffer_event *event)
1057{
1058 struct trace_event_file *file = data->private_data;
1059
1060 if (file)
1061 tracing_snapshot_instance(file->tr);
1062 else
1063 tracing_snapshot();
1064}
1065
1066static void
1067snapshot_count_trigger(struct event_trigger_data *data, void *rec,
1068 struct ring_buffer_event *event)
1069{
1070 if (!data->count)
1071 return;
1072
1073 if (data->count != -1)
1074 (data->count)--;
1075
1076 snapshot_trigger(data, rec, event);
1077}
1078
1079static int
1080register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
1081 struct event_trigger_data *data,
1082 struct trace_event_file *file)
1083{
1084 int ret = register_trigger(glob, ops, data, file);
1085
1086 if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
1087 unregister_trigger(glob, ops, data, file);
1088 ret = 0;
1089 }
1090
1091 return ret;
1092}
1093
1094static int
1095snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1096 struct event_trigger_data *data)
1097{
1098 return event_trigger_print("snapshot", m, (void *)data->count,
1099 data->filter_str);
1100}
1101
1102static struct event_trigger_ops snapshot_trigger_ops = {
1103 .func = snapshot_trigger,
1104 .print = snapshot_trigger_print,
1105 .init = event_trigger_init,
1106 .free = event_trigger_free,
1107};
1108
1109static struct event_trigger_ops snapshot_count_trigger_ops = {
1110 .func = snapshot_count_trigger,
1111 .print = snapshot_trigger_print,
1112 .init = event_trigger_init,
1113 .free = event_trigger_free,
1114};
1115
1116static struct event_trigger_ops *
1117snapshot_get_trigger_ops(char *cmd, char *param)
1118{
1119 return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1120}
1121
1122static struct event_command trigger_snapshot_cmd = {
1123 .name = "snapshot",
1124 .trigger_type = ETT_SNAPSHOT,
1125 .func = event_trigger_callback,
1126 .reg = register_snapshot_trigger,
1127 .unreg = unregister_trigger,
1128 .get_trigger_ops = snapshot_get_trigger_ops,
1129 .set_filter = set_trigger_filter,
1130};
1131
1132static __init int register_trigger_snapshot_cmd(void)
1133{
1134 int ret;
1135
1136 ret = register_event_command(&trigger_snapshot_cmd);
1137 WARN_ON(ret < 0);
1138
1139 return ret;
1140}
1141#else
1142static __init int register_trigger_snapshot_cmd(void) { return 0; }
1143#endif /* CONFIG_TRACER_SNAPSHOT */
1144
1145#ifdef CONFIG_STACKTRACE
1146#ifdef CONFIG_UNWINDER_ORC
1147/* Skip 2:
1148 * event_triggers_post_call()
1149 * trace_event_raw_event_xxx()
1150 */
1151# define STACK_SKIP 2
1152#else
1153/*
1154 * Skip 4:
1155 * stacktrace_trigger()
1156 * event_triggers_post_call()
1157 * trace_event_buffer_commit()
1158 * trace_event_raw_event_xxx()
1159 */
1160#define STACK_SKIP 4
1161#endif
1162
1163static void
1164stacktrace_trigger(struct event_trigger_data *data, void *rec,
1165 struct ring_buffer_event *event)
1166{
1167 trace_dump_stack(STACK_SKIP);
1168}
1169
1170static void
1171stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
1172 struct ring_buffer_event *event)
1173{
1174 if (!data->count)
1175 return;
1176
1177 if (data->count != -1)
1178 (data->count)--;
1179
1180 stacktrace_trigger(data, rec, event);
1181}
1182
1183static int
1184stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1185 struct event_trigger_data *data)
1186{
1187 return event_trigger_print("stacktrace", m, (void *)data->count,
1188 data->filter_str);
1189}
1190
1191static struct event_trigger_ops stacktrace_trigger_ops = {
1192 .func = stacktrace_trigger,
1193 .print = stacktrace_trigger_print,
1194 .init = event_trigger_init,
1195 .free = event_trigger_free,
1196};
1197
1198static struct event_trigger_ops stacktrace_count_trigger_ops = {
1199 .func = stacktrace_count_trigger,
1200 .print = stacktrace_trigger_print,
1201 .init = event_trigger_init,
1202 .free = event_trigger_free,
1203};
1204
1205static struct event_trigger_ops *
1206stacktrace_get_trigger_ops(char *cmd, char *param)
1207{
1208 return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1209}
1210
1211static struct event_command trigger_stacktrace_cmd = {
1212 .name = "stacktrace",
1213 .trigger_type = ETT_STACKTRACE,
1214 .flags = EVENT_CMD_FL_POST_TRIGGER,
1215 .func = event_trigger_callback,
1216 .reg = register_trigger,
1217 .unreg = unregister_trigger,
1218 .get_trigger_ops = stacktrace_get_trigger_ops,
1219 .set_filter = set_trigger_filter,
1220};
1221
1222static __init int register_trigger_stacktrace_cmd(void)
1223{
1224 int ret;
1225
1226 ret = register_event_command(&trigger_stacktrace_cmd);
1227 WARN_ON(ret < 0);
1228
1229 return ret;
1230}
1231#else
1232static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1233#endif /* CONFIG_STACKTRACE */
1234
1235static __init void unregister_trigger_traceon_traceoff_cmds(void)
1236{
1237 unregister_event_command(&trigger_traceon_cmd);
1238 unregister_event_command(&trigger_traceoff_cmd);
1239}
1240
1241static void
1242event_enable_trigger(struct event_trigger_data *data, void *rec,
1243 struct ring_buffer_event *event)
1244{
1245 struct enable_trigger_data *enable_data = data->private_data;
1246
1247 if (enable_data->enable)
1248 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1249 else
1250 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1251}
1252
1253static void
1254event_enable_count_trigger(struct event_trigger_data *data, void *rec,
1255 struct ring_buffer_event *event)
1256{
1257 struct enable_trigger_data *enable_data = data->private_data;
1258
1259 if (!data->count)
1260 return;
1261
1262 /* Skip if the event is in a state we want to switch to */
1263 if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1264 return;
1265
1266 if (data->count != -1)
1267 (data->count)--;
1268
1269 event_enable_trigger(data, rec, event);
1270}
1271
1272int event_enable_trigger_print(struct seq_file *m,
1273 struct event_trigger_ops *ops,
1274 struct event_trigger_data *data)
1275{
1276 struct enable_trigger_data *enable_data = data->private_data;
1277
1278 seq_printf(m, "%s:%s:%s",
1279 enable_data->hist ?
1280 (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1281 (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1282 enable_data->file->event_call->class->system,
1283 trace_event_name(enable_data->file->event_call));
1284
1285 if (data->count == -1)
1286 seq_puts(m, ":unlimited");
1287 else
1288 seq_printf(m, ":count=%ld", data->count);
1289
1290 if (data->filter_str)
1291 seq_printf(m, " if %s\n", data->filter_str);
1292 else
1293 seq_putc(m, '\n');
1294
1295 return 0;
1296}
1297
1298void event_enable_trigger_free(struct event_trigger_ops *ops,
1299 struct event_trigger_data *data)
1300{
1301 struct enable_trigger_data *enable_data = data->private_data;
1302
1303 if (WARN_ON_ONCE(data->ref <= 0))
1304 return;
1305
1306 data->ref--;
1307 if (!data->ref) {
1308 /* Remove the SOFT_MODE flag */
1309 trace_event_enable_disable(enable_data->file, 0, 1);
1310 module_put(enable_data->file->event_call->mod);
1311 trigger_data_free(data);
1312 kfree(enable_data);
1313 }
1314}
1315
1316static struct event_trigger_ops event_enable_trigger_ops = {
1317 .func = event_enable_trigger,
1318 .print = event_enable_trigger_print,
1319 .init = event_trigger_init,
1320 .free = event_enable_trigger_free,
1321};
1322
1323static struct event_trigger_ops event_enable_count_trigger_ops = {
1324 .func = event_enable_count_trigger,
1325 .print = event_enable_trigger_print,
1326 .init = event_trigger_init,
1327 .free = event_enable_trigger_free,
1328};
1329
1330static struct event_trigger_ops event_disable_trigger_ops = {
1331 .func = event_enable_trigger,
1332 .print = event_enable_trigger_print,
1333 .init = event_trigger_init,
1334 .free = event_enable_trigger_free,
1335};
1336
1337static struct event_trigger_ops event_disable_count_trigger_ops = {
1338 .func = event_enable_count_trigger,
1339 .print = event_enable_trigger_print,
1340 .init = event_trigger_init,
1341 .free = event_enable_trigger_free,
1342};
1343
1344int event_enable_trigger_func(struct event_command *cmd_ops,
1345 struct trace_event_file *file,
1346 char *glob, char *cmd, char *param)
1347{
1348 struct trace_event_file *event_enable_file;
1349 struct enable_trigger_data *enable_data;
1350 struct event_trigger_data *trigger_data;
1351 struct event_trigger_ops *trigger_ops;
1352 struct trace_array *tr = file->tr;
1353 const char *system;
1354 const char *event;
1355 bool hist = false;
1356 char *trigger;
1357 char *number;
1358 bool enable;
1359 int ret;
1360
1361 if (!param)
1362 return -EINVAL;
1363
1364 /* separate the trigger from the filter (s:e:n [if filter]) */
1365 trigger = strsep(¶m, " \t");
1366 if (!trigger)
1367 return -EINVAL;
1368
1369 system = strsep(&trigger, ":");
1370 if (!trigger)
1371 return -EINVAL;
1372
1373 event = strsep(&trigger, ":");
1374
1375 ret = -EINVAL;
1376 event_enable_file = find_event_file(tr, system, event);
1377 if (!event_enable_file)
1378 goto out;
1379
1380#ifdef CONFIG_HIST_TRIGGERS
1381 hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1382 (strcmp(cmd, DISABLE_HIST_STR) == 0));
1383
1384 enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1385 (strcmp(cmd, ENABLE_HIST_STR) == 0));
1386#else
1387 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1388#endif
1389 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1390
1391 ret = -ENOMEM;
1392 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1393 if (!trigger_data)
1394 goto out;
1395
1396 enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1397 if (!enable_data) {
1398 kfree(trigger_data);
1399 goto out;
1400 }
1401
1402 trigger_data->count = -1;
1403 trigger_data->ops = trigger_ops;
1404 trigger_data->cmd_ops = cmd_ops;
1405 INIT_LIST_HEAD(&trigger_data->list);
1406 RCU_INIT_POINTER(trigger_data->filter, NULL);
1407
1408 enable_data->hist = hist;
1409 enable_data->enable = enable;
1410 enable_data->file = event_enable_file;
1411 trigger_data->private_data = enable_data;
1412
1413 if (glob[0] == '!') {
1414 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1415 kfree(trigger_data);
1416 kfree(enable_data);
1417 ret = 0;
1418 goto out;
1419 }
1420
1421 /* Up the trigger_data count to make sure nothing frees it on failure */
1422 event_trigger_init(trigger_ops, trigger_data);
1423
1424 if (trigger) {
1425 number = strsep(&trigger, ":");
1426
1427 ret = -EINVAL;
1428 if (!strlen(number))
1429 goto out_free;
1430
1431 /*
1432 * We use the callback data field (which is a pointer)
1433 * as our counter.
1434 */
1435 ret = kstrtoul(number, 0, &trigger_data->count);
1436 if (ret)
1437 goto out_free;
1438 }
1439
1440 if (!param) /* if param is non-empty, it's supposed to be a filter */
1441 goto out_reg;
1442
1443 if (!cmd_ops->set_filter)
1444 goto out_reg;
1445
1446 ret = cmd_ops->set_filter(param, trigger_data, file);
1447 if (ret < 0)
1448 goto out_free;
1449
1450 out_reg:
1451 /* Don't let event modules unload while probe registered */
1452 ret = try_module_get(event_enable_file->event_call->mod);
1453 if (!ret) {
1454 ret = -EBUSY;
1455 goto out_free;
1456 }
1457
1458 ret = trace_event_enable_disable(event_enable_file, 1, 1);
1459 if (ret < 0)
1460 goto out_put;
1461 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1462 /*
1463 * The above returns on success the # of functions enabled,
1464 * but if it didn't find any functions it returns zero.
1465 * Consider no functions a failure too.
1466 */
1467 if (!ret) {
1468 ret = -ENOENT;
1469 goto out_disable;
1470 } else if (ret < 0)
1471 goto out_disable;
1472 /* Just return zero, not the number of enabled functions */
1473 ret = 0;
1474 event_trigger_free(trigger_ops, trigger_data);
1475 out:
1476 return ret;
1477
1478 out_disable:
1479 trace_event_enable_disable(event_enable_file, 0, 1);
1480 out_put:
1481 module_put(event_enable_file->event_call->mod);
1482 out_free:
1483 if (cmd_ops->set_filter)
1484 cmd_ops->set_filter(NULL, trigger_data, NULL);
1485 event_trigger_free(trigger_ops, trigger_data);
1486 kfree(enable_data);
1487 goto out;
1488}
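/*
 * For example, writing 'enable_event:sched:sched_switch:2' to some other
 * event's trigger file arrives here with cmd == "enable_event" and
 * param == "sched:sched_switch:2": 'system' becomes "sched", 'event'
 * becomes "sched_switch", and the trailing "2" becomes
 * trigger_data->count, limiting the enable action to two invocations.
 */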
1489
1490int event_enable_register_trigger(char *glob,
1491 struct event_trigger_ops *ops,
1492 struct event_trigger_data *data,
1493 struct trace_event_file *file)
1494{
1495 struct enable_trigger_data *enable_data = data->private_data;
1496 struct enable_trigger_data *test_enable_data;
1497 struct event_trigger_data *test;
1498 int ret = 0;
1499
1500 list_for_each_entry_rcu(test, &file->triggers, list) {
1501 test_enable_data = test->private_data;
1502 if (test_enable_data &&
1503 (test->cmd_ops->trigger_type ==
1504 data->cmd_ops->trigger_type) &&
1505 (test_enable_data->file == enable_data->file)) {
1506 ret = -EEXIST;
1507 goto out;
1508 }
1509 }
1510
1511 if (data->ops->init) {
1512 ret = data->ops->init(data->ops, data);
1513 if (ret < 0)
1514 goto out;
1515 }
1516
1517 list_add_rcu(&data->list, &file->triggers);
1518 ret++;
1519
1520 update_cond_flag(file);
1521 if (trace_event_trigger_enable_disable(file, 1) < 0) {
1522 list_del_rcu(&data->list);
1523 update_cond_flag(file);
1524 ret--;
1525 }
1526out:
1527 return ret;
1528}
1529
1530void event_enable_unregister_trigger(char *glob,
1531 struct event_trigger_ops *ops,
1532 struct event_trigger_data *test,
1533 struct trace_event_file *file)
1534{
1535 struct enable_trigger_data *test_enable_data = test->private_data;
1536 struct enable_trigger_data *enable_data;
1537 struct event_trigger_data *data;
1538 bool unregistered = false;
1539
1540 list_for_each_entry_rcu(data, &file->triggers, list) {
1541 enable_data = data->private_data;
1542 if (enable_data &&
1543 (data->cmd_ops->trigger_type ==
1544 test->cmd_ops->trigger_type) &&
1545 (enable_data->file == test_enable_data->file)) {
1546 unregistered = true;
1547 list_del_rcu(&data->list);
1548 trace_event_trigger_enable_disable(file, 0);
1549 update_cond_flag(file);
1550 break;
1551 }
1552 }
1553
1554 if (unregistered && data->ops->free)
1555 data->ops->free(data->ops, data);
1556}
1557
1558static struct event_trigger_ops *
1559event_enable_get_trigger_ops(char *cmd, char *param)
1560{
1561 struct event_trigger_ops *ops;
1562 bool enable;
1563
1564#ifdef CONFIG_HIST_TRIGGERS
1565 enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1566 (strcmp(cmd, ENABLE_HIST_STR) == 0));
1567#else
1568 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1569#endif
1570 if (enable)
1571 ops = param ? &event_enable_count_trigger_ops :
1572 &event_enable_trigger_ops;
1573 else
1574 ops = param ? &event_disable_count_trigger_ops :
1575 &event_disable_trigger_ops;
1576
1577 return ops;
1578}
1579
1580static struct event_command trigger_enable_cmd = {
1581 .name = ENABLE_EVENT_STR,
1582 .trigger_type = ETT_EVENT_ENABLE,
1583 .func = event_enable_trigger_func,
1584 .reg = event_enable_register_trigger,
1585 .unreg = event_enable_unregister_trigger,
1586 .get_trigger_ops = event_enable_get_trigger_ops,
1587 .set_filter = set_trigger_filter,
1588};
1589
1590static struct event_command trigger_disable_cmd = {
1591 .name = DISABLE_EVENT_STR,
1592 .trigger_type = ETT_EVENT_ENABLE,
1593 .func = event_enable_trigger_func,
1594 .reg = event_enable_register_trigger,
1595 .unreg = event_enable_unregister_trigger,
1596 .get_trigger_ops = event_enable_get_trigger_ops,
1597 .set_filter = set_trigger_filter,
1598};
1599
1600static __init void unregister_trigger_enable_disable_cmds(void)
1601{
1602 unregister_event_command(&trigger_enable_cmd);
1603 unregister_event_command(&trigger_disable_cmd);
1604}
1605
1606static __init int register_trigger_enable_disable_cmds(void)
1607{
1608 int ret;
1609
1610 ret = register_event_command(&trigger_enable_cmd);
1611 if (WARN_ON(ret < 0))
1612 return ret;
1613 ret = register_event_command(&trigger_disable_cmd);
1614 if (WARN_ON(ret < 0))
1615 unregister_trigger_enable_disable_cmds();
1616
1617 return ret;
1618}
1619
1620static __init int register_trigger_traceon_traceoff_cmds(void)
1621{
1622 int ret;
1623
1624 ret = register_event_command(&trigger_traceon_cmd);
1625 if (WARN_ON(ret < 0))
1626 return ret;
1627 ret = register_event_command(&trigger_traceoff_cmd);
1628 if (WARN_ON(ret < 0))
1629 unregister_trigger_traceon_traceoff_cmds();
1630
1631 return ret;
1632}
1633
1634__init int register_trigger_cmds(void)
1635{
1636 register_trigger_traceon_traceoff_cmds();
1637 register_trigger_snapshot_cmd();
1638 register_trigger_stacktrace_cmd();
1639 register_trigger_enable_disable_cmds();
1640 register_trigger_hist_enable_disable_cmds();
1641 register_trigger_hist_cmd();
1642
1643 return 0;
1644}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * trace_events_trigger - trace event triggers
4 *
5 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
6 */
7
8#include <linux/security.h>
9#include <linux/module.h>
10#include <linux/ctype.h>
11#include <linux/mutex.h>
12#include <linux/slab.h>
13#include <linux/rculist.h>
14
15#include "trace.h"
16
17static LIST_HEAD(trigger_commands);
18static DEFINE_MUTEX(trigger_cmd_mutex);
19
20void trigger_data_free(struct event_trigger_data *data)
21{
22 if (data->cmd_ops->set_filter)
23 data->cmd_ops->set_filter(NULL, data, NULL);
24
25 /* make sure current triggers exit before free */
26 tracepoint_synchronize_unregister();
27
28 kfree(data);
29}
30
31/**
32 * event_triggers_call - Call triggers associated with a trace event
33 * @file: The trace_event_file associated with the event
34 * @rec: The trace entry for the event, NULL for unconditional invocation
35 *
36 * For each trigger associated with an event, invoke the trigger
37 * function registered with the associated trigger command. If rec is
38 * non-NULL, it means that the trigger requires further processing and
39 * shouldn't be unconditionally invoked. If rec is non-NULL and the
40 * trigger has a filter associated with it, rec will checked against
41 * the filter and if the record matches the trigger will be invoked.
42 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
43 * in any case until the current event is written, the trigger
44 * function isn't invoked but the bit associated with the deferred
45 * trigger is set in the return value.
46 *
47 * Returns an enum event_trigger_type value containing a set bit for
48 * any trigger that should be deferred, ETT_NONE if nothing to defer.
49 *
50 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
51 *
52 * Return: an enum event_trigger_type value containing a set bit for
53 * any trigger that should be deferred, ETT_NONE if nothing to defer.
54 */
55enum event_trigger_type
56event_triggers_call(struct trace_event_file *file,
57 struct trace_buffer *buffer, void *rec,
58 struct ring_buffer_event *event)
59{
60 struct event_trigger_data *data;
61 enum event_trigger_type tt = ETT_NONE;
62 struct event_filter *filter;
63
64 if (list_empty(&file->triggers))
65 return tt;
66
67 list_for_each_entry_rcu(data, &file->triggers, list) {
68 if (data->paused)
69 continue;
70 if (!rec) {
71 data->ops->trigger(data, buffer, rec, event);
72 continue;
73 }
74 filter = rcu_dereference_sched(data->filter);
75 if (filter && !filter_match_preds(filter, rec))
76 continue;
77 if (event_command_post_trigger(data->cmd_ops)) {
78 tt |= data->cmd_ops->trigger_type;
79 continue;
80 }
81 data->ops->trigger(data, buffer, rec, event);
82 }
83 return tt;
84}
85EXPORT_SYMBOL_GPL(event_triggers_call);
86
87bool __trace_trigger_soft_disabled(struct trace_event_file *file)
88{
89 unsigned long eflags = file->flags;
90
91 if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
92 event_triggers_call(file, NULL, NULL, NULL);
93 if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
94 return true;
95 if (eflags & EVENT_FILE_FL_PID_FILTER)
96 return trace_event_ignore_this_pid(file);
97 return false;
98}
99EXPORT_SYMBOL_GPL(__trace_trigger_soft_disabled);
100
101/**
102 * event_triggers_post_call - Call 'post_triggers' for a trace event
103 * @file: The trace_event_file associated with the event
104 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
105 *
106 * For each trigger associated with an event, invoke the trigger
107 * function registered with the associated trigger command, if the
108 * corresponding bit is set in the tt enum passed into this function.
109 * See @event_triggers_call for details on how those bits are set.
110 *
111 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
112 */
113void
114event_triggers_post_call(struct trace_event_file *file,
115 enum event_trigger_type tt)
116{
117 struct event_trigger_data *data;
118
119 list_for_each_entry_rcu(data, &file->triggers, list) {
120 if (data->paused)
121 continue;
122 if (data->cmd_ops->trigger_type & tt)
123 data->ops->trigger(data, NULL, NULL, NULL);
124 }
125}
126EXPORT_SYMBOL_GPL(event_triggers_post_call);
127
128#define SHOW_AVAILABLE_TRIGGERS (void *)(1UL)
129
130static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
131{
132 struct trace_event_file *event_file = event_file_data(m->private);
133
134 if (t == SHOW_AVAILABLE_TRIGGERS) {
135 (*pos)++;
136 return NULL;
137 }
138 return seq_list_next(t, &event_file->triggers, pos);
139}
140
141static bool check_user_trigger(struct trace_event_file *file)
142{
143 struct event_trigger_data *data;
144
145 list_for_each_entry_rcu(data, &file->triggers, list,
146 lockdep_is_held(&event_mutex)) {
147 if (data->flags & EVENT_TRIGGER_FL_PROBE)
148 continue;
149 return true;
150 }
151 return false;
152}
153
154static void *trigger_start(struct seq_file *m, loff_t *pos)
155{
156 struct trace_event_file *event_file;
157
158 /* ->stop() is called even if ->start() fails */
159 mutex_lock(&event_mutex);
160 event_file = event_file_data(m->private);
161 if (unlikely(!event_file))
162 return ERR_PTR(-ENODEV);
163
164 if (list_empty(&event_file->triggers) || !check_user_trigger(event_file))
165 return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
166
167 return seq_list_start(&event_file->triggers, *pos);
168}
169
170static void trigger_stop(struct seq_file *m, void *t)
171{
172 mutex_unlock(&event_mutex);
173}
174
175static int trigger_show(struct seq_file *m, void *v)
176{
177 struct event_trigger_data *data;
178 struct event_command *p;
179
180 if (v == SHOW_AVAILABLE_TRIGGERS) {
181 seq_puts(m, "# Available triggers:\n");
182 seq_putc(m, '#');
183 mutex_lock(&trigger_cmd_mutex);
184 list_for_each_entry_reverse(p, &trigger_commands, list)
185 seq_printf(m, " %s", p->name);
186 seq_putc(m, '\n');
187 mutex_unlock(&trigger_cmd_mutex);
188 return 0;
189 }
190
191 data = list_entry(v, struct event_trigger_data, list);
192 data->ops->print(m, data);
193
194 return 0;
195}
196
197static const struct seq_operations event_triggers_seq_ops = {
198 .start = trigger_start,
199 .next = trigger_next,
200 .stop = trigger_stop,
201 .show = trigger_show,
202};
203
204static int event_trigger_regex_open(struct inode *inode, struct file *file)
205{
206 int ret;
207
208 ret = security_locked_down(LOCKDOWN_TRACEFS);
209 if (ret)
210 return ret;
211
212 mutex_lock(&event_mutex);
213
214 if (unlikely(!event_file_data(file))) {
215 mutex_unlock(&event_mutex);
216 return -ENODEV;
217 }
218
219 if ((file->f_mode & FMODE_WRITE) &&
220 (file->f_flags & O_TRUNC)) {
221 struct trace_event_file *event_file;
222 struct event_command *p;
223
224 event_file = event_file_data(file);
225
226 list_for_each_entry(p, &trigger_commands, list) {
227 if (p->unreg_all)
228 p->unreg_all(event_file);
229 }
230 }
231
232 if (file->f_mode & FMODE_READ) {
233 ret = seq_open(file, &event_triggers_seq_ops);
234 if (!ret) {
235 struct seq_file *m = file->private_data;
236 m->private = file;
237 }
238 }
239
240 mutex_unlock(&event_mutex);
241
242 return ret;
243}
244
245int trigger_process_regex(struct trace_event_file *file, char *buff)
246{
247 char *command, *next;
248 struct event_command *p;
249 int ret = -EINVAL;
250
251 next = buff = skip_spaces(buff);
252 command = strsep(&next, ": \t");
253 if (next) {
254 next = skip_spaces(next);
255 if (!*next)
256 next = NULL;
257 }
258 command = (command[0] != '!') ? command : command + 1;
259
260 mutex_lock(&trigger_cmd_mutex);
261 list_for_each_entry(p, &trigger_commands, list) {
262 if (strcmp(p->name, command) == 0) {
263 ret = p->parse(p, file, buff, command, next);
264 goto out_unlock;
265 }
266 }
267 out_unlock:
268 mutex_unlock(&trigger_cmd_mutex);
269
270 return ret;
271}
272
273static ssize_t event_trigger_regex_write(struct file *file,
274 const char __user *ubuf,
275 size_t cnt, loff_t *ppos)
276{
277 struct trace_event_file *event_file;
278 ssize_t ret;
279 char *buf;
280
281 if (!cnt)
282 return 0;
283
284 if (cnt >= PAGE_SIZE)
285 return -EINVAL;
286
287 buf = memdup_user_nul(ubuf, cnt);
288 if (IS_ERR(buf))
289 return PTR_ERR(buf);
290
291 strim(buf);
292
293 mutex_lock(&event_mutex);
294 event_file = event_file_data(file);
295 if (unlikely(!event_file)) {
296 mutex_unlock(&event_mutex);
297 kfree(buf);
298 return -ENODEV;
299 }
300 ret = trigger_process_regex(event_file, buf);
301 mutex_unlock(&event_mutex);
302
303 kfree(buf);
304 if (ret < 0)
305 goto out;
306
307 *ppos += cnt;
308 ret = cnt;
309 out:
310 return ret;
311}
312
313static int event_trigger_regex_release(struct inode *inode, struct file *file)
314{
315 mutex_lock(&event_mutex);
316
317 if (file->f_mode & FMODE_READ)
318 seq_release(inode, file);
319
320 mutex_unlock(&event_mutex);
321
322 return 0;
323}
324
325static ssize_t
326event_trigger_write(struct file *filp, const char __user *ubuf,
327 size_t cnt, loff_t *ppos)
328{
329 return event_trigger_regex_write(filp, ubuf, cnt, ppos);
330}
331
332static int
333event_trigger_open(struct inode *inode, struct file *filp)
334{
335 /* Checks for tracefs lockdown */
336 return event_trigger_regex_open(inode, filp);
337}
338
339static int
340event_trigger_release(struct inode *inode, struct file *file)
341{
342 return event_trigger_regex_release(inode, file);
343}
344
345const struct file_operations event_trigger_fops = {
346 .open = event_trigger_open,
347 .read = seq_read,
348 .write = event_trigger_write,
349 .llseek = tracing_lseek,
350 .release = event_trigger_release,
351};
352
353/*
354 * Currently we only register event commands from __init, so mark this
355 * __init too.
356 */
357__init int register_event_command(struct event_command *cmd)
358{
359 struct event_command *p;
360 int ret = 0;
361
362 mutex_lock(&trigger_cmd_mutex);
363 list_for_each_entry(p, &trigger_commands, list) {
364 if (strcmp(cmd->name, p->name) == 0) {
365 ret = -EBUSY;
366 goto out_unlock;
367 }
368 }
369 list_add(&cmd->list, &trigger_commands);
370 out_unlock:
371 mutex_unlock(&trigger_cmd_mutex);
372
373 return ret;
374}
375
376/*
377 * Currently we only unregister event commands from __init, so mark
378 * this __init too.
379 */
380__init int unregister_event_command(struct event_command *cmd)
381{
382 struct event_command *p, *n;
383 int ret = -ENODEV;
384
385 mutex_lock(&trigger_cmd_mutex);
386 list_for_each_entry_safe(p, n, &trigger_commands, list) {
387 if (strcmp(cmd->name, p->name) == 0) {
388 ret = 0;
389 list_del_init(&p->list);
390 goto out_unlock;
391 }
392 }
393 out_unlock:
394 mutex_unlock(&trigger_cmd_mutex);
395
396 return ret;
397}
398
399/**
400 * event_trigger_print - Generic event_trigger_ops @print implementation
401 * @name: The name of the event trigger
402 * @m: The seq_file being printed to
403 * @data: Trigger-specific data
404 * @filter_str: filter_str to print, if present
405 *
406 * Common implementation for event triggers to print themselves.
407 *
408 * Usually wrapped by a function that simply sets the @name of the
409 * trigger command and then invokes this.
410 *
411 * Return: 0 on success, errno otherwise
412 */
413static int
414event_trigger_print(const char *name, struct seq_file *m,
415 void *data, char *filter_str)
416{
417 long count = (long)data;
418
419 seq_puts(m, name);
420
421 if (count == -1)
422 seq_puts(m, ":unlimited");
423 else
424 seq_printf(m, ":count=%ld", count);
425
426 if (filter_str)
427 seq_printf(m, " if %s\n", filter_str);
428 else
429 seq_putc(m, '\n');
430
431 return 0;
432}
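
/*
 * Example (illustrative only): a typical wrapper simply supplies the
 * command name, as the traceon/traceoff print callbacks later in this
 * file do. 'foo' is a hypothetical command name:
 *
 *	static int
 *	foo_trigger_print(struct seq_file *m, struct event_trigger_data *data)
 *	{
 *		return event_trigger_print("foo", m, (void *)data->count,
 *					   data->filter_str);
 *	}
 */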
433
434/**
435 * event_trigger_init - Generic event_trigger_ops @init implementation
436 * @data: Trigger-specific data
437 *
438 * Common implementation of event trigger initialization.
439 *
440 * Usually used directly as the @init method in event trigger
441 * implementations.
442 *
443 * Return: 0 on success, errno otherwise
444 */
445int event_trigger_init(struct event_trigger_data *data)
446{
447 data->ref++;
448 return 0;
449}
450
451/**
452 * event_trigger_free - Generic event_trigger_ops @free implementation
453 * @data: Trigger-specific data
454 *
455 * Common implementation of event trigger de-initialization.
456 *
457 * Usually used directly as the @free method in event trigger
458 * implementations.
459 */
460static void
461event_trigger_free(struct event_trigger_data *data)
462{
463 if (WARN_ON_ONCE(data->ref <= 0))
464 return;
465
466 data->ref--;
467 if (!data->ref)
468 trigger_data_free(data);
469}
470
471int trace_event_trigger_enable_disable(struct trace_event_file *file,
472 int trigger_enable)
473{
474 int ret = 0;
475
476 if (trigger_enable) {
477 if (atomic_inc_return(&file->tm_ref) > 1)
478 return ret;
479 set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
480 ret = trace_event_enable_disable(file, 1, 1);
481 } else {
482 if (atomic_dec_return(&file->tm_ref) > 0)
483 return ret;
484 clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
485 ret = trace_event_enable_disable(file, 0, 1);
486 }
487
488 return ret;
489}
490
491/**
492 * clear_event_triggers - Clear all triggers associated with a trace array
493 * @tr: The trace array to clear
494 *
495 * For each trigger, the triggering event has its tm_ref decremented
496 * via trace_event_trigger_enable_disable(), and any associated event
497 * (in the case of enable/disable_event triggers) will have its sm_ref
498 * decremented via free()->trace_event_enable_disable(). That
499 * combination effectively reverses the soft-mode/trigger state added
500 * by trigger registration.
501 *
502 * Must be called with event_mutex held.
503 */
504void
505clear_event_triggers(struct trace_array *tr)
506{
507 struct trace_event_file *file;
508
509 list_for_each_entry(file, &tr->events, list) {
510 struct event_trigger_data *data, *n;
511 list_for_each_entry_safe(data, n, &file->triggers, list) {
512 trace_event_trigger_enable_disable(file, 0);
513 list_del_rcu(&data->list);
514 if (data->ops->free)
515 data->ops->free(data);
516 }
517 }
518}
519
520/**
521 * update_cond_flag - Set or reset the TRIGGER_COND bit
522 * @file: The trace_event_file associated with the event
523 *
524 * If an event has triggers and any of those triggers has a filter or
525 * a post_trigger, trigger invocation needs to be deferred until after
526 * the current event has logged its data, and the event should have
527 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
528 * cleared.
529 */
530void update_cond_flag(struct trace_event_file *file)
531{
532 struct event_trigger_data *data;
533 bool set_cond = false;
534
535 lockdep_assert_held(&event_mutex);
536
537 list_for_each_entry(data, &file->triggers, list) {
538 if (data->filter || event_command_post_trigger(data->cmd_ops) ||
539 event_command_needs_rec(data->cmd_ops)) {
540 set_cond = true;
541 break;
542 }
543 }
544
545 if (set_cond)
546 set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
547 else
548 clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
549}
550
551/**
552 * register_trigger - Generic event_command @reg implementation
553 * @glob: The raw string used to register the trigger
554 * @data: Trigger-specific data to associate with the trigger
555 * @file: The trace_event_file associated with the event
556 *
557 * Common implementation for event trigger registration.
558 *
559 * Usually used directly as the @reg method in event command
560 * implementations.
561 *
562 * Return: 0 on success, errno otherwise
563 */
564static int register_trigger(char *glob,
565 struct event_trigger_data *data,
566 struct trace_event_file *file)
567{
568 struct event_trigger_data *test;
569 int ret = 0;
570
571 lockdep_assert_held(&event_mutex);
572
573 list_for_each_entry(test, &file->triggers, list) {
574 if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
575 ret = -EEXIST;
576 goto out;
577 }
578 }
579
580 if (data->ops->init) {
581 ret = data->ops->init(data);
582 if (ret < 0)
583 goto out;
584 }
585
586 list_add_rcu(&data->list, &file->triggers);
587
588 update_cond_flag(file);
589 ret = trace_event_trigger_enable_disable(file, 1);
590 if (ret < 0) {
591 list_del_rcu(&data->list);
592 update_cond_flag(file);
593 }
594out:
595 return ret;
596}
597
598/**
599 * unregister_trigger - Generic event_command @unreg implementation
600 * @glob: The raw string used to register the trigger
601 * @test: Trigger-specific data used to find the trigger to remove
602 * @file: The trace_event_file associated with the event
603 *
604 * Common implementation for event trigger unregistration.
605 *
606 * Usually used directly as the @unreg method in event command
607 * implementations.
608 */
609static void unregister_trigger(char *glob,
610 struct event_trigger_data *test,
611 struct trace_event_file *file)
612{
613 struct event_trigger_data *data = NULL, *iter;
614
615 lockdep_assert_held(&event_mutex);
616
617 list_for_each_entry(iter, &file->triggers, list) {
618 if (iter->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
619 data = iter;
620 list_del_rcu(&data->list);
621 trace_event_trigger_enable_disable(file, 0);
622 update_cond_flag(file);
623 break;
624 }
625 }
626
627 if (data && data->ops->free)
628 data->ops->free(data);
629}
630
631/*
632 * Event trigger parsing helper functions.
633 *
634 * These functions help make it easier to write an event trigger
635 * parsing function i.e. the struct event_command.parse() callback
636 * function responsible for parsing and registering a trigger command
637 * written to the 'trigger' file.
638 *
639 * A trigger command (or just 'trigger' for short) takes the form:
640 * [trigger] [if filter]
641 *
642 * The struct event_command.parse() callback (and other struct
643 * event_command functions) refer to several components of a trigger
644 * command. Those same components are referenced by the event trigger
645 * parsing helper functions defined below. These components are:
646 *
647 * cmd - the trigger command name
648 * glob - the trigger command name optionally prefaced with '!'
649 * param_and_filter - text following cmd and ':'
650 * param - text following cmd and ':' and stripped of filter
651 * filter - the optional filter text following (and including) 'if'
652 *
653 * To illustrate the use of these components, here are some concrete
654 * examples. For the following triggers:
655 *
656 * echo 'traceon:5 if pid == 0' > trigger
657 * - 'traceon' is both cmd and glob
658 * - '5 if pid == 0' is the param_and_filter
659 * - '5' is the param
660 * - 'if pid == 0' is the filter
661 *
662 * echo 'enable_event:sys:event:n' > trigger
663 * - 'enable_event' is both cmd and glob
664 * - 'sys:event:n' is the param_and_filter
665 * - 'sys:event:n' is the param
666 * - there is no filter
667 *
668 * echo 'hist:keys=pid if prio > 50' > trigger
669 * - 'hist' is both cmd and glob
670 * - 'keys=pid if prio > 50' is the param_and_filter
671 * - 'keys=pid' is the param
672 * - 'if prio > 50' is the filter
673 *
674 * echo '!enable_event:sys:event:n' > trigger
675 * - 'enable_event' the cmd
676 * - '!enable_event' is the glob
677 * - 'sys:event:n' is the param_and_filter
678 * - 'sys:event:n' is the param
679 * - there is no filter
680 *
681 * echo 'traceoff' > trigger
682 * - 'traceoff' is both cmd and glob
683 * - there is no param_and_filter
684 * - there is no param
685 * - there is no filter
686 *
687 * There are a few different categories of event trigger covered by
688 * these helpers:
689 *
690 * - triggers that don't require a parameter e.g. traceon
691 * - triggers that do require a parameter e.g. enable_event and hist
692 * - triggers that, although they may not require a param, may support
693 * an optional 'n' param (n = number of times the trigger should
694 * fire), e.g. traceon:5 or enable_event:sys:event:n
695 * - triggers that do not support an 'n' param e.g. hist
696 *
697 * These functions can be used or ignored as necessary - it all
698 * depends on the complexity of the trigger. The helpers are
699 * fine-grained because some implementations need to customize
700 * individual steps and therefore won't use every helper. For
701 * instance, the hist trigger implementation doesn't use
702 * event_trigger_separate_filter() because it has special
703 * requirements for handling the filter.
704 */
705
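/*
 * Example (illustrative sketch): a custom event_command.parse()
 * implementation typically composes the helpers below in the same
 * order that the generic event_trigger_parse() further down does,
 * roughly:
 *
 *	remove = event_trigger_check_remove(glob);
 *	ret = event_trigger_separate_filter(param_and_filter, &param,
 *					    &filter, false);
 *	data = event_trigger_alloc(cmd_ops, cmd, param, private_data);
 *	ret = event_trigger_parse_num(param, data);
 *	ret = event_trigger_set_filter(cmd_ops, file, filter, data);
 *	ret = event_trigger_register(cmd_ops, file, glob, data);
 *
 * Error handling, the remove(!) path and reference counting are
 * omitted here; see event_trigger_parse() for the complete sequence.
 */
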
706/**
707 * event_trigger_check_remove - check whether an event trigger specifies remove
708 * @glob: The trigger command string, with optional remove(!) operator
709 *
710 * The event trigger callback implementations pass in 'glob' as a
711 * parameter. This is the command name either with or without a
712 * remove(!) operator. This function simply parses the glob and
713 * determines whether the command corresponds to a trigger removal or
714 * a trigger addition.
715 *
716 * Return: true if this is a remove command, false otherwise
717 */
718bool event_trigger_check_remove(const char *glob)
719{
720 return glob && glob[0] == '!';
721}
722
723/**
724 * event_trigger_empty_param - check whether the param is empty
725 * @param: The trigger param string
726 *
727 * The event trigger callback implementations pass in 'param' as a
728 * parameter. This corresponds to the remainder of the command string
729 * after the command name itself. This function can be called by a
730 * callback implementation for any command that requires a param; a
731 * callback that doesn't require a param can ignore it.
732 *
733 * Return: true if this is an empty param, false otherwise
734 */
735bool event_trigger_empty_param(const char *param)
736{
737 return !param;
738}
739
740/**
741 * event_trigger_separate_filter - separate an event trigger from a filter
742 * @param_and_filter: String containing trigger and possibly filter
743 * @param: outparam, will be filled with a pointer to the trigger
744 * @filter: outparam, will be filled with a pointer to the filter
745 * @param_required: Specifies whether or not the param string is required
746 *
747 * Given a param string of the form '[trigger] [if filter]', this
748 * function separates the filter from the trigger and returns the
749 * trigger in @param and the filter in @filter. Either the @param
750 * or the @filter may be set to NULL by this function - if not set to
751 * NULL, they will contain strings corresponding to the trigger and
752 * filter.
753 *
754 * There are two cases that need to be handled with respect to the
755 * passed-in param: either the param is required, or it is not
756 * required. If @param_required is set, and there's no param, it will
757 * return -EINVAL. If @param_required is not set and there's a param
758 * that starts with a number, that corresponds to the case of a
759 * trigger with :n (n = number of times the trigger should fire) and
760 * the parsing continues normally; otherwise the function returns
761 * and assumes @param_and_filter contains only a filter, with nothing
762 * else to do.
763 *
764 * Return: 0 on success, errno otherwise
765 */
766int event_trigger_separate_filter(char *param_and_filter, char **param,
767 char **filter, bool param_required)
768{
769 int ret = 0;
770
771 *param = *filter = NULL;
772
773 if (!param_and_filter) {
774 if (param_required)
775 ret = -EINVAL;
776 goto out;
777 }
778
779 /*
780 * Here we check for an optional param. The only legal
781 * optional param is :n, and if that's the case, continue
782 * below. Otherwise we assume what's left is a filter and
783 * return it as the filter string for the caller to deal with.
784 */
785 if (!param_required && param_and_filter && !isdigit(param_and_filter[0])) {
786 *filter = param_and_filter;
787 goto out;
788 }
789
790 /*
791 * Separate the param from the filter (param [if filter]).
792 * Here we have either an optional :n param or a required
793 * param and an optional filter.
794 */
795 *param = strsep(&param_and_filter, " \t");
796
797 /*
798 * Here we have a filter, though it may be empty.
799 */
800 if (param_and_filter) {
801 *filter = skip_spaces(param_and_filter);
802 if (!**filter)
803 *filter = NULL;
804 }
805out:
806 return ret;
807}
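
/*
 * Example (illustrative): for the param_and_filter string
 * '5 if pid == 0' with @param_required unset, this sets *param to "5"
 * and *filter to "if pid == 0"; for 'keys=pid if prio > 50' with
 * @param_required set, *param is "keys=pid" and *filter is
 * "if prio > 50"; for 'if pid == 0' with @param_required unset,
 * *param stays NULL and *filter is "if pid == 0".
 */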
808
809/**
810 * event_trigger_alloc - allocate and init event_trigger_data for a trigger
811 * @cmd_ops: The event_command operations for the trigger
812 * @cmd: The cmd string
813 * @param: The param string
814 * @private_data: User data to associate with the event trigger
815 *
816 * Allocate an event_trigger_data instance and initialize it. The
817 * @cmd_ops are used along with the @cmd and @param to get the
818 * trigger_ops to assign to the event_trigger_data. @private_data can
819 * also be passed in and associated with the event_trigger_data.
820 *
821 * Use event_trigger_free() to free an event_trigger_data object.
822 *
823 * Return: The trigger_data object on success, NULL otherwise
824 */
825struct event_trigger_data *event_trigger_alloc(struct event_command *cmd_ops,
826 char *cmd,
827 char *param,
828 void *private_data)
829{
830 struct event_trigger_data *trigger_data;
831 struct event_trigger_ops *trigger_ops;
832
833 trigger_ops = cmd_ops->get_trigger_ops(cmd, param);
834
835 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
836 if (!trigger_data)
837 return NULL;
838
839 trigger_data->count = -1;
840 trigger_data->ops = trigger_ops;
841 trigger_data->cmd_ops = cmd_ops;
842 trigger_data->private_data = private_data;
843
844 INIT_LIST_HEAD(&trigger_data->list);
845 INIT_LIST_HEAD(&trigger_data->named_list);
846 RCU_INIT_POINTER(trigger_data->filter, NULL);
847
848 return trigger_data;
849}
850
851/**
852 * event_trigger_parse_num - parse and return the number param for a trigger
853 * @param: The param string
854 * @trigger_data: The trigger_data for the trigger
855 *
856 * Parse the :n (n = number of times the trigger should fire) param
857 * and set the count variable in the trigger_data to the parsed count.
858 *
859 * Return: 0 on success, errno otherwise
860 */
861int event_trigger_parse_num(char *param,
862 struct event_trigger_data *trigger_data)
863{
864 char *number;
865 int ret = 0;
866
867 if (param) {
868 number = strsep(&param, ":");
869
870 if (!strlen(number))
871 return -EINVAL;
872
873 /*
874 * Parse the count directly into trigger_data->count, which
875 * serves as the trigger's remaining-fire counter.
876 */
877 ret = kstrtoul(number, 0, &trigger_data->count);
878 }
879
880 return ret;
881}
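
/*
 * Example (illustrative): for 'traceon:5', @param is "5" and
 * trigger_data->count becomes 5; if no :n was given, @param is NULL
 * and count keeps the -1 ("unlimited") default set by
 * event_trigger_alloc().
 */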
882
883/**
884 * event_trigger_set_filter - set an event trigger's filter
885 * @cmd_ops: The event_command operations for the trigger
886 * @file: The event file for the trigger's event
887 * @param: The string containing the filter
888 * @trigger_data: The trigger_data for the trigger
889 *
890 * Set the filter for the trigger. If the filter is NULL, just return
891 * without error.
892 *
893 * Return: 0 on success, errno otherwise
894 */
895int event_trigger_set_filter(struct event_command *cmd_ops,
896 struct trace_event_file *file,
897 char *param,
898 struct event_trigger_data *trigger_data)
899{
900 if (param && cmd_ops->set_filter)
901 return cmd_ops->set_filter(param, trigger_data, file);
902
903 return 0;
904}
905
906/**
907 * event_trigger_reset_filter - reset an event trigger's filter
908 * @cmd_ops: The event_command operations for the trigger
909 * @trigger_data: The trigger_data for the trigger
910 *
911 * Reset the filter for the trigger to no filter.
912 */
913void event_trigger_reset_filter(struct event_command *cmd_ops,
914 struct event_trigger_data *trigger_data)
915{
916 if (cmd_ops->set_filter)
917 cmd_ops->set_filter(NULL, trigger_data, NULL);
918}
919
920/**
921 * event_trigger_register - register an event trigger
922 * @cmd_ops: The event_command operations for the trigger
923 * @file: The event file for the trigger's event
924 * @glob: The trigger command string, with optional remove(!) operator
925 * @trigger_data: The trigger_data for the trigger
926 *
927 * Register an event trigger. The @cmd_ops are used to call the
928 * cmd_ops->reg() function which actually does the registration.
929 *
930 * Return: 0 on success, errno otherwise
931 */
932int event_trigger_register(struct event_command *cmd_ops,
933 struct trace_event_file *file,
934 char *glob,
935 struct event_trigger_data *trigger_data)
936{
937 return cmd_ops->reg(glob, trigger_data, file);
938}
939
940/**
941 * event_trigger_unregister - unregister an event trigger
942 * @cmd_ops: The event_command operations for the trigger
943 * @file: The event file for the trigger's event
944 * @glob: The trigger command string, with optional remove(!) operator
945 * @trigger_data: The trigger_data for the trigger
946 *
947 * Unregister an event trigger. The @cmd_ops are used to call the
948 * cmd_ops->unreg() function which actually does the unregistration.
949 */
950void event_trigger_unregister(struct event_command *cmd_ops,
951 struct trace_event_file *file,
952 char *glob,
953 struct event_trigger_data *trigger_data)
954{
955 cmd_ops->unreg(glob, trigger_data, file);
956}
957
958/*
959 * End event trigger parsing helper functions.
960 */
961
962/**
963 * event_trigger_parse - Generic event_command @parse implementation
964 * @cmd_ops: The command ops, used for trigger registration
965 * @file: The trace_event_file associated with the event
966 * @glob: The raw string used to register the trigger
967 * @cmd: The cmd portion of the string used to register the trigger
968 * @param_and_filter: The param and filter portion of the string used to register the trigger
969 *
970 * Common implementation for event command parsing and trigger
971 * instantiation.
972 *
973 * Usually used directly as the @parse method in event command
974 * implementations.
975 *
976 * Return: 0 on success, errno otherwise
977 */
978static int
979event_trigger_parse(struct event_command *cmd_ops,
980 struct trace_event_file *file,
981 char *glob, char *cmd, char *param_and_filter)
982{
983 struct event_trigger_data *trigger_data;
984 char *param, *filter;
985 bool remove;
986 int ret;
987
988 remove = event_trigger_check_remove(glob);
989
990 ret = event_trigger_separate_filter(param_and_filter, &param, &filter, false);
991 if (ret)
992 return ret;
993
994 ret = -ENOMEM;
995 trigger_data = event_trigger_alloc(cmd_ops, cmd, param, file);
996 if (!trigger_data)
997 goto out;
998
999 if (remove) {
1000 event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
1001 kfree(trigger_data);
1002 ret = 0;
1003 goto out;
1004 }
1005
1006 ret = event_trigger_parse_num(param, trigger_data);
1007 if (ret)
1008 goto out_free;
1009
1010 ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
1011 if (ret < 0)
1012 goto out_free;
1013
1014 /* Up the trigger_data count to make sure reg doesn't free it on failure */
1015 event_trigger_init(trigger_data);
1016
1017 ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
1018 if (ret)
1019 goto out_free;
1020
1021 /* Down the counter of trigger_data or free it if not used anymore */
1022 event_trigger_free(trigger_data);
1023 out:
1024 return ret;
1025
1026 out_free:
1027 event_trigger_reset_filter(cmd_ops, trigger_data);
1028 kfree(trigger_data);
1029 goto out;
1030}
1031
1032/**
1033 * set_trigger_filter - Generic event_command @set_filter implementation
1034 * @filter_str: The filter string for the trigger, NULL to remove filter
1035 * @trigger_data: Trigger-specific data
1036 * @file: The trace_event_file associated with the event
1037 *
1038 * Common implementation for event command filter parsing and filter
1039 * instantiation.
1040 *
1041 * Usually used directly as the @set_filter method in event command
1042 * implementations.
1043 *
1044 * Also used to remove a filter (if filter_str = NULL).
1045 *
1046 * Return: 0 on success, errno otherwise
1047 */
1048int set_trigger_filter(char *filter_str,
1049 struct event_trigger_data *trigger_data,
1050 struct trace_event_file *file)
1051{
1052 struct event_trigger_data *data = trigger_data;
1053 struct event_filter *filter = NULL, *tmp;
1054 int ret = -EINVAL;
1055 char *s;
1056
1057 if (!filter_str) /* clear the current filter */
1058 goto assign;
1059
1060 s = strsep(&filter_str, " \t");
1061
1062 if (!strlen(s) || strcmp(s, "if") != 0)
1063 goto out;
1064
1065 if (!filter_str)
1066 goto out;
1067
1068 /* The filter is for the 'trigger' event, not the triggered event */
1069 ret = create_event_filter(file->tr, file->event_call,
1070 filter_str, true, &filter);
1071
1072 /* set_str was only enabled for error handling; drop the string now */
1073 if (filter) {
1074 kfree(filter->filter_string);
1075 filter->filter_string = NULL;
1076 }
1077
1078 /*
1079 * If create_event_filter() fails, filter still needs to be freed.
1080 * Which the calling code will do with data->filter.
1081 */
1082 assign:
1083 tmp = rcu_access_pointer(data->filter);
1084
1085 rcu_assign_pointer(data->filter, filter);
1086
1087 if (tmp) {
1088 /*
1089 * Make sure the call is done with the filter.
1090 * It is possible that a filter could fail at boot up,
1091 * and then this path will be called. Avoid the synchronization
1092 * in that case.
1093 */
1094 if (system_state != SYSTEM_BOOTING)
1095 tracepoint_synchronize_unregister();
1096 free_event_filter(tmp);
1097 }
1098
1099 kfree(data->filter_str);
1100 data->filter_str = NULL;
1101
1102 if (filter_str) {
1103 data->filter_str = kstrdup(filter_str, GFP_KERNEL);
1104 if (!data->filter_str) {
1105 free_event_filter(rcu_access_pointer(data->filter));
1106 data->filter = NULL;
1107 ret = -ENOMEM;
1108 }
1109 }
1110 out:
1111 return ret;
1112}
1113
1114static LIST_HEAD(named_triggers);
1115
1116/**
1117 * find_named_trigger - Find the common named trigger associated with @name
1118 * @name: The name of the set of named triggers to find the common data for
1119 *
1120 * Named triggers are sets of triggers that share a common set of
1121 * trigger data. The first named trigger registered with a given name
1122 * owns the common trigger data that the others subsequently
1123 * registered with the same name will reference. This function
1124 * returns the common trigger data associated with that first
1125 * registered instance.
1126 *
1127 * Return: the common trigger data for the given named trigger on
1128 * success, NULL otherwise.
1129 */
1130struct event_trigger_data *find_named_trigger(const char *name)
1131{
1132 struct event_trigger_data *data;
1133
1134 if (!name)
1135 return NULL;
1136
1137 list_for_each_entry(data, &named_triggers, named_list) {
1138 if (data->named_data)
1139 continue;
1140 if (strcmp(data->name, name) == 0)
1141 return data;
1142 }
1143
1144 return NULL;
1145}
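
/*
 * Example (illustrative, assuming CONFIG_HIST_TRIGGERS): named
 * triggers are currently created by the hist trigger's 'name='
 * parameter; giving two events the same name makes them share the
 * first instance's trigger data, e.g.:
 *
 *	echo 'hist:name=foo:keys=common_pid' > events/<sys>/<event1>/trigger
 *	echo 'hist:name=foo:keys=common_pid' > events/<sys>/<event2>/trigger
 */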
1146
1147/**
1148 * is_named_trigger - determine if a given trigger is a named trigger
1149 * @test: The trigger data to test
1150 *
1151 * Return: true if 'test' is a named trigger, false otherwise.
1152 */
1153bool is_named_trigger(struct event_trigger_data *test)
1154{
1155 struct event_trigger_data *data;
1156
1157 list_for_each_entry(data, &named_triggers, named_list) {
1158 if (test == data)
1159 return true;
1160 }
1161
1162 return false;
1163}
1164
1165/**
1166 * save_named_trigger - save the trigger in the named trigger list
1167 * @name: The name of the named trigger set
1168 * @data: The trigger data to save
1169 *
1170 * Return: 0 if successful, negative error otherwise.
1171 */
1172int save_named_trigger(const char *name, struct event_trigger_data *data)
1173{
1174 data->name = kstrdup(name, GFP_KERNEL);
1175 if (!data->name)
1176 return -ENOMEM;
1177
1178 list_add(&data->named_list, &named_triggers);
1179
1180 return 0;
1181}
1182
1183/**
1184 * del_named_trigger - delete a trigger from the named trigger list
1185 * @data: The trigger data to delete
1186 */
1187void del_named_trigger(struct event_trigger_data *data)
1188{
1189 kfree(data->name);
1190 data->name = NULL;
1191
1192 list_del(&data->named_list);
1193}
1194
1195static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
1196{
1197 struct event_trigger_data *test;
1198
1199 list_for_each_entry(test, &named_triggers, named_list) {
1200 if (strcmp(test->name, data->name) == 0) {
1201 if (pause) {
1202 test->paused_tmp = test->paused;
1203 test->paused = true;
1204 } else {
1205 test->paused = test->paused_tmp;
1206 }
1207 }
1208 }
1209}
1210
1211/**
1212 * pause_named_trigger - Pause all named triggers with the same name
1213 * @data: The trigger data of a named trigger to pause
1214 *
1215 * Pauses a named trigger along with all other triggers having the
1216 * same name. Because named triggers share a common set of data,
1217 * pausing only one is meaningless, so pausing one named trigger needs
1218 * to pause all triggers with the same name.
1219 */
1220void pause_named_trigger(struct event_trigger_data *data)
1221{
1222 __pause_named_trigger(data, true);
1223}
1224
1225/**
1226 * unpause_named_trigger - Un-pause all named triggers with the same name
1227 * @data: The trigger data of a named trigger to unpause
1228 *
1229 * Un-pauses a named trigger along with all other triggers having the
1230 * same name. Because named triggers share a common set of data,
1231 * unpausing only one is meaningless, so unpausing one named trigger
1232 * needs to unpause all triggers with the same name.
1233 */
1234void unpause_named_trigger(struct event_trigger_data *data)
1235{
1236 __pause_named_trigger(data, false);
1237}
1238
1239/**
1240 * set_named_trigger_data - Associate common named trigger data
1241 * @data: The trigger data to associate
1242 * @named_data: The common named trigger to be associated
1243 *
1244 * Named triggers are sets of triggers that share a common set of
1245 * trigger data. The first named trigger registered with a given name
1246 * owns the common trigger data that the others subsequently
1247 * registered with the same name will reference. This function
1248 * associates the common trigger data from the first trigger with the
1249 * given trigger.
1250 */
1251void set_named_trigger_data(struct event_trigger_data *data,
1252 struct event_trigger_data *named_data)
1253{
1254 data->named_data = named_data;
1255}
1256
1257struct event_trigger_data *
1258get_named_trigger_data(struct event_trigger_data *data)
1259{
1260 return data->named_data;
1261}
1262
1263static void
1264traceon_trigger(struct event_trigger_data *data,
1265 struct trace_buffer *buffer, void *rec,
1266 struct ring_buffer_event *event)
1267{
1268 struct trace_event_file *file = data->private_data;
1269
1270 if (file) {
1271 if (tracer_tracing_is_on(file->tr))
1272 return;
1273
1274 tracer_tracing_on(file->tr);
1275 return;
1276 }
1277
1278 if (tracing_is_on())
1279 return;
1280
1281 tracing_on();
1282}
1283
1284static void
1285traceon_count_trigger(struct event_trigger_data *data,
1286 struct trace_buffer *buffer, void *rec,
1287 struct ring_buffer_event *event)
1288{
1289 struct trace_event_file *file = data->private_data;
1290
1291 if (file) {
1292 if (tracer_tracing_is_on(file->tr))
1293 return;
1294 } else {
1295 if (tracing_is_on())
1296 return;
1297 }
1298
1299 if (!data->count)
1300 return;
1301
1302 if (data->count != -1)
1303 (data->count)--;
1304
1305 if (file)
1306 tracer_tracing_on(file->tr);
1307 else
1308 tracing_on();
1309}
1310
1311static void
1312traceoff_trigger(struct event_trigger_data *data,
1313 struct trace_buffer *buffer, void *rec,
1314 struct ring_buffer_event *event)
1315{
1316 struct trace_event_file *file = data->private_data;
1317
1318 if (file) {
1319 if (!tracer_tracing_is_on(file->tr))
1320 return;
1321
1322 tracer_tracing_off(file->tr);
1323 return;
1324 }
1325
1326 if (!tracing_is_on())
1327 return;
1328
1329 tracing_off();
1330}
1331
1332static void
1333traceoff_count_trigger(struct event_trigger_data *data,
1334 struct trace_buffer *buffer, void *rec,
1335 struct ring_buffer_event *event)
1336{
1337 struct trace_event_file *file = data->private_data;
1338
1339 if (file) {
1340 if (!tracer_tracing_is_on(file->tr))
1341 return;
1342 } else {
1343 if (!tracing_is_on())
1344 return;
1345 }
1346
1347 if (!data->count)
1348 return;
1349
1350 if (data->count != -1)
1351 (data->count)--;
1352
1353 if (file)
1354 tracer_tracing_off(file->tr);
1355 else
1356 tracing_off();
1357}
1358
1359static int
1360traceon_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1361{
1362 return event_trigger_print("traceon", m, (void *)data->count,
1363 data->filter_str);
1364}
1365
1366static int
1367traceoff_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1368{
1369 return event_trigger_print("traceoff", m, (void *)data->count,
1370 data->filter_str);
1371}
1372
1373static struct event_trigger_ops traceon_trigger_ops = {
1374 .trigger = traceon_trigger,
1375 .print = traceon_trigger_print,
1376 .init = event_trigger_init,
1377 .free = event_trigger_free,
1378};
1379
1380static struct event_trigger_ops traceon_count_trigger_ops = {
1381 .trigger = traceon_count_trigger,
1382 .print = traceon_trigger_print,
1383 .init = event_trigger_init,
1384 .free = event_trigger_free,
1385};
1386
1387static struct event_trigger_ops traceoff_trigger_ops = {
1388 .trigger = traceoff_trigger,
1389 .print = traceoff_trigger_print,
1390 .init = event_trigger_init,
1391 .free = event_trigger_free,
1392};
1393
1394static struct event_trigger_ops traceoff_count_trigger_ops = {
1395 .trigger = traceoff_count_trigger,
1396 .print = traceoff_trigger_print,
1397 .init = event_trigger_init,
1398 .free = event_trigger_free,
1399};
1400
1401static struct event_trigger_ops *
1402onoff_get_trigger_ops(char *cmd, char *param)
1403{
1404 struct event_trigger_ops *ops;
1405
1406 /* we register both traceon and traceoff to this callback */
1407 if (strcmp(cmd, "traceon") == 0)
1408 ops = param ? &traceon_count_trigger_ops :
1409 &traceon_trigger_ops;
1410 else
1411 ops = param ? &traceoff_count_trigger_ops :
1412 &traceoff_trigger_ops;
1413
1414 return ops;
1415}
1416
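/*
 * Example (user-facing usage, following the trigger syntax described
 * earlier in this file; tracefs is commonly mounted at
 * /sys/kernel/tracing):
 *
 *	echo 'traceon' > /sys/kernel/tracing/events/<system>/<event>/trigger
 *	echo 'traceoff:5 if common_pid == 1234' > .../trigger
 *	echo '!traceoff' > .../trigger
 */
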
1417static struct event_command trigger_traceon_cmd = {
1418 .name = "traceon",
1419 .trigger_type = ETT_TRACE_ONOFF,
1420 .parse = event_trigger_parse,
1421 .reg = register_trigger,
1422 .unreg = unregister_trigger,
1423 .get_trigger_ops = onoff_get_trigger_ops,
1424 .set_filter = set_trigger_filter,
1425};
1426
1427static struct event_command trigger_traceoff_cmd = {
1428 .name = "traceoff",
1429 .trigger_type = ETT_TRACE_ONOFF,
1430 .flags = EVENT_CMD_FL_POST_TRIGGER,
1431 .parse = event_trigger_parse,
1432 .reg = register_trigger,
1433 .unreg = unregister_trigger,
1434 .get_trigger_ops = onoff_get_trigger_ops,
1435 .set_filter = set_trigger_filter,
1436};
1437
1438#ifdef CONFIG_TRACER_SNAPSHOT
1439static void
1440snapshot_trigger(struct event_trigger_data *data,
1441 struct trace_buffer *buffer, void *rec,
1442 struct ring_buffer_event *event)
1443{
1444 struct trace_event_file *file = data->private_data;
1445
1446 if (file)
1447 tracing_snapshot_instance(file->tr);
1448 else
1449 tracing_snapshot();
1450}
1451
1452static void
1453snapshot_count_trigger(struct event_trigger_data *data,
1454 struct trace_buffer *buffer, void *rec,
1455 struct ring_buffer_event *event)
1456{
1457 if (!data->count)
1458 return;
1459
1460 if (data->count != -1)
1461 (data->count)--;
1462
1463 snapshot_trigger(data, buffer, rec, event);
1464}
1465
1466static int
1467register_snapshot_trigger(char *glob,
1468 struct event_trigger_data *data,
1469 struct trace_event_file *file)
1470{
1471 if (tracing_alloc_snapshot_instance(file->tr) != 0)
1472 return -ENOMEM;
1473
1474 return register_trigger(glob, data, file);
1475}
1476
1477static int
1478snapshot_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1479{
1480 return event_trigger_print("snapshot", m, (void *)data->count,
1481 data->filter_str);
1482}
1483
1484static struct event_trigger_ops snapshot_trigger_ops = {
1485 .trigger = snapshot_trigger,
1486 .print = snapshot_trigger_print,
1487 .init = event_trigger_init,
1488 .free = event_trigger_free,
1489};
1490
1491static struct event_trigger_ops snapshot_count_trigger_ops = {
1492 .trigger = snapshot_count_trigger,
1493 .print = snapshot_trigger_print,
1494 .init = event_trigger_init,
1495 .free = event_trigger_free,
1496};
1497
1498static struct event_trigger_ops *
1499snapshot_get_trigger_ops(char *cmd, char *param)
1500{
1501 return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1502}
1503
1504static struct event_command trigger_snapshot_cmd = {
1505 .name = "snapshot",
1506 .trigger_type = ETT_SNAPSHOT,
1507 .parse = event_trigger_parse,
1508 .reg = register_snapshot_trigger,
1509 .unreg = unregister_trigger,
1510 .get_trigger_ops = snapshot_get_trigger_ops,
1511 .set_filter = set_trigger_filter,
1512};
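
/*
 * Example (user-facing usage): snapshot the tracing buffer when the
 * event fires, here at most once:
 *
 *	echo 'snapshot:1' > /sys/kernel/tracing/events/<system>/<event>/trigger
 */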
1513
1514static __init int register_trigger_snapshot_cmd(void)
1515{
1516 int ret;
1517
1518 ret = register_event_command(&trigger_snapshot_cmd);
1519 WARN_ON(ret < 0);
1520
1521 return ret;
1522}
1523#else
1524static __init int register_trigger_snapshot_cmd(void) { return 0; }
1525#endif /* CONFIG_TRACER_SNAPSHOT */
1526
1527#ifdef CONFIG_STACKTRACE
1528#ifdef CONFIG_UNWINDER_ORC
1529/* Skip 2:
1530 * event_triggers_post_call()
1531 * trace_event_raw_event_xxx()
1532 */
1533# define STACK_SKIP 2
1534#else
1535/*
1536 * Skip 4:
1537 * stacktrace_trigger()
1538 * event_triggers_post_call()
1539 * trace_event_buffer_commit()
1540 * trace_event_raw_event_xxx()
1541 */
1542#define STACK_SKIP 4
1543#endif
1544
1545static void
1546stacktrace_trigger(struct event_trigger_data *data,
1547 struct trace_buffer *buffer, void *rec,
1548 struct ring_buffer_event *event)
1549{
1550 struct trace_event_file *file = data->private_data;
1551
1552 if (file)
1553 __trace_stack(file->tr, tracing_gen_ctx(), STACK_SKIP);
1554 else
1555 trace_dump_stack(STACK_SKIP);
1556}
1557
1558static void
1559stacktrace_count_trigger(struct event_trigger_data *data,
1560 struct trace_buffer *buffer, void *rec,
1561 struct ring_buffer_event *event)
1562{
1563 if (!data->count)
1564 return;
1565
1566 if (data->count != -1)
1567 (data->count)--;
1568
1569 stacktrace_trigger(data, buffer, rec, event);
1570}
1571
1572static int
1573stacktrace_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1574{
1575 return event_trigger_print("stacktrace", m, (void *)data->count,
1576 data->filter_str);
1577}
1578
1579static struct event_trigger_ops stacktrace_trigger_ops = {
1580 .trigger = stacktrace_trigger,
1581 .print = stacktrace_trigger_print,
1582 .init = event_trigger_init,
1583 .free = event_trigger_free,
1584};
1585
1586static struct event_trigger_ops stacktrace_count_trigger_ops = {
1587 .trigger = stacktrace_count_trigger,
1588 .print = stacktrace_trigger_print,
1589 .init = event_trigger_init,
1590 .free = event_trigger_free,
1591};
1592
1593static struct event_trigger_ops *
1594stacktrace_get_trigger_ops(char *cmd, char *param)
1595{
1596 return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1597}
1598
1599static struct event_command trigger_stacktrace_cmd = {
1600 .name = "stacktrace",
1601 .trigger_type = ETT_STACKTRACE,
1602 .flags = EVENT_CMD_FL_POST_TRIGGER,
1603 .parse = event_trigger_parse,
1604 .reg = register_trigger,
1605 .unreg = unregister_trigger,
1606 .get_trigger_ops = stacktrace_get_trigger_ops,
1607 .set_filter = set_trigger_filter,
1608};
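
/*
 * Example (user-facing usage): record a stack trace each time the
 * event fires, optionally limited and filtered:
 *
 *	echo 'stacktrace' > /sys/kernel/tracing/events/<system>/<event>/trigger
 *	echo 'stacktrace:5 if common_pid == 1234' > .../trigger
 */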
1609
1610static __init int register_trigger_stacktrace_cmd(void)
1611{
1612 int ret;
1613
1614 ret = register_event_command(&trigger_stacktrace_cmd);
1615 WARN_ON(ret < 0);
1616
1617 return ret;
1618}
1619#else
1620static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1621#endif /* CONFIG_STACKTRACE */
1622
1623static __init void unregister_trigger_traceon_traceoff_cmds(void)
1624{
1625 unregister_event_command(&trigger_traceon_cmd);
1626 unregister_event_command(&trigger_traceoff_cmd);
1627}
1628
1629static void
1630event_enable_trigger(struct event_trigger_data *data,
1631 struct trace_buffer *buffer, void *rec,
1632 struct ring_buffer_event *event)
1633{
1634 struct enable_trigger_data *enable_data = data->private_data;
1635
1636 if (enable_data->enable)
1637 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1638 else
1639 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1640}
1641
1642static void
1643event_enable_count_trigger(struct event_trigger_data *data,
1644 struct trace_buffer *buffer, void *rec,
1645 struct ring_buffer_event *event)
1646{
1647 struct enable_trigger_data *enable_data = data->private_data;
1648
1649 if (!data->count)
1650 return;
1651
1652 /* Skip if the event is in a state we want to switch to */
1653 if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1654 return;
1655
1656 if (data->count != -1)
1657 (data->count)--;
1658
1659 event_enable_trigger(data, buffer, rec, event);
1660}
1661
1662int event_enable_trigger_print(struct seq_file *m,
1663 struct event_trigger_data *data)
1664{
1665 struct enable_trigger_data *enable_data = data->private_data;
1666
1667 seq_printf(m, "%s:%s:%s",
1668 enable_data->hist ?
1669 (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1670 (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1671 enable_data->file->event_call->class->system,
1672 trace_event_name(enable_data->file->event_call));
1673
1674 if (data->count == -1)
1675 seq_puts(m, ":unlimited");
1676 else
1677 seq_printf(m, ":count=%ld", data->count);
1678
1679 if (data->filter_str)
1680 seq_printf(m, " if %s\n", data->filter_str);
1681 else
1682 seq_putc(m, '\n');
1683
1684 return 0;
1685}
1686
1687void event_enable_trigger_free(struct event_trigger_data *data)
1688{
1689 struct enable_trigger_data *enable_data = data->private_data;
1690
1691 if (WARN_ON_ONCE(data->ref <= 0))
1692 return;
1693
1694 data->ref--;
1695 if (!data->ref) {
1696 /* Remove the SOFT_MODE flag */
1697 trace_event_enable_disable(enable_data->file, 0, 1);
1698 trace_event_put_ref(enable_data->file->event_call);
1699 trigger_data_free(data);
1700 kfree(enable_data);
1701 }
1702}
1703
1704static struct event_trigger_ops event_enable_trigger_ops = {
1705 .trigger = event_enable_trigger,
1706 .print = event_enable_trigger_print,
1707 .init = event_trigger_init,
1708 .free = event_enable_trigger_free,
1709};
1710
1711static struct event_trigger_ops event_enable_count_trigger_ops = {
1712 .trigger = event_enable_count_trigger,
1713 .print = event_enable_trigger_print,
1714 .init = event_trigger_init,
1715 .free = event_enable_trigger_free,
1716};
1717
1718static struct event_trigger_ops event_disable_trigger_ops = {
1719 .trigger = event_enable_trigger,
1720 .print = event_enable_trigger_print,
1721 .init = event_trigger_init,
1722 .free = event_enable_trigger_free,
1723};
1724
1725static struct event_trigger_ops event_disable_count_trigger_ops = {
1726 .trigger = event_enable_count_trigger,
1727 .print = event_enable_trigger_print,
1728 .init = event_trigger_init,
1729 .free = event_enable_trigger_free,
1730};
1731
1732int event_enable_trigger_parse(struct event_command *cmd_ops,
1733 struct trace_event_file *file,
1734 char *glob, char *cmd, char *param_and_filter)
1735{
1736 struct trace_event_file *event_enable_file;
1737 struct enable_trigger_data *enable_data;
1738 struct event_trigger_data *trigger_data;
1739 struct trace_array *tr = file->tr;
1740 char *param, *filter;
1741 bool enable, remove;
1742 const char *system;
1743 const char *event;
1744 bool hist = false;
1745 int ret;
1746
1747 remove = event_trigger_check_remove(glob);
1748
1749 if (event_trigger_empty_param(param_and_filter))
1750 return -EINVAL;
1751
1752 ret = event_trigger_separate_filter(param_and_filter, &param, &filter, true);
1753 if (ret)
1754 return ret;
1755
1756 system = strsep(&param, ":");
1757 if (!param)
1758 return -EINVAL;
1759
1760 event = strsep(&param, ":");
1761
1762 ret = -EINVAL;
1763 event_enable_file = find_event_file(tr, system, event);
1764 if (!event_enable_file)
1765 goto out;
1766
1767#ifdef CONFIG_HIST_TRIGGERS
1768 hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1769 (strcmp(cmd, DISABLE_HIST_STR) == 0));
1770
1771 enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1772 (strcmp(cmd, ENABLE_HIST_STR) == 0));
1773#else
1774 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1775#endif
1776 ret = -ENOMEM;
1777
1778 enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1779 if (!enable_data)
1780 goto out;
1781
1782 enable_data->hist = hist;
1783 enable_data->enable = enable;
1784 enable_data->file = event_enable_file;
1785
1786 trigger_data = event_trigger_alloc(cmd_ops, cmd, param, enable_data);
1787 if (!trigger_data) {
1788 kfree(enable_data);
1789 goto out;
1790 }
1791
1792 if (remove) {
1793 event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
1794 kfree(trigger_data);
1795 kfree(enable_data);
1796 ret = 0;
1797 goto out;
1798 }
1799
1800 /* Up the trigger_data count to make sure nothing frees it on failure */
1801 event_trigger_init(trigger_data);
1802
1803 ret = event_trigger_parse_num(param, trigger_data);
1804 if (ret)
1805 goto out_free;
1806
1807 ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
1808 if (ret < 0)
1809 goto out_free;
1810
1811 /* Don't let event modules unload while probe registered */
1812 ret = trace_event_try_get_ref(event_enable_file->event_call);
1813 if (!ret) {
1814 ret = -EBUSY;
1815 goto out_free;
1816 }
1817
1818 ret = trace_event_enable_disable(event_enable_file, 1, 1);
1819 if (ret < 0)
1820 goto out_put;
1821
1822 ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
1823 if (ret)
1824 goto out_disable;
1825
1826 event_trigger_free(trigger_data);
1827 out:
1828 return ret;
1829 out_disable:
1830 trace_event_enable_disable(event_enable_file, 0, 1);
1831 out_put:
1832 trace_event_put_ref(event_enable_file->event_call);
1833 out_free:
1834 event_trigger_reset_filter(cmd_ops, trigger_data);
1835 event_trigger_free(trigger_data);
1836 kfree(enable_data);
1837
1838 goto out;
1839}
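
/*
 * Example (user-facing usage): enable or disable another event
 * whenever this event fires, optionally a limited number of times;
 * sched:sched_switch is just an illustrative target event:
 *
 *	echo 'enable_event:sched:sched_switch' > .../trigger
 *	echo 'disable_event:sched:sched_switch:5' > .../trigger
 */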
1840
1841int event_enable_register_trigger(char *glob,
1842 struct event_trigger_data *data,
1843 struct trace_event_file *file)
1844{
1845 struct enable_trigger_data *enable_data = data->private_data;
1846 struct enable_trigger_data *test_enable_data;
1847 struct event_trigger_data *test;
1848 int ret = 0;
1849
1850 lockdep_assert_held(&event_mutex);
1851
1852 list_for_each_entry(test, &file->triggers, list) {
1853 test_enable_data = test->private_data;
1854 if (test_enable_data &&
1855 (test->cmd_ops->trigger_type ==
1856 data->cmd_ops->trigger_type) &&
1857 (test_enable_data->file == enable_data->file)) {
1858 ret = -EEXIST;
1859 goto out;
1860 }
1861 }
1862
1863 if (data->ops->init) {
1864 ret = data->ops->init(data);
1865 if (ret < 0)
1866 goto out;
1867 }
1868
1869 list_add_rcu(&data->list, &file->triggers);
1870
1871 update_cond_flag(file);
1872 ret = trace_event_trigger_enable_disable(file, 1);
1873 if (ret < 0) {
1874 list_del_rcu(&data->list);
1875 update_cond_flag(file);
1876 }
1877out:
1878 return ret;
1879}
1880
1881void event_enable_unregister_trigger(char *glob,
1882 struct event_trigger_data *test,
1883 struct trace_event_file *file)
1884{
1885 struct enable_trigger_data *test_enable_data = test->private_data;
1886 struct event_trigger_data *data = NULL, *iter;
1887 struct enable_trigger_data *enable_data;
1888
1889 lockdep_assert_held(&event_mutex);
1890
1891 list_for_each_entry(iter, &file->triggers, list) {
1892 enable_data = iter->private_data;
1893 if (enable_data &&
1894 (iter->cmd_ops->trigger_type ==
1895 test->cmd_ops->trigger_type) &&
1896 (enable_data->file == test_enable_data->file)) {
1897 data = iter;
1898 list_del_rcu(&data->list);
1899 trace_event_trigger_enable_disable(file, 0);
1900 update_cond_flag(file);
1901 break;
1902 }
1903 }
1904
1905 if (data && data->ops->free)
1906 data->ops->free(data);
1907}
1908
1909static struct event_trigger_ops *
1910event_enable_get_trigger_ops(char *cmd, char *param)
1911{
1912 struct event_trigger_ops *ops;
1913 bool enable;
1914
1915#ifdef CONFIG_HIST_TRIGGERS
1916 enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1917 (strcmp(cmd, ENABLE_HIST_STR) == 0));
1918#else
1919 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1920#endif
1921 if (enable)
1922 ops = param ? &event_enable_count_trigger_ops :
1923 &event_enable_trigger_ops;
1924 else
1925 ops = param ? &event_disable_count_trigger_ops :
1926 &event_disable_trigger_ops;
1927
1928 return ops;
1929}
1930
1931static struct event_command trigger_enable_cmd = {
1932 .name = ENABLE_EVENT_STR,
1933 .trigger_type = ETT_EVENT_ENABLE,
1934 .parse = event_enable_trigger_parse,
1935 .reg = event_enable_register_trigger,
1936 .unreg = event_enable_unregister_trigger,
1937 .get_trigger_ops = event_enable_get_trigger_ops,
1938 .set_filter = set_trigger_filter,
1939};
1940
1941static struct event_command trigger_disable_cmd = {
1942 .name = DISABLE_EVENT_STR,
1943 .trigger_type = ETT_EVENT_ENABLE,
1944 .parse = event_enable_trigger_parse,
1945 .reg = event_enable_register_trigger,
1946 .unreg = event_enable_unregister_trigger,
1947 .get_trigger_ops = event_enable_get_trigger_ops,
1948 .set_filter = set_trigger_filter,
1949};
1950
1951static __init void unregister_trigger_enable_disable_cmds(void)
1952{
1953 unregister_event_command(&trigger_enable_cmd);
1954 unregister_event_command(&trigger_disable_cmd);
1955}
1956
1957static __init int register_trigger_enable_disable_cmds(void)
1958{
1959 int ret;
1960
1961 ret = register_event_command(&trigger_enable_cmd);
1962 if (WARN_ON(ret < 0))
1963 return ret;
1964 ret = register_event_command(&trigger_disable_cmd);
1965 if (WARN_ON(ret < 0))
1966 unregister_trigger_enable_disable_cmds();
1967
1968 return ret;
1969}
1970
1971static __init int register_trigger_traceon_traceoff_cmds(void)
1972{
1973 int ret;
1974
1975 ret = register_event_command(&trigger_traceon_cmd);
1976 if (WARN_ON(ret < 0))
1977 return ret;
1978 ret = register_event_command(&trigger_traceoff_cmd);
1979 if (WARN_ON(ret < 0))
1980 unregister_trigger_traceon_traceoff_cmds();
1981
1982 return ret;
1983}
1984
1985__init int register_trigger_cmds(void)
1986{
1987 register_trigger_traceon_traceoff_cmds();
1988 register_trigger_snapshot_cmd();
1989 register_trigger_stacktrace_cmd();
1990 register_trigger_enable_disable_cmds();
1991 register_trigger_hist_enable_disable_cmds();
1992 register_trigger_hist_cmd();
1993
1994 return 0;
1995}