/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

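/*
 * Each selftest below follows the same basic recipe: start the tracer
 * under test, generate the activity it is supposed to record, stop
 * tracing, then use trace_test_buffer() to check that the ring buffer
 * holds sane (and usually non-empty) data.  These routines are invoked
 * at boot through the ->selftest callback of each struct tracer when
 * CONFIG_FTRACE_STARTUP_TEST is enabled.
 */
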
static inline int trace_valid_entry(struct trace_entry *entry)
{
        switch (entry->type) {
        case TRACE_FN:
        case TRACE_CTX:
        case TRACE_WAKE:
        case TRACE_STACK:
        case TRACE_PRINT:
        case TRACE_BRANCH:
        case TRACE_GRAPH_ENT:
        case TRACE_GRAPH_RET:
                return 1;
        }
        return 0;
}

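/*
 * Walk one CPU's ring buffer and verify every event.  Note that
 * ring_buffer_consume() reads destructively: the buffer is empty
 * once this returns.
 */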
static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
{
        struct ring_buffer_event *event;
        struct trace_entry *entry;
        unsigned int loops = 0;

        while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
                entry = ring_buffer_event_data(event);

                /*
                 * The ring buffer holds at most trace_buf_size entries;
                 * if we loop more times than that, something is wrong
                 * with the ring buffer.
                 */
                if (loops++ > trace_buf_size) {
                        printk(KERN_CONT ".. bad ring buffer ");
                        goto failed;
                }
                if (!trace_valid_entry(entry)) {
                        printk(KERN_CONT ".. invalid entry %d ",
                                entry->type);
                        goto failed;
                }
        }
        return 0;

 failed:
        /* disable tracing */
        tracing_disabled = 1;
        printk(KERN_CONT ".. corrupted trace buffer .. ");
        return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
{
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;

        /* Don't allow flipping of max traces now */
        local_irq_save(flags);
        arch_spin_lock(&ftrace_max_lock);

        cnt = ring_buffer_entries(buf->buffer);

        /*
         * trace_test_buffer_cpu() loops until it has consumed all data.
         * If the calling tracer is broken and keeps filling the buffer,
         * that loop would run forever and hard-lock the box, so disable
         * the ring buffer for the duration of the test.
         */
        tracing_off();
        for_each_possible_cpu(cpu) {
                ret = trace_test_buffer_cpu(buf, cpu);
                if (ret)
                        break;
        }
        tracing_on();
        arch_spin_unlock(&ftrace_max_lock);
        local_irq_restore(flags);

        if (count)
                *count = cnt;

        return ret;
}

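/*
 * The tests below all lean on this helper with the same pattern, e.g.:
 *
 *	ret = trace_test_buffer(&tr->trace_buffer, &count);
 *	if (!ret && !count)
 *		ret = -1;	/* buffer was sane but empty: fail */
 */
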
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
        printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
               trace->name, init_ret);
}

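/*
 * For reference, a tracer wires one of these tests up from its
 * struct tracer definition.  A minimal sketch (field values follow
 * the function tracer; its real definition lives in trace_functions.c):
 *
 *	static struct tracer function_trace = {
 *		.name		= "function",
 *		.init		= function_trace_init,
 *		.reset		= function_trace_reset,
 *	#ifdef CONFIG_FTRACE_SELFTEST
 *		.selftest	= trace_selftest_startup_function,
 *	#endif
 *	};
 */
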
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct pt_regs *pt_regs)
{
        trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct pt_regs *pt_regs)
{
        trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct pt_regs *pt_regs)
{
        trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct pt_regs *pt_regs)
{
        trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
                                         unsigned long pip,
                                         struct ftrace_ops *op,
                                         struct pt_regs *pt_regs)
{
        trace_selftest_test_dyn_cnt++;
}

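/*
 * FTRACE_OPS_FL_RECURSION_SAFE tells ftrace that a callback handles
 * (or cannot hit) recursion itself, so the per-ops recursion guard can
 * be skipped.  The recursion selftest further down registers an ops
 * without this flag to prove the guard works.
 */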
static struct ftrace_ops test_probe1 = {
        .func = trace_selftest_test_probe1_func,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
        .func = trace_selftest_test_probe2_func,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
        .func = trace_selftest_test_probe3_func,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_global = {
        .func = trace_selftest_test_global_func,
        .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static void print_counts(void)
{
        printk("(%d %d %d %d %d) ",
               trace_selftest_test_probe1_cnt,
               trace_selftest_test_probe2_cnt,
               trace_selftest_test_probe3_cnt,
               trace_selftest_test_global_cnt,
               trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
        trace_selftest_test_probe1_cnt = 0;
        trace_selftest_test_probe2_cnt = 0;
        trace_selftest_test_probe3_cnt = 0;
        trace_selftest_test_global_cnt = 0;
        trace_selftest_test_dyn_cnt = 0;
}

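/*
 * probe1 and probe2 filter on one function each, probe3 on both.
 * The global and dynamic ops carry no filter, so they fire on every
 * traced function; the test therefore only checks that their counts
 * are nonzero.
 */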
static int trace_selftest_ops(int cnt)
{
        int save_ftrace_enabled = ftrace_enabled;
        struct ftrace_ops *dyn_ops;
        char *func1_name;
        char *func2_name;
        int len1;
        int len2;
        int ret = -1;

        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace ops #%d: ", cnt);

        ftrace_enabled = 1;
        reset_counts();

        /* Handle PPC64 '.' name */
        func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
        len1 = strlen(func1_name);
        len2 = strlen(func2_name);

        /*
         * Probe 1 will trace function 1.
         * Probe 2 will trace function 2.
         * Probe 3 will trace functions 1 and 2.
         */
        ftrace_set_filter(&test_probe1, func1_name, len1, 1);
        ftrace_set_filter(&test_probe2, func2_name, len2, 1);
        ftrace_set_filter(&test_probe3, func1_name, len1, 1);
        ftrace_set_filter(&test_probe3, func2_name, len2, 0);

        register_ftrace_function(&test_probe1);
        register_ftrace_function(&test_probe2);
        register_ftrace_function(&test_probe3);
        register_ftrace_function(&test_global);

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 1)
                goto out;
        if (trace_selftest_test_probe2_cnt != 0)
                goto out;
        if (trace_selftest_test_probe3_cnt != 1)
                goto out;
        if (trace_selftest_test_global_cnt == 0)
                goto out;

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 1)
                goto out;
        if (trace_selftest_test_probe2_cnt != 1)
                goto out;
        if (trace_selftest_test_probe3_cnt != 2)
                goto out;

        /* Add a dynamic probe */
        dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
        if (!dyn_ops) {
                printk("MEMORY ERROR ");
                goto out;
        }

        dyn_ops->func = trace_selftest_test_dyn_func;

        register_ftrace_function(dyn_ops);

        trace_selftest_test_global_cnt = 0;

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 1)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 3)
                goto out_free;
        /* dyn_ops is registered and allocated here, so it must be freed */
        if (trace_selftest_test_global_cnt == 0)
                goto out_free;
        if (trace_selftest_test_dyn_cnt == 0)
                goto out_free;

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 4)
                goto out_free;

        ret = 0;
 out_free:
        unregister_ftrace_function(dyn_ops);
        kfree(dyn_ops);

 out:
        /* Purposely unregister in the same order */
        unregister_ftrace_function(&test_probe1);
        unregister_ftrace_function(&test_probe2);
        unregister_ftrace_function(&test_probe3);
        unregister_ftrace_function(&test_global);

        /* Make sure everything is off */
        reset_counts();
        DYN_FTRACE_TEST_NAME();
        DYN_FTRACE_TEST_NAME();

        if (trace_selftest_test_probe1_cnt ||
            trace_selftest_test_probe2_cnt ||
            trace_selftest_test_probe3_cnt ||
            trace_selftest_test_global_cnt ||
            trace_selftest_test_dyn_cnt)
                ret = -1;

        ftrace_enabled = save_ftrace_enabled;

        return ret;
}

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                                           struct trace_array *tr,
                                           int (*func)(void))
{
        int save_ftrace_enabled = ftrace_enabled;
        unsigned long count;
        char *func_name;
        int ret;

        /* The ftrace test PASSED */
        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* call through the parameter so gcc cannot optimize the call away */
        func();

        /*
         * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
         * accommodate them.
         */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

        /* filter only on our function */
        ftrace_set_global_filter(func_name, strlen(func_name), 1);

        /* enable tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* we should have nothing in the buffer */
        ret = trace_test_buffer(&tr->trace_buffer, &count);
        if (ret)
                goto out;

        if (count) {
                ret = -1;
                printk(KERN_CONT ".. filter did not filter .. ");
                goto out;
        }

        /* call our function again */
        func();

        /* sleep again */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->trace_buffer, &count);
        tracing_start();

        /* we should only have one item */
        if (!ret && count != 1) {
                trace->reset(tr);
                printk(KERN_CONT ".. filter failed count=%ld ..", count);
                ret = -1;
                goto out;
        }

        /* Test the ops with global tracing running */
        ret = trace_selftest_ops(1);
        trace->reset(tr);

 out:
        ftrace_enabled = save_ftrace_enabled;

        /* Enable tracing on all functions again */
        ftrace_set_global_filter(NULL, 0, 1);

        /* Test the ops with global tracing off */
        if (!ret)
                ret = trace_selftest_ops(2);

        return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
                                               unsigned long pip,
                                               struct ftrace_ops *op,
                                               struct pt_regs *pt_regs)
{
        /*
         * This function is registered without the recursion safe flag.
         * The ftrace infrastructure should provide the recursion
         * protection. If not, this will crash the kernel!
         */
        if (trace_selftest_recursion_cnt++ > 10)
                return;
        DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
                                                    unsigned long pip,
                                                    struct ftrace_ops *op,
                                                    struct pt_regs *pt_regs)
{
        /*
         * We said we would provide our own recursion protection.  By
         * calling this function again, we should recurse back into this
         * function and count again.  But this only happens if the arch
         * supports all of the ftrace features and nothing else is using
         * the function tracing utility.
         */
        if (trace_selftest_recursion_cnt++)
                return;
        DYN_FTRACE_TEST_NAME();
}

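/*
 * test_rec_probe deliberately omits FTRACE_OPS_FL_RECURSION_SAFE, so
 * ftrace itself must stop the recursive call above; test_recsafe_probe
 * sets the flag and handles the single level of recursion on its own.
 */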
static struct ftrace_ops test_rec_probe = {
        .func = trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
        .func = trace_selftest_test_recursion_safe_func,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static int
trace_selftest_function_recursion(void)
{
        int save_ftrace_enabled = ftrace_enabled;
        char *func_name;
        int len;
        int ret;

        /* The previous test PASSED */
        pr_cont("PASSED\n");
        pr_info("Testing ftrace recursion: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* Handle PPC64 '.' name */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        len = strlen(func_name);

        ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
        if (ret) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_rec_probe);
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_rec_probe);

        ret = -1;
        if (trace_selftest_recursion_cnt != 1) {
                pr_cont("*callback not called once (%d)* ",
                        trace_selftest_recursion_cnt);
                goto out;
        }

        trace_selftest_recursion_cnt = 1;

        pr_cont("PASSED\n");
        pr_info("Testing ftrace recursion safe: ");

        ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
        if (ret) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_recsafe_probe);
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_recsafe_probe);

        ret = -1;
        if (trace_selftest_recursion_cnt != 2) {
                pr_cont("*callback not called expected 2 times (%d)* ",
                        trace_selftest_recursion_cnt);
                goto out;
        }

        ret = 0;
out:
        ftrace_enabled = save_ftrace_enabled;

        return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

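/*
 * Tri-state result for the save-regs test: the probe below records
 * whether it was handed a pt_regs pointer, and the test compares that
 * against what the architecture claims to support.
 */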
static enum {
        TRACE_SELFTEST_REGS_START,
        TRACE_SELFTEST_REGS_FOUND,
        TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
                                          unsigned long pip,
                                          struct ftrace_ops *op,
                                          struct pt_regs *pt_regs)
{
        if (pt_regs)
                trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
        else
                trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
        .func = trace_selftest_test_regs_func,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};

static int
trace_selftest_function_regs(void)
{
        int save_ftrace_enabled = ftrace_enabled;
        char *func_name;
        int len;
        int ret;
        int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        supported = 1;
#endif

        /* The previous test PASSED */
        pr_cont("PASSED\n");
        pr_info("Testing ftrace regs%s: ",
                !supported ? "(no arch support)" : "");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* Handle PPC64 '.' name */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        len = strlen(func_name);

        ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
        /*
         * If DYNAMIC_FTRACE is not set, then we just trace all functions.
         * This test really doesn't care.
         */
        if (ret && ret != -ENODEV) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_regs_probe);
        /*
         * Now if the arch does not support passing regs, then this should
         * have failed.
         */
        if (!supported) {
                if (!ret) {
                        pr_cont("*registered save-regs without arch support* ");
                        goto out;
                }
                test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
                ret = register_ftrace_function(&test_regs_probe);
        }
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_regs_probe);

        ret = -1;

        switch (trace_selftest_regs_stat) {
        case TRACE_SELFTEST_REGS_START:
                pr_cont("*callback never called* ");
                goto out;

        case TRACE_SELFTEST_REGS_FOUND:
                if (supported)
                        break;
                pr_cont("*callback received regs without arch support* ");
                goto out;

        case TRACE_SELFTEST_REGS_NOT_FOUND:
                if (!supported)
                        break;
                pr_cont("*callback received NULL regs* ");
                goto out;
        }

        ret = 0;
out:
        ftrace_enabled = save_ftrace_enabled;

        return ret;
}

/*
 * Simple verification test of the ftrace function tracer.
 * Enable ftrace, sleep for 1/10 of a second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
        int save_ftrace_enabled = ftrace_enabled;
        unsigned long count;
        int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
        if (ftrace_filter_param) {
                printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
                return 0;
        }
#endif

        /* make sure msleep has been recorded */
        msleep(1);

        /* start the tracing */
        ftrace_enabled = 1;

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->trace_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        ret = trace_selftest_startup_dynamic_tracing(trace, tr,
                                                     DYN_FTRACE_TEST_NAME);
        if (ret)
                goto out;

        ret = trace_selftest_function_recursion();
        if (ret)
                goto out;

        ret = trace_selftest_function_regs();
 out:
        ftrace_enabled = save_ftrace_enabled;

        /* kill ftrace totally if we failed */
        if (ret)
                ftrace_kill();

        return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST 100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
        /* This is harmlessly racy, we want to approximately detect a hang */
        if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
                ftrace_graph_stop();
                printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
                if (ftrace_dump_on_oops) {
                        ftrace_dump(DUMP_ALL);
                        /* ftrace_dump() disables tracing */
                        tracing_on();
                }
                return 0;
        }

        return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer test, from which this
 * selftest was borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
                                      struct trace_array *tr)
{
        int ret;
        unsigned long count;

#ifdef CONFIG_DYNAMIC_FTRACE
        if (ftrace_filter_param) {
                printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
                return 0;
        }
#endif

        /*
         * Simulate the init() callback, but attach a watchdog callback
         * to detect and recover from possible hangs.
         */
        tracing_reset_online_cpus(&tr->trace_buffer);
        set_graph_array(tr);
        ret = register_ftrace_graph(&trace_graph_return,
                                    &trace_graph_entry_watchdog);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }
        tracing_start_cmdline_record();

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* Have we just recovered from a hang? */
        if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
                tracing_selftest_disabled = true;
                ret = -1;
                goto out;
        }

        tracing_stop();

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->trace_buffer, &count);

        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* Don't test dynamic tracing, the function tracer already did */

out:
        /* Stop it if we failed */
        if (ret)
                ftrace_graph_stop();

        return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
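/*
 * Shared recipe for the latency tracer tests below (irqsoff,
 * preemptoff, preemptirqsoff, wakeup): create a known latency, e.g.
 *
 *	local_irq_disable();
 *	udelay(100);
 *	local_irq_enable();
 *
 * then verify that both the live buffer and the max-latency snapshot
 * buffer (tr->max_buffer) hold sane, non-empty traces.
 */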
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;
        /* disable interrupts for a bit */
        local_irq_disable();
        udelay(100);
        local_irq_enable();

        /*
         * Stop the tracer first to avoid a buffer-flip warning:
         * tracing_stop() disables both the tr and max buffers, which
         * makes flipping them impossible if a max irqs-off latency is
         * being recorded in parallel.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->trace_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;
        /* disable preemption for a bit */
        preempt_disable();
        udelay(100);
        preempt_enable();

        /*
         * Stop the tracer first to avoid a buffer-flip warning:
         * tracing_stop() disables both the tr and max buffers, which
         * makes flipping them impossible if a max preempt-off latency
         * is being recorded in parallel.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->trace_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out_no_start;
        }

        /* reset the max latency */
        tracing_max_latency = 0;

        /* disable preemption and interrupts for a bit */
        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        /*
         * Stop the tracer first to avoid a buffer-flip warning:
         * tracing_stop() disables both the tr and max buffers, which
         * makes flipping them impossible if a max irqs/preempt-off
         * latency is being recorded in parallel.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->trace_buffer, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&tr->max_buffer, &count);
        if (ret)
                goto out;

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* do the test by disabling interrupts first this time */
        tracing_max_latency = 0;
        tracing_start();
        trace->start(tr);

        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->trace_buffer, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&tr->max_buffer, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

out:
        tracing_start();
out_no_start:
        trace->reset(tr);
        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
        /* What could possibly go wrong? */
        return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
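/*
 * The wakeup tracer measures the wakeup latency of the highest
 * priority task on the system.  Using a SCHED_DEADLINE thread as the
 * wakeup target guarantees it outranks anything else (including RT
 * tasks) that may run while the selftest executes.
 */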
static int trace_wakeup_test_thread(void *data)
{
        /* Make this a -deadline thread */
        static const struct sched_attr attr = {
                .sched_policy = SCHED_DEADLINE,
                .sched_runtime = 100000ULL,
                .sched_deadline = 10000000ULL,
                .sched_period = 10000000ULL
        };
        struct completion *x = data;

        sched_setattr(current, &attr);

        /* Let the caller know we are running with the new policy */
        complete(x);

        /* now go to sleep and let the test wake us up */
        set_current_state(TASK_INTERRUPTIBLE);
        schedule();

        complete(x);

        /* we are awake, now wait to disappear */
        while (!kthread_should_stop()) {
                /*
                 * This will likely be the system's top priority task;
                 * do short sleeps to let others run.
                 */
                msleep(100);
        }

        return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        struct task_struct *p;
        struct completion is_ready;
        unsigned long count;
        int ret;

        init_completion(&is_ready);

        /* create a -deadline thread */
        p = kthread_run(trace_wakeup_test_thread, &is_ready, "ftrace-test");
        if (IS_ERR(p)) {
                printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
                return -1;
        }

        /* make sure the thread is running at -deadline policy */
        wait_for_completion(&is_ready);

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;

        while (p->on_rq) {
                /*
                 * Sleep to make sure the -deadline thread is asleep too.
                 * On virtual machines we can't rely on timings,
                 * but we want to make sure this test still works.
                 */
                msleep(100);
        }

        init_completion(&is_ready);

        wake_up_process(p);

        /* Wait for the task to wake up */
        wait_for_completion(&is_ready);

        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->trace_buffer, NULL);
        printk("ret = %d\n", ret);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);

        trace->reset(tr);
        tracing_start();

        tracing_max_latency = save_max;

        /* kill the thread */
        kthread_stop(p);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SCHED_TRACER */


#ifdef CONFIG_CONTEXT_SWITCH_TRACER
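/*
 * The remaining tests only need some scheduling activity: start the
 * tracer, sleep for 100ms so context switches (or branch events) get
 * recorded, then check that the buffer is sane and non-empty.
 */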
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(&tr->trace_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(&tr->trace_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_BRANCH_TRACER */