// SPDX-License-Identifier: GPL-2.0
/* Include in trace.c */

#include <uapi/linux/sched/types.h>
#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RETADDR_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

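/*
 * Consume every event queued on the given CPU and check that each
 * entry has a known type. On the first invalid entry, or if we loop
 * more times than the buffer can hold, tracing is disabled and -1 is
 * returned.
 */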
static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds trace_buf_size entries; if we
		 * loop more times than that, something is wrong with
		 * the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&buf->tr->max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&buf->tr->max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

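/*
 * The probe callbacks below do nothing but count how many times they
 * are called. The tests use these counters to verify that each
 * ftrace_ops traces exactly the functions it was filtered on.
 */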
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct ftrace_regs *fregs)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func = trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
	.func = trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
	.func = trace_selftest_test_probe3_func,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

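/*
 * Exercise several ftrace_ops at once: three statically filtered
 * probes, optionally the per-array ops (when cnt > 1), and a
 * dynamically allocated probe. After each call to the two test
 * functions the counters are checked against the filters set up
 * below, and once everything is unregistered the counters must no
 * longer change.
 */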
static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	/*
	 * On the first run the global function tracer is still active;
	 * on later runs register our own per-array ops to do the
	 * global counting.
	 */
	if (cnt > 1) {
		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
		register_ftrace_function(tr->ops);
	}

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out;
	}

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	/* Remove trace function from probe 3 */
	func1_name = "!" __stringify(DYN_FTRACE_TEST_NAME);
	len1 = strlen(func1_name);

	ftrace_set_filter(&test_probe3, func1_name, len1, 0);

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 3)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 3)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 3)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 5)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	if (cnt > 1)
		unregister_ftrace_function(tr->ops);
	ftrace_reset_array_ops(tr);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
						  struct trace_array *tr,
						  int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* passed in as a parameter to keep gcc from optimizing the call away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	ftrace_enabled = 1;
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(tr, 1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(tr, 2);

	return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct ftrace_regs *fregs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct ftrace_regs *fregs)
{
	/*
	 * We said we would provide our own recursion. By calling
	 * this function again, we should recurse back into this function
	 * and count again. But this only happens if the arch supports
	 * all of ftrace features and nothing else is using the function
	 * tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
	.func = trace_selftest_test_recursion_func,
	.flags = FTRACE_OPS_FL_RECURSION,
};

static struct ftrace_ops test_recsafe_probe = {
	.func = trace_selftest_test_recursion_safe_func,
};

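/*
 * First verify that ftrace's own recursion protection keeps
 * test_rec_probe from endlessly re-entering the traced function,
 * then verify that a callback handling its own recursion really
 * does recurse exactly once.
 */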
static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	/*
	 * Recursion allows for transitions between contexts,
	 * and may call the callback twice.
	 */
	if (trace_selftest_recursion_cnt != 1 &&
	    trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called once (or twice) (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct ftrace_regs *fregs)
{
	struct pt_regs *regs = ftrace_get_regs(fregs);

	if (regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
	.func = trace_selftest_test_regs_func,
	.flags = FTRACE_OPS_FL_SAVE_REGS,
};

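/*
 * Verify FTRACE_OPS_FL_SAVE_REGS: with arch support the callback must
 * receive a valid pt_regs; without it the plain registration must fail
 * and the FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED fallback must hand the
 * callback NULL regs.
 */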
static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	ftrace_enabled = 1;
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

#define CHAR_NUMBER 123
#define SHORT_NUMBER 12345
#define WORD_NUMBER 1234567890
#define LONG_NUMBER 1234567890123456789LL
#define ERRSTR_BUFLEN 128

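/*
 * One fixture per storage size under test. store_entry() reserves
 * per-call data on function entry and writes a known value into it;
 * store_return() retrieves the data on return and verifies the value,
 * clearing error_str on success.
 */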
struct fgraph_fixture {
	struct fgraph_ops gops;
	int store_size;
	const char *store_type_name;
	char error_str_buf[ERRSTR_BUFLEN];
	char *error_str;
};

static __init int store_entry(struct ftrace_graph_ent *trace,
			      struct fgraph_ops *gops)
{
	struct fgraph_fixture *fixture = container_of(gops, struct fgraph_fixture, gops);
	const char *type = fixture->store_type_name;
	int size = fixture->store_size;
	void *p;

	p = fgraph_reserve_data(gops->idx, size);
	if (!p) {
		snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
			 "Failed to reserve %s\n", type);
		return 0;
	}

	switch (size) {
	case 1:
		*(char *)p = CHAR_NUMBER;
		break;
	case 2:
		*(short *)p = SHORT_NUMBER;
		break;
	case 4:
		*(int *)p = WORD_NUMBER;
		break;
	case 8:
		*(long long *)p = LONG_NUMBER;
		break;
	}

	return 1;
}

static __init void store_return(struct ftrace_graph_ret *trace,
				struct fgraph_ops *gops)
{
	struct fgraph_fixture *fixture = container_of(gops, struct fgraph_fixture, gops);
	const char *type = fixture->store_type_name;
	long long expect = 0;
	long long found = -1;
	int size;
	char *p;

	p = fgraph_retrieve_data(gops->idx, &size);
	if (!p) {
		snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
			 "Failed to retrieve %s\n", type);
		return;
	}
	if (fixture->store_size > size) {
		snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
			 "Retrieved size %d is smaller than expected %d\n",
			 size, (int)fixture->store_size);
		return;
	}

	switch (fixture->store_size) {
	case 1:
		expect = CHAR_NUMBER;
		found = *(char *)p;
		break;
	case 2:
		expect = SHORT_NUMBER;
		found = *(short *)p;
		break;
	case 4:
		expect = WORD_NUMBER;
		found = *(int *)p;
		break;
	case 8:
		expect = LONG_NUMBER;
		found = *(long long *)p;
		break;
	}

	if (found != expect) {
		snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
			 "%s returned not %lld but %lld\n", type, expect, found);
		return;
	}
	fixture->error_str = NULL;
}

static int __init init_fgraph_fixture(struct fgraph_fixture *fixture)
{
	char *func_name;
	int len;

	snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
		 "Failed to execute storage %s\n", fixture->store_type_name);
	fixture->error_str = fixture->error_str_buf;

	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	return ftrace_set_filter(&fixture->gops.ops, func_name, len, 1);
}

/* Test fgraph storage for each size */
static int __init test_graph_storage_single(struct fgraph_fixture *fixture)
{
	int size = fixture->store_size;
	int ret;

	pr_cont("PASSED\n");
	pr_info("Testing fgraph storage of %d byte%s: ", size, str_plural(size));

	ret = init_fgraph_fixture(fixture);
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		return -1;
	}

	ret = register_ftrace_graph(&fixture->gops);
	if (ret) {
		pr_warn("Failed to init store_bytes fgraph tracing\n");
		return -1;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_graph(&fixture->gops);

	if (fixture->error_str) {
		pr_cont("*** %s ***", fixture->error_str);
		return -1;
	}

	return 0;
}

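/* One fixture for each supported storage size: 1, 2, 4 and 8 bytes */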
static struct fgraph_fixture store_bytes[4] __initdata = {
	[0] = {
		.gops = {
			.entryfunc = store_entry,
			.retfunc = store_return,
		},
		.store_size = 1,
		.store_type_name = "byte",
	},
	[1] = {
		.gops = {
			.entryfunc = store_entry,
			.retfunc = store_return,
		},
		.store_size = 2,
		.store_type_name = "short",
	},
	[2] = {
		.gops = {
			.entryfunc = store_entry,
			.retfunc = store_return,
		},
		.store_size = 4,
		.store_type_name = "word",
	},
	[3] = {
		.gops = {
			.entryfunc = store_entry,
			.retfunc = store_return,
		},
		.store_size = 8,
		.store_type_name = "long long",
	},
};

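/*
 * Register all four fixtures on the same function at once to make
 * sure each fgraph_ops gets its own reserved data for a single call.
 */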
static __init int test_graph_storage_multi(void)
{
	struct fgraph_fixture *fixture;
	bool printed = false;
	int i, j, ret;

	pr_cont("PASSED\n");
	pr_info("Testing multiple fgraph storage on a function: ");

	for (i = 0; i < ARRAY_SIZE(store_bytes); i++) {
		fixture = &store_bytes[i];
		ret = init_fgraph_fixture(fixture);
		if (ret && ret != -ENODEV) {
			pr_cont("*Could not set filter* ");
			printed = true;
			goto out2;
		}
	}

	for (j = 0; j < ARRAY_SIZE(store_bytes); j++) {
		fixture = &store_bytes[j];
		ret = register_ftrace_graph(&fixture->gops);
		if (ret) {
			pr_warn("Failed to init store_bytes fgraph tracing\n");
			printed = true;
			goto out1;
		}
	}

	DYN_FTRACE_TEST_NAME();
out1:
	while (--j >= 0) {
		fixture = &store_bytes[j];
		unregister_ftrace_graph(&fixture->gops);

		if (fixture->error_str && !printed) {
			pr_cont("*** %s ***", fixture->error_str);
			printed = true;
		}
	}
out2:
	while (--i >= 0) {
		fixture = &store_bytes[i];
		ftrace_free_filter(&fixture->gops.ops);

		if (fixture->error_str && !printed) {
			pr_cont("*** %s ***", fixture->error_str);
			printed = true;
		}
	}
	return printed ? -1 : 0;
}

/* Test the storage passed across function_graph entry and return */
static __init int test_graph_storage(void)
{
	int ret;

	ret = test_graph_storage_single(&store_bytes[0]);
	if (ret)
		return ret;
	ret = test_graph_storage_single(&store_bytes[1]);
	if (ret)
		return ret;
	ret = test_graph_storage_single(&store_bytes[2]);
	if (ret)
		return ret;
	ret = test_graph_storage_single(&store_bytes[3]);
	if (ret)
		return ret;
	ret = test_graph_storage_multi();
	if (ret)
		return ret;
	return 0;
}
#else
static inline int test_graph_storage(void) { return 0; }
#endif /* CONFIG_DYNAMIC_FTRACE */

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST 100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace,
				      struct fgraph_ops *gops)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops_enabled()) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	return trace_graph_entry(trace, gops);
}

static struct fgraph_ops fgraph_ops __initdata = {
	.entryfunc = &trace_graph_entry_watchdog,
	.retfunc = &trace_graph_return,
};

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
static struct ftrace_ops direct;
#endif

/*
 * Pretty much the same as the function tracer selftest, from which
 * this test has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;
	char *func_name __maybe_unused;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(&tr->array_buffer);
	fgraph_ops.private = tr;
	ret = register_ftrace_graph(&fgraph_ops);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		disable_tracing_selftest("recovering from a hang");
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	/* Need to also simulate the tr->reset to remove this fgraph_ops */
	tracing_stop_cmdline_record();
	unregister_ftrace_graph(&fgraph_ops);

	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	/*
	 * These tests can take some time to run. Make sure on non PREEMPT
	 * kernels, we do not trigger the softlockup detector.
	 */
	cond_resched();

	tracing_reset_online_cpus(&tr->array_buffer);
	fgraph_ops.private = tr;

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/*
	 * Register direct function together with graph tracer
	 * and make sure we get graph trace.
	 */
	ftrace_set_filter_ip(&direct, (unsigned long)DYN_FTRACE_TEST_NAME, 0, 0);
	ret = register_ftrace_direct(&direct,
				     (unsigned long)ftrace_stub_direct_tramp);
	if (ret)
		goto out;

	cond_resched();

	ret = register_ftrace_graph(&fgraph_ops);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	count = 0;

	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	unregister_ftrace_graph(&fgraph_ops);

	ret = unregister_ftrace_direct(&direct,
				       (unsigned long)ftrace_stub_direct_tramp,
				       true);
	if (ret)
		goto out;

	cond_resched();

	tracing_start();

	if (!ret && !count) {
		ret = -1;
		goto out;
	}

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);
#endif

	ret = test_graph_storage();

	/* Don't test dynamic tracing, the function tracer already did */
out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
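/*
 * Test the irqsoff tracer: disable interrupts for 100us under the
 * tracer and verify that the max-latency snapshot recorded entries.
 */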
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning about a failed buffer
	 * flip: tracing_stop() disables the tr and max buffers, which
	 * makes flipping impossible if a max irqs-off latency is being
	 * recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning about a failed buffer
	 * flip: tracing_stop() disables the tr and max buffers, which
	 * makes flipping impossible if a max preempt-off latency is
	 * being recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning about a failed buffer
	 * flip: tracing_stop() disables the tr and max buffers, which
	 * makes flipping impossible if a max irqs/preempt-off latency
	 * is being recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tr->max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER

struct wakeup_test_data {
	struct completion is_ready;
	int go;
};

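/*
 * Kernel thread used as the wakeup target: it switches itself to
 * SCHED_DEADLINE, signals that it is ready, sleeps until the test
 * wakes it, signals again, and then waits to be stopped.
 */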
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a -deadline thread */
	static const struct sched_attr attr = {
		.sched_policy = SCHED_DEADLINE,
		.sched_runtime = 100000ULL,
		.sched_deadline = 10000000ULL,
		.sched_period = 10000000ULL
	};
	struct wakeup_test_data *x = data;

	sched_setattr(current, &attr);

	/* Let the test know we are running at the new priority */
	complete(&x->is_ready);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!x->go) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	complete(&x->is_ready);

	set_current_state(TASK_INTERRUPTIBLE);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}
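
/*
 * Test the wakeup tracer by waking up the highest priority task in
 * the system (a -deadline thread) and checking that the resulting
 * wakeup latency was recorded in the max buffer.
 */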
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	struct task_struct *p;
	struct wakeup_test_data data;
	unsigned long count;
	int ret;

	memset(&data, 0, sizeof(data));

	init_completion(&data.is_ready);

	/* create a -deadline thread */
	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at -deadline policy */
	wait_for_completion(&data.is_ready);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	while (task_is_runnable(p)) {
		/*
		 * Sleep to make sure the -deadline thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&data.is_ready);

	data.go = 1;
	/* memory barrier is in the wake_up_process() */

	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&data.is_ready);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);

	trace->reset(tr);
	tracing_start();

	tr->max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */
1// SPDX-License-Identifier: GPL-2.0
2/* Include in trace.c */
3
4#include <uapi/linux/sched/types.h>
5#include <linux/stringify.h>
6#include <linux/kthread.h>
7#include <linux/delay.h>
8#include <linux/slab.h>
9
10static inline int trace_valid_entry(struct trace_entry *entry)
11{
12 switch (entry->type) {
13 case TRACE_FN:
14 case TRACE_CTX:
15 case TRACE_WAKE:
16 case TRACE_STACK:
17 case TRACE_PRINT:
18 case TRACE_BRANCH:
19 case TRACE_GRAPH_ENT:
20 case TRACE_GRAPH_RET:
21 return 1;
22 }
23 return 0;
24}
25
26static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
27{
28 struct ring_buffer_event *event;
29 struct trace_entry *entry;
30 unsigned int loops = 0;
31
32 while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
33 entry = ring_buffer_event_data(event);
34
35 /*
36 * The ring buffer is a size of trace_buf_size, if
37 * we loop more than the size, there's something wrong
38 * with the ring buffer.
39 */
40 if (loops++ > trace_buf_size) {
41 printk(KERN_CONT ".. bad ring buffer ");
42 goto failed;
43 }
44 if (!trace_valid_entry(entry)) {
45 printk(KERN_CONT ".. invalid entry %d ",
46 entry->type);
47 goto failed;
48 }
49 }
50 return 0;
51
52 failed:
53 /* disable tracing */
54 tracing_disabled = 1;
55 printk(KERN_CONT ".. corrupted trace buffer .. ");
56 return -1;
57}
58
59/*
60 * Test the trace buffer to see if all the elements
61 * are still sane.
62 */
63static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
64{
65 unsigned long flags, cnt = 0;
66 int cpu, ret = 0;
67
68 /* Don't allow flipping of max traces now */
69 local_irq_save(flags);
70 arch_spin_lock(&buf->tr->max_lock);
71
72 cnt = ring_buffer_entries(buf->buffer);
73
74 /*
75 * The trace_test_buffer_cpu runs a while loop to consume all data.
76 * If the calling tracer is broken, and is constantly filling
77 * the buffer, this will run forever, and hard lock the box.
78 * We disable the ring buffer while we do this test to prevent
79 * a hard lock up.
80 */
81 tracing_off();
82 for_each_possible_cpu(cpu) {
83 ret = trace_test_buffer_cpu(buf, cpu);
84 if (ret)
85 break;
86 }
87 tracing_on();
88 arch_spin_unlock(&buf->tr->max_lock);
89 local_irq_restore(flags);
90
91 if (count)
92 *count = cnt;
93
94 return ret;
95}
96
97static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
98{
99 printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
100 trace->name, init_ret);
101}
102#ifdef CONFIG_FUNCTION_TRACER
103
104#ifdef CONFIG_DYNAMIC_FTRACE
105
106static int trace_selftest_test_probe1_cnt;
107static void trace_selftest_test_probe1_func(unsigned long ip,
108 unsigned long pip,
109 struct ftrace_ops *op,
110 struct ftrace_regs *fregs)
111{
112 trace_selftest_test_probe1_cnt++;
113}
114
115static int trace_selftest_test_probe2_cnt;
116static void trace_selftest_test_probe2_func(unsigned long ip,
117 unsigned long pip,
118 struct ftrace_ops *op,
119 struct ftrace_regs *fregs)
120{
121 trace_selftest_test_probe2_cnt++;
122}
123
124static int trace_selftest_test_probe3_cnt;
125static void trace_selftest_test_probe3_func(unsigned long ip,
126 unsigned long pip,
127 struct ftrace_ops *op,
128 struct ftrace_regs *fregs)
129{
130 trace_selftest_test_probe3_cnt++;
131}
132
133static int trace_selftest_test_global_cnt;
134static void trace_selftest_test_global_func(unsigned long ip,
135 unsigned long pip,
136 struct ftrace_ops *op,
137 struct ftrace_regs *fregs)
138{
139 trace_selftest_test_global_cnt++;
140}
141
142static int trace_selftest_test_dyn_cnt;
143static void trace_selftest_test_dyn_func(unsigned long ip,
144 unsigned long pip,
145 struct ftrace_ops *op,
146 struct ftrace_regs *fregs)
147{
148 trace_selftest_test_dyn_cnt++;
149}
150
151static struct ftrace_ops test_probe1 = {
152 .func = trace_selftest_test_probe1_func,
153};
154
155static struct ftrace_ops test_probe2 = {
156 .func = trace_selftest_test_probe2_func,
157};
158
159static struct ftrace_ops test_probe3 = {
160 .func = trace_selftest_test_probe3_func,
161};
162
163static void print_counts(void)
164{
165 printk("(%d %d %d %d %d) ",
166 trace_selftest_test_probe1_cnt,
167 trace_selftest_test_probe2_cnt,
168 trace_selftest_test_probe3_cnt,
169 trace_selftest_test_global_cnt,
170 trace_selftest_test_dyn_cnt);
171}
172
173static void reset_counts(void)
174{
175 trace_selftest_test_probe1_cnt = 0;
176 trace_selftest_test_probe2_cnt = 0;
177 trace_selftest_test_probe3_cnt = 0;
178 trace_selftest_test_global_cnt = 0;
179 trace_selftest_test_dyn_cnt = 0;
180}
181
182static int trace_selftest_ops(struct trace_array *tr, int cnt)
183{
184 int save_ftrace_enabled = ftrace_enabled;
185 struct ftrace_ops *dyn_ops;
186 char *func1_name;
187 char *func2_name;
188 int len1;
189 int len2;
190 int ret = -1;
191
192 printk(KERN_CONT "PASSED\n");
193 pr_info("Testing dynamic ftrace ops #%d: ", cnt);
194
195 ftrace_enabled = 1;
196 reset_counts();
197
198 /* Handle PPC64 '.' name */
199 func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
200 func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
201 len1 = strlen(func1_name);
202 len2 = strlen(func2_name);
203
204 /*
205 * Probe 1 will trace function 1.
206 * Probe 2 will trace function 2.
207 * Probe 3 will trace functions 1 and 2.
208 */
209 ftrace_set_filter(&test_probe1, func1_name, len1, 1);
210 ftrace_set_filter(&test_probe2, func2_name, len2, 1);
211 ftrace_set_filter(&test_probe3, func1_name, len1, 1);
212 ftrace_set_filter(&test_probe3, func2_name, len2, 0);
213
214 register_ftrace_function(&test_probe1);
215 register_ftrace_function(&test_probe2);
216 register_ftrace_function(&test_probe3);
217 /* First time we are running with main function */
218 if (cnt > 1) {
219 ftrace_init_array_ops(tr, trace_selftest_test_global_func);
220 register_ftrace_function(tr->ops);
221 }
222
223 DYN_FTRACE_TEST_NAME();
224
225 print_counts();
226
227 if (trace_selftest_test_probe1_cnt != 1)
228 goto out;
229 if (trace_selftest_test_probe2_cnt != 0)
230 goto out;
231 if (trace_selftest_test_probe3_cnt != 1)
232 goto out;
233 if (cnt > 1) {
234 if (trace_selftest_test_global_cnt == 0)
235 goto out;
236 }
237
238 DYN_FTRACE_TEST_NAME2();
239
240 print_counts();
241
242 if (trace_selftest_test_probe1_cnt != 1)
243 goto out;
244 if (trace_selftest_test_probe2_cnt != 1)
245 goto out;
246 if (trace_selftest_test_probe3_cnt != 2)
247 goto out;
248
249 /* Add a dynamic probe */
250 dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
251 if (!dyn_ops) {
252 printk("MEMORY ERROR ");
253 goto out;
254 }
255
256 dyn_ops->func = trace_selftest_test_dyn_func;
257
258 register_ftrace_function(dyn_ops);
259
260 trace_selftest_test_global_cnt = 0;
261
262 DYN_FTRACE_TEST_NAME();
263
264 print_counts();
265
266 if (trace_selftest_test_probe1_cnt != 2)
267 goto out_free;
268 if (trace_selftest_test_probe2_cnt != 1)
269 goto out_free;
270 if (trace_selftest_test_probe3_cnt != 3)
271 goto out_free;
272 if (cnt > 1) {
273 if (trace_selftest_test_global_cnt == 0)
274 goto out_free;
275 }
276 if (trace_selftest_test_dyn_cnt == 0)
277 goto out_free;
278
279 DYN_FTRACE_TEST_NAME2();
280
281 print_counts();
282
283 if (trace_selftest_test_probe1_cnt != 2)
284 goto out_free;
285 if (trace_selftest_test_probe2_cnt != 2)
286 goto out_free;
287 if (trace_selftest_test_probe3_cnt != 4)
288 goto out_free;
289
290 ret = 0;
291 out_free:
292 unregister_ftrace_function(dyn_ops);
293 kfree(dyn_ops);
294
295 out:
296 /* Purposely unregister in the same order */
297 unregister_ftrace_function(&test_probe1);
298 unregister_ftrace_function(&test_probe2);
299 unregister_ftrace_function(&test_probe3);
300 if (cnt > 1)
301 unregister_ftrace_function(tr->ops);
302 ftrace_reset_array_ops(tr);
303
304 /* Make sure everything is off */
305 reset_counts();
306 DYN_FTRACE_TEST_NAME();
307 DYN_FTRACE_TEST_NAME();
308
309 if (trace_selftest_test_probe1_cnt ||
310 trace_selftest_test_probe2_cnt ||
311 trace_selftest_test_probe3_cnt ||
312 trace_selftest_test_global_cnt ||
313 trace_selftest_test_dyn_cnt)
314 ret = -1;
315
316 ftrace_enabled = save_ftrace_enabled;
317
318 return ret;
319}
320
321/* Test dynamic code modification and ftrace filters */
322static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
323 struct trace_array *tr,
324 int (*func)(void))
325{
326 int save_ftrace_enabled = ftrace_enabled;
327 unsigned long count;
328 char *func_name;
329 int ret;
330
331 /* The ftrace test PASSED */
332 printk(KERN_CONT "PASSED\n");
333 pr_info("Testing dynamic ftrace: ");
334
335 /* enable tracing, and record the filter function */
336 ftrace_enabled = 1;
337
338 /* passed in by parameter to fool gcc from optimizing */
339 func();
340
341 /*
342 * Some archs *cough*PowerPC*cough* add characters to the
343 * start of the function names. We simply put a '*' to
344 * accommodate them.
345 */
346 func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
347
348 /* filter only on our function */
349 ftrace_set_global_filter(func_name, strlen(func_name), 1);
350
351 /* enable tracing */
352 ret = tracer_init(trace, tr);
353 if (ret) {
354 warn_failed_init_tracer(trace, ret);
355 goto out;
356 }
357
358 /* Sleep for a 1/10 of a second */
359 msleep(100);
360
361 /* we should have nothing in the buffer */
362 ret = trace_test_buffer(&tr->array_buffer, &count);
363 if (ret)
364 goto out;
365
366 if (count) {
367 ret = -1;
368 printk(KERN_CONT ".. filter did not filter .. ");
369 goto out;
370 }
371
372 /* call our function again */
373 func();
374
375 /* sleep again */
376 msleep(100);
377
378 /* stop the tracing. */
379 tracing_stop();
380 ftrace_enabled = 0;
381
382 /* check the trace buffer */
383 ret = trace_test_buffer(&tr->array_buffer, &count);
384
385 ftrace_enabled = 1;
386 tracing_start();
387
388 /* we should only have one item */
389 if (!ret && count != 1) {
390 trace->reset(tr);
391 printk(KERN_CONT ".. filter failed count=%ld ..", count);
392 ret = -1;
393 goto out;
394 }
395
396 /* Test the ops with global tracing running */
397 ret = trace_selftest_ops(tr, 1);
398 trace->reset(tr);
399
400 out:
401 ftrace_enabled = save_ftrace_enabled;
402
403 /* Enable tracing on all functions again */
404 ftrace_set_global_filter(NULL, 0, 1);
405
406 /* Test the ops with global tracing off */
407 if (!ret)
408 ret = trace_selftest_ops(tr, 2);
409
410 return ret;
411}
412
413static int trace_selftest_recursion_cnt;
414static void trace_selftest_test_recursion_func(unsigned long ip,
415 unsigned long pip,
416 struct ftrace_ops *op,
417 struct ftrace_regs *fregs)
418{
419 /*
420 * This function is registered without the recursion safe flag.
421 * The ftrace infrastructure should provide the recursion
422 * protection. If not, this will crash the kernel!
423 */
424 if (trace_selftest_recursion_cnt++ > 10)
425 return;
426 DYN_FTRACE_TEST_NAME();
427}
428
429static void trace_selftest_test_recursion_safe_func(unsigned long ip,
430 unsigned long pip,
431 struct ftrace_ops *op,
432 struct ftrace_regs *fregs)
433{
434 /*
435 * We said we would provide our own recursion. By calling
436 * this function again, we should recurse back into this function
437 * and count again. But this only happens if the arch supports
438 * all of ftrace features and nothing else is using the function
439 * tracing utility.
440 */
441 if (trace_selftest_recursion_cnt++)
442 return;
443 DYN_FTRACE_TEST_NAME();
444}
445
446static struct ftrace_ops test_rec_probe = {
447 .func = trace_selftest_test_recursion_func,
448 .flags = FTRACE_OPS_FL_RECURSION,
449};
450
451static struct ftrace_ops test_recsafe_probe = {
452 .func = trace_selftest_test_recursion_safe_func,
453};
454
455static int
456trace_selftest_function_recursion(void)
457{
458 int save_ftrace_enabled = ftrace_enabled;
459 char *func_name;
460 int len;
461 int ret;
462
463 /* The previous test PASSED */
464 pr_cont("PASSED\n");
465 pr_info("Testing ftrace recursion: ");
466
467
468 /* enable tracing, and record the filter function */
469 ftrace_enabled = 1;
470
471 /* Handle PPC64 '.' name */
472 func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
473 len = strlen(func_name);
474
475 ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
476 if (ret) {
477 pr_cont("*Could not set filter* ");
478 goto out;
479 }
480
481 ret = register_ftrace_function(&test_rec_probe);
482 if (ret) {
483 pr_cont("*could not register callback* ");
484 goto out;
485 }
486
487 DYN_FTRACE_TEST_NAME();
488
489 unregister_ftrace_function(&test_rec_probe);
490
491 ret = -1;
492 /*
493 * Recursion allows for transitions between context,
494 * and may call the callback twice.
495 */
496 if (trace_selftest_recursion_cnt != 1 &&
497 trace_selftest_recursion_cnt != 2) {
498 pr_cont("*callback not called once (or twice) (%d)* ",
499 trace_selftest_recursion_cnt);
500 goto out;
501 }
502
503 trace_selftest_recursion_cnt = 1;
504
505 pr_cont("PASSED\n");
506 pr_info("Testing ftrace recursion safe: ");
507
508 ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
509 if (ret) {
510 pr_cont("*Could not set filter* ");
511 goto out;
512 }
513
514 ret = register_ftrace_function(&test_recsafe_probe);
515 if (ret) {
516 pr_cont("*could not register callback* ");
517 goto out;
518 }
519
520 DYN_FTRACE_TEST_NAME();
521
522 unregister_ftrace_function(&test_recsafe_probe);
523
524 ret = -1;
525 if (trace_selftest_recursion_cnt != 2) {
526 pr_cont("*callback not called expected 2 times (%d)* ",
527 trace_selftest_recursion_cnt);
528 goto out;
529 }
530
531 ret = 0;
532out:
533 ftrace_enabled = save_ftrace_enabled;
534
535 return ret;
536}
537#else
538# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
539# define trace_selftest_function_recursion() ({ 0; })
540#endif /* CONFIG_DYNAMIC_FTRACE */
541
542static enum {
543 TRACE_SELFTEST_REGS_START,
544 TRACE_SELFTEST_REGS_FOUND,
545 TRACE_SELFTEST_REGS_NOT_FOUND,
546} trace_selftest_regs_stat;
547
548static void trace_selftest_test_regs_func(unsigned long ip,
549 unsigned long pip,
550 struct ftrace_ops *op,
551 struct ftrace_regs *fregs)
552{
553 struct pt_regs *regs = ftrace_get_regs(fregs);
554
555 if (regs)
556 trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
557 else
558 trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
559}
560
561static struct ftrace_ops test_regs_probe = {
562 .func = trace_selftest_test_regs_func,
563 .flags = FTRACE_OPS_FL_SAVE_REGS,
564};
565
566static int
567trace_selftest_function_regs(void)
568{
569 int save_ftrace_enabled = ftrace_enabled;
570 char *func_name;
571 int len;
572 int ret;
573 int supported = 0;
574
575#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
576 supported = 1;
577#endif
578
579 /* The previous test PASSED */
580 pr_cont("PASSED\n");
581 pr_info("Testing ftrace regs%s: ",
582 !supported ? "(no arch support)" : "");
583
584 /* enable tracing, and record the filter function */
585 ftrace_enabled = 1;
586
587 /* Handle PPC64 '.' name */
588 func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
589 len = strlen(func_name);
590
591 ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
592 /*
593 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
594 * This test really doesn't care.
595 */
596 if (ret && ret != -ENODEV) {
597 pr_cont("*Could not set filter* ");
598 goto out;
599 }
600
601 ret = register_ftrace_function(&test_regs_probe);
602 /*
603 * Now if the arch does not support passing regs, then this should
604 * have failed.
605 */
606 if (!supported) {
607 if (!ret) {
608 pr_cont("*registered save-regs without arch support* ");
609 goto out;
610 }
611 test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
612 ret = register_ftrace_function(&test_regs_probe);
613 }
614 if (ret) {
615 pr_cont("*could not register callback* ");
616 goto out;
617 }
618
619
620 DYN_FTRACE_TEST_NAME();
621
622 unregister_ftrace_function(&test_regs_probe);
623
624 ret = -1;
625
626 switch (trace_selftest_regs_stat) {
627 case TRACE_SELFTEST_REGS_START:
628 pr_cont("*callback never called* ");
629 goto out;
630
631 case TRACE_SELFTEST_REGS_FOUND:
632 if (supported)
633 break;
634 pr_cont("*callback received regs without arch support* ");
635 goto out;
636
637 case TRACE_SELFTEST_REGS_NOT_FOUND:
638 if (!supported)
639 break;
640 pr_cont("*callback received NULL regs* ");
641 goto out;
642 }
643
644 ret = 0;
645out:
646 ftrace_enabled = save_ftrace_enabled;
647
648 return ret;
649}
650
651/*
652 * Simple verification test of ftrace function tracer.
653 * Enable ftrace, sleep 1/10 second, and then read the trace
654 * buffer to see if all is in order.
655 */
656__init int
657trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
658{
659 int save_ftrace_enabled = ftrace_enabled;
660 unsigned long count;
661 int ret;
662
663#ifdef CONFIG_DYNAMIC_FTRACE
664 if (ftrace_filter_param) {
665 printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
666 return 0;
667 }
668#endif
669
670 /* make sure msleep has been recorded */
671 msleep(1);
672
673 /* start the tracing */
674 ftrace_enabled = 1;
675
676 ret = tracer_init(trace, tr);
677 if (ret) {
678 warn_failed_init_tracer(trace, ret);
679 goto out;
680 }
681
682 /* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	ftrace_enabled = 1;
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000
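/*
 * The exact threshold is not critical: it just needs to exceed any
 * plausible number of entry events in the 100ms test window below, so
 * overflowing it can be read as a hang.
 */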

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	return trace_graph_entry(trace);
}

static struct fgraph_ops fgraph_ops __initdata = {
	.entryfunc = &trace_graph_entry_watchdog,
	.retfunc = &trace_graph_return,
};

/*
 * Pretty much the same as the function tracer, from which this
 * selftest has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback, but attach a watchdog callback
	 * to detect and recover from possible hangs.
	 */
	tracing_reset_online_cpus(&tr->array_buffer);
	set_graph_array(tr);
	ret = register_ftrace_graph(&fgraph_ops);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		disable_tracing_selftest("recovering from a hang");
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	/* Need to also simulate the tr->reset to remove this fgraph_ops */
	tracing_stop_cmdline_record();
	unregister_ftrace_graph(&fgraph_ops);

	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning on a later buffer-flip
	 * failure: tracing_stop() disables the tr and max buffers,
	 * which makes the flip impossible if a parallel max irqs-off
	 * latency comes in.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning on a later buffer-flip
	 * failure: tracing_stop() disables the tr and max buffers,
	 * which makes the flip impossible if a parallel max preempt-off
	 * latency comes in.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tr->max_latency = 0;

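	/*
	 * Note the unwind order below: preemption is re-enabled while
	 * interrupts are still disabled, so the irqs-off region extends
	 * past the preempt-off region and both tracers get exercised.
	 */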
	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning on a later buffer-flip
	 * failure: tracing_stop() disables the tr and max buffers,
	 * which makes the flip impossible if a parallel max
	 * irqs/preempt-off latency comes in.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);
	if (ret)
		goto out;

	if (!count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* run the same preempt+irq sequence again, with a fresh max latency */
	tr->max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

1034
1035#ifdef CONFIG_NOP_TRACER
1036int
1037trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
1038{
1039 /* What could possibly go wrong? */
1040 return 0;
1041}
1042#endif
1043
1044#ifdef CONFIG_SCHED_TRACER
1045
1046struct wakeup_test_data {
1047 struct completion is_ready;
1048 int go;
1049};
1050
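/*
 * The handshake below: the thread completes is_ready once after switching
 * to SCHED_DEADLINE, and again after it has been woken; the "go" flag lets
 * the sleep loop tell a real wakeup from a spurious one.
 */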
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a -deadline thread */
	static const struct sched_attr attr = {
		.sched_policy = SCHED_DEADLINE,
		.sched_runtime = 100000ULL,
		.sched_deadline = 10000000ULL,
		.sched_period = 10000000ULL
	};
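	/*
	 * Values are in nanoseconds: 100us of runtime every 10ms period,
	 * i.e. about 1% bandwidth. SCHED_DEADLINE outranks all other
	 * scheduling classes, which is what makes this thread a good
	 * wakeup-latency test subject.
	 */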
	struct wakeup_test_data *x = data;

	sched_setattr(current, &attr);

	/* Let the test know the new scheduling attributes took effect */
	complete(&x->is_ready);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!x->go) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	complete(&x->is_ready);

	set_current_state(TASK_INTERRUPTIBLE);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

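	/* kthread_should_stop() fired; leave in TASK_RUNNING before returning */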
	__set_current_state(TASK_RUNNING);

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	struct task_struct *p;
	struct wakeup_test_data data;
	unsigned long count;
	int ret;

	memset(&data, 0, sizeof(data));

	init_completion(&data.is_ready);

	/* create a -deadline thread */
	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at -deadline policy */
	wait_for_completion(&data.is_ready);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	while (p->on_rq) {
		/*
		 * Sleep to make sure the -deadline thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&data.is_ready);

	data.go = 1;
	/* memory barrier is in the wake_up_process() */

	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&data.is_ready);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);

	trace->reset(tr);
	tracing_start();

	tr->max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */
