Linux Audio

v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/* Include in trace.c */
   3
   4#include <uapi/linux/sched/types.h>
   5#include <linux/stringify.h>
   6#include <linux/kthread.h>
   7#include <linux/delay.h>
   8#include <linux/slab.h>
   9
  10static inline int trace_valid_entry(struct trace_entry *entry)
  11{
  12	switch (entry->type) {
  13	case TRACE_FN:
  14	case TRACE_CTX:
  15	case TRACE_WAKE:
  16	case TRACE_STACK:
  17	case TRACE_PRINT:
  18	case TRACE_BRANCH:
  19	case TRACE_GRAPH_ENT:
  20	case TRACE_GRAPH_RET:
  21		return 1;
  22	}
  23	return 0;
  24}
  25
  26static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
  27{
  28	struct ring_buffer_event *event;
  29	struct trace_entry *entry;
  30	unsigned int loops = 0;
  31
  32	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
  33		entry = ring_buffer_event_data(event);
  34
  35		/*
   36		 * The ring buffer is of size trace_buf_size; if
  37		 * we loop more than the size, there's something wrong
  38		 * with the ring buffer.
  39		 */
  40		if (loops++ > trace_buf_size) {
  41			printk(KERN_CONT ".. bad ring buffer ");
  42			goto failed;
  43		}
  44		if (!trace_valid_entry(entry)) {
  45			printk(KERN_CONT ".. invalid entry %d ",
  46				entry->type);
  47			goto failed;
  48		}
  49	}
  50	return 0;
  51
  52 failed:
  53	/* disable tracing */
  54	tracing_disabled = 1;
  55	printk(KERN_CONT ".. corrupted trace buffer .. ");
  56	return -1;
  57}
  58
  59/*
  60 * Test the trace buffer to see if all the elements
  61 * are still sane.
  62 */
  63static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
  64{
  65	unsigned long flags, cnt = 0;
  66	int cpu, ret = 0;
  67
  68	/* Don't allow flipping of max traces now */
  69	local_irq_save(flags);
  70	arch_spin_lock(&buf->tr->max_lock);
  71
  72	cnt = ring_buffer_entries(buf->buffer);
  73
  74	/*
  75	 * The trace_test_buffer_cpu runs a while loop to consume all data.
  76	 * If the calling tracer is broken, and is constantly filling
  77	 * the buffer, this will run forever, and hard lock the box.
  78	 * We disable the ring buffer while we do this test to prevent
  79	 * a hard lock up.
  80	 */
  81	tracing_off();
  82	for_each_possible_cpu(cpu) {
  83		ret = trace_test_buffer_cpu(buf, cpu);
  84		if (ret)
  85			break;
  86	}
  87	tracing_on();
  88	arch_spin_unlock(&buf->tr->max_lock);
  89	local_irq_restore(flags);
  90
  91	if (count)
  92		*count = cnt;
  93
  94	return ret;
  95}
  96
  97static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
  98{
  99	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
 100		trace->name, init_ret);
 101}
 102#ifdef CONFIG_FUNCTION_TRACER
 103
 104#ifdef CONFIG_DYNAMIC_FTRACE
 105
 106static int trace_selftest_test_probe1_cnt;
 107static void trace_selftest_test_probe1_func(unsigned long ip,
 108					    unsigned long pip,
 109					    struct ftrace_ops *op,
 110					    struct ftrace_regs *fregs)
 111{
 112	trace_selftest_test_probe1_cnt++;
 113}
 114
 115static int trace_selftest_test_probe2_cnt;
 116static void trace_selftest_test_probe2_func(unsigned long ip,
 117					    unsigned long pip,
 118					    struct ftrace_ops *op,
 119					    struct ftrace_regs *fregs)
 120{
 121	trace_selftest_test_probe2_cnt++;
 122}
 123
 124static int trace_selftest_test_probe3_cnt;
 125static void trace_selftest_test_probe3_func(unsigned long ip,
 126					    unsigned long pip,
 127					    struct ftrace_ops *op,
 128					    struct ftrace_regs *fregs)
 129{
 130	trace_selftest_test_probe3_cnt++;
 131}
 132
 133static int trace_selftest_test_global_cnt;
 134static void trace_selftest_test_global_func(unsigned long ip,
 135					    unsigned long pip,
 136					    struct ftrace_ops *op,
 137					    struct ftrace_regs *fregs)
 138{
 139	trace_selftest_test_global_cnt++;
 140}
 141
 142static int trace_selftest_test_dyn_cnt;
 143static void trace_selftest_test_dyn_func(unsigned long ip,
 144					 unsigned long pip,
 145					 struct ftrace_ops *op,
 146					 struct ftrace_regs *fregs)
 147{
 148	trace_selftest_test_dyn_cnt++;
 149}
 150
 151static struct ftrace_ops test_probe1 = {
 152	.func			= trace_selftest_test_probe1_func,
 153};
 154
 155static struct ftrace_ops test_probe2 = {
 156	.func			= trace_selftest_test_probe2_func,
 157};
 158
 159static struct ftrace_ops test_probe3 = {
 160	.func			= trace_selftest_test_probe3_func,
 161};
 162
 163static void print_counts(void)
 164{
 165	printk("(%d %d %d %d %d) ",
 166	       trace_selftest_test_probe1_cnt,
 167	       trace_selftest_test_probe2_cnt,
 168	       trace_selftest_test_probe3_cnt,
 169	       trace_selftest_test_global_cnt,
 170	       trace_selftest_test_dyn_cnt);
 171}
 172
 173static void reset_counts(void)
 174{
 175	trace_selftest_test_probe1_cnt = 0;
 176	trace_selftest_test_probe2_cnt = 0;
 177	trace_selftest_test_probe3_cnt = 0;
 178	trace_selftest_test_global_cnt = 0;
 179	trace_selftest_test_dyn_cnt = 0;
 180}
 181
 182static int trace_selftest_ops(struct trace_array *tr, int cnt)
 183{
 184	int save_ftrace_enabled = ftrace_enabled;
 185	struct ftrace_ops *dyn_ops;
 186	char *func1_name;
 187	char *func2_name;
 188	int len1;
 189	int len2;
 190	int ret = -1;
 191
 192	printk(KERN_CONT "PASSED\n");
 193	pr_info("Testing dynamic ftrace ops #%d: ", cnt);
 194
 195	ftrace_enabled = 1;
 196	reset_counts();
 197
 198	/* Handle PPC64 '.' name */
 199	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 200	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
 201	len1 = strlen(func1_name);
 202	len2 = strlen(func2_name);
 203
 204	/*
 205	 * Probe 1 will trace function 1.
 206	 * Probe 2 will trace function 2.
 207	 * Probe 3 will trace functions 1 and 2.
 208	 */
 209	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
 210	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
 211	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
 212	ftrace_set_filter(&test_probe3, func2_name, len2, 0);
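	/*
	 * Note: the last argument to ftrace_set_filter() is "reset";
	 * passing 1 clears any previously set filter before adding the
	 * pattern, while 0 appends to it.  That is why test_probe3 ends
	 * up matching both test functions.
	 */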
 213
 214	register_ftrace_function(&test_probe1);
 215	register_ftrace_function(&test_probe2);
 216	register_ftrace_function(&test_probe3);
 217	/* First time we are running with main function */
 218	if (cnt > 1) {
 219		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
 220		register_ftrace_function(tr->ops);
 221	}
 222
 223	DYN_FTRACE_TEST_NAME();
 224
 225	print_counts();
 226
 227	if (trace_selftest_test_probe1_cnt != 1)
 228		goto out;
 229	if (trace_selftest_test_probe2_cnt != 0)
 230		goto out;
 231	if (trace_selftest_test_probe3_cnt != 1)
 232		goto out;
 233	if (cnt > 1) {
 234		if (trace_selftest_test_global_cnt == 0)
 235			goto out;
 236	}
 237
 238	DYN_FTRACE_TEST_NAME2();
 239
 240	print_counts();
 241
 242	if (trace_selftest_test_probe1_cnt != 1)
 243		goto out;
 244	if (trace_selftest_test_probe2_cnt != 1)
 245		goto out;
 246	if (trace_selftest_test_probe3_cnt != 2)
 247		goto out;
 248
 249	/* Add a dynamic probe */
 250	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
 251	if (!dyn_ops) {
 252		printk("MEMORY ERROR ");
 253		goto out;
 254	}
 255
 256	dyn_ops->func = trace_selftest_test_dyn_func;
 257
 258	register_ftrace_function(dyn_ops);
 259
 260	trace_selftest_test_global_cnt = 0;
 261
 262	DYN_FTRACE_TEST_NAME();
 263
 264	print_counts();
 265
 266	if (trace_selftest_test_probe1_cnt != 2)
 267		goto out_free;
 268	if (trace_selftest_test_probe2_cnt != 1)
 269		goto out_free;
 270	if (trace_selftest_test_probe3_cnt != 3)
 271		goto out_free;
 272	if (cnt > 1) {
 273		if (trace_selftest_test_global_cnt == 0)
 274			goto out_free;
 275	}
 276	if (trace_selftest_test_dyn_cnt == 0)
 277		goto out_free;
 278
 279	DYN_FTRACE_TEST_NAME2();
 280
 281	print_counts();
 282
 283	if (trace_selftest_test_probe1_cnt != 2)
 284		goto out_free;
 285	if (trace_selftest_test_probe2_cnt != 2)
 286		goto out_free;
 287	if (trace_selftest_test_probe3_cnt != 4)
 288		goto out_free;
 289
 290	/* Remove trace function from probe 3 */
 291	func1_name = "!" __stringify(DYN_FTRACE_TEST_NAME);
 292	len1 = strlen(func1_name);
 293
 294	ftrace_set_filter(&test_probe3, func1_name, len1, 0);
 295
 296	DYN_FTRACE_TEST_NAME();
 297
 298	print_counts();
 299
 300	if (trace_selftest_test_probe1_cnt != 3)
 301		goto out_free;
 302	if (trace_selftest_test_probe2_cnt != 2)
 303		goto out_free;
 304	if (trace_selftest_test_probe3_cnt != 4)
 305		goto out_free;
 306	if (cnt > 1) {
 307		if (trace_selftest_test_global_cnt == 0)
 308			goto out_free;
 309	}
 310	if (trace_selftest_test_dyn_cnt == 0)
 311		goto out_free;
 312
 313	DYN_FTRACE_TEST_NAME2();
 314
 315	print_counts();
 316
 317	if (trace_selftest_test_probe1_cnt != 3)
 318		goto out_free;
 319	if (trace_selftest_test_probe2_cnt != 3)
 320		goto out_free;
 321	if (trace_selftest_test_probe3_cnt != 5)
 322		goto out_free;
 323
 324	ret = 0;
 325 out_free:
 326	unregister_ftrace_function(dyn_ops);
 327	kfree(dyn_ops);
 328
 329 out:
 330	/* Purposely unregister in the same order */
 331	unregister_ftrace_function(&test_probe1);
 332	unregister_ftrace_function(&test_probe2);
 333	unregister_ftrace_function(&test_probe3);
 334	if (cnt > 1)
 335		unregister_ftrace_function(tr->ops);
 336	ftrace_reset_array_ops(tr);
 337
 338	/* Make sure everything is off */
 339	reset_counts();
 340	DYN_FTRACE_TEST_NAME();
 341	DYN_FTRACE_TEST_NAME();
 342
 343	if (trace_selftest_test_probe1_cnt ||
 344	    trace_selftest_test_probe2_cnt ||
 345	    trace_selftest_test_probe3_cnt ||
 346	    trace_selftest_test_global_cnt ||
 347	    trace_selftest_test_dyn_cnt)
 348		ret = -1;
 349
 350	ftrace_enabled = save_ftrace_enabled;
 351
 352	return ret;
 353}
 354
 355/* Test dynamic code modification and ftrace filters */
 356static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 357						  struct trace_array *tr,
 358						  int (*func)(void))
 359{
 360	int save_ftrace_enabled = ftrace_enabled;
 361	unsigned long count;
 362	char *func_name;
 363	int ret;
 364
 365	/* The ftrace test PASSED */
 366	printk(KERN_CONT "PASSED\n");
 367	pr_info("Testing dynamic ftrace: ");
 368
 369	/* enable tracing, and record the filter function */
 370	ftrace_enabled = 1;
 371
  372	/* passed in by parameter to keep gcc from optimizing it away */
 373	func();
 374
 375	/*
 376	 * Some archs *cough*PowerPC*cough* add characters to the
 377	 * start of the function names. We simply put a '*' to
 378	 * accommodate them.
 379	 */
 380	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 381
 382	/* filter only on our function */
 383	ftrace_set_global_filter(func_name, strlen(func_name), 1);
 384
 385	/* enable tracing */
 386	ret = tracer_init(trace, tr);
 387	if (ret) {
 388		warn_failed_init_tracer(trace, ret);
 389		goto out;
 390	}
 391
 392	/* Sleep for a 1/10 of a second */
 393	msleep(100);
 394
 395	/* we should have nothing in the buffer */
 396	ret = trace_test_buffer(&tr->array_buffer, &count);
 397	if (ret)
 398		goto out;
 399
 400	if (count) {
 401		ret = -1;
 402		printk(KERN_CONT ".. filter did not filter .. ");
 403		goto out;
 404	}
 405
 406	/* call our function again */
 407	func();
 408
 409	/* sleep again */
 410	msleep(100);
 411
 412	/* stop the tracing. */
 413	tracing_stop();
 414	ftrace_enabled = 0;
 415
 416	/* check the trace buffer */
 417	ret = trace_test_buffer(&tr->array_buffer, &count);
 418
 419	ftrace_enabled = 1;
 420	tracing_start();
 421
 422	/* we should only have one item */
 423	if (!ret && count != 1) {
 424		trace->reset(tr);
 425		printk(KERN_CONT ".. filter failed count=%ld ..", count);
 426		ret = -1;
 427		goto out;
 428	}
 429
 430	/* Test the ops with global tracing running */
 431	ret = trace_selftest_ops(tr, 1);
 432	trace->reset(tr);
 433
 434 out:
 435	ftrace_enabled = save_ftrace_enabled;
 436
 437	/* Enable tracing on all functions again */
 438	ftrace_set_global_filter(NULL, 0, 1);
 439
 440	/* Test the ops with global tracing off */
 441	if (!ret)
 442		ret = trace_selftest_ops(tr, 2);
 443
 444	return ret;
 445}
 446
 447static int trace_selftest_recursion_cnt;
 448static void trace_selftest_test_recursion_func(unsigned long ip,
 449					       unsigned long pip,
 450					       struct ftrace_ops *op,
 451					       struct ftrace_regs *fregs)
 452{
 453	/*
 454	 * This function is registered without the recursion safe flag.
 455	 * The ftrace infrastructure should provide the recursion
 456	 * protection. If not, this will crash the kernel!
 457	 */
 458	if (trace_selftest_recursion_cnt++ > 10)
 459		return;
 460	DYN_FTRACE_TEST_NAME();
 461}
 462
 463static void trace_selftest_test_recursion_safe_func(unsigned long ip,
 464						    unsigned long pip,
 465						    struct ftrace_ops *op,
 466						    struct ftrace_regs *fregs)
 467{
 468	/*
 469	 * We said we would provide our own recursion. By calling
 470	 * this function again, we should recurse back into this function
 471	 * and count again. But this only happens if the arch supports
 472	 * all of ftrace features and nothing else is using the function
 473	 * tracing utility.
 474	 */
 475	if (trace_selftest_recursion_cnt++)
 476		return;
 477	DYN_FTRACE_TEST_NAME();
 478}
 479
 480static struct ftrace_ops test_rec_probe = {
 481	.func			= trace_selftest_test_recursion_func,
 482	.flags			= FTRACE_OPS_FL_RECURSION,
 483};
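/*
 * Note: in this kernel, setting FTRACE_OPS_FL_RECURSION asks the ftrace
 * core to wrap the callback with recursion protection (the opposite of
 * the older FTRACE_OPS_FL_RECURSION_SAFE convention), which is what the
 * recursion test above relies on.
 */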
 484
 485static struct ftrace_ops test_recsafe_probe = {
 486	.func			= trace_selftest_test_recursion_safe_func,
 487};
 488
 489static int
 490trace_selftest_function_recursion(void)
 491{
 492	int save_ftrace_enabled = ftrace_enabled;
 493	char *func_name;
 494	int len;
 495	int ret;
 496
 497	/* The previous test PASSED */
 498	pr_cont("PASSED\n");
 499	pr_info("Testing ftrace recursion: ");
 500
 501
 502	/* enable tracing, and record the filter function */
 503	ftrace_enabled = 1;
 504
 505	/* Handle PPC64 '.' name */
 506	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 507	len = strlen(func_name);
 508
 509	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
 510	if (ret) {
 511		pr_cont("*Could not set filter* ");
 512		goto out;
 513	}
 514
 515	ret = register_ftrace_function(&test_rec_probe);
 516	if (ret) {
 517		pr_cont("*could not register callback* ");
 518		goto out;
 519	}
 520
 521	DYN_FTRACE_TEST_NAME();
 522
 523	unregister_ftrace_function(&test_rec_probe);
 524
 525	ret = -1;
 526	/*
 527	 * Recursion allows for transitions between context,
 528	 * and may call the callback twice.
 529	 */
 530	if (trace_selftest_recursion_cnt != 1 &&
 531	    trace_selftest_recursion_cnt != 2) {
 532		pr_cont("*callback not called once (or twice) (%d)* ",
 533			trace_selftest_recursion_cnt);
 534		goto out;
 535	}
 536
 537	trace_selftest_recursion_cnt = 1;
 538
 539	pr_cont("PASSED\n");
 540	pr_info("Testing ftrace recursion safe: ");
 541
 542	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
 543	if (ret) {
 544		pr_cont("*Could not set filter* ");
 545		goto out;
 546	}
 547
 548	ret = register_ftrace_function(&test_recsafe_probe);
 549	if (ret) {
 550		pr_cont("*could not register callback* ");
 551		goto out;
 552	}
 553
 554	DYN_FTRACE_TEST_NAME();
 555
 556	unregister_ftrace_function(&test_recsafe_probe);
 557
 558	ret = -1;
 559	if (trace_selftest_recursion_cnt != 2) {
 560		pr_cont("*callback not called expected 2 times (%d)* ",
 561			trace_selftest_recursion_cnt);
 562		goto out;
 563	}
 564
 565	ret = 0;
 566out:
 567	ftrace_enabled = save_ftrace_enabled;
 568
 569	return ret;
 570}
 571#else
 572# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
 573# define trace_selftest_function_recursion() ({ 0; })
 574#endif /* CONFIG_DYNAMIC_FTRACE */
 575
 576static enum {
 577	TRACE_SELFTEST_REGS_START,
 578	TRACE_SELFTEST_REGS_FOUND,
 579	TRACE_SELFTEST_REGS_NOT_FOUND,
 580} trace_selftest_regs_stat;
 581
 582static void trace_selftest_test_regs_func(unsigned long ip,
 583					  unsigned long pip,
 584					  struct ftrace_ops *op,
 585					  struct ftrace_regs *fregs)
 586{
 587	struct pt_regs *regs = ftrace_get_regs(fregs);
 588
 589	if (regs)
 590		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
 591	else
 592		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
 593}
 594
 595static struct ftrace_ops test_regs_probe = {
 596	.func		= trace_selftest_test_regs_func,
 597	.flags		= FTRACE_OPS_FL_SAVE_REGS,
 598};
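/*
 * FTRACE_OPS_FL_SAVE_REGS requests a full pt_regs at every callback and
 * only works when the arch supports it (CONFIG_DYNAMIC_FTRACE_WITH_REGS);
 * the test below falls back to FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED when
 * registration with the strict flag is rejected.
 */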
 599
 600static int
 601trace_selftest_function_regs(void)
 602{
 603	int save_ftrace_enabled = ftrace_enabled;
 604	char *func_name;
 605	int len;
 606	int ret;
 607	int supported = 0;
 608
 609#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 610	supported = 1;
 611#endif
 612
 613	/* The previous test PASSED */
 614	pr_cont("PASSED\n");
 615	pr_info("Testing ftrace regs%s: ",
 616		!supported ? "(no arch support)" : "");
 617
 618	/* enable tracing, and record the filter function */
 619	ftrace_enabled = 1;
 620
 621	/* Handle PPC64 '.' name */
 622	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 623	len = strlen(func_name);
 624
 625	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
 626	/*
 627	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
 628	 * This test really doesn't care.
 629	 */
 630	if (ret && ret != -ENODEV) {
 631		pr_cont("*Could not set filter* ");
 632		goto out;
 633	}
 634
 635	ret = register_ftrace_function(&test_regs_probe);
 636	/*
 637	 * Now if the arch does not support passing regs, then this should
 638	 * have failed.
 639	 */
 640	if (!supported) {
 641		if (!ret) {
 642			pr_cont("*registered save-regs without arch support* ");
 643			goto out;
 644		}
 645		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
 646		ret = register_ftrace_function(&test_regs_probe);
 647	}
 648	if (ret) {
 649		pr_cont("*could not register callback* ");
 650		goto out;
 651	}
 652
 653
 654	DYN_FTRACE_TEST_NAME();
 655
 656	unregister_ftrace_function(&test_regs_probe);
 657
 658	ret = -1;
 659
 660	switch (trace_selftest_regs_stat) {
 661	case TRACE_SELFTEST_REGS_START:
 662		pr_cont("*callback never called* ");
 663		goto out;
 664
 665	case TRACE_SELFTEST_REGS_FOUND:
 666		if (supported)
 667			break;
 668		pr_cont("*callback received regs without arch support* ");
 669		goto out;
 670
 671	case TRACE_SELFTEST_REGS_NOT_FOUND:
 672		if (!supported)
 673			break;
 674		pr_cont("*callback received NULL regs* ");
 675		goto out;
 676	}
 677
 678	ret = 0;
 679out:
 680	ftrace_enabled = save_ftrace_enabled;
 681
 682	return ret;
 683}
 684
 685/*
 686 * Simple verification test of ftrace function tracer.
 687 * Enable ftrace, sleep 1/10 second, and then read the trace
 688 * buffer to see if all is in order.
 689 */
 690__init int
 691trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 692{
 693	int save_ftrace_enabled = ftrace_enabled;
 694	unsigned long count;
 695	int ret;
 696
 697#ifdef CONFIG_DYNAMIC_FTRACE
 698	if (ftrace_filter_param) {
 699		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
 700		return 0;
 701	}
 702#endif
 703
 704	/* make sure msleep has been recorded */
 705	msleep(1);
 706
 707	/* start the tracing */
 708	ftrace_enabled = 1;
 709
 710	ret = tracer_init(trace, tr);
 711	if (ret) {
 712		warn_failed_init_tracer(trace, ret);
 713		goto out;
 714	}
 715
 716	/* Sleep for a 1/10 of a second */
 717	msleep(100);
 718	/* stop the tracing. */
 719	tracing_stop();
 720	ftrace_enabled = 0;
 721
 722	/* check the trace buffer */
 723	ret = trace_test_buffer(&tr->array_buffer, &count);
 724
 725	ftrace_enabled = 1;
 726	trace->reset(tr);
 727	tracing_start();
 728
 729	if (!ret && !count) {
 730		printk(KERN_CONT ".. no entries found ..");
 731		ret = -1;
 732		goto out;
 733	}
 734
 735	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
 736						     DYN_FTRACE_TEST_NAME);
 737	if (ret)
 738		goto out;
 739
 740	ret = trace_selftest_function_recursion();
 741	if (ret)
 742		goto out;
 743
 744	ret = trace_selftest_function_regs();
 745 out:
 746	ftrace_enabled = save_ftrace_enabled;
 747
 748	/* kill ftrace totally if we failed */
 749	if (ret)
 750		ftrace_kill();
 751
 752	return ret;
 753}
 754#endif /* CONFIG_FUNCTION_TRACER */
 755
 756
 757#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 758
 759/* Maximum number of functions to trace before diagnosing a hang */
 760#define GRAPH_MAX_FUNC_TEST	100000000
 761
 762static unsigned int graph_hang_thresh;
 763
 764/* Wrap the real function entry probe to avoid possible hanging */
 765static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
 766{
  767	/* This is harmlessly racy; we only want to approximately detect a hang */
 768	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
 769		ftrace_graph_stop();
 770		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
 771		if (ftrace_dump_on_oops) {
 772			ftrace_dump(DUMP_ALL);
 773			/* ftrace_dump() disables tracing */
 774			tracing_on();
 775		}
 776		return 0;
 777	}
 778
 779	return trace_graph_entry(trace);
 780}
 781
 782static struct fgraph_ops fgraph_ops __initdata  = {
 783	.entryfunc		= &trace_graph_entry_watchdog,
 784	.retfunc		= &trace_graph_return,
 785};
 786
 787#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
 788#ifndef CALL_DEPTH_ACCOUNT
 789#define CALL_DEPTH_ACCOUNT ""
 790#endif
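/*
 * CALL_DEPTH_ACCOUNT is an arch-provided asm sequence (x86 uses it for
 * call-depth tracking in the retbleed mitigation); the fallback above
 * defines it as empty, so on other arches trace_direct_tramp() is just
 * an empty function used as a direct-call target by the test below.
 */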
 791
 792noinline __noclone static void trace_direct_tramp(void)
 793{
 794	asm(CALL_DEPTH_ACCOUNT);
 795}
 796#endif
 797
 798/*
 799 * Pretty much the same as for the function tracer from which the selftest
 800 * has been borrowed.
 801 */
 802__init int
 803trace_selftest_startup_function_graph(struct tracer *trace,
 804					struct trace_array *tr)
 805{
 806	int ret;
 807	unsigned long count;
 808	char *func_name __maybe_unused;
 809
 810#ifdef CONFIG_DYNAMIC_FTRACE
 811	if (ftrace_filter_param) {
 812		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
 813		return 0;
 814	}
 815#endif
 816
 817	/*
 818	 * Simulate the init() callback but we attach a watchdog callback
 819	 * to detect and recover from possible hangs
 820	 */
 821	tracing_reset_online_cpus(&tr->array_buffer);
 822	set_graph_array(tr);
 823	ret = register_ftrace_graph(&fgraph_ops);
 824	if (ret) {
 825		warn_failed_init_tracer(trace, ret);
 826		goto out;
 827	}
 828	tracing_start_cmdline_record();
 829
 830	/* Sleep for a 1/10 of a second */
 831	msleep(100);
 832
 833	/* Have we just recovered from a hang? */
 834	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
 835		disable_tracing_selftest("recovering from a hang");
 836		ret = -1;
 837		goto out;
 838	}
 839
 840	tracing_stop();
 841
 842	/* check the trace buffer */
 843	ret = trace_test_buffer(&tr->array_buffer, &count);
 844
 845	/* Need to also simulate the tr->reset to remove this fgraph_ops */
 846	tracing_stop_cmdline_record();
 847	unregister_ftrace_graph(&fgraph_ops);
 848
 849	tracing_start();
 850
 851	if (!ret && !count) {
 852		printk(KERN_CONT ".. no entries found ..");
 853		ret = -1;
 854		goto out;
 855	}
 856
 857#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
 858	tracing_reset_online_cpus(&tr->array_buffer);
 859	set_graph_array(tr);
 860
 861	/*
 862	 * Some archs *cough*PowerPC*cough* add characters to the
 863	 * start of the function names. We simply put a '*' to
 864	 * accommodate them.
 865	 */
 866	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 867	ftrace_set_global_filter(func_name, strlen(func_name), 1);
 868
 869	/*
  870	 * Register a direct function together with the graph tracer
  871	 * and make sure we still get graph tracing.
 872	 */
 873	ret = register_ftrace_direct((unsigned long) DYN_FTRACE_TEST_NAME,
 874				     (unsigned long) trace_direct_tramp);
 875	if (ret)
 876		goto out;
 877
 878	ret = register_ftrace_graph(&fgraph_ops);
 879	if (ret) {
 880		warn_failed_init_tracer(trace, ret);
 881		goto out;
 882	}
 883
 884	DYN_FTRACE_TEST_NAME();
 885
 886	count = 0;
 887
 888	tracing_stop();
 889	/* check the trace buffer */
 890	ret = trace_test_buffer(&tr->array_buffer, &count);
 891
 892	unregister_ftrace_graph(&fgraph_ops);
 893
 894	ret = unregister_ftrace_direct((unsigned long) DYN_FTRACE_TEST_NAME,
 895				       (unsigned long) trace_direct_tramp);
 896	if (ret)
 897		goto out;
 898
 899	tracing_start();
 900
 901	if (!ret && !count) {
 902		ret = -1;
 903		goto out;
 904	}
 905
 906	/* Enable tracing on all functions again */
 907	ftrace_set_global_filter(NULL, 0, 1);
 908#endif
 909
 910	/* Don't test dynamic tracing, the function tracer already did */
 911out:
 912	/* Stop it if we failed */
 913	if (ret)
 914		ftrace_graph_stop();
 915
 916	return ret;
 917}
 918#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 919
 920
 921#ifdef CONFIG_IRQSOFF_TRACER
 922int
 923trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 924{
 925	unsigned long save_max = tr->max_latency;
 926	unsigned long count;
 927	int ret;
 928
 929	/* start the tracing */
 930	ret = tracer_init(trace, tr);
 931	if (ret) {
 932		warn_failed_init_tracer(trace, ret);
 933		return ret;
 934	}
 935
 936	/* reset the max latency */
 937	tr->max_latency = 0;
 938	/* disable interrupts for a bit */
 939	local_irq_disable();
 940	udelay(100);
 941	local_irq_enable();
 942
 943	/*
 944	 * Stop the tracer to avoid a warning subsequent
 945	 * to buffer flipping failure because tracing_stop()
 946	 * disables the tr and max buffers, making flipping impossible
  947	 * in case of parallel max irqs off latencies.
 948	 */
 949	trace->stop(tr);
 950	/* stop the tracing. */
 951	tracing_stop();
 952	/* check both trace buffers */
 953	ret = trace_test_buffer(&tr->array_buffer, NULL);
 954	if (!ret)
 955		ret = trace_test_buffer(&tr->max_buffer, &count);
 956	trace->reset(tr);
 957	tracing_start();
 958
 959	if (!ret && !count) {
 960		printk(KERN_CONT ".. no entries found ..");
 961		ret = -1;
 962	}
 963
 964	tr->max_latency = save_max;
 965
 966	return ret;
 967}
 968#endif /* CONFIG_IRQSOFF_TRACER */
 969
 970#ifdef CONFIG_PREEMPT_TRACER
 971int
 972trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 973{
 974	unsigned long save_max = tr->max_latency;
 975	unsigned long count;
 976	int ret;
 977
 978	/*
 979	 * Now that the big kernel lock is no longer preemptible,
 980	 * and this is called with the BKL held, it will always
 981	 * fail. If preemption is already disabled, simply
 982	 * pass the test. When the BKL is removed, or becomes
 983	 * preemptible again, we will once again test this,
 984	 * so keep it in.
 985	 */
 986	if (preempt_count()) {
 987		printk(KERN_CONT "can not test ... force ");
 988		return 0;
 989	}
 990
 991	/* start the tracing */
 992	ret = tracer_init(trace, tr);
 993	if (ret) {
 994		warn_failed_init_tracer(trace, ret);
 995		return ret;
 996	}
 997
 998	/* reset the max latency */
 999	tr->max_latency = 0;
1000	/* disable preemption for a bit */
1001	preempt_disable();
1002	udelay(100);
1003	preempt_enable();
1004
1005	/*
1006	 * Stop the tracer to avoid a warning subsequent
1007	 * to buffer flipping failure because tracing_stop()
1008	 * disables the tr and max buffers, making flipping impossible
 1009	 * in case of parallel max preempt off latencies.
1010	 */
1011	trace->stop(tr);
1012	/* stop the tracing. */
1013	tracing_stop();
1014	/* check both trace buffers */
1015	ret = trace_test_buffer(&tr->array_buffer, NULL);
1016	if (!ret)
1017		ret = trace_test_buffer(&tr->max_buffer, &count);
1018	trace->reset(tr);
1019	tracing_start();
1020
1021	if (!ret && !count) {
1022		printk(KERN_CONT ".. no entries found ..");
1023		ret = -1;
1024	}
1025
1026	tr->max_latency = save_max;
1027
1028	return ret;
1029}
1030#endif /* CONFIG_PREEMPT_TRACER */
1031
1032#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
1033int
1034trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
1035{
1036	unsigned long save_max = tr->max_latency;
1037	unsigned long count;
1038	int ret;
1039
1040	/*
1041	 * Now that the big kernel lock is no longer preemptible,
1042	 * and this is called with the BKL held, it will always
1043	 * fail. If preemption is already disabled, simply
1044	 * pass the test. When the BKL is removed, or becomes
1045	 * preemptible again, we will once again test this,
1046	 * so keep it in.
1047	 */
1048	if (preempt_count()) {
1049		printk(KERN_CONT "can not test ... force ");
1050		return 0;
1051	}
1052
1053	/* start the tracing */
1054	ret = tracer_init(trace, tr);
1055	if (ret) {
1056		warn_failed_init_tracer(trace, ret);
1057		goto out_no_start;
1058	}
1059
1060	/* reset the max latency */
1061	tr->max_latency = 0;
1062
1063	/* disable preemption and interrupts for a bit */
1064	preempt_disable();
1065	local_irq_disable();
1066	udelay(100);
1067	preempt_enable();
1068	/* reverse the order of preempt vs irqs */
1069	local_irq_enable();
1070
1071	/*
1072	 * Stop the tracer to avoid a warning subsequent
1073	 * to buffer flipping failure because tracing_stop()
1074	 * disables the tr and max buffers, making flipping impossible
 1075	 * in case of parallel max irqs/preempt off latencies.
1076	 */
1077	trace->stop(tr);
1078	/* stop the tracing. */
1079	tracing_stop();
1080	/* check both trace buffers */
1081	ret = trace_test_buffer(&tr->array_buffer, NULL);
1082	if (ret)
1083		goto out;
1084
1085	ret = trace_test_buffer(&tr->max_buffer, &count);
1086	if (ret)
1087		goto out;
1088
1089	if (!ret && !count) {
1090		printk(KERN_CONT ".. no entries found ..");
1091		ret = -1;
1092		goto out;
1093	}
1094
1095	/* do the test by disabling interrupts first this time */
1096	tr->max_latency = 0;
1097	tracing_start();
1098	trace->start(tr);
1099
1100	preempt_disable();
1101	local_irq_disable();
1102	udelay(100);
1103	preempt_enable();
1104	/* reverse the order of preempt vs irqs */
1105	local_irq_enable();
1106
1107	trace->stop(tr);
1108	/* stop the tracing. */
1109	tracing_stop();
1110	/* check both trace buffers */
1111	ret = trace_test_buffer(&tr->array_buffer, NULL);
1112	if (ret)
1113		goto out;
1114
1115	ret = trace_test_buffer(&tr->max_buffer, &count);
1116
1117	if (!ret && !count) {
1118		printk(KERN_CONT ".. no entries found ..");
1119		ret = -1;
1120		goto out;
1121	}
1122
1123out:
1124	tracing_start();
1125out_no_start:
1126	trace->reset(tr);
1127	tr->max_latency = save_max;
1128
1129	return ret;
1130}
1131#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
1132
1133#ifdef CONFIG_NOP_TRACER
1134int
1135trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
1136{
1137	/* What could possibly go wrong? */
1138	return 0;
1139}
1140#endif
1141
1142#ifdef CONFIG_SCHED_TRACER
1143
1144struct wakeup_test_data {
1145	struct completion	is_ready;
1146	int			go;
1147};
1148
1149static int trace_wakeup_test_thread(void *data)
1150{
1151	/* Make this a -deadline thread */
1152	static const struct sched_attr attr = {
1153		.sched_policy = SCHED_DEADLINE,
1154		.sched_runtime = 100000ULL,
1155		.sched_deadline = 10000000ULL,
1156		.sched_period = 10000000ULL
1157	};
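	/*
	 * The sched_attr values above are in nanoseconds: a 100us runtime
	 * budget out of a 10ms period, with the deadline equal to the
	 * period -- a very light deadline task, just enough for the
	 * wakeup test.
	 */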
1158	struct wakeup_test_data *x = data;
1159
1160	sched_setattr(current, &attr);
1161
 1162	/* Let the test know we have a new prio */
1163	complete(&x->is_ready);
1164
1165	/* now go to sleep and let the test wake us up */
1166	set_current_state(TASK_INTERRUPTIBLE);
1167	while (!x->go) {
1168		schedule();
1169		set_current_state(TASK_INTERRUPTIBLE);
1170	}
1171
1172	complete(&x->is_ready);
1173
1174	set_current_state(TASK_INTERRUPTIBLE);
1175
1176	/* we are awake, now wait to disappear */
1177	while (!kthread_should_stop()) {
1178		schedule();
1179		set_current_state(TASK_INTERRUPTIBLE);
1180	}
1181
1182	__set_current_state(TASK_RUNNING);
1183
1184	return 0;
1185}
1186int
1187trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
1188{
1189	unsigned long save_max = tr->max_latency;
1190	struct task_struct *p;
1191	struct wakeup_test_data data;
1192	unsigned long count;
1193	int ret;
1194
1195	memset(&data, 0, sizeof(data));
1196
1197	init_completion(&data.is_ready);
1198
1199	/* create a -deadline thread */
1200	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
1201	if (IS_ERR(p)) {
1202		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
1203		return -1;
1204	}
1205
1206	/* make sure the thread is running at -deadline policy */
1207	wait_for_completion(&data.is_ready);
1208
1209	/* start the tracing */
1210	ret = tracer_init(trace, tr);
1211	if (ret) {
1212		warn_failed_init_tracer(trace, ret);
1213		return ret;
1214	}
1215
1216	/* reset the max latency */
1217	tr->max_latency = 0;
1218
1219	while (p->on_rq) {
1220		/*
1221		 * Sleep to make sure the -deadline thread is asleep too.
1222		 * On virtual machines we can't rely on timings,
1223		 * but we want to make sure this test still works.
1224		 */
1225		msleep(100);
1226	}
1227
1228	init_completion(&data.is_ready);
1229
1230	data.go = 1;
1231	/* memory barrier is in the wake_up_process() */
1232
1233	wake_up_process(p);
1234
1235	/* Wait for the task to wake up */
1236	wait_for_completion(&data.is_ready);
1237
1238	/* stop the tracing. */
1239	tracing_stop();
1240	/* check both trace buffers */
1241	ret = trace_test_buffer(&tr->array_buffer, NULL);
1242	if (!ret)
1243		ret = trace_test_buffer(&tr->max_buffer, &count);
1244
1245
1246	trace->reset(tr);
1247	tracing_start();
1248
1249	tr->max_latency = save_max;
1250
1251	/* kill the thread */
1252	kthread_stop(p);
1253
1254	if (!ret && !count) {
1255		printk(KERN_CONT ".. no entries found ..");
1256		ret = -1;
1257	}
1258
1259	return ret;
1260}
1261#endif /* CONFIG_SCHED_TRACER */
1262
1263#ifdef CONFIG_BRANCH_TRACER
1264int
1265trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
1266{
1267	unsigned long count;
1268	int ret;
1269
1270	/* start the tracing */
1271	ret = tracer_init(trace, tr);
1272	if (ret) {
1273		warn_failed_init_tracer(trace, ret);
1274		return ret;
1275	}
1276
1277	/* Sleep for a 1/10 of a second */
1278	msleep(100);
1279	/* stop the tracing. */
1280	tracing_stop();
1281	/* check the trace buffer */
1282	ret = trace_test_buffer(&tr->array_buffer, &count);
1283	trace->reset(tr);
1284	tracing_start();
1285
1286	if (!ret && !count) {
1287		printk(KERN_CONT ".. no entries found ..");
1288		ret = -1;
1289	}
1290
1291	return ret;
1292}
1293#endif /* CONFIG_BRANCH_TRACER */
1294
v3.5.6
 
  1/* Include in trace.c */
  2
  3#include <linux/stringify.h>
  4#include <linux/kthread.h>
  5#include <linux/delay.h>
  6#include <linux/slab.h>
  7
  8static inline int trace_valid_entry(struct trace_entry *entry)
  9{
 10	switch (entry->type) {
 11	case TRACE_FN:
 12	case TRACE_CTX:
 13	case TRACE_WAKE:
 14	case TRACE_STACK:
 15	case TRACE_PRINT:
 16	case TRACE_BRANCH:
 17	case TRACE_GRAPH_ENT:
 18	case TRACE_GRAPH_RET:
 19		return 1;
 20	}
 21	return 0;
 22}
 23
 24static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
 25{
 26	struct ring_buffer_event *event;
 27	struct trace_entry *entry;
 28	unsigned int loops = 0;
 29
 30	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
 31		entry = ring_buffer_event_data(event);
 32
 33		/*
 34		 * The ring buffer is of size trace_buf_size; if
 35		 * we loop more than the size, there's something wrong
 36		 * with the ring buffer.
 37		 */
 38		if (loops++ > trace_buf_size) {
 39			printk(KERN_CONT ".. bad ring buffer ");
 40			goto failed;
 41		}
 42		if (!trace_valid_entry(entry)) {
 43			printk(KERN_CONT ".. invalid entry %d ",
 44				entry->type);
 45			goto failed;
 46		}
 47	}
 48	return 0;
 49
 50 failed:
 51	/* disable tracing */
 52	tracing_disabled = 1;
 53	printk(KERN_CONT ".. corrupted trace buffer .. ");
 54	return -1;
 55}
 56
 57/*
 58 * Test the trace buffer to see if all the elements
 59 * are still sane.
 60 */
 61static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 62{
 63	unsigned long flags, cnt = 0;
 64	int cpu, ret = 0;
 65
 66	/* Don't allow flipping of max traces now */
 67	local_irq_save(flags);
 68	arch_spin_lock(&ftrace_max_lock);
 69
 70	cnt = ring_buffer_entries(tr->buffer);
 71
 72	/*
 73	 * The trace_test_buffer_cpu runs a while loop to consume all data.
 74	 * If the calling tracer is broken, and is constantly filling
 75	 * the buffer, this will run forever, and hard lock the box.
 76	 * We disable the ring buffer while we do this test to prevent
 77	 * a hard lock up.
 78	 */
 79	tracing_off();
 80	for_each_possible_cpu(cpu) {
 81		ret = trace_test_buffer_cpu(tr, cpu);
 82		if (ret)
 83			break;
 84	}
 85	tracing_on();
 86	arch_spin_unlock(&ftrace_max_lock);
 87	local_irq_restore(flags);
 88
 89	if (count)
 90		*count = cnt;
 91
 92	return ret;
 93}
 94
 95static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
 96{
 97	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
 98		trace->name, init_ret);
 99}
100#ifdef CONFIG_FUNCTION_TRACER
101
102#ifdef CONFIG_DYNAMIC_FTRACE
103
104static int trace_selftest_test_probe1_cnt;
105static void trace_selftest_test_probe1_func(unsigned long ip,
106					    unsigned long pip)
107{
108	trace_selftest_test_probe1_cnt++;
109}
110
111static int trace_selftest_test_probe2_cnt;
112static void trace_selftest_test_probe2_func(unsigned long ip,
113					    unsigned long pip)
114{
115	trace_selftest_test_probe2_cnt++;
116}
117
118static int trace_selftest_test_probe3_cnt;
119static void trace_selftest_test_probe3_func(unsigned long ip,
120					    unsigned long pip)
121{
122	trace_selftest_test_probe3_cnt++;
123}
124
125static int trace_selftest_test_global_cnt;
126static void trace_selftest_test_global_func(unsigned long ip,
127					    unsigned long pip)
128{
129	trace_selftest_test_global_cnt++;
130}
131
132static int trace_selftest_test_dyn_cnt;
133static void trace_selftest_test_dyn_func(unsigned long ip,
134					 unsigned long pip)
135{
136	trace_selftest_test_dyn_cnt++;
137}
138
139static struct ftrace_ops test_probe1 = {
140	.func			= trace_selftest_test_probe1_func,
141};
142
143static struct ftrace_ops test_probe2 = {
144	.func			= trace_selftest_test_probe2_func,
145};
146
147static struct ftrace_ops test_probe3 = {
148	.func			= trace_selftest_test_probe3_func,
149};
150
151static struct ftrace_ops test_global = {
152	.func			= trace_selftest_test_global_func,
153	.flags			= FTRACE_OPS_FL_GLOBAL,
154};
155
156static void print_counts(void)
157{
158	printk("(%d %d %d %d %d) ",
159	       trace_selftest_test_probe1_cnt,
160	       trace_selftest_test_probe2_cnt,
161	       trace_selftest_test_probe3_cnt,
162	       trace_selftest_test_global_cnt,
163	       trace_selftest_test_dyn_cnt);
164}
165
166static void reset_counts(void)
167{
168	trace_selftest_test_probe1_cnt = 0;
169	trace_selftest_test_probe2_cnt = 0;
170	trace_selftest_test_probe3_cnt = 0;
171	trace_selftest_test_global_cnt = 0;
172	trace_selftest_test_dyn_cnt = 0;
173}
174
175static int trace_selftest_ops(int cnt)
176{
177	int save_ftrace_enabled = ftrace_enabled;
178	struct ftrace_ops *dyn_ops;
179	char *func1_name;
180	char *func2_name;
181	int len1;
182	int len2;
183	int ret = -1;
184
185	printk(KERN_CONT "PASSED\n");
186	pr_info("Testing dynamic ftrace ops #%d: ", cnt);
187
188	ftrace_enabled = 1;
189	reset_counts();
190
191	/* Handle PPC64 '.' name */
192	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
193	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
194	len1 = strlen(func1_name);
195	len2 = strlen(func2_name);
196
197	/*
198	 * Probe 1 will trace function 1.
199	 * Probe 2 will trace function 2.
200	 * Probe 3 will trace functions 1 and 2.
201	 */
202	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
203	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
204	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
205	ftrace_set_filter(&test_probe3, func2_name, len2, 0);
206
207	register_ftrace_function(&test_probe1);
208	register_ftrace_function(&test_probe2);
209	register_ftrace_function(&test_probe3);
210	register_ftrace_function(&test_global);
211
212	DYN_FTRACE_TEST_NAME();
213
214	print_counts();
215
216	if (trace_selftest_test_probe1_cnt != 1)
217		goto out;
218	if (trace_selftest_test_probe2_cnt != 0)
219		goto out;
220	if (trace_selftest_test_probe3_cnt != 1)
221		goto out;
222	if (trace_selftest_test_global_cnt == 0)
223		goto out;
224
225	DYN_FTRACE_TEST_NAME2();
226
227	print_counts();
228
229	if (trace_selftest_test_probe1_cnt != 1)
230		goto out;
231	if (trace_selftest_test_probe2_cnt != 1)
232		goto out;
233	if (trace_selftest_test_probe3_cnt != 2)
234		goto out;
235
236	/* Add a dynamic probe */
237	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
238	if (!dyn_ops) {
239		printk("MEMORY ERROR ");
240		goto out;
241	}
242
243	dyn_ops->func = trace_selftest_test_dyn_func;
244
245	register_ftrace_function(dyn_ops);
246
247	trace_selftest_test_global_cnt = 0;
248
249	DYN_FTRACE_TEST_NAME();
250
251	print_counts();
252
253	if (trace_selftest_test_probe1_cnt != 2)
254		goto out_free;
255	if (trace_selftest_test_probe2_cnt != 1)
256		goto out_free;
257	if (trace_selftest_test_probe3_cnt != 3)
258		goto out_free;
259	if (trace_selftest_test_global_cnt == 0)
260		goto out;
261	if (trace_selftest_test_dyn_cnt == 0)
262		goto out_free;
263
264	DYN_FTRACE_TEST_NAME2();
265
266	print_counts();
267
268	if (trace_selftest_test_probe1_cnt != 2)
269		goto out_free;
270	if (trace_selftest_test_probe2_cnt != 2)
271		goto out_free;
272	if (trace_selftest_test_probe3_cnt != 4)
273		goto out_free;
274
275	ret = 0;
276 out_free:
277	unregister_ftrace_function(dyn_ops);
278	kfree(dyn_ops);
279
280 out:
281	/* Purposely unregister in the same order */
282	unregister_ftrace_function(&test_probe1);
283	unregister_ftrace_function(&test_probe2);
284	unregister_ftrace_function(&test_probe3);
285	unregister_ftrace_function(&test_global);
286
287	/* Make sure everything is off */
288	reset_counts();
289	DYN_FTRACE_TEST_NAME();
290	DYN_FTRACE_TEST_NAME();
291
292	if (trace_selftest_test_probe1_cnt ||
293	    trace_selftest_test_probe2_cnt ||
294	    trace_selftest_test_probe3_cnt ||
295	    trace_selftest_test_global_cnt ||
296	    trace_selftest_test_dyn_cnt)
297		ret = -1;
298
299	ftrace_enabled = save_ftrace_enabled;
300
301	return ret;
302}
303
304/* Test dynamic code modification and ftrace filters */
305int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
306					   struct trace_array *tr,
307					   int (*func)(void))
308{
309	int save_ftrace_enabled = ftrace_enabled;
310	int save_tracer_enabled = tracer_enabled;
311	unsigned long count;
312	char *func_name;
313	int ret;
314
315	/* The ftrace test PASSED */
316	printk(KERN_CONT "PASSED\n");
317	pr_info("Testing dynamic ftrace: ");
318
319	/* enable tracing, and record the filter function */
320	ftrace_enabled = 1;
321	tracer_enabled = 1;
322
323	/* passed in by parameter to keep gcc from optimizing it away */
324	func();
325
326	/*
327	 * Some archs *cough*PowerPC*cough* add characters to the
328	 * start of the function names. We simply put a '*' to
329	 * accommodate them.
330	 */
331	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
332
333	/* filter only on our function */
334	ftrace_set_global_filter(func_name, strlen(func_name), 1);
335
336	/* enable tracing */
337	ret = tracer_init(trace, tr);
338	if (ret) {
339		warn_failed_init_tracer(trace, ret);
340		goto out;
341	}
342
343	/* Sleep for a 1/10 of a second */
344	msleep(100);
345
346	/* we should have nothing in the buffer */
347	ret = trace_test_buffer(tr, &count);
348	if (ret)
349		goto out;
350
351	if (count) {
352		ret = -1;
353		printk(KERN_CONT ".. filter did not filter .. ");
354		goto out;
355	}
356
357	/* call our function again */
358	func();
359
360	/* sleep again */
361	msleep(100);
362
363	/* stop the tracing. */
364	tracing_stop();
365	ftrace_enabled = 0;
366
367	/* check the trace buffer */
368	ret = trace_test_buffer(tr, &count);
369	tracing_start();
370
371	/* we should only have one item */
372	if (!ret && count != 1) {
373		trace->reset(tr);
374		printk(KERN_CONT ".. filter failed count=%ld ..", count);
375		ret = -1;
376		goto out;
377	}
378
379	/* Test the ops with global tracing running */
380	ret = trace_selftest_ops(1);
381	trace->reset(tr);
382
383 out:
384	ftrace_enabled = save_ftrace_enabled;
385	tracer_enabled = save_tracer_enabled;
386
387	/* Enable tracing on all functions again */
388	ftrace_set_global_filter(NULL, 0, 1);
389
390	/* Test the ops with global tracing off */
391	if (!ret)
392		ret = trace_selftest_ops(2);
393
394	return ret;
395}
396#else
397# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
398#endif /* CONFIG_DYNAMIC_FTRACE */
399
400/*
401 * Simple verification test of ftrace function tracer.
402 * Enable ftrace, sleep 1/10 second, and then read the trace
403 * buffer to see if all is in order.
404 */
405int
406trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
407{
408	int save_ftrace_enabled = ftrace_enabled;
409	int save_tracer_enabled = tracer_enabled;
410	unsigned long count;
411	int ret;
412
413	/* make sure msleep has been recorded */
414	msleep(1);
415
416	/* start the tracing */
417	ftrace_enabled = 1;
418	tracer_enabled = 1;
419
420	ret = tracer_init(trace, tr);
421	if (ret) {
422		warn_failed_init_tracer(trace, ret);
423		goto out;
424	}
425
426	/* Sleep for a 1/10 of a second */
427	msleep(100);
428	/* stop the tracing. */
429	tracing_stop();
430	ftrace_enabled = 0;
431
432	/* check the trace buffer */
433	ret = trace_test_buffer(tr, &count);
434	trace->reset(tr);
435	tracing_start();
436
437	if (!ret && !count) {
438		printk(KERN_CONT ".. no entries found ..");
439		ret = -1;
440		goto out;
441	}
442
443	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
444						     DYN_FTRACE_TEST_NAME);
445
446 out:
447	ftrace_enabled = save_ftrace_enabled;
448	tracer_enabled = save_tracer_enabled;
449
450	/* kill ftrace totally if we failed */
451	if (ret)
452		ftrace_kill();
453
454	return ret;
455}
456#endif /* CONFIG_FUNCTION_TRACER */
457
458
459#ifdef CONFIG_FUNCTION_GRAPH_TRACER
460
461/* Maximum number of functions to trace before diagnosing a hang */
462#define GRAPH_MAX_FUNC_TEST	100000000
463
464static void
465__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
466static unsigned int graph_hang_thresh;
467
468/* Wrap the real function entry probe to avoid possible hanging */
469static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
470{
471	/* This is harmlessly racy; we only want to approximately detect a hang */
472	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
473		ftrace_graph_stop();
474		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
475		if (ftrace_dump_on_oops)
476			__ftrace_dump(false, DUMP_ALL);
477		return 0;
478	}
479
480	return trace_graph_entry(trace);
481}
482
483/*
484 * Pretty much the same as for the function tracer from which the selftest
485 * has been borrowed.
486 */
487int
488trace_selftest_startup_function_graph(struct tracer *trace,
489					struct trace_array *tr)
490{
491	int ret;
492	unsigned long count;
493
494	/*
495	 * Simulate the init() callback but we attach a watchdog callback
496	 * to detect and recover from possible hangs
497	 */
498	tracing_reset_online_cpus(tr);
499	set_graph_array(tr);
500	ret = register_ftrace_graph(&trace_graph_return,
501				    &trace_graph_entry_watchdog);
502	if (ret) {
503		warn_failed_init_tracer(trace, ret);
504		goto out;
505	}
506	tracing_start_cmdline_record();
507
508	/* Sleep for a 1/10 of a second */
509	msleep(100);
510
511	/* Have we just recovered from a hang? */
512	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
513		tracing_selftest_disabled = true;
514		ret = -1;
515		goto out;
516	}
517
518	tracing_stop();
519
520	/* check the trace buffer */
521	ret = trace_test_buffer(tr, &count);
522
523	trace->reset(tr);
524	tracing_start();
525
526	if (!ret && !count) {
527		printk(KERN_CONT ".. no entries found ..");
528		ret = -1;
529		goto out;
530	}
531
532	/* Don't test dynamic tracing, the function tracer already did */
533
534out:
535	/* Stop it if we failed */
536	if (ret)
537		ftrace_graph_stop();
538
539	return ret;
540}
541#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
542
543
544#ifdef CONFIG_IRQSOFF_TRACER
545int
546trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
547{
548	unsigned long save_max = tracing_max_latency;
549	unsigned long count;
550	int ret;
551
552	/* start the tracing */
553	ret = tracer_init(trace, tr);
554	if (ret) {
555		warn_failed_init_tracer(trace, ret);
556		return ret;
557	}
558
559	/* reset the max latency */
560	tracing_max_latency = 0;
561	/* disable interrupts for a bit */
562	local_irq_disable();
563	udelay(100);
564	local_irq_enable();
565
566	/*
567	 * Stop the tracer to avoid a warning subsequent
568	 * to buffer flipping failure because tracing_stop()
569	 * disables the tr and max buffers, making flipping impossible
570	 * in case of parallel max irqs off latencies.
571	 */
572	trace->stop(tr);
573	/* stop the tracing. */
574	tracing_stop();
575	/* check both trace buffers */
576	ret = trace_test_buffer(tr, NULL);
577	if (!ret)
578		ret = trace_test_buffer(&max_tr, &count);
579	trace->reset(tr);
580	tracing_start();
581
582	if (!ret && !count) {
583		printk(KERN_CONT ".. no entries found ..");
584		ret = -1;
585	}
586
587	tracing_max_latency = save_max;
588
589	return ret;
590}
591#endif /* CONFIG_IRQSOFF_TRACER */
592
593#ifdef CONFIG_PREEMPT_TRACER
594int
595trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
596{
597	unsigned long save_max = tracing_max_latency;
598	unsigned long count;
599	int ret;
600
601	/*
602	 * Now that the big kernel lock is no longer preemptible,
603	 * and this is called with the BKL held, it will always
604	 * fail. If preemption is already disabled, simply
605	 * pass the test. When the BKL is removed, or becomes
606	 * preemptible again, we will once again test this,
607	 * so keep it in.
608	 */
609	if (preempt_count()) {
610		printk(KERN_CONT "can not test ... force ");
611		return 0;
612	}
613
614	/* start the tracing */
615	ret = tracer_init(trace, tr);
616	if (ret) {
617		warn_failed_init_tracer(trace, ret);
618		return ret;
619	}
620
621	/* reset the max latency */
622	tracing_max_latency = 0;
623	/* disable preemption for a bit */
624	preempt_disable();
625	udelay(100);
626	preempt_enable();
627
628	/*
629	 * Stop the tracer to avoid a warning subsequent
630	 * to buffer flipping failure because tracing_stop()
631	 * disables the tr and max buffers, making flipping impossible
632	 * in case of parallel max preempt off latencies.
633	 */
634	trace->stop(tr);
635	/* stop the tracing. */
636	tracing_stop();
637	/* check both trace buffers */
638	ret = trace_test_buffer(tr, NULL);
639	if (!ret)
640		ret = trace_test_buffer(&max_tr, &count);
641	trace->reset(tr);
642	tracing_start();
643
644	if (!ret && !count) {
645		printk(KERN_CONT ".. no entries found ..");
646		ret = -1;
647	}
648
649	tracing_max_latency = save_max;
650
651	return ret;
652}
653#endif /* CONFIG_PREEMPT_TRACER */
654
655#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
656int
657trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
658{
659	unsigned long save_max = tracing_max_latency;
660	unsigned long count;
661	int ret;
662
663	/*
664	 * Now that the big kernel lock is no longer preemptible,
665	 * and this is called with the BKL held, it will always
666	 * fail. If preemption is already disabled, simply
667	 * pass the test. When the BKL is removed, or becomes
668	 * preemptible again, we will once again test this,
669	 * so keep it in.
670	 */
671	if (preempt_count()) {
672		printk(KERN_CONT "can not test ... force ");
673		return 0;
674	}
675
676	/* start the tracing */
677	ret = tracer_init(trace, tr);
678	if (ret) {
679		warn_failed_init_tracer(trace, ret);
680		goto out_no_start;
681	}
682
683	/* reset the max latency */
684	tracing_max_latency = 0;
685
686	/* disable preemption and interrupts for a bit */
687	preempt_disable();
688	local_irq_disable();
689	udelay(100);
690	preempt_enable();
691	/* reverse the order of preempt vs irqs */
692	local_irq_enable();
693
694	/*
695	 * Stop the tracer to avoid a warning subsequent
696	 * to buffer flipping failure because tracing_stop()
697	 * disables the tr and max buffers, making flipping impossible
698	 * in case of parallel max irqs/preempt off latencies.
699	 */
700	trace->stop(tr);
701	/* stop the tracing. */
702	tracing_stop();
703	/* check both trace buffers */
704	ret = trace_test_buffer(tr, NULL);
705	if (ret)
706		goto out;
707
708	ret = trace_test_buffer(&max_tr, &count);
709	if (ret)
710		goto out;
711
712	if (!ret && !count) {
713		printk(KERN_CONT ".. no entries found ..");
714		ret = -1;
715		goto out;
716	}
717
718	/* do the test by disabling interrupts first this time */
719	tracing_max_latency = 0;
720	tracing_start();
721	trace->start(tr);
722
723	preempt_disable();
724	local_irq_disable();
725	udelay(100);
726	preempt_enable();
727	/* reverse the order of preempt vs irqs */
728	local_irq_enable();
729
730	trace->stop(tr);
731	/* stop the tracing. */
732	tracing_stop();
733	/* check both trace buffers */
734	ret = trace_test_buffer(tr, NULL);
735	if (ret)
736		goto out;
737
738	ret = trace_test_buffer(&max_tr, &count);
739
740	if (!ret && !count) {
741		printk(KERN_CONT ".. no entries found ..");
742		ret = -1;
743		goto out;
744	}
745
746out:
747	tracing_start();
748out_no_start:
749	trace->reset(tr);
750	tracing_max_latency = save_max;
751
752	return ret;
753}
754#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
755
756#ifdef CONFIG_NOP_TRACER
757int
758trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
759{
760	/* What could possibly go wrong? */
761	return 0;
762}
763#endif
764
765#ifdef CONFIG_SCHED_TRACER
766static int trace_wakeup_test_thread(void *data)
767{
768	/* Make this an RT thread, doesn't need to be too high */
769	static const struct sched_param param = { .sched_priority = 5 };
770	struct completion *x = data;
771
772	sched_setscheduler(current, SCHED_FIFO, &param);
773
774	/* Let the test know we have a new prio */
775	complete(x);
776
777	/* now go to sleep and let the test wake us up */
778	set_current_state(TASK_INTERRUPTIBLE);
779	schedule();
780
781	/* we are awake, now wait to disappear */
782	while (!kthread_should_stop()) {
783		/*
784		 * This is an RT task; do short sleeps to let
785		 * others run.
786		 */
787		msleep(100);
788	}
789
790	return 0;
791}
792
793int
794trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
795{
796	unsigned long save_max = tracing_max_latency;
797	struct task_struct *p;
798	struct completion isrt;
799	unsigned long count;
800	int ret;
801
802	init_completion(&isrt);
803
804	/* create a high prio thread */
805	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
806	if (IS_ERR(p)) {
807		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
808		return -1;
809	}
810
811	/* make sure the thread is running at an RT prio */
812	wait_for_completion(&isrt);
813
814	/* start the tracing */
815	ret = tracer_init(trace, tr);
816	if (ret) {
817		warn_failed_init_tracer(trace, ret);
818		return ret;
819	}
820
821	/* reset the max latency */
822	tracing_max_latency = 0;
823
824	/* sleep to let the RT thread sleep too */
825	msleep(100);
826
827	/*
828	 * Yes, this is slightly racy. It is possible that for some
829	 * strange reason the RT thread we created did not
830	 * call schedule for 100ms after doing the completion,
831	 * and we do a wakeup on a task that is already awake.
832	 * But that is extremely unlikely, and the worst thing that
833	 * happens in such a case is that we disable tracing.
834	 * Honestly, if this race does happen something is horribly
835	 * wrong with the system.
836	 */
837
838	wake_up_process(p);
839
840	/* give a little time to let the thread wake up */
841	msleep(100);
842
843	/* stop the tracing. */
844	tracing_stop();
845	/* check both trace buffers */
846	ret = trace_test_buffer(tr, NULL);
847	if (!ret)
848		ret = trace_test_buffer(&max_tr, &count);
849
850
851	trace->reset(tr);
852	tracing_start();
853
854	tracing_max_latency = save_max;
855
856	/* kill the thread */
857	kthread_stop(p);
858
859	if (!ret && !count) {
860		printk(KERN_CONT ".. no entries found ..");
861		ret = -1;
862	}
863
864	return ret;
865}
866#endif /* CONFIG_SCHED_TRACER */
867
868#ifdef CONFIG_CONTEXT_SWITCH_TRACER
869int
870trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
871{
872	unsigned long count;
873	int ret;
874
875	/* start the tracing */
876	ret = tracer_init(trace, tr);
877	if (ret) {
878		warn_failed_init_tracer(trace, ret);
879		return ret;
880	}
881
882	/* Sleep for a 1/10 of a second */
883	msleep(100);
884	/* stop the tracing. */
885	tracing_stop();
886	/* check the trace buffer */
887	ret = trace_test_buffer(tr, &count);
888	trace->reset(tr);
889	tracing_start();
890
891	if (!ret && !count) {
892		printk(KERN_CONT ".. no entries found ..");
893		ret = -1;
894	}
895
896	return ret;
897}
898#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
899
900#ifdef CONFIG_BRANCH_TRACER
901int
902trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
903{
904	unsigned long count;
905	int ret;
906
907	/* start the tracing */
908	ret = tracer_init(trace, tr);
909	if (ret) {
910		warn_failed_init_tracer(trace, ret);
911		return ret;
912	}
913
914	/* Sleep for a 1/10 of a second */
915	msleep(100);
916	/* stop the tracing. */
917	tracing_stop();
918	/* check the trace buffer */
919	ret = trace_test_buffer(tr, &count);
920	trace->reset(tr);
921	tracing_start();
922
923	if (!ret && !count) {
924		printk(KERN_CONT ".. no entries found ..");
925		ret = -1;
926	}
927
928	return ret;
929}
930#endif /* CONFIG_BRANCH_TRACER */
931