v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/* Include in trace.c */
   3
   4#include <uapi/linux/sched/types.h>
   5#include <linux/stringify.h>
   6#include <linux/kthread.h>
   7#include <linux/delay.h>
   8#include <linux/slab.h>
   9
  10static inline int trace_valid_entry(struct trace_entry *entry)
  11{
  12	switch (entry->type) {
  13	case TRACE_FN:
  14	case TRACE_CTX:
  15	case TRACE_WAKE:
  16	case TRACE_STACK:
  17	case TRACE_PRINT:
  18	case TRACE_BRANCH:
   19	case TRACE_GRAPH_ENT:
   20	case TRACE_GRAPH_RET:
  21		return 1;
  22	}
  23	return 0;
  24}
  25
  26static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
  27{
  28	struct ring_buffer_event *event;
  29	struct trace_entry *entry;
  30	unsigned int loops = 0;
  31
  32	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
  33		entry = ring_buffer_event_data(event);
  34
  35		/*
   36		 * The ring buffer is of size trace_buf_size; if
  37		 * we loop more than the size, there's something wrong
  38		 * with the ring buffer.
  39		 */
  40		if (loops++ > trace_buf_size) {
  41			printk(KERN_CONT ".. bad ring buffer ");
  42			goto failed;
  43		}
  44		if (!trace_valid_entry(entry)) {
  45			printk(KERN_CONT ".. invalid entry %d ",
  46				entry->type);
  47			goto failed;
  48		}
  49	}
  50	return 0;
  51
  52 failed:
  53	/* disable tracing */
  54	tracing_disabled = 1;
  55	printk(KERN_CONT ".. corrupted trace buffer .. ");
  56	return -1;
  57}
  58
  59/*
  60 * Test the trace buffer to see if all the elements
  61 * are still sane.
  62 */
  63static int __maybe_unused trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
  64{
  65	unsigned long flags, cnt = 0;
  66	int cpu, ret = 0;
  67
  68	/* Don't allow flipping of max traces now */
  69	local_irq_save(flags);
  70	arch_spin_lock(&buf->tr->max_lock);
  71
  72	cnt = ring_buffer_entries(buf->buffer);
  73
  74	/*
  75	 * The trace_test_buffer_cpu runs a while loop to consume all data.
  76	 * If the calling tracer is broken, and is constantly filling
  77	 * the buffer, this will run forever, and hard lock the box.
  78	 * We disable the ring buffer while we do this test to prevent
  79	 * a hard lock up.
  80	 */
  81	tracing_off();
  82	for_each_possible_cpu(cpu) {
  83		ret = trace_test_buffer_cpu(buf, cpu);
  84		if (ret)
  85			break;
  86	}
  87	tracing_on();
  88	arch_spin_unlock(&buf->tr->max_lock);
  89	local_irq_restore(flags);
  90
  91	if (count)
  92		*count = cnt;
  93
  94	return ret;
  95}
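
trace_test_buffer() above is the common check used by every selftest that follows: tracing is stopped, the helper walks each possible CPU's buffer, and the caller then looks at two results. A non-zero return value means a corrupted or unrecognized entry was found, while *count reports how many entries were recorded at all. A minimal sketch of that calling pattern (the helper name check_selftest_buffer is made up here; the real selftests below inline the same steps):

static int __maybe_unused check_selftest_buffer(struct trace_array *tr)
{
	unsigned long count;
	int ret;

	tracing_stop();			/* freeze the buffers before reading */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	tracing_start();

	if (ret)			/* corrupted or invalid entries found */
		return ret;
	if (!count)			/* tracer ran but recorded nothing */
		return -1;
	return 0;
}
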
  96
  97static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
  98{
  99	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
 100		trace->name, init_ret);
 101}
 102#ifdef CONFIG_FUNCTION_TRACER
 103
 104#ifdef CONFIG_DYNAMIC_FTRACE
 105
 106static int trace_selftest_test_probe1_cnt;
 107static void trace_selftest_test_probe1_func(unsigned long ip,
 108					    unsigned long pip,
 109					    struct ftrace_ops *op,
 110					    struct pt_regs *pt_regs)
 111{
 112	trace_selftest_test_probe1_cnt++;
 113}
 114
 115static int trace_selftest_test_probe2_cnt;
 116static void trace_selftest_test_probe2_func(unsigned long ip,
 117					    unsigned long pip,
 118					    struct ftrace_ops *op,
 119					    struct pt_regs *pt_regs)
 120{
 121	trace_selftest_test_probe2_cnt++;
 122}
 123
 124static int trace_selftest_test_probe3_cnt;
 125static void trace_selftest_test_probe3_func(unsigned long ip,
 126					    unsigned long pip,
 127					    struct ftrace_ops *op,
 128					    struct pt_regs *pt_regs)
 129{
 130	trace_selftest_test_probe3_cnt++;
 131}
 132
 133static int trace_selftest_test_global_cnt;
 134static void trace_selftest_test_global_func(unsigned long ip,
 135					    unsigned long pip,
 136					    struct ftrace_ops *op,
 137					    struct pt_regs *pt_regs)
 138{
 139	trace_selftest_test_global_cnt++;
 140}
 141
 142static int trace_selftest_test_dyn_cnt;
 143static void trace_selftest_test_dyn_func(unsigned long ip,
 144					 unsigned long pip,
 145					 struct ftrace_ops *op,
 146					 struct pt_regs *pt_regs)
 147{
 148	trace_selftest_test_dyn_cnt++;
 149}
 150
 151static struct ftrace_ops test_probe1 = {
 152	.func			= trace_selftest_test_probe1_func,
 153	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 154};
 155
 156static struct ftrace_ops test_probe2 = {
 157	.func			= trace_selftest_test_probe2_func,
 158	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 159};
 160
 161static struct ftrace_ops test_probe3 = {
 162	.func			= trace_selftest_test_probe3_func,
 163	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 164};
 165
 166static void print_counts(void)
 167{
 168	printk("(%d %d %d %d %d) ",
 169	       trace_selftest_test_probe1_cnt,
 170	       trace_selftest_test_probe2_cnt,
 171	       trace_selftest_test_probe3_cnt,
 172	       trace_selftest_test_global_cnt,
 173	       trace_selftest_test_dyn_cnt);
 174}
 175
 176static void reset_counts(void)
 177{
 178	trace_selftest_test_probe1_cnt = 0;
 179	trace_selftest_test_probe2_cnt = 0;
 180	trace_selftest_test_probe3_cnt = 0;
 181	trace_selftest_test_global_cnt = 0;
 182	trace_selftest_test_dyn_cnt = 0;
 183}
 184
 185static int trace_selftest_ops(struct trace_array *tr, int cnt)
 186{
 187	int save_ftrace_enabled = ftrace_enabled;
 188	struct ftrace_ops *dyn_ops;
 189	char *func1_name;
 190	char *func2_name;
 191	int len1;
 192	int len2;
 193	int ret = -1;
 194
 195	printk(KERN_CONT "PASSED\n");
 196	pr_info("Testing dynamic ftrace ops #%d: ", cnt);
 197
 198	ftrace_enabled = 1;
 199	reset_counts();
 200
 201	/* Handle PPC64 '.' name */
 202	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 203	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
 204	len1 = strlen(func1_name);
 205	len2 = strlen(func2_name);
 206
 207	/*
 208	 * Probe 1 will trace function 1.
 209	 * Probe 2 will trace function 2.
 210	 * Probe 3 will trace functions 1 and 2.
 211	 */
 212	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
 213	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
 214	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
 215	ftrace_set_filter(&test_probe3, func2_name, len2, 0);
 216
 217	register_ftrace_function(&test_probe1);
 218	register_ftrace_function(&test_probe2);
 219	register_ftrace_function(&test_probe3);
 220	/* First time we are running with main function */
 221	if (cnt > 1) {
 222		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
 223		register_ftrace_function(tr->ops);
 224	}
 225
 226	DYN_FTRACE_TEST_NAME();
 227
 228	print_counts();
 229
 230	if (trace_selftest_test_probe1_cnt != 1)
 231		goto out;
 232	if (trace_selftest_test_probe2_cnt != 0)
 233		goto out;
 234	if (trace_selftest_test_probe3_cnt != 1)
 235		goto out;
 236	if (cnt > 1) {
 237		if (trace_selftest_test_global_cnt == 0)
 238			goto out;
 239	}
 240
 241	DYN_FTRACE_TEST_NAME2();
 242
 243	print_counts();
 244
 245	if (trace_selftest_test_probe1_cnt != 1)
 246		goto out;
 247	if (trace_selftest_test_probe2_cnt != 1)
 248		goto out;
 249	if (trace_selftest_test_probe3_cnt != 2)
 250		goto out;
 251
 252	/* Add a dynamic probe */
 253	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
 254	if (!dyn_ops) {
 255		printk("MEMORY ERROR ");
 256		goto out;
 257	}
 258
 259	dyn_ops->func = trace_selftest_test_dyn_func;
 260
 261	register_ftrace_function(dyn_ops);
 262
 263	trace_selftest_test_global_cnt = 0;
 264
 265	DYN_FTRACE_TEST_NAME();
 266
 267	print_counts();
 268
 269	if (trace_selftest_test_probe1_cnt != 2)
 270		goto out_free;
 271	if (trace_selftest_test_probe2_cnt != 1)
 272		goto out_free;
 273	if (trace_selftest_test_probe3_cnt != 3)
 274		goto out_free;
 275	if (cnt > 1) {
 276		if (trace_selftest_test_global_cnt == 0)
 277			goto out_free;
 278	}
 279	if (trace_selftest_test_dyn_cnt == 0)
 280		goto out_free;
 281
 282	DYN_FTRACE_TEST_NAME2();
 283
 284	print_counts();
 285
 286	if (trace_selftest_test_probe1_cnt != 2)
 287		goto out_free;
 288	if (trace_selftest_test_probe2_cnt != 2)
 289		goto out_free;
 290	if (trace_selftest_test_probe3_cnt != 4)
 291		goto out_free;
  292
 293	ret = 0;
 294 out_free:
 295	unregister_ftrace_function(dyn_ops);
 296	kfree(dyn_ops);
 297
 298 out:
 299	/* Purposely unregister in the same order */
 300	unregister_ftrace_function(&test_probe1);
 301	unregister_ftrace_function(&test_probe2);
 302	unregister_ftrace_function(&test_probe3);
 303	if (cnt > 1)
 304		unregister_ftrace_function(tr->ops);
 305	ftrace_reset_array_ops(tr);
 306
 307	/* Make sure everything is off */
 308	reset_counts();
 309	DYN_FTRACE_TEST_NAME();
 310	DYN_FTRACE_TEST_NAME();
 311
 312	if (trace_selftest_test_probe1_cnt ||
 313	    trace_selftest_test_probe2_cnt ||
 314	    trace_selftest_test_probe3_cnt ||
 315	    trace_selftest_test_global_cnt ||
 316	    trace_selftest_test_dyn_cnt)
 317		ret = -1;
 318
 319	ftrace_enabled = save_ftrace_enabled;
 320
 321	return ret;
 322}
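
The expected counter values in trace_selftest_ops() follow directly from the filter setup: probe 1 matches only DYN_FTRACE_TEST_NAME, probe 2 only DYN_FTRACE_TEST_NAME2, and probe 3 matches both, because its second ftrace_set_filter() call passes reset=0 and therefore appends to the existing filter instead of replacing it. Assuming nothing else hits those two functions, the assertions above amount to this worked table:

/*
 * Hedged worked example of the counters checked above (dyn_ops is
 * registered just before the third call):
 *
 *   call                    probe1  probe2  probe3
 *   DYN_FTRACE_TEST_NAME()     1       0       1
 *   DYN_FTRACE_TEST_NAME2()    1       1       2
 *   DYN_FTRACE_TEST_NAME()     2       1       3
 *   DYN_FTRACE_TEST_NAME2()    2       2       4
 */
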
 323
 324/* Test dynamic code modification and ftrace filters */
 325static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 326						  struct trace_array *tr,
 327						  int (*func)(void))
 328{
 329	int save_ftrace_enabled = ftrace_enabled;
 330	unsigned long count;
 331	char *func_name;
 332	int ret;
 333
 334	/* The ftrace test PASSED */
 335	printk(KERN_CONT "PASSED\n");
 336	pr_info("Testing dynamic ftrace: ");
 337
 338	/* enable tracing, and record the filter function */
 339	ftrace_enabled = 1;
 340
  341	/* passed in by parameter to keep gcc from optimizing it away */
 342	func();
 343
 344	/*
 345	 * Some archs *cough*PowerPC*cough* add characters to the
 346	 * start of the function names. We simply put a '*' to
 347	 * accommodate them.
 348	 */
 349	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 350
 351	/* filter only on our function */
 352	ftrace_set_global_filter(func_name, strlen(func_name), 1);
 353
 354	/* enable tracing */
 355	ret = tracer_init(trace, tr);
 356	if (ret) {
 357		warn_failed_init_tracer(trace, ret);
 358		goto out;
 359	}
 360
 361	/* Sleep for a 1/10 of a second */
 362	msleep(100);
 363
 364	/* we should have nothing in the buffer */
 365	ret = trace_test_buffer(&tr->trace_buffer, &count);
 366	if (ret)
 367		goto out;
 368
 369	if (count) {
 370		ret = -1;
 371		printk(KERN_CONT ".. filter did not filter .. ");
 372		goto out;
 373	}
 374
 375	/* call our function again */
 376	func();
 377
 378	/* sleep again */
 379	msleep(100);
 380
 381	/* stop the tracing. */
 382	tracing_stop();
 383	ftrace_enabled = 0;
 384
 385	/* check the trace buffer */
 386	ret = trace_test_buffer(&tr->trace_buffer, &count);
 387
 388	ftrace_enabled = 1;
 389	tracing_start();
 390
 391	/* we should only have one item */
 392	if (!ret && count != 1) {
 393		trace->reset(tr);
 394		printk(KERN_CONT ".. filter failed count=%ld ..", count);
 395		ret = -1;
 396		goto out;
 397	}
 398
 399	/* Test the ops with global tracing running */
 400	ret = trace_selftest_ops(tr, 1);
 401	trace->reset(tr);
 402
 403 out:
 404	ftrace_enabled = save_ftrace_enabled;
 405
 406	/* Enable tracing on all functions again */
 407	ftrace_set_global_filter(NULL, 0, 1);
 408
 409	/* Test the ops with global tracing off */
 410	if (!ret)
 411		ret = trace_selftest_ops(tr, 2);
 412
 413	return ret;
 414}
 415
 416static int trace_selftest_recursion_cnt;
 417static void trace_selftest_test_recursion_func(unsigned long ip,
 418					       unsigned long pip,
 419					       struct ftrace_ops *op,
 420					       struct pt_regs *pt_regs)
 421{
 422	/*
 423	 * This function is registered without the recursion safe flag.
 424	 * The ftrace infrastructure should provide the recursion
 425	 * protection. If not, this will crash the kernel!
 426	 */
 427	if (trace_selftest_recursion_cnt++ > 10)
 428		return;
 429	DYN_FTRACE_TEST_NAME();
 430}
 431
 432static void trace_selftest_test_recursion_safe_func(unsigned long ip,
 433						    unsigned long pip,
 434						    struct ftrace_ops *op,
 435						    struct pt_regs *pt_regs)
 436{
 437	/*
 438	 * We said we would provide our own recursion. By calling
 439	 * this function again, we should recurse back into this function
 440	 * and count again. But this only happens if the arch supports
 441	 * all of ftrace features and nothing else is using the function
 442	 * tracing utility.
 443	 */
 444	if (trace_selftest_recursion_cnt++)
 445		return;
 446	DYN_FTRACE_TEST_NAME();
 447}
 448
 449static struct ftrace_ops test_rec_probe = {
  450	.func			= trace_selftest_test_recursion_func,
  451};
 452
 453static struct ftrace_ops test_recsafe_probe = {
 454	.func			= trace_selftest_test_recursion_safe_func,
 455	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 456};
 457
 458static int
 459trace_selftest_function_recursion(void)
 460{
 461	int save_ftrace_enabled = ftrace_enabled;
 462	char *func_name;
 463	int len;
 464	int ret;
 465
 466	/* The previous test PASSED */
 467	pr_cont("PASSED\n");
 468	pr_info("Testing ftrace recursion: ");
 469
 470
 471	/* enable tracing, and record the filter function */
 472	ftrace_enabled = 1;
 473
 474	/* Handle PPC64 '.' name */
 475	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 476	len = strlen(func_name);
 477
 478	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
 479	if (ret) {
 480		pr_cont("*Could not set filter* ");
 481		goto out;
 482	}
 483
 484	ret = register_ftrace_function(&test_rec_probe);
 485	if (ret) {
 486		pr_cont("*could not register callback* ");
 487		goto out;
 488	}
 489
 490	DYN_FTRACE_TEST_NAME();
 491
 492	unregister_ftrace_function(&test_rec_probe);
 493
 494	ret = -1;
 495	if (trace_selftest_recursion_cnt != 1) {
  496		pr_cont("*callback not called once (%d)* ",
  497			trace_selftest_recursion_cnt);
 498		goto out;
 499	}
 500
 501	trace_selftest_recursion_cnt = 1;
 502
 503	pr_cont("PASSED\n");
 504	pr_info("Testing ftrace recursion safe: ");
 505
 506	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
 507	if (ret) {
 508		pr_cont("*Could not set filter* ");
 509		goto out;
 510	}
 511
 512	ret = register_ftrace_function(&test_recsafe_probe);
 513	if (ret) {
 514		pr_cont("*could not register callback* ");
 515		goto out;
 516	}
 517
 518	DYN_FTRACE_TEST_NAME();
 519
 520	unregister_ftrace_function(&test_recsafe_probe);
 521
 522	ret = -1;
 523	if (trace_selftest_recursion_cnt != 2) {
 524		pr_cont("*callback not called expected 2 times (%d)* ",
 525			trace_selftest_recursion_cnt);
 526		goto out;
 527	}
 528
 529	ret = 0;
 530out:
 531	ftrace_enabled = save_ftrace_enabled;
 532
 533	return ret;
 534}
 535#else
 536# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
 537# define trace_selftest_function_recursion() ({ 0; })
 538#endif /* CONFIG_DYNAMIC_FTRACE */
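
The two counts the recursion test checks for follow from who supplies the guard: test_rec_probe is registered without FTRACE_OPS_FL_RECURSION_SAFE, so ftrace itself suppresses the re-entry when the callback calls the traced DYN_FTRACE_TEST_NAME(), and the counter ends at 1; test_recsafe_probe claims to be recursion safe, so the nested call reaches the callback once more and its own counter check stops it there, at 2. A hedged sketch of the self-guarding shape such a callback is expected to have (the name and the bare static flag are illustrative only; real code would use a per-CPU or per-task marker):

static void example_recursion_safe_func(unsigned long ip, unsigned long pip,
					struct ftrace_ops *op,
					struct pt_regs *pt_regs)
{
	static int in_callback;		/* illustration only, not SMP safe */

	if (in_callback)
		return;			/* already inside ourselves: bail out */
	in_callback = 1;
	DYN_FTRACE_TEST_NAME();		/* may recurse back into this callback */
	in_callback = 0;
}
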
 539
 540static enum {
 541	TRACE_SELFTEST_REGS_START,
 542	TRACE_SELFTEST_REGS_FOUND,
 543	TRACE_SELFTEST_REGS_NOT_FOUND,
 544} trace_selftest_regs_stat;
 545
 546static void trace_selftest_test_regs_func(unsigned long ip,
 547					  unsigned long pip,
 548					  struct ftrace_ops *op,
 549					  struct pt_regs *pt_regs)
 550{
  551	if (pt_regs)
  552		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
 553	else
 554		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
 555}
 556
 557static struct ftrace_ops test_regs_probe = {
 558	.func		= trace_selftest_test_regs_func,
 559	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
 560};
 561
 562static int
 563trace_selftest_function_regs(void)
 564{
 565	int save_ftrace_enabled = ftrace_enabled;
 566	char *func_name;
 567	int len;
 568	int ret;
 569	int supported = 0;
 570
 571#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 572	supported = 1;
 573#endif
 574
 575	/* The previous test PASSED */
 576	pr_cont("PASSED\n");
 577	pr_info("Testing ftrace regs%s: ",
 578		!supported ? "(no arch support)" : "");
 579
 580	/* enable tracing, and record the filter function */
 581	ftrace_enabled = 1;
 582
 583	/* Handle PPC64 '.' name */
 584	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 585	len = strlen(func_name);
 586
 587	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
 588	/*
 589	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
 590	 * This test really doesn't care.
 591	 */
 592	if (ret && ret != -ENODEV) {
 593		pr_cont("*Could not set filter* ");
 594		goto out;
 595	}
 596
 597	ret = register_ftrace_function(&test_regs_probe);
 598	/*
 599	 * Now if the arch does not support passing regs, then this should
 600	 * have failed.
 601	 */
 602	if (!supported) {
 603		if (!ret) {
 604			pr_cont("*registered save-regs without arch support* ");
 605			goto out;
 606		}
 607		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
 608		ret = register_ftrace_function(&test_regs_probe);
 609	}
 610	if (ret) {
 611		pr_cont("*could not register callback* ");
 612		goto out;
 613	}
 614
 615
 616	DYN_FTRACE_TEST_NAME();
 617
 618	unregister_ftrace_function(&test_regs_probe);
 619
 620	ret = -1;
 621
 622	switch (trace_selftest_regs_stat) {
 623	case TRACE_SELFTEST_REGS_START:
 624		pr_cont("*callback never called* ");
 625		goto out;
 626
 627	case TRACE_SELFTEST_REGS_FOUND:
 628		if (supported)
 629			break;
 630		pr_cont("*callback received regs without arch support* ");
 631		goto out;
 632
 633	case TRACE_SELFTEST_REGS_NOT_FOUND:
 634		if (!supported)
 635			break;
 636		pr_cont("*callback received NULL regs* ");
 637		goto out;
 638	}
 639
 640	ret = 0;
 641out:
 642	ftrace_enabled = save_ftrace_enabled;
 643
 644	return ret;
 645}
 646
 647/*
 648 * Simple verification test of ftrace function tracer.
 649 * Enable ftrace, sleep 1/10 second, and then read the trace
 650 * buffer to see if all is in order.
 651 */
 652__init int
 653trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 654{
 655	int save_ftrace_enabled = ftrace_enabled;
 656	unsigned long count;
 657	int ret;
 658
 659#ifdef CONFIG_DYNAMIC_FTRACE
 660	if (ftrace_filter_param) {
 661		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
 662		return 0;
 663	}
 664#endif
 665
 666	/* make sure msleep has been recorded */
 667	msleep(1);
 668
 669	/* start the tracing */
 670	ftrace_enabled = 1;
 671
 672	ret = tracer_init(trace, tr);
 673	if (ret) {
 674		warn_failed_init_tracer(trace, ret);
 675		goto out;
 676	}
 677
 678	/* Sleep for a 1/10 of a second */
 679	msleep(100);
 680	/* stop the tracing. */
 681	tracing_stop();
 682	ftrace_enabled = 0;
 683
 684	/* check the trace buffer */
 685	ret = trace_test_buffer(&tr->trace_buffer, &count);
 686
 687	ftrace_enabled = 1;
 688	trace->reset(tr);
 689	tracing_start();
 690
 691	if (!ret && !count) {
 692		printk(KERN_CONT ".. no entries found ..");
 693		ret = -1;
 694		goto out;
 695	}
 696
 697	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
 698						     DYN_FTRACE_TEST_NAME);
 699	if (ret)
 700		goto out;
 701
 702	ret = trace_selftest_function_recursion();
 703	if (ret)
 704		goto out;
 705
 706	ret = trace_selftest_function_regs();
 707 out:
 708	ftrace_enabled = save_ftrace_enabled;
 709
 710	/* kill ftrace totally if we failed */
 711	if (ret)
 712		ftrace_kill();
 713
 714	return ret;
 715}
 716#endif /* CONFIG_FUNCTION_TRACER */
 717
 718
 719#ifdef CONFIG_FUNCTION_GRAPH_TRACER
  720
 721/* Maximum number of functions to trace before diagnosing a hang */
 722#define GRAPH_MAX_FUNC_TEST	100000000
 723
 724static unsigned int graph_hang_thresh;
 725
 726/* Wrap the real function entry probe to avoid possible hanging */
  727static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
  728{
 729	/* This is harmlessly racy, we want to approximately detect a hang */
 730	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
 731		ftrace_graph_stop();
 732		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
 733		if (ftrace_dump_on_oops) {
 734			ftrace_dump(DUMP_ALL);
 735			/* ftrace_dump() disables tracing */
 736			tracing_on();
 737		}
 738		return 0;
 739	}
 740
 741	return trace_graph_entry(trace);
 742}
  743
 744/*
  745 * Pretty much the same as for the function tracer from which the selftest
 746 * has been borrowed.
 747 */
 748__init int
 749trace_selftest_startup_function_graph(struct tracer *trace,
 750					struct trace_array *tr)
 751{
 752	int ret;
  753	unsigned long count;
  754
 755#ifdef CONFIG_DYNAMIC_FTRACE
 756	if (ftrace_filter_param) {
 757		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
 758		return 0;
 759	}
 760#endif
 761
 762	/*
 763	 * Simulate the init() callback but we attach a watchdog callback
 764	 * to detect and recover from possible hangs
 765	 */
 766	tracing_reset_online_cpus(&tr->trace_buffer);
 767	set_graph_array(tr);
 768	ret = register_ftrace_graph(&trace_graph_return,
 769				    &trace_graph_entry_watchdog);
 770	if (ret) {
 771		warn_failed_init_tracer(trace, ret);
 772		goto out;
 773	}
 774	tracing_start_cmdline_record();
 775
 776	/* Sleep for a 1/10 of a second */
 777	msleep(100);
 778
 779	/* Have we just recovered from a hang? */
 780	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
 781		tracing_selftest_disabled = true;
 782		ret = -1;
 783		goto out;
 784	}
 785
 786	tracing_stop();
 787
 788	/* check the trace buffer */
  789	ret = trace_test_buffer(&tr->trace_buffer, &count);
 790
 791	trace->reset(tr);
 792	tracing_start();
 793
 794	if (!ret && !count) {
 795		printk(KERN_CONT ".. no entries found ..");
 796		ret = -1;
 797		goto out;
 798	}
 799
  800	/* Don't test dynamic tracing, the function tracer already did */
  801
 802out:
 803	/* Stop it if we failed */
 804	if (ret)
 805		ftrace_graph_stop();
 806
 807	return ret;
 808}
 809#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 810
 811
 812#ifdef CONFIG_IRQSOFF_TRACER
 813int
 814trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 815{
 816	unsigned long save_max = tr->max_latency;
 817	unsigned long count;
 818	int ret;
 819
 820	/* start the tracing */
 821	ret = tracer_init(trace, tr);
 822	if (ret) {
 823		warn_failed_init_tracer(trace, ret);
 824		return ret;
 825	}
 826
 827	/* reset the max latency */
 828	tr->max_latency = 0;
 829	/* disable interrupts for a bit */
 830	local_irq_disable();
 831	udelay(100);
 832	local_irq_enable();
 833
 834	/*
 835	 * Stop the tracer to avoid a warning subsequent
 836	 * to buffer flipping failure because tracing_stop()
 837	 * disables the tr and max buffers, making flipping impossible
  838	 * in case of parallel max irqs off latencies.
 839	 */
 840	trace->stop(tr);
 841	/* stop the tracing. */
 842	tracing_stop();
 843	/* check both trace buffers */
 844	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 845	if (!ret)
 846		ret = trace_test_buffer(&tr->max_buffer, &count);
 847	trace->reset(tr);
 848	tracing_start();
 849
 850	if (!ret && !count) {
 851		printk(KERN_CONT ".. no entries found ..");
 852		ret = -1;
 853	}
 854
 855	tr->max_latency = save_max;
 856
 857	return ret;
 858}
 859#endif /* CONFIG_IRQSOFF_TRACER */
 860
 861#ifdef CONFIG_PREEMPT_TRACER
 862int
 863trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 864{
 865	unsigned long save_max = tr->max_latency;
 866	unsigned long count;
 867	int ret;
 868
 869	/*
 870	 * Now that the big kernel lock is no longer preemptable,
 871	 * and this is called with the BKL held, it will always
 872	 * fail. If preemption is already disabled, simply
 873	 * pass the test. When the BKL is removed, or becomes
 874	 * preemptible again, we will once again test this,
 875	 * so keep it in.
 876	 */
 877	if (preempt_count()) {
 878		printk(KERN_CONT "can not test ... force ");
 879		return 0;
 880	}
 881
 882	/* start the tracing */
 883	ret = tracer_init(trace, tr);
 884	if (ret) {
 885		warn_failed_init_tracer(trace, ret);
 886		return ret;
 887	}
 888
 889	/* reset the max latency */
 890	tr->max_latency = 0;
 891	/* disable preemption for a bit */
 892	preempt_disable();
 893	udelay(100);
 894	preempt_enable();
 895
 896	/*
 897	 * Stop the tracer to avoid a warning subsequent
 898	 * to buffer flipping failure because tracing_stop()
 899	 * disables the tr and max buffers, making flipping impossible
  900	 * in case of parallel max preempt off latencies.
 901	 */
 902	trace->stop(tr);
 903	/* stop the tracing. */
 904	tracing_stop();
 905	/* check both trace buffers */
 906	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 907	if (!ret)
 908		ret = trace_test_buffer(&tr->max_buffer, &count);
 909	trace->reset(tr);
 910	tracing_start();
 911
 912	if (!ret && !count) {
 913		printk(KERN_CONT ".. no entries found ..");
 914		ret = -1;
 915	}
 916
 917	tr->max_latency = save_max;
 918
 919	return ret;
 920}
 921#endif /* CONFIG_PREEMPT_TRACER */
 922
 923#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
 924int
 925trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
 926{
 927	unsigned long save_max = tr->max_latency;
 928	unsigned long count;
 929	int ret;
 930
 931	/*
 932	 * Now that the big kernel lock is no longer preemptable,
 933	 * and this is called with the BKL held, it will always
 934	 * fail. If preemption is already disabled, simply
 935	 * pass the test. When the BKL is removed, or becomes
 936	 * preemptible again, we will once again test this,
 937	 * so keep it in.
 938	 */
 939	if (preempt_count()) {
 940		printk(KERN_CONT "can not test ... force ");
 941		return 0;
 942	}
 943
 944	/* start the tracing */
 945	ret = tracer_init(trace, tr);
 946	if (ret) {
 947		warn_failed_init_tracer(trace, ret);
 948		goto out_no_start;
 949	}
 950
 951	/* reset the max latency */
 952	tr->max_latency = 0;
 953
 954	/* disable preemption and interrupts for a bit */
 955	preempt_disable();
 956	local_irq_disable();
 957	udelay(100);
 958	preempt_enable();
 959	/* reverse the order of preempt vs irqs */
 960	local_irq_enable();
 961
 962	/*
 963	 * Stop the tracer to avoid a warning subsequent
 964	 * to buffer flipping failure because tracing_stop()
 965	 * disables the tr and max buffers, making flipping impossible
  966	 * in case of parallel max irqs/preempt off latencies.
 967	 */
 968	trace->stop(tr);
 969	/* stop the tracing. */
 970	tracing_stop();
 971	/* check both trace buffers */
 972	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 973	if (ret)
 974		goto out;
 975
 976	ret = trace_test_buffer(&tr->max_buffer, &count);
 977	if (ret)
 978		goto out;
 979
 980	if (!ret && !count) {
 981		printk(KERN_CONT ".. no entries found ..");
 982		ret = -1;
 983		goto out;
 984	}
 985
 986	/* do the test by disabling interrupts first this time */
 987	tr->max_latency = 0;
 988	tracing_start();
 989	trace->start(tr);
 990
 991	preempt_disable();
 992	local_irq_disable();
 993	udelay(100);
 994	preempt_enable();
 995	/* reverse the order of preempt vs irqs */
 996	local_irq_enable();
 997
 998	trace->stop(tr);
 999	/* stop the tracing. */
1000	tracing_stop();
1001	/* check both trace buffers */
1002	ret = trace_test_buffer(&tr->trace_buffer, NULL);
1003	if (ret)
1004		goto out;
1005
1006	ret = trace_test_buffer(&tr->max_buffer, &count);
1007
1008	if (!ret && !count) {
1009		printk(KERN_CONT ".. no entries found ..");
1010		ret = -1;
1011		goto out;
1012	}
1013
1014out:
1015	tracing_start();
1016out_no_start:
1017	trace->reset(tr);
1018	tr->max_latency = save_max;
1019
1020	return ret;
1021}
1022#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
1023
1024#ifdef CONFIG_NOP_TRACER
1025int
1026trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
1027{
1028	/* What could possibly go wrong? */
1029	return 0;
1030}
1031#endif
1032
1033#ifdef CONFIG_SCHED_TRACER
1034
1035struct wakeup_test_data {
1036	struct completion	is_ready;
1037	int			go;
1038};
1039
1040static int trace_wakeup_test_thread(void *data)
1041{
1042	/* Make this a -deadline thread */
1043	static const struct sched_attr attr = {
1044		.sched_policy = SCHED_DEADLINE,
1045		.sched_runtime = 100000ULL,
1046		.sched_deadline = 10000000ULL,
1047		.sched_period = 10000000ULL
1048	};
1049	struct wakeup_test_data *x = data;
1050
1051	sched_setattr(current, &attr);
1052
 1053	/* Let the test know we have a new prio */
1054	complete(&x->is_ready);
1055
1056	/* now go to sleep and let the test wake us up */
1057	set_current_state(TASK_INTERRUPTIBLE);
1058	while (!x->go) {
1059		schedule();
1060		set_current_state(TASK_INTERRUPTIBLE);
1061	}
1062
1063	complete(&x->is_ready);
1064
1065	set_current_state(TASK_INTERRUPTIBLE);
1066
1067	/* we are awake, now wait to disappear */
1068	while (!kthread_should_stop()) {
1069		schedule();
1070		set_current_state(TASK_INTERRUPTIBLE);
1071	}
1072
1073	__set_current_state(TASK_RUNNING);
1074
1075	return 0;
1076}
1077int
1078trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
1079{
1080	unsigned long save_max = tr->max_latency;
1081	struct task_struct *p;
1082	struct wakeup_test_data data;
1083	unsigned long count;
1084	int ret;
1085
1086	memset(&data, 0, sizeof(data));
1087
1088	init_completion(&data.is_ready);
1089
1090	/* create a -deadline thread */
1091	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
1092	if (IS_ERR(p)) {
1093		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
1094		return -1;
1095	}
1096
1097	/* make sure the thread is running at -deadline policy */
1098	wait_for_completion(&data.is_ready);
1099
1100	/* start the tracing */
1101	ret = tracer_init(trace, tr);
1102	if (ret) {
1103		warn_failed_init_tracer(trace, ret);
1104		return ret;
1105	}
1106
1107	/* reset the max latency */
1108	tr->max_latency = 0;
1109
1110	while (p->on_rq) {
1111		/*
1112		 * Sleep to make sure the -deadline thread is asleep too.
1113		 * On virtual machines we can't rely on timings,
1114		 * but we want to make sure this test still works.
1115		 */
1116		msleep(100);
1117	}
1118
1119	init_completion(&data.is_ready);
1120
1121	data.go = 1;
1122	/* memory barrier is in the wake_up_process() */
1123
1124	wake_up_process(p);
1125
1126	/* Wait for the task to wake up */
1127	wait_for_completion(&data.is_ready);
1128
1129	/* stop the tracing. */
1130	tracing_stop();
1131	/* check both trace buffers */
1132	ret = trace_test_buffer(&tr->trace_buffer, NULL);
1133	if (!ret)
1134		ret = trace_test_buffer(&tr->max_buffer, &count);
1135
1136
1137	trace->reset(tr);
1138	tracing_start();
1139
1140	tr->max_latency = save_max;
1141
1142	/* kill the thread */
1143	kthread_stop(p);
1144
1145	if (!ret && !count) {
1146		printk(KERN_CONT ".. no entries found ..");
1147		ret = -1;
1148	}
1149
1150	return ret;
1151}
1152#endif /* CONFIG_SCHED_TRACER */
1153
1154#ifdef CONFIG_BRANCH_TRACER
1155int
1156trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
1157{
1158	unsigned long count;
1159	int ret;
1160
1161	/* start the tracing */
1162	ret = tracer_init(trace, tr);
1163	if (ret) {
1164		warn_failed_init_tracer(trace, ret);
1165		return ret;
1166	}
1167
1168	/* Sleep for a 1/10 of a second */
1169	msleep(100);
1170	/* stop the tracing. */
1171	tracing_stop();
1172	/* check the trace buffer */
1173	ret = trace_test_buffer(&tr->trace_buffer, &count);
1174	trace->reset(tr);
1175	tracing_start();
1176
1177	if (!ret && !count) {
1178		printk(KERN_CONT ".. no entries found ..");
1179		ret = -1;
1180	}
1181
1182	return ret;
1183}
1184#endif /* CONFIG_BRANCH_TRACER */
1185
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/* Include in trace.c */
   3
   4#include <uapi/linux/sched/types.h>
   5#include <linux/stringify.h>
   6#include <linux/kthread.h>
   7#include <linux/delay.h>
   8#include <linux/slab.h>
   9
  10static inline int trace_valid_entry(struct trace_entry *entry)
  11{
  12	switch (entry->type) {
  13	case TRACE_FN:
  14	case TRACE_CTX:
  15	case TRACE_WAKE:
  16	case TRACE_STACK:
  17	case TRACE_PRINT:
  18	case TRACE_BRANCH:
  19	case TRACE_GRAPH_ENT:
  20	case TRACE_GRAPH_RETADDR_ENT:
  21	case TRACE_GRAPH_RET:
  22		return 1;
  23	}
  24	return 0;
  25}
  26
  27static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
  28{
  29	struct ring_buffer_event *event;
  30	struct trace_entry *entry;
  31	unsigned int loops = 0;
  32
  33	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
  34		entry = ring_buffer_event_data(event);
  35
  36		/*
   37		 * The ring buffer is of size trace_buf_size; if
  38		 * we loop more than the size, there's something wrong
  39		 * with the ring buffer.
  40		 */
  41		if (loops++ > trace_buf_size) {
  42			printk(KERN_CONT ".. bad ring buffer ");
  43			goto failed;
  44		}
  45		if (!trace_valid_entry(entry)) {
  46			printk(KERN_CONT ".. invalid entry %d ",
  47				entry->type);
  48			goto failed;
  49		}
  50	}
  51	return 0;
  52
  53 failed:
  54	/* disable tracing */
  55	tracing_disabled = 1;
  56	printk(KERN_CONT ".. corrupted trace buffer .. ");
  57	return -1;
  58}
  59
  60/*
  61 * Test the trace buffer to see if all the elements
  62 * are still sane.
  63 */
  64static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
  65{
  66	unsigned long flags, cnt = 0;
  67	int cpu, ret = 0;
  68
  69	/* Don't allow flipping of max traces now */
  70	local_irq_save(flags);
  71	arch_spin_lock(&buf->tr->max_lock);
  72
  73	cnt = ring_buffer_entries(buf->buffer);
  74
  75	/*
  76	 * The trace_test_buffer_cpu runs a while loop to consume all data.
  77	 * If the calling tracer is broken, and is constantly filling
  78	 * the buffer, this will run forever, and hard lock the box.
  79	 * We disable the ring buffer while we do this test to prevent
  80	 * a hard lock up.
  81	 */
  82	tracing_off();
  83	for_each_possible_cpu(cpu) {
  84		ret = trace_test_buffer_cpu(buf, cpu);
  85		if (ret)
  86			break;
  87	}
  88	tracing_on();
  89	arch_spin_unlock(&buf->tr->max_lock);
  90	local_irq_restore(flags);
  91
  92	if (count)
  93		*count = cnt;
  94
  95	return ret;
  96}
  97
  98static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
  99{
 100	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
 101		trace->name, init_ret);
 102}
 103#ifdef CONFIG_FUNCTION_TRACER
 104
 105#ifdef CONFIG_DYNAMIC_FTRACE
 106
 107static int trace_selftest_test_probe1_cnt;
 108static void trace_selftest_test_probe1_func(unsigned long ip,
 109					    unsigned long pip,
 110					    struct ftrace_ops *op,
 111					    struct ftrace_regs *fregs)
 112{
 113	trace_selftest_test_probe1_cnt++;
 114}
 115
 116static int trace_selftest_test_probe2_cnt;
 117static void trace_selftest_test_probe2_func(unsigned long ip,
 118					    unsigned long pip,
 119					    struct ftrace_ops *op,
 120					    struct ftrace_regs *fregs)
 121{
 122	trace_selftest_test_probe2_cnt++;
 123}
 124
 125static int trace_selftest_test_probe3_cnt;
 126static void trace_selftest_test_probe3_func(unsigned long ip,
 127					    unsigned long pip,
 128					    struct ftrace_ops *op,
 129					    struct ftrace_regs *fregs)
 130{
 131	trace_selftest_test_probe3_cnt++;
 132}
 133
 134static int trace_selftest_test_global_cnt;
 135static void trace_selftest_test_global_func(unsigned long ip,
 136					    unsigned long pip,
 137					    struct ftrace_ops *op,
 138					    struct ftrace_regs *fregs)
 139{
 140	trace_selftest_test_global_cnt++;
 141}
 142
 143static int trace_selftest_test_dyn_cnt;
 144static void trace_selftest_test_dyn_func(unsigned long ip,
 145					 unsigned long pip,
 146					 struct ftrace_ops *op,
 147					 struct ftrace_regs *fregs)
 148{
 149	trace_selftest_test_dyn_cnt++;
 150}
 151
 152static struct ftrace_ops test_probe1 = {
  153	.func			= trace_selftest_test_probe1_func,
  154};
 155
 156static struct ftrace_ops test_probe2 = {
  157	.func			= trace_selftest_test_probe2_func,
  158};
 159
 160static struct ftrace_ops test_probe3 = {
  161	.func			= trace_selftest_test_probe3_func,
  162};
 163
 164static void print_counts(void)
 165{
 166	printk("(%d %d %d %d %d) ",
 167	       trace_selftest_test_probe1_cnt,
 168	       trace_selftest_test_probe2_cnt,
 169	       trace_selftest_test_probe3_cnt,
 170	       trace_selftest_test_global_cnt,
 171	       trace_selftest_test_dyn_cnt);
 172}
 173
 174static void reset_counts(void)
 175{
 176	trace_selftest_test_probe1_cnt = 0;
 177	trace_selftest_test_probe2_cnt = 0;
 178	trace_selftest_test_probe3_cnt = 0;
 179	trace_selftest_test_global_cnt = 0;
 180	trace_selftest_test_dyn_cnt = 0;
 181}
 182
 183static int trace_selftest_ops(struct trace_array *tr, int cnt)
 184{
 185	int save_ftrace_enabled = ftrace_enabled;
 186	struct ftrace_ops *dyn_ops;
 187	char *func1_name;
 188	char *func2_name;
 189	int len1;
 190	int len2;
 191	int ret = -1;
 192
 193	printk(KERN_CONT "PASSED\n");
 194	pr_info("Testing dynamic ftrace ops #%d: ", cnt);
 195
 196	ftrace_enabled = 1;
 197	reset_counts();
 198
 199	/* Handle PPC64 '.' name */
 200	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 201	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
 202	len1 = strlen(func1_name);
 203	len2 = strlen(func2_name);
 204
 205	/*
 206	 * Probe 1 will trace function 1.
 207	 * Probe 2 will trace function 2.
 208	 * Probe 3 will trace functions 1 and 2.
 209	 */
 210	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
 211	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
 212	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
 213	ftrace_set_filter(&test_probe3, func2_name, len2, 0);
 214
 215	register_ftrace_function(&test_probe1);
 216	register_ftrace_function(&test_probe2);
 217	register_ftrace_function(&test_probe3);
 218	/* First time we are running with main function */
 219	if (cnt > 1) {
 220		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
 221		register_ftrace_function(tr->ops);
 222	}
 223
 224	DYN_FTRACE_TEST_NAME();
 225
 226	print_counts();
 227
 228	if (trace_selftest_test_probe1_cnt != 1)
 229		goto out;
 230	if (trace_selftest_test_probe2_cnt != 0)
 231		goto out;
 232	if (trace_selftest_test_probe3_cnt != 1)
 233		goto out;
 234	if (cnt > 1) {
 235		if (trace_selftest_test_global_cnt == 0)
 236			goto out;
 237	}
 238
 239	DYN_FTRACE_TEST_NAME2();
 240
 241	print_counts();
 242
 243	if (trace_selftest_test_probe1_cnt != 1)
 244		goto out;
 245	if (trace_selftest_test_probe2_cnt != 1)
 246		goto out;
 247	if (trace_selftest_test_probe3_cnt != 2)
 248		goto out;
 249
 250	/* Add a dynamic probe */
 251	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
 252	if (!dyn_ops) {
 253		printk("MEMORY ERROR ");
 254		goto out;
 255	}
 256
 257	dyn_ops->func = trace_selftest_test_dyn_func;
 258
 259	register_ftrace_function(dyn_ops);
 260
 261	trace_selftest_test_global_cnt = 0;
 262
 263	DYN_FTRACE_TEST_NAME();
 264
 265	print_counts();
 266
 267	if (trace_selftest_test_probe1_cnt != 2)
 268		goto out_free;
 269	if (trace_selftest_test_probe2_cnt != 1)
 270		goto out_free;
 271	if (trace_selftest_test_probe3_cnt != 3)
 272		goto out_free;
 273	if (cnt > 1) {
 274		if (trace_selftest_test_global_cnt == 0)
 275			goto out_free;
 276	}
 277	if (trace_selftest_test_dyn_cnt == 0)
 278		goto out_free;
 279
 280	DYN_FTRACE_TEST_NAME2();
 281
 282	print_counts();
 283
 284	if (trace_selftest_test_probe1_cnt != 2)
 285		goto out_free;
 286	if (trace_selftest_test_probe2_cnt != 2)
 287		goto out_free;
 288	if (trace_selftest_test_probe3_cnt != 4)
 289		goto out_free;
 290
 291	/* Remove trace function from probe 3 */
 292	func1_name = "!" __stringify(DYN_FTRACE_TEST_NAME);
 293	len1 = strlen(func1_name);
 294
 295	ftrace_set_filter(&test_probe3, func1_name, len1, 0);
 296
 297	DYN_FTRACE_TEST_NAME();
 298
 299	print_counts();
 300
 301	if (trace_selftest_test_probe1_cnt != 3)
 302		goto out_free;
 303	if (trace_selftest_test_probe2_cnt != 2)
 304		goto out_free;
 305	if (trace_selftest_test_probe3_cnt != 4)
 306		goto out_free;
 307	if (cnt > 1) {
 308		if (trace_selftest_test_global_cnt == 0)
 309			goto out_free;
 310	}
 311	if (trace_selftest_test_dyn_cnt == 0)
 312		goto out_free;
 313
 314	DYN_FTRACE_TEST_NAME2();
 315
 316	print_counts();
 317
 318	if (trace_selftest_test_probe1_cnt != 3)
 319		goto out_free;
 320	if (trace_selftest_test_probe2_cnt != 3)
 321		goto out_free;
 322	if (trace_selftest_test_probe3_cnt != 5)
 323		goto out_free;
 324
 325	ret = 0;
 326 out_free:
 327	unregister_ftrace_function(dyn_ops);
 328	kfree(dyn_ops);
 329
 330 out:
 331	/* Purposely unregister in the same order */
 332	unregister_ftrace_function(&test_probe1);
 333	unregister_ftrace_function(&test_probe2);
 334	unregister_ftrace_function(&test_probe3);
 335	if (cnt > 1)
 336		unregister_ftrace_function(tr->ops);
 337	ftrace_reset_array_ops(tr);
 338
 339	/* Make sure everything is off */
 340	reset_counts();
 341	DYN_FTRACE_TEST_NAME();
 342	DYN_FTRACE_TEST_NAME();
 343
 344	if (trace_selftest_test_probe1_cnt ||
 345	    trace_selftest_test_probe2_cnt ||
 346	    trace_selftest_test_probe3_cnt ||
 347	    trace_selftest_test_global_cnt ||
 348	    trace_selftest_test_dyn_cnt)
 349		ret = -1;
 350
 351	ftrace_enabled = save_ftrace_enabled;
 352
 353	return ret;
 354}
 355
 356/* Test dynamic code modification and ftrace filters */
 357static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 358						  struct trace_array *tr,
 359						  int (*func)(void))
 360{
 361	int save_ftrace_enabled = ftrace_enabled;
 362	unsigned long count;
 363	char *func_name;
 364	int ret;
 365
 366	/* The ftrace test PASSED */
 367	printk(KERN_CONT "PASSED\n");
 368	pr_info("Testing dynamic ftrace: ");
 369
 370	/* enable tracing, and record the filter function */
 371	ftrace_enabled = 1;
 372
  373	/* passed in by parameter to keep gcc from optimizing it away */
 374	func();
 375
 376	/*
 377	 * Some archs *cough*PowerPC*cough* add characters to the
 378	 * start of the function names. We simply put a '*' to
 379	 * accommodate them.
 380	 */
 381	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 382
 383	/* filter only on our function */
 384	ftrace_set_global_filter(func_name, strlen(func_name), 1);
 385
 386	/* enable tracing */
 387	ret = tracer_init(trace, tr);
 388	if (ret) {
 389		warn_failed_init_tracer(trace, ret);
 390		goto out;
 391	}
 392
 393	/* Sleep for a 1/10 of a second */
 394	msleep(100);
 395
 396	/* we should have nothing in the buffer */
 397	ret = trace_test_buffer(&tr->array_buffer, &count);
 398	if (ret)
 399		goto out;
 400
 401	if (count) {
 402		ret = -1;
 403		printk(KERN_CONT ".. filter did not filter .. ");
 404		goto out;
 405	}
 406
 407	/* call our function again */
 408	func();
 409
 410	/* sleep again */
 411	msleep(100);
 412
 413	/* stop the tracing. */
 414	tracing_stop();
 415	ftrace_enabled = 0;
 416
 417	/* check the trace buffer */
 418	ret = trace_test_buffer(&tr->array_buffer, &count);
 419
 420	ftrace_enabled = 1;
 421	tracing_start();
 422
 423	/* we should only have one item */
 424	if (!ret && count != 1) {
 425		trace->reset(tr);
 426		printk(KERN_CONT ".. filter failed count=%ld ..", count);
 427		ret = -1;
 428		goto out;
 429	}
 430
 431	/* Test the ops with global tracing running */
 432	ret = trace_selftest_ops(tr, 1);
 433	trace->reset(tr);
 434
 435 out:
 436	ftrace_enabled = save_ftrace_enabled;
 437
 438	/* Enable tracing on all functions again */
 439	ftrace_set_global_filter(NULL, 0, 1);
 440
 441	/* Test the ops with global tracing off */
 442	if (!ret)
 443		ret = trace_selftest_ops(tr, 2);
 444
 445	return ret;
 446}
 447
 448static int trace_selftest_recursion_cnt;
 449static void trace_selftest_test_recursion_func(unsigned long ip,
 450					       unsigned long pip,
 451					       struct ftrace_ops *op,
 452					       struct ftrace_regs *fregs)
 453{
 454	/*
 455	 * This function is registered without the recursion safe flag.
 456	 * The ftrace infrastructure should provide the recursion
 457	 * protection. If not, this will crash the kernel!
 458	 */
 459	if (trace_selftest_recursion_cnt++ > 10)
 460		return;
 461	DYN_FTRACE_TEST_NAME();
 462}
 463
 464static void trace_selftest_test_recursion_safe_func(unsigned long ip,
 465						    unsigned long pip,
 466						    struct ftrace_ops *op,
 467						    struct ftrace_regs *fregs)
 468{
 469	/*
 470	 * We said we would provide our own recursion. By calling
 471	 * this function again, we should recurse back into this function
 472	 * and count again. But this only happens if the arch supports
 473	 * all of ftrace features and nothing else is using the function
 474	 * tracing utility.
 475	 */
 476	if (trace_selftest_recursion_cnt++)
 477		return;
 478	DYN_FTRACE_TEST_NAME();
 479}
 480
 481static struct ftrace_ops test_rec_probe = {
 482	.func			= trace_selftest_test_recursion_func,
 483	.flags			= FTRACE_OPS_FL_RECURSION,
 484};
 485
 486static struct ftrace_ops test_recsafe_probe = {
  487	.func			= trace_selftest_test_recursion_safe_func,
  488};
 489
 490static int
 491trace_selftest_function_recursion(void)
 492{
 493	int save_ftrace_enabled = ftrace_enabled;
 494	char *func_name;
 495	int len;
 496	int ret;
 497
 498	/* The previous test PASSED */
 499	pr_cont("PASSED\n");
 500	pr_info("Testing ftrace recursion: ");
 501
 502
 503	/* enable tracing, and record the filter function */
 504	ftrace_enabled = 1;
 505
 506	/* Handle PPC64 '.' name */
 507	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 508	len = strlen(func_name);
 509
 510	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
 511	if (ret) {
 512		pr_cont("*Could not set filter* ");
 513		goto out;
 514	}
 515
 516	ret = register_ftrace_function(&test_rec_probe);
 517	if (ret) {
 518		pr_cont("*could not register callback* ");
 519		goto out;
 520	}
 521
 522	DYN_FTRACE_TEST_NAME();
 523
 524	unregister_ftrace_function(&test_rec_probe);
 525
 526	ret = -1;
 527	/*
  528	 * Recursion allows for transitions between contexts,
 529	 * and may call the callback twice.
 530	 */
 531	if (trace_selftest_recursion_cnt != 1 &&
 532	    trace_selftest_recursion_cnt != 2) {
 533		pr_cont("*callback not called once (or twice) (%d)* ",
 534			trace_selftest_recursion_cnt);
 535		goto out;
 536	}
 537
 538	trace_selftest_recursion_cnt = 1;
 539
 540	pr_cont("PASSED\n");
 541	pr_info("Testing ftrace recursion safe: ");
 542
 543	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
 544	if (ret) {
 545		pr_cont("*Could not set filter* ");
 546		goto out;
 547	}
 548
 549	ret = register_ftrace_function(&test_recsafe_probe);
 550	if (ret) {
 551		pr_cont("*could not register callback* ");
 552		goto out;
 553	}
 554
 555	DYN_FTRACE_TEST_NAME();
 556
 557	unregister_ftrace_function(&test_recsafe_probe);
 558
 559	ret = -1;
 560	if (trace_selftest_recursion_cnt != 2) {
 561		pr_cont("*callback not called expected 2 times (%d)* ",
 562			trace_selftest_recursion_cnt);
 563		goto out;
 564	}
 565
 566	ret = 0;
 567out:
 568	ftrace_enabled = save_ftrace_enabled;
 569
 570	return ret;
 571}
 572#else
 573# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
 574# define trace_selftest_function_recursion() ({ 0; })
 575#endif /* CONFIG_DYNAMIC_FTRACE */
 576
 577static enum {
 578	TRACE_SELFTEST_REGS_START,
 579	TRACE_SELFTEST_REGS_FOUND,
 580	TRACE_SELFTEST_REGS_NOT_FOUND,
 581} trace_selftest_regs_stat;
 582
 583static void trace_selftest_test_regs_func(unsigned long ip,
 584					  unsigned long pip,
 585					  struct ftrace_ops *op,
 586					  struct ftrace_regs *fregs)
 587{
 588	struct pt_regs *regs = ftrace_get_regs(fregs);
 589
 590	if (regs)
 591		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
 592	else
 593		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
 594}
 595
 596static struct ftrace_ops test_regs_probe = {
 597	.func		= trace_selftest_test_regs_func,
 598	.flags		= FTRACE_OPS_FL_SAVE_REGS,
 599};
 600
 601static int
 602trace_selftest_function_regs(void)
 603{
 604	int save_ftrace_enabled = ftrace_enabled;
 605	char *func_name;
 606	int len;
 607	int ret;
 608	int supported = 0;
 609
 610#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 611	supported = 1;
 612#endif
 613
 614	/* The previous test PASSED */
 615	pr_cont("PASSED\n");
 616	pr_info("Testing ftrace regs%s: ",
 617		!supported ? "(no arch support)" : "");
 618
 619	/* enable tracing, and record the filter function */
 620	ftrace_enabled = 1;
 621
 622	/* Handle PPC64 '.' name */
 623	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 624	len = strlen(func_name);
 625
 626	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
 627	/*
 628	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
 629	 * This test really doesn't care.
 630	 */
 631	if (ret && ret != -ENODEV) {
 632		pr_cont("*Could not set filter* ");
 633		goto out;
 634	}
 635
 636	ret = register_ftrace_function(&test_regs_probe);
 637	/*
 638	 * Now if the arch does not support passing regs, then this should
 639	 * have failed.
 640	 */
 641	if (!supported) {
 642		if (!ret) {
 643			pr_cont("*registered save-regs without arch support* ");
 644			goto out;
 645		}
 646		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
 647		ret = register_ftrace_function(&test_regs_probe);
 648	}
 649	if (ret) {
 650		pr_cont("*could not register callback* ");
 651		goto out;
 652	}
 653
 654
 655	DYN_FTRACE_TEST_NAME();
 656
 657	unregister_ftrace_function(&test_regs_probe);
 658
 659	ret = -1;
 660
 661	switch (trace_selftest_regs_stat) {
 662	case TRACE_SELFTEST_REGS_START:
 663		pr_cont("*callback never called* ");
 664		goto out;
 665
 666	case TRACE_SELFTEST_REGS_FOUND:
 667		if (supported)
 668			break;
 669		pr_cont("*callback received regs without arch support* ");
 670		goto out;
 671
 672	case TRACE_SELFTEST_REGS_NOT_FOUND:
 673		if (!supported)
 674			break;
 675		pr_cont("*callback received NULL regs* ");
 676		goto out;
 677	}
 678
 679	ret = 0;
 680out:
 681	ftrace_enabled = save_ftrace_enabled;
 682
 683	return ret;
 684}
 685
 686/*
 687 * Simple verification test of ftrace function tracer.
 688 * Enable ftrace, sleep 1/10 second, and then read the trace
 689 * buffer to see if all is in order.
 690 */
 691__init int
 692trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 693{
 694	int save_ftrace_enabled = ftrace_enabled;
 695	unsigned long count;
 696	int ret;
 697
 698#ifdef CONFIG_DYNAMIC_FTRACE
 699	if (ftrace_filter_param) {
 700		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
 701		return 0;
 702	}
 703#endif
 704
 705	/* make sure msleep has been recorded */
 706	msleep(1);
 707
 708	/* start the tracing */
 709	ftrace_enabled = 1;
 710
 711	ret = tracer_init(trace, tr);
 712	if (ret) {
 713		warn_failed_init_tracer(trace, ret);
 714		goto out;
 715	}
 716
 717	/* Sleep for a 1/10 of a second */
 718	msleep(100);
 719	/* stop the tracing. */
 720	tracing_stop();
 721	ftrace_enabled = 0;
 722
 723	/* check the trace buffer */
 724	ret = trace_test_buffer(&tr->array_buffer, &count);
 725
 726	ftrace_enabled = 1;
 727	trace->reset(tr);
 728	tracing_start();
 729
 730	if (!ret && !count) {
 731		printk(KERN_CONT ".. no entries found ..");
 732		ret = -1;
 733		goto out;
 734	}
 735
 736	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
 737						     DYN_FTRACE_TEST_NAME);
 738	if (ret)
 739		goto out;
 740
 741	ret = trace_selftest_function_recursion();
 742	if (ret)
 743		goto out;
 744
 745	ret = trace_selftest_function_regs();
 746 out:
 747	ftrace_enabled = save_ftrace_enabled;
 748
 749	/* kill ftrace totally if we failed */
 750	if (ret)
 751		ftrace_kill();
 752
 753	return ret;
 754}
 755#endif /* CONFIG_FUNCTION_TRACER */
 756
 757
 758#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 759
 760#ifdef CONFIG_DYNAMIC_FTRACE
 761
 762#define CHAR_NUMBER 123
 763#define SHORT_NUMBER 12345
 764#define WORD_NUMBER 1234567890
 765#define LONG_NUMBER 1234567890123456789LL
 766#define ERRSTR_BUFLEN 128
 767
 768struct fgraph_fixture {
 769	struct fgraph_ops gops;
 770	int store_size;
 771	const char *store_type_name;
 772	char error_str_buf[ERRSTR_BUFLEN];
 773	char *error_str;
 774};
 775
 776static __init int store_entry(struct ftrace_graph_ent *trace,
 777			      struct fgraph_ops *gops)
 778{
 779	struct fgraph_fixture *fixture = container_of(gops, struct fgraph_fixture, gops);
 780	const char *type = fixture->store_type_name;
 781	int size = fixture->store_size;
 782	void *p;
 783
 784	p = fgraph_reserve_data(gops->idx, size);
 785	if (!p) {
 786		snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
 787			 "Failed to reserve %s\n", type);
 788		return 0;
 789	}
 790
 791	switch (size) {
 792	case 1:
 793		*(char *)p = CHAR_NUMBER;
 794		break;
 795	case 2:
 796		*(short *)p = SHORT_NUMBER;
 797		break;
 798	case 4:
 799		*(int *)p = WORD_NUMBER;
 800		break;
 801	case 8:
 802		*(long long *)p = LONG_NUMBER;
 803		break;
 804	}
 805
 806	return 1;
 807}
 808
 809static __init void store_return(struct ftrace_graph_ret *trace,
 810				struct fgraph_ops *gops)
 811{
 812	struct fgraph_fixture *fixture = container_of(gops, struct fgraph_fixture, gops);
 813	const char *type = fixture->store_type_name;
 814	long long expect = 0;
 815	long long found = -1;
 816	int size;
 817	char *p;
 818
 819	p = fgraph_retrieve_data(gops->idx, &size);
 820	if (!p) {
 821		snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
 822			 "Failed to retrieve %s\n", type);
 823		return;
 824	}
 825	if (fixture->store_size > size) {
 826		snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
 827			 "Retrieved size %d is smaller than expected %d\n",
 828			 size, (int)fixture->store_size);
 829		return;
 830	}
 831
 832	switch (fixture->store_size) {
 833	case 1:
 834		expect = CHAR_NUMBER;
 835		found = *(char *)p;
 836		break;
 837	case 2:
 838		expect = SHORT_NUMBER;
 839		found = *(short *)p;
 840		break;
 841	case 4:
 842		expect = WORD_NUMBER;
 843		found = *(int *)p;
 844		break;
 845	case 8:
 846		expect = LONG_NUMBER;
 847		found = *(long long *)p;
 848		break;
 849	}
 850
 851	if (found != expect) {
 852		snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
 853			 "%s returned not %lld but %lld\n", type, expect, found);
 854		return;
 855	}
 856	fixture->error_str = NULL;
 857}
 858
 859static int __init init_fgraph_fixture(struct fgraph_fixture *fixture)
 860{
 861	char *func_name;
 862	int len;
 863
 864	snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
 865		 "Failed to execute storage %s\n", fixture->store_type_name);
 866	fixture->error_str = fixture->error_str_buf;
 867
 868	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 869	len = strlen(func_name);
 870
 871	return ftrace_set_filter(&fixture->gops.ops, func_name, len, 1);
 872}
 873
 874/* Test fgraph storage for each size */
 875static int __init test_graph_storage_single(struct fgraph_fixture *fixture)
 876{
 877	int size = fixture->store_size;
 878	int ret;
 879
 880	pr_cont("PASSED\n");
 881	pr_info("Testing fgraph storage of %d byte%s: ", size, str_plural(size));
 882
 883	ret = init_fgraph_fixture(fixture);
 884	if (ret && ret != -ENODEV) {
 885		pr_cont("*Could not set filter* ");
 886		return -1;
 887	}
 888
 889	ret = register_ftrace_graph(&fixture->gops);
 890	if (ret) {
 891		pr_warn("Failed to init store_bytes fgraph tracing\n");
 892		return -1;
 893	}
 894
 895	DYN_FTRACE_TEST_NAME();
 896
 897	unregister_ftrace_graph(&fixture->gops);
 898
 899	if (fixture->error_str) {
 900		pr_cont("*** %s ***", fixture->error_str);
 901		return -1;
 902	}
 903
 904	return 0;
 905}
 906
 907static struct fgraph_fixture store_bytes[4] __initdata = {
 908	[0] = {
 909		.gops = {
 910			.entryfunc		= store_entry,
 911			.retfunc		= store_return,
 912		},
 913		.store_size = 1,
 914		.store_type_name = "byte",
 915	},
 916	[1] = {
 917		.gops = {
 918			.entryfunc		= store_entry,
 919			.retfunc		= store_return,
 920		},
 921		.store_size = 2,
 922		.store_type_name = "short",
 923	},
 924	[2] = {
 925		.gops = {
 926			.entryfunc		= store_entry,
 927			.retfunc		= store_return,
 928		},
 929		.store_size = 4,
 930		.store_type_name = "word",
 931	},
 932	[3] = {
 933		.gops = {
 934			.entryfunc		= store_entry,
 935			.retfunc		= store_return,
 936		},
 937		.store_size = 8,
 938		.store_type_name = "long long",
 939	},
 940};
 941
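/*
 * Stack all four storage fixtures on the same function at once.  Each
 * fgraph_ops gets its own slot for the single DYN_FTRACE_TEST_NAME()
 * call, so every size has to survive the round trip independently even
 * though the handlers share the traced function.
 */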
 942static __init int test_graph_storage_multi(void)
 943{
 944	struct fgraph_fixture *fixture;
 945	bool printed = false;
 946	int i, j, ret;
 947
 948	pr_cont("PASSED\n");
 949	pr_info("Testing multiple fgraph storage on a function: ");
 950
 951	for (i = 0; i < ARRAY_SIZE(store_bytes); i++) {
 952		fixture = &store_bytes[i];
 953		ret = init_fgraph_fixture(fixture);
 954		if (ret && ret != -ENODEV) {
 955			pr_cont("*Could not set filter* ");
 956			printed = true;
 957			goto out2;
 958		}
 959	}
 960
 961	for (j = 0; j < ARRAY_SIZE(store_bytes); j++) {
 962		fixture = &store_bytes[j];
 963		ret = register_ftrace_graph(&fixture->gops);
 964		if (ret) {
 965			pr_warn("Failed to init store_bytes fgraph tracing\n");
 966			printed = true;
 967			goto out1;
 968		}
 969	}
 970
 971	DYN_FTRACE_TEST_NAME();
 972out1:
 973	while (--j >= 0) {
 974		fixture = &store_bytes[j];
 975		unregister_ftrace_graph(&fixture->gops);
 976
 977		if (fixture->error_str && !printed) {
 978			pr_cont("*** %s ***", fixture->error_str);
 979			printed = true;
 980		}
 981	}
 982out2:
 983	while (--i >= 0) {
 984		fixture = &store_bytes[i];
 985		ftrace_free_filter(&fixture->gops.ops);
 986
 987		if (fixture->error_str && !printed) {
 988			pr_cont("*** %s ***", fixture->error_str);
 989			printed = true;
 990		}
 991	}
 992	return printed ? -1 : 0;
 993}
 994
 995/* Test the storage passed across function_graph entry and return */
 996static __init int test_graph_storage(void)
 997{
 998	int ret;
 999
1000	ret = test_graph_storage_single(&store_bytes[0]);
1001	if (ret)
1002		return ret;
1003	ret = test_graph_storage_single(&store_bytes[1]);
1004	if (ret)
1005		return ret;
1006	ret = test_graph_storage_single(&store_bytes[2]);
1007	if (ret)
1008		return ret;
1009	ret = test_graph_storage_single(&store_bytes[3]);
1010	if (ret)
1011		return ret;
1012	ret = test_graph_storage_multi();
1013	if (ret)
1014		return ret;
1015	return 0;
1016}
1017#else
1018static inline int test_graph_storage(void) { return 0; }
1019#endif /* CONFIG_DYNAMIC_FTRACE */
1020
1021/* Maximum number of functions to trace before diagnosing a hang */
1022#define GRAPH_MAX_FUNC_TEST	100000000
1023
1024static unsigned int graph_hang_thresh;
1025
1026/* Wrap the real function entry probe to avoid possible hanging */
1027static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace,
1028				      struct fgraph_ops *gops)
1029{
 1030	/* This is harmlessly racy; we only need to detect a hang approximately */
1031	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
1032		ftrace_graph_stop();
1033		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
1034		if (ftrace_dump_on_oops_enabled()) {
1035			ftrace_dump(DUMP_ALL);
1036			/* ftrace_dump() disables tracing */
1037			tracing_on();
1038		}
1039		return 0;
1040	}
1041
1042	return trace_graph_entry(trace, gops);
1043}
1044
1045static struct fgraph_ops fgraph_ops __initdata  = {
1046	.entryfunc		= &trace_graph_entry_watchdog,
1047	.retfunc		= &trace_graph_return,
1048};
1049
1050#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
1051static struct ftrace_ops direct;
1052#endif
1053
 1054/*
 1055 * Pretty much the same as the function tracer, from which this
 1056 * selftest has been borrowed.
 1057 */
1058__init int
1059trace_selftest_startup_function_graph(struct tracer *trace,
1060					struct trace_array *tr)
1061{
1062	int ret;
1063	unsigned long count;
1064	char *func_name __maybe_unused;
1065
1066#ifdef CONFIG_DYNAMIC_FTRACE
1067	if (ftrace_filter_param) {
1068		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
1069		return 0;
1070	}
1071#endif
1072
 1073	/*
 1074	 * Simulate the init() callback, but attach a watchdog callback
 1075	 * so we can detect and recover from possible hangs.
 1076	 */
1077	tracing_reset_online_cpus(&tr->array_buffer);
1078	fgraph_ops.private = tr;
 1079	ret = register_ftrace_graph(&fgraph_ops);
 1080	if (ret) {
1081		warn_failed_init_tracer(trace, ret);
1082		goto out;
1083	}
1084	tracing_start_cmdline_record();
1085
 1086	/* Sleep for 1/10 of a second */
1087	msleep(100);
1088
1089	/* Have we just recovered from a hang? */
1090	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
1091		disable_tracing_selftest("recovering from a hang");
1092		ret = -1;
1093		goto out;
1094	}
1095
1096	tracing_stop();
1097
1098	/* check the trace buffer */
1099	ret = trace_test_buffer(&tr->array_buffer, &count);
1100
1101	/* Need to also simulate the tr->reset to remove this fgraph_ops */
1102	tracing_stop_cmdline_record();
1103	unregister_ftrace_graph(&fgraph_ops);
 1104	unregister_ftrace_graph(&fgraph_ops);
 1105	tracing_start();
1106
1107	if (!ret && !count) {
1108		printk(KERN_CONT ".. no entries found ..");
1109		ret = -1;
1110		goto out;
1111	}
1112
1113#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
 1114	/*
 1115	 * These tests can take some time to run. Make sure we do not
 1116	 * trigger the softlockup detector on non-PREEMPT kernels.
 1117	 */
1118	cond_resched();
1119
1120	tracing_reset_online_cpus(&tr->array_buffer);
1121	fgraph_ops.private = tr;
1122
1123	/*
1124	 * Some archs *cough*PowerPC*cough* add characters to the
1125	 * start of the function names. We simply put a '*' to
1126	 * accommodate them.
1127	 */
1128	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
1129	ftrace_set_global_filter(func_name, strlen(func_name), 1);
1130
 1131	/*
 1132	 * Register a direct function together with the graph tracer
 1133	 * and make sure we still get the graph trace.
 1134	 */
1135	ftrace_set_filter_ip(&direct, (unsigned long)DYN_FTRACE_TEST_NAME, 0, 0);
1136	ret = register_ftrace_direct(&direct,
1137				     (unsigned long)ftrace_stub_direct_tramp);
1138	if (ret)
1139		goto out;
1140
1141	cond_resched();
1142
1143	ret = register_ftrace_graph(&fgraph_ops);
1144	if (ret) {
1145		warn_failed_init_tracer(trace, ret);
1146		goto out;
1147	}
1148
1149	DYN_FTRACE_TEST_NAME();
1150
1151	count = 0;
1152
1153	tracing_stop();
1154	/* check the trace buffer */
1155	ret = trace_test_buffer(&tr->array_buffer, &count);
1156
1157	unregister_ftrace_graph(&fgraph_ops);
1158
1159	ret = unregister_ftrace_direct(&direct,
1160				       (unsigned long)ftrace_stub_direct_tramp,
1161				       true);
1162	if (ret)
1163		goto out;
1164
1165	cond_resched();
1166
1167	tracing_start();
1168
1169	if (!ret && !count) {
1170		ret = -1;
1171		goto out;
1172	}
1173
1174	/* Enable tracing on all functions again */
1175	ftrace_set_global_filter(NULL, 0, 1);
1176#endif
1177
1178	ret = test_graph_storage();
1179
1180	/* Don't test dynamic tracing, the function tracer already did */
1181out:
1182	/* Stop it if we failed */
1183	if (ret)
1184		ftrace_graph_stop();
1185
1186	return ret;
1187}
1188#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1189
1190
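/*
 * The latency tracer selftests below all follow the same shape: init the
 * tracer, zero tr->max_latency, open a short critical section (irqs
 * and/or preemption disabled around a udelay(100)), stop tracing, verify
 * both the live buffer and the max (snapshot) buffer, then restore the
 * saved max latency.
 */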
1191#ifdef CONFIG_IRQSOFF_TRACER
1192int
1193trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
1194{
1195	unsigned long save_max = tr->max_latency;
1196	unsigned long count;
1197	int ret;
1198
1199	/* start the tracing */
1200	ret = tracer_init(trace, tr);
1201	if (ret) {
1202		warn_failed_init_tracer(trace, ret);
1203		return ret;
1204	}
1205
1206	/* reset the max latency */
1207	tr->max_latency = 0;
1208	/* disable interrupts for a bit */
1209	local_irq_disable();
1210	udelay(100);
1211	local_irq_enable();
1212
 1213	/*
 1214	 * Stop the tracer to avoid a warning caused by a failed buffer
 1215	 * flip: tracing_stop() disables the tr and max buffers, which
 1216	 * makes flipping impossible if a parallel max irqs-off latency
 1217	 * comes in.
 1218	 */
1219	trace->stop(tr);
1220	/* stop the tracing. */
1221	tracing_stop();
1222	/* check both trace buffers */
1223	ret = trace_test_buffer(&tr->array_buffer, NULL);
1224	if (!ret)
1225		ret = trace_test_buffer(&tr->max_buffer, &count);
1226	trace->reset(tr);
1227	tracing_start();
1228
1229	if (!ret && !count) {
1230		printk(KERN_CONT ".. no entries found ..");
1231		ret = -1;
1232	}
1233
1234	tr->max_latency = save_max;
1235
1236	return ret;
1237}
1238#endif /* CONFIG_IRQSOFF_TRACER */
1239
1240#ifdef CONFIG_PREEMPT_TRACER
1241int
1242trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
1243{
1244	unsigned long save_max = tr->max_latency;
1245	unsigned long count;
1246	int ret;
1247
1248	/*
1249	 * Now that the big kernel lock is no longer preemptible,
1250	 * and this is called with the BKL held, it will always
1251	 * fail. If preemption is already disabled, simply
1252	 * pass the test. When the BKL is removed, or becomes
1253	 * preemptible again, we will once again test this,
1254	 * so keep it in.
1255	 */
1256	if (preempt_count()) {
1257		printk(KERN_CONT "can not test ... force ");
1258		return 0;
1259	}
1260
1261	/* start the tracing */
1262	ret = tracer_init(trace, tr);
1263	if (ret) {
1264		warn_failed_init_tracer(trace, ret);
1265		return ret;
1266	}
1267
1268	/* reset the max latency */
1269	tr->max_latency = 0;
1270	/* disable preemption for a bit */
1271	preempt_disable();
1272	udelay(100);
1273	preempt_enable();
1274
 1275	/*
 1276	 * Stop the tracer to avoid a warning caused by a failed buffer
 1277	 * flip: tracing_stop() disables the tr and max buffers, which
 1278	 * makes flipping impossible if a parallel max preempt-off
 1279	 * latency comes in.
 1280	 */
1281	trace->stop(tr);
1282	/* stop the tracing. */
1283	tracing_stop();
1284	/* check both trace buffers */
1285	ret = trace_test_buffer(&tr->array_buffer, NULL);
1286	if (!ret)
1287		ret = trace_test_buffer(&tr->max_buffer, &count);
1288	trace->reset(tr);
1289	tracing_start();
1290
1291	if (!ret && !count) {
1292		printk(KERN_CONT ".. no entries found ..");
1293		ret = -1;
1294	}
1295
1296	tr->max_latency = save_max;
1297
1298	return ret;
1299}
1300#endif /* CONFIG_PREEMPT_TRACER */
1301
1302#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
1303int
1304trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
1305{
1306	unsigned long save_max = tr->max_latency;
1307	unsigned long count;
1308	int ret;
1309
1310	/*
1311	 * Now that the big kernel lock is no longer preemptible,
1312	 * and this is called with the BKL held, it will always
1313	 * fail. If preemption is already disabled, simply
1314	 * pass the test. When the BKL is removed, or becomes
1315	 * preemptible again, we will once again test this,
1316	 * so keep it in.
1317	 */
1318	if (preempt_count()) {
1319		printk(KERN_CONT "can not test ... force ");
1320		return 0;
1321	}
1322
1323	/* start the tracing */
1324	ret = tracer_init(trace, tr);
1325	if (ret) {
1326		warn_failed_init_tracer(trace, ret);
1327		goto out_no_start;
1328	}
1329
1330	/* reset the max latency */
1331	tr->max_latency = 0;
1332
1333	/* disable preemption and interrupts for a bit */
1334	preempt_disable();
1335	local_irq_disable();
1336	udelay(100);
1337	preempt_enable();
1338	/* reverse the order of preempt vs irqs */
1339	local_irq_enable();
1340
 1341	/*
 1342	 * Stop the tracer to avoid a warning caused by a failed buffer
 1343	 * flip: tracing_stop() disables the tr and max buffers, which
 1344	 * makes flipping impossible if a parallel max irqs/preempt-off
 1345	 * latency comes in.
 1346	 */
1347	trace->stop(tr);
1348	/* stop the tracing. */
1349	tracing_stop();
1350	/* check both trace buffers */
1351	ret = trace_test_buffer(&tr->array_buffer, NULL);
1352	if (ret)
1353		goto out;
1354
1355	ret = trace_test_buffer(&tr->max_buffer, &count);
1356	if (ret)
1357		goto out;
1358
1359	if (!ret && !count) {
1360		printk(KERN_CONT ".. no entries found ..");
1361		ret = -1;
1362		goto out;
1363	}
1364
1365	/* do the test by disabling interrupts first this time */
1366	tr->max_latency = 0;
1367	tracing_start();
1368	trace->start(tr);
1369
1370	preempt_disable();
1371	local_irq_disable();
1372	udelay(100);
1373	preempt_enable();
1374	/* reverse the order of preempt vs irqs */
1375	local_irq_enable();
1376
1377	trace->stop(tr);
1378	/* stop the tracing. */
1379	tracing_stop();
1380	/* check both trace buffers */
1381	ret = trace_test_buffer(&tr->array_buffer, NULL);
1382	if (ret)
1383		goto out;
1384
1385	ret = trace_test_buffer(&tr->max_buffer, &count);
1386
1387	if (!ret && !count) {
1388		printk(KERN_CONT ".. no entries found ..");
1389		ret = -1;
1390		goto out;
1391	}
1392
1393out:
1394	tracing_start();
1395out_no_start:
1396	trace->reset(tr);
1397	tr->max_latency = save_max;
1398
1399	return ret;
1400}
1401#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
1402
1403#ifdef CONFIG_NOP_TRACER
1404int
1405trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
1406{
1407	/* What could possibly go wrong? */
1408	return 0;
1409}
1410#endif
1411
1412#ifdef CONFIG_SCHED_TRACER
1413
1414struct wakeup_test_data {
1415	struct completion	is_ready;
1416	int			go;
1417};
1418
1419static int trace_wakeup_test_thread(void *data)
1420{
1421	/* Make this a -deadline thread */
1422	static const struct sched_attr attr = {
1423		.sched_policy = SCHED_DEADLINE,
1424		.sched_runtime = 100000ULL,
1425		.sched_deadline = 10000000ULL,
1426		.sched_period = 10000000ULL
1427	};
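	/*
	 * Note: sched_attr times are in nanoseconds, so the above asks
	 * for 100us of runtime out of every 10ms period/deadline.
	 */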
1428	struct wakeup_test_data *x = data;
1429
1430	sched_setattr(current, &attr);
1431
 1432	/* Make it known that we have a new prio */
1433	complete(&x->is_ready);
1434
1435	/* now go to sleep and let the test wake us up */
1436	set_current_state(TASK_INTERRUPTIBLE);
1437	while (!x->go) {
1438		schedule();
1439		set_current_state(TASK_INTERRUPTIBLE);
1440	}
1441
1442	complete(&x->is_ready);
1443
1444	set_current_state(TASK_INTERRUPTIBLE);
1445
1446	/* we are awake, now wait to disappear */
1447	while (!kthread_should_stop()) {
1448		schedule();
1449		set_current_state(TASK_INTERRUPTIBLE);
1450	}
1451
1452	__set_current_state(TASK_RUNNING);
1453
1454	return 0;
1455}
1456int
1457trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
1458{
1459	unsigned long save_max = tr->max_latency;
1460	struct task_struct *p;
1461	struct wakeup_test_data data;
1462	unsigned long count;
1463	int ret;
1464
1465	memset(&data, 0, sizeof(data));
1466
1467	init_completion(&data.is_ready);
1468
1469	/* create a -deadline thread */
1470	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
1471	if (IS_ERR(p)) {
1472		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
1473		return -1;
1474	}
1475
 1476	/* make sure the thread is running with the -deadline policy */
1477	wait_for_completion(&data.is_ready);
1478
1479	/* start the tracing */
1480	ret = tracer_init(trace, tr);
1481	if (ret) {
1482		warn_failed_init_tracer(trace, ret);
1483		return ret;
1484	}
1485
1486	/* reset the max latency */
1487	tr->max_latency = 0;
1488
1489	while (task_is_runnable(p)) {
1490		/*
1491		 * Sleep to make sure the -deadline thread is asleep too.
1492		 * On virtual machines we can't rely on timings,
1493		 * but we want to make sure this test still works.
1494		 */
1495		msleep(100);
1496	}
1497
1498	init_completion(&data.is_ready);
1499
1500	data.go = 1;
1501	/* memory barrier is in the wake_up_process() */
1502
1503	wake_up_process(p);
1504
1505	/* Wait for the task to wake up */
1506	wait_for_completion(&data.is_ready);
1507
1508	/* stop the tracing. */
1509	tracing_stop();
1510	/* check both trace buffers */
1511	ret = trace_test_buffer(&tr->array_buffer, NULL);
1512	if (!ret)
1513		ret = trace_test_buffer(&tr->max_buffer, &count);
1514
1515
1516	trace->reset(tr);
1517	tracing_start();
1518
1519	tr->max_latency = save_max;
1520
1521	/* kill the thread */
1522	kthread_stop(p);
1523
1524	if (!ret && !count) {
1525		printk(KERN_CONT ".. no entries found ..");
1526		ret = -1;
1527	}
1528
1529	return ret;
1530}
1531#endif /* CONFIG_SCHED_TRACER */
1532
1533#ifdef CONFIG_BRANCH_TRACER
1534int
1535trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
1536{
1537	unsigned long count;
1538	int ret;
1539
1540	/* start the tracing */
1541	ret = tracer_init(trace, tr);
1542	if (ret) {
1543		warn_failed_init_tracer(trace, ret);
1544		return ret;
1545	}
1546
 1547	/* Sleep for 1/10 of a second */
1548	msleep(100);
1549	/* stop the tracing. */
1550	tracing_stop();
1551	/* check the trace buffer */
1552	ret = trace_test_buffer(&tr->array_buffer, &count);
1553	trace->reset(tr);
1554	tracing_start();
1555
1556	if (!ret && !count) {
1557		printk(KERN_CONT ".. no entries found ..");
1558		ret = -1;
1559	}
1560
1561	return ret;
1562}
1563#endif /* CONFIG_BRANCH_TRACER */
1564