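Two versions of the kernel's tracer selftest code (kernel/trace/trace_selftest.c, which the leading comment notes is included from trace.c) are shown below for comparison: v3.15 first, then v4.17.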
v3.15
 
   1/* Include in trace.c */
   2
 
   3#include <linux/stringify.h>
   4#include <linux/kthread.h>
   5#include <linux/delay.h>
   6#include <linux/slab.h>
   7
   8static inline int trace_valid_entry(struct trace_entry *entry)
   9{
  10	switch (entry->type) {
  11	case TRACE_FN:
  12	case TRACE_CTX:
  13	case TRACE_WAKE:
  14	case TRACE_STACK:
  15	case TRACE_PRINT:
  16	case TRACE_BRANCH:
  17	case TRACE_GRAPH_ENT:
  18	case TRACE_GRAPH_RET:
  19		return 1;
  20	}
  21	return 0;
  22}
  23
  24static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
  25{
  26	struct ring_buffer_event *event;
  27	struct trace_entry *entry;
  28	unsigned int loops = 0;
  29
  30	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
  31		entry = ring_buffer_event_data(event);
  32
  33		/*
   34		 * The ring buffer is trace_buf_size in size; if we
   35		 * loop more times than that, there's something wrong
   36		 * with the ring buffer.
  37		 */
  38		if (loops++ > trace_buf_size) {
  39			printk(KERN_CONT ".. bad ring buffer ");
  40			goto failed;
  41		}
  42		if (!trace_valid_entry(entry)) {
  43			printk(KERN_CONT ".. invalid entry %d ",
  44				entry->type);
  45			goto failed;
  46		}
  47	}
  48	return 0;
  49
  50 failed:
  51	/* disable tracing */
  52	tracing_disabled = 1;
  53	printk(KERN_CONT ".. corrupted trace buffer .. ");
  54	return -1;
  55}
  56
  57/*
  58 * Test the trace buffer to see if all the elements
  59 * are still sane.
  60 */
  61static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
  62{
  63	unsigned long flags, cnt = 0;
  64	int cpu, ret = 0;
  65
  66	/* Don't allow flipping of max traces now */
  67	local_irq_save(flags);
  68	arch_spin_lock(&ftrace_max_lock);
  69
  70	cnt = ring_buffer_entries(buf->buffer);
  71
  72	/*
  73	 * The trace_test_buffer_cpu runs a while loop to consume all data.
  74	 * If the calling tracer is broken, and is constantly filling
  75	 * the buffer, this will run forever, and hard lock the box.
  76	 * We disable the ring buffer while we do this test to prevent
  77	 * a hard lock up.
  78	 */
  79	tracing_off();
  80	for_each_possible_cpu(cpu) {
  81		ret = trace_test_buffer_cpu(buf, cpu);
  82		if (ret)
  83			break;
  84	}
  85	tracing_on();
  86	arch_spin_unlock(&ftrace_max_lock);
  87	local_irq_restore(flags);
  88
  89	if (count)
  90		*count = cnt;
  91
  92	return ret;
  93}
  94
  95static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
  96{
  97	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
  98		trace->name, init_ret);
  99}
 100#ifdef CONFIG_FUNCTION_TRACER
 101
 102#ifdef CONFIG_DYNAMIC_FTRACE
 103
 104static int trace_selftest_test_probe1_cnt;
 105static void trace_selftest_test_probe1_func(unsigned long ip,
 106					    unsigned long pip,
 107					    struct ftrace_ops *op,
 108					    struct pt_regs *pt_regs)
 109{
 110	trace_selftest_test_probe1_cnt++;
 111}
 112
 113static int trace_selftest_test_probe2_cnt;
 114static void trace_selftest_test_probe2_func(unsigned long ip,
 115					    unsigned long pip,
 116					    struct ftrace_ops *op,
 117					    struct pt_regs *pt_regs)
 118{
 119	trace_selftest_test_probe2_cnt++;
 120}
 121
 122static int trace_selftest_test_probe3_cnt;
 123static void trace_selftest_test_probe3_func(unsigned long ip,
 124					    unsigned long pip,
 125					    struct ftrace_ops *op,
 126					    struct pt_regs *pt_regs)
 127{
 128	trace_selftest_test_probe3_cnt++;
 129}
 130
 131static int trace_selftest_test_global_cnt;
 132static void trace_selftest_test_global_func(unsigned long ip,
 133					    unsigned long pip,
 134					    struct ftrace_ops *op,
 135					    struct pt_regs *pt_regs)
 136{
 137	trace_selftest_test_global_cnt++;
 138}
 139
 140static int trace_selftest_test_dyn_cnt;
 141static void trace_selftest_test_dyn_func(unsigned long ip,
 142					 unsigned long pip,
 143					 struct ftrace_ops *op,
 144					 struct pt_regs *pt_regs)
 145{
 146	trace_selftest_test_dyn_cnt++;
 147}
 148
 149static struct ftrace_ops test_probe1 = {
 150	.func			= trace_selftest_test_probe1_func,
 151	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 152};
 153
 154static struct ftrace_ops test_probe2 = {
 155	.func			= trace_selftest_test_probe2_func,
 156	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 157};
 158
 159static struct ftrace_ops test_probe3 = {
 160	.func			= trace_selftest_test_probe3_func,
 161	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 162};
 163
 164static struct ftrace_ops test_global = {
 165	.func		= trace_selftest_test_global_func,
 166	.flags		= FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
 167};
 168
 169static void print_counts(void)
 170{
 171	printk("(%d %d %d %d %d) ",
 172	       trace_selftest_test_probe1_cnt,
 173	       trace_selftest_test_probe2_cnt,
 174	       trace_selftest_test_probe3_cnt,
 175	       trace_selftest_test_global_cnt,
 176	       trace_selftest_test_dyn_cnt);
 177}
 178
 179static void reset_counts(void)
 180{
 181	trace_selftest_test_probe1_cnt = 0;
 182	trace_selftest_test_probe2_cnt = 0;
 183	trace_selftest_test_probe3_cnt = 0;
 184	trace_selftest_test_global_cnt = 0;
 185	trace_selftest_test_dyn_cnt = 0;
 186}
 187
 188static int trace_selftest_ops(int cnt)
 189{
 190	int save_ftrace_enabled = ftrace_enabled;
 191	struct ftrace_ops *dyn_ops;
 192	char *func1_name;
 193	char *func2_name;
 194	int len1;
 195	int len2;
 196	int ret = -1;
 197
 198	printk(KERN_CONT "PASSED\n");
 199	pr_info("Testing dynamic ftrace ops #%d: ", cnt);
 200
 201	ftrace_enabled = 1;
 202	reset_counts();
 203
 204	/* Handle PPC64 '.' name */
 205	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 206	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
 207	len1 = strlen(func1_name);
 208	len2 = strlen(func2_name);
 209
 210	/*
 211	 * Probe 1 will trace function 1.
 212	 * Probe 2 will trace function 2.
 213	 * Probe 3 will trace functions 1 and 2.
 214	 */
 215	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
 216	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
 217	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
 218	ftrace_set_filter(&test_probe3, func2_name, len2, 0);
 219
 220	register_ftrace_function(&test_probe1);
 221	register_ftrace_function(&test_probe2);
 222	register_ftrace_function(&test_probe3);
 223	register_ftrace_function(&test_global);
 224
 225	DYN_FTRACE_TEST_NAME();
 226
 227	print_counts();
 228
 229	if (trace_selftest_test_probe1_cnt != 1)
 230		goto out;
 231	if (trace_selftest_test_probe2_cnt != 0)
 232		goto out;
 233	if (trace_selftest_test_probe3_cnt != 1)
 234		goto out;
 235	if (trace_selftest_test_global_cnt == 0)
 236		goto out;
 237
 238	DYN_FTRACE_TEST_NAME2();
 239
 240	print_counts();
 241
 242	if (trace_selftest_test_probe1_cnt != 1)
 243		goto out;
 244	if (trace_selftest_test_probe2_cnt != 1)
 245		goto out;
 246	if (trace_selftest_test_probe3_cnt != 2)
 247		goto out;
 248
 249	/* Add a dynamic probe */
 250	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
 251	if (!dyn_ops) {
 252		printk("MEMORY ERROR ");
 253		goto out;
 254	}
 255
 256	dyn_ops->func = trace_selftest_test_dyn_func;
 257
 258	register_ftrace_function(dyn_ops);
 259
 260	trace_selftest_test_global_cnt = 0;
 261
 262	DYN_FTRACE_TEST_NAME();
 263
 264	print_counts();
 265
 266	if (trace_selftest_test_probe1_cnt != 2)
 267		goto out_free;
 268	if (trace_selftest_test_probe2_cnt != 1)
 269		goto out_free;
 270	if (trace_selftest_test_probe3_cnt != 3)
 271		goto out_free;
 272	if (trace_selftest_test_global_cnt == 0)
 273		goto out;
 274	if (trace_selftest_test_dyn_cnt == 0)
 275		goto out_free;
 276
 277	DYN_FTRACE_TEST_NAME2();
 278
 279	print_counts();
 280
 281	if (trace_selftest_test_probe1_cnt != 2)
 282		goto out_free;
 283	if (trace_selftest_test_probe2_cnt != 2)
 284		goto out_free;
 285	if (trace_selftest_test_probe3_cnt != 4)
 286		goto out_free;
 287
 288	ret = 0;
 289 out_free:
 290	unregister_ftrace_function(dyn_ops);
 291	kfree(dyn_ops);
 292
 293 out:
 294	/* Purposely unregister in the same order */
 295	unregister_ftrace_function(&test_probe1);
 296	unregister_ftrace_function(&test_probe2);
 297	unregister_ftrace_function(&test_probe3);
 298	unregister_ftrace_function(&test_global);
 299
 300	/* Make sure everything is off */
 301	reset_counts();
 302	DYN_FTRACE_TEST_NAME();
 303	DYN_FTRACE_TEST_NAME();
 304
 305	if (trace_selftest_test_probe1_cnt ||
 306	    trace_selftest_test_probe2_cnt ||
 307	    trace_selftest_test_probe3_cnt ||
 308	    trace_selftest_test_global_cnt ||
 309	    trace_selftest_test_dyn_cnt)
 310		ret = -1;
 311
 312	ftrace_enabled = save_ftrace_enabled;
 313
 314	return ret;
 315}
 316
 317/* Test dynamic code modification and ftrace filters */
 318int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 319					   struct trace_array *tr,
 320					   int (*func)(void))
 321{
 322	int save_ftrace_enabled = ftrace_enabled;
 323	unsigned long count;
 324	char *func_name;
 325	int ret;
 326
 327	/* The ftrace test PASSED */
 328	printk(KERN_CONT "PASSED\n");
 329	pr_info("Testing dynamic ftrace: ");
 330
 331	/* enable tracing, and record the filter function */
 332	ftrace_enabled = 1;
 333
  334	/* passed in by parameter to keep gcc from optimizing it away */
 335	func();
 336
 337	/*
 338	 * Some archs *cough*PowerPC*cough* add characters to the
 339	 * start of the function names. We simply put a '*' to
 340	 * accommodate them.
 341	 */
 342	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 343
 344	/* filter only on our function */
 345	ftrace_set_global_filter(func_name, strlen(func_name), 1);
 346
 347	/* enable tracing */
 348	ret = tracer_init(trace, tr);
 349	if (ret) {
 350		warn_failed_init_tracer(trace, ret);
 351		goto out;
 352	}
 353
  354	/* Sleep for 1/10 of a second */
 355	msleep(100);
 356
 357	/* we should have nothing in the buffer */
 358	ret = trace_test_buffer(&tr->trace_buffer, &count);
 359	if (ret)
 360		goto out;
 361
 362	if (count) {
 363		ret = -1;
 364		printk(KERN_CONT ".. filter did not filter .. ");
 365		goto out;
 366	}
 367
 368	/* call our function again */
 369	func();
 370
 371	/* sleep again */
 372	msleep(100);
 373
 374	/* stop the tracing. */
 375	tracing_stop();
 376	ftrace_enabled = 0;
 377
 378	/* check the trace buffer */
 379	ret = trace_test_buffer(&tr->trace_buffer, &count);
 380	tracing_start();
 381
 382	/* we should only have one item */
 383	if (!ret && count != 1) {
 384		trace->reset(tr);
 385		printk(KERN_CONT ".. filter failed count=%ld ..", count);
 386		ret = -1;
 387		goto out;
 388	}
 389
 390	/* Test the ops with global tracing running */
 391	ret = trace_selftest_ops(1);
 392	trace->reset(tr);
 393
 394 out:
 395	ftrace_enabled = save_ftrace_enabled;
 396
 397	/* Enable tracing on all functions again */
 398	ftrace_set_global_filter(NULL, 0, 1);
 399
 400	/* Test the ops with global tracing off */
 401	if (!ret)
 402		ret = trace_selftest_ops(2);
 403
 404	return ret;
 405}
 406
 407static int trace_selftest_recursion_cnt;
 408static void trace_selftest_test_recursion_func(unsigned long ip,
 409					       unsigned long pip,
 410					       struct ftrace_ops *op,
 411					       struct pt_regs *pt_regs)
 412{
 413	/*
 414	 * This function is registered without the recursion safe flag.
 415	 * The ftrace infrastructure should provide the recursion
 416	 * protection. If not, this will crash the kernel!
 417	 */
 418	if (trace_selftest_recursion_cnt++ > 10)
 419		return;
 420	DYN_FTRACE_TEST_NAME();
 421}
 422
 423static void trace_selftest_test_recursion_safe_func(unsigned long ip,
 424						    unsigned long pip,
 425						    struct ftrace_ops *op,
 426						    struct pt_regs *pt_regs)
 427{
 428	/*
  429	 * We said we would provide our own recursion protection. By
  430	 * calling this function again, we should recurse back into it
 431	 * and count again. But this only happens if the arch supports
 432	 * all of ftrace features and nothing else is using the function
 433	 * tracing utility.
 434	 */
 435	if (trace_selftest_recursion_cnt++)
 436		return;
 437	DYN_FTRACE_TEST_NAME();
 438}
 439
 440static struct ftrace_ops test_rec_probe = {
 441	.func			= trace_selftest_test_recursion_func,
 442};
 443
 444static struct ftrace_ops test_recsafe_probe = {
 445	.func			= trace_selftest_test_recursion_safe_func,
 446	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 447};
 448
 449static int
 450trace_selftest_function_recursion(void)
 451{
 452	int save_ftrace_enabled = ftrace_enabled;
 453	char *func_name;
 454	int len;
 455	int ret;
 456
 457	/* The previous test PASSED */
 458	pr_cont("PASSED\n");
 459	pr_info("Testing ftrace recursion: ");
 460
 461
 462	/* enable tracing, and record the filter function */
 463	ftrace_enabled = 1;
 464
 465	/* Handle PPC64 '.' name */
 466	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 467	len = strlen(func_name);
 468
 469	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
 470	if (ret) {
 471		pr_cont("*Could not set filter* ");
 472		goto out;
 473	}
 474
 475	ret = register_ftrace_function(&test_rec_probe);
 476	if (ret) {
 477		pr_cont("*could not register callback* ");
 478		goto out;
 479	}
 480
 481	DYN_FTRACE_TEST_NAME();
 482
 483	unregister_ftrace_function(&test_rec_probe);
 484
 485	ret = -1;
 486	if (trace_selftest_recursion_cnt != 1) {
 487		pr_cont("*callback not called once (%d)* ",
 488			trace_selftest_recursion_cnt);
 489		goto out;
 490	}
 491
 492	trace_selftest_recursion_cnt = 1;
 493
 494	pr_cont("PASSED\n");
 495	pr_info("Testing ftrace recursion safe: ");
 496
 497	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
 498	if (ret) {
 499		pr_cont("*Could not set filter* ");
 500		goto out;
 501	}
 502
 503	ret = register_ftrace_function(&test_recsafe_probe);
 504	if (ret) {
 505		pr_cont("*could not register callback* ");
 506		goto out;
 507	}
 508
 509	DYN_FTRACE_TEST_NAME();
 510
 511	unregister_ftrace_function(&test_recsafe_probe);
 512
 513	ret = -1;
 514	if (trace_selftest_recursion_cnt != 2) {
 515		pr_cont("*callback not called expected 2 times (%d)* ",
 516			trace_selftest_recursion_cnt);
 517		goto out;
 518	}
 519
 520	ret = 0;
 521out:
 522	ftrace_enabled = save_ftrace_enabled;
 523
 524	return ret;
 525}
 526#else
 527# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
 528# define trace_selftest_function_recursion() ({ 0; })
 529#endif /* CONFIG_DYNAMIC_FTRACE */
 530
 531static enum {
 532	TRACE_SELFTEST_REGS_START,
 533	TRACE_SELFTEST_REGS_FOUND,
 534	TRACE_SELFTEST_REGS_NOT_FOUND,
 535} trace_selftest_regs_stat;
 536
 537static void trace_selftest_test_regs_func(unsigned long ip,
 538					  unsigned long pip,
 539					  struct ftrace_ops *op,
 540					  struct pt_regs *pt_regs)
 541{
 542	if (pt_regs)
 543		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
 544	else
 545		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
 546}
 547
 548static struct ftrace_ops test_regs_probe = {
 549	.func		= trace_selftest_test_regs_func,
 550	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
 551};
 552
 553static int
 554trace_selftest_function_regs(void)
 555{
 556	int save_ftrace_enabled = ftrace_enabled;
 557	char *func_name;
 558	int len;
 559	int ret;
 560	int supported = 0;
 561
 562#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 563	supported = 1;
 564#endif
 565
 566	/* The previous test PASSED */
 567	pr_cont("PASSED\n");
 568	pr_info("Testing ftrace regs%s: ",
 569		!supported ? "(no arch support)" : "");
 570
 571	/* enable tracing, and record the filter function */
 572	ftrace_enabled = 1;
 573
 574	/* Handle PPC64 '.' name */
 575	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 576	len = strlen(func_name);
 577
 578	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
 579	/*
 580	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
 581	 * This test really doesn't care.
 582	 */
 583	if (ret && ret != -ENODEV) {
 584		pr_cont("*Could not set filter* ");
 585		goto out;
 586	}
 587
 588	ret = register_ftrace_function(&test_regs_probe);
 589	/*
 590	 * Now if the arch does not support passing regs, then this should
 591	 * have failed.
 592	 */
 593	if (!supported) {
 594		if (!ret) {
 595			pr_cont("*registered save-regs without arch support* ");
 596			goto out;
 597		}
 598		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
 599		ret = register_ftrace_function(&test_regs_probe);
 600	}
 601	if (ret) {
 602		pr_cont("*could not register callback* ");
 603		goto out;
 604	}
 605
 606
 607	DYN_FTRACE_TEST_NAME();
 608
 609	unregister_ftrace_function(&test_regs_probe);
 610
 611	ret = -1;
 612
 613	switch (trace_selftest_regs_stat) {
 614	case TRACE_SELFTEST_REGS_START:
 615		pr_cont("*callback never called* ");
 616		goto out;
 617
 618	case TRACE_SELFTEST_REGS_FOUND:
 619		if (supported)
 620			break;
 621		pr_cont("*callback received regs without arch support* ");
 622		goto out;
 623
 624	case TRACE_SELFTEST_REGS_NOT_FOUND:
 625		if (!supported)
 626			break;
 627		pr_cont("*callback received NULL regs* ");
 628		goto out;
 629	}
 630
 631	ret = 0;
 632out:
 633	ftrace_enabled = save_ftrace_enabled;
 634
 635	return ret;
 636}
 637
 638/*
 639 * Simple verification test of ftrace function tracer.
 640 * Enable ftrace, sleep 1/10 second, and then read the trace
 641 * buffer to see if all is in order.
 642 */
 643__init int
 644trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 645{
 646	int save_ftrace_enabled = ftrace_enabled;
 647	unsigned long count;
 648	int ret;
 649
 650#ifdef CONFIG_DYNAMIC_FTRACE
 651	if (ftrace_filter_param) {
 652		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
 653		return 0;
 654	}
 655#endif
 656
 657	/* make sure msleep has been recorded */
 658	msleep(1);
 659
 660	/* start the tracing */
 661	ftrace_enabled = 1;
 662
 663	ret = tracer_init(trace, tr);
 664	if (ret) {
 665		warn_failed_init_tracer(trace, ret);
 666		goto out;
 667	}
 668
  669	/* Sleep for 1/10 of a second */
 670	msleep(100);
 671	/* stop the tracing. */
 672	tracing_stop();
 673	ftrace_enabled = 0;
 674
 675	/* check the trace buffer */
 676	ret = trace_test_buffer(&tr->trace_buffer, &count);
 677	trace->reset(tr);
 678	tracing_start();
 679
 680	if (!ret && !count) {
 681		printk(KERN_CONT ".. no entries found ..");
 682		ret = -1;
 683		goto out;
 684	}
 685
 686	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
 687						     DYN_FTRACE_TEST_NAME);
 688	if (ret)
 689		goto out;
 690
 691	ret = trace_selftest_function_recursion();
 692	if (ret)
 693		goto out;
 694
 695	ret = trace_selftest_function_regs();
 696 out:
 697	ftrace_enabled = save_ftrace_enabled;
 698
 699	/* kill ftrace totally if we failed */
 700	if (ret)
 701		ftrace_kill();
 702
 703	return ret;
 704}
 705#endif /* CONFIG_FUNCTION_TRACER */
 706
 707
 708#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 709
 710/* Maximum number of functions to trace before diagnosing a hang */
 711#define GRAPH_MAX_FUNC_TEST	100000000
 712
 713static unsigned int graph_hang_thresh;
 714
 715/* Wrap the real function entry probe to avoid possible hanging */
 716static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
 717{
  718	/* This is harmlessly racy; we only want to approximately detect a hang */
 719	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
 720		ftrace_graph_stop();
 721		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
 722		if (ftrace_dump_on_oops) {
 723			ftrace_dump(DUMP_ALL);
 724			/* ftrace_dump() disables tracing */
 725			tracing_on();
 726		}
 727		return 0;
 728	}
 729
 730	return trace_graph_entry(trace);
 731}
 732
 733/*
  734 * Pretty much the same as for the function tracer, from which this
  735 * selftest has been borrowed.
 736 */
 737__init int
 738trace_selftest_startup_function_graph(struct tracer *trace,
 739					struct trace_array *tr)
 740{
 741	int ret;
 742	unsigned long count;
 743
 744#ifdef CONFIG_DYNAMIC_FTRACE
 745	if (ftrace_filter_param) {
 746		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
 747		return 0;
 748	}
 749#endif
 750
 751	/*
  752	 * Simulate the init() callback, but attach a watchdog callback
  753	 * to detect and recover from possible hangs.
 754	 */
 755	tracing_reset_online_cpus(&tr->trace_buffer);
 756	set_graph_array(tr);
 757	ret = register_ftrace_graph(&trace_graph_return,
 758				    &trace_graph_entry_watchdog);
 759	if (ret) {
 760		warn_failed_init_tracer(trace, ret);
 761		goto out;
 762	}
 763	tracing_start_cmdline_record();
 764
  765	/* Sleep for 1/10 of a second */
 766	msleep(100);
 767
 768	/* Have we just recovered from a hang? */
 769	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
 770		tracing_selftest_disabled = true;
 771		ret = -1;
 772		goto out;
 773	}
 774
 775	tracing_stop();
 776
 777	/* check the trace buffer */
 778	ret = trace_test_buffer(&tr->trace_buffer, &count);
 779
 780	trace->reset(tr);
 781	tracing_start();
 782
 783	if (!ret && !count) {
 784		printk(KERN_CONT ".. no entries found ..");
 785		ret = -1;
 786		goto out;
 787	}
 788
 789	/* Don't test dynamic tracing, the function tracer already did */
 790
 791out:
 792	/* Stop it if we failed */
 793	if (ret)
 794		ftrace_graph_stop();
 795
 796	return ret;
 797}
 798#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 799
 800
 801#ifdef CONFIG_IRQSOFF_TRACER
 802int
 803trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 804{
 805	unsigned long save_max = tracing_max_latency;
 806	unsigned long count;
 807	int ret;
 808
 809	/* start the tracing */
 810	ret = tracer_init(trace, tr);
 811	if (ret) {
 812		warn_failed_init_tracer(trace, ret);
 813		return ret;
 814	}
 815
 816	/* reset the max latency */
 817	tracing_max_latency = 0;
 818	/* disable interrupts for a bit */
 819	local_irq_disable();
 820	udelay(100);
 821	local_irq_enable();
 822
 823	/*
  824	 * Stop the tracer to avoid a warning from a failed
  825	 * buffer flip: tracing_stop() disables the tr and max
  826	 * buffers, making flipping impossible while parallel
  827	 * max irqs off latencies are being recorded.
 828	 */
 829	trace->stop(tr);
 830	/* stop the tracing. */
 831	tracing_stop();
 832	/* check both trace buffers */
 833	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 834	if (!ret)
 835		ret = trace_test_buffer(&tr->max_buffer, &count);
 836	trace->reset(tr);
 837	tracing_start();
 838
 839	if (!ret && !count) {
 840		printk(KERN_CONT ".. no entries found ..");
 841		ret = -1;
 842	}
 843
 844	tracing_max_latency = save_max;
 845
 846	return ret;
 847}
 848#endif /* CONFIG_IRQSOFF_TRACER */
 849
 850#ifdef CONFIG_PREEMPT_TRACER
 851int
 852trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 853{
 854	unsigned long save_max = tracing_max_latency;
 855	unsigned long count;
 856	int ret;
 857
 858	/*
  859	 * Now that the big kernel lock is no longer preemptible,
 860	 * and this is called with the BKL held, it will always
 861	 * fail. If preemption is already disabled, simply
 862	 * pass the test. When the BKL is removed, or becomes
 863	 * preemptible again, we will once again test this,
 864	 * so keep it in.
 865	 */
 866	if (preempt_count()) {
 867		printk(KERN_CONT "can not test ... force ");
 868		return 0;
 869	}
 870
 871	/* start the tracing */
 872	ret = tracer_init(trace, tr);
 873	if (ret) {
 874		warn_failed_init_tracer(trace, ret);
 875		return ret;
 876	}
 877
 878	/* reset the max latency */
 879	tracing_max_latency = 0;
 880	/* disable preemption for a bit */
 881	preempt_disable();
 882	udelay(100);
 883	preempt_enable();
 884
 885	/*
  886	 * Stop the tracer to avoid a warning from a failed
  887	 * buffer flip: tracing_stop() disables the tr and max
  888	 * buffers, making flipping impossible while parallel
  889	 * max preempt off latencies are being recorded.
 890	 */
 891	trace->stop(tr);
 892	/* stop the tracing. */
 893	tracing_stop();
 894	/* check both trace buffers */
 895	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 896	if (!ret)
 897		ret = trace_test_buffer(&tr->max_buffer, &count);
 898	trace->reset(tr);
 899	tracing_start();
 900
 901	if (!ret && !count) {
 902		printk(KERN_CONT ".. no entries found ..");
 903		ret = -1;
 904	}
 905
 906	tracing_max_latency = save_max;
 907
 908	return ret;
 909}
 910#endif /* CONFIG_PREEMPT_TRACER */
 911
 912#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
 913int
 914trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
 915{
 916	unsigned long save_max = tracing_max_latency;
 917	unsigned long count;
 918	int ret;
 919
 920	/*
  921	 * Now that the big kernel lock is no longer preemptible,
 922	 * and this is called with the BKL held, it will always
 923	 * fail. If preemption is already disabled, simply
 924	 * pass the test. When the BKL is removed, or becomes
 925	 * preemptible again, we will once again test this,
 926	 * so keep it in.
 927	 */
 928	if (preempt_count()) {
 929		printk(KERN_CONT "can not test ... force ");
 930		return 0;
 931	}
 932
 933	/* start the tracing */
 934	ret = tracer_init(trace, tr);
 935	if (ret) {
 936		warn_failed_init_tracer(trace, ret);
 937		goto out_no_start;
 938	}
 939
 940	/* reset the max latency */
 941	tracing_max_latency = 0;
 942
 943	/* disable preemption and interrupts for a bit */
 944	preempt_disable();
 945	local_irq_disable();
 946	udelay(100);
 947	preempt_enable();
 948	/* reverse the order of preempt vs irqs */
 949	local_irq_enable();
 950
 951	/*
  952	 * Stop the tracer to avoid a warning from a failed
  953	 * buffer flip: tracing_stop() disables the tr and max
  954	 * buffers, making flipping impossible while parallel
  955	 * max irqs/preempt off latencies are being recorded.
 956	 */
 957	trace->stop(tr);
 958	/* stop the tracing. */
 959	tracing_stop();
 960	/* check both trace buffers */
 961	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 962	if (ret)
 963		goto out;
 964
 965	ret = trace_test_buffer(&tr->max_buffer, &count);
 966	if (ret)
 967		goto out;
 968
 969	if (!ret && !count) {
 970		printk(KERN_CONT ".. no entries found ..");
 971		ret = -1;
 972		goto out;
 973	}
 974
 975	/* do the test by disabling interrupts first this time */
 976	tracing_max_latency = 0;
 977	tracing_start();
 978	trace->start(tr);
 979
 980	preempt_disable();
 981	local_irq_disable();
 982	udelay(100);
 983	preempt_enable();
 984	/* reverse the order of preempt vs irqs */
 985	local_irq_enable();
 986
 987	trace->stop(tr);
 988	/* stop the tracing. */
 989	tracing_stop();
 990	/* check both trace buffers */
 991	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 992	if (ret)
 993		goto out;
 994
 995	ret = trace_test_buffer(&tr->max_buffer, &count);
 996
 997	if (!ret && !count) {
 998		printk(KERN_CONT ".. no entries found ..");
 999		ret = -1;
1000		goto out;
1001	}
1002
1003out:
1004	tracing_start();
1005out_no_start:
1006	trace->reset(tr);
1007	tracing_max_latency = save_max;
1008
1009	return ret;
1010}
1011#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
1012
1013#ifdef CONFIG_NOP_TRACER
1014int
1015trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
1016{
1017	/* What could possibly go wrong? */
1018	return 0;
1019}
1020#endif
1021
1022#ifdef CONFIG_SCHED_TRACER
1023static int trace_wakeup_test_thread(void *data)
1024{
1025	/* Make this a -deadline thread */
1026	static const struct sched_attr attr = {
1027		.sched_policy = SCHED_DEADLINE,
1028		.sched_runtime = 100000ULL,
1029		.sched_deadline = 10000000ULL,
1030		.sched_period = 10000000ULL
1031	};
1032	struct completion *x = data;
1033
1034	sched_setattr(current, &attr);
1035
 1036	/* Let the test know we have our new prio */
1037	complete(x);
1038
1039	/* now go to sleep and let the test wake us up */
1040	set_current_state(TASK_INTERRUPTIBLE);
1041	schedule();
1042
1043	complete(x);
1044
1045	/* we are awake, now wait to disappear */
1046	while (!kthread_should_stop()) {
1047		/*
 1048		 * This will likely be the system's top priority
 1049		 * task; do short sleeps to let others run.
1050		 */
1051		msleep(100);
1052	}
1053
1054	return 0;
1055}
1056
1057int
1058trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
1059{
1060	unsigned long save_max = tracing_max_latency;
1061	struct task_struct *p;
1062	struct completion is_ready;
1063	unsigned long count;
1064	int ret;
1065
1066	init_completion(&is_ready);
1067
1068	/* create a -deadline thread */
1069	p = kthread_run(trace_wakeup_test_thread, &is_ready, "ftrace-test");
1070	if (IS_ERR(p)) {
1071		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
1072		return -1;
1073	}
1074
1075	/* make sure the thread is running at -deadline policy */
1076	wait_for_completion(&is_ready);
1077
1078	/* start the tracing */
1079	ret = tracer_init(trace, tr);
1080	if (ret) {
1081		warn_failed_init_tracer(trace, ret);
1082		return ret;
1083	}
1084
1085	/* reset the max latency */
1086	tracing_max_latency = 0;
1087
1088	while (p->on_rq) {
1089		/*
1090		 * Sleep to make sure the -deadline thread is asleep too.
1091		 * On virtual machines we can't rely on timings,
1092		 * but we want to make sure this test still works.
1093		 */
1094		msleep(100);
1095	}
1096
1097	init_completion(&is_ready);
1098
1099	wake_up_process(p);
1100
1101	/* Wait for the task to wake up */
1102	wait_for_completion(&is_ready);
1103
1104	/* stop the tracing. */
1105	tracing_stop();
1106	/* check both trace buffers */
1107	ret = trace_test_buffer(&tr->trace_buffer, NULL);
1108	printk("ret = %d\n", ret);
1109	if (!ret)
1110		ret = trace_test_buffer(&tr->max_buffer, &count);
1111
1112
1113	trace->reset(tr);
1114	tracing_start();
1115
1116	tracing_max_latency = save_max;
1117
1118	/* kill the thread */
1119	kthread_stop(p);
1120
1121	if (!ret && !count) {
1122		printk(KERN_CONT ".. no entries found ..");
1123		ret = -1;
1124	}
1125
1126	return ret;
1127}
1128#endif /* CONFIG_SCHED_TRACER */
1129
1130#ifdef CONFIG_CONTEXT_SWITCH_TRACER
1131int
1132trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
1133{
1134	unsigned long count;
1135	int ret;
1136
1137	/* start the tracing */
1138	ret = tracer_init(trace, tr);
1139	if (ret) {
1140		warn_failed_init_tracer(trace, ret);
1141		return ret;
1142	}
1143
 1144	/* Sleep for 1/10 of a second */
1145	msleep(100);
1146	/* stop the tracing. */
1147	tracing_stop();
1148	/* check the trace buffer */
1149	ret = trace_test_buffer(&tr->trace_buffer, &count);
1150	trace->reset(tr);
1151	tracing_start();
1152
1153	if (!ret && !count) {
1154		printk(KERN_CONT ".. no entries found ..");
1155		ret = -1;
1156	}
1157
1158	return ret;
1159}
1160#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
1161
1162#ifdef CONFIG_BRANCH_TRACER
1163int
1164trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
1165{
1166	unsigned long count;
1167	int ret;
1168
1169	/* start the tracing */
1170	ret = tracer_init(trace, tr);
1171	if (ret) {
1172		warn_failed_init_tracer(trace, ret);
1173		return ret;
1174	}
1175
 1176	/* Sleep for 1/10 of a second */
1177	msleep(100);
1178	/* stop the tracing. */
1179	tracing_stop();
1180	/* check the trace buffer */
1181	ret = trace_test_buffer(&tr->trace_buffer, &count);
1182	trace->reset(tr);
1183	tracing_start();
1184
1185	if (!ret && !count) {
1186		printk(KERN_CONT ".. no entries found ..");
1187		ret = -1;
1188	}
1189
1190	return ret;
1191}
1192#endif /* CONFIG_BRANCH_TRACER */
1193
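The v4.17 version of the same file follows. The main differences visible between the two listings:

- An SPDX license identifier and #include <uapi/linux/sched/types.h> are added.
- trace_test_buffer() is marked __maybe_unused and takes the per-array buf->tr->max_lock instead of the global ftrace_max_lock.
- The FTRACE_OPS_FL_GLOBAL test_global ops is gone: trace_selftest_ops() now takes the trace_array and, when cnt > 1, sets up per-instance ops with ftrace_init_array_ops() and registers tr->ops.
- trace_selftest_startup_dynamic_tracing() becomes static, and both it and trace_selftest_startup_function() turn ftrace_enabled back on after checking the buffer.
- The global tracing_max_latency becomes tr->max_latency throughout the latency selftests.
- The wakeup test hands the kthread a wakeup_test_data (a completion plus a go flag) so the thread re-checks the flag around schedule(), closing the window for a missed wakeup; a stray debug printk("ret = %d\n", ret) is also dropped.
- The sched_switch selftest under CONFIG_CONTEXT_SWITCH_TRACER is removed.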
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/* Include in trace.c */
   3
   4#include <uapi/linux/sched/types.h>
   5#include <linux/stringify.h>
   6#include <linux/kthread.h>
   7#include <linux/delay.h>
   8#include <linux/slab.h>
   9
  10static inline int trace_valid_entry(struct trace_entry *entry)
  11{
  12	switch (entry->type) {
  13	case TRACE_FN:
  14	case TRACE_CTX:
  15	case TRACE_WAKE:
  16	case TRACE_STACK:
  17	case TRACE_PRINT:
  18	case TRACE_BRANCH:
  19	case TRACE_GRAPH_ENT:
  20	case TRACE_GRAPH_RET:
  21		return 1;
  22	}
  23	return 0;
  24}
  25
  26static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
  27{
  28	struct ring_buffer_event *event;
  29	struct trace_entry *entry;
  30	unsigned int loops = 0;
  31
  32	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
  33		entry = ring_buffer_event_data(event);
  34
  35		/*
   36		 * The ring buffer is trace_buf_size in size; if we
   37		 * loop more times than that, there's something wrong
   38		 * with the ring buffer.
  39		 */
  40		if (loops++ > trace_buf_size) {
  41			printk(KERN_CONT ".. bad ring buffer ");
  42			goto failed;
  43		}
  44		if (!trace_valid_entry(entry)) {
  45			printk(KERN_CONT ".. invalid entry %d ",
  46				entry->type);
  47			goto failed;
  48		}
  49	}
  50	return 0;
  51
  52 failed:
  53	/* disable tracing */
  54	tracing_disabled = 1;
  55	printk(KERN_CONT ".. corrupted trace buffer .. ");
  56	return -1;
  57}
  58
  59/*
  60 * Test the trace buffer to see if all the elements
  61 * are still sane.
  62 */
  63static int __maybe_unused trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
  64{
  65	unsigned long flags, cnt = 0;
  66	int cpu, ret = 0;
  67
  68	/* Don't allow flipping of max traces now */
  69	local_irq_save(flags);
  70	arch_spin_lock(&buf->tr->max_lock);
  71
  72	cnt = ring_buffer_entries(buf->buffer);
  73
  74	/*
  75	 * The trace_test_buffer_cpu runs a while loop to consume all data.
  76	 * If the calling tracer is broken, and is constantly filling
  77	 * the buffer, this will run forever, and hard lock the box.
  78	 * We disable the ring buffer while we do this test to prevent
  79	 * a hard lock up.
  80	 */
  81	tracing_off();
  82	for_each_possible_cpu(cpu) {
  83		ret = trace_test_buffer_cpu(buf, cpu);
  84		if (ret)
  85			break;
  86	}
  87	tracing_on();
  88	arch_spin_unlock(&buf->tr->max_lock);
  89	local_irq_restore(flags);
  90
  91	if (count)
  92		*count = cnt;
  93
  94	return ret;
  95}
  96
  97static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
  98{
  99	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
 100		trace->name, init_ret);
 101}
 102#ifdef CONFIG_FUNCTION_TRACER
 103
 104#ifdef CONFIG_DYNAMIC_FTRACE
 105
 106static int trace_selftest_test_probe1_cnt;
 107static void trace_selftest_test_probe1_func(unsigned long ip,
 108					    unsigned long pip,
 109					    struct ftrace_ops *op,
 110					    struct pt_regs *pt_regs)
 111{
 112	trace_selftest_test_probe1_cnt++;
 113}
 114
 115static int trace_selftest_test_probe2_cnt;
 116static void trace_selftest_test_probe2_func(unsigned long ip,
 117					    unsigned long pip,
 118					    struct ftrace_ops *op,
 119					    struct pt_regs *pt_regs)
 120{
 121	trace_selftest_test_probe2_cnt++;
 122}
 123
 124static int trace_selftest_test_probe3_cnt;
 125static void trace_selftest_test_probe3_func(unsigned long ip,
 126					    unsigned long pip,
 127					    struct ftrace_ops *op,
 128					    struct pt_regs *pt_regs)
 129{
 130	trace_selftest_test_probe3_cnt++;
 131}
 132
 133static int trace_selftest_test_global_cnt;
 134static void trace_selftest_test_global_func(unsigned long ip,
 135					    unsigned long pip,
 136					    struct ftrace_ops *op,
 137					    struct pt_regs *pt_regs)
 138{
 139	trace_selftest_test_global_cnt++;
 140}
 141
 142static int trace_selftest_test_dyn_cnt;
 143static void trace_selftest_test_dyn_func(unsigned long ip,
 144					 unsigned long pip,
 145					 struct ftrace_ops *op,
 146					 struct pt_regs *pt_regs)
 147{
 148	trace_selftest_test_dyn_cnt++;
 149}
 150
 151static struct ftrace_ops test_probe1 = {
 152	.func			= trace_selftest_test_probe1_func,
 153	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 154};
 155
 156static struct ftrace_ops test_probe2 = {
 157	.func			= trace_selftest_test_probe2_func,
 158	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 159};
 160
 161static struct ftrace_ops test_probe3 = {
 162	.func			= trace_selftest_test_probe3_func,
 163	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 164};
 165
 166static void print_counts(void)
 167{
 168	printk("(%d %d %d %d %d) ",
 169	       trace_selftest_test_probe1_cnt,
 170	       trace_selftest_test_probe2_cnt,
 171	       trace_selftest_test_probe3_cnt,
 172	       trace_selftest_test_global_cnt,
 173	       trace_selftest_test_dyn_cnt);
 174}
 175
 176static void reset_counts(void)
 177{
 178	trace_selftest_test_probe1_cnt = 0;
 179	trace_selftest_test_probe2_cnt = 0;
 180	trace_selftest_test_probe3_cnt = 0;
 181	trace_selftest_test_global_cnt = 0;
 182	trace_selftest_test_dyn_cnt = 0;
 183}
 184
 185static int trace_selftest_ops(struct trace_array *tr, int cnt)
 186{
 187	int save_ftrace_enabled = ftrace_enabled;
 188	struct ftrace_ops *dyn_ops;
 189	char *func1_name;
 190	char *func2_name;
 191	int len1;
 192	int len2;
 193	int ret = -1;
 194
 195	printk(KERN_CONT "PASSED\n");
 196	pr_info("Testing dynamic ftrace ops #%d: ", cnt);
 197
 198	ftrace_enabled = 1;
 199	reset_counts();
 200
 201	/* Handle PPC64 '.' name */
 202	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 203	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
 204	len1 = strlen(func1_name);
 205	len2 = strlen(func2_name);
 206
 207	/*
 208	 * Probe 1 will trace function 1.
 209	 * Probe 2 will trace function 2.
 210	 * Probe 3 will trace functions 1 and 2.
 211	 */
 212	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
 213	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
 214	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
 215	ftrace_set_filter(&test_probe3, func2_name, len2, 0);
 216
 217	register_ftrace_function(&test_probe1);
 218	register_ftrace_function(&test_probe2);
 219	register_ftrace_function(&test_probe3);
 220	/* First time we are running with main function */
 221	if (cnt > 1) {
 222		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
 223		register_ftrace_function(tr->ops);
 224	}
 225
 226	DYN_FTRACE_TEST_NAME();
 227
 228	print_counts();
 229
 230	if (trace_selftest_test_probe1_cnt != 1)
 231		goto out;
 232	if (trace_selftest_test_probe2_cnt != 0)
 233		goto out;
 234	if (trace_selftest_test_probe3_cnt != 1)
 235		goto out;
 236	if (cnt > 1) {
 237		if (trace_selftest_test_global_cnt == 0)
 238			goto out;
 239	}
 240
 241	DYN_FTRACE_TEST_NAME2();
 242
 243	print_counts();
 244
 245	if (trace_selftest_test_probe1_cnt != 1)
 246		goto out;
 247	if (trace_selftest_test_probe2_cnt != 1)
 248		goto out;
 249	if (trace_selftest_test_probe3_cnt != 2)
 250		goto out;
 251
 252	/* Add a dynamic probe */
 253	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
 254	if (!dyn_ops) {
 255		printk("MEMORY ERROR ");
 256		goto out;
 257	}
 258
 259	dyn_ops->func = trace_selftest_test_dyn_func;
 260
 261	register_ftrace_function(dyn_ops);
 262
 263	trace_selftest_test_global_cnt = 0;
 264
 265	DYN_FTRACE_TEST_NAME();
 266
 267	print_counts();
 268
 269	if (trace_selftest_test_probe1_cnt != 2)
 270		goto out_free;
 271	if (trace_selftest_test_probe2_cnt != 1)
 272		goto out_free;
 273	if (trace_selftest_test_probe3_cnt != 3)
 274		goto out_free;
 275	if (cnt > 1) {
 276		if (trace_selftest_test_global_cnt == 0)
 277			goto out_free;
 278	}
 279	if (trace_selftest_test_dyn_cnt == 0)
 280		goto out_free;
 281
 282	DYN_FTRACE_TEST_NAME2();
 283
 284	print_counts();
 285
 286	if (trace_selftest_test_probe1_cnt != 2)
 287		goto out_free;
 288	if (trace_selftest_test_probe2_cnt != 2)
 289		goto out_free;
 290	if (trace_selftest_test_probe3_cnt != 4)
 291		goto out_free;
 292
 293	ret = 0;
 294 out_free:
 295	unregister_ftrace_function(dyn_ops);
 296	kfree(dyn_ops);
 297
 298 out:
 299	/* Purposely unregister in the same order */
 300	unregister_ftrace_function(&test_probe1);
 301	unregister_ftrace_function(&test_probe2);
 302	unregister_ftrace_function(&test_probe3);
 303	if (cnt > 1)
 304		unregister_ftrace_function(tr->ops);
 305	ftrace_reset_array_ops(tr);
 306
 307	/* Make sure everything is off */
 308	reset_counts();
 309	DYN_FTRACE_TEST_NAME();
 310	DYN_FTRACE_TEST_NAME();
 311
 312	if (trace_selftest_test_probe1_cnt ||
 313	    trace_selftest_test_probe2_cnt ||
 314	    trace_selftest_test_probe3_cnt ||
 315	    trace_selftest_test_global_cnt ||
 316	    trace_selftest_test_dyn_cnt)
 317		ret = -1;
 318
 319	ftrace_enabled = save_ftrace_enabled;
 320
 321	return ret;
 322}
 323
 324/* Test dynamic code modification and ftrace filters */
 325static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 326						  struct trace_array *tr,
 327						  int (*func)(void))
 328{
 329	int save_ftrace_enabled = ftrace_enabled;
 330	unsigned long count;
 331	char *func_name;
 332	int ret;
 333
 334	/* The ftrace test PASSED */
 335	printk(KERN_CONT "PASSED\n");
 336	pr_info("Testing dynamic ftrace: ");
 337
 338	/* enable tracing, and record the filter function */
 339	ftrace_enabled = 1;
 340
 341	/* passed in by parameter to fool gcc from optimizing */
 342	func();
 343
 344	/*
 345	 * Some archs *cough*PowerPC*cough* add characters to the
 346	 * start of the function names. We simply put a '*' to
 347	 * accommodate them.
 348	 */
 349	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 350
 351	/* filter only on our function */
 352	ftrace_set_global_filter(func_name, strlen(func_name), 1);
 353
 354	/* enable tracing */
 355	ret = tracer_init(trace, tr);
 356	if (ret) {
 357		warn_failed_init_tracer(trace, ret);
 358		goto out;
 359	}
 360
  361	/* Sleep for 1/10 of a second */
 362	msleep(100);
 363
 364	/* we should have nothing in the buffer */
 365	ret = trace_test_buffer(&tr->trace_buffer, &count);
 366	if (ret)
 367		goto out;
 368
 369	if (count) {
 370		ret = -1;
 371		printk(KERN_CONT ".. filter did not filter .. ");
 372		goto out;
 373	}
 374
 375	/* call our function again */
 376	func();
 377
 378	/* sleep again */
 379	msleep(100);
 380
 381	/* stop the tracing. */
 382	tracing_stop();
 383	ftrace_enabled = 0;
 384
 385	/* check the trace buffer */
 386	ret = trace_test_buffer(&tr->trace_buffer, &count);
 387
 388	ftrace_enabled = 1;
 389	tracing_start();
 390
 391	/* we should only have one item */
 392	if (!ret && count != 1) {
 393		trace->reset(tr);
 394		printk(KERN_CONT ".. filter failed count=%ld ..", count);
 395		ret = -1;
 396		goto out;
 397	}
 398
 399	/* Test the ops with global tracing running */
 400	ret = trace_selftest_ops(tr, 1);
 401	trace->reset(tr);
 402
 403 out:
 404	ftrace_enabled = save_ftrace_enabled;
 405
 406	/* Enable tracing on all functions again */
 407	ftrace_set_global_filter(NULL, 0, 1);
 408
 409	/* Test the ops with global tracing off */
 410	if (!ret)
 411		ret = trace_selftest_ops(tr, 2);
 412
 413	return ret;
 414}
 415
 416static int trace_selftest_recursion_cnt;
 417static void trace_selftest_test_recursion_func(unsigned long ip,
 418					       unsigned long pip,
 419					       struct ftrace_ops *op,
 420					       struct pt_regs *pt_regs)
 421{
 422	/*
 423	 * This function is registered without the recursion safe flag.
 424	 * The ftrace infrastructure should provide the recursion
 425	 * protection. If not, this will crash the kernel!
 426	 */
 427	if (trace_selftest_recursion_cnt++ > 10)
 428		return;
 429	DYN_FTRACE_TEST_NAME();
 430}
 431
 432static void trace_selftest_test_recursion_safe_func(unsigned long ip,
 433						    unsigned long pip,
 434						    struct ftrace_ops *op,
 435						    struct pt_regs *pt_regs)
 436{
 437	/*
  438	 * We said we would provide our own recursion protection. By
  439	 * calling this function again, we should recurse back into it
 440	 * and count again. But this only happens if the arch supports
 441	 * all of ftrace features and nothing else is using the function
 442	 * tracing utility.
 443	 */
 444	if (trace_selftest_recursion_cnt++)
 445		return;
 446	DYN_FTRACE_TEST_NAME();
 447}
 448
 449static struct ftrace_ops test_rec_probe = {
 450	.func			= trace_selftest_test_recursion_func,
 451};
 452
 453static struct ftrace_ops test_recsafe_probe = {
 454	.func			= trace_selftest_test_recursion_safe_func,
 455	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 456};
 457
 458static int
 459trace_selftest_function_recursion(void)
 460{
 461	int save_ftrace_enabled = ftrace_enabled;
 462	char *func_name;
 463	int len;
 464	int ret;
 465
 466	/* The previous test PASSED */
 467	pr_cont("PASSED\n");
 468	pr_info("Testing ftrace recursion: ");
 469
 470
 471	/* enable tracing, and record the filter function */
 472	ftrace_enabled = 1;
 473
 474	/* Handle PPC64 '.' name */
 475	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 476	len = strlen(func_name);
 477
 478	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
 479	if (ret) {
 480		pr_cont("*Could not set filter* ");
 481		goto out;
 482	}
 483
 484	ret = register_ftrace_function(&test_rec_probe);
 485	if (ret) {
 486		pr_cont("*could not register callback* ");
 487		goto out;
 488	}
 489
 490	DYN_FTRACE_TEST_NAME();
 491
 492	unregister_ftrace_function(&test_rec_probe);
 493
 494	ret = -1;
 495	if (trace_selftest_recursion_cnt != 1) {
 496		pr_cont("*callback not called once (%d)* ",
 497			trace_selftest_recursion_cnt);
 498		goto out;
 499	}
 500
 501	trace_selftest_recursion_cnt = 1;
 502
 503	pr_cont("PASSED\n");
 504	pr_info("Testing ftrace recursion safe: ");
 505
 506	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
 507	if (ret) {
 508		pr_cont("*Could not set filter* ");
 509		goto out;
 510	}
 511
 512	ret = register_ftrace_function(&test_recsafe_probe);
 513	if (ret) {
 514		pr_cont("*could not register callback* ");
 515		goto out;
 516	}
 517
 518	DYN_FTRACE_TEST_NAME();
 519
 520	unregister_ftrace_function(&test_recsafe_probe);
 521
 522	ret = -1;
 523	if (trace_selftest_recursion_cnt != 2) {
 524		pr_cont("*callback not called expected 2 times (%d)* ",
 525			trace_selftest_recursion_cnt);
 526		goto out;
 527	}
 528
 529	ret = 0;
 530out:
 531	ftrace_enabled = save_ftrace_enabled;
 532
 533	return ret;
 534}
 535#else
 536# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
 537# define trace_selftest_function_recursion() ({ 0; })
 538#endif /* CONFIG_DYNAMIC_FTRACE */
 539
 540static enum {
 541	TRACE_SELFTEST_REGS_START,
 542	TRACE_SELFTEST_REGS_FOUND,
 543	TRACE_SELFTEST_REGS_NOT_FOUND,
 544} trace_selftest_regs_stat;
 545
 546static void trace_selftest_test_regs_func(unsigned long ip,
 547					  unsigned long pip,
 548					  struct ftrace_ops *op,
 549					  struct pt_regs *pt_regs)
 550{
 551	if (pt_regs)
 552		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
 553	else
 554		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
 555}
 556
 557static struct ftrace_ops test_regs_probe = {
 558	.func		= trace_selftest_test_regs_func,
 559	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
 560};
 561
 562static int
 563trace_selftest_function_regs(void)
 564{
 565	int save_ftrace_enabled = ftrace_enabled;
 566	char *func_name;
 567	int len;
 568	int ret;
 569	int supported = 0;
 570
 571#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 572	supported = 1;
 573#endif
 574
 575	/* The previous test PASSED */
 576	pr_cont("PASSED\n");
 577	pr_info("Testing ftrace regs%s: ",
 578		!supported ? "(no arch support)" : "");
 579
 580	/* enable tracing, and record the filter function */
 581	ftrace_enabled = 1;
 582
 583	/* Handle PPC64 '.' name */
 584	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 585	len = strlen(func_name);
 586
 587	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
 588	/*
 589	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
 590	 * This test really doesn't care.
 591	 */
 592	if (ret && ret != -ENODEV) {
 593		pr_cont("*Could not set filter* ");
 594		goto out;
 595	}
 596
 597	ret = register_ftrace_function(&test_regs_probe);
 598	/*
 599	 * Now if the arch does not support passing regs, then this should
 600	 * have failed.
 601	 */
 602	if (!supported) {
 603		if (!ret) {
 604			pr_cont("*registered save-regs without arch support* ");
 605			goto out;
 606		}
 607		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
 608		ret = register_ftrace_function(&test_regs_probe);
 609	}
 610	if (ret) {
 611		pr_cont("*could not register callback* ");
 612		goto out;
 613	}
 614
 615
 616	DYN_FTRACE_TEST_NAME();
 617
 618	unregister_ftrace_function(&test_regs_probe);
 619
 620	ret = -1;
 621
 622	switch (trace_selftest_regs_stat) {
 623	case TRACE_SELFTEST_REGS_START:
 624		pr_cont("*callback never called* ");
 625		goto out;
 626
 627	case TRACE_SELFTEST_REGS_FOUND:
 628		if (supported)
 629			break;
 630		pr_cont("*callback received regs without arch support* ");
 631		goto out;
 632
 633	case TRACE_SELFTEST_REGS_NOT_FOUND:
 634		if (!supported)
 635			break;
 636		pr_cont("*callback received NULL regs* ");
 637		goto out;
 638	}
 639
 640	ret = 0;
 641out:
 642	ftrace_enabled = save_ftrace_enabled;
 643
 644	return ret;
 645}
 646
 647/*
 648 * Simple verification test of ftrace function tracer.
 649 * Enable ftrace, sleep 1/10 second, and then read the trace
 650 * buffer to see if all is in order.
 651 */
 652__init int
 653trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 654{
 655	int save_ftrace_enabled = ftrace_enabled;
 656	unsigned long count;
 657	int ret;
 658
 659#ifdef CONFIG_DYNAMIC_FTRACE
 660	if (ftrace_filter_param) {
 661		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
 662		return 0;
 663	}
 664#endif
 665
 666	/* make sure msleep has been recorded */
 667	msleep(1);
 668
 669	/* start the tracing */
 670	ftrace_enabled = 1;
 671
 672	ret = tracer_init(trace, tr);
 673	if (ret) {
 674		warn_failed_init_tracer(trace, ret);
 675		goto out;
 676	}
 677
  678	/* Sleep for 1/10 of a second */
 679	msleep(100);
 680	/* stop the tracing. */
 681	tracing_stop();
 682	ftrace_enabled = 0;
 683
 684	/* check the trace buffer */
 685	ret = trace_test_buffer(&tr->trace_buffer, &count);
 686
 687	ftrace_enabled = 1;
 688	trace->reset(tr);
 689	tracing_start();
 690
 691	if (!ret && !count) {
 692		printk(KERN_CONT ".. no entries found ..");
 693		ret = -1;
 694		goto out;
 695	}
 696
 697	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
 698						     DYN_FTRACE_TEST_NAME);
 699	if (ret)
 700		goto out;
 701
 702	ret = trace_selftest_function_recursion();
 703	if (ret)
 704		goto out;
 705
 706	ret = trace_selftest_function_regs();
 707 out:
 708	ftrace_enabled = save_ftrace_enabled;
 709
 710	/* kill ftrace totally if we failed */
 711	if (ret)
 712		ftrace_kill();
 713
 714	return ret;
 715}
 716#endif /* CONFIG_FUNCTION_TRACER */
 717
 718
 719#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 720
 721/* Maximum number of functions to trace before diagnosing a hang */
 722#define GRAPH_MAX_FUNC_TEST	100000000
 723
 724static unsigned int graph_hang_thresh;
 725
 726/* Wrap the real function entry probe to avoid possible hanging */
 727static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
 728{
  729	/* This is harmlessly racy; we only want to approximately detect a hang */
 730	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
 731		ftrace_graph_stop();
 732		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
 733		if (ftrace_dump_on_oops) {
 734			ftrace_dump(DUMP_ALL);
 735			/* ftrace_dump() disables tracing */
 736			tracing_on();
 737		}
 738		return 0;
 739	}
 740
 741	return trace_graph_entry(trace);
 742}
 743
 744/*
  745 * Pretty much the same as for the function tracer, from which this
  746 * selftest has been borrowed.
 747 */
 748__init int
 749trace_selftest_startup_function_graph(struct tracer *trace,
 750					struct trace_array *tr)
 751{
 752	int ret;
 753	unsigned long count;
 754
 755#ifdef CONFIG_DYNAMIC_FTRACE
 756	if (ftrace_filter_param) {
 757		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
 758		return 0;
 759	}
 760#endif
 761
 762	/*
  763	 * Simulate the init() callback, but attach a watchdog callback
  764	 * to detect and recover from possible hangs.
 765	 */
 766	tracing_reset_online_cpus(&tr->trace_buffer);
 767	set_graph_array(tr);
 768	ret = register_ftrace_graph(&trace_graph_return,
 769				    &trace_graph_entry_watchdog);
 770	if (ret) {
 771		warn_failed_init_tracer(trace, ret);
 772		goto out;
 773	}
 774	tracing_start_cmdline_record();
 775
  776	/* Sleep for 1/10 of a second */
 777	msleep(100);
 778
 779	/* Have we just recovered from a hang? */
 780	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
 781		tracing_selftest_disabled = true;
 782		ret = -1;
 783		goto out;
 784	}
 785
 786	tracing_stop();
 787
 788	/* check the trace buffer */
 789	ret = trace_test_buffer(&tr->trace_buffer, &count);
 790
 791	trace->reset(tr);
 792	tracing_start();
 793
 794	if (!ret && !count) {
 795		printk(KERN_CONT ".. no entries found ..");
 796		ret = -1;
 797		goto out;
 798	}
 799
 800	/* Don't test dynamic tracing, the function tracer already did */
 801
 802out:
 803	/* Stop it if we failed */
 804	if (ret)
 805		ftrace_graph_stop();
 806
 807	return ret;
 808}
 809#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 810
 811
 812#ifdef CONFIG_IRQSOFF_TRACER
 813int
 814trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 815{
 816	unsigned long save_max = tr->max_latency;
 817	unsigned long count;
 818	int ret;
 819
 820	/* start the tracing */
 821	ret = tracer_init(trace, tr);
 822	if (ret) {
 823		warn_failed_init_tracer(trace, ret);
 824		return ret;
 825	}
 826
 827	/* reset the max latency */
 828	tr->max_latency = 0;
 829	/* disable interrupts for a bit */
 830	local_irq_disable();
 831	udelay(100);
 832	local_irq_enable();
 833
 834	/*
  835	 * Stop the tracer to avoid a warning from a failed
  836	 * buffer flip: tracing_stop() disables the tr and max
  837	 * buffers, making flipping impossible while parallel
  838	 * max irqs off latencies are being recorded.
 839	 */
 840	trace->stop(tr);
 841	/* stop the tracing. */
 842	tracing_stop();
 843	/* check both trace buffers */
 844	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 845	if (!ret)
 846		ret = trace_test_buffer(&tr->max_buffer, &count);
 847	trace->reset(tr);
 848	tracing_start();
 849
 850	if (!ret && !count) {
 851		printk(KERN_CONT ".. no entries found ..");
 852		ret = -1;
 853	}
 854
 855	tr->max_latency = save_max;
 856
 857	return ret;
 858}
 859#endif /* CONFIG_IRQSOFF_TRACER */
 860
 861#ifdef CONFIG_PREEMPT_TRACER
 862int
 863trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 864{
 865	unsigned long save_max = tr->max_latency;
 866	unsigned long count;
 867	int ret;
 868
 869	/*
  870	 * Now that the big kernel lock is no longer preemptible,
 871	 * and this is called with the BKL held, it will always
 872	 * fail. If preemption is already disabled, simply
 873	 * pass the test. When the BKL is removed, or becomes
 874	 * preemptible again, we will once again test this,
 875	 * so keep it in.
 876	 */
 877	if (preempt_count()) {
 878		printk(KERN_CONT "can not test ... force ");
 879		return 0;
 880	}
 881
 882	/* start the tracing */
 883	ret = tracer_init(trace, tr);
 884	if (ret) {
 885		warn_failed_init_tracer(trace, ret);
 886		return ret;
 887	}
 888
 889	/* reset the max latency */
 890	tr->max_latency = 0;
 891	/* disable preemption for a bit */
 892	preempt_disable();
 893	udelay(100);
 894	preempt_enable();
 895
 896	/*
  897	 * Stop the tracer to avoid a warning from a failed
  898	 * buffer flip: tracing_stop() disables the tr and max
  899	 * buffers, making flipping impossible while parallel
  900	 * max preempt off latencies are being recorded.
 901	 */
 902	trace->stop(tr);
 903	/* stop the tracing. */
 904	tracing_stop();
 905	/* check both trace buffers */
 906	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 907	if (!ret)
 908		ret = trace_test_buffer(&tr->max_buffer, &count);
 909	trace->reset(tr);
 910	tracing_start();
 911
 912	if (!ret && !count) {
 913		printk(KERN_CONT ".. no entries found ..");
 914		ret = -1;
 915	}
 916
 917	tr->max_latency = save_max;
 918
 919	return ret;
 920}
 921#endif /* CONFIG_PREEMPT_TRACER */
 922
 923#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
 924int
 925trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
 926{
 927	unsigned long save_max = tr->max_latency;
 928	unsigned long count;
 929	int ret;
 930
 931	/*
  932	 * Now that the big kernel lock is no longer preemptible,
 933	 * and this is called with the BKL held, it will always
 934	 * fail. If preemption is already disabled, simply
 935	 * pass the test. When the BKL is removed, or becomes
 936	 * preemptible again, we will once again test this,
 937	 * so keep it in.
 938	 */
 939	if (preempt_count()) {
 940		printk(KERN_CONT "can not test ... force ");
 941		return 0;
 942	}
 943
 944	/* start the tracing */
 945	ret = tracer_init(trace, tr);
 946	if (ret) {
 947		warn_failed_init_tracer(trace, ret);
 948		goto out_no_start;
 949	}
 950
 951	/* reset the max latency */
 952	tr->max_latency = 0;
 953
 954	/* disable preemption and interrupts for a bit */
 955	preempt_disable();
 956	local_irq_disable();
 957	udelay(100);
 958	preempt_enable();
 959	/* reverse the order of preempt vs irqs */
 960	local_irq_enable();
 961
 962	/*
  963	 * Stop the tracer to avoid a warning from a failed
  964	 * buffer flip: tracing_stop() disables the tr and max
  965	 * buffers, making flipping impossible while parallel
  966	 * max irqs/preempt off latencies are being recorded.
 967	 */
 968	trace->stop(tr);
 969	/* stop the tracing. */
 970	tracing_stop();
 971	/* check both trace buffers */
 972	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 973	if (ret)
 974		goto out;
 975
 976	ret = trace_test_buffer(&tr->max_buffer, &count);
 977	if (ret)
 978		goto out;
 979
 980	if (!ret && !count) {
 981		printk(KERN_CONT ".. no entries found ..");
 982		ret = -1;
 983		goto out;
 984	}
 985
 986	/* do the test by disabling interrupts first this time */
 987	tr->max_latency = 0;
 988	tracing_start();
 989	trace->start(tr);
 990
 991	preempt_disable();
 992	local_irq_disable();
 993	udelay(100);
 994	preempt_enable();
 995	/* reverse the order of preempt vs irqs */
 996	local_irq_enable();
 997
 998	trace->stop(tr);
 999	/* stop the tracing. */
1000	tracing_stop();
1001	/* check both trace buffers */
1002	ret = trace_test_buffer(&tr->trace_buffer, NULL);
1003	if (ret)
1004		goto out;
1005
1006	ret = trace_test_buffer(&tr->max_buffer, &count);
1007
1008	if (!ret && !count) {
1009		printk(KERN_CONT ".. no entries found ..");
1010		ret = -1;
1011		goto out;
1012	}
1013
1014out:
1015	tracing_start();
1016out_no_start:
1017	trace->reset(tr);
1018	tr->max_latency = save_max;
1019
1020	return ret;
1021}
1022#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
1023
1024#ifdef CONFIG_NOP_TRACER
1025int
1026trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
1027{
1028	/* What could possibly go wrong? */
1029	return 0;
1030}
1031#endif
1032
1033#ifdef CONFIG_SCHED_TRACER
1034
1035struct wakeup_test_data {
1036	struct completion	is_ready;
1037	int			go;
1038};
1039
1040static int trace_wakeup_test_thread(void *data)
1041{
1042	/* Make this a -deadline thread */
1043	static const struct sched_attr attr = {
1044		.sched_policy = SCHED_DEADLINE,
1045		.sched_runtime = 100000ULL,
1046		.sched_deadline = 10000000ULL,
1047		.sched_period = 10000000ULL
1048	};
1049	struct wakeup_test_data *x = data;
1050
1051	sched_setattr(current, &attr);
1052
 1053	/* Let the test know we have our new prio */
1054	complete(&x->is_ready);
1055
1056	/* now go to sleep and let the test wake us up */
1057	set_current_state(TASK_INTERRUPTIBLE);
1058	while (!x->go) {
1059		schedule();
1060		set_current_state(TASK_INTERRUPTIBLE);
1061	}
1062
1063	complete(&x->is_ready);
1064
1065	set_current_state(TASK_INTERRUPTIBLE);
1066
1067	/* we are awake, now wait to disappear */
1068	while (!kthread_should_stop()) {
1069		schedule();
1070		set_current_state(TASK_INTERRUPTIBLE);
1071	}
1072
1073	__set_current_state(TASK_RUNNING);
1074
1075	return 0;
1076}
1077int
1078trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
1079{
1080	unsigned long save_max = tr->max_latency;
1081	struct task_struct *p;
1082	struct wakeup_test_data data;
1083	unsigned long count;
1084	int ret;
1085
1086	memset(&data, 0, sizeof(data));
1087
1088	init_completion(&data.is_ready);
1089
1090	/* create a -deadline thread */
1091	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
1092	if (IS_ERR(p)) {
1093		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
1094		return -1;
1095	}
1096
1097	/* make sure the thread is running at -deadline policy */
1098	wait_for_completion(&data.is_ready);
1099
1100	/* start the tracing */
1101	ret = tracer_init(trace, tr);
1102	if (ret) {
1103		warn_failed_init_tracer(trace, ret);
1104		return ret;
1105	}
1106
1107	/* reset the max latency */
1108	tr->max_latency = 0;
1109
1110	while (p->on_rq) {
1111		/*
1112		 * Sleep to make sure the -deadline thread is asleep too.
1113		 * On virtual machines we can't rely on timings,
1114		 * but we want to make sure this test still works.
1115		 */
1116		msleep(100);
1117	}
1118
1119	init_completion(&data.is_ready);
1120
1121	data.go = 1;
1122	/* memory barrier is in the wake_up_process() */
1123
1124	wake_up_process(p);
1125
1126	/* Wait for the task to wake up */
1127	wait_for_completion(&data.is_ready);
1128
1129	/* stop the tracing. */
1130	tracing_stop();
1131	/* check both trace buffers */
1132	ret = trace_test_buffer(&tr->trace_buffer, NULL);
1133	if (!ret)
1134		ret = trace_test_buffer(&tr->max_buffer, &count);
1135
1136
1137	trace->reset(tr);
1138	tracing_start();
1139
1140	tr->max_latency = save_max;
1141
1142	/* kill the thread */
1143	kthread_stop(p);
1144
1145	if (!ret && !count) {
1146		printk(KERN_CONT ".. no entries found ..");
1147		ret = -1;
1148	}
1149
1150	return ret;
1151}
1152#endif /* CONFIG_SCHED_TRACER */
1153
1154#ifdef CONFIG_BRANCH_TRACER
1155int
1156trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
1157{
1158	unsigned long count;
1159	int ret;
1160
1161	/* start the tracing */
1162	ret = tracer_init(trace, tr);
1163	if (ret) {
1164		warn_failed_init_tracer(trace, ret);
1165		return ret;
1166	}
1167
 1168	/* Sleep for 1/10 of a second */
1169	msleep(100);
1170	/* stop the tracing. */
1171	tracing_stop();
1172	/* check the trace buffer */
1173	ret = trace_test_buffer(&tr->trace_buffer, &count);
1174	trace->reset(tr);
1175	tracing_start();
1176
1177	if (!ret && !count) {
1178		printk(KERN_CONT ".. no entries found ..");
1179		ret = -1;
1180	}
1181
1182	return ret;
1183}
1184#endif /* CONFIG_BRANCH_TRACER */
1185
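For reference, the pattern these selftests exercise (register an ftrace_ops callback, filter it to a single function, count the hits, then unregister) looks roughly like the following as a standalone module. This is a minimal sketch assuming a kernel of roughly this era, using the four-argument callback signature seen above; my_ops, my_callback, my_hits and the "schedule" filter are illustrative names, not part of the selftest code.

	#include <linux/ftrace.h>
	#include <linux/module.h>
	#include <linux/string.h>

	static unsigned long my_hits;

	/* Same callback signature as the probe functions above */
	static void my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
	{
		my_hits++;	/* bumped on every traced function entry */
	}

	static struct ftrace_ops my_ops = {
		.func	= my_callback,
		/* as in the selftests: we promise to handle recursion ourselves */
		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
	};

	static int __init my_init(void)
	{
		/* trace only schedule(); last arg = 1 resets any old filter */
		ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 1);
		return register_ftrace_function(&my_ops);
	}

	static void __exit my_exit(void)
	{
		unregister_ftrace_function(&my_ops);
		pr_info("schedule() was hit %lu times\n", my_hits);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");

As in the selftests, a leading '*' could be prepended to the filter string to accommodate architectures such as PPC64 that prefix function names.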