v4.6 (kernel/trace/trace_selftest.c)
 
   1/* Include in trace.c */
   2
   3#include <linux/stringify.h>
   4#include <linux/kthread.h>
   5#include <linux/delay.h>
   6#include <linux/slab.h>
   7
   8static inline int trace_valid_entry(struct trace_entry *entry)
   9{
  10	switch (entry->type) {
  11	case TRACE_FN:
  12	case TRACE_CTX:
  13	case TRACE_WAKE:
  14	case TRACE_STACK:
  15	case TRACE_PRINT:
  16	case TRACE_BRANCH:
  17	case TRACE_GRAPH_ENT:
  18	case TRACE_GRAPH_RET:
  19		return 1;
  20	}
  21	return 0;
  22}
  23
  24static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
  25{
  26	struct ring_buffer_event *event;
  27	struct trace_entry *entry;
  28	unsigned int loops = 0;
  29
  30	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
  31		entry = ring_buffer_event_data(event);
  32
  33		/*
  34		 * The ring buffer is a size of trace_buf_size, if
  35		 * we loop more than the size, there's something wrong
  36		 * with the ring buffer.
  37		 */
  38		if (loops++ > trace_buf_size) {
  39			printk(KERN_CONT ".. bad ring buffer ");
  40			goto failed;
  41		}
  42		if (!trace_valid_entry(entry)) {
  43			printk(KERN_CONT ".. invalid entry %d ",
  44				entry->type);
  45			goto failed;
  46		}
  47	}
  48	return 0;
  49
  50 failed:
  51	/* disable tracing */
  52	tracing_disabled = 1;
  53	printk(KERN_CONT ".. corrupted trace buffer .. ");
  54	return -1;
  55}
  56
  57/*
  58 * Test the trace buffer to see if all the elements
  59 * are still sane.
  60 */
  61static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
  62{
  63	unsigned long flags, cnt = 0;
  64	int cpu, ret = 0;
  65
  66	/* Don't allow flipping of max traces now */
  67	local_irq_save(flags);
  68	arch_spin_lock(&buf->tr->max_lock);
  69
  70	cnt = ring_buffer_entries(buf->buffer);
  71
  72	/*
  73	 * The trace_test_buffer_cpu runs a while loop to consume all data.
  74	 * If the calling tracer is broken, and is constantly filling
  75	 * the buffer, this will run forever, and hard lock the box.
  76	 * We disable the ring buffer while we do this test to prevent
  77	 * a hard lock up.
  78	 */
  79	tracing_off();
  80	for_each_possible_cpu(cpu) {
  81		ret = trace_test_buffer_cpu(buf, cpu);
  82		if (ret)
  83			break;
  84	}
  85	tracing_on();
  86	arch_spin_unlock(&buf->tr->max_lock);
  87	local_irq_restore(flags);
  88
  89	if (count)
  90		*count = cnt;
  91
  92	return ret;
  93}
  94
  95static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
  96{
  97	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
  98		trace->name, init_ret);
  99}
 100#ifdef CONFIG_FUNCTION_TRACER
 101
 102#ifdef CONFIG_DYNAMIC_FTRACE
 103
 104static int trace_selftest_test_probe1_cnt;
 105static void trace_selftest_test_probe1_func(unsigned long ip,
 106					    unsigned long pip,
 107					    struct ftrace_ops *op,
 108					    struct pt_regs *pt_regs)
 109{
 110	trace_selftest_test_probe1_cnt++;
 111}
 112
 113static int trace_selftest_test_probe2_cnt;
 114static void trace_selftest_test_probe2_func(unsigned long ip,
 115					    unsigned long pip,
 116					    struct ftrace_ops *op,
 117					    struct pt_regs *pt_regs)
 118{
 119	trace_selftest_test_probe2_cnt++;
 120}
 121
 122static int trace_selftest_test_probe3_cnt;
 123static void trace_selftest_test_probe3_func(unsigned long ip,
 124					    unsigned long pip,
 125					    struct ftrace_ops *op,
 126					    struct pt_regs *pt_regs)
 127{
 128	trace_selftest_test_probe3_cnt++;
 129}
 130
 131static int trace_selftest_test_global_cnt;
 132static void trace_selftest_test_global_func(unsigned long ip,
 133					    unsigned long pip,
 134					    struct ftrace_ops *op,
 135					    struct pt_regs *pt_regs)
 136{
 137	trace_selftest_test_global_cnt++;
 138}
 139
 140static int trace_selftest_test_dyn_cnt;
 141static void trace_selftest_test_dyn_func(unsigned long ip,
 142					 unsigned long pip,
 143					 struct ftrace_ops *op,
 144					 struct pt_regs *pt_regs)
 145{
 146	trace_selftest_test_dyn_cnt++;
 147}
 148
 149static struct ftrace_ops test_probe1 = {
 150	.func			= trace_selftest_test_probe1_func,
 151	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 152};
 153
 154static struct ftrace_ops test_probe2 = {
 155	.func			= trace_selftest_test_probe2_func,
 156	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 157};
 158
 159static struct ftrace_ops test_probe3 = {
 160	.func			= trace_selftest_test_probe3_func,
 161	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 162};
 163
 164static void print_counts(void)
 165{
 166	printk("(%d %d %d %d %d) ",
 167	       trace_selftest_test_probe1_cnt,
 168	       trace_selftest_test_probe2_cnt,
 169	       trace_selftest_test_probe3_cnt,
 170	       trace_selftest_test_global_cnt,
 171	       trace_selftest_test_dyn_cnt);
 172}
 173
 174static void reset_counts(void)
 175{
 176	trace_selftest_test_probe1_cnt = 0;
 177	trace_selftest_test_probe2_cnt = 0;
 178	trace_selftest_test_probe3_cnt = 0;
 179	trace_selftest_test_global_cnt = 0;
 180	trace_selftest_test_dyn_cnt = 0;
 181}
 182
 183static int trace_selftest_ops(struct trace_array *tr, int cnt)
 184{
 185	int save_ftrace_enabled = ftrace_enabled;
 186	struct ftrace_ops *dyn_ops;
 187	char *func1_name;
 188	char *func2_name;
 189	int len1;
 190	int len2;
 191	int ret = -1;
 192
 193	printk(KERN_CONT "PASSED\n");
 194	pr_info("Testing dynamic ftrace ops #%d: ", cnt);
 195
 196	ftrace_enabled = 1;
 197	reset_counts();
 198
 199	/* Handle PPC64 '.' name */
 200	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 201	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
 202	len1 = strlen(func1_name);
 203	len2 = strlen(func2_name);
 204
 205	/*
 206	 * Probe 1 will trace function 1.
 207	 * Probe 2 will trace function 2.
 208	 * Probe 3 will trace functions 1 and 2.
 209	 */
 210	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
 211	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
 212	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
 213	ftrace_set_filter(&test_probe3, func2_name, len2, 0);
 214
 215	register_ftrace_function(&test_probe1);
 216	register_ftrace_function(&test_probe2);
 217	register_ftrace_function(&test_probe3);
 218	/* First time we are running with main function */
 219	if (cnt > 1) {
 220		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
 221		register_ftrace_function(tr->ops);
 222	}
 223
 224	DYN_FTRACE_TEST_NAME();
 225
 226	print_counts();
 227
 228	if (trace_selftest_test_probe1_cnt != 1)
 229		goto out;
 230	if (trace_selftest_test_probe2_cnt != 0)
 231		goto out;
 232	if (trace_selftest_test_probe3_cnt != 1)
 233		goto out;
 234	if (cnt > 1) {
 235		if (trace_selftest_test_global_cnt == 0)
 236			goto out;
 237	}
 238
 239	DYN_FTRACE_TEST_NAME2();
 240
 241	print_counts();
 242
 243	if (trace_selftest_test_probe1_cnt != 1)
 244		goto out;
 245	if (trace_selftest_test_probe2_cnt != 1)
 246		goto out;
 247	if (trace_selftest_test_probe3_cnt != 2)
 248		goto out;
 249
 250	/* Add a dynamic probe */
 251	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
 252	if (!dyn_ops) {
 253		printk("MEMORY ERROR ");
 254		goto out;
 255	}
 256
 257	dyn_ops->func = trace_selftest_test_dyn_func;
 258
 259	register_ftrace_function(dyn_ops);
 260
 261	trace_selftest_test_global_cnt = 0;
 262
 263	DYN_FTRACE_TEST_NAME();
 264
 265	print_counts();
 266
 267	if (trace_selftest_test_probe1_cnt != 2)
 268		goto out_free;
 269	if (trace_selftest_test_probe2_cnt != 1)
 270		goto out_free;
 271	if (trace_selftest_test_probe3_cnt != 3)
 272		goto out_free;
 273	if (cnt > 1) {
 274		if (trace_selftest_test_global_cnt == 0)
 275			goto out;
 276	}
 277	if (trace_selftest_test_dyn_cnt == 0)
 278		goto out_free;
 279
 280	DYN_FTRACE_TEST_NAME2();
 281
 282	print_counts();
 283
 284	if (trace_selftest_test_probe1_cnt != 2)
 285		goto out_free;
 286	if (trace_selftest_test_probe2_cnt != 2)
 287		goto out_free;
 288	if (trace_selftest_test_probe3_cnt != 4)
 289		goto out_free;
 290
 291	ret = 0;
 292 out_free:
 293	unregister_ftrace_function(dyn_ops);
 294	kfree(dyn_ops);
 295
 296 out:
 297	/* Purposely unregister in the same order */
 298	unregister_ftrace_function(&test_probe1);
 299	unregister_ftrace_function(&test_probe2);
 300	unregister_ftrace_function(&test_probe3);
 301	if (cnt > 1)
 302		unregister_ftrace_function(tr->ops);
 303	ftrace_reset_array_ops(tr);
 304
 305	/* Make sure everything is off */
 306	reset_counts();
 307	DYN_FTRACE_TEST_NAME();
 308	DYN_FTRACE_TEST_NAME();
 309
 310	if (trace_selftest_test_probe1_cnt ||
 311	    trace_selftest_test_probe2_cnt ||
 312	    trace_selftest_test_probe3_cnt ||
 313	    trace_selftest_test_global_cnt ||
 314	    trace_selftest_test_dyn_cnt)
 315		ret = -1;
 316
 317	ftrace_enabled = save_ftrace_enabled;
 318
 319	return ret;
 320}
 321
 322/* Test dynamic code modification and ftrace filters */
 323static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 324						  struct trace_array *tr,
 325						  int (*func)(void))
 326{
 327	int save_ftrace_enabled = ftrace_enabled;
 328	unsigned long count;
 329	char *func_name;
 330	int ret;
 331
 332	/* The ftrace test PASSED */
 333	printk(KERN_CONT "PASSED\n");
 334	pr_info("Testing dynamic ftrace: ");
 335
 336	/* enable tracing, and record the filter function */
 337	ftrace_enabled = 1;
 338
 339	/* passed in by parameter to fool gcc from optimizing */
 340	func();
 341
 342	/*
 343	 * Some archs *cough*PowerPC*cough* add characters to the
 344	 * start of the function names. We simply put a '*' to
 345	 * accommodate them.
 346	 */
 347	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 348
 349	/* filter only on our function */
 350	ftrace_set_global_filter(func_name, strlen(func_name), 1);
 351
 352	/* enable tracing */
 353	ret = tracer_init(trace, tr);
 354	if (ret) {
 355		warn_failed_init_tracer(trace, ret);
 356		goto out;
 357	}
 358
 359	/* Sleep for a 1/10 of a second */
 360	msleep(100);
 361
 362	/* we should have nothing in the buffer */
 363	ret = trace_test_buffer(&tr->trace_buffer, &count);
 364	if (ret)
 365		goto out;
 366
 367	if (count) {
 368		ret = -1;
 369		printk(KERN_CONT ".. filter did not filter .. ");
 370		goto out;
 371	}
 372
 373	/* call our function again */
 374	func();
 375
 376	/* sleep again */
 377	msleep(100);
 378
 379	/* stop the tracing. */
 380	tracing_stop();
 381	ftrace_enabled = 0;
 382
 383	/* check the trace buffer */
 384	ret = trace_test_buffer(&tr->trace_buffer, &count);
 385
 386	ftrace_enabled = 1;
 387	tracing_start();
 388
 389	/* we should only have one item */
 390	if (!ret && count != 1) {
 391		trace->reset(tr);
 392		printk(KERN_CONT ".. filter failed count=%ld ..", count);
 393		ret = -1;
 394		goto out;
 395	}
 396
 397	/* Test the ops with global tracing running */
 398	ret = trace_selftest_ops(tr, 1);
 399	trace->reset(tr);
 400
 401 out:
 402	ftrace_enabled = save_ftrace_enabled;
 403
 404	/* Enable tracing on all functions again */
 405	ftrace_set_global_filter(NULL, 0, 1);
 406
 407	/* Test the ops with global tracing off */
 408	if (!ret)
 409		ret = trace_selftest_ops(tr, 2);
 410
 411	return ret;
 412}
 413
 414static int trace_selftest_recursion_cnt;
 415static void trace_selftest_test_recursion_func(unsigned long ip,
 416					       unsigned long pip,
 417					       struct ftrace_ops *op,
 418					       struct pt_regs *pt_regs)
 419{
 420	/*
 421	 * This function is registered without the recursion safe flag.
 422	 * The ftrace infrastructure should provide the recursion
 423	 * protection. If not, this will crash the kernel!
 424	 */
 425	if (trace_selftest_recursion_cnt++ > 10)
 426		return;
 427	DYN_FTRACE_TEST_NAME();
 428}
 429
 430static void trace_selftest_test_recursion_safe_func(unsigned long ip,
 431						    unsigned long pip,
 432						    struct ftrace_ops *op,
 433						    struct pt_regs *pt_regs)
 434{
 435	/*
 436	 * We said we would provide our own recursion. By calling
 437	 * this function again, we should recurse back into this function
 438	 * and count again. But this only happens if the arch supports
 439	 * all of ftrace features and nothing else is using the function
 440	 * tracing utility.
 441	 */
 442	if (trace_selftest_recursion_cnt++)
 443		return;
 444	DYN_FTRACE_TEST_NAME();
 445}
 446
 447static struct ftrace_ops test_rec_probe = {
 448	.func			= trace_selftest_test_recursion_func,
 449};
 450
 451static struct ftrace_ops test_recsafe_probe = {
 452	.func			= trace_selftest_test_recursion_safe_func,
 453	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 454};
 455
 456static int
 457trace_selftest_function_recursion(void)
 458{
 459	int save_ftrace_enabled = ftrace_enabled;
 460	char *func_name;
 461	int len;
 462	int ret;
 463
 464	/* The previous test PASSED */
 465	pr_cont("PASSED\n");
 466	pr_info("Testing ftrace recursion: ");
 467
 468
 469	/* enable tracing, and record the filter function */
 470	ftrace_enabled = 1;
 471
 472	/* Handle PPC64 '.' name */
 473	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 474	len = strlen(func_name);
 475
 476	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
 477	if (ret) {
 478		pr_cont("*Could not set filter* ");
 479		goto out;
 480	}
 481
 482	ret = register_ftrace_function(&test_rec_probe);
 483	if (ret) {
 484		pr_cont("*could not register callback* ");
 485		goto out;
 486	}
 487
 488	DYN_FTRACE_TEST_NAME();
 489
 490	unregister_ftrace_function(&test_rec_probe);
 491
 492	ret = -1;
 493	if (trace_selftest_recursion_cnt != 1) {
 494		pr_cont("*callback not called once (%d)* ",
 495			trace_selftest_recursion_cnt);
 496		goto out;
 497	}
 498
 499	trace_selftest_recursion_cnt = 1;
 500
 501	pr_cont("PASSED\n");
 502	pr_info("Testing ftrace recursion safe: ");
 503
 504	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
 505	if (ret) {
 506		pr_cont("*Could not set filter* ");
 507		goto out;
 508	}
 509
 510	ret = register_ftrace_function(&test_recsafe_probe);
 511	if (ret) {
 512		pr_cont("*could not register callback* ");
 513		goto out;
 514	}
 515
 516	DYN_FTRACE_TEST_NAME();
 517
 518	unregister_ftrace_function(&test_recsafe_probe);
 519
 520	ret = -1;
 521	if (trace_selftest_recursion_cnt != 2) {
 522		pr_cont("*callback not called expected 2 times (%d)* ",
 523			trace_selftest_recursion_cnt);
 524		goto out;
 525	}
 526
 527	ret = 0;
 528out:
 529	ftrace_enabled = save_ftrace_enabled;
 530
 531	return ret;
 532}
 533#else
 534# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
 535# define trace_selftest_function_recursion() ({ 0; })
 536#endif /* CONFIG_DYNAMIC_FTRACE */
 537
 538static enum {
 539	TRACE_SELFTEST_REGS_START,
 540	TRACE_SELFTEST_REGS_FOUND,
 541	TRACE_SELFTEST_REGS_NOT_FOUND,
 542} trace_selftest_regs_stat;
 543
 544static void trace_selftest_test_regs_func(unsigned long ip,
 545					  unsigned long pip,
 546					  struct ftrace_ops *op,
 547					  struct pt_regs *pt_regs)
 548{
 549	if (pt_regs)
 550		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
 551	else
 552		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
 553}
 554
 555static struct ftrace_ops test_regs_probe = {
 556	.func		= trace_selftest_test_regs_func,
 557	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
 558};
 559
 560static int
 561trace_selftest_function_regs(void)
 562{
 563	int save_ftrace_enabled = ftrace_enabled;
 564	char *func_name;
 565	int len;
 566	int ret;
 567	int supported = 0;
 568
 569#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 570	supported = 1;
 571#endif
 572
 573	/* The previous test PASSED */
 574	pr_cont("PASSED\n");
 575	pr_info("Testing ftrace regs%s: ",
 576		!supported ? "(no arch support)" : "");
 577
 578	/* enable tracing, and record the filter function */
 579	ftrace_enabled = 1;
 580
 581	/* Handle PPC64 '.' name */
 582	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 583	len = strlen(func_name);
 584
 585	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
 586	/*
 587	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
 588	 * This test really doesn't care.
 589	 */
 590	if (ret && ret != -ENODEV) {
 591		pr_cont("*Could not set filter* ");
 592		goto out;
 593	}
 594
 595	ret = register_ftrace_function(&test_regs_probe);
 596	/*
 597	 * Now if the arch does not support passing regs, then this should
 598	 * have failed.
 599	 */
 600	if (!supported) {
 601		if (!ret) {
 602			pr_cont("*registered save-regs without arch support* ");
 603			goto out;
 604		}
 605		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
 606		ret = register_ftrace_function(&test_regs_probe);
 607	}
 608	if (ret) {
 609		pr_cont("*could not register callback* ");
 610		goto out;
 611	}
 612
 613
 614	DYN_FTRACE_TEST_NAME();
 615
 616	unregister_ftrace_function(&test_regs_probe);
 617
 618	ret = -1;
 619
 620	switch (trace_selftest_regs_stat) {
 621	case TRACE_SELFTEST_REGS_START:
 622		pr_cont("*callback never called* ");
 623		goto out;
 624
 625	case TRACE_SELFTEST_REGS_FOUND:
 626		if (supported)
 627			break;
 628		pr_cont("*callback received regs without arch support* ");
 629		goto out;
 630
 631	case TRACE_SELFTEST_REGS_NOT_FOUND:
 632		if (!supported)
 633			break;
 634		pr_cont("*callback received NULL regs* ");
 635		goto out;
 636	}
 637
 638	ret = 0;
 639out:
 640	ftrace_enabled = save_ftrace_enabled;
 641
 642	return ret;
 643}
 644
 645/*
 646 * Simple verification test of ftrace function tracer.
 647 * Enable ftrace, sleep 1/10 second, and then read the trace
 648 * buffer to see if all is in order.
 649 */
 650__init int
 651trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 652{
 653	int save_ftrace_enabled = ftrace_enabled;
 654	unsigned long count;
 655	int ret;
 656
 657#ifdef CONFIG_DYNAMIC_FTRACE
 658	if (ftrace_filter_param) {
 659		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
 660		return 0;
 661	}
 662#endif
 663
 664	/* make sure msleep has been recorded */
 665	msleep(1);
 666
 667	/* start the tracing */
 668	ftrace_enabled = 1;
 669
 670	ret = tracer_init(trace, tr);
 671	if (ret) {
 672		warn_failed_init_tracer(trace, ret);
 673		goto out;
 674	}
 675
 676	/* Sleep for a 1/10 of a second */
 677	msleep(100);
 678	/* stop the tracing. */
 679	tracing_stop();
 680	ftrace_enabled = 0;
 681
 682	/* check the trace buffer */
 683	ret = trace_test_buffer(&tr->trace_buffer, &count);
 684
 685	ftrace_enabled = 1;
 686	trace->reset(tr);
 687	tracing_start();
 688
 689	if (!ret && !count) {
 690		printk(KERN_CONT ".. no entries found ..");
 691		ret = -1;
 692		goto out;
 693	}
 694
 695	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
 696						     DYN_FTRACE_TEST_NAME);
 697	if (ret)
 698		goto out;
 699
 700	ret = trace_selftest_function_recursion();
 701	if (ret)
 702		goto out;
 703
 704	ret = trace_selftest_function_regs();
 705 out:
 706	ftrace_enabled = save_ftrace_enabled;
 707
 708	/* kill ftrace totally if we failed */
 709	if (ret)
 710		ftrace_kill();
 711
 712	return ret;
 713}
 714#endif /* CONFIG_FUNCTION_TRACER */
 715
 716
 717#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 718
 719/* Maximum number of functions to trace before diagnosing a hang */
 720#define GRAPH_MAX_FUNC_TEST	100000000
 721
 722static unsigned int graph_hang_thresh;
 723
 724/* Wrap the real function entry probe to avoid possible hanging */
 725static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
 726{
 727	/* This is harmlessly racy, we want to approximately detect a hang */
 728	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
 729		ftrace_graph_stop();
 730		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
 731		if (ftrace_dump_on_oops) {
 732			ftrace_dump(DUMP_ALL);
 733			/* ftrace_dump() disables tracing */
 734			tracing_on();
 735		}
 736		return 0;
 737	}
 738
 739	return trace_graph_entry(trace);
 740}
 741
 742/*
  743 * Pretty much the same as for the function tracer from which the selftest
 744 * has been borrowed.
 745 */
 746__init int
 747trace_selftest_startup_function_graph(struct tracer *trace,
 748					struct trace_array *tr)
 749{
 750	int ret;
 751	unsigned long count;
 752
 753#ifdef CONFIG_DYNAMIC_FTRACE
 754	if (ftrace_filter_param) {
 755		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
 756		return 0;
 757	}
 758#endif
 759
 760	/*
 761	 * Simulate the init() callback but we attach a watchdog callback
 762	 * to detect and recover from possible hangs
 763	 */
 764	tracing_reset_online_cpus(&tr->trace_buffer);
 765	set_graph_array(tr);
 766	ret = register_ftrace_graph(&trace_graph_return,
 767				    &trace_graph_entry_watchdog);
 768	if (ret) {
 769		warn_failed_init_tracer(trace, ret);
 770		goto out;
 771	}
 772	tracing_start_cmdline_record();
 773
 774	/* Sleep for a 1/10 of a second */
 775	msleep(100);
 776
 777	/* Have we just recovered from a hang? */
 778	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
 779		tracing_selftest_disabled = true;
 780		ret = -1;
 781		goto out;
 782	}
 783
 784	tracing_stop();
 785
 786	/* check the trace buffer */
 787	ret = trace_test_buffer(&tr->trace_buffer, &count);
 788
 789	trace->reset(tr);
 790	tracing_start();
 791
 792	if (!ret && !count) {
 793		printk(KERN_CONT ".. no entries found ..");
 794		ret = -1;
 795		goto out;
 796	}
 797
 798	/* Don't test dynamic tracing, the function tracer already did */
 799
 800out:
 801	/* Stop it if we failed */
 802	if (ret)
 803		ftrace_graph_stop();
 804
 805	return ret;
 806}
 807#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 808
 809
 810#ifdef CONFIG_IRQSOFF_TRACER
 811int
 812trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 813{
 814	unsigned long save_max = tr->max_latency;
 815	unsigned long count;
 816	int ret;
 817
 818	/* start the tracing */
 819	ret = tracer_init(trace, tr);
 820	if (ret) {
 821		warn_failed_init_tracer(trace, ret);
 822		return ret;
 823	}
 824
 825	/* reset the max latency */
 826	tr->max_latency = 0;
 827	/* disable interrupts for a bit */
 828	local_irq_disable();
 829	udelay(100);
 830	local_irq_enable();
 831
 832	/*
 833	 * Stop the tracer to avoid a warning subsequent
 834	 * to buffer flipping failure because tracing_stop()
 835	 * disables the tr and max buffers, making flipping impossible
 836	 * in case of parallels max irqs off latencies.
 837	 */
 838	trace->stop(tr);
 839	/* stop the tracing. */
 840	tracing_stop();
 841	/* check both trace buffers */
 842	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 843	if (!ret)
 844		ret = trace_test_buffer(&tr->max_buffer, &count);
 845	trace->reset(tr);
 846	tracing_start();
 847
 848	if (!ret && !count) {
 849		printk(KERN_CONT ".. no entries found ..");
 850		ret = -1;
 851	}
 852
 853	tr->max_latency = save_max;
 854
 855	return ret;
 856}
 857#endif /* CONFIG_IRQSOFF_TRACER */
 858
 859#ifdef CONFIG_PREEMPT_TRACER
 860int
 861trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 862{
 863	unsigned long save_max = tr->max_latency;
 864	unsigned long count;
 865	int ret;
 866
 867	/*
 868	 * Now that the big kernel lock is no longer preemptable,
 869	 * and this is called with the BKL held, it will always
 870	 * fail. If preemption is already disabled, simply
 871	 * pass the test. When the BKL is removed, or becomes
 872	 * preemptible again, we will once again test this,
 873	 * so keep it in.
 874	 */
 875	if (preempt_count()) {
 876		printk(KERN_CONT "can not test ... force ");
 877		return 0;
 878	}
 879
 880	/* start the tracing */
 881	ret = tracer_init(trace, tr);
 882	if (ret) {
 883		warn_failed_init_tracer(trace, ret);
 884		return ret;
 885	}
 886
 887	/* reset the max latency */
 888	tr->max_latency = 0;
 889	/* disable preemption for a bit */
 890	preempt_disable();
 891	udelay(100);
 892	preempt_enable();
 893
 894	/*
 895	 * Stop the tracer to avoid a warning subsequent
 896	 * to buffer flipping failure because tracing_stop()
 897	 * disables the tr and max buffers, making flipping impossible
 898	 * in case of parallels max preempt off latencies.
 899	 */
 900	trace->stop(tr);
 901	/* stop the tracing. */
 902	tracing_stop();
 903	/* check both trace buffers */
 904	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 905	if (!ret)
 906		ret = trace_test_buffer(&tr->max_buffer, &count);
 907	trace->reset(tr);
 908	tracing_start();
 909
 910	if (!ret && !count) {
 911		printk(KERN_CONT ".. no entries found ..");
 912		ret = -1;
 913	}
 914
 915	tr->max_latency = save_max;
 916
 917	return ret;
 918}
 919#endif /* CONFIG_PREEMPT_TRACER */
 920
 921#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
 922int
 923trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
 924{
 925	unsigned long save_max = tr->max_latency;
 926	unsigned long count;
 927	int ret;
 928
 929	/*
 930	 * Now that the big kernel lock is no longer preemptable,
 931	 * and this is called with the BKL held, it will always
 932	 * fail. If preemption is already disabled, simply
 933	 * pass the test. When the BKL is removed, or becomes
 934	 * preemptible again, we will once again test this,
 935	 * so keep it in.
 936	 */
 937	if (preempt_count()) {
 938		printk(KERN_CONT "can not test ... force ");
 939		return 0;
 940	}
 941
 942	/* start the tracing */
 943	ret = tracer_init(trace, tr);
 944	if (ret) {
 945		warn_failed_init_tracer(trace, ret);
 946		goto out_no_start;
 947	}
 948
 949	/* reset the max latency */
 950	tr->max_latency = 0;
 951
 952	/* disable preemption and interrupts for a bit */
 953	preempt_disable();
 954	local_irq_disable();
 955	udelay(100);
 956	preempt_enable();
 957	/* reverse the order of preempt vs irqs */
 958	local_irq_enable();
 959
 960	/*
 961	 * Stop the tracer to avoid a warning subsequent
 962	 * to buffer flipping failure because tracing_stop()
 963	 * disables the tr and max buffers, making flipping impossible
 964	 * in case of parallels max irqs/preempt off latencies.
 965	 */
 966	trace->stop(tr);
 967	/* stop the tracing. */
 968	tracing_stop();
 969	/* check both trace buffers */
 970	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 971	if (ret)
 972		goto out;
 973
 974	ret = trace_test_buffer(&tr->max_buffer, &count);
 975	if (ret)
 976		goto out;
 977
 978	if (!ret && !count) {
 979		printk(KERN_CONT ".. no entries found ..");
 980		ret = -1;
 981		goto out;
 982	}
 983
 984	/* do the test by disabling interrupts first this time */
 985	tr->max_latency = 0;
 986	tracing_start();
 987	trace->start(tr);
 988
 989	preempt_disable();
 990	local_irq_disable();
 991	udelay(100);
 992	preempt_enable();
 993	/* reverse the order of preempt vs irqs */
 994	local_irq_enable();
 995
 996	trace->stop(tr);
 997	/* stop the tracing. */
 998	tracing_stop();
 999	/* check both trace buffers */
1000	ret = trace_test_buffer(&tr->trace_buffer, NULL);
1001	if (ret)
1002		goto out;
1003
1004	ret = trace_test_buffer(&tr->max_buffer, &count);
1005
1006	if (!ret && !count) {
1007		printk(KERN_CONT ".. no entries found ..");
1008		ret = -1;
1009		goto out;
1010	}
1011
1012out:
1013	tracing_start();
1014out_no_start:
1015	trace->reset(tr);
1016	tr->max_latency = save_max;
1017
1018	return ret;
1019}
1020#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
1021
1022#ifdef CONFIG_NOP_TRACER
1023int
1024trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
1025{
1026	/* What could possibly go wrong? */
1027	return 0;
1028}
1029#endif
1030
1031#ifdef CONFIG_SCHED_TRACER
1032
1033struct wakeup_test_data {
1034	struct completion	is_ready;
1035	int			go;
1036};
1037
1038static int trace_wakeup_test_thread(void *data)
1039{
1040	/* Make this a -deadline thread */
1041	static const struct sched_attr attr = {
1042		.sched_policy = SCHED_DEADLINE,
1043		.sched_runtime = 100000ULL,
1044		.sched_deadline = 10000000ULL,
1045		.sched_period = 10000000ULL
1046	};
1047	struct wakeup_test_data *x = data;
1048
1049	sched_setattr(current, &attr);
1050
1051	/* Make it know we have a new prio */
1052	complete(&x->is_ready);
1053
1054	/* now go to sleep and let the test wake us up */
1055	set_current_state(TASK_INTERRUPTIBLE);
1056	while (!x->go) {
1057		schedule();
1058		set_current_state(TASK_INTERRUPTIBLE);
1059	}
1060
1061	complete(&x->is_ready);
1062
1063	set_current_state(TASK_INTERRUPTIBLE);
1064
1065	/* we are awake, now wait to disappear */
1066	while (!kthread_should_stop()) {
1067		schedule();
1068		set_current_state(TASK_INTERRUPTIBLE);
1069	}
1070
1071	__set_current_state(TASK_RUNNING);
1072
1073	return 0;
1074}
1075int
1076trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
1077{
1078	unsigned long save_max = tr->max_latency;
1079	struct task_struct *p;
1080	struct wakeup_test_data data;
1081	unsigned long count;
1082	int ret;
1083
1084	memset(&data, 0, sizeof(data));
1085
1086	init_completion(&data.is_ready);
1087
1088	/* create a -deadline thread */
1089	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
1090	if (IS_ERR(p)) {
1091		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
1092		return -1;
1093	}
1094
1095	/* make sure the thread is running at -deadline policy */
1096	wait_for_completion(&data.is_ready);
1097
1098	/* start the tracing */
1099	ret = tracer_init(trace, tr);
1100	if (ret) {
1101		warn_failed_init_tracer(trace, ret);
1102		return ret;
1103	}
1104
1105	/* reset the max latency */
1106	tr->max_latency = 0;
1107
1108	while (p->on_rq) {
1109		/*
1110		 * Sleep to make sure the -deadline thread is asleep too.
1111		 * On virtual machines we can't rely on timings,
1112		 * but we want to make sure this test still works.
1113		 */
1114		msleep(100);
1115	}
1116
1117	init_completion(&data.is_ready);
1118
1119	data.go = 1;
1120	/* memory barrier is in the wake_up_process() */
1121
1122	wake_up_process(p);
1123
1124	/* Wait for the task to wake up */
1125	wait_for_completion(&data.is_ready);
1126
1127	/* stop the tracing. */
1128	tracing_stop();
1129	/* check both trace buffers */
1130	ret = trace_test_buffer(&tr->trace_buffer, NULL);
1131	if (!ret)
1132		ret = trace_test_buffer(&tr->max_buffer, &count);
1133
1134
1135	trace->reset(tr);
1136	tracing_start();
1137
1138	tr->max_latency = save_max;
1139
1140	/* kill the thread */
1141	kthread_stop(p);
1142
1143	if (!ret && !count) {
1144		printk(KERN_CONT ".. no entries found ..");
1145		ret = -1;
1146	}
1147
1148	return ret;
1149}
1150#endif /* CONFIG_SCHED_TRACER */
1151
1152#ifdef CONFIG_CONTEXT_SWITCH_TRACER
1153int
1154trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
1155{
1156	unsigned long count;
1157	int ret;
1158
1159	/* start the tracing */
1160	ret = tracer_init(trace, tr);
1161	if (ret) {
1162		warn_failed_init_tracer(trace, ret);
1163		return ret;
1164	}
1165
1166	/* Sleep for a 1/10 of a second */
1167	msleep(100);
1168	/* stop the tracing. */
1169	tracing_stop();
1170	/* check the trace buffer */
1171	ret = trace_test_buffer(&tr->trace_buffer, &count);
1172	trace->reset(tr);
1173	tracing_start();
1174
1175	if (!ret && !count) {
1176		printk(KERN_CONT ".. no entries found ..");
1177		ret = -1;
1178	}
1179
1180	return ret;
1181}
1182#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
1183
1184#ifdef CONFIG_BRANCH_TRACER
1185int
1186trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
1187{
1188	unsigned long count;
1189	int ret;
1190
1191	/* start the tracing */
1192	ret = tracer_init(trace, tr);
1193	if (ret) {
1194		warn_failed_init_tracer(trace, ret);
1195		return ret;
1196	}
1197
1198	/* Sleep for a 1/10 of a second */
1199	msleep(100);
1200	/* stop the tracing. */
1201	tracing_stop();
1202	/* check the trace buffer */
1203	ret = trace_test_buffer(&tr->trace_buffer, &count);
1204	trace->reset(tr);
1205	tracing_start();
1206
1207	if (!ret && !count) {
1208		printk(KERN_CONT ".. no entries found ..");
1209		ret = -1;
1210	}
1211
1212	return ret;
1213}
1214#endif /* CONFIG_BRANCH_TRACER */
1215
v5.14.15 (kernel/trace/trace_selftest.c)
   1// SPDX-License-Identifier: GPL-2.0
   2/* Include in trace.c */
   3
   4#include <uapi/linux/sched/types.h>
   5#include <linux/stringify.h>
   6#include <linux/kthread.h>
   7#include <linux/delay.h>
   8#include <linux/slab.h>
   9
  10static inline int trace_valid_entry(struct trace_entry *entry)
  11{
  12	switch (entry->type) {
  13	case TRACE_FN:
  14	case TRACE_CTX:
  15	case TRACE_WAKE:
  16	case TRACE_STACK:
  17	case TRACE_PRINT:
  18	case TRACE_BRANCH:
  19	case TRACE_GRAPH_ENT:
  20	case TRACE_GRAPH_RET:
  21		return 1;
  22	}
  23	return 0;
  24}
  25
  26static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
  27{
  28	struct ring_buffer_event *event;
  29	struct trace_entry *entry;
  30	unsigned int loops = 0;
  31
  32	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
  33		entry = ring_buffer_event_data(event);
  34
  35		/*
  36		 * The ring buffer is a size of trace_buf_size, if
  37		 * we loop more than the size, there's something wrong
  38		 * with the ring buffer.
  39		 */
  40		if (loops++ > trace_buf_size) {
  41			printk(KERN_CONT ".. bad ring buffer ");
  42			goto failed;
  43		}
  44		if (!trace_valid_entry(entry)) {
  45			printk(KERN_CONT ".. invalid entry %d ",
  46				entry->type);
  47			goto failed;
  48		}
  49	}
  50	return 0;
  51
  52 failed:
  53	/* disable tracing */
  54	tracing_disabled = 1;
  55	printk(KERN_CONT ".. corrupted trace buffer .. ");
  56	return -1;
  57}
  58
  59/*
  60 * Test the trace buffer to see if all the elements
  61 * are still sane.
  62 */
  63static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
  64{
  65	unsigned long flags, cnt = 0;
  66	int cpu, ret = 0;
  67
  68	/* Don't allow flipping of max traces now */
  69	local_irq_save(flags);
  70	arch_spin_lock(&buf->tr->max_lock);
  71
  72	cnt = ring_buffer_entries(buf->buffer);
  73
  74	/*
  75	 * The trace_test_buffer_cpu runs a while loop to consume all data.
  76	 * If the calling tracer is broken, and is constantly filling
  77	 * the buffer, this will run forever, and hard lock the box.
  78	 * We disable the ring buffer while we do this test to prevent
  79	 * a hard lock up.
  80	 */
  81	tracing_off();
  82	for_each_possible_cpu(cpu) {
  83		ret = trace_test_buffer_cpu(buf, cpu);
  84		if (ret)
  85			break;
  86	}
  87	tracing_on();
  88	arch_spin_unlock(&buf->tr->max_lock);
  89	local_irq_restore(flags);
  90
  91	if (count)
  92		*count = cnt;
  93
  94	return ret;
  95}
  96
  97static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
  98{
  99	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
 100		trace->name, init_ret);
 101}
 102#ifdef CONFIG_FUNCTION_TRACER
 103
 104#ifdef CONFIG_DYNAMIC_FTRACE
 105
 106static int trace_selftest_test_probe1_cnt;
 107static void trace_selftest_test_probe1_func(unsigned long ip,
 108					    unsigned long pip,
 109					    struct ftrace_ops *op,
 110					    struct ftrace_regs *fregs)
 111{
 112	trace_selftest_test_probe1_cnt++;
 113}
 114
 115static int trace_selftest_test_probe2_cnt;
 116static void trace_selftest_test_probe2_func(unsigned long ip,
 117					    unsigned long pip,
 118					    struct ftrace_ops *op,
 119					    struct ftrace_regs *fregs)
 120{
 121	trace_selftest_test_probe2_cnt++;
 122}
 123
 124static int trace_selftest_test_probe3_cnt;
 125static void trace_selftest_test_probe3_func(unsigned long ip,
 126					    unsigned long pip,
 127					    struct ftrace_ops *op,
 128					    struct ftrace_regs *fregs)
 129{
 130	trace_selftest_test_probe3_cnt++;
 131}
 132
 133static int trace_selftest_test_global_cnt;
 134static void trace_selftest_test_global_func(unsigned long ip,
 135					    unsigned long pip,
 136					    struct ftrace_ops *op,
 137					    struct ftrace_regs *fregs)
 138{
 139	trace_selftest_test_global_cnt++;
 140}
 141
 142static int trace_selftest_test_dyn_cnt;
 143static void trace_selftest_test_dyn_func(unsigned long ip,
 144					 unsigned long pip,
 145					 struct ftrace_ops *op,
 146					 struct ftrace_regs *fregs)
 147{
 148	trace_selftest_test_dyn_cnt++;
 149}
 150
 151static struct ftrace_ops test_probe1 = {
 152	.func			= trace_selftest_test_probe1_func,
 153};
 154
 155static struct ftrace_ops test_probe2 = {
 156	.func			= trace_selftest_test_probe2_func,
 157};
 158
 159static struct ftrace_ops test_probe3 = {
 160	.func			= trace_selftest_test_probe3_func,
 161};
 162
 163static void print_counts(void)
 164{
 165	printk("(%d %d %d %d %d) ",
 166	       trace_selftest_test_probe1_cnt,
 167	       trace_selftest_test_probe2_cnt,
 168	       trace_selftest_test_probe3_cnt,
 169	       trace_selftest_test_global_cnt,
 170	       trace_selftest_test_dyn_cnt);
 171}
 172
 173static void reset_counts(void)
 174{
 175	trace_selftest_test_probe1_cnt = 0;
 176	trace_selftest_test_probe2_cnt = 0;
 177	trace_selftest_test_probe3_cnt = 0;
 178	trace_selftest_test_global_cnt = 0;
 179	trace_selftest_test_dyn_cnt = 0;
 180}
 181
 182static int trace_selftest_ops(struct trace_array *tr, int cnt)
 183{
 184	int save_ftrace_enabled = ftrace_enabled;
 185	struct ftrace_ops *dyn_ops;
 186	char *func1_name;
 187	char *func2_name;
 188	int len1;
 189	int len2;
 190	int ret = -1;
 191
 192	printk(KERN_CONT "PASSED\n");
 193	pr_info("Testing dynamic ftrace ops #%d: ", cnt);
 194
 195	ftrace_enabled = 1;
 196	reset_counts();
 197
 198	/* Handle PPC64 '.' name */
 199	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 200	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
 201	len1 = strlen(func1_name);
 202	len2 = strlen(func2_name);
 203
 204	/*
 205	 * Probe 1 will trace function 1.
 206	 * Probe 2 will trace function 2.
 207	 * Probe 3 will trace functions 1 and 2.
 208	 */
 209	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
 210	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
 211	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
 212	ftrace_set_filter(&test_probe3, func2_name, len2, 0);
 213
 214	register_ftrace_function(&test_probe1);
 215	register_ftrace_function(&test_probe2);
 216	register_ftrace_function(&test_probe3);
 217	/* First time we are running with main function */
 218	if (cnt > 1) {
 219		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
 220		register_ftrace_function(tr->ops);
 221	}
 222
 223	DYN_FTRACE_TEST_NAME();
 224
 225	print_counts();
 226
 227	if (trace_selftest_test_probe1_cnt != 1)
 228		goto out;
 229	if (trace_selftest_test_probe2_cnt != 0)
 230		goto out;
 231	if (trace_selftest_test_probe3_cnt != 1)
 232		goto out;
 233	if (cnt > 1) {
 234		if (trace_selftest_test_global_cnt == 0)
 235			goto out;
 236	}
 237
 238	DYN_FTRACE_TEST_NAME2();
 239
 240	print_counts();
 241
 242	if (trace_selftest_test_probe1_cnt != 1)
 243		goto out;
 244	if (trace_selftest_test_probe2_cnt != 1)
 245		goto out;
 246	if (trace_selftest_test_probe3_cnt != 2)
 247		goto out;
 248
 249	/* Add a dynamic probe */
 250	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
 251	if (!dyn_ops) {
 252		printk("MEMORY ERROR ");
 253		goto out;
 254	}
 255
 256	dyn_ops->func = trace_selftest_test_dyn_func;
 257
 258	register_ftrace_function(dyn_ops);
 259
 260	trace_selftest_test_global_cnt = 0;
 261
 262	DYN_FTRACE_TEST_NAME();
 263
 264	print_counts();
 265
 266	if (trace_selftest_test_probe1_cnt != 2)
 267		goto out_free;
 268	if (trace_selftest_test_probe2_cnt != 1)
 269		goto out_free;
 270	if (trace_selftest_test_probe3_cnt != 3)
 271		goto out_free;
 272	if (cnt > 1) {
 273		if (trace_selftest_test_global_cnt == 0)
 274			goto out_free;
 275	}
 276	if (trace_selftest_test_dyn_cnt == 0)
 277		goto out_free;
 278
 279	DYN_FTRACE_TEST_NAME2();
 280
 281	print_counts();
 282
 283	if (trace_selftest_test_probe1_cnt != 2)
 284		goto out_free;
 285	if (trace_selftest_test_probe2_cnt != 2)
 286		goto out_free;
 287	if (trace_selftest_test_probe3_cnt != 4)
 288		goto out_free;
 289
 290	ret = 0;
 291 out_free:
 292	unregister_ftrace_function(dyn_ops);
 293	kfree(dyn_ops);
 294
 295 out:
 296	/* Purposely unregister in the same order */
 297	unregister_ftrace_function(&test_probe1);
 298	unregister_ftrace_function(&test_probe2);
 299	unregister_ftrace_function(&test_probe3);
 300	if (cnt > 1)
 301		unregister_ftrace_function(tr->ops);
 302	ftrace_reset_array_ops(tr);
 303
 304	/* Make sure everything is off */
 305	reset_counts();
 306	DYN_FTRACE_TEST_NAME();
 307	DYN_FTRACE_TEST_NAME();
 308
 309	if (trace_selftest_test_probe1_cnt ||
 310	    trace_selftest_test_probe2_cnt ||
 311	    trace_selftest_test_probe3_cnt ||
 312	    trace_selftest_test_global_cnt ||
 313	    trace_selftest_test_dyn_cnt)
 314		ret = -1;
 315
 316	ftrace_enabled = save_ftrace_enabled;
 317
 318	return ret;
 319}
 320
 321/* Test dynamic code modification and ftrace filters */
 322static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 323						  struct trace_array *tr,
 324						  int (*func)(void))
 325{
 326	int save_ftrace_enabled = ftrace_enabled;
 327	unsigned long count;
 328	char *func_name;
 329	int ret;
 330
 331	/* The ftrace test PASSED */
 332	printk(KERN_CONT "PASSED\n");
 333	pr_info("Testing dynamic ftrace: ");
 334
 335	/* enable tracing, and record the filter function */
 336	ftrace_enabled = 1;
 337
 338	/* passed in by parameter to fool gcc from optimizing */
 339	func();
 340
 341	/*
 342	 * Some archs *cough*PowerPC*cough* add characters to the
 343	 * start of the function names. We simply put a '*' to
 344	 * accommodate them.
 345	 */
 346	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 347
 348	/* filter only on our function */
 349	ftrace_set_global_filter(func_name, strlen(func_name), 1);
 350
 351	/* enable tracing */
 352	ret = tracer_init(trace, tr);
 353	if (ret) {
 354		warn_failed_init_tracer(trace, ret);
 355		goto out;
 356	}
 357
 358	/* Sleep for a 1/10 of a second */
 359	msleep(100);
 360
 361	/* we should have nothing in the buffer */
 362	ret = trace_test_buffer(&tr->array_buffer, &count);
 363	if (ret)
 364		goto out;
 365
 366	if (count) {
 367		ret = -1;
 368		printk(KERN_CONT ".. filter did not filter .. ");
 369		goto out;
 370	}
 371
 372	/* call our function again */
 373	func();
 374
 375	/* sleep again */
 376	msleep(100);
 377
 378	/* stop the tracing. */
 379	tracing_stop();
 380	ftrace_enabled = 0;
 381
 382	/* check the trace buffer */
 383	ret = trace_test_buffer(&tr->array_buffer, &count);
 384
 385	ftrace_enabled = 1;
 386	tracing_start();
 387
 388	/* we should only have one item */
 389	if (!ret && count != 1) {
 390		trace->reset(tr);
 391		printk(KERN_CONT ".. filter failed count=%ld ..", count);
 392		ret = -1;
 393		goto out;
 394	}
 395
 396	/* Test the ops with global tracing running */
 397	ret = trace_selftest_ops(tr, 1);
 398	trace->reset(tr);
 399
 400 out:
 401	ftrace_enabled = save_ftrace_enabled;
 402
 403	/* Enable tracing on all functions again */
 404	ftrace_set_global_filter(NULL, 0, 1);
 405
 406	/* Test the ops with global tracing off */
 407	if (!ret)
 408		ret = trace_selftest_ops(tr, 2);
 409
 410	return ret;
 411}
 412
 413static int trace_selftest_recursion_cnt;
 414static void trace_selftest_test_recursion_func(unsigned long ip,
 415					       unsigned long pip,
 416					       struct ftrace_ops *op,
 417					       struct ftrace_regs *fregs)
 418{
 419	/*
 420	 * This function is registered without the recursion safe flag.
 421	 * The ftrace infrastructure should provide the recursion
 422	 * protection. If not, this will crash the kernel!
 423	 */
 424	if (trace_selftest_recursion_cnt++ > 10)
 425		return;
 426	DYN_FTRACE_TEST_NAME();
 427}
 428
 429static void trace_selftest_test_recursion_safe_func(unsigned long ip,
 430						    unsigned long pip,
 431						    struct ftrace_ops *op,
 432						    struct ftrace_regs *fregs)
 433{
 434	/*
 435	 * We said we would provide our own recursion. By calling
 436	 * this function again, we should recurse back into this function
 437	 * and count again. But this only happens if the arch supports
 438	 * all of ftrace features and nothing else is using the function
 439	 * tracing utility.
 440	 */
 441	if (trace_selftest_recursion_cnt++)
 442		return;
 443	DYN_FTRACE_TEST_NAME();
 444}
 445
 446static struct ftrace_ops test_rec_probe = {
 447	.func			= trace_selftest_test_recursion_func,
 448	.flags			= FTRACE_OPS_FL_RECURSION,
 449};
 450
 451static struct ftrace_ops test_recsafe_probe = {
 452	.func			= trace_selftest_test_recursion_safe_func,
 453};
 454
 455static int
 456trace_selftest_function_recursion(void)
 457{
 458	int save_ftrace_enabled = ftrace_enabled;
 459	char *func_name;
 460	int len;
 461	int ret;
 462
 463	/* The previous test PASSED */
 464	pr_cont("PASSED\n");
 465	pr_info("Testing ftrace recursion: ");
 466
 467
 468	/* enable tracing, and record the filter function */
 469	ftrace_enabled = 1;
 470
 471	/* Handle PPC64 '.' name */
 472	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 473	len = strlen(func_name);
 474
 475	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
 476	if (ret) {
 477		pr_cont("*Could not set filter* ");
 478		goto out;
 479	}
 480
 481	ret = register_ftrace_function(&test_rec_probe);
 482	if (ret) {
 483		pr_cont("*could not register callback* ");
 484		goto out;
 485	}
 486
 487	DYN_FTRACE_TEST_NAME();
 488
 489	unregister_ftrace_function(&test_rec_probe);
 490
 491	ret = -1;
 492	/*
 493	 * Recursion allows for transitions between context,
 494	 * and may call the callback twice.
 495	 */
 496	if (trace_selftest_recursion_cnt != 1 &&
 497	    trace_selftest_recursion_cnt != 2) {
 498		pr_cont("*callback not called once (or twice) (%d)* ",
 499			trace_selftest_recursion_cnt);
 500		goto out;
 501	}
 502
 503	trace_selftest_recursion_cnt = 1;
 504
 505	pr_cont("PASSED\n");
 506	pr_info("Testing ftrace recursion safe: ");
 507
 508	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
 509	if (ret) {
 510		pr_cont("*Could not set filter* ");
 511		goto out;
 512	}
 513
 514	ret = register_ftrace_function(&test_recsafe_probe);
 515	if (ret) {
 516		pr_cont("*could not register callback* ");
 517		goto out;
 518	}
 519
 520	DYN_FTRACE_TEST_NAME();
 521
 522	unregister_ftrace_function(&test_recsafe_probe);
 523
 524	ret = -1;
 525	if (trace_selftest_recursion_cnt != 2) {
 526		pr_cont("*callback not called expected 2 times (%d)* ",
 527			trace_selftest_recursion_cnt);
 528		goto out;
 529	}
 530
 531	ret = 0;
 532out:
 533	ftrace_enabled = save_ftrace_enabled;
 534
 535	return ret;
 536}
 537#else
 538# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
 539# define trace_selftest_function_recursion() ({ 0; })
 540#endif /* CONFIG_DYNAMIC_FTRACE */
 541
 542static enum {
 543	TRACE_SELFTEST_REGS_START,
 544	TRACE_SELFTEST_REGS_FOUND,
 545	TRACE_SELFTEST_REGS_NOT_FOUND,
 546} trace_selftest_regs_stat;
 547
 548static void trace_selftest_test_regs_func(unsigned long ip,
 549					  unsigned long pip,
 550					  struct ftrace_ops *op,
 551					  struct ftrace_regs *fregs)
 552{
 553	struct pt_regs *regs = ftrace_get_regs(fregs);
 554
 555	if (regs)
 556		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
 557	else
 558		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
 559}
 560
 561static struct ftrace_ops test_regs_probe = {
 562	.func		= trace_selftest_test_regs_func,
 563	.flags		= FTRACE_OPS_FL_SAVE_REGS,
 564};
 565
 566static int
 567trace_selftest_function_regs(void)
 568{
 569	int save_ftrace_enabled = ftrace_enabled;
 570	char *func_name;
 571	int len;
 572	int ret;
 573	int supported = 0;
 574
 575#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 576	supported = 1;
 577#endif
 578
 579	/* The previous test PASSED */
 580	pr_cont("PASSED\n");
 581	pr_info("Testing ftrace regs%s: ",
 582		!supported ? "(no arch support)" : "");
 583
 584	/* enable tracing, and record the filter function */
 585	ftrace_enabled = 1;
 586
 587	/* Handle PPC64 '.' name */
 588	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 589	len = strlen(func_name);
 590
 591	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
 592	/*
 593	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
 594	 * This test really doesn't care.
 595	 */
 596	if (ret && ret != -ENODEV) {
 597		pr_cont("*Could not set filter* ");
 598		goto out;
 599	}
 600
 601	ret = register_ftrace_function(&test_regs_probe);
 602	/*
 603	 * Now if the arch does not support passing regs, then this should
 604	 * have failed.
 605	 */
 606	if (!supported) {
 607		if (!ret) {
 608			pr_cont("*registered save-regs without arch support* ");
 609			goto out;
 610		}
 611		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
 612		ret = register_ftrace_function(&test_regs_probe);
 613	}
 614	if (ret) {
 615		pr_cont("*could not register callback* ");
 616		goto out;
 617	}
 618
 619
 620	DYN_FTRACE_TEST_NAME();
 621
 622	unregister_ftrace_function(&test_regs_probe);
 623
 624	ret = -1;
 625
 626	switch (trace_selftest_regs_stat) {
 627	case TRACE_SELFTEST_REGS_START:
 628		pr_cont("*callback never called* ");
 629		goto out;
 630
 631	case TRACE_SELFTEST_REGS_FOUND:
 632		if (supported)
 633			break;
 634		pr_cont("*callback received regs without arch support* ");
 635		goto out;
 636
 637	case TRACE_SELFTEST_REGS_NOT_FOUND:
 638		if (!supported)
 639			break;
 640		pr_cont("*callback received NULL regs* ");
 641		goto out;
 642	}
 643
 644	ret = 0;
 645out:
 646	ftrace_enabled = save_ftrace_enabled;
 647
 648	return ret;
 649}
 650
 651/*
 652 * Simple verification test of ftrace function tracer.
 653 * Enable ftrace, sleep 1/10 second, and then read the trace
 654 * buffer to see if all is in order.
 655 */
 656__init int
 657trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 658{
 659	int save_ftrace_enabled = ftrace_enabled;
 660	unsigned long count;
 661	int ret;
 662
 663#ifdef CONFIG_DYNAMIC_FTRACE
 664	if (ftrace_filter_param) {
 665		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
 666		return 0;
 667	}
 668#endif
 669
 670	/* make sure msleep has been recorded */
 671	msleep(1);
 672
 673	/* start the tracing */
 674	ftrace_enabled = 1;
 675
 676	ret = tracer_init(trace, tr);
 677	if (ret) {
 678		warn_failed_init_tracer(trace, ret);
 679		goto out;
 680	}
 681
 682	/* Sleep for a 1/10 of a second */
 683	msleep(100);
 684	/* stop the tracing. */
 685	tracing_stop();
 686	ftrace_enabled = 0;
 687
 688	/* check the trace buffer */
 689	ret = trace_test_buffer(&tr->array_buffer, &count);
 690
 691	ftrace_enabled = 1;
 692	trace->reset(tr);
 693	tracing_start();
 694
 695	if (!ret && !count) {
 696		printk(KERN_CONT ".. no entries found ..");
 697		ret = -1;
 698		goto out;
 699	}
 700
 701	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
 702						     DYN_FTRACE_TEST_NAME);
 703	if (ret)
 704		goto out;
 705
 706	ret = trace_selftest_function_recursion();
 707	if (ret)
 708		goto out;
 709
 710	ret = trace_selftest_function_regs();
 711 out:
 712	ftrace_enabled = save_ftrace_enabled;
 713
 714	/* kill ftrace totally if we failed */
 715	if (ret)
 716		ftrace_kill();
 717
 718	return ret;
 719}
 720#endif /* CONFIG_FUNCTION_TRACER */
 721
 722
 723#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 724
 725/* Maximum number of functions to trace before diagnosing a hang */
 726#define GRAPH_MAX_FUNC_TEST	100000000
 727
 728static unsigned int graph_hang_thresh;
 729
 730/* Wrap the real function entry probe to avoid possible hanging */
 731static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
 732{
 733	/* This is harmlessly racy, we want to approximately detect a hang */
 734	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
 735		ftrace_graph_stop();
 736		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
 737		if (ftrace_dump_on_oops) {
 738			ftrace_dump(DUMP_ALL);
 739			/* ftrace_dump() disables tracing */
 740			tracing_on();
 741		}
 742		return 0;
 743	}
 744
 745	return trace_graph_entry(trace);
 746}
 747
 748static struct fgraph_ops fgraph_ops __initdata  = {
 749	.entryfunc		= &trace_graph_entry_watchdog,
 750	.retfunc		= &trace_graph_return,
 751};
 752
 753/*
  754 * Pretty much the same as for the function tracer from which the selftest
 755 * has been borrowed.
 756 */
 757__init int
 758trace_selftest_startup_function_graph(struct tracer *trace,
 759					struct trace_array *tr)
 760{
 761	int ret;
 762	unsigned long count;
 763
 764#ifdef CONFIG_DYNAMIC_FTRACE
 765	if (ftrace_filter_param) {
 766		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
 767		return 0;
 768	}
 769#endif
 770
 771	/*
 772	 * Simulate the init() callback but we attach a watchdog callback
 773	 * to detect and recover from possible hangs
 774	 */
 775	tracing_reset_online_cpus(&tr->array_buffer);
 776	set_graph_array(tr);
 777	ret = register_ftrace_graph(&fgraph_ops);
 778	if (ret) {
 779		warn_failed_init_tracer(trace, ret);
 780		goto out;
 781	}
 782	tracing_start_cmdline_record();
 783
 784	/* Sleep for a 1/10 of a second */
 785	msleep(100);
 786
 787	/* Have we just recovered from a hang? */
 788	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
 789		disable_tracing_selftest("recovering from a hang");
 790		ret = -1;
 791		goto out;
 792	}
 793
 794	tracing_stop();
 795
 796	/* check the trace buffer */
 797	ret = trace_test_buffer(&tr->array_buffer, &count);
 798
 799	/* Need to also simulate the tr->reset to remove this fgraph_ops */
 800	tracing_stop_cmdline_record();
 801	unregister_ftrace_graph(&fgraph_ops);
 802
 803	tracing_start();
 804
 805	if (!ret && !count) {
 806		printk(KERN_CONT ".. no entries found ..");
 807		ret = -1;
 808		goto out;
 809	}
 810
 811	/* Don't test dynamic tracing, the function tracer already did */
 812
 813out:
 814	/* Stop it if we failed */
 815	if (ret)
 816		ftrace_graph_stop();
 817
 818	return ret;
 819}
 820#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 821
 822
 823#ifdef CONFIG_IRQSOFF_TRACER
 824int
 825trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 826{
 827	unsigned long save_max = tr->max_latency;
 828	unsigned long count;
 829	int ret;
 830
 831	/* start the tracing */
 832	ret = tracer_init(trace, tr);
 833	if (ret) {
 834		warn_failed_init_tracer(trace, ret);
 835		return ret;
 836	}
 837
 838	/* reset the max latency */
 839	tr->max_latency = 0;
 840	/* disable interrupts for a bit */
 841	local_irq_disable();
 842	udelay(100);
 843	local_irq_enable();
 844
 845	/*
 846	 * Stop the tracer to avoid a warning subsequent
 847	 * to buffer flipping failure because tracing_stop()
 848	 * disables the tr and max buffers, making flipping impossible
 849	 * in case of parallels max irqs off latencies.
 850	 */
 851	trace->stop(tr);
 852	/* stop the tracing. */
 853	tracing_stop();
 854	/* check both trace buffers */
 855	ret = trace_test_buffer(&tr->array_buffer, NULL);
 856	if (!ret)
 857		ret = trace_test_buffer(&tr->max_buffer, &count);
 858	trace->reset(tr);
 859	tracing_start();
 860
 861	if (!ret && !count) {
 862		printk(KERN_CONT ".. no entries found ..");
 863		ret = -1;
 864	}
 865
 866	tr->max_latency = save_max;
 867
 868	return ret;
 869}
 870#endif /* CONFIG_IRQSOFF_TRACER */
 871
 872#ifdef CONFIG_PREEMPT_TRACER
 873int
 874trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 875{
 876	unsigned long save_max = tr->max_latency;
 877	unsigned long count;
 878	int ret;
 879
 880	/*
 881	 * Now that the big kernel lock is no longer preemptible,
 882	 * and this is called with the BKL held, it will always
 883	 * fail. If preemption is already disabled, simply
 884	 * pass the test. When the BKL is removed, or becomes
 885	 * preemptible again, we will once again test this,
 886	 * so keep it in.
 887	 */
 888	if (preempt_count()) {
 889		printk(KERN_CONT "can not test ... force ");
 890		return 0;
 891	}
 892
 893	/* start the tracing */
 894	ret = tracer_init(trace, tr);
 895	if (ret) {
 896		warn_failed_init_tracer(trace, ret);
 897		return ret;
 898	}
 899
 900	/* reset the max latency */
 901	tr->max_latency = 0;
 902	/* disable preemption for a bit */
 903	preempt_disable();
 904	udelay(100);
 905	preempt_enable();
 906
 907	/*
 908	 * Stop the tracer to avoid a warning subsequent
 909	 * to buffer flipping failure because tracing_stop()
 910	 * disables the tr and max buffers, making flipping impossible
 911	 * in case of parallels max preempt off latencies.
 912	 */
 913	trace->stop(tr);
 914	/* stop the tracing. */
 915	tracing_stop();
 916	/* check both trace buffers */
 917	ret = trace_test_buffer(&tr->array_buffer, NULL);
 918	if (!ret)
 919		ret = trace_test_buffer(&tr->max_buffer, &count);
 920	trace->reset(tr);
 921	tracing_start();
 922
 923	if (!ret && !count) {
 924		printk(KERN_CONT ".. no entries found ..");
 925		ret = -1;
 926	}
 927
 928	tr->max_latency = save_max;
 929
 930	return ret;
 931}
 932#endif /* CONFIG_PREEMPT_TRACER */
 933
 934#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
 935int
 936trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
 937{
 938	unsigned long save_max = tr->max_latency;
 939	unsigned long count;
 940	int ret;
 941
 942	/*
 943	 * Now that the big kernel lock is no longer preemptible,
 944	 * and this is called with the BKL held, this test will
 945	 * always fail. If preemption is already disabled, simply
 946	 * pass the test. When the BKL is removed, or becomes
 947	 * preemptible again, we will test this once more, so
 948	 * keep it in.
 949	 */
 950	if (preempt_count()) {
 951		printk(KERN_CONT "can not test ... force ");
 952		return 0;
 953	}
 954
 955	/* start the tracing */
 956	ret = tracer_init(trace, tr);
 957	if (ret) {
 958		warn_failed_init_tracer(trace, ret);
 959		goto out_no_start;
 960	}
 961
 962	/* reset the max latency */
 963	tr->max_latency = 0;
 964
 965	/* disable preemption and interrupts for a bit */
 966	preempt_disable();
 967	local_irq_disable();
 968	udelay(100);
 969	preempt_enable();
 970	/* reverse the order of preempt vs irqs */
 971	local_irq_enable();
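    	/*
    	 * Enabling preemption before interrupts means neither section nests
    	 * cleanly inside the other; the combined window runs from
    	 * preempt_disable() above to this local_irq_enable(), which is the
    	 * latency the preemptirqsoff tracer should report.
    	 */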
 972
 973	/*
 974	 * Stop the tracer to avoid a warning from a failed buffer
 975	 * flip: tracing_stop() disables both the tr and max buffers,
 976	 * which makes the flip impossible if a max irqs/preempt-off
 977	 * latency is being recorded in parallel.
 978	 */
 979	trace->stop(tr);
 980	/* stop the tracing. */
 981	tracing_stop();
 982	/* check both trace buffers */
 983	ret = trace_test_buffer(&tr->array_buffer, NULL);
 984	if (ret)
 985		goto out;
 986
 987	ret = trace_test_buffer(&tr->max_buffer, &count);
 988	if (ret)
 989		goto out;
 990
 991	if (!ret && !count) {
 992		printk(KERN_CONT ".. no entries found ..");
 993		ret = -1;
 994		goto out;
 995	}
 996
 997	/* do the test by disabling interrupts first this time */
 998	tr->max_latency = 0;
 999	tracing_start();
1000	trace->start(tr);
1001
1002	preempt_disable();
1003	local_irq_disable();
1004	udelay(100);
1005	preempt_enable();
1006	/* reverse the order of preempt vs irqs */
1007	local_irq_enable();
1008
1009	trace->stop(tr);
1010	/* stop the tracing. */
1011	tracing_stop();
1012	/* check both trace buffers */
1013	ret = trace_test_buffer(&tr->array_buffer, NULL);
1014	if (ret)
1015		goto out;
1016
1017	ret = trace_test_buffer(&tr->max_buffer, &count);
1018
1019	if (!ret && !count) {
1020		printk(KERN_CONT ".. no entries found ..");
1021		ret = -1;
1022		goto out;
1023	}
1024
1025out:
1026	tracing_start();
1027out_no_start:
1028	trace->reset(tr);
1029	tr->max_latency = save_max;
1030
1031	return ret;
1032}
1033#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
1034
1035#ifdef CONFIG_NOP_TRACER
1036int
1037trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
1038{
1039	/* What could possibly go wrong? */
1040	return 0;
1041}
1042#endif
1043
1044#ifdef CONFIG_SCHED_TRACER
1045
1046struct wakeup_test_data {
1047	struct completion	is_ready;
1048	int			go;
1049};
1050
1051static int trace_wakeup_test_thread(void *data)
1052{
1053	/* Make this a -deadline thread */
1054	static const struct sched_attr attr = {
1055		.sched_policy = SCHED_DEADLINE,
1056		.sched_runtime = 100000ULL,
1057		.sched_deadline = 10000000ULL,
1058		.sched_period = 10000000ULL
1059	};
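    	/*
    	 * sched_attr times are in nanoseconds: 100us of runtime every 10ms
    	 * period, with the deadline equal to the period, i.e. about 1% of
    	 * CPU bandwidth - plenty for a thread that only wakes up to complete
    	 * a completion.
    	 */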
1060	struct wakeup_test_data *x = data;
1061
1062	sched_setattr(current, &attr);
1063
 1064	/* Let the test know we are now running with the new scheduling policy */
1065	complete(&x->is_ready);
1066
1067	/* now go to sleep and let the test wake us up */
1068	set_current_state(TASK_INTERRUPTIBLE);
1069	while (!x->go) {
1070		schedule();
1071		set_current_state(TASK_INTERRUPTIBLE);
1072	}
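    	/*
    	 * The state is set to TASK_INTERRUPTIBLE before re-checking x->go
    	 * above, so a wakeup that races with the check simply leaves the
    	 * thread runnable instead of being lost.
    	 */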
1073
1074	complete(&x->is_ready);
1075
1076	set_current_state(TASK_INTERRUPTIBLE);
1077
1078	/* we are awake, now wait to disappear */
1079	while (!kthread_should_stop()) {
1080		schedule();
1081		set_current_state(TASK_INTERRUPTIBLE);
1082	}
1083
1084	__set_current_state(TASK_RUNNING);
1085
1086	return 0;
1087}
1088int
1089trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
1090{
1091	unsigned long save_max = tr->max_latency;
1092	struct task_struct *p;
1093	struct wakeup_test_data data;
1094	unsigned long count;
1095	int ret;
1096
1097	memset(&data, 0, sizeof(data));
1098
1099	init_completion(&data.is_ready);
1100
1101	/* create a -deadline thread */
1102	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
1103	if (IS_ERR(p)) {
1104		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
1105		return -1;
1106	}
1107
1108	/* make sure the thread is running at -deadline policy */
1109	wait_for_completion(&data.is_ready);
1110
1111	/* start the tracing */
1112	ret = tracer_init(trace, tr);
1113	if (ret) {
1114		warn_failed_init_tracer(trace, ret);
1115		return ret;
1116	}
1117
1118	/* reset the max latency */
1119	tr->max_latency = 0;
1120
1121	while (p->on_rq) {
1122		/*
1123		 * Sleep to make sure the -deadline thread is asleep too.
1124		 * On virtual machines we can't rely on timings,
1125		 * but we want to make sure this test still works.
1126		 */
1127		msleep(100);
1128	}
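    	/*
    	 * Only once the -deadline thread is fully off the run queue does the
    	 * wake_up_process() below generate a real sleep-to-run transition for
    	 * the wakeup tracer to time.
    	 */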
1129
1130	init_completion(&data.is_ready);
1131
1132	data.go = 1;
1133	/* memory barrier is in the wake_up_process() */
1134
1135	wake_up_process(p);
1136
1137	/* Wait for the task to wake up */
1138	wait_for_completion(&data.is_ready);
1139
1140	/* stop the tracing. */
1141	tracing_stop();
1142	/* check both trace buffers */
1143	ret = trace_test_buffer(&tr->array_buffer, NULL);
1144	if (!ret)
1145		ret = trace_test_buffer(&tr->max_buffer, &count);
1146
1147
1148	trace->reset(tr);
1149	tracing_start();
1150
1151	tr->max_latency = save_max;
1152
1153	/* kill the thread */
1154	kthread_stop(p);
1155
1156	if (!ret && !count) {
1157		printk(KERN_CONT ".. no entries found ..");
1158		ret = -1;
1159	}
1160
1161	return ret;
1162}
1163#endif /* CONFIG_SCHED_TRACER */
 1164
1165#ifdef CONFIG_BRANCH_TRACER
1166int
1167trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
1168{
1169	unsigned long count;
1170	int ret;
1171
1172	/* start the tracing */
1173	ret = tracer_init(trace, tr);
1174	if (ret) {
1175		warn_failed_init_tracer(trace, ret);
1176		return ret;
1177	}
1178
 1179	/* Sleep for 1/10 of a second */
1180	msleep(100);
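    	/*
    	 * Any likely()/unlikely() annotations hit while we sleep feed the
    	 * branch tracer; the buffer check below only requires that at least
    	 * one such entry showed up.
    	 */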
1181	/* stop the tracing. */
1182	tracing_stop();
1183	/* check the trace buffer */
1184	ret = trace_test_buffer(&tr->array_buffer, &count);
1185	trace->reset(tr);
1186	tracing_start();
1187
1188	if (!ret && !count) {
1189		printk(KERN_CONT ".. no entries found ..");
1190		ret = -1;
1191	}
1192
1193	return ret;
1194}
1195#endif /* CONFIG_BRANCH_TRACER */
1196