v3.15
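The listing below appears to be kernel/trace/trace_selftest.c (the tracer self-tests pulled into trace.c via #include, as its first comment notes) as it stood in v3.15; a second copy of the same file as of v6.8 follows further down.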
   1/* Include in trace.c */
   2
 
   3#include <linux/stringify.h>
   4#include <linux/kthread.h>
   5#include <linux/delay.h>
   6#include <linux/slab.h>
   7
   8static inline int trace_valid_entry(struct trace_entry *entry)
   9{
  10	switch (entry->type) {
  11	case TRACE_FN:
  12	case TRACE_CTX:
  13	case TRACE_WAKE:
  14	case TRACE_STACK:
  15	case TRACE_PRINT:
  16	case TRACE_BRANCH:
  17	case TRACE_GRAPH_ENT:
  18	case TRACE_GRAPH_RET:
  19		return 1;
  20	}
  21	return 0;
  22}
  23
  24static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
  25{
  26	struct ring_buffer_event *event;
  27	struct trace_entry *entry;
  28	unsigned int loops = 0;
  29
  30	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
  31		entry = ring_buffer_event_data(event);
  32
  33		/*
  34		 * The ring buffer is a size of trace_buf_size, if
  35		 * we loop more than the size, there's something wrong
  36		 * with the ring buffer.
  37		 */
  38		if (loops++ > trace_buf_size) {
  39			printk(KERN_CONT ".. bad ring buffer ");
  40			goto failed;
  41		}
  42		if (!trace_valid_entry(entry)) {
  43			printk(KERN_CONT ".. invalid entry %d ",
  44				entry->type);
  45			goto failed;
  46		}
  47	}
  48	return 0;
  49
  50 failed:
  51	/* disable tracing */
  52	tracing_disabled = 1;
  53	printk(KERN_CONT ".. corrupted trace buffer .. ");
  54	return -1;
  55}
  56
  57/*
  58 * Test the trace buffer to see if all the elements
  59 * are still sane.
  60 */
  61static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
  62{
  63	unsigned long flags, cnt = 0;
  64	int cpu, ret = 0;
  65
  66	/* Don't allow flipping of max traces now */
  67	local_irq_save(flags);
  68	arch_spin_lock(&ftrace_max_lock);
  69
  70	cnt = ring_buffer_entries(buf->buffer);
  71
  72	/*
  73	 * The trace_test_buffer_cpu runs a while loop to consume all data.
  74	 * If the calling tracer is broken, and is constantly filling
  75	 * the buffer, this will run forever, and hard lock the box.
  76	 * We disable the ring buffer while we do this test to prevent
  77	 * a hard lock up.
  78	 */
  79	tracing_off();
  80	for_each_possible_cpu(cpu) {
  81		ret = trace_test_buffer_cpu(buf, cpu);
  82		if (ret)
  83			break;
  84	}
  85	tracing_on();
  86	arch_spin_unlock(&ftrace_max_lock);
  87	local_irq_restore(flags);
  88
  89	if (count)
  90		*count = cnt;
  91
  92	return ret;
  93}
  94
  95static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
  96{
  97	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
  98		trace->name, init_ret);
  99}
 100#ifdef CONFIG_FUNCTION_TRACER
 101
 102#ifdef CONFIG_DYNAMIC_FTRACE
 103
 104static int trace_selftest_test_probe1_cnt;
 105static void trace_selftest_test_probe1_func(unsigned long ip,
 106					    unsigned long pip,
 107					    struct ftrace_ops *op,
 108					    struct pt_regs *pt_regs)
 109{
 110	trace_selftest_test_probe1_cnt++;
 111}
 112
 113static int trace_selftest_test_probe2_cnt;
 114static void trace_selftest_test_probe2_func(unsigned long ip,
 115					    unsigned long pip,
 116					    struct ftrace_ops *op,
 117					    struct pt_regs *pt_regs)
 118{
 119	trace_selftest_test_probe2_cnt++;
 120}
 121
 122static int trace_selftest_test_probe3_cnt;
 123static void trace_selftest_test_probe3_func(unsigned long ip,
 124					    unsigned long pip,
 125					    struct ftrace_ops *op,
 126					    struct pt_regs *pt_regs)
 127{
 128	trace_selftest_test_probe3_cnt++;
 129}
 130
 131static int trace_selftest_test_global_cnt;
 132static void trace_selftest_test_global_func(unsigned long ip,
 133					    unsigned long pip,
 134					    struct ftrace_ops *op,
 135					    struct pt_regs *pt_regs)
 136{
 137	trace_selftest_test_global_cnt++;
 138}
 139
 140static int trace_selftest_test_dyn_cnt;
 141static void trace_selftest_test_dyn_func(unsigned long ip,
 142					 unsigned long pip,
 143					 struct ftrace_ops *op,
 144					 struct pt_regs *pt_regs)
 145{
 146	trace_selftest_test_dyn_cnt++;
 147}
 148
 149static struct ftrace_ops test_probe1 = {
 150	.func			= trace_selftest_test_probe1_func,
 151	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 152};
 153
 154static struct ftrace_ops test_probe2 = {
 155	.func			= trace_selftest_test_probe2_func,
 156	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 157};
 158
 159static struct ftrace_ops test_probe3 = {
 160	.func			= trace_selftest_test_probe3_func,
 161	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 162};
 163
 164static struct ftrace_ops test_global = {
 165	.func		= trace_selftest_test_global_func,
 166	.flags		= FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
 167};
 168
 169static void print_counts(void)
 170{
 171	printk("(%d %d %d %d %d) ",
 172	       trace_selftest_test_probe1_cnt,
 173	       trace_selftest_test_probe2_cnt,
 174	       trace_selftest_test_probe3_cnt,
 175	       trace_selftest_test_global_cnt,
 176	       trace_selftest_test_dyn_cnt);
 177}
 178
 179static void reset_counts(void)
 180{
 181	trace_selftest_test_probe1_cnt = 0;
 182	trace_selftest_test_probe2_cnt = 0;
 183	trace_selftest_test_probe3_cnt = 0;
 184	trace_selftest_test_global_cnt = 0;
 185	trace_selftest_test_dyn_cnt = 0;
 186}
 187
 188static int trace_selftest_ops(int cnt)
 189{
 190	int save_ftrace_enabled = ftrace_enabled;
 191	struct ftrace_ops *dyn_ops;
 192	char *func1_name;
 193	char *func2_name;
 194	int len1;
 195	int len2;
 196	int ret = -1;
 197
 198	printk(KERN_CONT "PASSED\n");
 199	pr_info("Testing dynamic ftrace ops #%d: ", cnt);
 200
 201	ftrace_enabled = 1;
 202	reset_counts();
 203
 204	/* Handle PPC64 '.' name */
 205	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 206	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
 207	len1 = strlen(func1_name);
 208	len2 = strlen(func2_name);
 209
 210	/*
 211	 * Probe 1 will trace function 1.
 212	 * Probe 2 will trace function 2.
 213	 * Probe 3 will trace functions 1 and 2.
 214	 */
 215	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
 216	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
 217	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
 218	ftrace_set_filter(&test_probe3, func2_name, len2, 0);
 219
 220	register_ftrace_function(&test_probe1);
 221	register_ftrace_function(&test_probe2);
 222	register_ftrace_function(&test_probe3);
 223	register_ftrace_function(&test_global);
 224
 225	DYN_FTRACE_TEST_NAME();
 226
 227	print_counts();
 228
 229	if (trace_selftest_test_probe1_cnt != 1)
 230		goto out;
 231	if (trace_selftest_test_probe2_cnt != 0)
 232		goto out;
 233	if (trace_selftest_test_probe3_cnt != 1)
 234		goto out;
 235	if (trace_selftest_test_global_cnt == 0)
 236		goto out;
 237
 238	DYN_FTRACE_TEST_NAME2();
 239
 240	print_counts();
 241
 242	if (trace_selftest_test_probe1_cnt != 1)
 243		goto out;
 244	if (trace_selftest_test_probe2_cnt != 1)
 245		goto out;
 246	if (trace_selftest_test_probe3_cnt != 2)
 247		goto out;
 248
 249	/* Add a dynamic probe */
 250	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
 251	if (!dyn_ops) {
 252		printk("MEMORY ERROR ");
 253		goto out;
 254	}
 255
 256	dyn_ops->func = trace_selftest_test_dyn_func;
 257
 258	register_ftrace_function(dyn_ops);
 259
 260	trace_selftest_test_global_cnt = 0;
 261
 262	DYN_FTRACE_TEST_NAME();
 263
 264	print_counts();
 265
 266	if (trace_selftest_test_probe1_cnt != 2)
 267		goto out_free;
 268	if (trace_selftest_test_probe2_cnt != 1)
 269		goto out_free;
 270	if (trace_selftest_test_probe3_cnt != 3)
 271		goto out_free;
 272	if (trace_selftest_test_global_cnt == 0)
 273		goto out;
 274	if (trace_selftest_test_dyn_cnt == 0)
 275		goto out_free;
 276
 277	DYN_FTRACE_TEST_NAME2();
 278
 279	print_counts();
 280
 281	if (trace_selftest_test_probe1_cnt != 2)
 282		goto out_free;
 283	if (trace_selftest_test_probe2_cnt != 2)
 284		goto out_free;
 285	if (trace_selftest_test_probe3_cnt != 4)
 286		goto out_free;
 287
 288	ret = 0;
 289 out_free:
 290	unregister_ftrace_function(dyn_ops);
 291	kfree(dyn_ops);
 292
 293 out:
 294	/* Purposely unregister in the same order */
 295	unregister_ftrace_function(&test_probe1);
 296	unregister_ftrace_function(&test_probe2);
 297	unregister_ftrace_function(&test_probe3);
 298	unregister_ftrace_function(&test_global);
 299
 300	/* Make sure everything is off */
 301	reset_counts();
 302	DYN_FTRACE_TEST_NAME();
 303	DYN_FTRACE_TEST_NAME();
 304
 305	if (trace_selftest_test_probe1_cnt ||
 306	    trace_selftest_test_probe2_cnt ||
 307	    trace_selftest_test_probe3_cnt ||
 308	    trace_selftest_test_global_cnt ||
 309	    trace_selftest_test_dyn_cnt)
 310		ret = -1;
 311
 312	ftrace_enabled = save_ftrace_enabled;
 313
 314	return ret;
 315}
 316
 317/* Test dynamic code modification and ftrace filters */
 318int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 319					   struct trace_array *tr,
 320					   int (*func)(void))
 321{
 322	int save_ftrace_enabled = ftrace_enabled;
 323	unsigned long count;
 324	char *func_name;
 325	int ret;
 326
 327	/* The ftrace test PASSED */
 328	printk(KERN_CONT "PASSED\n");
 329	pr_info("Testing dynamic ftrace: ");
 330
 331	/* enable tracing, and record the filter function */
 332	ftrace_enabled = 1;
 333
 334	/* passed in by parameter to fool gcc from optimizing */
 335	func();
 336
 337	/*
 338	 * Some archs *cough*PowerPC*cough* add characters to the
 339	 * start of the function names. We simply put a '*' to
 340	 * accommodate them.
 341	 */
 342	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 343
 344	/* filter only on our function */
 345	ftrace_set_global_filter(func_name, strlen(func_name), 1);
 346
 347	/* enable tracing */
 348	ret = tracer_init(trace, tr);
 349	if (ret) {
 350		warn_failed_init_tracer(trace, ret);
 351		goto out;
 352	}
 353
 354	/* Sleep for a 1/10 of a second */
 355	msleep(100);
 356
 357	/* we should have nothing in the buffer */
 358	ret = trace_test_buffer(&tr->trace_buffer, &count);
 359	if (ret)
 360		goto out;
 361
 362	if (count) {
 363		ret = -1;
 364		printk(KERN_CONT ".. filter did not filter .. ");
 365		goto out;
 366	}
 367
 368	/* call our function again */
 369	func();
 370
 371	/* sleep again */
 372	msleep(100);
 373
 374	/* stop the tracing. */
 375	tracing_stop();
 376	ftrace_enabled = 0;
 377
 378	/* check the trace buffer */
 379	ret = trace_test_buffer(&tr->trace_buffer, &count);
 380	tracing_start();
 381
 382	/* we should only have one item */
 383	if (!ret && count != 1) {
 384		trace->reset(tr);
 385		printk(KERN_CONT ".. filter failed count=%ld ..", count);
 386		ret = -1;
 387		goto out;
 388	}
 389
 390	/* Test the ops with global tracing running */
 391	ret = trace_selftest_ops(1);
 392	trace->reset(tr);
 393
 394 out:
 395	ftrace_enabled = save_ftrace_enabled;
 396
 397	/* Enable tracing on all functions again */
 398	ftrace_set_global_filter(NULL, 0, 1);
 399
 400	/* Test the ops with global tracing off */
 401	if (!ret)
 402		ret = trace_selftest_ops(2);
 403
 404	return ret;
 405}
 406
 407static int trace_selftest_recursion_cnt;
 408static void trace_selftest_test_recursion_func(unsigned long ip,
 409					       unsigned long pip,
 410					       struct ftrace_ops *op,
 411					       struct pt_regs *pt_regs)
 412{
 413	/*
 414	 * This function is registered without the recursion safe flag.
 415	 * The ftrace infrastructure should provide the recursion
 416	 * protection. If not, this will crash the kernel!
 417	 */
 418	if (trace_selftest_recursion_cnt++ > 10)
 419		return;
 420	DYN_FTRACE_TEST_NAME();
 421}
 422
 423static void trace_selftest_test_recursion_safe_func(unsigned long ip,
 424						    unsigned long pip,
 425						    struct ftrace_ops *op,
 426						    struct pt_regs *pt_regs)
 427{
 428	/*
 429	 * We said we would provide our own recursion. By calling
 430	 * this function again, we should recurse back into this function
 431	 * and count again. But this only happens if the arch supports
 432	 * all of ftrace features and nothing else is using the function
 433	 * tracing utility.
 434	 */
 435	if (trace_selftest_recursion_cnt++)
 436		return;
 437	DYN_FTRACE_TEST_NAME();
 438}
 439
 440static struct ftrace_ops test_rec_probe = {
 441	.func			= trace_selftest_test_recursion_func,
 442};
 443
 444static struct ftrace_ops test_recsafe_probe = {
 445	.func			= trace_selftest_test_recursion_safe_func,
 446	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 447};
 448
 449static int
 450trace_selftest_function_recursion(void)
 451{
 452	int save_ftrace_enabled = ftrace_enabled;
 453	char *func_name;
 454	int len;
 455	int ret;
 456
 457	/* The previous test PASSED */
 458	pr_cont("PASSED\n");
 459	pr_info("Testing ftrace recursion: ");
 460
 461
 462	/* enable tracing, and record the filter function */
 463	ftrace_enabled = 1;
 464
 465	/* Handle PPC64 '.' name */
 466	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 467	len = strlen(func_name);
 468
 469	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
 470	if (ret) {
 471		pr_cont("*Could not set filter* ");
 472		goto out;
 473	}
 474
 475	ret = register_ftrace_function(&test_rec_probe);
 476	if (ret) {
 477		pr_cont("*could not register callback* ");
 478		goto out;
 479	}
 480
 481	DYN_FTRACE_TEST_NAME();
 482
 483	unregister_ftrace_function(&test_rec_probe);
 484
 485	ret = -1;
 486	if (trace_selftest_recursion_cnt != 1) {
 487		pr_cont("*callback not called once (%d)* ",
 488			trace_selftest_recursion_cnt);
 489		goto out;
 490	}
 491
 492	trace_selftest_recursion_cnt = 1;
 493
 494	pr_cont("PASSED\n");
 495	pr_info("Testing ftrace recursion safe: ");
 496
 497	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
 498	if (ret) {
 499		pr_cont("*Could not set filter* ");
 500		goto out;
 501	}
 502
 503	ret = register_ftrace_function(&test_recsafe_probe);
 504	if (ret) {
 505		pr_cont("*could not register callback* ");
 506		goto out;
 507	}
 508
 509	DYN_FTRACE_TEST_NAME();
 510
 511	unregister_ftrace_function(&test_recsafe_probe);
 512
 513	ret = -1;
 514	if (trace_selftest_recursion_cnt != 2) {
 515		pr_cont("*callback not called expected 2 times (%d)* ",
 516			trace_selftest_recursion_cnt);
 517		goto out;
 518	}
 519
 520	ret = 0;
 521out:
 522	ftrace_enabled = save_ftrace_enabled;
 523
 524	return ret;
 525}
 526#else
 527# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
 528# define trace_selftest_function_recursion() ({ 0; })
 529#endif /* CONFIG_DYNAMIC_FTRACE */
 530
 531static enum {
 532	TRACE_SELFTEST_REGS_START,
 533	TRACE_SELFTEST_REGS_FOUND,
 534	TRACE_SELFTEST_REGS_NOT_FOUND,
 535} trace_selftest_regs_stat;
 536
 537static void trace_selftest_test_regs_func(unsigned long ip,
 538					  unsigned long pip,
 539					  struct ftrace_ops *op,
 540					  struct pt_regs *pt_regs)
 541{
 542	if (pt_regs)
 543		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
 544	else
 545		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
 546}
 547
 548static struct ftrace_ops test_regs_probe = {
 549	.func		= trace_selftest_test_regs_func,
 550	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
 551};
 552
 553static int
 554trace_selftest_function_regs(void)
 555{
 556	int save_ftrace_enabled = ftrace_enabled;
 557	char *func_name;
 558	int len;
 559	int ret;
 560	int supported = 0;
 561
 562#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 563	supported = 1;
 564#endif
 565
 566	/* The previous test PASSED */
 567	pr_cont("PASSED\n");
 568	pr_info("Testing ftrace regs%s: ",
 569		!supported ? "(no arch support)" : "");
 570
 571	/* enable tracing, and record the filter function */
 572	ftrace_enabled = 1;
 573
 574	/* Handle PPC64 '.' name */
 575	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 576	len = strlen(func_name);
 577
 578	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
 579	/*
 580	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
 581	 * This test really doesn't care.
 582	 */
 583	if (ret && ret != -ENODEV) {
 584		pr_cont("*Could not set filter* ");
 585		goto out;
 586	}
 587
 588	ret = register_ftrace_function(&test_regs_probe);
 589	/*
 590	 * Now if the arch does not support passing regs, then this should
 591	 * have failed.
 592	 */
 593	if (!supported) {
 594		if (!ret) {
 595			pr_cont("*registered save-regs without arch support* ");
 596			goto out;
 597		}
 598		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
 599		ret = register_ftrace_function(&test_regs_probe);
 600	}
 601	if (ret) {
 602		pr_cont("*could not register callback* ");
 603		goto out;
 604	}
 605
 606
 607	DYN_FTRACE_TEST_NAME();
 608
 609	unregister_ftrace_function(&test_regs_probe);
 610
 611	ret = -1;
 612
 613	switch (trace_selftest_regs_stat) {
 614	case TRACE_SELFTEST_REGS_START:
 615		pr_cont("*callback never called* ");
 616		goto out;
 617
 618	case TRACE_SELFTEST_REGS_FOUND:
 619		if (supported)
 620			break;
 621		pr_cont("*callback received regs without arch support* ");
 622		goto out;
 623
 624	case TRACE_SELFTEST_REGS_NOT_FOUND:
 625		if (!supported)
 626			break;
 627		pr_cont("*callback received NULL regs* ");
 628		goto out;
 629	}
 630
 631	ret = 0;
 632out:
 633	ftrace_enabled = save_ftrace_enabled;
 634
 635	return ret;
 636}
 637
 638/*
 639 * Simple verification test of ftrace function tracer.
 640 * Enable ftrace, sleep 1/10 second, and then read the trace
 641 * buffer to see if all is in order.
 642 */
 643__init int
 644trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 645{
 646	int save_ftrace_enabled = ftrace_enabled;
 647	unsigned long count;
 648	int ret;
 649
 650#ifdef CONFIG_DYNAMIC_FTRACE
 651	if (ftrace_filter_param) {
 652		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
 653		return 0;
 654	}
 655#endif
 656
 657	/* make sure msleep has been recorded */
 658	msleep(1);
 659
 660	/* start the tracing */
 661	ftrace_enabled = 1;
 662
 663	ret = tracer_init(trace, tr);
 664	if (ret) {
 665		warn_failed_init_tracer(trace, ret);
 666		goto out;
 667	}
 668
 669	/* Sleep for a 1/10 of a second */
 670	msleep(100);
 671	/* stop the tracing. */
 672	tracing_stop();
 673	ftrace_enabled = 0;
 674
 675	/* check the trace buffer */
 676	ret = trace_test_buffer(&tr->trace_buffer, &count);
 677	trace->reset(tr);
 678	tracing_start();
 679
 680	if (!ret && !count) {
 681		printk(KERN_CONT ".. no entries found ..");
 682		ret = -1;
 683		goto out;
 684	}
 685
 686	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
 687						     DYN_FTRACE_TEST_NAME);
 688	if (ret)
 689		goto out;
 690
 691	ret = trace_selftest_function_recursion();
 692	if (ret)
 693		goto out;
 694
 695	ret = trace_selftest_function_regs();
 696 out:
 697	ftrace_enabled = save_ftrace_enabled;
 698
 699	/* kill ftrace totally if we failed */
 700	if (ret)
 701		ftrace_kill();
 702
 703	return ret;
 704}
 705#endif /* CONFIG_FUNCTION_TRACER */
 706
 707
 708#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 709
 710/* Maximum number of functions to trace before diagnosing a hang */
 711#define GRAPH_MAX_FUNC_TEST	100000000
 712
 713static unsigned int graph_hang_thresh;
 714
 715/* Wrap the real function entry probe to avoid possible hanging */
 716static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
 717{
 718	/* This is harmlessly racy, we want to approximately detect a hang */
 719	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
 720		ftrace_graph_stop();
 721		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
 722		if (ftrace_dump_on_oops) {
 723			ftrace_dump(DUMP_ALL);
 724			/* ftrace_dump() disables tracing */
 725			tracing_on();
 726		}
 727		return 0;
 728	}
 729
 730	return trace_graph_entry(trace);
 731}
 732
 733/*
 734 * Pretty much the same as for the function tracer from which the selftest
 735 * has been borrowed.
 736 */
 737__init int
 738trace_selftest_startup_function_graph(struct tracer *trace,
 739					struct trace_array *tr)
 740{
 741	int ret;
 742	unsigned long count;
 743
 744#ifdef CONFIG_DYNAMIC_FTRACE
 745	if (ftrace_filter_param) {
 746		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
 747		return 0;
 748	}
 749#endif
 750
 751	/*
 752	 * Simulate the init() callback but we attach a watchdog callback
 753	 * to detect and recover from possible hangs
 754	 */
 755	tracing_reset_online_cpus(&tr->trace_buffer);
 756	set_graph_array(tr);
 757	ret = register_ftrace_graph(&trace_graph_return,
 758				    &trace_graph_entry_watchdog);
 759	if (ret) {
 760		warn_failed_init_tracer(trace, ret);
 761		goto out;
 762	}
 763	tracing_start_cmdline_record();
 764
 765	/* Sleep for a 1/10 of a second */
 766	msleep(100);
 767
 768	/* Have we just recovered from a hang? */
 769	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
 770		tracing_selftest_disabled = true;
 771		ret = -1;
 772		goto out;
 773	}
 774
 775	tracing_stop();
 776
 777	/* check the trace buffer */
 778	ret = trace_test_buffer(&tr->trace_buffer, &count);
 779
 780	trace->reset(tr);
 781	tracing_start();
 782
 783	if (!ret && !count) {
 784		printk(KERN_CONT ".. no entries found ..");
 785		ret = -1;
 786		goto out;
 787	}
 788
 789	/* Don't test dynamic tracing, the function tracer already did */
 790
 791out:
 792	/* Stop it if we failed */
 793	if (ret)
 794		ftrace_graph_stop();
 795
 796	return ret;
 797}
 798#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 799
 800
 801#ifdef CONFIG_IRQSOFF_TRACER
 802int
 803trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 804{
 805	unsigned long save_max = tracing_max_latency;
 806	unsigned long count;
 807	int ret;
 808
 809	/* start the tracing */
 810	ret = tracer_init(trace, tr);
 811	if (ret) {
 812		warn_failed_init_tracer(trace, ret);
 813		return ret;
 814	}
 815
 816	/* reset the max latency */
 817	tracing_max_latency = 0;
 818	/* disable interrupts for a bit */
 819	local_irq_disable();
 820	udelay(100);
 821	local_irq_enable();
 822
 823	/*
 824	 * Stop the tracer to avoid a warning subsequent
 825	 * to buffer flipping failure because tracing_stop()
 826	 * disables the tr and max buffers, making flipping impossible
 827	 * in case of parallels max irqs off latencies.
 828	 */
 829	trace->stop(tr);
 830	/* stop the tracing. */
 831	tracing_stop();
 832	/* check both trace buffers */
 833	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 834	if (!ret)
 835		ret = trace_test_buffer(&tr->max_buffer, &count);
 836	trace->reset(tr);
 837	tracing_start();
 838
 839	if (!ret && !count) {
 840		printk(KERN_CONT ".. no entries found ..");
 841		ret = -1;
 842	}
 843
 844	tracing_max_latency = save_max;
 845
 846	return ret;
 847}
 848#endif /* CONFIG_IRQSOFF_TRACER */
 849
 850#ifdef CONFIG_PREEMPT_TRACER
 851int
 852trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 853{
 854	unsigned long save_max = tracing_max_latency;
 855	unsigned long count;
 856	int ret;
 857
 858	/*
 859	 * Now that the big kernel lock is no longer preemptible,
 860	 * and this is called with the BKL held, it will always
 861	 * fail. If preemption is already disabled, simply
 862	 * pass the test. When the BKL is removed, or becomes
 863	 * preemptible again, we will once again test this,
 864	 * so keep it in.
 865	 */
 866	if (preempt_count()) {
 867		printk(KERN_CONT "can not test ... force ");
 868		return 0;
 869	}
 870
 871	/* start the tracing */
 872	ret = tracer_init(trace, tr);
 873	if (ret) {
 874		warn_failed_init_tracer(trace, ret);
 875		return ret;
 876	}
 877
 878	/* reset the max latency */
 879	tracing_max_latency = 0;
 880	/* disable preemption for a bit */
 881	preempt_disable();
 882	udelay(100);
 883	preempt_enable();
 884
 885	/*
 886	 * Stop the tracer to avoid a warning subsequent
 887	 * to buffer flipping failure because tracing_stop()
 888	 * disables the tr and max buffers, making flipping impossible
 889	 * in case of parallel max preempt off latencies.
 890	 */
 891	trace->stop(tr);
 892	/* stop the tracing. */
 893	tracing_stop();
 894	/* check both trace buffers */
 895	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 896	if (!ret)
 897		ret = trace_test_buffer(&tr->max_buffer, &count);
 898	trace->reset(tr);
 899	tracing_start();
 900
 901	if (!ret && !count) {
 902		printk(KERN_CONT ".. no entries found ..");
 903		ret = -1;
 904	}
 905
 906	tracing_max_latency = save_max;
 907
 908	return ret;
 909}
 910#endif /* CONFIG_PREEMPT_TRACER */
 911
 912#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
 913int
 914trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
 915{
 916	unsigned long save_max = tracing_max_latency;
 917	unsigned long count;
 918	int ret;
 919
 920	/*
 921	 * Now that the big kernel lock is no longer preemptible,
 922	 * and this is called with the BKL held, it will always
 923	 * fail. If preemption is already disabled, simply
 924	 * pass the test. When the BKL is removed, or becomes
 925	 * preemptible again, we will once again test this,
 926	 * so keep it in.
 927	 */
 928	if (preempt_count()) {
 929		printk(KERN_CONT "can not test ... force ");
 930		return 0;
 931	}
 932
 933	/* start the tracing */
 934	ret = tracer_init(trace, tr);
 935	if (ret) {
 936		warn_failed_init_tracer(trace, ret);
 937		goto out_no_start;
 938	}
 939
 940	/* reset the max latency */
 941	tracing_max_latency = 0;
 942
 943	/* disable preemption and interrupts for a bit */
 944	preempt_disable();
 945	local_irq_disable();
 946	udelay(100);
 947	preempt_enable();
 948	/* reverse the order of preempt vs irqs */
 949	local_irq_enable();
 950
 951	/*
 952	 * Stop the tracer to avoid a warning subsequent
 953	 * to buffer flipping failure because tracing_stop()
 954	 * disables the tr and max buffers, making flipping impossible
 955	 * in case of parallel max irqs/preempt off latencies.
 956	 */
 957	trace->stop(tr);
 958	/* stop the tracing. */
 959	tracing_stop();
 960	/* check both trace buffers */
 961	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 962	if (ret)
 963		goto out;
 964
 965	ret = trace_test_buffer(&tr->max_buffer, &count);
 966	if (ret)
 967		goto out;
 968
 969	if (!ret && !count) {
 970		printk(KERN_CONT ".. no entries found ..");
 971		ret = -1;
 972		goto out;
 973	}
 974
 975	/* do the test by disabling interrupts first this time */
 976	tracing_max_latency = 0;
 977	tracing_start();
 978	trace->start(tr);
 979
 980	preempt_disable();
 981	local_irq_disable();
 982	udelay(100);
 983	preempt_enable();
 984	/* reverse the order of preempt vs irqs */
 985	local_irq_enable();
 986
 987	trace->stop(tr);
 988	/* stop the tracing. */
 989	tracing_stop();
 990	/* check both trace buffers */
 991	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 992	if (ret)
 993		goto out;
 994
 995	ret = trace_test_buffer(&tr->max_buffer, &count);
 996
 997	if (!ret && !count) {
 998		printk(KERN_CONT ".. no entries found ..");
 999		ret = -1;
1000		goto out;
1001	}
1002
1003out:
1004	tracing_start();
1005out_no_start:
1006	trace->reset(tr);
1007	tracing_max_latency = save_max;
1008
1009	return ret;
1010}
1011#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
1012
1013#ifdef CONFIG_NOP_TRACER
1014int
1015trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
1016{
1017	/* What could possibly go wrong? */
1018	return 0;
1019}
1020#endif
1021
1022#ifdef CONFIG_SCHED_TRACER
1023static int trace_wakeup_test_thread(void *data)
1024{
1025	/* Make this a -deadline thread */
1026	static const struct sched_attr attr = {
1027		.sched_policy = SCHED_DEADLINE,
1028		.sched_runtime = 100000ULL,
1029		.sched_deadline = 10000000ULL,
1030		.sched_period = 10000000ULL
1031	};
1032	struct completion *x = data;
1033
1034	sched_setattr(current, &attr);
1035
1036	/* Make it know we have a new prio */
1037	complete(x);
1038
1039	/* now go to sleep and let the test wake us up */
1040	set_current_state(TASK_INTERRUPTIBLE);
1041	schedule();
1042
1043	complete(x);
1044
1045	/* we are awake, now wait to disappear */
1046	while (!kthread_should_stop()) {
1047		/*
1048		 * This will likely be the system top priority
1049		 * task, do short sleeps to let others run.
1050		 */
1051		msleep(100);
1052	}
1053
1054	return 0;
1055}
1056
1057int
1058trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
1059{
1060	unsigned long save_max = tracing_max_latency;
1061	struct task_struct *p;
1062	struct completion is_ready;
1063	unsigned long count;
1064	int ret;
1065
1066	init_completion(&is_ready);
1067
1068	/* create a -deadline thread */
1069	p = kthread_run(trace_wakeup_test_thread, &is_ready, "ftrace-test");
1070	if (IS_ERR(p)) {
1071		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
1072		return -1;
1073	}
1074
1075	/* make sure the thread is running at -deadline policy */
1076	wait_for_completion(&is_ready);
1077
1078	/* start the tracing */
1079	ret = tracer_init(trace, tr);
1080	if (ret) {
1081		warn_failed_init_tracer(trace, ret);
1082		return ret;
1083	}
1084
1085	/* reset the max latency */
1086	tracing_max_latency = 0;
1087
1088	while (p->on_rq) {
1089		/*
1090		 * Sleep to make sure the -deadline thread is asleep too.
1091		 * On virtual machines we can't rely on timings,
1092		 * but we want to make sure this test still works.
1093		 */
1094		msleep(100);
1095	}
1096
1097	init_completion(&is_ready);
1098
1099	wake_up_process(p);
1100
1101	/* Wait for the task to wake up */
1102	wait_for_completion(&is_ready);
1103
1104	/* stop the tracing. */
1105	tracing_stop();
1106	/* check both trace buffers */
1107	ret = trace_test_buffer(&tr->trace_buffer, NULL);
1108	printk("ret = %d\n", ret);
1109	if (!ret)
1110		ret = trace_test_buffer(&tr->max_buffer, &count);
1111
1112
1113	trace->reset(tr);
1114	tracing_start();
1115
1116	tracing_max_latency = save_max;
1117
1118	/* kill the thread */
1119	kthread_stop(p);
1120
1121	if (!ret && !count) {
1122		printk(KERN_CONT ".. no entries found ..");
1123		ret = -1;
1124	}
1125
1126	return ret;
1127}
1128#endif /* CONFIG_SCHED_TRACER */
1129
1130#ifdef CONFIG_CONTEXT_SWITCH_TRACER
1131int
1132trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
1133{
1134	unsigned long count;
1135	int ret;
1136
1137	/* start the tracing */
1138	ret = tracer_init(trace, tr);
1139	if (ret) {
1140		warn_failed_init_tracer(trace, ret);
1141		return ret;
1142	}
1143
1144	/* Sleep for a 1/10 of a second */
1145	msleep(100);
1146	/* stop the tracing. */
1147	tracing_stop();
1148	/* check the trace buffer */
1149	ret = trace_test_buffer(&tr->trace_buffer, &count);
1150	trace->reset(tr);
1151	tracing_start();
1152
1153	if (!ret && !count) {
1154		printk(KERN_CONT ".. no entries found ..");
1155		ret = -1;
1156	}
1157
1158	return ret;
1159}
1160#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
1161
1162#ifdef CONFIG_BRANCH_TRACER
1163int
1164trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
1165{
1166	unsigned long count;
1167	int ret;
1168
1169	/* start the tracing */
1170	ret = tracer_init(trace, tr);
1171	if (ret) {
1172		warn_failed_init_tracer(trace, ret);
1173		return ret;
1174	}
1175
1176	/* Sleep for a 1/10 of a second */
1177	msleep(100);
1178	/* stop the tracing. */
1179	tracing_stop();
1180	/* check the trace buffer */
1181	ret = trace_test_buffer(&tr->trace_buffer, &count);
1182	trace->reset(tr);
1183	tracing_start();
1184
1185	if (!ret && !count) {
1186		printk(KERN_CONT ".. no entries found ..");
1187		ret = -1;
1188	}
1189
1190	return ret;
1191}
1192#endif /* CONFIG_BRANCH_TRACER */
1193
v6.8
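The same file again, this time as of v6.8. Differences visible against the v3.15 listing above include: callbacks now receive a struct ftrace_regs pointer instead of struct pt_regs (unwrapped via ftrace_get_regs()), the buffer type is struct array_buffer rather than struct trace_buffer, the FTRACE_OPS_FL_RECURSION_SAFE flag is gone and FTRACE_OPS_FL_RECURSION carries the opposite sense, the per-trace_array max_latency and max_lock replace the old tracing_max_latency/ftrace_max_lock globals, the function-graph tracer is registered through a struct fgraph_ops, and a CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS test was added to the function-graph selftest.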
   1// SPDX-License-Identifier: GPL-2.0
   2/* Include in trace.c */
   3
   4#include <uapi/linux/sched/types.h>
   5#include <linux/stringify.h>
   6#include <linux/kthread.h>
   7#include <linux/delay.h>
   8#include <linux/slab.h>
   9
  10static inline int trace_valid_entry(struct trace_entry *entry)
  11{
  12	switch (entry->type) {
  13	case TRACE_FN:
  14	case TRACE_CTX:
  15	case TRACE_WAKE:
  16	case TRACE_STACK:
  17	case TRACE_PRINT:
  18	case TRACE_BRANCH:
  19	case TRACE_GRAPH_ENT:
  20	case TRACE_GRAPH_RET:
  21		return 1;
  22	}
  23	return 0;
  24}
  25
  26static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
  27{
  28	struct ring_buffer_event *event;
  29	struct trace_entry *entry;
  30	unsigned int loops = 0;
  31
  32	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
  33		entry = ring_buffer_event_data(event);
  34
  35		/*
  36		 * The ring buffer is a size of trace_buf_size, if
  37		 * we loop more than the size, there's something wrong
  38		 * with the ring buffer.
  39		 */
  40		if (loops++ > trace_buf_size) {
  41			printk(KERN_CONT ".. bad ring buffer ");
  42			goto failed;
  43		}
  44		if (!trace_valid_entry(entry)) {
  45			printk(KERN_CONT ".. invalid entry %d ",
  46				entry->type);
  47			goto failed;
  48		}
  49	}
  50	return 0;
  51
  52 failed:
  53	/* disable tracing */
  54	tracing_disabled = 1;
  55	printk(KERN_CONT ".. corrupted trace buffer .. ");
  56	return -1;
  57}
  58
  59/*
  60 * Test the trace buffer to see if all the elements
  61 * are still sane.
  62 */
  63static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
  64{
  65	unsigned long flags, cnt = 0;
  66	int cpu, ret = 0;
  67
  68	/* Don't allow flipping of max traces now */
  69	local_irq_save(flags);
  70	arch_spin_lock(&buf->tr->max_lock);
  71
  72	cnt = ring_buffer_entries(buf->buffer);
  73
  74	/*
  75	 * The trace_test_buffer_cpu runs a while loop to consume all data.
  76	 * If the calling tracer is broken, and is constantly filling
  77	 * the buffer, this will run forever, and hard lock the box.
  78	 * We disable the ring buffer while we do this test to prevent
  79	 * a hard lock up.
  80	 */
  81	tracing_off();
  82	for_each_possible_cpu(cpu) {
  83		ret = trace_test_buffer_cpu(buf, cpu);
  84		if (ret)
  85			break;
  86	}
  87	tracing_on();
  88	arch_spin_unlock(&buf->tr->max_lock);
  89	local_irq_restore(flags);
  90
  91	if (count)
  92		*count = cnt;
  93
  94	return ret;
  95}
  96
  97static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
  98{
  99	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
 100		trace->name, init_ret);
 101}
 102#ifdef CONFIG_FUNCTION_TRACER
 103
 104#ifdef CONFIG_DYNAMIC_FTRACE
 105
 106static int trace_selftest_test_probe1_cnt;
 107static void trace_selftest_test_probe1_func(unsigned long ip,
 108					    unsigned long pip,
 109					    struct ftrace_ops *op,
 110					    struct ftrace_regs *fregs)
 111{
 112	trace_selftest_test_probe1_cnt++;
 113}
 114
 115static int trace_selftest_test_probe2_cnt;
 116static void trace_selftest_test_probe2_func(unsigned long ip,
 117					    unsigned long pip,
 118					    struct ftrace_ops *op,
 119					    struct ftrace_regs *fregs)
 120{
 121	trace_selftest_test_probe2_cnt++;
 122}
 123
 124static int trace_selftest_test_probe3_cnt;
 125static void trace_selftest_test_probe3_func(unsigned long ip,
 126					    unsigned long pip,
 127					    struct ftrace_ops *op,
 128					    struct ftrace_regs *fregs)
 129{
 130	trace_selftest_test_probe3_cnt++;
 131}
 132
 133static int trace_selftest_test_global_cnt;
 134static void trace_selftest_test_global_func(unsigned long ip,
 135					    unsigned long pip,
 136					    struct ftrace_ops *op,
 137					    struct ftrace_regs *fregs)
 138{
 139	trace_selftest_test_global_cnt++;
 140}
 141
 142static int trace_selftest_test_dyn_cnt;
 143static void trace_selftest_test_dyn_func(unsigned long ip,
 144					 unsigned long pip,
 145					 struct ftrace_ops *op,
 146					 struct ftrace_regs *fregs)
 147{
 148	trace_selftest_test_dyn_cnt++;
 149}
 150
 151static struct ftrace_ops test_probe1 = {
 152	.func			= trace_selftest_test_probe1_func,
 153};
 154
 155static struct ftrace_ops test_probe2 = {
 156	.func			= trace_selftest_test_probe2_func,
 157};
 158
 159static struct ftrace_ops test_probe3 = {
 160	.func			= trace_selftest_test_probe3_func,
 161};
 162
 163static void print_counts(void)
 164{
 165	printk("(%d %d %d %d %d) ",
 166	       trace_selftest_test_probe1_cnt,
 167	       trace_selftest_test_probe2_cnt,
 168	       trace_selftest_test_probe3_cnt,
 169	       trace_selftest_test_global_cnt,
 170	       trace_selftest_test_dyn_cnt);
 171}
 172
 173static void reset_counts(void)
 174{
 175	trace_selftest_test_probe1_cnt = 0;
 176	trace_selftest_test_probe2_cnt = 0;
 177	trace_selftest_test_probe3_cnt = 0;
 178	trace_selftest_test_global_cnt = 0;
 179	trace_selftest_test_dyn_cnt = 0;
 180}
 181
 182static int trace_selftest_ops(struct trace_array *tr, int cnt)
 183{
 184	int save_ftrace_enabled = ftrace_enabled;
 185	struct ftrace_ops *dyn_ops;
 186	char *func1_name;
 187	char *func2_name;
 188	int len1;
 189	int len2;
 190	int ret = -1;
 191
 192	printk(KERN_CONT "PASSED\n");
 193	pr_info("Testing dynamic ftrace ops #%d: ", cnt);
 194
 195	ftrace_enabled = 1;
 196	reset_counts();
 197
 198	/* Handle PPC64 '.' name */
 199	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 200	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
 201	len1 = strlen(func1_name);
 202	len2 = strlen(func2_name);
 203
 204	/*
 205	 * Probe 1 will trace function 1.
 206	 * Probe 2 will trace function 2.
 207	 * Probe 3 will trace functions 1 and 2.
 208	 */
 209	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
 210	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
 211	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
 212	ftrace_set_filter(&test_probe3, func2_name, len2, 0);
 213
 214	register_ftrace_function(&test_probe1);
 215	register_ftrace_function(&test_probe2);
 216	register_ftrace_function(&test_probe3);
 217	/* First time we are running with main function */
 218	if (cnt > 1) {
 219		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
 220		register_ftrace_function(tr->ops);
 221	}
 222
 223	DYN_FTRACE_TEST_NAME();
 224
 225	print_counts();
 226
 227	if (trace_selftest_test_probe1_cnt != 1)
 228		goto out;
 229	if (trace_selftest_test_probe2_cnt != 0)
 230		goto out;
 231	if (trace_selftest_test_probe3_cnt != 1)
 232		goto out;
 233	if (cnt > 1) {
 234		if (trace_selftest_test_global_cnt == 0)
 235			goto out;
 236	}
 237
 238	DYN_FTRACE_TEST_NAME2();
 239
 240	print_counts();
 241
 242	if (trace_selftest_test_probe1_cnt != 1)
 243		goto out;
 244	if (trace_selftest_test_probe2_cnt != 1)
 245		goto out;
 246	if (trace_selftest_test_probe3_cnt != 2)
 247		goto out;
 248
 249	/* Add a dynamic probe */
 250	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
 251	if (!dyn_ops) {
 252		printk("MEMORY ERROR ");
 253		goto out;
 254	}
 255
 256	dyn_ops->func = trace_selftest_test_dyn_func;
 257
 258	register_ftrace_function(dyn_ops);
 259
 260	trace_selftest_test_global_cnt = 0;
 261
 262	DYN_FTRACE_TEST_NAME();
 263
 264	print_counts();
 265
 266	if (trace_selftest_test_probe1_cnt != 2)
 267		goto out_free;
 268	if (trace_selftest_test_probe2_cnt != 1)
 269		goto out_free;
 270	if (trace_selftest_test_probe3_cnt != 3)
 271		goto out_free;
 272	if (cnt > 1) {
 273		if (trace_selftest_test_global_cnt == 0)
 274			goto out_free;
 275	}
 276	if (trace_selftest_test_dyn_cnt == 0)
 277		goto out_free;
 278
 279	DYN_FTRACE_TEST_NAME2();
 280
 281	print_counts();
 282
 283	if (trace_selftest_test_probe1_cnt != 2)
 284		goto out_free;
 285	if (trace_selftest_test_probe2_cnt != 2)
 286		goto out_free;
 287	if (trace_selftest_test_probe3_cnt != 4)
 288		goto out_free;
 289
 290	/* Remove trace function from probe 3 */
 291	func1_name = "!" __stringify(DYN_FTRACE_TEST_NAME);
 292	len1 = strlen(func1_name);
 293
 294	ftrace_set_filter(&test_probe3, func1_name, len1, 0);
 295
 296	DYN_FTRACE_TEST_NAME();
 297
 298	print_counts();
 299
 300	if (trace_selftest_test_probe1_cnt != 3)
 301		goto out_free;
 302	if (trace_selftest_test_probe2_cnt != 2)
 303		goto out_free;
 304	if (trace_selftest_test_probe3_cnt != 4)
 305		goto out_free;
 306	if (cnt > 1) {
 307		if (trace_selftest_test_global_cnt == 0)
 308			goto out_free;
 309	}
 310	if (trace_selftest_test_dyn_cnt == 0)
 311		goto out_free;
 312
 313	DYN_FTRACE_TEST_NAME2();
 314
 315	print_counts();
 316
 317	if (trace_selftest_test_probe1_cnt != 3)
 318		goto out_free;
 319	if (trace_selftest_test_probe2_cnt != 3)
 320		goto out_free;
 321	if (trace_selftest_test_probe3_cnt != 5)
 322		goto out_free;
 323
 324	ret = 0;
 325 out_free:
 326	unregister_ftrace_function(dyn_ops);
 327	kfree(dyn_ops);
 328
 329 out:
 330	/* Purposely unregister in the same order */
 331	unregister_ftrace_function(&test_probe1);
 332	unregister_ftrace_function(&test_probe2);
 333	unregister_ftrace_function(&test_probe3);
 334	if (cnt > 1)
 335		unregister_ftrace_function(tr->ops);
 336	ftrace_reset_array_ops(tr);
 337
 338	/* Make sure everything is off */
 339	reset_counts();
 340	DYN_FTRACE_TEST_NAME();
 341	DYN_FTRACE_TEST_NAME();
 342
 343	if (trace_selftest_test_probe1_cnt ||
 344	    trace_selftest_test_probe2_cnt ||
 345	    trace_selftest_test_probe3_cnt ||
 346	    trace_selftest_test_global_cnt ||
 347	    trace_selftest_test_dyn_cnt)
 348		ret = -1;
 349
 350	ftrace_enabled = save_ftrace_enabled;
 351
 352	return ret;
 353}
 354
 355/* Test dynamic code modification and ftrace filters */
 356static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 357						  struct trace_array *tr,
 358						  int (*func)(void))
 359{
 360	int save_ftrace_enabled = ftrace_enabled;
 361	unsigned long count;
 362	char *func_name;
 363	int ret;
 364
 365	/* The ftrace test PASSED */
 366	printk(KERN_CONT "PASSED\n");
 367	pr_info("Testing dynamic ftrace: ");
 368
 369	/* enable tracing, and record the filter function */
 370	ftrace_enabled = 1;
 371
 372	/* passed in by parameter to fool gcc from optimizing */
 373	func();
 374
 375	/*
 376	 * Some archs *cough*PowerPC*cough* add characters to the
 377	 * start of the function names. We simply put a '*' to
 378	 * accommodate them.
 379	 */
 380	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 381
 382	/* filter only on our function */
 383	ftrace_set_global_filter(func_name, strlen(func_name), 1);
 384
 385	/* enable tracing */
 386	ret = tracer_init(trace, tr);
 387	if (ret) {
 388		warn_failed_init_tracer(trace, ret);
 389		goto out;
 390	}
 391
 392	/* Sleep for a 1/10 of a second */
 393	msleep(100);
 394
 395	/* we should have nothing in the buffer */
 396	ret = trace_test_buffer(&tr->array_buffer, &count);
 397	if (ret)
 398		goto out;
 399
 400	if (count) {
 401		ret = -1;
 402		printk(KERN_CONT ".. filter did not filter .. ");
 403		goto out;
 404	}
 405
 406	/* call our function again */
 407	func();
 408
 409	/* sleep again */
 410	msleep(100);
 411
 412	/* stop the tracing. */
 413	tracing_stop();
 414	ftrace_enabled = 0;
 415
 416	/* check the trace buffer */
 417	ret = trace_test_buffer(&tr->array_buffer, &count);
 418
 419	ftrace_enabled = 1;
 420	tracing_start();
 421
 422	/* we should only have one item */
 423	if (!ret && count != 1) {
 424		trace->reset(tr);
 425		printk(KERN_CONT ".. filter failed count=%ld ..", count);
 426		ret = -1;
 427		goto out;
 428	}
 429
 430	/* Test the ops with global tracing running */
 431	ret = trace_selftest_ops(tr, 1);
 432	trace->reset(tr);
 433
 434 out:
 435	ftrace_enabled = save_ftrace_enabled;
 436
 437	/* Enable tracing on all functions again */
 438	ftrace_set_global_filter(NULL, 0, 1);
 439
 440	/* Test the ops with global tracing off */
 441	if (!ret)
 442		ret = trace_selftest_ops(tr, 2);
 443
 444	return ret;
 445}
 446
 447static int trace_selftest_recursion_cnt;
 448static void trace_selftest_test_recursion_func(unsigned long ip,
 449					       unsigned long pip,
 450					       struct ftrace_ops *op,
 451					       struct ftrace_regs *fregs)
 452{
 453	/*
 454	 * This function is registered without the recursion safe flag.
 455	 * The ftrace infrastructure should provide the recursion
 456	 * protection. If not, this will crash the kernel!
 457	 */
 458	if (trace_selftest_recursion_cnt++ > 10)
 459		return;
 460	DYN_FTRACE_TEST_NAME();
 461}
 462
 463static void trace_selftest_test_recursion_safe_func(unsigned long ip,
 464						    unsigned long pip,
 465						    struct ftrace_ops *op,
 466						    struct ftrace_regs *fregs)
 467{
 468	/*
 469	 * We said we would provide our own recursion. By calling
 470	 * this function again, we should recurse back into this function
 471	 * and count again. But this only happens if the arch supports
 472	 * all of ftrace features and nothing else is using the function
 473	 * tracing utility.
 474	 */
 475	if (trace_selftest_recursion_cnt++)
 476		return;
 477	DYN_FTRACE_TEST_NAME();
 478}
 479
 480static struct ftrace_ops test_rec_probe = {
 481	.func			= trace_selftest_test_recursion_func,
 482	.flags			= FTRACE_OPS_FL_RECURSION,
 483};
 484
 485static struct ftrace_ops test_recsafe_probe = {
 486	.func			= trace_selftest_test_recursion_safe_func,
 487};
 488
 489static int
 490trace_selftest_function_recursion(void)
 491{
 492	int save_ftrace_enabled = ftrace_enabled;
 493	char *func_name;
 494	int len;
 495	int ret;
 496
 497	/* The previous test PASSED */
 498	pr_cont("PASSED\n");
 499	pr_info("Testing ftrace recursion: ");
 500
 501
 502	/* enable tracing, and record the filter function */
 503	ftrace_enabled = 1;
 504
 505	/* Handle PPC64 '.' name */
 506	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 507	len = strlen(func_name);
 508
 509	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
 510	if (ret) {
 511		pr_cont("*Could not set filter* ");
 512		goto out;
 513	}
 514
 515	ret = register_ftrace_function(&test_rec_probe);
 516	if (ret) {
 517		pr_cont("*could not register callback* ");
 518		goto out;
 519	}
 520
 521	DYN_FTRACE_TEST_NAME();
 522
 523	unregister_ftrace_function(&test_rec_probe);
 524
 525	ret = -1;
 526	/*
 527	 * Recursion allows for transitions between context,
 528	 * and may call the callback twice.
 529	 */
 530	if (trace_selftest_recursion_cnt != 1 &&
 531	    trace_selftest_recursion_cnt != 2) {
 532		pr_cont("*callback not called once (or twice) (%d)* ",
 533			trace_selftest_recursion_cnt);
 534		goto out;
 535	}
 536
 537	trace_selftest_recursion_cnt = 1;
 538
 539	pr_cont("PASSED\n");
 540	pr_info("Testing ftrace recursion safe: ");
 541
 542	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
 543	if (ret) {
 544		pr_cont("*Could not set filter* ");
 545		goto out;
 546	}
 547
 548	ret = register_ftrace_function(&test_recsafe_probe);
 549	if (ret) {
 550		pr_cont("*could not register callback* ");
 551		goto out;
 552	}
 553
 554	DYN_FTRACE_TEST_NAME();
 555
 556	unregister_ftrace_function(&test_recsafe_probe);
 557
 558	ret = -1;
 559	if (trace_selftest_recursion_cnt != 2) {
 560		pr_cont("*callback not called expected 2 times (%d)* ",
 561			trace_selftest_recursion_cnt);
 562		goto out;
 563	}
 564
 565	ret = 0;
 566out:
 567	ftrace_enabled = save_ftrace_enabled;
 568
 569	return ret;
 570}
 571#else
 572# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
 573# define trace_selftest_function_recursion() ({ 0; })
 574#endif /* CONFIG_DYNAMIC_FTRACE */
 575
 576static enum {
 577	TRACE_SELFTEST_REGS_START,
 578	TRACE_SELFTEST_REGS_FOUND,
 579	TRACE_SELFTEST_REGS_NOT_FOUND,
 580} trace_selftest_regs_stat;
 581
 582static void trace_selftest_test_regs_func(unsigned long ip,
 583					  unsigned long pip,
 584					  struct ftrace_ops *op,
 585					  struct ftrace_regs *fregs)
 586{
 587	struct pt_regs *regs = ftrace_get_regs(fregs);
 588
 589	if (regs)
 590		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
 591	else
 592		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
 593}
 594
 595static struct ftrace_ops test_regs_probe = {
 596	.func		= trace_selftest_test_regs_func,
 597	.flags		= FTRACE_OPS_FL_SAVE_REGS,
 598};
 599
 600static int
 601trace_selftest_function_regs(void)
 602{
 603	int save_ftrace_enabled = ftrace_enabled;
 604	char *func_name;
 605	int len;
 606	int ret;
 607	int supported = 0;
 608
 609#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 610	supported = 1;
 611#endif
 612
 613	/* The previous test PASSED */
 614	pr_cont("PASSED\n");
 615	pr_info("Testing ftrace regs%s: ",
 616		!supported ? "(no arch support)" : "");
 617
 618	/* enable tracing, and record the filter function */
 619	ftrace_enabled = 1;
 620
 621	/* Handle PPC64 '.' name */
 622	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 623	len = strlen(func_name);
 624
 625	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
 626	/*
 627	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
 628	 * This test really doesn't care.
 629	 */
 630	if (ret && ret != -ENODEV) {
 631		pr_cont("*Could not set filter* ");
 632		goto out;
 633	}
 634
 635	ret = register_ftrace_function(&test_regs_probe);
 636	/*
 637	 * Now if the arch does not support passing regs, then this should
 638	 * have failed.
 639	 */
 640	if (!supported) {
 641		if (!ret) {
 642			pr_cont("*registered save-regs without arch support* ");
 643			goto out;
 644		}
 645		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
 646		ret = register_ftrace_function(&test_regs_probe);
 647	}
 648	if (ret) {
 649		pr_cont("*could not register callback* ");
 650		goto out;
 651	}
 652
 653
 654	DYN_FTRACE_TEST_NAME();
 655
 656	unregister_ftrace_function(&test_regs_probe);
 657
 658	ret = -1;
 659
 660	switch (trace_selftest_regs_stat) {
 661	case TRACE_SELFTEST_REGS_START:
 662		pr_cont("*callback never called* ");
 663		goto out;
 664
 665	case TRACE_SELFTEST_REGS_FOUND:
 666		if (supported)
 667			break;
 668		pr_cont("*callback received regs without arch support* ");
 669		goto out;
 670
 671	case TRACE_SELFTEST_REGS_NOT_FOUND:
 672		if (!supported)
 673			break;
 674		pr_cont("*callback received NULL regs* ");
 675		goto out;
 676	}
 677
 678	ret = 0;
 679out:
 680	ftrace_enabled = save_ftrace_enabled;
 681
 682	return ret;
 683}
 684
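As a side note, here is a minimal, hypothetical sketch of how a callback in the style of test_regs_probe above could be hooked onto a single function. It is not part of trace_selftest.c; the names my_hits, my_callback, my_ops, my_hook_init and my_func are invented for illustration, and only calls that already appear in this file (ftrace_get_regs, ftrace_set_filter, register_ftrace_function, FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED) are used:

#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/string.h>

static unsigned long my_hits;

/* Same callback signature as the test probes above */
static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* regs may be NULL when the architecture cannot supply them */
	struct pt_regs *regs = ftrace_get_regs(fregs);

	if (regs)
		my_hits++;
}

static struct ftrace_ops my_ops = {
	.func	= my_callback,
	.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
};

static int __init my_hook_init(void)
{
	/* the leading '*' copes with arches that prefix function names */
	ftrace_set_filter(&my_ops, "*my_func", strlen("*my_func"), 1);
	return register_ftrace_function(&my_ops);
}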
 685/*
 686 * Simple verification test of ftrace function tracer.
 687 * Enable ftrace, sleep 1/10 second, and then read the trace
 688 * buffer to see if all is in order.
 689 */
 690__init int
 691trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 692{
 693	int save_ftrace_enabled = ftrace_enabled;
 694	unsigned long count;
 695	int ret;
 696
 697#ifdef CONFIG_DYNAMIC_FTRACE
 698	if (ftrace_filter_param) {
 699		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
 700		return 0;
 701	}
 702#endif
 703
 704	/* make sure msleep has been recorded */
 705	msleep(1);
 706
 707	/* start the tracing */
 708	ftrace_enabled = 1;
 709
 710	ret = tracer_init(trace, tr);
 711	if (ret) {
 712		warn_failed_init_tracer(trace, ret);
 713		goto out;
 714	}
 715
 716	/* Sleep for a 1/10 of a second */
 717	msleep(100);
 718	/* stop the tracing. */
 719	tracing_stop();
 720	ftrace_enabled = 0;
 721
 722	/* check the trace buffer */
 723	ret = trace_test_buffer(&tr->array_buffer, &count);
 724
 725	ftrace_enabled = 1;
 726	trace->reset(tr);
 727	tracing_start();
 728
 729	if (!ret && !count) {
 730		printk(KERN_CONT ".. no entries found ..");
 731		ret = -1;
 732		goto out;
 733	}
 734
 735	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
 736						     DYN_FTRACE_TEST_NAME);
 737	if (ret)
 738		goto out;
 739
 740	ret = trace_selftest_function_recursion();
 741	if (ret)
 742		goto out;
 743
 744	ret = trace_selftest_function_regs();
 745 out:
 746	ftrace_enabled = save_ftrace_enabled;
 747
 748	/* kill ftrace totally if we failed */
 749	if (ret)
 750		ftrace_kill();
 751
 752	return ret;
 753}
 754#endif /* CONFIG_FUNCTION_TRACER */
 755
 756
 757#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 758
 759/* Maximum number of functions to trace before diagnosing a hang */
 760#define GRAPH_MAX_FUNC_TEST	100000000
 761
 762static unsigned int graph_hang_thresh;
 763
 764/* Wrap the real function entry probe to avoid possible hanging */
 765static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
 766{
 767	/* This is harmlessly racy, we want to approximately detect a hang */
 768	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
 769		ftrace_graph_stop();
 770		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
 771		if (ftrace_dump_on_oops) {
 772			ftrace_dump(DUMP_ALL);
 773			/* ftrace_dump() disables tracing */
 774			tracing_on();
 775		}
 776		return 0;
 777	}
 778
 779	return trace_graph_entry(trace);
 780}
 781
 782static struct fgraph_ops fgraph_ops __initdata  = {
 783	.entryfunc		= &trace_graph_entry_watchdog,
 784	.retfunc		= &trace_graph_return,
 785};
 786
 787#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
 788static struct ftrace_ops direct;
 789#endif
 790
 791/*
 792 * Pretty much the same as for the function tracer from which the selftest
 793 * has been borrowed.
 794 */
 795__init int
 796trace_selftest_startup_function_graph(struct tracer *trace,
 797					struct trace_array *tr)
 798{
 799	int ret;
 800	unsigned long count;
 801	char *func_name __maybe_unused;
 802
 803#ifdef CONFIG_DYNAMIC_FTRACE
 804	if (ftrace_filter_param) {
 805		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
 806		return 0;
 807	}
 808#endif
 809
 810	/*
 811	 * Simulate the init() callback but we attach a watchdog callback
 812	 * to detect and recover from possible hangs
 813	 */
 814	tracing_reset_online_cpus(&tr->array_buffer);
 815	set_graph_array(tr);
 816	ret = register_ftrace_graph(&fgraph_ops);
 817	if (ret) {
 818		warn_failed_init_tracer(trace, ret);
 819		goto out;
 820	}
 821	tracing_start_cmdline_record();
 822
 823	/* Sleep for a 1/10 of a second */
 824	msleep(100);
 825
 826	/* Have we just recovered from a hang? */
 827	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
 828		disable_tracing_selftest("recovering from a hang");
 829		ret = -1;
 830		goto out;
 831	}
 832
 833	tracing_stop();
 834
 835	/* check the trace buffer */
 836	ret = trace_test_buffer(&tr->array_buffer, &count);
 837
 838	/* Need to also simulate the tr->reset to remove this fgraph_ops */
 839	tracing_stop_cmdline_record();
 840	unregister_ftrace_graph(&fgraph_ops);
 841
 842	tracing_start();
 843
 844	if (!ret && !count) {
 845		printk(KERN_CONT ".. no entries found ..");
 846		ret = -1;
 847		goto out;
 848	}
 849
 850#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
 851	/*
 852	 * These tests can take some time to run. Make sure on non PREEMPT
 853	 * kernels, we do not trigger the softlockup detector.
 854	 */
 855	cond_resched();
 856
 857	tracing_reset_online_cpus(&tr->array_buffer);
 858	set_graph_array(tr);
 859
 860	/*
 861	 * Some archs *cough*PowerPC*cough* add characters to the
 862	 * start of the function names. We simply put a '*' to
 863	 * accommodate them.
 864	 */
 865	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 866	ftrace_set_global_filter(func_name, strlen(func_name), 1);
 867
 868	/*
 869	 * Register direct function together with graph tracer
 870	 * and make sure we get graph trace.
 871	 */
 872	ftrace_set_filter_ip(&direct, (unsigned long)DYN_FTRACE_TEST_NAME, 0, 0);
 873	ret = register_ftrace_direct(&direct,
 874				     (unsigned long)ftrace_stub_direct_tramp);
 875	if (ret)
 876		goto out;
 877
 878	cond_resched();
 879
 880	ret = register_ftrace_graph(&fgraph_ops);
 881	if (ret) {
 882		warn_failed_init_tracer(trace, ret);
 883		goto out;
 884	}
 885
 886	DYN_FTRACE_TEST_NAME();
 887
 888	count = 0;
 889
 890	tracing_stop();
 891	/* check the trace buffer */
 892	ret = trace_test_buffer(&tr->array_buffer, &count);
 893
 894	unregister_ftrace_graph(&fgraph_ops);
 895
 896	ret = unregister_ftrace_direct(&direct,
 897				       (unsigned long)ftrace_stub_direct_tramp,
 898				       true);
 899	if (ret)
 900		goto out;
 901
 902	cond_resched();
 903
 904	tracing_start();
 905
 906	if (!ret && !count) {
 907		ret = -1;
 908		goto out;
 909	}
 910
 911	/* Enable tracing on all functions again */
 912	ftrace_set_global_filter(NULL, 0, 1);
 913#endif
 914
 915	/* Don't test dynamic tracing, the function tracer already did */
 916out:
 917	/* Stop it if we failed */
 918	if (ret)
 919		ftrace_graph_stop();
 920
 921	return ret;
 922}
 923#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 924
 925
 926#ifdef CONFIG_IRQSOFF_TRACER
 927int
 928trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 929{
 930	unsigned long save_max = tr->max_latency;
 931	unsigned long count;
 932	int ret;
 933
 934	/* start the tracing */
 935	ret = tracer_init(trace, tr);
 936	if (ret) {
 937		warn_failed_init_tracer(trace, ret);
 938		return ret;
 939	}
 940
 941	/* reset the max latency */
 942	tr->max_latency = 0;
 943	/* disable interrupts for a bit */
 944	local_irq_disable();
 945	udelay(100);
 946	local_irq_enable();
 947
 948	/*
 949	 * Stop the tracer to avoid a warning caused by a failed buffer
 950	 * flip: tracing_stop() disables the tr and max buffers, making
 951	 * the flip impossible if a parallel max irqs-off latency is
 952	 * recorded at the same time.
 953	 */
 954	trace->stop(tr);
 955	/* stop the tracing. */
 956	tracing_stop();
 957	/* check both trace buffers */
 958	ret = trace_test_buffer(&tr->array_buffer, NULL);
 959	if (!ret)
 960		ret = trace_test_buffer(&tr->max_buffer, &count);
 961	trace->reset(tr);
 962	tracing_start();
 963
 964	if (!ret && !count) {
 965		printk(KERN_CONT ".. no entries found ..");
 966		ret = -1;
 967	}
 968
 969	tr->max_latency = save_max;
 970
 971	return ret;
 972}
 973#endif /* CONFIG_IRQSOFF_TRACER */
 974
 975#ifdef CONFIG_PREEMPT_TRACER
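/*
 * Selftest for the preemptoff tracer: disable preemption for ~100us,
 * then verify that both the live buffer and the max-latency snapshot
 * buffer contain sane entries.
 */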
 976int
 977trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 978{
 979	unsigned long save_max = tr->max_latency;
 980	unsigned long count;
 981	int ret;
 982
 983	/*
 984	 * Now that the big kernel lock is no longer preemptible,
 985	 * and this is called with the BKL held, it will always
 986	 * fail. If preemption is already disabled, simply
 987	 * pass the test. When the BKL is removed, or becomes
 988	 * preemptible again, we will once again test this,
 989	 * so keep it in.
 990	 */
 991	if (preempt_count()) {
 992		printk(KERN_CONT "can not test ... force ");
 993		return 0;
 994	}
 995
 996	/* start the tracing */
 997	ret = tracer_init(trace, tr);
 998	if (ret) {
 999		warn_failed_init_tracer(trace, ret);
1000		return ret;
1001	}
1002
1003	/* reset the max latency */
1004	tr->max_latency = 0;
1005	/* disable preemption for a bit */
1006	preempt_disable();
1007	udelay(100);
1008	preempt_enable();
1009
1010	/*
1011	 * Stop the tracer to avoid a warning caused by a failed buffer
1012	 * flip: tracing_stop() disables the tr and max buffers, making
1013	 * the flip impossible if a parallel max preempt-off latency is
1014	 * recorded at the same time.
1015	 */
1016	trace->stop(tr);
1017	/* stop the tracing. */
1018	tracing_stop();
1019	/* check both trace buffers */
1020	ret = trace_test_buffer(&tr->array_buffer, NULL);
1021	if (!ret)
1022		ret = trace_test_buffer(&tr->max_buffer, &count);
1023	trace->reset(tr);
1024	tracing_start();
1025
1026	if (!ret && !count) {
1027		printk(KERN_CONT ".. no entries found ..");
1028		ret = -1;
1029	}
1030
1031	tr->max_latency = save_max;
1032
1033	return ret;
1034}
1035#endif /* CONFIG_PREEMPT_TRACER */
1036
1037#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
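/*
 * Selftest for the combined preempt/irqsoff tracer: open a preempt-off
 * and an irqs-off section that are not strictly nested (preemption is
 * re-enabled while interrupts are still off), run the sequence twice,
 * and check the live and max-latency buffers after each pass.
 */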
1038int
1039trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
1040{
1041	unsigned long save_max = tr->max_latency;
1042	unsigned long count;
1043	int ret;
1044
1045	/*
1046	 * Now that the big kernel lock is no longer preemptible,
1047	 * and this is called with the BKL held, it will always
1048	 * fail. If preemption is already disabled, simply
1049	 * pass the test. When the BKL is removed, or becomes
1050	 * preemptible again, we will once again test this,
1051	 * so keep it in.
1052	 */
1053	if (preempt_count()) {
1054		printk(KERN_CONT "can not test ... force ");
1055		return 0;
1056	}
1057
1058	/* start the tracing */
1059	ret = tracer_init(trace, tr);
1060	if (ret) {
1061		warn_failed_init_tracer(trace, ret);
1062		goto out_no_start;
1063	}
1064
1065	/* reset the max latency */
1066	tr->max_latency = 0;
1067
1068	/* disable preemption and interrupts for a bit */
1069	preempt_disable();
1070	local_irq_disable();
1071	udelay(100);
1072	preempt_enable();
1073	/* reverse the order of preempt vs irqs */
1074	local_irq_enable();
1075
1076	/*
1077	 * Stop the tracer to avoid a warning caused by a failed buffer
1078	 * flip: tracing_stop() disables the tr and max buffers, making
1079	 * the flip impossible if a parallel max irqs/preempt-off latency
1080	 * is recorded at the same time.
1081	 */
1082	trace->stop(tr);
1083	/* stop the tracing. */
1084	tracing_stop();
1085	/* check both trace buffers */
1086	ret = trace_test_buffer(&tr->array_buffer, NULL);
1087	if (ret)
1088		goto out;
1089
1090	ret = trace_test_buffer(&tr->max_buffer, &count);
1091	if (ret)
1092		goto out;
1093
1094	if (!ret && !count) {
1095		printk(KERN_CONT ".. no entries found ..");
1096		ret = -1;
1097		goto out;
1098	}
1099
1100	/* do the test by disabling interrupts first this time */
1101	tr->max_latency = 0;
1102	tracing_start();
1103	trace->start(tr);
1104
1105	preempt_disable();
1106	local_irq_disable();
1107	udelay(100);
1108	preempt_enable();
1109	/* reverse the order of preempt vs irqs */
1110	local_irq_enable();
1111
1112	trace->stop(tr);
1113	/* stop the tracing. */
1114	tracing_stop();
1115	/* check both trace buffers */
1116	ret = trace_test_buffer(&tr->array_buffer, NULL);
1117	if (ret)
1118		goto out;
1119
1120	ret = trace_test_buffer(&tr->max_buffer, &count);
1121
1122	if (!ret && !count) {
1123		printk(KERN_CONT ".. no entries found ..");
1124		ret = -1;
1125		goto out;
1126	}
1127
1128out:
1129	tracing_start();
1130out_no_start:
1131	trace->reset(tr);
1132	tr->max_latency = save_max;
1133
1134	return ret;
1135}
1136#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
1137
1138#ifdef CONFIG_NOP_TRACER
1139int
1140trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
1141{
1142	/* What could possibly go wrong? */
1143	return 0;
1144}
1145#endif
1146
1147#ifdef CONFIG_SCHED_TRACER
1148
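/*
 * Handshake data between the wakeup selftest and its kthread: the
 * completion is used by the thread to signal the test (first that it
 * has switched scheduling policy, later that it has been woken), and
 * 'go' lets the thread tell a deliberate wake-up from a spurious one.
 */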
1149struct wakeup_test_data {
1150	struct completion	is_ready;
1151	int			go;
1152};
1153
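/*
 * Body of the test kthread: switch itself to SCHED_DEADLINE, report
 * readiness, sleep until the selftest wakes it up, then idle until
 * kthread_stop() is called.
 */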
1154static int trace_wakeup_test_thread(void *data)
1155{
1156	/* Make this a -deadline thread */
1157	static const struct sched_attr attr = {
1158		.sched_policy = SCHED_DEADLINE,
1159		.sched_runtime = 100000ULL,
1160		.sched_deadline = 10000000ULL,
1161		.sched_period = 10000000ULL
1162	};
1163	struct wakeup_test_data *x = data;
1164
1165	sched_setattr(current, &attr);
1166
1167	/* Let the test know we now have the new scheduling policy */
1168	complete(&x->is_ready);
1169
1170	/* now go to sleep and let the test wake us up */
1171	set_current_state(TASK_INTERRUPTIBLE);
1172	while (!x->go) {
1173		schedule();
1174		set_current_state(TASK_INTERRUPTIBLE);
1175	}
1176
1177	complete(&x->is_ready);
1178
1179	set_current_state(TASK_INTERRUPTIBLE);
1180
1181	/* we are awake, now wait to disappear */
1182	while (!kthread_should_stop()) {
1183		schedule();
1184		set_current_state(TASK_INTERRUPTIBLE);
1185	}
1186
1187	__set_current_state(TASK_RUNNING);
1188
1189	return 0;
1190}
 
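/*
 * Selftest for the wakeup latency tracer: create a -deadline kthread,
 * wait until it is sleeping, then wake it up. The tracer should record
 * the wakeup-to-schedule latency of that thread in the max buffer.
 */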
1191int
1192trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
1193{
1194	unsigned long save_max = tr->max_latency;
1195	struct task_struct *p;
1196	struct wakeup_test_data data;
1197	unsigned long count;
1198	int ret;
1199
1200	memset(&data, 0, sizeof(data));
1201
1202	init_completion(&data.is_ready);
1203
1204	/* create a -deadline thread */
1205	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
1206	if (IS_ERR(p)) {
1207		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
1208		return -1;
1209	}
1210
1211	/* make sure the thread is running with the -deadline policy */
1212	wait_for_completion(&data.is_ready);
1213
1214	/* start the tracing */
1215	ret = tracer_init(trace, tr);
1216	if (ret) {
1217		warn_failed_init_tracer(trace, ret);
1218		return ret;
1219	}
1220
1221	/* reset the max latency */
1222	tr->max_latency = 0;
1223
1224	while (p->on_rq) {
1225		/*
1226		 * Sleep to make sure the -deadline thread is asleep too.
1227		 * On virtual machines we can't rely on timings,
1228		 * but we want to make sure this test still works.
1229		 */
1230		msleep(100);
1231	}
1232
1233	init_completion(&data.is_ready);
1234
1235	data.go = 1;
1236	/* memory barrier is in the wake_up_process() */
1237
1238	wake_up_process(p);
1239
1240	/* Wait for the task to wake up */
1241	wait_for_completion(&data.is_ready);
1242
1243	/* stop the tracing. */
1244	tracing_stop();
1245	/* check both trace buffers */
1246	ret = trace_test_buffer(&tr->array_buffer, NULL);
1247	if (!ret)
1248		ret = trace_test_buffer(&tr->max_buffer, &count);
1249
1250
1251	trace->reset(tr);
1252	tracing_start();
1253
1254	tr->max_latency = save_max;
1255
1256	/* kill the thread */
1257	kthread_stop(p);
1258
1259	if (!ret && !count) {
1260		printk(KERN_CONT ".. no entries found ..");
1261		ret = -1;
1262	}
1263
1264	return ret;
1265}
1266#endif /* CONFIG_SCHED_TRACER */
1267
1268#ifdef CONFIG_BRANCH_TRACER
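/*
 * Selftest for the branch tracer: let normal kernel activity generate
 * likely()/unlikely() branch events for 1/10 of a second, then check
 * that the trace buffer contains valid entries.
 */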
1269int
1270trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
1271{
1272	unsigned long count;
1273	int ret;
1274
1275	/* start the tracing */
1276	ret = tracer_init(trace, tr);
1277	if (ret) {
1278		warn_failed_init_tracer(trace, ret);
1279		return ret;
1280	}
1281
1282	/* Sleep for 1/10 of a second */
1283	msleep(100);
1284	/* stop the tracing. */
1285	tracing_stop();
1286	/* check the trace buffer */
1287	ret = trace_test_buffer(&tr->array_buffer, &count);
1288	trace->reset(tr);
1289	tracing_start();
1290
1291	if (!ret && !count) {
1292		printk(KERN_CONT ".. no entries found ..");
1293		ret = -1;
1294	}
1295
1296	return ret;
1297}
1298#endif /* CONFIG_BRANCH_TRACER */
1299