include/trace/events/sched.h (Linux v6.2)
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/kthread.h>
#include <linux/sched/numa_balancing.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid	= t->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field(	int,	ret	)
	),

	TP_fast_assign(
		__entry->ret	= ret;
	),

	TP_printk("ret=%d", __entry->ret)
);
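
As a usage illustration (not part of this header): TRACE_EVENT() generates register/unregister helpers, so a module can attach probes to the two events above. A minimal sketch, assuming a GPL module; the probe names and messages are illustrative:

	#include <linux/module.h>
	#include <linux/sched.h>
	#include <trace/events/sched.h>

	/* Probe signatures mirror TP_PROTO(), with a leading void *data. */
	static void stop_probe(void *data, struct task_struct *t)
	{
		pr_info("kthread_stop: comm=%s pid=%d\n", t->comm, t->pid);
	}

	static void stop_ret_probe(void *data, int ret)
	{
		pr_info("kthread_stop: ret=%d\n", ret);
	}

	static int __init stop_trace_init(void)
	{
		register_trace_sched_kthread_stop(stop_probe, NULL);
		register_trace_sched_kthread_stop_ret(stop_ret_probe, NULL);
		return 0;
	}

	static void __exit stop_trace_exit(void)
	{
		unregister_trace_sched_kthread_stop(stop_probe, NULL);
		unregister_trace_sched_kthread_stop_ret(stop_ret_probe, NULL);
		tracepoint_synchronize_unregister();
	}

	module_init(stop_trace_init);
	module_exit(stop_trace_exit);
	MODULE_LICENSE("GPL");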

/**
 * sched_kthread_work_queue_work - called when a work item gets queued
 * @worker:	pointer to the kthread_worker
 * @work:	pointer to struct kthread_work
 *
 * This event occurs when a work item is queued immediately, or once a
 * delayed work item is actually queued (i.e., once its delay has
 * elapsed).
 */
TRACE_EVENT(sched_kthread_work_queue_work,

	TP_PROTO(struct kthread_worker *worker,
		 struct kthread_work *work),

	TP_ARGS(worker, work),

	TP_STRUCT__entry(
		__field( void *,	work	)
		__field( void *,	function)
		__field( void *,	worker)
	),

	TP_fast_assign(
		__entry->work		= work;
		__entry->function	= work->func;
		__entry->worker		= worker;
	),

	TP_printk("work struct=%p function=%ps worker=%p",
		  __entry->work, __entry->function, __entry->worker)
);

/**
 * sched_kthread_work_execute_start - called immediately before the work callback
 * @work:	pointer to struct kthread_work
 *
 * Allows tracking kthread work execution.
 */
TRACE_EVENT(sched_kthread_work_execute_start,

	TP_PROTO(struct kthread_work *work),

	TP_ARGS(work),

	TP_STRUCT__entry(
		__field( void *,	work	)
		__field( void *,	function)
	),

	TP_fast_assign(
		__entry->work		= work;
		__entry->function	= work->func;
	),

	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);

/**
 * sched_kthread_work_execute_end - called immediately after the work callback
 * @work:	pointer to struct kthread_work
 * @function:	pointer to the worker function
 *
 * Allows tracking kthread work execution.
 */
TRACE_EVENT(sched_kthread_work_execute_end,

	TP_PROTO(struct kthread_work *work, kthread_work_func_t function),

	TP_ARGS(work, function),

	TP_STRUCT__entry(
		__field( void *,	work	)
		__field( void *,	function)
	),

	TP_fast_assign(
		__entry->work		= work;
		__entry->function	= function;
	),

	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);
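
These three kthread_work events are also visible from userspace through tracefs, with no kernel code required. A minimal sketch, assuming tracefs is mounted at /sys/kernel/tracing and the program runs with sufficient privileges:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int fd;

		/* Enable one of the events declared above. */
		fd = open("/sys/kernel/tracing/events/sched/"
			  "sched_kthread_work_execute_start/enable", O_WRONLY);
		if (fd < 0 || write(fd, "1", 1) != 1)
			return 1;
		close(fd);

		/* Stream the formatted records produced by TP_printk(). */
		fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
		if (fd < 0)
			return 1;
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);
		return 0;
	}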

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(__perf_task(p)),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	target_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
		__entry->target_cpu	= task_cpu(p);
	),

	TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->target_cpu)
);

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
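
Because sched_waking, sched_wakeup and sched_wakeup_new are all stamped out of sched_wakeup_template, they share one probe prototype, so a single handler can serve all three. A sketch (the handler and its labels are illustrative, not part of this header):

	/* Matches TP_PROTO(struct task_struct *p) plus the leading data pointer. */
	static void wakeup_probe(void *data, struct task_struct *p)
	{
		pr_info("%s: comm=%s pid=%d target_cpu=%d\n",
			(const char *)data, p->comm, p->pid, task_cpu(p));
	}

	/* The same function can back all three events: */
	register_trace_sched_waking(wakeup_probe, (void *)"waking");
	register_trace_sched_wakeup(wakeup_probe, (void *)"wakeup");
	register_trace_sched_wakeup_new(wakeup_probe, (void *)"wakeup_new");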

#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt,
					      unsigned int prev_state,
					      struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * task_state_index() uses fls() and returns a value in the 0-8
	 * range. Decrement it by 1 (except for the TASK_RUNNING state,
	 * i.e. 0) before using it in a left shift, to get the correct
	 * task->state mapping.
	 */
	state = __task_state_index(prev_state, p->exit_state);

	return state ? (1 << (state - 1)) : state;
}
#endif /* CREATE_TRACE_POINTS */
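
A worked example of the encoding above: a task that blocked in TASK_UNINTERRUPTIBLE (0x0002) gives a state index of 2, so the helper reports 1 << (2 - 1) = 0x0002, which the __print_flags() table in sched_switch below renders as "D". A preempted task short-circuits to TASK_REPORT_MAX (0x0100 with the v6.2 state definitions); sched_switch prints that as the "+" suffix after "R".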

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next,
		 unsigned int prev_state),

	TP_ARGS(preempt, prev, next, prev_state),

	TP_STRUCT__entry(
		__array(	char,	prev_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	prev_pid			)
		__field(	int,	prev_prio			)
		__field(	long,	prev_state			)
		__array(	char,	next_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	next_pid			)
		__field(	int,	next_prio			)
	),

	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid	= prev->pid;
		__entry->prev_prio	= prev->prio;
		__entry->prev_state	= __trace_sched_switch_state(preempt, prev_state, prev);
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid	= next->pid;
		__entry->next_prio	= next->prio;
		/* XXX SCHED_DEADLINE */
	),

	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,

		(__entry->prev_state & (TASK_REPORT_MAX - 1)) ?
		  __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|",
				{ TASK_INTERRUPTIBLE, "S" },
				{ TASK_UNINTERRUPTIBLE, "D" },
				{ __TASK_STOPPED, "T" },
				{ __TASK_TRACED, "t" },
				{ EXIT_DEAD, "X" },
				{ EXIT_ZOMBIE, "Z" },
				{ TASK_PARKED, "P" },
				{ TASK_DEAD, "I" }) :
		  "R",

		__entry->prev_state & TASK_REPORT_MAX ? "+" : "",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
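
A probe attached to sched_switch receives every TP_PROTO() argument after the data pointer. A sketch that counts involuntary context switches (the counter and its name are illustrative):

	static atomic64_t nr_preemptions;

	static void switch_probe(void *data, bool preempt,
				 struct task_struct *prev,
				 struct task_struct *next,
				 unsigned int prev_state)
	{
		/* preempt is true when prev was forced off the CPU. */
		if (preempt)
			atomic64_inc(&nr_preemptions);
	}

	/* Attached with: register_trace_sched_switch(switch_probe, NULL); */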

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	orig_cpu		)
		__field(	int,	dest_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
		__entry->orig_cpu	= task_cpu(p);
		__entry->dest_cpu	= dest_cpu;
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);

DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid		= pid_nr(pid);
		__entry->prio		= current->prio; /* XXX SCHED_DEADLINE */
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for kernel_clone:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array(	char,	parent_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	parent_pid			)
		__array(	char,	child_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	child_pid			)
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid	= parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid	= child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		__entry->parent_comm, __entry->parent_pid,
		__entry->child_comm, __entry->child_pid)
);

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_STRUCT__entry(
		__string(	filename,	bprm->filename	)
		__field(	pid_t,		pid		)
		__field(	pid_t,		old_pid		)
	),

	TP_fast_assign(
		__assign_str(filename, bprm->filename);
		__entry->pid		= p->pid;
		__entry->old_pid	= old_pid;
	),

	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
		  __entry->pid, __entry->old_pid)
);
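
Since filename is recorded as a dynamic __string, tracefs event filters can match on it. A userspace sketch installing a glob filter (filter syntax per Documentation/trace/events.rst; the pattern is an example):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *filter = "filename ~ \"*/bash\"";
		int fd = open("/sys/kernel/tracing/events/sched/"
			      "sched_process_exec/filter", O_WRONLY);

		if (fd < 0)
			return 1;
		write(fd, filter, strlen(filter));
		close(fd);
		return 0;
	}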


#ifdef CONFIG_SCHEDSTATS
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS
#else
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP
#endif

/*
 * XXX the sched_stat tracepoints below only apply to SCHED_OTHER/BATCH/IDLE;
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(__perf_task(tsk), __perf_count(delay)),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	delay			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid	= tsk->pid;
		__entry->delay	= delay;
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->delay)
);

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task is in uninterruptible
 * sleep).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, __perf_count(runtime), vruntime),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	runtime			)
		__field( u64,	vruntime		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->runtime	= runtime;
		__entry->vruntime	= vruntime;
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->runtime,
			(unsigned long long)__entry->vruntime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
	     TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
	     TP_ARGS(tsk, runtime, vruntime));
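
Note that with CONFIG_SCHEDSTATS unset, the events guarded by the _SCHEDSTAT macros above compile away through the _NOP variants; on kernels that do have them, delay accounting usually also has to be switched on at runtime. A sketch flipping the standard sysctl from C (equivalent to `sysctl kernel.sched_schedstats=1`):

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/proc/sys/kernel/sched_schedstats", O_WRONLY);

		if (fd < 0)
			return 1;
		write(fd, "1", 1);	/* enable schedstats for sched_stat_* */
		close(fd);
		return 0;
	}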

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

	TP_ARGS(tsk, pi_task),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( int,	oldprio			)
		__field( int,	newprio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->oldprio	= tsk->prio;
		__entry->newprio	= pi_task ?
				min(tsk->normal_prio, pi_task->prio) :
				tsk->normal_prio;
		/* XXX SCHED_DEADLINE bits missing */
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
			__entry->comm, __entry->pid,
			__entry->oldprio, __entry->newprio)
);

#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang,
	TP_PROTO(struct task_struct *tsk),
	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */

/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 */
TRACE_EVENT(sched_move_numa,

	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t,	pid			)
		__field( pid_t,	tgid			)
		__field( pid_t,	ngid			)
		__field( int,	src_cpu			)
		__field( int,	src_nid			)
		__field( int,	dst_cpu			)
		__field( int,	dst_nid			)
	),

	TP_fast_assign(
		__entry->pid		= task_pid_nr(tsk);
		__entry->tgid		= task_tgid_nr(tsk);
		__entry->ngid		= task_numa_group_id(tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= cpu_to_node(dst_cpu);
	),

	TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
			__entry->pid, __entry->tgid, __entry->ngid,
			__entry->src_cpu, __entry->src_nid,
			__entry->dst_cpu, __entry->dst_nid)
);

DECLARE_EVENT_CLASS(sched_numa_pair_template,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t,	src_pid			)
		__field( pid_t,	src_tgid		)
		__field( pid_t,	src_ngid		)
		__field( int,	src_cpu			)
		__field( int,	src_nid			)
		__field( pid_t,	dst_pid			)
		__field( pid_t,	dst_tgid		)
		__field( pid_t,	dst_ngid		)
		__field( int,	dst_cpu			)
		__field( int,	dst_nid			)
	),

	TP_fast_assign(
		__entry->src_pid	= task_pid_nr(src_tsk);
		__entry->src_tgid	= task_tgid_nr(src_tsk);
		__entry->src_ngid	= task_numa_group_id(src_tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		__entry->dst_pid	= dst_tsk ? task_pid_nr(dst_tsk) : 0;
		__entry->dst_tgid	= dst_tsk ? task_tgid_nr(dst_tsk) : 0;
		__entry->dst_ngid	= dst_tsk ? task_numa_group_id(dst_tsk) : 0;
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= dst_cpu >= 0 ? cpu_to_node(dst_cpu) : -1;
	),

	TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
			__entry->src_pid, __entry->src_tgid, __entry->src_ngid,
			__entry->src_cpu, __entry->src_nid,
			__entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
			__entry->dst_cpu, __entry->dst_nid)
);

DEFINE_EVENT(sched_numa_pair_template, sched_stick_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);

DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);


/*
 * Tracepoint for waking a polling cpu without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,

	TP_PROTO(int cpu),

	TP_ARGS(cpu),

	TP_STRUCT__entry(
		__field(	int,	cpu	)
	),

	TP_fast_assign(
		__entry->cpu	= cpu;
	),

	TP_printk("cpu=%d", __entry->cpu)
);

/*
 * The following tracepoints are not exported to tracefs and provide hooking
 * mechanisms only for testing and debugging purposes.
 *
 * They are suffixed with _tp to make them easily identifiable in the code.
 */
DECLARE_TRACE(pelt_cfs_tp,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(pelt_rt_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_dl_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_thermal_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_irq_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_se_tp,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

DECLARE_TRACE(sched_cpu_capacity_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(sched_overutilized_tp,
	TP_PROTO(struct root_domain *rd, bool overutilized),
	TP_ARGS(rd, overutilized));

DECLARE_TRACE(sched_util_est_cfs_tp,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(sched_util_est_se_tp,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

DECLARE_TRACE(sched_update_nr_running_tp,
	TP_PROTO(struct rq *rq, int change),
	TP_ARGS(rq, change));
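
Because these _tp tracepoints have no tracefs entry, the only way to consume them is to register a probe from kernel code. A minimal sketch for sched_update_nr_running_tp; note that struct rq is private to kernel/sched/, so out-of-tree code should treat the pointer as opaque:

	static void nr_running_probe(void *data, struct rq *rq, int change)
	{
		/* rq is opaque here; the delta alone is still informative. */
		pr_info("nr_running changed by %d\n", change);
	}

	/* Attached with:
	 * register_trace_sched_update_nr_running_tp(nr_running_probe, NULL);
	 */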

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
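
The trailing include cooperates with the TRACE_HEADER_MULTI_READ guard at the top: exactly one compilation unit defines CREATE_TRACE_POINTS before including this header, which makes define_trace.h re-read it and emit the tracepoint definitions rather than mere declarations. For the sched events that unit is kernel/sched/core.c, along the lines of:

	#define CREATE_TRACE_POINTS
	#include <trace/events/sched.h>
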
include/trace/events/sched.h (Linux v5.4)
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched/numa_balancing.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid	= t->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field(	int,	ret	)
	),

	TP_fast_assign(
		__entry->ret	= ret;
	),

	TP_printk("ret=%d", __entry->ret)
);

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(__perf_task(p)),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	success			)
		__field(	int,	target_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
		__entry->success	= 1; /* rudiment, kill when possible */
		__entry->target_cpu	= task_cpu(p);
	),

	TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->target_cpu)
);

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * task_state_index() uses fls() and returns a value in the 0-8
	 * range. Decrement it by 1 (except for the TASK_RUNNING state,
	 * i.e. 0) before using it in a left shift, to get the correct
	 * task->state mapping.
	 */
	state = task_state_index(p);

	return state ? (1 << (state - 1)) : state;
}
#endif /* CREATE_TRACE_POINTS */

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(preempt, prev, next),

	TP_STRUCT__entry(
		__array(	char,	prev_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	prev_pid			)
		__field(	int,	prev_prio			)
		__field(	long,	prev_state			)
		__array(	char,	next_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	next_pid			)
		__field(	int,	next_prio			)
	),

	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid	= prev->pid;
		__entry->prev_prio	= prev->prio;
		__entry->prev_state	= __trace_sched_switch_state(preempt, prev);
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid	= next->pid;
		__entry->next_prio	= next->prio;
		/* XXX SCHED_DEADLINE */
	),

	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,

		(__entry->prev_state & (TASK_REPORT_MAX - 1)) ?
		  __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|",
				{ TASK_INTERRUPTIBLE, "S" },
				{ TASK_UNINTERRUPTIBLE, "D" },
				{ __TASK_STOPPED, "T" },
				{ __TASK_TRACED, "t" },
				{ EXIT_DEAD, "X" },
				{ EXIT_ZOMBIE, "Z" },
				{ TASK_PARKED, "P" },
				{ TASK_DEAD, "I" }) :
		  "R",

		__entry->prev_state & TASK_REPORT_MAX ? "+" : "",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	orig_cpu		)
		__field(	int,	dest_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
		__entry->orig_cpu	= task_cpu(p);
		__entry->dest_cpu	= dest_cpu;
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);

DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid		= pid_nr(pid);
		__entry->prio		= current->prio; /* XXX SCHED_DEADLINE */
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array(	char,	parent_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	parent_pid			)
		__array(	char,	child_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	child_pid			)
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid	= parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid	= child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		__entry->parent_comm, __entry->parent_pid,
		__entry->child_comm, __entry->child_pid)
);

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_STRUCT__entry(
		__string(	filename,	bprm->filename	)
		__field(	pid_t,		pid		)
		__field(	pid_t,		old_pid		)
	),

	TP_fast_assign(
		__assign_str(filename, bprm->filename);
		__entry->pid		= p->pid;
		__entry->old_pid	= old_pid;
	),

	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
		  __entry->pid, __entry->old_pid)
);


#ifdef CONFIG_SCHEDSTATS
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS
#else
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP
#endif

/*
 * XXX the sched_stat tracepoints below only apply to SCHED_OTHER/BATCH/IDLE;
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(__perf_task(tsk), __perf_count(delay)),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	delay			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid	= tsk->pid;
		__entry->delay	= delay;
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->delay)
);

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task is in uninterruptible
 * sleep).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, __perf_count(runtime), vruntime),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	runtime			)
		__field( u64,	vruntime		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->runtime	= runtime;
		__entry->vruntime	= vruntime;
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->runtime,
			(unsigned long long)__entry->vruntime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
	     TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
	     TP_ARGS(tsk, runtime, vruntime));

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

	TP_ARGS(tsk, pi_task),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( int,	oldprio			)
		__field( int,	newprio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->oldprio	= tsk->prio;
		__entry->newprio	= pi_task ?
				min(tsk->normal_prio, pi_task->prio) :
				tsk->normal_prio;
		/* XXX SCHED_DEADLINE bits missing */
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
			__entry->comm, __entry->pid,
			__entry->oldprio, __entry->newprio)
);

#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang,
	TP_PROTO(struct task_struct *tsk),
	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */

DECLARE_EVENT_CLASS(sched_move_task_template,

	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t,	pid			)
		__field( pid_t,	tgid			)
		__field( pid_t,	ngid			)
		__field( int,	src_cpu			)
		__field( int,	src_nid			)
		__field( int,	dst_cpu			)
		__field( int,	dst_nid			)
	),

	TP_fast_assign(
		__entry->pid		= task_pid_nr(tsk);
		__entry->tgid		= task_tgid_nr(tsk);
		__entry->ngid		= task_numa_group_id(tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= cpu_to_node(dst_cpu);
	),

	TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
			__entry->pid, __entry->tgid, __entry->ngid,
			__entry->src_cpu, __entry->src_nid,
			__entry->dst_cpu, __entry->dst_nid)
);

/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 */
DEFINE_EVENT(sched_move_task_template, sched_move_numa,
	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu)
);

DEFINE_EVENT(sched_move_task_template, sched_stick_numa,
	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu)
);

TRACE_EVENT(sched_swap_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t,	src_pid			)
		__field( pid_t,	src_tgid		)
		__field( pid_t,	src_ngid		)
		__field( int,	src_cpu			)
		__field( int,	src_nid			)
		__field( pid_t,	dst_pid			)
		__field( pid_t,	dst_tgid		)
		__field( pid_t,	dst_ngid		)
		__field( int,	dst_cpu			)
		__field( int,	dst_nid			)
	),

	TP_fast_assign(
		__entry->src_pid	= task_pid_nr(src_tsk);
		__entry->src_tgid	= task_tgid_nr(src_tsk);
		__entry->src_ngid	= task_numa_group_id(src_tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		__entry->dst_pid	= task_pid_nr(dst_tsk);
		__entry->dst_tgid	= task_tgid_nr(dst_tsk);
		__entry->dst_ngid	= task_numa_group_id(dst_tsk);
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= cpu_to_node(dst_cpu);
	),

	TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
			__entry->src_pid, __entry->src_tgid, __entry->src_ngid,
			__entry->src_cpu, __entry->src_nid,
			__entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
			__entry->dst_cpu, __entry->dst_nid)
);

/*
 * Tracepoint for waking a polling cpu without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,

	TP_PROTO(int cpu),

	TP_ARGS(cpu),

	TP_STRUCT__entry(
		__field(	int,	cpu	)
	),

	TP_fast_assign(
		__entry->cpu	= cpu;
	),

	TP_printk("cpu=%d", __entry->cpu)
);

/*
 * The following tracepoints are not exported to tracefs and provide hooking
 * mechanisms only for testing and debugging purposes.
 *
 * They are suffixed with _tp to make them easily identifiable in the code.
 */
DECLARE_TRACE(pelt_cfs_tp,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(pelt_rt_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_dl_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_irq_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_se_tp,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

DECLARE_TRACE(sched_overutilized_tp,
	TP_PROTO(struct root_domain *rd, bool overutilized),
	TP_ARGS(rd, overutilized));

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>