/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/kthread.h>
#include <linux/sched/numa_balancing.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid = t->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field( int, ret )
	),

	TP_fast_assign(
		__entry->ret = ret;
	),

	TP_printk("ret=%d", __entry->ret)
);

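/*
 * Illustrative pairing (a sketch, not a definition of the call sites;
 * see kernel/kthread.c for the authoritative usage): kthread_stop() is
 * expected to emit both events around a stop request, roughly:
 *
 *	trace_sched_kthread_stop(k);		// stop requested for kthread k
 *	ret = ...;				// wait for the kthread to exit
 *	trace_sched_kthread_stop_ret(ret);
 */
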
/**
 * sched_kthread_work_queue_work - called when a work gets queued
 * @worker: pointer to the kthread_worker
 * @work: pointer to struct kthread_work
 *
 * This event occurs when a work is queued immediately, or once a
 * delayed work is actually queued (i.e., once the delay has been
 * reached).
 */
TRACE_EVENT(sched_kthread_work_queue_work,

	TP_PROTO(struct kthread_worker *worker,
		 struct kthread_work *work),

	TP_ARGS(worker, work),

	TP_STRUCT__entry(
		__field( void *, work )
		__field( void *, function )
		__field( void *, worker )
	),

	TP_fast_assign(
		__entry->work = work;
		__entry->function = work->func;
		__entry->worker = worker;
	),

	TP_printk("work struct=%p function=%ps worker=%p",
		  __entry->work, __entry->function, __entry->worker)
);

/**
 * sched_kthread_work_execute_start - called immediately before the work callback
 * @work: pointer to struct kthread_work
 *
 * Allows tracking of kthread work execution.
 */
TRACE_EVENT(sched_kthread_work_execute_start,

	TP_PROTO(struct kthread_work *work),

	TP_ARGS(work),

	TP_STRUCT__entry(
		__field( void *, work )
		__field( void *, function )
	),

	TP_fast_assign(
		__entry->work = work;
		__entry->function = work->func;
	),

	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);

/**
 * sched_kthread_work_execute_end - called immediately after the work callback
 * @work: pointer to struct kthread_work
 * @function: pointer to worker function
 *
 * Allows tracking of kthread work execution.
 */
TRACE_EVENT(sched_kthread_work_execute_end,

	TP_PROTO(struct kthread_work *work, kthread_work_func_t function),

	TP_ARGS(work, function),

	TP_STRUCT__entry(
		__field( void *, work )
		__field( void *, function )
	),

	TP_fast_assign(
		__entry->work = work;
		__entry->function = function;
	),

	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);

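/*
 * Usage sketch (hypothetical shell session; paths follow the standard
 * tracefs layout, assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *	# cd /sys/kernel/tracing
 *	# echo 1 > events/sched/sched_kthread_work_execute_start/enable
 *	# echo 1 > events/sched/sched_kthread_work_execute_end/enable
 *	# cat trace_pipe
 *
 * Pairing the _start and _end timestamps per work pointer gives the
 * per-callback execution time.
 */
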
/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(__perf_task(p)),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
		__field( int, target_cpu )
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->prio = p->prio; /* XXX SCHED_DEADLINE */
		__entry->target_cpu = task_cpu(p);
	),

	TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->target_cpu)
);

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

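/*
 * Example rendering (hypothetical values; the format follows the
 * TP_printk() above):
 *
 *	sched_wakeup: comm=kworker/0:1 pid=17 prio=120 target_cpu=000
 */
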
#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt,
					      unsigned int prev_state,
					      struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * __task_state_index() uses fls() and returns a value in the 0-8
	 * range. Decrement it by 1 (except for the TASK_RUNNING state,
	 * i.e., 0) before using it as a left-shift count to recover the
	 * correct task->state bit.
	 */
	state = __task_state_index(prev_state, p->exit_state);

	return state ? (1 << (state - 1)) : state;
}
#endif /* CREATE_TRACE_POINTS */

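/*
 * Worked example of the mapping above (values per the TASK_* bit
 * definitions in <linux/sched.h>): for prev_state = TASK_UNINTERRUPTIBLE
 * (0x0002), fls() yields index 2, and 1 << (2 - 1) = 0x0002 restores the
 * original state bit; for TASK_RUNNING the index is 0 and is returned
 * unchanged.
 */
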
/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next,
		 unsigned int prev_state),

	TP_ARGS(preempt, prev, next, prev_state),

	TP_STRUCT__entry(
		__array( char, prev_comm, TASK_COMM_LEN )
		__field( pid_t, prev_pid )
		__field( int, prev_prio )
		__field( long, prev_state )
		__array( char, next_comm, TASK_COMM_LEN )
		__field( pid_t, next_pid )
		__field( int, next_prio )
	),

	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid = prev->pid;
		__entry->prev_prio = prev->prio;
		__entry->prev_state = __trace_sched_switch_state(preempt, prev_state, prev);
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid = next->pid;
		__entry->next_prio = next->prio;
		/* XXX SCHED_DEADLINE */
	),

	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		  __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,

		  (__entry->prev_state & (TASK_REPORT_MAX - 1)) ?
		  __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|",
				{ TASK_INTERRUPTIBLE, "S" },
				{ TASK_UNINTERRUPTIBLE, "D" },
				{ __TASK_STOPPED, "T" },
				{ __TASK_TRACED, "t" },
				{ EXIT_DEAD, "X" },
				{ EXIT_ZOMBIE, "Z" },
				{ TASK_PARKED, "P" },
				{ TASK_DEAD, "I" }) :
		  "R",

		  __entry->prev_state & TASK_REPORT_MAX ? "+" : "",
		  __entry->next_comm, __entry->next_pid, __entry->next_prio)
);

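/*
 * Example rendering (hypothetical values; "R+" would indicate a task
 * that was preempted while runnable, per the TASK_REPORT_MAX handling
 * above):
 *
 *	sched_switch: prev_comm=bash prev_pid=2031 prev_prio=120
 *	prev_state=S ==> next_comm=swapper/1 next_pid=0 next_prio=120
 */
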
/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
		__field( int, orig_cpu )
		__field( int, dest_cpu )
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->prio = p->prio; /* XXX SCHED_DEADLINE */
		__entry->orig_cpu = task_cpu(p);
		__entry->dest_cpu = dest_cpu;
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);

DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->prio = p->prio; /* XXX SCHED_DEADLINE */
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid = pid_nr(pid);
		__entry->prio = current->prio; /* XXX SCHED_DEADLINE */
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for kernel_clone:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array( char, parent_comm, TASK_COMM_LEN )
		__field( pid_t, parent_pid )
		__array( char, child_comm, TASK_COMM_LEN )
		__field( pid_t, child_pid )
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid = parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid = child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		  __entry->parent_comm, __entry->parent_pid,
		  __entry->child_comm, __entry->child_pid)
);

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_STRUCT__entry(
		__string( filename, bprm->filename )
		__field( pid_t, pid )
		__field( pid_t, old_pid )
	),

	TP_fast_assign(
		__assign_str(filename, bprm->filename);
		__entry->pid = p->pid;
		__entry->old_pid = old_pid;
	),

	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
		  __entry->pid, __entry->old_pid)
);

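/*
 * Usage sketch (hypothetical shell session; the event filter syntax is
 * standard ftrace): restrict exec events to a single pid via tracefs:
 *
 *	# echo 'pid == 1234' > \
 *		/sys/kernel/tracing/events/sched/sched_process_exec/filter
 */
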
#ifdef CONFIG_SCHEDSTATS
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS
#else
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP
#endif

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(__perf_task(tsk), __perf_count(delay)),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( u64, delay )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->delay = delay;
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->delay)
);

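/*
 * Note on __perf_count(): when one of these events is sampled through
 * perf, the delay is used as the event count, so aggregates reflect
 * accumulated time rather than event frequency. A hypothetical
 * invocation:
 *
 *	$ perf record -e sched:sched_stat_sleep -a -- sleep 10
 */
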
/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task is in
 * uninterruptible sleep).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, __perf_count(runtime), vruntime),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( u64, runtime )
		__field( u64, vruntime )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->runtime = runtime;
		__entry->vruntime = vruntime;
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->runtime,
		  (unsigned long long)__entry->vruntime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
	     TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
	     TP_ARGS(tsk, runtime, vruntime));

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

	TP_ARGS(tsk, pi_task),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, oldprio )
		__field( int, newprio )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->oldprio = tsk->prio;
		__entry->newprio = pi_task ?
				min(tsk->normal_prio, pi_task->prio) :
				tsk->normal_prio;
		/* XXX SCHED_DEADLINE bits missing */
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
		  __entry->comm, __entry->pid,
		  __entry->oldprio, __entry->newprio)
);

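/*
 * Worked example (hypothetical values; lower numeric prio means higher
 * priority): a SCHED_OTHER task with normal_prio=120 boosted by an RT
 * pi_task with prio=98 reports newprio = min(120, 98) = 98; when the
 * boost is dropped (pi_task == NULL), newprio falls back to 120.
 */
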
#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang,
	TP_PROTO(struct task_struct *tsk),
	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */

/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 */
TRACE_EVENT(sched_move_numa,

	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t, pid )
		__field( pid_t, tgid )
		__field( pid_t, ngid )
		__field( int, src_cpu )
		__field( int, src_nid )
		__field( int, dst_cpu )
		__field( int, dst_nid )
	),

	TP_fast_assign(
		__entry->pid = task_pid_nr(tsk);
		__entry->tgid = task_tgid_nr(tsk);
		__entry->ngid = task_numa_group_id(tsk);
		__entry->src_cpu = src_cpu;
		__entry->src_nid = cpu_to_node(src_cpu);
		__entry->dst_cpu = dst_cpu;
		__entry->dst_nid = cpu_to_node(dst_cpu);
	),

	TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
		  __entry->pid, __entry->tgid, __entry->ngid,
		  __entry->src_cpu, __entry->src_nid,
		  __entry->dst_cpu, __entry->dst_nid)
);

DECLARE_EVENT_CLASS(sched_numa_pair_template,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t, src_pid )
		__field( pid_t, src_tgid )
		__field( pid_t, src_ngid )
		__field( int, src_cpu )
		__field( int, src_nid )
		__field( pid_t, dst_pid )
		__field( pid_t, dst_tgid )
		__field( pid_t, dst_ngid )
		__field( int, dst_cpu )
		__field( int, dst_nid )
	),

	TP_fast_assign(
		__entry->src_pid = task_pid_nr(src_tsk);
		__entry->src_tgid = task_tgid_nr(src_tsk);
		__entry->src_ngid = task_numa_group_id(src_tsk);
		__entry->src_cpu = src_cpu;
		__entry->src_nid = cpu_to_node(src_cpu);
		__entry->dst_pid = dst_tsk ? task_pid_nr(dst_tsk) : 0;
		__entry->dst_tgid = dst_tsk ? task_tgid_nr(dst_tsk) : 0;
		__entry->dst_ngid = dst_tsk ? task_numa_group_id(dst_tsk) : 0;
		__entry->dst_cpu = dst_cpu;
		__entry->dst_nid = dst_cpu >= 0 ? cpu_to_node(dst_cpu) : -1;
	),

	TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
		  __entry->src_pid, __entry->src_tgid, __entry->src_ngid,
		  __entry->src_cpu, __entry->src_nid,
		  __entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
		  __entry->dst_cpu, __entry->dst_nid)
);

DEFINE_EVENT(sched_numa_pair_template, sched_stick_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);

DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);

/*
 * Tracepoint for waking a polling cpu without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,

	TP_PROTO(int cpu),

	TP_ARGS(cpu),

	TP_STRUCT__entry(
		__field( int, cpu )
	),

	TP_fast_assign(
		__entry->cpu = cpu;
	),

	TP_printk("cpu=%d", __entry->cpu)
);

/*
 * The following tracepoints are not exported in tracefs and provide hooking
 * mechanisms only for testing and debugging purposes.
 *
 * Postfixed with _tp to make them easily identifiable in the code.
 */
DECLARE_TRACE(pelt_cfs_tp,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(pelt_rt_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_dl_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_thermal_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_irq_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_se_tp,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

DECLARE_TRACE(sched_cpu_capacity_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(sched_overutilized_tp,
	TP_PROTO(struct root_domain *rd, bool overutilized),
	TP_ARGS(rd, overutilized));

DECLARE_TRACE(sched_util_est_cfs_tp,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(sched_util_est_se_tp,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

DECLARE_TRACE(sched_update_nr_running_tp,
	TP_PROTO(struct rq *rq, int change),
	TP_ARGS(rq, change));

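/*
 * Attachment sketch (a minimal, hypothetical module-side probe; the
 * register_trace_##name() helpers are generated by DECLARE_TRACE, and
 * probe callbacks receive their private data pointer first):
 *
 *	static void my_probe(void *data, struct rq *rq, int change)
 *	{
 *		// inspect rq->nr_running here, for example
 *	}
 *
 *	register_trace_sched_update_nr_running_tp(my_probe, NULL);
 *	...
 *	unregister_trace_sched_update_nr_running_tp(my_probe, NULL);
 */
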
#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>