/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM workqueue

#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WORKQUEUE_H

#include <linux/tracepoint.h>
#include <linux/workqueue.h>

DECLARE_EVENT_CLASS(workqueue_work,

	TP_PROTO(struct work_struct *work),

	TP_ARGS(work),

	TP_STRUCT__entry(
		__field( void *, work )
	),

	TP_fast_assign(
		__entry->work = work;
	),

	TP_printk("work struct %p", __entry->work)
);
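/*
 * The workqueue_work class above only records the work pointer; the
 * DEFINE_EVENT() instances below (workqueue_activate_work,
 * workqueue_execute_end) reuse its entry layout and format string
 * rather than defining their own.
 */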

struct pool_workqueue;

/**
 * workqueue_queue_work - called when a work gets queued
 * @req_cpu: the requested cpu
 * @pwq: pointer to struct pool_workqueue
 * @work: pointer to struct work_struct
 *
 * This event occurs when a work is queued immediately or once a
 * delayed work is actually queued on a workqueue (ie: once the delay
 * has been reached).
 */
TRACE_EVENT(workqueue_queue_work,

	TP_PROTO(unsigned int req_cpu, struct pool_workqueue *pwq,
		 struct work_struct *work),

	TP_ARGS(req_cpu, pwq, work),

	TP_STRUCT__entry(
		__field( void *, work )
		__field( void *, function)
		__field( void *, workqueue)
		__field( unsigned int, req_cpu )
		__field( unsigned int, cpu )
	),

	TP_fast_assign(
		__entry->work = work;
		__entry->function = work->func;
		__entry->workqueue = pwq->wq;
		__entry->req_cpu = req_cpu;
		__entry->cpu = pwq->pool->cpu;
	),

	TP_printk("work struct=%p function=%ps workqueue=%p req_cpu=%u cpu=%u",
		  __entry->work, __entry->function, __entry->workqueue,
		  __entry->req_cpu, __entry->cpu)
);
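/*
 * For reference (illustrative sketch, not part of the upstream header):
 * TRACE_EVENT() above also generates a trace_workqueue_queue_work()
 * inline which the workqueue core calls at the point a work item is
 * queued. The function below is a made-up, simplified stand-in for such
 * a call site, not the actual kernel/workqueue.c code.
 */
static inline void example_emit_queue_work(unsigned int req_cpu,
					   struct pool_workqueue *pwq,
					   struct work_struct *work)
{
	/* Compiles to (nearly) nothing unless the tracepoint is enabled. */
	trace_workqueue_queue_work(req_cpu, pwq, work);

	/* ... the real caller then links @work onto @pwq's list ... */
}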

/**
 * workqueue_activate_work - called when a work gets activated
 * @work: pointer to struct work_struct
 *
 * This event occurs when a queued work is put on the active queue,
 * which happens immediately after queueing unless the @max_active limit
 * is reached.
 */
DEFINE_EVENT(workqueue_work, workqueue_activate_work,

	TP_PROTO(struct work_struct *work),

	TP_ARGS(work)
);

/**
 * workqueue_execute_start - called immediately before the workqueue callback
 * @work: pointer to struct work_struct
 *
 * Allows tracking of workqueue execution.
 */
TRACE_EVENT(workqueue_execute_start,

	TP_PROTO(struct work_struct *work),

	TP_ARGS(work),

	TP_STRUCT__entry(
		__field( void *, work )
		__field( void *, function)
	),

	TP_fast_assign(
		__entry->work = work;
		__entry->function = work->func;
	),

	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);

/**
 * workqueue_execute_end - called immediately after the workqueue callback
 * @work: pointer to struct work_struct
 *
 * Allows tracking of workqueue execution.
 */
DEFINE_EVENT(workqueue_work, workqueue_execute_end,

	TP_PROTO(struct work_struct *work),

	TP_ARGS(work)
);

#endif /* _TRACE_WORKQUEUE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
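
/*
 * Illustrative sketch (added, not part of the upstream header): a minimal
 * module pairing workqueue_execute_start/workqueue_execute_end probes to
 * time each work item. It assumes these two tracepoints are exported to
 * modules (EXPORT_TRACEPOINT_SYMBOL_GPL in kernel/workqueue.c) and would
 * live in its own source file; all names below are invented for the
 * example. The per-CPU timestamp is a simplification that ignores work
 * items sleeping or interleaving on the same CPU.
 */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/workqueue.h>
#include <trace/events/workqueue.h>

static DEFINE_PER_CPU(u64, example_work_start_ns);

/* Probe callbacks get the registration cookie first, then the TP_PROTO args. */
static void example_execute_start_probe(void *data, struct work_struct *work)
{
	this_cpu_write(example_work_start_ns, ktime_get_ns());
}

static void example_execute_end_probe(void *data, struct work_struct *work)
{
	u64 delta = ktime_get_ns() - this_cpu_read(example_work_start_ns);

	pr_info("work %ps ran for %llu ns\n", work->func,
		(unsigned long long)delta);
}

static int __init example_wq_trace_init(void)
{
	int ret;

	ret = register_trace_workqueue_execute_start(example_execute_start_probe, NULL);
	if (ret)
		return ret;

	ret = register_trace_workqueue_execute_end(example_execute_end_probe, NULL);
	if (ret)
		unregister_trace_workqueue_execute_start(example_execute_start_probe, NULL);
	return ret;
}

static void __exit example_wq_trace_exit(void)
{
	unregister_trace_workqueue_execute_end(example_execute_end_probe, NULL);
	unregister_trace_workqueue_execute_start(example_execute_start_probe, NULL);
	/* Wait for in-flight probe calls to finish before the module goes away. */
	tracepoint_synchronize_unregister();
}

module_init(example_wq_trace_init);
module_exit(example_wq_trace_exit);
MODULE_LICENSE("GPL");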