Linux Audio

Check our new training course

Loading...
v5.4
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#undef TRACE_SYSTEM
  3#define TRACE_SYSTEM writeback
  4
  5#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
  6#define _TRACE_WRITEBACK_H
  7
  8#include <linux/tracepoint.h>
  9#include <linux/backing-dev.h>
 10#include <linux/writeback.h>
 11
/*
 * Decode an inode state bitmask (i_state or dirty flags) into a
 * "|"-separated list of flag names for trace output, e.g.
 * "I_DIRTY_SYNC|I_SYNC".
 */
#define show_inode_state(state)					\
	__print_flags(state, "|",				\
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
		{I_NEW,			"I_NEW"},		\
		{I_WILL_FREE,		"I_WILL_FREE"},		\
		{I_FREEING,		"I_FREEING"},		\
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
		{I_DIRTY_TIME_EXPIRED,	"I_DIRTY_TIME_EXPIRED"}, \
		{I_REFERENCED,		"I_REFERENCED"}		\
	)
 26
/* enums need to be exported to user space */
#undef EM
#undef EMe
/*
 * First expansion: EM()/EMe() emit TRACE_DEFINE_ENUM() so the enum
 * values are visible to the trace event format parser in user space.
 */
#define EM(a,b) 	TRACE_DEFINE_ENUM(a);
#define EMe(a,b)	TRACE_DEFINE_ENUM(a);

/* One EM() per wb_reason value; EMe() marks the last (no trailing comma). */
#define WB_WORK_REASON							\
	EM( WB_REASON_BACKGROUND,		"background")		\
	EM( WB_REASON_VMSCAN,			"vmscan")		\
	EM( WB_REASON_SYNC,			"sync")			\
	EM( WB_REASON_PERIODIC,			"periodic")		\
	EM( WB_REASON_LAPTOP_TIMER,		"laptop_timer")		\
	EM( WB_REASON_FREE_MORE_MEM,		"free_more_memory")	\
	EM( WB_REASON_FS_FREE_SPACE,		"fs_free_space")	\
	EMe(WB_REASON_FORKER_THREAD,		"forker_thread")

WB_WORK_REASON

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a,b)		{ a, b },
#define EMe(a,b)	{ a, b }
 53
 54struct wb_writeback_work;
 55
/*
 * Template for per-page writeback events: records the bdi device name,
 * the owning inode number and the page index.  @mapping may be NULL,
 * in which case the name is logged as "(unknown)" and ino as 0.
 */
DECLARE_EVENT_CLASS(writeback_page_template,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(pgoff_t, index)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)",
			    32);
		__entry->ino = mapping ? mapping->host->i_ino : 0;
		__entry->index = page->index;
	),

	TP_printk("bdi %s: ino=%lu index=%lu",
		__entry->name,
		__entry->ino,
		__entry->index
	)
);

/* Event instances sharing writeback_page_template's record format. */
DEFINE_EVENT(writeback_page_template, writeback_dirty_page,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping)
);

DEFINE_EVENT(writeback_page_template, wait_on_page_writeback,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping)
);
 96
/*
 * Template for inode-dirtying events: logs the bdi name, the inode
 * number, the inode's current i_state and the dirty @flags being
 * applied (both bitmasks decoded via show_inode_state()).
 */
DECLARE_EVENT_CLASS(writeback_dirty_inode_template,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(unsigned long, flags)
	),

	TP_fast_assign(
		struct backing_dev_info *bdi = inode_to_bdi(inode);

		/* may be called for files on pseudo FSes w/ unregistered bdi */
		strscpy_pad(__entry->name,
			    bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->flags		= flags;
	),

	TP_printk("bdi %s: ino=%lu state=%s flags=%s",
		__entry->name,
		__entry->ino,
		show_inode_state(__entry->state),
		show_inode_state(__entry->flags)
	)
);

/* Event instances sharing writeback_dirty_inode_template's format. */
DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);
149
#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK

/*
 * Resolve the cgroup kernfs inode number used to tag trace records
 * with the memcg owning @wb.  Only compiled in the translation unit
 * that defines the tracepoints (CREATE_TRACE_POINTS).
 */
static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return wb->memcg_css->cgroup->kn->id.ino;
}

/* Same, via a writeback_control; -1U when no wb is attached. */
static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	if (wbc->wb)
		return __trace_wb_assign_cgroup(wbc->wb);
	else
		return -1U;
}
#else	/* CONFIG_CGROUP_WRITEBACK */

/* !CONFIG_CGROUP_WRITEBACK stubs: always report "no cgroup" (-1U). */
static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return -1U;
}

static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	return -1U;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
#endif	/* CREATE_TRACE_POINTS */
179
180#ifdef CONFIG_CGROUP_WRITEBACK
181TRACE_EVENT(inode_foreign_history,
182
183	TP_PROTO(struct inode *inode, struct writeback_control *wbc,
184		 unsigned int history),
185
186	TP_ARGS(inode, wbc, history),
187
188	TP_STRUCT__entry(
189		__array(char,		name, 32)
190		__field(unsigned long,	ino)
191		__field(unsigned int,	cgroup_ino)
192		__field(unsigned int,	history)
193	),
194
195	TP_fast_assign(
196		strncpy(__entry->name, dev_name(inode_to_bdi(inode)->dev), 32);
197		__entry->ino		= inode->i_ino;
198		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
199		__entry->history	= history;
200	),
201
202	TP_printk("bdi %s: ino=%lu cgroup_ino=%u history=0x%x",
203		__entry->name,
204		__entry->ino,
205		__entry->cgroup_ino,
206		__entry->history
207	)
208);
209
210TRACE_EVENT(inode_switch_wbs,
211
212	TP_PROTO(struct inode *inode, struct bdi_writeback *old_wb,
213		 struct bdi_writeback *new_wb),
214
215	TP_ARGS(inode, old_wb, new_wb),
216
217	TP_STRUCT__entry(
218		__array(char,		name, 32)
219		__field(unsigned long,	ino)
220		__field(unsigned int,	old_cgroup_ino)
221		__field(unsigned int,	new_cgroup_ino)
222	),
223
224	TP_fast_assign(
225		strncpy(__entry->name,	dev_name(old_wb->bdi->dev), 32);
226		__entry->ino		= inode->i_ino;
227		__entry->old_cgroup_ino	= __trace_wb_assign_cgroup(old_wb);
228		__entry->new_cgroup_ino	= __trace_wb_assign_cgroup(new_wb);
229	),
230
231	TP_printk("bdi %s: ino=%lu old_cgroup_ino=%u new_cgroup_ino=%u",
232		__entry->name,
233		__entry->ino,
234		__entry->old_cgroup_ino,
235		__entry->new_cgroup_ino
236	)
237);
238
239TRACE_EVENT(track_foreign_dirty,
240
241	TP_PROTO(struct page *page, struct bdi_writeback *wb),
242
243	TP_ARGS(page, wb),
244
245	TP_STRUCT__entry(
246		__array(char,		name, 32)
247		__field(u64,		bdi_id)
248		__field(unsigned long,	ino)
249		__field(unsigned int,	memcg_id)
250		__field(unsigned int,	cgroup_ino)
251		__field(unsigned int,	page_cgroup_ino)
252	),
253
254	TP_fast_assign(
255		struct address_space *mapping = page_mapping(page);
256		struct inode *inode = mapping ? mapping->host : NULL;
257
258		strncpy(__entry->name,	dev_name(wb->bdi->dev), 32);
259		__entry->bdi_id		= wb->bdi->id;
260		__entry->ino		= inode ? inode->i_ino : 0;
261		__entry->memcg_id	= wb->memcg_css->id;
262		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
263		__entry->page_cgroup_ino = page->mem_cgroup->css.cgroup->kn->id.ino;
264	),
265
266	TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%u page_cgroup_ino=%u",
267		__entry->name,
268		__entry->bdi_id,
269		__entry->ino,
270		__entry->memcg_id,
271		__entry->cgroup_ino,
272		__entry->page_cgroup_ino
273	)
274);
275
276TRACE_EVENT(flush_foreign,
277
278	TP_PROTO(struct bdi_writeback *wb, unsigned int frn_bdi_id,
279		 unsigned int frn_memcg_id),
280
281	TP_ARGS(wb, frn_bdi_id, frn_memcg_id),
282
283	TP_STRUCT__entry(
284		__array(char,		name, 32)
285		__field(unsigned int,	cgroup_ino)
286		__field(unsigned int,	frn_bdi_id)
287		__field(unsigned int,	frn_memcg_id)
288	),
289
290	TP_fast_assign(
291		strncpy(__entry->name,	dev_name(wb->bdi->dev), 32);
292		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
293		__entry->frn_bdi_id	= frn_bdi_id;
294		__entry->frn_memcg_id	= frn_memcg_id;
295	),
296
297	TP_printk("bdi %s: cgroup_ino=%u frn_bdi_id=%u frn_memcg_id=%u",
298		__entry->name,
299		__entry->cgroup_ino,
300		__entry->frn_bdi_id,
301		__entry->frn_memcg_id
302	)
303);
304#endif
305
/*
 * Template for ->write_inode() events: bdi name, inode number, the
 * wbc sync mode and the cgroup tag taken from @wbc.
 */
DECLARE_EVENT_CLASS(writeback_write_inode_template,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(int, sync_mode)
		__field(unsigned int, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    dev_name(inode_to_bdi(inode)->dev), 32);
		__entry->ino		= inode->i_ino;
		__entry->sync_mode	= wbc->sync_mode;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%u",
		__entry->name,
		__entry->ino,
		__entry->sync_mode,
		__entry->cgroup_ino
	)
);

/* Event instances sharing writeback_write_inode_template's format. */
DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);
348
/*
 * Template for writeback work-item lifecycle events: snapshots the
 * wb_writeback_work parameters (page count, target sb, sync mode,
 * kupdate/background/cyclic flags, reason) along with the bdi name
 * and cgroup tag.  The bdi may not have a registered device yet, in
 * which case "(unknown)" is logged.
 */
DECLARE_EVENT_CLASS(writeback_work_class,
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
	TP_ARGS(wb, work),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_pages)
		__field(dev_t, sb_dev)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, range_cyclic)
		__field(int, for_background)
		__field(int, reason)
		__field(unsigned int, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name,
			    wb->bdi->dev ? dev_name(wb->bdi->dev) :
			    "(unknown)", 32);
		__entry->nr_pages = work->nr_pages;
		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
		__entry->sync_mode = work->sync_mode;
		__entry->for_kupdate = work->for_kupdate;
		__entry->range_cyclic = work->range_cyclic;
		__entry->for_background	= work->for_background;
		__entry->reason = work->reason;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
		  "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%u",
		  __entry->name,
		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
		  __entry->nr_pages,
		  __entry->sync_mode,
		  __entry->for_kupdate,
		  __entry->range_cyclic,
		  __entry->for_background,
		  __print_symbolic(__entry->reason, WB_WORK_REASON),
		  __entry->cgroup_ino
	)
);
/* One event per stage of a work item's life (queue/exec/start/written/wait). */
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
	TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
398
/* Logs the caller-supplied count of pages written by a writeback pass. */
TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long,		pages)
	),
	TP_fast_assign(
		__entry->pages		= pages_written;
	),
	TP_printk("%ld", __entry->pages)
);
410
/*
 * Minimal per-wb template: just the bdi name and the cgroup tag.
 */
DECLARE_EVENT_CLASS(writeback_class,
	TP_PROTO(struct bdi_writeback *wb),
	TP_ARGS(wb),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned int, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: cgroup_ino=%u",
		  __entry->name,
		  __entry->cgroup_ino
	)
);
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct bdi_writeback *wb), \
	TP_ARGS(wb))

DEFINE_WRITEBACK_EVENT(writeback_wake_background);

/* Logs registration of a backing_dev_info by its device name. */
TRACE_EVENT(writeback_bdi_register,
	TP_PROTO(struct backing_dev_info *bdi),
	TP_ARGS(bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
	),
	TP_printk("bdi %s",
		__entry->name
	)
);
447
/*
 * Template dumping a writeback_control: counters, mode flags, and the
 * byte range being written, plus bdi name and cgroup tag.
 */
DECLARE_EVENT_CLASS(wbc_class,
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
	TP_ARGS(wbc, bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_to_write)
		__field(long, pages_skipped)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, for_background)
		__field(int, for_reclaim)
		__field(int, range_cyclic)
		__field(long, range_start)
		__field(long, range_end)
		__field(unsigned int, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
		__entry->nr_to_write	= wbc->nr_to_write;
		__entry->pages_skipped	= wbc->pages_skipped;
		__entry->sync_mode	= wbc->sync_mode;
		__entry->for_kupdate	= wbc->for_kupdate;
		__entry->for_background	= wbc->for_background;
		__entry->for_reclaim	= wbc->for_reclaim;
		__entry->range_cyclic	= wbc->range_cyclic;
		/* range_start/range_end are loff_t; truncated to long for the record */
		__entry->range_start	= (long)wbc->range_start;
		__entry->range_end	= (long)wbc->range_end;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
		"bgrd=%d reclm=%d cyclic=%d "
		"start=0x%lx end=0x%lx cgroup_ino=%u",
		__entry->name,
		__entry->nr_to_write,
		__entry->pages_skipped,
		__entry->sync_mode,
		__entry->for_kupdate,
		__entry->for_background,
		__entry->for_reclaim,
		__entry->range_cyclic,
		__entry->range_start,
		__entry->range_end,
		__entry->cgroup_ino
	)
)

#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);
501
/*
 * Logs expiry-based queueing of dirty inodes for IO: the
 * older_than_this cutoff (absolute jiffies and relative age in ms),
 * how many inodes were moved, and the work reason.
 */
TRACE_EVENT(writeback_queue_io,
	TP_PROTO(struct bdi_writeback *wb,
		 struct wb_writeback_work *work,
		 int moved),
	TP_ARGS(wb, work, moved),
	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(unsigned long,	older)
		__field(long,		age)
		__field(int,		moved)
		__field(int,		reason)
		__field(unsigned int,	cgroup_ino)
	),
	TP_fast_assign(
		unsigned long *older_than_this = work->older_than_this;
		strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
		/* no cutoff: older=0, age=-1 */
		__entry->older	= older_than_this ?  *older_than_this : 0;
		__entry->age	= older_than_this ?
				  (jiffies - *older_than_this) * 1000 / HZ : -1;
		__entry->moved	= moved;
		__entry->reason	= work->reason;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u",
		__entry->name,
		__entry->older,	/* older_than_this in jiffies */
		__entry->age,	/* older_than_this in relative milliseconds */
		__entry->moved,
		__print_symbolic(__entry->reason, WB_WORK_REASON),
		__entry->cgroup_ino
	)
);
534
/*
 * Snapshot of global dirty-page accounting: node-level page counters,
 * the caller-supplied background/dirty thresholds, and the current
 * global dirty limit.
 */
TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long,	nr_dirty)
		__field(unsigned long,	nr_writeback)
		__field(unsigned long,	nr_unstable)
		__field(unsigned long,	background_thresh)
		__field(unsigned long,	dirty_thresh)
		__field(unsigned long,	dirty_limit)
		__field(unsigned long,	nr_dirtied)
		__field(unsigned long,	nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty	= global_node_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback	= global_node_page_state(NR_WRITEBACK);
		__entry->nr_unstable	= global_node_page_state(NR_UNSTABLE_NFS);
		__entry->nr_dirtied	= global_node_page_state(NR_DIRTIED);
		__entry->nr_written	= global_node_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh	= dirty_thresh;
		__entry->dirty_limit	= global_wb_domain.dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu unstable=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->nr_unstable,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);
580
/* Convert a value counted in pages to KiB (pages << (PAGE_SHIFT - 10)). */
#define KBps(x)			((x) << (PAGE_SHIFT - 10))

/*
 * Snapshot of per-wb dirty throttling state, all bandwidth/rate values
 * converted to KiB via KBps().
 */
TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(wb, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char,		bdi, 32)
		__field(unsigned long,	write_bw)
		__field(unsigned long,	avg_write_bw)
		__field(unsigned long,	dirty_rate)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned long,	balanced_dirty_ratelimit)
		__field(unsigned int,	cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
		__entry->write_bw	= KBps(wb->write_bandwidth);
		__entry->avg_write_bw	= KBps(wb->avg_write_bandwidth);
		__entry->dirty_rate	= KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					KBps(wb->balanced_dirty_ratelimit);
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu cgroup_ino=%u",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit, /* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
		  __entry->cgroup_ino
	)
);
628
/*
 * Detailed snapshot of one balance_dirty_pages() throttling decision:
 * global limit/setpoint, per-bdi setpoint scaled by bdi_thresh/thresh,
 * ratelimits in KiB, and the pause/period timing converted to ms.
 */
TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long thresh,
		 unsigned long bg_thresh,
		 unsigned long dirty,
		 unsigned long bdi_thresh,
		 unsigned long bdi_dirty,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 unsigned long period,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
		dirty_ratelimit, task_ratelimit,
		dirtied, period, pause, start_time),

	TP_STRUCT__entry(
		__array(	 char,	bdi, 32)
		__field(unsigned long,	limit)
		__field(unsigned long,	setpoint)
		__field(unsigned long,	dirty)
		__field(unsigned long,	bdi_setpoint)
		__field(unsigned long,	bdi_dirty)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned int,	dirtied)
		__field(unsigned int,	dirtied_pause)
		__field(unsigned long,	paused)
		__field(	 long,	pause)
		__field(unsigned long,	period)
		__field(	 long,	think)
		__field(unsigned int,	cgroup_ino)
	),

	TP_fast_assign(
		/* freerun = midpoint of the background and dirty thresholds */
		unsigned long freerun = (thresh + bg_thresh) / 2;
		strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);

		__entry->limit		= global_wb_domain.dirty_limit;
		__entry->setpoint	= (global_wb_domain.dirty_limit +
						freerun) / 2;
		__entry->dirty		= dirty;
		__entry->bdi_setpoint	= __entry->setpoint *
						bdi_thresh / (thresh + 1);
		__entry->bdi_dirty	= bdi_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->dirtied	= dirtied;
		__entry->dirtied_pause	= current->nr_dirtied_pause;
		/* think = ms since this task last paused for dirtying (0 if never) */
		__entry->think		= current->dirty_paused_when == 0 ? 0 :
			 (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
		__entry->period		= period * 1000 / HZ;
		__entry->pause		= pause * 1000 / HZ;
		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),


	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "bdi_setpoint=%lu bdi_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%u",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->bdi_setpoint,
		  __entry->bdi_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause,	/* ms */
		  __entry->period,	/* ms */
		  __entry->think,	/* ms */
		  __entry->cgroup_ino
	  )
);
713
/*
 * Logs an inode being requeued during per-sb writeback: bdi name, ino,
 * i_state (decoded), dirtied_when in jiffies plus derived age, and the
 * cgroup tag of the wb the inode is attached to.
 */
TRACE_EVENT(writeback_sb_inodes_requeue,

	TP_PROTO(struct inode *inode),
	TP_ARGS(inode),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(unsigned int, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    dev_name(inode_to_bdi(inode)->dev), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(inode_to_wb(inode));
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%u",
		  __entry->name,
		  __entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  __entry->cgroup_ino
	)
);
745
/*
 * Template for congestion-wait events: the requested timeout and the
 * actual delay experienced, both in microseconds.
 */
DECLARE_EVENT_CLASS(writeback_congest_waited_template,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed),

	TP_STRUCT__entry(
		__field(	unsigned int,	usec_timeout	)
		__field(	unsigned int,	usec_delayed	)
	),

	TP_fast_assign(
		__entry->usec_timeout	= usec_timeout;
		__entry->usec_delayed	= usec_delayed;
	),

	TP_printk("usec_timeout=%u usec_delayed=%u",
			__entry->usec_timeout,
			__entry->usec_delayed)
);

/* Event instances sharing writeback_congest_waited_template's format. */
DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);
780
/*
 * Template for single-inode writeback events: inode identity/state,
 * the mapping's writeback_index, the requested nr_to_write, and the
 * pages actually written (nr_to_write minus what remains in the wbc).
 */
DECLARE_EVENT_CLASS(writeback_single_inode_template,

	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write
	),

	TP_ARGS(inode, wbc, nr_to_write),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(unsigned long, writeback_index)
		__field(long, nr_to_write)
		__field(unsigned long, wrote)
		__field(unsigned int, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    dev_name(inode_to_bdi(inode)->dev), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
		__entry->writeback_index = inode->i_mapping->writeback_index;
		__entry->nr_to_write	= nr_to_write;
		__entry->wrote		= nr_to_write - wbc->nr_to_write;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
		  "index=%lu to_write=%ld wrote=%lu cgroup_ino=%u",
		  __entry->name,
		  __entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  __entry->writeback_index,
		  __entry->nr_to_write,
		  __entry->wrote,
		  __entry->cgroup_ino
	)
);

/* Event instances sharing writeback_single_inode_template's format. */
DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);
840
/*
 * Template keyed by device number rather than bdi name: records the
 * sb's dev_t, inode number, i_state, mode and dirtied_when.
 */
DECLARE_EVENT_CLASS(writeback_inode_template,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(unsigned long,	ino			)
		__field(unsigned long,	state			)
		__field(	__u16, mode			)
		__field(unsigned long, dirtied_when		)
	),

	TP_fast_assign(
		__entry->dev	= inode->i_sb->s_dev;
		__entry->ino	= inode->i_ino;
		__entry->state	= inode->i_state;
		__entry->mode	= inode->i_mode;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino, __entry->dirtied_when,
		  show_inode_state(__entry->state), __entry->mode)
);

/* Event instances sharing writeback_inode_template's format. */
DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,

	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

/*
 * Inode writeback list tracking.
 */

DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);
900
901#endif /* _TRACE_WRITEBACK_H */
902
903/* This part must be outside protection */
904#include <trace/define_trace.h>
v4.17
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#undef TRACE_SYSTEM
  3#define TRACE_SYSTEM writeback
  4
  5#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
  6#define _TRACE_WRITEBACK_H
  7
  8#include <linux/tracepoint.h>
  9#include <linux/backing-dev.h>
 10#include <linux/writeback.h>
 11
 12#define show_inode_state(state)					\
 13	__print_flags(state, "|",				\
 14		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
 15		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
 16		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
 17		{I_NEW,			"I_NEW"},		\
 18		{I_WILL_FREE,		"I_WILL_FREE"},		\
 19		{I_FREEING,		"I_FREEING"},		\
 20		{I_CLEAR,		"I_CLEAR"},		\
 21		{I_SYNC,		"I_SYNC"},		\
 22		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
 23		{I_DIRTY_TIME_EXPIRED,	"I_DIRTY_TIME_EXPIRED"}, \
 24		{I_REFERENCED,		"I_REFERENCED"}		\
 25	)
 26
 27/* enums need to be exported to user space */
 28#undef EM
 29#undef EMe
 30#define EM(a,b) 	TRACE_DEFINE_ENUM(a);
 31#define EMe(a,b)	TRACE_DEFINE_ENUM(a);
 32
 33#define WB_WORK_REASON							\
 34	EM( WB_REASON_BACKGROUND,		"background")		\
 35	EM( WB_REASON_VMSCAN,			"vmscan")		\
 36	EM( WB_REASON_SYNC,			"sync")			\
 37	EM( WB_REASON_PERIODIC,			"periodic")		\
 38	EM( WB_REASON_LAPTOP_TIMER,		"laptop_timer")		\
 39	EM( WB_REASON_FREE_MORE_MEM,		"free_more_memory")	\
 40	EM( WB_REASON_FS_FREE_SPACE,		"fs_free_space")	\
 41	EMe(WB_REASON_FORKER_THREAD,		"forker_thread")
 42
 43WB_WORK_REASON
 44
 45/*
 46 * Now redefine the EM() and EMe() macros to map the enums to the strings
 47 * that will be printed in the output.
 48 */
 49#undef EM
 50#undef EMe
 51#define EM(a,b)		{ a, b },
 52#define EMe(a,b)	{ a, b }
 53
 54struct wb_writeback_work;
 55
 56TRACE_EVENT(writeback_dirty_page,
 57
 58	TP_PROTO(struct page *page, struct address_space *mapping),
 59
 60	TP_ARGS(page, mapping),
 61
 62	TP_STRUCT__entry (
 63		__array(char, name, 32)
 64		__field(unsigned long, ino)
 65		__field(pgoff_t, index)
 66	),
 67
 68	TP_fast_assign(
 69		strncpy(__entry->name,
 70			mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
 
 71		__entry->ino = mapping ? mapping->host->i_ino : 0;
 72		__entry->index = page->index;
 73	),
 74
 75	TP_printk("bdi %s: ino=%lu index=%lu",
 76		__entry->name,
 77		__entry->ino,
 78		__entry->index
 79	)
 80);
 81
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 82DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
 83
 84	TP_PROTO(struct inode *inode, int flags),
 85
 86	TP_ARGS(inode, flags),
 87
 88	TP_STRUCT__entry (
 89		__array(char, name, 32)
 90		__field(unsigned long, ino)
 91		__field(unsigned long, state)
 92		__field(unsigned long, flags)
 93	),
 94
 95	TP_fast_assign(
 96		struct backing_dev_info *bdi = inode_to_bdi(inode);
 97
 98		/* may be called for files on pseudo FSes w/ unregistered bdi */
 99		strncpy(__entry->name,
100			bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
101		__entry->ino		= inode->i_ino;
102		__entry->state		= inode->i_state;
103		__entry->flags		= flags;
104	),
105
106	TP_printk("bdi %s: ino=%lu state=%s flags=%s",
107		__entry->name,
108		__entry->ino,
109		show_inode_state(__entry->state),
110		show_inode_state(__entry->flags)
111	)
112);
113
114DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,
115
116	TP_PROTO(struct inode *inode, int flags),
117
118	TP_ARGS(inode, flags)
119);
120
121DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,
122
123	TP_PROTO(struct inode *inode, int flags),
124
125	TP_ARGS(inode, flags)
126);
127
128DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,
129
130	TP_PROTO(struct inode *inode, int flags),
131
132	TP_ARGS(inode, flags)
133);
134
135#ifdef CREATE_TRACE_POINTS
136#ifdef CONFIG_CGROUP_WRITEBACK
137
138static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
139{
140	return wb->memcg_css->cgroup->kn->id.ino;
141}
142
143static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
144{
145	if (wbc->wb)
146		return __trace_wb_assign_cgroup(wbc->wb);
147	else
148		return -1U;
149}
150#else	/* CONFIG_CGROUP_WRITEBACK */
151
152static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
153{
154	return -1U;
155}
156
157static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
158{
159	return -1U;
160}
161
162#endif	/* CONFIG_CGROUP_WRITEBACK */
163#endif	/* CREATE_TRACE_POINTS */
164
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
165DECLARE_EVENT_CLASS(writeback_write_inode_template,
166
167	TP_PROTO(struct inode *inode, struct writeback_control *wbc),
168
169	TP_ARGS(inode, wbc),
170
171	TP_STRUCT__entry (
172		__array(char, name, 32)
173		__field(unsigned long, ino)
174		__field(int, sync_mode)
175		__field(unsigned int, cgroup_ino)
176	),
177
178	TP_fast_assign(
179		strncpy(__entry->name,
180			dev_name(inode_to_bdi(inode)->dev), 32);
181		__entry->ino		= inode->i_ino;
182		__entry->sync_mode	= wbc->sync_mode;
183		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
184	),
185
186	TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%u",
187		__entry->name,
188		__entry->ino,
189		__entry->sync_mode,
190		__entry->cgroup_ino
191	)
192);
193
/* Fired just before an inode is handed to ->write_inode(). */
DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

/* Fired after ->write_inode() returns. */
DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);
207
208DECLARE_EVENT_CLASS(writeback_work_class,
209	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
210	TP_ARGS(wb, work),
211	TP_STRUCT__entry(
212		__array(char, name, 32)
213		__field(long, nr_pages)
214		__field(dev_t, sb_dev)
215		__field(int, sync_mode)
216		__field(int, for_kupdate)
217		__field(int, range_cyclic)
218		__field(int, for_background)
219		__field(int, reason)
220		__field(unsigned int, cgroup_ino)
221	),
222	TP_fast_assign(
223		strncpy(__entry->name,
224			wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32);
 
225		__entry->nr_pages = work->nr_pages;
226		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
227		__entry->sync_mode = work->sync_mode;
228		__entry->for_kupdate = work->for_kupdate;
229		__entry->range_cyclic = work->range_cyclic;
230		__entry->for_background	= work->for_background;
231		__entry->reason = work->reason;
232		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
233	),
234	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
235		  "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%u",
236		  __entry->name,
237		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
238		  __entry->nr_pages,
239		  __entry->sync_mode,
240		  __entry->for_kupdate,
241		  __entry->range_cyclic,
242		  __entry->for_background,
243		  __print_symbolic(__entry->reason, WB_WORK_REASON),
244		  __entry->cgroup_ino
245	)
246);
/* One event per wb_writeback_work lifecycle stage, sharing the class above. */
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
	TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
256
/* Reports how many pages a writeback pass actually wrote. */
TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long,		pages)
	),
	TP_fast_assign(
		__entry->pages		= pages_written;
	),
	TP_printk("%ld", __entry->pages)
);
268
269DECLARE_EVENT_CLASS(writeback_class,
270	TP_PROTO(struct bdi_writeback *wb),
271	TP_ARGS(wb),
272	TP_STRUCT__entry(
273		__array(char, name, 32)
274		__field(unsigned int, cgroup_ino)
275	),
276	TP_fast_assign(
277		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
278		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
279	),
280	TP_printk("bdi %s: cgroup_ino=%u",
281		  __entry->name,
282		  __entry->cgroup_ino
283	)
284);
/* Stamp out a wb-only event from writeback_class. */
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct bdi_writeback *wb), \
	TP_ARGS(wb))

/* Fired when background writeback is woken for a bdi. */
DEFINE_WRITEBACK_EVENT(writeback_wake_background);
291
292TRACE_EVENT(writeback_bdi_register,
293	TP_PROTO(struct backing_dev_info *bdi),
294	TP_ARGS(bdi),
295	TP_STRUCT__entry(
296		__array(char, name, 32)
297	),
298	TP_fast_assign(
299		strncpy(__entry->name, dev_name(bdi->dev), 32);
300	),
301	TP_printk("bdi %s",
302		__entry->name
303	)
304);
305
306DECLARE_EVENT_CLASS(wbc_class,
307	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
308	TP_ARGS(wbc, bdi),
309	TP_STRUCT__entry(
310		__array(char, name, 32)
311		__field(long, nr_to_write)
312		__field(long, pages_skipped)
313		__field(int, sync_mode)
314		__field(int, for_kupdate)
315		__field(int, for_background)
316		__field(int, for_reclaim)
317		__field(int, range_cyclic)
318		__field(long, range_start)
319		__field(long, range_end)
320		__field(unsigned int, cgroup_ino)
321	),
322
323	TP_fast_assign(
324		strncpy(__entry->name, dev_name(bdi->dev), 32);
325		__entry->nr_to_write	= wbc->nr_to_write;
326		__entry->pages_skipped	= wbc->pages_skipped;
327		__entry->sync_mode	= wbc->sync_mode;
328		__entry->for_kupdate	= wbc->for_kupdate;
329		__entry->for_background	= wbc->for_background;
330		__entry->for_reclaim	= wbc->for_reclaim;
331		__entry->range_cyclic	= wbc->range_cyclic;
332		__entry->range_start	= (long)wbc->range_start;
333		__entry->range_end	= (long)wbc->range_end;
334		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
335	),
336
337	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
338		"bgrd=%d reclm=%d cyclic=%d "
339		"start=0x%lx end=0x%lx cgroup_ino=%u",
340		__entry->name,
341		__entry->nr_to_write,
342		__entry->pages_skipped,
343		__entry->sync_mode,
344		__entry->for_kupdate,
345		__entry->for_background,
346		__entry->for_reclaim,
347		__entry->range_cyclic,
348		__entry->range_start,
349		__entry->range_end,
350		__entry->cgroup_ino
351	)
352)
353
/* Stamp out a wbc_class event. */
#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))

/* Fired for each page submitted by generic writepage paths. */
DEFINE_WBC_EVENT(wbc_writepage);
359
360TRACE_EVENT(writeback_queue_io,
361	TP_PROTO(struct bdi_writeback *wb,
362		 struct wb_writeback_work *work,
363		 int moved),
364	TP_ARGS(wb, work, moved),
365	TP_STRUCT__entry(
366		__array(char,		name, 32)
367		__field(unsigned long,	older)
368		__field(long,		age)
369		__field(int,		moved)
370		__field(int,		reason)
371		__field(unsigned int,	cgroup_ino)
372	),
373	TP_fast_assign(
374		unsigned long *older_than_this = work->older_than_this;
375		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
376		__entry->older	= older_than_this ?  *older_than_this : 0;
377		__entry->age	= older_than_this ?
378				  (jiffies - *older_than_this) * 1000 / HZ : -1;
379		__entry->moved	= moved;
380		__entry->reason	= work->reason;
381		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
382	),
383	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u",
384		__entry->name,
385		__entry->older,	/* older_than_this in jiffies */
386		__entry->age,	/* older_than_this in relative milliseconds */
387		__entry->moved,
388		__print_symbolic(__entry->reason, WB_WORK_REASON),
389		__entry->cgroup_ino
390	)
391);
392
/*
 * Snapshot of the global dirty-page accounting: the node page-state
 * counters are sampled at trace time, while the thresholds are passed in
 * by the dirty throttling code that computed them.
 */
TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long,	nr_dirty)
		__field(unsigned long,	nr_writeback)
		__field(unsigned long,	nr_unstable)
		__field(unsigned long,	background_thresh)
		__field(unsigned long,	dirty_thresh)
		__field(unsigned long,	dirty_limit)
		__field(unsigned long,	nr_dirtied)
		__field(unsigned long,	nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty	= global_node_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback	= global_node_page_state(NR_WRITEBACK);
		__entry->nr_unstable	= global_node_page_state(NR_UNSTABLE_NFS);
		__entry->nr_dirtied	= global_node_page_state(NR_DIRTIED);
		__entry->nr_written	= global_node_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh	= dirty_thresh;
		__entry->dirty_limit	= global_wb_domain.dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu unstable=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->nr_unstable,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);
438
/* Convert a count of pages (per second) to KB: x << (PAGE_SHIFT - 10). */
#define KBps(x)			((x) << (PAGE_SHIFT - 10))
440
441TRACE_EVENT(bdi_dirty_ratelimit,
442
443	TP_PROTO(struct bdi_writeback *wb,
444		 unsigned long dirty_rate,
445		 unsigned long task_ratelimit),
446
447	TP_ARGS(wb, dirty_rate, task_ratelimit),
448
449	TP_STRUCT__entry(
450		__array(char,		bdi, 32)
451		__field(unsigned long,	write_bw)
452		__field(unsigned long,	avg_write_bw)
453		__field(unsigned long,	dirty_rate)
454		__field(unsigned long,	dirty_ratelimit)
455		__field(unsigned long,	task_ratelimit)
456		__field(unsigned long,	balanced_dirty_ratelimit)
457		__field(unsigned int,	cgroup_ino)
458	),
459
460	TP_fast_assign(
461		strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
462		__entry->write_bw	= KBps(wb->write_bandwidth);
463		__entry->avg_write_bw	= KBps(wb->avg_write_bandwidth);
464		__entry->dirty_rate	= KBps(dirty_rate);
465		__entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
466		__entry->task_ratelimit	= KBps(task_ratelimit);
467		__entry->balanced_dirty_ratelimit =
468					KBps(wb->balanced_dirty_ratelimit);
469		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
470	),
471
472	TP_printk("bdi %s: "
473		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
474		  "dirty_ratelimit=%lu task_ratelimit=%lu "
475		  "balanced_dirty_ratelimit=%lu cgroup_ino=%u",
476		  __entry->bdi,
477		  __entry->write_bw,		/* write bandwidth */
478		  __entry->avg_write_bw,	/* avg write bandwidth */
479		  __entry->dirty_rate,		/* bdi dirty rate */
480		  __entry->dirty_ratelimit,	/* base ratelimit */
481		  __entry->task_ratelimit, /* ratelimit with position control */
482		  __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
483		  __entry->cgroup_ino
484	)
485);
486
487TRACE_EVENT(balance_dirty_pages,
488
489	TP_PROTO(struct bdi_writeback *wb,
490		 unsigned long thresh,
491		 unsigned long bg_thresh,
492		 unsigned long dirty,
493		 unsigned long bdi_thresh,
494		 unsigned long bdi_dirty,
495		 unsigned long dirty_ratelimit,
496		 unsigned long task_ratelimit,
497		 unsigned long dirtied,
498		 unsigned long period,
499		 long pause,
500		 unsigned long start_time),
501
502	TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
503		dirty_ratelimit, task_ratelimit,
504		dirtied, period, pause, start_time),
505
506	TP_STRUCT__entry(
507		__array(	 char,	bdi, 32)
508		__field(unsigned long,	limit)
509		__field(unsigned long,	setpoint)
510		__field(unsigned long,	dirty)
511		__field(unsigned long,	bdi_setpoint)
512		__field(unsigned long,	bdi_dirty)
513		__field(unsigned long,	dirty_ratelimit)
514		__field(unsigned long,	task_ratelimit)
515		__field(unsigned int,	dirtied)
516		__field(unsigned int,	dirtied_pause)
517		__field(unsigned long,	paused)
518		__field(	 long,	pause)
519		__field(unsigned long,	period)
520		__field(	 long,	think)
521		__field(unsigned int,	cgroup_ino)
522	),
523
524	TP_fast_assign(
525		unsigned long freerun = (thresh + bg_thresh) / 2;
526		strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
527
528		__entry->limit		= global_wb_domain.dirty_limit;
529		__entry->setpoint	= (global_wb_domain.dirty_limit +
530						freerun) / 2;
531		__entry->dirty		= dirty;
532		__entry->bdi_setpoint	= __entry->setpoint *
533						bdi_thresh / (thresh + 1);
534		__entry->bdi_dirty	= bdi_dirty;
535		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
536		__entry->task_ratelimit	= KBps(task_ratelimit);
537		__entry->dirtied	= dirtied;
538		__entry->dirtied_pause	= current->nr_dirtied_pause;
539		__entry->think		= current->dirty_paused_when == 0 ? 0 :
540			 (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
541		__entry->period		= period * 1000 / HZ;
542		__entry->pause		= pause * 1000 / HZ;
543		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
544		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
545	),
546
547
548	TP_printk("bdi %s: "
549		  "limit=%lu setpoint=%lu dirty=%lu "
550		  "bdi_setpoint=%lu bdi_dirty=%lu "
551		  "dirty_ratelimit=%lu task_ratelimit=%lu "
552		  "dirtied=%u dirtied_pause=%u "
553		  "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%u",
554		  __entry->bdi,
555		  __entry->limit,
556		  __entry->setpoint,
557		  __entry->dirty,
558		  __entry->bdi_setpoint,
559		  __entry->bdi_dirty,
560		  __entry->dirty_ratelimit,
561		  __entry->task_ratelimit,
562		  __entry->dirtied,
563		  __entry->dirtied_pause,
564		  __entry->paused,	/* ms */
565		  __entry->pause,	/* ms */
566		  __entry->period,	/* ms */
567		  __entry->think,	/* ms */
568		  __entry->cgroup_ino
569	  )
570);
571
572TRACE_EVENT(writeback_sb_inodes_requeue,
573
574	TP_PROTO(struct inode *inode),
575	TP_ARGS(inode),
576
577	TP_STRUCT__entry(
578		__array(char, name, 32)
579		__field(unsigned long, ino)
580		__field(unsigned long, state)
581		__field(unsigned long, dirtied_when)
582		__field(unsigned int, cgroup_ino)
583	),
584
585	TP_fast_assign(
586		strncpy(__entry->name,
587		        dev_name(inode_to_bdi(inode)->dev), 32);
588		__entry->ino		= inode->i_ino;
589		__entry->state		= inode->i_state;
590		__entry->dirtied_when	= inode->dirtied_when;
591		__entry->cgroup_ino	= __trace_wb_assign_cgroup(inode_to_wb(inode));
592	),
593
594	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%u",
595		  __entry->name,
596		  __entry->ino,
597		  show_inode_state(__entry->state),
598		  __entry->dirtied_when,
599		  (jiffies - __entry->dirtied_when) / HZ,
600		  __entry->cgroup_ino
601	)
602);
603
/*
 * Event class for congestion waits: the timeout that was requested and
 * how long the task actually slept, both in microseconds.
 */
DECLARE_EVENT_CLASS(writeback_congest_waited_template,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed),

	TP_STRUCT__entry(
		__field(	unsigned int,	usec_timeout	)
		__field(	unsigned int,	usec_delayed	)
	),

	TP_fast_assign(
		__entry->usec_timeout	= usec_timeout;
		__entry->usec_delayed	= usec_delayed;
	),

	TP_printk("usec_timeout=%u usec_delayed=%u",
			__entry->usec_timeout,
			__entry->usec_delayed)
);
624
/* Fired after congestion_wait() returns. */
DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

/* Fired after wait_iff_congested() returns. */
DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);
638
639DECLARE_EVENT_CLASS(writeback_single_inode_template,
640
641	TP_PROTO(struct inode *inode,
642		 struct writeback_control *wbc,
643		 unsigned long nr_to_write
644	),
645
646	TP_ARGS(inode, wbc, nr_to_write),
647
648	TP_STRUCT__entry(
649		__array(char, name, 32)
650		__field(unsigned long, ino)
651		__field(unsigned long, state)
652		__field(unsigned long, dirtied_when)
653		__field(unsigned long, writeback_index)
654		__field(long, nr_to_write)
655		__field(unsigned long, wrote)
656		__field(unsigned int, cgroup_ino)
657	),
658
659	TP_fast_assign(
660		strncpy(__entry->name,
661			dev_name(inode_to_bdi(inode)->dev), 32);
662		__entry->ino		= inode->i_ino;
663		__entry->state		= inode->i_state;
664		__entry->dirtied_when	= inode->dirtied_when;
665		__entry->writeback_index = inode->i_mapping->writeback_index;
666		__entry->nr_to_write	= nr_to_write;
667		__entry->wrote		= nr_to_write - wbc->nr_to_write;
668		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
669	),
670
671	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
672		  "index=%lu to_write=%ld wrote=%lu cgroup_ino=%u",
673		  __entry->name,
674		  __entry->ino,
675		  show_inode_state(__entry->state),
676		  __entry->dirtied_when,
677		  (jiffies - __entry->dirtied_when) / HZ,
678		  __entry->writeback_index,
679		  __entry->nr_to_write,
680		  __entry->wrote,
681		  __entry->cgroup_ino
682	)
683);
684
/* Fired on entry to __writeback_single_inode(). */
DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

/* Fired after the inode has been written. */
DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);
698
/*
 * Lightweight per-inode event class: identifies the inode by sb device +
 * inode number and records its state, mode and dirty timestamp.
 */
DECLARE_EVENT_CLASS(writeback_inode_template,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(unsigned long,	ino			)
		__field(unsigned long,	state			)
		__field(	__u16, mode			)
		__field(unsigned long, dirtied_when		)
	),

	TP_fast_assign(
		__entry->dev	= inode->i_sb->s_dev;
		__entry->ino	= inode->i_ino;
		__entry->state	= inode->i_state;
		__entry->mode	= inode->i_mode;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino, __entry->dirtied_when,
		  show_inode_state(__entry->state), __entry->mode)
);
725
/* Fired when a lazytime inode's timestamps are flushed. */
DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

/* Fired when a lazytime flush happens from the final iput() path. */
DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

/* Fired when a newly dirtied inode is queued for writeback. */
DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,

	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);
744
745/*
746 * Inode writeback list tracking.
747 */
748
/* Fired when an inode is added to its sb's under-writeback list. */
DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

/* Fired when an inode is removed from its sb's under-writeback list. */
DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);
758
759#endif /* _TRACE_WRITEBACK_H */
760
761/* This part must be outside protection */
762#include <trace/define_trace.h>