/*
 * NOTE (cleanup): stray web-page chrome ("Linux Audio", course banner,
 * "Loading...") removed.  What follows is include/trace/events/writeback.h
 * as captured from Linux v5.9; a second capture, from v6.13.7, begins at
 * the "v6.13.7" marker further down in this file.
 */
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#undef TRACE_SYSTEM
  3#define TRACE_SYSTEM writeback
  4
  5#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
  6#define _TRACE_WRITEBACK_H
  7
  8#include <linux/tracepoint.h>
  9#include <linux/backing-dev.h>
 10#include <linux/writeback.h>
 11
 12#define show_inode_state(state)					\
 13	__print_flags(state, "|",				\
 14		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
 15		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
 16		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
 17		{I_NEW,			"I_NEW"},		\
 18		{I_WILL_FREE,		"I_WILL_FREE"},		\
 19		{I_FREEING,		"I_FREEING"},		\
 20		{I_CLEAR,		"I_CLEAR"},		\
 21		{I_SYNC,		"I_SYNC"},		\
 22		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
 23		{I_REFERENCED,		"I_REFERENCED"}		\
 
 
 
 
 
 
 
 
 24	)
 25
 26/* enums need to be exported to user space */
 27#undef EM
 28#undef EMe
 29#define EM(a,b) 	TRACE_DEFINE_ENUM(a);
 30#define EMe(a,b)	TRACE_DEFINE_ENUM(a);
 31
 32#define WB_WORK_REASON							\
 33	EM( WB_REASON_BACKGROUND,		"background")		\
 34	EM( WB_REASON_VMSCAN,			"vmscan")		\
 35	EM( WB_REASON_SYNC,			"sync")			\
 36	EM( WB_REASON_PERIODIC,			"periodic")		\
 37	EM( WB_REASON_LAPTOP_TIMER,		"laptop_timer")		\
 38	EM( WB_REASON_FS_FREE_SPACE,		"fs_free_space")	\
 39	EMe(WB_REASON_FORKER_THREAD,		"forker_thread")
 
 40
 41WB_WORK_REASON
 42
 43/*
 44 * Now redefine the EM() and EMe() macros to map the enums to the strings
 45 * that will be printed in the output.
 46 */
 47#undef EM
 48#undef EMe
 49#define EM(a,b)		{ a, b },
 50#define EMe(a,b)	{ a, b }
 51
 52struct wb_writeback_work;
 53
 54DECLARE_EVENT_CLASS(writeback_page_template,
 55
 56	TP_PROTO(struct page *page, struct address_space *mapping),
 57
 58	TP_ARGS(page, mapping),
 59
 60	TP_STRUCT__entry (
 61		__array(char, name, 32)
 62		__field(ino_t, ino)
 63		__field(pgoff_t, index)
 64	),
 65
 66	TP_fast_assign(
 67		strscpy_pad(__entry->name,
 68			    bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
 69					 NULL), 32);
 70		__entry->ino = mapping ? mapping->host->i_ino : 0;
 71		__entry->index = page->index;
 72	),
 73
 74	TP_printk("bdi %s: ino=%lu index=%lu",
 75		__entry->name,
 76		(unsigned long)__entry->ino,
 77		__entry->index
 78	)
 79);
 80
 81DEFINE_EVENT(writeback_page_template, writeback_dirty_page,
 82
 83	TP_PROTO(struct page *page, struct address_space *mapping),
 84
 85	TP_ARGS(page, mapping)
 86);
 87
 88DEFINE_EVENT(writeback_page_template, wait_on_page_writeback,
 89
 90	TP_PROTO(struct page *page, struct address_space *mapping),
 91
 92	TP_ARGS(page, mapping)
 93);
 94
 95DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
 96
 97	TP_PROTO(struct inode *inode, int flags),
 98
 99	TP_ARGS(inode, flags),
100
101	TP_STRUCT__entry (
102		__array(char, name, 32)
103		__field(ino_t, ino)
104		__field(unsigned long, state)
105		__field(unsigned long, flags)
106	),
107
108	TP_fast_assign(
109		struct backing_dev_info *bdi = inode_to_bdi(inode);
110
111		/* may be called for files on pseudo FSes w/ unregistered bdi */
112		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
113		__entry->ino		= inode->i_ino;
114		__entry->state		= inode->i_state;
115		__entry->flags		= flags;
116	),
117
118	TP_printk("bdi %s: ino=%lu state=%s flags=%s",
119		__entry->name,
120		(unsigned long)__entry->ino,
121		show_inode_state(__entry->state),
122		show_inode_state(__entry->flags)
123	)
124);
125
126DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,
127
128	TP_PROTO(struct inode *inode, int flags),
129
130	TP_ARGS(inode, flags)
131);
132
133DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,
134
135	TP_PROTO(struct inode *inode, int flags),
136
137	TP_ARGS(inode, flags)
138);
139
140DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,
141
142	TP_PROTO(struct inode *inode, int flags),
143
144	TP_ARGS(inode, flags)
145);
146
/*
 * Tracepoint helpers: map a bdi_writeback (or the wb attached to a
 * writeback_control) to the inode number of its memcg cgroup, so trace
 * records carry a stable cgroup identifier.  Only compiled into the one
 * translation unit that defines CREATE_TRACE_POINTS.
 */
#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK

static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return cgroup_ino(wb->memcg_css->cgroup);
}

static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	/* no wb attached: fall back to 1, matching the !CGROUP stubs below */
	return wbc->wb ? __trace_wb_assign_cgroup(wbc->wb) : 1;
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return 1;
}

static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	return 1;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
#endif	/* CREATE_TRACE_POINTS */
176
177#ifdef CONFIG_CGROUP_WRITEBACK
178TRACE_EVENT(inode_foreign_history,
179
180	TP_PROTO(struct inode *inode, struct writeback_control *wbc,
181		 unsigned int history),
182
183	TP_ARGS(inode, wbc, history),
184
185	TP_STRUCT__entry(
186		__array(char,		name, 32)
187		__field(ino_t,		ino)
188		__field(ino_t,		cgroup_ino)
189		__field(unsigned int,	history)
190	),
191
192	TP_fast_assign(
193		strncpy(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
194		__entry->ino		= inode->i_ino;
195		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
196		__entry->history	= history;
197	),
198
199	TP_printk("bdi %s: ino=%lu cgroup_ino=%lu history=0x%x",
200		__entry->name,
201		(unsigned long)__entry->ino,
202		(unsigned long)__entry->cgroup_ino,
203		__entry->history
204	)
205);
206
207TRACE_EVENT(inode_switch_wbs,
208
209	TP_PROTO(struct inode *inode, struct bdi_writeback *old_wb,
210		 struct bdi_writeback *new_wb),
211
212	TP_ARGS(inode, old_wb, new_wb),
213
214	TP_STRUCT__entry(
215		__array(char,		name, 32)
216		__field(ino_t,		ino)
217		__field(ino_t,		old_cgroup_ino)
218		__field(ino_t,		new_cgroup_ino)
219	),
220
221	TP_fast_assign(
222		strncpy(__entry->name,	bdi_dev_name(old_wb->bdi), 32);
223		__entry->ino		= inode->i_ino;
224		__entry->old_cgroup_ino	= __trace_wb_assign_cgroup(old_wb);
225		__entry->new_cgroup_ino	= __trace_wb_assign_cgroup(new_wb);
226	),
227
228	TP_printk("bdi %s: ino=%lu old_cgroup_ino=%lu new_cgroup_ino=%lu",
229		__entry->name,
230		(unsigned long)__entry->ino,
231		(unsigned long)__entry->old_cgroup_ino,
232		(unsigned long)__entry->new_cgroup_ino
233	)
234);
235
236TRACE_EVENT(track_foreign_dirty,
237
238	TP_PROTO(struct page *page, struct bdi_writeback *wb),
239
240	TP_ARGS(page, wb),
241
242	TP_STRUCT__entry(
243		__array(char,		name, 32)
244		__field(u64,		bdi_id)
245		__field(ino_t,		ino)
246		__field(unsigned int,	memcg_id)
247		__field(ino_t,		cgroup_ino)
248		__field(ino_t,		page_cgroup_ino)
249	),
250
251	TP_fast_assign(
252		struct address_space *mapping = page_mapping(page);
253		struct inode *inode = mapping ? mapping->host : NULL;
254
255		strncpy(__entry->name,	bdi_dev_name(wb->bdi), 32);
256		__entry->bdi_id		= wb->bdi->id;
257		__entry->ino		= inode ? inode->i_ino : 0;
258		__entry->memcg_id	= wb->memcg_css->id;
259		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
260		__entry->page_cgroup_ino = cgroup_ino(page->mem_cgroup->css.cgroup);
261	),
262
263	TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu",
264		__entry->name,
265		__entry->bdi_id,
266		(unsigned long)__entry->ino,
267		__entry->memcg_id,
268		(unsigned long)__entry->cgroup_ino,
269		(unsigned long)__entry->page_cgroup_ino
270	)
271);
272
273TRACE_EVENT(flush_foreign,
274
275	TP_PROTO(struct bdi_writeback *wb, unsigned int frn_bdi_id,
276		 unsigned int frn_memcg_id),
277
278	TP_ARGS(wb, frn_bdi_id, frn_memcg_id),
279
280	TP_STRUCT__entry(
281		__array(char,		name, 32)
282		__field(ino_t,		cgroup_ino)
283		__field(unsigned int,	frn_bdi_id)
284		__field(unsigned int,	frn_memcg_id)
285	),
286
287	TP_fast_assign(
288		strncpy(__entry->name,	bdi_dev_name(wb->bdi), 32);
289		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
290		__entry->frn_bdi_id	= frn_bdi_id;
291		__entry->frn_memcg_id	= frn_memcg_id;
292	),
293
294	TP_printk("bdi %s: cgroup_ino=%lu frn_bdi_id=%u frn_memcg_id=%u",
295		__entry->name,
296		(unsigned long)__entry->cgroup_ino,
297		__entry->frn_bdi_id,
298		__entry->frn_memcg_id
299	)
300);
301#endif
302
303DECLARE_EVENT_CLASS(writeback_write_inode_template,
304
305	TP_PROTO(struct inode *inode, struct writeback_control *wbc),
306
307	TP_ARGS(inode, wbc),
308
309	TP_STRUCT__entry (
310		__array(char, name, 32)
311		__field(ino_t, ino)
312		__field(int, sync_mode)
313		__field(ino_t, cgroup_ino)
314	),
315
316	TP_fast_assign(
317		strscpy_pad(__entry->name,
318			    bdi_dev_name(inode_to_bdi(inode)), 32);
319		__entry->ino		= inode->i_ino;
320		__entry->sync_mode	= wbc->sync_mode;
321		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
322	),
323
324	TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%lu",
325		__entry->name,
326		(unsigned long)__entry->ino,
327		__entry->sync_mode,
328		(unsigned long)__entry->cgroup_ino
329	)
330);
331
332DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,
333
334	TP_PROTO(struct inode *inode, struct writeback_control *wbc),
335
336	TP_ARGS(inode, wbc)
337);
338
339DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,
340
341	TP_PROTO(struct inode *inode, struct writeback_control *wbc),
342
343	TP_ARGS(inode, wbc)
344);
345
346DECLARE_EVENT_CLASS(writeback_work_class,
347	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
348	TP_ARGS(wb, work),
349	TP_STRUCT__entry(
350		__array(char, name, 32)
351		__field(long, nr_pages)
352		__field(dev_t, sb_dev)
353		__field(int, sync_mode)
354		__field(int, for_kupdate)
355		__field(int, range_cyclic)
356		__field(int, for_background)
357		__field(int, reason)
358		__field(ino_t, cgroup_ino)
359	),
360	TP_fast_assign(
361		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
362		__entry->nr_pages = work->nr_pages;
363		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
364		__entry->sync_mode = work->sync_mode;
365		__entry->for_kupdate = work->for_kupdate;
366		__entry->range_cyclic = work->range_cyclic;
367		__entry->for_background	= work->for_background;
368		__entry->reason = work->reason;
369		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
370	),
371	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
372		  "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%lu",
373		  __entry->name,
374		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
375		  __entry->nr_pages,
376		  __entry->sync_mode,
377		  __entry->for_kupdate,
378		  __entry->range_cyclic,
379		  __entry->for_background,
380		  __print_symbolic(__entry->reason, WB_WORK_REASON),
381		  (unsigned long)__entry->cgroup_ino
382	)
383);
384#define DEFINE_WRITEBACK_WORK_EVENT(name) \
385DEFINE_EVENT(writeback_work_class, name, \
386	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
387	TP_ARGS(wb, work))
388DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
389DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
390DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
391DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
392DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
393
394TRACE_EVENT(writeback_pages_written,
395	TP_PROTO(long pages_written),
396	TP_ARGS(pages_written),
397	TP_STRUCT__entry(
398		__field(long,		pages)
399	),
400	TP_fast_assign(
401		__entry->pages		= pages_written;
402	),
403	TP_printk("%ld", __entry->pages)
404);
405
406DECLARE_EVENT_CLASS(writeback_class,
407	TP_PROTO(struct bdi_writeback *wb),
408	TP_ARGS(wb),
409	TP_STRUCT__entry(
410		__array(char, name, 32)
411		__field(ino_t, cgroup_ino)
412	),
413	TP_fast_assign(
414		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
415		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
416	),
417	TP_printk("bdi %s: cgroup_ino=%lu",
418		  __entry->name,
419		  (unsigned long)__entry->cgroup_ino
420	)
421);
422#define DEFINE_WRITEBACK_EVENT(name) \
423DEFINE_EVENT(writeback_class, name, \
424	TP_PROTO(struct bdi_writeback *wb), \
425	TP_ARGS(wb))
426
427DEFINE_WRITEBACK_EVENT(writeback_wake_background);
428
429TRACE_EVENT(writeback_bdi_register,
430	TP_PROTO(struct backing_dev_info *bdi),
431	TP_ARGS(bdi),
432	TP_STRUCT__entry(
433		__array(char, name, 32)
434	),
435	TP_fast_assign(
436		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
437	),
438	TP_printk("bdi %s",
439		__entry->name
440	)
441);
442
443DECLARE_EVENT_CLASS(wbc_class,
444	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
445	TP_ARGS(wbc, bdi),
446	TP_STRUCT__entry(
447		__array(char, name, 32)
448		__field(long, nr_to_write)
449		__field(long, pages_skipped)
450		__field(int, sync_mode)
451		__field(int, for_kupdate)
452		__field(int, for_background)
453		__field(int, for_reclaim)
454		__field(int, range_cyclic)
455		__field(long, range_start)
456		__field(long, range_end)
457		__field(ino_t, cgroup_ino)
458	),
459
460	TP_fast_assign(
461		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
462		__entry->nr_to_write	= wbc->nr_to_write;
463		__entry->pages_skipped	= wbc->pages_skipped;
464		__entry->sync_mode	= wbc->sync_mode;
465		__entry->for_kupdate	= wbc->for_kupdate;
466		__entry->for_background	= wbc->for_background;
467		__entry->for_reclaim	= wbc->for_reclaim;
468		__entry->range_cyclic	= wbc->range_cyclic;
469		__entry->range_start	= (long)wbc->range_start;
470		__entry->range_end	= (long)wbc->range_end;
471		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
472	),
473
474	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
475		"bgrd=%d reclm=%d cyclic=%d "
476		"start=0x%lx end=0x%lx cgroup_ino=%lu",
477		__entry->name,
478		__entry->nr_to_write,
479		__entry->pages_skipped,
480		__entry->sync_mode,
481		__entry->for_kupdate,
482		__entry->for_background,
483		__entry->for_reclaim,
484		__entry->range_cyclic,
485		__entry->range_start,
486		__entry->range_end,
487		(unsigned long)__entry->cgroup_ino
488	)
489)
490
491#define DEFINE_WBC_EVENT(name) \
492DEFINE_EVENT(wbc_class, name, \
493	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
494	TP_ARGS(wbc, bdi))
495DEFINE_WBC_EVENT(wbc_writepage);
496
497TRACE_EVENT(writeback_queue_io,
498	TP_PROTO(struct bdi_writeback *wb,
499		 struct wb_writeback_work *work,
500		 unsigned long dirtied_before,
501		 int moved),
502	TP_ARGS(wb, work, dirtied_before, moved),
503	TP_STRUCT__entry(
504		__array(char,		name, 32)
505		__field(unsigned long,	older)
506		__field(long,		age)
507		__field(int,		moved)
508		__field(int,		reason)
509		__field(ino_t,		cgroup_ino)
510	),
511	TP_fast_assign(
512		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
513		__entry->older	= dirtied_before;
514		__entry->age	= (jiffies - dirtied_before) * 1000 / HZ;
515		__entry->moved	= moved;
516		__entry->reason	= work->reason;
517		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
518	),
519	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu",
520		__entry->name,
521		__entry->older,	/* dirtied_before in jiffies */
522		__entry->age,	/* dirtied_before in relative milliseconds */
523		__entry->moved,
524		__print_symbolic(__entry->reason, WB_WORK_REASON),
525		(unsigned long)__entry->cgroup_ino
526	)
527);
528
529TRACE_EVENT(global_dirty_state,
530
531	TP_PROTO(unsigned long background_thresh,
532		 unsigned long dirty_thresh
533	),
534
535	TP_ARGS(background_thresh,
536		dirty_thresh
537	),
538
539	TP_STRUCT__entry(
540		__field(unsigned long,	nr_dirty)
541		__field(unsigned long,	nr_writeback)
542		__field(unsigned long,	background_thresh)
543		__field(unsigned long,	dirty_thresh)
544		__field(unsigned long,	dirty_limit)
545		__field(unsigned long,	nr_dirtied)
546		__field(unsigned long,	nr_written)
547	),
548
549	TP_fast_assign(
550		__entry->nr_dirty	= global_node_page_state(NR_FILE_DIRTY);
551		__entry->nr_writeback	= global_node_page_state(NR_WRITEBACK);
552		__entry->nr_dirtied	= global_node_page_state(NR_DIRTIED);
553		__entry->nr_written	= global_node_page_state(NR_WRITTEN);
554		__entry->background_thresh = background_thresh;
555		__entry->dirty_thresh	= dirty_thresh;
556		__entry->dirty_limit	= global_wb_domain.dirty_limit;
557	),
558
559	TP_printk("dirty=%lu writeback=%lu "
560		  "bg_thresh=%lu thresh=%lu limit=%lu "
561		  "dirtied=%lu written=%lu",
562		  __entry->nr_dirty,
563		  __entry->nr_writeback,
564		  __entry->background_thresh,
565		  __entry->dirty_thresh,
566		  __entry->dirty_limit,
567		  __entry->nr_dirtied,
568		  __entry->nr_written
569	)
570);
571
572#define KBps(x)			((x) << (PAGE_SHIFT - 10))
573
574TRACE_EVENT(bdi_dirty_ratelimit,
575
576	TP_PROTO(struct bdi_writeback *wb,
577		 unsigned long dirty_rate,
578		 unsigned long task_ratelimit),
579
580	TP_ARGS(wb, dirty_rate, task_ratelimit),
581
582	TP_STRUCT__entry(
583		__array(char,		bdi, 32)
584		__field(unsigned long,	write_bw)
585		__field(unsigned long,	avg_write_bw)
586		__field(unsigned long,	dirty_rate)
587		__field(unsigned long,	dirty_ratelimit)
588		__field(unsigned long,	task_ratelimit)
589		__field(unsigned long,	balanced_dirty_ratelimit)
590		__field(ino_t,		cgroup_ino)
591	),
592
593	TP_fast_assign(
594		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
595		__entry->write_bw	= KBps(wb->write_bandwidth);
596		__entry->avg_write_bw	= KBps(wb->avg_write_bandwidth);
597		__entry->dirty_rate	= KBps(dirty_rate);
598		__entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
599		__entry->task_ratelimit	= KBps(task_ratelimit);
600		__entry->balanced_dirty_ratelimit =
601					KBps(wb->balanced_dirty_ratelimit);
602		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
603	),
604
605	TP_printk("bdi %s: "
606		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
607		  "dirty_ratelimit=%lu task_ratelimit=%lu "
608		  "balanced_dirty_ratelimit=%lu cgroup_ino=%lu",
609		  __entry->bdi,
610		  __entry->write_bw,		/* write bandwidth */
611		  __entry->avg_write_bw,	/* avg write bandwidth */
612		  __entry->dirty_rate,		/* bdi dirty rate */
613		  __entry->dirty_ratelimit,	/* base ratelimit */
614		  __entry->task_ratelimit, /* ratelimit with position control */
615		  __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
616		  (unsigned long)__entry->cgroup_ino
617	)
618);
619
620TRACE_EVENT(balance_dirty_pages,
621
622	TP_PROTO(struct bdi_writeback *wb,
623		 unsigned long thresh,
624		 unsigned long bg_thresh,
625		 unsigned long dirty,
626		 unsigned long bdi_thresh,
627		 unsigned long bdi_dirty,
628		 unsigned long dirty_ratelimit,
629		 unsigned long task_ratelimit,
630		 unsigned long dirtied,
631		 unsigned long period,
632		 long pause,
633		 unsigned long start_time),
634
635	TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
636		dirty_ratelimit, task_ratelimit,
637		dirtied, period, pause, start_time),
638
639	TP_STRUCT__entry(
640		__array(	 char,	bdi, 32)
641		__field(unsigned long,	limit)
642		__field(unsigned long,	setpoint)
643		__field(unsigned long,	dirty)
644		__field(unsigned long,	bdi_setpoint)
645		__field(unsigned long,	bdi_dirty)
646		__field(unsigned long,	dirty_ratelimit)
647		__field(unsigned long,	task_ratelimit)
648		__field(unsigned int,	dirtied)
649		__field(unsigned int,	dirtied_pause)
650		__field(unsigned long,	paused)
651		__field(	 long,	pause)
652		__field(unsigned long,	period)
653		__field(	 long,	think)
654		__field(ino_t,		cgroup_ino)
655	),
656
657	TP_fast_assign(
658		unsigned long freerun = (thresh + bg_thresh) / 2;
659		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
660
661		__entry->limit		= global_wb_domain.dirty_limit;
662		__entry->setpoint	= (global_wb_domain.dirty_limit +
663						freerun) / 2;
664		__entry->dirty		= dirty;
665		__entry->bdi_setpoint	= __entry->setpoint *
666						bdi_thresh / (thresh + 1);
667		__entry->bdi_dirty	= bdi_dirty;
668		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
669		__entry->task_ratelimit	= KBps(task_ratelimit);
670		__entry->dirtied	= dirtied;
671		__entry->dirtied_pause	= current->nr_dirtied_pause;
672		__entry->think		= current->dirty_paused_when == 0 ? 0 :
673			 (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
674		__entry->period		= period * 1000 / HZ;
675		__entry->pause		= pause * 1000 / HZ;
676		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
677		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
678	),
679
680
681	TP_printk("bdi %s: "
682		  "limit=%lu setpoint=%lu dirty=%lu "
683		  "bdi_setpoint=%lu bdi_dirty=%lu "
684		  "dirty_ratelimit=%lu task_ratelimit=%lu "
685		  "dirtied=%u dirtied_pause=%u "
686		  "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu",
687		  __entry->bdi,
688		  __entry->limit,
689		  __entry->setpoint,
690		  __entry->dirty,
691		  __entry->bdi_setpoint,
692		  __entry->bdi_dirty,
693		  __entry->dirty_ratelimit,
694		  __entry->task_ratelimit,
695		  __entry->dirtied,
696		  __entry->dirtied_pause,
697		  __entry->paused,	/* ms */
698		  __entry->pause,	/* ms */
699		  __entry->period,	/* ms */
700		  __entry->think,	/* ms */
701		  (unsigned long)__entry->cgroup_ino
702	  )
703);
704
705TRACE_EVENT(writeback_sb_inodes_requeue,
706
707	TP_PROTO(struct inode *inode),
708	TP_ARGS(inode),
709
710	TP_STRUCT__entry(
711		__array(char, name, 32)
712		__field(ino_t, ino)
713		__field(unsigned long, state)
714		__field(unsigned long, dirtied_when)
715		__field(ino_t, cgroup_ino)
716	),
717
718	TP_fast_assign(
719		strscpy_pad(__entry->name,
720			    bdi_dev_name(inode_to_bdi(inode)), 32);
721		__entry->ino		= inode->i_ino;
722		__entry->state		= inode->i_state;
723		__entry->dirtied_when	= inode->dirtied_when;
724		__entry->cgroup_ino	= __trace_wb_assign_cgroup(inode_to_wb(inode));
725	),
726
727	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%lu",
728		  __entry->name,
729		  (unsigned long)__entry->ino,
730		  show_inode_state(__entry->state),
731		  __entry->dirtied_when,
732		  (jiffies - __entry->dirtied_when) / HZ,
733		  (unsigned long)__entry->cgroup_ino
734	)
735);
736
737DECLARE_EVENT_CLASS(writeback_congest_waited_template,
738
739	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
740
741	TP_ARGS(usec_timeout, usec_delayed),
742
743	TP_STRUCT__entry(
744		__field(	unsigned int,	usec_timeout	)
745		__field(	unsigned int,	usec_delayed	)
746	),
747
748	TP_fast_assign(
749		__entry->usec_timeout	= usec_timeout;
750		__entry->usec_delayed	= usec_delayed;
751	),
752
753	TP_printk("usec_timeout=%u usec_delayed=%u",
754			__entry->usec_timeout,
755			__entry->usec_delayed)
756);
757
758DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,
759
760	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
761
762	TP_ARGS(usec_timeout, usec_delayed)
763);
764
765DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,
766
767	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
768
769	TP_ARGS(usec_timeout, usec_delayed)
770);
771
772DECLARE_EVENT_CLASS(writeback_single_inode_template,
773
774	TP_PROTO(struct inode *inode,
775		 struct writeback_control *wbc,
776		 unsigned long nr_to_write
777	),
778
779	TP_ARGS(inode, wbc, nr_to_write),
780
781	TP_STRUCT__entry(
782		__array(char, name, 32)
783		__field(ino_t, ino)
784		__field(unsigned long, state)
785		__field(unsigned long, dirtied_when)
786		__field(unsigned long, writeback_index)
787		__field(long, nr_to_write)
788		__field(unsigned long, wrote)
789		__field(ino_t, cgroup_ino)
790	),
791
792	TP_fast_assign(
793		strscpy_pad(__entry->name,
794			    bdi_dev_name(inode_to_bdi(inode)), 32);
795		__entry->ino		= inode->i_ino;
796		__entry->state		= inode->i_state;
797		__entry->dirtied_when	= inode->dirtied_when;
798		__entry->writeback_index = inode->i_mapping->writeback_index;
799		__entry->nr_to_write	= nr_to_write;
800		__entry->wrote		= nr_to_write - wbc->nr_to_write;
801		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
802	),
803
804	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
805		  "index=%lu to_write=%ld wrote=%lu cgroup_ino=%lu",
806		  __entry->name,
807		  (unsigned long)__entry->ino,
808		  show_inode_state(__entry->state),
809		  __entry->dirtied_when,
810		  (jiffies - __entry->dirtied_when) / HZ,
811		  __entry->writeback_index,
812		  __entry->nr_to_write,
813		  __entry->wrote,
814		  (unsigned long)__entry->cgroup_ino
815	)
816);
817
818DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
819	TP_PROTO(struct inode *inode,
820		 struct writeback_control *wbc,
821		 unsigned long nr_to_write),
822	TP_ARGS(inode, wbc, nr_to_write)
823);
824
825DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
826	TP_PROTO(struct inode *inode,
827		 struct writeback_control *wbc,
828		 unsigned long nr_to_write),
829	TP_ARGS(inode, wbc, nr_to_write)
830);
831
832DECLARE_EVENT_CLASS(writeback_inode_template,
833	TP_PROTO(struct inode *inode),
834
835	TP_ARGS(inode),
836
837	TP_STRUCT__entry(
838		__field(	dev_t,	dev			)
839		__field(	ino_t,	ino			)
840		__field(unsigned long,	state			)
841		__field(	__u16, mode			)
842		__field(unsigned long, dirtied_when		)
843	),
844
845	TP_fast_assign(
846		__entry->dev	= inode->i_sb->s_dev;
847		__entry->ino	= inode->i_ino;
848		__entry->state	= inode->i_state;
849		__entry->mode	= inode->i_mode;
850		__entry->dirtied_when = inode->dirtied_when;
851	),
852
853	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
854		  MAJOR(__entry->dev), MINOR(__entry->dev),
855		  (unsigned long)__entry->ino, __entry->dirtied_when,
856		  show_inode_state(__entry->state), __entry->mode)
857);
858
859DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
860	TP_PROTO(struct inode *inode),
861
862	TP_ARGS(inode)
863);
864
865DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput,
866	TP_PROTO(struct inode *inode),
867
868	TP_ARGS(inode)
869);
870
871DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,
872
873	TP_PROTO(struct inode *inode),
874
875	TP_ARGS(inode)
876);
877
878/*
879 * Inode writeback list tracking.
880 */
881
882DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback,
883	TP_PROTO(struct inode *inode),
884	TP_ARGS(inode)
885);
886
887DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback,
888	TP_PROTO(struct inode *inode),
889	TP_ARGS(inode)
890);
891
892#endif /* _TRACE_WRITEBACK_H */
893
894/* This part must be outside protection */
895#include <trace/define_trace.h>
/*
 * Second capture begins here: include/trace/events/writeback.h as of
 * Linux v6.13.7 (this copy is truncated at the end of the file).
 */
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#undef TRACE_SYSTEM
  3#define TRACE_SYSTEM writeback
  4
  5#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
  6#define _TRACE_WRITEBACK_H
  7
  8#include <linux/tracepoint.h>
  9#include <linux/backing-dev.h>
 10#include <linux/writeback.h>
 11
 12#define show_inode_state(state)					\
 13	__print_flags(state, "|",				\
 14		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
 15		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
 16		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
 17		{I_NEW,			"I_NEW"},		\
 18		{I_WILL_FREE,		"I_WILL_FREE"},		\
 19		{I_FREEING,		"I_FREEING"},		\
 20		{I_CLEAR,		"I_CLEAR"},		\
 21		{I_SYNC,		"I_SYNC"},		\
 22		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
 23		{I_REFERENCED,		"I_REFERENCED"},	\
 24		{I_LINKABLE,		"I_LINKABLE"},		\
 25		{I_WB_SWITCH,		"I_WB_SWITCH"},		\
 26		{I_OVL_INUSE,		"I_OVL_INUSE"},		\
 27		{I_CREATING,		"I_CREATING"},		\
 28		{I_DONTCACHE,		"I_DONTCACHE"},		\
 29		{I_SYNC_QUEUED,		"I_SYNC_QUEUED"},	\
 30		{I_PINNING_NETFS_WB,	"I_PINNING_NETFS_WB"},	\
 31		{I_LRU_ISOLATING,	"I_LRU_ISOLATING"}	\
 32	)
 33
 34/* enums need to be exported to user space */
 35#undef EM
 36#undef EMe
 37#define EM(a,b) 	TRACE_DEFINE_ENUM(a);
 38#define EMe(a,b)	TRACE_DEFINE_ENUM(a);
 39
 40#define WB_WORK_REASON							\
 41	EM( WB_REASON_BACKGROUND,		"background")		\
 42	EM( WB_REASON_VMSCAN,			"vmscan")		\
 43	EM( WB_REASON_SYNC,			"sync")			\
 44	EM( WB_REASON_PERIODIC,			"periodic")		\
 45	EM( WB_REASON_LAPTOP_TIMER,		"laptop_timer")		\
 46	EM( WB_REASON_FS_FREE_SPACE,		"fs_free_space")	\
 47	EM( WB_REASON_FORKER_THREAD,		"forker_thread")	\
 48	EMe(WB_REASON_FOREIGN_FLUSH,		"foreign_flush")
 49
 50WB_WORK_REASON
 51
 52/*
 53 * Now redefine the EM() and EMe() macros to map the enums to the strings
 54 * that will be printed in the output.
 55 */
 56#undef EM
 57#undef EMe
 58#define EM(a,b)		{ a, b },
 59#define EMe(a,b)	{ a, b }
 60
 61struct wb_writeback_work;
 62
 63DECLARE_EVENT_CLASS(writeback_folio_template,
 64
 65	TP_PROTO(struct folio *folio, struct address_space *mapping),
 66
 67	TP_ARGS(folio, mapping),
 68
 69	TP_STRUCT__entry (
 70		__array(char, name, 32)
 71		__field(ino_t, ino)
 72		__field(pgoff_t, index)
 73	),
 74
 75	TP_fast_assign(
 76		strscpy_pad(__entry->name,
 77			    bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
 78					 NULL), 32);
 79		__entry->ino = (mapping && mapping->host) ? mapping->host->i_ino : 0;
 80		__entry->index = folio->index;
 81	),
 82
 83	TP_printk("bdi %s: ino=%lu index=%lu",
 84		__entry->name,
 85		(unsigned long)__entry->ino,
 86		__entry->index
 87	)
 88);
 89
 90DEFINE_EVENT(writeback_folio_template, writeback_dirty_folio,
 91
 92	TP_PROTO(struct folio *folio, struct address_space *mapping),
 93
 94	TP_ARGS(folio, mapping)
 95);
 96
 97DEFINE_EVENT(writeback_folio_template, folio_wait_writeback,
 98
 99	TP_PROTO(struct folio *folio, struct address_space *mapping),
100
101	TP_ARGS(folio, mapping)
102);
103
104DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
105
106	TP_PROTO(struct inode *inode, int flags),
107
108	TP_ARGS(inode, flags),
109
110	TP_STRUCT__entry (
111		__array(char, name, 32)
112		__field(ino_t, ino)
113		__field(unsigned long, state)
114		__field(unsigned long, flags)
115	),
116
117	TP_fast_assign(
118		struct backing_dev_info *bdi = inode_to_bdi(inode);
119
120		/* may be called for files on pseudo FSes w/ unregistered bdi */
121		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
122		__entry->ino		= inode->i_ino;
123		__entry->state		= inode->i_state;
124		__entry->flags		= flags;
125	),
126
127	TP_printk("bdi %s: ino=%lu state=%s flags=%s",
128		__entry->name,
129		(unsigned long)__entry->ino,
130		show_inode_state(__entry->state),
131		show_inode_state(__entry->flags)
132	)
133);
134
135DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,
136
137	TP_PROTO(struct inode *inode, int flags),
138
139	TP_ARGS(inode, flags)
140);
141
142DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,
143
144	TP_PROTO(struct inode *inode, int flags),
145
146	TP_ARGS(inode, flags)
147);
148
149DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,
150
151	TP_PROTO(struct inode *inode, int flags),
152
153	TP_ARGS(inode, flags)
154);
155
156#ifdef CREATE_TRACE_POINTS
157#ifdef CONFIG_CGROUP_WRITEBACK
158
159static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
160{
161	return cgroup_ino(wb->memcg_css->cgroup);
162}
163
164static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
165{
166	if (wbc->wb)
167		return __trace_wb_assign_cgroup(wbc->wb);
168	else
169		return 1;
170}
171#else	/* CONFIG_CGROUP_WRITEBACK */
172
173static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
174{
175	return 1;
176}
177
178static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
179{
180	return 1;
181}
182
183#endif	/* CONFIG_CGROUP_WRITEBACK */
184#endif	/* CREATE_TRACE_POINTS */
185
186#ifdef CONFIG_CGROUP_WRITEBACK
/*
 * Logs the bdi device name, inode number, the cgroup issuing the
 * writeback (resolved from @wbc) and a caller-supplied foreign-write
 * 'history' bitmap.  Only compiled when CONFIG_CGROUP_WRITEBACK is set.
 */
TRACE_EVENT(inode_foreign_history,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc,
		 unsigned int history),

	TP_ARGS(inode, wbc, history),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		ino)
		__field(ino_t,		cgroup_ino)
		__field(unsigned int,	history)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
		__entry->history	= history;
	),

	TP_printk("bdi %s: ino=%lu cgroup_ino=%lu history=0x%x",
		__entry->name,
		(unsigned long)__entry->ino,
		(unsigned long)__entry->cgroup_ino,
		__entry->history
	)
);
215
/*
 * Fired when an inode is moved from one bdi_writeback to another;
 * records the cgroup inode numbers of both the old and the new wb
 * (bdi name is taken from the old wb).
 */
TRACE_EVENT(inode_switch_wbs,

	TP_PROTO(struct inode *inode, struct bdi_writeback *old_wb,
		 struct bdi_writeback *new_wb),

	TP_ARGS(inode, old_wb, new_wb),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		ino)
		__field(ino_t,		old_cgroup_ino)
		__field(ino_t,		new_cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
		__entry->ino		= inode->i_ino;
		__entry->old_cgroup_ino	= __trace_wb_assign_cgroup(old_wb);
		__entry->new_cgroup_ino	= __trace_wb_assign_cgroup(new_wb);
	),

	TP_printk("bdi %s: ino=%lu old_cgroup_ino=%lu new_cgroup_ino=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		(unsigned long)__entry->old_cgroup_ino,
		(unsigned long)__entry->new_cgroup_ino
	)
);
244
/*
 * Records both the cgroup of the wb a folio is being dirtied against and
 * the cgroup owning the folio's memcg — presumably fired when the two
 * differ (foreign dirtying); confirm at the call site.  ino is 0 for
 * folios with no mapping.
 */
TRACE_EVENT(track_foreign_dirty,

	TP_PROTO(struct folio *folio, struct bdi_writeback *wb),

	TP_ARGS(folio, wb),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(u64,		bdi_id)
		__field(ino_t,		ino)
		__field(unsigned int,	memcg_id)
		__field(ino_t,		cgroup_ino)
		__field(ino_t,		page_cgroup_ino)
	),

	TP_fast_assign(
		struct address_space *mapping = folio_mapping(folio);
		struct inode *inode = mapping ? mapping->host : NULL;

		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->bdi_id		= wb->bdi->id;
		__entry->ino		= inode ? inode->i_ino : 0;
		__entry->memcg_id	= wb->memcg_css->id;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
		__entry->page_cgroup_ino = cgroup_ino(folio_memcg(folio)->css.cgroup);
	),

	TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu",
		__entry->name,
		__entry->bdi_id,
		(unsigned long)__entry->ino,
		__entry->memcg_id,
		(unsigned long)__entry->cgroup_ino,
		(unsigned long)__entry->page_cgroup_ino
	)
);
281
/*
 * Logs a foreign-writeback flush request: the wb doing the flushing
 * (name + cgroup_ino) and the foreign bdi/memcg ids being flushed.
 */
TRACE_EVENT(flush_foreign,

	TP_PROTO(struct bdi_writeback *wb, unsigned int frn_bdi_id,
		 unsigned int frn_memcg_id),

	TP_ARGS(wb, frn_bdi_id, frn_memcg_id),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		cgroup_ino)
		__field(unsigned int,	frn_bdi_id)
		__field(unsigned int,	frn_memcg_id)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
		__entry->frn_bdi_id	= frn_bdi_id;
		__entry->frn_memcg_id	= frn_memcg_id;
	),

	TP_printk("bdi %s: cgroup_ino=%lu frn_bdi_id=%u frn_memcg_id=%u",
		__entry->name,
		(unsigned long)__entry->cgroup_ino,
		__entry->frn_bdi_id,
		__entry->frn_memcg_id
	)
);
310#endif
311
/*
 * Event class for per-inode ->write_inode() tracing: logs bdi name,
 * inode number, the wbc sync mode and the issuing cgroup.
 */
DECLARE_EVENT_CLASS(writeback_write_inode_template,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(int, sync_mode)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->sync_mode	= wbc->sync_mode;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		__entry->sync_mode,
		(unsigned long)__entry->cgroup_ino
	)
);
340
/* Instances of writeback_write_inode_template: before and after write_inode. */
DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);
354
/*
 * Event class snapshotting a wb_writeback_work item: target page count,
 * originating superblock device, sync/kupdate/background flags and the
 * symbolic WB_WORK_REASON that queued the work.
 */
DECLARE_EVENT_CLASS(writeback_work_class,
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
	TP_ARGS(wb, work),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_pages)
		__field(dev_t, sb_dev)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, range_cyclic)
		__field(int, for_background)
		__field(int, reason)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->nr_pages = work->nr_pages;
		/* 0 when the work is not bound to a specific superblock */
		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
		__entry->sync_mode = work->sync_mode;
		__entry->for_kupdate = work->for_kupdate;
		__entry->range_cyclic = work->range_cyclic;
		__entry->for_background	= work->for_background;
		__entry->reason = work->reason;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
		  "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%lu",
		  __entry->name,
		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
		  __entry->nr_pages,
		  __entry->sync_mode,
		  __entry->for_kupdate,
		  __entry->range_cyclic,
		  __entry->for_background,
		  __print_symbolic(__entry->reason, WB_WORK_REASON),
		  (unsigned long)__entry->cgroup_ino
	)
);
/*
 * Instances of writeback_work_class covering a work item's lifecycle:
 * queued, picked up for execution, started, finished, and waited on.
 */
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
	TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
402
/* Logs how many pages a writeback pass reported as written. */
TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long,		pages)
	),
	TP_fast_assign(
		__entry->pages		= pages_written;
	),
	TP_printk("%ld", __entry->pages)
);
414
/*
 * Minimal per-wb event class: only the bdi device name and the wb's
 * cgroup inode number.
 */
DECLARE_EVENT_CLASS(writeback_class,
	TP_PROTO(struct bdi_writeback *wb),
	TP_ARGS(wb),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->cgroup_ino
	)
);
/* Shorthand for defining writeback_class instances. */
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct bdi_writeback *wb), \
	TP_ARGS(wb))

DEFINE_WRITEBACK_EVENT(writeback_wake_background);
437
/* Fired when a backing_dev_info is registered; logs only its device name. */
TRACE_EVENT(writeback_bdi_register,
	TP_PROTO(struct backing_dev_info *bdi),
	TP_ARGS(bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
	),
	TP_printk("bdi %s",
		__entry->name
	)
);
451
/*
 * Event class dumping the full writeback_control state for a bdi:
 * remaining/skipped page budget, mode flags and the byte range being
 * written (range_start/range_end are truncated to long for display).
 */
DECLARE_EVENT_CLASS(wbc_class,
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
	TP_ARGS(wbc, bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_to_write)
		__field(long, pages_skipped)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, for_background)
		__field(int, for_reclaim)
		__field(int, range_cyclic)
		__field(long, range_start)
		__field(long, range_end)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
		__entry->nr_to_write	= wbc->nr_to_write;
		__entry->pages_skipped	= wbc->pages_skipped;
		__entry->sync_mode	= wbc->sync_mode;
		__entry->for_kupdate	= wbc->for_kupdate;
		__entry->for_background	= wbc->for_background;
		__entry->for_reclaim	= wbc->for_reclaim;
		__entry->range_cyclic	= wbc->range_cyclic;
		__entry->range_start	= (long)wbc->range_start;
		__entry->range_end	= (long)wbc->range_end;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
		"bgrd=%d reclm=%d cyclic=%d "
		"start=0x%lx end=0x%lx cgroup_ino=%lu",
		__entry->name,
		__entry->nr_to_write,
		__entry->pages_skipped,
		__entry->sync_mode,
		__entry->for_kupdate,
		__entry->for_background,
		__entry->for_reclaim,
		__entry->range_cyclic,
		__entry->range_start,
		__entry->range_end,
		(unsigned long)__entry->cgroup_ino
	)
)
499
/* Shorthand for defining wbc_class instances. */
#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);
505
/*
 * Fired when dirty inodes are moved onto the IO list: records the
 * dirtied_before cutoff (raw jiffies and as an age in ms), how many
 * inodes were moved, and the reason of the triggering work item.
 */
TRACE_EVENT(writeback_queue_io,
	TP_PROTO(struct bdi_writeback *wb,
		 struct wb_writeback_work *work,
		 unsigned long dirtied_before,
		 int moved),
	TP_ARGS(wb, work, dirtied_before, moved),
	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(unsigned long,	older)
		__field(long,		age)
		__field(int,		moved)
		__field(int,		reason)
		__field(ino_t,		cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->older	= dirtied_before;
		__entry->age	= (jiffies - dirtied_before) * 1000 / HZ;
		__entry->moved	= moved;
		__entry->reason	= work->reason;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu",
		__entry->name,
		__entry->older,	/* dirtied_before in jiffies */
		__entry->age,	/* dirtied_before in relative milliseconds */
		__entry->moved,
		__print_symbolic(__entry->reason, WB_WORK_REASON),
		(unsigned long)__entry->cgroup_ino
	)
);
537
/*
 * Snapshot of the system-wide dirty state: global node page-state
 * counters plus the thresholds passed in by the caller and the current
 * global dirty limit.
 */
TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long,	nr_dirty)
		__field(unsigned long,	nr_writeback)
		__field(unsigned long,	background_thresh)
		__field(unsigned long,	dirty_thresh)
		__field(unsigned long,	dirty_limit)
		__field(unsigned long,	nr_dirtied)
		__field(unsigned long,	nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty	= global_node_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback	= global_node_page_state(NR_WRITEBACK);
		__entry->nr_dirtied	= global_node_page_state(NR_DIRTIED);
		__entry->nr_written	= global_node_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh	= dirty_thresh;
		__entry->dirty_limit	= global_wb_domain.dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);
580
/* Convert a page count to kilobytes (used below for bandwidth/ratelimit values). */
#define KBps(x)			((x) << (PAGE_SHIFT - 10))
582
/*
 * Dumps the dirty-throttling rate state of a wb: measured and averaged
 * write bandwidth, the current dirty rate, and the various ratelimits
 * derived from them — all converted to KB via KBps().
 */
TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(wb, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char,		bdi, 32)
		__field(unsigned long,	write_bw)
		__field(unsigned long,	avg_write_bw)
		__field(unsigned long,	dirty_rate)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned long,	balanced_dirty_ratelimit)
		__field(ino_t,		cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
		__entry->write_bw	= KBps(wb->write_bandwidth);
		__entry->avg_write_bw	= KBps(wb->avg_write_bandwidth);
		__entry->dirty_rate	= KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					KBps(wb->balanced_dirty_ratelimit);
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu cgroup_ino=%lu",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit, /* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
		  (unsigned long)__entry->cgroup_ino
	)
);
628
/*
 * The main dirty-throttling event: captures the global limit/setpoint,
 * the per-bdi setpoint and dirty count, the applied ratelimits, and the
 * pause bookkeeping of the current task.  Jiffies-based durations are
 * converted to milliseconds in TP_fast_assign.
 */
TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long thresh,
		 unsigned long bg_thresh,
		 unsigned long dirty,
		 unsigned long bdi_thresh,
		 unsigned long bdi_dirty,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 unsigned long period,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
		dirty_ratelimit, task_ratelimit,
		dirtied, period, pause, start_time),

	TP_STRUCT__entry(
		__array(	 char,	bdi, 32)
		__field(unsigned long,	limit)
		__field(unsigned long,	setpoint)
		__field(unsigned long,	dirty)
		__field(unsigned long,	bdi_setpoint)
		__field(unsigned long,	bdi_dirty)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned int,	dirtied)
		__field(unsigned int,	dirtied_pause)
		__field(unsigned long,	paused)
		__field(	 long,	pause)
		__field(unsigned long,	period)
		__field(	 long,	think)
		__field(ino_t,		cgroup_ino)
	),

	TP_fast_assign(
		unsigned long freerun = (thresh + bg_thresh) / 2;
		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);

		__entry->limit		= global_wb_domain.dirty_limit;
		/* setpoint: midway between the freerun ceiling and the limit */
		__entry->setpoint	= (global_wb_domain.dirty_limit +
						freerun) / 2;
		__entry->dirty		= dirty;
		/* scale the global setpoint by this bdi's share (+1 avoids /0) */
		__entry->bdi_setpoint	= __entry->setpoint *
						bdi_thresh / (thresh + 1);
		__entry->bdi_dirty	= bdi_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->dirtied	= dirtied;
		__entry->dirtied_pause	= current->nr_dirtied_pause;
		/* time since the task's last throttle pause, 0 if never paused */
		__entry->think		= current->dirty_paused_when == 0 ? 0 :
			 (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
		__entry->period		= period * 1000 / HZ;
		__entry->pause		= pause * 1000 / HZ;
		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),


	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "bdi_setpoint=%lu bdi_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->bdi_setpoint,
		  __entry->bdi_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause,	/* ms */
		  __entry->period,	/* ms */
		  __entry->think,	/* ms */
		  (unsigned long)__entry->cgroup_ino
	  )
);
713
/*
 * Fired when an inode is requeued during per-sb writeback: logs the
 * inode's state flags, when it was dirtied (raw jiffies plus derived
 * age in seconds) and the owning wb's cgroup.
 */
TRACE_EVENT(writeback_sb_inodes_requeue,

	TP_PROTO(struct inode *inode),
	TP_ARGS(inode),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(inode_to_wb(inode));
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  (unsigned long)__entry->cgroup_ino
	)
);
745
/*
 * Event class for single-inode writeback: records the write budget
 * (nr_to_write) and how much was consumed (wrote = budget minus what
 * remains in the wbc), alongside inode state and mapping index.
 */
DECLARE_EVENT_CLASS(writeback_single_inode_template,

	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write
	),

	TP_ARGS(inode, wbc, nr_to_write),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(unsigned long, writeback_index)
		__field(long, nr_to_write)
		__field(unsigned long, wrote)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
		__entry->writeback_index = inode->i_mapping->writeback_index;
		__entry->nr_to_write	= nr_to_write;
		__entry->wrote		= nr_to_write - wbc->nr_to_write;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
		  "index=%lu to_write=%ld wrote=%lu cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  __entry->writeback_index,
		  __entry->nr_to_write,
		  __entry->wrote,
		  (unsigned long)__entry->cgroup_ino
	)
);
791
/* Instances of writeback_single_inode_template: before and after the write. */
DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);
805
/*
 * Event class identifying an inode by superblock device + inode number,
 * with its state flags, mode and dirtied_when timestamp — used by the
 * lazytime and writeback-list events below which need no bdi/wbc info.
 */
DECLARE_EVENT_CLASS(writeback_inode_template,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	ino_t,	ino			)
		__field(unsigned long,	state			)
		__field(	__u16, mode			)
		__field(unsigned long, dirtied_when		)
	),

	TP_fast_assign(
		__entry->dev	= inode->i_sb->s_dev;
		__entry->ino	= inode->i_ino;
		__entry->state	= inode->i_state;
		__entry->mode	= inode->i_mode;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long)__entry->ino, __entry->dirtied_when,
		  show_inode_state(__entry->state), __entry->mode)
);
832
/*
 * Instances of writeback_inode_template for lazytime handling and for
 * marking/clearing an inode on the per-sb writeback list.
 */
DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,

	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

/*
 * Inode writeback list tracking.
 */

DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);
865
866#endif /* _TRACE_WRITEBACK_H */
867
868/* This part must be outside protection */
869#include <trace/define_trace.h>