/*
 * include/trace/events/writeback.h — writeback tracepoint definitions.
 * Snapshot below: kernel v4.6.
 */
  1#undef TRACE_SYSTEM
  2#define TRACE_SYSTEM writeback
  3
  4#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
  5#define _TRACE_WRITEBACK_H
  6
  7#include <linux/tracepoint.h>
  8#include <linux/backing-dev.h>
  9#include <linux/writeback.h>
 10
/*
 * Render an inode->i_state bitmask as a '|'-separated list of flag
 * names (e.g. "I_DIRTY_SYNC|I_SYNC") for use in TP_printk() output.
 */
#define show_inode_state(state)					\
	__print_flags(state, "|",				\
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
		{I_NEW,			"I_NEW"},		\
		{I_WILL_FREE,		"I_WILL_FREE"},		\
		{I_FREEING,		"I_FREEING"},		\
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
		{I_DIRTY_TIME_EXPIRED,	"I_DIRTY_TIME_EXPIRED"}, \
		{I_REFERENCED,		"I_REFERENCED"}		\
	)
 25
/*
 * WB_WORK_REASON is expanded twice with different EM()/EMe() bindings:
 * first mapped to TRACE_DEFINE_ENUM() so the enum values are exported
 * to user space, then remapped below to { value, string } pairs for
 * __print_symbolic() in the TP_printk() callers.
 */
/* enums need to be exported to user space */
#undef EM
#undef EMe
#define EM(a,b) 	TRACE_DEFINE_ENUM(a);
#define EMe(a,b)	TRACE_DEFINE_ENUM(a);

#define WB_WORK_REASON							\
	EM( WB_REASON_BACKGROUND,		"background")		\
	EM( WB_REASON_TRY_TO_FREE_PAGES,	"try_to_free_pages")	\
	EM( WB_REASON_SYNC,			"sync")			\
	EM( WB_REASON_PERIODIC,			"periodic")		\
	EM( WB_REASON_LAPTOP_TIMER,		"laptop_timer")		\
	EM( WB_REASON_FREE_MORE_MEM,		"free_more_memory")	\
	EM( WB_REASON_FS_FREE_SPACE,		"fs_free_space")	\
	EMe(WB_REASON_FORKER_THREAD,		"forker_thread")

WB_WORK_REASON

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a,b)		{ a, b },
#define EMe(a,b)	{ a, b }
 52
 53struct wb_writeback_work;
 54
 55TRACE_EVENT(writeback_dirty_page,
 56
 57	TP_PROTO(struct page *page, struct address_space *mapping),
 58
 59	TP_ARGS(page, mapping),
 60
 61	TP_STRUCT__entry (
 62		__array(char, name, 32)
 63		__field(unsigned long, ino)
 64		__field(pgoff_t, index)
 65	),
 66
 67	TP_fast_assign(
 68		strncpy(__entry->name,
 69			mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
 70		__entry->ino = mapping ? mapping->host->i_ino : 0;
 71		__entry->index = page->index;
 72	),
 73
 74	TP_printk("bdi %s: ino=%lu index=%lu",
 75		__entry->name,
 76		__entry->ino,
 77		__entry->index
 78	)
 79);
 80
 81DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
 82
 83	TP_PROTO(struct inode *inode, int flags),
 84
 85	TP_ARGS(inode, flags),
 86
 87	TP_STRUCT__entry (
 88		__array(char, name, 32)
 89		__field(unsigned long, ino)
 90		__field(unsigned long, state)
 91		__field(unsigned long, flags)
 92	),
 93
 94	TP_fast_assign(
 95		struct backing_dev_info *bdi = inode_to_bdi(inode);
 96
 97		/* may be called for files on pseudo FSes w/ unregistered bdi */
 98		strncpy(__entry->name,
 99			bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
100		__entry->ino		= inode->i_ino;
101		__entry->state		= inode->i_state;
102		__entry->flags		= flags;
103	),
104
105	TP_printk("bdi %s: ino=%lu state=%s flags=%s",
106		__entry->name,
107		__entry->ino,
108		show_inode_state(__entry->state),
109		show_inode_state(__entry->flags)
110	)
111);
112
/*
 * Instances of writeback_dirty_inode_template, fired at successive
 * stages of __mark_inode_dirty() processing.
 */
DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);
133
#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK

/* Inode number of the memcg cgroup that owns @wb, for trace output. */
static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return wb->memcg_css->cgroup->kn->ino;
}

/* Same, via a writeback_control; -1U when no wb is attached. */
static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	return wbc->wb ? __trace_wb_assign_cgroup(wbc->wb) : -1U;
}

#else	/* CONFIG_CGROUP_WRITEBACK */

/* Without cgroup writeback there is no owning cgroup: report -1U. */
static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return -1U;
}

static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	return -1U;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
#endif	/* CREATE_TRACE_POINTS */
163
164DECLARE_EVENT_CLASS(writeback_write_inode_template,
165
166	TP_PROTO(struct inode *inode, struct writeback_control *wbc),
167
168	TP_ARGS(inode, wbc),
169
170	TP_STRUCT__entry (
171		__array(char, name, 32)
172		__field(unsigned long, ino)
173		__field(int, sync_mode)
174		__field(unsigned int, cgroup_ino)
175	),
176
177	TP_fast_assign(
178		strncpy(__entry->name,
179			dev_name(inode_to_bdi(inode)->dev), 32);
180		__entry->ino		= inode->i_ino;
181		__entry->sync_mode	= wbc->sync_mode;
182		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
183	),
184
185	TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%u",
186		__entry->name,
187		__entry->ino,
188		__entry->sync_mode,
189		__entry->cgroup_ino
190	)
191);
192
/* Fired on entry to and exit from writing an inode back. */
DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);
206
207DECLARE_EVENT_CLASS(writeback_work_class,
208	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
209	TP_ARGS(wb, work),
210	TP_STRUCT__entry(
211		__array(char, name, 32)
212		__field(long, nr_pages)
213		__field(dev_t, sb_dev)
214		__field(int, sync_mode)
215		__field(int, for_kupdate)
216		__field(int, range_cyclic)
217		__field(int, for_background)
218		__field(int, reason)
219		__field(unsigned int, cgroup_ino)
220	),
221	TP_fast_assign(
222		strncpy(__entry->name,
223			wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32);
 
 
224		__entry->nr_pages = work->nr_pages;
225		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
226		__entry->sync_mode = work->sync_mode;
227		__entry->for_kupdate = work->for_kupdate;
228		__entry->range_cyclic = work->range_cyclic;
229		__entry->for_background	= work->for_background;
230		__entry->reason = work->reason;
231		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
232	),
233	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
234		  "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%u",
235		  __entry->name,
236		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
237		  __entry->nr_pages,
238		  __entry->sync_mode,
239		  __entry->for_kupdate,
240		  __entry->range_cyclic,
241		  __entry->for_background,
242		  __print_symbolic(__entry->reason, WB_WORK_REASON),
243		  __entry->cgroup_ino
244	)
245);
/* Instances of writeback_work_class for each stage of work processing. */
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
	TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
255
/* Records how many pages a writeback pass actually wrote. */
TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long,		pages)
	),
	TP_fast_assign(
		__entry->pages		= pages_written;
	),
	TP_printk("%ld", __entry->pages)
);
267
268DECLARE_EVENT_CLASS(writeback_class,
269	TP_PROTO(struct bdi_writeback *wb),
270	TP_ARGS(wb),
271	TP_STRUCT__entry(
272		__array(char, name, 32)
273		__field(unsigned int, cgroup_ino)
274	),
275	TP_fast_assign(
276		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
277		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
278	),
279	TP_printk("bdi %s: cgroup_ino=%u",
280		  __entry->name,
281		  __entry->cgroup_ino
282	)
283);
/* Instances of writeback_class. */
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct bdi_writeback *wb), \
	TP_ARGS(wb))

DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_background);
291
292TRACE_EVENT(writeback_bdi_register,
293	TP_PROTO(struct backing_dev_info *bdi),
294	TP_ARGS(bdi),
295	TP_STRUCT__entry(
296		__array(char, name, 32)
297	),
298	TP_fast_assign(
299		strncpy(__entry->name, dev_name(bdi->dev), 32);
300	),
301	TP_printk("bdi %s",
302		__entry->name
303	)
304);
305
306DECLARE_EVENT_CLASS(wbc_class,
307	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
308	TP_ARGS(wbc, bdi),
309	TP_STRUCT__entry(
310		__array(char, name, 32)
311		__field(long, nr_to_write)
312		__field(long, pages_skipped)
313		__field(int, sync_mode)
314		__field(int, for_kupdate)
315		__field(int, for_background)
316		__field(int, for_reclaim)
317		__field(int, range_cyclic)
318		__field(long, range_start)
319		__field(long, range_end)
320		__field(unsigned int, cgroup_ino)
321	),
322
323	TP_fast_assign(
324		strncpy(__entry->name, dev_name(bdi->dev), 32);
325		__entry->nr_to_write	= wbc->nr_to_write;
326		__entry->pages_skipped	= wbc->pages_skipped;
327		__entry->sync_mode	= wbc->sync_mode;
328		__entry->for_kupdate	= wbc->for_kupdate;
329		__entry->for_background	= wbc->for_background;
330		__entry->for_reclaim	= wbc->for_reclaim;
331		__entry->range_cyclic	= wbc->range_cyclic;
332		__entry->range_start	= (long)wbc->range_start;
333		__entry->range_end	= (long)wbc->range_end;
334		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
335	),
336
337	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
338		"bgrd=%d reclm=%d cyclic=%d "
339		"start=0x%lx end=0x%lx cgroup_ino=%u",
340		__entry->name,
341		__entry->nr_to_write,
342		__entry->pages_skipped,
343		__entry->sync_mode,
344		__entry->for_kupdate,
345		__entry->for_background,
346		__entry->for_reclaim,
347		__entry->range_cyclic,
348		__entry->range_start,
349		__entry->range_end,
350		__entry->cgroup_ino
351	)
352)
353
/* Instances of wbc_class. */
#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);
359
360TRACE_EVENT(writeback_queue_io,
361	TP_PROTO(struct bdi_writeback *wb,
362		 struct wb_writeback_work *work,
363		 int moved),
364	TP_ARGS(wb, work, moved),
365	TP_STRUCT__entry(
366		__array(char,		name, 32)
367		__field(unsigned long,	older)
368		__field(long,		age)
369		__field(int,		moved)
370		__field(int,		reason)
371		__field(unsigned int,	cgroup_ino)
372	),
373	TP_fast_assign(
374		unsigned long *older_than_this = work->older_than_this;
375		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
376		__entry->older	= older_than_this ?  *older_than_this : 0;
377		__entry->age	= older_than_this ?
378				  (jiffies - *older_than_this) * 1000 / HZ : -1;
379		__entry->moved	= moved;
380		__entry->reason	= work->reason;
381		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
382	),
383	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u",
384		__entry->name,
385		__entry->older,	/* older_than_this in jiffies */
386		__entry->age,	/* older_than_this in relative milliseconds */
387		__entry->moved,
388		__print_symbolic(__entry->reason, WB_WORK_REASON),
389		__entry->cgroup_ino
390	)
391);
392
/*
 * Snapshot of the global dirty-page accounting: per-zone counters
 * plus the background/dirty thresholds passed in by the caller and
 * the current global dirty limit.
 */
TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long,	nr_dirty)
		__field(unsigned long,	nr_writeback)
		__field(unsigned long,	nr_unstable)
		__field(unsigned long,	background_thresh)
		__field(unsigned long,	dirty_thresh)
		__field(unsigned long,	dirty_limit)
		__field(unsigned long,	nr_dirtied)
		__field(unsigned long,	nr_written)
	),

	TP_fast_assign(
		/* counters are sampled here, not passed in by the caller */
		__entry->nr_dirty	= global_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback	= global_page_state(NR_WRITEBACK);
		__entry->nr_unstable	= global_page_state(NR_UNSTABLE_NFS);
		__entry->nr_dirtied	= global_page_state(NR_DIRTIED);
		__entry->nr_written	= global_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh	= dirty_thresh;
		__entry->dirty_limit	= global_wb_domain.dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu unstable=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->nr_unstable,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);
438
/* Convert a page count to KB/s units (assumes PAGE_SHIFT >= 10). */
#define KBps(x)			((x) << (PAGE_SHIFT - 10))

/*
 * Reports the dirty-throttling rate computation for a bdi_writeback:
 * measured and averaged write bandwidth, the observed dirty rate, and
 * the resulting base/positional/balanced ratelimits, all in KB/s.
 */
TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(wb, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char,		bdi, 32)
		__field(unsigned long,	write_bw)
		__field(unsigned long,	avg_write_bw)
		__field(unsigned long,	dirty_rate)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned long,	balanced_dirty_ratelimit)
		__field(unsigned int,	cgroup_ino)
	),

	TP_fast_assign(
		strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
		__entry->write_bw	= KBps(wb->write_bandwidth);
		__entry->avg_write_bw	= KBps(wb->avg_write_bandwidth);
		__entry->dirty_rate	= KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					KBps(wb->balanced_dirty_ratelimit);
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu cgroup_ino=%u",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit, /* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
		  __entry->cgroup_ino
	)
);
486
/*
 * Detailed snapshot of one balance_dirty_pages() throttling decision:
 * global limit/setpoint, per-bdi setpoint and dirty count, current
 * ratelimits, and the pause/period timing (converted to ms).
 */
TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long thresh,
		 unsigned long bg_thresh,
		 unsigned long dirty,
		 unsigned long bdi_thresh,
		 unsigned long bdi_dirty,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 unsigned long period,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
		dirty_ratelimit, task_ratelimit,
		dirtied, period, pause, start_time),

	TP_STRUCT__entry(
		__array(	 char,	bdi, 32)
		__field(unsigned long,	limit)
		__field(unsigned long,	setpoint)
		__field(unsigned long,	dirty)
		__field(unsigned long,	bdi_setpoint)
		__field(unsigned long,	bdi_dirty)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned int,	dirtied)
		__field(unsigned int,	dirtied_pause)
		__field(unsigned long,	paused)
		__field(	 long,	pause)
		__field(unsigned long,	period)
		__field(	 long,	think)
		__field(unsigned int,	cgroup_ino)
	),

	TP_fast_assign(
		/* freerun: midpoint below which no throttling happens */
		unsigned long freerun = (thresh + bg_thresh) / 2;
		strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);

		__entry->limit		= global_wb_domain.dirty_limit;
		__entry->setpoint	= (global_wb_domain.dirty_limit +
						freerun) / 2;
		__entry->dirty		= dirty;
		/* scale the global setpoint into this bdi's share */
		__entry->bdi_setpoint	= __entry->setpoint *
						bdi_thresh / (thresh + 1);
		__entry->bdi_dirty	= bdi_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->dirtied	= dirtied;
		__entry->dirtied_pause	= current->nr_dirtied_pause;
		/* time since the task last paused, in ms (0 = never) */
		__entry->think		= current->dirty_paused_when == 0 ? 0 :
			 (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
		__entry->period		= period * 1000 / HZ;
		__entry->pause		= pause * 1000 / HZ;
		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),


	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "bdi_setpoint=%lu bdi_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%u",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->bdi_setpoint,
		  __entry->bdi_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause,	/* ms */
		  __entry->period,	/* ms */
		  __entry->think,	/* ms */
		  __entry->cgroup_ino
	  )
);
571
572TRACE_EVENT(writeback_sb_inodes_requeue,
573
574	TP_PROTO(struct inode *inode),
575	TP_ARGS(inode),
576
577	TP_STRUCT__entry(
578		__array(char, name, 32)
579		__field(unsigned long, ino)
580		__field(unsigned long, state)
581		__field(unsigned long, dirtied_when)
582		__field(unsigned int, cgroup_ino)
583	),
584
585	TP_fast_assign(
586		strncpy(__entry->name,
587		        dev_name(inode_to_bdi(inode)->dev), 32);
588		__entry->ino		= inode->i_ino;
589		__entry->state		= inode->i_state;
590		__entry->dirtied_when	= inode->dirtied_when;
591		__entry->cgroup_ino	= __trace_wb_assign_cgroup(inode_to_wb(inode));
592	),
593
594	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%u",
595		  __entry->name,
596		  __entry->ino,
597		  show_inode_state(__entry->state),
598		  __entry->dirtied_when,
599		  (jiffies - __entry->dirtied_when) / HZ,
600		  __entry->cgroup_ino
601	)
602);
603
/*
 * Event class for congestion waits: the timeout requested and the
 * delay actually experienced, both in microseconds.
 */
DECLARE_EVENT_CLASS(writeback_congest_waited_template,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed),

	TP_STRUCT__entry(
		__field(	unsigned int,	usec_timeout	)
		__field(	unsigned int,	usec_delayed	)
	),

	TP_fast_assign(
		__entry->usec_timeout	= usec_timeout;
		__entry->usec_delayed	= usec_delayed;
	),

	TP_printk("usec_timeout=%u usec_delayed=%u",
			__entry->usec_timeout,
			__entry->usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);
638
639DECLARE_EVENT_CLASS(writeback_single_inode_template,
640
641	TP_PROTO(struct inode *inode,
642		 struct writeback_control *wbc,
643		 unsigned long nr_to_write
644	),
645
646	TP_ARGS(inode, wbc, nr_to_write),
647
648	TP_STRUCT__entry(
649		__array(char, name, 32)
650		__field(unsigned long, ino)
651		__field(unsigned long, state)
652		__field(unsigned long, dirtied_when)
653		__field(unsigned long, writeback_index)
654		__field(long, nr_to_write)
655		__field(unsigned long, wrote)
656		__field(unsigned int, cgroup_ino)
657	),
658
659	TP_fast_assign(
660		strncpy(__entry->name,
661			dev_name(inode_to_bdi(inode)->dev), 32);
662		__entry->ino		= inode->i_ino;
663		__entry->state		= inode->i_state;
664		__entry->dirtied_when	= inode->dirtied_when;
665		__entry->writeback_index = inode->i_mapping->writeback_index;
666		__entry->nr_to_write	= nr_to_write;
667		__entry->wrote		= nr_to_write - wbc->nr_to_write;
668		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
669	),
670
671	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
672		  "index=%lu to_write=%ld wrote=%lu cgroup_ino=%u",
673		  __entry->name,
674		  __entry->ino,
675		  show_inode_state(__entry->state),
676		  __entry->dirtied_when,
677		  (jiffies - __entry->dirtied_when) / HZ,
678		  __entry->writeback_index,
679		  __entry->nr_to_write,
680		  __entry->wrote,
681		  __entry->cgroup_ino
682	)
683);
684
/* Fired on entry to and completion of single-inode writeback. */
DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);
698
/*
 * Event class for lazytime inode updates: identifies the inode by
 * device + number and records its state, mode and dirty timestamp.
 */
DECLARE_EVENT_CLASS(writeback_lazytime_template,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(unsigned long,	ino			)
		__field(unsigned long,	state			)
		__field(	__u16, mode			)
		__field(unsigned long, dirtied_when		)
	),

	TP_fast_assign(
		__entry->dev	= inode->i_sb->s_dev;
		__entry->ino	= inode->i_ino;
		__entry->state	= inode->i_state;
		__entry->mode	= inode->i_mode;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino, __entry->dirtied_when,
		  show_inode_state(__entry->state), __entry->mode)
);

DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime_iput,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_lazytime_template, writeback_dirty_inode_enqueue,

	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);
744
745#endif /* _TRACE_WRITEBACK_H */
746
747/* This part must be outside protection */
748#include <trace/define_trace.h>
/* ---- Older snapshot of the same header: kernel v3.5.6 ---- */
  1#undef TRACE_SYSTEM
  2#define TRACE_SYSTEM writeback
  3
  4#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
  5#define _TRACE_WRITEBACK_H
  6
 
  7#include <linux/backing-dev.h>
  8#include <linux/writeback.h>
  9
/*
 * Render an inode->i_state bitmask as a '|'-separated list of flag
 * names for tracepoint output (pre-lazytime flag set).
 */
#define show_inode_state(state)					\
	__print_flags(state, "|",				\
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
		{I_NEW,			"I_NEW"},		\
		{I_WILL_FREE,		"I_WILL_FREE"},		\
		{I_FREEING,		"I_FREEING"},		\
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_REFERENCED,		"I_REFERENCED"}		\
	)

/* { value, string } pairs for __print_symbolic() of work->reason. */
#define WB_WORK_REASON							\
		{WB_REASON_BACKGROUND,		"background"},		\
		{WB_REASON_TRY_TO_FREE_PAGES,	"try_to_free_pages"},	\
		{WB_REASON_SYNC,		"sync"},		\
		{WB_REASON_PERIODIC,		"periodic"},		\
		{WB_REASON_LAPTOP_TIMER,	"laptop_timer"},	\
		{WB_REASON_FREE_MORE_MEM,	"free_more_memory"},	\
		{WB_REASON_FS_FREE_SPACE,	"fs_free_space"},	\
		{WB_REASON_FORKER_THREAD,	"forker_thread"}
 
 
 
 
 
 
 
 
 
 
 
 32
 33struct wb_writeback_work;
 34
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 35DECLARE_EVENT_CLASS(writeback_work_class,
 36	TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work),
 37	TP_ARGS(bdi, work),
 38	TP_STRUCT__entry(
 39		__array(char, name, 32)
 40		__field(long, nr_pages)
 41		__field(dev_t, sb_dev)
 42		__field(int, sync_mode)
 43		__field(int, for_kupdate)
 44		__field(int, range_cyclic)
 45		__field(int, for_background)
 46		__field(int, reason)
 
 47	),
 48	TP_fast_assign(
 49		struct device *dev = bdi->dev;
 50		if (!dev)
 51			dev = default_backing_dev_info.dev;
 52		strncpy(__entry->name, dev_name(dev), 32);
 53		__entry->nr_pages = work->nr_pages;
 54		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
 55		__entry->sync_mode = work->sync_mode;
 56		__entry->for_kupdate = work->for_kupdate;
 57		__entry->range_cyclic = work->range_cyclic;
 58		__entry->for_background	= work->for_background;
 59		__entry->reason = work->reason;
 
 60	),
 61	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
 62		  "kupdate=%d range_cyclic=%d background=%d reason=%s",
 63		  __entry->name,
 64		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
 65		  __entry->nr_pages,
 66		  __entry->sync_mode,
 67		  __entry->for_kupdate,
 68		  __entry->range_cyclic,
 69		  __entry->for_background,
 70		  __print_symbolic(__entry->reason, WB_WORK_REASON)
 
 71	)
 72);
/* Instances of writeback_work_class for each stage of work processing. */
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \
	TP_ARGS(bdi, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_nothread);
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
 83
/* Records how many pages a writeback pass actually wrote. */
TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long,		pages)
	),
	TP_fast_assign(
		__entry->pages		= pages_written;
	),
	TP_printk("%ld", __entry->pages)
);
 95
 96DECLARE_EVENT_CLASS(writeback_class,
 97	TP_PROTO(struct backing_dev_info *bdi),
 98	TP_ARGS(bdi),
 99	TP_STRUCT__entry(
100		__array(char, name, 32)
 
101	),
102	TP_fast_assign(
103		strncpy(__entry->name, dev_name(bdi->dev), 32);
 
104	),
105	TP_printk("bdi %s",
106		  __entry->name
 
107	)
108);
/* Instances of writeback_class, including the old per-bdi flusher
 * thread lifecycle events. */
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct backing_dev_info *bdi), \
	TP_ARGS(bdi))

DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_background);
DEFINE_WRITEBACK_EVENT(writeback_wake_thread);
DEFINE_WRITEBACK_EVENT(writeback_wake_forker_thread);
DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
DEFINE_WRITEBACK_EVENT(writeback_thread_start);
DEFINE_WRITEBACK_EVENT(writeback_thread_stop);
 
 
 
 
 
 
 
 
122
123DECLARE_EVENT_CLASS(wbc_class,
124	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
125	TP_ARGS(wbc, bdi),
126	TP_STRUCT__entry(
127		__array(char, name, 32)
128		__field(long, nr_to_write)
129		__field(long, pages_skipped)
130		__field(int, sync_mode)
131		__field(int, for_kupdate)
132		__field(int, for_background)
133		__field(int, for_reclaim)
134		__field(int, range_cyclic)
135		__field(long, range_start)
136		__field(long, range_end)
 
137	),
138
139	TP_fast_assign(
140		strncpy(__entry->name, dev_name(bdi->dev), 32);
141		__entry->nr_to_write	= wbc->nr_to_write;
142		__entry->pages_skipped	= wbc->pages_skipped;
143		__entry->sync_mode	= wbc->sync_mode;
144		__entry->for_kupdate	= wbc->for_kupdate;
145		__entry->for_background	= wbc->for_background;
146		__entry->for_reclaim	= wbc->for_reclaim;
147		__entry->range_cyclic	= wbc->range_cyclic;
148		__entry->range_start	= (long)wbc->range_start;
149		__entry->range_end	= (long)wbc->range_end;
 
150	),
151
152	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
153		"bgrd=%d reclm=%d cyclic=%d "
154		"start=0x%lx end=0x%lx",
155		__entry->name,
156		__entry->nr_to_write,
157		__entry->pages_skipped,
158		__entry->sync_mode,
159		__entry->for_kupdate,
160		__entry->for_background,
161		__entry->for_reclaim,
162		__entry->range_cyclic,
163		__entry->range_start,
164		__entry->range_end)
 
 
165)
166
/* Instances of wbc_class. */
#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);
172
173TRACE_EVENT(writeback_queue_io,
174	TP_PROTO(struct bdi_writeback *wb,
175		 struct wb_writeback_work *work,
176		 int moved),
177	TP_ARGS(wb, work, moved),
178	TP_STRUCT__entry(
179		__array(char,		name, 32)
180		__field(unsigned long,	older)
181		__field(long,		age)
182		__field(int,		moved)
183		__field(int,		reason)
 
184	),
185	TP_fast_assign(
186		unsigned long *older_than_this = work->older_than_this;
187		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
188		__entry->older	= older_than_this ?  *older_than_this : 0;
189		__entry->age	= older_than_this ?
190				  (jiffies - *older_than_this) * 1000 / HZ : -1;
191		__entry->moved	= moved;
192		__entry->reason	= work->reason;
 
193	),
194	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s",
195		__entry->name,
196		__entry->older,	/* older_than_this in jiffies */
197		__entry->age,	/* older_than_this in relative milliseconds */
198		__entry->moved,
199		__print_symbolic(__entry->reason, WB_WORK_REASON)
 
200	)
201);
202
/*
 * Snapshot of the global dirty-page accounting: per-zone counters
 * plus the background/dirty thresholds passed in by the caller and
 * the current global dirty limit.
 */
TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long,	nr_dirty)
		__field(unsigned long,	nr_writeback)
		__field(unsigned long,	nr_unstable)
		__field(unsigned long,	background_thresh)
		__field(unsigned long,	dirty_thresh)
		__field(unsigned long,	dirty_limit)
		__field(unsigned long,	nr_dirtied)
		__field(unsigned long,	nr_written)
	),

	TP_fast_assign(
		/* counters are sampled here, not passed in by the caller */
		__entry->nr_dirty	= global_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback	= global_page_state(NR_WRITEBACK);
		__entry->nr_unstable	= global_page_state(NR_UNSTABLE_NFS);
		__entry->nr_dirtied	= global_page_state(NR_DIRTIED);
		__entry->nr_written	= global_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh	= dirty_thresh;
		__entry->dirty_limit = global_dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu unstable=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->nr_unstable,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);
248
/* Convert a page count to KB/s units (assumes PAGE_SHIFT >= 10). */
#define KBps(x)			((x) << (PAGE_SHIFT - 10))

/*
 * Reports the dirty-throttling rate computation for a bdi: measured
 * and averaged write bandwidth, the observed dirty rate, and the
 * resulting base/positional/balanced ratelimits, all in KB/s.
 */
TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct backing_dev_info *bdi,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(bdi, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char,		bdi, 32)
		__field(unsigned long,	write_bw)
		__field(unsigned long,	avg_write_bw)
		__field(unsigned long,	dirty_rate)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned long,	balanced_dirty_ratelimit)
	),

	TP_fast_assign(
		strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
		__entry->write_bw	= KBps(bdi->write_bandwidth);
		__entry->avg_write_bw	= KBps(bdi->avg_write_bandwidth);
		__entry->dirty_rate	= KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(bdi->dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					  KBps(bdi->balanced_dirty_ratelimit);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit, /* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit /* the balanced ratelimit */
	)
);
293
/*
 * Per-pause snapshot taken during dirty-page throttling: global and
 * per-bdi dirty counts and setpoints, the applied ratelimits (KB/s),
 * and the pause timing (jiffies converted to milliseconds).
 */
TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct backing_dev_info *bdi,
		 unsigned long thresh,
		 unsigned long bg_thresh,
		 unsigned long dirty,
		 unsigned long bdi_thresh,
		 unsigned long bdi_dirty,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 unsigned long period,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
		dirty_ratelimit, task_ratelimit,
		dirtied, period, pause, start_time),

	TP_STRUCT__entry(
		__array(	 char,	bdi, 32)
		__field(unsigned long,	limit)
		__field(unsigned long,	setpoint)
		__field(unsigned long,	dirty)
		__field(unsigned long,	bdi_setpoint)
		__field(unsigned long,	bdi_dirty)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned int,	dirtied)
		__field(unsigned int,	dirtied_pause)
		__field(unsigned long,	paused)
		__field(	 long,	pause)
		__field(unsigned long,	period)
		__field(	 long,	think)
	),

	TP_fast_assign(
		/* midpoint of the background and hard dirty thresholds */
		unsigned long freerun = (thresh + bg_thresh) / 2;
		strlcpy(__entry->bdi, dev_name(bdi->dev), 32);

		__entry->limit		= global_dirty_limit;
		/* global setpoint: midway between freerun and the limit */
		__entry->setpoint	= (global_dirty_limit + freerun) / 2;
		__entry->dirty		= dirty;
		/* scale setpoint by this bdi's share; +1 avoids div-by-zero */
		__entry->bdi_setpoint	= __entry->setpoint *
						bdi_thresh / (thresh + 1);
		__entry->bdi_dirty	= bdi_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->dirtied	= dirtied;
		__entry->dirtied_pause	= current->nr_dirtied_pause;
		/* time since last pause ended, in ms (0 if never paused) */
		__entry->think		= current->dirty_paused_when == 0 ? 0 :
			 (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
		__entry->period		= period * 1000 / HZ;	/* jiffies -> ms */
		__entry->pause		= pause * 1000 / HZ;	/* jiffies -> ms */
		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
	),


	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "bdi_setpoint=%lu bdi_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld period=%lu think=%ld",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->bdi_setpoint,
		  __entry->bdi_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause,	/* ms */
		  __entry->period,	/* ms */
		  __entry->think	/* ms */
	  )
);
374
375TRACE_EVENT(writeback_sb_inodes_requeue,
376
377	TP_PROTO(struct inode *inode),
378	TP_ARGS(inode),
379
380	TP_STRUCT__entry(
381		__array(char, name, 32)
382		__field(unsigned long, ino)
383		__field(unsigned long, state)
384		__field(unsigned long, dirtied_when)
 
385	),
386
387	TP_fast_assign(
388		strncpy(__entry->name,
389		        dev_name(inode_to_bdi(inode)->dev), 32);
390		__entry->ino		= inode->i_ino;
391		__entry->state		= inode->i_state;
392		__entry->dirtied_when	= inode->dirtied_when;
 
393	),
394
395	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu",
396		  __entry->name,
397		  __entry->ino,
398		  show_inode_state(__entry->state),
399		  __entry->dirtied_when,
400		  (jiffies - __entry->dirtied_when) / HZ
 
401	)
402);
403
/*
 * Common layout for congestion-wait events: how long the caller was
 * willing to wait and how long it actually slept, in microseconds.
 */
DECLARE_EVENT_CLASS(writeback_congest_waited_template,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed),

	TP_STRUCT__entry(
		__field(	unsigned int,	usec_timeout	)
		__field(	unsigned int,	usec_delayed	)
	),

	TP_fast_assign(
		__entry->usec_timeout	= usec_timeout;
		__entry->usec_delayed	= usec_delayed;
	),

	TP_printk("usec_timeout=%u usec_delayed=%u",
			__entry->usec_timeout,
			__entry->usec_delayed)
);
424
/* writeback_congestion_wait: instance of the congest_waited template */
DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);
431
/* writeback_wait_iff_congested: instance of the congest_waited template */
DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);
438
439DECLARE_EVENT_CLASS(writeback_single_inode_template,
440
441	TP_PROTO(struct inode *inode,
442		 struct writeback_control *wbc,
443		 unsigned long nr_to_write
444	),
445
446	TP_ARGS(inode, wbc, nr_to_write),
447
448	TP_STRUCT__entry(
449		__array(char, name, 32)
450		__field(unsigned long, ino)
451		__field(unsigned long, state)
452		__field(unsigned long, dirtied_when)
453		__field(unsigned long, writeback_index)
454		__field(long, nr_to_write)
455		__field(unsigned long, wrote)
 
456	),
457
458	TP_fast_assign(
459		strncpy(__entry->name,
460			dev_name(inode_to_bdi(inode)->dev), 32);
461		__entry->ino		= inode->i_ino;
462		__entry->state		= inode->i_state;
463		__entry->dirtied_when	= inode->dirtied_when;
464		__entry->writeback_index = inode->i_mapping->writeback_index;
465		__entry->nr_to_write	= nr_to_write;
466		__entry->wrote		= nr_to_write - wbc->nr_to_write;
 
467	),
468
469	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
470		  "index=%lu to_write=%ld wrote=%lu",
471		  __entry->name,
472		  __entry->ino,
473		  show_inode_state(__entry->state),
474		  __entry->dirtied_when,
475		  (jiffies - __entry->dirtied_when) / HZ,
476		  __entry->writeback_index,
477		  __entry->nr_to_write,
478		  __entry->wrote
 
479	)
480);
481
 
 
 
 
 
 
 
/* writeback_single_inode: instance of the single_inode template */
DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);
488
489#endif /* _TRACE_WRITEBACK_H */
490
491/* This part must be outside protection */
492#include <trace/define_trace.h>