v6.2
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kmem

#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KMEM_H

#include <linux/types.h>
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>

TRACE_EVENT(kmem_cache_alloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 struct kmem_cache *s,
		 gfp_t gfp_flags,
		 int node),

	TP_ARGS(call_site, ptr, s, gfp_flags, node),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	unsigned long,	gfp_flags	)
		__field(	int,		node		)
		__field(	bool,		accounted	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= s->object_size;
		__entry->bytes_alloc	= s->size;
		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
		__entry->node		= node;
		__entry->accounted	= IS_ENABLED(CONFIG_MEMCG_KMEM) ?
					  ((gfp_flags & __GFP_ACCOUNT) ||
					  (s->flags & SLAB_ACCOUNT)) : false;
	),

	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
		(void *)__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->node,
		__entry->accounted ? "true" : "false")
);

TRACE_EVENT(kmalloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags,
		 int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	unsigned long,	gfp_flags	)
		__field(	int,		node		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
		__entry->node		= node;
	),

	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
		(void *)__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->node,
		(IS_ENABLED(CONFIG_MEMCG_KMEM) &&
		 (__entry->gfp_flags & (__force unsigned long)__GFP_ACCOUNT)) ? "true" : "false")
);

TRACE_EVENT(kfree,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
	),

	TP_printk("call_site=%pS ptr=%p",
		  (void *)__entry->call_site, __entry->ptr)
);

TRACE_EVENT(kmem_cache_free,

	TP_PROTO(unsigned long call_site, const void *ptr, const struct kmem_cache *s),

	TP_ARGS(call_site, ptr, s),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__string(	name,		s->name		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__assign_str(name, s->name);
	),

	TP_printk("call_site=%pS ptr=%p name=%s",
		  (void *)__entry->call_site, __entry->ptr, __get_str(name))
);

TRACE_EVENT(mm_page_free,

	TP_PROTO(struct page *page, unsigned int order),

	TP_ARGS(page, order),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
	),

	TP_fast_assign(
		__entry->pfn		= page_to_pfn(page);
		__entry->order		= order;
	),

	TP_printk("page=%p pfn=0x%lx order=%d",
			pfn_to_page(__entry->pfn),
			__entry->pfn,
			__entry->order)
);

TRACE_EVENT(mm_page_free_batched,

	TP_PROTO(struct page *page),

	TP_ARGS(page),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
	),

	TP_fast_assign(
		__entry->pfn		= page_to_pfn(page);
	),

	TP_printk("page=%p pfn=0x%lx order=0",
			pfn_to_page(__entry->pfn),
			__entry->pfn)
);

TRACE_EVENT(mm_page_alloc,

	TP_PROTO(struct page *page, unsigned int order,
			gfp_t gfp_flags, int migratetype),

	TP_ARGS(page, order, gfp_flags, migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	unsigned long,	gfp_flags	)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		/* page is NULL when the allocation failed; record pfn as -1UL. */
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d gfp_flags=%s",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		show_gfp_flags(__entry->gfp_flags))
);

DECLARE_EVENT_CLASS(mm_page,

	TP_PROTO(struct page *page, unsigned int order, int migratetype,
		 int percpu_refill),

	TP_ARGS(page, order, migratetype, percpu_refill),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
		__field(	int,		percpu_refill	)
	),

	TP_fast_assign(
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
		__entry->percpu_refill	= percpu_refill;
	),

	TP_printk("page=%p pfn=0x%lx order=%u migratetype=%d percpu_refill=%d",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		__entry->percpu_refill)
);

DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,

	TP_PROTO(struct page *page, unsigned int order, int migratetype,
		 int percpu_refill),

	TP_ARGS(page, order, migratetype, percpu_refill)
);

TRACE_EVENT(mm_page_pcpu_drain,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d",
		pfn_to_page(__entry->pfn), __entry->pfn,
		__entry->order, __entry->migratetype)
);

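/*
 * mm_page_alloc_extfrag fires when an allocation has to fall back to a
 * pageblock of a different migratetype.  In the output, fragmenting=1
 * means the fallback order is smaller than a whole pageblock (so the
 * block ends up with mixed allocations), and change_ownership=1 means
 * the pageblock's migratetype now matches the allocating type.
 */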
TRACE_EVENT(mm_page_alloc_extfrag,

	TP_PROTO(struct page *page,
		int alloc_order, int fallback_order,
		int alloc_migratetype, int fallback_migratetype),

	TP_ARGS(page,
		alloc_order, fallback_order,
		alloc_migratetype, fallback_migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn			)
		__field(	int,		alloc_order		)
		__field(	int,		fallback_order		)
		__field(	int,		alloc_migratetype	)
		__field(	int,		fallback_migratetype	)
		__field(	int,		change_ownership	)
	),

	TP_fast_assign(
		__entry->pfn			= page_to_pfn(page);
		__entry->alloc_order		= alloc_order;
		__entry->fallback_order		= fallback_order;
		__entry->alloc_migratetype	= alloc_migratetype;
		__entry->fallback_migratetype	= fallback_migratetype;
		__entry->change_ownership	= (alloc_migratetype ==
					get_pageblock_migratetype(page));
	),

	TP_printk("page=%p pfn=0x%lx alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
		pfn_to_page(__entry->pfn),
		__entry->pfn,
		__entry->alloc_order,
		__entry->fallback_order,
		pageblock_order,
		__entry->alloc_migratetype,
		__entry->fallback_migratetype,
		__entry->fallback_order < pageblock_order,
		__entry->change_ownership)
);

/*
 * Required for uniquely and securely identifying mm in rss_stat tracepoint.
 */
#ifndef __PTR_TO_HASHVAL
static unsigned int __maybe_unused mm_ptr_to_hash(const void *ptr)
{
	int ret;
	unsigned long hashval;

	ret = ptr_to_hashval(ptr, &hashval);
	if (ret)
		return 0;

	/* The hashed value is only 32-bit */
	return (unsigned int)hashval;
}
#define __PTR_TO_HASHVAL
#endif

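/*
 * EM()/EMe() are expanded twice: first as TRACE_DEFINE_ENUM() so the MM_*
 * enum values are exported to user space, then as { value, "name" } pairs
 * to build the table that __print_symbolic() uses in rss_stat below.
 */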
#define TRACE_MM_PAGES		\
	EM(MM_FILEPAGES)	\
	EM(MM_ANONPAGES)	\
	EM(MM_SWAPENTS)		\
	EMe(MM_SHMEMPAGES)

#undef EM
#undef EMe

#define EM(a)	TRACE_DEFINE_ENUM(a);
#define EMe(a)	TRACE_DEFINE_ENUM(a);

TRACE_MM_PAGES

#undef EM
#undef EMe

#define EM(a)	{ a, #a },
#define EMe(a)	{ a, #a }

TRACE_EVENT(rss_stat,

	TP_PROTO(struct mm_struct *mm,
		int member),

	TP_ARGS(mm, member),

	TP_STRUCT__entry(
		__field(unsigned int, mm_id)
		__field(unsigned int, curr)
		__field(int, member)
		__field(long, size)
	),

	TP_fast_assign(
		__entry->mm_id = mm_ptr_to_hash(mm);
		__entry->curr = !!(current->mm == mm);
		__entry->member = member;
		__entry->size = (percpu_counter_sum_positive(&mm->rss_stat[member])
							    << PAGE_SHIFT);
	),

	TP_printk("mm_id=%u curr=%d type=%s size=%ldB",
		__entry->mm_id,
		__entry->curr,
		__print_symbolic(__entry->member, TRACE_MM_PAGES),
		__entry->size)
	);
#endif /* _TRACE_KMEM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
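
The events defined above are exposed through tracefs on kernels built with tracing support. As a minimal userspace sketch (not part of the header), assuming tracefs is mounted at /sys/kernel/tracing and the program runs as root, the following enables the kmem:kmalloc event and prints a few records from trace_pipe, each formatted according to that event's TP_printk():

/* Userspace sketch: enable kmem:kmalloc and read a few trace records. */
#include <stdio.h>

int main(void)
{
	FILE *f;
	char line[512];
	int i;

	/* Writing "1" enables the event; "0" would disable it again. */
	f = fopen("/sys/kernel/tracing/events/kmem/kmalloc/enable", "w");
	if (!f) {
		perror("enable kmem:kmalloc");
		return 1;
	}
	fputs("1", f);
	fclose(f);

	/* trace_pipe blocks until events arrive and consumes them on read. */
	f = fopen("/sys/kernel/tracing/trace_pipe", "r");
	if (!f) {
		perror("trace_pipe");
		return 1;
	}
	for (i = 0; i < 10 && fgets(line, sizeof(line), f); i++)
		fputs(line, stdout);
	fclose(f);
	return 0;
}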