/* SPDX-License-Identifier: GPL-2.0 */
/*
 * allocation tagging
 */
#ifndef _LINUX_ALLOC_TAG_H
#define _LINUX_ALLOC_TAG_H

#include <linux/bug.h>
#include <linux/codetag.h>
#include <linux/container_of.h>
#include <linux/preempt.h>
#include <asm/percpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/static_key.h>
#include <linux/irqflags.h>

struct alloc_tag_counters {
	u64 bytes;
	u64 calls;
};

/*
 * An instance of this structure is created in a special ELF section at every
 * allocation callsite. At runtime, the special section is treated as an array
 * of these; the embedded struct codetag ties each tag into the generic
 * codetag framework.
 */
struct alloc_tag {
	struct codetag			ct;
	struct alloc_tag_counters __percpu	*counters;
} __aligned(8);
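
/*
 * Illustrative sketch (not part of this header): allocation APIs are wrapped
 * so that each callsite expands to its own tag; e.g. the slab layer does
 * roughly
 *
 *	#define kmalloc(...)	alloc_hooks(kmalloc_noprof(__VA_ARGS__))
 *
 * where alloc_hooks(), defined at the bottom of this file, emits one static
 * struct alloc_tag into the ALLOC_TAG_SECTION_NAME section per callsite.
 */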

struct alloc_tag_kernel_section {
	struct alloc_tag *first_tag;
	unsigned long count;
};

struct alloc_tag_module_section {
	union {
		unsigned long start_addr;
		struct alloc_tag *first_tag;
	};
	unsigned long end_addr;
	/* used size */
	unsigned long size;
};

#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG

/*
 * Sentinel tag pointer for references that are deliberately left untagged,
 * so the debug checks below do not report them as missing tags.
 */
#define CODETAG_EMPTY	((void *)1)

static inline bool is_codetag_empty(union codetag_ref *ref)
{
	return ref->ct == CODETAG_EMPTY;
}

static inline void set_codetag_empty(union codetag_ref *ref)
{
	if (ref)
		ref->ct = CODETAG_EMPTY;
}

#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */

static inline bool is_codetag_empty(union codetag_ref *ref) { return false; }

static inline void set_codetag_empty(union codetag_ref *ref)
{
	if (ref)
		ref->ct = NULL;
}

#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
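
/*
 * Usage sketch (hypothetical @obj): callers that intentionally skip tagging
 * an object mark its reference so debug builds do not warn about it:
 *
 *	set_codetag_empty(&obj->ref);
 */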

#ifdef CONFIG_MEM_ALLOC_PROFILING

#define ALLOC_TAG_SECTION_NAME	"alloc_tags"

struct codetag_bytes {
	struct codetag *ct;
	s64 bytes;
};

/* Fill @tags with up to @count of the heaviest tags by allocated bytes */
size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep);

static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct)
{
	return container_of(ct, struct alloc_tag, ct);
}

#ifdef ARCH_NEEDS_WEAK_PER_CPU
/*
 * When percpu variables are required to be defined as weak, static percpu
 * variables can't be used inside a function (see comments for DECLARE_PER_CPU_SECTION).
 * Instead we will account all module allocations to a single counter.
 */
DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);

#define DEFINE_ALLOC_TAG(_alloc_tag)						\
	static struct alloc_tag _alloc_tag __used __aligned(8)			\
	__section(ALLOC_TAG_SECTION_NAME) = {					\
		.ct = CODE_TAG_INIT,						\
		.counters = &_shared_alloc_tag };

#else /* ARCH_NEEDS_WEAK_PER_CPU */

#define DEFINE_ALLOC_TAG(_alloc_tag)						\
	static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr);	\
	static struct alloc_tag _alloc_tag __used __aligned(8)			\
	__section(ALLOC_TAG_SECTION_NAME) = {					\
		.ct = CODE_TAG_INIT,						\
		.counters = &_alloc_tag_cntr };

#endif /* ARCH_NEEDS_WEAK_PER_CPU */

/* Runtime switch: implemented as a static branch so the off case is nearly free */
DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
			mem_alloc_profiling_key);

static inline bool mem_alloc_profiling_enabled(void)
{
	return static_branch_maybe(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
				   &mem_alloc_profiling_key);
}
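
/*
 * Guard sketch (illustrative): hot paths test the static branch before
 * touching any tag state, e.g.
 *
 *	if (mem_alloc_profiling_enabled())
 *		alloc_tag_add(ref, current->alloc_tag, size);
 */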

/* Sum the per-CPU counters of @tag into one snapshot */
static inline struct alloc_tag_counters alloc_tag_read(struct alloc_tag *tag)
{
	struct alloc_tag_counters v = { 0, 0 };
	struct alloc_tag_counters *counter;
	int cpu;

	for_each_possible_cpu(cpu) {
		counter = per_cpu_ptr(tag->counters, cpu);
		v.bytes += counter->bytes;
		v.calls += counter->calls;
	}

	return v;
}
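
/*
 * Reader sketch (illustrative, in the style of the /proc/allocinfo dump):
 *
 *	struct alloc_tag_counters counter = alloc_tag_read(tag);
 *
 *	pr_info("%12llu %8llu %s:%u\n", counter.bytes, counter.calls,
 *		tag->ct.filename, tag->ct.lineno);
 */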

#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag)
{
	WARN_ONCE(ref && ref->ct && !is_codetag_empty(ref),
		  "alloc_tag was not cleared (got tag for %s:%u)\n",
		  ref->ct->filename, ref->ct->lineno);

	WARN_ONCE(!tag, "current->alloc_tag not set\n");
}

static inline void alloc_tag_sub_check(union codetag_ref *ref)
{
	WARN_ONCE(ref && !ref->ct, "alloc_tag was not set\n");
}
#else
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag) {}
static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
#endif

/* The caller should verify that both ref and tag are valid */
static inline bool __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
	alloc_tag_add_check(ref, tag);
	if (!ref || !tag)
		return false;

	ref->ct = &tag->ct;
	return true;
}

static inline bool alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
	if (unlikely(!__alloc_tag_ref_set(ref, tag)))
		return false;

	/*
	 * We need to increment the call counter every time we have a new
	 * allocation or when we split a large allocation into smaller ones.
	 * Each new reference for every sub-allocation needs to increment the
	 * call counter because the counter will be decremented when each part
	 * is freed.
	 */
	this_cpu_inc(tag->counters->calls);
	return true;
}

/* Point @ref at @tag and charge @bytes to it */
static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
{
	if (likely(alloc_tag_ref_set(ref, tag)))
		this_cpu_add(tag->counters->bytes, bytes);
}

/* Uncharge @bytes from the tag @ref points at and clear the reference */
static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
{
	struct alloc_tag *tag;

	alloc_tag_sub_check(ref);
	if (!ref || !ref->ct)
		return;

	if (is_codetag_empty(ref)) {
		ref->ct = NULL;
		return;
	}

	tag = ct_to_alloc_tag(ref->ct);

	this_cpu_sub(tag->counters->bytes, bytes);
	this_cpu_dec(tag->counters->calls);

	ref->ct = NULL;
}
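
/*
 * Pairing sketch (hypothetical @ref storage): an allocator keeps a
 * union codetag_ref next to each object, charges it at allocation time and
 * uncharges it on free:
 *
 *	alloc_tag_add(ref, current->alloc_tag, size);
 *	...
 *	alloc_tag_sub(ref, size);
 */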

/* Snapshot the tag currently active on this task into @p */
#define alloc_tag_record(p)	((p) = current->alloc_tag)

#else /* CONFIG_MEM_ALLOC_PROFILING */

#define DEFINE_ALLOC_TAG(_alloc_tag)
static inline bool mem_alloc_profiling_enabled(void) { return false; }
static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
				 size_t bytes) {}
static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
#define alloc_tag_record(p)	do {} while (0)

#endif /* CONFIG_MEM_ALLOC_PROFILING */

/*
 * Run @_do_alloc with @_tag installed as the task's current tag, so the
 * allocator can account the result against that tag.
 */
#define alloc_hooks_tag(_tag, _do_alloc)				\
({									\
	typeof(_do_alloc) _res;						\
	if (mem_alloc_profiling_enabled()) {				\
		struct alloc_tag * __maybe_unused _old;			\
		_old = alloc_tag_save(_tag);				\
		_res = _do_alloc;					\
		alloc_tag_restore(_tag, _old);				\
	} else								\
		_res = _do_alloc;					\
	_res;								\
})

/* Define a tag for this callsite and run the allocation under it */
#define alloc_hooks(_do_alloc)						\
({									\
	DEFINE_ALLOC_TAG(_alloc_tag);					\
	alloc_hooks_tag(&_alloc_tag, _do_alloc);			\
})
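
/*
 * Wrapping sketch (hypothetical my_alloc()/my_alloc_noprof(), mirroring the
 * kernel's *_noprof convention): the public API is a macro so that every
 * caller gets a distinct tag:
 *
 *	void *my_alloc_noprof(size_t size);
 *	#define my_alloc(...)	alloc_hooks(my_alloc_noprof(__VA_ARGS__))
 */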

#endif /* _LINUX_ALLOC_TAG_H */