v6.13.7
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>

#include "bpf_experimental.h"
#include "bpf_misc.h"

struct generic_map_value {
	void *data;
};

char _license[] SEC("license") = "GPL";

const unsigned int data_sizes[] = {16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096};
const volatile unsigned int data_btf_ids[ARRAY_SIZE(data_sizes)] = {};

const unsigned int percpu_data_sizes[] = {8, 16, 32, 64, 96, 128, 192, 256, 512};
const volatile unsigned int percpu_data_btf_ids[ARRAY_SIZE(percpu_data_sizes)] = {};

int err = 0;
u32 pid = 0;

#define DEFINE_ARRAY_WITH_KPTR(_size) \
	struct bin_data_##_size { \
		char data[_size - sizeof(void *)]; \
	}; \
	/* See Commit 5d8d6634ccc, force btf generation for type bin_data_##_size */	\
	struct bin_data_##_size *__bin_data_##_size; \
	struct map_value_##_size { \
		struct bin_data_##_size __kptr * data; \
	}; \
	struct { \
		__uint(type, BPF_MAP_TYPE_ARRAY); \
		__type(key, int); \
		__type(value, struct map_value_##_size); \
		__uint(max_entries, 128); \
	} array_##_size SEC(".maps")

#define DEFINE_ARRAY_WITH_PERCPU_KPTR(_size) \
	struct percpu_bin_data_##_size { \
		char data[_size]; \
	}; \
	struct percpu_bin_data_##_size *__percpu_bin_data_##_size; \
	struct map_value_percpu_##_size { \
		struct percpu_bin_data_##_size __percpu_kptr * data; \
	}; \
	struct { \
		__uint(type, BPF_MAP_TYPE_ARRAY); \
		__type(key, int); \
		__type(value, struct map_value_percpu_##_size); \
		__uint(max_entries, 128); \
	} array_percpu_##_size SEC(".maps")

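/*
 * Note: data_btf_ids[]/percpu_data_btf_ids[] above are const volatile and
 * start out as zero; they are meant to be filled in by the userspace loader
 * (with the local BTF ids of the bin_data_* and percpu_bin_data_* structs)
 * before the object is loaded, so that bpf_obj_new_impl() and
 * bpf_percpu_obj_new_impl() below can allocate objects whose size is chosen
 * at run time.
 */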
static __always_inline void batch_alloc(struct bpf_map *map, unsigned int batch, unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old, *new;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 1;
			return;
		}
		new = bpf_obj_new_impl(data_btf_ids[idx], NULL);
		if (!new) {
			err = 2;
			return;
		}
		old = bpf_kptr_xchg(&value->data, new);
		if (old) {
			bpf_obj_drop(old);
			err = 3;
			return;
		}
	}
}

static __always_inline void batch_free(struct bpf_map *map, unsigned int batch, unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 4;
			return;
		}
		old = bpf_kptr_xchg(&value->data, NULL);
		if (!old) {
			err = 5;
			return;
		}
		bpf_obj_drop(old);
	}
}

static __always_inline void batch_percpu_alloc(struct bpf_map *map, unsigned int batch,
					       unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old, *new;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 1;
			return;
		}
		/* per-cpu allocator may not be able to refill in time */
		new = bpf_percpu_obj_new_impl(percpu_data_btf_ids[idx], NULL);
		if (!new)
			continue;

		old = bpf_kptr_xchg(&value->data, new);
		if (old) {
			bpf_percpu_obj_drop(old);
			err = 2;
			return;
		}
	}
}

static __always_inline void batch_percpu_free(struct bpf_map *map, unsigned int batch,
					      unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 3;
			return;
		}
		old = bpf_kptr_xchg(&value->data, NULL);
		if (!old)
			continue;
		bpf_percpu_obj_drop(old);
	}
}

#define CALL_BATCH_ALLOC(size, batch, idx) \
	batch_alloc((struct bpf_map *)(&array_##size), batch, idx)

#define CALL_BATCH_ALLOC_FREE(size, batch, idx) \
	do { \
		batch_alloc((struct bpf_map *)(&array_##size), batch, idx); \
		batch_free((struct bpf_map *)(&array_##size), batch, idx); \
	} while (0)

#define CALL_BATCH_PERCPU_ALLOC(size, batch, idx) \
	batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx)

#define CALL_BATCH_PERCPU_ALLOC_FREE(size, batch, idx) \
	do { \
		batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx); \
		batch_percpu_free((struct bpf_map *)(&array_percpu_##size), batch, idx); \
	} while (0)

/* kptr doesn't support bin_data_8 which is a zero-sized array */
DEFINE_ARRAY_WITH_KPTR(16);
DEFINE_ARRAY_WITH_KPTR(32);
DEFINE_ARRAY_WITH_KPTR(64);
DEFINE_ARRAY_WITH_KPTR(96);
DEFINE_ARRAY_WITH_KPTR(128);
DEFINE_ARRAY_WITH_KPTR(192);
DEFINE_ARRAY_WITH_KPTR(256);
DEFINE_ARRAY_WITH_KPTR(512);
DEFINE_ARRAY_WITH_KPTR(1024);
DEFINE_ARRAY_WITH_KPTR(2048);
DEFINE_ARRAY_WITH_KPTR(4096);

DEFINE_ARRAY_WITH_PERCPU_KPTR(8);
DEFINE_ARRAY_WITH_PERCPU_KPTR(16);
DEFINE_ARRAY_WITH_PERCPU_KPTR(32);
DEFINE_ARRAY_WITH_PERCPU_KPTR(64);
DEFINE_ARRAY_WITH_PERCPU_KPTR(96);
DEFINE_ARRAY_WITH_PERCPU_KPTR(128);
DEFINE_ARRAY_WITH_PERCPU_KPTR(192);
DEFINE_ARRAY_WITH_PERCPU_KPTR(256);
DEFINE_ARRAY_WITH_PERCPU_KPTR(512);

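/*
 * The "?" prefix below means these programs are not autoloaded by default;
 * userspace enables the one it wants before loading the object. SYS_PREFIX
 * comes from bpf_misc.h and expands to the arch-specific syscall symbol
 * prefix (e.g. "__x64_" on x86_64), so the fentry attaches to the
 * architecture's sys_nanosleep entry point.
 */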
SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_batch_alloc_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* Alloc 128 16-byte objects in batch to trigger refilling,
	 * then free 128 16-byte objects in batch to trigger freeing.
	 */
	CALL_BATCH_ALLOC_FREE(16, 128, 0);
	CALL_BATCH_ALLOC_FREE(32, 128, 1);
	CALL_BATCH_ALLOC_FREE(64, 128, 2);
	CALL_BATCH_ALLOC_FREE(96, 128, 3);
	CALL_BATCH_ALLOC_FREE(128, 128, 4);
	CALL_BATCH_ALLOC_FREE(192, 128, 5);
	CALL_BATCH_ALLOC_FREE(256, 128, 6);
	CALL_BATCH_ALLOC_FREE(512, 64, 7);
	CALL_BATCH_ALLOC_FREE(1024, 32, 8);
	CALL_BATCH_ALLOC_FREE(2048, 16, 9);
	CALL_BATCH_ALLOC_FREE(4096, 8, 10);

	return 0;
}

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_free_through_map_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* Alloc 128 16-byte objects in batch to trigger refilling,
	 * then free these objects through map free.
	 */
	CALL_BATCH_ALLOC(16, 128, 0);
	CALL_BATCH_ALLOC(32, 128, 1);
	CALL_BATCH_ALLOC(64, 128, 2);
	CALL_BATCH_ALLOC(96, 128, 3);
	CALL_BATCH_ALLOC(128, 128, 4);
	CALL_BATCH_ALLOC(192, 128, 5);
	CALL_BATCH_ALLOC(256, 128, 6);
	CALL_BATCH_ALLOC(512, 64, 7);
	CALL_BATCH_ALLOC(1024, 32, 8);
	CALL_BATCH_ALLOC(2048, 16, 9);
	CALL_BATCH_ALLOC(4096, 8, 10);

	return 0;
}

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_batch_percpu_alloc_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* Alloc 128 8-byte per-cpu objects in batch to trigger refilling,
	 * then free 128 8-byte per-cpu objects in batch to trigger freeing.
	 */
	CALL_BATCH_PERCPU_ALLOC_FREE(8, 128, 0);
	CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 1);
	CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 2);
	CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 3);
	CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 4);
	CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 5);
	CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 6);
	CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 7);
	CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 8);

	return 0;
}

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_percpu_free_through_map_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* Alloc 128 8-byte per-cpu objects in batch to trigger refilling,
	 * then free these objects through map free.
	 */
	CALL_BATCH_PERCPU_ALLOC(8, 128, 0);
	CALL_BATCH_PERCPU_ALLOC(16, 128, 1);
	CALL_BATCH_PERCPU_ALLOC(32, 128, 2);
	CALL_BATCH_PERCPU_ALLOC(64, 128, 3);
	CALL_BATCH_PERCPU_ALLOC(96, 128, 4);
	CALL_BATCH_PERCPU_ALLOC(128, 128, 5);
	CALL_BATCH_PERCPU_ALLOC(192, 128, 6);
	CALL_BATCH_PERCPU_ALLOC(256, 128, 7);
	CALL_BATCH_PERCPU_ALLOC(512, 64, 8);

	return 0;
}
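
The BPF object above is only half of the test: the const volatile data_btf_ids / percpu_data_btf_ids arrays and the pid filter are zero until userspace fills them in, and the "?fentry" programs are not autoloaded. The following is a minimal, hypothetical userspace sketch of how such a loader could look. It assumes a bpftool-generated skeleton named test_bpf_ma.skel.h and that err and pid land in the .bss section; the skeleton name and the test_bpf_ma__*() calls come from that assumption, not from the file above.

/* Hypothetical userspace loader sketch (not part of the kernel file above). */
#include <stdio.h>
#include <unistd.h>
#include <time.h>
#include <sys/syscall.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include "test_bpf_ma.skel.h"	/* assumed bpftool-generated skeleton */

static int fill_btf_ids(struct btf *btf, const char *prefix,
			const unsigned int *sizes, unsigned int cnt,
			unsigned int *ids)
{
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		char name[64];
		int id;

		/* e.g. "bin_data_16" or "percpu_bin_data_8" */
		snprintf(name, sizeof(name), "%s%u", prefix, sizes[i]);
		id = btf__find_by_name_kind(btf, name, BTF_KIND_STRUCT);
		if (id < 0)
			return id;
		ids[i] = id;
	}
	return 0;
}

int main(void)
{
	const unsigned int sizes[] = {16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096};
	const unsigned int percpu_sizes[] = {8, 16, 32, 64, 96, 128, 192, 256, 512};
	struct timespec req = { .tv_nsec = 1 };
	struct test_bpf_ma *skel;
	int err;

	skel = test_bpf_ma__open();
	if (!skel)
		return 1;

	/* The bin_data_* BTF ids are only known after open(). */
	err = fill_btf_ids(bpf_object__btf(skel->obj), "bin_data_",
			   sizes, 11, (unsigned int *)skel->rodata->data_btf_ids);
	if (!err)
		err = fill_btf_ids(bpf_object__btf(skel->obj), "percpu_bin_data_",
				   percpu_sizes, 9,
				   (unsigned int *)skel->rodata->percpu_data_btf_ids);
	if (err)
		goto out;

	skel->bss->pid = getpid();

	/* "?fentry" programs are not autoloaded; enable one explicitly. */
	bpf_program__set_autoload(skel->progs.test_batch_alloc_free, true);

	err = test_bpf_ma__load(skel);
	if (!err)
		err = test_bpf_ma__attach(skel);
	if (err)
		goto out;

	/* Enter sys_nanosleep directly so the fentry program fires. */
	syscall(SYS_nanosleep, &req, NULL);

	fprintf(stderr, "err = %d\n", skel->bss->err);
out:
	test_bpf_ma__destroy(skel);
	return err ? 1 : 0;
}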