Linux Audio

Check our new training course

Loading...
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright (c) 2020 Facebook */
  3#define _GNU_SOURCE
  4#include <argp.h>
  5#include <unistd.h>
  6#include <stdint.h>
  7#include "bpf_util.h"
  8#include "bench.h"
  9#include "trigger_bench.skel.h"
 10#include "trace_helpers.h"
 11
 12#define MAX_TRIG_BATCH_ITERS 1000
 13
 14static struct {
 15	__u32 batch_iters;
 16} args = {
 17	.batch_iters = 100,
 18};
 19
 20enum {
 21	ARG_TRIG_BATCH_ITERS = 7000,
 22};
 23
 24static const struct argp_option opts[] = {
 25	{ "trig-batch-iters", ARG_TRIG_BATCH_ITERS, "BATCH_ITER_CNT", 0,
 26		"Number of in-kernel iterations per one driver test run"},
 27	{},
 28};
 29
 30static error_t parse_arg(int key, char *arg, struct argp_state *state)
 31{
 32	long ret;
 33
 34	switch (key) {
 35	case ARG_TRIG_BATCH_ITERS:
 36		ret = strtol(arg, NULL, 10);
 37		if (ret < 1 || ret > MAX_TRIG_BATCH_ITERS) {
 38			fprintf(stderr, "invalid --trig-batch-iters value (should be between %d and %d)\n",
 39				1, MAX_TRIG_BATCH_ITERS);
 40			argp_usage(state);
 41		}
 42		args.batch_iters = ret;
 43		break;
 44	default:
 45		return ARGP_ERR_UNKNOWN;
 46	}
 47
 48	return 0;
 49}
 50
 51const struct argp bench_trigger_batch_argp = {
 52	.options = opts,
 53	.parser = parse_arg,
 54};
 55
  56/* adjust slot shift in inc_hits() if changing */
  57#define MAX_BUCKETS 256
  58
  59#pragma GCC diagnostic ignored "-Wattributes"
  60
  61/* BPF triggering benchmarks */
  62static struct trigger_ctx {
  63	struct trigger_bench *skel;
  64	bool usermode_counters;	/* count hits in user space instead of in BPF */
  65	int driver_prog_fd;	/* when non-zero, overrides the default driver prog */
  66} ctx;
  67
  68static struct counter base_hits[MAX_BUCKETS]; /* user-space hit counters, hashed by TID */
 69
 70static __always_inline void inc_counter(struct counter *counters)
 71{
 72	static __thread int tid = 0;
 73	unsigned slot;
 74
 75	if (unlikely(tid == 0))
 76		tid = sys_gettid();
 77
 78	/* multiplicative hashing, it's fast */
 79	slot = 2654435769U * tid;
 80	slot >>= 24;
 81
 82	atomic_inc(&base_hits[slot].value); /* use highest byte as an index */
 83}
 84
 85static long sum_and_reset_counters(struct counter *counters)
 86{
 87	int i;
 88	long sum = 0;
 89
 90	for (i = 0; i < MAX_BUCKETS; i++)
 91		sum += atomic_swap(&counters[i].value, 0);
 92	return sum;
 93}
 94
 95static void trigger_validate(void)
 96{
 97	if (env.consumer_cnt != 0) {
 98		fprintf(stderr, "benchmark doesn't support consumer!\n");
 99		exit(1);
100	}
101}
102
103static void *trigger_producer(void *input)
104{
105	if (ctx.usermode_counters) {
106		while (true) {
107			(void)syscall(__NR_getpgid);
108			inc_counter(base_hits);
109		}
110	} else {
111		while (true)
112			(void)syscall(__NR_getpgid);
113	}
114	return NULL;
115}
116
117static void *trigger_producer_batch(void *input)
118{
119	int fd = ctx.driver_prog_fd ?: bpf_program__fd(ctx.skel->progs.trigger_driver);
 
120
 
 
121	while (true)
122		bpf_prog_test_run_opts(fd, NULL);
123
124	return NULL;
125}
126
127static void trigger_measure(struct bench_res *res)
128{
129	if (ctx.usermode_counters)
130		res->hits = sum_and_reset_counters(base_hits);
131	else
132		res->hits = sum_and_reset_counters(ctx.skel->bss->hits);
133}
134
/* Open (but don't yet load) the skeleton and apply common configuration;
 * callers toggle per-benchmark program autoload before load_ctx().
 * Exits the process on failure.
 */
 135static void setup_ctx(void)
 136{
 137	setup_libbpf();
 138
 139	ctx.skel = trigger_bench__open();
 140	if (!ctx.skel) {
 141		fprintf(stderr, "failed to open skeleton\n");
 142		exit(1);
 143	}
 144
 145	/* default "driver" BPF program */
 146	bpf_program__set_autoload(ctx.skel->progs.trigger_driver, true);
 147
	/* propagate --trig-batch-iters into the BPF side's rodata */
 148	ctx.skel->rodata->batch_iters = args.batch_iters;
 149}
150
151static void load_ctx(void)
152{
153	int err;
154
155	err = trigger_bench__load(ctx.skel);
156	if (err) {
157		fprintf(stderr, "failed to open skeleton\n");
158		exit(1);
159	}
160}
161
/* Attach prog at its default attach point; exits the process on failure.
 * The returned link is intentionally leaked — it lives for the whole run.
 */
static void attach_bpf(struct bpf_program *prog)
{
	struct bpf_link *link = bpf_program__attach(prog);

	if (link)
		return;

	fprintf(stderr, "failed to attach program!\n");
	exit(1);
}
172
/* Pure user-space counting: no BPF programs are loaded at all; the
 * producer counts its own syscalls (see trigger_producer()).
 */
 173static void trigger_syscall_count_setup(void)
 174{
 175	ctx.usermode_counters = true;
 
 176}
 177
 178/* Batched, staying mostly in-kernel triggering setups */
 179static void trigger_kernel_count_setup(void)
 180{
 181	setup_ctx();
 182	bpf_program__set_autoload(ctx.skel->progs.trigger_driver, false);
 183	bpf_program__set_autoload(ctx.skel->progs.trigger_count, true);
 184	load_ctx();
 185	/* override driver program */
 186	ctx.driver_prog_fd = bpf_program__fd(ctx.skel->progs.trigger_count);
 187}
188
/* Per-flavor setups below share one shape: enable exactly one benchmark
 * program (plus the default trigger_driver enabled by setup_ctx()), load,
 * then attach the benchmark program at its default attach point.
 */
 189static void trigger_kprobe_setup(void)
 190{
 191	setup_ctx();
 192	bpf_program__set_autoload(ctx.skel->progs.bench_trigger_kprobe, true);
 193	load_ctx();
 194	attach_bpf(ctx.skel->progs.bench_trigger_kprobe);
 195}
 196
 197static void trigger_kretprobe_setup(void)
 198{
 199	setup_ctx();
 200	bpf_program__set_autoload(ctx.skel->progs.bench_trigger_kretprobe, true);
 201	load_ctx();
 202	attach_bpf(ctx.skel->progs.bench_trigger_kretprobe);
 203}
 204
 205static void trigger_kprobe_multi_setup(void)
 206{
 207	setup_ctx();
 208	bpf_program__set_autoload(ctx.skel->progs.bench_trigger_kprobe_multi, true);
 209	load_ctx();
 210	attach_bpf(ctx.skel->progs.bench_trigger_kprobe_multi);
 211}
 212
 213static void trigger_kretprobe_multi_setup(void)
 214{
 215	setup_ctx();
 216	bpf_program__set_autoload(ctx.skel->progs.bench_trigger_kretprobe_multi, true);
 217	load_ctx();
 218	attach_bpf(ctx.skel->progs.bench_trigger_kretprobe_multi);
 219}
 220
 221static void trigger_fentry_setup(void)
 222{
 223	setup_ctx();
 224	bpf_program__set_autoload(ctx.skel->progs.bench_trigger_fentry, true);
 225	load_ctx();
 226	attach_bpf(ctx.skel->progs.bench_trigger_fentry);
 227}
 228
 229static void trigger_fexit_setup(void)
 230{
 231	setup_ctx();
 232	bpf_program__set_autoload(ctx.skel->progs.bench_trigger_fexit, true);
 233	load_ctx();
 234	attach_bpf(ctx.skel->progs.bench_trigger_fexit);
 235}
236
/* fmodret/tp/rawtp variants swap the default driver for the kfunc-based
 * one (trigger_driver_kfunc) before loading, and run through that instead.
 */
 237static void trigger_fmodret_setup(void)
 238{
 239	setup_ctx();
 240	bpf_program__set_autoload(ctx.skel->progs.trigger_driver, false);
 241	bpf_program__set_autoload(ctx.skel->progs.trigger_driver_kfunc, true);
 242	bpf_program__set_autoload(ctx.skel->progs.bench_trigger_fmodret, true);
 243	load_ctx();
 244	/* override driver program */
 245	ctx.driver_prog_fd = bpf_program__fd(ctx.skel->progs.trigger_driver_kfunc);
 246	attach_bpf(ctx.skel->progs.bench_trigger_fmodret);
 247}
 248
 249static void trigger_tp_setup(void)
 250{
 251	setup_ctx();
 252	bpf_program__set_autoload(ctx.skel->progs.trigger_driver, false);
 253	bpf_program__set_autoload(ctx.skel->progs.trigger_driver_kfunc, true);
 254	bpf_program__set_autoload(ctx.skel->progs.bench_trigger_tp, true);
 255	load_ctx();
 256	/* override driver program */
 257	ctx.driver_prog_fd = bpf_program__fd(ctx.skel->progs.trigger_driver_kfunc);
 258	attach_bpf(ctx.skel->progs.bench_trigger_tp);
 259}
 260
 261static void trigger_rawtp_setup(void)
 262{
 263	setup_ctx();
 264	bpf_program__set_autoload(ctx.skel->progs.trigger_driver, false);
 265	bpf_program__set_autoload(ctx.skel->progs.trigger_driver_kfunc, true);
 266	bpf_program__set_autoload(ctx.skel->progs.bench_trigger_rawtp, true);
 267	load_ctx();
 268	/* override driver program */
 269	ctx.driver_prog_fd = bpf_program__fd(ctx.skel->progs.trigger_driver_kfunc);
 270	attach_bpf(ctx.skel->progs.bench_trigger_rawtp);
 271}
272
273/* make sure call is not inlined and not avoided by compiler, so __weak and
274 * inline asm volatile in the body of the function
275 *
276 * There is a performance difference between uprobing at nop location vs other
277 * instructions. So use two different targets, one of which starts with nop
278 * and another doesn't.
279 *
280 * GCC doesn't generate stack setup preamble for these functions due to them
281 * having no input arguments and doing nothing in the body.
282 */
 283__nocf_check __weak void uprobe_target_nop(void)
 284{
 285	asm volatile ("nop");
 286}
 287
/* opaque callee keeps uprobe_target_push() from being optimized away */
 288__weak void opaque_noop_func(void)
 289{
 290}
 291
/* target whose first instruction is NOT a nop (it sets up a call) */
 292__nocf_check __weak int uprobe_target_push(void)
 293{
 294	/* overhead of function call is negligible compared to uprobe
 295	 * triggering, so this shouldn't affect benchmark results much
 296	 */
 297	opaque_noop_func();
 298	return 1;
 299}
 300
/* empty target: returns immediately */
 301__nocf_check __weak void uprobe_target_ret(void)
 302{
 303	asm volatile ("");
 304}
305
306static void *uprobe_producer_count(void *input)
307{
308	while (true) {
309		uprobe_target_nop();
310		inc_counter(base_hits);
311	}
312	return NULL;
313}
314
315static void *uprobe_producer_nop(void *input)
316{
317	while (true)
318		uprobe_target_nop();
319	return NULL;
320}
321
322static void *uprobe_producer_push(void *input)
323{
324	while (true)
325		uprobe_target_push();
326	return NULL;
327}
328
329static void *uprobe_producer_ret(void *input)
330{
331	while (true)
332		uprobe_target_ret();
333	return NULL;
334}
335
/* Common uprobe/uretprobe setup: open the skeleton, enable either the
 * single-uprobe or multi-uprobe benchmark program, load, resolve the file
 * offset of target_addr inside our own binary, and attach. Exits the
 * process on any failure.
 */
 336static void usetup(bool use_retprobe, bool use_multi, void *target_addr)
 337{
 338	size_t uprobe_offset;
 339	struct bpf_link *link;
 340	int err;
 341
 342	setup_libbpf();
 343
 344	ctx.skel = trigger_bench__open();
 345	if (!ctx.skel) {
 346		fprintf(stderr, "failed to open skeleton\n");
 347		exit(1);
 348	}
 349
 350	if (use_multi)
 351		bpf_program__set_autoload(ctx.skel->progs.bench_trigger_uprobe_multi, true);
 352	else
 353		bpf_program__set_autoload(ctx.skel->progs.bench_trigger_uprobe, true);
 354
 355	err = trigger_bench__load(ctx.skel);
 356	if (err) {
 357		fprintf(stderr, "failed to load skeleton\n");
 358		exit(1);
 359	}
 360
 361	uprobe_offset = get_uprobe_offset(target_addr);
 362	if (use_multi) {
 363		LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
 364			.retprobe = use_retprobe,
 365			.cnt = 1,
 366			.offsets = &uprobe_offset,
 367		);
 368		link = bpf_program__attach_uprobe_multi(
 369			ctx.skel->progs.bench_trigger_uprobe_multi,
 370			-1 /* all PIDs */, "/proc/self/exe", NULL, &opts);
	/* store the link on the skeleton so it is destroyed with it */
 371		ctx.skel->links.bench_trigger_uprobe_multi = link;
 372	} else {
 373		link = bpf_program__attach_uprobe(ctx.skel->progs.bench_trigger_uprobe,
 374						  use_retprobe,
 375						  -1 /* all PIDs */,
 376						  "/proc/self/exe",
 377						  uprobe_offset);
 378		ctx.skel->links.bench_trigger_uprobe = link;
 379	}
 380	if (!link) {
 381		fprintf(stderr, "failed to attach %s!\n", use_multi ? "multi-uprobe" : "uprobe");
 382		exit(1);
 383	}
 
 384}
385
/* Thin setup wrappers: pick uprobe vs uretprobe and the probed target.
 * usermode_count_setup() attaches nothing; hits are counted in user space.
 */
 386static void usermode_count_setup(void)
 387{
 388	ctx.usermode_counters = true;
 389}
 390
 391static void uprobe_nop_setup(void)
 392{
 393	usetup(false, false /* !use_multi */, &uprobe_target_nop);
 394}
 395
 396static void uretprobe_nop_setup(void)
 397{
 398	usetup(true, false /* !use_multi */, &uprobe_target_nop);
 399}
 400
 401static void uprobe_push_setup(void)
 402{
 403	usetup(false, false /* !use_multi */, &uprobe_target_push);
 404}
 405
 406static void uretprobe_push_setup(void)
 407{
 408	usetup(true, false /* !use_multi */, &uprobe_target_push);
 409}
 410
 411static void uprobe_ret_setup(void)
 412{
 413	usetup(false, false /* !use_multi */, &uprobe_target_ret);
 414}
 415
 416static void uretprobe_ret_setup(void)
 417{
 418	usetup(true, false /* !use_multi */, &uprobe_target_ret);
 419}
 
 
 
 
420
/* Same wrappers, multi-uprobe flavor (bpf_program__attach_uprobe_multi). */
 421static void uprobe_multi_nop_setup(void)
 422{
 423	usetup(false, true /* use_multi */, &uprobe_target_nop);
 424}

 425
 426static void uretprobe_multi_nop_setup(void)
 427{
 428	usetup(true, true /* use_multi */, &uprobe_target_nop);
 429}

 430
 431static void uprobe_multi_push_setup(void)
 432{
 433	usetup(false, true /* use_multi */, &uprobe_target_push);
 434}

 435
 436static void uretprobe_multi_push_setup(void)
 437{
 438	usetup(true, true /* use_multi */, &uprobe_target_push);
 439}

 440
 441static void uprobe_multi_ret_setup(void)
 442{
 443	usetup(false, true /* use_multi */, &uprobe_target_ret);
 444}

 445
 446static void uretprobe_multi_ret_setup(void)
 447{
 448	usetup(true, true /* use_multi */, &uprobe_target_ret);
 449}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
450
/* driver-less benchmark: raw syscall rate, hits counted in user space */
 451const struct bench bench_trig_syscall_count = {
 452	.name = "trig-syscall-count",
 453	.validate = trigger_validate,
 454	.setup = trigger_syscall_count_setup,
 455	.producer_thread = trigger_producer,
 456	.measure = trigger_measure,
 457	.report_progress = hits_drops_report_progress,
 458	.report_final = hits_drops_report_final,
 459};
460
461/* batched (staying mostly in kernel) kprobe/fentry benchmarks */
462#define BENCH_TRIG_KERNEL(KIND, NAME)					\
463const struct bench bench_trig_##KIND = {				\
464	.name = "trig-" NAME,						\
465	.setup = trigger_##KIND##_setup,				\
466	.producer_thread = trigger_producer_batch,			\
467	.measure = trigger_measure,					\
468	.report_progress = hits_drops_report_progress,			\
469	.report_final = hits_drops_report_final,			\
470	.argp = &bench_trigger_batch_argp,				\
471}
472
473BENCH_TRIG_KERNEL(kernel_count, "kernel-count");
474BENCH_TRIG_KERNEL(kprobe, "kprobe");
475BENCH_TRIG_KERNEL(kretprobe, "kretprobe");
476BENCH_TRIG_KERNEL(kprobe_multi, "kprobe-multi");
477BENCH_TRIG_KERNEL(kretprobe_multi, "kretprobe-multi");
478BENCH_TRIG_KERNEL(fentry, "fentry");
479BENCH_TRIG_KERNEL(fexit, "fexit");
480BENCH_TRIG_KERNEL(fmodret, "fmodret");
481BENCH_TRIG_KERNEL(tp, "tp");
482BENCH_TRIG_KERNEL(rawtp, "rawtp");
483
484/* uprobe benchmarks */
485#define BENCH_TRIG_USERMODE(KIND, PRODUCER, NAME)			\
486const struct bench bench_trig_##KIND = {				\
487	.name = "trig-" NAME,						\
488	.validate = trigger_validate,					\
489	.setup = KIND##_setup,						\
490	.producer_thread = uprobe_producer_##PRODUCER,			\
491	.measure = trigger_measure,					\
492	.report_progress = hits_drops_report_progress,			\
493	.report_final = hits_drops_report_final,			\
494}
495
496BENCH_TRIG_USERMODE(usermode_count, count, "usermode-count");
497BENCH_TRIG_USERMODE(uprobe_nop, nop, "uprobe-nop");
498BENCH_TRIG_USERMODE(uprobe_push, push, "uprobe-push");
499BENCH_TRIG_USERMODE(uprobe_ret, ret, "uprobe-ret");
500BENCH_TRIG_USERMODE(uretprobe_nop, nop, "uretprobe-nop");
501BENCH_TRIG_USERMODE(uretprobe_push, push, "uretprobe-push");
502BENCH_TRIG_USERMODE(uretprobe_ret, ret, "uretprobe-ret");
503BENCH_TRIG_USERMODE(uprobe_multi_nop, nop, "uprobe-multi-nop");
504BENCH_TRIG_USERMODE(uprobe_multi_push, push, "uprobe-multi-push");
505BENCH_TRIG_USERMODE(uprobe_multi_ret, ret, "uprobe-multi-ret");
506BENCH_TRIG_USERMODE(uretprobe_multi_nop, nop, "uretprobe-multi-nop");
507BENCH_TRIG_USERMODE(uretprobe_multi_push, push, "uretprobe-multi-push");
508BENCH_TRIG_USERMODE(uretprobe_multi_ret, ret, "uretprobe-multi-ret");
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
v6.9.4
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright (c) 2020 Facebook */
 
 
 
 
 
  3#include "bench.h"
  4#include "trigger_bench.skel.h"
  5#include "trace_helpers.h"
  6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  7/* BPF triggering benchmarks */
  8static struct trigger_ctx {
  9	struct trigger_bench *skel;
 
 
 10} ctx;
 11
 12static struct counter base_hits;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 13
 14static void trigger_validate(void)
 15{
 16	if (env.consumer_cnt != 0) {
 17		fprintf(stderr, "benchmark doesn't support consumer!\n");
 18		exit(1);
 19	}
 20}
 21
 22static void *trigger_base_producer(void *input)
 23{
 24	while (true) {
 25		(void)syscall(__NR_getpgid);
 26		atomic_inc(&base_hits.value);
 
 
 
 
 
 27	}
 28	return NULL;
 29}
 30
/* Snapshot and reset the user-space baseline counter. */
  31static void trigger_base_measure(struct bench_res *res)
  32{
  33	res->hits = atomic_swap(&base_hits.value, 0);
  34}
 35
 36static void *trigger_producer(void *input)
 37{
 38	while (true)
 39		(void)syscall(__NR_getpgid);
 
 40	return NULL;
 41}
 42
/* Snapshot and reset the BPF-side hit counter. */
  43static void trigger_measure(struct bench_res *res)
  44{
  45	res->hits = atomic_swap(&ctx.skel->bss->hits, 0);



  46}
 47
 48static void setup_ctx(void)
 49{
 50	setup_libbpf();
 51
 52	ctx.skel = trigger_bench__open_and_load();
 53	if (!ctx.skel) {
 54		fprintf(stderr, "failed to open skeleton\n");
 55		exit(1);
 56	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 57}
 58
 59static void attach_bpf(struct bpf_program *prog)
 60{
 61	struct bpf_link *link;
 62
 63	link = bpf_program__attach(prog);
 64	if (!link) {
 65		fprintf(stderr, "failed to attach program!\n");
 66		exit(1);
 67	}
 68}
 69
 70static void trigger_tp_setup(void)
 71{
 72	setup_ctx();
 73	attach_bpf(ctx.skel->progs.bench_trigger_tp);
 74}
 75
 76static void trigger_rawtp_setup(void)
 
 77{
 78	setup_ctx();
 79	attach_bpf(ctx.skel->progs.bench_trigger_raw_tp);
 
 
 
 
 80}
 81
 82static void trigger_kprobe_setup(void)
 83{
 84	setup_ctx();
 
 
 85	attach_bpf(ctx.skel->progs.bench_trigger_kprobe);
 86}
 87
 88static void trigger_kretprobe_setup(void)
 89{
 90	setup_ctx();
 
 
 91	attach_bpf(ctx.skel->progs.bench_trigger_kretprobe);
 92}
 93
 94static void trigger_kprobe_multi_setup(void)
 95{
 96	setup_ctx();
 
 
 97	attach_bpf(ctx.skel->progs.bench_trigger_kprobe_multi);
 98}
 99
100static void trigger_kretprobe_multi_setup(void)
101{
102	setup_ctx();
 
 
103	attach_bpf(ctx.skel->progs.bench_trigger_kretprobe_multi);
104}
105
106static void trigger_fentry_setup(void)
107{
108	setup_ctx();
 
 
109	attach_bpf(ctx.skel->progs.bench_trigger_fentry);
110}
111
112static void trigger_fexit_setup(void)
113{
114	setup_ctx();
 
 
115	attach_bpf(ctx.skel->progs.bench_trigger_fexit);
116}
117
118static void trigger_fentry_sleep_setup(void)
119{
120	setup_ctx();
121	attach_bpf(ctx.skel->progs.bench_trigger_fentry_sleep);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122}
123
124static void trigger_fmodret_setup(void)
125{
126	setup_ctx();
127	attach_bpf(ctx.skel->progs.bench_trigger_fmodret);
 
 
 
 
 
 
128}
129
130/* make sure call is not inlined and not avoided by compiler, so __weak and
131 * inline asm volatile in the body of the function
132 *
133 * There is a performance difference between uprobing at nop location vs other
134 * instructions. So use two different targets, one of which starts with nop
135 * and another doesn't.
136 *
137 * GCC doesn't generate stack setup preamble for these functions due to them
138 * having no input arguments and doing nothing in the body.
139 */
140__weak void uprobe_target_nop(void)
141{
142	asm volatile ("nop");
143}
144
145__weak void opaque_noop_func(void)
146{
147}
148
149__weak int uprobe_target_push(void)
150{
151	/* overhead of function call is negligible compared to uprobe
152	 * triggering, so this shouldn't affect benchmark results much
153	 */
154	opaque_noop_func();
155	return 1;
156}
157
158__weak void uprobe_target_ret(void)
159{
160	asm volatile ("");
161}
162
163static void *uprobe_base_producer(void *input)
164{
165	while (true) {
166		uprobe_target_nop();
167		atomic_inc(&base_hits.value);
168	}
169	return NULL;
170}
171
172static void *uprobe_producer_nop(void *input)
173{
174	while (true)
175		uprobe_target_nop();
176	return NULL;
177}
178
179static void *uprobe_producer_push(void *input)
180{
181	while (true)
182		uprobe_target_push();
183	return NULL;
184}
185
186static void *uprobe_producer_ret(void *input)
187{
188	while (true)
189		uprobe_target_ret();
190	return NULL;
191}
192
/* Uprobe/uretprobe setup: load the skeleton, resolve the file offset of
 * target_addr inside our own binary, and attach. Exits on any failure.
 */
 193static void usetup(bool use_retprobe, void *target_addr)
 194{
 195	size_t uprobe_offset;
 196	struct bpf_link *link;

 197
 198	setup_libbpf();
 199
 200	ctx.skel = trigger_bench__open_and_load();
 201	if (!ctx.skel) {
 202		fprintf(stderr, "failed to open skeleton\n");
 203		exit(1);
 204	}
 205

 206	uprobe_offset = get_uprobe_offset(target_addr);
 207	link = bpf_program__attach_uprobe(ctx.skel->progs.bench_trigger_uprobe,
 208					  use_retprobe,
 209					  -1 /* all PIDs */,
 210					  "/proc/self/exe",
 211					  uprobe_offset);

 212	if (!link) {
 213		fprintf(stderr, "failed to attach uprobe!\n");
 214		exit(1);
 215	}
	/* store the link on the skeleton so it is destroyed with it */
 216	ctx.skel->links.bench_trigger_uprobe = link;
 217}
218
219static void uprobe_setup_nop(void)
220{
221	usetup(false, &uprobe_target_nop);
222}
223
224static void uretprobe_setup_nop(void)
225{
226	usetup(true, &uprobe_target_nop);
227}
228
229static void uprobe_setup_push(void)
230{
231	usetup(false, &uprobe_target_push);
232}
233
234static void uretprobe_setup_push(void)
235{
236	usetup(true, &uprobe_target_push);
237}
238
239static void uprobe_setup_ret(void)
240{
241	usetup(false, &uprobe_target_ret);
242}
243
244static void uretprobe_setup_ret(void)
245{
246	usetup(true, &uprobe_target_ret);
247}
248
249const struct bench bench_trig_base = {
250	.name = "trig-base",
251	.validate = trigger_validate,
252	.producer_thread = trigger_base_producer,
253	.measure = trigger_base_measure,
254	.report_progress = hits_drops_report_progress,
255	.report_final = hits_drops_report_final,
256};
257
258const struct bench bench_trig_tp = {
259	.name = "trig-tp",
260	.validate = trigger_validate,
261	.setup = trigger_tp_setup,
262	.producer_thread = trigger_producer,
263	.measure = trigger_measure,
264	.report_progress = hits_drops_report_progress,
265	.report_final = hits_drops_report_final,
266};
267
268const struct bench bench_trig_rawtp = {
269	.name = "trig-rawtp",
270	.validate = trigger_validate,
271	.setup = trigger_rawtp_setup,
272	.producer_thread = trigger_producer,
273	.measure = trigger_measure,
274	.report_progress = hits_drops_report_progress,
275	.report_final = hits_drops_report_final,
276};
277
278const struct bench bench_trig_kprobe = {
279	.name = "trig-kprobe",
280	.validate = trigger_validate,
281	.setup = trigger_kprobe_setup,
282	.producer_thread = trigger_producer,
283	.measure = trigger_measure,
284	.report_progress = hits_drops_report_progress,
285	.report_final = hits_drops_report_final,
286};
287
288const struct bench bench_trig_kretprobe = {
289	.name = "trig-kretprobe",
290	.validate = trigger_validate,
291	.setup = trigger_kretprobe_setup,
292	.producer_thread = trigger_producer,
293	.measure = trigger_measure,
294	.report_progress = hits_drops_report_progress,
295	.report_final = hits_drops_report_final,
296};
297
298const struct bench bench_trig_kprobe_multi = {
299	.name = "trig-kprobe-multi",
300	.validate = trigger_validate,
301	.setup = trigger_kprobe_multi_setup,
302	.producer_thread = trigger_producer,
303	.measure = trigger_measure,
304	.report_progress = hits_drops_report_progress,
305	.report_final = hits_drops_report_final,
306};
307
308const struct bench bench_trig_kretprobe_multi = {
309	.name = "trig-kretprobe-multi",
310	.validate = trigger_validate,
311	.setup = trigger_kretprobe_multi_setup,
312	.producer_thread = trigger_producer,
313	.measure = trigger_measure,
314	.report_progress = hits_drops_report_progress,
315	.report_final = hits_drops_report_final,
316};
317
318const struct bench bench_trig_fentry = {
319	.name = "trig-fentry",
320	.validate = trigger_validate,
321	.setup = trigger_fentry_setup,
322	.producer_thread = trigger_producer,
323	.measure = trigger_measure,
324	.report_progress = hits_drops_report_progress,
325	.report_final = hits_drops_report_final,
326};
327
328const struct bench bench_trig_fexit = {
329	.name = "trig-fexit",
330	.validate = trigger_validate,
331	.setup = trigger_fexit_setup,
332	.producer_thread = trigger_producer,
333	.measure = trigger_measure,
334	.report_progress = hits_drops_report_progress,
335	.report_final = hits_drops_report_final,
336};
337
338const struct bench bench_trig_fentry_sleep = {
339	.name = "trig-fentry-sleep",
340	.validate = trigger_validate,
341	.setup = trigger_fentry_sleep_setup,
342	.producer_thread = trigger_producer,
343	.measure = trigger_measure,
344	.report_progress = hits_drops_report_progress,
345	.report_final = hits_drops_report_final,
346};
347
348const struct bench bench_trig_fmodret = {
349	.name = "trig-fmodret",
350	.validate = trigger_validate,
351	.setup = trigger_fmodret_setup,
352	.producer_thread = trigger_producer,
353	.measure = trigger_measure,
354	.report_progress = hits_drops_report_progress,
355	.report_final = hits_drops_report_final,
356};
357
358const struct bench bench_trig_uprobe_base = {
359	.name = "trig-uprobe-base",
360	.setup = NULL, /* no uprobe/uretprobe is attached */
361	.producer_thread = uprobe_base_producer,
362	.measure = trigger_base_measure,
363	.report_progress = hits_drops_report_progress,
364	.report_final = hits_drops_report_final,
365};
366
367const struct bench bench_trig_uprobe_nop = {
368	.name = "trig-uprobe-nop",
369	.setup = uprobe_setup_nop,
370	.producer_thread = uprobe_producer_nop,
371	.measure = trigger_measure,
372	.report_progress = hits_drops_report_progress,
373	.report_final = hits_drops_report_final,
374};
375
376const struct bench bench_trig_uretprobe_nop = {
377	.name = "trig-uretprobe-nop",
378	.setup = uretprobe_setup_nop,
379	.producer_thread = uprobe_producer_nop,
380	.measure = trigger_measure,
381	.report_progress = hits_drops_report_progress,
382	.report_final = hits_drops_report_final,
383};
384
385const struct bench bench_trig_uprobe_push = {
386	.name = "trig-uprobe-push",
387	.setup = uprobe_setup_push,
388	.producer_thread = uprobe_producer_push,
389	.measure = trigger_measure,
390	.report_progress = hits_drops_report_progress,
391	.report_final = hits_drops_report_final,
392};
393
394const struct bench bench_trig_uretprobe_push = {
395	.name = "trig-uretprobe-push",
396	.setup = uretprobe_setup_push,
397	.producer_thread = uprobe_producer_push,
398	.measure = trigger_measure,
399	.report_progress = hits_drops_report_progress,
400	.report_final = hits_drops_report_final,
401};
402
403const struct bench bench_trig_uprobe_ret = {
404	.name = "trig-uprobe-ret",
405	.setup = uprobe_setup_ret,
406	.producer_thread = uprobe_producer_ret,
407	.measure = trigger_measure,
408	.report_progress = hits_drops_report_progress,
409	.report_final = hits_drops_report_final,
410};
411
412const struct bench bench_trig_uretprobe_ret = {
413	.name = "trig-uretprobe-ret",
414	.setup = uretprobe_setup_ret,
415	.producer_thread = uprobe_producer_ret,
416	.measure = trigger_measure,
417	.report_progress = hits_drops_report_progress,
418	.report_final = hits_drops_report_final,
419};