1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
3
4#include <vmlinux.h>
5#include <bpf/bpf_tracing.h>
6#include "bpf_misc.h"
7#include "../bpf_testmod/bpf_testmod.h"
8#include "../bpf_testmod/bpf_testmod_kfunc.h"
9
10char _license[] SEC("license") = "GPL";
11
/*
 * Dummy (never-executed) call to bpf_kfunc_st_ops_inc10() so the kfunc
 * stays referenced from C code.
 * NOTE(review): presumably needed because the programs below invoke the
 * kfunc only from naked asm, which the compiler/BTF generator cannot
 * see -- confirm against bpf_misc.h conventions.
 */
void __kfunc_btf_root(void)
{
	bpf_kfunc_st_ops_inc10(NULL);
}
16
17static __noinline __used int subprog(struct st_ops_args *args)
18{
19 args->a += 1;
20 return args->a;
21}
22
/*
 * struct_ops prog that receives a verifier-generated prologue only.
 * The __xlated lines assert the exact post-verifier instruction stream:
 * insns 0-3 are the injected prologue (load the args pointer from ctx,
 * add 1000 to its first u64 field), insns 4-9 are the prog body.
 */
__success
/* prologue */
__xlated("0: r6 = *(u64 *)(r1 +0)")
__xlated("1: r7 = *(u64 *)(r6 +0)")
__xlated("2: r7 += 1000")
__xlated("3: *(u64 *)(r6 +0) = r7")
/* main prog */
__xlated("4: r1 = *(u64 *)(r1 +0)")
__xlated("5: r6 = r1")
__xlated("6: call kernel-function")
__xlated("7: r1 = r6")
__xlated("8: call pc+1")
__xlated("9: exit")
SEC("struct_ops/test_prologue")
__naked int test_prologue(void)
{
	/* Load the struct st_ops_args pointer from the u64 ctx slot, pass it
	 * to the inc10 kfunc and then to subprog().  Written as naked asm so
	 * the emitted instructions match the __xlated expectations above. */
	asm volatile (
	"r1 = *(u64 *)(r1 +0);"
	"r6 = r1;"
	"call %[bpf_kfunc_st_ops_inc10];"
	"r1 = r6;"
	"call subprog;"
	"exit;"
	:
	: __imm(bpf_kfunc_st_ops_inc10)
	: __clobber_all);
}
50
/*
 * struct_ops prog that receives a verifier-generated epilogue only.
 * The verifier spills ctx to the stack at entry (insn 0) so the epilogue
 * (insns 6-13) can reload it, add 10000 to the first u64 field of the
 * args struct, and return that value doubled in r0.
 */
__success
/* save __u64 *ctx to stack */
__xlated("0: *(u64 *)(r10 -8) = r1")
/* main prog */
__xlated("1: r1 = *(u64 *)(r1 +0)")
__xlated("2: r6 = r1")
__xlated("3: call kernel-function")
__xlated("4: r1 = r6")
__xlated("5: call pc+")
/* epilogue */
__xlated("6: r1 = *(u64 *)(r10 -8)")
__xlated("7: r1 = *(u64 *)(r1 +0)")
__xlated("8: r6 = *(u64 *)(r1 +0)")
__xlated("9: r6 += 10000")
__xlated("10: *(u64 *)(r1 +0) = r6")
__xlated("11: r0 = r6")
__xlated("12: r0 *= 2")
__xlated("13: exit")
SEC("struct_ops/test_epilogue")
__naked int test_epilogue(void)
{
	/* Same body as test_prologue: load the args pointer from ctx, run the
	 * inc10 kfunc and subprog().  Naked asm keeps the instruction stream
	 * exactly as matched by the __xlated lines above. */
	asm volatile (
	"r1 = *(u64 *)(r1 +0);"
	"r6 = r1;"
	"call %[bpf_kfunc_st_ops_inc10];"
	"r1 = r6;"
	"call subprog;"
	"exit;"
	:
	: __imm(bpf_kfunc_st_ops_inc10)
	: __clobber_all);
}
83
/*
 * struct_ops prog that receives BOTH a verifier-generated prologue and
 * epilogue.  Insns 0-3: prologue (add 1000 to the first u64 field of the
 * args struct); insn 4: spill ctx for the epilogue; insns 5-9: prog body;
 * insns 10-17: epilogue (add 10000 to the same field, return it doubled).
 */
__success
/* prologue */
__xlated("0: r6 = *(u64 *)(r1 +0)")
__xlated("1: r7 = *(u64 *)(r6 +0)")
__xlated("2: r7 += 1000")
__xlated("3: *(u64 *)(r6 +0) = r7")
/* save __u64 *ctx to stack */
__xlated("4: *(u64 *)(r10 -8) = r1")
/* main prog */
__xlated("5: r1 = *(u64 *)(r1 +0)")
__xlated("6: r6 = r1")
__xlated("7: call kernel-function")
__xlated("8: r1 = r6")
__xlated("9: call pc+")
/* epilogue */
__xlated("10: r1 = *(u64 *)(r10 -8)")
__xlated("11: r1 = *(u64 *)(r1 +0)")
__xlated("12: r6 = *(u64 *)(r1 +0)")
__xlated("13: r6 += 10000")
__xlated("14: *(u64 *)(r1 +0) = r6")
__xlated("15: r0 = r6")
__xlated("16: r0 *= 2")
__xlated("17: exit")
SEC("struct_ops/test_pro_epilogue")
__naked int test_pro_epilogue(void)
{
	/* Same body as the two programs above; only the verifier-injected
	 * prologue/epilogue (asserted by __xlated) differs. */
	asm volatile (
	"r1 = *(u64 *)(r1 +0);"
	"r6 = r1;"
	"call %[bpf_kfunc_st_ops_inc10];"
	"r1 = r6;"
	"call subprog;"
	"exit;"
	:
	: __imm(bpf_kfunc_st_ops_inc10)
	: __clobber_all);
}
121
122SEC("syscall")
123__retval(1011) /* PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] */
124int syscall_prologue(void *ctx)
125{
126 struct st_ops_args args = {};
127
128 return bpf_kfunc_st_ops_test_prologue(&args);
129}
130
131SEC("syscall")
132__retval(20022) /* (KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */
133int syscall_epilogue(void *ctx)
134{
135 struct st_ops_args args = {};
136
137 return bpf_kfunc_st_ops_test_epilogue(&args);
138}
139
140SEC("syscall")
141__retval(22022) /* (PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */
142int syscall_pro_epilogue(void *ctx)
143{
144 struct st_ops_args args = {};
145
146 return bpf_kfunc_st_ops_test_pro_epilogue(&args);
147}
148
/* Register the three struct_ops programs with the test module's
 * bpf_testmod_st_ops; attached as a BPF link (".struct_ops.link"). */
SEC(".struct_ops.link")
struct bpf_testmod_st_ops pro_epilogue = {
	.test_prologue = (void *)test_prologue,
	.test_epilogue = (void *)test_epilogue,
	.test_pro_epilogue = (void *)test_pro_epilogue,
};