// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020 Facebook
 * Copyright 2020 Google LLC.
 */

#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/bpf_local_storage.h>
#include <linux/filter.h>
#include <uapi/linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/fdtable.h>
#include <linux/rcupdate_trace.h>

DEFINE_BPF_STORAGE_CACHE(task_cache);

static DEFINE_PER_CPU(int, bpf_task_storage_busy);

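/*
 * A non-zero bpf_task_storage_busy count on the current CPU means a task
 * storage operation is already in progress there. The trylock variant below
 * backs off instead of nesting, so tracing programs that recurse into this
 * code cannot deadlock on the storage locks.
 */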
static void bpf_task_storage_lock(void)
{
        migrate_disable();
        this_cpu_inc(bpf_task_storage_busy);
}

static void bpf_task_storage_unlock(void)
{
        this_cpu_dec(bpf_task_storage_busy);
        migrate_enable();
}

static bool bpf_task_storage_trylock(void)
{
        migrate_disable();
        if (unlikely(this_cpu_inc_return(bpf_task_storage_busy) != 1)) {
                this_cpu_dec(bpf_task_storage_busy);
                migrate_enable();
                return false;
        }
        return true;
}

static struct bpf_local_storage __rcu **task_storage_ptr(void *owner)
{
        struct task_struct *task = owner;

        return &task->bpf_storage;
}

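/*
 * Look up the element of @map attached to @task. When @cacheit_lockit is
 * true, the result may also be cached in the owner's local storage cache,
 * which requires taking the storage lock.
 */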
static struct bpf_local_storage_data *
task_storage_lookup(struct task_struct *task, struct bpf_map *map,
                    bool cacheit_lockit)
{
        struct bpf_local_storage *task_storage;
        struct bpf_local_storage_map *smap;

        task_storage =
                rcu_dereference_check(task->bpf_storage, bpf_rcu_lock_held());
        if (!task_storage)
                return NULL;

        smap = (struct bpf_local_storage_map *)map;
        return bpf_local_storage_lookup(task_storage, smap, cacheit_lockit);
}

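/*
 * Tear down all task-local storage still attached to @task; called when the
 * task_struct is being freed.
 */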
void bpf_task_storage_free(struct task_struct *task)
{
        struct bpf_local_storage *local_storage;

        rcu_read_lock();

        local_storage = rcu_dereference(task->bpf_storage);
        if (!local_storage) {
                rcu_read_unlock();
                return;
        }

        bpf_task_storage_lock();
        bpf_local_storage_destroy(local_storage);
        bpf_task_storage_unlock();
        rcu_read_unlock();
}

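/*
 * The bpf_pid_task_storage_*_elem() functions below implement the map
 * syscall operations (lookup/update/delete from user space). The map key is
 * a pidfd identifying the target task.
 */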
static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_local_storage_data *sdata;
        struct task_struct *task;
        unsigned int f_flags;
        struct pid *pid;
        int fd, err;

        fd = *(int *)key;
        pid = pidfd_get_pid(fd, &f_flags);
        if (IS_ERR(pid))
                return ERR_CAST(pid);

        /* We should be in an RCU read side critical section, it should be safe
         * to call pid_task.
         */
        WARN_ON_ONCE(!rcu_read_lock_held());
        task = pid_task(pid, PIDTYPE_PID);
        if (!task) {
                err = -ENOENT;
                goto out;
        }

        bpf_task_storage_lock();
        sdata = task_storage_lookup(task, map, true);
        bpf_task_storage_unlock();
        put_pid(pid);
        return sdata ? sdata->data : NULL;
out:
        put_pid(pid);
        return ERR_PTR(err);
}

static long bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key,
                                             void *value, u64 map_flags)
{
        struct bpf_local_storage_data *sdata;
        struct task_struct *task;
        unsigned int f_flags;
        struct pid *pid;
        int fd, err;

        fd = *(int *)key;
        pid = pidfd_get_pid(fd, &f_flags);
        if (IS_ERR(pid))
                return PTR_ERR(pid);

        /* We should be in an RCU read side critical section, it should be safe
         * to call pid_task.
         */
        WARN_ON_ONCE(!rcu_read_lock_held());
        task = pid_task(pid, PIDTYPE_PID);
        if (!task) {
                err = -ENOENT;
                goto out;
        }

        bpf_task_storage_lock();
        sdata = bpf_local_storage_update(
                task, (struct bpf_local_storage_map *)map, value, map_flags,
                GFP_ATOMIC);
        bpf_task_storage_unlock();

        err = PTR_ERR_OR_ZERO(sdata);
out:
        put_pid(pid);
        return err;
}

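/*
 * Delete the element of @map attached to @task. @nobusy is false when the
 * caller could not take the busy lock (recursion detected); in that case an
 * existing element is reported as -EBUSY rather than unlinked.
 */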
static int task_storage_delete(struct task_struct *task, struct bpf_map *map,
                               bool nobusy)
{
        struct bpf_local_storage_data *sdata;

        sdata = task_storage_lookup(task, map, false);
        if (!sdata)
                return -ENOENT;

        if (!nobusy)
                return -EBUSY;

        bpf_selem_unlink(SELEM(sdata), false);

        return 0;
}

static long bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key)
{
        struct task_struct *task;
        unsigned int f_flags;
        struct pid *pid;
        int fd, err;

        fd = *(int *)key;
        pid = pidfd_get_pid(fd, &f_flags);
        if (IS_ERR(pid))
                return PTR_ERR(pid);

        /* We should be in an RCU read side critical section, it should be safe
         * to call pid_task.
         */
        WARN_ON_ONCE(!rcu_read_lock_held());
        task = pid_task(pid, PIDTYPE_PID);
        if (!task) {
                err = -ENOENT;
                goto out;
        }

        bpf_task_storage_lock();
        err = task_storage_delete(task, map, true);
        bpf_task_storage_unlock();
out:
        put_pid(pid);
        return err;
}

/* Called by bpf_task_storage_get*() helpers */
static void *__bpf_task_storage_get(struct bpf_map *map,
                                    struct task_struct *task, void *value,
                                    u64 flags, gfp_t gfp_flags, bool nobusy)
{
        struct bpf_local_storage_data *sdata;

        sdata = task_storage_lookup(task, map, nobusy);
        if (sdata)
                return sdata->data;

        /* only allocate new storage, when the task is refcounted */
        if (refcount_read(&task->usage) &&
            (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) && nobusy) {
                sdata = bpf_local_storage_update(
                        task, (struct bpf_local_storage_map *)map, value,
                        BPF_NOEXIST, gfp_flags);
                return IS_ERR(sdata) ? NULL : sdata->data;
        }

        return NULL;
}

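/*
 * The _recur variants below are used for programs that may recurse into the
 * task storage code; they take the busy counter with
 * bpf_task_storage_trylock() and fail gracefully (NULL or -EBUSY) instead of
 * deadlocking.
 */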
/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_task_storage_get_recur, struct bpf_map *, map, struct task_struct *,
           task, void *, value, u64, flags, gfp_t, gfp_flags)
{
        bool nobusy;
        void *data;

        WARN_ON_ONCE(!bpf_rcu_lock_held());
        if (flags & ~BPF_LOCAL_STORAGE_GET_F_CREATE || !task)
                return (unsigned long)NULL;

        nobusy = bpf_task_storage_trylock();
        data = __bpf_task_storage_get(map, task, value, flags,
                                      gfp_flags, nobusy);
        if (nobusy)
                bpf_task_storage_unlock();
        return (unsigned long)data;
}

/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
           task, void *, value, u64, flags, gfp_t, gfp_flags)
{
        void *data;

        WARN_ON_ONCE(!bpf_rcu_lock_held());
        if (flags & ~BPF_LOCAL_STORAGE_GET_F_CREATE || !task)
                return (unsigned long)NULL;

        bpf_task_storage_lock();
        data = __bpf_task_storage_get(map, task, value, flags,
                                      gfp_flags, true);
        bpf_task_storage_unlock();
        return (unsigned long)data;
}

BPF_CALL_2(bpf_task_storage_delete_recur, struct bpf_map *, map, struct task_struct *,
           task)
{
        bool nobusy;
        int ret;

        WARN_ON_ONCE(!bpf_rcu_lock_held());
        if (!task)
                return -EINVAL;

        nobusy = bpf_task_storage_trylock();
        /* This helper must only be called from places where the lifetime of the task
         * is guaranteed. Either by being refcounted or by being protected
         * by an RCU read-side critical section.
         */
        ret = task_storage_delete(task, map, nobusy);
        if (nobusy)
                bpf_task_storage_unlock();
        return ret;
}

BPF_CALL_2(bpf_task_storage_delete, struct bpf_map *, map, struct task_struct *,
           task)
{
        int ret;

        WARN_ON_ONCE(!bpf_rcu_lock_held());
        if (!task)
                return -EINVAL;

        bpf_task_storage_lock();
        /* This helper must only be called from places where the lifetime of the task
         * is guaranteed. Either by being refcounted or by being protected
         * by an RCU read-side critical section.
         */
        ret = task_storage_delete(task, map, true);
        bpf_task_storage_unlock();
        return ret;
}

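/* Task storage maps cannot be iterated, so get_next_key is not supported. */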
static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        return -ENOTSUPP;
}

static struct bpf_map *task_storage_map_alloc(union bpf_attr *attr)
{
        return bpf_local_storage_map_alloc(attr, &task_cache, true);
}

static void task_storage_map_free(struct bpf_map *map)
{
        bpf_local_storage_map_free(map, &task_cache, &bpf_task_storage_busy);
}

BTF_ID_LIST_GLOBAL_SINGLE(bpf_local_storage_map_btf_id, struct, bpf_local_storage_map)
const struct bpf_map_ops task_storage_map_ops = {
        .map_meta_equal = bpf_map_meta_equal,
        .map_alloc_check = bpf_local_storage_map_alloc_check,
        .map_alloc = task_storage_map_alloc,
        .map_free = task_storage_map_free,
        .map_get_next_key = notsupp_get_next_key,
        .map_lookup_elem = bpf_pid_task_storage_lookup_elem,
        .map_update_elem = bpf_pid_task_storage_update_elem,
        .map_delete_elem = bpf_pid_task_storage_delete_elem,
        .map_check_btf = bpf_local_storage_map_check_btf,
        .map_mem_usage = bpf_local_storage_map_mem_usage,
        .map_btf_id = &bpf_local_storage_map_btf_id[0],
        .map_owner_storage_ptr = task_storage_ptr,
};

const struct bpf_func_proto bpf_task_storage_get_recur_proto = {
        .func = bpf_task_storage_get_recur,
        .gpl_only = false,
        .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
        .arg1_type = ARG_CONST_MAP_PTR,
        .arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
        .arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
        .arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
        .arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_task_storage_get_proto = {
        .func = bpf_task_storage_get,
        .gpl_only = false,
        .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
        .arg1_type = ARG_CONST_MAP_PTR,
        .arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
        .arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
        .arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
        .arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_task_storage_delete_recur_proto = {
        .func = bpf_task_storage_delete_recur,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_CONST_MAP_PTR,
        .arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
        .arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};

const struct bpf_func_proto bpf_task_storage_delete_proto = {
        .func = bpf_task_storage_delete,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_CONST_MAP_PTR,
        .arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
        .arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};
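
/*
 * Illustrative sketch, not part of this file: a minimal BPF-side use of the
 * task storage helpers defined above, written against common libbpf
 * conventions (vmlinux.h, bpf_helpers.h, bpf_tracing.h). The map name,
 * program name, value layout and the lsm/file_open attach point are example
 * choices, not anything mandated by this file.
 *
 *      struct task_counter {
 *              u64 file_opens;
 *      };
 *
 *      struct {
 *              __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
 *              __uint(map_flags, BPF_F_NO_PREALLOC);
 *              __type(key, int);
 *              __type(value, struct task_counter);
 *      } counters SEC(".maps");
 *
 *      SEC("lsm/file_open")
 *      int BPF_PROG(count_file_open, struct file *file)
 *      {
 *              struct task_struct *task = bpf_get_current_task_btf();
 *              struct task_counter *c;
 *
 *              c = bpf_task_storage_get(&counters, task, 0,
 *                                       BPF_LOCAL_STORAGE_GET_F_CREATE);
 *              if (c)
 *                      c->file_opens++;
 *              return 0;
 *      }
 */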