1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
4 */
5
6#include <linux/types.h>
7#include <linux/bpf.h>
8#include <linux/bpf_local_storage.h>
9#include <uapi/linux/btf.h>
10#include <linux/btf_ids.h>
11
/* Per-map-type cache-slot bookkeeping shared by all cgroup-storage maps. */
DEFINE_BPF_STORAGE_CACHE(cgroup_cache);

/*
 * Per-CPU busy marker: non-zero while this CPU is inside a cgroup-storage
 * critical section.  bpf_cgrp_storage_trylock() uses it to reject
 * reentrant access (e.g. a tracing program firing inside this code).
 */
static DEFINE_PER_CPU(int, bpf_cgrp_storage_busy);
15
/*
 * Unconditionally enter the cgroup-storage critical section on this CPU.
 * Migration is disabled first so the per-CPU busy counter stays tied to
 * the current CPU for the whole section.
 */
static void bpf_cgrp_storage_lock(void)
{
	migrate_disable();
	this_cpu_inc(bpf_cgrp_storage_busy);
}
21
/*
 * Leave the cgroup-storage critical section: drop the busy count, then
 * re-enable migration (mirror order of bpf_cgrp_storage_lock()).
 */
static void bpf_cgrp_storage_unlock(void)
{
	this_cpu_dec(bpf_cgrp_storage_busy);
	migrate_enable();
}
27
28static bool bpf_cgrp_storage_trylock(void)
29{
30 migrate_disable();
31 if (unlikely(this_cpu_inc_return(bpf_cgrp_storage_busy) != 1)) {
32 this_cpu_dec(bpf_cgrp_storage_busy);
33 migrate_enable();
34 return false;
35 }
36 return true;
37}
38
39static struct bpf_local_storage __rcu **cgroup_storage_ptr(void *owner)
40{
41 struct cgroup *cg = owner;
42
43 return &cg->bpf_cgrp_storage;
44}
45
/*
 * Release all local storage attached to @cgroup.  Called from cgroup
 * teardown.  The busy "lock" is taken so that BPF programs racing on
 * this CPU back off via bpf_cgrp_storage_trylock() while the storage is
 * being destroyed.
 */
void bpf_cgrp_storage_free(struct cgroup *cgroup)
{
	struct bpf_local_storage *local_storage;

	rcu_read_lock();
	local_storage = rcu_dereference(cgroup->bpf_cgrp_storage);
	if (!local_storage) {
		/* Nothing was ever attached to this cgroup. */
		rcu_read_unlock();
		return;
	}

	bpf_cgrp_storage_lock();
	bpf_local_storage_destroy(local_storage);
	bpf_cgrp_storage_unlock();
	rcu_read_unlock();
}
62
63static struct bpf_local_storage_data *
64cgroup_storage_lookup(struct cgroup *cgroup, struct bpf_map *map, bool cacheit_lockit)
65{
66 struct bpf_local_storage *cgroup_storage;
67 struct bpf_local_storage_map *smap;
68
69 cgroup_storage = rcu_dereference_check(cgroup->bpf_cgrp_storage,
70 bpf_rcu_lock_held());
71 if (!cgroup_storage)
72 return NULL;
73
74 smap = (struct bpf_local_storage_map *)map;
75 return bpf_local_storage_lookup(cgroup_storage, smap, cacheit_lockit);
76}
77
78static void *bpf_cgrp_storage_lookup_elem(struct bpf_map *map, void *key)
79{
80 struct bpf_local_storage_data *sdata;
81 struct cgroup *cgroup;
82 int fd;
83
84 fd = *(int *)key;
85 cgroup = cgroup_v1v2_get_from_fd(fd);
86 if (IS_ERR(cgroup))
87 return ERR_CAST(cgroup);
88
89 bpf_cgrp_storage_lock();
90 sdata = cgroup_storage_lookup(cgroup, map, true);
91 bpf_cgrp_storage_unlock();
92 cgroup_put(cgroup);
93 return sdata ? sdata->data : NULL;
94}
95
96static long bpf_cgrp_storage_update_elem(struct bpf_map *map, void *key,
97 void *value, u64 map_flags)
98{
99 struct bpf_local_storage_data *sdata;
100 struct cgroup *cgroup;
101 int fd;
102
103 fd = *(int *)key;
104 cgroup = cgroup_v1v2_get_from_fd(fd);
105 if (IS_ERR(cgroup))
106 return PTR_ERR(cgroup);
107
108 bpf_cgrp_storage_lock();
109 sdata = bpf_local_storage_update(cgroup, (struct bpf_local_storage_map *)map,
110 value, map_flags, GFP_ATOMIC);
111 bpf_cgrp_storage_unlock();
112 cgroup_put(cgroup);
113 return PTR_ERR_OR_ZERO(sdata);
114}
115
/*
 * Unlink @map's element from @cgroup's storage.  Caller holds the busy
 * "lock".  Returns -ENOENT when no element exists.
 */
static int cgroup_storage_delete(struct cgroup *cgroup, struct bpf_map *map)
{
	struct bpf_local_storage_data *sdata;

	sdata = cgroup_storage_lookup(cgroup, map, false);
	if (!sdata)
		return -ENOENT;

	/* NOTE(review): the "false" presumably defers element reuse until
	 * an RCU grace period — confirm against bpf_selem_unlink()'s
	 * reuse_now parameter semantics in bpf_local_storage.
	 */
	bpf_selem_unlink(SELEM(sdata), false);
	return 0;
}
127
/*
 * Syscall-path delete.  @key is a cgroup fd; returns 0 on success,
 * -ENOENT when nothing is stored, or a negative errno for a bad fd.
 */
static long bpf_cgrp_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct cgroup *cgroup;
	int err;

	cgroup = cgroup_v1v2_get_from_fd(*(int *)key);
	if (IS_ERR(cgroup))
		return PTR_ERR(cgroup);

	bpf_cgrp_storage_lock();
	err = cgroup_storage_delete(cgroup, map);
	bpf_cgrp_storage_unlock();
	cgroup_put(cgroup);

	return err;
}
144
/* Key iteration is not supported for local-storage maps. */
static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	return -ENOTSUPP;
}
149
/* Allocate a cgroup local-storage map wired to the shared cache slots. */
static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
{
	return bpf_local_storage_map_alloc(attr, &cgroup_cache, true);
}
154
/* Tear down the map and release its cache-slot bookkeeping. */
static void cgroup_storage_map_free(struct bpf_map *map)
{
	bpf_local_storage_map_free(map, &cgroup_cache, NULL);
}
159
/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_cgrp_storage_get, struct bpf_map *, map, struct cgroup *, cgroup,
	   void *, value, u64, flags, gfp_t, gfp_flags)
{
	struct bpf_local_storage_data *sdata;

	/* Callers must be in an RCU read-side critical section. */
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	/* Only the optional create-on-miss flag is accepted. */
	if (flags & ~(BPF_LOCAL_STORAGE_GET_F_CREATE))
		return (unsigned long)NULL;

	if (!cgroup)
		return (unsigned long)NULL;

	/* Back off instead of recursing (e.g. from a tracing program). */
	if (!bpf_cgrp_storage_trylock())
		return (unsigned long)NULL;

	sdata = cgroup_storage_lookup(cgroup, map, true);
	if (sdata)
		goto unlock;

	/* only allocate new storage, when the cgroup is refcounted */
	if (!percpu_ref_is_dying(&cgroup->self.refcnt) &&
	    (flags & BPF_LOCAL_STORAGE_GET_F_CREATE))
		sdata = bpf_local_storage_update(cgroup, (struct bpf_local_storage_map *)map,
						 value, BPF_NOEXIST, gfp_flags);

unlock:
	bpf_cgrp_storage_unlock();
	/* NULL on miss/error; otherwise the value area the program can use. */
	return IS_ERR_OR_NULL(sdata) ? (unsigned long)NULL : (unsigned long)sdata->data;
}
190
/*
 * bpf_cgrp_storage_delete() helper: remove @map's storage attached to
 * @cgroup from BPF program context.  Returns 0, -EINVAL for a NULL
 * cgroup, -EBUSY on recursion, or -ENOENT when nothing is stored.
 */
BPF_CALL_2(bpf_cgrp_storage_delete, struct bpf_map *, map, struct cgroup *, cgroup)
{
	int ret;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!cgroup)
		return -EINVAL;

	/* Back off with -EBUSY instead of recursing on this CPU. */
	if (!bpf_cgrp_storage_trylock())
		return -EBUSY;

	ret = cgroup_storage_delete(cgroup, map);
	bpf_cgrp_storage_unlock();
	return ret;
}
206
/* Map operations for BPF_MAP_TYPE_CGRP_STORAGE. */
const struct bpf_map_ops cgrp_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = cgroup_storage_map_alloc,
	.map_free = cgroup_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_cgrp_storage_lookup_elem,
	.map_update_elem = bpf_cgrp_storage_update_elem,
	.map_delete_elem = bpf_cgrp_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_mem_usage = bpf_local_storage_map_mem_usage,
	.map_btf_id = &bpf_local_storage_map_btf_id[0],
	.map_owner_storage_ptr = cgroup_storage_ptr,
};
221
/* Verifier-facing prototype for the bpf_cgrp_storage_get() helper. */
const struct bpf_func_proto bpf_cgrp_storage_get_proto = {
	.func = bpf_cgrp_storage_get,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	/* NULL cgroup pointers are allowed; helper returns NULL for them. */
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &bpf_cgroup_btf_id[0],
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};
232
/* Verifier-facing prototype for the bpf_cgrp_storage_delete() helper. */
const struct bpf_func_proto bpf_cgrp_storage_delete_proto = {
	.func = bpf_cgrp_storage_delete,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	/* NULL cgroup pointers are allowed; helper returns -EINVAL for them. */
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &bpf_cgroup_btf_id[0],
};
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
4 */
5
6#include <linux/types.h>
7#include <linux/bpf.h>
8#include <linux/bpf_local_storage.h>
9#include <uapi/linux/btf.h>
10#include <linux/btf_ids.h>
11
/* NOTE(review): everything from here down duplicates definitions that
 * already appear earlier in this file.  It looks like a stale older
 * revision of the same translation unit was concatenated during
 * extraction (this copy uses cgroup_get_from_fd(), int-returning map
 * ops, and the old unlink/free teardown).  Both copies cannot coexist
 * in one compiled file — confirm which revision is wanted and drop the
 * other.
 */

/* Per-map-type cache-slot bookkeeping (duplicate definition). */
DEFINE_BPF_STORAGE_CACHE(cgroup_cache);

/* Per-CPU recursion marker for the busy "lock" (duplicate definition). */
static DEFINE_PER_CPU(int, bpf_cgrp_storage_busy);

/* Unconditionally enter the critical section on this CPU. */
static void bpf_cgrp_storage_lock(void)
{
	migrate_disable();
	this_cpu_inc(bpf_cgrp_storage_busy);
}

/* Leave the critical section (mirror order of the lock). */
static void bpf_cgrp_storage_unlock(void)
{
	this_cpu_dec(bpf_cgrp_storage_busy);
	migrate_enable();
}

/* Try to enter; returns false with nothing held on recursion. */
static bool bpf_cgrp_storage_trylock(void)
{
	migrate_disable();
	if (unlikely(this_cpu_inc_return(bpf_cgrp_storage_busy) != 1)) {
		this_cpu_dec(bpf_cgrp_storage_busy);
		migrate_enable();
		return false;
	}
	return true;
}
38
/* NOTE(review): duplicates earlier definitions in this file; appears to
 * be a stale older revision concatenated during extraction — both
 * copies cannot compile together.
 */

/* Resolve the owner (a struct cgroup) to its local-storage anchor. */
static struct bpf_local_storage __rcu **cgroup_storage_ptr(void *owner)
{
	struct cgroup *cg = owner;

	return &cg->bpf_cgrp_storage;
}

/*
 * Older teardown path: unlink all elements under local_storage->lock,
 * then free the storage struct via kfree_rcu() when the unlink reports
 * it can be freed (presumably once the last element is gone — confirm
 * against bpf_local_storage_unlink_nolock()).  The newer copy earlier
 * in this file delegates all of this to bpf_local_storage_destroy().
 */
void bpf_cgrp_storage_free(struct cgroup *cgroup)
{
	struct bpf_local_storage *local_storage;
	bool free_cgroup_storage = false;
	unsigned long flags;

	rcu_read_lock();
	local_storage = rcu_dereference(cgroup->bpf_cgrp_storage);
	if (!local_storage) {
		rcu_read_unlock();
		return;
	}

	bpf_cgrp_storage_lock();
	raw_spin_lock_irqsave(&local_storage->lock, flags);
	free_cgroup_storage = bpf_local_storage_unlink_nolock(local_storage);
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	bpf_cgrp_storage_unlock();
	rcu_read_unlock();

	if (free_cgroup_storage)
		kfree_rcu(local_storage, rcu);
}
69
/* NOTE(review): duplicates earlier definitions in this file; appears to
 * be a stale older revision concatenated during extraction — both
 * copies cannot compile together.
 */

/* Find @map's element attached to @cgroup, or NULL if none. */
static struct bpf_local_storage_data *
cgroup_storage_lookup(struct cgroup *cgroup, struct bpf_map *map, bool cacheit_lockit)
{
	struct bpf_local_storage *cgroup_storage;
	struct bpf_local_storage_map *smap;

	cgroup_storage = rcu_dereference_check(cgroup->bpf_cgrp_storage,
					       bpf_rcu_lock_held());
	if (!cgroup_storage)
		return NULL;

	smap = (struct bpf_local_storage_map *)map;
	return bpf_local_storage_lookup(cgroup_storage, smap, cacheit_lockit);
}

/*
 * Syscall-path lookup keyed by cgroup fd.  This older revision uses
 * cgroup_get_from_fd(); the newer copy earlier in this file uses
 * cgroup_v1v2_get_from_fd() instead.
 */
static void *bpf_cgrp_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_local_storage_data *sdata;
	struct cgroup *cgroup;
	int fd;

	fd = *(int *)key;
	cgroup = cgroup_get_from_fd(fd);
	if (IS_ERR(cgroup))
		return ERR_CAST(cgroup);

	bpf_cgrp_storage_lock();
	sdata = cgroup_storage_lookup(cgroup, map, true);
	bpf_cgrp_storage_unlock();
	cgroup_put(cgroup);
	return sdata ? sdata->data : NULL;
}
102
/* NOTE(review): duplicates earlier definitions in this file; appears to
 * be a stale older revision concatenated during extraction — both
 * copies cannot compile together.  This revision returns int where the
 * newer copy returns long.
 */

/* Syscall-path update keyed by cgroup fd; GFP_ATOMIC under the busy lock. */
static int bpf_cgrp_storage_update_elem(struct bpf_map *map, void *key,
					void *value, u64 map_flags)
{
	struct bpf_local_storage_data *sdata;
	struct cgroup *cgroup;
	int fd;

	fd = *(int *)key;
	cgroup = cgroup_get_from_fd(fd);
	if (IS_ERR(cgroup))
		return PTR_ERR(cgroup);

	bpf_cgrp_storage_lock();
	sdata = bpf_local_storage_update(cgroup, (struct bpf_local_storage_map *)map,
					 value, map_flags, GFP_ATOMIC);
	bpf_cgrp_storage_unlock();
	cgroup_put(cgroup);
	return PTR_ERR_OR_ZERO(sdata);
}

/*
 * Unlink @map's element from @cgroup (caller holds the busy lock).
 * NOTE(review): this older revision passes "true" to bpf_selem_unlink()
 * where the newer copy passes "false" — confirm which reuse semantics
 * the surrounding bpf_local_storage revision expects.
 */
static int cgroup_storage_delete(struct cgroup *cgroup, struct bpf_map *map)
{
	struct bpf_local_storage_data *sdata;

	sdata = cgroup_storage_lookup(cgroup, map, false);
	if (!sdata)
		return -ENOENT;

	bpf_selem_unlink(SELEM(sdata), true);
	return 0;
}
134
/* NOTE(review): duplicates earlier definitions in this file; appears to
 * be a stale older revision concatenated during extraction — both
 * copies cannot compile together.
 */

/* Syscall-path delete keyed by cgroup fd. */
static int bpf_cgrp_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct cgroup *cgroup;
	int err, fd;

	fd = *(int *)key;
	cgroup = cgroup_get_from_fd(fd);
	if (IS_ERR(cgroup))
		return PTR_ERR(cgroup);

	bpf_cgrp_storage_lock();
	err = cgroup_storage_delete(cgroup, map);
	bpf_cgrp_storage_unlock();
	cgroup_put(cgroup);
	return err;
}

/* Key iteration is not supported for local-storage maps. */
static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	return -ENOTSUPP;
}
156
/* NOTE(review): duplicates earlier definitions in this file; appears to
 * be a stale older revision concatenated during extraction — both
 * copies cannot compile together.  This alloc takes two arguments where
 * the newer copy passes a third boolean.
 */

/* Allocate a cgroup local-storage map wired to the shared cache slots. */
static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
{
	return bpf_local_storage_map_alloc(attr, &cgroup_cache);
}

/* Tear down the map and release its cache-slot bookkeeping. */
static void cgroup_storage_map_free(struct bpf_map *map)
{
	bpf_local_storage_map_free(map, &cgroup_cache, NULL);
}
166
/* NOTE(review): duplicates an earlier definition in this file; appears
 * to be a stale older revision concatenated during extraction — both
 * copies cannot compile together.
 */

/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_cgrp_storage_get, struct bpf_map *, map, struct cgroup *, cgroup,
	   void *, value, u64, flags, gfp_t, gfp_flags)
{
	struct bpf_local_storage_data *sdata;

	/* Callers must be in an RCU read-side critical section. */
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	/* Only the optional create-on-miss flag is accepted. */
	if (flags & ~(BPF_LOCAL_STORAGE_GET_F_CREATE))
		return (unsigned long)NULL;

	if (!cgroup)
		return (unsigned long)NULL;

	/* Back off instead of recursing (e.g. from a tracing program). */
	if (!bpf_cgrp_storage_trylock())
		return (unsigned long)NULL;

	sdata = cgroup_storage_lookup(cgroup, map, true);
	if (sdata)
		goto unlock;

	/* only allocate new storage, when the cgroup is refcounted */
	if (!percpu_ref_is_dying(&cgroup->self.refcnt) &&
	    (flags & BPF_LOCAL_STORAGE_GET_F_CREATE))
		sdata = bpf_local_storage_update(cgroup, (struct bpf_local_storage_map *)map,
						 value, BPF_NOEXIST, gfp_flags);

unlock:
	bpf_cgrp_storage_unlock();
	return IS_ERR_OR_NULL(sdata) ? (unsigned long)NULL : (unsigned long)sdata->data;
}
197
/* NOTE(review): duplicates an earlier definition in this file; appears
 * to be a stale older revision concatenated during extraction — both
 * copies cannot compile together.
 */

/* bpf_cgrp_storage_delete() helper for BPF program context. */
BPF_CALL_2(bpf_cgrp_storage_delete, struct bpf_map *, map, struct cgroup *, cgroup)
{
	int ret;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!cgroup)
		return -EINVAL;

	/* Back off with -EBUSY instead of recursing on this CPU. */
	if (!bpf_cgrp_storage_trylock())
		return -EBUSY;

	ret = cgroup_storage_delete(cgroup, map);
	bpf_cgrp_storage_unlock();
	return ret;
}
213
/* NOTE(review): duplicates earlier definitions in this file; appears to
 * be a stale older revision concatenated during extraction — both
 * copies cannot compile together.  This revision lacks .map_mem_usage
 * and uses ARG_PTR_TO_BTF_ID (non-NULL) where the newer copy uses
 * ARG_PTR_TO_BTF_ID_OR_NULL.
 */

/* Map operations for BPF_MAP_TYPE_CGRP_STORAGE (older revision). */
const struct bpf_map_ops cgrp_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = cgroup_storage_map_alloc,
	.map_free = cgroup_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_cgrp_storage_lookup_elem,
	.map_update_elem = bpf_cgrp_storage_update_elem,
	.map_delete_elem = bpf_cgrp_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_btf_id = &bpf_local_storage_map_btf_id[0],
	.map_owner_storage_ptr = cgroup_storage_ptr,
};

/* Verifier-facing prototype for bpf_cgrp_storage_get() (older revision). */
const struct bpf_func_proto bpf_cgrp_storage_get_proto = {
	.func = bpf_cgrp_storage_get,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID,
	.arg2_btf_id = &bpf_cgroup_btf_id[0],
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};

/* Verifier-facing prototype for bpf_cgrp_storage_delete() (older revision). */
const struct bpf_func_proto bpf_cgrp_storage_delete_proto = {
	.func = bpf_cgrp_storage_delete,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID,
	.arg2_btf_id = &bpf_cgroup_btf_id[0],
};