// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>

#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)

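/* A selem has no user-visible key of its own, so the map-side hash
 * table simply hashes the selem pointer to pick a bucket. Any stable
 * per-selem value would do; the pointer is free and spreads well
 * enough across smap->bucket_log bits.
 */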
static struct bpf_local_storage_map_bucket *
select_bucket(struct bpf_local_storage_map *smap,
	      struct bpf_local_storage_elem *selem)
{
	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
}

static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
{
	struct bpf_map *map = &smap->map;

	if (!map->ops->map_local_storage_charge)
		return 0;

	return map->ops->map_local_storage_charge(smap, owner, size);
}

static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
			 u32 size)
{
	struct bpf_map *map = &smap->map;

	if (map->ops->map_local_storage_uncharge)
		map->ops->map_local_storage_uncharge(smap, owner, size);
}

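/* owner_storage() resolves the address of the owner's storage pointer
 * via a per-map-type callback; for sk storage, for instance, it returns
 * &sk->sk_bpf_storage, and the other storage flavors return the
 * equivalent field of their owner object. Returning the address (not
 * the pointer itself) is what lets bpf_local_storage_alloc() publish a
 * new storage with a single cmpxchg() on the owner.
 */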
static struct bpf_local_storage __rcu **
owner_storage(struct bpf_local_storage_map *smap, void *owner)
{
	struct bpf_map *map = &smap->map;

	return map->ops->map_owner_storage_ptr(owner);
}

static bool selem_linked_to_storage_lockless(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed_lockless(&selem->snode);
}

static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->snode);
}

static bool selem_linked_to_map_lockless(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed_lockless(&selem->map_node);
}

static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->map_node);
}

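/* Allocate a selem and, when a value is supplied, copy it in. The
 * bpf_ma path zeroes the value area by hand because bpf_mem_cache_alloc
 * may hand back reused memory; the kzalloc path gets zeroed memory for
 * free. On allocation failure, any charge taken here is rolled back.
 */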
struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
		void *value, bool charge_mem, bool swap_uptrs, gfp_t gfp_flags)
{
	struct bpf_local_storage_elem *selem;

	if (charge_mem && mem_charge(smap, owner, smap->elem_size))
		return NULL;

	if (smap->bpf_ma) {
		migrate_disable();
		selem = bpf_mem_cache_alloc_flags(&smap->selem_ma, gfp_flags);
		migrate_enable();
		if (selem)
			/* Keep the original bpf_map_kzalloc behavior
			 * from before the switch to bpf_mem_cache_alloc.
			 *
			 * No need to use zero_map_value. bpf_selem_free()
			 * only does bpf_mem_cache_free() when no other
			 * bpf prog is using the selem.
			 */
			memset(SDATA(selem)->data, 0, smap->map.value_size);
	} else {
		selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
					gfp_flags | __GFP_NOWARN);
	}

	if (selem) {
		if (value) {
			/* No need to call check_and_init_map_value as memory is zero init */
			copy_map_value(&smap->map, SDATA(selem)->data, value);
			if (swap_uptrs)
				bpf_obj_swap_uptrs(smap->map.record, SDATA(selem)->data, value);
		}
		return selem;
	}

	if (charge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	return NULL;
}

/* rcu tasks trace callback for bpf_ma == false */
static void __bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage *local_storage;

	/* If the RCU Tasks Trace grace period implies an RCU grace period,
	 * do kfree(); else do kfree_rcu().
	 */
	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
	if (rcu_trace_implies_rcu_gp())
		kfree(local_storage);
	else
		kfree_rcu(local_storage, rcu);
}

static void bpf_local_storage_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage *local_storage;

	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
	bpf_mem_cache_raw_free(local_storage);
}

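/* Chained grace periods for the bpf_ma == true case: the object may
 * still be referenced by sleepable programs (protected by RCU Tasks
 * Trace) and by non-sleepable programs (plain RCU). The callback below
 * therefore runs after the Tasks Trace GP and, unless that GP already
 * implies a regular RCU GP, chains one more call_rcu() before handing
 * the memory back.
 */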
static void bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
{
	if (rcu_trace_implies_rcu_gp())
		bpf_local_storage_free_rcu(rcu);
	else
		call_rcu(rcu, bpf_local_storage_free_rcu);
}

/* Handle bpf_ma == false */
static void __bpf_local_storage_free(struct bpf_local_storage *local_storage,
				     bool vanilla_rcu)
{
	if (vanilla_rcu)
		kfree_rcu(local_storage, rcu);
	else
		call_rcu_tasks_trace(&local_storage->rcu,
				     __bpf_local_storage_free_trace_rcu);
}

static void bpf_local_storage_free(struct bpf_local_storage *local_storage,
				   struct bpf_local_storage_map *smap,
				   bool bpf_ma, bool reuse_now)
{
	if (!local_storage)
		return;

	if (!bpf_ma) {
		__bpf_local_storage_free(local_storage, reuse_now);
		return;
	}

	if (!reuse_now) {
		call_rcu_tasks_trace(&local_storage->rcu,
				     bpf_local_storage_free_trace_rcu);
		return;
	}

	if (smap) {
		migrate_disable();
		bpf_mem_cache_free(&smap->storage_ma, local_storage);
		migrate_enable();
	} else {
		/* smap could be NULL if the selem that triggered
		 * this 'local_storage' creation had been long gone.
		 * In this case, directly do call_rcu().
		 */
		call_rcu(&local_storage->rcu, bpf_local_storage_free_rcu);
	}
}

/* rcu tasks trace callback for bpf_ma == false */
static void __bpf_selem_free_trace_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage_elem *selem;

	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
	if (rcu_trace_implies_rcu_gp())
		kfree(selem);
	else
		kfree_rcu(selem, rcu);
}

/* Handle bpf_ma == false */
static void __bpf_selem_free(struct bpf_local_storage_elem *selem,
			     bool vanilla_rcu)
{
	if (vanilla_rcu)
		kfree_rcu(selem, rcu);
	else
		call_rcu_tasks_trace(&selem->rcu, __bpf_selem_free_trace_rcu);
}

static void bpf_selem_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map *smap;

	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
	/* bpf_local_storage_map_free() will wait for an rcu_barrier() */
	smap = rcu_dereference_check(SDATA(selem)->smap, 1);
	bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
	bpf_mem_cache_raw_free(selem);
}

static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
{
	if (rcu_trace_implies_rcu_gp())
		bpf_selem_free_rcu(rcu);
	else
		call_rcu(rcu, bpf_selem_free_rcu);
}

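/* Free a selem through one of three routes:
 *   - !bpf_ma:    kfree_rcu()/call_rcu_tasks_trace() (see above),
 *   - reuse_now:  immediate bpf_mem_cache_free(), safe because no
 *                 prog can hold the selem (owner or map is dying),
 *   - otherwise:  Tasks Trace GP, then RCU GP, then cache free.
 * In every route the special fields (including any uptrs) are freed
 * before the selem memory itself is handed back.
 */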
void bpf_selem_free(struct bpf_local_storage_elem *selem,
		    struct bpf_local_storage_map *smap,
		    bool reuse_now)
{
	if (!smap->bpf_ma) {
		/* Only task storage has uptrs, and task storage has
		 * moved to bpf_mem_alloc (i.e. smap->bpf_ma == true for
		 * task storage), so this bpf_obj_free_fields() won't
		 * unpin any uptr.
		 */
		bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
		__bpf_selem_free(selem, reuse_now);
		return;
	}

	if (reuse_now) {
		/* reuse_now == true only happens when the storage owner
		 * (e.g. task_struct) is being destructed or the map itself
		 * is being destructed (ie map_free). In both cases,
		 * no bpf prog can have a hold on the selem. It is
		 * safe to unpin the uptrs and free the selem now.
		 */
		bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
		/* Instead of using the vanilla call_rcu(),
		 * bpf_mem_cache_free will be able to reuse selem
		 * immediately.
		 */
		migrate_disable();
		bpf_mem_cache_free(&smap->selem_ma, selem);
		migrate_enable();
		return;
	}

	call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);
}

static void bpf_selem_free_list(struct hlist_head *list, bool reuse_now)
{
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map *smap;
	struct hlist_node *n;

	/* The "_safe" iteration is needed.
	 * The loop does not remove the selem from the list,
	 * but bpf_selem_free will use the selem->rcu_head,
	 * which is union-ized with the selem->free_node.
	 */
	hlist_for_each_entry_safe(selem, n, list, free_node) {
		smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
		bpf_selem_free(selem, smap, reuse_now);
	}
}

/* local_storage->lock must be held and selem->local_storage == local_storage.
 * The caller must ensure selem->smap is still valid to be
 * dereferenced for its smap->elem_size and smap->cache_idx.
 */
static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
					    struct bpf_local_storage_elem *selem,
					    bool uncharge_mem, struct hlist_head *free_selem_list)
{
	struct bpf_local_storage_map *smap;
	bool free_local_storage;
	void *owner;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	owner = local_storage->owner;

	/* All uncharging on the owner must be done first.
	 * The owner may be freed once the last selem is unlinked
	 * from local_storage.
	 */
	if (uncharge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	free_local_storage = hlist_is_singular_node(&selem->snode,
						    &local_storage->list);
	if (free_local_storage) {
		mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
		local_storage->owner = NULL;

		/* After this RCU_INIT, owner may be freed and cannot be used */
		RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);

		/* local_storage is not freed now. local_storage->lock is
		 * still held and raw_spin_unlock_irqrestore(&local_storage->lock)
		 * will be done by the caller.
		 *
		 * Although the unlock will be done under
		 * rcu_read_lock(), it is more intuitive to
		 * read if the freeing of the storage is done
		 * after the raw_spin_unlock_irqrestore(&local_storage->lock).
		 *
		 * Hence, a "bool free_local_storage" is returned
		 * to the caller, which then frees the storage after
		 * all the RCU grace periods have expired.
		 */
	}
	hlist_del_init_rcu(&selem->snode);
	if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
	    SDATA(selem))
		RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);

	hlist_add_head(&selem->free_node, free_selem_list);

	if (rcu_access_pointer(local_storage->smap) == smap)
		RCU_INIT_POINTER(local_storage->smap, NULL);

	return free_local_storage;
}

static bool check_storage_bpf_ma(struct bpf_local_storage *local_storage,
				 struct bpf_local_storage_map *storage_smap,
				 struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map *selem_smap;

	/* local_storage->smap may be NULL. If it is, get the bpf_ma
	 * from any selem in the local_storage->list. The bpf_ma of all
	 * local_storage and selem should have the same value
	 * for the same map type.
	 *
	 * If the local_storage->list is already empty, the caller does not
	 * care about the bpf_ma value either, because the caller is not
	 * responsible for freeing the local_storage.
	 */

	if (storage_smap)
		return storage_smap->bpf_ma;

	if (!selem) {
		struct hlist_node *n;

		n = rcu_dereference_check(hlist_first_rcu(&local_storage->list),
					  bpf_rcu_lock_held());
		if (!n)
			return false;

		selem = hlist_entry(n, struct bpf_local_storage_elem, snode);
	}
	selem_smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());

	return selem_smap->bpf_ma;
}

static void bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
				     bool reuse_now)
{
	struct bpf_local_storage_map *storage_smap;
	struct bpf_local_storage *local_storage;
	bool bpf_ma, free_local_storage = false;
	HLIST_HEAD(selem_free_list);
	unsigned long flags;

	if (unlikely(!selem_linked_to_storage_lockless(selem)))
		/* selem has already been unlinked from its local_storage */
		return;

	local_storage = rcu_dereference_check(selem->local_storage,
					      bpf_rcu_lock_held());
	storage_smap = rcu_dereference_check(local_storage->smap,
					     bpf_rcu_lock_held());
	bpf_ma = check_storage_bpf_ma(local_storage, storage_smap, selem);

	raw_spin_lock_irqsave(&local_storage->lock, flags);
	if (likely(selem_linked_to_storage(selem)))
		free_local_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, true, &selem_free_list);
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);

	bpf_selem_free_list(&selem_free_list, reuse_now);

	if (free_local_storage)
		bpf_local_storage_free(local_storage, storage_smap, bpf_ma, reuse_now);
}

void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
				   struct bpf_local_storage_elem *selem)
{
	RCU_INIT_POINTER(selem->local_storage, local_storage);
	hlist_add_head_rcu(&selem->snode, &local_storage->list);
}

static void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map *smap;
	struct bpf_local_storage_map_bucket *b;
	unsigned long flags;

	if (unlikely(!selem_linked_to_map_lockless(selem)))
		/* selem has already been unlinked from smap */
		return;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	b = select_bucket(smap, selem);
	raw_spin_lock_irqsave(&b->lock, flags);
	if (likely(selem_linked_to_map(selem)))
		hlist_del_init_rcu(&selem->map_node);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}

void bpf_selem_link_map(struct bpf_local_storage_map *smap,
			struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
	unsigned long flags;

	raw_spin_lock_irqsave(&b->lock, flags);
	RCU_INIT_POINTER(SDATA(selem)->smap, smap);
	hlist_add_head_rcu(&selem->map_node, &b->list);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}

void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now)
{
	/* Always unlink from the map before unlinking from the
	 * local_storage, because the selem will be freed after it has
	 * been successfully unlinked from the local_storage.
	 */
	bpf_selem_unlink_map(selem);
	bpf_selem_unlink_storage(selem, reuse_now);
}

void __bpf_local_storage_insert_cache(struct bpf_local_storage *local_storage,
				      struct bpf_local_storage_map *smap,
				      struct bpf_local_storage_elem *selem)
{
	unsigned long flags;

	/* The spinlock is needed to avoid racing with a parallel
	 * delete. Otherwise, publishing an already-deleted sdata
	 * to the cache will become a use-after-free
	 * problem in the next bpf_local_storage_lookup().
	 */
	raw_spin_lock_irqsave(&local_storage->lock, flags);
	if (selem_linked_to_storage(selem))
		rcu_assign_pointer(local_storage->cache[smap->cache_idx], SDATA(selem));
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
}

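/* Translate BPF_NOEXIST/BPF_EXIST into an error against the current
 * state. With BPF_F_LOCK masked out:
 *   old_sdata != NULL with BPF_NOEXIST -> -EEXIST
 *   old_sdata == NULL with BPF_EXIST   -> -ENOENT
 *   anything else (e.g. BPF_ANY)       -> 0
 */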
static int check_flags(const struct bpf_local_storage_data *old_sdata,
		       u64 map_flags)
{
	if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}

int bpf_local_storage_alloc(void *owner,
			    struct bpf_local_storage_map *smap,
			    struct bpf_local_storage_elem *first_selem,
			    gfp_t gfp_flags)
{
	struct bpf_local_storage *prev_storage, *storage;
	struct bpf_local_storage **owner_storage_ptr;
	int err;

	err = mem_charge(smap, owner, sizeof(*storage));
	if (err)
		return err;

	if (smap->bpf_ma) {
		migrate_disable();
		storage = bpf_mem_cache_alloc_flags(&smap->storage_ma, gfp_flags);
		migrate_enable();
	} else {
		storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
					  gfp_flags | __GFP_NOWARN);
	}

	if (!storage) {
		err = -ENOMEM;
		goto uncharge;
	}

	RCU_INIT_POINTER(storage->smap, smap);
	INIT_HLIST_HEAD(&storage->list);
	raw_spin_lock_init(&storage->lock);
	storage->owner = owner;

	bpf_selem_link_storage_nolock(storage, first_selem);
	bpf_selem_link_map(smap, first_selem);

	owner_storage_ptr =
		(struct bpf_local_storage **)owner_storage(smap, owner);
	/* Publish storage to the owner.
	 * Instead of using any lock of the kernel object (i.e. owner),
	 * cmpxchg will work with any kernel object regardless what
	 * the running context is, bh, irq...etc.
	 *
	 * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage)
	 * is protected by the storage->lock. Hence, when freeing
	 * the owner->storage, the storage->lock must be held before
	 * setting owner->storage ptr to NULL.
	 */
	prev_storage = cmpxchg(owner_storage_ptr, NULL, storage);
	if (unlikely(prev_storage)) {
		bpf_selem_unlink_map(first_selem);
		err = -EAGAIN;
		goto uncharge;

		/* Note that even though first_selem was linked to smap's
		 * bucket->list, first_selem can be freed immediately
		 * (instead of kfree_rcu) because
		 * bpf_local_storage_map_free() does a
		 * synchronize_rcu_mult (waiting for both sleepable and
		 * normal programs) before walking the bucket->list.
		 * Hence, no one is accessing selem from the
		 * bucket->list under rcu_read_lock().
		 */
	}

	return 0;

uncharge:
	bpf_local_storage_free(storage, smap, smap->bpf_ma, true);
	mem_uncharge(smap, owner, sizeof(*storage));
	return err;
}

/* The owner (e.g. sk) cannot be going away while it is linking a new
 * elem to its storage (i.e. sk->sk_refcnt cannot be 0 here). Otherwise,
 * the storage would leak (and cause other memory issues during map
 * destruction).
 */
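/* Update algorithm, in brief:
 *   1. No storage yet for this owner: alloc a selem, then alloc and
 *      publish the storage with the selem as its first element.
 *   2. BPF_F_LOCK update of an existing elem: copy the value in place
 *      under the value's spin_lock without touching any list.
 *   3. Otherwise: alloc a new selem up front, then, under
 *      local_storage->lock, link it to the map and the storage and
 *      unlink the old selem (if any). The old selem is freed after the
 *      lock is released.
 */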
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
			 void *value, u64 map_flags, bool swap_uptrs, gfp_t gfp_flags)
{
	struct bpf_local_storage_data *old_sdata = NULL;
	struct bpf_local_storage_elem *alloc_selem, *selem = NULL;
	struct bpf_local_storage *local_storage;
	HLIST_HEAD(old_selem_free_list);
	unsigned long flags;
	int err;

	/* BPF_EXIST and BPF_NOEXIST cannot both be set */
	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
	    /* BPF_F_LOCK can only be used in a value with spin_lock */
	    unlikely((map_flags & BPF_F_LOCK) &&
		     !btf_record_has_field(smap->map.record, BPF_SPIN_LOCK)))
		return ERR_PTR(-EINVAL);

	if (gfp_flags == GFP_KERNEL && (map_flags & ~BPF_F_LOCK) != BPF_NOEXIST)
		return ERR_PTR(-EINVAL);

	local_storage = rcu_dereference_check(*owner_storage(smap, owner),
					      bpf_rcu_lock_held());
	if (!local_storage || hlist_empty(&local_storage->list)) {
		/* Very first elem for the owner */
		err = check_flags(NULL, map_flags);
		if (err)
			return ERR_PTR(err);

		selem = bpf_selem_alloc(smap, owner, value, true, swap_uptrs, gfp_flags);
		if (!selem)
			return ERR_PTR(-ENOMEM);

		err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
		if (err) {
			bpf_selem_free(selem, smap, true);
			mem_uncharge(smap, owner, smap->elem_size);
			return ERR_PTR(err);
		}

		return SDATA(selem);
	}

	if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
		/* Try to find an old_sdata to do an inline update,
		 * which avoids taking the local_storage->lock
		 * and changing the lists.
		 */
		old_sdata =
			bpf_local_storage_lookup(local_storage, smap, false);
		err = check_flags(old_sdata, map_flags);
		if (err)
			return ERR_PTR(err);
		if (old_sdata && selem_linked_to_storage_lockless(SELEM(old_sdata))) {
			copy_map_value_locked(&smap->map, old_sdata->data,
					      value, false);
			return old_sdata;
		}
	}

	/* A lookup has just been done before and concluded that a new selem
	 * is needed. An unnecessary alloc is therefore unlikely.
	 */
	alloc_selem = selem = bpf_selem_alloc(smap, owner, value, true, swap_uptrs, gfp_flags);
	if (!alloc_selem)
		return ERR_PTR(-ENOMEM);

	raw_spin_lock_irqsave(&local_storage->lock, flags);

	/* Recheck local_storage->list under local_storage->lock */
	if (unlikely(hlist_empty(&local_storage->list))) {
		/* A parallel del is happening and local_storage is going
		 * away. This was just checked above, so it is very
		 * unlikely. Return instead of retrying to keep things
		 * simple.
		 */
		err = -EAGAIN;
		goto unlock;
	}

	old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
	err = check_flags(old_sdata, map_flags);
	if (err)
		goto unlock;

	if (old_sdata && (map_flags & BPF_F_LOCK)) {
		copy_map_value_locked(&smap->map, old_sdata->data, value,
				      false);
		selem = SELEM(old_sdata);
		goto unlock;
	}

	alloc_selem = NULL;
	/* First, link the new selem to the map */
	bpf_selem_link_map(smap, selem);

	/* Second, link (and publish) the new selem to local_storage */
	bpf_selem_link_storage_nolock(local_storage, selem);

	/* Third, remove the old selem, SELEM(old_sdata) */
	if (old_sdata) {
		bpf_selem_unlink_map(SELEM(old_sdata));
		bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
						true, &old_selem_free_list);
	}

unlock:
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	bpf_selem_free_list(&old_selem_free_list, false);
	if (alloc_selem) {
		mem_uncharge(smap, owner, smap->elem_size);
		bpf_selem_free(alloc_selem, smap, true);
	}
	return err ? ERR_PTR(err) : SDATA(selem);
}

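/* Each local storage map is assigned one of BPF_LOCAL_STORAGE_CACHE_SIZE
 * slots in the owner's storage->cache[] array. A lookup first checks the
 * map's slot and falls back to walking storage->list on a miss, so the
 * assignment below simply picks the currently least-used index to keep
 * hot maps from sharing a slot.
 */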
static u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
{
	u64 min_usage = U64_MAX;
	u16 i, res = 0;

	spin_lock(&cache->idx_lock);

	for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
		if (cache->idx_usage_counts[i] < min_usage) {
			min_usage = cache->idx_usage_counts[i];
			res = i;

			/* Found a free cache_idx */
			if (!min_usage)
				break;
		}
	}
	cache->idx_usage_counts[res]++;

	spin_unlock(&cache->idx_lock);

	return res;
}

static void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
					     u16 idx)
{
	spin_lock(&cache->idx_lock);
	cache->idx_usage_counts[idx]--;
	spin_unlock(&cache->idx_lock);
}

int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
{
	if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
	    attr->max_entries ||
	    attr->key_size != sizeof(int) || !attr->value_size ||
	    /* Enforce BTF for userspace sk dumping */
	    !attr->btf_key_type_id || !attr->btf_value_type_id)
		return -EINVAL;

	if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
		return -E2BIG;

	return 0;
}

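/* The checks above and below correspond to a user-side map definition
 * along the lines of the following sketch (libbpf syntax; the map type
 * and the value struct, here a hypothetical struct my_value, depend on
 * the concrete storage flavor):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC);
 *		__type(key, int);
 *		__type(value, struct my_value);
 *	} my_storage SEC(".maps");
 *
 * i.e. no max_entries, BPF_F_NO_PREALLOC set, a 32-bit int key, and
 * BTF available for both key and value.
 */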
int bpf_local_storage_map_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	u32 int_data;

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

void bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
{
	struct bpf_local_storage_map *storage_smap;
	struct bpf_local_storage_elem *selem;
	bool bpf_ma, free_storage = false;
	HLIST_HEAD(free_selem_list);
	struct hlist_node *n;
	unsigned long flags;

	storage_smap = rcu_dereference_check(local_storage->smap, bpf_rcu_lock_held());
	bpf_ma = check_storage_bpf_ma(local_storage, storage_smap, NULL);

	/* Neither the bpf_prog nor the bpf_map's syscall
	 * could be modifying the local_storage->list now.
	 * Thus, no elem can be added to or deleted from the
	 * local_storage->list by the bpf_prog or by the bpf_map's syscall.
	 *
	 * It is racing with bpf_local_storage_map_free() alone
	 * when unlinking elem from the local_storage->list and
	 * the map's bucket->list.
	 */
	raw_spin_lock_irqsave(&local_storage->lock, flags);
	hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
		/* Always unlink from map before unlinking from
		 * local_storage.
		 */
		bpf_selem_unlink_map(selem);
		/* If the local_storage list has only one element,
		 * bpf_selem_unlink_storage_nolock() will return true.
		 * Otherwise, it will return false. This loop intends
		 * to remove all local storage, so the last iteration
		 * will set free_storage to true.
		 */
		free_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, true, &free_selem_list);
	}
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);

	bpf_selem_free_list(&free_selem_list, true);

	if (free_storage)
		bpf_local_storage_free(local_storage, storage_smap, bpf_ma, true);
}

u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_local_storage_map *smap = (struct bpf_local_storage_map *)map;
	u64 usage = sizeof(*smap);

	/* The dynamically allocated selems are not counted currently. */
	usage += sizeof(*smap->buckets) * (1ULL << smap->bucket_log);
	return usage;
}

/* When bpf_ma == true, the bpf_mem_alloc is used to allocate and free memory.
 * A deadlock-free allocator is useful for storage whose owner PTR_TO_BTF_ID
 * the bpf prog can easily get hold of in any context, e.g. via
 * bpf_get_current_task_btf. The task and cgroup storages fall into this
 * case. The bpf_mem_alloc reuses memory immediately. To be
 * reuse-immediate safe, the owner destruction code path needs to go
 * through an rcu grace period before calling bpf_local_storage_destroy().
 *
 * When bpf_ma == false, kmalloc and kfree are used.
 */
struct bpf_map *
bpf_local_storage_map_alloc(union bpf_attr *attr,
			    struct bpf_local_storage_cache *cache,
			    bool bpf_ma)
{
	struct bpf_local_storage_map *smap;
	unsigned int i;
	u32 nbuckets;
	int err;

	smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE);
	if (!smap)
		return ERR_PTR(-ENOMEM);
	bpf_map_init_from_attr(&smap->map, attr);

	nbuckets = roundup_pow_of_two(num_possible_cpus());
	/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
	nbuckets = max_t(u32, 2, nbuckets);
	smap->bucket_log = ilog2(nbuckets);

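	/* For example, on a machine with 6 possible CPUs:
	 * roundup_pow_of_two(6) == 8, so nbuckets == 8 and
	 * bucket_log == 3, giving select_bucket() a 3-bit hash range.
	 */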
	smap->buckets = bpf_map_kvcalloc(&smap->map, nbuckets,
					 sizeof(*smap->buckets), GFP_USER | __GFP_NOWARN);
	if (!smap->buckets) {
		err = -ENOMEM;
		goto free_smap;
	}

	for (i = 0; i < nbuckets; i++) {
		INIT_HLIST_HEAD(&smap->buckets[i].list);
		raw_spin_lock_init(&smap->buckets[i].lock);
	}

	smap->elem_size = offsetof(struct bpf_local_storage_elem,
				   sdata.data[attr->value_size]);

	/* In PREEMPT_RT, kmalloc(GFP_ATOMIC) is still not safe in a
	 * non-preemptible context. Thus, enforce all storages to use
	 * bpf_mem_alloc when CONFIG_PREEMPT_RT is enabled.
	 */
	smap->bpf_ma = IS_ENABLED(CONFIG_PREEMPT_RT) ? true : bpf_ma;
	if (smap->bpf_ma) {
		err = bpf_mem_alloc_init(&smap->selem_ma, smap->elem_size, false);
		if (err)
			goto free_smap;

		err = bpf_mem_alloc_init(&smap->storage_ma, sizeof(struct bpf_local_storage), false);
		if (err) {
			bpf_mem_alloc_destroy(&smap->selem_ma);
			goto free_smap;
		}
	}

	smap->cache_idx = bpf_local_storage_cache_idx_get(cache);
	return &smap->map;

free_smap:
	kvfree(smap->buckets);
	bpf_map_area_free(smap);
	return ERR_PTR(err);
}

void bpf_local_storage_map_free(struct bpf_map *map,
				struct bpf_local_storage_cache *cache,
				int __percpu *busy_counter)
{
	struct bpf_local_storage_map_bucket *b;
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map *smap;
	unsigned int i;

	smap = (struct bpf_local_storage_map *)map;
	bpf_local_storage_cache_idx_free(cache, smap->cache_idx);

	/* Note that this map might be concurrently cloned from
	 * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
	 * RCU read section to finish before proceeding. New RCU
	 * read sections should be prevented via bpf_map_inc_not_zero.
	 */
	synchronize_rcu();

	/* bpf prog and the userspace can no longer access this map
	 * now. No new selem (of this map) can be added
	 * to the owner->storage or to the map bucket's list.
	 *
	 * The elem of this map can be cleaned up here
	 * or when the storage is freed, e.g.
	 * by bpf_sk_storage_free() during __sk_destruct().
	 */
	for (i = 0; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];

		rcu_read_lock();
		/* No one is adding to b->list now */
		while ((selem = hlist_entry_safe(
				rcu_dereference_raw(hlist_first_rcu(&b->list)),
				struct bpf_local_storage_elem, map_node))) {
			if (busy_counter) {
				migrate_disable();
				this_cpu_inc(*busy_counter);
			}
			bpf_selem_unlink(selem, true);
			if (busy_counter) {
				this_cpu_dec(*busy_counter);
				migrate_enable();
			}
			cond_resched_rcu();
		}
		rcu_read_unlock();
	}

	/* While freeing the storage we may still need to access the map.
	 *
	 * e.g. when bpf_sk_storage_free() has unlinked selem from the map,
	 * which then made the above while((selem = ...)) loop
	 * exit immediately.
	 *
	 * However, while freeing the storage one still needs to access
	 * smap->elem_size to do the uncharging in
	 * bpf_selem_unlink_storage_nolock().
	 *
	 * Hence, wait another rcu grace period for the storage to be freed.
	 */
	synchronize_rcu();

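	/* A selem freed through call_rcu_tasks_trace() above may still
	 * have its callback pending. Flush the Tasks Trace callbacks
	 * (and, if the Tasks Trace GP does not already imply an RCU GP,
	 * the chained call_rcu() callbacks as well) before tearing down
	 * the bpf_mem_alloc caches those callbacks free into.
	 */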
	if (smap->bpf_ma) {
		rcu_barrier_tasks_trace();
		if (!rcu_trace_implies_rcu_gp())
			rcu_barrier();
		bpf_mem_alloc_destroy(&smap->selem_ma);
		bpf_mem_alloc_destroy(&smap->storage_ma);
	}
	kvfree(smap->buckets);
	bpf_map_area_free(smap);
}