1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2019 Facebook */
3#include <linux/rculist.h>
4#include <linux/list.h>
5#include <linux/hash.h>
6#include <linux/types.h>
7#include <linux/spinlock.h>
8#include <linux/bpf.h>
9#include <linux/btf_ids.h>
10#include <linux/bpf_local_storage.h>
11#include <net/sock.h>
12#include <uapi/linux/sock_diag.h>
13#include <uapi/linux/btf.h>
14#include <linux/rcupdate.h>
15#include <linux/rcupdate_trace.h>
16#include <linux/rcupdate_wait.h>
17
18#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)
19
20static struct bpf_local_storage_map_bucket *
21select_bucket(struct bpf_local_storage_map *smap,
22 struct bpf_local_storage_elem *selem)
23{
24 return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
25}
26
27static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
28{
29 struct bpf_map *map = &smap->map;
30
31 if (!map->ops->map_local_storage_charge)
32 return 0;
33
34 return map->ops->map_local_storage_charge(smap, owner, size);
35}
36
37static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
38 u32 size)
39{
40 struct bpf_map *map = &smap->map;
41
42 if (map->ops->map_local_storage_uncharge)
43 map->ops->map_local_storage_uncharge(smap, owner, size);
44}
45
46static struct bpf_local_storage __rcu **
47owner_storage(struct bpf_local_storage_map *smap, void *owner)
48{
49 struct bpf_map *map = &smap->map;
50
51 return map->ops->map_owner_storage_ptr(owner);
52}
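/* A minimal sketch of how an owner type plugs into the helpers above; the
 * names here ("foo", foo_storage_*) are hypothetical, see the sk/task/inode/
 * cgrp storage maps for the real implementations:
 *
 *	static struct bpf_local_storage __rcu **foo_storage_ptr(void *owner)
 *	{
 *		return &((struct foo *)owner)->bpf_storage;
 *	}
 *
 *	const struct bpf_map_ops foo_storage_map_ops = {
 *		...
 *		.map_local_storage_charge	= foo_storage_charge,
 *		.map_local_storage_uncharge	= foo_storage_uncharge,
 *		.map_owner_storage_ptr		= foo_storage_ptr,
 *	};
 *
 * map_local_storage_charge/uncharge are optional (mem_charge() returns 0 and
 * mem_uncharge() is a no-op when they are not provided); map_owner_storage_ptr
 * is required by owner_storage().
 */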
53
54static bool selem_linked_to_storage_lockless(const struct bpf_local_storage_elem *selem)
55{
56 return !hlist_unhashed_lockless(&selem->snode);
57}
58
59static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
60{
61 return !hlist_unhashed(&selem->snode);
62}
63
64static bool selem_linked_to_map_lockless(const struct bpf_local_storage_elem *selem)
65{
66 return !hlist_unhashed_lockless(&selem->map_node);
67}
68
69static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
70{
71 return !hlist_unhashed(&selem->map_node);
72}
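/* The *_lockless() variants above use hlist_unhashed_lockless() (a READ_ONCE
 * based check) and are only used as a quick, best-effort test before taking
 * local_storage->lock or the bucket lock; the linked state is always
 * re-checked with the plain variants once the lock is held.
 */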
73
74struct bpf_local_storage_elem *
75bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
76 void *value, bool charge_mem, gfp_t gfp_flags)
77{
78 struct bpf_local_storage_elem *selem;
79
80 if (charge_mem && mem_charge(smap, owner, smap->elem_size))
81 return NULL;
82
83 if (smap->bpf_ma) {
84 migrate_disable();
85 selem = bpf_mem_cache_alloc_flags(&smap->selem_ma, gfp_flags);
86 migrate_enable();
87 if (selem)
88 /* Keep the original bpf_map_kzalloc behavior
89 * from before bpf_mem_cache_alloc was used.
90 *
91 * No need to use zero_map_value. The bpf_selem_free()
92 * only does bpf_mem_cache_free when no other
93 * bpf prog is using the selem.
94 */
95 memset(SDATA(selem)->data, 0, smap->map.value_size);
96 } else {
97 selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
98 gfp_flags | __GFP_NOWARN);
99 }
100
101 if (selem) {
102 if (value)
103 copy_map_value(&smap->map, SDATA(selem)->data, value);
104 /* No need to call check_and_init_map_value as memory is zero init */
105 return selem;
106 }
107
108 if (charge_mem)
109 mem_uncharge(smap, owner, smap->elem_size);
110
111 return NULL;
112}
113
114/* rcu tasks trace callback for bpf_ma == false */
115static void __bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
116{
117 struct bpf_local_storage *local_storage;
118
119 /* If RCU Tasks Trace grace period implies RCU grace period, do
120 * kfree(), else do kfree_rcu().
121 */
122 local_storage = container_of(rcu, struct bpf_local_storage, rcu);
123 if (rcu_trace_implies_rcu_gp())
124 kfree(local_storage);
125 else
126 kfree_rcu(local_storage, rcu);
127}
128
129static void bpf_local_storage_free_rcu(struct rcu_head *rcu)
130{
131 struct bpf_local_storage *local_storage;
132
133 local_storage = container_of(rcu, struct bpf_local_storage, rcu);
134 bpf_mem_cache_raw_free(local_storage);
135}
136
137static void bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
138{
139 if (rcu_trace_implies_rcu_gp())
140 bpf_local_storage_free_rcu(rcu);
141 else
142 call_rcu(rcu, bpf_local_storage_free_rcu);
143}
144
145/* Handle bpf_ma == false */
146static void __bpf_local_storage_free(struct bpf_local_storage *local_storage,
147 bool vanilla_rcu)
148{
149 if (vanilla_rcu)
150 kfree_rcu(local_storage, rcu);
151 else
152 call_rcu_tasks_trace(&local_storage->rcu,
153 __bpf_local_storage_free_trace_rcu);
154}
155
156static void bpf_local_storage_free(struct bpf_local_storage *local_storage,
157 struct bpf_local_storage_map *smap,
158 bool bpf_ma, bool reuse_now)
159{
160 if (!local_storage)
161 return;
162
163 if (!bpf_ma) {
164 __bpf_local_storage_free(local_storage, reuse_now);
165 return;
166 }
167
168 if (!reuse_now) {
169 call_rcu_tasks_trace(&local_storage->rcu,
170 bpf_local_storage_free_trace_rcu);
171 return;
172 }
173
174 if (smap) {
175 migrate_disable();
176 bpf_mem_cache_free(&smap->storage_ma, local_storage);
177 migrate_enable();
178 } else {
179 /* smap could be NULL if the selem that triggered
180 * this 'local_storage' creation had been long gone.
181 * In this case, directly do call_rcu().
182 */
183 call_rcu(&local_storage->rcu, bpf_local_storage_free_rcu);
184 }
185}
186
187/* rcu tasks trace callback for bpf_ma == false */
188static void __bpf_selem_free_trace_rcu(struct rcu_head *rcu)
189{
190 struct bpf_local_storage_elem *selem;
191
192 selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
193 if (rcu_trace_implies_rcu_gp())
194 kfree(selem);
195 else
196 kfree_rcu(selem, rcu);
197}
198
199/* Handle bpf_ma == false */
200static void __bpf_selem_free(struct bpf_local_storage_elem *selem,
201 bool vanilla_rcu)
202{
203 if (vanilla_rcu)
204 kfree_rcu(selem, rcu);
205 else
206 call_rcu_tasks_trace(&selem->rcu, __bpf_selem_free_trace_rcu);
207}
208
209static void bpf_selem_free_rcu(struct rcu_head *rcu)
210{
211 struct bpf_local_storage_elem *selem;
212
213 selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
214 bpf_mem_cache_raw_free(selem);
215}
216
217static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
218{
219 if (rcu_trace_implies_rcu_gp())
220 bpf_selem_free_rcu(rcu);
221 else
222 call_rcu(rcu, bpf_selem_free_rcu);
223}
224
225void bpf_selem_free(struct bpf_local_storage_elem *selem,
226 struct bpf_local_storage_map *smap,
227 bool reuse_now)
228{
229 bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
230
231 if (!smap->bpf_ma) {
232 __bpf_selem_free(selem, reuse_now);
233 return;
234 }
235
236 if (!reuse_now) {
237 call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);
238 } else {
239 /* Instead of using the vanilla call_rcu(),
240 * bpf_mem_cache_free allows the selem to be
241 * reused immediately.
242 */
243 migrate_disable();
244 bpf_mem_cache_free(&smap->selem_ma, selem);
245 migrate_enable();
246 }
247}
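/* Summary of the selem freeing paths above:
 *
 * - !bpf_ma: kfree_rcu() when the selem can be reused now, otherwise an
 *   RCU tasks trace grace period (plus a regular RCU grace period when the
 *   former does not imply the latter) followed by kfree().
 * - bpf_ma && reuse_now: bpf_mem_cache_free(), which may hand the memory out
 *   again immediately.
 * - bpf_ma && !reuse_now: the same tasks-trace-then-RCU wait as the kmalloc
 *   case, followed by bpf_mem_cache_raw_free().
 *
 * In all cases bpf_obj_free_fields() has already dropped any special fields
 * (kptrs etc.) embedded in the map value.
 */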
248
249/* local_storage->lock must be held and selem->local_storage == local_storage.
250 * The caller must ensure selem->smap is still valid to be
251 * dereferenced for its smap->elem_size and smap->cache_idx.
252 */
253static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
254 struct bpf_local_storage_elem *selem,
255 bool uncharge_mem, bool reuse_now)
256{
257 struct bpf_local_storage_map *smap;
258 bool free_local_storage;
259 void *owner;
260
261 smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
262 owner = local_storage->owner;
263
264 /* All uncharging on the owner must be done first.
265 * The owner may be freed once the last selem is unlinked
266 * from local_storage.
267 */
268 if (uncharge_mem)
269 mem_uncharge(smap, owner, smap->elem_size);
270
271 free_local_storage = hlist_is_singular_node(&selem->snode,
272 &local_storage->list);
273 if (free_local_storage) {
274 mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
275 local_storage->owner = NULL;
276
277 /* After this RCU_INIT, owner may be freed and cannot be used */
278 RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);
279
280 /* local_storage is not freed now. local_storage->lock is
281 * still held and raw_spin_unlock_bh(&local_storage->lock)
282 * will be done by the caller.
283 *
284 * Although the unlock will be done under
285 * rcu_read_lock(), it is more intuitive to
286 * read if the freeing of the storage is done
287 * after the raw_spin_unlock_bh(&local_storage->lock).
288 *
289 * Hence, a "bool free_local_storage" is returned
290 * to the caller, which then frees the storage after
291 * all the RCU grace periods have expired.
292 */
293 }
294 hlist_del_init_rcu(&selem->snode);
295 if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
296 SDATA(selem))
297 RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);
298
299 bpf_selem_free(selem, smap, reuse_now);
300
301 if (rcu_access_pointer(local_storage->smap) == smap)
302 RCU_INIT_POINTER(local_storage->smap, NULL);
303
304 return free_local_storage;
305}
306
307static bool check_storage_bpf_ma(struct bpf_local_storage *local_storage,
308 struct bpf_local_storage_map *storage_smap,
309 struct bpf_local_storage_elem *selem)
310{
311
312 struct bpf_local_storage_map *selem_smap;
313
314 /* local_storage->smap may be NULL. If it is, get the bpf_ma
315 * from any selem in the local_storage->list. The bpf_ma of all
316 * local_storage and selem should have the same value
317 * for the same map type.
318 *
319 * If the local_storage->list is already empty, the caller will not
320 * care about the bpf_ma value either, because the caller is not
321 * responsible for freeing the local_storage.
322 */
323
324 if (storage_smap)
325 return storage_smap->bpf_ma;
326
327 if (!selem) {
328 struct hlist_node *n;
329
330 n = rcu_dereference_check(hlist_first_rcu(&local_storage->list),
331 bpf_rcu_lock_held());
332 if (!n)
333 return false;
334
335 selem = hlist_entry(n, struct bpf_local_storage_elem, snode);
336 }
337 selem_smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
338
339 return selem_smap->bpf_ma;
340}
341
342static void bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
343 bool reuse_now)
344{
345 struct bpf_local_storage_map *storage_smap;
346 struct bpf_local_storage *local_storage;
347 bool bpf_ma, free_local_storage = false;
348 unsigned long flags;
349
350 if (unlikely(!selem_linked_to_storage_lockless(selem)))
351 /* selem has already been unlinked from sk */
352 return;
353
354 local_storage = rcu_dereference_check(selem->local_storage,
355 bpf_rcu_lock_held());
356 storage_smap = rcu_dereference_check(local_storage->smap,
357 bpf_rcu_lock_held());
358 bpf_ma = check_storage_bpf_ma(local_storage, storage_smap, selem);
359
360 raw_spin_lock_irqsave(&local_storage->lock, flags);
361 if (likely(selem_linked_to_storage(selem)))
362 free_local_storage = bpf_selem_unlink_storage_nolock(
363 local_storage, selem, true, reuse_now);
364 raw_spin_unlock_irqrestore(&local_storage->lock, flags);
365
366 if (free_local_storage)
367 bpf_local_storage_free(local_storage, storage_smap, bpf_ma, reuse_now);
368}
369
370void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
371 struct bpf_local_storage_elem *selem)
372{
373 RCU_INIT_POINTER(selem->local_storage, local_storage);
374 hlist_add_head_rcu(&selem->snode, &local_storage->list);
375}
376
377static void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
378{
379 struct bpf_local_storage_map *smap;
380 struct bpf_local_storage_map_bucket *b;
381 unsigned long flags;
382
383 if (unlikely(!selem_linked_to_map_lockless(selem)))
384 /* selem has already been unlinked from smap */
385 return;
386
387 smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
388 b = select_bucket(smap, selem);
389 raw_spin_lock_irqsave(&b->lock, flags);
390 if (likely(selem_linked_to_map(selem)))
391 hlist_del_init_rcu(&selem->map_node);
392 raw_spin_unlock_irqrestore(&b->lock, flags);
393}
394
395void bpf_selem_link_map(struct bpf_local_storage_map *smap,
396 struct bpf_local_storage_elem *selem)
397{
398 struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
399 unsigned long flags;
400
401 raw_spin_lock_irqsave(&b->lock, flags);
402 RCU_INIT_POINTER(SDATA(selem)->smap, smap);
403 hlist_add_head_rcu(&selem->map_node, &b->list);
404 raw_spin_unlock_irqrestore(&b->lock, flags);
405}
406
407void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now)
408{
409 /* Always unlink from map before unlinking from local_storage
410 * because selem will be freed after being successfully unlinked from
411 * the local_storage.
412 */
413 bpf_selem_unlink_map(selem);
414 bpf_selem_unlink_storage(selem, reuse_now);
415}
416
417/* If cacheit_lockit is false, this lookup function is lockless */
418struct bpf_local_storage_data *
419bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
420 struct bpf_local_storage_map *smap,
421 bool cacheit_lockit)
422{
423 struct bpf_local_storage_data *sdata;
424 struct bpf_local_storage_elem *selem;
425
426 /* Fast path (cache hit) */
427 sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
428 bpf_rcu_lock_held());
429 if (sdata && rcu_access_pointer(sdata->smap) == smap)
430 return sdata;
431
432 /* Slow path (cache miss) */
433 hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
434 rcu_read_lock_trace_held())
435 if (rcu_access_pointer(SDATA(selem)->smap) == smap)
436 break;
437
438 if (!selem)
439 return NULL;
440
441 sdata = SDATA(selem);
442 if (cacheit_lockit) {
443 unsigned long flags;
444
445 /* spinlock is needed to avoid racing with a
446 * parallel delete. Otherwise, publishing an already
447 * deleted sdata to the cache would become a use-after-free
448 * problem in the next bpf_local_storage_lookup().
449 */
450 raw_spin_lock_irqsave(&local_storage->lock, flags);
451 if (selem_linked_to_storage(selem))
452 rcu_assign_pointer(local_storage->cache[smap->cache_idx],
453 sdata);
454 raw_spin_unlock_irqrestore(&local_storage->lock, flags);
455 }
456
457 return sdata;
458}
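/* Illustrative only: a typical owner-specific "get" path does the lookup
 * under RCU, roughly (owner->bpf_storage stands in for whatever
 * map_owner_storage_ptr() returns, e.g. sk->sk_bpf_storage):
 *
 *	local_storage = rcu_dereference(owner->bpf_storage);
 *	if (!local_storage)
 *		return NULL;
 *	return bpf_local_storage_lookup(local_storage, smap, cacheit_lockit);
 */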
459
460static int check_flags(const struct bpf_local_storage_data *old_sdata,
461 u64 map_flags)
462{
463 if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
464 /* elem already exists */
465 return -EEXIST;
466
467 if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
468 /* elem doesn't exist, cannot update it */
469 return -ENOENT;
470
471 return 0;
472}
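/* check_flags() gives the usual bpf_map_update_elem() semantics after
 * masking out BPF_F_LOCK: BPF_ANY always passes, BPF_NOEXIST fails with
 * -EEXIST if an elem already exists for this owner/map pair, and BPF_EXIST
 * fails with -ENOENT if it does not.
 */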
473
474int bpf_local_storage_alloc(void *owner,
475 struct bpf_local_storage_map *smap,
476 struct bpf_local_storage_elem *first_selem,
477 gfp_t gfp_flags)
478{
479 struct bpf_local_storage *prev_storage, *storage;
480 struct bpf_local_storage **owner_storage_ptr;
481 int err;
482
483 err = mem_charge(smap, owner, sizeof(*storage));
484 if (err)
485 return err;
486
487 if (smap->bpf_ma) {
488 migrate_disable();
489 storage = bpf_mem_cache_alloc_flags(&smap->storage_ma, gfp_flags);
490 migrate_enable();
491 } else {
492 storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
493 gfp_flags | __GFP_NOWARN);
494 }
495
496 if (!storage) {
497 err = -ENOMEM;
498 goto uncharge;
499 }
500
501 RCU_INIT_POINTER(storage->smap, smap);
502 INIT_HLIST_HEAD(&storage->list);
503 raw_spin_lock_init(&storage->lock);
504 storage->owner = owner;
505
506 bpf_selem_link_storage_nolock(storage, first_selem);
507 bpf_selem_link_map(smap, first_selem);
508
509 owner_storage_ptr =
510 (struct bpf_local_storage **)owner_storage(smap, owner);
511 /* Publish storage to the owner.
512 * Instead of using any lock of the kernel object (i.e. owner),
513 * cmpxchg will work with any kernel object regardless of what
514 * the running context is: bh, irq, etc.
515 *
516 * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage)
517 * is protected by the storage->lock. Hence, when freeing
518 * the owner->storage, the storage->lock must be held before
519 * setting owner->storage ptr to NULL.
520 */
521 prev_storage = cmpxchg(owner_storage_ptr, NULL, storage);
522 if (unlikely(prev_storage)) {
523 bpf_selem_unlink_map(first_selem);
524 err = -EAGAIN;
525 goto uncharge;
526
527 /* Note that even though first_selem was linked to smap's
528 * bucket->list, first_selem can be freed immediately
529 * (instead of kfree_rcu) because
530 * bpf_local_storage_map_free() does a
531 * synchronize_rcu_mult (waiting for both sleepable and
532 * normal programs) before walking the bucket->list.
533 * Hence, no one is accessing selem from the
534 * bucket->list under rcu_read_lock().
535 */
536 }
537
538 return 0;
539
540uncharge:
541 bpf_local_storage_free(storage, smap, smap->bpf_ma, true);
542 mem_uncharge(smap, owner, sizeof(*storage));
543 return err;
544}
545
546 /* sk cannot be going away because the caller is linking a new elem
547 * to sk->sk_bpf_storage (i.e. sk->sk_refcnt cannot be 0).
548 * Otherwise, it would become a leak (and cause other memory
549 * issues during map destruction).
550 */
551struct bpf_local_storage_data *
552bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
553 void *value, u64 map_flags, gfp_t gfp_flags)
554{
555 struct bpf_local_storage_data *old_sdata = NULL;
556 struct bpf_local_storage_elem *alloc_selem, *selem = NULL;
557 struct bpf_local_storage *local_storage;
558 unsigned long flags;
559 int err;
560
561 /* BPF_EXIST and BPF_NOEXIST cannot be both set */
562 if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
563 /* BPF_F_LOCK can only be used in a value with spin_lock */
564 unlikely((map_flags & BPF_F_LOCK) &&
565 !btf_record_has_field(smap->map.record, BPF_SPIN_LOCK)))
566 return ERR_PTR(-EINVAL);
567
568 if (gfp_flags == GFP_KERNEL && (map_flags & ~BPF_F_LOCK) != BPF_NOEXIST)
569 return ERR_PTR(-EINVAL);
570
571 local_storage = rcu_dereference_check(*owner_storage(smap, owner),
572 bpf_rcu_lock_held());
573 if (!local_storage || hlist_empty(&local_storage->list)) {
574 /* Very first elem for the owner */
575 err = check_flags(NULL, map_flags);
576 if (err)
577 return ERR_PTR(err);
578
579 selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
580 if (!selem)
581 return ERR_PTR(-ENOMEM);
582
583 err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
584 if (err) {
585 bpf_selem_free(selem, smap, true);
586 mem_uncharge(smap, owner, smap->elem_size);
587 return ERR_PTR(err);
588 }
589
590 return SDATA(selem);
591 }
592
593 if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
594 /* Hoping to find an old_sdata to do inline update
595 * such that it can avoid taking the local_storage->lock
596 * and changing the lists.
597 */
598 old_sdata =
599 bpf_local_storage_lookup(local_storage, smap, false);
600 err = check_flags(old_sdata, map_flags);
601 if (err)
602 return ERR_PTR(err);
603 if (old_sdata && selem_linked_to_storage_lockless(SELEM(old_sdata))) {
604 copy_map_value_locked(&smap->map, old_sdata->data,
605 value, false);
606 return old_sdata;
607 }
608 }
609
610 /* A lookup has just been done before and concluded a new selem is
611 * needed. An unnecessary alloc is therefore unlikely.
612 */
613 alloc_selem = selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
614 if (!alloc_selem)
615 return ERR_PTR(-ENOMEM);
616
617 raw_spin_lock_irqsave(&local_storage->lock, flags);
618
619 /* Recheck local_storage->list under local_storage->lock */
620 if (unlikely(hlist_empty(&local_storage->list))) {
621 /* A parallel del is happening and local_storage is going
622 * away. It has just been checked before, so very
623 * unlikely. Return instead of retry to keep things
624 * simple.
625 */
626 err = -EAGAIN;
627 goto unlock;
628 }
629
630 old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
631 err = check_flags(old_sdata, map_flags);
632 if (err)
633 goto unlock;
634
635 if (old_sdata && (map_flags & BPF_F_LOCK)) {
636 copy_map_value_locked(&smap->map, old_sdata->data, value,
637 false);
638 selem = SELEM(old_sdata);
639 goto unlock;
640 }
641
642 alloc_selem = NULL;
643 /* First, link the new selem to the map */
644 bpf_selem_link_map(smap, selem);
645
646 /* Second, link (and publish) the new selem to local_storage */
647 bpf_selem_link_storage_nolock(local_storage, selem);
648
649 /* Third, remove old selem, SELEM(old_sdata) */
650 if (old_sdata) {
651 bpf_selem_unlink_map(SELEM(old_sdata));
652 bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
653 true, false);
654 }
655
656unlock:
657 raw_spin_unlock_irqrestore(&local_storage->lock, flags);
658 if (alloc_selem) {
659 mem_uncharge(smap, owner, smap->elem_size);
660 bpf_selem_free(alloc_selem, smap, true);
661 }
662 return err ? ERR_PTR(err) : SDATA(selem);
663}
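/* The update above takes one of three paths:
 * 1. No local_storage (or an empty list) yet: allocate the first selem and
 *    the local_storage itself via bpf_local_storage_alloc().
 * 2. BPF_F_LOCK without BPF_NOEXIST: try a lockless lookup and update the
 *    existing value in place with copy_map_value_locked().
 * 3. Otherwise: allocate a new selem, link it to the map bucket and to the
 *    local_storage under local_storage->lock, then unlink the old selem (if
 *    any) so readers switch over to the new value.
 */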
664
665static u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
666{
667 u64 min_usage = U64_MAX;
668 u16 i, res = 0;
669
670 spin_lock(&cache->idx_lock);
671
672 for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
673 if (cache->idx_usage_counts[i] < min_usage) {
674 min_usage = cache->idx_usage_counts[i];
675 res = i;
676
677 /* Found a free cache_idx */
678 if (!min_usage)
679 break;
680 }
681 }
682 cache->idx_usage_counts[res]++;
683
684 spin_unlock(&cache->idx_lock);
685
686 return res;
687}
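/* cache_idx picks the least-used slot of local_storage->cache[] (an array of
 * BPF_LOCAL_STORAGE_CACHE_SIZE entries), so that different maps of the same
 * storage type tend to land on different slots. The chosen slot is what the
 * cache-hit fast path in bpf_local_storage_lookup() checks first.
 */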
688
689static void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
690 u16 idx)
691{
692 spin_lock(&cache->idx_lock);
693 cache->idx_usage_counts[idx]--;
694 spin_unlock(&cache->idx_lock);
695}
696
697int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
698{
699 if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
700 !(attr->map_flags & BPF_F_NO_PREALLOC) ||
701 attr->max_entries ||
702 attr->key_size != sizeof(int) || !attr->value_size ||
703 /* Enforce BTF for userspace sk dumping */
704 !attr->btf_key_type_id || !attr->btf_value_type_id)
705 return -EINVAL;
706
707 if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
708 return -E2BIG;
709
710 return 0;
711}
712
713int bpf_local_storage_map_check_btf(const struct bpf_map *map,
714 const struct btf *btf,
715 const struct btf_type *key_type,
716 const struct btf_type *value_type)
717{
718 u32 int_data;
719
720 if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
721 return -EINVAL;
722
723 int_data = *(u32 *)(key_type + 1);
724 if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
725 return -EINVAL;
726
727 return 0;
728}
729
730void bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
731{
732 struct bpf_local_storage_map *storage_smap;
733 struct bpf_local_storage_elem *selem;
734 bool bpf_ma, free_storage = false;
735 struct hlist_node *n;
736 unsigned long flags;
737
738 storage_smap = rcu_dereference_check(local_storage->smap, bpf_rcu_lock_held());
739 bpf_ma = check_storage_bpf_ma(local_storage, storage_smap, NULL);
740
741 /* Neither the bpf_prog nor the bpf_map's syscall
742 * could be modifying the local_storage->list now.
743 * Thus, no elem can be added to or deleted from the
744 * local_storage->list by the bpf_prog or by the bpf_map's syscall.
745 *
746 * It is racing with bpf_local_storage_map_free() alone
747 * when unlinking elem from the local_storage->list and
748 * the map's bucket->list.
749 */
750 raw_spin_lock_irqsave(&local_storage->lock, flags);
751 hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
752 /* Always unlink from map before unlinking from
753 * local_storage.
754 */
755 bpf_selem_unlink_map(selem);
756 /* If local_storage list has only one element, the
757 * bpf_selem_unlink_storage_nolock() will return true.
758 * Otherwise, it will return false. The current loop iteration
759 * intends to remove all local storage. So the last iteration
760 * of the loop will set free_storage to true.
761 */
762 free_storage = bpf_selem_unlink_storage_nolock(
763 local_storage, selem, true, true);
764 }
765 raw_spin_unlock_irqrestore(&local_storage->lock, flags);
766
767 if (free_storage)
768 bpf_local_storage_free(local_storage, storage_smap, bpf_ma, true);
769}
770
771u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map)
772{
773 struct bpf_local_storage_map *smap = (struct bpf_local_storage_map *)map;
774 u64 usage = sizeof(*smap);
775
776 /* The dynamically allocated selems are currently not counted. */
777 usage += sizeof(*smap->buckets) * (1ULL << smap->bucket_log);
778 return usage;
779}
780
781/* When bpf_ma == true, the bpf_mem_alloc is used to allocate and free memory.
782 * A deadlock-free allocator is useful for storage whose owner PTR_TO_BTF_ID
783 * the bpf prog can easily get hold of in any context, e.g. bpf_get_current_task_btf.
784 * The task and cgroup storage fall into this case. The bpf_mem_alloc reuses
785 * memory immediately. To be reuse-immediate safe, the owner destruction
786 * code path needs to go through an RCU grace period before calling
787 * bpf_local_storage_destroy().
788 *
789 * When bpf_ma == false, kmalloc and kfree are used.
790 */
791struct bpf_map *
792bpf_local_storage_map_alloc(union bpf_attr *attr,
793 struct bpf_local_storage_cache *cache,
794 bool bpf_ma)
795{
796 struct bpf_local_storage_map *smap;
797 unsigned int i;
798 u32 nbuckets;
799 int err;
800
801 smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE);
802 if (!smap)
803 return ERR_PTR(-ENOMEM);
804 bpf_map_init_from_attr(&smap->map, attr);
805
806 nbuckets = roundup_pow_of_two(num_possible_cpus());
807 /* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
808 nbuckets = max_t(u32, 2, nbuckets);
809 smap->bucket_log = ilog2(nbuckets);
810
811 smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets),
812 nbuckets, GFP_USER | __GFP_NOWARN);
813 if (!smap->buckets) {
814 err = -ENOMEM;
815 goto free_smap;
816 }
817
818 for (i = 0; i < nbuckets; i++) {
819 INIT_HLIST_HEAD(&smap->buckets[i].list);
820 raw_spin_lock_init(&smap->buckets[i].lock);
821 }
822
823 smap->elem_size = offsetof(struct bpf_local_storage_elem,
824 sdata.data[attr->value_size]);
825
826 smap->bpf_ma = bpf_ma;
827 if (bpf_ma) {
828 err = bpf_mem_alloc_init(&smap->selem_ma, smap->elem_size, false);
829 if (err)
830 goto free_smap;
831
832 err = bpf_mem_alloc_init(&smap->storage_ma, sizeof(struct bpf_local_storage), false);
833 if (err) {
834 bpf_mem_alloc_destroy(&smap->selem_ma);
835 goto free_smap;
836 }
837 }
838
839 smap->cache_idx = bpf_local_storage_cache_idx_get(cache);
840 return &smap->map;
841
842free_smap:
843 kvfree(smap->buckets);
844 bpf_map_area_free(smap);
845 return ERR_PTR(err);
846}
847
848void bpf_local_storage_map_free(struct bpf_map *map,
849 struct bpf_local_storage_cache *cache,
850 int __percpu *busy_counter)
851{
852 struct bpf_local_storage_map_bucket *b;
853 struct bpf_local_storage_elem *selem;
854 struct bpf_local_storage_map *smap;
855 unsigned int i;
856
857 smap = (struct bpf_local_storage_map *)map;
858 bpf_local_storage_cache_idx_free(cache, smap->cache_idx);
859
860 /* Note that this map might be concurrently cloned from
861 * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
862 * RCU read section to finish before proceeding. New RCU
863 * read sections should be prevented via bpf_map_inc_not_zero.
864 */
865 synchronize_rcu();
866
867 /* bpf prog and the userspace can no longer access this map
868 * now. No new selem (of this map) can be added
869 * to the owner->storage or to the map bucket's list.
870 *
871 * The elem of this map can be cleaned up here
872 * or when the storage is freed e.g.
873 * by bpf_sk_storage_free() during __sk_destruct().
874 */
875 for (i = 0; i < (1U << smap->bucket_log); i++) {
876 b = &smap->buckets[i];
877
878 rcu_read_lock();
879 /* No one is adding to b->list now */
880 while ((selem = hlist_entry_safe(
881 rcu_dereference_raw(hlist_first_rcu(&b->list)),
882 struct bpf_local_storage_elem, map_node))) {
883 if (busy_counter) {
884 migrate_disable();
885 this_cpu_inc(*busy_counter);
886 }
887 bpf_selem_unlink(selem, true);
888 if (busy_counter) {
889 this_cpu_dec(*busy_counter);
890 migrate_enable();
891 }
892 cond_resched_rcu();
893 }
894 rcu_read_unlock();
895 }
896
897 /* While freeing the storage we may still need to access the map.
898 *
899 * e.g. when bpf_sk_storage_free() has unlinked selem from the map
900 * which then made the above while((selem = ...)) loop
901 * exit immediately.
902 *
903 * However, while freeing the storage one still needs to access the
904 * smap->elem_size to do the uncharging in
905 * bpf_selem_unlink_storage_nolock().
906 *
907 * Hence, wait another rcu grace period for the storage to be freed.
908 */
909 synchronize_rcu();
910
911 if (smap->bpf_ma) {
912 bpf_mem_alloc_destroy(&smap->selem_ma);
913 bpf_mem_alloc_destroy(&smap->storage_ma);
914 }
915 kvfree(smap->buckets);
916 bpf_map_area_free(smap);
917}