v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright (c) 2019 Facebook  */
  3#include <linux/rculist.h>
  4#include <linux/list.h>
  5#include <linux/hash.h>
  6#include <linux/types.h>
  7#include <linux/spinlock.h>
  8#include <linux/bpf.h>
  9#include <linux/btf_ids.h>
 10#include <linux/bpf_local_storage.h>
 11#include <net/sock.h>
 12#include <uapi/linux/sock_diag.h>
 13#include <uapi/linux/btf.h>
 14#include <linux/rcupdate.h>
 15#include <linux/rcupdate_trace.h>
 16#include <linux/rcupdate_wait.h>
 17
 18#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)
 19
 20static struct bpf_local_storage_map_bucket *
 21select_bucket(struct bpf_local_storage_map *smap,
 22	      struct bpf_local_storage_elem *selem)
 23{
 24	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
 25}
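
The bucket index above comes from hashing the selem pointer itself. A minimal userspace sketch of the idea, assuming hash_ptr() reduces to a multiplicative hash that keeps the top bucket_log bits (the constant mirrors the kernel's GOLDEN_RATIO_64; illustrative, not kernel code):

#include <stdint.h>

/* Map a pointer to one of 2^bucket_log buckets by multiplying with a
 * golden-ratio constant and keeping the top bits. bucket_log must be
 * >= 1 (a shift by 64 would be undefined), which matches the
 * "at least 2 buckets" rule enforced in bpf_local_storage_map_alloc(). */
static unsigned int bucket_of(const void *p, unsigned int bucket_log)
{
	uint64_t h = (uint64_t)(uintptr_t)p * 0x61C8864680B583EBULL;

	return (unsigned int)(h >> (64 - bucket_log));
}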
 26
 27static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
 28{
 29	struct bpf_map *map = &smap->map;
 30
 31	if (!map->ops->map_local_storage_charge)
 32		return 0;
 33
 34	return map->ops->map_local_storage_charge(smap, owner, size);
 35}
 36
 37static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
 38			 u32 size)
 39{
 40	struct bpf_map *map = &smap->map;
 41
 42	if (map->ops->map_local_storage_uncharge)
 43		map->ops->map_local_storage_uncharge(smap, owner, size);
 44}
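
Both accounting helpers treat the per-map-type hooks as optional: a missing charge hook means charging always succeeds, and a missing uncharge hook is simply skipped. A small sketch of that optional-callback pattern (hypothetical ops struct, not the kernel one):

struct owner_ops {
	int  (*charge)(void *owner, unsigned int size);   /* may be NULL */
	void (*uncharge)(void *owner, unsigned int size); /* may be NULL */
};

static int do_charge(const struct owner_ops *ops, void *owner, unsigned int size)
{
	return ops->charge ? ops->charge(owner, size) : 0; /* absent: succeed */
}

static void do_uncharge(const struct owner_ops *ops, void *owner, unsigned int size)
{
	if (ops->uncharge)
		ops->uncharge(owner, size);
}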
 45
 46static struct bpf_local_storage __rcu **
 47owner_storage(struct bpf_local_storage_map *smap, void *owner)
 48{
 49	struct bpf_map *map = &smap->map;
 50
 51	return map->ops->map_owner_storage_ptr(owner);
 52}
 53
 54static bool selem_linked_to_storage_lockless(const struct bpf_local_storage_elem *selem)
 55{
 56	return !hlist_unhashed_lockless(&selem->snode);
 57}
 58
 59static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
 60{
 61	return !hlist_unhashed(&selem->snode);
 62}
 63
 64static bool selem_linked_to_map_lockless(const struct bpf_local_storage_elem *selem)
 65{
 66	return !hlist_unhashed_lockless(&selem->map_node);
 67}
 68
 69static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
 70{
 71	return !hlist_unhashed(&selem->map_node);
 72}
 73
 74struct bpf_local_storage_elem *
 75bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
 76		void *value, bool charge_mem, gfp_t gfp_flags)
 77{
 78	struct bpf_local_storage_elem *selem;
 79
 80	if (charge_mem && mem_charge(smap, owner, smap->elem_size))
 81		return NULL;
 82
 83	if (smap->bpf_ma) {
 84		migrate_disable();
 85		selem = bpf_mem_cache_alloc_flags(&smap->selem_ma, gfp_flags);
 86		migrate_enable();
 87		if (selem)
  88			/* Keep the original bpf_map_kzalloc behavior
  89			 * from before switching to bpf_mem_cache_alloc.
  90			 *
  91			 * No need to use zero_map_value. The bpf_selem_free()
  92			 * only does bpf_mem_cache_free when no other
  93			 * bpf prog is using the selem.
  94			 */
 95			memset(SDATA(selem)->data, 0, smap->map.value_size);
 96	} else {
 97		selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
 98					gfp_flags | __GFP_NOWARN);
 99	}
100
101	if (selem) {
102		if (value)
103			copy_map_value(&smap->map, SDATA(selem)->data, value);
104		/* No need to call check_and_init_map_value as memory is zero init */
105		return selem;
106	}
107
108	if (charge_mem)
109		mem_uncharge(smap, owner, smap->elem_size);
110
111	return NULL;
112}
113
114/* rcu tasks trace callback for bpf_ma == false */
115static void __bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
116{
117	struct bpf_local_storage *local_storage;
118
119	/* If RCU Tasks Trace grace period implies RCU grace period, do
120	 * kfree(), else do kfree_rcu().
121	 */
122	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
123	if (rcu_trace_implies_rcu_gp())
124		kfree(local_storage);
125	else
126		kfree_rcu(local_storage, rcu);
127}
128
129static void bpf_local_storage_free_rcu(struct rcu_head *rcu)
130{
131	struct bpf_local_storage *local_storage;
132
133	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
134	bpf_mem_cache_raw_free(local_storage);
135}
136
137static void bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
138{
139	if (rcu_trace_implies_rcu_gp())
140		bpf_local_storage_free_rcu(rcu);
141	else
142		call_rcu(rcu, bpf_local_storage_free_rcu);
143}
144
145/* Handle bpf_ma == false */
146static void __bpf_local_storage_free(struct bpf_local_storage *local_storage,
147				     bool vanilla_rcu)
148{
149	if (vanilla_rcu)
150		kfree_rcu(local_storage, rcu);
151	else
152		call_rcu_tasks_trace(&local_storage->rcu,
153				     __bpf_local_storage_free_trace_rcu);
154}
155
156static void bpf_local_storage_free(struct bpf_local_storage *local_storage,
157				   struct bpf_local_storage_map *smap,
158				   bool bpf_ma, bool reuse_now)
159{
160	if (!local_storage)
161		return;
162
163	if (!bpf_ma) {
164		__bpf_local_storage_free(local_storage, reuse_now);
165		return;
166	}
167
168	if (!reuse_now) {
169		call_rcu_tasks_trace(&local_storage->rcu,
170				     bpf_local_storage_free_trace_rcu);
171		return;
172	}
173
174	if (smap) {
175		migrate_disable();
176		bpf_mem_cache_free(&smap->storage_ma, local_storage);
177		migrate_enable();
178	} else {
179		/* smap could be NULL if the selem that triggered
180		 * this 'local_storage' creation had been long gone.
181		 * In this case, directly do call_rcu().
182		 */
183		call_rcu(&local_storage->rcu, bpf_local_storage_free_rcu);
184	}
185}
186
187/* rcu tasks trace callback for bpf_ma == false */
188static void __bpf_selem_free_trace_rcu(struct rcu_head *rcu)
189{
190	struct bpf_local_storage_elem *selem;
191
192	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
193	if (rcu_trace_implies_rcu_gp())
194		kfree(selem);
195	else
196		kfree_rcu(selem, rcu);
197}
198
199/* Handle bpf_ma == false */
200static void __bpf_selem_free(struct bpf_local_storage_elem *selem,
201			     bool vanilla_rcu)
202{
203	if (vanilla_rcu)
204		kfree_rcu(selem, rcu);
205	else
206		call_rcu_tasks_trace(&selem->rcu, __bpf_selem_free_trace_rcu);
207}
208
209static void bpf_selem_free_rcu(struct rcu_head *rcu)
210{
211	struct bpf_local_storage_elem *selem;
212
213	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
214	bpf_mem_cache_raw_free(selem);
215}
216
217static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
218{
219	if (rcu_trace_implies_rcu_gp())
220		bpf_selem_free_rcu(rcu);
221	else
222		call_rcu(rcu, bpf_selem_free_rcu);
223}
224
225void bpf_selem_free(struct bpf_local_storage_elem *selem,
226		    struct bpf_local_storage_map *smap,
227		    bool reuse_now)
228{
229	bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
230
231	if (!smap->bpf_ma) {
232		__bpf_selem_free(selem, reuse_now);
233		return;
234	}
235
236	if (!reuse_now) {
237		call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);
238	} else {
239		/* Instead of using the vanilla call_rcu(),
240		 * bpf_mem_cache_free will be able to reuse selem
241		 * immediately.
242		 */
243		migrate_disable();
244		bpf_mem_cache_free(&smap->selem_ma, selem);
245		migrate_enable();
246	}
247}
248
249/* local_storage->lock must be held and selem->local_storage == local_storage.
250 * The caller must ensure selem->smap is still valid to be
251 * dereferenced for its smap->elem_size and smap->cache_idx.
252 */
253static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
254					    struct bpf_local_storage_elem *selem,
255					    bool uncharge_mem, bool reuse_now)
256{
257	struct bpf_local_storage_map *smap;
258	bool free_local_storage;
259	void *owner;
260
261	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
262	owner = local_storage->owner;
263
264	/* All uncharging on the owner must be done first.
265	 * The owner may be freed once the last selem is unlinked
266	 * from local_storage.
267	 */
268	if (uncharge_mem)
269		mem_uncharge(smap, owner, smap->elem_size);
270
271	free_local_storage = hlist_is_singular_node(&selem->snode,
272						    &local_storage->list);
273	if (free_local_storage) {
274		mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
275		local_storage->owner = NULL;
276
277		/* After this RCU_INIT, owner may be freed and cannot be used */
278		RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);
279
280		/* local_storage is not freed now.  local_storage->lock is
281		 * still held and raw_spin_unlock_bh(&local_storage->lock)
282		 * will be done by the caller.
283		 *
284		 * Although the unlock will be done under
285		 * rcu_read_lock(),  it is more intuitive to
286		 * read if the freeing of the storage is done
287		 * after the raw_spin_unlock_bh(&local_storage->lock).
288		 *
289		 * Hence, a "bool free_local_storage" is returned
 290		 * to the caller, which then frees the storage after
291		 * all the RCU grace periods have expired.
292		 */
293	}
294	hlist_del_init_rcu(&selem->snode);
295	if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
296	    SDATA(selem))
297		RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);
298
299	bpf_selem_free(selem, smap, reuse_now);
300
301	if (rcu_access_pointer(local_storage->smap) == smap)
302		RCU_INIT_POINTER(local_storage->smap, NULL);
303
304	return free_local_storage;
305}
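
Whether the whole local_storage must go is decided by hlist_is_singular_node(): the unlink frees the storage only when it removes the last selem. The check has the same shape as this bare-hlist sketch:

struct hnode { struct hnode *next, **pprev; };
struct hhead { struct hnode *first; };

/* true iff n is on the list and is its only entry */
static int is_singular(struct hnode *n, struct hhead *h)
{
	return !n->next && n->pprev == &h->first;
}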
306
307static bool check_storage_bpf_ma(struct bpf_local_storage *local_storage,
308				 struct bpf_local_storage_map *storage_smap,
309				 struct bpf_local_storage_elem *selem)
310{
311
312	struct bpf_local_storage_map *selem_smap;
313
314	/* local_storage->smap may be NULL. If it is, get the bpf_ma
315	 * from any selem in the local_storage->list. The bpf_ma of all
316	 * local_storage and selem should have the same value
317	 * for the same map type.
318	 *
319	 * If the local_storage->list is already empty, the caller will not
 320	 * care about the bpf_ma value because the caller is not
 321	 * responsible for freeing the local_storage.
322	 */
323
324	if (storage_smap)
325		return storage_smap->bpf_ma;
326
327	if (!selem) {
328		struct hlist_node *n;
329
330		n = rcu_dereference_check(hlist_first_rcu(&local_storage->list),
331					  bpf_rcu_lock_held());
332		if (!n)
333			return false;
334
335		selem = hlist_entry(n, struct bpf_local_storage_elem, snode);
336	}
337	selem_smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
338
339	return selem_smap->bpf_ma;
340}
341
342static void bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
343				     bool reuse_now)
344{
345	struct bpf_local_storage_map *storage_smap;
346	struct bpf_local_storage *local_storage;
347	bool bpf_ma, free_local_storage = false;
348	unsigned long flags;
349
350	if (unlikely(!selem_linked_to_storage_lockless(selem)))
351		/* selem has already been unlinked from sk */
352		return;
353
354	local_storage = rcu_dereference_check(selem->local_storage,
355					      bpf_rcu_lock_held());
356	storage_smap = rcu_dereference_check(local_storage->smap,
357					     bpf_rcu_lock_held());
358	bpf_ma = check_storage_bpf_ma(local_storage, storage_smap, selem);
359
360	raw_spin_lock_irqsave(&local_storage->lock, flags);
361	if (likely(selem_linked_to_storage(selem)))
362		free_local_storage = bpf_selem_unlink_storage_nolock(
363			local_storage, selem, true, reuse_now);
364	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
365
366	if (free_local_storage)
367		bpf_local_storage_free(local_storage, storage_smap, bpf_ma, reuse_now);
368}
369
370void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
371				   struct bpf_local_storage_elem *selem)
372{
373	RCU_INIT_POINTER(selem->local_storage, local_storage);
374	hlist_add_head_rcu(&selem->snode, &local_storage->list);
375}
376
377static void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
378{
379	struct bpf_local_storage_map *smap;
380	struct bpf_local_storage_map_bucket *b;
381	unsigned long flags;
382
383	if (unlikely(!selem_linked_to_map_lockless(selem)))
 384		/* selem has already been unlinked from smap */
385		return;
386
387	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
388	b = select_bucket(smap, selem);
389	raw_spin_lock_irqsave(&b->lock, flags);
390	if (likely(selem_linked_to_map(selem)))
391		hlist_del_init_rcu(&selem->map_node);
392	raw_spin_unlock_irqrestore(&b->lock, flags);
393}
394
395void bpf_selem_link_map(struct bpf_local_storage_map *smap,
396			struct bpf_local_storage_elem *selem)
397{
398	struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
399	unsigned long flags;
400
401	raw_spin_lock_irqsave(&b->lock, flags);
402	RCU_INIT_POINTER(SDATA(selem)->smap, smap);
403	hlist_add_head_rcu(&selem->map_node, &b->list);
404	raw_spin_unlock_irqrestore(&b->lock, flags);
405}
406
407void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now)
408{
409	/* Always unlink from map before unlinking from local_storage
410	 * because selem will be freed after successfully unlinked from
411	 * the local_storage.
412	 */
413	bpf_selem_unlink_map(selem);
414	bpf_selem_unlink_storage(selem, reuse_now);
415}
416
417/* If cacheit_lockit is false, this lookup function is lockless */
418struct bpf_local_storage_data *
419bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
420			 struct bpf_local_storage_map *smap,
421			 bool cacheit_lockit)
422{
423	struct bpf_local_storage_data *sdata;
424	struct bpf_local_storage_elem *selem;
425
426	/* Fast path (cache hit) */
427	sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
428				      bpf_rcu_lock_held());
429	if (sdata && rcu_access_pointer(sdata->smap) == smap)
430		return sdata;
431
432	/* Slow path (cache miss) */
433	hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
434				  rcu_read_lock_trace_held())
435		if (rcu_access_pointer(SDATA(selem)->smap) == smap)
436			break;
437
438	if (!selem)
439		return NULL;
440
441	sdata = SDATA(selem);
442	if (cacheit_lockit) {
443		unsigned long flags;
444
445		/* spinlock is needed to avoid racing with the
446		 * parallel delete.  Otherwise, publishing an already
447		 * deleted sdata to the cache will become a use-after-free
448		 * problem in the next bpf_local_storage_lookup().
449		 */
450		raw_spin_lock_irqsave(&local_storage->lock, flags);
451		if (selem_linked_to_storage(selem))
452			rcu_assign_pointer(local_storage->cache[smap->cache_idx],
453					   sdata);
454		raw_spin_unlock_irqrestore(&local_storage->lock, flags);
455	}
456
457	return sdata;
458}
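
The lookup is two-level: a per-storage cache slot indexed by the map's cache_idx gives an O(1) hit, and a list walk backs it up. A simplified sketch of the fast path, with RCU and locking stripped out (illustrative names):

#define CACHE_SIZE 16

struct sdata { const void *smap; /* identity of the owning map */ };

struct storage {
	struct sdata *cache[CACHE_SIZE];
	/* ... plus the full selem list that the slow path walks ... */
};

static struct sdata *lookup_fast(struct storage *s, const void *smap,
				 unsigned int cache_idx)
{
	struct sdata *sd = s->cache[cache_idx];

	/* the slot may hold another map's elem, so verify identity */
	return (sd && sd->smap == smap) ? sd : NULL;
}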
459
460static int check_flags(const struct bpf_local_storage_data *old_sdata,
461		       u64 map_flags)
462{
463	if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
464		/* elem already exists */
465		return -EEXIST;
466
467	if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
468		/* elem doesn't exist, cannot update it */
469		return -ENOENT;
470
471	return 0;
472}
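
check_flags() encodes the standard map-update contract. The same logic as a self-testing userspace program, using the UAPI flag values (BPF_NOEXIST = 1, BPF_EXIST = 2, BPF_F_LOCK = 4):

#include <assert.h>
#include <errno.h>

#define BPF_NOEXIST 1ULL /* create only: fail if the elem exists */
#define BPF_EXIST   2ULL /* update only: fail if the elem is absent */
#define BPF_F_LOCK  4ULL /* orthogonal, masked out before comparing */

static int check(int old_exists, unsigned long long map_flags)
{
	if (old_exists && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		return -EEXIST;
	if (!old_exists && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		return -ENOENT;
	return 0;
}

int main(void)
{
	assert(check(1, BPF_NOEXIST) == -EEXIST);
	assert(check(0, BPF_EXIST | BPF_F_LOCK) == -ENOENT);
	assert(check(0, BPF_NOEXIST) == 0 && check(1, BPF_EXIST) == 0);
	return 0;
}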
473
474int bpf_local_storage_alloc(void *owner,
475			    struct bpf_local_storage_map *smap,
476			    struct bpf_local_storage_elem *first_selem,
477			    gfp_t gfp_flags)
478{
479	struct bpf_local_storage *prev_storage, *storage;
480	struct bpf_local_storage **owner_storage_ptr;
481	int err;
482
483	err = mem_charge(smap, owner, sizeof(*storage));
484	if (err)
485		return err;
486
487	if (smap->bpf_ma) {
488		migrate_disable();
489		storage = bpf_mem_cache_alloc_flags(&smap->storage_ma, gfp_flags);
490		migrate_enable();
491	} else {
492		storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
493					  gfp_flags | __GFP_NOWARN);
494	}
495
496	if (!storage) {
497		err = -ENOMEM;
498		goto uncharge;
499	}
500
501	RCU_INIT_POINTER(storage->smap, smap);
502	INIT_HLIST_HEAD(&storage->list);
503	raw_spin_lock_init(&storage->lock);
504	storage->owner = owner;
505
506	bpf_selem_link_storage_nolock(storage, first_selem);
507	bpf_selem_link_map(smap, first_selem);
508
509	owner_storage_ptr =
510		(struct bpf_local_storage **)owner_storage(smap, owner);
511	/* Publish storage to the owner.
512	 * Instead of using any lock of the kernel object (i.e. owner),
 513	 * cmpxchg will work with any kernel object regardless of
 514	 * what the running context is (bh, irq, etc.).
515	 *
516	 * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage)
517	 * is protected by the storage->lock.  Hence, when freeing
518	 * the owner->storage, the storage->lock must be held before
519	 * setting owner->storage ptr to NULL.
520	 */
521	prev_storage = cmpxchg(owner_storage_ptr, NULL, storage);
522	if (unlikely(prev_storage)) {
523		bpf_selem_unlink_map(first_selem);
524		err = -EAGAIN;
525		goto uncharge;
526
 527		/* Note that even though first_selem was linked to smap's
528		 * bucket->list, first_selem can be freed immediately
529		 * (instead of kfree_rcu) because
530		 * bpf_local_storage_map_free() does a
531		 * synchronize_rcu_mult (waiting for both sleepable and
532		 * normal programs) before walking the bucket->list.
533		 * Hence, no one is accessing selem from the
534		 * bucket->list under rcu_read_lock().
535		 */
536	}
537
538	return 0;
539
540uncharge:
541	bpf_local_storage_free(storage, smap, smap->bpf_ma, true);
542	mem_uncharge(smap, owner, sizeof(*storage));
543	return err;
544}
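
The publish step relies on cmpxchg() so that exactly one racing allocator installs its storage; everyone else backs out with -EAGAIN. The same shape in portable C11 atomics (a sketch of the pattern, not the kernel primitive):

#include <stdatomic.h>

struct storage;

/* Returns 0 if we installed 'mine'; nonzero if another thread won and the
 * caller must free 'mine' and retry against the now-published storage. */
static int publish(_Atomic(struct storage *) *slot, struct storage *mine)
{
	struct storage *expected = NULL;

	return atomic_compare_exchange_strong(slot, &expected, mine) ? 0 : -1;
}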
545
546/* sk cannot be going away because it is linking new elem
547 * to sk->sk_bpf_storage. (i.e. sk->sk_refcnt cannot be 0).
548 * Otherwise, it will become a leak (and other memory issues
549 * during map destruction).
550 */
551struct bpf_local_storage_data *
552bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
553			 void *value, u64 map_flags, gfp_t gfp_flags)
554{
555	struct bpf_local_storage_data *old_sdata = NULL;
556	struct bpf_local_storage_elem *alloc_selem, *selem = NULL;
557	struct bpf_local_storage *local_storage;
558	unsigned long flags;
559	int err;
560
561	/* BPF_EXIST and BPF_NOEXIST cannot be both set */
562	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
563	    /* BPF_F_LOCK can only be used in a value with spin_lock */
564	    unlikely((map_flags & BPF_F_LOCK) &&
565		     !btf_record_has_field(smap->map.record, BPF_SPIN_LOCK)))
566		return ERR_PTR(-EINVAL);
567
568	if (gfp_flags == GFP_KERNEL && (map_flags & ~BPF_F_LOCK) != BPF_NOEXIST)
569		return ERR_PTR(-EINVAL);
570
571	local_storage = rcu_dereference_check(*owner_storage(smap, owner),
572					      bpf_rcu_lock_held());
573	if (!local_storage || hlist_empty(&local_storage->list)) {
574		/* Very first elem for the owner */
575		err = check_flags(NULL, map_flags);
576		if (err)
577			return ERR_PTR(err);
578
579		selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
580		if (!selem)
581			return ERR_PTR(-ENOMEM);
582
583		err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
584		if (err) {
585			bpf_selem_free(selem, smap, true);
586			mem_uncharge(smap, owner, smap->elem_size);
587			return ERR_PTR(err);
588		}
589
590		return SDATA(selem);
591	}
592
593	if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
594		/* Hoping to find an old_sdata to do inline update
595		 * such that it can avoid taking the local_storage->lock
596		 * and changing the lists.
597		 */
598		old_sdata =
599			bpf_local_storage_lookup(local_storage, smap, false);
600		err = check_flags(old_sdata, map_flags);
601		if (err)
602			return ERR_PTR(err);
603		if (old_sdata && selem_linked_to_storage_lockless(SELEM(old_sdata))) {
604			copy_map_value_locked(&smap->map, old_sdata->data,
605					      value, false);
606			return old_sdata;
607		}
608	}
609
610	/* A lookup has just been done before and concluded a new selem is
 611	 * needed. An unnecessary alloc is therefore unlikely.
612	 */
613	alloc_selem = selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
614	if (!alloc_selem)
615		return ERR_PTR(-ENOMEM);
616
617	raw_spin_lock_irqsave(&local_storage->lock, flags);
618
619	/* Recheck local_storage->list under local_storage->lock */
620	if (unlikely(hlist_empty(&local_storage->list))) {
621		/* A parallel del is happening and local_storage is going
622		 * away.  It has just been checked before, so very
623		 * unlikely.  Return instead of retry to keep things
624		 * simple.
625		 */
626		err = -EAGAIN;
627		goto unlock;
628	}
629
630	old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
631	err = check_flags(old_sdata, map_flags);
632	if (err)
633		goto unlock;
634
635	if (old_sdata && (map_flags & BPF_F_LOCK)) {
636		copy_map_value_locked(&smap->map, old_sdata->data, value,
637				      false);
638		selem = SELEM(old_sdata);
639		goto unlock;
640	}
641
642	alloc_selem = NULL;
643	/* First, link the new selem to the map */
644	bpf_selem_link_map(smap, selem);
645
646	/* Second, link (and publish) the new selem to local_storage */
647	bpf_selem_link_storage_nolock(local_storage, selem);
648
649	/* Third, remove old selem, SELEM(old_sdata) */
650	if (old_sdata) {
651		bpf_selem_unlink_map(SELEM(old_sdata));
652		bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
653						true, false);
654	}
655
656unlock:
657	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
658	if (alloc_selem) {
659		mem_uncharge(smap, owner, smap->elem_size);
660		bpf_selem_free(alloc_selem, smap, true);
661	}
662	return err ? ERR_PTR(err) : SDATA(selem);
663}
664
665static u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
666{
667	u64 min_usage = U64_MAX;
668	u16 i, res = 0;
669
670	spin_lock(&cache->idx_lock);
671
672	for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
673		if (cache->idx_usage_counts[i] < min_usage) {
674			min_usage = cache->idx_usage_counts[i];
675			res = i;
676
677			/* Found a free cache_idx */
678			if (!min_usage)
679				break;
680		}
681	}
682	cache->idx_usage_counts[res]++;
683
684	spin_unlock(&cache->idx_lock);
685
686	return res;
687}
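
The selection loop reduces to "pick the least-used slot, stop early at a free one". Extracted as a standalone, testable function (illustrative):

#include <assert.h>
#include <stdint.h>

static unsigned int pick_idx(const uint64_t *counts, unsigned int n)
{
	uint64_t min_usage = UINT64_MAX;
	unsigned int i, res = 0;

	for (i = 0; i < n; i++) {
		if (counts[i] < min_usage) {
			min_usage = counts[i];
			res = i;
			if (!min_usage)
				break; /* a free slot cannot be beaten */
		}
	}
	return res;
}

int main(void)
{
	uint64_t counts[] = { 2, 0, 3 };

	assert(pick_idx(counts, 3) == 1); /* slot 1 is unused */
	return 0;
}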
688
689static void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
690					     u16 idx)
691{
692	spin_lock(&cache->idx_lock);
693	cache->idx_usage_counts[idx]--;
694	spin_unlock(&cache->idx_lock);
695}
696
697int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
698{
699	if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
700	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
701	    attr->max_entries ||
702	    attr->key_size != sizeof(int) || !attr->value_size ||
703	    /* Enforce BTF for userspace sk dumping */
704	    !attr->btf_key_type_id || !attr->btf_value_type_id)
705		return -EINVAL;
706
707	if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
708		return -E2BIG;
709
710	return 0;
711}
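
Seen from userspace, these checks pin down how such a map must be created: BPF_F_NO_PREALLOC is mandatory, max_entries must be 0, the key is a 4-byte int, and BTF type ids for key and value are required. A sketch using libbpf's bpf_map_create(), assuming a BTF object is already loaded and btf_fd, key_tid, val_tid describe an 'int' key and the value struct (illustrative, error handling omitted):

#include <bpf/bpf.h>

int create_sk_storage(int btf_fd, unsigned int key_tid, unsigned int val_tid,
		      unsigned int value_size)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.map_flags = BPF_F_NO_PREALLOC,	/* required by the check */
		.btf_fd = btf_fd,		/* BTF is enforced */
		.btf_key_type_id = key_tid,	/* must be a 32-bit int */
		.btf_value_type_id = val_tid,
	);

	/* key_size == sizeof(int), max_entries == 0 */
	return bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_stg",
			      sizeof(int), value_size, 0, &opts);
}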
712
713int bpf_local_storage_map_check_btf(const struct bpf_map *map,
714				    const struct btf *btf,
715				    const struct btf_type *key_type,
716				    const struct btf_type *value_type)
717{
718	u32 int_data;
719
720	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
721		return -EINVAL;
722
723	int_data = *(u32 *)(key_type + 1);
724	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
725		return -EINVAL;
726
727	return 0;
728}
729
730void bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
731{
732	struct bpf_local_storage_map *storage_smap;
733	struct bpf_local_storage_elem *selem;
734	bool bpf_ma, free_storage = false;
735	struct hlist_node *n;
736	unsigned long flags;
737
738	storage_smap = rcu_dereference_check(local_storage->smap, bpf_rcu_lock_held());
739	bpf_ma = check_storage_bpf_ma(local_storage, storage_smap, NULL);
740
741	/* Neither the bpf_prog nor the bpf_map's syscall
742	 * could be modifying the local_storage->list now.
743	 * Thus, no elem can be added to or deleted from the
744	 * local_storage->list by the bpf_prog or by the bpf_map's syscall.
745	 *
746	 * It is racing with bpf_local_storage_map_free() alone
747	 * when unlinking elem from the local_storage->list and
748	 * the map's bucket->list.
749	 */
750	raw_spin_lock_irqsave(&local_storage->lock, flags);
751	hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
752		/* Always unlink from map before unlinking from
753		 * local_storage.
754		 */
755		bpf_selem_unlink_map(selem);
756		/* If local_storage list has only one element, the
757		 * bpf_selem_unlink_storage_nolock() will return true.
 758		 * Otherwise, it will return false. The loop intends to
 759		 * remove all local storage, so the last iteration will
 760		 * set free_storage to true.
761		 */
762		free_storage = bpf_selem_unlink_storage_nolock(
763			local_storage, selem, true, true);
764	}
765	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
766
767	if (free_storage)
768		bpf_local_storage_free(local_storage, storage_smap, bpf_ma, true);
769}
770
771u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map)
772{
773	struct bpf_local_storage_map *smap = (struct bpf_local_storage_map *)map;
774	u64 usage = sizeof(*smap);
775
 776	/* The dynamically allocated selems are not counted currently. */
777	usage += sizeof(*smap->buckets) * (1ULL << smap->bucket_log);
778	return usage;
779}
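
A worked example of this accounting with made-up sizes (the real struct sizes vary by config): with bucket_log = 3 there are 8 buckets, and only the map struct plus the bucket array are counted; the selems are deliberately excluded.

#include <stdio.h>

int main(void)
{
	/* illustrative sizes, not the real ones */
	unsigned long long smap_size = 1024, bucket_size = 24, bucket_log = 3;

	printf("usage = %llu bytes\n",
	       smap_size + bucket_size * (1ULL << bucket_log));
	return 0;
}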
780
781/* When bpf_ma == true, the bpf_mem_alloc is used to allocate and free memory.
 782 * A deadlock-free allocator is useful for storage whose owner PTR_TO_BTF_ID
 783 * the bpf prog can easily get hold of in any context, e.g. bpf_get_current_task_btf.
 784 * The task and cgroup storage fall into this case. The bpf_mem_alloc reuses
 785 * memory immediately. To be reuse-immediate safe, the owner destruction
 786 * code path needs to go through an rcu grace period before calling
787 * bpf_local_storage_destroy().
788 *
789 * When bpf_ma == false, the kmalloc and kfree are used.
790 */
791struct bpf_map *
792bpf_local_storage_map_alloc(union bpf_attr *attr,
793			    struct bpf_local_storage_cache *cache,
794			    bool bpf_ma)
795{
796	struct bpf_local_storage_map *smap;
797	unsigned int i;
798	u32 nbuckets;
799	int err;
800
801	smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE);
802	if (!smap)
803		return ERR_PTR(-ENOMEM);
804	bpf_map_init_from_attr(&smap->map, attr);
805
806	nbuckets = roundup_pow_of_two(num_possible_cpus());
 807	/* Use at least 2 buckets: select_bucket() is undefined behavior with 1 bucket */
808	nbuckets = max_t(u32, 2, nbuckets);
809	smap->bucket_log = ilog2(nbuckets);
810
811	smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets),
812					 nbuckets, GFP_USER | __GFP_NOWARN);
813	if (!smap->buckets) {
814		err = -ENOMEM;
815		goto free_smap;
816	}
817
818	for (i = 0; i < nbuckets; i++) {
819		INIT_HLIST_HEAD(&smap->buckets[i].list);
820		raw_spin_lock_init(&smap->buckets[i].lock);
821	}
822
823	smap->elem_size = offsetof(struct bpf_local_storage_elem,
824				   sdata.data[attr->value_size]);
825
826	smap->bpf_ma = bpf_ma;
827	if (bpf_ma) {
828		err = bpf_mem_alloc_init(&smap->selem_ma, smap->elem_size, false);
829		if (err)
830			goto free_smap;
831
832		err = bpf_mem_alloc_init(&smap->storage_ma, sizeof(struct bpf_local_storage), false);
833		if (err) {
834			bpf_mem_alloc_destroy(&smap->selem_ma);
835			goto free_smap;
836		}
837	}
838
839	smap->cache_idx = bpf_local_storage_cache_idx_get(cache);
840	return &smap->map;
841
842free_smap:
843	kvfree(smap->buckets);
844	bpf_map_area_free(smap);
845	return ERR_PTR(err);
846}
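
smap->elem_size is computed with offsetof() over a flexible array member, so one allocation covers the elem header plus value_size bytes of inline data. A sketch of the same sizing trick with a simplified layout:

#include <stddef.h>
#include <stdio.h>

struct sdata { void *smap; char data[]; }; /* flexible array member */
struct elem  { void *list_links[4]; struct sdata sdata; };

int main(void)
{
	size_t value_size = 64;

	/* header + inline value: one allocation, no separate data pointer */
	printf("elem_size = %zu\n",
	       offsetof(struct elem, sdata.data) + value_size);
	return 0;
}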
847
848void bpf_local_storage_map_free(struct bpf_map *map,
849				struct bpf_local_storage_cache *cache,
850				int __percpu *busy_counter)
851{
852	struct bpf_local_storage_map_bucket *b;
853	struct bpf_local_storage_elem *selem;
854	struct bpf_local_storage_map *smap;
855	unsigned int i;
856
857	smap = (struct bpf_local_storage_map *)map;
858	bpf_local_storage_cache_idx_free(cache, smap->cache_idx);
859
860	/* Note that this map might be concurrently cloned from
861	 * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
862	 * RCU read section to finish before proceeding. New RCU
863	 * read sections should be prevented via bpf_map_inc_not_zero.
864	 */
865	synchronize_rcu();
866
867	/* bpf prog and the userspace can no longer access this map
868	 * now.  No new selem (of this map) can be added
869	 * to the owner->storage or to the map bucket's list.
870	 *
871	 * The elem of this map can be cleaned up here
872	 * or when the storage is freed e.g.
873	 * by bpf_sk_storage_free() during __sk_destruct().
874	 */
875	for (i = 0; i < (1U << smap->bucket_log); i++) {
876		b = &smap->buckets[i];
877
878		rcu_read_lock();
879		/* No one is adding to b->list now */
880		while ((selem = hlist_entry_safe(
881				rcu_dereference_raw(hlist_first_rcu(&b->list)),
882				struct bpf_local_storage_elem, map_node))) {
883			if (busy_counter) {
884				migrate_disable();
885				this_cpu_inc(*busy_counter);
886			}
887			bpf_selem_unlink(selem, true);
888			if (busy_counter) {
889				this_cpu_dec(*busy_counter);
890				migrate_enable();
891			}
892			cond_resched_rcu();
893		}
894		rcu_read_unlock();
895	}
896
897	/* While freeing the storage we may still need to access the map.
898	 *
899	 * e.g. when bpf_sk_storage_free() has unlinked selem from the map
900	 * which then made the above while((selem = ...)) loop
901	 * exit immediately.
902	 *
903	 * However, while freeing the storage one still needs to access the
904	 * smap->elem_size to do the uncharging in
905	 * bpf_selem_unlink_storage_nolock().
906	 *
907	 * Hence, wait another rcu grace period for the storage to be freed.
908	 */
909	synchronize_rcu();
910
911	if (smap->bpf_ma) {
912		bpf_mem_alloc_destroy(&smap->selem_ma);
913		bpf_mem_alloc_destroy(&smap->storage_ma);
914	}
915	kvfree(smap->buckets);
916	bpf_map_area_free(smap);
917}
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright (c) 2019 Facebook  */
  3#include <linux/rculist.h>
  4#include <linux/list.h>
  5#include <linux/hash.h>
  6#include <linux/types.h>
  7#include <linux/spinlock.h>
  8#include <linux/bpf.h>
  9#include <linux/btf_ids.h>
 10#include <linux/bpf_local_storage.h>
 11#include <net/sock.h>
 12#include <uapi/linux/sock_diag.h>
 13#include <uapi/linux/btf.h>
 14#include <linux/rcupdate.h>
 15#include <linux/rcupdate_trace.h>
 16#include <linux/rcupdate_wait.h>
 17
 18#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)
 19
 20static struct bpf_local_storage_map_bucket *
 21select_bucket(struct bpf_local_storage_map *smap,
 22	      struct bpf_local_storage_elem *selem)
 23{
 24	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
 25}
 26
 27static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
 28{
 29	struct bpf_map *map = &smap->map;
 30
 31	if (!map->ops->map_local_storage_charge)
 32		return 0;
 33
 34	return map->ops->map_local_storage_charge(smap, owner, size);
 35}
 36
 37static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
 38			 u32 size)
 39{
 40	struct bpf_map *map = &smap->map;
 41
 42	if (map->ops->map_local_storage_uncharge)
 43		map->ops->map_local_storage_uncharge(smap, owner, size);
 44}
 45
 46static struct bpf_local_storage __rcu **
 47owner_storage(struct bpf_local_storage_map *smap, void *owner)
 48{
 49	struct bpf_map *map = &smap->map;
 50
 51	return map->ops->map_owner_storage_ptr(owner);
 52}
 53
 54static bool selem_linked_to_storage_lockless(const struct bpf_local_storage_elem *selem)
 55{
 56	return !hlist_unhashed_lockless(&selem->snode);
 57}
 58
 59static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
 60{
 61	return !hlist_unhashed(&selem->snode);
 62}
 63
 64static bool selem_linked_to_map_lockless(const struct bpf_local_storage_elem *selem)
 65{
 66	return !hlist_unhashed_lockless(&selem->map_node);
 67}
 68
 69static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
 70{
 71	return !hlist_unhashed(&selem->map_node);
 72}
 73
 74struct bpf_local_storage_elem *
 75bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
 76		void *value, bool charge_mem, bool swap_uptrs, gfp_t gfp_flags)
 77{
 78	struct bpf_local_storage_elem *selem;
 79
 80	if (charge_mem && mem_charge(smap, owner, smap->elem_size))
 81		return NULL;
 82
 83	if (smap->bpf_ma) {
 84		migrate_disable();
 85		selem = bpf_mem_cache_alloc_flags(&smap->selem_ma, gfp_flags);
 86		migrate_enable();
 87		if (selem)
  88			/* Keep the original bpf_map_kzalloc behavior
  89			 * from before switching to bpf_mem_cache_alloc.
  90			 *
  91			 * No need to use zero_map_value. The bpf_selem_free()
  92			 * only does bpf_mem_cache_free when no other
  93			 * bpf prog is using the selem.
  94			 */
 95			memset(SDATA(selem)->data, 0, smap->map.value_size);
 96	} else {
 97		selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
 98					gfp_flags | __GFP_NOWARN);
 99	}
100
101	if (selem) {
102		if (value) {
103			/* No need to call check_and_init_map_value as memory is zero init */
104			copy_map_value(&smap->map, SDATA(selem)->data, value);
105			if (swap_uptrs)
106				bpf_obj_swap_uptrs(smap->map.record, SDATA(selem)->data, value);
107		}
108		return selem;
109	}
110
111	if (charge_mem)
112		mem_uncharge(smap, owner, smap->elem_size);
113
114	return NULL;
115}
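
The swap_uptrs step is new relative to the v6.8 code above: copy_map_value() skips the BTF-managed special fields, so the uptr slots are handed off by swapping rather than copying. A rough sketch of the inferred hand-off (illustrative types; the real field layout comes from smap->map.record):

struct value { long scalar; void *uptr; };

static void swap_uptr(struct value *selem_val, struct value *user_val)
{
	void *tmp = selem_val->uptr;	  /* NULL: selem memory was zeroed */

	selem_val->uptr = user_val->uptr; /* selem takes ownership */
	user_val->uptr = tmp;		  /* source no longer owns it */
}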
116
117/* rcu tasks trace callback for bpf_ma == false */
118static void __bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
119{
120	struct bpf_local_storage *local_storage;
121
122	/* If RCU Tasks Trace grace period implies RCU grace period, do
123	 * kfree(), else do kfree_rcu().
124	 */
125	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
126	if (rcu_trace_implies_rcu_gp())
127		kfree(local_storage);
128	else
129		kfree_rcu(local_storage, rcu);
130}
131
132static void bpf_local_storage_free_rcu(struct rcu_head *rcu)
133{
134	struct bpf_local_storage *local_storage;
135
136	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
137	bpf_mem_cache_raw_free(local_storage);
138}
139
140static void bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
141{
142	if (rcu_trace_implies_rcu_gp())
143		bpf_local_storage_free_rcu(rcu);
144	else
145		call_rcu(rcu, bpf_local_storage_free_rcu);
146}
147
148/* Handle bpf_ma == false */
149static void __bpf_local_storage_free(struct bpf_local_storage *local_storage,
150				     bool vanilla_rcu)
151{
152	if (vanilla_rcu)
153		kfree_rcu(local_storage, rcu);
154	else
155		call_rcu_tasks_trace(&local_storage->rcu,
156				     __bpf_local_storage_free_trace_rcu);
157}
158
159static void bpf_local_storage_free(struct bpf_local_storage *local_storage,
160				   struct bpf_local_storage_map *smap,
161				   bool bpf_ma, bool reuse_now)
162{
163	if (!local_storage)
164		return;
165
166	if (!bpf_ma) {
167		__bpf_local_storage_free(local_storage, reuse_now);
168		return;
169	}
170
171	if (!reuse_now) {
172		call_rcu_tasks_trace(&local_storage->rcu,
173				     bpf_local_storage_free_trace_rcu);
174		return;
175	}
176
177	if (smap) {
178		migrate_disable();
179		bpf_mem_cache_free(&smap->storage_ma, local_storage);
180		migrate_enable();
181	} else {
182		/* smap could be NULL if the selem that triggered
183		 * this 'local_storage' creation had been long gone.
184		 * In this case, directly do call_rcu().
185		 */
186		call_rcu(&local_storage->rcu, bpf_local_storage_free_rcu);
187	}
188}
189
190/* rcu tasks trace callback for bpf_ma == false */
191static void __bpf_selem_free_trace_rcu(struct rcu_head *rcu)
192{
193	struct bpf_local_storage_elem *selem;
194
195	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
196	if (rcu_trace_implies_rcu_gp())
197		kfree(selem);
198	else
199		kfree_rcu(selem, rcu);
200}
201
202/* Handle bpf_ma == false */
203static void __bpf_selem_free(struct bpf_local_storage_elem *selem,
204			     bool vanilla_rcu)
205{
206	if (vanilla_rcu)
207		kfree_rcu(selem, rcu);
208	else
209		call_rcu_tasks_trace(&selem->rcu, __bpf_selem_free_trace_rcu);
210}
211
212static void bpf_selem_free_rcu(struct rcu_head *rcu)
213{
214	struct bpf_local_storage_elem *selem;
215	struct bpf_local_storage_map *smap;
216
217	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
218	/* The bpf_local_storage_map_free will wait for rcu_barrier */
219	smap = rcu_dereference_check(SDATA(selem)->smap, 1);
220	bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
221	bpf_mem_cache_raw_free(selem);
222}
223
224static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
225{
226	if (rcu_trace_implies_rcu_gp())
227		bpf_selem_free_rcu(rcu);
228	else
229		call_rcu(rcu, bpf_selem_free_rcu);
230}
231
232void bpf_selem_free(struct bpf_local_storage_elem *selem,
233		    struct bpf_local_storage_map *smap,
234		    bool reuse_now)
235{
236	if (!smap->bpf_ma) {
237		/* Only task storage has uptrs and task storage
238		 * has moved to bpf_mem_alloc. Meaning smap->bpf_ma == true
239		 * for task storage, so this bpf_obj_free_fields() won't unpin
240		 * any uptr.
241		 */
242		bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
243		__bpf_selem_free(selem, reuse_now);
244		return;
245	}
246
247	if (reuse_now) {
248		/* reuse_now == true only happens when the storage owner
249		 * (e.g. task_struct) is being destructed or the map itself
250		 * is being destructed (ie map_free). In both cases,
251		 * no bpf prog can have a hold on the selem. It is
252		 * safe to unpin the uptrs and free the selem now.
253		 */
254		bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
255		/* Instead of using the vanilla call_rcu(),
256		 * bpf_mem_cache_free will be able to reuse selem
257		 * immediately.
258		 */
259		migrate_disable();
260		bpf_mem_cache_free(&smap->selem_ma, selem);
261		migrate_enable();
262		return;
263	}
264
265	call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);
266}
267
268static void bpf_selem_free_list(struct hlist_head *list, bool reuse_now)
269{
270	struct bpf_local_storage_elem *selem;
271	struct bpf_local_storage_map *smap;
272	struct hlist_node *n;
273
274	/* The "_safe" iteration is needed.
275	 * The loop is not removing the selem from the list
276	 * but bpf_selem_free will use the selem->rcu_head
 277	 * which is in a union with the selem->free_node.
278	 */
279	hlist_for_each_entry_safe(selem, n, list, free_node) {
280		smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
281		bpf_selem_free(selem, smap, reuse_now);
282	}
283}
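
The _safe iteration matters because freeing a selem can immediately reuse the memory holding its free_node link. The underlying pattern, shown on a plain singly linked list:

struct node { struct node *next; };

static void free_all(struct node *head, void (*free_one)(struct node *))
{
	struct node *n = head, *next;

	while (n) {
		next = n->next;	/* save the successor first ...          */
		free_one(n);	/* ... because this may reuse n's memory */
		n = next;
	}
}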
284
285/* local_storage->lock must be held and selem->local_storage == local_storage.
286 * The caller must ensure selem->smap is still valid to be
287 * dereferenced for its smap->elem_size and smap->cache_idx.
288 */
289static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
290					    struct bpf_local_storage_elem *selem,
291					    bool uncharge_mem, struct hlist_head *free_selem_list)
292{
293	struct bpf_local_storage_map *smap;
294	bool free_local_storage;
295	void *owner;
296
297	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
298	owner = local_storage->owner;
299
300	/* All uncharging on the owner must be done first.
301	 * The owner may be freed once the last selem is unlinked
302	 * from local_storage.
303	 */
304	if (uncharge_mem)
305		mem_uncharge(smap, owner, smap->elem_size);
306
307	free_local_storage = hlist_is_singular_node(&selem->snode,
308						    &local_storage->list);
309	if (free_local_storage) {
310		mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
311		local_storage->owner = NULL;
312
313		/* After this RCU_INIT, owner may be freed and cannot be used */
314		RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);
315
316		/* local_storage is not freed now.  local_storage->lock is
317		 * still held and raw_spin_unlock_bh(&local_storage->lock)
318		 * will be done by the caller.
319		 *
320		 * Although the unlock will be done under
321		 * rcu_read_lock(),  it is more intuitive to
322		 * read if the freeing of the storage is done
323		 * after the raw_spin_unlock_bh(&local_storage->lock).
324		 *
325		 * Hence, a "bool free_local_storage" is returned
 326		 * to the caller, which then frees the storage after
327		 * all the RCU grace periods have expired.
328		 */
329	}
330	hlist_del_init_rcu(&selem->snode);
331	if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
332	    SDATA(selem))
333		RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);
334
335	hlist_add_head(&selem->free_node, free_selem_list);
336
337	if (rcu_access_pointer(local_storage->smap) == smap)
338		RCU_INIT_POINTER(local_storage->smap, NULL);
339
340	return free_local_storage;
341}
342
343static bool check_storage_bpf_ma(struct bpf_local_storage *local_storage,
344				 struct bpf_local_storage_map *storage_smap,
345				 struct bpf_local_storage_elem *selem)
346{
347
348	struct bpf_local_storage_map *selem_smap;
349
350	/* local_storage->smap may be NULL. If it is, get the bpf_ma
351	 * from any selem in the local_storage->list. The bpf_ma of all
352	 * local_storage and selem should have the same value
353	 * for the same map type.
354	 *
355	 * If the local_storage->list is already empty, the caller will not
 356	 * care about the bpf_ma value because the caller is not
 357	 * responsible for freeing the local_storage.
358	 */
359
360	if (storage_smap)
361		return storage_smap->bpf_ma;
362
363	if (!selem) {
364		struct hlist_node *n;
365
366		n = rcu_dereference_check(hlist_first_rcu(&local_storage->list),
367					  bpf_rcu_lock_held());
368		if (!n)
369			return false;
370
371		selem = hlist_entry(n, struct bpf_local_storage_elem, snode);
372	}
373	selem_smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
374
375	return selem_smap->bpf_ma;
376}
377
378static void bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
379				     bool reuse_now)
380{
381	struct bpf_local_storage_map *storage_smap;
382	struct bpf_local_storage *local_storage;
383	bool bpf_ma, free_local_storage = false;
384	HLIST_HEAD(selem_free_list);
385	unsigned long flags;
386
387	if (unlikely(!selem_linked_to_storage_lockless(selem)))
388		/* selem has already been unlinked from sk */
389		return;
390
391	local_storage = rcu_dereference_check(selem->local_storage,
392					      bpf_rcu_lock_held());
393	storage_smap = rcu_dereference_check(local_storage->smap,
394					     bpf_rcu_lock_held());
395	bpf_ma = check_storage_bpf_ma(local_storage, storage_smap, selem);
396
397	raw_spin_lock_irqsave(&local_storage->lock, flags);
398	if (likely(selem_linked_to_storage(selem)))
399		free_local_storage = bpf_selem_unlink_storage_nolock(
400			local_storage, selem, true, &selem_free_list);
401	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
402
403	bpf_selem_free_list(&selem_free_list, reuse_now);
404
405	if (free_local_storage)
406		bpf_local_storage_free(local_storage, storage_smap, bpf_ma, reuse_now);
407}
408
409void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
410				   struct bpf_local_storage_elem *selem)
411{
412	RCU_INIT_POINTER(selem->local_storage, local_storage);
413	hlist_add_head_rcu(&selem->snode, &local_storage->list);
414}
415
416static void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
417{
418	struct bpf_local_storage_map *smap;
419	struct bpf_local_storage_map_bucket *b;
420	unsigned long flags;
421
422	if (unlikely(!selem_linked_to_map_lockless(selem)))
 423		/* selem has already been unlinked from smap */
424		return;
425
426	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
427	b = select_bucket(smap, selem);
428	raw_spin_lock_irqsave(&b->lock, flags);
429	if (likely(selem_linked_to_map(selem)))
430		hlist_del_init_rcu(&selem->map_node);
431	raw_spin_unlock_irqrestore(&b->lock, flags);
432}
433
434void bpf_selem_link_map(struct bpf_local_storage_map *smap,
435			struct bpf_local_storage_elem *selem)
436{
437	struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
438	unsigned long flags;
439
440	raw_spin_lock_irqsave(&b->lock, flags);
441	RCU_INIT_POINTER(SDATA(selem)->smap, smap);
442	hlist_add_head_rcu(&selem->map_node, &b->list);
443	raw_spin_unlock_irqrestore(&b->lock, flags);
444}
445
446void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now)
447{
448	/* Always unlink from map before unlinking from local_storage
449	 * because selem will be freed after successfully unlinked from
450	 * the local_storage.
451	 */
452	bpf_selem_unlink_map(selem);
453	bpf_selem_unlink_storage(selem, reuse_now);
454}
455
456void __bpf_local_storage_insert_cache(struct bpf_local_storage *local_storage,
457				      struct bpf_local_storage_map *smap,
458				      struct bpf_local_storage_elem *selem)
459{
460	unsigned long flags;
461
462	/* spinlock is needed to avoid racing with the
463	 * parallel delete.  Otherwise, publishing an already
464	 * deleted sdata to the cache will become a use-after-free
465	 * problem in the next bpf_local_storage_lookup().
466	 */
467	raw_spin_lock_irqsave(&local_storage->lock, flags);
468	if (selem_linked_to_storage(selem))
469		rcu_assign_pointer(local_storage->cache[smap->cache_idx], SDATA(selem));
470	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
471}
472
473static int check_flags(const struct bpf_local_storage_data *old_sdata,
474		       u64 map_flags)
475{
476	if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
477		/* elem already exists */
478		return -EEXIST;
479
480	if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
481		/* elem doesn't exist, cannot update it */
482		return -ENOENT;
483
484	return 0;
485}
486
487int bpf_local_storage_alloc(void *owner,
488			    struct bpf_local_storage_map *smap,
489			    struct bpf_local_storage_elem *first_selem,
490			    gfp_t gfp_flags)
491{
492	struct bpf_local_storage *prev_storage, *storage;
493	struct bpf_local_storage **owner_storage_ptr;
494	int err;
495
496	err = mem_charge(smap, owner, sizeof(*storage));
497	if (err)
498		return err;
499
500	if (smap->bpf_ma) {
501		migrate_disable();
502		storage = bpf_mem_cache_alloc_flags(&smap->storage_ma, gfp_flags);
503		migrate_enable();
504	} else {
505		storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
506					  gfp_flags | __GFP_NOWARN);
507	}
508
509	if (!storage) {
510		err = -ENOMEM;
511		goto uncharge;
512	}
513
514	RCU_INIT_POINTER(storage->smap, smap);
515	INIT_HLIST_HEAD(&storage->list);
516	raw_spin_lock_init(&storage->lock);
517	storage->owner = owner;
518
519	bpf_selem_link_storage_nolock(storage, first_selem);
520	bpf_selem_link_map(smap, first_selem);
521
522	owner_storage_ptr =
523		(struct bpf_local_storage **)owner_storage(smap, owner);
524	/* Publish storage to the owner.
525	 * Instead of using any lock of the kernel object (i.e. owner),
 526	 * cmpxchg will work with any kernel object regardless of
 527	 * what the running context is (bh, irq, etc.).
528	 *
529	 * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage)
530	 * is protected by the storage->lock.  Hence, when freeing
531	 * the owner->storage, the storage->lock must be held before
532	 * setting owner->storage ptr to NULL.
533	 */
534	prev_storage = cmpxchg(owner_storage_ptr, NULL, storage);
535	if (unlikely(prev_storage)) {
536		bpf_selem_unlink_map(first_selem);
537		err = -EAGAIN;
538		goto uncharge;
539
 540		/* Note that even though first_selem was linked to smap's
541		 * bucket->list, first_selem can be freed immediately
542		 * (instead of kfree_rcu) because
543		 * bpf_local_storage_map_free() does a
544		 * synchronize_rcu_mult (waiting for both sleepable and
545		 * normal programs) before walking the bucket->list.
546		 * Hence, no one is accessing selem from the
547		 * bucket->list under rcu_read_lock().
548		 */
549	}
550
551	return 0;
552
553uncharge:
554	bpf_local_storage_free(storage, smap, smap->bpf_ma, true);
555	mem_uncharge(smap, owner, sizeof(*storage));
556	return err;
557}
558
559/* sk cannot be going away because it is linking new elem
560 * to sk->sk_bpf_storage. (i.e. sk->sk_refcnt cannot be 0).
561 * Otherwise, it will become a leak (and other memory issues
562 * during map destruction).
563 */
564struct bpf_local_storage_data *
565bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
566			 void *value, u64 map_flags, bool swap_uptrs, gfp_t gfp_flags)
567{
568	struct bpf_local_storage_data *old_sdata = NULL;
569	struct bpf_local_storage_elem *alloc_selem, *selem = NULL;
570	struct bpf_local_storage *local_storage;
571	HLIST_HEAD(old_selem_free_list);
572	unsigned long flags;
573	int err;
574
575	/* BPF_EXIST and BPF_NOEXIST cannot be both set */
576	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
577	    /* BPF_F_LOCK can only be used in a value with spin_lock */
578	    unlikely((map_flags & BPF_F_LOCK) &&
579		     !btf_record_has_field(smap->map.record, BPF_SPIN_LOCK)))
580		return ERR_PTR(-EINVAL);
581
582	if (gfp_flags == GFP_KERNEL && (map_flags & ~BPF_F_LOCK) != BPF_NOEXIST)
583		return ERR_PTR(-EINVAL);
584
585	local_storage = rcu_dereference_check(*owner_storage(smap, owner),
586					      bpf_rcu_lock_held());
587	if (!local_storage || hlist_empty(&local_storage->list)) {
588		/* Very first elem for the owner */
589		err = check_flags(NULL, map_flags);
590		if (err)
591			return ERR_PTR(err);
592
593		selem = bpf_selem_alloc(smap, owner, value, true, swap_uptrs, gfp_flags);
594		if (!selem)
595			return ERR_PTR(-ENOMEM);
596
597		err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
598		if (err) {
599			bpf_selem_free(selem, smap, true);
600			mem_uncharge(smap, owner, smap->elem_size);
601			return ERR_PTR(err);
602		}
603
604		return SDATA(selem);
605	}
606
607	if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
608		/* Hoping to find an old_sdata to do inline update
609		 * such that it can avoid taking the local_storage->lock
610		 * and changing the lists.
611		 */
612		old_sdata =
613			bpf_local_storage_lookup(local_storage, smap, false);
614		err = check_flags(old_sdata, map_flags);
615		if (err)
616			return ERR_PTR(err);
617		if (old_sdata && selem_linked_to_storage_lockless(SELEM(old_sdata))) {
618			copy_map_value_locked(&smap->map, old_sdata->data,
619					      value, false);
620			return old_sdata;
621		}
622	}
623
624	/* A lookup has just been done before and concluded a new selem is
 625	 * needed. An unnecessary alloc is therefore unlikely.
626	 */
627	alloc_selem = selem = bpf_selem_alloc(smap, owner, value, true, swap_uptrs, gfp_flags);
628	if (!alloc_selem)
629		return ERR_PTR(-ENOMEM);
630
631	raw_spin_lock_irqsave(&local_storage->lock, flags);
632
633	/* Recheck local_storage->list under local_storage->lock */
634	if (unlikely(hlist_empty(&local_storage->list))) {
635		/* A parallel del is happening and local_storage is going
636		 * away.  It has just been checked before, so very
637		 * unlikely.  Return instead of retry to keep things
638		 * simple.
639		 */
640		err = -EAGAIN;
641		goto unlock;
642	}
643
644	old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
645	err = check_flags(old_sdata, map_flags);
646	if (err)
647		goto unlock;
648
649	if (old_sdata && (map_flags & BPF_F_LOCK)) {
650		copy_map_value_locked(&smap->map, old_sdata->data, value,
651				      false);
652		selem = SELEM(old_sdata);
653		goto unlock;
654	}
655
656	alloc_selem = NULL;
657	/* First, link the new selem to the map */
658	bpf_selem_link_map(smap, selem);
659
660	/* Second, link (and publish) the new selem to local_storage */
661	bpf_selem_link_storage_nolock(local_storage, selem);
662
663	/* Third, remove old selem, SELEM(old_sdata) */
664	if (old_sdata) {
665		bpf_selem_unlink_map(SELEM(old_sdata));
666		bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
667						true, &old_selem_free_list);
668	}
669
670unlock:
671	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
672	bpf_selem_free_list(&old_selem_free_list, false);
673	if (alloc_selem) {
674		mem_uncharge(smap, owner, smap->elem_size);
675		bpf_selem_free(alloc_selem, smap, true);
676	}
677	return err ? ERR_PTR(err) : SDATA(selem);
678}
679
680static u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
681{
682	u64 min_usage = U64_MAX;
683	u16 i, res = 0;
684
685	spin_lock(&cache->idx_lock);
686
687	for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
688		if (cache->idx_usage_counts[i] < min_usage) {
689			min_usage = cache->idx_usage_counts[i];
690			res = i;
691
692			/* Found a free cache_idx */
693			if (!min_usage)
694				break;
695		}
696	}
697	cache->idx_usage_counts[res]++;
698
699	spin_unlock(&cache->idx_lock);
700
701	return res;
702}
703
704static void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
705					     u16 idx)
706{
707	spin_lock(&cache->idx_lock);
708	cache->idx_usage_counts[idx]--;
709	spin_unlock(&cache->idx_lock);
710}
711
712int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
713{
714	if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
715	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
716	    attr->max_entries ||
717	    attr->key_size != sizeof(int) || !attr->value_size ||
718	    /* Enforce BTF for userspace sk dumping */
719	    !attr->btf_key_type_id || !attr->btf_value_type_id)
720		return -EINVAL;
721
722	if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
723		return -E2BIG;
724
725	return 0;
726}
727
728int bpf_local_storage_map_check_btf(const struct bpf_map *map,
729				    const struct btf *btf,
730				    const struct btf_type *key_type,
731				    const struct btf_type *value_type)
732{
733	u32 int_data;
734
735	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
736		return -EINVAL;
737
738	int_data = *(u32 *)(key_type + 1);
739	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
740		return -EINVAL;
741
742	return 0;
743}
744
745void bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
746{
747	struct bpf_local_storage_map *storage_smap;
748	struct bpf_local_storage_elem *selem;
749	bool bpf_ma, free_storage = false;
750	HLIST_HEAD(free_selem_list);
751	struct hlist_node *n;
752	unsigned long flags;
753
754	storage_smap = rcu_dereference_check(local_storage->smap, bpf_rcu_lock_held());
755	bpf_ma = check_storage_bpf_ma(local_storage, storage_smap, NULL);
756
757	/* Neither the bpf_prog nor the bpf_map's syscall
758	 * could be modifying the local_storage->list now.
759	 * Thus, no elem can be added to or deleted from the
760	 * local_storage->list by the bpf_prog or by the bpf_map's syscall.
761	 *
762	 * It is racing with bpf_local_storage_map_free() alone
763	 * when unlinking elem from the local_storage->list and
764	 * the map's bucket->list.
765	 */
766	raw_spin_lock_irqsave(&local_storage->lock, flags);
767	hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
768		/* Always unlink from map before unlinking from
769		 * local_storage.
770		 */
771		bpf_selem_unlink_map(selem);
772		/* If local_storage list has only one element, the
773		 * bpf_selem_unlink_storage_nolock() will return true.
 774		 * Otherwise, it will return false. The loop intends to
 775		 * remove all local storage, so the last iteration will
 776		 * set free_storage to true.
777		 */
778		free_storage = bpf_selem_unlink_storage_nolock(
779			local_storage, selem, true, &free_selem_list);
780	}
781	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
782
783	bpf_selem_free_list(&free_selem_list, true);
784
785	if (free_storage)
786		bpf_local_storage_free(local_storage, storage_smap, bpf_ma, true);
787}
788
789u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map)
790{
791	struct bpf_local_storage_map *smap = (struct bpf_local_storage_map *)map;
792	u64 usage = sizeof(*smap);
793
 794	/* The dynamically allocated selems are not counted currently. */
795	usage += sizeof(*smap->buckets) * (1ULL << smap->bucket_log);
796	return usage;
797}
798
799/* When bpf_ma == true, the bpf_mem_alloc is used to allocate and free memory.
 800 * A deadlock-free allocator is useful for storage whose owner PTR_TO_BTF_ID
 801 * the bpf prog can easily get hold of in any context, e.g. bpf_get_current_task_btf.
 802 * The task and cgroup storage fall into this case. The bpf_mem_alloc reuses
 803 * memory immediately. To be reuse-immediate safe, the owner destruction
 804 * code path needs to go through an rcu grace period before calling
805 * bpf_local_storage_destroy().
806 *
807 * When bpf_ma == false, the kmalloc and kfree are used.
808 */
809struct bpf_map *
810bpf_local_storage_map_alloc(union bpf_attr *attr,
811			    struct bpf_local_storage_cache *cache,
812			    bool bpf_ma)
813{
814	struct bpf_local_storage_map *smap;
815	unsigned int i;
816	u32 nbuckets;
817	int err;
818
819	smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE);
820	if (!smap)
821		return ERR_PTR(-ENOMEM);
822	bpf_map_init_from_attr(&smap->map, attr);
823
824	nbuckets = roundup_pow_of_two(num_possible_cpus());
 825	/* Use at least 2 buckets: select_bucket() is undefined behavior with 1 bucket */
826	nbuckets = max_t(u32, 2, nbuckets);
827	smap->bucket_log = ilog2(nbuckets);
828
829	smap->buckets = bpf_map_kvcalloc(&smap->map, nbuckets,
830					 sizeof(*smap->buckets), GFP_USER | __GFP_NOWARN);
831	if (!smap->buckets) {
832		err = -ENOMEM;
833		goto free_smap;
834	}
835
836	for (i = 0; i < nbuckets; i++) {
837		INIT_HLIST_HEAD(&smap->buckets[i].list);
838		raw_spin_lock_init(&smap->buckets[i].lock);
839	}
840
841	smap->elem_size = offsetof(struct bpf_local_storage_elem,
842				   sdata.data[attr->value_size]);
843
 844	/* In PREEMPT_RT, kmalloc(GFP_ATOMIC) is still not safe in a
 845	 * non-preemptible context. Thus, enforce all storages to use
846	 * bpf_mem_alloc when CONFIG_PREEMPT_RT is enabled.
847	 */
848	smap->bpf_ma = IS_ENABLED(CONFIG_PREEMPT_RT) ? true : bpf_ma;
849	if (smap->bpf_ma) {
850		err = bpf_mem_alloc_init(&smap->selem_ma, smap->elem_size, false);
851		if (err)
852			goto free_smap;
853
854		err = bpf_mem_alloc_init(&smap->storage_ma, sizeof(struct bpf_local_storage), false);
855		if (err) {
856			bpf_mem_alloc_destroy(&smap->selem_ma);
857			goto free_smap;
858		}
859	}
860
861	smap->cache_idx = bpf_local_storage_cache_idx_get(cache);
862	return &smap->map;
863
864free_smap:
865	kvfree(smap->buckets);
866	bpf_map_area_free(smap);
867	return ERR_PTR(err);
868}
869
870void bpf_local_storage_map_free(struct bpf_map *map,
871				struct bpf_local_storage_cache *cache,
872				int __percpu *busy_counter)
873{
874	struct bpf_local_storage_map_bucket *b;
875	struct bpf_local_storage_elem *selem;
876	struct bpf_local_storage_map *smap;
877	unsigned int i;
878
879	smap = (struct bpf_local_storage_map *)map;
880	bpf_local_storage_cache_idx_free(cache, smap->cache_idx);
881
882	/* Note that this map might be concurrently cloned from
883	 * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
884	 * RCU read section to finish before proceeding. New RCU
885	 * read sections should be prevented via bpf_map_inc_not_zero.
886	 */
887	synchronize_rcu();
888
889	/* bpf prog and the userspace can no longer access this map
890	 * now.  No new selem (of this map) can be added
891	 * to the owner->storage or to the map bucket's list.
892	 *
893	 * The elem of this map can be cleaned up here
894	 * or when the storage is freed e.g.
895	 * by bpf_sk_storage_free() during __sk_destruct().
896	 */
897	for (i = 0; i < (1U << smap->bucket_log); i++) {
898		b = &smap->buckets[i];
899
900		rcu_read_lock();
901		/* No one is adding to b->list now */
902		while ((selem = hlist_entry_safe(
903				rcu_dereference_raw(hlist_first_rcu(&b->list)),
904				struct bpf_local_storage_elem, map_node))) {
905			if (busy_counter) {
906				migrate_disable();
907				this_cpu_inc(*busy_counter);
908			}
909			bpf_selem_unlink(selem, true);
910			if (busy_counter) {
911				this_cpu_dec(*busy_counter);
912				migrate_enable();
913			}
914			cond_resched_rcu();
915		}
916		rcu_read_unlock();
917	}
918
919	/* While freeing the storage we may still need to access the map.
920	 *
921	 * e.g. when bpf_sk_storage_free() has unlinked selem from the map
922	 * which then made the above while((selem = ...)) loop
923	 * exit immediately.
924	 *
925	 * However, while freeing the storage one still needs to access the
926	 * smap->elem_size to do the uncharging in
927	 * bpf_selem_unlink_storage_nolock().
928	 *
929	 * Hence, wait another rcu grace period for the storage to be freed.
930	 */
931	synchronize_rcu();
932
933	if (smap->bpf_ma) {
934		rcu_barrier_tasks_trace();
935		if (!rcu_trace_implies_rcu_gp())
936			rcu_barrier();
937		bpf_mem_alloc_destroy(&smap->selem_ma);
938		bpf_mem_alloc_destroy(&smap->storage_ma);
939	}
940	kvfree(smap->buckets);
941	bpf_map_area_free(smap);
942}