v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   3 * Copyright (c) 2016 Facebook
   4 */
   5#include <linux/bpf.h>
   6#include <linux/btf.h>
   7#include <linux/jhash.h>
   8#include <linux/filter.h>
   9#include <linux/rculist_nulls.h>
  10#include <linux/random.h>
  11#include <uapi/linux/btf.h>
  12#include <linux/rcupdate_trace.h>
  13#include <linux/btf_ids.h>
  14#include "percpu_freelist.h"
  15#include "bpf_lru_list.h"
  16#include "map_in_map.h"
  17#include <linux/bpf_mem_alloc.h>
  18
  19#define HTAB_CREATE_FLAG_MASK						\
  20	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE |	\
  21	 BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED)
  22
  23#define BATCH_OPS(_name)			\
  24	.map_lookup_batch =			\
  25	_name##_map_lookup_batch,		\
  26	.map_lookup_and_delete_batch =		\
  27	_name##_map_lookup_and_delete_batch,	\
  28	.map_update_batch =			\
  29	generic_map_update_batch,		\
  30	.map_delete_batch =			\
  31	generic_map_delete_batch
  32
  33/*
  34 * The bucket lock has two protection scopes:
  35 *
  36 * 1) Serializing concurrent operations from BPF programs on different
  37 *    CPUs
  38 *
  39 * 2) Serializing concurrent operations from BPF programs and sys_bpf()
  40 *
  41 * BPF programs can execute in any context including perf, kprobes and
   42 * tracing. As there are almost no limits on where perf, kprobes and tracing
   43 * can be invoked from, the lock operations need to be protected against
  44 * deadlocks. Deadlocks can be caused by recursion and by an invocation in
  45 * the lock held section when functions which acquire this lock are invoked
  46 * from sys_bpf(). BPF recursion is prevented by incrementing the per CPU
  47 * variable bpf_prog_active, which prevents BPF programs attached to perf
   48 * events, kprobes and tracing from being invoked before the prior invocation
   49 * from one of these contexts has completed. sys_bpf() uses the same mechanism
  50 * by pinning the task to the current CPU and incrementing the recursion
  51 * protection across the map operation.
  52 *
  53 * This has subtle implications on PREEMPT_RT. PREEMPT_RT forbids certain
  54 * operations like memory allocations (even with GFP_ATOMIC) from atomic
  55 * contexts. This is required because even with GFP_ATOMIC the memory
  56 * allocator calls into code paths which acquire locks with long held lock
   57 * sections. To ensure deterministic behaviour, these locks are regular
  58 * spinlocks, which are converted to 'sleepable' spinlocks on RT. The only
  59 * true atomic contexts on an RT kernel are the low level hardware
  60 * handling, scheduling, low level interrupt handling, NMIs etc. None of
  61 * these contexts should ever do memory allocations.
  62 *
  63 * As regular device interrupt handlers and soft interrupts are forced into
  64 * thread context, the existing code which does
  65 *   spin_lock*(); alloc(GFP_ATOMIC); spin_unlock*();
  66 * just works.
  67 *
  68 * In theory the BPF locks could be converted to regular spinlocks as well,
  69 * but the bucket locks and percpu_freelist locks can be taken from
  70 * arbitrary contexts (perf, kprobes, tracepoints) which are required to be
  71 * atomic contexts even on RT. Before the introduction of bpf_mem_alloc,
   72 * it was only safe to use a raw spinlock for a preallocated hash map on an RT
   73 * kernel, because there is no memory allocation within the lock held sections.
   74 * However, after the hash map was fully converted to use bpf_mem_alloc, memory
   75 * allocation for a non-preallocated hash map became non-synchronous, so it is
   76 * safe to always use a raw spinlock for the bucket lock.
  77 */
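/* For illustration, the recursion that the per-CPU map_locked counters in
 * htab_lock_bucket() below guard against looks roughly like this, assuming
 * a kprobe that fires while the bucket lock is already held on this CPU:
 *
 *   sys_bpf(BPF_MAP_UPDATE_ELEM)               // bucket raw_lock taken
 *     -> kprobe fires on the same CPU
 *       -> attached BPF prog calls bpf_map_update_elem() on the same map
 *         -> htab_lock_bucket() finds map_locked already incremented
 *         -> returns -EBUSY instead of spinning on the held raw_lock
 */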
  78struct bucket {
  79	struct hlist_nulls_head head;
  80	raw_spinlock_t raw_lock;
  81};
  82
  83#define HASHTAB_MAP_LOCK_COUNT 8
  84#define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)
  85
  86struct bpf_htab {
  87	struct bpf_map map;
  88	struct bpf_mem_alloc ma;
  89	struct bpf_mem_alloc pcpu_ma;
  90	struct bucket *buckets;
  91	void *elems;
  92	union {
  93		struct pcpu_freelist freelist;
  94		struct bpf_lru lru;
  95	};
  96	struct htab_elem *__percpu *extra_elems;
   97	/* the number of elements in a non-preallocated hashtable is kept
  98	 * in either pcount or count
  99	 */
 100	struct percpu_counter pcount;
 101	atomic_t count;
 102	bool use_percpu_counter;
 103	u32 n_buckets;	/* number of hash buckets */
 104	u32 elem_size;	/* size of each element in bytes */
 105	u32 hashrnd;
 106	struct lock_class_key lockdep_key;
 107	int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT];
 108};
 109
 110/* each htab element is struct htab_elem + key + value */
 111struct htab_elem {
 112	union {
 113		struct hlist_nulls_node hash_node;
 114		struct {
 115			void *padding;
 116			union {
 117				struct pcpu_freelist_node fnode;
 118				struct htab_elem *batch_flink;
 119			};
 120		};
 121	};
 122	union {
 123		/* pointer to per-cpu pointer */
 124		void *ptr_to_pptr;
 125		struct bpf_lru_node lru_node;
 126	};
 127	u32 hash;
 128	char key[] __aligned(8);
 129};
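/* For illustration, key[] is followed in the same allocation by the value
 * (or, for per-cpu maps, by a pointer to the per-CPU data), so
 * htab_map_alloc() below sizes each element roughly as
 *
 *   elem_size = sizeof(struct htab_elem) + round_up(key_size, 8) +
 *               (percpu ? sizeof(void *) : round_up(value_size, 8));
 *
 * which is why lookups return l->key + round_up(key_size, 8) as the value.
 */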
 130
 131static inline bool htab_is_prealloc(const struct bpf_htab *htab)
 132{
 133	return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
 134}
 135
 136static void htab_init_buckets(struct bpf_htab *htab)
 137{
 138	unsigned int i;
 139
 140	for (i = 0; i < htab->n_buckets; i++) {
 141		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
 142		raw_spin_lock_init(&htab->buckets[i].raw_lock);
 143		lockdep_set_class(&htab->buckets[i].raw_lock,
 144					  &htab->lockdep_key);
 145		cond_resched();
 146	}
 147}
 148
 149static inline int htab_lock_bucket(const struct bpf_htab *htab,
 150				   struct bucket *b, u32 hash,
 151				   unsigned long *pflags)
 152{
 153	unsigned long flags;
 154
 155	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
 156
 157	preempt_disable();
 158	if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
 159		__this_cpu_dec(*(htab->map_locked[hash]));
 160		preempt_enable();
 161		return -EBUSY;
 162	}
 163
 164	raw_spin_lock_irqsave(&b->raw_lock, flags);
 165	*pflags = flags;
 166
 167	return 0;
 168}
 169
 170static inline void htab_unlock_bucket(const struct bpf_htab *htab,
 171				      struct bucket *b, u32 hash,
 172				      unsigned long flags)
 173{
 174	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
 175	raw_spin_unlock_irqrestore(&b->raw_lock, flags);
 176	__this_cpu_dec(*(htab->map_locked[hash]));
 177	preempt_enable();
 178}
 179
 180static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
 181
 182static bool htab_is_lru(const struct bpf_htab *htab)
 183{
 184	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
 185		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
 186}
 187
 188static bool htab_is_percpu(const struct bpf_htab *htab)
 189{
 190	return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 191		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
 192}
 193
 194static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
 195				     void __percpu *pptr)
 196{
 197	*(void __percpu **)(l->key + key_size) = pptr;
 198}
 199
 200static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
 201{
 202	return *(void __percpu **)(l->key + key_size);
 203}
 204
 205static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
 206{
 207	return *(void **)(l->key + roundup(map->key_size, 8));
 208}
 209
 210static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
 211{
 212	return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size);
 213}
 214
 215static bool htab_has_extra_elems(struct bpf_htab *htab)
 216{
 217	return !htab_is_percpu(htab) && !htab_is_lru(htab);
 218}
 219
 220static void htab_free_prealloced_timers(struct bpf_htab *htab)
 221{
 222	u32 num_entries = htab->map.max_entries;
 223	int i;
 224
 225	if (!btf_record_has_field(htab->map.record, BPF_TIMER))
 226		return;
 227	if (htab_has_extra_elems(htab))
 228		num_entries += num_possible_cpus();
 229
 230	for (i = 0; i < num_entries; i++) {
 231		struct htab_elem *elem;
 232
 233		elem = get_htab_elem(htab, i);
 234		bpf_obj_free_timer(htab->map.record, elem->key + round_up(htab->map.key_size, 8));
 235		cond_resched();
 236	}
 237}
 238
 239static void htab_free_prealloced_fields(struct bpf_htab *htab)
 240{
 241	u32 num_entries = htab->map.max_entries;
 242	int i;
 243
 244	if (IS_ERR_OR_NULL(htab->map.record))
 245		return;
 246	if (htab_has_extra_elems(htab))
 247		num_entries += num_possible_cpus();
 248	for (i = 0; i < num_entries; i++) {
 249		struct htab_elem *elem;
 250
 251		elem = get_htab_elem(htab, i);
 252		bpf_obj_free_fields(htab->map.record, elem->key + round_up(htab->map.key_size, 8));
 253		cond_resched();
 254	}
 255}
 256
 257static void htab_free_elems(struct bpf_htab *htab)
 258{
 259	int i;
 260
 261	if (!htab_is_percpu(htab))
 262		goto free_elems;
 263
 264	for (i = 0; i < htab->map.max_entries; i++) {
 265		void __percpu *pptr;
 266
 267		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
 268					 htab->map.key_size);
 269		free_percpu(pptr);
 270		cond_resched();
 271	}
 272free_elems:
 273	bpf_map_area_free(htab->elems);
 274}
 275
 276/* The LRU list has a lock (lru_lock). Each htab bucket has a lock
 277 * (bucket_lock). If both locks need to be acquired together, the lock
 278 * order is always lru_lock -> bucket_lock and this only happens in
  279 * bpf_lru_list.c logic. For example, certain code paths of
  280 * bpf_lru_pop_free(), which is called by prealloc_lru_pop(),
  281 * will acquire lru_lock first, followed by bucket_lock.
 282 *
 283 * In hashtab.c, to avoid deadlock, lock acquisition of
 284 * bucket_lock followed by lru_lock is not allowed. In such cases,
 285 * bucket_lock needs to be released first before acquiring lru_lock.
 286 */
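/* For illustration, the only permitted nesting therefore looks roughly like
 *
 *   bpf_lru_pop_free()                     // takes lru_lock
 *     -> htab_lru_map_delete_node()
 *          -> htab_lock_bucket()           // lru_lock -> bucket_lock
 *
 * while the reverse order is avoided by dropping the bucket lock first,
 * e.g. htab_lru_push_free() is only called after htab_unlock_bucket().
 */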
 287static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
 288					  u32 hash)
 289{
 290	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
 291	struct htab_elem *l;
 292
 293	if (node) {
 294		l = container_of(node, struct htab_elem, lru_node);
 295		memcpy(l->key, key, htab->map.key_size);
 296		return l;
 297	}
 298
 299	return NULL;
 300}
 301
 302static int prealloc_init(struct bpf_htab *htab)
 303{
 304	u32 num_entries = htab->map.max_entries;
 305	int err = -ENOMEM, i;
 306
 307	if (htab_has_extra_elems(htab))
 308		num_entries += num_possible_cpus();
 309
 310	htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries,
 311					 htab->map.numa_node);
 312	if (!htab->elems)
 313		return -ENOMEM;
 314
 315	if (!htab_is_percpu(htab))
 316		goto skip_percpu_elems;
 317
 318	for (i = 0; i < num_entries; i++) {
 319		u32 size = round_up(htab->map.value_size, 8);
 320		void __percpu *pptr;
 321
 322		pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
 323					    GFP_USER | __GFP_NOWARN);
 324		if (!pptr)
 325			goto free_elems;
 326		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
 327				  pptr);
 328		cond_resched();
 329	}
 330
 331skip_percpu_elems:
 332	if (htab_is_lru(htab))
 333		err = bpf_lru_init(&htab->lru,
 334				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
 335				   offsetof(struct htab_elem, hash) -
 336				   offsetof(struct htab_elem, lru_node),
 337				   htab_lru_map_delete_node,
 338				   htab);
 339	else
 340		err = pcpu_freelist_init(&htab->freelist);
 341
 342	if (err)
 343		goto free_elems;
 344
 345	if (htab_is_lru(htab))
 346		bpf_lru_populate(&htab->lru, htab->elems,
 347				 offsetof(struct htab_elem, lru_node),
 348				 htab->elem_size, num_entries);
 349	else
 350		pcpu_freelist_populate(&htab->freelist,
 351				       htab->elems + offsetof(struct htab_elem, fnode),
 352				       htab->elem_size, num_entries);
 353
 354	return 0;
 355
 356free_elems:
 357	htab_free_elems(htab);
 358	return err;
 359}
 360
 361static void prealloc_destroy(struct bpf_htab *htab)
 362{
 363	htab_free_elems(htab);
 364
 365	if (htab_is_lru(htab))
 366		bpf_lru_destroy(&htab->lru);
 367	else
 368		pcpu_freelist_destroy(&htab->freelist);
 369}
 370
 371static int alloc_extra_elems(struct bpf_htab *htab)
 372{
 373	struct htab_elem *__percpu *pptr, *l_new;
 374	struct pcpu_freelist_node *l;
 375	int cpu;
 376
 377	pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8,
 378				    GFP_USER | __GFP_NOWARN);
 379	if (!pptr)
 380		return -ENOMEM;
 381
 382	for_each_possible_cpu(cpu) {
 383		l = pcpu_freelist_pop(&htab->freelist);
 384		/* pop will succeed, since prealloc_init()
 385		 * preallocated extra num_possible_cpus elements
 386		 */
 387		l_new = container_of(l, struct htab_elem, fnode);
 388		*per_cpu_ptr(pptr, cpu) = l_new;
 389	}
 390	htab->extra_elems = pptr;
 391	return 0;
 392}
 393
 394/* Called from syscall */
 395static int htab_map_alloc_check(union bpf_attr *attr)
 396{
 397	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 398		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
 399	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
 400		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
 401	/* percpu_lru means each cpu has its own LRU list.
 402	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
 403	 * the map's value itself is percpu.  percpu_lru has
 404	 * nothing to do with the map's value.
 405	 */
 406	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 407	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
 408	bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED);
 409	int numa_node = bpf_map_attr_numa_node(attr);
 410
 411	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
 412		     offsetof(struct htab_elem, hash_node.pprev));
 413
 414	if (lru && !bpf_capable())
  415		/* The LRU implementation is much more complicated than other
 416		 * maps.  Hence, limit to CAP_BPF.
 417		 */
 418		return -EPERM;
 419
 420	if (zero_seed && !capable(CAP_SYS_ADMIN))
 421		/* Guard against local DoS, and discourage production use. */
 422		return -EPERM;
 423
 424	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK ||
 425	    !bpf_map_flags_access_ok(attr->map_flags))
 426		return -EINVAL;
 427
 428	if (!lru && percpu_lru)
 429		return -EINVAL;
 430
 431	if (lru && !prealloc)
 432		return -ENOTSUPP;
 433
 434	if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
 435		return -EINVAL;
 436
 437	/* check sanity of attributes.
 438	 * value_size == 0 may be allowed in the future to use map as a set
 439	 */
 440	if (attr->max_entries == 0 || attr->key_size == 0 ||
 441	    attr->value_size == 0)
 442		return -EINVAL;
 443
 444	if ((u64)attr->key_size + attr->value_size >= KMALLOC_MAX_SIZE -
 445	   sizeof(struct htab_elem))
 446		/* if key_size + value_size is bigger, the user space won't be
 447		 * able to access the elements via bpf syscall. This check
 448		 * also makes sure that the elem_size doesn't overflow and it's
 449		 * kmalloc-able later in htab_map_update_elem()
 450		 */
 451		return -E2BIG;
 452
 453	return 0;
 454}
 455
 456static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 457{
 458	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 459		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
 460	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
 461		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
 462	/* percpu_lru means each cpu has its own LRU list.
 463	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
 464	 * the map's value itself is percpu.  percpu_lru has
 465	 * nothing to do with the map's value.
 466	 */
 467	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 468	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
 469	struct bpf_htab *htab;
 470	int err, i;
 471
 472	htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
 473	if (!htab)
 474		return ERR_PTR(-ENOMEM);
 475
 476	lockdep_register_key(&htab->lockdep_key);
 477
 478	bpf_map_init_from_attr(&htab->map, attr);
 479
 480	if (percpu_lru) {
  481		/* ensure each CPU's lru list has >= 1 element.
  482		 * While we are at it, make each lru list have the same
  483		 * number of elements.
 484		 */
 485		htab->map.max_entries = roundup(attr->max_entries,
 486						num_possible_cpus());
 487		if (htab->map.max_entries < attr->max_entries)
 488			htab->map.max_entries = rounddown(attr->max_entries,
 489							  num_possible_cpus());
 490	}
 491
 492	/* hash table size must be power of 2 */
 493	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
 494
 495	htab->elem_size = sizeof(struct htab_elem) +
 496			  round_up(htab->map.key_size, 8);
 497	if (percpu)
 498		htab->elem_size += sizeof(void *);
 499	else
 500		htab->elem_size += round_up(htab->map.value_size, 8);
 501
 502	err = -E2BIG;
 503	/* prevent zero size kmalloc and check for u32 overflow */
 504	if (htab->n_buckets == 0 ||
 505	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
 506		goto free_htab;
 507
 508	err = -ENOMEM;
 509	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
 510					   sizeof(struct bucket),
 511					   htab->map.numa_node);
 512	if (!htab->buckets)
 513		goto free_htab;
 514
 515	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) {
 516		htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map,
 517							   sizeof(int),
 518							   sizeof(int),
 519							   GFP_USER);
 520		if (!htab->map_locked[i])
 521			goto free_map_locked;
 522	}
 523
 524	if (htab->map.map_flags & BPF_F_ZERO_SEED)
 525		htab->hashrnd = 0;
 526	else
 527		htab->hashrnd = get_random_u32();
 528
 529	htab_init_buckets(htab);
 530
 531/* compute_batch_value() computes batch value as num_online_cpus() * 2
 532 * and __percpu_counter_compare() needs
 533 * htab->max_entries - cur_number_of_elems to be more than batch * num_online_cpus()
 534 * for percpu_counter to be faster than atomic_t. In practice the average bpf
 535 * hash map size is 10k, which means that a system with 64 cpus will fill
 536 * hashmap to 20% of 10k before percpu_counter becomes ineffective. Therefore
  537 * define our own batch count as 32, so that a 10k hash map can be filled up to 80%:
 538 * 10k - 8k > 32 _batch_ * 64 _cpus_
 539 * and __percpu_counter_compare() will still be fast. At that point hash map
 540 * collisions will dominate its performance anyway. Assume that hash map filled
 541 * to 50+% isn't going to be O(1) and use the following formula to choose
 542 * between percpu_counter and atomic_t.
 543 */
 544#define PERCPU_COUNTER_BATCH 32
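/* For illustration, plugging numbers into the check below: a machine with
 * 64 online CPUs uses the percpu_counter only when
 * max_entries / 2 > 64 * 32 = 2048, i.e. for maps with (roughly) more than
 * 4096 entries; smaller maps keep the plain atomic_t counter.
 */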
 545	if (attr->max_entries / 2 > num_online_cpus() * PERCPU_COUNTER_BATCH)
 546		htab->use_percpu_counter = true;
 547
 548	if (htab->use_percpu_counter) {
 549		err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL);
 550		if (err)
 551			goto free_map_locked;
 552	}
 553
 554	if (prealloc) {
 555		err = prealloc_init(htab);
 556		if (err)
 557			goto free_map_locked;
 558
 559		if (!percpu && !lru) {
 560			/* lru itself can remove the least used element, so
 561			 * there is no need for an extra elem during map_update.
 562			 */
 563			err = alloc_extra_elems(htab);
 564			if (err)
 565				goto free_prealloc;
 566		}
 567	} else {
 568		err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false);
 569		if (err)
 570			goto free_map_locked;
 571		if (percpu) {
 572			err = bpf_mem_alloc_init(&htab->pcpu_ma,
 573						 round_up(htab->map.value_size, 8), true);
 574			if (err)
 575				goto free_map_locked;
 576		}
 577	}
 578
 579	return &htab->map;
 580
 581free_prealloc:
 582	prealloc_destroy(htab);
 583free_map_locked:
 584	if (htab->use_percpu_counter)
 585		percpu_counter_destroy(&htab->pcount);
 586	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
 587		free_percpu(htab->map_locked[i]);
 588	bpf_map_area_free(htab->buckets);
 589	bpf_mem_alloc_destroy(&htab->pcpu_ma);
 590	bpf_mem_alloc_destroy(&htab->ma);
 591free_htab:
 592	lockdep_unregister_key(&htab->lockdep_key);
 593	bpf_map_area_free(htab);
 594	return ERR_PTR(err);
 595}
 596
 597static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
 598{
 599	return jhash(key, key_len, hashrnd);
 600}
 601
 602static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
 603{
 604	return &htab->buckets[hash & (htab->n_buckets - 1)];
 605}
 606
 607static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
 608{
 609	return &__select_bucket(htab, hash)->head;
 610}
 611
 612/* this lookup function can only be called with bucket lock taken */
 613static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
 614					 void *key, u32 key_size)
 615{
 616	struct hlist_nulls_node *n;
 617	struct htab_elem *l;
 618
 619	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
 620		if (l->hash == hash && !memcmp(&l->key, key, key_size))
 621			return l;
 622
 623	return NULL;
 624}
 625
  626/* can be called without bucket lock. it will repeat the loop in
  627 * the unlikely event that elements moved from one bucket into another
  628 * while the linked list is being walked
 629 */
 630static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
 631					       u32 hash, void *key,
 632					       u32 key_size, u32 n_buckets)
 633{
 634	struct hlist_nulls_node *n;
 635	struct htab_elem *l;
 636
 637again:
 638	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
 639		if (l->hash == hash && !memcmp(&l->key, key, key_size))
 640			return l;
 641
 642	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
 643		goto again;
 644
 645	return NULL;
 646}
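/* For illustration, the retry above works because each bucket's nulls
 * marker is initialised to its bucket index in htab_init_buckets(), so a
 * lockless walker that was moved onto another bucket's list ends on a
 * nulls value that no longer matches hash & (n_buckets - 1) and restarts.
 */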
 647
 648/* Called from syscall or from eBPF program directly, so
 649 * arguments have to match bpf_map_lookup_elem() exactly.
 650 * The return value is adjusted by BPF instructions
 651 * in htab_map_gen_lookup().
 652 */
 653static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
 654{
 655	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 656	struct hlist_nulls_head *head;
 657	struct htab_elem *l;
 658	u32 hash, key_size;
 659
 660	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
 661		     !rcu_read_lock_bh_held());
 662
 663	key_size = map->key_size;
 664
 665	hash = htab_map_hash(key, key_size, htab->hashrnd);
 666
 667	head = select_bucket(htab, hash);
 668
 669	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
 670
 671	return l;
 672}
 673
 674static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
 675{
 676	struct htab_elem *l = __htab_map_lookup_elem(map, key);
 677
 678	if (l)
 679		return l->key + round_up(map->key_size, 8);
 680
 681	return NULL;
 682}
 683
 684/* inline bpf_map_lookup_elem() call.
 685 * Instead of:
 686 * bpf_prog
 687 *   bpf_map_lookup_elem
 688 *     map->ops->map_lookup_elem
 689 *       htab_map_lookup_elem
 690 *         __htab_map_lookup_elem
 691 * do:
 692 * bpf_prog
 693 *   __htab_map_lookup_elem
 694 */
 695static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 696{
 697	struct bpf_insn *insn = insn_buf;
 698	const int ret = BPF_REG_0;
 699
 700	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
 701		     (void *(*)(struct bpf_map *map, void *key))NULL));
 702	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
 703	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
 704	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
 705				offsetof(struct htab_elem, key) +
 706				round_up(map->key_size, 8));
 707	return insn - insn_buf;
 708}
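/* For illustration, the instructions emitted above behave roughly like the
 * following C, with BPF_REG_0 holding the inlined call's return value:
 *
 *   elem = __htab_map_lookup_elem(map, key);
 *   if (elem)
 *           elem = (void *)elem + offsetof(struct htab_elem, key) +
 *                  round_up(map->key_size, 8);   // address of the value
 *   return elem;
 */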
 709
 710static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
 711							void *key, const bool mark)
 712{
 713	struct htab_elem *l = __htab_map_lookup_elem(map, key);
 714
 715	if (l) {
 716		if (mark)
 717			bpf_lru_node_set_ref(&l->lru_node);
 718		return l->key + round_up(map->key_size, 8);
 719	}
 720
 721	return NULL;
 722}
 723
 724static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
 725{
 726	return __htab_lru_map_lookup_elem(map, key, true);
 727}
 728
 729static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
 730{
 731	return __htab_lru_map_lookup_elem(map, key, false);
 732}
 733
 734static int htab_lru_map_gen_lookup(struct bpf_map *map,
 735				   struct bpf_insn *insn_buf)
 736{
 737	struct bpf_insn *insn = insn_buf;
 738	const int ret = BPF_REG_0;
 739	const int ref_reg = BPF_REG_1;
 740
 741	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
 742		     (void *(*)(struct bpf_map *map, void *key))NULL));
 743	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
 744	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
 745	*insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
 746			      offsetof(struct htab_elem, lru_node) +
 747			      offsetof(struct bpf_lru_node, ref));
 748	*insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1);
 749	*insn++ = BPF_ST_MEM(BPF_B, ret,
 750			     offsetof(struct htab_elem, lru_node) +
 751			     offsetof(struct bpf_lru_node, ref),
 752			     1);
 753	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
 754				offsetof(struct htab_elem, key) +
 755				round_up(map->key_size, 8));
 756	return insn - insn_buf;
 757}
 758
 759static void check_and_free_fields(struct bpf_htab *htab,
 760				  struct htab_elem *elem)
 761{
 762	void *map_value = elem->key + round_up(htab->map.key_size, 8);
 763
 764	bpf_obj_free_fields(htab->map.record, map_value);
 765}
 766
 767/* It is called from the bpf_lru_list when the LRU needs to delete
 768 * older elements from the htab.
 769 */
 770static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 771{
 772	struct bpf_htab *htab = arg;
 773	struct htab_elem *l = NULL, *tgt_l;
 774	struct hlist_nulls_head *head;
 775	struct hlist_nulls_node *n;
 776	unsigned long flags;
 777	struct bucket *b;
 778	int ret;
 779
 780	tgt_l = container_of(node, struct htab_elem, lru_node);
 781	b = __select_bucket(htab, tgt_l->hash);
 782	head = &b->head;
 783
 784	ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags);
 785	if (ret)
 786		return false;
 787
 788	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
 789		if (l == tgt_l) {
 790			hlist_nulls_del_rcu(&l->hash_node);
 791			check_and_free_fields(htab, l);
 792			break;
 793		}
 794
 795	htab_unlock_bucket(htab, b, tgt_l->hash, flags);
 796
 797	return l == tgt_l;
 798}
 799
 800/* Called from syscall */
 801static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 802{
 803	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 804	struct hlist_nulls_head *head;
 805	struct htab_elem *l, *next_l;
 806	u32 hash, key_size;
 807	int i = 0;
 808
 809	WARN_ON_ONCE(!rcu_read_lock_held());
 810
 811	key_size = map->key_size;
 812
 813	if (!key)
 814		goto find_first_elem;
 815
 816	hash = htab_map_hash(key, key_size, htab->hashrnd);
 817
 818	head = select_bucket(htab, hash);
 819
 820	/* lookup the key */
 821	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
 822
 823	if (!l)
 824		goto find_first_elem;
 825
 826	/* key was found, get next key in the same bucket */
 827	next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
 828				  struct htab_elem, hash_node);
 829
 830	if (next_l) {
 831		/* if next elem in this hash list is non-zero, just return it */
 832		memcpy(next_key, next_l->key, key_size);
 833		return 0;
 834	}
 835
 836	/* no more elements in this hash list, go to the next bucket */
 837	i = hash & (htab->n_buckets - 1);
 838	i++;
 839
 840find_first_elem:
 841	/* iterate over buckets */
 842	for (; i < htab->n_buckets; i++) {
 843		head = select_bucket(htab, i);
 844
 845		/* pick first element in the bucket */
 846		next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
 847					  struct htab_elem, hash_node);
 848		if (next_l) {
 849			/* if it's not empty, just return it */
 850			memcpy(next_key, next_l->key, key_size);
 851			return 0;
 852		}
 853	}
 854
 855	/* iterated over all buckets and all elements */
 856	return -ENOENT;
 857}
 858
 859static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
 860{
 861	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
 862		bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr);
 863	check_and_free_fields(htab, l);
 864	bpf_mem_cache_free(&htab->ma, l);
 865}
 866
 867static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
 868{
 869	struct bpf_map *map = &htab->map;
 870	void *ptr;
 871
 872	if (map->ops->map_fd_put_ptr) {
 873		ptr = fd_htab_map_get_ptr(map, l);
 874		map->ops->map_fd_put_ptr(ptr);
 875	}
 876}
 877
 878static bool is_map_full(struct bpf_htab *htab)
 879{
 880	if (htab->use_percpu_counter)
 881		return __percpu_counter_compare(&htab->pcount, htab->map.max_entries,
 882						PERCPU_COUNTER_BATCH) >= 0;
 883	return atomic_read(&htab->count) >= htab->map.max_entries;
 884}
 885
 886static void inc_elem_count(struct bpf_htab *htab)
 887{
 888	if (htab->use_percpu_counter)
 889		percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH);
 890	else
 891		atomic_inc(&htab->count);
 892}
 893
 894static void dec_elem_count(struct bpf_htab *htab)
 895{
 896	if (htab->use_percpu_counter)
 897		percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH);
 898	else
 899		atomic_dec(&htab->count);
 900}
 901
 902
 903static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 904{
 905	htab_put_fd_value(htab, l);
 906
 907	if (htab_is_prealloc(htab)) {
 908		check_and_free_fields(htab, l);
 909		__pcpu_freelist_push(&htab->freelist, &l->fnode);
 910	} else {
 911		dec_elem_count(htab);
 912		htab_elem_free(htab, l);
 913	}
 914}
 915
 916static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
 917			    void *value, bool onallcpus)
 918{
 919	if (!onallcpus) {
 920		/* copy true value_size bytes */
 921		memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
 922	} else {
 923		u32 size = round_up(htab->map.value_size, 8);
 924		int off = 0, cpu;
 925
 926		for_each_possible_cpu(cpu) {
 927			bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
 928					value + off, size);
 929			off += size;
 930		}
 931	}
 932}
 933
 934static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
 935			    void *value, bool onallcpus)
 936{
 937	/* When not setting the initial value on all cpus, zero-fill element
 938	 * values for other cpus. Otherwise, bpf program has no way to ensure
 939	 * known initial values for cpus other than current one
 940	 * (onallcpus=false always when coming from bpf prog).
 941	 */
 942	if (!onallcpus) {
 943		u32 size = round_up(htab->map.value_size, 8);
 944		int current_cpu = raw_smp_processor_id();
 945		int cpu;
 946
 947		for_each_possible_cpu(cpu) {
 948			if (cpu == current_cpu)
 949				bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value,
 950						size);
 951			else
 952				memset(per_cpu_ptr(pptr, cpu), 0, size);
 953		}
 954	} else {
 955		pcpu_copy_value(htab, pptr, value, onallcpus);
 956	}
 957}
 958
 959static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
 960{
 961	return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
 962	       BITS_PER_LONG == 64;
 963}
 964
 965static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 966					 void *value, u32 key_size, u32 hash,
 967					 bool percpu, bool onallcpus,
 968					 struct htab_elem *old_elem)
 969{
 970	u32 size = htab->map.value_size;
 971	bool prealloc = htab_is_prealloc(htab);
 972	struct htab_elem *l_new, **pl_new;
 973	void __percpu *pptr;
 974
 975	if (prealloc) {
 976		if (old_elem) {
 977			/* if we're updating the existing element,
 978			 * use per-cpu extra elems to avoid freelist_pop/push
 979			 */
 980			pl_new = this_cpu_ptr(htab->extra_elems);
 981			l_new = *pl_new;
 982			htab_put_fd_value(htab, old_elem);
 983			*pl_new = old_elem;
 984		} else {
 985			struct pcpu_freelist_node *l;
 986
 987			l = __pcpu_freelist_pop(&htab->freelist);
 988			if (!l)
 989				return ERR_PTR(-E2BIG);
 990			l_new = container_of(l, struct htab_elem, fnode);
 991		}
 992	} else {
 993		if (is_map_full(htab))
 994			if (!old_elem)
 995				/* when map is full and update() is replacing
 996				 * old element, it's ok to allocate, since
 997				 * old element will be freed immediately.
 998				 * Otherwise return an error
 999				 */
1000				return ERR_PTR(-E2BIG);
1001		inc_elem_count(htab);
1002		l_new = bpf_mem_cache_alloc(&htab->ma);
1003		if (!l_new) {
1004			l_new = ERR_PTR(-ENOMEM);
1005			goto dec_count;
1006		}
1007		check_and_init_map_value(&htab->map,
1008					 l_new->key + round_up(key_size, 8));
1009	}
1010
1011	memcpy(l_new->key, key, key_size);
1012	if (percpu) {
1013		if (prealloc) {
1014			pptr = htab_elem_get_ptr(l_new, key_size);
1015		} else {
1016			/* alloc_percpu zero-fills */
1017			pptr = bpf_mem_cache_alloc(&htab->pcpu_ma);
1018			if (!pptr) {
1019				bpf_mem_cache_free(&htab->ma, l_new);
1020				l_new = ERR_PTR(-ENOMEM);
1021				goto dec_count;
1022			}
1023			l_new->ptr_to_pptr = pptr;
1024			pptr = *(void **)pptr;
1025		}
1026
1027		pcpu_init_value(htab, pptr, value, onallcpus);
1028
1029		if (!prealloc)
1030			htab_elem_set_ptr(l_new, key_size, pptr);
1031	} else if (fd_htab_map_needs_adjust(htab)) {
1032		size = round_up(size, 8);
1033		memcpy(l_new->key + round_up(key_size, 8), value, size);
1034	} else {
1035		copy_map_value(&htab->map,
1036			       l_new->key + round_up(key_size, 8),
1037			       value);
1038	}
1039
1040	l_new->hash = hash;
1041	return l_new;
1042dec_count:
1043	dec_elem_count(htab);
1044	return l_new;
1045}
1046
1047static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
1048		       u64 map_flags)
1049{
1050	if (l_old && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
1051		/* elem already exists */
1052		return -EEXIST;
1053
1054	if (!l_old && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
1055		/* elem doesn't exist, cannot update it */
1056		return -ENOENT;
1057
1058	return 0;
1059}
1060
1061/* Called from syscall or from eBPF program */
1062static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
1063				u64 map_flags)
1064{
1065	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1066	struct htab_elem *l_new = NULL, *l_old;
1067	struct hlist_nulls_head *head;
1068	unsigned long flags;
1069	struct bucket *b;
1070	u32 key_size, hash;
1071	int ret;
1072
1073	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
1074		/* unknown flags */
1075		return -EINVAL;
1076
1077	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1078		     !rcu_read_lock_bh_held());
1079
1080	key_size = map->key_size;
1081
1082	hash = htab_map_hash(key, key_size, htab->hashrnd);
1083
1084	b = __select_bucket(htab, hash);
1085	head = &b->head;
1086
1087	if (unlikely(map_flags & BPF_F_LOCK)) {
1088		if (unlikely(!btf_record_has_field(map->record, BPF_SPIN_LOCK)))
1089			return -EINVAL;
1090		/* find an element without taking the bucket lock */
1091		l_old = lookup_nulls_elem_raw(head, hash, key, key_size,
1092					      htab->n_buckets);
1093		ret = check_flags(htab, l_old, map_flags);
1094		if (ret)
1095			return ret;
1096		if (l_old) {
1097			/* grab the element lock and update value in place */
1098			copy_map_value_locked(map,
1099					      l_old->key + round_up(key_size, 8),
1100					      value, false);
1101			return 0;
1102		}
1103		/* fall through, grab the bucket lock and lookup again.
1104		 * 99.9% chance that the element won't be found,
1105		 * but second lookup under lock has to be done.
1106		 */
1107	}
1108
1109	ret = htab_lock_bucket(htab, b, hash, &flags);
1110	if (ret)
1111		return ret;
1112
1113	l_old = lookup_elem_raw(head, hash, key, key_size);
1114
1115	ret = check_flags(htab, l_old, map_flags);
1116	if (ret)
1117		goto err;
1118
1119	if (unlikely(l_old && (map_flags & BPF_F_LOCK))) {
1120		/* first lookup without the bucket lock didn't find the element,
1121		 * but second lookup with the bucket lock found it.
1122		 * This case is highly unlikely, but has to be dealt with:
1123		 * grab the element lock in addition to the bucket lock
1124		 * and update element in place
1125		 */
1126		copy_map_value_locked(map,
1127				      l_old->key + round_up(key_size, 8),
1128				      value, false);
1129		ret = 0;
1130		goto err;
1131	}
1132
1133	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
1134				l_old);
1135	if (IS_ERR(l_new)) {
1136		/* all pre-allocated elements are in use or memory exhausted */
1137		ret = PTR_ERR(l_new);
1138		goto err;
1139	}
1140
1141	/* add new element to the head of the list, so that
1142	 * concurrent search will find it before old elem
1143	 */
1144	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1145	if (l_old) {
1146		hlist_nulls_del_rcu(&l_old->hash_node);
1147		if (!htab_is_prealloc(htab))
1148			free_htab_elem(htab, l_old);
1149		else
1150			check_and_free_fields(htab, l_old);
1151	}
1152	ret = 0;
1153err:
1154	htab_unlock_bucket(htab, b, hash, flags);
1155	return ret;
1156}
1157
1158static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem)
1159{
1160	check_and_free_fields(htab, elem);
1161	bpf_lru_push_free(&htab->lru, &elem->lru_node);
1162}
1163
1164static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
1165				    u64 map_flags)
1166{
1167	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1168	struct htab_elem *l_new, *l_old = NULL;
1169	struct hlist_nulls_head *head;
1170	unsigned long flags;
1171	struct bucket *b;
1172	u32 key_size, hash;
1173	int ret;
1174
1175	if (unlikely(map_flags > BPF_EXIST))
1176		/* unknown flags */
1177		return -EINVAL;
1178
1179	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1180		     !rcu_read_lock_bh_held());
1181
1182	key_size = map->key_size;
1183
1184	hash = htab_map_hash(key, key_size, htab->hashrnd);
1185
1186	b = __select_bucket(htab, hash);
1187	head = &b->head;
1188
1189	/* For LRU, we need to alloc before taking bucket's
1190	 * spinlock because getting free nodes from LRU may need
1191	 * to remove older elements from htab and this removal
1192	 * operation will need a bucket lock.
1193	 */
1194	l_new = prealloc_lru_pop(htab, key, hash);
1195	if (!l_new)
1196		return -ENOMEM;
1197	copy_map_value(&htab->map,
1198		       l_new->key + round_up(map->key_size, 8), value);
1199
1200	ret = htab_lock_bucket(htab, b, hash, &flags);
1201	if (ret)
1202		return ret;
1203
1204	l_old = lookup_elem_raw(head, hash, key, key_size);
1205
1206	ret = check_flags(htab, l_old, map_flags);
1207	if (ret)
1208		goto err;
1209
1210	/* add new element to the head of the list, so that
1211	 * concurrent search will find it before old elem
1212	 */
1213	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1214	if (l_old) {
1215		bpf_lru_node_set_ref(&l_new->lru_node);
1216		hlist_nulls_del_rcu(&l_old->hash_node);
1217	}
1218	ret = 0;
1219
1220err:
1221	htab_unlock_bucket(htab, b, hash, flags);
1222
1223	if (ret)
1224		htab_lru_push_free(htab, l_new);
1225	else if (l_old)
1226		htab_lru_push_free(htab, l_old);
1227
1228	return ret;
1229}
1230
1231static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
1232					 void *value, u64 map_flags,
1233					 bool onallcpus)
1234{
1235	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1236	struct htab_elem *l_new = NULL, *l_old;
1237	struct hlist_nulls_head *head;
1238	unsigned long flags;
1239	struct bucket *b;
1240	u32 key_size, hash;
1241	int ret;
1242
1243	if (unlikely(map_flags > BPF_EXIST))
1244		/* unknown flags */
1245		return -EINVAL;
1246
1247	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1248		     !rcu_read_lock_bh_held());
1249
1250	key_size = map->key_size;
1251
1252	hash = htab_map_hash(key, key_size, htab->hashrnd);
1253
1254	b = __select_bucket(htab, hash);
1255	head = &b->head;
1256
1257	ret = htab_lock_bucket(htab, b, hash, &flags);
1258	if (ret)
1259		return ret;
1260
1261	l_old = lookup_elem_raw(head, hash, key, key_size);
1262
1263	ret = check_flags(htab, l_old, map_flags);
1264	if (ret)
1265		goto err;
1266
1267	if (l_old) {
1268		/* per-cpu hash map can update value in-place */
1269		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1270				value, onallcpus);
1271	} else {
1272		l_new = alloc_htab_elem(htab, key, value, key_size,
1273					hash, true, onallcpus, NULL);
1274		if (IS_ERR(l_new)) {
1275			ret = PTR_ERR(l_new);
1276			goto err;
1277		}
1278		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1279	}
1280	ret = 0;
1281err:
1282	htab_unlock_bucket(htab, b, hash, flags);
1283	return ret;
1284}
1285
1286static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
1287					     void *value, u64 map_flags,
1288					     bool onallcpus)
1289{
1290	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1291	struct htab_elem *l_new = NULL, *l_old;
1292	struct hlist_nulls_head *head;
1293	unsigned long flags;
1294	struct bucket *b;
1295	u32 key_size, hash;
1296	int ret;
1297
1298	if (unlikely(map_flags > BPF_EXIST))
1299		/* unknown flags */
1300		return -EINVAL;
1301
1302	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1303		     !rcu_read_lock_bh_held());
1304
1305	key_size = map->key_size;
1306
1307	hash = htab_map_hash(key, key_size, htab->hashrnd);
1308
1309	b = __select_bucket(htab, hash);
1310	head = &b->head;
1311
1312	/* For LRU, we need to alloc before taking bucket's
1313	 * spinlock because LRU's elem alloc may need
1314	 * to remove older elem from htab and this removal
1315	 * operation will need a bucket lock.
1316	 */
1317	if (map_flags != BPF_EXIST) {
1318		l_new = prealloc_lru_pop(htab, key, hash);
1319		if (!l_new)
1320			return -ENOMEM;
1321	}
1322
1323	ret = htab_lock_bucket(htab, b, hash, &flags);
1324	if (ret)
1325		return ret;
1326
1327	l_old = lookup_elem_raw(head, hash, key, key_size);
1328
1329	ret = check_flags(htab, l_old, map_flags);
1330	if (ret)
1331		goto err;
1332
1333	if (l_old) {
1334		bpf_lru_node_set_ref(&l_old->lru_node);
1335
1336		/* per-cpu hash map can update value in-place */
1337		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1338				value, onallcpus);
1339	} else {
1340		pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
1341				value, onallcpus);
1342		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1343		l_new = NULL;
1344	}
1345	ret = 0;
1346err:
1347	htab_unlock_bucket(htab, b, hash, flags);
1348	if (l_new)
1349		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
1350	return ret;
1351}
1352
1353static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
1354				       void *value, u64 map_flags)
1355{
1356	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
1357}
1358
1359static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
1360					   void *value, u64 map_flags)
1361{
1362	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
1363						 false);
1364}
1365
1366/* Called from syscall or from eBPF program */
1367static int htab_map_delete_elem(struct bpf_map *map, void *key)
1368{
1369	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1370	struct hlist_nulls_head *head;
1371	struct bucket *b;
1372	struct htab_elem *l;
1373	unsigned long flags;
1374	u32 hash, key_size;
1375	int ret;
1376
1377	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1378		     !rcu_read_lock_bh_held());
1379
1380	key_size = map->key_size;
1381
1382	hash = htab_map_hash(key, key_size, htab->hashrnd);
1383	b = __select_bucket(htab, hash);
1384	head = &b->head;
1385
1386	ret = htab_lock_bucket(htab, b, hash, &flags);
1387	if (ret)
1388		return ret;
1389
1390	l = lookup_elem_raw(head, hash, key, key_size);
1391
1392	if (l) {
1393		hlist_nulls_del_rcu(&l->hash_node);
1394		free_htab_elem(htab, l);
1395	} else {
1396		ret = -ENOENT;
1397	}
1398
1399	htab_unlock_bucket(htab, b, hash, flags);
1400	return ret;
1401}
1402
1403static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
1404{
1405	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1406	struct hlist_nulls_head *head;
1407	struct bucket *b;
1408	struct htab_elem *l;
1409	unsigned long flags;
1410	u32 hash, key_size;
1411	int ret;
1412
1413	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1414		     !rcu_read_lock_bh_held());
1415
1416	key_size = map->key_size;
1417
1418	hash = htab_map_hash(key, key_size, htab->hashrnd);
1419	b = __select_bucket(htab, hash);
1420	head = &b->head;
1421
1422	ret = htab_lock_bucket(htab, b, hash, &flags);
1423	if (ret)
1424		return ret;
1425
1426	l = lookup_elem_raw(head, hash, key, key_size);
1427
1428	if (l)
1429		hlist_nulls_del_rcu(&l->hash_node);
1430	else
1431		ret = -ENOENT;
1432
1433	htab_unlock_bucket(htab, b, hash, flags);
1434	if (l)
1435		htab_lru_push_free(htab, l);
1436	return ret;
1437}
1438
1439static void delete_all_elements(struct bpf_htab *htab)
1440{
1441	int i;
1442
1443	/* It's called from a worker thread, so disable migration here,
1444	 * since bpf_mem_cache_free() relies on that.
1445	 */
1446	migrate_disable();
1447	for (i = 0; i < htab->n_buckets; i++) {
1448		struct hlist_nulls_head *head = select_bucket(htab, i);
1449		struct hlist_nulls_node *n;
1450		struct htab_elem *l;
1451
1452		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1453			hlist_nulls_del_rcu(&l->hash_node);
1454			htab_elem_free(htab, l);
1455		}
1456	}
1457	migrate_enable();
1458}
1459
1460static void htab_free_malloced_timers(struct bpf_htab *htab)
1461{
1462	int i;
1463
1464	rcu_read_lock();
1465	for (i = 0; i < htab->n_buckets; i++) {
1466		struct hlist_nulls_head *head = select_bucket(htab, i);
1467		struct hlist_nulls_node *n;
1468		struct htab_elem *l;
1469
1470		hlist_nulls_for_each_entry(l, n, head, hash_node) {
1471			/* We only free timer on uref dropping to zero */
1472			bpf_obj_free_timer(htab->map.record, l->key + round_up(htab->map.key_size, 8));
1473		}
1474		cond_resched_rcu();
1475	}
1476	rcu_read_unlock();
1477}
1478
1479static void htab_map_free_timers(struct bpf_map *map)
1480{
1481	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1482
1483	/* We only free timer on uref dropping to zero */
1484	if (!btf_record_has_field(htab->map.record, BPF_TIMER))
1485		return;
1486	if (!htab_is_prealloc(htab))
1487		htab_free_malloced_timers(htab);
1488	else
1489		htab_free_prealloced_timers(htab);
1490}
1491
1492/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
1493static void htab_map_free(struct bpf_map *map)
1494{
1495	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1496	int i;
1497
1498	/* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
1499	 * bpf_free_used_maps() is called after bpf prog is no longer executing.
1500	 * There is no need to synchronize_rcu() here to protect map elements.
1501	 */
1502
1503	/* htab no longer uses call_rcu() directly. bpf_mem_alloc does it
 1504	 * underneath and is responsible for waiting for callbacks to finish
1505	 * during bpf_mem_alloc_destroy().
1506	 */
1507	if (!htab_is_prealloc(htab)) {
1508		delete_all_elements(htab);
1509	} else {
1510		htab_free_prealloced_fields(htab);
1511		prealloc_destroy(htab);
1512	}
1513
1514	free_percpu(htab->extra_elems);
1515	bpf_map_area_free(htab->buckets);
1516	bpf_mem_alloc_destroy(&htab->pcpu_ma);
1517	bpf_mem_alloc_destroy(&htab->ma);
1518	if (htab->use_percpu_counter)
1519		percpu_counter_destroy(&htab->pcount);
1520	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
1521		free_percpu(htab->map_locked[i]);
1522	lockdep_unregister_key(&htab->lockdep_key);
1523	bpf_map_area_free(htab);
1524}
1525
1526static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
1527				   struct seq_file *m)
1528{
1529	void *value;
1530
1531	rcu_read_lock();
1532
1533	value = htab_map_lookup_elem(map, key);
1534	if (!value) {
1535		rcu_read_unlock();
1536		return;
1537	}
1538
1539	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
1540	seq_puts(m, ": ");
1541	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
1542	seq_puts(m, "\n");
1543
1544	rcu_read_unlock();
1545}
1546
1547static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1548					     void *value, bool is_lru_map,
1549					     bool is_percpu, u64 flags)
1550{
1551	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1552	struct hlist_nulls_head *head;
1553	unsigned long bflags;
1554	struct htab_elem *l;
1555	u32 hash, key_size;
1556	struct bucket *b;
1557	int ret;
1558
1559	key_size = map->key_size;
1560
1561	hash = htab_map_hash(key, key_size, htab->hashrnd);
1562	b = __select_bucket(htab, hash);
1563	head = &b->head;
1564
1565	ret = htab_lock_bucket(htab, b, hash, &bflags);
1566	if (ret)
1567		return ret;
1568
1569	l = lookup_elem_raw(head, hash, key, key_size);
1570	if (!l) {
1571		ret = -ENOENT;
1572	} else {
1573		if (is_percpu) {
1574			u32 roundup_value_size = round_up(map->value_size, 8);
1575			void __percpu *pptr;
1576			int off = 0, cpu;
1577
1578			pptr = htab_elem_get_ptr(l, key_size);
1579			for_each_possible_cpu(cpu) {
1580				bpf_long_memcpy(value + off,
1581						per_cpu_ptr(pptr, cpu),
1582						roundup_value_size);
1583				off += roundup_value_size;
1584			}
1585		} else {
1586			u32 roundup_key_size = round_up(map->key_size, 8);
1587
1588			if (flags & BPF_F_LOCK)
1589				copy_map_value_locked(map, value, l->key +
1590						      roundup_key_size,
1591						      true);
1592			else
1593				copy_map_value(map, value, l->key +
1594					       roundup_key_size);
1595			check_and_init_map_value(map, value);
1596		}
1597
1598		hlist_nulls_del_rcu(&l->hash_node);
1599		if (!is_lru_map)
1600			free_htab_elem(htab, l);
1601	}
1602
1603	htab_unlock_bucket(htab, b, hash, bflags);
1604
1605	if (is_lru_map && l)
1606		htab_lru_push_free(htab, l);
1607
1608	return ret;
1609}
1610
1611static int htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1612					   void *value, u64 flags)
1613{
1614	return __htab_map_lookup_and_delete_elem(map, key, value, false, false,
1615						 flags);
1616}
1617
1618static int htab_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
1619						  void *key, void *value,
1620						  u64 flags)
1621{
1622	return __htab_map_lookup_and_delete_elem(map, key, value, false, true,
1623						 flags);
1624}
1625
1626static int htab_lru_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1627					       void *value, u64 flags)
1628{
1629	return __htab_map_lookup_and_delete_elem(map, key, value, true, false,
1630						 flags);
1631}
1632
1633static int htab_lru_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
1634						      void *key, void *value,
1635						      u64 flags)
1636{
1637	return __htab_map_lookup_and_delete_elem(map, key, value, true, true,
1638						 flags);
1639}
1640
1641static int
1642__htab_map_lookup_and_delete_batch(struct bpf_map *map,
1643				   const union bpf_attr *attr,
1644				   union bpf_attr __user *uattr,
1645				   bool do_delete, bool is_lru_map,
1646				   bool is_percpu)
1647{
1648	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1649	u32 bucket_cnt, total, key_size, value_size, roundup_key_size;
1650	void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val;
1651	void __user *uvalues = u64_to_user_ptr(attr->batch.values);
1652	void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
1653	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1654	u32 batch, max_count, size, bucket_size, map_id;
1655	struct htab_elem *node_to_free = NULL;
1656	u64 elem_map_flags, map_flags;
1657	struct hlist_nulls_head *head;
1658	struct hlist_nulls_node *n;
1659	unsigned long flags = 0;
1660	bool locked = false;
1661	struct htab_elem *l;
1662	struct bucket *b;
1663	int ret = 0;
1664
1665	elem_map_flags = attr->batch.elem_flags;
1666	if ((elem_map_flags & ~BPF_F_LOCK) ||
1667	    ((elem_map_flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
1668		return -EINVAL;
1669
1670	map_flags = attr->batch.flags;
1671	if (map_flags)
1672		return -EINVAL;
1673
1674	max_count = attr->batch.count;
1675	if (!max_count)
1676		return 0;
1677
1678	if (put_user(0, &uattr->batch.count))
1679		return -EFAULT;
1680
1681	batch = 0;
1682	if (ubatch && copy_from_user(&batch, ubatch, sizeof(batch)))
1683		return -EFAULT;
1684
1685	if (batch >= htab->n_buckets)
1686		return -ENOENT;
1687
1688	key_size = htab->map.key_size;
1689	roundup_key_size = round_up(htab->map.key_size, 8);
1690	value_size = htab->map.value_size;
1691	size = round_up(value_size, 8);
1692	if (is_percpu)
1693		value_size = size * num_possible_cpus();
1694	total = 0;
1695	/* while experimenting with hash tables with sizes ranging from 10 to
1696	 * 1000, it was observed that a bucket can have up to 5 entries.
1697	 */
1698	bucket_size = 5;
1699
1700alloc:
1701	/* We cannot do copy_from_user or copy_to_user inside
1702	 * the rcu_read_lock. Allocate enough space here.
1703	 */
1704	keys = kvmalloc_array(key_size, bucket_size, GFP_USER | __GFP_NOWARN);
1705	values = kvmalloc_array(value_size, bucket_size, GFP_USER | __GFP_NOWARN);
1706	if (!keys || !values) {
1707		ret = -ENOMEM;
1708		goto after_loop;
1709	}
1710
1711again:
1712	bpf_disable_instrumentation();
1713	rcu_read_lock();
1714again_nocopy:
1715	dst_key = keys;
1716	dst_val = values;
1717	b = &htab->buckets[batch];
1718	head = &b->head;
 1719	/* do not grab the lock unless we need it (bucket_cnt > 0). */
1720	if (locked) {
1721		ret = htab_lock_bucket(htab, b, batch, &flags);
1722		if (ret) {
1723			rcu_read_unlock();
1724			bpf_enable_instrumentation();
1725			goto after_loop;
1726		}
1727	}
1728
1729	bucket_cnt = 0;
1730	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
1731		bucket_cnt++;
1732
1733	if (bucket_cnt && !locked) {
1734		locked = true;
1735		goto again_nocopy;
1736	}
1737
1738	if (bucket_cnt > (max_count - total)) {
1739		if (total == 0)
1740			ret = -ENOSPC;
1741		/* Note that since bucket_cnt > 0 here, it is implicit
 1742		 * that the lock was grabbed, so release it.
1743		 */
1744		htab_unlock_bucket(htab, b, batch, flags);
1745		rcu_read_unlock();
1746		bpf_enable_instrumentation();
1747		goto after_loop;
1748	}
1749
1750	if (bucket_cnt > bucket_size) {
1751		bucket_size = bucket_cnt;
1752		/* Note that since bucket_cnt > 0 here, it is implicit
 1753		 * that the lock was grabbed, so release it.
1754		 */
1755		htab_unlock_bucket(htab, b, batch, flags);
1756		rcu_read_unlock();
1757		bpf_enable_instrumentation();
1758		kvfree(keys);
1759		kvfree(values);
1760		goto alloc;
1761	}
1762
1763	/* Next block is only safe to run if you have grabbed the lock */
1764	if (!locked)
1765		goto next_batch;
1766
1767	hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1768		memcpy(dst_key, l->key, key_size);
1769
1770		if (is_percpu) {
1771			int off = 0, cpu;
1772			void __percpu *pptr;
1773
1774			pptr = htab_elem_get_ptr(l, map->key_size);
1775			for_each_possible_cpu(cpu) {
1776				bpf_long_memcpy(dst_val + off,
1777						per_cpu_ptr(pptr, cpu), size);
1778				off += size;
1779			}
1780		} else {
1781			value = l->key + roundup_key_size;
1782			if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
1783				struct bpf_map **inner_map = value;
1784
1785				 /* Actual value is the id of the inner map */
1786				map_id = map->ops->map_fd_sys_lookup_elem(*inner_map);
1787				value = &map_id;
1788			}
1789
1790			if (elem_map_flags & BPF_F_LOCK)
1791				copy_map_value_locked(map, dst_val, value,
1792						      true);
1793			else
1794				copy_map_value(map, dst_val, value);
1795			check_and_init_map_value(map, dst_val);
1796		}
1797		if (do_delete) {
1798			hlist_nulls_del_rcu(&l->hash_node);
1799
1800			/* bpf_lru_push_free() will acquire lru_lock, which
1801			 * may cause deadlock. See comments in function
1802			 * prealloc_lru_pop(). Let us do bpf_lru_push_free()
1803			 * after releasing the bucket lock.
1804			 */
1805			if (is_lru_map) {
1806				l->batch_flink = node_to_free;
1807				node_to_free = l;
1808			} else {
1809				free_htab_elem(htab, l);
1810			}
1811		}
1812		dst_key += key_size;
1813		dst_val += value_size;
1814	}
1815
1816	htab_unlock_bucket(htab, b, batch, flags);
1817	locked = false;
1818
1819	while (node_to_free) {
1820		l = node_to_free;
1821		node_to_free = node_to_free->batch_flink;
1822		htab_lru_push_free(htab, l);
1823	}
1824
1825next_batch:
 1826	/* If we are not copying data, we can go to the next bucket and avoid
1827	 * unlocking the rcu.
1828	 */
1829	if (!bucket_cnt && (batch + 1 < htab->n_buckets)) {
1830		batch++;
1831		goto again_nocopy;
1832	}
1833
1834	rcu_read_unlock();
1835	bpf_enable_instrumentation();
1836	if (bucket_cnt && (copy_to_user(ukeys + total * key_size, keys,
1837	    key_size * bucket_cnt) ||
1838	    copy_to_user(uvalues + total * value_size, values,
1839	    value_size * bucket_cnt))) {
1840		ret = -EFAULT;
1841		goto after_loop;
1842	}
1843
1844	total += bucket_cnt;
1845	batch++;
1846	if (batch >= htab->n_buckets) {
1847		ret = -ENOENT;
1848		goto after_loop;
1849	}
1850	goto again;
1851
1852after_loop:
1853	if (ret == -EFAULT)
1854		goto out;
1855
1856	/* copy # of entries and next batch */
1857	ubatch = u64_to_user_ptr(attr->batch.out_batch);
1858	if (copy_to_user(ubatch, &batch, sizeof(batch)) ||
1859	    put_user(total, &uattr->batch.count))
1860		ret = -EFAULT;
1861
1862out:
1863	kvfree(keys);
1864	kvfree(values);
1865	return ret;
1866}
1867
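/*
 * Illustrative sketch, not part of hashtab.c: how user space typically
 * drives the batch interface above through libbpf.  "map_fd", "MAX_BATCH"
 * and the keys/values buffers are assumptions for the example; real code
 * sizes the buffers from the map's key_size/value_size and retries with
 * larger buffers when -ENOSPC signals that one bucket did not fit.
 *
 *	#include <errno.h>
 *	#include <bpf/bpf.h>
 *
 *	__u32 out_batch, count;
 *	void *in_batch = NULL;	// NULL on the first call
 *	int err;
 *
 *	do {
 *		count = MAX_BATCH;
 *		err = bpf_map_lookup_and_delete_batch(map_fd, in_batch,
 *						      &out_batch, keys, values,
 *						      &count, NULL);
 *		if (count)
 *			consume(keys, values, count);
 *		in_batch = &out_batch;	// resume from the returned bucket
 *	} while (!err);
 *	// -ENOENT (errno == ENOENT on pre-1.0 libbpf) means all buckets done
 */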
1868static int
1869htab_percpu_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1870			     union bpf_attr __user *uattr)
1871{
1872	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1873						  false, true);
1874}
1875
1876static int
1877htab_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
1878					const union bpf_attr *attr,
1879					union bpf_attr __user *uattr)
1880{
1881	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1882						  false, true);
1883}
1884
1885static int
1886htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1887		      union bpf_attr __user *uattr)
1888{
1889	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1890						  false, false);
1891}
1892
1893static int
1894htab_map_lookup_and_delete_batch(struct bpf_map *map,
1895				 const union bpf_attr *attr,
1896				 union bpf_attr __user *uattr)
1897{
1898	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1899						  false, false);
1900}
1901
1902static int
1903htab_lru_percpu_map_lookup_batch(struct bpf_map *map,
1904				 const union bpf_attr *attr,
1905				 union bpf_attr __user *uattr)
1906{
1907	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1908						  true, true);
1909}
1910
1911static int
1912htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
1913					    const union bpf_attr *attr,
1914					    union bpf_attr __user *uattr)
1915{
1916	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1917						  true, true);
1918}
1919
1920static int
1921htab_lru_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1922			  union bpf_attr __user *uattr)
1923{
1924	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1925						  true, false);
1926}
1927
1928static int
1929htab_lru_map_lookup_and_delete_batch(struct bpf_map *map,
1930				     const union bpf_attr *attr,
1931				     union bpf_attr __user *uattr)
1932{
1933	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1934						  true, false);
1935}
1936
1937struct bpf_iter_seq_hash_map_info {
1938	struct bpf_map *map;
1939	struct bpf_htab *htab;
1940	void *percpu_value_buf; // non-NULL means percpu hash
1941	u32 bucket_id;
1942	u32 skip_elems;
1943};
1944
1945static struct htab_elem *
1946bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
1947			   struct htab_elem *prev_elem)
1948{
1949	const struct bpf_htab *htab = info->htab;
1950	u32 skip_elems = info->skip_elems;
1951	u32 bucket_id = info->bucket_id;
1952	struct hlist_nulls_head *head;
1953	struct hlist_nulls_node *n;
1954	struct htab_elem *elem;
1955	struct bucket *b;
1956	u32 i, count;
1957
1958	if (bucket_id >= htab->n_buckets)
1959		return NULL;
1960
1961	/* try to find next elem in the same bucket */
1962	if (prev_elem) {
1963		/* no update/deletion on this bucket, prev_elem should still be valid
1964		 * and we won't skip elements.
1965		 */
1966		n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node));
1967		elem = hlist_nulls_entry_safe(n, struct htab_elem, hash_node);
1968		if (elem)
1969			return elem;
1970
1971		/* not found, unlock and go to the next bucket */
1972		b = &htab->buckets[bucket_id++];
1973		rcu_read_unlock();
1974		skip_elems = 0;
1975	}
1976
1977	for (i = bucket_id; i < htab->n_buckets; i++) {
1978		b = &htab->buckets[i];
1979		rcu_read_lock();
1980
1981		count = 0;
1982		head = &b->head;
1983		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
1984			if (count >= skip_elems) {
1985				info->bucket_id = i;
1986				info->skip_elems = count;
1987				return elem;
1988			}
1989			count++;
1990		}
1991
1992		rcu_read_unlock();
1993		skip_elems = 0;
1994	}
1995
1996	info->bucket_id = i;
1997	info->skip_elems = 0;
1998	return NULL;
1999}
2000
2001static void *bpf_hash_map_seq_start(struct seq_file *seq, loff_t *pos)
2002{
2003	struct bpf_iter_seq_hash_map_info *info = seq->private;
2004	struct htab_elem *elem;
2005
2006	elem = bpf_hash_map_seq_find_next(info, NULL);
2007	if (!elem)
2008		return NULL;
2009
2010	if (*pos == 0)
2011		++*pos;
2012	return elem;
2013}
2014
2015static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2016{
2017	struct bpf_iter_seq_hash_map_info *info = seq->private;
2018
2019	++*pos;
2020	++info->skip_elems;
2021	return bpf_hash_map_seq_find_next(info, v);
2022}
2023
2024static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
2025{
2026	struct bpf_iter_seq_hash_map_info *info = seq->private;
2027	u32 roundup_key_size, roundup_value_size;
2028	struct bpf_iter__bpf_map_elem ctx = {};
2029	struct bpf_map *map = info->map;
2030	struct bpf_iter_meta meta;
2031	int ret = 0, off = 0, cpu;
2032	struct bpf_prog *prog;
2033	void __percpu *pptr;
2034
2035	meta.seq = seq;
2036	prog = bpf_iter_get_info(&meta, elem == NULL);
2037	if (prog) {
2038		ctx.meta = &meta;
2039		ctx.map = info->map;
2040		if (elem) {
2041			roundup_key_size = round_up(map->key_size, 8);
2042			ctx.key = elem->key;
2043			if (!info->percpu_value_buf) {
2044				ctx.value = elem->key + roundup_key_size;
2045			} else {
2046				roundup_value_size = round_up(map->value_size, 8);
2047				pptr = htab_elem_get_ptr(elem, map->key_size);
2048				for_each_possible_cpu(cpu) {
2049					bpf_long_memcpy(info->percpu_value_buf + off,
2050							per_cpu_ptr(pptr, cpu),
2051							roundup_value_size);
2052					off += roundup_value_size;
2053				}
2054				ctx.value = info->percpu_value_buf;
2055			}
2056		}
2057		ret = bpf_iter_run_prog(prog, &ctx);
2058	}
2059
2060	return ret;
2061}
2062
2063static int bpf_hash_map_seq_show(struct seq_file *seq, void *v)
2064{
2065	return __bpf_hash_map_seq_show(seq, v);
2066}
2067
2068static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v)
2069{
2070	if (!v)
2071		(void)__bpf_hash_map_seq_show(seq, NULL);
2072	else
2073		rcu_read_unlock();
2074}
2075
2076static int bpf_iter_init_hash_map(void *priv_data,
2077				  struct bpf_iter_aux_info *aux)
2078{
2079	struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
2080	struct bpf_map *map = aux->map;
2081	void *value_buf;
2082	u32 buf_size;
2083
2084	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
2085	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
2086		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
2087		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
2088		if (!value_buf)
2089			return -ENOMEM;
2090
2091		seq_info->percpu_value_buf = value_buf;
2092	}
2093
2094	bpf_map_inc_with_uref(map);
2095	seq_info->map = map;
2096	seq_info->htab = container_of(map, struct bpf_htab, map);
2097	return 0;
2098}
2099
2100static void bpf_iter_fini_hash_map(void *priv_data)
2101{
2102	struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
2103
2104	bpf_map_put_with_uref(seq_info->map);
2105	kfree(seq_info->percpu_value_buf);
2106}
2107
2108static const struct seq_operations bpf_hash_map_seq_ops = {
2109	.start	= bpf_hash_map_seq_start,
2110	.next	= bpf_hash_map_seq_next,
2111	.stop	= bpf_hash_map_seq_stop,
2112	.show	= bpf_hash_map_seq_show,
2113};
2114
2115static const struct bpf_iter_seq_info iter_seq_info = {
2116	.seq_ops		= &bpf_hash_map_seq_ops,
2117	.init_seq_private	= bpf_iter_init_hash_map,
2118	.fini_seq_private	= bpf_iter_fini_hash_map,
2119	.seq_priv_size		= sizeof(struct bpf_iter_seq_hash_map_info),
2120};
2121
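/*
 * Illustrative sketch, not part of hashtab.c: a map-element iterator
 * program served by the seq operations above.  It assumes a hash map with
 * __u32 keys and __u64 values; user space attaches it to a map fd with a
 * bpf_iter link and reads the resulting fd like a seq_file.
 *
 *	SEC("iter/bpf_map_elem")
 *	int dump_elem(struct bpf_iter__bpf_map_elem *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		__u32 *key = ctx->key;
 *		__u64 *val = ctx->value;
 *
 *		// key/value are NULL on the final call (see seq_stop above)
 *		if (!key || !val)
 *			return 0;
 *
 *		BPF_SEQ_PRINTF(seq, "%u: %llu\n", *key, *val);
 *		return 0;
 *	}
 */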
2122static int bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn,
2123				  void *callback_ctx, u64 flags)
2124{
2125	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2126	struct hlist_nulls_head *head;
2127	struct hlist_nulls_node *n;
2128	struct htab_elem *elem;
2129	u32 roundup_key_size;
2130	int i, num_elems = 0;
2131	void __percpu *pptr;
2132	struct bucket *b;
2133	void *key, *val;
2134	bool is_percpu;
2135	u64 ret = 0;
2136
2137	if (flags != 0)
2138		return -EINVAL;
2139
2140	is_percpu = htab_is_percpu(htab);
2141
2142	roundup_key_size = round_up(map->key_size, 8);
2143	/* disable migration so percpu value prepared here will be the
2144	 * same as the one seen by the bpf program with bpf_map_lookup_elem().
2145	 */
2146	if (is_percpu)
2147		migrate_disable();
2148	for (i = 0; i < htab->n_buckets; i++) {
2149		b = &htab->buckets[i];
2150		rcu_read_lock();
2151		head = &b->head;
2152		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
2153			key = elem->key;
2154			if (is_percpu) {
2155				/* current cpu value for percpu map */
2156				pptr = htab_elem_get_ptr(elem, map->key_size);
2157				val = this_cpu_ptr(pptr);
2158			} else {
2159				val = elem->key + roundup_key_size;
2160			}
2161			num_elems++;
2162			ret = callback_fn((u64)(long)map, (u64)(long)key,
2163					  (u64)(long)val, (u64)(long)callback_ctx, 0);
2164			/* return value: 0 - continue, 1 - stop and return */
2165			if (ret) {
2166				rcu_read_unlock();
2167				goto out;
2168			}
2169		}
2170		rcu_read_unlock();
2171	}
2172out:
2173	if (is_percpu)
2174		migrate_enable();
2175	return num_elems;
2176}
2177
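/*
 * Illustrative sketch, not part of hashtab.c: a BPF program using the
 * bpf_for_each_map_elem() helper that the function above implements for
 * hash maps.  The map "my_hash", the section name and the __u32/__u64
 * types are assumptions for the example; flags must be 0.
 *
 *	static long sum_cb(struct bpf_map *map, __u32 *key, __u64 *val,
 *			   void *ctx)
 *	{
 *		__u64 *sum = ctx;
 *
 *		*sum += *val;
 *		return 0;	// 0 - continue, 1 - stop
 *	}
 *
 *	SEC("tracepoint/syscalls/sys_enter_getpid")
 *	int sum_values(void *ctx)
 *	{
 *		__u64 sum = 0;
 *
 *		// returns the number of elements visited, as counted above
 *		bpf_for_each_map_elem(&my_hash, sum_cb, &sum, 0);
 *		return 0;
 *	}
 */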
2178BTF_ID_LIST_SINGLE(htab_map_btf_ids, struct, bpf_htab)
2179const struct bpf_map_ops htab_map_ops = {
2180	.map_meta_equal = bpf_map_meta_equal,
2181	.map_alloc_check = htab_map_alloc_check,
2182	.map_alloc = htab_map_alloc,
2183	.map_free = htab_map_free,
2184	.map_get_next_key = htab_map_get_next_key,
2185	.map_release_uref = htab_map_free_timers,
2186	.map_lookup_elem = htab_map_lookup_elem,
2187	.map_lookup_and_delete_elem = htab_map_lookup_and_delete_elem,
2188	.map_update_elem = htab_map_update_elem,
2189	.map_delete_elem = htab_map_delete_elem,
2190	.map_gen_lookup = htab_map_gen_lookup,
2191	.map_seq_show_elem = htab_map_seq_show_elem,
2192	.map_set_for_each_callback_args = map_set_for_each_callback_args,
2193	.map_for_each_callback = bpf_for_each_hash_elem,
2194	BATCH_OPS(htab),
2195	.map_btf_id = &htab_map_btf_ids[0],
2196	.iter_seq_info = &iter_seq_info,
2197};
2198
2199const struct bpf_map_ops htab_lru_map_ops = {
2200	.map_meta_equal = bpf_map_meta_equal,
2201	.map_alloc_check = htab_map_alloc_check,
2202	.map_alloc = htab_map_alloc,
2203	.map_free = htab_map_free,
2204	.map_get_next_key = htab_map_get_next_key,
2205	.map_release_uref = htab_map_free_timers,
2206	.map_lookup_elem = htab_lru_map_lookup_elem,
2207	.map_lookup_and_delete_elem = htab_lru_map_lookup_and_delete_elem,
2208	.map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
2209	.map_update_elem = htab_lru_map_update_elem,
2210	.map_delete_elem = htab_lru_map_delete_elem,
2211	.map_gen_lookup = htab_lru_map_gen_lookup,
2212	.map_seq_show_elem = htab_map_seq_show_elem,
2213	.map_set_for_each_callback_args = map_set_for_each_callback_args,
2214	.map_for_each_callback = bpf_for_each_hash_elem,
2215	BATCH_OPS(htab_lru),
2216	.map_btf_id = &htab_map_btf_ids[0],
2217	.iter_seq_info = &iter_seq_info,
2218};
2219
2220/* Called from eBPF program */
2221static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
2222{
2223	struct htab_elem *l = __htab_map_lookup_elem(map, key);
2224
2225	if (l)
2226		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
2227	else
2228		return NULL;
2229}
2230
2231static void *htab_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
2232{
2233	struct htab_elem *l;
2234
2235	if (cpu >= nr_cpu_ids)
2236		return NULL;
2237
2238	l = __htab_map_lookup_elem(map, key);
2239	if (l)
2240		return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
2241	else
2242		return NULL;
2243}
2244
2245static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
2246{
2247	struct htab_elem *l = __htab_map_lookup_elem(map, key);
2248
2249	if (l) {
2250		bpf_lru_node_set_ref(&l->lru_node);
2251		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
2252	}
2253
2254	return NULL;
2255}
2256
2257static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
2258{
2259	struct htab_elem *l;
2260
2261	if (cpu >= nr_cpu_ids)
2262		return NULL;
2263
2264	l = __htab_map_lookup_elem(map, key);
2265	if (l) {
2266		bpf_lru_node_set_ref(&l->lru_node);
2267		return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
2268	}
2269
2270	return NULL;
2271}
2272
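/*
 * Illustrative sketch, not part of hashtab.c: the two lookups above back
 * the bpf_map_lookup_percpu_elem() helper, which lets a BPF program read
 * a specific CPU's slot of a (LRU) per-cpu hash map.  "pcpu_hash" and the
 * __u64 value type are assumptions for the example.
 *
 *	__u32 key = 0, cpu = 1;
 *	__u64 *val;
 *
 *	val = bpf_map_lookup_percpu_elem(&pcpu_hash, &key, cpu);
 *	if (val)
 *		bpf_printk("cpu%u: %llu", cpu, *val);
 */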
2273int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
2274{
2275	struct htab_elem *l;
2276	void __percpu *pptr;
2277	int ret = -ENOENT;
2278	int cpu, off = 0;
2279	u32 size;
2280
2281	/* per_cpu areas are zero-filled and bpf programs can only
2282	 * access 'value_size' of them, so copying rounded areas
2283	 * will not leak any kernel data
2284	 */
2285	size = round_up(map->value_size, 8);
2286	rcu_read_lock();
2287	l = __htab_map_lookup_elem(map, key);
2288	if (!l)
2289		goto out;
2290	/* We do not mark LRU map element here in order to not mess up
2291	 * eviction heuristics when user space does a map walk.
2292	 */
2293	pptr = htab_elem_get_ptr(l, map->key_size);
2294	for_each_possible_cpu(cpu) {
2295		bpf_long_memcpy(value + off,
2296				per_cpu_ptr(pptr, cpu), size);
2297		off += size;
2298	}
2299	ret = 0;
2300out:
2301	rcu_read_unlock();
2302	return ret;
2303}
2304
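/*
 * Illustrative sketch, not part of hashtab.c: the user-space side of the
 * copy above.  For per-cpu hash maps, bpf_map_lookup_elem() expects a
 * buffer with one slot per possible CPU, each slot being the value size
 * rounded up to 8 bytes.  "map_fd" and the 8-byte value are assumptions.
 *
 *	#include <stdlib.h>
 *	#include <bpf/bpf.h>
 *	#include <bpf/libbpf.h>
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	__u64 *vals = calloc(ncpus, sizeof(__u64));
 *	__u64 total = 0;
 *	__u32 key = 0;
 *
 *	if (vals && !bpf_map_lookup_elem(map_fd, &key, vals))
 *		for (int cpu = 0; cpu < ncpus; cpu++)
 *			total += vals[cpu];
 *	free(vals);
 */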
2305int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
2306			   u64 map_flags)
2307{
2308	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2309	int ret;
2310
2311	rcu_read_lock();
2312	if (htab_is_lru(htab))
2313		ret = __htab_lru_percpu_map_update_elem(map, key, value,
2314							map_flags, true);
2315	else
2316		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
2317						    true);
2318	rcu_read_unlock();
2319
2320	return ret;
2321}
2322
2323static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
2324					  struct seq_file *m)
2325{
2326	struct htab_elem *l;
2327	void __percpu *pptr;
2328	int cpu;
2329
2330	rcu_read_lock();
2331
2332	l = __htab_map_lookup_elem(map, key);
2333	if (!l) {
2334		rcu_read_unlock();
2335		return;
2336	}
2337
2338	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
2339	seq_puts(m, ": {\n");
2340	pptr = htab_elem_get_ptr(l, map->key_size);
2341	for_each_possible_cpu(cpu) {
2342		seq_printf(m, "\tcpu%d: ", cpu);
2343		btf_type_seq_show(map->btf, map->btf_value_type_id,
2344				  per_cpu_ptr(pptr, cpu), m);
2345		seq_puts(m, "\n");
2346	}
2347	seq_puts(m, "}\n");
2348
2349	rcu_read_unlock();
2350}
2351
2352const struct bpf_map_ops htab_percpu_map_ops = {
2353	.map_meta_equal = bpf_map_meta_equal,
2354	.map_alloc_check = htab_map_alloc_check,
2355	.map_alloc = htab_map_alloc,
2356	.map_free = htab_map_free,
2357	.map_get_next_key = htab_map_get_next_key,
2358	.map_lookup_elem = htab_percpu_map_lookup_elem,
2359	.map_lookup_and_delete_elem = htab_percpu_map_lookup_and_delete_elem,
2360	.map_update_elem = htab_percpu_map_update_elem,
2361	.map_delete_elem = htab_map_delete_elem,
2362	.map_lookup_percpu_elem = htab_percpu_map_lookup_percpu_elem,
2363	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
2364	.map_set_for_each_callback_args = map_set_for_each_callback_args,
2365	.map_for_each_callback = bpf_for_each_hash_elem,
2366	BATCH_OPS(htab_percpu),
2367	.map_btf_id = &htab_map_btf_ids[0],
2368	.iter_seq_info = &iter_seq_info,
2369};
2370
2371const struct bpf_map_ops htab_lru_percpu_map_ops = {
2372	.map_meta_equal = bpf_map_meta_equal,
2373	.map_alloc_check = htab_map_alloc_check,
2374	.map_alloc = htab_map_alloc,
2375	.map_free = htab_map_free,
2376	.map_get_next_key = htab_map_get_next_key,
2377	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
2378	.map_lookup_and_delete_elem = htab_lru_percpu_map_lookup_and_delete_elem,
2379	.map_update_elem = htab_lru_percpu_map_update_elem,
2380	.map_delete_elem = htab_lru_map_delete_elem,
2381	.map_lookup_percpu_elem = htab_lru_percpu_map_lookup_percpu_elem,
2382	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
2383	.map_set_for_each_callback_args = map_set_for_each_callback_args,
2384	.map_for_each_callback = bpf_for_each_hash_elem,
2385	BATCH_OPS(htab_lru_percpu),
2386	.map_btf_id = &htab_map_btf_ids[0],
2387	.iter_seq_info = &iter_seq_info,
2388};
2389
2390static int fd_htab_map_alloc_check(union bpf_attr *attr)
2391{
2392	if (attr->value_size != sizeof(u32))
2393		return -EINVAL;
2394	return htab_map_alloc_check(attr);
2395}
2396
2397static void fd_htab_map_free(struct bpf_map *map)
2398{
2399	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2400	struct hlist_nulls_node *n;
2401	struct hlist_nulls_head *head;
2402	struct htab_elem *l;
2403	int i;
2404
2405	for (i = 0; i < htab->n_buckets; i++) {
2406		head = select_bucket(htab, i);
2407
2408		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
2409			void *ptr = fd_htab_map_get_ptr(map, l);
2410
2411			map->ops->map_fd_put_ptr(ptr);
2412		}
2413	}
2414
2415	htab_map_free(map);
2416}
2417
2418/* only called from syscall */
2419int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
2420{
2421	void **ptr;
2422	int ret = 0;
2423
2424	if (!map->ops->map_fd_sys_lookup_elem)
2425		return -ENOTSUPP;
2426
2427	rcu_read_lock();
2428	ptr = htab_map_lookup_elem(map, key);
2429	if (ptr)
2430		*value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
2431	else
2432		ret = -ENOENT;
2433	rcu_read_unlock();
2434
2435	return ret;
2436}
2437
2438/* only called from syscall */
2439int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
2440				void *key, void *value, u64 map_flags)
2441{
2442	void *ptr;
2443	int ret;
2444	u32 ufd = *(u32 *)value;
2445
2446	ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
2447	if (IS_ERR(ptr))
2448		return PTR_ERR(ptr);
2449
2450	ret = htab_map_update_elem(map, key, &ptr, map_flags);
2451	if (ret)
2452		map->ops->map_fd_put_ptr(ptr);
2453
2454	return ret;
2455}
2456
2457static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
2458{
2459	struct bpf_map *map, *inner_map_meta;
2460
2461	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
2462	if (IS_ERR(inner_map_meta))
2463		return inner_map_meta;
2464
2465	map = htab_map_alloc(attr);
2466	if (IS_ERR(map)) {
2467		bpf_map_meta_free(inner_map_meta);
2468		return map;
2469	}
2470
2471	map->inner_map_meta = inner_map_meta;
2472
2473	return map;
2474}
2475
2476static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
2477{
2478	struct bpf_map **inner_map  = htab_map_lookup_elem(map, key);
2479
2480	if (!inner_map)
2481		return NULL;
2482
2483	return READ_ONCE(*inner_map);
2484}
2485
2486static int htab_of_map_gen_lookup(struct bpf_map *map,
2487				  struct bpf_insn *insn_buf)
2488{
2489	struct bpf_insn *insn = insn_buf;
2490	const int ret = BPF_REG_0;
2491
2492	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
2493		     (void *(*)(struct bpf_map *map, void *key))NULL));
2494	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
2495	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
2496	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
2497				offsetof(struct htab_elem, key) +
2498				round_up(map->key_size, 8));
2499	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
2500
2501	return insn - insn_buf;
2502}
2503
2504static void htab_of_map_free(struct bpf_map *map)
2505{
2506	bpf_map_meta_free(map->inner_map_meta);
2507	fd_htab_map_free(map);
2508}
2509
2510const struct bpf_map_ops htab_of_maps_map_ops = {
2511	.map_alloc_check = fd_htab_map_alloc_check,
2512	.map_alloc = htab_of_map_alloc,
2513	.map_free = htab_of_map_free,
2514	.map_get_next_key = htab_map_get_next_key,
2515	.map_lookup_elem = htab_of_map_lookup_elem,
2516	.map_delete_elem = htab_map_delete_elem,
2517	.map_fd_get_ptr = bpf_map_fd_get_ptr,
2518	.map_fd_put_ptr = bpf_map_fd_put_ptr,
2519	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
2520	.map_gen_lookup = htab_of_map_gen_lookup,
2521	.map_check_btf = map_check_no_btf,
2522	BATCH_OPS(htab),
2523	.map_btf_id = &htab_map_btf_ids[0],
2524};
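/*
 * Illustrative sketch, not part of hashtab.c: creating and populating a
 * hash-of-maps from user space with libbpf.  fd_htab_map_alloc_check()
 * above is why the outer value_size must be sizeof(__u32): updates take an
 * inner map fd and syscall lookups return the inner map id.  Map names,
 * sizes and key values are assumptions for the example.
 *
 *	#include <bpf/bpf.h>
 *
 *	int inner_fd, outer_fd, err;
 *	__u32 key = 1;
 *
 *	inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "inner",
 *				  sizeof(__u32), sizeof(__u64), 4, NULL);
 *	LIBBPF_OPTS(bpf_map_create_opts, opts, .inner_map_fd = inner_fd);
 *	outer_fd = bpf_map_create(BPF_MAP_TYPE_HASH_OF_MAPS, "outer",
 *				  sizeof(__u32), sizeof(__u32), 16, &opts);
 *	err = bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_ANY);
 */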
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   3 * Copyright (c) 2016 Facebook
   4 */
   5#include <linux/bpf.h>
   6#include <linux/btf.h>
   7#include <linux/jhash.h>
   8#include <linux/filter.h>
   9#include <linux/rculist_nulls.h>
  10#include <linux/rcupdate_wait.h>
  11#include <linux/random.h>
  12#include <uapi/linux/btf.h>
  13#include <linux/rcupdate_trace.h>
  14#include <linux/btf_ids.h>
  15#include "percpu_freelist.h"
  16#include "bpf_lru_list.h"
  17#include "map_in_map.h"
  18#include <linux/bpf_mem_alloc.h>
  19
  20#define HTAB_CREATE_FLAG_MASK						\
  21	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE |	\
  22	 BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED)
  23
  24#define BATCH_OPS(_name)			\
  25	.map_lookup_batch =			\
  26	_name##_map_lookup_batch,		\
  27	.map_lookup_and_delete_batch =		\
  28	_name##_map_lookup_and_delete_batch,	\
  29	.map_update_batch =			\
  30	generic_map_update_batch,		\
  31	.map_delete_batch =			\
  32	generic_map_delete_batch
  33
  34/*
  35 * The bucket lock has two protection scopes:
  36 *
  37 * 1) Serializing concurrent operations from BPF programs on different
  38 *    CPUs
  39 *
  40 * 2) Serializing concurrent operations from BPF programs and sys_bpf()
  41 *
  42 * BPF programs can execute in any context including perf, kprobes and
  43 * tracing. As there are almost no limits where perf, kprobes and tracing
  44 * can be invoked from the lock operations need to be protected against
  45 * deadlocks. Deadlocks can be caused by recursion and by an invocation in
  46 * the lock held section when functions which acquire this lock are invoked
  47 * from sys_bpf(). BPF recursion is prevented by incrementing the per CPU
  48 * variable bpf_prog_active, which prevents BPF programs attached to perf
  49 * events, kprobes and tracing to be invoked before the prior invocation
  50 * from one of these contexts completed. sys_bpf() uses the same mechanism
  51 * by pinning the task to the current CPU and incrementing the recursion
  52 * protection across the map operation.
  53 *
  54 * This has subtle implications on PREEMPT_RT. PREEMPT_RT forbids certain
  55 * operations like memory allocations (even with GFP_ATOMIC) from atomic
  56 * contexts. This is required because even with GFP_ATOMIC the memory
  57 * allocator calls into code paths which acquire locks with long held lock
  58 * sections. To ensure the deterministic behaviour these locks are regular
  59 * spinlocks, which are converted to 'sleepable' spinlocks on RT. The only
  60 * true atomic contexts on an RT kernel are the low level hardware
  61 * handling, scheduling, low level interrupt handling, NMIs etc. None of
  62 * these contexts should ever do memory allocations.
  63 *
  64 * As regular device interrupt handlers and soft interrupts are forced into
  65 * thread context, the existing code which does
  66 *   spin_lock*(); alloc(GFP_ATOMIC); spin_unlock*();
  67 * just works.
  68 *
  69 * In theory the BPF locks could be converted to regular spinlocks as well,
  70 * but the bucket locks and percpu_freelist locks can be taken from
  71 * arbitrary contexts (perf, kprobes, tracepoints) which are required to be
  72 * atomic contexts even on RT. Before the introduction of bpf_mem_alloc,
  73 * it is only safe to use raw spinlock for preallocated hash map on a RT kernel,
  74 * because there is no memory allocation within the lock held sections. However
  75 * after hash map was fully converted to use bpf_mem_alloc, there will be
  76 * non-synchronous memory allocation for non-preallocated hash map, so it is
  77 * safe to always use raw spinlock for bucket lock.
  78 */
  79struct bucket {
  80	struct hlist_nulls_head head;
  81	raw_spinlock_t raw_lock;
  82};
  83
  84#define HASHTAB_MAP_LOCK_COUNT 8
  85#define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)
  86
  87struct bpf_htab {
  88	struct bpf_map map;
  89	struct bpf_mem_alloc ma;
  90	struct bpf_mem_alloc pcpu_ma;
  91	struct bucket *buckets;
  92	void *elems;
  93	union {
  94		struct pcpu_freelist freelist;
  95		struct bpf_lru lru;
  96	};
  97	struct htab_elem *__percpu *extra_elems;
   98	/* the number of elements in a non-preallocated hashtable is kept
  99	 * in either pcount or count
 100	 */
 101	struct percpu_counter pcount;
 102	atomic_t count;
 103	bool use_percpu_counter;
 104	u32 n_buckets;	/* number of hash buckets */
 105	u32 elem_size;	/* size of each element in bytes */
 106	u32 hashrnd;
 107	struct lock_class_key lockdep_key;
 108	int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT];
 109};
 110
 111/* each htab element is struct htab_elem + key + value */
 112struct htab_elem {
 113	union {
 114		struct hlist_nulls_node hash_node;
 115		struct {
 116			void *padding;
 117			union {
 118				struct pcpu_freelist_node fnode;
 119				struct htab_elem *batch_flink;
 120			};
 121		};
 122	};
 123	union {
 124		/* pointer to per-cpu pointer */
 125		void *ptr_to_pptr;
 126		struct bpf_lru_node lru_node;
 127	};
 128	u32 hash;
 129	char key[] __aligned(8);
 130};
 131
 132static inline bool htab_is_prealloc(const struct bpf_htab *htab)
 133{
 134	return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
 135}
 136
 137static void htab_init_buckets(struct bpf_htab *htab)
 138{
 139	unsigned int i;
 140
 141	for (i = 0; i < htab->n_buckets; i++) {
 142		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
 143		raw_spin_lock_init(&htab->buckets[i].raw_lock);
 144		lockdep_set_class(&htab->buckets[i].raw_lock,
 145					  &htab->lockdep_key);
 146		cond_resched();
 147	}
 148}
 149
 150static inline int htab_lock_bucket(const struct bpf_htab *htab,
 151				   struct bucket *b, u32 hash,
 152				   unsigned long *pflags)
 153{
 154	unsigned long flags;
 155
 156	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
 157
 158	preempt_disable();
 159	local_irq_save(flags);
 160	if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
 161		__this_cpu_dec(*(htab->map_locked[hash]));
 162		local_irq_restore(flags);
 163		preempt_enable();
 164		return -EBUSY;
 165	}
 166
 167	raw_spin_lock(&b->raw_lock);
 168	*pflags = flags;
 169
 170	return 0;
 171}
 172
 173static inline void htab_unlock_bucket(const struct bpf_htab *htab,
 174				      struct bucket *b, u32 hash,
 175				      unsigned long flags)
 176{
 177	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
 178	raw_spin_unlock(&b->raw_lock);
 179	__this_cpu_dec(*(htab->map_locked[hash]));
 180	local_irq_restore(flags);
 181	preempt_enable();
 182}
 183
 184static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
 185
 186static bool htab_is_lru(const struct bpf_htab *htab)
 187{
 188	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
 189		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
 190}
 191
 192static bool htab_is_percpu(const struct bpf_htab *htab)
 193{
 194	return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 195		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
 196}
 197
 198static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
 199				     void __percpu *pptr)
 200{
 201	*(void __percpu **)(l->key + key_size) = pptr;
 202}
 203
 204static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
 205{
 206	return *(void __percpu **)(l->key + key_size);
 207}
 208
 209static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
 210{
 211	return *(void **)(l->key + roundup(map->key_size, 8));
 212}
 213
 214static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
 215{
 216	return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size);
 217}
 218
 219static bool htab_has_extra_elems(struct bpf_htab *htab)
 220{
 221	return !htab_is_percpu(htab) && !htab_is_lru(htab);
 222}
 223
 224static void htab_free_prealloced_timers_and_wq(struct bpf_htab *htab)
 225{
 226	u32 num_entries = htab->map.max_entries;
 227	int i;
 228
 229	if (htab_has_extra_elems(htab))
 230		num_entries += num_possible_cpus();
 231
 232	for (i = 0; i < num_entries; i++) {
 233		struct htab_elem *elem;
 234
 235		elem = get_htab_elem(htab, i);
 236		if (btf_record_has_field(htab->map.record, BPF_TIMER))
 237			bpf_obj_free_timer(htab->map.record,
 238					   elem->key + round_up(htab->map.key_size, 8));
 239		if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE))
 240			bpf_obj_free_workqueue(htab->map.record,
 241					       elem->key + round_up(htab->map.key_size, 8));
 242		cond_resched();
 243	}
 244}
 245
 246static void htab_free_prealloced_fields(struct bpf_htab *htab)
 247{
 248	u32 num_entries = htab->map.max_entries;
 249	int i;
 250
 251	if (IS_ERR_OR_NULL(htab->map.record))
 252		return;
 253	if (htab_has_extra_elems(htab))
 254		num_entries += num_possible_cpus();
 255	for (i = 0; i < num_entries; i++) {
 256		struct htab_elem *elem;
 257
 258		elem = get_htab_elem(htab, i);
 259		if (htab_is_percpu(htab)) {
 260			void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size);
 261			int cpu;
 262
 263			for_each_possible_cpu(cpu) {
 264				bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
 265				cond_resched();
 266			}
 267		} else {
 268			bpf_obj_free_fields(htab->map.record, elem->key + round_up(htab->map.key_size, 8));
 269			cond_resched();
 270		}
 271		cond_resched();
 272	}
 273}
 274
 275static void htab_free_elems(struct bpf_htab *htab)
 276{
 277	int i;
 278
 279	if (!htab_is_percpu(htab))
 280		goto free_elems;
 281
 282	for (i = 0; i < htab->map.max_entries; i++) {
 283		void __percpu *pptr;
 284
 285		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
 286					 htab->map.key_size);
 287		free_percpu(pptr);
 288		cond_resched();
 289	}
 290free_elems:
 291	bpf_map_area_free(htab->elems);
 292}
 293
 294/* The LRU list has a lock (lru_lock). Each htab bucket has a lock
 295 * (bucket_lock). If both locks need to be acquired together, the lock
 296 * order is always lru_lock -> bucket_lock and this only happens in
 297 * bpf_lru_list.c logic. For example, certain code path of
 298 * bpf_lru_pop_free(), which is called by function prealloc_lru_pop(),
 299 * will acquire lru_lock first followed by acquiring bucket_lock.
 300 *
 301 * In hashtab.c, to avoid deadlock, lock acquisition of
 302 * bucket_lock followed by lru_lock is not allowed. In such cases,
 303 * bucket_lock needs to be released first before acquiring lru_lock.
 304 */
 305static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
 306					  u32 hash)
 307{
 308	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
 309	struct htab_elem *l;
 310
 311	if (node) {
 312		bpf_map_inc_elem_count(&htab->map);
 313		l = container_of(node, struct htab_elem, lru_node);
 314		memcpy(l->key, key, htab->map.key_size);
 315		return l;
 316	}
 317
 318	return NULL;
 319}
 320
 321static int prealloc_init(struct bpf_htab *htab)
 322{
 323	u32 num_entries = htab->map.max_entries;
 324	int err = -ENOMEM, i;
 325
 326	if (htab_has_extra_elems(htab))
 327		num_entries += num_possible_cpus();
 328
 329	htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries,
 330					 htab->map.numa_node);
 331	if (!htab->elems)
 332		return -ENOMEM;
 333
 334	if (!htab_is_percpu(htab))
 335		goto skip_percpu_elems;
 336
 337	for (i = 0; i < num_entries; i++) {
 338		u32 size = round_up(htab->map.value_size, 8);
 339		void __percpu *pptr;
 340
 341		pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
 342					    GFP_USER | __GFP_NOWARN);
 343		if (!pptr)
 344			goto free_elems;
 345		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
 346				  pptr);
 347		cond_resched();
 348	}
 349
 350skip_percpu_elems:
 351	if (htab_is_lru(htab))
 352		err = bpf_lru_init(&htab->lru,
 353				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
 354				   offsetof(struct htab_elem, hash) -
 355				   offsetof(struct htab_elem, lru_node),
 356				   htab_lru_map_delete_node,
 357				   htab);
 358	else
 359		err = pcpu_freelist_init(&htab->freelist);
 360
 361	if (err)
 362		goto free_elems;
 363
 364	if (htab_is_lru(htab))
 365		bpf_lru_populate(&htab->lru, htab->elems,
 366				 offsetof(struct htab_elem, lru_node),
 367				 htab->elem_size, num_entries);
 368	else
 369		pcpu_freelist_populate(&htab->freelist,
 370				       htab->elems + offsetof(struct htab_elem, fnode),
 371				       htab->elem_size, num_entries);
 372
 373	return 0;
 374
 375free_elems:
 376	htab_free_elems(htab);
 377	return err;
 378}
 379
 380static void prealloc_destroy(struct bpf_htab *htab)
 381{
 382	htab_free_elems(htab);
 383
 384	if (htab_is_lru(htab))
 385		bpf_lru_destroy(&htab->lru);
 386	else
 387		pcpu_freelist_destroy(&htab->freelist);
 388}
 389
 390static int alloc_extra_elems(struct bpf_htab *htab)
 391{
 392	struct htab_elem *__percpu *pptr, *l_new;
 393	struct pcpu_freelist_node *l;
 394	int cpu;
 395
 396	pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8,
 397				    GFP_USER | __GFP_NOWARN);
 398	if (!pptr)
 399		return -ENOMEM;
 400
 401	for_each_possible_cpu(cpu) {
 402		l = pcpu_freelist_pop(&htab->freelist);
 403		/* pop will succeed, since prealloc_init()
 404		 * preallocated extra num_possible_cpus elements
 405		 */
 406		l_new = container_of(l, struct htab_elem, fnode);
 407		*per_cpu_ptr(pptr, cpu) = l_new;
 408	}
 409	htab->extra_elems = pptr;
 410	return 0;
 411}
 412
 413/* Called from syscall */
 414static int htab_map_alloc_check(union bpf_attr *attr)
 415{
 416	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 417		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
 418	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
 419		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
 420	/* percpu_lru means each cpu has its own LRU list.
 421	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
 422	 * the map's value itself is percpu.  percpu_lru has
 423	 * nothing to do with the map's value.
 424	 */
 425	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 426	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
 427	bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED);
 428	int numa_node = bpf_map_attr_numa_node(attr);
 429
 430	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
 431		     offsetof(struct htab_elem, hash_node.pprev));
 432
 433	if (zero_seed && !capable(CAP_SYS_ADMIN))
 434		/* Guard against local DoS, and discourage production use. */
 435		return -EPERM;
 436
 437	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK ||
 438	    !bpf_map_flags_access_ok(attr->map_flags))
 439		return -EINVAL;
 440
 441	if (!lru && percpu_lru)
 442		return -EINVAL;
 443
 444	if (lru && !prealloc)
 445		return -ENOTSUPP;
 446
 447	if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
 448		return -EINVAL;
 449
 450	/* check sanity of attributes.
 451	 * value_size == 0 may be allowed in the future to use map as a set
 452	 */
 453	if (attr->max_entries == 0 || attr->key_size == 0 ||
 454	    attr->value_size == 0)
 455		return -EINVAL;
 456
 457	if ((u64)attr->key_size + attr->value_size >= KMALLOC_MAX_SIZE -
 458	   sizeof(struct htab_elem))
 459		/* if key_size + value_size is bigger, the user space won't be
 460		 * able to access the elements via bpf syscall. This check
 461		 * also makes sure that the elem_size doesn't overflow and it's
 462		 * kmalloc-able later in htab_map_update_elem()
 463		 */
 464		return -E2BIG;
 465	/* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */
 466	if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE)
 467		return -E2BIG;
 468
 469	return 0;
 470}
 471
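/*
 * Illustrative sketch, not part of hashtab.c: a BTF-defined map in a BPF
 * object that satisfies the checks above.  Note that LRU hash maps must
 * stay preallocated (lru && !prealloc is rejected with -ENOTSUPP) and
 * BPF_F_ZERO_SEED requires CAP_SYS_ADMIN.  The name and sizes are
 * assumptions for the example.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_LRU_HASH);
 *		__uint(max_entries, 10240);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} lru_events SEC(".maps");
 */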
 472static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 473{
 474	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 475		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
 476	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
 477		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
 478	/* percpu_lru means each cpu has its own LRU list.
 479	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
 480	 * the map's value itself is percpu.  percpu_lru has
 481	 * nothing to do with the map's value.
 482	 */
 483	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 484	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
 485	struct bpf_htab *htab;
 486	int err, i;
 487
 488	htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
 489	if (!htab)
 490		return ERR_PTR(-ENOMEM);
 491
 492	lockdep_register_key(&htab->lockdep_key);
 493
 494	bpf_map_init_from_attr(&htab->map, attr);
 495
 496	if (percpu_lru) {
  497		/* ensure each CPU's lru list has >= 1 element.
  498		 * While we are at it, make each lru list have the same
  499		 * number of elements.
 500		 */
 501		htab->map.max_entries = roundup(attr->max_entries,
 502						num_possible_cpus());
 503		if (htab->map.max_entries < attr->max_entries)
 504			htab->map.max_entries = rounddown(attr->max_entries,
 505							  num_possible_cpus());
 506	}
 507
 508	/* hash table size must be power of 2; roundup_pow_of_two() can overflow
 509	 * into UB on 32-bit arches, so check that first
 510	 */
 511	err = -E2BIG;
 512	if (htab->map.max_entries > 1UL << 31)
 513		goto free_htab;
 514
 515	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
 516
 517	htab->elem_size = sizeof(struct htab_elem) +
 518			  round_up(htab->map.key_size, 8);
 519	if (percpu)
 520		htab->elem_size += sizeof(void *);
 521	else
 522		htab->elem_size += round_up(htab->map.value_size, 8);
 523
 524	/* check for u32 overflow */
 525	if (htab->n_buckets > U32_MAX / sizeof(struct bucket))
 526		goto free_htab;
 527
 528	err = bpf_map_init_elem_count(&htab->map);
 529	if (err)
 530		goto free_htab;
 531
 532	err = -ENOMEM;
 533	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
 534					   sizeof(struct bucket),
 535					   htab->map.numa_node);
 536	if (!htab->buckets)
 537		goto free_elem_count;
 538
 539	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) {
 540		htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map,
 541							   sizeof(int),
 542							   sizeof(int),
 543							   GFP_USER);
 544		if (!htab->map_locked[i])
 545			goto free_map_locked;
 546	}
 547
 548	if (htab->map.map_flags & BPF_F_ZERO_SEED)
 549		htab->hashrnd = 0;
 550	else
 551		htab->hashrnd = get_random_u32();
 552
 553	htab_init_buckets(htab);
 554
 555/* compute_batch_value() computes batch value as num_online_cpus() * 2
 556 * and __percpu_counter_compare() needs
 557 * htab->max_entries - cur_number_of_elems to be more than batch * num_online_cpus()
 558 * for percpu_counter to be faster than atomic_t. In practice the average bpf
 559 * hash map size is 10k, which means that a system with 64 cpus will fill
 560 * hashmap to 20% of 10k before percpu_counter becomes ineffective. Therefore
  561 * define our own batch count as 32; then a 10k hash map can be filled up to 80%:
 562 * 10k - 8k > 32 _batch_ * 64 _cpus_
 563 * and __percpu_counter_compare() will still be fast. At that point hash map
 564 * collisions will dominate its performance anyway. Assume that hash map filled
 565 * to 50+% isn't going to be O(1) and use the following formula to choose
 566 * between percpu_counter and atomic_t.
 567 */
 568#define PERCPU_COUNTER_BATCH 32
 569	if (attr->max_entries / 2 > num_online_cpus() * PERCPU_COUNTER_BATCH)
 570		htab->use_percpu_counter = true;
 571
 572	if (htab->use_percpu_counter) {
 573		err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL);
 574		if (err)
 575			goto free_map_locked;
 576	}
 577
 578	if (prealloc) {
 579		err = prealloc_init(htab);
 580		if (err)
 581			goto free_map_locked;
 582
 583		if (!percpu && !lru) {
 584			/* lru itself can remove the least used element, so
 585			 * there is no need for an extra elem during map_update.
 586			 */
 587			err = alloc_extra_elems(htab);
 588			if (err)
 589				goto free_prealloc;
 590		}
 591	} else {
 592		err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false);
 593		if (err)
 594			goto free_map_locked;
 595		if (percpu) {
 596			err = bpf_mem_alloc_init(&htab->pcpu_ma,
 597						 round_up(htab->map.value_size, 8), true);
 598			if (err)
 599				goto free_map_locked;
 600		}
 601	}
 602
 603	return &htab->map;
 604
 605free_prealloc:
 606	prealloc_destroy(htab);
 607free_map_locked:
 608	if (htab->use_percpu_counter)
 609		percpu_counter_destroy(&htab->pcount);
 610	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
 611		free_percpu(htab->map_locked[i]);
 612	bpf_map_area_free(htab->buckets);
 613	bpf_mem_alloc_destroy(&htab->pcpu_ma);
 614	bpf_mem_alloc_destroy(&htab->ma);
 615free_elem_count:
 616	bpf_map_free_elem_count(&htab->map);
 617free_htab:
 618	lockdep_unregister_key(&htab->lockdep_key);
 619	bpf_map_area_free(htab);
 620	return ERR_PTR(err);
 621}
 622
 623static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
 624{
 625	if (likely(key_len % 4 == 0))
 626		return jhash2(key, key_len / 4, hashrnd);
 627	return jhash(key, key_len, hashrnd);
 628}
 629
 630static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
 631{
 632	return &htab->buckets[hash & (htab->n_buckets - 1)];
 633}
 634
 635static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
 636{
 637	return &__select_bucket(htab, hash)->head;
 638}
 639
 640/* this lookup function can only be called with bucket lock taken */
 641static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
 642					 void *key, u32 key_size)
 643{
 644	struct hlist_nulls_node *n;
 645	struct htab_elem *l;
 646
 647	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
 648		if (l->hash == hash && !memcmp(&l->key, key, key_size))
 649			return l;
 650
 651	return NULL;
 652}
 653
  654/* can be called without the bucket lock. it will repeat the loop in
  655 * the unlikely event that elements moved from one bucket into another
  656 * while the linked list is being walked
 657 */
 658static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
 659					       u32 hash, void *key,
 660					       u32 key_size, u32 n_buckets)
 661{
 662	struct hlist_nulls_node *n;
 663	struct htab_elem *l;
 664
 665again:
 666	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
 667		if (l->hash == hash && !memcmp(&l->key, key, key_size))
 668			return l;
 669
 670	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
 671		goto again;
 672
 673	return NULL;
 674}
 675
 676/* Called from syscall or from eBPF program directly, so
 677 * arguments have to match bpf_map_lookup_elem() exactly.
 678 * The return value is adjusted by BPF instructions
 679 * in htab_map_gen_lookup().
 680 */
 681static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
 682{
 683	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 684	struct hlist_nulls_head *head;
 685	struct htab_elem *l;
 686	u32 hash, key_size;
 687
 688	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
 689		     !rcu_read_lock_bh_held());
 690
 691	key_size = map->key_size;
 692
 693	hash = htab_map_hash(key, key_size, htab->hashrnd);
 694
 695	head = select_bucket(htab, hash);
 696
 697	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
 698
 699	return l;
 700}
 701
 702static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
 703{
 704	struct htab_elem *l = __htab_map_lookup_elem(map, key);
 705
 706	if (l)
 707		return l->key + round_up(map->key_size, 8);
 708
 709	return NULL;
 710}
 711
 712/* inline bpf_map_lookup_elem() call.
 713 * Instead of:
 714 * bpf_prog
 715 *   bpf_map_lookup_elem
 716 *     map->ops->map_lookup_elem
 717 *       htab_map_lookup_elem
 718 *         __htab_map_lookup_elem
 719 * do:
 720 * bpf_prog
 721 *   __htab_map_lookup_elem
 722 */
 723static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 724{
 725	struct bpf_insn *insn = insn_buf;
 726	const int ret = BPF_REG_0;
 727
 728	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
 729		     (void *(*)(struct bpf_map *map, void *key))NULL));
 730	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
 731	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
 732	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
 733				offsetof(struct htab_elem, key) +
 734				round_up(map->key_size, 8));
 735	return insn - insn_buf;
 736}
 737
 738static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
 739							void *key, const bool mark)
 740{
 741	struct htab_elem *l = __htab_map_lookup_elem(map, key);
 742
 743	if (l) {
 744		if (mark)
 745			bpf_lru_node_set_ref(&l->lru_node);
 746		return l->key + round_up(map->key_size, 8);
 747	}
 748
 749	return NULL;
 750}
 751
 752static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
 753{
 754	return __htab_lru_map_lookup_elem(map, key, true);
 755}
 756
 757static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
 758{
 759	return __htab_lru_map_lookup_elem(map, key, false);
 760}
 761
 762static int htab_lru_map_gen_lookup(struct bpf_map *map,
 763				   struct bpf_insn *insn_buf)
 764{
 765	struct bpf_insn *insn = insn_buf;
 766	const int ret = BPF_REG_0;
 767	const int ref_reg = BPF_REG_1;
 768
 769	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
 770		     (void *(*)(struct bpf_map *map, void *key))NULL));
 771	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
 772	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
 773	*insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
 774			      offsetof(struct htab_elem, lru_node) +
 775			      offsetof(struct bpf_lru_node, ref));
 776	*insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1);
 777	*insn++ = BPF_ST_MEM(BPF_B, ret,
 778			     offsetof(struct htab_elem, lru_node) +
 779			     offsetof(struct bpf_lru_node, ref),
 780			     1);
 781	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
 782				offsetof(struct htab_elem, key) +
 783				round_up(map->key_size, 8));
 784	return insn - insn_buf;
 785}
 786
 787static void check_and_free_fields(struct bpf_htab *htab,
 788				  struct htab_elem *elem)
 789{
 790	if (htab_is_percpu(htab)) {
 791		void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size);
 792		int cpu;
 793
 794		for_each_possible_cpu(cpu)
 795			bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
 796	} else {
 797		void *map_value = elem->key + round_up(htab->map.key_size, 8);
 798
 799		bpf_obj_free_fields(htab->map.record, map_value);
 800	}
 801}
 802
 803/* It is called from the bpf_lru_list when the LRU needs to delete
 804 * older elements from the htab.
 805 */
 806static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 807{
 808	struct bpf_htab *htab = arg;
 809	struct htab_elem *l = NULL, *tgt_l;
 810	struct hlist_nulls_head *head;
 811	struct hlist_nulls_node *n;
 812	unsigned long flags;
 813	struct bucket *b;
 814	int ret;
 815
 816	tgt_l = container_of(node, struct htab_elem, lru_node);
 817	b = __select_bucket(htab, tgt_l->hash);
 818	head = &b->head;
 819
 820	ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags);
 821	if (ret)
 822		return false;
 823
 824	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
 825		if (l == tgt_l) {
 826			hlist_nulls_del_rcu(&l->hash_node);
 827			check_and_free_fields(htab, l);
 828			bpf_map_dec_elem_count(&htab->map);
 829			break;
 830		}
 831
 832	htab_unlock_bucket(htab, b, tgt_l->hash, flags);
 833
 834	return l == tgt_l;
 835}
 836
 837/* Called from syscall */
 838static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 839{
 840	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 841	struct hlist_nulls_head *head;
 842	struct htab_elem *l, *next_l;
 843	u32 hash, key_size;
 844	int i = 0;
 845
 846	WARN_ON_ONCE(!rcu_read_lock_held());
 847
 848	key_size = map->key_size;
 849
 850	if (!key)
 851		goto find_first_elem;
 852
 853	hash = htab_map_hash(key, key_size, htab->hashrnd);
 854
 855	head = select_bucket(htab, hash);
 856
 857	/* lookup the key */
 858	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
 859
 860	if (!l)
 861		goto find_first_elem;
 862
 863	/* key was found, get next key in the same bucket */
 864	next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
 865				  struct htab_elem, hash_node);
 866
 867	if (next_l) {
 868		/* if next elem in this hash list is non-zero, just return it */
 869		memcpy(next_key, next_l->key, key_size);
 870		return 0;
 871	}
 872
 873	/* no more elements in this hash list, go to the next bucket */
 874	i = hash & (htab->n_buckets - 1);
 875	i++;
 876
 877find_first_elem:
 878	/* iterate over buckets */
 879	for (; i < htab->n_buckets; i++) {
 880		head = select_bucket(htab, i);
 881
 882		/* pick first element in the bucket */
 883		next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
 884					  struct htab_elem, hash_node);
 885		if (next_l) {
 886			/* if it's not empty, just return it */
 887			memcpy(next_key, next_l->key, key_size);
 888			return 0;
 889		}
 890	}
 891
 892	/* iterated over all buckets and all elements */
 893	return -ENOENT;
 894}
 895
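/*
 * Illustrative sketch, not part of hashtab.c: the usual user-space walk
 * built on the get_next_key operation above.  Passing NULL as the previous
 * key starts at the first element and -ENOENT ends the walk; "map_fd" and
 * the __u32/__u64 types are assumptions for the example.
 *
 *	__u32 key, next_key;
 *	__u64 value;
 *	void *prev = NULL;
 *
 *	while (!bpf_map_get_next_key(map_fd, prev, &next_key)) {
 *		if (!bpf_map_lookup_elem(map_fd, &next_key, &value))
 *			handle(next_key, value);
 *		key = next_key;
 *		prev = &key;
 *	}
 */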
 896static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
 897{
 898	check_and_free_fields(htab, l);
 899
 900	migrate_disable();
 901	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
 902		bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr);
 903	bpf_mem_cache_free(&htab->ma, l);
 904	migrate_enable();
 905}
 906
 907static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
 908{
 909	struct bpf_map *map = &htab->map;
 910	void *ptr;
 911
 912	if (map->ops->map_fd_put_ptr) {
 913		ptr = fd_htab_map_get_ptr(map, l);
 914		map->ops->map_fd_put_ptr(map, ptr, true);
 915	}
 916}
 917
 918static bool is_map_full(struct bpf_htab *htab)
 919{
 920	if (htab->use_percpu_counter)
 921		return __percpu_counter_compare(&htab->pcount, htab->map.max_entries,
 922						PERCPU_COUNTER_BATCH) >= 0;
 923	return atomic_read(&htab->count) >= htab->map.max_entries;
 924}
 925
 926static void inc_elem_count(struct bpf_htab *htab)
 927{
 928	bpf_map_inc_elem_count(&htab->map);
 929
 930	if (htab->use_percpu_counter)
 931		percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH);
 932	else
 933		atomic_inc(&htab->count);
 934}
 935
 936static void dec_elem_count(struct bpf_htab *htab)
 937{
 938	bpf_map_dec_elem_count(&htab->map);
 939
 940	if (htab->use_percpu_counter)
 941		percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH);
 942	else
 943		atomic_dec(&htab->count);
 944}
 945
 946
 947static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 948{
 949	htab_put_fd_value(htab, l);
 950
 951	if (htab_is_prealloc(htab)) {
 952		bpf_map_dec_elem_count(&htab->map);
 953		check_and_free_fields(htab, l);
 954		pcpu_freelist_push(&htab->freelist, &l->fnode);
 955	} else {
 956		dec_elem_count(htab);
 957		htab_elem_free(htab, l);
 958	}
 959}
 960
 961static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
 962			    void *value, bool onallcpus)
 963{
 964	if (!onallcpus) {
 965		/* copy true value_size bytes */
 966		copy_map_value(&htab->map, this_cpu_ptr(pptr), value);
 967	} else {
 968		u32 size = round_up(htab->map.value_size, 8);
 969		int off = 0, cpu;
 970
 971		for_each_possible_cpu(cpu) {
 972			copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off);
 973			off += size;
 974		}
 975	}
 976}
 977
 978static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
 979			    void *value, bool onallcpus)
 980{
 981	/* When not setting the initial value on all cpus, zero-fill element
 982	 * values for other cpus. Otherwise, bpf program has no way to ensure
 983	 * known initial values for cpus other than current one
 984	 * (onallcpus=false always when coming from bpf prog).
 985	 */
 986	if (!onallcpus) {
 987		int current_cpu = raw_smp_processor_id();
 988		int cpu;
 989
 990		for_each_possible_cpu(cpu) {
 991			if (cpu == current_cpu)
 992				copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value);
 993			else /* Since elem is preallocated, we cannot touch special fields */
 994				zero_map_value(&htab->map, per_cpu_ptr(pptr, cpu));
 995		}
 996	} else {
 997		pcpu_copy_value(htab, pptr, value, onallcpus);
 998	}
 999}
1000
1001static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
1002{
1003	return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
1004	       BITS_PER_LONG == 64;
1005}
1006
1007static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
1008					 void *value, u32 key_size, u32 hash,
1009					 bool percpu, bool onallcpus,
1010					 struct htab_elem *old_elem)
1011{
1012	u32 size = htab->map.value_size;
1013	bool prealloc = htab_is_prealloc(htab);
1014	struct htab_elem *l_new, **pl_new;
1015	void __percpu *pptr;
1016
1017	if (prealloc) {
1018		if (old_elem) {
1019			/* if we're updating the existing element,
1020			 * use per-cpu extra elems to avoid freelist_pop/push
1021			 */
1022			pl_new = this_cpu_ptr(htab->extra_elems);
1023			l_new = *pl_new;
1024			*pl_new = old_elem;
1025		} else {
1026			struct pcpu_freelist_node *l;
1027
1028			l = __pcpu_freelist_pop(&htab->freelist);
1029			if (!l)
1030				return ERR_PTR(-E2BIG);
1031			l_new = container_of(l, struct htab_elem, fnode);
1032			bpf_map_inc_elem_count(&htab->map);
1033		}
1034	} else {
1035		if (is_map_full(htab))
1036			if (!old_elem)
1037				/* when map is full and update() is replacing
1038				 * old element, it's ok to allocate, since
1039				 * old element will be freed immediately.
1040				 * Otherwise return an error
1041				 */
1042				return ERR_PTR(-E2BIG);
1043		inc_elem_count(htab);
1044		l_new = bpf_mem_cache_alloc(&htab->ma);
1045		if (!l_new) {
1046			l_new = ERR_PTR(-ENOMEM);
1047			goto dec_count;
1048		}
1049	}
1050
1051	memcpy(l_new->key, key, key_size);
1052	if (percpu) {
1053		if (prealloc) {
1054			pptr = htab_elem_get_ptr(l_new, key_size);
1055		} else {
1056			/* alloc_percpu zero-fills */
1057			void *ptr = bpf_mem_cache_alloc(&htab->pcpu_ma);
1058
1059			if (!ptr) {
1060				bpf_mem_cache_free(&htab->ma, l_new);
1061				l_new = ERR_PTR(-ENOMEM);
1062				goto dec_count;
1063			}
1064			l_new->ptr_to_pptr = ptr;
1065			pptr = *(void __percpu **)ptr;
1066		}
1067
1068		pcpu_init_value(htab, pptr, value, onallcpus);
1069
1070		if (!prealloc)
1071			htab_elem_set_ptr(l_new, key_size, pptr);
1072	} else if (fd_htab_map_needs_adjust(htab)) {
1073		size = round_up(size, 8);
1074		memcpy(l_new->key + round_up(key_size, 8), value, size);
1075	} else {
1076		copy_map_value(&htab->map,
1077			       l_new->key + round_up(key_size, 8),
1078			       value);
1079	}
1080
1081	l_new->hash = hash;
1082	return l_new;
1083dec_count:
1084	dec_elem_count(htab);
1085	return l_new;
1086}
1087
1088static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
1089		       u64 map_flags)
1090{
1091	if (l_old && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
1092		/* elem already exists */
1093		return -EEXIST;
1094
1095	if (!l_old && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
1096		/* elem doesn't exist, cannot update it */
1097		return -ENOENT;
1098
1099	return 0;
1100}
1101
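/*
 * Illustrative sketch, not part of hashtab.c: how the flag checks above
 * surface to user space ("map_fd", key and val are assumptions).
 *
 *	err = bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST);
 *					// -EEXIST if the key is present
 *	err = bpf_map_update_elem(map_fd, &key, &val, BPF_EXIST);
 *					// -ENOENT if the key is absent
 *	err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 *					// create or overwrite
 */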
1102/* Called from syscall or from eBPF program */
1103static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
1104				 u64 map_flags)
1105{
1106	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1107	struct htab_elem *l_new = NULL, *l_old;
1108	struct hlist_nulls_head *head;
1109	unsigned long flags;
1110	void *old_map_ptr;
1111	struct bucket *b;
1112	u32 key_size, hash;
1113	int ret;
1114
1115	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
1116		/* unknown flags */
1117		return -EINVAL;
1118
1119	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1120		     !rcu_read_lock_bh_held());
1121
1122	key_size = map->key_size;
1123
1124	hash = htab_map_hash(key, key_size, htab->hashrnd);
1125
1126	b = __select_bucket(htab, hash);
1127	head = &b->head;
1128
1129	if (unlikely(map_flags & BPF_F_LOCK)) {
1130		if (unlikely(!btf_record_has_field(map->record, BPF_SPIN_LOCK)))
1131			return -EINVAL;
1132		/* find an element without taking the bucket lock */
1133		l_old = lookup_nulls_elem_raw(head, hash, key, key_size,
1134					      htab->n_buckets);
1135		ret = check_flags(htab, l_old, map_flags);
1136		if (ret)
1137			return ret;
1138		if (l_old) {
1139			/* grab the element lock and update value in place */
1140			copy_map_value_locked(map,
1141					      l_old->key + round_up(key_size, 8),
1142					      value, false);
1143			return 0;
1144		}
1145		/* fall through, grab the bucket lock and lookup again.
1146		 * 99.9% chance that the element won't be found,
1147		 * but second lookup under lock has to be done.
1148		 */
1149	}
1150
1151	ret = htab_lock_bucket(htab, b, hash, &flags);
1152	if (ret)
1153		return ret;
1154
1155	l_old = lookup_elem_raw(head, hash, key, key_size);
1156
1157	ret = check_flags(htab, l_old, map_flags);
1158	if (ret)
1159		goto err;
1160
1161	if (unlikely(l_old && (map_flags & BPF_F_LOCK))) {
1162		/* first lookup without the bucket lock didn't find the element,
1163		 * but second lookup with the bucket lock found it.
1164		 * This case is highly unlikely, but has to be dealt with:
1165		 * grab the element lock in addition to the bucket lock
1166		 * and update element in place
1167		 */
1168		copy_map_value_locked(map,
1169				      l_old->key + round_up(key_size, 8),
1170				      value, false);
1171		ret = 0;
1172		goto err;
1173	}
1174
1175	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
1176				l_old);
1177	if (IS_ERR(l_new)) {
1178		/* all pre-allocated elements are in use or memory exhausted */
1179		ret = PTR_ERR(l_new);
1180		goto err;
1181	}
1182
1183	/* add new element to the head of the list, so that
1184	 * concurrent search will find it before old elem
1185	 */
1186	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1187	if (l_old) {
1188		hlist_nulls_del_rcu(&l_old->hash_node);
1189
1190		/* l_old has already been stashed in htab->extra_elems, free
1191		 * its special fields before it is available for reuse. Also
1192		 * save the old map pointer in htab of maps before unlock
1193		 * and release it after unlock.
1194		 */
1195		old_map_ptr = NULL;
1196		if (htab_is_prealloc(htab)) {
1197			if (map->ops->map_fd_put_ptr)
1198				old_map_ptr = fd_htab_map_get_ptr(map, l_old);
1199			check_and_free_fields(htab, l_old);
1200		}
1201	}
1202	htab_unlock_bucket(htab, b, hash, flags);
1203	if (l_old) {
1204		if (old_map_ptr)
1205			map->ops->map_fd_put_ptr(map, old_map_ptr, true);
1206		if (!htab_is_prealloc(htab))
1207			free_htab_elem(htab, l_old);
1208	}
1209	return 0;
1210err:
1211	htab_unlock_bucket(htab, b, hash, flags);
1212	return ret;
1213}
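/*
 * Illustrative sketch, not part of this file: the BPF_F_LOCK path in
 * htab_map_update_elem() above copies the value under the element's
 * bpf_spin_lock instead of replacing the element. It only works if the
 * map's BTF declares a struct bpf_spin_lock inside the value; the
 * struct layout and "map_fd" below are assumptions of the example.
 */
#include <bpf/bpf.h>

struct locked_val {
	struct bpf_spin_lock lock;	/* must be described by the map's BTF */
	__u64 counter;
};

static int locked_update_demo(int map_fd, __u32 key, __u64 counter)
{
	struct locked_val v = { .counter = counter };

	/* in-place, locked copy when the key already exists */
	return bpf_map_update_elem(map_fd, &key, &v, BPF_F_LOCK);
}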
1214
1215static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem)
1216{
1217	check_and_free_fields(htab, elem);
1218	bpf_map_dec_elem_count(&htab->map);
1219	bpf_lru_push_free(&htab->lru, &elem->lru_node);
1220}
1221
1222static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
1223				     u64 map_flags)
1224{
1225	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1226	struct htab_elem *l_new, *l_old = NULL;
1227	struct hlist_nulls_head *head;
1228	unsigned long flags;
1229	struct bucket *b;
1230	u32 key_size, hash;
1231	int ret;
1232
1233	if (unlikely(map_flags > BPF_EXIST))
1234		/* unknown flags */
1235		return -EINVAL;
1236
1237	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1238		     !rcu_read_lock_bh_held());
1239
1240	key_size = map->key_size;
1241
1242	hash = htab_map_hash(key, key_size, htab->hashrnd);
1243
1244	b = __select_bucket(htab, hash);
1245	head = &b->head;
1246
1247	/* For LRU, we need to alloc before taking bucket's
1248	 * spinlock because getting free nodes from LRU may need
1249	 * to remove older elements from htab and this removal
1250	 * operation will need a bucket lock.
1251	 */
1252	l_new = prealloc_lru_pop(htab, key, hash);
1253	if (!l_new)
1254		return -ENOMEM;
1255	copy_map_value(&htab->map,
1256		       l_new->key + round_up(map->key_size, 8), value);
1257
1258	ret = htab_lock_bucket(htab, b, hash, &flags);
1259	if (ret)
1260		goto err_lock_bucket;
1261
1262	l_old = lookup_elem_raw(head, hash, key, key_size);
1263
1264	ret = check_flags(htab, l_old, map_flags);
1265	if (ret)
1266		goto err;
1267
1268	/* add new element to the head of the list, so that
1269	 * concurrent search will find it before old elem
1270	 */
1271	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1272	if (l_old) {
1273		bpf_lru_node_set_ref(&l_new->lru_node);
1274		hlist_nulls_del_rcu(&l_old->hash_node);
1275	}
1276	ret = 0;
1277
1278err:
1279	htab_unlock_bucket(htab, b, hash, flags);
1280
1281err_lock_bucket:
1282	if (ret)
1283		htab_lru_push_free(htab, l_new);
1284	else if (l_old)
1285		htab_lru_push_free(htab, l_old);
1286
1287	return ret;
1288}
1289
1290static long __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
1291					  void *value, u64 map_flags,
1292					  bool onallcpus)
1293{
1294	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1295	struct htab_elem *l_new = NULL, *l_old;
1296	struct hlist_nulls_head *head;
1297	unsigned long flags;
1298	struct bucket *b;
1299	u32 key_size, hash;
1300	int ret;
1301
1302	if (unlikely(map_flags > BPF_EXIST))
1303		/* unknown flags */
1304		return -EINVAL;
1305
1306	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1307		     !rcu_read_lock_bh_held());
1308
1309	key_size = map->key_size;
1310
1311	hash = htab_map_hash(key, key_size, htab->hashrnd);
1312
1313	b = __select_bucket(htab, hash);
1314	head = &b->head;
1315
1316	ret = htab_lock_bucket(htab, b, hash, &flags);
1317	if (ret)
1318		return ret;
1319
1320	l_old = lookup_elem_raw(head, hash, key, key_size);
1321
1322	ret = check_flags(htab, l_old, map_flags);
1323	if (ret)
1324		goto err;
1325
1326	if (l_old) {
1327		/* per-cpu hash map can update value in-place */
1328		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1329				value, onallcpus);
1330	} else {
1331		l_new = alloc_htab_elem(htab, key, value, key_size,
1332					hash, true, onallcpus, NULL);
1333		if (IS_ERR(l_new)) {
1334			ret = PTR_ERR(l_new);
1335			goto err;
1336		}
1337		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1338	}
1339	ret = 0;
1340err:
1341	htab_unlock_bucket(htab, b, hash, flags);
1342	return ret;
1343}
1344
1345static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
1346					      void *value, u64 map_flags,
1347					      bool onallcpus)
1348{
1349	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1350	struct htab_elem *l_new = NULL, *l_old;
1351	struct hlist_nulls_head *head;
1352	unsigned long flags;
1353	struct bucket *b;
1354	u32 key_size, hash;
1355	int ret;
1356
1357	if (unlikely(map_flags > BPF_EXIST))
1358		/* unknown flags */
1359		return -EINVAL;
1360
1361	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1362		     !rcu_read_lock_bh_held());
1363
1364	key_size = map->key_size;
1365
1366	hash = htab_map_hash(key, key_size, htab->hashrnd);
1367
1368	b = __select_bucket(htab, hash);
1369	head = &b->head;
1370
1371	/* For LRU, we need to alloc before taking bucket's
1372	 * spinlock because LRU's elem alloc may need
1373	 * to remove older elem from htab and this removal
1374	 * operation will need a bucket lock.
1375	 */
1376	if (map_flags != BPF_EXIST) {
1377		l_new = prealloc_lru_pop(htab, key, hash);
1378		if (!l_new)
1379			return -ENOMEM;
1380	}
1381
1382	ret = htab_lock_bucket(htab, b, hash, &flags);
1383	if (ret)
1384		goto err_lock_bucket;
1385
1386	l_old = lookup_elem_raw(head, hash, key, key_size);
1387
1388	ret = check_flags(htab, l_old, map_flags);
1389	if (ret)
1390		goto err;
1391
1392	if (l_old) {
1393		bpf_lru_node_set_ref(&l_old->lru_node);
1394
1395		/* per-cpu hash map can update value in-place */
1396		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1397				value, onallcpus);
1398	} else {
1399		pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
1400				value, onallcpus);
1401		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1402		l_new = NULL;
1403	}
1404	ret = 0;
1405err:
1406	htab_unlock_bucket(htab, b, hash, flags);
1407err_lock_bucket:
1408	if (l_new) {
1409		bpf_map_dec_elem_count(&htab->map);
1410		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
1411	}
1412	return ret;
1413}
1414
1415static long htab_percpu_map_update_elem(struct bpf_map *map, void *key,
1416					void *value, u64 map_flags)
1417{
1418	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
1419}
1420
1421static long htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
1422					    void *value, u64 map_flags)
1423{
1424	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
1425						 false);
1426}
1427
1428/* Called from syscall or from eBPF program */
1429static long htab_map_delete_elem(struct bpf_map *map, void *key)
1430{
1431	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1432	struct hlist_nulls_head *head;
1433	struct bucket *b;
1434	struct htab_elem *l;
1435	unsigned long flags;
1436	u32 hash, key_size;
1437	int ret;
1438
1439	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1440		     !rcu_read_lock_bh_held());
1441
1442	key_size = map->key_size;
1443
1444	hash = htab_map_hash(key, key_size, htab->hashrnd);
1445	b = __select_bucket(htab, hash);
1446	head = &b->head;
1447
1448	ret = htab_lock_bucket(htab, b, hash, &flags);
1449	if (ret)
1450		return ret;
1451
1452	l = lookup_elem_raw(head, hash, key, key_size);
1453	if (l)
1454		hlist_nulls_del_rcu(&l->hash_node);
1455	else
1456		ret = -ENOENT;
1457
1458	htab_unlock_bucket(htab, b, hash, flags);
1459
1460	if (l)
1461		free_htab_elem(htab, l);
1462	return ret;
1463}
1464
1465static long htab_lru_map_delete_elem(struct bpf_map *map, void *key)
1466{
1467	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1468	struct hlist_nulls_head *head;
1469	struct bucket *b;
1470	struct htab_elem *l;
1471	unsigned long flags;
1472	u32 hash, key_size;
1473	int ret;
1474
1475	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1476		     !rcu_read_lock_bh_held());
1477
1478	key_size = map->key_size;
1479
1480	hash = htab_map_hash(key, key_size, htab->hashrnd);
1481	b = __select_bucket(htab, hash);
1482	head = &b->head;
1483
1484	ret = htab_lock_bucket(htab, b, hash, &flags);
1485	if (ret)
1486		return ret;
1487
1488	l = lookup_elem_raw(head, hash, key, key_size);
1489
1490	if (l)
1491		hlist_nulls_del_rcu(&l->hash_node);
1492	else
1493		ret = -ENOENT;
1494
1495	htab_unlock_bucket(htab, b, hash, flags);
1496	if (l)
1497		htab_lru_push_free(htab, l);
1498	return ret;
1499}
1500
1501static void delete_all_elements(struct bpf_htab *htab)
1502{
1503	int i;
1504
1505	/* It's called from a worker thread, so disable migration here,
1506	 * since bpf_mem_cache_free() relies on that.
1507	 */
1508	migrate_disable();
1509	for (i = 0; i < htab->n_buckets; i++) {
1510		struct hlist_nulls_head *head = select_bucket(htab, i);
1511		struct hlist_nulls_node *n;
1512		struct htab_elem *l;
1513
1514		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1515			hlist_nulls_del_rcu(&l->hash_node);
1516			htab_elem_free(htab, l);
1517		}
1518		cond_resched();
1519	}
1520	migrate_enable();
1521}
1522
1523static void htab_free_malloced_timers_and_wq(struct bpf_htab *htab)
1524{
1525	int i;
1526
1527	rcu_read_lock();
1528	for (i = 0; i < htab->n_buckets; i++) {
1529		struct hlist_nulls_head *head = select_bucket(htab, i);
1530		struct hlist_nulls_node *n;
1531		struct htab_elem *l;
1532
1533		hlist_nulls_for_each_entry(l, n, head, hash_node) {
1534			/* We only free timer and workqueue on uref dropping to zero */
1535			if (btf_record_has_field(htab->map.record, BPF_TIMER))
1536				bpf_obj_free_timer(htab->map.record,
1537						   l->key + round_up(htab->map.key_size, 8));
1538			if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE))
1539				bpf_obj_free_workqueue(htab->map.record,
1540						       l->key + round_up(htab->map.key_size, 8));
1541		}
1542		cond_resched_rcu();
1543	}
1544	rcu_read_unlock();
1545}
1546
1547static void htab_map_free_timers_and_wq(struct bpf_map *map)
1548{
1549	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1550
1551	/* We only free timer and workqueue on uref dropping to zero */
1552	if (btf_record_has_field(htab->map.record, BPF_TIMER | BPF_WORKQUEUE)) {
1553		if (!htab_is_prealloc(htab))
1554			htab_free_malloced_timers_and_wq(htab);
1555		else
1556			htab_free_prealloced_timers_and_wq(htab);
1557	}
1558}
1559
1560/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
1561static void htab_map_free(struct bpf_map *map)
1562{
1563	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1564	int i;
1565
1566	/* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
1567	 * bpf_free_used_maps() is called after bpf prog is no longer executing.
1568	 * There is no need to synchronize_rcu() here to protect map elements.
1569	 */
1570
1571	/* htab no longer uses call_rcu() directly. bpf_mem_alloc does it
1572	 * underneath and is responsible for waiting for callbacks to finish
1573	 * during bpf_mem_alloc_destroy().
1574	 */
1575	if (!htab_is_prealloc(htab)) {
1576		delete_all_elements(htab);
1577	} else {
1578		htab_free_prealloced_fields(htab);
1579		prealloc_destroy(htab);
1580	}
1581
1582	bpf_map_free_elem_count(map);
1583	free_percpu(htab->extra_elems);
1584	bpf_map_area_free(htab->buckets);
1585	bpf_mem_alloc_destroy(&htab->pcpu_ma);
1586	bpf_mem_alloc_destroy(&htab->ma);
1587	if (htab->use_percpu_counter)
1588		percpu_counter_destroy(&htab->pcount);
1589	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
1590		free_percpu(htab->map_locked[i]);
1591	lockdep_unregister_key(&htab->lockdep_key);
1592	bpf_map_area_free(htab);
1593}
1594
1595static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
1596				   struct seq_file *m)
1597{
1598	void *value;
1599
1600	rcu_read_lock();
1601
1602	value = htab_map_lookup_elem(map, key);
1603	if (!value) {
1604		rcu_read_unlock();
1605		return;
1606	}
1607
1608	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
1609	seq_puts(m, ": ");
1610	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
1611	seq_putc(m, '\n');
1612
1613	rcu_read_unlock();
1614}
1615
1616static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1617					     void *value, bool is_lru_map,
1618					     bool is_percpu, u64 flags)
1619{
1620	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1621	struct hlist_nulls_head *head;
1622	unsigned long bflags;
1623	struct htab_elem *l;
1624	u32 hash, key_size;
1625	struct bucket *b;
1626	int ret;
1627
1628	key_size = map->key_size;
1629
1630	hash = htab_map_hash(key, key_size, htab->hashrnd);
1631	b = __select_bucket(htab, hash);
1632	head = &b->head;
1633
1634	ret = htab_lock_bucket(htab, b, hash, &bflags);
1635	if (ret)
1636		return ret;
1637
1638	l = lookup_elem_raw(head, hash, key, key_size);
1639	if (!l) {
1640		ret = -ENOENT;
1641	} else {
1642		if (is_percpu) {
1643			u32 roundup_value_size = round_up(map->value_size, 8);
1644			void __percpu *pptr;
1645			int off = 0, cpu;
1646
1647			pptr = htab_elem_get_ptr(l, key_size);
1648			for_each_possible_cpu(cpu) {
1649				copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu));
1650				check_and_init_map_value(&htab->map, value + off);
1651				off += roundup_value_size;
1652			}
1653		} else {
1654			u32 roundup_key_size = round_up(map->key_size, 8);
1655
1656			if (flags & BPF_F_LOCK)
1657				copy_map_value_locked(map, value, l->key +
1658						      roundup_key_size,
1659						      true);
1660			else
1661				copy_map_value(map, value, l->key +
1662					       roundup_key_size);
1663			/* Zeroing special fields in the temp buffer */
1664			check_and_init_map_value(map, value);
1665		}
1666
1667		hlist_nulls_del_rcu(&l->hash_node);
1668		if (!is_lru_map)
1669			free_htab_elem(htab, l);
1670	}
1671
1672	htab_unlock_bucket(htab, b, hash, bflags);
1673
1674	if (is_lru_map && l)
1675		htab_lru_push_free(htab, l);
1676
1677	return ret;
1678}
1679
1680static int htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1681					   void *value, u64 flags)
1682{
1683	return __htab_map_lookup_and_delete_elem(map, key, value, false, false,
1684						 flags);
1685}
1686
1687static int htab_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
1688						  void *key, void *value,
1689						  u64 flags)
1690{
1691	return __htab_map_lookup_and_delete_elem(map, key, value, false, true,
1692						 flags);
1693}
1694
1695static int htab_lru_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1696					       void *value, u64 flags)
1697{
1698	return __htab_map_lookup_and_delete_elem(map, key, value, true, false,
1699						 flags);
1700}
1701
1702static int htab_lru_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
1703						      void *key, void *value,
1704						      u64 flags)
1705{
1706	return __htab_map_lookup_and_delete_elem(map, key, value, true, true,
1707						 flags);
1708}
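/*
 * Illustrative sketch, not part of this file: the syscall-side
 * counterpart of the lookup_and_delete callbacks above. The value is
 * copied out and the element unlinked in one operation; for the per-CPU
 * flavours the buffer must cover all possible CPUs. "map_fd" and the
 * __u64 value type are assumptions of the example.
 */
#include <bpf/bpf.h>

static int pop_elem_demo(int map_fd, __u32 key, __u64 *out)
{
	return bpf_map_lookup_and_delete_elem(map_fd, &key, out);
}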
1709
1710static int
1711__htab_map_lookup_and_delete_batch(struct bpf_map *map,
1712				   const union bpf_attr *attr,
1713				   union bpf_attr __user *uattr,
1714				   bool do_delete, bool is_lru_map,
1715				   bool is_percpu)
1716{
1717	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1718	u32 bucket_cnt, total, key_size, value_size, roundup_key_size;
1719	void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val;
1720	void __user *uvalues = u64_to_user_ptr(attr->batch.values);
1721	void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
1722	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1723	u32 batch, max_count, size, bucket_size, map_id;
1724	struct htab_elem *node_to_free = NULL;
1725	u64 elem_map_flags, map_flags;
1726	struct hlist_nulls_head *head;
1727	struct hlist_nulls_node *n;
1728	unsigned long flags = 0;
1729	bool locked = false;
1730	struct htab_elem *l;
1731	struct bucket *b;
1732	int ret = 0;
1733
1734	elem_map_flags = attr->batch.elem_flags;
1735	if ((elem_map_flags & ~BPF_F_LOCK) ||
1736	    ((elem_map_flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
1737		return -EINVAL;
1738
1739	map_flags = attr->batch.flags;
1740	if (map_flags)
1741		return -EINVAL;
1742
1743	max_count = attr->batch.count;
1744	if (!max_count)
1745		return 0;
1746
1747	if (put_user(0, &uattr->batch.count))
1748		return -EFAULT;
1749
1750	batch = 0;
1751	if (ubatch && copy_from_user(&batch, ubatch, sizeof(batch)))
1752		return -EFAULT;
1753
1754	if (batch >= htab->n_buckets)
1755		return -ENOENT;
1756
1757	key_size = htab->map.key_size;
1758	roundup_key_size = round_up(htab->map.key_size, 8);
1759	value_size = htab->map.value_size;
1760	size = round_up(value_size, 8);
1761	if (is_percpu)
1762		value_size = size * num_possible_cpus();
1763	total = 0;
1764	/* while experimenting with hash tables with sizes ranging from 10 to
1765	 * 1000, it was observed that a bucket can have up to 5 entries.
1766	 */
1767	bucket_size = 5;
1768
1769alloc:
1770	/* We cannot do copy_from_user or copy_to_user inside
1771	 * the rcu_read_lock. Allocate enough space here.
1772	 */
1773	keys = kvmalloc_array(key_size, bucket_size, GFP_USER | __GFP_NOWARN);
1774	values = kvmalloc_array(value_size, bucket_size, GFP_USER | __GFP_NOWARN);
1775	if (!keys || !values) {
1776		ret = -ENOMEM;
1777		goto after_loop;
1778	}
1779
1780again:
1781	bpf_disable_instrumentation();
1782	rcu_read_lock();
1783again_nocopy:
1784	dst_key = keys;
1785	dst_val = values;
1786	b = &htab->buckets[batch];
1787	head = &b->head;
1788	/* do not grab the lock unless we need it (bucket_cnt > 0). */
1789	if (locked) {
1790		ret = htab_lock_bucket(htab, b, batch, &flags);
1791		if (ret) {
1792			rcu_read_unlock();
1793			bpf_enable_instrumentation();
1794			goto after_loop;
1795		}
1796	}
1797
1798	bucket_cnt = 0;
1799	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
1800		bucket_cnt++;
1801
1802	if (bucket_cnt && !locked) {
1803		locked = true;
1804		goto again_nocopy;
1805	}
1806
1807	if (bucket_cnt > (max_count - total)) {
1808		if (total == 0)
1809			ret = -ENOSPC;
1810		/* Note that since bucket_cnt > 0 here, it is implicit
1811		 * that the lock was grabbed, so release it.
1812		 */
1813		htab_unlock_bucket(htab, b, batch, flags);
1814		rcu_read_unlock();
1815		bpf_enable_instrumentation();
1816		goto after_loop;
1817	}
1818
1819	if (bucket_cnt > bucket_size) {
1820		bucket_size = bucket_cnt;
1821		/* Note that since bucket_cnt > 0 here, it is implicit
1822		 * that the locked was grabbed, so release it.
1823		 * that the lock was grabbed, so release it.
1824		htab_unlock_bucket(htab, b, batch, flags);
1825		rcu_read_unlock();
1826		bpf_enable_instrumentation();
1827		kvfree(keys);
1828		kvfree(values);
1829		goto alloc;
1830	}
1831
1832	/* Next block is only safe to run if you have grabbed the lock */
1833	if (!locked)
1834		goto next_batch;
1835
1836	hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1837		memcpy(dst_key, l->key, key_size);
1838
1839		if (is_percpu) {
1840			int off = 0, cpu;
1841			void __percpu *pptr;
1842
1843			pptr = htab_elem_get_ptr(l, map->key_size);
1844			for_each_possible_cpu(cpu) {
1845				copy_map_value_long(&htab->map, dst_val + off, per_cpu_ptr(pptr, cpu));
1846				check_and_init_map_value(&htab->map, dst_val + off);
1847				off += size;
1848			}
1849		} else {
1850			value = l->key + roundup_key_size;
1851			if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
1852				struct bpf_map **inner_map = value;
1853
1854				 /* Actual value is the id of the inner map */
1855				map_id = map->ops->map_fd_sys_lookup_elem(*inner_map);
1856				value = &map_id;
1857			}
1858
1859			if (elem_map_flags & BPF_F_LOCK)
1860				copy_map_value_locked(map, dst_val, value,
1861						      true);
1862			else
1863				copy_map_value(map, dst_val, value);
1864			/* Zeroing special fields in the temp buffer */
1865			check_and_init_map_value(map, dst_val);
1866		}
1867		if (do_delete) {
1868			hlist_nulls_del_rcu(&l->hash_node);
1869
1870			/* bpf_lru_push_free() will acquire lru_lock, which
1871			 * may cause deadlock. See comments in function
1872			 * prealloc_lru_pop(). Let us do bpf_lru_push_free()
1873			 * after releasing the bucket lock.
1874			 *
1875			 * For htab of maps, htab_put_fd_value() in
1876			 * free_htab_elem() may acquire a spinlock with bucket
1877			 * lock being held and it violates the lock rule, so
1878			 * invoke free_htab_elem() after unlock as well.
1879			 */
1880			l->batch_flink = node_to_free;
1881			node_to_free = l;
1882		}
1883		dst_key += key_size;
1884		dst_val += value_size;
1885	}
1886
1887	htab_unlock_bucket(htab, b, batch, flags);
1888	locked = false;
1889
1890	while (node_to_free) {
1891		l = node_to_free;
1892		node_to_free = node_to_free->batch_flink;
1893		if (is_lru_map)
1894			htab_lru_push_free(htab, l);
1895		else
1896			free_htab_elem(htab, l);
1897	}
1898
1899next_batch:
1900	/* If we are not copying data, we can go to the next bucket and avoid
1901	 * unlocking the RCU read lock.
1902	 */
1903	if (!bucket_cnt && (batch + 1 < htab->n_buckets)) {
1904		batch++;
1905		goto again_nocopy;
1906	}
1907
1908	rcu_read_unlock();
1909	bpf_enable_instrumentation();
1910	if (bucket_cnt && (copy_to_user(ukeys + total * key_size, keys,
1911	    key_size * bucket_cnt) ||
1912	    copy_to_user(uvalues + total * value_size, values,
1913	    value_size * bucket_cnt))) {
1914		ret = -EFAULT;
1915		goto after_loop;
1916	}
1917
1918	total += bucket_cnt;
1919	batch++;
1920	if (batch >= htab->n_buckets) {
1921		ret = -ENOENT;
1922		goto after_loop;
1923	}
1924	goto again;
1925
1926after_loop:
1927	if (ret == -EFAULT)
1928		goto out;
1929
1930	/* copy # of entries and next batch */
1931	ubatch = u64_to_user_ptr(attr->batch.out_batch);
1932	if (copy_to_user(ubatch, &batch, sizeof(batch)) ||
1933	    put_user(total, &uattr->batch.count))
1934		ret = -EFAULT;
1935
1936out:
1937	kvfree(keys);
1938	kvfree(values);
1939	return ret;
1940}
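/*
 * Illustrative sketch, not part of this file: driving the batch walk
 * implemented above from user space. libbpf's bpf_map_lookup_batch()
 * resumes from the opaque "out_batch" cursor and reports the end of the
 * table with ENOENT. 4-byte keys, 8-byte values and "map_fd" are
 * assumptions of the example.
 */
#include <errno.h>
#include <bpf/bpf.h>

static int batch_dump_demo(int map_fd)
{
	__u32 out_batch, count;
	__u32 keys[64];
	__u64 vals[64];
	void *in_batch = NULL;	/* NULL on the first call: start at bucket 0 */
	int err;

	do {
		count = 64;
		err = bpf_map_lookup_batch(map_fd, in_batch, &out_batch,
					   keys, vals, &count, NULL);
		if (err && errno != ENOENT)
			return err;
		/* the first "count" entries of keys[]/vals[] are valid here */
		in_batch = &out_batch;	/* resume after the returned cursor */
	} while (!err);

	return 0;
}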
1941
1942static int
1943htab_percpu_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1944			     union bpf_attr __user *uattr)
1945{
1946	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1947						  false, true);
1948}
1949
1950static int
1951htab_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
1952					const union bpf_attr *attr,
1953					union bpf_attr __user *uattr)
1954{
1955	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1956						  false, true);
1957}
1958
1959static int
1960htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1961		      union bpf_attr __user *uattr)
1962{
1963	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1964						  false, false);
1965}
1966
1967static int
1968htab_map_lookup_and_delete_batch(struct bpf_map *map,
1969				 const union bpf_attr *attr,
1970				 union bpf_attr __user *uattr)
1971{
1972	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1973						  false, false);
1974}
1975
1976static int
1977htab_lru_percpu_map_lookup_batch(struct bpf_map *map,
1978				 const union bpf_attr *attr,
1979				 union bpf_attr __user *uattr)
1980{
1981	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1982						  true, true);
1983}
1984
1985static int
1986htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
1987					    const union bpf_attr *attr,
1988					    union bpf_attr __user *uattr)
1989{
1990	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1991						  true, true);
1992}
1993
1994static int
1995htab_lru_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1996			  union bpf_attr __user *uattr)
1997{
1998	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1999						  true, false);
2000}
2001
2002static int
2003htab_lru_map_lookup_and_delete_batch(struct bpf_map *map,
2004				     const union bpf_attr *attr,
2005				     union bpf_attr __user *uattr)
2006{
2007	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
2008						  true, false);
2009}
2010
2011struct bpf_iter_seq_hash_map_info {
2012	struct bpf_map *map;
2013	struct bpf_htab *htab;
2014	void *percpu_value_buf; // non-NULL means percpu hash
2015	u32 bucket_id;
2016	u32 skip_elems;
2017};
2018
2019static struct htab_elem *
2020bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
2021			   struct htab_elem *prev_elem)
2022{
2023	const struct bpf_htab *htab = info->htab;
2024	u32 skip_elems = info->skip_elems;
2025	u32 bucket_id = info->bucket_id;
2026	struct hlist_nulls_head *head;
2027	struct hlist_nulls_node *n;
2028	struct htab_elem *elem;
2029	struct bucket *b;
2030	u32 i, count;
2031
2032	if (bucket_id >= htab->n_buckets)
2033		return NULL;
2034
2035	/* try to find next elem in the same bucket */
2036	if (prev_elem) {
2037		/* no update/deletion on this bucket, prev_elem should still be valid
2038		 * and we won't skip elements.
2039		 */
2040		n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node));
2041		elem = hlist_nulls_entry_safe(n, struct htab_elem, hash_node);
2042		if (elem)
2043			return elem;
2044
2045		/* not found, unlock and go to the next bucket */
2046		b = &htab->buckets[bucket_id++];
2047		rcu_read_unlock();
2048		skip_elems = 0;
2049	}
2050
2051	for (i = bucket_id; i < htab->n_buckets; i++) {
2052		b = &htab->buckets[i];
2053		rcu_read_lock();
2054
2055		count = 0;
2056		head = &b->head;
2057		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
2058			if (count >= skip_elems) {
2059				info->bucket_id = i;
2060				info->skip_elems = count;
2061				return elem;
2062			}
2063			count++;
2064		}
2065
2066		rcu_read_unlock();
2067		skip_elems = 0;
2068	}
2069
2070	info->bucket_id = i;
2071	info->skip_elems = 0;
2072	return NULL;
2073}
2074
2075static void *bpf_hash_map_seq_start(struct seq_file *seq, loff_t *pos)
2076{
2077	struct bpf_iter_seq_hash_map_info *info = seq->private;
2078	struct htab_elem *elem;
2079
2080	elem = bpf_hash_map_seq_find_next(info, NULL);
2081	if (!elem)
2082		return NULL;
2083
2084	if (*pos == 0)
2085		++*pos;
2086	return elem;
2087}
2088
2089static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2090{
2091	struct bpf_iter_seq_hash_map_info *info = seq->private;
2092
2093	++*pos;
2094	++info->skip_elems;
2095	return bpf_hash_map_seq_find_next(info, v);
2096}
2097
2098static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
2099{
2100	struct bpf_iter_seq_hash_map_info *info = seq->private;
2101	u32 roundup_key_size, roundup_value_size;
2102	struct bpf_iter__bpf_map_elem ctx = {};
2103	struct bpf_map *map = info->map;
2104	struct bpf_iter_meta meta;
2105	int ret = 0, off = 0, cpu;
2106	struct bpf_prog *prog;
2107	void __percpu *pptr;
2108
2109	meta.seq = seq;
2110	prog = bpf_iter_get_info(&meta, elem == NULL);
2111	if (prog) {
2112		ctx.meta = &meta;
2113		ctx.map = info->map;
2114		if (elem) {
2115			roundup_key_size = round_up(map->key_size, 8);
2116			ctx.key = elem->key;
2117			if (!info->percpu_value_buf) {
2118				ctx.value = elem->key + roundup_key_size;
2119			} else {
2120				roundup_value_size = round_up(map->value_size, 8);
2121				pptr = htab_elem_get_ptr(elem, map->key_size);
2122				for_each_possible_cpu(cpu) {
2123					copy_map_value_long(map, info->percpu_value_buf + off,
2124							    per_cpu_ptr(pptr, cpu));
2125					check_and_init_map_value(map, info->percpu_value_buf + off);
2126					off += roundup_value_size;
2127				}
2128				ctx.value = info->percpu_value_buf;
2129			}
2130		}
2131		ret = bpf_iter_run_prog(prog, &ctx);
2132	}
2133
2134	return ret;
2135}
2136
2137static int bpf_hash_map_seq_show(struct seq_file *seq, void *v)
2138{
2139	return __bpf_hash_map_seq_show(seq, v);
2140}
2141
2142static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v)
2143{
2144	if (!v)
2145		(void)__bpf_hash_map_seq_show(seq, NULL);
2146	else
2147		rcu_read_unlock();
2148}
2149
2150static int bpf_iter_init_hash_map(void *priv_data,
2151				  struct bpf_iter_aux_info *aux)
2152{
2153	struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
2154	struct bpf_map *map = aux->map;
2155	void *value_buf;
2156	u32 buf_size;
2157
2158	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
2159	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
2160		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
2161		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
2162		if (!value_buf)
2163			return -ENOMEM;
2164
2165		seq_info->percpu_value_buf = value_buf;
2166	}
2167
2168	bpf_map_inc_with_uref(map);
2169	seq_info->map = map;
2170	seq_info->htab = container_of(map, struct bpf_htab, map);
2171	return 0;
2172}
2173
2174static void bpf_iter_fini_hash_map(void *priv_data)
2175{
2176	struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
2177
2178	bpf_map_put_with_uref(seq_info->map);
2179	kfree(seq_info->percpu_value_buf);
2180}
2181
2182static const struct seq_operations bpf_hash_map_seq_ops = {
2183	.start	= bpf_hash_map_seq_start,
2184	.next	= bpf_hash_map_seq_next,
2185	.stop	= bpf_hash_map_seq_stop,
2186	.show	= bpf_hash_map_seq_show,
2187};
2188
2189static const struct bpf_iter_seq_info iter_seq_info = {
2190	.seq_ops		= &bpf_hash_map_seq_ops,
2191	.init_seq_private	= bpf_iter_init_hash_map,
2192	.fini_seq_private	= bpf_iter_fini_hash_map,
2193	.seq_priv_size		= sizeof(struct bpf_iter_seq_hash_map_info),
2194};
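/*
 * Illustrative sketch, not part of this file: a bpf_iter program driven
 * by the seq operations above. It assumes "vmlinux.h" (generated BTF
 * header), a hash map with __u32 keys and __u64 values attached to the
 * iterator from user space, and libbpf's BPF_SEQ_PRINTF macro.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("iter/bpf_map_elem")
int dump_htab_elem(struct bpf_iter__bpf_map_elem *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	__u32 *key = ctx->key;
	__u64 *val = ctx->value;

	/* key/value are NULL on the final call after the last element */
	if (!key || !val)
		return 0;

	BPF_SEQ_PRINTF(seq, "%u: %llu\n", *key, *val);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";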
2195
2196static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn,
2197				   void *callback_ctx, u64 flags)
2198{
2199	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2200	struct hlist_nulls_head *head;
2201	struct hlist_nulls_node *n;
2202	struct htab_elem *elem;
2203	u32 roundup_key_size;
2204	int i, num_elems = 0;
2205	void __percpu *pptr;
2206	struct bucket *b;
2207	void *key, *val;
2208	bool is_percpu;
2209	u64 ret = 0;
2210
2211	if (flags != 0)
2212		return -EINVAL;
2213
2214	is_percpu = htab_is_percpu(htab);
2215
2216	roundup_key_size = round_up(map->key_size, 8);
2217	/* disable migration so percpu value prepared here will be the
2218	 * same as the one seen by the bpf program with bpf_map_lookup_elem().
2219	 */
2220	if (is_percpu)
2221		migrate_disable();
2222	for (i = 0; i < htab->n_buckets; i++) {
2223		b = &htab->buckets[i];
2224		rcu_read_lock();
2225		head = &b->head;
2226		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
2227			key = elem->key;
2228			if (is_percpu) {
2229				/* current cpu value for percpu map */
2230				pptr = htab_elem_get_ptr(elem, map->key_size);
2231				val = this_cpu_ptr(pptr);
2232			} else {
2233				val = elem->key + roundup_key_size;
2234			}
2235			num_elems++;
2236			ret = callback_fn((u64)(long)map, (u64)(long)key,
2237					  (u64)(long)val, (u64)(long)callback_ctx, 0);
2238			/* return value: 0 - continue, 1 - stop and return */
2239			if (ret) {
2240				rcu_read_unlock();
2241				goto out;
2242			}
2243		}
2244		rcu_read_unlock();
2245	}
2246out:
2247	if (is_percpu)
2248		migrate_enable();
2249	return num_elems;
2250}
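/*
 * Illustrative sketch, not part of this file: the BPF-program side of
 * the walk above, using the bpf_for_each_map_elem() helper that ends up
 * in bpf_for_each_hash_elem() for hash maps. Map layout and section
 * name are assumptions of the example; "vmlinux.h" is the generated
 * BTF header.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 128);
	__type(key, __u32);
	__type(value, __u64);
} demo_hash SEC(".maps");

struct cb_ctx {
	__u64 sum;
};

static long sum_cb(struct bpf_map *map, __u32 *key, __u64 *val, struct cb_ctx *ctx)
{
	ctx->sum += *val;
	return 0;	/* 0 = continue, 1 = stop the walk */
}

SEC("tracepoint/syscalls/sys_enter_getpid")
int sum_all_values(void *tp_ctx)
{
	struct cb_ctx c = {};

	bpf_for_each_map_elem(&demo_hash, sum_cb, &c, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";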
2251
2252static u64 htab_map_mem_usage(const struct bpf_map *map)
2253{
2254	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2255	u32 value_size = round_up(htab->map.value_size, 8);
2256	bool prealloc = htab_is_prealloc(htab);
2257	bool percpu = htab_is_percpu(htab);
2258	bool lru = htab_is_lru(htab);
2259	u64 num_entries;
2260	u64 usage = sizeof(struct bpf_htab);
2261
2262	usage += sizeof(struct bucket) * htab->n_buckets;
2263	usage += sizeof(int) * num_possible_cpus() * HASHTAB_MAP_LOCK_COUNT;
2264	if (prealloc) {
2265		num_entries = map->max_entries;
2266		if (htab_has_extra_elems(htab))
2267			num_entries += num_possible_cpus();
2268
2269		usage += htab->elem_size * num_entries;
2270
2271		if (percpu)
2272			usage += value_size * num_possible_cpus() * num_entries;
2273		else if (!lru)
2274			usage += sizeof(struct htab_elem *) * num_possible_cpus();
2275	} else {
2276#define LLIST_NODE_SZ sizeof(struct llist_node)
2277
2278		num_entries = htab->use_percpu_counter ?
2279					  percpu_counter_sum(&htab->pcount) :
2280					  atomic_read(&htab->count);
2281		usage += (htab->elem_size + LLIST_NODE_SZ) * num_entries;
2282		if (percpu) {
2283			usage += (LLIST_NODE_SZ + sizeof(void *)) * num_entries;
2284			usage += value_size * num_possible_cpus() * num_entries;
2285		}
2286	}
2287	return usage;
2288}
2289
2290BTF_ID_LIST_SINGLE(htab_map_btf_ids, struct, bpf_htab)
2291const struct bpf_map_ops htab_map_ops = {
2292	.map_meta_equal = bpf_map_meta_equal,
2293	.map_alloc_check = htab_map_alloc_check,
2294	.map_alloc = htab_map_alloc,
2295	.map_free = htab_map_free,
2296	.map_get_next_key = htab_map_get_next_key,
2297	.map_release_uref = htab_map_free_timers_and_wq,
2298	.map_lookup_elem = htab_map_lookup_elem,
2299	.map_lookup_and_delete_elem = htab_map_lookup_and_delete_elem,
2300	.map_update_elem = htab_map_update_elem,
2301	.map_delete_elem = htab_map_delete_elem,
2302	.map_gen_lookup = htab_map_gen_lookup,
2303	.map_seq_show_elem = htab_map_seq_show_elem,
2304	.map_set_for_each_callback_args = map_set_for_each_callback_args,
2305	.map_for_each_callback = bpf_for_each_hash_elem,
2306	.map_mem_usage = htab_map_mem_usage,
2307	BATCH_OPS(htab),
2308	.map_btf_id = &htab_map_btf_ids[0],
2309	.iter_seq_info = &iter_seq_info,
2310};
2311
2312const struct bpf_map_ops htab_lru_map_ops = {
2313	.map_meta_equal = bpf_map_meta_equal,
2314	.map_alloc_check = htab_map_alloc_check,
2315	.map_alloc = htab_map_alloc,
2316	.map_free = htab_map_free,
2317	.map_get_next_key = htab_map_get_next_key,
2318	.map_release_uref = htab_map_free_timers_and_wq,
2319	.map_lookup_elem = htab_lru_map_lookup_elem,
2320	.map_lookup_and_delete_elem = htab_lru_map_lookup_and_delete_elem,
2321	.map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
2322	.map_update_elem = htab_lru_map_update_elem,
2323	.map_delete_elem = htab_lru_map_delete_elem,
2324	.map_gen_lookup = htab_lru_map_gen_lookup,
2325	.map_seq_show_elem = htab_map_seq_show_elem,
2326	.map_set_for_each_callback_args = map_set_for_each_callback_args,
2327	.map_for_each_callback = bpf_for_each_hash_elem,
2328	.map_mem_usage = htab_map_mem_usage,
2329	BATCH_OPS(htab_lru),
2330	.map_btf_id = &htab_map_btf_ids[0],
2331	.iter_seq_info = &iter_seq_info,
2332};
2333
2334/* Called from eBPF program */
2335static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
2336{
2337	struct htab_elem *l = __htab_map_lookup_elem(map, key);
2338
2339	if (l)
2340		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
2341	else
2342		return NULL;
2343}
2344
2345/* inline bpf_map_lookup_elem() call for per-CPU hashmap */
2346static int htab_percpu_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
2347{
2348	struct bpf_insn *insn = insn_buf;
2349
2350	if (!bpf_jit_supports_percpu_insn())
2351		return -EOPNOTSUPP;
2352
2353	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
2354		     (void *(*)(struct bpf_map *map, void *key))NULL));
2355	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
2356	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3);
2357	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
2358				offsetof(struct htab_elem, key) + map->key_size);
2359	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
2360	*insn++ = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
2361
2362	return insn - insn_buf;
2363}
2364
2365static void *htab_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
2366{
2367	struct htab_elem *l;
2368
2369	if (cpu >= nr_cpu_ids)
2370		return NULL;
2371
2372	l = __htab_map_lookup_elem(map, key);
2373	if (l)
2374		return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
2375	else
2376		return NULL;
2377}
2378
2379static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
2380{
2381	struct htab_elem *l = __htab_map_lookup_elem(map, key);
2382
2383	if (l) {
2384		bpf_lru_node_set_ref(&l->lru_node);
2385		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
2386	}
2387
2388	return NULL;
2389}
2390
2391static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
2392{
2393	struct htab_elem *l;
2394
2395	if (cpu >= nr_cpu_ids)
2396		return NULL;
2397
2398	l = __htab_map_lookup_elem(map, key);
2399	if (l) {
2400		bpf_lru_node_set_ref(&l->lru_node);
2401		return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
2402	}
2403
2404	return NULL;
2405}
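/*
 * Illustrative sketch, not part of this file: the BPF-program side of
 * the lookup_percpu_elem callbacks above, via the
 * bpf_map_lookup_percpu_elem() helper. The map definition and section
 * name are assumptions of the example.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u64);
} pcpu_hash SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_getpid")
int read_cpu0_value(void *ctx)
{
	__u32 key = 0;
	__u64 *val;

	/* value of "key" as seen by CPU 0; NULL if key or cpu is invalid */
	val = bpf_map_lookup_percpu_elem(&pcpu_hash, &key, 0);
	if (val)
		bpf_printk("cpu0: %llu", *val);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";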
2406
2407int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
2408{
2409	struct htab_elem *l;
2410	void __percpu *pptr;
2411	int ret = -ENOENT;
2412	int cpu, off = 0;
2413	u32 size;
2414
2415	/* per_cpu areas are zero-filled and bpf programs can only
2416	 * access 'value_size' of them, so copying rounded areas
2417	 * will not leak any kernel data
2418	 */
2419	size = round_up(map->value_size, 8);
2420	rcu_read_lock();
2421	l = __htab_map_lookup_elem(map, key);
2422	if (!l)
2423		goto out;
2424	/* We do not mark LRU map element here in order to not mess up
2425	 * eviction heuristics when user space does a map walk.
2426	 */
2427	pptr = htab_elem_get_ptr(l, map->key_size);
2428	for_each_possible_cpu(cpu) {
2429		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
2430		check_and_init_map_value(map, value + off);
2431		off += size;
2432	}
2433	ret = 0;
2434out:
2435	rcu_read_unlock();
2436	return ret;
2437}
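/*
 * Illustrative sketch, not part of this file: what the per-CPU copy loop
 * above implies for user space. A lookup on a BPF_MAP_TYPE_PERCPU_HASH
 * needs one 8-byte-aligned value slot per possible CPU; here the value
 * is assumed to be a __u64 counter and "map_fd" an existing fd.
 */
#include <stdio.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>		/* libbpf_num_possible_cpus() */

static int percpu_sum_demo(int map_fd, __u32 key)
{
	int ncpus = libbpf_num_possible_cpus();
	__u64 *vals, total = 0;
	int err, cpu;

	if (ncpus < 0)
		return ncpus;

	/* one slot per possible CPU; __u64 already meets the 8-byte rounding */
	vals = calloc(ncpus, sizeof(*vals));
	if (!vals)
		return -1;

	err = bpf_map_lookup_elem(map_fd, &key, vals);
	if (!err) {
		for (cpu = 0; cpu < ncpus; cpu++)
			total += vals[cpu];
		printf("key %u: sum over CPUs = %llu\n", key,
		       (unsigned long long)total);
	}
	free(vals);
	return err;
}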
2438
2439int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
2440			   u64 map_flags)
2441{
2442	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2443	int ret;
2444
2445	rcu_read_lock();
2446	if (htab_is_lru(htab))
2447		ret = __htab_lru_percpu_map_update_elem(map, key, value,
2448							map_flags, true);
2449	else
2450		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
2451						    true);
2452	rcu_read_unlock();
2453
2454	return ret;
2455}
2456
2457static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
2458					  struct seq_file *m)
2459{
2460	struct htab_elem *l;
2461	void __percpu *pptr;
2462	int cpu;
2463
2464	rcu_read_lock();
2465
2466	l = __htab_map_lookup_elem(map, key);
2467	if (!l) {
2468		rcu_read_unlock();
2469		return;
2470	}
2471
2472	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
2473	seq_puts(m, ": {\n");
2474	pptr = htab_elem_get_ptr(l, map->key_size);
2475	for_each_possible_cpu(cpu) {
2476		seq_printf(m, "\tcpu%d: ", cpu);
2477		btf_type_seq_show(map->btf, map->btf_value_type_id,
2478				  per_cpu_ptr(pptr, cpu), m);
2479		seq_putc(m, '\n');
2480	}
2481	seq_puts(m, "}\n");
2482
2483	rcu_read_unlock();
2484}
2485
2486const struct bpf_map_ops htab_percpu_map_ops = {
2487	.map_meta_equal = bpf_map_meta_equal,
2488	.map_alloc_check = htab_map_alloc_check,
2489	.map_alloc = htab_map_alloc,
2490	.map_free = htab_map_free,
2491	.map_get_next_key = htab_map_get_next_key,
2492	.map_lookup_elem = htab_percpu_map_lookup_elem,
2493	.map_gen_lookup = htab_percpu_map_gen_lookup,
2494	.map_lookup_and_delete_elem = htab_percpu_map_lookup_and_delete_elem,
2495	.map_update_elem = htab_percpu_map_update_elem,
2496	.map_delete_elem = htab_map_delete_elem,
2497	.map_lookup_percpu_elem = htab_percpu_map_lookup_percpu_elem,
2498	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
2499	.map_set_for_each_callback_args = map_set_for_each_callback_args,
2500	.map_for_each_callback = bpf_for_each_hash_elem,
2501	.map_mem_usage = htab_map_mem_usage,
2502	BATCH_OPS(htab_percpu),
2503	.map_btf_id = &htab_map_btf_ids[0],
2504	.iter_seq_info = &iter_seq_info,
2505};
2506
2507const struct bpf_map_ops htab_lru_percpu_map_ops = {
2508	.map_meta_equal = bpf_map_meta_equal,
2509	.map_alloc_check = htab_map_alloc_check,
2510	.map_alloc = htab_map_alloc,
2511	.map_free = htab_map_free,
2512	.map_get_next_key = htab_map_get_next_key,
2513	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
2514	.map_lookup_and_delete_elem = htab_lru_percpu_map_lookup_and_delete_elem,
2515	.map_update_elem = htab_lru_percpu_map_update_elem,
2516	.map_delete_elem = htab_lru_map_delete_elem,
2517	.map_lookup_percpu_elem = htab_lru_percpu_map_lookup_percpu_elem,
2518	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
2519	.map_set_for_each_callback_args = map_set_for_each_callback_args,
2520	.map_for_each_callback = bpf_for_each_hash_elem,
2521	.map_mem_usage = htab_map_mem_usage,
2522	BATCH_OPS(htab_lru_percpu),
2523	.map_btf_id = &htab_map_btf_ids[0],
2524	.iter_seq_info = &iter_seq_info,
2525};
2526
2527static int fd_htab_map_alloc_check(union bpf_attr *attr)
2528{
2529	if (attr->value_size != sizeof(u32))
2530		return -EINVAL;
2531	return htab_map_alloc_check(attr);
2532}
2533
2534static void fd_htab_map_free(struct bpf_map *map)
2535{
2536	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2537	struct hlist_nulls_node *n;
2538	struct hlist_nulls_head *head;
2539	struct htab_elem *l;
2540	int i;
2541
2542	for (i = 0; i < htab->n_buckets; i++) {
2543		head = select_bucket(htab, i);
2544
2545		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
2546			void *ptr = fd_htab_map_get_ptr(map, l);
2547
2548			map->ops->map_fd_put_ptr(map, ptr, false);
2549		}
2550	}
2551
2552	htab_map_free(map);
2553}
2554
2555/* only called from syscall */
2556int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
2557{
2558	void **ptr;
2559	int ret = 0;
2560
2561	if (!map->ops->map_fd_sys_lookup_elem)
2562		return -ENOTSUPP;
2563
2564	rcu_read_lock();
2565	ptr = htab_map_lookup_elem(map, key);
2566	if (ptr)
2567		*value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
2568	else
2569		ret = -ENOENT;
2570	rcu_read_unlock();
2571
2572	return ret;
2573}
2574
2575/* only called from syscall */
2576int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
2577				void *key, void *value, u64 map_flags)
2578{
2579	void *ptr;
2580	int ret;
2581	u32 ufd = *(u32 *)value;
2582
2583	ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
2584	if (IS_ERR(ptr))
2585		return PTR_ERR(ptr);
2586
2587	/* The htab bucket lock is always held during update operations in fd
2588	 * htab map, and the following rcu_read_lock() is only used to avoid
2589	 * the WARN_ON_ONCE in htab_map_update_elem().
2590	 */
2591	rcu_read_lock();
2592	ret = htab_map_update_elem(map, key, &ptr, map_flags);
2593	rcu_read_unlock();
2594	if (ret)
2595		map->ops->map_fd_put_ptr(map, ptr, false);
2596
2597	return ret;
2598}
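/*
 * Illustrative sketch, not part of this file: from user space a
 * BPF_MAP_TYPE_HASH_OF_MAPS is updated with an inner map *fd* as the
 * value; the path above converts it to a map pointer. Map names and
 * sizes are assumptions of the example.
 */
#include <bpf/bpf.h>

static int hash_of_maps_demo(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts);
	int inner_fd, outer_fd;
	__u32 key = 0;

	/* inner map acting both as the template and as the first value */
	inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "inner", 4, 8, 1, NULL);
	if (inner_fd < 0)
		return inner_fd;

	opts.inner_map_fd = inner_fd;
	/* outer value_size must be sizeof(u32): it carries the inner map fd */
	outer_fd = bpf_map_create(BPF_MAP_TYPE_HASH_OF_MAPS, "outer",
				  4, 4, 16, &opts);
	if (outer_fd < 0)
		return outer_fd;

	return bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_ANY);
}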
2599
2600static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
2601{
2602	struct bpf_map *map, *inner_map_meta;
2603
2604	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
2605	if (IS_ERR(inner_map_meta))
2606		return inner_map_meta;
2607
2608	map = htab_map_alloc(attr);
2609	if (IS_ERR(map)) {
2610		bpf_map_meta_free(inner_map_meta);
2611		return map;
2612	}
2613
2614	map->inner_map_meta = inner_map_meta;
2615
2616	return map;
2617}
2618
2619static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
2620{
2621	struct bpf_map **inner_map  = htab_map_lookup_elem(map, key);
2622
2623	if (!inner_map)
2624		return NULL;
2625
2626	return READ_ONCE(*inner_map);
2627}
2628
2629static int htab_of_map_gen_lookup(struct bpf_map *map,
2630				  struct bpf_insn *insn_buf)
2631{
2632	struct bpf_insn *insn = insn_buf;
2633	const int ret = BPF_REG_0;
2634
2635	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
2636		     (void *(*)(struct bpf_map *map, void *key))NULL));
2637	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
2638	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
2639	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
2640				offsetof(struct htab_elem, key) +
2641				round_up(map->key_size, 8));
2642	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
2643
2644	return insn - insn_buf;
2645}
2646
2647static void htab_of_map_free(struct bpf_map *map)
2648{
2649	bpf_map_meta_free(map->inner_map_meta);
2650	fd_htab_map_free(map);
2651}
2652
2653const struct bpf_map_ops htab_of_maps_map_ops = {
2654	.map_alloc_check = fd_htab_map_alloc_check,
2655	.map_alloc = htab_of_map_alloc,
2656	.map_free = htab_of_map_free,
2657	.map_get_next_key = htab_map_get_next_key,
2658	.map_lookup_elem = htab_of_map_lookup_elem,
2659	.map_delete_elem = htab_map_delete_elem,
2660	.map_fd_get_ptr = bpf_map_fd_get_ptr,
2661	.map_fd_put_ptr = bpf_map_fd_put_ptr,
2662	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
2663	.map_gen_lookup = htab_of_map_gen_lookup,
2664	.map_check_btf = map_check_no_btf,
2665	.map_mem_usage = htab_map_mem_usage,
2666	BATCH_OPS(htab),
2667	.map_btf_id = &htab_map_btf_ids[0],
2668};