v4.17
 
   1/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   2 * Copyright (c) 2016 Facebook
   3 *
   4 * This program is free software; you can redistribute it and/or
   5 * modify it under the terms of version 2 of the GNU General Public
   6 * License as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful, but
   9 * WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11 * General Public License for more details.
  12 */
  13#include <linux/bpf.h>
 
  14#include <linux/jhash.h>
  15#include <linux/filter.h>
  16#include <linux/rculist_nulls.h>
  17#include "percpu_freelist.h"
  18#include "bpf_lru_list.h"
  19#include "map_in_map.h"
 
  20
  21#define HTAB_CREATE_FLAG_MASK						\
  22	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE |	\
  23	 BPF_F_RDONLY | BPF_F_WRONLY)
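/*
 * Illustrative user-space sketch (not part of the kernel source): creating a
 * hash map whose map_flags must stay within HTAB_CREATE_FLAG_MASK above.
 * Minimal raw bpf(2) syscall use; the key/value sizes are arbitrary examples
 * and error handling is elided.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int create_example_hash_map(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_HASH;
	attr.key_size    = sizeof(__u32);
	attr.value_size  = sizeof(__u64);
	attr.max_entries = 1024;
	/* any bit outside HTAB_CREATE_FLAG_MASK is rejected with -EINVAL */
	attr.map_flags   = BPF_F_NO_PREALLOC;

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}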
  24
  25struct bucket {
  26	struct hlist_nulls_head head;
  27	raw_spinlock_t lock;
  28};
  29
  30struct bpf_htab {
  31	struct bpf_map map;
  32	struct bucket *buckets;
  33	void *elems;
  34	union {
  35		struct pcpu_freelist freelist;
  36		struct bpf_lru lru;
  37	};
  38	struct htab_elem *__percpu *extra_elems;
  39	atomic_t count;	/* number of elements in this hashtable */
  40	u32 n_buckets;	/* number of hash buckets */
  41	u32 elem_size;	/* size of each element in bytes */
  42};
  43
  44/* each htab element is struct htab_elem + key + value */
  45struct htab_elem {
  46	union {
  47		struct hlist_nulls_node hash_node;
  48		struct {
  49			void *padding;
  50			union {
  51				struct bpf_htab *htab;
  52				struct pcpu_freelist_node fnode;
 
  53			};
  54		};
  55	};
  56	union {
  57		struct rcu_head rcu;
 
  58		struct bpf_lru_node lru_node;
  59	};
  60	u32 hash;
  61	char key[0] __aligned(8);
  62};
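/*
 * Layout sketch (illustrative, not part of the kernel source): an element is
 * struct htab_elem, then the key rounded up to 8 bytes, then either the value
 * rounded up to 8 bytes or, for per-cpu maps, one pointer to the per-cpu value
 * area.  The helper name below is made up; it simply mirrors the elem_size
 * computation performed later in htab_map_alloc().
 */
static inline u32 example_elem_size(u32 key_size, u32 value_size, bool percpu)
{
	u32 size = sizeof(struct htab_elem) + round_up(key_size, 8);

	return size + (percpu ? sizeof(void *) : round_up(value_size, 8));
}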
  63
  64static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
  65
  66static bool htab_is_lru(const struct bpf_htab *htab)
  67{
  68	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
  69		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
  70}
  71
  72static bool htab_is_percpu(const struct bpf_htab *htab)
  73{
  74	return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
  75		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
  76}
  77
  78static bool htab_is_prealloc(const struct bpf_htab *htab)
  79{
  80	return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
  81}
  82
  83static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
  84				     void __percpu *pptr)
  85{
  86	*(void __percpu **)(l->key + key_size) = pptr;
  87}
  88
  89static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
  90{
  91	return *(void __percpu **)(l->key + key_size);
  92}
  93
  94static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
  95{
  96	return *(void **)(l->key + roundup(map->key_size, 8));
  97}
  98
  99static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
 100{
 101	return (struct htab_elem *) (htab->elems + i * htab->elem_size);
 102}
 103
 104static void htab_free_elems(struct bpf_htab *htab)
 105{
 106	int i;
 107
 108	if (!htab_is_percpu(htab))
 109		goto free_elems;
 110
 111	for (i = 0; i < htab->map.max_entries; i++) {
 112		void __percpu *pptr;
 113
 114		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
 115					 htab->map.key_size);
 116		free_percpu(pptr);
 117		cond_resched();
 118	}
 119free_elems:
 120	bpf_map_area_free(htab->elems);
 121}
 122
 123static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
 124					  u32 hash)
 125{
 126	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
 127	struct htab_elem *l;
 128
 129	if (node) {
 
 130		l = container_of(node, struct htab_elem, lru_node);
 131		memcpy(l->key, key, htab->map.key_size);
 132		return l;
 133	}
 134
 135	return NULL;
 136}
 137
 138static int prealloc_init(struct bpf_htab *htab)
 139{
 140	u32 num_entries = htab->map.max_entries;
 141	int err = -ENOMEM, i;
 142
 143	if (!htab_is_percpu(htab) && !htab_is_lru(htab))
 144		num_entries += num_possible_cpus();
 145
 146	htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries,
 147					 htab->map.numa_node);
 148	if (!htab->elems)
 149		return -ENOMEM;
 150
 151	if (!htab_is_percpu(htab))
 152		goto skip_percpu_elems;
 153
 154	for (i = 0; i < num_entries; i++) {
 155		u32 size = round_up(htab->map.value_size, 8);
 156		void __percpu *pptr;
 157
 158		pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
 
 159		if (!pptr)
 160			goto free_elems;
 161		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
 162				  pptr);
 163		cond_resched();
 164	}
 165
 166skip_percpu_elems:
 167	if (htab_is_lru(htab))
 168		err = bpf_lru_init(&htab->lru,
 169				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
 170				   offsetof(struct htab_elem, hash) -
 171				   offsetof(struct htab_elem, lru_node),
 172				   htab_lru_map_delete_node,
 173				   htab);
 174	else
 175		err = pcpu_freelist_init(&htab->freelist);
 176
 177	if (err)
 178		goto free_elems;
 179
 180	if (htab_is_lru(htab))
 181		bpf_lru_populate(&htab->lru, htab->elems,
 182				 offsetof(struct htab_elem, lru_node),
 183				 htab->elem_size, num_entries);
 184	else
 185		pcpu_freelist_populate(&htab->freelist,
 186				       htab->elems + offsetof(struct htab_elem, fnode),
 187				       htab->elem_size, num_entries);
 188
 189	return 0;
 190
 191free_elems:
 192	htab_free_elems(htab);
 193	return err;
 194}
 195
 196static void prealloc_destroy(struct bpf_htab *htab)
 197{
 198	htab_free_elems(htab);
 199
 200	if (htab_is_lru(htab))
 201		bpf_lru_destroy(&htab->lru);
 202	else
 203		pcpu_freelist_destroy(&htab->freelist);
 204}
 205
 206static int alloc_extra_elems(struct bpf_htab *htab)
 207{
 208	struct htab_elem *__percpu *pptr, *l_new;
 209	struct pcpu_freelist_node *l;
 210	int cpu;
 211
 212	pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8,
 213				  GFP_USER | __GFP_NOWARN);
 214	if (!pptr)
 215		return -ENOMEM;
 216
 217	for_each_possible_cpu(cpu) {
 218		l = pcpu_freelist_pop(&htab->freelist);
 219		/* pop will succeed, since prealloc_init()
 220		 * preallocated extra num_possible_cpus elements
 221		 */
 222		l_new = container_of(l, struct htab_elem, fnode);
 223		*per_cpu_ptr(pptr, cpu) = l_new;
 224	}
 225	htab->extra_elems = pptr;
 226	return 0;
 227}
 228
 229/* Called from syscall */
 230static int htab_map_alloc_check(union bpf_attr *attr)
 231{
 232	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 233		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
 234	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
 235		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
 236	/* percpu_lru means each cpu has its own LRU list.
 237	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
 238	 * the map's value itself is percpu.  percpu_lru has
 239	 * nothing to do with the map's value.
 240	 */
 241	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 242	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
 
 243	int numa_node = bpf_map_attr_numa_node(attr);
 244
 245	BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
 246		     offsetof(struct htab_elem, hash_node.pprev));
 247	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
 248		     offsetof(struct htab_elem, hash_node.pprev));
 249
 250	if (lru && !capable(CAP_SYS_ADMIN))
  251		/* The LRU implementation is much more complicated than other
 252		 * maps.  Hence, limit to CAP_SYS_ADMIN for now.
 253		 */
 254		return -EPERM;
 255
 256	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
 257		/* reserved bits should not be used */
 258		return -EINVAL;
 259
 260	if (!lru && percpu_lru)
 261		return -EINVAL;
 262
 263	if (lru && !prealloc)
 264		return -ENOTSUPP;
 265
 266	if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
 267		return -EINVAL;
 268
 269	/* check sanity of attributes.
 270	 * value_size == 0 may be allowed in the future to use map as a set
 271	 */
 272	if (attr->max_entries == 0 || attr->key_size == 0 ||
 273	    attr->value_size == 0)
 274		return -EINVAL;
 275
 276	if (attr->key_size > MAX_BPF_STACK)
 277		/* eBPF programs initialize keys on stack, so they cannot be
 278		 * larger than max stack size
 279		 */
 280		return -E2BIG;
 281
 282	if (attr->value_size >= KMALLOC_MAX_SIZE -
 283	    MAX_BPF_STACK - sizeof(struct htab_elem))
 284		/* if value_size is bigger, the user space won't be able to
 285		 * access the elements via bpf syscall. This check also makes
 286		 * sure that the elem_size doesn't overflow and it's
 287		 * kmalloc-able later in htab_map_update_elem()
 288		 */
 289		return -E2BIG;
 290
 291	return 0;
 292}
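/*
 * Illustrative user-space sketch (not part of the kernel source): the checks
 * above surface as errors at map creation time.  For example, an LRU hash map
 * created with BPF_F_NO_PREALLOC is rejected (lru && !prealloc), and a zero
 * key_size or value_size is rejected with -EINVAL.  Raw bpf(2) syscall,
 * error handling elided.
 */
#include <errno.h>
#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_LRU_HASH;
	attr.key_size    = 4;
	attr.value_size  = 8;
	attr.max_entries = 128;
	attr.map_flags   = BPF_F_NO_PREALLOC;	/* LRU requires preallocation */

	fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	if (fd < 0)
		printf("map creation failed: errno %d\n", errno);
	else
		close(fd);
	return 0;
}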
 293
 294static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 295{
 296	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 297		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
 298	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
 299		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
 300	/* percpu_lru means each cpu has its own LRU list.
 301	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
 302	 * the map's value itself is percpu.  percpu_lru has
 303	 * nothing to do with the map's value.
 304	 */
 305	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 306	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
 307	struct bpf_htab *htab;
 308	int err, i;
 309	u64 cost;
 310
 311	htab = kzalloc(sizeof(*htab), GFP_USER);
 312	if (!htab)
 313		return ERR_PTR(-ENOMEM);
 314
 315	bpf_map_init_from_attr(&htab->map, attr);
 316
 317	if (percpu_lru) {
 318		/* ensure each CPU's lru list has >= 1 element.
 319		 * while we are at it, make each lru list have the same
 320		 * number of elements.
 321		 */
 322		htab->map.max_entries = roundup(attr->max_entries,
 323						num_possible_cpus());
 324		if (htab->map.max_entries < attr->max_entries)
 325			htab->map.max_entries = rounddown(attr->max_entries,
 326							  num_possible_cpus());
 327	}
 328
 329	/* hash table size must be power of 2 */
 330	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
 331
 332	htab->elem_size = sizeof(struct htab_elem) +
 333			  round_up(htab->map.key_size, 8);
 334	if (percpu)
 335		htab->elem_size += sizeof(void *);
 336	else
 337		htab->elem_size += round_up(htab->map.value_size, 8);
 338
 339	err = -E2BIG;
 340	/* prevent zero size kmalloc and check for u32 overflow */
 341	if (htab->n_buckets == 0 ||
 342	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
 343		goto free_htab;
 344
 345	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
 346	       (u64) htab->elem_size * htab->map.max_entries;
 347
 348	if (percpu)
 349		cost += (u64) round_up(htab->map.value_size, 8) *
 350			num_possible_cpus() * htab->map.max_entries;
 351	else
 352	       cost += (u64) htab->elem_size * num_possible_cpus();
 353
 354	if (cost >= U32_MAX - PAGE_SIZE)
 355		/* make sure page count doesn't overflow */
 356		goto free_htab;
 357
 358	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 359
 360	/* if map size is larger than memlock limit, reject it early */
 361	err = bpf_map_precharge_memlock(htab->map.pages);
 362	if (err)
 363		goto free_htab;
 364
 365	err = -ENOMEM;
 366	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
 367					   sizeof(struct bucket),
 368					   htab->map.numa_node);
 369	if (!htab->buckets)
 370		goto free_htab;
 371
 372	for (i = 0; i < htab->n_buckets; i++) {
 373		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
 374		raw_spin_lock_init(&htab->buckets[i].lock);
 375	}
 376
 377	if (prealloc) {
 378		err = prealloc_init(htab);
 379		if (err)
 380			goto free_buckets;
 381
 382		if (!percpu && !lru) {
 383			/* lru itself can remove the least used element, so
 384			 * there is no need for an extra elem during map_update.
 385			 */
 386			err = alloc_extra_elems(htab);
 387			if (err)
 388				goto free_prealloc;
 389		}
 390	}
 391
 392	return &htab->map;
 393
 394free_prealloc:
 395	prealloc_destroy(htab);
 396free_buckets:
 397	bpf_map_area_free(htab->buckets);
 398free_htab:
 399	kfree(htab);
 
 400	return ERR_PTR(err);
 401}
 402
 403static inline u32 htab_map_hash(const void *key, u32 key_len)
 404{
 405	return jhash(key, key_len, 0);
 406}
 407
 408static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
 409{
 410	return &htab->buckets[hash & (htab->n_buckets - 1)];
 411}
 412
 413static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
 414{
 415	return &__select_bucket(htab, hash)->head;
 416}
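/*
 * Illustrative sketch (not part of the kernel source): n_buckets is always a
 * power of two (see htab_map_alloc() above), so the bucket index is simply the
 * low bits of the jhash value and no modulo operation is needed.  The helper
 * name is made up.
 */
static inline u32 example_bucket_index(u32 hash, u32 n_buckets)
{
	/* equivalent to hash % n_buckets when n_buckets is a power of two */
	return hash & (n_buckets - 1);
}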
 417
 418/* this lookup function can only be called with bucket lock taken */
 419static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
 420					 void *key, u32 key_size)
 421{
 422	struct hlist_nulls_node *n;
 423	struct htab_elem *l;
 424
 425	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
 426		if (l->hash == hash && !memcmp(&l->key, key, key_size))
 427			return l;
 428
 429	return NULL;
 430}
 431
 432/* can be called without the bucket lock. It will repeat the loop in
 433 * the unlikely event that elements moved from one bucket into another
 434 * while the linked list is being walked
 435 */
 436static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
 437					       u32 hash, void *key,
 438					       u32 key_size, u32 n_buckets)
 439{
 440	struct hlist_nulls_node *n;
 441	struct htab_elem *l;
 442
 443again:
 444	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
 445		if (l->hash == hash && !memcmp(&l->key, key, key_size))
 446			return l;
 447
 448	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
 449		goto again;
 450
 451	return NULL;
 452}
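/*
 * Illustrative sketch (not part of the kernel source): every bucket list is
 * initialized with its own bucket index as the "nulls" marker, via
 * INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i) in htab_map_alloc().  If a
 * lockless walk ends on a marker that does not match this bucket's index, the
 * element it followed was freed and reinserted into another bucket, so
 * lookup_nulls_elem_raw() restarts the walk.  The helper below just restates
 * the get_nulls_value() test; its name is made up.
 */
static inline bool example_walk_left_bucket(unsigned long nulls_value,
					    u32 hash, u32 n_buckets)
{
	return nulls_value != (hash & (n_buckets - 1));
}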
 453
 454/* Called from syscall or from eBPF program directly, so
 455 * arguments have to match bpf_map_lookup_elem() exactly.
 456 * The return value is adjusted by BPF instructions
 457 * in htab_map_gen_lookup().
 458 */
 459static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
 460{
 461	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 462	struct hlist_nulls_head *head;
 463	struct htab_elem *l;
 464	u32 hash, key_size;
 465
 466	/* Must be called with rcu_read_lock. */
 467	WARN_ON_ONCE(!rcu_read_lock_held());
 468
 469	key_size = map->key_size;
 470
 471	hash = htab_map_hash(key, key_size);
 472
 473	head = select_bucket(htab, hash);
 474
 475	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
 476
 477	return l;
 478}
 479
 480static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
 481{
 482	struct htab_elem *l = __htab_map_lookup_elem(map, key);
 483
 484	if (l)
 485		return l->key + round_up(map->key_size, 8);
 486
 487	return NULL;
 488}
 489
 490/* inline bpf_map_lookup_elem() call.
 491 * Instead of:
 492 * bpf_prog
 493 *   bpf_map_lookup_elem
 494 *     map->ops->map_lookup_elem
 495 *       htab_map_lookup_elem
 496 *         __htab_map_lookup_elem
 497 * do:
 498 * bpf_prog
 499 *   __htab_map_lookup_elem
 500 */
 501static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 502{
 503	struct bpf_insn *insn = insn_buf;
 504	const int ret = BPF_REG_0;
 505
 506	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
 507	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
 508	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
 509				offsetof(struct htab_elem, key) +
 510				round_up(map->key_size, 8));
 511	return insn - insn_buf;
 512}
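/*
 * Illustrative sketch (not part of the kernel source): the instructions
 * emitted above behave roughly like the following C, executed directly in the
 * calling BPF program instead of going through bpf_map_lookup_elem().  The
 * function name is made up.
 */
static void *example_inlined_lookup(struct bpf_map *map, void *key)
{
	void *l = __htab_map_lookup_elem(map, key);

	if (!l)
		return NULL;
	/* skip the element header and the 8-byte-aligned key to reach the value */
	return l + offsetof(struct htab_elem, key) + round_up(map->key_size, 8);
}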
 513
 514static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
 
 515{
 516	struct htab_elem *l = __htab_map_lookup_elem(map, key);
 517
 518	if (l) {
 519		bpf_lru_node_set_ref(&l->lru_node);
 
 520		return l->key + round_up(map->key_size, 8);
 521	}
 522
 523	return NULL;
 524}
 525
 526static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
 527				   struct bpf_insn *insn_buf)
 528{
 529	struct bpf_insn *insn = insn_buf;
 530	const int ret = BPF_REG_0;
 531	const int ref_reg = BPF_REG_1;
 532
 533	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
 534	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
 535	*insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
 536			      offsetof(struct htab_elem, lru_node) +
 537			      offsetof(struct bpf_lru_node, ref));
 538	*insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1);
 539	*insn++ = BPF_ST_MEM(BPF_B, ret,
 540			     offsetof(struct htab_elem, lru_node) +
 541			     offsetof(struct bpf_lru_node, ref),
 542			     1);
 543	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
 544				offsetof(struct htab_elem, key) +
 545				round_up(map->key_size, 8));
 546	return insn - insn_buf;
 547}
 548
 549/* It is called from the bpf_lru_list when the LRU needs to delete
 550 * older elements from the htab.
 551 */
 552static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 553{
 554	struct bpf_htab *htab = (struct bpf_htab *)arg;
 555	struct htab_elem *l = NULL, *tgt_l;
 556	struct hlist_nulls_head *head;
 557	struct hlist_nulls_node *n;
 558	unsigned long flags;
 559	struct bucket *b;
 
 560
 561	tgt_l = container_of(node, struct htab_elem, lru_node);
 562	b = __select_bucket(htab, tgt_l->hash);
 563	head = &b->head;
 564
 565	raw_spin_lock_irqsave(&b->lock, flags);
 566
 567	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
 568		if (l == tgt_l) {
 569			hlist_nulls_del_rcu(&l->hash_node);
 570			break;
 571		}
 572
 573	raw_spin_unlock_irqrestore(&b->lock, flags);
 574
 575	return l == tgt_l;
 576}
 577
 578/* Called from syscall */
 579static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 580{
 581	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 582	struct hlist_nulls_head *head;
 583	struct htab_elem *l, *next_l;
 584	u32 hash, key_size;
 585	int i = 0;
 586
 587	WARN_ON_ONCE(!rcu_read_lock_held());
 588
 589	key_size = map->key_size;
 590
 591	if (!key)
 592		goto find_first_elem;
 593
 594	hash = htab_map_hash(key, key_size);
 595
 596	head = select_bucket(htab, hash);
 597
 598	/* lookup the key */
 599	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
 600
 601	if (!l)
 602		goto find_first_elem;
 603
 604	/* key was found, get next key in the same bucket */
 605	next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
 606				  struct htab_elem, hash_node);
 607
 608	if (next_l) {
 609		/* if next elem in this hash list is non-zero, just return it */
 610		memcpy(next_key, next_l->key, key_size);
 611		return 0;
 612	}
 613
 614	/* no more elements in this hash list, go to the next bucket */
 615	i = hash & (htab->n_buckets - 1);
 616	i++;
 617
 618find_first_elem:
 619	/* iterate over buckets */
 620	for (; i < htab->n_buckets; i++) {
 621		head = select_bucket(htab, i);
 622
 623		/* pick first element in the bucket */
 624		next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
 625					  struct htab_elem, hash_node);
 626		if (next_l) {
 627			/* if it's not empty, just return it */
 628			memcpy(next_key, next_l->key, key_size);
 629			return 0;
 630		}
 631	}
 632
 633	/* iterated over all buckets and all elements */
 634	return -ENOENT;
 635}
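/*
 * Illustrative user-space sketch (not part of the kernel source): the
 * syscall-side loop that drives htab_map_get_next_key().  A NULL key (or a key
 * that has disappeared from the map) restarts from the first non-empty bucket.
 * A 4-byte key is assumed; raw bpf(2) syscall, error handling elided.
 */
#include <linux/bpf.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int count_keys(int map_fd)
{
	union bpf_attr attr;
	uint32_t key, next_key;
	int n = 0;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = 0;				/* NULL key: start from the beginning */
	attr.next_key = (uint64_t)(unsigned long)&next_key;

	while (!syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr))) {
		key = next_key;			/* process "key" here */
		attr.key = (uint64_t)(unsigned long)&key;
		n++;
	}
	return n;				/* loop ends once the syscall fails with ENOENT */
}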
 636
 637static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
 638{
 639	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
 640		free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
 641	kfree(l);
 
 642}
 643
 644static void htab_elem_free_rcu(struct rcu_head *head)
 645{
 646	struct htab_elem *l = container_of(head, struct htab_elem, rcu);
 647	struct bpf_htab *htab = l->htab;
 648
 649	/* must increment bpf_prog_active to avoid kprobe+bpf triggering while
 650	 * we're calling kfree, otherwise deadlock is possible if kprobes
 651	 * are placed somewhere inside of slub
 652	 */
 653	preempt_disable();
 654	__this_cpu_inc(bpf_prog_active);
 655	htab_elem_free(htab, l);
 656	__this_cpu_dec(bpf_prog_active);
 657	preempt_enable();
 658}
 659
 660static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 661{
 662	struct bpf_map *map = &htab->map;
 663
 664	if (map->ops->map_fd_put_ptr) {
 665		void *ptr = fd_htab_map_get_ptr(map, l);
 
 666
 667		map->ops->map_fd_put_ptr(ptr);
 668	}
 669
 670	if (htab_is_prealloc(htab)) {
 671		pcpu_freelist_push(&htab->freelist, &l->fnode);
 672	} else {
 673		atomic_dec(&htab->count);
 674		l->htab = htab;
 675		call_rcu(&l->rcu, htab_elem_free_rcu);
 676	}
 677}
 678
 679static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
 680			    void *value, bool onallcpus)
 681{
 682	if (!onallcpus) {
 683		/* copy true value_size bytes */
 684		memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
 685	} else {
 686		u32 size = round_up(htab->map.value_size, 8);
 687		int off = 0, cpu;
 688
 689		for_each_possible_cpu(cpu) {
 690			bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
 691					value + off, size);
 692			off += size;
 693		}
 694	}
 695}
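/*
 * Illustrative user-space sketch (not part of the kernel source): when a
 * per-cpu hash map value is read or written through the syscall, the user
 * buffer holds one 8-byte-aligned slot per possible CPU, matching the
 * "value + off" stride used by pcpu_copy_value() above.  The caller must pass
 * the number of *possible* CPUs (for example from libbpf's
 * libbpf_num_possible_cpus()); the helper name below is made up.
 */
#include <stdlib.h>

static void *alloc_percpu_value_buf(unsigned int value_size,
				    unsigned int n_possible_cpus)
{
	unsigned int slot = (value_size + 7) & ~7u;	/* round_up(value_size, 8) */

	/* slot i carries the value for possible CPU i */
	return calloc(n_possible_cpus, slot);
}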
 696
 697static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
 
 698{
 699	return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
 700	       BITS_PER_LONG == 64;
 701}
 702
 703static u32 htab_size_value(const struct bpf_htab *htab, bool percpu)
 704{
 705	u32 size = htab->map.value_size;
 706
 707	if (percpu || fd_htab_map_needs_adjust(htab))
 708		size = round_up(size, 8);
 709	return size;
 710}
 711
 712static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 713					 void *value, u32 key_size, u32 hash,
 714					 bool percpu, bool onallcpus,
 715					 struct htab_elem *old_elem)
 716{
 717	u32 size = htab_size_value(htab, percpu);
 718	bool prealloc = htab_is_prealloc(htab);
 719	struct htab_elem *l_new, **pl_new;
 720	void __percpu *pptr;
 721
 722	if (prealloc) {
 723		if (old_elem) {
 724			/* if we're updating the existing element,
 725			 * use per-cpu extra elems to avoid freelist_pop/push
 726			 */
 727			pl_new = this_cpu_ptr(htab->extra_elems);
 728			l_new = *pl_new;
 729			*pl_new = old_elem;
 730		} else {
 731			struct pcpu_freelist_node *l;
 732
 733			l = pcpu_freelist_pop(&htab->freelist);
 734			if (!l)
 735				return ERR_PTR(-E2BIG);
 736			l_new = container_of(l, struct htab_elem, fnode);
 
 737		}
 738	} else {
 739		if (atomic_inc_return(&htab->count) > htab->map.max_entries)
 740			if (!old_elem) {
 741				/* when map is full and update() is replacing
 742				 * old element, it's ok to allocate, since
 743				 * old element will be freed immediately.
 744				 * Otherwise return an error
 745				 */
 746				atomic_dec(&htab->count);
 747				return ERR_PTR(-E2BIG);
 748			}
 749		l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
 750				     htab->map.numa_node);
 751		if (!l_new)
 752			return ERR_PTR(-ENOMEM);
 
 753	}
 754
 755	memcpy(l_new->key, key, key_size);
 756	if (percpu) {
 757		if (prealloc) {
 758			pptr = htab_elem_get_ptr(l_new, key_size);
 759		} else {
 760			/* alloc_percpu zero-fills */
 761			pptr = __alloc_percpu_gfp(size, 8,
 762						  GFP_ATOMIC | __GFP_NOWARN);
 763			if (!pptr) {
 764				kfree(l_new);
 765				return ERR_PTR(-ENOMEM);
 
 766			}
 767		}
 768
 769		pcpu_copy_value(htab, pptr, value, onallcpus);
 770
 771		if (!prealloc)
 772			htab_elem_set_ptr(l_new, key_size, pptr);
 773	} else {
 
 774		memcpy(l_new->key + round_up(key_size, 8), value, size);
 775	}
 776
 777	l_new->hash = hash;
 778	return l_new;
 779}
 780
 781static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
 782		       u64 map_flags)
 783{
 784	if (l_old && map_flags == BPF_NOEXIST)
 785		/* elem already exists */
 786		return -EEXIST;
 787
 788	if (!l_old && map_flags == BPF_EXIST)
 789		/* elem doesn't exist, cannot update it */
 790		return -ENOENT;
 791
 792	return 0;
 793}
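/*
 * Illustrative user-space sketch (not part of the kernel source): how the
 * update flags map onto check_flags() above.  BPF_ANY always updates or
 * inserts, BPF_NOEXIST fails with EEXIST when the key is already present, and
 * BPF_EXIST fails with ENOENT when it is not.  A 4-byte key and an 8-byte
 * value are assumed; raw bpf(2) syscall.
 */
#include <linux/bpf.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int update_elem(int map_fd, uint32_t key, uint64_t value, uint64_t flags)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key    = (uint64_t)(unsigned long)&key;
	attr.value  = (uint64_t)(unsigned long)&value;
	attr.flags  = flags;		/* BPF_ANY, BPF_NOEXIST or BPF_EXIST */

	return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}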
 794
 795/* Called from syscall or from eBPF program */
 796static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 797				u64 map_flags)
 798{
 799	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 800	struct htab_elem *l_new = NULL, *l_old;
 801	struct hlist_nulls_head *head;
 802	unsigned long flags;
 
 803	struct bucket *b;
 804	u32 key_size, hash;
 805	int ret;
 806
 807	if (unlikely(map_flags > BPF_EXIST))
 808		/* unknown flags */
 809		return -EINVAL;
 810
 811	WARN_ON_ONCE(!rcu_read_lock_held());
 
 812
 813	key_size = map->key_size;
 814
 815	hash = htab_map_hash(key, key_size);
 816
 817	b = __select_bucket(htab, hash);
 818	head = &b->head;
 819
 820	/* bpf_map_update_elem() can be called in_irq() */
 821	raw_spin_lock_irqsave(&b->lock, flags);
 822
 823	l_old = lookup_elem_raw(head, hash, key, key_size);
 824
 825	ret = check_flags(htab, l_old, map_flags);
 826	if (ret)
 827		goto err;
 828
 829	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
 830				l_old);
 831	if (IS_ERR(l_new)) {
 832		/* all pre-allocated elements are in use or memory exhausted */
 833		ret = PTR_ERR(l_new);
 834		goto err;
 835	}
 836
 837	/* add new element to the head of the list, so that
 838	 * concurrent search will find it before old elem
 839	 */
 840	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
 841	if (l_old) {
 842		hlist_nulls_del_rcu(&l_old->hash_node);
 843		if (!htab_is_prealloc(htab))
 844			free_htab_elem(htab, l_old);
 845	}
 846	ret = 0;
 847err:
 848	raw_spin_unlock_irqrestore(&b->lock, flags);
 849	return ret;
 850}
 851
 852static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 853				    u64 map_flags)
 854{
 855	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 856	struct htab_elem *l_new, *l_old = NULL;
 857	struct hlist_nulls_head *head;
 858	unsigned long flags;
 859	struct bucket *b;
 860	u32 key_size, hash;
 861	int ret;
 862
 863	if (unlikely(map_flags > BPF_EXIST))
 864		/* unknown flags */
 865		return -EINVAL;
 866
 867	WARN_ON_ONCE(!rcu_read_lock_held());
 
 868
 869	key_size = map->key_size;
 870
 871	hash = htab_map_hash(key, key_size);
 872
 873	b = __select_bucket(htab, hash);
 874	head = &b->head;
 875
 876	/* For LRU, we need to alloc before taking bucket's
 877	 * spinlock because getting free nodes from LRU may need
 878	 * to remove older elements from htab and this removal
 879	 * operation will need a bucket lock.
 880	 */
 881	l_new = prealloc_lru_pop(htab, key, hash);
 882	if (!l_new)
 883		return -ENOMEM;
 884	memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);
 
 885
 886	/* bpf_map_update_elem() can be called in_irq() */
 887	raw_spin_lock_irqsave(&b->lock, flags);
 
 888
 889	l_old = lookup_elem_raw(head, hash, key, key_size);
 890
 891	ret = check_flags(htab, l_old, map_flags);
 892	if (ret)
 893		goto err;
 894
 895	/* add new element to the head of the list, so that
 896	 * concurrent search will find it before old elem
 897	 */
 898	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
 899	if (l_old) {
 900		bpf_lru_node_set_ref(&l_new->lru_node);
 901		hlist_nulls_del_rcu(&l_old->hash_node);
 902	}
 903	ret = 0;
 904
 905err:
 906	raw_spin_unlock_irqrestore(&b->lock, flags);
 907
 
 908	if (ret)
 909		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
 910	else if (l_old)
 911		bpf_lru_push_free(&htab->lru, &l_old->lru_node);
 912
 913	return ret;
 914}
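/*
 * Illustrative user-space sketch (not part of the kernel source): unlike a
 * plain hash map, an LRU hash map keeps accepting updates after max_entries is
 * reached; prealloc_lru_pop() above makes room by evicting a least recently
 * used element instead of failing with -E2BIG.  4-byte keys and 8-byte values
 * are assumed; raw bpf(2) syscall, error handling elided.
 */
#include <linux/bpf.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static void fill_past_capacity(int lru_map_fd, uint32_t max_entries)
{
	union bpf_attr attr;
	uint32_t key;
	uint64_t value;

	/* inserting 2 * max_entries keys still succeeds: older keys are evicted */
	for (key = 0; key < 2 * max_entries; key++) {
		value = key;
		memset(&attr, 0, sizeof(attr));
		attr.map_fd = lru_map_fd;
		attr.key    = (uint64_t)(unsigned long)&key;
		attr.value  = (uint64_t)(unsigned long)&value;
		attr.flags  = BPF_ANY;
		syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
	}
}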
 915
 916static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 917					 void *value, u64 map_flags,
 918					 bool onallcpus)
 919{
 920	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 921	struct htab_elem *l_new = NULL, *l_old;
 922	struct hlist_nulls_head *head;
 923	unsigned long flags;
 924	struct bucket *b;
 925	u32 key_size, hash;
 926	int ret;
 927
 928	if (unlikely(map_flags > BPF_EXIST))
 929		/* unknown flags */
 930		return -EINVAL;
 931
 932	WARN_ON_ONCE(!rcu_read_lock_held());
 
 933
 934	key_size = map->key_size;
 935
 936	hash = htab_map_hash(key, key_size);
 937
 938	b = __select_bucket(htab, hash);
 939	head = &b->head;
 940
 941	/* bpf_map_update_elem() can be called in_irq() */
 942	raw_spin_lock_irqsave(&b->lock, flags);
 
 943
 944	l_old = lookup_elem_raw(head, hash, key, key_size);
 945
 946	ret = check_flags(htab, l_old, map_flags);
 947	if (ret)
 948		goto err;
 949
 950	if (l_old) {
 951		/* per-cpu hash map can update value in-place */
 952		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
 953				value, onallcpus);
 954	} else {
 955		l_new = alloc_htab_elem(htab, key, value, key_size,
 956					hash, true, onallcpus, NULL);
 957		if (IS_ERR(l_new)) {
 958			ret = PTR_ERR(l_new);
 959			goto err;
 960		}
 961		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
 962	}
 963	ret = 0;
 964err:
 965	raw_spin_unlock_irqrestore(&b->lock, flags);
 966	return ret;
 967}
 968
 969static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 970					     void *value, u64 map_flags,
 971					     bool onallcpus)
 972{
 973	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 974	struct htab_elem *l_new = NULL, *l_old;
 975	struct hlist_nulls_head *head;
 976	unsigned long flags;
 977	struct bucket *b;
 978	u32 key_size, hash;
 979	int ret;
 980
 981	if (unlikely(map_flags > BPF_EXIST))
 982		/* unknown flags */
 983		return -EINVAL;
 984
 985	WARN_ON_ONCE(!rcu_read_lock_held());
 
 986
 987	key_size = map->key_size;
 988
 989	hash = htab_map_hash(key, key_size);
 990
 991	b = __select_bucket(htab, hash);
 992	head = &b->head;
 993
 994	/* For LRU, we need to alloc before taking bucket's
 995	 * spinlock because LRU's elem alloc may need
 996	 * to remove older elem from htab and this removal
 997	 * operation will need a bucket lock.
 998	 */
 999	if (map_flags != BPF_EXIST) {
1000		l_new = prealloc_lru_pop(htab, key, hash);
1001		if (!l_new)
1002			return -ENOMEM;
1003	}
1004
1005	/* bpf_map_update_elem() can be called in_irq() */
1006	raw_spin_lock_irqsave(&b->lock, flags);
 
1007
1008	l_old = lookup_elem_raw(head, hash, key, key_size);
1009
1010	ret = check_flags(htab, l_old, map_flags);
1011	if (ret)
1012		goto err;
1013
1014	if (l_old) {
1015		bpf_lru_node_set_ref(&l_old->lru_node);
1016
1017		/* per-cpu hash map can update value in-place */
1018		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1019				value, onallcpus);
1020	} else {
1021		pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
1022				value, onallcpus);
1023		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1024		l_new = NULL;
1025	}
1026	ret = 0;
1027err:
1028	raw_spin_unlock_irqrestore(&b->lock, flags);
1029	if (l_new)
1030		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
 
1031	return ret;
1032}
1033
1034static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
1035				       void *value, u64 map_flags)
1036{
1037	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
1038}
1039
1040static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
1041					   void *value, u64 map_flags)
1042{
1043	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
1044						 false);
1045}
1046
1047/* Called from syscall or from eBPF program */
1048static int htab_map_delete_elem(struct bpf_map *map, void *key)
1049{
1050	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1051	struct hlist_nulls_head *head;
1052	struct bucket *b;
1053	struct htab_elem *l;
1054	unsigned long flags;
1055	u32 hash, key_size;
1056	int ret = -ENOENT;
1057
1058	WARN_ON_ONCE(!rcu_read_lock_held());
 
1059
1060	key_size = map->key_size;
1061
1062	hash = htab_map_hash(key, key_size);
1063	b = __select_bucket(htab, hash);
1064	head = &b->head;
1065
1066	raw_spin_lock_irqsave(&b->lock, flags);
1067
1068	l = lookup_elem_raw(head, hash, key, key_size);
1069
1070	if (l) {
1071		hlist_nulls_del_rcu(&l->hash_node);
1072		free_htab_elem(htab, l);
1073		ret = 0;
1074	}
 
1075
1076	raw_spin_unlock_irqrestore(&b->lock, flags);
 
1077	return ret;
1078}
1079
1080static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
1081{
1082	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1083	struct hlist_nulls_head *head;
1084	struct bucket *b;
1085	struct htab_elem *l;
1086	unsigned long flags;
1087	u32 hash, key_size;
1088	int ret = -ENOENT;
1089
1090	WARN_ON_ONCE(!rcu_read_lock_held());
 
1091
1092	key_size = map->key_size;
1093
1094	hash = htab_map_hash(key, key_size);
1095	b = __select_bucket(htab, hash);
1096	head = &b->head;
1097
1098	raw_spin_lock_irqsave(&b->lock, flags);
1099
1100	l = lookup_elem_raw(head, hash, key, key_size);
1101
1102	if (l) {
1103		hlist_nulls_del_rcu(&l->hash_node);
1104		ret = 0;
1105	}
1106
1107	raw_spin_unlock_irqrestore(&b->lock, flags);
1108	if (l)
1109		bpf_lru_push_free(&htab->lru, &l->lru_node);
1110	return ret;
1111}
1112
1113static void delete_all_elements(struct bpf_htab *htab)
1114{
1115	int i;
1116
1117	for (i = 0; i < htab->n_buckets; i++) {
1118		struct hlist_nulls_head *head = select_bucket(htab, i);
1119		struct hlist_nulls_node *n;
1120		struct htab_elem *l;
1121
1122		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1123			hlist_nulls_del_rcu(&l->hash_node);
1124			htab_elem_free(htab, l);
1125		}
1126	}
1127}
1128
1129/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
1130static void htab_map_free(struct bpf_map *map)
1131{
1132	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 
1133
1134	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
1135	 * so the programs (there can be more than one that used this map) were
1136	 * disconnected from events. Wait for outstanding critical sections in
1137	 * these programs to complete
1138	 */
1139	synchronize_rcu();
1140
1141	/* some of the free_htab_elem() callbacks for elements of this map may
1142	 * not have executed. Wait for them.
1143	 */
1144	rcu_barrier();
1145	if (!htab_is_prealloc(htab))
1146		delete_all_elements(htab);
1147	else
 
1148		prealloc_destroy(htab);
 
1149
 
1150	free_percpu(htab->extra_elems);
1151	bpf_map_area_free(htab->buckets);
1152	kfree(htab);
1153}
1154
1155const struct bpf_map_ops htab_map_ops = {
 
1156	.map_alloc_check = htab_map_alloc_check,
1157	.map_alloc = htab_map_alloc,
1158	.map_free = htab_map_free,
1159	.map_get_next_key = htab_map_get_next_key,
 
1160	.map_lookup_elem = htab_map_lookup_elem,
 
1161	.map_update_elem = htab_map_update_elem,
1162	.map_delete_elem = htab_map_delete_elem,
1163	.map_gen_lookup = htab_map_gen_lookup,
1164};
1165
1166const struct bpf_map_ops htab_lru_map_ops = {
 
1167	.map_alloc_check = htab_map_alloc_check,
1168	.map_alloc = htab_map_alloc,
1169	.map_free = htab_map_free,
1170	.map_get_next_key = htab_map_get_next_key,
 
1171	.map_lookup_elem = htab_lru_map_lookup_elem,
1172	.map_update_elem = htab_lru_map_update_elem,
1173	.map_delete_elem = htab_lru_map_delete_elem,
1174	.map_gen_lookup = htab_lru_map_gen_lookup,
1175};
1176
1177/* Called from eBPF program */
1178static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
1179{
1180	struct htab_elem *l = __htab_map_lookup_elem(map, key);
1181
1182	if (l)
1183		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
1184	else
1185		return NULL;
1186}
1187
1188static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
1189{
1190	struct htab_elem *l = __htab_map_lookup_elem(map, key);
1191
1192	if (l) {
1193		bpf_lru_node_set_ref(&l->lru_node);
1194		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
1195	}
1196
1197	return NULL;
1198}
1199
1200int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
1201{
1202	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1203	struct htab_elem *l;
1204	void __percpu *pptr;
1205	int ret = -ENOENT;
1206	int cpu, off = 0;
1207	u32 size;
1208
1209	/* per_cpu areas are zero-filled and bpf programs can only
1210	 * access 'value_size' of them, so copying rounded areas
1211	 * will not leak any kernel data
1212	 */
1213	size = round_up(map->value_size, 8);
1214	rcu_read_lock();
1215	l = __htab_map_lookup_elem(map, key);
1216	if (!l)
1217		goto out;
1218	if (htab_is_lru(htab))
1219		bpf_lru_node_set_ref(&l->lru_node);
 
1220	pptr = htab_elem_get_ptr(l, map->key_size);
1221	for_each_possible_cpu(cpu) {
1222		bpf_long_memcpy(value + off,
1223				per_cpu_ptr(pptr, cpu), size);
1224		off += size;
1225	}
1226	ret = 0;
1227out:
1228	rcu_read_unlock();
1229	return ret;
1230}
1231
1232int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
1233			   u64 map_flags)
1234{
1235	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1236	int ret;
1237
1238	rcu_read_lock();
1239	if (htab_is_lru(htab))
1240		ret = __htab_lru_percpu_map_update_elem(map, key, value,
1241							map_flags, true);
1242	else
1243		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
1244						    true);
1245	rcu_read_unlock();
1246
1247	return ret;
1248}
1249
1250const struct bpf_map_ops htab_percpu_map_ops = {
 
1251	.map_alloc_check = htab_map_alloc_check,
1252	.map_alloc = htab_map_alloc,
1253	.map_free = htab_map_free,
1254	.map_get_next_key = htab_map_get_next_key,
1255	.map_lookup_elem = htab_percpu_map_lookup_elem,
1256	.map_update_elem = htab_percpu_map_update_elem,
1257	.map_delete_elem = htab_map_delete_elem,
1258};
1259
1260const struct bpf_map_ops htab_lru_percpu_map_ops = {
 
1261	.map_alloc_check = htab_map_alloc_check,
1262	.map_alloc = htab_map_alloc,
1263	.map_free = htab_map_free,
1264	.map_get_next_key = htab_map_get_next_key,
1265	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
 
1266	.map_update_elem = htab_lru_percpu_map_update_elem,
1267	.map_delete_elem = htab_lru_map_delete_elem,
1268};
1269
1270static int fd_htab_map_alloc_check(union bpf_attr *attr)
1271{
1272	if (attr->value_size != sizeof(u32))
1273		return -EINVAL;
1274	return htab_map_alloc_check(attr);
1275}
1276
1277static void fd_htab_map_free(struct bpf_map *map)
1278{
1279	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1280	struct hlist_nulls_node *n;
1281	struct hlist_nulls_head *head;
1282	struct htab_elem *l;
1283	int i;
1284
1285	for (i = 0; i < htab->n_buckets; i++) {
1286		head = select_bucket(htab, i);
1287
1288		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1289			void *ptr = fd_htab_map_get_ptr(map, l);
1290
1291			map->ops->map_fd_put_ptr(ptr);
1292		}
1293	}
1294
1295	htab_map_free(map);
1296}
1297
1298/* only called from syscall */
1299int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
1300{
1301	void **ptr;
1302	int ret = 0;
1303
1304	if (!map->ops->map_fd_sys_lookup_elem)
1305		return -ENOTSUPP;
1306
1307	rcu_read_lock();
1308	ptr = htab_map_lookup_elem(map, key);
1309	if (ptr)
1310		*value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
1311	else
1312		ret = -ENOENT;
1313	rcu_read_unlock();
1314
1315	return ret;
1316}
1317
1318/* only called from syscall */
1319int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
1320				void *key, void *value, u64 map_flags)
1321{
1322	void *ptr;
1323	int ret;
1324	u32 ufd = *(u32 *)value;
1325
1326	ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
1327	if (IS_ERR(ptr))
1328		return PTR_ERR(ptr);
1329
1330	ret = htab_map_update_elem(map, key, &ptr, map_flags);
 
1331	if (ret)
1332		map->ops->map_fd_put_ptr(ptr);
1333
1334	return ret;
1335}
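/*
 * Illustrative user-space sketch (not part of the kernel source): for
 * BPF_MAP_TYPE_HASH_OF_MAPS the value written from user space is a 4-byte map
 * file descriptor, which bpf_fd_htab_map_update_elem() above converts into the
 * inner map pointer actually stored in the element.  A 4-byte outer key is
 * assumed; raw bpf(2) syscall, error handling elided.
 */
#include <linux/bpf.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int put_inner_map(int outer_map_fd, uint32_t key, int inner_map_fd)
{
	union bpf_attr attr;
	uint32_t fd_value = inner_map_fd;	/* value_size must be sizeof(u32) */

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = outer_map_fd;
	attr.key    = (uint64_t)(unsigned long)&key;
	attr.value  = (uint64_t)(unsigned long)&fd_value;
	attr.flags  = BPF_ANY;

	return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}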
1336
1337static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
1338{
1339	struct bpf_map *map, *inner_map_meta;
1340
1341	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
1342	if (IS_ERR(inner_map_meta))
1343		return inner_map_meta;
1344
1345	map = htab_map_alloc(attr);
1346	if (IS_ERR(map)) {
1347		bpf_map_meta_free(inner_map_meta);
1348		return map;
1349	}
1350
1351	map->inner_map_meta = inner_map_meta;
1352
1353	return map;
1354}
1355
1356static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
1357{
1358	struct bpf_map **inner_map  = htab_map_lookup_elem(map, key);
1359
1360	if (!inner_map)
1361		return NULL;
1362
1363	return READ_ONCE(*inner_map);
1364}
1365
1366static u32 htab_of_map_gen_lookup(struct bpf_map *map,
1367				  struct bpf_insn *insn_buf)
1368{
1369	struct bpf_insn *insn = insn_buf;
1370	const int ret = BPF_REG_0;
1371
1372	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
1373	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
1374	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
1375				offsetof(struct htab_elem, key) +
1376				round_up(map->key_size, 8));
1377	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
1378
1379	return insn - insn_buf;
1380}
1381
1382static void htab_of_map_free(struct bpf_map *map)
1383{
1384	bpf_map_meta_free(map->inner_map_meta);
1385	fd_htab_map_free(map);
1386}
1387
1388const struct bpf_map_ops htab_of_maps_map_ops = {
1389	.map_alloc_check = fd_htab_map_alloc_check,
1390	.map_alloc = htab_of_map_alloc,
1391	.map_free = htab_of_map_free,
1392	.map_get_next_key = htab_map_get_next_key,
1393	.map_lookup_elem = htab_of_map_lookup_elem,
1394	.map_delete_elem = htab_map_delete_elem,
1395	.map_fd_get_ptr = bpf_map_fd_get_ptr,
1396	.map_fd_put_ptr = bpf_map_fd_put_ptr,
1397	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
1398	.map_gen_lookup = htab_of_map_gen_lookup,
1399};
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   3 * Copyright (c) 2016 Facebook
   4 */
   5#include <linux/bpf.h>
   6#include <linux/btf.h>
   7#include <linux/jhash.h>
   8#include <linux/filter.h>
   9#include <linux/rculist_nulls.h>
  10#include <linux/rcupdate_wait.h>
  11#include <linux/random.h>
  12#include <uapi/linux/btf.h>
  13#include <linux/rcupdate_trace.h>
  14#include <linux/btf_ids.h>
  15#include "percpu_freelist.h"
  16#include "bpf_lru_list.h"
  17#include "map_in_map.h"
  18#include <linux/bpf_mem_alloc.h>
  19
  20#define HTAB_CREATE_FLAG_MASK						\
  21	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE |	\
  22	 BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED)
  23
  24#define BATCH_OPS(_name)			\
  25	.map_lookup_batch =			\
  26	_name##_map_lookup_batch,		\
  27	.map_lookup_and_delete_batch =		\
  28	_name##_map_lookup_and_delete_batch,	\
  29	.map_update_batch =			\
  30	generic_map_update_batch,		\
  31	.map_delete_batch =			\
  32	generic_map_delete_batch
  33
  34/*
  35 * The bucket lock has two protection scopes:
  36 *
  37 * 1) Serializing concurrent operations from BPF programs on different
  38 *    CPUs
  39 *
  40 * 2) Serializing concurrent operations from BPF programs and sys_bpf()
  41 *
  42 * BPF programs can execute in any context including perf, kprobes and
  43 * tracing. As there are almost no limits where perf, kprobes and tracing
   44 * can be invoked from, the lock operations need to be protected against
  45 * deadlocks. Deadlocks can be caused by recursion and by an invocation in
  46 * the lock held section when functions which acquire this lock are invoked
  47 * from sys_bpf(). BPF recursion is prevented by incrementing the per CPU
  48 * variable bpf_prog_active, which prevents BPF programs attached to perf
   49 * events, kprobes and tracing from being invoked before the prior invocation
  50 * from one of these contexts completed. sys_bpf() uses the same mechanism
  51 * by pinning the task to the current CPU and incrementing the recursion
  52 * protection across the map operation.
  53 *
  54 * This has subtle implications on PREEMPT_RT. PREEMPT_RT forbids certain
  55 * operations like memory allocations (even with GFP_ATOMIC) from atomic
  56 * contexts. This is required because even with GFP_ATOMIC the memory
  57 * allocator calls into code paths which acquire locks with long held lock
  58 * sections. To ensure the deterministic behaviour these locks are regular
  59 * spinlocks, which are converted to 'sleepable' spinlocks on RT. The only
  60 * true atomic contexts on an RT kernel are the low level hardware
  61 * handling, scheduling, low level interrupt handling, NMIs etc. None of
  62 * these contexts should ever do memory allocations.
  63 *
  64 * As regular device interrupt handlers and soft interrupts are forced into
  65 * thread context, the existing code which does
  66 *   spin_lock*(); alloc(GFP_ATOMIC); spin_unlock*();
  67 * just works.
  68 *
  69 * In theory the BPF locks could be converted to regular spinlocks as well,
  70 * but the bucket locks and percpu_freelist locks can be taken from
  71 * arbitrary contexts (perf, kprobes, tracepoints) which are required to be
  72 * atomic contexts even on RT. Before the introduction of bpf_mem_alloc,
  73 * it is only safe to use raw spinlock for preallocated hash map on a RT kernel,
  74 * because there is no memory allocation within the lock held sections. However
  75 * after hash map was fully converted to use bpf_mem_alloc, there will be
  76 * non-synchronous memory allocation for non-preallocated hash map, so it is
  77 * safe to always use raw spinlock for bucket lock.
  78 */
  79struct bucket {
  80	struct hlist_nulls_head head;
  81	raw_spinlock_t raw_lock;
  82};
  83
  84#define HASHTAB_MAP_LOCK_COUNT 8
  85#define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)
  86
  87struct bpf_htab {
  88	struct bpf_map map;
  89	struct bpf_mem_alloc ma;
  90	struct bpf_mem_alloc pcpu_ma;
  91	struct bucket *buckets;
  92	void *elems;
  93	union {
  94		struct pcpu_freelist freelist;
  95		struct bpf_lru lru;
  96	};
  97	struct htab_elem *__percpu *extra_elems;
   98	/* the number of elements in a non-preallocated hashtable is kept
  99	 * in either pcount or count
 100	 */
 101	struct percpu_counter pcount;
 102	atomic_t count;
 103	bool use_percpu_counter;
 104	u32 n_buckets;	/* number of hash buckets */
 105	u32 elem_size;	/* size of each element in bytes */
 106	u32 hashrnd;
 107	struct lock_class_key lockdep_key;
 108	int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT];
 109};
 110
 111/* each htab element is struct htab_elem + key + value */
 112struct htab_elem {
 113	union {
 114		struct hlist_nulls_node hash_node;
 115		struct {
 116			void *padding;
 117			union {
 
 118				struct pcpu_freelist_node fnode;
 119				struct htab_elem *batch_flink;
 120			};
 121		};
 122	};
 123	union {
 124		/* pointer to per-cpu pointer */
 125		void *ptr_to_pptr;
 126		struct bpf_lru_node lru_node;
 127	};
 128	u32 hash;
 129	char key[] __aligned(8);
 130};
 131
 132static inline bool htab_is_prealloc(const struct bpf_htab *htab)
 133{
 134	return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
 135}
 136
 137static void htab_init_buckets(struct bpf_htab *htab)
 138{
 139	unsigned int i;
 140
 141	for (i = 0; i < htab->n_buckets; i++) {
 142		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
 143		raw_spin_lock_init(&htab->buckets[i].raw_lock);
 144		lockdep_set_class(&htab->buckets[i].raw_lock,
 145					  &htab->lockdep_key);
 146		cond_resched();
 147	}
 148}
 149
 150static inline int htab_lock_bucket(const struct bpf_htab *htab,
 151				   struct bucket *b, u32 hash,
 152				   unsigned long *pflags)
 153{
 154	unsigned long flags;
 155
 156	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
 157
 158	preempt_disable();
 159	local_irq_save(flags);
 160	if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
 161		__this_cpu_dec(*(htab->map_locked[hash]));
 162		local_irq_restore(flags);
 163		preempt_enable();
 164		return -EBUSY;
 165	}
 166
 167	raw_spin_lock(&b->raw_lock);
 168	*pflags = flags;
 169
 170	return 0;
 171}
 172
 173static inline void htab_unlock_bucket(const struct bpf_htab *htab,
 174				      struct bucket *b, u32 hash,
 175				      unsigned long flags)
 176{
 177	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
 178	raw_spin_unlock(&b->raw_lock);
 179	__this_cpu_dec(*(htab->map_locked[hash]));
 180	local_irq_restore(flags);
 181	preempt_enable();
 182}
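/*
 * Illustrative sketch (not part of the kernel source): map_locked is a per-CPU
 * recursion guard.  If a tracing program runs on a CPU that already holds this
 * bucket's guard slot (for example a kprobe hit inside the locked section) and
 * tries to operate on the same map, htab_lock_bucket() returns -EBUSY instead
 * of deadlocking on the raw spinlock.  Hashes are folded onto
 * HASHTAB_MAP_LOCK_COUNT guard slots; the helper below restates that mapping
 * and its name is made up.
 */
static inline u32 example_lock_slot(u32 hash, u32 n_buckets)
{
	/* same index computation as htab_lock_bucket()/htab_unlock_bucket() */
	return hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, n_buckets - 1);
}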
 183
 184static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
 185
 186static bool htab_is_lru(const struct bpf_htab *htab)
 187{
 188	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
 189		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
 190}
 191
 192static bool htab_is_percpu(const struct bpf_htab *htab)
 193{
 194	return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 195		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
 196}
 197
 198static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
 199				     void __percpu *pptr)
 200{
 201	*(void __percpu **)(l->key + key_size) = pptr;
 202}
 203
 204static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
 205{
 206	return *(void __percpu **)(l->key + key_size);
 207}
 208
 209static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
 210{
 211	return *(void **)(l->key + roundup(map->key_size, 8));
 212}
 213
 214static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
 215{
 216	return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size);
 217}
 218
 219static bool htab_has_extra_elems(struct bpf_htab *htab)
 220{
 221	return !htab_is_percpu(htab) && !htab_is_lru(htab);
 222}
 223
 224static void htab_free_prealloced_timers_and_wq(struct bpf_htab *htab)
 225{
 226	u32 num_entries = htab->map.max_entries;
 227	int i;
 228
 229	if (htab_has_extra_elems(htab))
 230		num_entries += num_possible_cpus();
 231
 232	for (i = 0; i < num_entries; i++) {
 233		struct htab_elem *elem;
 234
 235		elem = get_htab_elem(htab, i);
 236		if (btf_record_has_field(htab->map.record, BPF_TIMER))
 237			bpf_obj_free_timer(htab->map.record,
 238					   elem->key + round_up(htab->map.key_size, 8));
 239		if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE))
 240			bpf_obj_free_workqueue(htab->map.record,
 241					       elem->key + round_up(htab->map.key_size, 8));
 242		cond_resched();
 243	}
 244}
 245
 246static void htab_free_prealloced_fields(struct bpf_htab *htab)
 247{
 248	u32 num_entries = htab->map.max_entries;
 249	int i;
 250
 251	if (IS_ERR_OR_NULL(htab->map.record))
 252		return;
 253	if (htab_has_extra_elems(htab))
 254		num_entries += num_possible_cpus();
 255	for (i = 0; i < num_entries; i++) {
 256		struct htab_elem *elem;
 257
 258		elem = get_htab_elem(htab, i);
 259		if (htab_is_percpu(htab)) {
 260			void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size);
 261			int cpu;
 262
 263			for_each_possible_cpu(cpu) {
 264				bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
 265				cond_resched();
 266			}
 267		} else {
 268			bpf_obj_free_fields(htab->map.record, elem->key + round_up(htab->map.key_size, 8));
 269			cond_resched();
 270		}
 271		cond_resched();
 272	}
 273}
 274
 275static void htab_free_elems(struct bpf_htab *htab)
 276{
 277	int i;
 278
 279	if (!htab_is_percpu(htab))
 280		goto free_elems;
 281
 282	for (i = 0; i < htab->map.max_entries; i++) {
 283		void __percpu *pptr;
 284
 285		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
 286					 htab->map.key_size);
 287		free_percpu(pptr);
 288		cond_resched();
 289	}
 290free_elems:
 291	bpf_map_area_free(htab->elems);
 292}
 293
 294/* The LRU list has a lock (lru_lock). Each htab bucket has a lock
 295 * (bucket_lock). If both locks need to be acquired together, the lock
 296 * order is always lru_lock -> bucket_lock and this only happens in
 297 * bpf_lru_list.c logic. For example, certain code path of
 298 * bpf_lru_pop_free(), which is called by function prealloc_lru_pop(),
 299 * will acquire lru_lock first followed by acquiring bucket_lock.
 300 *
 301 * In hashtab.c, to avoid deadlock, lock acquisition of
 302 * bucket_lock followed by lru_lock is not allowed. In such cases,
 303 * bucket_lock needs to be released first before acquiring lru_lock.
 304 */
 305static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
 306					  u32 hash)
 307{
 308	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
 309	struct htab_elem *l;
 310
 311	if (node) {
 312		bpf_map_inc_elem_count(&htab->map);
 313		l = container_of(node, struct htab_elem, lru_node);
 314		memcpy(l->key, key, htab->map.key_size);
 315		return l;
 316	}
 317
 318	return NULL;
 319}
 320
 321static int prealloc_init(struct bpf_htab *htab)
 322{
 323	u32 num_entries = htab->map.max_entries;
 324	int err = -ENOMEM, i;
 325
 326	if (htab_has_extra_elems(htab))
 327		num_entries += num_possible_cpus();
 328
 329	htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries,
 330					 htab->map.numa_node);
 331	if (!htab->elems)
 332		return -ENOMEM;
 333
 334	if (!htab_is_percpu(htab))
 335		goto skip_percpu_elems;
 336
 337	for (i = 0; i < num_entries; i++) {
 338		u32 size = round_up(htab->map.value_size, 8);
 339		void __percpu *pptr;
 340
 341		pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
 342					    GFP_USER | __GFP_NOWARN);
 343		if (!pptr)
 344			goto free_elems;
 345		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
 346				  pptr);
 347		cond_resched();
 348	}
 349
 350skip_percpu_elems:
 351	if (htab_is_lru(htab))
 352		err = bpf_lru_init(&htab->lru,
 353				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
 354				   offsetof(struct htab_elem, hash) -
 355				   offsetof(struct htab_elem, lru_node),
 356				   htab_lru_map_delete_node,
 357				   htab);
 358	else
 359		err = pcpu_freelist_init(&htab->freelist);
 360
 361	if (err)
 362		goto free_elems;
 363
 364	if (htab_is_lru(htab))
 365		bpf_lru_populate(&htab->lru, htab->elems,
 366				 offsetof(struct htab_elem, lru_node),
 367				 htab->elem_size, num_entries);
 368	else
 369		pcpu_freelist_populate(&htab->freelist,
 370				       htab->elems + offsetof(struct htab_elem, fnode),
 371				       htab->elem_size, num_entries);
 372
 373	return 0;
 374
 375free_elems:
 376	htab_free_elems(htab);
 377	return err;
 378}
 379
 380static void prealloc_destroy(struct bpf_htab *htab)
 381{
 382	htab_free_elems(htab);
 383
 384	if (htab_is_lru(htab))
 385		bpf_lru_destroy(&htab->lru);
 386	else
 387		pcpu_freelist_destroy(&htab->freelist);
 388}
 389
 390static int alloc_extra_elems(struct bpf_htab *htab)
 391{
 392	struct htab_elem *__percpu *pptr, *l_new;
 393	struct pcpu_freelist_node *l;
 394	int cpu;
 395
 396	pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8,
 397				    GFP_USER | __GFP_NOWARN);
 398	if (!pptr)
 399		return -ENOMEM;
 400
 401	for_each_possible_cpu(cpu) {
 402		l = pcpu_freelist_pop(&htab->freelist);
 403		/* pop will succeed, since prealloc_init()
 404		 * preallocated extra num_possible_cpus elements
 405		 */
 406		l_new = container_of(l, struct htab_elem, fnode);
 407		*per_cpu_ptr(pptr, cpu) = l_new;
 408	}
 409	htab->extra_elems = pptr;
 410	return 0;
 411}
 412
 413/* Called from syscall */
 414static int htab_map_alloc_check(union bpf_attr *attr)
 415{
 416	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 417		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
 418	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
 419		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
 420	/* percpu_lru means each cpu has its own LRU list.
 421	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
 422	 * the map's value itself is percpu.  percpu_lru has
 423	 * nothing to do with the map's value.
 424	 */
 425	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 426	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
 427	bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED);
 428	int numa_node = bpf_map_attr_numa_node(attr);
 429
 430	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
 431		     offsetof(struct htab_elem, hash_node.pprev));
 432
 433	if (zero_seed && !capable(CAP_SYS_ADMIN))
 434		/* Guard against local DoS, and discourage production use. */
 435		return -EPERM;
 436
 437	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK ||
 438	    !bpf_map_flags_access_ok(attr->map_flags))
 439		return -EINVAL;
 440
 441	if (!lru && percpu_lru)
 442		return -EINVAL;
 443
 444	if (lru && !prealloc)
 445		return -ENOTSUPP;
 446
 447	if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
 448		return -EINVAL;
 449
 450	/* check sanity of attributes.
 451	 * value_size == 0 may be allowed in the future to use map as a set
 452	 */
 453	if (attr->max_entries == 0 || attr->key_size == 0 ||
 454	    attr->value_size == 0)
 455		return -EINVAL;
 456
 457	if ((u64)attr->key_size + attr->value_size >= KMALLOC_MAX_SIZE -
 458	   sizeof(struct htab_elem))
 459		/* if key_size + value_size is bigger, the user space won't be
 460		 * able to access the elements via bpf syscall. This check
 461		 * also makes sure that the elem_size doesn't overflow and it's
 462		 * kmalloc-able later in htab_map_update_elem()
 463		 */
 464		return -E2BIG;
 465	/* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */
 466	if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE)
 467		return -E2BIG;
 468
 469	return 0;
 470}
 471
 472static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 473{
 474	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 475		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
 476	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
 477		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
 478	/* percpu_lru means each cpu has its own LRU list.
 479	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
 480	 * the map's value itself is percpu.  percpu_lru has
 481	 * nothing to do with the map's value.
 482	 */
 483	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 484	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
 485	struct bpf_htab *htab;
 486	int err, i;
 487
 488	htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
 489	if (!htab)
 490		return ERR_PTR(-ENOMEM);
 491
 492	lockdep_register_key(&htab->lockdep_key);
 493
 494	bpf_map_init_from_attr(&htab->map, attr);
 495
 496	if (percpu_lru) {
 497		/* ensure each CPU's lru list has >= 1 element.
 498		 * while we are at it, make each lru list have the same
 499		 * number of elements.
 500		 */
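		/* e.g. max_entries = 1000 on a 16-CPU system is rounded up to
		 * 1008, i.e. 63 elements per per-CPU LRU list.
		 */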
 501		htab->map.max_entries = roundup(attr->max_entries,
 502						num_possible_cpus());
 503		if (htab->map.max_entries < attr->max_entries)
 504			htab->map.max_entries = rounddown(attr->max_entries,
 505							  num_possible_cpus());
 506	}
 507
 508	/* hash table size must be power of 2; roundup_pow_of_two() can overflow
 509	 * into UB on 32-bit arches, so check that first
 510	 */
 511	err = -E2BIG;
 512	if (htab->map.max_entries > 1UL << 31)
 513		goto free_htab;
 514
 515	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
 516
 517	htab->elem_size = sizeof(struct htab_elem) +
 518			  round_up(htab->map.key_size, 8);
 519	if (percpu)
 520		htab->elem_size += sizeof(void *);
 521	else
 522		htab->elem_size += round_up(htab->map.value_size, 8);
 523
 524	/* check for u32 overflow */
 525	if (htab->n_buckets > U32_MAX / sizeof(struct bucket))
 526		goto free_htab;
 527
 528	err = bpf_map_init_elem_count(&htab->map);
 529	if (err)
 530		goto free_htab;
 531
 532	err = -ENOMEM;
 533	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
 534					   sizeof(struct bucket),
 535					   htab->map.numa_node);
 536	if (!htab->buckets)
 537		goto free_elem_count;
 538
 539	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) {
 540		htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map,
 541							   sizeof(int),
 542							   sizeof(int),
 543							   GFP_USER);
 544		if (!htab->map_locked[i])
 545			goto free_map_locked;
 546	}
 547
 548	if (htab->map.map_flags & BPF_F_ZERO_SEED)
 549		htab->hashrnd = 0;
 550	else
 551		htab->hashrnd = get_random_u32();
 552
 553	htab_init_buckets(htab);
 554
 555/* compute_batch_value() computes batch value as num_online_cpus() * 2
 556 * and __percpu_counter_compare() needs
 557 * htab->max_entries - cur_number_of_elems to be more than batch * num_online_cpus()
 558 * for percpu_counter to be faster than atomic_t. In practice the average bpf
 559 * hash map size is 10k, which means that a system with 64 cpus will fill
 560 * hashmap to 20% of 10k before percpu_counter becomes ineffective. Therefore
 561 * define our own batch count as 32 then 10k hash map can be filled up to 80%:
 562 * 10k - 8k > 32 _batch_ * 64 _cpus_
 563 * and __percpu_counter_compare() will still be fast. At that point hash map
 564 * collisions will dominate its performance anyway. Assume that hash map filled
 565 * to 50+% isn't going to be O(1) and use the following formula to choose
 566 * between percpu_counter and atomic_t.
 567 */
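/* For example, with 64 online CPUs the percpu counter is only used when
 * max_entries / 2 > 64 * 32, i.e. when max_entries exceeds 4096.
 */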
 568#define PERCPU_COUNTER_BATCH 32
 569	if (attr->max_entries / 2 > num_online_cpus() * PERCPU_COUNTER_BATCH)
 570		htab->use_percpu_counter = true;
 571
 572	if (htab->use_percpu_counter) {
 573		err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL);
 574		if (err)
 575			goto free_map_locked;
 576	}
 577
 578	if (prealloc) {
 579		err = prealloc_init(htab);
 580		if (err)
 581			goto free_map_locked;
 582
 583		if (!percpu && !lru) {
 584			/* lru itself can remove the least used element, so
 585			 * there is no need for an extra elem during map_update.
 586			 */
 587			err = alloc_extra_elems(htab);
 588			if (err)
 589				goto free_prealloc;
 590		}
 591	} else {
 592		err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false);
 593		if (err)
 594			goto free_map_locked;
 595		if (percpu) {
 596			err = bpf_mem_alloc_init(&htab->pcpu_ma,
 597						 round_up(htab->map.value_size, 8), true);
 598			if (err)
 599				goto free_map_locked;
 600		}
 601	}
 602
 603	return &htab->map;
 604
 605free_prealloc:
 606	prealloc_destroy(htab);
 607free_map_locked:
 608	if (htab->use_percpu_counter)
 609		percpu_counter_destroy(&htab->pcount);
 610	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
 611		free_percpu(htab->map_locked[i]);
 612	bpf_map_area_free(htab->buckets);
 613	bpf_mem_alloc_destroy(&htab->pcpu_ma);
 614	bpf_mem_alloc_destroy(&htab->ma);
 615free_elem_count:
 616	bpf_map_free_elem_count(&htab->map);
 617free_htab:
 618	lockdep_unregister_key(&htab->lockdep_key);
 619	bpf_map_area_free(htab);
 620	return ERR_PTR(err);
 621}
 622
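/* Keys whose size is a multiple of 4 bytes can be hashed with the
 * word-oriented jhash2(); any other size falls back to the generic jhash().
 */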
 623static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
 624{
 625	if (likely(key_len % 4 == 0))
 626		return jhash2(key, key_len / 4, hashrnd);
 627	return jhash(key, key_len, hashrnd);
 628}
 629
 630static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
 631{
 632	return &htab->buckets[hash & (htab->n_buckets - 1)];
 633}
 634
 635static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
 636{
 637	return &__select_bucket(htab, hash)->head;
 638}
 639
 640/* this lookup function can only be called with bucket lock taken */
 641static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
 642					 void *key, u32 key_size)
 643{
 644	struct hlist_nulls_node *n;
 645	struct htab_elem *l;
 646
 647	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
 648		if (l->hash == hash && !memcmp(&l->key, key, key_size))
 649			return l;
 650
 651	return NULL;
 652}
 653
 654/* can be called without bucket lock. It will repeat the loop in
 655 * the unlikely event that elements moved from one bucket into another
 656 * while the linked list is being walked
 657 */
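/* The nulls value that terminates each bucket's list encodes the bucket
 * index, so ending the walk on a marker for a different bucket means the
 * element being followed was moved and the search must be restarted.
 */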
 658static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
 659					       u32 hash, void *key,
 660					       u32 key_size, u32 n_buckets)
 661{
 662	struct hlist_nulls_node *n;
 663	struct htab_elem *l;
 664
 665again:
 666	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
 667		if (l->hash == hash && !memcmp(&l->key, key, key_size))
 668			return l;
 669
 670	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
 671		goto again;
 672
 673	return NULL;
 674}
 675
 676/* Called from syscall or from eBPF program directly, so
 677 * arguments have to match bpf_map_lookup_elem() exactly.
 678 * The return value is adjusted by BPF instructions
 679 * in htab_map_gen_lookup().
 680 */
 681static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
 682{
 683	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 684	struct hlist_nulls_head *head;
 685	struct htab_elem *l;
 686	u32 hash, key_size;
 687
 688	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
 689		     !rcu_read_lock_bh_held());
 690
 691	key_size = map->key_size;
 692
 693	hash = htab_map_hash(key, key_size, htab->hashrnd);
 694
 695	head = select_bucket(htab, hash);
 696
 697	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
 698
 699	return l;
 700}
 701
 702static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
 703{
 704	struct htab_elem *l = __htab_map_lookup_elem(map, key);
 705
 706	if (l)
 707		return l->key + round_up(map->key_size, 8);
 708
 709	return NULL;
 710}
 711
 712/* inline bpf_map_lookup_elem() call.
 713 * Instead of:
 714 * bpf_prog
 715 *   bpf_map_lookup_elem
 716 *     map->ops->map_lookup_elem
 717 *       htab_map_lookup_elem
 718 *         __htab_map_lookup_elem
 719 * do:
 720 * bpf_prog
 721 *   __htab_map_lookup_elem
 722 */
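/* The generated sequence is effectively:
 *   r0 = __htab_map_lookup_elem(map, key);
 *   if (r0 != 0)
 *           r0 += offsetof(struct htab_elem, key) + round_up(key_size, 8);
 * so a hit already returns a pointer to the value, not to the element.
 */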
 723static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 724{
 725	struct bpf_insn *insn = insn_buf;
 726	const int ret = BPF_REG_0;
 727
 728	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
 729		     (void *(*)(struct bpf_map *map, void *key))NULL));
 730	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
 731	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
 732	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
 733				offsetof(struct htab_elem, key) +
 734				round_up(map->key_size, 8));
 735	return insn - insn_buf;
 736}
 737
 738static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
 739							void *key, const bool mark)
 740{
 741	struct htab_elem *l = __htab_map_lookup_elem(map, key);
 742
 743	if (l) {
 744		if (mark)
 745			bpf_lru_node_set_ref(&l->lru_node);
 746		return l->key + round_up(map->key_size, 8);
 747	}
 748
 749	return NULL;
 750}
 751
 752static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
 753{
 754	return __htab_lru_map_lookup_elem(map, key, true);
 755}
 756
 757static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
 758{
 759	return __htab_lru_map_lookup_elem(map, key, false);
 760}
 761
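/* Same inlining as htab_map_gen_lookup(), but on a hit the generated code
 * also marks the element as referenced for the LRU (writing lru_node.ref
 * only when it is still 0, so hot elements are not stored to on every
 * lookup) before converting the element pointer into a value pointer.
 */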
 762static int htab_lru_map_gen_lookup(struct bpf_map *map,
 763				   struct bpf_insn *insn_buf)
 764{
 765	struct bpf_insn *insn = insn_buf;
 766	const int ret = BPF_REG_0;
 767	const int ref_reg = BPF_REG_1;
 768
 769	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
 770		     (void *(*)(struct bpf_map *map, void *key))NULL));
 771	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
 772	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
 773	*insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
 774			      offsetof(struct htab_elem, lru_node) +
 775			      offsetof(struct bpf_lru_node, ref));
 776	*insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1);
 777	*insn++ = BPF_ST_MEM(BPF_B, ret,
 778			     offsetof(struct htab_elem, lru_node) +
 779			     offsetof(struct bpf_lru_node, ref),
 780			     1);
 781	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
 782				offsetof(struct htab_elem, key) +
 783				round_up(map->key_size, 8));
 784	return insn - insn_buf;
 785}
 786
 787static void check_and_free_fields(struct bpf_htab *htab,
 788				  struct htab_elem *elem)
 789{
 790	if (htab_is_percpu(htab)) {
 791		void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size);
 792		int cpu;
 793
 794		for_each_possible_cpu(cpu)
 795			bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
 796	} else {
 797		void *map_value = elem->key + round_up(htab->map.key_size, 8);
 798
 799		bpf_obj_free_fields(htab->map.record, map_value);
 800	}
 801}
 802
 803/* It is called from the bpf_lru_list when the LRU needs to delete
 804 * older elements from the htab.
 805 */
 806static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 807{
 808	struct bpf_htab *htab = arg;
 809	struct htab_elem *l = NULL, *tgt_l;
 810	struct hlist_nulls_head *head;
 811	struct hlist_nulls_node *n;
 812	unsigned long flags;
 813	struct bucket *b;
 814	int ret;
 815
 816	tgt_l = container_of(node, struct htab_elem, lru_node);
 817	b = __select_bucket(htab, tgt_l->hash);
 818	head = &b->head;
 819
 820	ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags);
 821	if (ret)
 822		return false;
 823
 824	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
 825		if (l == tgt_l) {
 826			hlist_nulls_del_rcu(&l->hash_node);
 827			check_and_free_fields(htab, l);
 828			bpf_map_dec_elem_count(&htab->map);
 829			break;
 830		}
 831
 832	htab_unlock_bucket(htab, b, tgt_l->hash, flags);
 833
 834	return l == tgt_l;
 835}
 836
 837/* Called from syscall */
 838static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 839{
 840	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 841	struct hlist_nulls_head *head;
 842	struct htab_elem *l, *next_l;
 843	u32 hash, key_size;
 844	int i = 0;
 845
 846	WARN_ON_ONCE(!rcu_read_lock_held());
 847
 848	key_size = map->key_size;
 849
 850	if (!key)
 851		goto find_first_elem;
 852
 853	hash = htab_map_hash(key, key_size, htab->hashrnd);
 854
 855	head = select_bucket(htab, hash);
 856
 857	/* lookup the key */
 858	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
 859
 860	if (!l)
 861		goto find_first_elem;
 862
 863	/* key was found, get next key in the same bucket */
 864	next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
 865				  struct htab_elem, hash_node);
 866
 867	if (next_l) {
 868			/* if there is a next elem in this hash list, just return it */
 869		memcpy(next_key, next_l->key, key_size);
 870		return 0;
 871	}
 872
 873	/* no more elements in this hash list, go to the next bucket */
 874	i = hash & (htab->n_buckets - 1);
 875	i++;
 876
 877find_first_elem:
 878	/* iterate over buckets */
 879	for (; i < htab->n_buckets; i++) {
 880		head = select_bucket(htab, i);
 881
 882		/* pick first element in the bucket */
 883		next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
 884					  struct htab_elem, hash_node);
 885		if (next_l) {
 886			/* if it's not empty, just return it */
 887			memcpy(next_key, next_l->key, key_size);
 888			return 0;
 889		}
 890	}
 891
 892	/* iterated over all buckets and all elements */
 893	return -ENOENT;
 894}
 895
 896static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
 897{
 898	check_and_free_fields(htab, l);
 899
 900	migrate_disable();
 901	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
 902		bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr);
 903	bpf_mem_cache_free(&htab->ma, l);
 904	migrate_enable();
 905}
 906
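/* For fd-based hash maps (e.g. hash-of-maps) the element's value holds a
 * referenced pointer rather than plain data; drop that reference here.
 * For ordinary hash maps map_fd_put_ptr is NULL and this is a no-op.
 */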
 907static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
 908{
 909	struct bpf_map *map = &htab->map;
 910	void *ptr;
 911
 912	if (map->ops->map_fd_put_ptr) {
 913		ptr = fd_htab_map_get_ptr(map, l);
 914		map->ops->map_fd_put_ptr(map, ptr, true);
 915	}
 916}
 917
 918static bool is_map_full(struct bpf_htab *htab)
 919{
 920	if (htab->use_percpu_counter)
 921		return __percpu_counter_compare(&htab->pcount, htab->map.max_entries,
 922						PERCPU_COUNTER_BATCH) >= 0;
 923	return atomic_read(&htab->count) >= htab->map.max_entries;
 924}
 925
 926static void inc_elem_count(struct bpf_htab *htab)
 927{
 928	bpf_map_inc_elem_count(&htab->map);
 929
 930	if (htab->use_percpu_counter)
 931		percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH);
 932	else
 933		atomic_inc(&htab->count);
 934}
 935
 936static void dec_elem_count(struct bpf_htab *htab)
 937{
 938	bpf_map_dec_elem_count(&htab->map);
 939
 940	if (htab->use_percpu_counter)
 941		percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH);
 942	else
 943		atomic_dec(&htab->count);
 944}
 945
 946
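/* Per-element teardown after an element has been unlinked: preallocated
 * maps free the element's special fields and push it back on the per-cpu
 * freelist, while malloc'ed maps hand it to the bpf_mem_alloc cache via
 * htab_elem_free().
 */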
 947static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 948{
 949	htab_put_fd_value(htab, l);
 950
 951	if (htab_is_prealloc(htab)) {
 952		bpf_map_dec_elem_count(&htab->map);
 953		check_and_free_fields(htab, l);
 954		pcpu_freelist_push(&htab->freelist, &l->fnode);
 955	} else {
 956		dec_elem_count(htab);
 957		htab_elem_free(htab, l);
 958	}
 959}
 960
 961static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
 962			    void *value, bool onallcpus)
 963{
 964	if (!onallcpus) {
 965		/* copy true value_size bytes */
 966		copy_map_value(&htab->map, this_cpu_ptr(pptr), value);
 967	} else {
 968		u32 size = round_up(htab->map.value_size, 8);
 969		int off = 0, cpu;
 970
 971		for_each_possible_cpu(cpu) {
 972			copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off);
 973			off += size;
 974		}
 975	}
 976}
 977
 978static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
 979			    void *value, bool onallcpus)
 980{
 981	/* When not setting the initial value on all cpus, zero-fill element
 982	 * values for other cpus. Otherwise, bpf program has no way to ensure
 983	 * known initial values for cpus other than current one
 984	 * (onallcpus=false always when coming from bpf prog).
 985	 */
 986	if (!onallcpus) {
 987		int current_cpu = raw_smp_processor_id();
 988		int cpu;
 989
 990		for_each_possible_cpu(cpu) {
 991			if (cpu == current_cpu)
 992				copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value);
 993			else /* Since elem is preallocated, we cannot touch special fields */
 994				zero_map_value(&htab->map, per_cpu_ptr(pptr, cpu));
 995		}
 996	} else {
 997		pcpu_copy_value(htab, pptr, value, onallcpus);
 998	}
 999}
1000
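/* A hash-of-maps element stores the inner map's kernel pointer as its value.
 * On 64-bit kernels that pointer is 8 bytes wide even though the declared
 * value_size is only 4 (the user-visible map fd/id), so the value copy in
 * alloc_htab_elem() is widened to 8 bytes for this case.
 */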
1001static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
1002{
1003	return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
1004	       BITS_PER_LONG == 64;
1005}
1006
1007static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
1008					 void *value, u32 key_size, u32 hash,
1009					 bool percpu, bool onallcpus,
1010					 struct htab_elem *old_elem)
1011{
1012	u32 size = htab->map.value_size;
1013	bool prealloc = htab_is_prealloc(htab);
1014	struct htab_elem *l_new, **pl_new;
1015	void __percpu *pptr;
1016
1017	if (prealloc) {
1018		if (old_elem) {
1019			/* if we're updating the existing element,
1020			 * use per-cpu extra elems to avoid freelist_pop/push
1021			 */
1022			pl_new = this_cpu_ptr(htab->extra_elems);
1023			l_new = *pl_new;
1024			*pl_new = old_elem;
1025		} else {
1026			struct pcpu_freelist_node *l;
1027
1028			l = __pcpu_freelist_pop(&htab->freelist);
1029			if (!l)
1030				return ERR_PTR(-E2BIG);
1031			l_new = container_of(l, struct htab_elem, fnode);
1032			bpf_map_inc_elem_count(&htab->map);
1033		}
1034	} else {
1035		if (is_map_full(htab))
1036			if (!old_elem)
1037				/* when map is full and update() is replacing
1038				 * old element, it's ok to allocate, since
1039				 * old element will be freed immediately.
1040				 * Otherwise return an error
1041				 */
1042				return ERR_PTR(-E2BIG);
1043		inc_elem_count(htab);
1044		l_new = bpf_mem_cache_alloc(&htab->ma);
1045		if (!l_new) {
1046			l_new = ERR_PTR(-ENOMEM);
1047			goto dec_count;
1048		}
1049	}
1050
1051	memcpy(l_new->key, key, key_size);
1052	if (percpu) {
1053		if (prealloc) {
1054			pptr = htab_elem_get_ptr(l_new, key_size);
1055		} else {
1056			/* alloc_percpu zero-fills */
1057			void *ptr = bpf_mem_cache_alloc(&htab->pcpu_ma);
1058
1059			if (!ptr) {
1060				bpf_mem_cache_free(&htab->ma, l_new);
1061				l_new = ERR_PTR(-ENOMEM);
1062				goto dec_count;
1063			}
1064			l_new->ptr_to_pptr = ptr;
1065			pptr = *(void __percpu **)ptr;
1066		}
1067
1068		pcpu_init_value(htab, pptr, value, onallcpus);
1069
1070		if (!prealloc)
1071			htab_elem_set_ptr(l_new, key_size, pptr);
1072	} else if (fd_htab_map_needs_adjust(htab)) {
1073		size = round_up(size, 8);
1074		memcpy(l_new->key + round_up(key_size, 8), value, size);
1075	} else {
1076		copy_map_value(&htab->map,
1077			       l_new->key + round_up(key_size, 8),
1078			       value);
1079	}
1080
1081	l_new->hash = hash;
1082	return l_new;
1083dec_count:
1084	dec_elem_count(htab);
1085	return l_new;
1086}
1087
1088static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
1089		       u64 map_flags)
1090{
1091	if (l_old && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
1092		/* elem already exists */
1093		return -EEXIST;
1094
1095	if (!l_old && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
1096		/* elem doesn't exist, cannot update it */
1097		return -ENOENT;
1098
1099	return 0;
1100}
1101
1102/* Called from syscall or from eBPF program */
1103static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
1104				 u64 map_flags)
1105{
1106	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1107	struct htab_elem *l_new = NULL, *l_old;
1108	struct hlist_nulls_head *head;
1109	unsigned long flags;
1110	void *old_map_ptr;
1111	struct bucket *b;
1112	u32 key_size, hash;
1113	int ret;
1114
1115	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
1116		/* unknown flags */
1117		return -EINVAL;
1118
1119	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1120		     !rcu_read_lock_bh_held());
1121
1122	key_size = map->key_size;
1123
1124	hash = htab_map_hash(key, key_size, htab->hashrnd);
1125
1126	b = __select_bucket(htab, hash);
1127	head = &b->head;
1128
1129	if (unlikely(map_flags & BPF_F_LOCK)) {
1130		if (unlikely(!btf_record_has_field(map->record, BPF_SPIN_LOCK)))
1131			return -EINVAL;
1132		/* find an element without taking the bucket lock */
1133		l_old = lookup_nulls_elem_raw(head, hash, key, key_size,
1134					      htab->n_buckets);
1135		ret = check_flags(htab, l_old, map_flags);
1136		if (ret)
1137			return ret;
1138		if (l_old) {
1139			/* grab the element lock and update value in place */
1140			copy_map_value_locked(map,
1141					      l_old->key + round_up(key_size, 8),
1142					      value, false);
1143			return 0;
1144		}
1145		/* fall through, grab the bucket lock and lookup again.
1146		 * 99.9% chance that the element won't be found,
1147		 * but second lookup under lock has to be done.
1148		 */
1149	}
1150
1151	ret = htab_lock_bucket(htab, b, hash, &flags);
1152	if (ret)
1153		return ret;
1154
1155	l_old = lookup_elem_raw(head, hash, key, key_size);
1156
1157	ret = check_flags(htab, l_old, map_flags);
1158	if (ret)
1159		goto err;
1160
1161	if (unlikely(l_old && (map_flags & BPF_F_LOCK))) {
1162		/* first lookup without the bucket lock didn't find the element,
1163		 * but second lookup with the bucket lock found it.
1164		 * This case is highly unlikely, but has to be dealt with:
1165		 * grab the element lock in addition to the bucket lock
1166		 * and update element in place
1167		 */
1168		copy_map_value_locked(map,
1169				      l_old->key + round_up(key_size, 8),
1170				      value, false);
1171		ret = 0;
1172		goto err;
1173	}
1174
1175	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
1176				l_old);
1177	if (IS_ERR(l_new)) {
1178		/* all pre-allocated elements are in use or memory exhausted */
1179		ret = PTR_ERR(l_new);
1180		goto err;
1181	}
1182
1183	/* add new element to the head of the list, so that
1184	 * concurrent search will find it before old elem
1185	 */
1186	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1187	if (l_old) {
1188		hlist_nulls_del_rcu(&l_old->hash_node);
1189
1190		/* l_old has already been stashed in htab->extra_elems, free
1191		 * its special fields before it is available for reuse. Also
1192		 * save the old map pointer in htab of maps before unlock
1193		 * and release it after unlock.
1194		 */
1195		old_map_ptr = NULL;
1196		if (htab_is_prealloc(htab)) {
1197			if (map->ops->map_fd_put_ptr)
1198				old_map_ptr = fd_htab_map_get_ptr(map, l_old);
1199			check_and_free_fields(htab, l_old);
1200		}
1201	}
1202	htab_unlock_bucket(htab, b, hash, flags);
1203	if (l_old) {
1204		if (old_map_ptr)
1205			map->ops->map_fd_put_ptr(map, old_map_ptr, true);
1206		if (!htab_is_prealloc(htab))
1207			free_htab_elem(htab, l_old);
1208	}
1209	return 0;
1210err:
1211	htab_unlock_bucket(htab, b, hash, flags);
1212	return ret;
1213}
1214
1215static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem)
1216{
1217	check_and_free_fields(htab, elem);
1218	bpf_map_dec_elem_count(&htab->map);
1219	bpf_lru_push_free(&htab->lru, &elem->lru_node);
1220}
1221
1222static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
1223				     u64 map_flags)
1224{
1225	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1226	struct htab_elem *l_new, *l_old = NULL;
1227	struct hlist_nulls_head *head;
1228	unsigned long flags;
1229	struct bucket *b;
1230	u32 key_size, hash;
1231	int ret;
1232
1233	if (unlikely(map_flags > BPF_EXIST))
1234		/* unknown flags */
1235		return -EINVAL;
1236
1237	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1238		     !rcu_read_lock_bh_held());
1239
1240	key_size = map->key_size;
1241
1242	hash = htab_map_hash(key, key_size, htab->hashrnd);
1243
1244	b = __select_bucket(htab, hash);
1245	head = &b->head;
1246
1247	/* For LRU, we need to alloc before taking bucket's
1248	 * spinlock because getting free nodes from LRU may need
1249	 * to remove older elements from htab and this removal
1250	 * operation will need a bucket lock.
1251	 */
1252	l_new = prealloc_lru_pop(htab, key, hash);
1253	if (!l_new)
1254		return -ENOMEM;
1255	copy_map_value(&htab->map,
1256		       l_new->key + round_up(map->key_size, 8), value);
1257
1258	ret = htab_lock_bucket(htab, b, hash, &flags);
1259	if (ret)
1260		goto err_lock_bucket;
1261
1262	l_old = lookup_elem_raw(head, hash, key, key_size);
1263
1264	ret = check_flags(htab, l_old, map_flags);
1265	if (ret)
1266		goto err;
1267
1268	/* add new element to the head of the list, so that
1269	 * concurrent search will find it before old elem
1270	 */
1271	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1272	if (l_old) {
1273		bpf_lru_node_set_ref(&l_new->lru_node);
1274		hlist_nulls_del_rcu(&l_old->hash_node);
1275	}
1276	ret = 0;
1277
1278err:
1279	htab_unlock_bucket(htab, b, hash, flags);
1280
1281err_lock_bucket:
1282	if (ret)
1283		htab_lru_push_free(htab, l_new);
1284	else if (l_old)
1285		htab_lru_push_free(htab, l_old);
1286
1287	return ret;
1288}
1289
1290static long __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
1291					  void *value, u64 map_flags,
1292					  bool onallcpus)
1293{
1294	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1295	struct htab_elem *l_new = NULL, *l_old;
1296	struct hlist_nulls_head *head;
1297	unsigned long flags;
1298	struct bucket *b;
1299	u32 key_size, hash;
1300	int ret;
1301
1302	if (unlikely(map_flags > BPF_EXIST))
1303		/* unknown flags */
1304		return -EINVAL;
1305
1306	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1307		     !rcu_read_lock_bh_held());
1308
1309	key_size = map->key_size;
1310
1311	hash = htab_map_hash(key, key_size, htab->hashrnd);
1312
1313	b = __select_bucket(htab, hash);
1314	head = &b->head;
1315
1316	ret = htab_lock_bucket(htab, b, hash, &flags);
1317	if (ret)
1318		return ret;
1319
1320	l_old = lookup_elem_raw(head, hash, key, key_size);
1321
1322	ret = check_flags(htab, l_old, map_flags);
1323	if (ret)
1324		goto err;
1325
1326	if (l_old) {
1327		/* per-cpu hash map can update value in-place */
1328		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1329				value, onallcpus);
1330	} else {
1331		l_new = alloc_htab_elem(htab, key, value, key_size,
1332					hash, true, onallcpus, NULL);
1333		if (IS_ERR(l_new)) {
1334			ret = PTR_ERR(l_new);
1335			goto err;
1336		}
1337		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1338	}
1339	ret = 0;
1340err:
1341	htab_unlock_bucket(htab, b, hash, flags);
1342	return ret;
1343}
1344
1345static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
1346					      void *value, u64 map_flags,
1347					      bool onallcpus)
1348{
1349	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1350	struct htab_elem *l_new = NULL, *l_old;
1351	struct hlist_nulls_head *head;
1352	unsigned long flags;
1353	struct bucket *b;
1354	u32 key_size, hash;
1355	int ret;
1356
1357	if (unlikely(map_flags > BPF_EXIST))
1358		/* unknown flags */
1359		return -EINVAL;
1360
1361	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1362		     !rcu_read_lock_bh_held());
1363
1364	key_size = map->key_size;
1365
1366	hash = htab_map_hash(key, key_size, htab->hashrnd);
1367
1368	b = __select_bucket(htab, hash);
1369	head = &b->head;
1370
1371	/* For LRU, we need to alloc before taking bucket's
1372	 * spinlock because LRU's elem alloc may need
1373	 * to remove older elem from htab and this removal
1374	 * operation will need a bucket lock.
1375	 */
1376	if (map_flags != BPF_EXIST) {
1377		l_new = prealloc_lru_pop(htab, key, hash);
1378		if (!l_new)
1379			return -ENOMEM;
1380	}
1381
1382	ret = htab_lock_bucket(htab, b, hash, &flags);
1383	if (ret)
1384		goto err_lock_bucket;
1385
1386	l_old = lookup_elem_raw(head, hash, key, key_size);
1387
1388	ret = check_flags(htab, l_old, map_flags);
1389	if (ret)
1390		goto err;
1391
1392	if (l_old) {
1393		bpf_lru_node_set_ref(&l_old->lru_node);
1394
1395		/* per-cpu hash map can update value in-place */
1396		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1397				value, onallcpus);
1398	} else {
1399		pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
1400				value, onallcpus);
1401		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1402		l_new = NULL;
1403	}
1404	ret = 0;
1405err:
1406	htab_unlock_bucket(htab, b, hash, flags);
1407err_lock_bucket:
1408	if (l_new) {
1409		bpf_map_dec_elem_count(&htab->map);
1410		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
1411	}
1412	return ret;
1413}
1414
1415static long htab_percpu_map_update_elem(struct bpf_map *map, void *key,
1416					void *value, u64 map_flags)
1417{
1418	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
1419}
1420
1421static long htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
1422					    void *value, u64 map_flags)
1423{
1424	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
1425						 false);
1426}
1427
1428/* Called from syscall or from eBPF program */
1429static long htab_map_delete_elem(struct bpf_map *map, void *key)
1430{
1431	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1432	struct hlist_nulls_head *head;
1433	struct bucket *b;
1434	struct htab_elem *l;
1435	unsigned long flags;
1436	u32 hash, key_size;
1437	int ret;
1438
1439	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1440		     !rcu_read_lock_bh_held());
1441
1442	key_size = map->key_size;
1443
1444	hash = htab_map_hash(key, key_size, htab->hashrnd);
1445	b = __select_bucket(htab, hash);
1446	head = &b->head;
1447
1448	ret = htab_lock_bucket(htab, b, hash, &flags);
1449	if (ret)
1450		return ret;
1451
1452	l = lookup_elem_raw(head, hash, key, key_size);
1453	if (l)
1454		hlist_nulls_del_rcu(&l->hash_node);
1455	else
1456		ret = -ENOENT;
1457
1458	htab_unlock_bucket(htab, b, hash, flags);
1459
1460	if (l)
1461		free_htab_elem(htab, l);
1462	return ret;
1463}
1464
1465static long htab_lru_map_delete_elem(struct bpf_map *map, void *key)
1466{
1467	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1468	struct hlist_nulls_head *head;
1469	struct bucket *b;
1470	struct htab_elem *l;
1471	unsigned long flags;
1472	u32 hash, key_size;
1473	int ret;
1474
1475	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1476		     !rcu_read_lock_bh_held());
1477
1478	key_size = map->key_size;
1479
1480	hash = htab_map_hash(key, key_size, htab->hashrnd);
1481	b = __select_bucket(htab, hash);
1482	head = &b->head;
1483
1484	ret = htab_lock_bucket(htab, b, hash, &flags);
1485	if (ret)
1486		return ret;
1487
1488	l = lookup_elem_raw(head, hash, key, key_size);
1489
1490	if (l)
1491		hlist_nulls_del_rcu(&l->hash_node);
1492	else
1493		ret = -ENOENT;
1494
1495	htab_unlock_bucket(htab, b, hash, flags);
1496	if (l)
1497		htab_lru_push_free(htab, l);
1498	return ret;
1499}
1500
1501static void delete_all_elements(struct bpf_htab *htab)
1502{
1503	int i;
1504
1505	/* It's called from a worker thread, so disable migration here,
1506	 * since bpf_mem_cache_free() relies on that.
1507	 */
1508	migrate_disable();
1509	for (i = 0; i < htab->n_buckets; i++) {
1510		struct hlist_nulls_head *head = select_bucket(htab, i);
1511		struct hlist_nulls_node *n;
1512		struct htab_elem *l;
1513
1514		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1515			hlist_nulls_del_rcu(&l->hash_node);
1516			htab_elem_free(htab, l);
1517		}
1518		cond_resched();
1519	}
1520	migrate_enable();
1521}
1522
1523static void htab_free_malloced_timers_and_wq(struct bpf_htab *htab)
1524{
1525	int i;
1526
1527	rcu_read_lock();
1528	for (i = 0; i < htab->n_buckets; i++) {
1529		struct hlist_nulls_head *head = select_bucket(htab, i);
1530		struct hlist_nulls_node *n;
1531		struct htab_elem *l;
1532
1533		hlist_nulls_for_each_entry(l, n, head, hash_node) {
1534			/* We only free timer and workqueue on uref dropping to zero */
1535			if (btf_record_has_field(htab->map.record, BPF_TIMER))
1536				bpf_obj_free_timer(htab->map.record,
1537						   l->key + round_up(htab->map.key_size, 8));
1538			if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE))
1539				bpf_obj_free_workqueue(htab->map.record,
1540						       l->key + round_up(htab->map.key_size, 8));
1541		}
1542		cond_resched_rcu();
1543	}
1544	rcu_read_unlock();
1545}
1546
1547static void htab_map_free_timers_and_wq(struct bpf_map *map)
1548{
1549	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1550
1551	/* We only free timer and workqueue on uref dropping to zero */
1552	if (btf_record_has_field(htab->map.record, BPF_TIMER | BPF_WORKQUEUE)) {
1553		if (!htab_is_prealloc(htab))
1554			htab_free_malloced_timers_and_wq(htab);
1555		else
1556			htab_free_prealloced_timers_and_wq(htab);
1557	}
1558}
1559
1560/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
1561static void htab_map_free(struct bpf_map *map)
1562{
1563	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1564	int i;
1565
1566	/* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
1567	 * bpf_free_used_maps() is called after bpf prog is no longer executing.
1568	 * There is no need to synchronize_rcu() here to protect map elements.
1569	 */
1570
1571	/* htab no longer uses call_rcu() directly. bpf_mem_alloc does it
1572	 * underneath and is responsible for waiting for callbacks to finish
1573	 * during bpf_mem_alloc_destroy().
1574	 */
1575	if (!htab_is_prealloc(htab)) {
1576		delete_all_elements(htab);
1577	} else {
1578		htab_free_prealloced_fields(htab);
1579		prealloc_destroy(htab);
1580	}
1581
1582	bpf_map_free_elem_count(map);
1583	free_percpu(htab->extra_elems);
1584	bpf_map_area_free(htab->buckets);
1585	bpf_mem_alloc_destroy(&htab->pcpu_ma);
1586	bpf_mem_alloc_destroy(&htab->ma);
1587	if (htab->use_percpu_counter)
1588		percpu_counter_destroy(&htab->pcount);
1589	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
1590		free_percpu(htab->map_locked[i]);
1591	lockdep_unregister_key(&htab->lockdep_key);
1592	bpf_map_area_free(htab);
1593}
1594
1595static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
1596				   struct seq_file *m)
1597{
1598	void *value;
1599
1600	rcu_read_lock();
1601
1602	value = htab_map_lookup_elem(map, key);
1603	if (!value) {
1604		rcu_read_unlock();
1605		return;
1606	}
1607
1608	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
1609	seq_puts(m, ": ");
1610	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
1611	seq_putc(m, '\n');
1612
1613	rcu_read_unlock();
1614}
1615
1616static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1617					     void *value, bool is_lru_map,
1618					     bool is_percpu, u64 flags)
1619{
1620	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1621	struct hlist_nulls_head *head;
1622	unsigned long bflags;
1623	struct htab_elem *l;
1624	u32 hash, key_size;
1625	struct bucket *b;
1626	int ret;
1627
1628	key_size = map->key_size;
1629
1630	hash = htab_map_hash(key, key_size, htab->hashrnd);
1631	b = __select_bucket(htab, hash);
1632	head = &b->head;
1633
1634	ret = htab_lock_bucket(htab, b, hash, &bflags);
1635	if (ret)
1636		return ret;
1637
1638	l = lookup_elem_raw(head, hash, key, key_size);
1639	if (!l) {
1640		ret = -ENOENT;
1641	} else {
1642		if (is_percpu) {
1643			u32 roundup_value_size = round_up(map->value_size, 8);
1644			void __percpu *pptr;
1645			int off = 0, cpu;
1646
1647			pptr = htab_elem_get_ptr(l, key_size);
1648			for_each_possible_cpu(cpu) {
1649				copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu));
1650				check_and_init_map_value(&htab->map, value + off);
1651				off += roundup_value_size;
1652			}
1653		} else {
1654			u32 roundup_key_size = round_up(map->key_size, 8);
1655
1656			if (flags & BPF_F_LOCK)
1657				copy_map_value_locked(map, value, l->key +
1658						      roundup_key_size,
1659						      true);
1660			else
1661				copy_map_value(map, value, l->key +
1662					       roundup_key_size);
1663			/* Zeroing special fields in the temp buffer */
1664			check_and_init_map_value(map, value);
1665		}
1666
1667		hlist_nulls_del_rcu(&l->hash_node);
1668		if (!is_lru_map)
1669			free_htab_elem(htab, l);
1670	}
1671
1672	htab_unlock_bucket(htab, b, hash, bflags);
1673
1674	if (is_lru_map && l)
1675		htab_lru_push_free(htab, l);
1676
1677	return ret;
1678}
1679
1680static int htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1681					   void *value, u64 flags)
1682{
1683	return __htab_map_lookup_and_delete_elem(map, key, value, false, false,
1684						 flags);
1685}
1686
1687static int htab_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
1688						  void *key, void *value,
1689						  u64 flags)
1690{
1691	return __htab_map_lookup_and_delete_elem(map, key, value, false, true,
1692						 flags);
1693}
1694
1695static int htab_lru_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1696					       void *value, u64 flags)
1697{
1698	return __htab_map_lookup_and_delete_elem(map, key, value, true, false,
1699						 flags);
1700}
1701
1702static int htab_lru_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
1703						      void *key, void *value,
1704						      u64 flags)
1705{
1706	return __htab_map_lookup_and_delete_elem(map, key, value, true, true,
1707						 flags);
1708}
1709
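/* Common implementation behind the lookup_batch and lookup_and_delete_batch
 * ops: starting from the bucket in attr->batch.in_batch, copy whole buckets
 * of keys/values into temporary kernel buffers under the bucket lock,
 * optionally unlink the elements, then copy the buffers to user space and
 * free any unlinked elements only after the lock has been dropped.
 */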
1710static int
1711__htab_map_lookup_and_delete_batch(struct bpf_map *map,
1712				   const union bpf_attr *attr,
1713				   union bpf_attr __user *uattr,
1714				   bool do_delete, bool is_lru_map,
1715				   bool is_percpu)
1716{
1717	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1718	u32 bucket_cnt, total, key_size, value_size, roundup_key_size;
1719	void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val;
1720	void __user *uvalues = u64_to_user_ptr(attr->batch.values);
1721	void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
1722	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1723	u32 batch, max_count, size, bucket_size, map_id;
1724	struct htab_elem *node_to_free = NULL;
1725	u64 elem_map_flags, map_flags;
1726	struct hlist_nulls_head *head;
1727	struct hlist_nulls_node *n;
1728	unsigned long flags = 0;
1729	bool locked = false;
1730	struct htab_elem *l;
1731	struct bucket *b;
1732	int ret = 0;
1733
1734	elem_map_flags = attr->batch.elem_flags;
1735	if ((elem_map_flags & ~BPF_F_LOCK) ||
1736	    ((elem_map_flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
1737		return -EINVAL;
1738
1739	map_flags = attr->batch.flags;
1740	if (map_flags)
1741		return -EINVAL;
1742
1743	max_count = attr->batch.count;
1744	if (!max_count)
1745		return 0;
1746
1747	if (put_user(0, &uattr->batch.count))
1748		return -EFAULT;
1749
1750	batch = 0;
1751	if (ubatch && copy_from_user(&batch, ubatch, sizeof(batch)))
1752		return -EFAULT;
1753
1754	if (batch >= htab->n_buckets)
1755		return -ENOENT;
1756
1757	key_size = htab->map.key_size;
1758	roundup_key_size = round_up(htab->map.key_size, 8);
1759	value_size = htab->map.value_size;
1760	size = round_up(value_size, 8);
1761	if (is_percpu)
1762		value_size = size * num_possible_cpus();
1763	total = 0;
1764	/* while experimenting with hash tables with sizes ranging from 10 to
1765	 * 1000, it was observed that a bucket can have up to 5 entries.
1766	 */
1767	bucket_size = 5;
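	/* bucket_size only seeds the initial buffer size; if a bucket turns
	 * out to hold more entries, the buffers are reallocated below (see
	 * the bucket_cnt > bucket_size check) and the bucket is retried.
	 */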
1768
1769alloc:
1770	/* We cannot do copy_from_user or copy_to_user inside
1771	 * the rcu_read_lock. Allocate enough space here.
1772	 */
1773	keys = kvmalloc_array(key_size, bucket_size, GFP_USER | __GFP_NOWARN);
1774	values = kvmalloc_array(value_size, bucket_size, GFP_USER | __GFP_NOWARN);
1775	if (!keys || !values) {
1776		ret = -ENOMEM;
1777		goto after_loop;
1778	}
1779
1780again:
1781	bpf_disable_instrumentation();
1782	rcu_read_lock();
1783again_nocopy:
1784	dst_key = keys;
1785	dst_val = values;
1786	b = &htab->buckets[batch];
1787	head = &b->head;
1788	/* do not grab the lock unless we need it (bucket_cnt > 0). */
1789	if (locked) {
1790		ret = htab_lock_bucket(htab, b, batch, &flags);
1791		if (ret) {
1792			rcu_read_unlock();
1793			bpf_enable_instrumentation();
1794			goto after_loop;
1795		}
1796	}
1797
1798	bucket_cnt = 0;
1799	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
1800		bucket_cnt++;
1801
1802	if (bucket_cnt && !locked) {
1803		locked = true;
1804		goto again_nocopy;
1805	}
1806
1807	if (bucket_cnt > (max_count - total)) {
1808		if (total == 0)
1809			ret = -ENOSPC;
1810		/* Note that since bucket_cnt > 0 here, it is implicit
1811		 * that the lock was grabbed, so release it.
1812		 */
1813		htab_unlock_bucket(htab, b, batch, flags);
1814		rcu_read_unlock();
1815		bpf_enable_instrumentation();
1816		goto after_loop;
1817	}
1818
1819	if (bucket_cnt > bucket_size) {
1820		bucket_size = bucket_cnt;
1821		/* Note that since bucket_cnt > 0 here, it is implicit
1822		 * that the lock was grabbed, so release it.
1823		 */
1824		htab_unlock_bucket(htab, b, batch, flags);
1825		rcu_read_unlock();
1826		bpf_enable_instrumentation();
1827		kvfree(keys);
1828		kvfree(values);
1829		goto alloc;
1830	}
1831
1832	/* Next block is only safe to run if you have grabbed the lock */
1833	if (!locked)
1834		goto next_batch;
1835
1836	hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1837		memcpy(dst_key, l->key, key_size);
1838
1839		if (is_percpu) {
1840			int off = 0, cpu;
1841			void __percpu *pptr;
1842
1843			pptr = htab_elem_get_ptr(l, map->key_size);
1844			for_each_possible_cpu(cpu) {
1845				copy_map_value_long(&htab->map, dst_val + off, per_cpu_ptr(pptr, cpu));
1846				check_and_init_map_value(&htab->map, dst_val + off);
1847				off += size;
1848			}
1849		} else {
1850			value = l->key + roundup_key_size;
1851			if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
1852				struct bpf_map **inner_map = value;
1853
1854				 /* Actual value is the id of the inner map */
1855				map_id = map->ops->map_fd_sys_lookup_elem(*inner_map);
1856				value = &map_id;
1857			}
1858
1859			if (elem_map_flags & BPF_F_LOCK)
1860				copy_map_value_locked(map, dst_val, value,
1861						      true);
1862			else
1863				copy_map_value(map, dst_val, value);
1864			/* Zeroing special fields in the temp buffer */
1865			check_and_init_map_value(map, dst_val);
1866		}
1867		if (do_delete) {
1868			hlist_nulls_del_rcu(&l->hash_node);
1869
1870			/* bpf_lru_push_free() will acquire lru_lock, which
1871			 * may cause deadlock. See comments in function
1872			 * prealloc_lru_pop(). Let us do bpf_lru_push_free()
1873			 * after releasing the bucket lock.
1874			 *
1875			 * For htab of maps, htab_put_fd_value() in
1876			 * free_htab_elem() may acquire a spinlock with bucket
1877			 * lock being held and it violates the lock rule, so
1878			 * invoke free_htab_elem() after unlock as well.
1879			 */
1880			l->batch_flink = node_to_free;
1881			node_to_free = l;
1882		}
1883		dst_key += key_size;
1884		dst_val += value_size;
1885	}
1886
1887	htab_unlock_bucket(htab, b, batch, flags);
1888	locked = false;
1889
1890	while (node_to_free) {
1891		l = node_to_free;
1892		node_to_free = node_to_free->batch_flink;
1893		if (is_lru_map)
1894			htab_lru_push_free(htab, l);
1895		else
1896			free_htab_elem(htab, l);
1897	}
1898
1899next_batch:
1900	/* If we are not copying data, we can go to the next bucket and avoid
1901	 * releasing the RCU read lock.
1902	 */
1903	if (!bucket_cnt && (batch + 1 < htab->n_buckets)) {
1904		batch++;
1905		goto again_nocopy;
1906	}
1907
1908	rcu_read_unlock();
1909	bpf_enable_instrumentation();
1910	if (bucket_cnt && (copy_to_user(ukeys + total * key_size, keys,
1911	    key_size * bucket_cnt) ||
1912	    copy_to_user(uvalues + total * value_size, values,
1913	    value_size * bucket_cnt))) {
1914		ret = -EFAULT;
1915		goto after_loop;
1916	}
1917
1918	total += bucket_cnt;
1919	batch++;
1920	if (batch >= htab->n_buckets) {
1921		ret = -ENOENT;
1922		goto after_loop;
1923	}
1924	goto again;
1925
1926after_loop:
1927	if (ret == -EFAULT)
1928		goto out;
1929
1930	/* copy # of entries and next batch */
1931	ubatch = u64_to_user_ptr(attr->batch.out_batch);
1932	if (copy_to_user(ubatch, &batch, sizeof(batch)) ||
1933	    put_user(total, &uattr->batch.count))
1934		ret = -EFAULT;
1935
1936out:
1937	kvfree(keys);
1938	kvfree(values);
1939	return ret;
1940}
1941
1942static int
1943htab_percpu_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1944			     union bpf_attr __user *uattr)
1945{
1946	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1947						  false, true);
1948}
1949
1950static int
1951htab_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
1952					const union bpf_attr *attr,
1953					union bpf_attr __user *uattr)
1954{
1955	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1956						  false, true);
1957}
1958
1959static int
1960htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1961		      union bpf_attr __user *uattr)
1962{
1963	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1964						  false, false);
1965}
1966
1967static int
1968htab_map_lookup_and_delete_batch(struct bpf_map *map,
1969				 const union bpf_attr *attr,
1970				 union bpf_attr __user *uattr)
1971{
1972	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1973						  false, false);
1974}
1975
1976static int
1977htab_lru_percpu_map_lookup_batch(struct bpf_map *map,
1978				 const union bpf_attr *attr,
1979				 union bpf_attr __user *uattr)
1980{
1981	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1982						  true, true);
1983}
1984
1985static int
1986htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
1987					    const union bpf_attr *attr,
1988					    union bpf_attr __user *uattr)
1989{
1990	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1991						  true, true);
1992}
1993
1994static int
1995htab_lru_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1996			  union bpf_attr __user *uattr)
1997{
1998	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1999						  true, false);
2000}
2001
2002static int
2003htab_lru_map_lookup_and_delete_batch(struct bpf_map *map,
2004				     const union bpf_attr *attr,
2005				     union bpf_attr __user *uattr)
2006{
2007	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
2008						  true, false);
2009}
2010
2011struct bpf_iter_seq_hash_map_info {
2012	struct bpf_map *map;
2013	struct bpf_htab *htab;
2014	void *percpu_value_buf; // non-zero means percpu hash
2015	u32 bucket_id;
2016	u32 skip_elems;
2017};
2018
2019static struct htab_elem *
2020bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
2021			   struct htab_elem *prev_elem)
2022{
2023	const struct bpf_htab *htab = info->htab;
2024	u32 skip_elems = info->skip_elems;
2025	u32 bucket_id = info->bucket_id;
2026	struct hlist_nulls_head *head;
2027	struct hlist_nulls_node *n;
2028	struct htab_elem *elem;
2029	struct bucket *b;
2030	u32 i, count;
2031
2032	if (bucket_id >= htab->n_buckets)
2033		return NULL;
2034
2035	/* try to find next elem in the same bucket */
2036	if (prev_elem) {
2037		/* no update/deletion on this bucket, prev_elem should still be valid
2038		 * and we won't skip elements.
2039		 */
2040		n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node));
2041		elem = hlist_nulls_entry_safe(n, struct htab_elem, hash_node);
2042		if (elem)
2043			return elem;
2044
2045		/* not found, unlock and go to the next bucket */
2046		b = &htab->buckets[bucket_id++];
2047		rcu_read_unlock();
2048		skip_elems = 0;
2049	}
2050
2051	for (i = bucket_id; i < htab->n_buckets; i++) {
2052		b = &htab->buckets[i];
2053		rcu_read_lock();
2054
2055		count = 0;
2056		head = &b->head;
2057		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
2058			if (count >= skip_elems) {
2059				info->bucket_id = i;
2060				info->skip_elems = count;
2061				return elem;
2062			}
2063			count++;
2064		}
2065
2066		rcu_read_unlock();
2067		skip_elems = 0;
2068	}
2069
2070	info->bucket_id = i;
2071	info->skip_elems = 0;
2072	return NULL;
2073}
2074
2075static void *bpf_hash_map_seq_start(struct seq_file *seq, loff_t *pos)
2076{
2077	struct bpf_iter_seq_hash_map_info *info = seq->private;
2078	struct htab_elem *elem;
2079
2080	elem = bpf_hash_map_seq_find_next(info, NULL);
2081	if (!elem)
2082		return NULL;
2083
2084	if (*pos == 0)
2085		++*pos;
2086	return elem;
2087}
2088
2089static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2090{
2091	struct bpf_iter_seq_hash_map_info *info = seq->private;
2092
2093	++*pos;
2094	++info->skip_elems;
2095	return bpf_hash_map_seq_find_next(info, v);
2096}
2097
2098static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
2099{
2100	struct bpf_iter_seq_hash_map_info *info = seq->private;
2101	u32 roundup_key_size, roundup_value_size;
2102	struct bpf_iter__bpf_map_elem ctx = {};
2103	struct bpf_map *map = info->map;
2104	struct bpf_iter_meta meta;
2105	int ret = 0, off = 0, cpu;
2106	struct bpf_prog *prog;
2107	void __percpu *pptr;
2108
2109	meta.seq = seq;
2110	prog = bpf_iter_get_info(&meta, elem == NULL);
2111	if (prog) {
2112		ctx.meta = &meta;
2113		ctx.map = info->map;
2114		if (elem) {
2115			roundup_key_size = round_up(map->key_size, 8);
2116			ctx.key = elem->key;
2117			if (!info->percpu_value_buf) {
2118				ctx.value = elem->key + roundup_key_size;
2119			} else {
2120				roundup_value_size = round_up(map->value_size, 8);
2121				pptr = htab_elem_get_ptr(elem, map->key_size);
2122				for_each_possible_cpu(cpu) {
2123					copy_map_value_long(map, info->percpu_value_buf + off,
2124							    per_cpu_ptr(pptr, cpu));
2125					check_and_init_map_value(map, info->percpu_value_buf + off);
2126					off += roundup_value_size;
2127				}
2128				ctx.value = info->percpu_value_buf;
2129			}
2130		}
2131		ret = bpf_iter_run_prog(prog, &ctx);
2132	}
2133
2134	return ret;
2135}
2136
2137static int bpf_hash_map_seq_show(struct seq_file *seq, void *v)
2138{
2139	return __bpf_hash_map_seq_show(seq, v);
2140}
2141
2142static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v)
2143{
2144	if (!v)
2145		(void)__bpf_hash_map_seq_show(seq, NULL);
2146	else
2147		rcu_read_unlock();
2148}
2149
2150static int bpf_iter_init_hash_map(void *priv_data,
2151				  struct bpf_iter_aux_info *aux)
2152{
2153	struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
2154	struct bpf_map *map = aux->map;
2155	void *value_buf;
2156	u32 buf_size;
2157
2158	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
2159	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
2160		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
2161		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
2162		if (!value_buf)
2163			return -ENOMEM;
2164
2165		seq_info->percpu_value_buf = value_buf;
2166	}
2167
2168	bpf_map_inc_with_uref(map);
2169	seq_info->map = map;
2170	seq_info->htab = container_of(map, struct bpf_htab, map);
2171	return 0;
2172}
2173
2174static void bpf_iter_fini_hash_map(void *priv_data)
2175{
2176	struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
2177
2178	bpf_map_put_with_uref(seq_info->map);
2179	kfree(seq_info->percpu_value_buf);
2180}
2181
2182static const struct seq_operations bpf_hash_map_seq_ops = {
2183	.start	= bpf_hash_map_seq_start,
2184	.next	= bpf_hash_map_seq_next,
2185	.stop	= bpf_hash_map_seq_stop,
2186	.show	= bpf_hash_map_seq_show,
2187};
2188
2189static const struct bpf_iter_seq_info iter_seq_info = {
2190	.seq_ops		= &bpf_hash_map_seq_ops,
2191	.init_seq_private	= bpf_iter_init_hash_map,
2192	.fini_seq_private	= bpf_iter_fini_hash_map,
2193	.seq_priv_size		= sizeof(struct bpf_iter_seq_hash_map_info),
2194};
2195
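/* Implements bpf_for_each_map_elem() for hash maps: walk every bucket under
 * RCU and call callback_fn(map, key, value, callback_ctx) for each element.
 * A non-zero return value from the callback stops the walk early. Returns
 * the number of elements visited.
 */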
2196static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn,
2197				   void *callback_ctx, u64 flags)
2198{
2199	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2200	struct hlist_nulls_head *head;
2201	struct hlist_nulls_node *n;
2202	struct htab_elem *elem;
2203	u32 roundup_key_size;
2204	int i, num_elems = 0;
2205	void __percpu *pptr;
2206	struct bucket *b;
2207	void *key, *val;
2208	bool is_percpu;
2209	u64 ret = 0;
2210
2211	if (flags != 0)
2212		return -EINVAL;
2213
2214	is_percpu = htab_is_percpu(htab);
2215
2216	roundup_key_size = round_up(map->key_size, 8);
2217	/* disable migration so percpu value prepared here will be the
2218	 * same as the one seen by the bpf program with bpf_map_lookup_elem().
2219	 */
2220	if (is_percpu)
2221		migrate_disable();
2222	for (i = 0; i < htab->n_buckets; i++) {
2223		b = &htab->buckets[i];
2224		rcu_read_lock();
2225		head = &b->head;
2226		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
2227			key = elem->key;
2228			if (is_percpu) {
2229				/* current cpu value for percpu map */
2230				pptr = htab_elem_get_ptr(elem, map->key_size);
2231				val = this_cpu_ptr(pptr);
2232			} else {
2233				val = elem->key + roundup_key_size;
2234			}
2235			num_elems++;
2236			ret = callback_fn((u64)(long)map, (u64)(long)key,
2237					  (u64)(long)val, (u64)(long)callback_ctx, 0);
2238			/* return value: 0 - continue, 1 - stop and return */
2239			if (ret) {
2240				rcu_read_unlock();
2241				goto out;
2242			}
2243		}
2244		rcu_read_unlock();
2245	}
2246out:
2247	if (is_percpu)
2248		migrate_enable();
2249	return num_elems;
2250}
2251
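/* Rough estimate of the memory consumed by this map: the bpf_htab struct,
 * the bucket array and per-cpu lock counters, plus either the preallocated
 * element area or, for non-preallocated maps, an estimate based on the
 * current number of elements.
 */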
2252static u64 htab_map_mem_usage(const struct bpf_map *map)
2253{
2254	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2255	u32 value_size = round_up(htab->map.value_size, 8);
2256	bool prealloc = htab_is_prealloc(htab);
2257	bool percpu = htab_is_percpu(htab);
2258	bool lru = htab_is_lru(htab);
2259	u64 num_entries;
2260	u64 usage = sizeof(struct bpf_htab);
2261
2262	usage += sizeof(struct bucket) * htab->n_buckets;
2263	usage += sizeof(int) * num_possible_cpus() * HASHTAB_MAP_LOCK_COUNT;
2264	if (prealloc) {
2265		num_entries = map->max_entries;
2266		if (htab_has_extra_elems(htab))
2267			num_entries += num_possible_cpus();
2268
2269		usage += htab->elem_size * num_entries;
2270
2271		if (percpu)
2272			usage += value_size * num_possible_cpus() * num_entries;
2273		else if (!lru)
2274			usage += sizeof(struct htab_elem *) * num_possible_cpus();
2275	} else {
2276#define LLIST_NODE_SZ sizeof(struct llist_node)
2277
2278		num_entries = htab->use_percpu_counter ?
2279					  percpu_counter_sum(&htab->pcount) :
2280					  atomic_read(&htab->count);
2281		usage += (htab->elem_size + LLIST_NODE_SZ) * num_entries;
2282		if (percpu) {
2283			usage += (LLIST_NODE_SZ + sizeof(void *)) * num_entries;
2284			usage += value_size * num_possible_cpus() * num_entries;
2285		}
2286	}
2287	return usage;
2288}
2289
2290BTF_ID_LIST_SINGLE(htab_map_btf_ids, struct, bpf_htab)
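/* Operations for BPF_MAP_TYPE_HASH.  The BPF_MAP_TYPE_LRU_HASH table below
 * differs only in the lookup/update/delete callbacks (and their inlined
 * variants), which additionally maintain LRU state.
 */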
2291const struct bpf_map_ops htab_map_ops = {
2292	.map_meta_equal = bpf_map_meta_equal,
2293	.map_alloc_check = htab_map_alloc_check,
2294	.map_alloc = htab_map_alloc,
2295	.map_free = htab_map_free,
2296	.map_get_next_key = htab_map_get_next_key,
2297	.map_release_uref = htab_map_free_timers_and_wq,
2298	.map_lookup_elem = htab_map_lookup_elem,
2299	.map_lookup_and_delete_elem = htab_map_lookup_and_delete_elem,
2300	.map_update_elem = htab_map_update_elem,
2301	.map_delete_elem = htab_map_delete_elem,
2302	.map_gen_lookup = htab_map_gen_lookup,
2303	.map_seq_show_elem = htab_map_seq_show_elem,
2304	.map_set_for_each_callback_args = map_set_for_each_callback_args,
2305	.map_for_each_callback = bpf_for_each_hash_elem,
2306	.map_mem_usage = htab_map_mem_usage,
2307	BATCH_OPS(htab),
2308	.map_btf_id = &htab_map_btf_ids[0],
2309	.iter_seq_info = &iter_seq_info,
2310};
2311
2312const struct bpf_map_ops htab_lru_map_ops = {
2313	.map_meta_equal = bpf_map_meta_equal,
2314	.map_alloc_check = htab_map_alloc_check,
2315	.map_alloc = htab_map_alloc,
2316	.map_free = htab_map_free,
2317	.map_get_next_key = htab_map_get_next_key,
2318	.map_release_uref = htab_map_free_timers_and_wq,
2319	.map_lookup_elem = htab_lru_map_lookup_elem,
2320	.map_lookup_and_delete_elem = htab_lru_map_lookup_and_delete_elem,
2321	.map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
2322	.map_update_elem = htab_lru_map_update_elem,
2323	.map_delete_elem = htab_lru_map_delete_elem,
2324	.map_gen_lookup = htab_lru_map_gen_lookup,
2325	.map_seq_show_elem = htab_map_seq_show_elem,
2326	.map_set_for_each_callback_args = map_set_for_each_callback_args,
2327	.map_for_each_callback = bpf_for_each_hash_elem,
2328	.map_mem_usage = htab_map_mem_usage,
2329	BATCH_OPS(htab_lru),
2330	.map_btf_id = &htab_map_btf_ids[0],
2331	.iter_seq_info = &iter_seq_info,
2332};
2333
2334/* Called from eBPF program */
2335static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
2336{
2337	struct htab_elem *l = __htab_map_lookup_elem(map, key);
2338
2339	if (l)
2340		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
2341	else
2342		return NULL;
2343}
2344
2345/* inline bpf_map_lookup_elem() call for per-CPU hashmap */
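/* The emitted instruction sequence is roughly equivalent to:
 *
 *	l = __htab_map_lookup_elem(map, key);
 *	if (!l)
 *		return NULL;
 *	pptr = *(void **)((void *)l + offsetof(struct htab_elem, key) +
 *			  map->key_size);
 *	return this_cpu_ptr(pptr);
 *
 * and therefore requires JIT support for the per-CPU address instruction.
 */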
2346static int htab_percpu_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
2347{
2348	struct bpf_insn *insn = insn_buf;
2349
2350	if (!bpf_jit_supports_percpu_insn())
2351		return -EOPNOTSUPP;
2352
2353	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
2354		     (void *(*)(struct bpf_map *map, void *key))NULL));
2355	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
2356	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3);
2357	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
2358				offsetof(struct htab_elem, key) + map->key_size);
2359	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
2360	*insn++ = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
2361
2362	return insn - insn_buf;
2363}
2364
2365static void *htab_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
2366{
2367	struct htab_elem *l;
2368
2369	if (cpu >= nr_cpu_ids)
2370		return NULL;
2371
2372	l = __htab_map_lookup_elem(map, key);
2373	if (l)
2374		return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
2375	else
2376		return NULL;
2377}
2378
2379static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
2380{
2381	struct htab_elem *l = __htab_map_lookup_elem(map, key);
2382
2383	if (l) {
2384		bpf_lru_node_set_ref(&l->lru_node);
2385		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
2386	}
2387
2388	return NULL;
2389}
2390
2391static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
2392{
2393	struct htab_elem *l;
2394
2395	if (cpu >= nr_cpu_ids)
2396		return NULL;
2397
2398	l = __htab_map_lookup_elem(map, key);
2399	if (l) {
2400		bpf_lru_node_set_ref(&l->lru_node);
2401		return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
2402	}
2403
2404	return NULL;
2405}
2406
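/* Called on the syscall lookup path for per-CPU hash maps: copy the value of
 * every possible CPU into the supplied buffer, one slot of
 * round_up(value_size, 8) bytes per CPU, back to back.
 */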
2407int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
2408{
2409	struct htab_elem *l;
2410	void __percpu *pptr;
2411	int ret = -ENOENT;
2412	int cpu, off = 0;
2413	u32 size;
2414
2415	/* per_cpu areas are zero-filled and bpf programs can only
2416	 * access 'value_size' of them, so copying rounded areas
2417	 * will not leak any kernel data
2418	 */
2419	size = round_up(map->value_size, 8);
2420	rcu_read_lock();
2421	l = __htab_map_lookup_elem(map, key);
2422	if (!l)
2423		goto out;
2424	/* We do not mark LRU map element here in order to not mess up
2425	 * eviction heuristics when user space does a map walk.
2426	 */
2427	pptr = htab_elem_get_ptr(l, map->key_size);
2428	for_each_possible_cpu(cpu) {
2429		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
2430		check_and_init_map_value(map, value + off);
2431		off += size;
2432	}
2433	ret = 0;
2434out:
2435	rcu_read_unlock();
2436	return ret;
2437}
2438
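/* Syscall-side update for (LRU) per-CPU hash maps; the incoming buffer is
 * expected to carry one value per possible CPU, in the same layout that
 * bpf_percpu_hash_copy() produces.
 */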
2439int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
2440			   u64 map_flags)
2441{
2442	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2443	int ret;
2444
2445	rcu_read_lock();
2446	if (htab_is_lru(htab))
2447		ret = __htab_lru_percpu_map_update_elem(map, key, value,
2448							map_flags, true);
2449	else
2450		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
2451						    true);
2452	rcu_read_unlock();
2453
2454	return ret;
2455}
2456
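/* Pretty-print one element (key plus a value per possible CPU) for the
 * BTF-typed bpffs representation of a pinned per-CPU hash map.
 */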
2457static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
2458					  struct seq_file *m)
2459{
2460	struct htab_elem *l;
2461	void __percpu *pptr;
2462	int cpu;
2463
2464	rcu_read_lock();
2465
2466	l = __htab_map_lookup_elem(map, key);
2467	if (!l) {
2468		rcu_read_unlock();
2469		return;
2470	}
2471
2472	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
2473	seq_puts(m, ": {\n");
2474	pptr = htab_elem_get_ptr(l, map->key_size);
2475	for_each_possible_cpu(cpu) {
2476		seq_printf(m, "\tcpu%d: ", cpu);
2477		btf_type_seq_show(map->btf, map->btf_value_type_id,
2478				  per_cpu_ptr(pptr, cpu), m);
2479		seq_putc(m, '\n');
2480	}
2481	seq_puts(m, "}\n");
2482
2483	rcu_read_unlock();
2484}
2485
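/* Operations for BPF_MAP_TYPE_PERCPU_HASH; the BPF_MAP_TYPE_LRU_PERCPU_HASH
 * table below reuses most of them but routes lookups, updates and deletes
 * through the LRU-aware paths.
 */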
2486const struct bpf_map_ops htab_percpu_map_ops = {
2487	.map_meta_equal = bpf_map_meta_equal,
2488	.map_alloc_check = htab_map_alloc_check,
2489	.map_alloc = htab_map_alloc,
2490	.map_free = htab_map_free,
2491	.map_get_next_key = htab_map_get_next_key,
2492	.map_lookup_elem = htab_percpu_map_lookup_elem,
2493	.map_gen_lookup = htab_percpu_map_gen_lookup,
2494	.map_lookup_and_delete_elem = htab_percpu_map_lookup_and_delete_elem,
2495	.map_update_elem = htab_percpu_map_update_elem,
2496	.map_delete_elem = htab_map_delete_elem,
2497	.map_lookup_percpu_elem = htab_percpu_map_lookup_percpu_elem,
2498	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
2499	.map_set_for_each_callback_args = map_set_for_each_callback_args,
2500	.map_for_each_callback = bpf_for_each_hash_elem,
2501	.map_mem_usage = htab_map_mem_usage,
2502	BATCH_OPS(htab_percpu),
2503	.map_btf_id = &htab_map_btf_ids[0],
2504	.iter_seq_info = &iter_seq_info,
2505};
2506
2507const struct bpf_map_ops htab_lru_percpu_map_ops = {
2508	.map_meta_equal = bpf_map_meta_equal,
2509	.map_alloc_check = htab_map_alloc_check,
2510	.map_alloc = htab_map_alloc,
2511	.map_free = htab_map_free,
2512	.map_get_next_key = htab_map_get_next_key,
2513	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
2514	.map_lookup_and_delete_elem = htab_lru_percpu_map_lookup_and_delete_elem,
2515	.map_update_elem = htab_lru_percpu_map_update_elem,
2516	.map_delete_elem = htab_lru_map_delete_elem,
2517	.map_lookup_percpu_elem = htab_lru_percpu_map_lookup_percpu_elem,
2518	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
2519	.map_set_for_each_callback_args = map_set_for_each_callback_args,
2520	.map_for_each_callback = bpf_for_each_hash_elem,
2521	.map_mem_usage = htab_map_mem_usage,
2522	BATCH_OPS(htab_lru_percpu),
2523	.map_btf_id = &htab_map_btf_ids[0],
2524	.iter_seq_info = &iter_seq_info,
2525};
2526
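/* fd-based hash maps (hash of maps) store a kernel pointer as the value
 * while user space passes u32 file descriptors in and gets ids back out, so
 * the declared value_size must be sizeof(u32) and every stored pointer has
 * to be released before the map itself is freed.
 */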
2527static int fd_htab_map_alloc_check(union bpf_attr *attr)
2528{
2529	if (attr->value_size != sizeof(u32))
2530		return -EINVAL;
2531	return htab_map_alloc_check(attr);
2532}
2533
2534static void fd_htab_map_free(struct bpf_map *map)
2535{
2536	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2537	struct hlist_nulls_node *n;
2538	struct hlist_nulls_head *head;
2539	struct htab_elem *l;
2540	int i;
2541
2542	for (i = 0; i < htab->n_buckets; i++) {
2543		head = select_bucket(htab, i);
2544
2545		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
2546			void *ptr = fd_htab_map_get_ptr(map, l);
2547
2548			map->ops->map_fd_put_ptr(map, ptr, false);
2549		}
2550	}
2551
2552	htab_map_free(map);
2553}
2554
2555/* only called from syscall */
2556int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
2557{
2558	void **ptr;
2559	int ret = 0;
2560
2561	if (!map->ops->map_fd_sys_lookup_elem)
2562		return -ENOTSUPP;
2563
2564	rcu_read_lock();
2565	ptr = htab_map_lookup_elem(map, key);
2566	if (ptr)
2567		*value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
2568	else
2569		ret = -ENOENT;
2570	rcu_read_unlock();
2571
2572	return ret;
2573}
2574
2575/* only called from syscall */
2576int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
2577				void *key, void *value, u64 map_flags)
2578{
2579	void *ptr;
2580	int ret;
2581	u32 ufd = *(u32 *)value;
2582
2583	ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
2584	if (IS_ERR(ptr))
2585		return PTR_ERR(ptr);
2586
2587	/* The htab bucket lock is always held during update operations in fd
2588	 * htab map, and the following rcu_read_lock() is only used to avoid
2589	 * the WARN_ON_ONCE in htab_map_update_elem().
2590	 */
2591	rcu_read_lock();
2592	ret = htab_map_update_elem(map, key, &ptr, map_flags);
2593	rcu_read_unlock();
2594	if (ret)
2595		map->ops->map_fd_put_ptr(map, ptr, false);
2596
2597	return ret;
2598}
2599
2600static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
2601{
2602	struct bpf_map *map, *inner_map_meta;
2603
2604	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
2605	if (IS_ERR(inner_map_meta))
2606		return inner_map_meta;
2607
2608	map = htab_map_alloc(attr);
2609	if (IS_ERR(map)) {
2610		bpf_map_meta_free(inner_map_meta);
2611		return map;
2612	}
2613
2614	map->inner_map_meta = inner_map_meta;
2615
2616	return map;
2617}
2618
2619static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
2620{
2621	struct bpf_map **inner_map = htab_map_lookup_elem(map, key);
2622
2623	if (!inner_map)
2624		return NULL;
2625
2626	return READ_ONCE(*inner_map);
2627}
2628
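/* Inlined lookup for hash-of-maps, roughly equivalent to
 * htab_of_map_lookup_elem() above: return the inner map pointer stored right
 * after the (rounded-up) key, or NULL if the key is not present.
 */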
2629static int htab_of_map_gen_lookup(struct bpf_map *map,
2630				  struct bpf_insn *insn_buf)
2631{
2632	struct bpf_insn *insn = insn_buf;
2633	const int ret = BPF_REG_0;
2634
2635	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
2636		     (void *(*)(struct bpf_map *map, void *key))NULL));
2637	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
2638	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
2639	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
2640				offsetof(struct htab_elem, key) +
2641				round_up(map->key_size, 8));
2642	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
2643
2644	return insn - insn_buf;
2645}
2646
2647static void htab_of_map_free(struct bpf_map *map)
2648{
2649	bpf_map_meta_free(map->inner_map_meta);
2650	fd_htab_map_free(map);
2651}
2652
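/* Operations for BPF_MAP_TYPE_HASH_OF_MAPS. */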
2653const struct bpf_map_ops htab_of_maps_map_ops = {
2654	.map_alloc_check = fd_htab_map_alloc_check,
2655	.map_alloc = htab_of_map_alloc,
2656	.map_free = htab_of_map_free,
2657	.map_get_next_key = htab_map_get_next_key,
2658	.map_lookup_elem = htab_of_map_lookup_elem,
2659	.map_delete_elem = htab_map_delete_elem,
2660	.map_fd_get_ptr = bpf_map_fd_get_ptr,
2661	.map_fd_put_ptr = bpf_map_fd_put_ptr,
2662	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
2663	.map_gen_lookup = htab_of_map_gen_lookup,
2664	.map_check_btf = map_check_no_btf,
2665	.map_mem_usage = htab_map_mem_usage,
2666	BATCH_OPS(htab),
2667	.map_btf_id = &htab_map_btf_ids[0],
2668};