kernel/bpf/hashtab.c (v4.10.11)
   1/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   2 * Copyright (c) 2016 Facebook
   3 *
   4 * This program is free software; you can redistribute it and/or
   5 * modify it under the terms of version 2 of the GNU General Public
   6 * License as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful, but
   9 * WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11 * General Public License for more details.
  12 */
  13#include <linux/bpf.h>
  14#include <linux/jhash.h>
  15#include <linux/filter.h>
  16#include "percpu_freelist.h"
  17#include "bpf_lru_list.h"
  18
  19struct bucket {
  20	struct hlist_head head;
  21	raw_spinlock_t lock;
  22};
  23
  24struct bpf_htab {
  25	struct bpf_map map;
  26	struct bucket *buckets;
  27	void *elems;
  28	union {
  29		struct pcpu_freelist freelist;
  30		struct bpf_lru lru;
  31	};
  32	void __percpu *extra_elems;
  33	atomic_t count;	/* number of elements in this hashtable */
  34	u32 n_buckets;	/* number of hash buckets */
  35	u32 elem_size;	/* size of each element in bytes */
  36};
  37
  38enum extra_elem_state {
  39	HTAB_NOT_AN_EXTRA_ELEM = 0,
  40	HTAB_EXTRA_ELEM_FREE,
  41	HTAB_EXTRA_ELEM_USED
  42};
  43
  44/* each htab element is struct htab_elem + key + value */
  45struct htab_elem {
  46	union {
  47		struct hlist_node hash_node;
  48		struct bpf_htab *htab;
  49		struct pcpu_freelist_node fnode;
  50	};
  51	union {
  52		struct rcu_head rcu;
  53		enum extra_elem_state state;
  54		struct bpf_lru_node lru_node;
  55	};
  56	u32 hash;
  57	char key[0] __aligned(8);
  58};
  59
  60static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
  61
  62static bool htab_is_lru(const struct bpf_htab *htab)
  63{
  64	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
  65		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
  66}
  67
  68static bool htab_is_percpu(const struct bpf_htab *htab)
  69{
  70	return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
  71		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
  72}
  73
  74static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
  75				     void __percpu *pptr)
  76{
  77	*(void __percpu **)(l->key + key_size) = pptr;
  78}
  79
  80static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
  81{
  82	return *(void __percpu **)(l->key + key_size);
  83}
  84
  85static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
  86{
  87	return (struct htab_elem *) (htab->elems + i * htab->elem_size);
  88}
  89
  90static void htab_free_elems(struct bpf_htab *htab)
  91{
  92	int i;
  93
  94	if (!htab_is_percpu(htab))
  95		goto free_elems;
  96
  97	for (i = 0; i < htab->map.max_entries; i++) {
  98		void __percpu *pptr;
  99
 100		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
 101					 htab->map.key_size);
 102		free_percpu(pptr);
 103	}
 104free_elems:
 105	bpf_map_area_free(htab->elems);
 106}
 107
 108static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
 109					  u32 hash)
 110{
 111	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
 112	struct htab_elem *l;
 113
 114	if (node) {
 115		l = container_of(node, struct htab_elem, lru_node);
 116		memcpy(l->key, key, htab->map.key_size);
 117		return l;
 118	}
 119
 120	return NULL;
 121}
 122
 123static int prealloc_init(struct bpf_htab *htab)
 124{
 125	int err = -ENOMEM, i;
 126
 127	htab->elems = bpf_map_area_alloc(htab->elem_size *
 128					 htab->map.max_entries);
 129	if (!htab->elems)
 130		return -ENOMEM;
 131
 132	if (!htab_is_percpu(htab))
 133		goto skip_percpu_elems;
 134
 135	for (i = 0; i < htab->map.max_entries; i++) {
 136		u32 size = round_up(htab->map.value_size, 8);
 137		void __percpu *pptr;
 138
 139		pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
 140		if (!pptr)
 141			goto free_elems;
 142		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
 143				  pptr);
 144	}
 145
 146skip_percpu_elems:
 147	if (htab_is_lru(htab))
 148		err = bpf_lru_init(&htab->lru,
 149				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
 150				   offsetof(struct htab_elem, hash) -
 151				   offsetof(struct htab_elem, lru_node),
 152				   htab_lru_map_delete_node,
 153				   htab);
 154	else
 155		err = pcpu_freelist_init(&htab->freelist);
 156
 157	if (err)
 158		goto free_elems;
 159
 160	if (htab_is_lru(htab))
 161		bpf_lru_populate(&htab->lru, htab->elems,
 162				 offsetof(struct htab_elem, lru_node),
 163				 htab->elem_size, htab->map.max_entries);
 164	else
 165		pcpu_freelist_populate(&htab->freelist, htab->elems,
 166				       htab->elem_size, htab->map.max_entries);
 167
 168	return 0;
 169
 170free_elems:
 171	htab_free_elems(htab);
 172	return err;
 173}
 174
 175static void prealloc_destroy(struct bpf_htab *htab)
 176{
 177	htab_free_elems(htab);
 178
 179	if (htab_is_lru(htab))
 180		bpf_lru_destroy(&htab->lru);
 181	else
 182		pcpu_freelist_destroy(&htab->freelist);
 183}
 184
 185static int alloc_extra_elems(struct bpf_htab *htab)
 186{
 187	void __percpu *pptr;
 188	int cpu;
 189
 190	pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN);
 191	if (!pptr)
 192		return -ENOMEM;
 193
 194	for_each_possible_cpu(cpu) {
 195		((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state =
 196			HTAB_EXTRA_ELEM_FREE;
 197	}
 198	htab->extra_elems = pptr;
 199	return 0;
 200}
 201
 202/* Called from syscall */
 203static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 204{
 205	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 206		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
 207	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
 208		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
 209	/* percpu_lru means each cpu has its own LRU list.
 210	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
 211	 * the map's value itself is percpu.  percpu_lru has
 212	 * nothing to do with the map's value.
 213	 */
 214	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 215	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
 216	struct bpf_htab *htab;
 217	int err, i;
 218	u64 cost;
 219
 220	if (lru && !capable(CAP_SYS_ADMIN))
  221		/* LRU implementation is much more complicated than other
 222		 * maps.  Hence, limit to CAP_SYS_ADMIN for now.
 223		 */
 224		return ERR_PTR(-EPERM);
 225
 226	if (attr->map_flags & ~(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU))
 227		/* reserved bits should not be used */
 228		return ERR_PTR(-EINVAL);
 229
 230	if (!lru && percpu_lru)
 231		return ERR_PTR(-EINVAL);
 232
 233	if (lru && !prealloc)
 234		return ERR_PTR(-ENOTSUPP);
 235
 236	htab = kzalloc(sizeof(*htab), GFP_USER);
 237	if (!htab)
 238		return ERR_PTR(-ENOMEM);
 239
 240	/* mandatory map attributes */
 241	htab->map.map_type = attr->map_type;
 242	htab->map.key_size = attr->key_size;
 243	htab->map.value_size = attr->value_size;
 244	htab->map.max_entries = attr->max_entries;
 245	htab->map.map_flags = attr->map_flags;
 246
 247	/* check sanity of attributes.
 248	 * value_size == 0 may be allowed in the future to use map as a set
 249	 */
 250	err = -EINVAL;
 251	if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
 252	    htab->map.value_size == 0)
 253		goto free_htab;
 254
 255	if (percpu_lru) {
 256		/* ensure each CPU's lru list has >=1 elements.
  257		 * since we are at it, make each lru list have the same
 258		 * number of elements.
 259		 */
 260		htab->map.max_entries = roundup(attr->max_entries,
 261						num_possible_cpus());
 262		if (htab->map.max_entries < attr->max_entries)
 263			htab->map.max_entries = rounddown(attr->max_entries,
 264							  num_possible_cpus());
 265	}
 266
 267	/* hash table size must be power of 2 */
 268	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
 269
 270	err = -E2BIG;
 271	if (htab->map.key_size > MAX_BPF_STACK)
 272		/* eBPF programs initialize keys on stack, so they cannot be
 273		 * larger than max stack size
 274		 */
 275		goto free_htab;
 276
 277	if (htab->map.value_size >= KMALLOC_MAX_SIZE -
 278	    MAX_BPF_STACK - sizeof(struct htab_elem))
 279		/* if value_size is bigger, the user space won't be able to
 280		 * access the elements via bpf syscall. This check also makes
 281		 * sure that the elem_size doesn't overflow and it's
 282		 * kmalloc-able later in htab_map_update_elem()
 283		 */
 284		goto free_htab;
 285
 286	if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
 287		/* make sure the size for pcpu_alloc() is reasonable */
 288		goto free_htab;
 289
 290	htab->elem_size = sizeof(struct htab_elem) +
 291			  round_up(htab->map.key_size, 8);
 292	if (percpu)
 293		htab->elem_size += sizeof(void *);
 294	else
 295		htab->elem_size += round_up(htab->map.value_size, 8);
 296
 297	/* prevent zero size kmalloc and check for u32 overflow */
 298	if (htab->n_buckets == 0 ||
 299	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
 300		goto free_htab;
 301
 302	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
 303	       (u64) htab->elem_size * htab->map.max_entries;
 304
 305	if (percpu)
 306		cost += (u64) round_up(htab->map.value_size, 8) *
 307			num_possible_cpus() * htab->map.max_entries;
 308	else
 309	       cost += (u64) htab->elem_size * num_possible_cpus();
 310
 311	if (cost >= U32_MAX - PAGE_SIZE)
 312		/* make sure page count doesn't overflow */
 313		goto free_htab;
 314
 315	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 316
 317	/* if map size is larger than memlock limit, reject it early */
 318	err = bpf_map_precharge_memlock(htab->map.pages);
 319	if (err)
 320		goto free_htab;
 321
 322	err = -ENOMEM;
 323	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
 324					   sizeof(struct bucket));
 325	if (!htab->buckets)
 326		goto free_htab;
 327
 328	for (i = 0; i < htab->n_buckets; i++) {
 329		INIT_HLIST_HEAD(&htab->buckets[i].head);
 330		raw_spin_lock_init(&htab->buckets[i].lock);
 331	}
 332
 333	if (!percpu && !lru) {
 334		/* lru itself can remove the least used element, so
 335		 * there is no need for an extra elem during map_update.
 336		 */
 337		err = alloc_extra_elems(htab);
 338		if (err)
 339			goto free_buckets;
 340	}
 341
 342	if (prealloc) {
 343		err = prealloc_init(htab);
 344		if (err)
 345			goto free_extra_elems;
 346	}
 347
 348	return &htab->map;
 349
 350free_extra_elems:
 351	free_percpu(htab->extra_elems);
 352free_buckets:
 353	bpf_map_area_free(htab->buckets);
 354free_htab:
 355	kfree(htab);
 356	return ERR_PTR(err);
 357}
 358
 359static inline u32 htab_map_hash(const void *key, u32 key_len)
 360{
 361	return jhash(key, key_len, 0);
 362}
 363
 364static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
 365{
 366	return &htab->buckets[hash & (htab->n_buckets - 1)];
 367}
 368
 369static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
 370{
 371	return &__select_bucket(htab, hash)->head;
 372}
 373
 374static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
 375					 void *key, u32 key_size)
 376{
 377	struct htab_elem *l;
 378
 379	hlist_for_each_entry_rcu(l, head, hash_node)
 380		if (l->hash == hash && !memcmp(&l->key, key, key_size))
 381			return l;
 382
 383	return NULL;
 384}
 385
 386/* Called from syscall or from eBPF program */
 387static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
 388{
 389	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 390	struct hlist_head *head;
 391	struct htab_elem *l;
 392	u32 hash, key_size;
 393
 394	/* Must be called with rcu_read_lock. */
 395	WARN_ON_ONCE(!rcu_read_lock_held());
 396
 397	key_size = map->key_size;
 398
 399	hash = htab_map_hash(key, key_size);
 400
 401	head = select_bucket(htab, hash);
 402
 403	l = lookup_elem_raw(head, hash, key, key_size);
 404
 405	return l;
 406}
 407
 408static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
 409{
 410	struct htab_elem *l = __htab_map_lookup_elem(map, key);
 411
 412	if (l)
 413		return l->key + round_up(map->key_size, 8);
 414
 415	return NULL;
 416}
 417
 418static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
 419{
 420	struct htab_elem *l = __htab_map_lookup_elem(map, key);
 421
 422	if (l) {
 423		bpf_lru_node_set_ref(&l->lru_node);
 424		return l->key + round_up(map->key_size, 8);
 425	}
 426
 427	return NULL;
 428}
 429
 430/* It is called from the bpf_lru_list when the LRU needs to delete
 431 * older elements from the htab.
 432 */
 433static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 434{
 435	struct bpf_htab *htab = (struct bpf_htab *)arg;
 436	struct htab_elem *l, *tgt_l;
 437	struct hlist_head *head;
 438	unsigned long flags;
 439	struct bucket *b;
 440
 441	tgt_l = container_of(node, struct htab_elem, lru_node);
 442	b = __select_bucket(htab, tgt_l->hash);
 443	head = &b->head;
 444
 445	raw_spin_lock_irqsave(&b->lock, flags);
 446
 447	hlist_for_each_entry_rcu(l, head, hash_node)
 448		if (l == tgt_l) {
 449			hlist_del_rcu(&l->hash_node);
 450			break;
 451		}
 452
 453	raw_spin_unlock_irqrestore(&b->lock, flags);
 454
 455	return l == tgt_l;
 456}
 457
 458/* Called from syscall */
 459static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 460{
 461	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 462	struct hlist_head *head;
 463	struct htab_elem *l, *next_l;
 464	u32 hash, key_size;
 465	int i;
 466
 467	WARN_ON_ONCE(!rcu_read_lock_held());
 468
 469	key_size = map->key_size;
 470
 471	hash = htab_map_hash(key, key_size);
 472
 473	head = select_bucket(htab, hash);
 474
 475	/* lookup the key */
 476	l = lookup_elem_raw(head, hash, key, key_size);
 477
 478	if (!l) {
 479		i = 0;
 480		goto find_first_elem;
 481	}
 482
 483	/* key was found, get next key in the same bucket */
 484	next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
 485				  struct htab_elem, hash_node);
 486
 487	if (next_l) {
 488		/* if next elem in this hash list is non-zero, just return it */
 489		memcpy(next_key, next_l->key, key_size);
 490		return 0;
 491	}
 492
 493	/* no more elements in this hash list, go to the next bucket */
 494	i = hash & (htab->n_buckets - 1);
 495	i++;
 496
 497find_first_elem:
 498	/* iterate over buckets */
 499	for (; i < htab->n_buckets; i++) {
 500		head = select_bucket(htab, i);
 501
 502		/* pick first element in the bucket */
 503		next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
 504					  struct htab_elem, hash_node);
 505		if (next_l) {
 506			/* if it's not empty, just return it */
 507			memcpy(next_key, next_l->key, key_size);
 508			return 0;
 509		}
 510	}
 511
 512	/* iterated over all buckets and all elements */
 513	return -ENOENT;
 514}
 515
 516static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
 517{
 518	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
 519		free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
 520	kfree(l);
 521}
 522
 523static void htab_elem_free_rcu(struct rcu_head *head)
 524{
 525	struct htab_elem *l = container_of(head, struct htab_elem, rcu);
 526	struct bpf_htab *htab = l->htab;
 527
 528	/* must increment bpf_prog_active to avoid kprobe+bpf triggering while
 529	 * we're calling kfree, otherwise deadlock is possible if kprobes
 530	 * are placed somewhere inside of slub
 531	 */
 532	preempt_disable();
 533	__this_cpu_inc(bpf_prog_active);
 534	htab_elem_free(htab, l);
 535	__this_cpu_dec(bpf_prog_active);
 536	preempt_enable();
 537}
 538
 539static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 540{
 541	if (l->state == HTAB_EXTRA_ELEM_USED) {
 542		l->state = HTAB_EXTRA_ELEM_FREE;
 543		return;
 544	}
 545
 546	if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) {
 547		pcpu_freelist_push(&htab->freelist, &l->fnode);
 548	} else {
 549		atomic_dec(&htab->count);
 550		l->htab = htab;
 551		call_rcu(&l->rcu, htab_elem_free_rcu);
 552	}
 553}
 554
 555static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
 556			    void *value, bool onallcpus)
 557{
 558	if (!onallcpus) {
 559		/* copy true value_size bytes */
 560		memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
 561	} else {
 562		u32 size = round_up(htab->map.value_size, 8);
 563		int off = 0, cpu;
 564
 565		for_each_possible_cpu(cpu) {
 566			bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
 567					value + off, size);
 568			off += size;
 569		}
 570	}
 571}
 572
 573static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 574					 void *value, u32 key_size, u32 hash,
 575					 bool percpu, bool onallcpus,
 576					 bool old_elem_exists)
 577{
 578	u32 size = htab->map.value_size;
 579	bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC);
 580	struct htab_elem *l_new;
 581	void __percpu *pptr;
 582	int err = 0;
 583
 584	if (prealloc) {
 585		l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist);
 586		if (!l_new)
 587			err = -E2BIG;
 588	} else {
 589		if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
 590			atomic_dec(&htab->count);
 591			err = -E2BIG;
 592		} else {
 593			l_new = kmalloc(htab->elem_size,
 594					GFP_ATOMIC | __GFP_NOWARN);
 595			if (!l_new)
 596				return ERR_PTR(-ENOMEM);
 597		}
 598	}
 599
 600	if (err) {
 601		if (!old_elem_exists)
 602			return ERR_PTR(err);
 603
 604		/* if we're updating the existing element and the hash table
 605		 * is full, use per-cpu extra elems
 606		 */
 607		l_new = this_cpu_ptr(htab->extra_elems);
 608		if (l_new->state != HTAB_EXTRA_ELEM_FREE)
 609			return ERR_PTR(-E2BIG);
 610		l_new->state = HTAB_EXTRA_ELEM_USED;
 611	} else {
 612		l_new->state = HTAB_NOT_AN_EXTRA_ELEM;
 613	}
 614
 615	memcpy(l_new->key, key, key_size);
 616	if (percpu) {
 617		/* round up value_size to 8 bytes */
 618		size = round_up(size, 8);
 619
 620		if (prealloc) {
 621			pptr = htab_elem_get_ptr(l_new, key_size);
 622		} else {
 623			/* alloc_percpu zero-fills */
 624			pptr = __alloc_percpu_gfp(size, 8,
 625						  GFP_ATOMIC | __GFP_NOWARN);
 626			if (!pptr) {
 627				kfree(l_new);
 628				return ERR_PTR(-ENOMEM);
 629			}
 630		}
 631
 632		pcpu_copy_value(htab, pptr, value, onallcpus);
 633
 634		if (!prealloc)
 635			htab_elem_set_ptr(l_new, key_size, pptr);
 636	} else {
 637		memcpy(l_new->key + round_up(key_size, 8), value, size);
 638	}
 639
 640	l_new->hash = hash;
 641	return l_new;
 642}
 643
 644static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
 645		       u64 map_flags)
 646{
 647	if (l_old && map_flags == BPF_NOEXIST)
 648		/* elem already exists */
 649		return -EEXIST;
 650
 651	if (!l_old && map_flags == BPF_EXIST)
 652		/* elem doesn't exist, cannot update it */
 653		return -ENOENT;
 654
 655	return 0;
 656}
 657
 658/* Called from syscall or from eBPF program */
 659static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 660				u64 map_flags)
 661{
 662	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 663	struct htab_elem *l_new = NULL, *l_old;
 664	struct hlist_head *head;
 665	unsigned long flags;
 666	struct bucket *b;
 667	u32 key_size, hash;
 668	int ret;
 669
 670	if (unlikely(map_flags > BPF_EXIST))
 671		/* unknown flags */
 672		return -EINVAL;
 673
 674	WARN_ON_ONCE(!rcu_read_lock_held());
 675
 676	key_size = map->key_size;
 677
 678	hash = htab_map_hash(key, key_size);
 679
 680	b = __select_bucket(htab, hash);
 681	head = &b->head;
 682
 683	/* bpf_map_update_elem() can be called in_irq() */
 684	raw_spin_lock_irqsave(&b->lock, flags);
 685
 686	l_old = lookup_elem_raw(head, hash, key, key_size);
 687
 688	ret = check_flags(htab, l_old, map_flags);
 689	if (ret)
 690		goto err;
 691
 692	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
 693				!!l_old);
 694	if (IS_ERR(l_new)) {
 695		/* all pre-allocated elements are in use or memory exhausted */
 696		ret = PTR_ERR(l_new);
 697		goto err;
 698	}
 699
 700	/* add new element to the head of the list, so that
 701	 * concurrent search will find it before old elem
 702	 */
 703	hlist_add_head_rcu(&l_new->hash_node, head);
 704	if (l_old) {
 705		hlist_del_rcu(&l_old->hash_node);
 706		free_htab_elem(htab, l_old);
 707	}
 708	ret = 0;
 709err:
 710	raw_spin_unlock_irqrestore(&b->lock, flags);
 711	return ret;
 712}
 713
 714static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 715				    u64 map_flags)
 716{
 717	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 718	struct htab_elem *l_new, *l_old = NULL;
 719	struct hlist_head *head;
 720	unsigned long flags;
 721	struct bucket *b;
 722	u32 key_size, hash;
 723	int ret;
 724
 725	if (unlikely(map_flags > BPF_EXIST))
 726		/* unknown flags */
 727		return -EINVAL;
 728
 729	WARN_ON_ONCE(!rcu_read_lock_held());
 730
 731	key_size = map->key_size;
 732
 733	hash = htab_map_hash(key, key_size);
 734
 735	b = __select_bucket(htab, hash);
 736	head = &b->head;
 737
 738	/* For LRU, we need to alloc before taking bucket's
 739	 * spinlock because getting free nodes from LRU may need
 740	 * to remove older elements from htab and this removal
 741	 * operation will need a bucket lock.
 742	 */
 743	l_new = prealloc_lru_pop(htab, key, hash);
 744	if (!l_new)
 745		return -ENOMEM;
 746	memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);
 747
 748	/* bpf_map_update_elem() can be called in_irq() */
 749	raw_spin_lock_irqsave(&b->lock, flags);
 750
 751	l_old = lookup_elem_raw(head, hash, key, key_size);
 752
 753	ret = check_flags(htab, l_old, map_flags);
 754	if (ret)
 755		goto err;
 756
 757	/* add new element to the head of the list, so that
 758	 * concurrent search will find it before old elem
 759	 */
 760	hlist_add_head_rcu(&l_new->hash_node, head);
 761	if (l_old) {
 762		bpf_lru_node_set_ref(&l_new->lru_node);
 763		hlist_del_rcu(&l_old->hash_node);
 764	}
 765	ret = 0;
 766
 767err:
 768	raw_spin_unlock_irqrestore(&b->lock, flags);
 769
 770	if (ret)
 771		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
 772	else if (l_old)
 773		bpf_lru_push_free(&htab->lru, &l_old->lru_node);
 774
 775	return ret;
 776}
 777
 778static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 779					 void *value, u64 map_flags,
 780					 bool onallcpus)
 781{
 782	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 783	struct htab_elem *l_new = NULL, *l_old;
 784	struct hlist_head *head;
 785	unsigned long flags;
 786	struct bucket *b;
 787	u32 key_size, hash;
 788	int ret;
 789
 790	if (unlikely(map_flags > BPF_EXIST))
 791		/* unknown flags */
 792		return -EINVAL;
 793
 794	WARN_ON_ONCE(!rcu_read_lock_held());
 795
 796	key_size = map->key_size;
 797
 798	hash = htab_map_hash(key, key_size);
 799
 800	b = __select_bucket(htab, hash);
 801	head = &b->head;
 802
 803	/* bpf_map_update_elem() can be called in_irq() */
 804	raw_spin_lock_irqsave(&b->lock, flags);
 805
 806	l_old = lookup_elem_raw(head, hash, key, key_size);
 807
 808	ret = check_flags(htab, l_old, map_flags);
 809	if (ret)
 810		goto err;
 811
 812	if (l_old) {
 813		/* per-cpu hash map can update value in-place */
 814		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
 815				value, onallcpus);
 816	} else {
 817		l_new = alloc_htab_elem(htab, key, value, key_size,
 818					hash, true, onallcpus, false);
 819		if (IS_ERR(l_new)) {
 820			ret = PTR_ERR(l_new);
 821			goto err;
 822		}
 823		hlist_add_head_rcu(&l_new->hash_node, head);
 824	}
 825	ret = 0;
 826err:
 827	raw_spin_unlock_irqrestore(&b->lock, flags);
 828	return ret;
 829}
 830
 831static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 832					     void *value, u64 map_flags,
 833					     bool onallcpus)
 834{
 835	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 836	struct htab_elem *l_new = NULL, *l_old;
 837	struct hlist_head *head;
 838	unsigned long flags;
 839	struct bucket *b;
 840	u32 key_size, hash;
 841	int ret;
 842
 843	if (unlikely(map_flags > BPF_EXIST))
 844		/* unknown flags */
 845		return -EINVAL;
 846
 847	WARN_ON_ONCE(!rcu_read_lock_held());
 848
 849	key_size = map->key_size;
 850
 851	hash = htab_map_hash(key, key_size);
 852
 853	b = __select_bucket(htab, hash);
 854	head = &b->head;
 855
 856	/* For LRU, we need to alloc before taking bucket's
 857	 * spinlock because LRU's elem alloc may need
 858	 * to remove older elem from htab and this removal
 859	 * operation will need a bucket lock.
 860	 */
 861	if (map_flags != BPF_EXIST) {
 862		l_new = prealloc_lru_pop(htab, key, hash);
 863		if (!l_new)
 864			return -ENOMEM;
 865	}
 866
 867	/* bpf_map_update_elem() can be called in_irq() */
 868	raw_spin_lock_irqsave(&b->lock, flags);
 869
 870	l_old = lookup_elem_raw(head, hash, key, key_size);
 871
 872	ret = check_flags(htab, l_old, map_flags);
 873	if (ret)
 874		goto err;
 875
 876	if (l_old) {
 877		bpf_lru_node_set_ref(&l_old->lru_node);
 878
 879		/* per-cpu hash map can update value in-place */
 880		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
 881				value, onallcpus);
 882	} else {
 883		pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
 884				value, onallcpus);
 885		hlist_add_head_rcu(&l_new->hash_node, head);
 886		l_new = NULL;
 887	}
 888	ret = 0;
 889err:
 890	raw_spin_unlock_irqrestore(&b->lock, flags);
 891	if (l_new)
 892		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
 893	return ret;
 894}
 895
 896static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 897				       void *value, u64 map_flags)
 898{
 899	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
 900}
 901
 902static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 903					   void *value, u64 map_flags)
 904{
 905	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
 906						 false);
 907}
 908
 909/* Called from syscall or from eBPF program */
 910static int htab_map_delete_elem(struct bpf_map *map, void *key)
 911{
 912	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 913	struct hlist_head *head;
 914	struct bucket *b;
 915	struct htab_elem *l;
 916	unsigned long flags;
 917	u32 hash, key_size;
 918	int ret = -ENOENT;
 919
 920	WARN_ON_ONCE(!rcu_read_lock_held());
 921
 922	key_size = map->key_size;
 923
 924	hash = htab_map_hash(key, key_size);
 925	b = __select_bucket(htab, hash);
 926	head = &b->head;
 927
 928	raw_spin_lock_irqsave(&b->lock, flags);
 929
 930	l = lookup_elem_raw(head, hash, key, key_size);
 931
 932	if (l) {
 933		hlist_del_rcu(&l->hash_node);
 934		free_htab_elem(htab, l);
 935		ret = 0;
 936	}
 937
 938	raw_spin_unlock_irqrestore(&b->lock, flags);
 939	return ret;
 940}
 941
 942static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
 943{
 944	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 945	struct hlist_head *head;
 946	struct bucket *b;
 947	struct htab_elem *l;
 948	unsigned long flags;
 949	u32 hash, key_size;
 950	int ret = -ENOENT;
 951
 952	WARN_ON_ONCE(!rcu_read_lock_held());
 953
 954	key_size = map->key_size;
 955
 956	hash = htab_map_hash(key, key_size);
 957	b = __select_bucket(htab, hash);
 958	head = &b->head;
 959
 960	raw_spin_lock_irqsave(&b->lock, flags);
 961
 962	l = lookup_elem_raw(head, hash, key, key_size);
 963
 964	if (l) {
 965		hlist_del_rcu(&l->hash_node);
 966		ret = 0;
 967	}
 968
 969	raw_spin_unlock_irqrestore(&b->lock, flags);
 970	if (l)
 971		bpf_lru_push_free(&htab->lru, &l->lru_node);
 972	return ret;
 973}
 974
 975static void delete_all_elements(struct bpf_htab *htab)
 976{
 977	int i;
 978
 979	for (i = 0; i < htab->n_buckets; i++) {
 980		struct hlist_head *head = select_bucket(htab, i);
 981		struct hlist_node *n;
 982		struct htab_elem *l;
 983
 984		hlist_for_each_entry_safe(l, n, head, hash_node) {
 985			hlist_del_rcu(&l->hash_node);
 986			if (l->state != HTAB_EXTRA_ELEM_USED)
 987				htab_elem_free(htab, l);
 988		}
 989	}
 990}
 991/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
 992static void htab_map_free(struct bpf_map *map)
 993{
 994	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 995
 996	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
 997	 * so the programs (can be more than one that used this map) were
 998	 * disconnected from events. Wait for outstanding critical sections in
 999	 * these programs to complete
1000	 */
1001	synchronize_rcu();
1002
1003	/* some of free_htab_elem() callbacks for elements of this map may
1004	 * not have executed. Wait for them.
1005	 */
1006	rcu_barrier();
1007	if (htab->map.map_flags & BPF_F_NO_PREALLOC)
1008		delete_all_elements(htab);
1009	else
1010		prealloc_destroy(htab);
1011
1012	free_percpu(htab->extra_elems);
1013	bpf_map_area_free(htab->buckets);
1014	kfree(htab);
1015}
1016
1017static const struct bpf_map_ops htab_ops = {
1018	.map_alloc = htab_map_alloc,
1019	.map_free = htab_map_free,
1020	.map_get_next_key = htab_map_get_next_key,
1021	.map_lookup_elem = htab_map_lookup_elem,
1022	.map_update_elem = htab_map_update_elem,
1023	.map_delete_elem = htab_map_delete_elem,
1024};
1025
1026static struct bpf_map_type_list htab_type __read_mostly = {
1027	.ops = &htab_ops,
1028	.type = BPF_MAP_TYPE_HASH,
1029};
1030
1031static const struct bpf_map_ops htab_lru_ops = {
1032	.map_alloc = htab_map_alloc,
1033	.map_free = htab_map_free,
1034	.map_get_next_key = htab_map_get_next_key,
1035	.map_lookup_elem = htab_lru_map_lookup_elem,
1036	.map_update_elem = htab_lru_map_update_elem,
1037	.map_delete_elem = htab_lru_map_delete_elem,
1038};
1039
1040static struct bpf_map_type_list htab_lru_type __read_mostly = {
1041	.ops = &htab_lru_ops,
1042	.type = BPF_MAP_TYPE_LRU_HASH,
1043};
1044
1045/* Called from eBPF program */
1046static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
1047{
1048	struct htab_elem *l = __htab_map_lookup_elem(map, key);
1049
1050	if (l)
1051		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
1052	else
1053		return NULL;
1054}
1055
1056static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
1057{
1058	struct htab_elem *l = __htab_map_lookup_elem(map, key);
1059
1060	if (l) {
1061		bpf_lru_node_set_ref(&l->lru_node);
1062		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
1063	}
1064
1065	return NULL;
1066}
1067
1068int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
1069{
1070	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1071	struct htab_elem *l;
1072	void __percpu *pptr;
1073	int ret = -ENOENT;
1074	int cpu, off = 0;
1075	u32 size;
1076
1077	/* per_cpu areas are zero-filled and bpf programs can only
1078	 * access 'value_size' of them, so copying rounded areas
1079	 * will not leak any kernel data
1080	 */
1081	size = round_up(map->value_size, 8);
1082	rcu_read_lock();
1083	l = __htab_map_lookup_elem(map, key);
1084	if (!l)
1085		goto out;
1086	if (htab_is_lru(htab))
1087		bpf_lru_node_set_ref(&l->lru_node);
1088	pptr = htab_elem_get_ptr(l, map->key_size);
1089	for_each_possible_cpu(cpu) {
1090		bpf_long_memcpy(value + off,
1091				per_cpu_ptr(pptr, cpu), size);
1092		off += size;
1093	}
1094	ret = 0;
1095out:
1096	rcu_read_unlock();
1097	return ret;
1098}
1099
1100int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
1101			   u64 map_flags)
1102{
1103	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1104	int ret;
1105
1106	rcu_read_lock();
1107	if (htab_is_lru(htab))
1108		ret = __htab_lru_percpu_map_update_elem(map, key, value,
1109							map_flags, true);
1110	else
1111		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
1112						    true);
1113	rcu_read_unlock();
1114
1115	return ret;
1116}
1117
1118static const struct bpf_map_ops htab_percpu_ops = {
1119	.map_alloc = htab_map_alloc,
1120	.map_free = htab_map_free,
1121	.map_get_next_key = htab_map_get_next_key,
1122	.map_lookup_elem = htab_percpu_map_lookup_elem,
1123	.map_update_elem = htab_percpu_map_update_elem,
1124	.map_delete_elem = htab_map_delete_elem,
1125};
1126
1127static struct bpf_map_type_list htab_percpu_type __read_mostly = {
1128	.ops = &htab_percpu_ops,
1129	.type = BPF_MAP_TYPE_PERCPU_HASH,
1130};
1131
1132static const struct bpf_map_ops htab_lru_percpu_ops = {
1133	.map_alloc = htab_map_alloc,
1134	.map_free = htab_map_free,
1135	.map_get_next_key = htab_map_get_next_key,
1136	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
1137	.map_update_elem = htab_lru_percpu_map_update_elem,
1138	.map_delete_elem = htab_lru_map_delete_elem,
1139};
1140
1141static struct bpf_map_type_list htab_lru_percpu_type __read_mostly = {
1142	.ops = &htab_lru_percpu_ops,
1143	.type = BPF_MAP_TYPE_LRU_PERCPU_HASH,
1144};
1145
1146static int __init register_htab_map(void)
1147{
1148	bpf_register_map_type(&htab_type);
1149	bpf_register_map_type(&htab_percpu_type);
1150	bpf_register_map_type(&htab_lru_type);
1151	bpf_register_map_type(&htab_lru_percpu_type);
1152	return 0;
1153}
1154late_initcall(register_htab_map);
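
The ops tables registered above are reached from the generic bpf(2) syscall code: BPF_MAP_CREATE ends up in htab_map_alloc(), and BPF_MAP_UPDATE_ELEM on such a map ends up in the matching *_update_elem() callback. As a rough illustration only (not part of hashtab.c; the bpf() wrapper and the chosen key/value sizes are assumptions), a minimal userspace sketch that creates a plain BPF_MAP_TYPE_HASH map and inserts one element could look like this:

/* Minimal userspace sketch (not part of hashtab.c): create a plain hash map
 * and insert one element through the bpf(2) syscall.  The bpf() wrapper and
 * the key/value sizes below are illustrative assumptions.
 */
#define _GNU_SOURCE
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

int main(void)
{
	union bpf_attr attr;
	__u32 key = 1;
	__u64 value = 42;
	int map_fd, err;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_HASH;	/* handled by htab_ops above */
	attr.key_size = sizeof(key);
	attr.value_size = sizeof(value);
	attr.max_entries = 1024;
	attr.map_flags = BPF_F_NO_PREALLOC;	/* take the kmalloc path instead of the freelist */

	map_fd = bpf(BPF_MAP_CREATE, &attr, sizeof(attr));	/* -> htab_map_alloc() */
	if (map_fd < 0)
		return 1;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = (__u64)(unsigned long)&key;
	attr.value = (__u64)(unsigned long)&value;
	attr.flags = BPF_NOEXIST;	/* fail with -EEXIST if the key already exists */

	err = bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));	/* -> htab_map_update_elem() */
	close(map_fd);
	return err ? 1 : 0;
}

With BPF_F_NO_PREALLOC cleared (the default), the same update instead pops a preallocated element from the per-cpu freelist set up in prealloc_init().
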
kernel/bpf/hashtab.c (v4.17)
   1/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   2 * Copyright (c) 2016 Facebook
   3 *
   4 * This program is free software; you can redistribute it and/or
   5 * modify it under the terms of version 2 of the GNU General Public
   6 * License as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful, but
   9 * WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11 * General Public License for more details.
  12 */
  13#include <linux/bpf.h>
  14#include <linux/jhash.h>
  15#include <linux/filter.h>
  16#include <linux/rculist_nulls.h>
  17#include "percpu_freelist.h"
  18#include "bpf_lru_list.h"
  19#include "map_in_map.h"
  20
  21#define HTAB_CREATE_FLAG_MASK						\
  22	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE |	\
  23	 BPF_F_RDONLY | BPF_F_WRONLY)
  24
  25struct bucket {
  26	struct hlist_nulls_head head;
  27	raw_spinlock_t lock;
  28};
  29
  30struct bpf_htab {
  31	struct bpf_map map;
  32	struct bucket *buckets;
  33	void *elems;
  34	union {
  35		struct pcpu_freelist freelist;
  36		struct bpf_lru lru;
  37	};
  38	struct htab_elem *__percpu *extra_elems;
  39	atomic_t count;	/* number of elements in this hashtable */
  40	u32 n_buckets;	/* number of hash buckets */
  41	u32 elem_size;	/* size of each element in bytes */
  42};
  43
  44/* each htab element is struct htab_elem + key + value */
  45struct htab_elem {
  46	union {
  47		struct hlist_nulls_node hash_node;
  48		struct {
  49			void *padding;
  50			union {
  51				struct bpf_htab *htab;
  52				struct pcpu_freelist_node fnode;
  53			};
  54		};
  55	};
  56	union {
  57		struct rcu_head rcu;
  58		struct bpf_lru_node lru_node;
  59	};
  60	u32 hash;
  61	char key[0] __aligned(8);
  62};
  63
  64static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
  65
  66static bool htab_is_lru(const struct bpf_htab *htab)
  67{
  68	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
  69		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
  70}
  71
  72static bool htab_is_percpu(const struct bpf_htab *htab)
  73{
  74	return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
  75		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
  76}
  77
  78static bool htab_is_prealloc(const struct bpf_htab *htab)
  79{
  80	return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
  81}
  82
  83static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
  84				     void __percpu *pptr)
  85{
  86	*(void __percpu **)(l->key + key_size) = pptr;
  87}
  88
  89static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
  90{
  91	return *(void __percpu **)(l->key + key_size);
  92}
  93
  94static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
  95{
  96	return *(void **)(l->key + roundup(map->key_size, 8));
  97}
  98
  99static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
 100{
 101	return (struct htab_elem *) (htab->elems + i * htab->elem_size);
 102}
 103
 104static void htab_free_elems(struct bpf_htab *htab)
 105{
 106	int i;
 107
 108	if (!htab_is_percpu(htab))
 109		goto free_elems;
 110
 111	for (i = 0; i < htab->map.max_entries; i++) {
 112		void __percpu *pptr;
 113
 114		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
 115					 htab->map.key_size);
 116		free_percpu(pptr);
 117		cond_resched();
 118	}
 119free_elems:
 120	bpf_map_area_free(htab->elems);
 121}
 122
 123static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
 124					  u32 hash)
 125{
 126	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
 127	struct htab_elem *l;
 128
 129	if (node) {
 130		l = container_of(node, struct htab_elem, lru_node);
 131		memcpy(l->key, key, htab->map.key_size);
 132		return l;
 133	}
 134
 135	return NULL;
 136}
 137
 138static int prealloc_init(struct bpf_htab *htab)
 139{
 140	u32 num_entries = htab->map.max_entries;
 141	int err = -ENOMEM, i;
 142
 143	if (!htab_is_percpu(htab) && !htab_is_lru(htab))
 144		num_entries += num_possible_cpus();
 145
 146	htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries,
 147					 htab->map.numa_node);
 148	if (!htab->elems)
 149		return -ENOMEM;
 150
 151	if (!htab_is_percpu(htab))
 152		goto skip_percpu_elems;
 153
 154	for (i = 0; i < num_entries; i++) {
 155		u32 size = round_up(htab->map.value_size, 8);
 156		void __percpu *pptr;
 157
 158		pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
 159		if (!pptr)
 160			goto free_elems;
 161		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
 162				  pptr);
 163		cond_resched();
 164	}
 165
 166skip_percpu_elems:
 167	if (htab_is_lru(htab))
 168		err = bpf_lru_init(&htab->lru,
 169				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
 170				   offsetof(struct htab_elem, hash) -
 171				   offsetof(struct htab_elem, lru_node),
 172				   htab_lru_map_delete_node,
 173				   htab);
 174	else
 175		err = pcpu_freelist_init(&htab->freelist);
 176
 177	if (err)
 178		goto free_elems;
 179
 180	if (htab_is_lru(htab))
 181		bpf_lru_populate(&htab->lru, htab->elems,
 182				 offsetof(struct htab_elem, lru_node),
 183				 htab->elem_size, num_entries);
 184	else
 185		pcpu_freelist_populate(&htab->freelist,
 186				       htab->elems + offsetof(struct htab_elem, fnode),
 187				       htab->elem_size, num_entries);
 188
 189	return 0;
 190
 191free_elems:
 192	htab_free_elems(htab);
 193	return err;
 194}
 195
 196static void prealloc_destroy(struct bpf_htab *htab)
 197{
 198	htab_free_elems(htab);
 199
 200	if (htab_is_lru(htab))
 201		bpf_lru_destroy(&htab->lru);
 202	else
 203		pcpu_freelist_destroy(&htab->freelist);
 204}
 205
 206static int alloc_extra_elems(struct bpf_htab *htab)
 207{
 208	struct htab_elem *__percpu *pptr, *l_new;
 209	struct pcpu_freelist_node *l;
 210	int cpu;
 211
 212	pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8,
 213				  GFP_USER | __GFP_NOWARN);
 214	if (!pptr)
 215		return -ENOMEM;
 216
 217	for_each_possible_cpu(cpu) {
 218		l = pcpu_freelist_pop(&htab->freelist);
 219		/* pop will succeed, since prealloc_init()
 220		 * preallocated extra num_possible_cpus elements
 221		 */
 222		l_new = container_of(l, struct htab_elem, fnode);
 223		*per_cpu_ptr(pptr, cpu) = l_new;
 224	}
 225	htab->extra_elems = pptr;
 226	return 0;
 227}
 228
 229/* Called from syscall */
 230static int htab_map_alloc_check(union bpf_attr *attr)
 231{
 232	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 233		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
 234	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
 235		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
 236	/* percpu_lru means each cpu has its own LRU list.
 237	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
 238	 * the map's value itself is percpu.  percpu_lru has
 239	 * nothing to do with the map's value.
 240	 */
 241	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 242	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
 243	int numa_node = bpf_map_attr_numa_node(attr);
 244
 245	BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
 246		     offsetof(struct htab_elem, hash_node.pprev));
 247	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
 248		     offsetof(struct htab_elem, hash_node.pprev));
 249
 250	if (lru && !capable(CAP_SYS_ADMIN))
  251		/* LRU implementation is much more complicated than other
 252		 * maps.  Hence, limit to CAP_SYS_ADMIN for now.
 253		 */
 254		return -EPERM;
 255
 256	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
 257		/* reserved bits should not be used */
 258		return -EINVAL;
 259
 260	if (!lru && percpu_lru)
 261		return -EINVAL;
 262
 263	if (lru && !prealloc)
 264		return -ENOTSUPP;
 265
 266	if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
 267		return -EINVAL;
 268
 269	/* check sanity of attributes.
 270	 * value_size == 0 may be allowed in the future to use map as a set
 271	 */
 272	if (attr->max_entries == 0 || attr->key_size == 0 ||
 273	    attr->value_size == 0)
 274		return -EINVAL;
 275
 276	if (attr->key_size > MAX_BPF_STACK)
 277		/* eBPF programs initialize keys on stack, so they cannot be
 278		 * larger than max stack size
 279		 */
 280		return -E2BIG;
 281
 282	if (attr->value_size >= KMALLOC_MAX_SIZE -
 283	    MAX_BPF_STACK - sizeof(struct htab_elem))
 284		/* if value_size is bigger, the user space won't be able to
 285		 * access the elements via bpf syscall. This check also makes
 286		 * sure that the elem_size doesn't overflow and it's
 287		 * kmalloc-able later in htab_map_update_elem()
 288		 */
 289		return -E2BIG;
 290
 291	return 0;
 292}
 293
 294static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 295{
 296	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 297		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
 298	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
 299		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
 300	/* percpu_lru means each cpu has its own LRU list.
 301	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
 302	 * the map's value itself is percpu.  percpu_lru has
 303	 * nothing to do with the map's value.
 304	 */
 305	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 306	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
 307	struct bpf_htab *htab;
 308	int err, i;
 309	u64 cost;
 310
 311	htab = kzalloc(sizeof(*htab), GFP_USER);
 312	if (!htab)
 313		return ERR_PTR(-ENOMEM);
 314
 315	bpf_map_init_from_attr(&htab->map, attr);
 316
 317	if (percpu_lru) {
 318		/* ensure each CPU's lru list has >=1 elements.
  319		 * since we are at it, make each lru list have the same
 320		 * number of elements.
 321		 */
 322		htab->map.max_entries = roundup(attr->max_entries,
 323						num_possible_cpus());
 324		if (htab->map.max_entries < attr->max_entries)
 325			htab->map.max_entries = rounddown(attr->max_entries,
 326							  num_possible_cpus());
 327	}
 328
 329	/* hash table size must be power of 2 */
 330	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
 331
 332	htab->elem_size = sizeof(struct htab_elem) +
 333			  round_up(htab->map.key_size, 8);
 334	if (percpu)
 335		htab->elem_size += sizeof(void *);
 336	else
 337		htab->elem_size += round_up(htab->map.value_size, 8);
 338
 339	err = -E2BIG;
 340	/* prevent zero size kmalloc and check for u32 overflow */
 341	if (htab->n_buckets == 0 ||
 342	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
 343		goto free_htab;
 344
 345	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
 346	       (u64) htab->elem_size * htab->map.max_entries;
 347
 348	if (percpu)
 349		cost += (u64) round_up(htab->map.value_size, 8) *
 350			num_possible_cpus() * htab->map.max_entries;
 351	else
 352	       cost += (u64) htab->elem_size * num_possible_cpus();
 353
 354	if (cost >= U32_MAX - PAGE_SIZE)
 355		/* make sure page count doesn't overflow */
 356		goto free_htab;
 357
 358	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 359
 360	/* if map size is larger than memlock limit, reject it early */
 361	err = bpf_map_precharge_memlock(htab->map.pages);
 362	if (err)
 363		goto free_htab;
 364
 365	err = -ENOMEM;
 366	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
 367					   sizeof(struct bucket),
 368					   htab->map.numa_node);
 369	if (!htab->buckets)
 370		goto free_htab;
 371
 372	for (i = 0; i < htab->n_buckets; i++) {
 373		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
 374		raw_spin_lock_init(&htab->buckets[i].lock);
 375	}
 376
 377	if (prealloc) {
 378		err = prealloc_init(htab);
 379		if (err)
 380			goto free_buckets;
 381
 382		if (!percpu && !lru) {
 383			/* lru itself can remove the least used element, so
 384			 * there is no need for an extra elem during map_update.
 385			 */
 386			err = alloc_extra_elems(htab);
 387			if (err)
 388				goto free_prealloc;
 389		}
 390	}
 391
 392	return &htab->map;
 393
 394free_prealloc:
 395	prealloc_destroy(htab);
 396free_buckets:
 397	bpf_map_area_free(htab->buckets);
 398free_htab:
 399	kfree(htab);
 400	return ERR_PTR(err);
 401}
 402
 403static inline u32 htab_map_hash(const void *key, u32 key_len)
 404{
 405	return jhash(key, key_len, 0);
 406}
 407
 408static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
 409{
 410	return &htab->buckets[hash & (htab->n_buckets - 1)];
 411}
 412
 413static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
 414{
 415	return &__select_bucket(htab, hash)->head;
 416}
 417
 418/* this lookup function can only be called with bucket lock taken */
 419static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
 420					 void *key, u32 key_size)
 421{
 422	struct hlist_nulls_node *n;
 423	struct htab_elem *l;
 424
 425	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
 426		if (l->hash == hash && !memcmp(&l->key, key, key_size))
 427			return l;
 428
 429	return NULL;
 430}
 431
 432/* can be called without bucket lock. it will repeat the loop in
 433 * the unlikely event when elements moved from one bucket into another
 434 * while link list is being walked
 435 */
 436static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
 437					       u32 hash, void *key,
 438					       u32 key_size, u32 n_buckets)
 439{
 440	struct hlist_nulls_node *n;
 441	struct htab_elem *l;
 442
 443again:
 444	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
 445		if (l->hash == hash && !memcmp(&l->key, key, key_size))
 446			return l;
 447
 448	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
 449		goto again;
 450
 451	return NULL;
 452}
 453
 454/* Called from syscall or from eBPF program directly, so
 455 * arguments have to match bpf_map_lookup_elem() exactly.
 456 * The return value is adjusted by BPF instructions
 457 * in htab_map_gen_lookup().
 458 */
 459static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
 460{
 461	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 462	struct hlist_nulls_head *head;
 463	struct htab_elem *l;
 464	u32 hash, key_size;
 465
 466	/* Must be called with rcu_read_lock. */
 467	WARN_ON_ONCE(!rcu_read_lock_held());
 468
 469	key_size = map->key_size;
 470
 471	hash = htab_map_hash(key, key_size);
 472
 473	head = select_bucket(htab, hash);
 474
 475	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
 476
 477	return l;
 478}
 479
 480static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
 481{
 482	struct htab_elem *l = __htab_map_lookup_elem(map, key);
 483
 484	if (l)
 485		return l->key + round_up(map->key_size, 8);
 486
 487	return NULL;
 488}
 489
 490/* inline bpf_map_lookup_elem() call.
 491 * Instead of:
 492 * bpf_prog
 493 *   bpf_map_lookup_elem
 494 *     map->ops->map_lookup_elem
 495 *       htab_map_lookup_elem
 496 *         __htab_map_lookup_elem
 497 * do:
 498 * bpf_prog
 499 *   __htab_map_lookup_elem
 500 */
 501static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 502{
 503	struct bpf_insn *insn = insn_buf;
 504	const int ret = BPF_REG_0;
 505
 506	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
 507	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
 508	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
 509				offsetof(struct htab_elem, key) +
 510				round_up(map->key_size, 8));
 511	return insn - insn_buf;
 512}
 513
 514static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
 515{
 516	struct htab_elem *l = __htab_map_lookup_elem(map, key);
 517
 518	if (l) {
 519		bpf_lru_node_set_ref(&l->lru_node);
 520		return l->key + round_up(map->key_size, 8);
 521	}
 522
 523	return NULL;
 524}
 525
 526static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
 527				   struct bpf_insn *insn_buf)
 528{
 529	struct bpf_insn *insn = insn_buf;
 530	const int ret = BPF_REG_0;
 531	const int ref_reg = BPF_REG_1;
 532
 533	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
 534	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
 535	*insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
 536			      offsetof(struct htab_elem, lru_node) +
 537			      offsetof(struct bpf_lru_node, ref));
 538	*insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1);
 539	*insn++ = BPF_ST_MEM(BPF_B, ret,
 540			     offsetof(struct htab_elem, lru_node) +
 541			     offsetof(struct bpf_lru_node, ref),
 542			     1);
 543	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
 544				offsetof(struct htab_elem, key) +
 545				round_up(map->key_size, 8));
 546	return insn - insn_buf;
 547}
 548
 549/* It is called from the bpf_lru_list when the LRU needs to delete
 550 * older elements from the htab.
 551 */
 552static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 553{
 554	struct bpf_htab *htab = (struct bpf_htab *)arg;
 555	struct htab_elem *l = NULL, *tgt_l;
 556	struct hlist_nulls_head *head;
 557	struct hlist_nulls_node *n;
 558	unsigned long flags;
 559	struct bucket *b;
 560
 561	tgt_l = container_of(node, struct htab_elem, lru_node);
 562	b = __select_bucket(htab, tgt_l->hash);
 563	head = &b->head;
 564
 565	raw_spin_lock_irqsave(&b->lock, flags);
 566
 567	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
 568		if (l == tgt_l) {
 569			hlist_nulls_del_rcu(&l->hash_node);
 570			break;
 571		}
 572
 573	raw_spin_unlock_irqrestore(&b->lock, flags);
 574
 575	return l == tgt_l;
 576}
 577
 578/* Called from syscall */
 579static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 580{
 581	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 582	struct hlist_nulls_head *head;
 583	struct htab_elem *l, *next_l;
 584	u32 hash, key_size;
 585	int i = 0;
 586
 587	WARN_ON_ONCE(!rcu_read_lock_held());
 588
 589	key_size = map->key_size;
 590
 591	if (!key)
 592		goto find_first_elem;
 593
 594	hash = htab_map_hash(key, key_size);
 595
 596	head = select_bucket(htab, hash);
 597
 598	/* lookup the key */
 599	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
 600
 601	if (!l)
 602		goto find_first_elem;
 603
 604	/* key was found, get next key in the same bucket */
 605	next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
 606				  struct htab_elem, hash_node);
 607
 608	if (next_l) {
 609		/* if next elem in this hash list is non-zero, just return it */
 610		memcpy(next_key, next_l->key, key_size);
 611		return 0;
 612	}
 613
 614	/* no more elements in this hash list, go to the next bucket */
 615	i = hash & (htab->n_buckets - 1);
 616	i++;
 617
 618find_first_elem:
 619	/* iterate over buckets */
 620	for (; i < htab->n_buckets; i++) {
 621		head = select_bucket(htab, i);
 622
 623		/* pick first element in the bucket */
 624		next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
 625					  struct htab_elem, hash_node);
 626		if (next_l) {
 627			/* if it's not empty, just return it */
 628			memcpy(next_key, next_l->key, key_size);
 629			return 0;
 630		}
 631	}
 632
 633	/* iterated over all buckets and all elements */
 634	return -ENOENT;
 635}
 636
 637static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
 638{
 639	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
 640		free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
 641	kfree(l);
 642}
 643
 644static void htab_elem_free_rcu(struct rcu_head *head)
 645{
 646	struct htab_elem *l = container_of(head, struct htab_elem, rcu);
 647	struct bpf_htab *htab = l->htab;
 648
 649	/* must increment bpf_prog_active to avoid kprobe+bpf triggering while
 650	 * we're calling kfree, otherwise deadlock is possible if kprobes
 651	 * are placed somewhere inside of slub
 652	 */
 653	preempt_disable();
 654	__this_cpu_inc(bpf_prog_active);
 655	htab_elem_free(htab, l);
 656	__this_cpu_dec(bpf_prog_active);
 657	preempt_enable();
 658}
 659
 660static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 661{
 662	struct bpf_map *map = &htab->map;
 663
 664	if (map->ops->map_fd_put_ptr) {
 665		void *ptr = fd_htab_map_get_ptr(map, l);
 666
 667		map->ops->map_fd_put_ptr(ptr);
 668	}
 669
 670	if (htab_is_prealloc(htab)) {
 671		pcpu_freelist_push(&htab->freelist, &l->fnode);
 672	} else {
 673		atomic_dec(&htab->count);
 674		l->htab = htab;
 675		call_rcu(&l->rcu, htab_elem_free_rcu);
 676	}
 677}
 678
 679static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
 680			    void *value, bool onallcpus)
 681{
 682	if (!onallcpus) {
 683		/* copy true value_size bytes */
 684		memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
 685	} else {
 686		u32 size = round_up(htab->map.value_size, 8);
 687		int off = 0, cpu;
 688
 689		for_each_possible_cpu(cpu) {
 690			bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
 691					value + off, size);
 692			off += size;
 693		}
 694	}
 695}
 696
 697static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
 698{
 699	return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
 700	       BITS_PER_LONG == 64;
 701}
 702
 703static u32 htab_size_value(const struct bpf_htab *htab, bool percpu)
 704{
 705	u32 size = htab->map.value_size;
 706
 707	if (percpu || fd_htab_map_needs_adjust(htab))
 708		size = round_up(size, 8);
 709	return size;
 710}
 711
 712static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 713					 void *value, u32 key_size, u32 hash,
 714					 bool percpu, bool onallcpus,
 715					 struct htab_elem *old_elem)
 716{
 717	u32 size = htab_size_value(htab, percpu);
 718	bool prealloc = htab_is_prealloc(htab);
 719	struct htab_elem *l_new, **pl_new;
 720	void __percpu *pptr;
 721
 722	if (prealloc) {
 723		if (old_elem) {
 724			/* if we're updating the existing element,
 725			 * use per-cpu extra elems to avoid freelist_pop/push
 726			 */
 727			pl_new = this_cpu_ptr(htab->extra_elems);
 728			l_new = *pl_new;
 729			*pl_new = old_elem;
 730		} else {
 731			struct pcpu_freelist_node *l;
 732
 733			l = pcpu_freelist_pop(&htab->freelist);
 734			if (!l)
 735				return ERR_PTR(-E2BIG);
 736			l_new = container_of(l, struct htab_elem, fnode);
 737		}
 738	} else {
 739		if (atomic_inc_return(&htab->count) > htab->map.max_entries)
 740			if (!old_elem) {
 741				/* when map is full and update() is replacing
 742				 * old element, it's ok to allocate, since
 743				 * old element will be freed immediately.
 744				 * Otherwise return an error
 745				 */
 746				atomic_dec(&htab->count);
 747				return ERR_PTR(-E2BIG);
 748			}
 749		l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
 750				     htab->map.numa_node);
 751		if (!l_new)
 752			return ERR_PTR(-ENOMEM);
 753	}
 754
 755	memcpy(l_new->key, key, key_size);
 756	if (percpu) {
 757		if (prealloc) {
 758			pptr = htab_elem_get_ptr(l_new, key_size);
 759		} else {
 760			/* alloc_percpu zero-fills */
 761			pptr = __alloc_percpu_gfp(size, 8,
 762						  GFP_ATOMIC | __GFP_NOWARN);
 763			if (!pptr) {
 764				kfree(l_new);
 765				return ERR_PTR(-ENOMEM);
 766			}
 767		}
 768
 769		pcpu_copy_value(htab, pptr, value, onallcpus);
 770
 771		if (!prealloc)
 772			htab_elem_set_ptr(l_new, key_size, pptr);
 773	} else {
 774		memcpy(l_new->key + round_up(key_size, 8), value, size);
 775	}
 776
 777	l_new->hash = hash;
 778	return l_new;
 779}
 780
 781static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
 782		       u64 map_flags)
 783{
 784	if (l_old && map_flags == BPF_NOEXIST)
 785		/* elem already exists */
 786		return -EEXIST;
 787
 788	if (!l_old && map_flags == BPF_EXIST)
 789		/* elem doesn't exist, cannot update it */
 790		return -ENOENT;
 791
 792	return 0;
 793}
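/*
 * Illustrative user-space sketch (not part of this file): how the flag
 * values checked by check_flags() above look from the bpf(2) syscall
 * side, using the libbpf wrapper bpf_map_update_elem().  The map fd,
 * key and value here are placeholders.
 */
#include <bpf/bpf.h>		/* libbpf syscall wrappers, BPF_* flags */

static int update_flag_demo(int map_fd)
{
	__u32 key = 1, val = 42;
	int err;

	/* create only: the kernel returns -EEXIST if the key is present */
	err = bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST);
	if (err)
		return err;

	/* update only: the kernel returns -ENOENT if the key is absent */
	err = bpf_map_update_elem(map_fd, &key, &val, BPF_EXIST);
	if (err)
		return err;

	/* create or update unconditionally */
	return bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
}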
 794
 795/* Called from syscall or from eBPF program */
 796static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 797				u64 map_flags)
 798{
 799	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 800	struct htab_elem *l_new = NULL, *l_old;
 801	struct hlist_nulls_head *head;
 802	unsigned long flags;
 803	struct bucket *b;
 804	u32 key_size, hash;
 805	int ret;
 806
 807	if (unlikely(map_flags > BPF_EXIST))
 808		/* unknown flags */
 809		return -EINVAL;
 810
 811	WARN_ON_ONCE(!rcu_read_lock_held());
 812
 813	key_size = map->key_size;
 814
 815	hash = htab_map_hash(key, key_size);
 816
 817	b = __select_bucket(htab, hash);
 818	head = &b->head;
 819
 820	/* bpf_map_update_elem() can be called in_irq() */
 821	raw_spin_lock_irqsave(&b->lock, flags);
 822
 823	l_old = lookup_elem_raw(head, hash, key, key_size);
 824
 825	ret = check_flags(htab, l_old, map_flags);
 826	if (ret)
 827		goto err;
 828
 829	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
 830				l_old);
 831	if (IS_ERR(l_new)) {
 832		/* all pre-allocated elements are in use or memory exhausted */
 833		ret = PTR_ERR(l_new);
 834		goto err;
 835	}
 836
 837	/* add new element to the head of the list, so that
 838	 * concurrent search will find it before old elem
 839	 */
 840	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
 841	if (l_old) {
 842		hlist_nulls_del_rcu(&l_old->hash_node);
 843		if (!htab_is_prealloc(htab))
 844			free_htab_elem(htab, l_old);
 845	}
 846	ret = 0;
 847err:
 848	raw_spin_unlock_irqrestore(&b->lock, flags);
 849	return ret;
 850}
 851
 852static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 853				    u64 map_flags)
 854{
 855	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 856	struct htab_elem *l_new, *l_old = NULL;
 857	struct hlist_nulls_head *head;
 858	unsigned long flags;
 859	struct bucket *b;
 860	u32 key_size, hash;
 861	int ret;
 862
 863	if (unlikely(map_flags > BPF_EXIST))
 864		/* unknown flags */
 865		return -EINVAL;
 866
 867	WARN_ON_ONCE(!rcu_read_lock_held());
 868
 869	key_size = map->key_size;
 870
 871	hash = htab_map_hash(key, key_size);
 872
 873	b = __select_bucket(htab, hash);
 874	head = &b->head;
 875
  876	/* For LRU, we need to alloc before taking the bucket's
  877	 * spinlock because getting a free node from the LRU may need
  878	 * to evict older elements from the htab, and that removal
  879	 * operation will need the bucket lock.
 880	 */
 881	l_new = prealloc_lru_pop(htab, key, hash);
 882	if (!l_new)
 883		return -ENOMEM;
 884	memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);
 885
 886	/* bpf_map_update_elem() can be called in_irq() */
 887	raw_spin_lock_irqsave(&b->lock, flags);
 888
 889	l_old = lookup_elem_raw(head, hash, key, key_size);
 890
 891	ret = check_flags(htab, l_old, map_flags);
 892	if (ret)
 893		goto err;
 894
 895	/* add new element to the head of the list, so that
 896	 * concurrent search will find it before old elem
 897	 */
 898	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
 899	if (l_old) {
 900		bpf_lru_node_set_ref(&l_new->lru_node);
 901		hlist_nulls_del_rcu(&l_old->hash_node);
 902	}
 903	ret = 0;
 904
 905err:
 906	raw_spin_unlock_irqrestore(&b->lock, flags);
 907
 908	if (ret)
 909		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
 910	else if (l_old)
 911		bpf_lru_push_free(&htab->lru, &l_old->lru_node);
 912
 913	return ret;
 914}
 915
 916static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 917					 void *value, u64 map_flags,
 918					 bool onallcpus)
 919{
 920	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 921	struct htab_elem *l_new = NULL, *l_old;
 922	struct hlist_nulls_head *head;
 923	unsigned long flags;
 924	struct bucket *b;
 925	u32 key_size, hash;
 926	int ret;
 927
 928	if (unlikely(map_flags > BPF_EXIST))
 929		/* unknown flags */
 930		return -EINVAL;
 931
 932	WARN_ON_ONCE(!rcu_read_lock_held());
 933
 934	key_size = map->key_size;
 935
 936	hash = htab_map_hash(key, key_size);
 937
 938	b = __select_bucket(htab, hash);
 939	head = &b->head;
 940
 941	/* bpf_map_update_elem() can be called in_irq() */
 942	raw_spin_lock_irqsave(&b->lock, flags);
 943
 944	l_old = lookup_elem_raw(head, hash, key, key_size);
 945
 946	ret = check_flags(htab, l_old, map_flags);
 947	if (ret)
 948		goto err;
 949
 950	if (l_old) {
 951		/* per-cpu hash map can update value in-place */
 952		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
 953				value, onallcpus);
 954	} else {
 955		l_new = alloc_htab_elem(htab, key, value, key_size,
 956					hash, true, onallcpus, NULL);
 957		if (IS_ERR(l_new)) {
 958			ret = PTR_ERR(l_new);
 959			goto err;
 960		}
 961		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
 962	}
 963	ret = 0;
 964err:
 965	raw_spin_unlock_irqrestore(&b->lock, flags);
 966	return ret;
 967}
 968
 969static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 970					     void *value, u64 map_flags,
 971					     bool onallcpus)
 972{
 973	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 974	struct htab_elem *l_new = NULL, *l_old;
 975	struct hlist_nulls_head *head;
 976	unsigned long flags;
 977	struct bucket *b;
 978	u32 key_size, hash;
 979	int ret;
 980
 981	if (unlikely(map_flags > BPF_EXIST))
 982		/* unknown flags */
 983		return -EINVAL;
 984
 985	WARN_ON_ONCE(!rcu_read_lock_held());
 986
 987	key_size = map->key_size;
 988
 989	hash = htab_map_hash(key, key_size);
 990
 991	b = __select_bucket(htab, hash);
 992	head = &b->head;
 993
  994	/* For LRU, we need to alloc before taking the bucket's
  995	 * spinlock because the LRU's elem alloc may need
  996	 * to evict an older elem from the htab, and that removal
  997	 * operation will need the bucket lock.
 998	 */
 999	if (map_flags != BPF_EXIST) {
1000		l_new = prealloc_lru_pop(htab, key, hash);
1001		if (!l_new)
1002			return -ENOMEM;
1003	}
1004
1005	/* bpf_map_update_elem() can be called in_irq() */
1006	raw_spin_lock_irqsave(&b->lock, flags);
1007
1008	l_old = lookup_elem_raw(head, hash, key, key_size);
1009
1010	ret = check_flags(htab, l_old, map_flags);
1011	if (ret)
1012		goto err;
1013
1014	if (l_old) {
1015		bpf_lru_node_set_ref(&l_old->lru_node);
1016
1017		/* per-cpu hash map can update value in-place */
1018		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1019				value, onallcpus);
1020	} else {
1021		pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
1022				value, onallcpus);
1023		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1024		l_new = NULL;
1025	}
1026	ret = 0;
1027err:
1028	raw_spin_unlock_irqrestore(&b->lock, flags);
1029	if (l_new)
1030		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
1031	return ret;
1032}
1033
1034static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
1035				       void *value, u64 map_flags)
1036{
1037	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
1038}
1039
1040static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
1041					   void *value, u64 map_flags)
1042{
1043	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
1044						 false);
1045}
1046
1047/* Called from syscall or from eBPF program */
1048static int htab_map_delete_elem(struct bpf_map *map, void *key)
1049{
1050	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1051	struct hlist_nulls_head *head;
1052	struct bucket *b;
1053	struct htab_elem *l;
1054	unsigned long flags;
1055	u32 hash, key_size;
1056	int ret = -ENOENT;
1057
1058	WARN_ON_ONCE(!rcu_read_lock_held());
1059
1060	key_size = map->key_size;
1061
1062	hash = htab_map_hash(key, key_size);
1063	b = __select_bucket(htab, hash);
1064	head = &b->head;
1065
1066	raw_spin_lock_irqsave(&b->lock, flags);
1067
1068	l = lookup_elem_raw(head, hash, key, key_size);
1069
1070	if (l) {
1071		hlist_nulls_del_rcu(&l->hash_node);
1072		free_htab_elem(htab, l);
1073		ret = 0;
1074	}
1075
1076	raw_spin_unlock_irqrestore(&b->lock, flags);
1077	return ret;
1078}
1079
1080static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
1081{
1082	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1083	struct hlist_nulls_head *head;
1084	struct bucket *b;
1085	struct htab_elem *l;
1086	unsigned long flags;
1087	u32 hash, key_size;
1088	int ret = -ENOENT;
1089
1090	WARN_ON_ONCE(!rcu_read_lock_held());
1091
1092	key_size = map->key_size;
1093
1094	hash = htab_map_hash(key, key_size);
1095	b = __select_bucket(htab, hash);
1096	head = &b->head;
1097
1098	raw_spin_lock_irqsave(&b->lock, flags);
1099
1100	l = lookup_elem_raw(head, hash, key, key_size);
1101
1102	if (l) {
1103		hlist_nulls_del_rcu(&l->hash_node);
1104		ret = 0;
1105	}
1106
1107	raw_spin_unlock_irqrestore(&b->lock, flags);
1108	if (l)
1109		bpf_lru_push_free(&htab->lru, &l->lru_node);
1110	return ret;
1111}
1112
1113static void delete_all_elements(struct bpf_htab *htab)
1114{
1115	int i;
1116
1117	for (i = 0; i < htab->n_buckets; i++) {
1118		struct hlist_nulls_head *head = select_bucket(htab, i);
1119		struct hlist_nulls_node *n;
1120		struct htab_elem *l;
1121
1122		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1123			hlist_nulls_del_rcu(&l->hash_node);
1124			htab_elem_free(htab, l);
1125		}
1126	}
1127}
1128
1129/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
1130static void htab_map_free(struct bpf_map *map)
1131{
1132	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1133
1134	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
 1135	 * so the programs (there can be more than one using this map) were
1136	 * disconnected from events. Wait for outstanding critical sections in
1137	 * these programs to complete
1138	 */
1139	synchronize_rcu();
1140
 1141	/* some of the free_htab_elem() callbacks for elements of this map may
 1142	 * not have executed yet. Wait for them.
1143	 */
1144	rcu_barrier();
1145	if (!htab_is_prealloc(htab))
1146		delete_all_elements(htab);
1147	else
1148		prealloc_destroy(htab);
1149
1150	free_percpu(htab->extra_elems);
1151	bpf_map_area_free(htab->buckets);
1152	kfree(htab);
1153}
1154
1155const struct bpf_map_ops htab_map_ops = {
1156	.map_alloc_check = htab_map_alloc_check,
1157	.map_alloc = htab_map_alloc,
1158	.map_free = htab_map_free,
1159	.map_get_next_key = htab_map_get_next_key,
1160	.map_lookup_elem = htab_map_lookup_elem,
1161	.map_update_elem = htab_map_update_elem,
1162	.map_delete_elem = htab_map_delete_elem,
1163	.map_gen_lookup = htab_map_gen_lookup,
1164};
1165
1166const struct bpf_map_ops htab_lru_map_ops = {
1167	.map_alloc_check = htab_map_alloc_check,
1168	.map_alloc = htab_map_alloc,
1169	.map_free = htab_map_free,
1170	.map_get_next_key = htab_map_get_next_key,
1171	.map_lookup_elem = htab_lru_map_lookup_elem,
1172	.map_update_elem = htab_lru_map_update_elem,
1173	.map_delete_elem = htab_lru_map_delete_elem,
1174	.map_gen_lookup = htab_lru_map_gen_lookup,
1175};
1176
1177/* Called from eBPF program */
1178static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
1179{
1180	struct htab_elem *l = __htab_map_lookup_elem(map, key);
1181
1182	if (l)
1183		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
1184	else
1185		return NULL;
1186}
1187
1188static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
1189{
1190	struct htab_elem *l = __htab_map_lookup_elem(map, key);
1191
1192	if (l) {
1193		bpf_lru_node_set_ref(&l->lru_node);
1194		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
1195	}
1196
1197	return NULL;
1198}
1199
1200int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
1201{
1202	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1203	struct htab_elem *l;
1204	void __percpu *pptr;
1205	int ret = -ENOENT;
1206	int cpu, off = 0;
1207	u32 size;
1208
1209	/* per_cpu areas are zero-filled and bpf programs can only
1210	 * access 'value_size' of them, so copying rounded areas
1211	 * will not leak any kernel data
1212	 */
1213	size = round_up(map->value_size, 8);
1214	rcu_read_lock();
1215	l = __htab_map_lookup_elem(map, key);
1216	if (!l)
1217		goto out;
1218	if (htab_is_lru(htab))
1219		bpf_lru_node_set_ref(&l->lru_node);
1220	pptr = htab_elem_get_ptr(l, map->key_size);
1221	for_each_possible_cpu(cpu) {
1222		bpf_long_memcpy(value + off,
1223				per_cpu_ptr(pptr, cpu), size);
1224		off += size;
1225	}
1226	ret = 0;
1227out:
1228	rcu_read_unlock();
1229	return ret;
1230}
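/*
 * Illustrative user-space sketch (not part of this file): reading one
 * key of a BPF_MAP_TYPE_PERCPU_HASH whose value is a u64 counter.  As
 * bpf_percpu_hash_copy() above shows, the kernel fills one 8-byte
 * aligned slot per possible CPU.  map_fd and nr_cpus (the number of
 * possible CPUs) are assumed to be obtained elsewhere.
 */
#include <stdlib.h>
#include <bpf/bpf.h>

static long long sum_percpu_counter(int map_fd, __u32 key, int nr_cpus)
{
	__u64 *vals;
	long long sum = 0;
	int cpu, err;

	/* one slot of round_up(value_size, 8) == 8 bytes per possible CPU */
	vals = calloc(nr_cpus, sizeof(*vals));
	if (!vals)
		return -1;

	err = bpf_map_lookup_elem(map_fd, &key, vals);
	if (!err)
		for (cpu = 0; cpu < nr_cpus; cpu++)
			sum += vals[cpu];

	free(vals);
	return err ? -1 : sum;
}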
1231
1232int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
1233			   u64 map_flags)
1234{
1235	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1236	int ret;
1237
1238	rcu_read_lock();
1239	if (htab_is_lru(htab))
1240		ret = __htab_lru_percpu_map_update_elem(map, key, value,
1241							map_flags, true);
1242	else
1243		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
1244						    true);
1245	rcu_read_unlock();
1246
1247	return ret;
1248}
1249
1250const struct bpf_map_ops htab_percpu_map_ops = {
1251	.map_alloc_check = htab_map_alloc_check,
1252	.map_alloc = htab_map_alloc,
1253	.map_free = htab_map_free,
1254	.map_get_next_key = htab_map_get_next_key,
1255	.map_lookup_elem = htab_percpu_map_lookup_elem,
1256	.map_update_elem = htab_percpu_map_update_elem,
1257	.map_delete_elem = htab_map_delete_elem,
1258};
1259
1260const struct bpf_map_ops htab_lru_percpu_map_ops = {
1261	.map_alloc_check = htab_map_alloc_check,
1262	.map_alloc = htab_map_alloc,
1263	.map_free = htab_map_free,
1264	.map_get_next_key = htab_map_get_next_key,
1265	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
1266	.map_update_elem = htab_lru_percpu_map_update_elem,
1267	.map_delete_elem = htab_lru_map_delete_elem,
1268};
1269
1270static int fd_htab_map_alloc_check(union bpf_attr *attr)
1271{
1272	if (attr->value_size != sizeof(u32))
1273		return -EINVAL;
1274	return htab_map_alloc_check(attr);
1275}
1276
1277static void fd_htab_map_free(struct bpf_map *map)
1278{
1279	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1280	struct hlist_nulls_node *n;
1281	struct hlist_nulls_head *head;
1282	struct htab_elem *l;
1283	int i;
1284
1285	for (i = 0; i < htab->n_buckets; i++) {
1286		head = select_bucket(htab, i);
1287
1288		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1289			void *ptr = fd_htab_map_get_ptr(map, l);
1290
1291			map->ops->map_fd_put_ptr(ptr);
1292		}
1293	}
1294
1295	htab_map_free(map);
1296}
1297
1298/* only called from syscall */
1299int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
1300{
1301	void **ptr;
1302	int ret = 0;
1303
1304	if (!map->ops->map_fd_sys_lookup_elem)
1305		return -ENOTSUPP;
1306
1307	rcu_read_lock();
1308	ptr = htab_map_lookup_elem(map, key);
1309	if (ptr)
1310		*value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
1311	else
1312		ret = -ENOENT;
1313	rcu_read_unlock();
1314
1315	return ret;
1316}
1317
1318/* only called from syscall */
1319int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
1320				void *key, void *value, u64 map_flags)
1321{
1322	void *ptr;
1323	int ret;
1324	u32 ufd = *(u32 *)value;
1325
1326	ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
1327	if (IS_ERR(ptr))
1328		return PTR_ERR(ptr);
1329
1330	ret = htab_map_update_elem(map, key, &ptr, map_flags);
1331	if (ret)
1332		map->ops->map_fd_put_ptr(ptr);
1333
1334	return ret;
1335}
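/*
 * Illustrative user-space sketch (not part of this file): inserting an
 * inner map into a BPF_MAP_TYPE_HASH_OF_MAPS.  fd_htab_map_alloc_check()
 * requires value_size == sizeof(u32), and bpf_fd_htab_map_update_elem()
 * above interprets that u32 as the fd of the inner map.  The fds here
 * are placeholders.
 */
#include <bpf/bpf.h>

static int add_inner_map(int outer_map_fd, __u32 key, int inner_map_fd)
{
	__u32 fd_val = inner_map_fd;

	return bpf_map_update_elem(outer_map_fd, &key, &fd_val, BPF_ANY);
}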
1336
1337static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
1338{
1339	struct bpf_map *map, *inner_map_meta;
1340
1341	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
1342	if (IS_ERR(inner_map_meta))
1343		return inner_map_meta;
1344
1345	map = htab_map_alloc(attr);
1346	if (IS_ERR(map)) {
1347		bpf_map_meta_free(inner_map_meta);
1348		return map;
1349	}
1350
1351	map->inner_map_meta = inner_map_meta;
1352
1353	return map;
1354}
1355
1356static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
1357{
1358	struct bpf_map **inner_map  = htab_map_lookup_elem(map, key);
1359
1360	if (!inner_map)
1361		return NULL;
1362
1363	return READ_ONCE(*inner_map);
1364}
1365
1366static u32 htab_of_map_gen_lookup(struct bpf_map *map,
1367				  struct bpf_insn *insn_buf)
1368{
1369	struct bpf_insn *insn = insn_buf;
1370	const int ret = BPF_REG_0;
1371
1372	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
1373	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
1374	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
1375				offsetof(struct htab_elem, key) +
1376				round_up(map->key_size, 8));
1377	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
1378
1379	return insn - insn_buf;
1380}
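/*
 * Rough reading of the sequence emitted above, in BPF pseudo-assembly:
 *
 *   R0 = __htab_map_lookup_elem(map, key)	// htab_elem * or NULL
 *   if R0 == 0 goto out			// miss: R0 stays NULL
 *   R0 += offsetof(struct htab_elem, key) + round_up(key_size, 8)
 *   R0 = *(u64 *)(R0 + 0)			// load inner struct bpf_map *
 * out:
 *
 * so on a hit the caller receives the inner map pointer stored in the
 * element's value area.
 */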
1381
1382static void htab_of_map_free(struct bpf_map *map)
1383{
1384	bpf_map_meta_free(map->inner_map_meta);
1385	fd_htab_map_free(map);
1386}
1387
1388const struct bpf_map_ops htab_of_maps_map_ops = {
1389	.map_alloc_check = fd_htab_map_alloc_check,
1390	.map_alloc = htab_of_map_alloc,
1391	.map_free = htab_of_map_free,
1392	.map_get_next_key = htab_map_get_next_key,
1393	.map_lookup_elem = htab_of_map_lookup_elem,
1394	.map_delete_elem = htab_map_delete_elem,
1395	.map_fd_get_ptr = bpf_map_fd_get_ptr,
1396	.map_fd_put_ptr = bpf_map_fd_put_ptr,
1397	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
1398	.map_gen_lookup = htab_of_map_gen_lookup,
1399};