Linux v6.2 - kernel/bpf/arraymap.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   3 * Copyright (c) 2016,2017 Facebook
   4 */
   5#include <linux/bpf.h>
   6#include <linux/btf.h>
   7#include <linux/err.h>
   8#include <linux/slab.h>
   9#include <linux/mm.h>
  10#include <linux/filter.h>
  11#include <linux/perf_event.h>
  12#include <uapi/linux/btf.h>
  13#include <linux/rcupdate_trace.h>
  14#include <linux/btf_ids.h>
  15
  16#include "map_in_map.h"
  17
  18#define ARRAY_CREATE_FLAG_MASK \
  19	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
  20	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)
  21
  22static void bpf_array_free_percpu(struct bpf_array *array)
  23{
  24	int i;
  25
  26	for (i = 0; i < array->map.max_entries; i++) {
  27		free_percpu(array->pptrs[i]);
  28		cond_resched();
  29	}
  30}
  31
  32static int bpf_array_alloc_percpu(struct bpf_array *array)
  33{
  34	void __percpu *ptr;
  35	int i;
  36
  37	for (i = 0; i < array->map.max_entries; i++) {
  38		ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
  39					   GFP_USER | __GFP_NOWARN);
  40		if (!ptr) {
  41			bpf_array_free_percpu(array);
  42			return -ENOMEM;
  43		}
  44		array->pptrs[i] = ptr;
  45		cond_resched();
  46	}
  47
  48	return 0;
  49}
  50
  51/* Called from syscall */
  52int array_map_alloc_check(union bpf_attr *attr)
  53{
  54	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
  55	int numa_node = bpf_map_attr_numa_node(attr);
  56
  57	/* check sanity of attributes */
  58	if (attr->max_entries == 0 || attr->key_size != 4 ||
  59	    attr->value_size == 0 ||
  60	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
  61	    !bpf_map_flags_access_ok(attr->map_flags) ||
  62	    (percpu && numa_node != NUMA_NO_NODE))
  63		return -EINVAL;
  64
  65	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
  66	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
  67		return -EINVAL;
  68
  69	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
  70	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
  71		return -EINVAL;
  72
  73	/* avoid overflow on round_up(map->value_size) */
  74	if (attr->value_size > INT_MAX)
   75		return -E2BIG;
   76
  77	return 0;
  78}
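
/* Usage sketch (illustrative, not part of this file; assumes libbpf's
 * bpf_map_create() from <bpf/bpf.h>): the checks above constrain the
 * attributes supplied at map creation time: key_size must be 4, and
 * value_size and max_entries must be non-zero.
 */
#include <bpf/bpf.h>

static int create_example_array(void)
{
	/* returns a map fd on success, negative error otherwise */
	return bpf_map_create(BPF_MAP_TYPE_ARRAY, "example_array",
			      sizeof(__u32), sizeof(__u64), 256, NULL);
}
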
  79
  80static struct bpf_map *array_map_alloc(union bpf_attr *attr)
  81{
  82	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
  83	int numa_node = bpf_map_attr_numa_node(attr);
  84	u32 elem_size, index_mask, max_entries;
  85	bool bypass_spec_v1 = bpf_bypass_spec_v1();
  86	u64 array_size, mask64;
  87	struct bpf_array *array;
  88
  89	elem_size = round_up(attr->value_size, 8);
  90
  91	max_entries = attr->max_entries;
  92
  93	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
   94	 * uppermost bit set in u32 space is undefined behavior due to
  95	 * resulting 1U << 32, so do it manually here in u64 space.
  96	 */
  97	mask64 = fls_long(max_entries - 1);
  98	mask64 = 1ULL << mask64;
  99	mask64 -= 1;
 100
 101	index_mask = mask64;
 102	if (!bypass_spec_v1) {
 103		/* round up array size to nearest power of 2,
 104		 * since cpu will speculate within index_mask limits
 105		 */
 106		max_entries = index_mask + 1;
 107		/* Check for overflows. */
 108		if (max_entries < attr->max_entries)
 109			return ERR_PTR(-E2BIG);
 110	}
 111
 112	array_size = sizeof(*array);
 113	if (percpu) {
 114		array_size += (u64) max_entries * sizeof(void *);
 115	} else {
 116		/* rely on vmalloc() to return page-aligned memory and
 117		 * ensure array->value is exactly page-aligned
 118		 */
 119		if (attr->map_flags & BPF_F_MMAPABLE) {
 120			array_size = PAGE_ALIGN(array_size);
 121			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
 122		} else {
 123			array_size += (u64) max_entries * elem_size;
 124		}
 125	}
 126
 127	/* allocate all map elements and zero-initialize them */
 128	if (attr->map_flags & BPF_F_MMAPABLE) {
 129		void *data;
 130
 131		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
 132		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
 133		if (!data)
 134			return ERR_PTR(-ENOMEM);
 135		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
 136			- offsetof(struct bpf_array, value);
 137	} else {
 138		array = bpf_map_area_alloc(array_size, numa_node);
 139	}
 140	if (!array)
 141		return ERR_PTR(-ENOMEM);
 142	array->index_mask = index_mask;
 143	array->map.bypass_spec_v1 = bypass_spec_v1;
 144
 145	/* copy mandatory map attributes */
 146	bpf_map_init_from_attr(&array->map, attr);
 147	array->elem_size = elem_size;
 148
 149	if (percpu && bpf_array_alloc_percpu(array)) {
 150		bpf_map_area_free(array);
 151		return ERR_PTR(-ENOMEM);
 152	}
 153
 154	return &array->map;
 155}
 156
 157static void *array_map_elem_ptr(struct bpf_array* array, u32 index)
 158{
 159	return array->value + (u64)array->elem_size * index;
 160}
 161
 162/* Called from syscall or from eBPF program */
 163static void *array_map_lookup_elem(struct bpf_map *map, void *key)
 164{
 165	struct bpf_array *array = container_of(map, struct bpf_array, map);
 166	u32 index = *(u32 *)key;
 167
 168	if (unlikely(index >= array->map.max_entries))
 169		return NULL;
 170
 171	return array->value + (u64)array->elem_size * (index & array->index_mask);
 172}
 173
 174static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
 175				       u32 off)
 176{
 177	struct bpf_array *array = container_of(map, struct bpf_array, map);
 178
 179	if (map->max_entries != 1)
 180		return -ENOTSUPP;
 181	if (off >= map->value_size)
 182		return -EINVAL;
 183
 184	*imm = (unsigned long)array->value;
 185	return 0;
 186}
 187
 188static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
 189				       u32 *off)
 190{
 191	struct bpf_array *array = container_of(map, struct bpf_array, map);
 192	u64 base = (unsigned long)array->value;
 193	u64 range = array->elem_size;
 194
 195	if (map->max_entries != 1)
 196		return -ENOTSUPP;
 197	if (imm < base || imm >= base + range)
 198		return -ENOENT;
 199
 200	*off = imm - base;
 201	return 0;
 202}
 203
 204/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
 205static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 206{
 207	struct bpf_array *array = container_of(map, struct bpf_array, map);
 208	struct bpf_insn *insn = insn_buf;
 209	u32 elem_size = array->elem_size;
 210	const int ret = BPF_REG_0;
 211	const int map_ptr = BPF_REG_1;
 212	const int index = BPF_REG_2;
 213
 214	if (map->map_flags & BPF_F_INNER_MAP)
 215		return -EOPNOTSUPP;
 216
 217	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
 218	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
 219	if (!map->bypass_spec_v1) {
 220		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
 221		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
 222	} else {
 223		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
 224	}
 225
 226	if (is_power_of_2(elem_size)) {
 227		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
 228	} else {
 229		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
 230	}
 231	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
 232	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
 233	*insn++ = BPF_MOV64_IMM(ret, 0);
 234	return insn - insn_buf;
 235}
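
/* For reference, the inlined sequence above computes roughly:
 *
 *	index = *(u32 *)key;
 *	if (index >= map->max_entries)
 *		return NULL;
 *	index &= array->index_mask;	// only with Spectre v1 mitigation on
 *	return array->value + index * elem_size;
 *
 * with the multiply strength-reduced to a shift when elem_size is a
 * power of 2.
 */
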
 236
 237/* Called from eBPF program */
 238static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
 239{
 240	struct bpf_array *array = container_of(map, struct bpf_array, map);
 241	u32 index = *(u32 *)key;
 242
 243	if (unlikely(index >= array->map.max_entries))
 244		return NULL;
 245
 246	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
 247}
  248
 249static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
 250{
 251	struct bpf_array *array = container_of(map, struct bpf_array, map);
 252	u32 index = *(u32 *)key;
 253
 254	if (cpu >= nr_cpu_ids)
 255		return NULL;
 256
 257	if (unlikely(index >= array->map.max_entries))
 258		return NULL;
 259
 260	return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
 261}
 262
 263int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
 264{
 265	struct bpf_array *array = container_of(map, struct bpf_array, map);
 266	u32 index = *(u32 *)key;
 267	void __percpu *pptr;
 268	int cpu, off = 0;
 269	u32 size;
 270
 271	if (unlikely(index >= array->map.max_entries))
 272		return -ENOENT;
 273
 274	/* per_cpu areas are zero-filled and bpf programs can only
 275	 * access 'value_size' of them, so copying rounded areas
 276	 * will not leak any kernel data
 277	 */
 278	size = array->elem_size;
 279	rcu_read_lock();
 280	pptr = array->pptrs[index & array->index_mask];
 281	for_each_possible_cpu(cpu) {
 282		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
 283		check_and_init_map_value(map, value + off);
 284		off += size;
 285	}
 286	rcu_read_unlock();
 287	return 0;
 288}
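
/* Usage sketch (illustrative, assumes libbpf and a __u64 value type):
 * the syscall-side lookup serviced by bpf_percpu_array_copy() above
 * returns one value per possible CPU, each padded to
 * round_up(value_size, 8) bytes.
 */
#include <errno.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int read_percpu_elem(int map_fd, __u32 index, __u64 *sum)
{
	int cpu, ncpus = libbpf_num_possible_cpus();
	__u64 *vals;
	int err;

	if (ncpus < 0)
		return ncpus;
	vals = calloc(ncpus, sizeof(*vals));	/* one 8-byte slot per CPU */
	if (!vals)
		return -ENOMEM;
	err = bpf_map_lookup_elem(map_fd, &index, vals);
	for (cpu = 0; !err && cpu < ncpus; cpu++)
		*sum += vals[cpu];	/* per-CPU copies of this element */
	free(vals);
	return err;
}
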
 289
 290/* Called from syscall */
 291static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 292{
 293	struct bpf_array *array = container_of(map, struct bpf_array, map);
 294	u32 index = key ? *(u32 *)key : U32_MAX;
 295	u32 *next = (u32 *)next_key;
 296
 297	if (index >= array->map.max_entries) {
 298		*next = 0;
 299		return 0;
 300	}
 301
 302	if (index == array->map.max_entries - 1)
 303		return -ENOENT;
 304
 305	*next = index + 1;
 306	return 0;
 307}
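
/* Usage sketch (illustrative, assumes libbpf): walking all indices via
 * the get_next_key contract above. A NULL or out-of-range key yields
 * index 0; the last index returns -ENOENT and ends the walk.
 */
#include <bpf/bpf.h>

static void walk_array_keys(int map_fd)
{
	__u32 key, next;

	if (bpf_map_get_next_key(map_fd, NULL, &next))
		return;
	do {
		key = next;
		/* process element 'key' here */
	} while (!bpf_map_get_next_key(map_fd, &key, &next));
}
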
 308
 309/* Called from syscall or from eBPF program */
 310static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
 311				 u64 map_flags)
 312{
 313	struct bpf_array *array = container_of(map, struct bpf_array, map);
 314	u32 index = *(u32 *)key;
 315	char *val;
 316
 317	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
 318		/* unknown flags */
 319		return -EINVAL;
 320
 321	if (unlikely(index >= array->map.max_entries))
 322		/* all elements were pre-allocated, cannot insert a new one */
 323		return -E2BIG;
 324
 325	if (unlikely(map_flags & BPF_NOEXIST))
 326		/* all elements already exist */
 327		return -EEXIST;
 328
 329	if (unlikely((map_flags & BPF_F_LOCK) &&
 330		     !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
 331		return -EINVAL;
 332
 333	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 334		val = this_cpu_ptr(array->pptrs[index & array->index_mask]);
 335		copy_map_value(map, val, value);
 336		bpf_obj_free_fields(array->map.record, val);
 337	} else {
 338		val = array->value +
 339			(u64)array->elem_size * (index & array->index_mask);
 340		if (map_flags & BPF_F_LOCK)
 341			copy_map_value_locked(map, val, value, false);
 342		else
 343			copy_map_value(map, val, value);
 344		bpf_obj_free_fields(array->map.record, val);
 345	}
 346	return 0;
 347}
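
/* Usage sketch (illustrative, assumes libbpf): the user-space update
 * path lands in array_map_update_elem() above. BPF_NOEXIST always
 * fails on arrays because every element exists after creation.
 */
#include <bpf/bpf.h>

static int set_array_elem(int map_fd, __u32 index, __u64 value)
{
	/* BPF_ANY or BPF_EXIST succeed; BPF_NOEXIST returns -EEXIST */
	return bpf_map_update_elem(map_fd, &index, &value, BPF_ANY);
}
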
 348
 349int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
 350			    u64 map_flags)
 351{
 352	struct bpf_array *array = container_of(map, struct bpf_array, map);
 353	u32 index = *(u32 *)key;
 354	void __percpu *pptr;
 355	int cpu, off = 0;
 356	u32 size;
 357
 358	if (unlikely(map_flags > BPF_EXIST))
 359		/* unknown flags */
 360		return -EINVAL;
 361
 362	if (unlikely(index >= array->map.max_entries))
 363		/* all elements were pre-allocated, cannot insert a new one */
 364		return -E2BIG;
 365
 366	if (unlikely(map_flags == BPF_NOEXIST))
 367		/* all elements already exist */
 368		return -EEXIST;
 369
  370	/* user space will provide round_up(value_size, 8) bytes that
  371	 * will be copied into the per-cpu area. bpf programs can only
  372	 * access value_size of it. During lookup the same extra bytes are
  373	 * returned as-is or as zeros (the area was zero-filled by
  374	 * percpu_alloc), so no kernel data leak is possible.
  375	 */
 376	size = array->elem_size;
 377	rcu_read_lock();
 378	pptr = array->pptrs[index & array->index_mask];
 379	for_each_possible_cpu(cpu) {
 380		copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
 381		bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu));
 382		off += size;
 383	}
 384	rcu_read_unlock();
 385	return 0;
 386}
 387
 388/* Called from syscall or from eBPF program */
 389static int array_map_delete_elem(struct bpf_map *map, void *key)
 390{
 391	return -EINVAL;
 392}
 393
 394static void *array_map_vmalloc_addr(struct bpf_array *array)
 395{
 396	return (void *)round_down((unsigned long)array, PAGE_SIZE);
 397}
 398
 399static void array_map_free_timers(struct bpf_map *map)
 400{
 401	struct bpf_array *array = container_of(map, struct bpf_array, map);
 402	int i;
 403
  404	/* We don't reset or free fields other than the timer when the uref drops to zero. */
 405	if (!btf_record_has_field(map->record, BPF_TIMER))
 406		return;
 407
 408	for (i = 0; i < array->map.max_entries; i++)
  409		bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i));
 410}
 411
 412/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
 413static void array_map_free(struct bpf_map *map)
 414{
 415	struct bpf_array *array = container_of(map, struct bpf_array, map);
 416	int i;
 417
 418	if (!IS_ERR_OR_NULL(map->record)) {
 419		if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 420			for (i = 0; i < array->map.max_entries; i++) {
 421				void __percpu *pptr = array->pptrs[i & array->index_mask];
 422				int cpu;
 423
 424				for_each_possible_cpu(cpu) {
 425					bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu));
 426					cond_resched();
 427				}
 428			}
 429		} else {
 430			for (i = 0; i < array->map.max_entries; i++)
 431				bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i));
 432		}
 433	}
 434
 435	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
 436		bpf_array_free_percpu(array);
 437
 438	if (array->map.map_flags & BPF_F_MMAPABLE)
 439		bpf_map_area_free(array_map_vmalloc_addr(array));
 440	else
 441		bpf_map_area_free(array);
 442}
 443
 444static void array_map_seq_show_elem(struct bpf_map *map, void *key,
 445				    struct seq_file *m)
 446{
 447	void *value;
 448
 449	rcu_read_lock();
 450
 451	value = array_map_lookup_elem(map, key);
 452	if (!value) {
 453		rcu_read_unlock();
 454		return;
 455	}
 456
 457	if (map->btf_key_type_id)
 458		seq_printf(m, "%u: ", *(u32 *)key);
 459	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
 460	seq_puts(m, "\n");
 461
 462	rcu_read_unlock();
 463}
 464
 465static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
 466					   struct seq_file *m)
 467{
 468	struct bpf_array *array = container_of(map, struct bpf_array, map);
 469	u32 index = *(u32 *)key;
 470	void __percpu *pptr;
 471	int cpu;
 472
 473	rcu_read_lock();
 474
 475	seq_printf(m, "%u: {\n", *(u32 *)key);
 476	pptr = array->pptrs[index & array->index_mask];
 477	for_each_possible_cpu(cpu) {
 478		seq_printf(m, "\tcpu%d: ", cpu);
 479		btf_type_seq_show(map->btf, map->btf_value_type_id,
 480				  per_cpu_ptr(pptr, cpu), m);
 481		seq_puts(m, "\n");
 482	}
 483	seq_puts(m, "}\n");
 484
 485	rcu_read_unlock();
 486}
 487
 488static int array_map_check_btf(const struct bpf_map *map,
 489			       const struct btf *btf,
 490			       const struct btf_type *key_type,
 491			       const struct btf_type *value_type)
 492{
 493	u32 int_data;
 494
 495	/* One exception for keyless BTF: .bss/.data/.rodata map */
 496	if (btf_type_is_void(key_type)) {
 497		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
 498		    map->max_entries != 1)
 499			return -EINVAL;
 500
 501		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
 502			return -EINVAL;
 503
 504		return 0;
 505	}
 506
 507	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
 508		return -EINVAL;
 509
 510	int_data = *(u32 *)(key_type + 1);
 511	/* bpf array can only take a u32 key. This check makes sure
 512	 * that the btf matches the attr used during map_create.
 513	 */
 514	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
 515		return -EINVAL;
 516
 517	return 0;
 518}
 519
 520static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
 521{
 522	struct bpf_array *array = container_of(map, struct bpf_array, map);
 523	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;
 524
 525	if (!(map->map_flags & BPF_F_MMAPABLE))
 526		return -EINVAL;
 527
 528	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
 529	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
 530		return -EINVAL;
 531
 532	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
 533				   vma->vm_pgoff + pgoff);
 534}
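
/* Usage sketch (illustrative): mapping a BPF_F_MMAPABLE array into
 * user space. The checks above bound offset + length to
 * PAGE_ALIGN(max_entries * round_up(value_size, 8)); the hypothetical
 * 'value_area_size' below must respect that bound.
 */
#include <sys/mman.h>

static void *mmap_array_values(int map_fd, size_t value_area_size)
{
	return mmap(NULL, value_area_size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, map_fd, 0);
}
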
 535
 536static bool array_map_meta_equal(const struct bpf_map *meta0,
 537				 const struct bpf_map *meta1)
 538{
 539	if (!bpf_map_meta_equal(meta0, meta1))
 540		return false;
 541	return meta0->map_flags & BPF_F_INNER_MAP ? true :
 542	       meta0->max_entries == meta1->max_entries;
 543}
 544
 545struct bpf_iter_seq_array_map_info {
 546	struct bpf_map *map;
 547	void *percpu_value_buf;
 548	u32 index;
 549};
 550
 551static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
 552{
 553	struct bpf_iter_seq_array_map_info *info = seq->private;
 554	struct bpf_map *map = info->map;
 555	struct bpf_array *array;
 556	u32 index;
 557
 558	if (info->index >= map->max_entries)
 559		return NULL;
 560
 561	if (*pos == 0)
 562		++*pos;
 563	array = container_of(map, struct bpf_array, map);
 564	index = info->index & array->index_mask;
 565	if (info->percpu_value_buf)
 566	       return array->pptrs[index];
 567	return array_map_elem_ptr(array, index);
 568}
 569
 570static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 571{
 572	struct bpf_iter_seq_array_map_info *info = seq->private;
 573	struct bpf_map *map = info->map;
 574	struct bpf_array *array;
 575	u32 index;
 576
 577	++*pos;
 578	++info->index;
 579	if (info->index >= map->max_entries)
 580		return NULL;
 581
 582	array = container_of(map, struct bpf_array, map);
 583	index = info->index & array->index_mask;
 584	if (info->percpu_value_buf)
 585	       return array->pptrs[index];
 586	return array_map_elem_ptr(array, index);
 587}
 588
 589static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
 590{
 591	struct bpf_iter_seq_array_map_info *info = seq->private;
 592	struct bpf_iter__bpf_map_elem ctx = {};
 593	struct bpf_map *map = info->map;
 594	struct bpf_array *array = container_of(map, struct bpf_array, map);
 595	struct bpf_iter_meta meta;
 596	struct bpf_prog *prog;
 597	int off = 0, cpu = 0;
 598	void __percpu **pptr;
 599	u32 size;
 600
 601	meta.seq = seq;
 602	prog = bpf_iter_get_info(&meta, v == NULL);
 603	if (!prog)
 604		return 0;
 605
 606	ctx.meta = &meta;
 607	ctx.map = info->map;
 608	if (v) {
 609		ctx.key = &info->index;
 610
 611		if (!info->percpu_value_buf) {
 612			ctx.value = v;
 613		} else {
 614			pptr = v;
 615			size = array->elem_size;
 616			for_each_possible_cpu(cpu) {
 617				copy_map_value_long(map, info->percpu_value_buf + off,
 618						    per_cpu_ptr(pptr, cpu));
 619				check_and_init_map_value(map, info->percpu_value_buf + off);
 620				off += size;
 621			}
 622			ctx.value = info->percpu_value_buf;
 623		}
 624	}
 625
 626	return bpf_iter_run_prog(prog, &ctx);
 627}
 628
 629static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
 630{
 631	return __bpf_array_map_seq_show(seq, v);
 632}
 633
 634static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
 635{
 636	if (!v)
 637		(void)__bpf_array_map_seq_show(seq, NULL);
 638}
 639
 640static int bpf_iter_init_array_map(void *priv_data,
 641				   struct bpf_iter_aux_info *aux)
 642{
 643	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
 644	struct bpf_map *map = aux->map;
 645	struct bpf_array *array = container_of(map, struct bpf_array, map);
 646	void *value_buf;
 647	u32 buf_size;
 648
 649	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 650		buf_size = array->elem_size * num_possible_cpus();
 651		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
 652		if (!value_buf)
 653			return -ENOMEM;
 654
 655		seq_info->percpu_value_buf = value_buf;
 656	}
 657
 658	/* bpf_iter_attach_map() acquires a map uref, and the uref may be
 659	 * released before or in the middle of iterating map elements, so
 660	 * acquire an extra map uref for iterator.
 661	 */
 662	bpf_map_inc_with_uref(map);
 663	seq_info->map = map;
 664	return 0;
 665}
 666
 667static void bpf_iter_fini_array_map(void *priv_data)
 668{
 669	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
 670
 671	bpf_map_put_with_uref(seq_info->map);
 672	kfree(seq_info->percpu_value_buf);
 673}
 674
 675static const struct seq_operations bpf_array_map_seq_ops = {
 676	.start	= bpf_array_map_seq_start,
 677	.next	= bpf_array_map_seq_next,
 678	.stop	= bpf_array_map_seq_stop,
 679	.show	= bpf_array_map_seq_show,
 680};
 681
 682static const struct bpf_iter_seq_info iter_seq_info = {
 683	.seq_ops		= &bpf_array_map_seq_ops,
 684	.init_seq_private	= bpf_iter_init_array_map,
 685	.fini_seq_private	= bpf_iter_fini_array_map,
 686	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
 687};
 688
 689static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
 690				   void *callback_ctx, u64 flags)
 691{
 692	u32 i, key, num_elems = 0;
 693	struct bpf_array *array;
 694	bool is_percpu;
 695	u64 ret = 0;
 696	void *val;
 697
 698	if (flags != 0)
 699		return -EINVAL;
 700
 701	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
 702	array = container_of(map, struct bpf_array, map);
 703	if (is_percpu)
 704		migrate_disable();
 705	for (i = 0; i < map->max_entries; i++) {
 706		if (is_percpu)
 707			val = this_cpu_ptr(array->pptrs[i]);
 708		else
 709			val = array_map_elem_ptr(array, i);
 710		num_elems++;
 711		key = i;
 712		ret = callback_fn((u64)(long)map, (u64)(long)&key,
 713				  (u64)(long)val, (u64)(long)callback_ctx, 0);
 714		/* return value: 0 - continue, 1 - stop and return */
 715		if (ret)
 716			break;
 717	}
 718
 719	if (is_percpu)
 720		migrate_enable();
 721	return num_elems;
 722}
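
/* Usage sketch (illustrative BPF C, assumes the usual libbpf headers
 * and a hypothetical 'example_array' map in scope): the kernel loop
 * above services the bpf_for_each_map_elem() helper. The callback
 * returns 0 to continue or 1 to stop.
 */
static long sum_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
{
	*(__u64 *)ctx += *val;
	return 0;
}

SEC("tracepoint/syscalls/sys_enter_getpid")
int sum_array(void *ctx)
{
	__u64 sum = 0;

	bpf_for_each_map_elem(&example_array, sum_cb, &sum, 0);
	return 0;
}
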
  723
 724BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array)
 725const struct bpf_map_ops array_map_ops = {
 726	.map_meta_equal = array_map_meta_equal,
 727	.map_alloc_check = array_map_alloc_check,
 728	.map_alloc = array_map_alloc,
 729	.map_free = array_map_free,
 730	.map_get_next_key = array_map_get_next_key,
 731	.map_release_uref = array_map_free_timers,
 732	.map_lookup_elem = array_map_lookup_elem,
 733	.map_update_elem = array_map_update_elem,
 734	.map_delete_elem = array_map_delete_elem,
 735	.map_gen_lookup = array_map_gen_lookup,
 736	.map_direct_value_addr = array_map_direct_value_addr,
 737	.map_direct_value_meta = array_map_direct_value_meta,
 738	.map_mmap = array_map_mmap,
 739	.map_seq_show_elem = array_map_seq_show_elem,
 740	.map_check_btf = array_map_check_btf,
 741	.map_lookup_batch = generic_map_lookup_batch,
 742	.map_update_batch = generic_map_update_batch,
 743	.map_set_for_each_callback_args = map_set_for_each_callback_args,
  744	.map_set_for_each_callback_args = map_set_for_each_callback_args,
 745	.map_btf_id = &array_map_btf_ids[0],
 746	.iter_seq_info = &iter_seq_info,
 747};
 748
 749const struct bpf_map_ops percpu_array_map_ops = {
 750	.map_meta_equal = bpf_map_meta_equal,
 751	.map_alloc_check = array_map_alloc_check,
 752	.map_alloc = array_map_alloc,
 753	.map_free = array_map_free,
 754	.map_get_next_key = array_map_get_next_key,
  755	.map_lookup_elem = percpu_array_map_lookup_elem,
 756	.map_update_elem = array_map_update_elem,
 757	.map_delete_elem = array_map_delete_elem,
 758	.map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
 759	.map_seq_show_elem = percpu_array_map_seq_show_elem,
 760	.map_check_btf = array_map_check_btf,
 761	.map_lookup_batch = generic_map_lookup_batch,
 762	.map_update_batch = generic_map_update_batch,
 763	.map_set_for_each_callback_args = map_set_for_each_callback_args,
  764	.map_for_each_callback = bpf_for_each_array_elem,
 765	.map_btf_id = &array_map_btf_ids[0],
 766	.iter_seq_info = &iter_seq_info,
 767};
 768
 769static int fd_array_map_alloc_check(union bpf_attr *attr)
 770{
 771	/* only file descriptors can be stored in this type of map */
 772	if (attr->value_size != sizeof(u32))
 773		return -EINVAL;
 774	/* Program read-only/write-only not supported for special maps yet. */
 775	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
 776		return -EINVAL;
 777	return array_map_alloc_check(attr);
 778}
 779
 780static void fd_array_map_free(struct bpf_map *map)
 781{
 782	struct bpf_array *array = container_of(map, struct bpf_array, map);
 783	int i;
 784
 785	/* make sure it's empty */
 786	for (i = 0; i < array->map.max_entries; i++)
 787		BUG_ON(array->ptrs[i] != NULL);
 788
 789	bpf_map_area_free(array);
 790}
 791
 792static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
 793{
 794	return ERR_PTR(-EOPNOTSUPP);
 795}
 796
 797/* only called from syscall */
 798int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
 799{
 800	void **elem, *ptr;
 801	int ret =  0;
 802
 803	if (!map->ops->map_fd_sys_lookup_elem)
 804		return -ENOTSUPP;
 805
 806	rcu_read_lock();
 807	elem = array_map_lookup_elem(map, key);
 808	if (elem && (ptr = READ_ONCE(*elem)))
 809		*value = map->ops->map_fd_sys_lookup_elem(ptr);
 810	else
 811		ret = -ENOENT;
 812	rcu_read_unlock();
 813
 814	return ret;
 815}
 816
 817/* only called from syscall */
 818int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
 819				 void *key, void *value, u64 map_flags)
 820{
 821	struct bpf_array *array = container_of(map, struct bpf_array, map);
 822	void *new_ptr, *old_ptr;
 823	u32 index = *(u32 *)key, ufd;
 824
 825	if (map_flags != BPF_ANY)
 826		return -EINVAL;
 827
 828	if (index >= array->map.max_entries)
 829		return -E2BIG;
 830
 831	ufd = *(u32 *)value;
 832	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
 833	if (IS_ERR(new_ptr))
 834		return PTR_ERR(new_ptr);
 835
 836	if (map->ops->map_poke_run) {
 837		mutex_lock(&array->aux->poke_mutex);
 838		old_ptr = xchg(array->ptrs + index, new_ptr);
 839		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
 840		mutex_unlock(&array->aux->poke_mutex);
 841	} else {
 842		old_ptr = xchg(array->ptrs + index, new_ptr);
 843	}
 844
 845	if (old_ptr)
 846		map->ops->map_fd_put_ptr(old_ptr);
 847	return 0;
 848}
 849
 850static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
 851{
 852	struct bpf_array *array = container_of(map, struct bpf_array, map);
 853	void *old_ptr;
 854	u32 index = *(u32 *)key;
 855
 856	if (index >= array->map.max_entries)
 857		return -E2BIG;
 858
 859	if (map->ops->map_poke_run) {
 860		mutex_lock(&array->aux->poke_mutex);
 861		old_ptr = xchg(array->ptrs + index, NULL);
 862		map->ops->map_poke_run(map, index, old_ptr, NULL);
 863		mutex_unlock(&array->aux->poke_mutex);
 864	} else {
 865		old_ptr = xchg(array->ptrs + index, NULL);
 866	}
 867
 868	if (old_ptr) {
 869		map->ops->map_fd_put_ptr(old_ptr);
 870		return 0;
 871	} else {
 872		return -ENOENT;
 873	}
 874}
  875
 876static void *prog_fd_array_get_ptr(struct bpf_map *map,
 877				   struct file *map_file, int fd)
 878{
  879	struct bpf_prog *prog = bpf_prog_get(fd);
 880
 881	if (IS_ERR(prog))
 882		return prog;
 883
  884	if (!bpf_prog_map_compatible(map, prog)) {
 885		bpf_prog_put(prog);
 886		return ERR_PTR(-EINVAL);
 887	}
  888
 889	return prog;
 890}
 891
 892static void prog_fd_array_put_ptr(void *ptr)
 893{
  894	bpf_prog_put(ptr);
 895}
 896
 897static u32 prog_fd_array_sys_lookup_elem(void *ptr)
 898{
 899	return ((struct bpf_prog *)ptr)->aux->id;
 900}
 901
 902/* decrement refcnt of all bpf_progs that are stored in this map */
 903static void bpf_fd_array_map_clear(struct bpf_map *map)
 904{
 905	struct bpf_array *array = container_of(map, struct bpf_array, map);
 906	int i;
 907
 908	for (i = 0; i < array->map.max_entries; i++)
 909		fd_array_map_delete_elem(map, &i);
 910}
 911
 912static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
 913					 struct seq_file *m)
 914{
 915	void **elem, *ptr;
 916	u32 prog_id;
 917
 918	rcu_read_lock();
 919
 920	elem = array_map_lookup_elem(map, key);
 921	if (elem) {
 922		ptr = READ_ONCE(*elem);
 923		if (ptr) {
 924			seq_printf(m, "%u: ", *(u32 *)key);
 925			prog_id = prog_fd_array_sys_lookup_elem(ptr);
 926			btf_type_seq_show(map->btf, map->btf_value_type_id,
 927					  &prog_id, m);
 928			seq_puts(m, "\n");
 929		}
 930	}
 931
 932	rcu_read_unlock();
 933}
 934
 935struct prog_poke_elem {
 936	struct list_head list;
 937	struct bpf_prog_aux *aux;
 938};
 939
 940static int prog_array_map_poke_track(struct bpf_map *map,
 941				     struct bpf_prog_aux *prog_aux)
 942{
 943	struct prog_poke_elem *elem;
 944	struct bpf_array_aux *aux;
 945	int ret = 0;
 946
 947	aux = container_of(map, struct bpf_array, map)->aux;
 948	mutex_lock(&aux->poke_mutex);
 949	list_for_each_entry(elem, &aux->poke_progs, list) {
 950		if (elem->aux == prog_aux)
 951			goto out;
 952	}
 953
 954	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
 955	if (!elem) {
 956		ret = -ENOMEM;
 957		goto out;
 958	}
 959
 960	INIT_LIST_HEAD(&elem->list);
 961	/* We must track the program's aux info at this point in time
 962	 * since the program pointer itself may not be stable yet, see
 963	 * also comment in prog_array_map_poke_run().
 964	 */
 965	elem->aux = prog_aux;
 966
 967	list_add_tail(&elem->list, &aux->poke_progs);
 968out:
 969	mutex_unlock(&aux->poke_mutex);
 970	return ret;
 971}
 972
 973static void prog_array_map_poke_untrack(struct bpf_map *map,
 974					struct bpf_prog_aux *prog_aux)
 975{
 976	struct prog_poke_elem *elem, *tmp;
 977	struct bpf_array_aux *aux;
 978
 979	aux = container_of(map, struct bpf_array, map)->aux;
 980	mutex_lock(&aux->poke_mutex);
 981	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
 982		if (elem->aux == prog_aux) {
 983			list_del_init(&elem->list);
 984			kfree(elem);
 985			break;
 986		}
 987	}
 988	mutex_unlock(&aux->poke_mutex);
 989}
  990
 991static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
 992				    struct bpf_prog *old,
 993				    struct bpf_prog *new)
 994{
 995	u8 *old_addr, *new_addr, *old_bypass_addr;
 996	struct prog_poke_elem *elem;
 997	struct bpf_array_aux *aux;
 998
 999	aux = container_of(map, struct bpf_array, map)->aux;
1000	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));
1001
1002	list_for_each_entry(elem, &aux->poke_progs, list) {
1003		struct bpf_jit_poke_descriptor *poke;
1004		int i, ret;
1005
1006		for (i = 0; i < elem->aux->size_poke_tab; i++) {
1007			poke = &elem->aux->poke_tab[i];
1008
1009			/* Few things to be aware of:
1010			 *
1011			 * 1) We can only ever access aux in this context, but
1012			 *    not aux->prog since it might not be stable yet and
1013			 *    there could be danger of use after free otherwise.
1014			 * 2) Initially when we start tracking aux, the program
1015			 *    is not JITed yet and also does not have a kallsyms
1016			 *    entry. We skip these as poke->tailcall_target_stable
1017			 *    is not active yet. The JIT will do the final fixup
1018			 *    before setting it stable. The various
1019			 *    poke->tailcall_target_stable are successively
1020			 *    activated, so tail call updates can arrive from here
1021			 *    while JIT is still finishing its final fixup for
1022			 *    non-activated poke entries.
1023			 * 3) On program teardown, the program's kallsym entry gets
1024			 *    removed out of RCU callback, but we can only untrack
1025			 *    from sleepable context, therefore bpf_arch_text_poke()
1026			 *    might not see that this is in BPF text section and
1027			 *    bails out with -EINVAL. As these are unreachable since
1028			 *    RCU grace period already passed, we simply skip them.
 1029			 * 4) Also, programs reaching a refcount of zero while patching
 1030			 *    is in progress are okay since we're protected under
1031			 *    poke_mutex and untrack the programs before the JIT
1032			 *    buffer is freed. When we're still in the middle of
1033			 *    patching and suddenly kallsyms entry of the program
1034			 *    gets evicted, we just skip the rest which is fine due
1035			 *    to point 3).
1036			 * 5) Any other error happening below from bpf_arch_text_poke()
 1037			 *    is an unexpected bug.
1038			 */
1039			if (!READ_ONCE(poke->tailcall_target_stable))
1040				continue;
1041			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
1042				continue;
1043			if (poke->tail_call.map != map ||
1044			    poke->tail_call.key != key)
1045				continue;
1046
1047			old_bypass_addr = old ? NULL : poke->bypass_addr;
1048			old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
1049			new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
1050
1051			if (new) {
1052				ret = bpf_arch_text_poke(poke->tailcall_target,
1053							 BPF_MOD_JUMP,
1054							 old_addr, new_addr);
1055				BUG_ON(ret < 0 && ret != -EINVAL);
1056				if (!old) {
1057					ret = bpf_arch_text_poke(poke->tailcall_bypass,
1058								 BPF_MOD_JUMP,
1059								 poke->bypass_addr,
1060								 NULL);
1061					BUG_ON(ret < 0 && ret != -EINVAL);
1062				}
1063			} else {
1064				ret = bpf_arch_text_poke(poke->tailcall_bypass,
1065							 BPF_MOD_JUMP,
1066							 old_bypass_addr,
1067							 poke->bypass_addr);
1068				BUG_ON(ret < 0 && ret != -EINVAL);
 1069				/* let other CPUs finish executing the program
 1070				 * so that it is not possible to expose them
 1071				 * to an invalid nop, stack unwind or nop state
1072				 */
1073				if (!ret)
1074					synchronize_rcu();
1075				ret = bpf_arch_text_poke(poke->tailcall_target,
1076							 BPF_MOD_JUMP,
1077							 old_addr, NULL);
1078				BUG_ON(ret < 0 && ret != -EINVAL);
1079			}
1080		}
1081	}
1082}
1083
1084static void prog_array_map_clear_deferred(struct work_struct *work)
1085{
1086	struct bpf_map *map = container_of(work, struct bpf_array_aux,
1087					   work)->map;
1088	bpf_fd_array_map_clear(map);
1089	bpf_map_put(map);
1090}
1091
1092static void prog_array_map_clear(struct bpf_map *map)
1093{
1094	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
1095						 map)->aux;
1096	bpf_map_inc(map);
1097	schedule_work(&aux->work);
1098}
1099
1100static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
1101{
1102	struct bpf_array_aux *aux;
1103	struct bpf_map *map;
1104
1105	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
1106	if (!aux)
1107		return ERR_PTR(-ENOMEM);
1108
1109	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
1110	INIT_LIST_HEAD(&aux->poke_progs);
1111	mutex_init(&aux->poke_mutex);
1112
1113	map = array_map_alloc(attr);
1114	if (IS_ERR(map)) {
1115		kfree(aux);
1116		return map;
1117	}
1118
1119	container_of(map, struct bpf_array, map)->aux = aux;
1120	aux->map = map;
1121
1122	return map;
1123}
1124
1125static void prog_array_map_free(struct bpf_map *map)
1126{
1127	struct prog_poke_elem *elem, *tmp;
1128	struct bpf_array_aux *aux;
1129
1130	aux = container_of(map, struct bpf_array, map)->aux;
1131	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
1132		list_del_init(&elem->list);
1133		kfree(elem);
1134	}
1135	kfree(aux);
1136	fd_array_map_free(map);
1137}
1138
1139/* prog_array->aux->{type,jited} is a runtime binding.
1140 * Doing static check alone in the verifier is not enough.
1141 * Thus, prog_array_map cannot be used as an inner_map
1142 * and map_meta_equal is not implemented.
1143 */
1144const struct bpf_map_ops prog_array_map_ops = {
1145	.map_alloc_check = fd_array_map_alloc_check,
1146	.map_alloc = prog_array_map_alloc,
1147	.map_free = prog_array_map_free,
1148	.map_poke_track = prog_array_map_poke_track,
1149	.map_poke_untrack = prog_array_map_poke_untrack,
1150	.map_poke_run = prog_array_map_poke_run,
1151	.map_get_next_key = array_map_get_next_key,
1152	.map_lookup_elem = fd_array_map_lookup_elem,
1153	.map_delete_elem = fd_array_map_delete_elem,
1154	.map_fd_get_ptr = prog_fd_array_get_ptr,
1155	.map_fd_put_ptr = prog_fd_array_put_ptr,
1156	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
1157	.map_release_uref = prog_array_map_clear,
 1158	.map_seq_show_elem = prog_array_map_seq_show_elem,
1159	.map_btf_id = &array_map_btf_ids[0],
1160};
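
/* Usage sketch (illustrative BPF C, assumes libbpf headers): a prog
 * array used as a tail-call jump table. On success bpf_tail_call()
 * does not return; execution falls through when the slot is empty.
 */
struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 8);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("xdp")
int dispatcher(struct xdp_md *ctx)
{
	bpf_tail_call(ctx, &jmp_table, 0);
	return XDP_PASS;
}
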
1161
1162static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
1163						   struct file *map_file)
1164{
1165	struct bpf_event_entry *ee;
1166
1167	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
1168	if (ee) {
1169		ee->event = perf_file->private_data;
1170		ee->perf_file = perf_file;
1171		ee->map_file = map_file;
1172	}
1173
1174	return ee;
1175}
1176
1177static void __bpf_event_entry_free(struct rcu_head *rcu)
1178{
1179	struct bpf_event_entry *ee;
1180
1181	ee = container_of(rcu, struct bpf_event_entry, rcu);
1182	fput(ee->perf_file);
1183	kfree(ee);
1184}
1185
1186static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
1187{
1188	call_rcu(&ee->rcu, __bpf_event_entry_free);
1189}
1190
1191static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
1192					 struct file *map_file, int fd)
1193{
1194	struct bpf_event_entry *ee;
1195	struct perf_event *event;
1196	struct file *perf_file;
1197	u64 value;
1198
1199	perf_file = perf_event_get(fd);
1200	if (IS_ERR(perf_file))
1201		return perf_file;
1202
1203	ee = ERR_PTR(-EOPNOTSUPP);
1204	event = perf_file->private_data;
1205	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
1206		goto err_out;
1207
1208	ee = bpf_event_entry_gen(perf_file, map_file);
1209	if (ee)
1210		return ee;
1211	ee = ERR_PTR(-ENOMEM);
1212err_out:
1213	fput(perf_file);
1214	return ee;
1215}
1216
1217static void perf_event_fd_array_put_ptr(void *ptr)
 1218{
1219	bpf_event_entry_free_rcu(ptr);
1220}
1221
1222static void perf_event_fd_array_release(struct bpf_map *map,
1223					struct file *map_file)
1224{
1225	struct bpf_array *array = container_of(map, struct bpf_array, map);
1226	struct bpf_event_entry *ee;
1227	int i;
1228
1229	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1230		return;
1231
1232	rcu_read_lock();
1233	for (i = 0; i < array->map.max_entries; i++) {
1234		ee = READ_ONCE(array->ptrs[i]);
1235		if (ee && ee->map_file == map_file)
1236			fd_array_map_delete_elem(map, &i);
1237	}
1238	rcu_read_unlock();
1239}
1240
1241static void perf_event_fd_array_map_free(struct bpf_map *map)
1242{
1243	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1244		bpf_fd_array_map_clear(map);
1245	fd_array_map_free(map);
1246}
1247
1248const struct bpf_map_ops perf_event_array_map_ops = {
1249	.map_meta_equal = bpf_map_meta_equal,
1250	.map_alloc_check = fd_array_map_alloc_check,
1251	.map_alloc = array_map_alloc,
1252	.map_free = perf_event_fd_array_map_free,
1253	.map_get_next_key = array_map_get_next_key,
1254	.map_lookup_elem = fd_array_map_lookup_elem,
1255	.map_delete_elem = fd_array_map_delete_elem,
1256	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
1257	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
1258	.map_release = perf_event_fd_array_release,
 1259	.map_check_btf = map_check_no_btf,
1260	.map_btf_id = &array_map_btf_ids[0],
1261};
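
/* Usage sketch (illustrative BPF C, assumes libbpf headers): streaming
 * data through a perf event array with bpf_perf_event_output(); the
 * perf fds stored per element are resolved by the get_ptr/put_ptr ops
 * above.
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} events SEC(".maps");

SEC("kprobe/do_nanosleep")
int emit_event(struct pt_regs *ctx)
{
	__u32 sample = 1;

	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
			      &sample, sizeof(sample));
	return 0;
}
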
1262
1263#ifdef CONFIG_CGROUPS
1264static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
1265				     struct file *map_file /* not used */,
1266				     int fd)
1267{
1268	return cgroup_get_from_fd(fd);
1269}
1270
1271static void cgroup_fd_array_put_ptr(void *ptr)
1272{
 1273	/* cgroup_put() frees the cgrp after an RCU grace period */
1274	cgroup_put(ptr);
1275}
1276
1277static void cgroup_fd_array_free(struct bpf_map *map)
1278{
1279	bpf_fd_array_map_clear(map);
1280	fd_array_map_free(map);
1281}
1282
1283const struct bpf_map_ops cgroup_array_map_ops = {
1284	.map_meta_equal = bpf_map_meta_equal,
1285	.map_alloc_check = fd_array_map_alloc_check,
1286	.map_alloc = array_map_alloc,
1287	.map_free = cgroup_fd_array_free,
1288	.map_get_next_key = array_map_get_next_key,
1289	.map_lookup_elem = fd_array_map_lookup_elem,
1290	.map_delete_elem = fd_array_map_delete_elem,
1291	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
1292	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
 1293	.map_check_btf = map_check_no_btf,
1294	.map_btf_id = &array_map_btf_ids[0],
1295};
1296#endif
1297
1298static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
1299{
1300	struct bpf_map *map, *inner_map_meta;
1301
1302	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
1303	if (IS_ERR(inner_map_meta))
1304		return inner_map_meta;
1305
1306	map = array_map_alloc(attr);
1307	if (IS_ERR(map)) {
1308		bpf_map_meta_free(inner_map_meta);
1309		return map;
1310	}
1311
1312	map->inner_map_meta = inner_map_meta;
1313
1314	return map;
1315}
1316
1317static void array_of_map_free(struct bpf_map *map)
1318{
1319	/* map->inner_map_meta is only accessed by syscall which
1320	 * is protected by fdget/fdput.
1321	 */
1322	bpf_map_meta_free(map->inner_map_meta);
1323	bpf_fd_array_map_clear(map);
1324	fd_array_map_free(map);
1325}
1326
1327static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
1328{
1329	struct bpf_map **inner_map = array_map_lookup_elem(map, key);
1330
1331	if (!inner_map)
1332		return NULL;
1333
1334	return READ_ONCE(*inner_map);
1335}
1336
1337static int array_of_map_gen_lookup(struct bpf_map *map,
1338				   struct bpf_insn *insn_buf)
1339{
1340	struct bpf_array *array = container_of(map, struct bpf_array, map);
1341	u32 elem_size = array->elem_size;
1342	struct bpf_insn *insn = insn_buf;
1343	const int ret = BPF_REG_0;
1344	const int map_ptr = BPF_REG_1;
1345	const int index = BPF_REG_2;
1346
1347	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
1348	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
1349	if (!map->bypass_spec_v1) {
1350		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
1351		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
1352	} else {
1353		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
1354	}
1355	if (is_power_of_2(elem_size))
1356		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
1357	else
1358		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
1359	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
1360	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
1361	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
1362	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
1363	*insn++ = BPF_MOV64_IMM(ret, 0);
1364
1365	return insn - insn_buf;
1366}
1367
1368const struct bpf_map_ops array_of_maps_map_ops = {
1369	.map_alloc_check = fd_array_map_alloc_check,
1370	.map_alloc = array_of_map_alloc,
1371	.map_free = array_of_map_free,
1372	.map_get_next_key = array_map_get_next_key,
1373	.map_lookup_elem = array_of_map_lookup_elem,
1374	.map_delete_elem = fd_array_map_delete_elem,
1375	.map_fd_get_ptr = bpf_map_fd_get_ptr,
1376	.map_fd_put_ptr = bpf_map_fd_put_ptr,
1377	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
1378	.map_gen_lookup = array_of_map_gen_lookup,
1379	.map_lookup_batch = generic_map_lookup_batch,
1380	.map_update_batch = generic_map_update_batch,
 1381	.map_check_btf = map_check_no_btf,
1382	.map_btf_id = &array_map_btf_ids[0],
1383};
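
/* Usage sketch (illustrative BPF C, assumes libbpf's __array()
 * convention): declaring an array-of-maps. Inner maps inserted at
 * runtime must match the template map, per the meta-equality checks
 * used for map-in-map.
 */
struct inner_array {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, 4);
	__type(key, __u32);
	__array(values, struct inner_array);
} outer_map SEC(".maps");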
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   3 * Copyright (c) 2016,2017 Facebook
   4 */
   5#include <linux/bpf.h>
   6#include <linux/btf.h>
   7#include <linux/err.h>
   8#include <linux/slab.h>
   9#include <linux/mm.h>
  10#include <linux/filter.h>
  11#include <linux/perf_event.h>
  12#include <uapi/linux/btf.h>
  13#include <linux/rcupdate_trace.h>
  14#include <linux/btf_ids.h>
  15
  16#include "map_in_map.h"
  17
  18#define ARRAY_CREATE_FLAG_MASK \
  19	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
  20	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)
  21
  22static void bpf_array_free_percpu(struct bpf_array *array)
  23{
  24	int i;
  25
  26	for (i = 0; i < array->map.max_entries; i++) {
  27		free_percpu(array->pptrs[i]);
  28		cond_resched();
  29	}
  30}
  31
  32static int bpf_array_alloc_percpu(struct bpf_array *array)
  33{
  34	void __percpu *ptr;
  35	int i;
  36
  37	for (i = 0; i < array->map.max_entries; i++) {
  38		ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
  39					   GFP_USER | __GFP_NOWARN);
  40		if (!ptr) {
  41			bpf_array_free_percpu(array);
  42			return -ENOMEM;
  43		}
  44		array->pptrs[i] = ptr;
  45		cond_resched();
  46	}
  47
  48	return 0;
  49}
  50
  51/* Called from syscall */
  52int array_map_alloc_check(union bpf_attr *attr)
  53{
  54	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
  55	int numa_node = bpf_map_attr_numa_node(attr);
  56
  57	/* check sanity of attributes */
  58	if (attr->max_entries == 0 || attr->key_size != 4 ||
  59	    attr->value_size == 0 ||
  60	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
  61	    !bpf_map_flags_access_ok(attr->map_flags) ||
  62	    (percpu && numa_node != NUMA_NO_NODE))
  63		return -EINVAL;
  64
  65	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
  66	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
  67		return -EINVAL;
  68
  69	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
  70	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
  71		return -EINVAL;
  72
  73	/* avoid overflow on round_up(map->value_size) */
  74	if (attr->value_size > INT_MAX)
  75		return -E2BIG;
  76	/* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */
  77	if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE)
  78		return -E2BIG;
  79
  80	return 0;
  81}
  82
  83static struct bpf_map *array_map_alloc(union bpf_attr *attr)
  84{
  85	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
  86	int numa_node = bpf_map_attr_numa_node(attr);
  87	u32 elem_size, index_mask, max_entries;
  88	bool bypass_spec_v1 = bpf_bypass_spec_v1(NULL);
  89	u64 array_size, mask64;
  90	struct bpf_array *array;
  91
  92	elem_size = round_up(attr->value_size, 8);
  93
  94	max_entries = attr->max_entries;
  95
  96	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
  97	 * upper most bit set in u32 space is undefined behavior due to
  98	 * resulting 1U << 32, so do it manually here in u64 space.
  99	 */
 100	mask64 = fls_long(max_entries - 1);
 101	mask64 = 1ULL << mask64;
 102	mask64 -= 1;
 103
 104	index_mask = mask64;
 105	if (!bypass_spec_v1) {
 106		/* round up array size to nearest power of 2,
 107		 * since cpu will speculate within index_mask limits
 108		 */
 109		max_entries = index_mask + 1;
 110		/* Check for overflows. */
 111		if (max_entries < attr->max_entries)
 112			return ERR_PTR(-E2BIG);
 113	}
 114
 115	array_size = sizeof(*array);
 116	if (percpu) {
 117		array_size += (u64) max_entries * sizeof(void *);
 118	} else {
 119		/* rely on vmalloc() to return page-aligned memory and
 120		 * ensure array->value is exactly page-aligned
 121		 */
 122		if (attr->map_flags & BPF_F_MMAPABLE) {
 123			array_size = PAGE_ALIGN(array_size);
 124			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
 125		} else {
 126			array_size += (u64) max_entries * elem_size;
 127		}
 128	}
 129
 130	/* allocate all map elements and zero-initialize them */
 131	if (attr->map_flags & BPF_F_MMAPABLE) {
 132		void *data;
 133
 134		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
 135		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
 136		if (!data)
 137			return ERR_PTR(-ENOMEM);
 138		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
 139			- offsetof(struct bpf_array, value);
 140	} else {
 141		array = bpf_map_area_alloc(array_size, numa_node);
 142	}
 143	if (!array)
 144		return ERR_PTR(-ENOMEM);
 145	array->index_mask = index_mask;
 146	array->map.bypass_spec_v1 = bypass_spec_v1;
 147
 148	/* copy mandatory map attributes */
 149	bpf_map_init_from_attr(&array->map, attr);
 150	array->elem_size = elem_size;
 151
 152	if (percpu && bpf_array_alloc_percpu(array)) {
 153		bpf_map_area_free(array);
 154		return ERR_PTR(-ENOMEM);
 155	}
 156
 157	return &array->map;
 158}
 159
 160static void *array_map_elem_ptr(struct bpf_array* array, u32 index)
 161{
 162	return array->value + (u64)array->elem_size * index;
 163}
 164
 165/* Called from syscall or from eBPF program */
 166static void *array_map_lookup_elem(struct bpf_map *map, void *key)
 167{
 168	struct bpf_array *array = container_of(map, struct bpf_array, map);
 169	u32 index = *(u32 *)key;
 170
 171	if (unlikely(index >= array->map.max_entries))
 172		return NULL;
 173
 174	return array->value + (u64)array->elem_size * (index & array->index_mask);
 175}
 176
 177static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
 178				       u32 off)
 179{
 180	struct bpf_array *array = container_of(map, struct bpf_array, map);
 181
 182	if (map->max_entries != 1)
 183		return -ENOTSUPP;
 184	if (off >= map->value_size)
 185		return -EINVAL;
 186
 187	*imm = (unsigned long)array->value;
 188	return 0;
 189}
 190
 191static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
 192				       u32 *off)
 193{
 194	struct bpf_array *array = container_of(map, struct bpf_array, map);
 195	u64 base = (unsigned long)array->value;
 196	u64 range = array->elem_size;
 197
 198	if (map->max_entries != 1)
 199		return -ENOTSUPP;
 200	if (imm < base || imm >= base + range)
 201		return -ENOENT;
 202
 203	*off = imm - base;
 204	return 0;
 205}
 206
 207/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
 208static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 209{
 210	struct bpf_array *array = container_of(map, struct bpf_array, map);
 211	struct bpf_insn *insn = insn_buf;
 212	u32 elem_size = array->elem_size;
 213	const int ret = BPF_REG_0;
 214	const int map_ptr = BPF_REG_1;
 215	const int index = BPF_REG_2;
 216
 217	if (map->map_flags & BPF_F_INNER_MAP)
 218		return -EOPNOTSUPP;
 219
 220	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
 221	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
 222	if (!map->bypass_spec_v1) {
 223		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
 224		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
 225	} else {
 226		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
 227	}
 228
 229	if (is_power_of_2(elem_size)) {
 230		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
 231	} else {
 232		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
 233	}
 234	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
 235	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
 236	*insn++ = BPF_MOV64_IMM(ret, 0);
 237	return insn - insn_buf;
 238}
 239
 240/* Called from eBPF program */
 241static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
 242{
 243	struct bpf_array *array = container_of(map, struct bpf_array, map);
 244	u32 index = *(u32 *)key;
 245
 246	if (unlikely(index >= array->map.max_entries))
 247		return NULL;
 248
 249	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
 250}
 251
 252/* emit BPF instructions equivalent to C code of percpu_array_map_lookup_elem() */
 253static int percpu_array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 254{
 255	struct bpf_array *array = container_of(map, struct bpf_array, map);
 256	struct bpf_insn *insn = insn_buf;
 257
 258	if (!bpf_jit_supports_percpu_insn())
 259		return -EOPNOTSUPP;
 260
 261	if (map->map_flags & BPF_F_INNER_MAP)
 262		return -EOPNOTSUPP;
 263
 264	BUILD_BUG_ON(offsetof(struct bpf_array, map) != 0);
 265	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct bpf_array, pptrs));
 266
 267	*insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0);
 268	if (!map->bypass_spec_v1) {
 269		*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 6);
 270		*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_0, array->index_mask);
 271	} else {
 272		*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 5);
 273	}
 274
 275	*insn++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
 276	*insn++ = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
 277	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
 278	*insn++ = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
 279	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
 280	*insn++ = BPF_MOV64_IMM(BPF_REG_0, 0);
 281	return insn - insn_buf;
 282}
 283
 284static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
 285{
 286	struct bpf_array *array = container_of(map, struct bpf_array, map);
 287	u32 index = *(u32 *)key;
 288
 289	if (cpu >= nr_cpu_ids)
 290		return NULL;
 291
 292	if (unlikely(index >= array->map.max_entries))
 293		return NULL;
 294
 295	return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
 296}
 297
 298int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
 299{
 300	struct bpf_array *array = container_of(map, struct bpf_array, map);
 301	u32 index = *(u32 *)key;
 302	void __percpu *pptr;
 303	int cpu, off = 0;
 304	u32 size;
 305
 306	if (unlikely(index >= array->map.max_entries))
 307		return -ENOENT;
 308
 309	/* per_cpu areas are zero-filled and bpf programs can only
 310	 * access 'value_size' of them, so copying rounded areas
 311	 * will not leak any kernel data
 312	 */
 313	size = array->elem_size;
 314	rcu_read_lock();
 315	pptr = array->pptrs[index & array->index_mask];
 316	for_each_possible_cpu(cpu) {
 317		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
 318		check_and_init_map_value(map, value + off);
 319		off += size;
 320	}
 321	rcu_read_unlock();
 322	return 0;
 323}
 324
 325/* Called from syscall */
 326static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 327{
 328	struct bpf_array *array = container_of(map, struct bpf_array, map);
 329	u32 index = key ? *(u32 *)key : U32_MAX;
 330	u32 *next = (u32 *)next_key;
 331
 332	if (index >= array->map.max_entries) {
 333		*next = 0;
 334		return 0;
 335	}
 336
 337	if (index == array->map.max_entries - 1)
 338		return -ENOENT;
 339
 340	*next = index + 1;
 341	return 0;
 342}
 343
 344/* Called from syscall or from eBPF program */
 345static long array_map_update_elem(struct bpf_map *map, void *key, void *value,
 346				  u64 map_flags)
 347{
 348	struct bpf_array *array = container_of(map, struct bpf_array, map);
 349	u32 index = *(u32 *)key;
 350	char *val;
 351
 352	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
 353		/* unknown flags */
 354		return -EINVAL;
 355
 356	if (unlikely(index >= array->map.max_entries))
 357		/* all elements were pre-allocated, cannot insert a new one */
 358		return -E2BIG;
 359
 360	if (unlikely(map_flags & BPF_NOEXIST))
 361		/* all elements already exist */
 362		return -EEXIST;
 363
 364	if (unlikely((map_flags & BPF_F_LOCK) &&
 365		     !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
 366		return -EINVAL;
 367
 368	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 369		val = this_cpu_ptr(array->pptrs[index & array->index_mask]);
 370		copy_map_value(map, val, value);
 371		bpf_obj_free_fields(array->map.record, val);
 372	} else {
 373		val = array->value +
 374			(u64)array->elem_size * (index & array->index_mask);
 375		if (map_flags & BPF_F_LOCK)
 376			copy_map_value_locked(map, val, value, false);
 377		else
 378			copy_map_value(map, val, value);
 379		bpf_obj_free_fields(array->map.record, val);
 380	}
 381	return 0;
 382}
 383
 384int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
 385			    u64 map_flags)
 386{
 387	struct bpf_array *array = container_of(map, struct bpf_array, map);
 388	u32 index = *(u32 *)key;
 389	void __percpu *pptr;
 390	int cpu, off = 0;
 391	u32 size;
 392
 393	if (unlikely(map_flags > BPF_EXIST))
 394		/* unknown flags */
 395		return -EINVAL;
 396
 397	if (unlikely(index >= array->map.max_entries))
 398		/* all elements were pre-allocated, cannot insert a new one */
 399		return -E2BIG;
 400
 401	if (unlikely(map_flags == BPF_NOEXIST))
 402		/* all elements already exist */
 403		return -EEXIST;
 404
 405	/* the user space will provide round_up(value_size, 8) bytes that
 406	 * will be copied into per-cpu area. bpf programs can only access
 407	 * value_size of it. During lookup the same extra bytes will be
 408	 * returned or zeros which were zero-filled by percpu_alloc,
 409	 * so no kernel data leaks possible
 410	 */
 411	size = array->elem_size;
 412	rcu_read_lock();
 413	pptr = array->pptrs[index & array->index_mask];
 414	for_each_possible_cpu(cpu) {
 415		copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
 416		bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu));
 417		off += size;
 418	}
 419	rcu_read_unlock();
 420	return 0;
 421}
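
/* Usage sketch, not part of this file: the syscall-side per-cpu update
 * mirrors the lookup layout above, one round_up(value_size, 8)-byte slot
 * per possible CPU. E.g. resetting a per-cpu counter (value_size == 8):
 */
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int zero_percpu_value(int map_fd, __u32 key)
{
	int ncpus = libbpf_num_possible_cpus();
	__u64 *values;
	int err;

	if (ncpus < 0)
		return ncpus;
	values = calloc(ncpus, sizeof(*values));	/* all-zero buffer */
	if (!values)
		return -1;
	err = bpf_map_update_elem(map_fd, &key, values, BPF_ANY);
	free(values);
	return err;
}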
 422
 423/* Called from syscall or from eBPF program */
 424static long array_map_delete_elem(struct bpf_map *map, void *key)
 425{
 426	return -EINVAL;
 427}
 428
 429static void *array_map_vmalloc_addr(struct bpf_array *array)
 430{
 431	return (void *)round_down((unsigned long)array, PAGE_SIZE);
 432}
 433
 434static void array_map_free_timers_wq(struct bpf_map *map)
 435{
 436	struct bpf_array *array = container_of(map, struct bpf_array, map);
 437	int i;
 438
 439	/* We don't reset or free fields other than timer and workqueue
 440	 * when the uref drops to zero.
 441	 */
 442	if (btf_record_has_field(map->record, BPF_TIMER | BPF_WORKQUEUE)) {
 443		for (i = 0; i < array->map.max_entries; i++) {
 444			if (btf_record_has_field(map->record, BPF_TIMER))
 445				bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i));
 446			if (btf_record_has_field(map->record, BPF_WORKQUEUE))
 447				bpf_obj_free_workqueue(map->record, array_map_elem_ptr(array, i));
 448		}
 449	}
 450}
 451
 452/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
 453static void array_map_free(struct bpf_map *map)
 454{
 455	struct bpf_array *array = container_of(map, struct bpf_array, map);
 456	int i;
 457
 458	if (!IS_ERR_OR_NULL(map->record)) {
 459		if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 460			for (i = 0; i < array->map.max_entries; i++) {
 461				void __percpu *pptr = array->pptrs[i & array->index_mask];
 462				int cpu;
 463
 464				for_each_possible_cpu(cpu) {
 465					bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu));
 466					cond_resched();
 467				}
 468			}
 469		} else {
 470			for (i = 0; i < array->map.max_entries; i++)
 471				bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i));
 472		}
 473	}
 474
 475	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
 476		bpf_array_free_percpu(array);
 477
 478	if (array->map.map_flags & BPF_F_MMAPABLE)
 479		bpf_map_area_free(array_map_vmalloc_addr(array));
 480	else
 481		bpf_map_area_free(array);
 482}
 483
 484static void array_map_seq_show_elem(struct bpf_map *map, void *key,
 485				    struct seq_file *m)
 486{
 487	void *value;
 488
 489	rcu_read_lock();
 490
 491	value = array_map_lookup_elem(map, key);
 492	if (!value) {
 493		rcu_read_unlock();
 494		return;
 495	}
 496
 497	if (map->btf_key_type_id)
 498		seq_printf(m, "%u: ", *(u32 *)key);
 499	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
 500	seq_putc(m, '\n');
 501
 502	rcu_read_unlock();
 503}
 504
 505static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
 506					   struct seq_file *m)
 507{
 508	struct bpf_array *array = container_of(map, struct bpf_array, map);
 509	u32 index = *(u32 *)key;
 510	void __percpu *pptr;
 511	int cpu;
 512
 513	rcu_read_lock();
 514
 515	seq_printf(m, "%u: {\n", *(u32 *)key);
 516	pptr = array->pptrs[index & array->index_mask];
 517	for_each_possible_cpu(cpu) {
 518		seq_printf(m, "\tcpu%d: ", cpu);
 519		btf_type_seq_show(map->btf, map->btf_value_type_id,
 520				  per_cpu_ptr(pptr, cpu), m);
 521		seq_putc(m, '\n');
 522	}
 523	seq_puts(m, "}\n");
 524
 525	rcu_read_unlock();
 526}
 527
 528static int array_map_check_btf(const struct bpf_map *map,
 529			       const struct btf *btf,
 530			       const struct btf_type *key_type,
 531			       const struct btf_type *value_type)
 532{
 533	u32 int_data;
 534
 535	/* One exception for keyless BTF: .bss/.data/.rodata map */
 536	if (btf_type_is_void(key_type)) {
 537		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
 538		    map->max_entries != 1)
 539			return -EINVAL;
 540
 541		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
 542			return -EINVAL;
 543
 544		return 0;
 545	}
 546
 547	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
 548		return -EINVAL;
 549
 550	int_data = *(u32 *)(key_type + 1);
 551	/* bpf array can only take a u32 key. This check makes sure
 552	 * that the btf matches the attr used during map_create.
 553	 */
 554	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
 555		return -EINVAL;
 556
 557	return 0;
 558}
 559
 560static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
 561{
 562	struct bpf_array *array = container_of(map, struct bpf_array, map);
 563	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;
 564
 565	if (!(map->map_flags & BPF_F_MMAPABLE))
 566		return -EINVAL;
 567
 568	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
 569	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
 570		return -EINVAL;
 571
 572	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
 573				   vma->vm_pgoff + pgoff);
 574}
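
/* Usage sketch, not part of this file: an array created with
 * BPF_F_MMAPABLE can be mapped directly into user space; the check above
 * bounds the mapping to PAGE_ALIGN(max_entries * elem_size) bytes of the
 * value area. Assuming a map fd and that `len` respects that bound:
 */
#include <sys/mman.h>

static void *mmap_array_values(int map_fd, size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
		       map_fd, 0);

	return p == MAP_FAILED ? NULL : p;
}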
 575
 576static bool array_map_meta_equal(const struct bpf_map *meta0,
 577				 const struct bpf_map *meta1)
 578{
 579	if (!bpf_map_meta_equal(meta0, meta1))
 580		return false;
 581	return meta0->map_flags & BPF_F_INNER_MAP ? true :
 582	       meta0->max_entries == meta1->max_entries;
 583}
 584
 585struct bpf_iter_seq_array_map_info {
 586	struct bpf_map *map;
 587	void *percpu_value_buf;
 588	u32 index;
 589};
 590
 591static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
 592{
 593	struct bpf_iter_seq_array_map_info *info = seq->private;
 594	struct bpf_map *map = info->map;
 595	struct bpf_array *array;
 596	u32 index;
 597
 598	if (info->index >= map->max_entries)
 599		return NULL;
 600
 601	if (*pos == 0)
 602		++*pos;
 603	array = container_of(map, struct bpf_array, map);
 604	index = info->index & array->index_mask;
 605	if (info->percpu_value_buf)
 606		return (void *)(uintptr_t)array->pptrs[index];
 607	return array_map_elem_ptr(array, index);
 608}
 609
 610static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 611{
 612	struct bpf_iter_seq_array_map_info *info = seq->private;
 613	struct bpf_map *map = info->map;
 614	struct bpf_array *array;
 615	u32 index;
 616
 617	++*pos;
 618	++info->index;
 619	if (info->index >= map->max_entries)
 620		return NULL;
 621
 622	array = container_of(map, struct bpf_array, map);
 623	index = info->index & array->index_mask;
 624	if (info->percpu_value_buf)
 625		return (void *)(uintptr_t)array->pptrs[index];
 626	return array_map_elem_ptr(array, index);
 627}
 628
 629static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
 630{
 631	struct bpf_iter_seq_array_map_info *info = seq->private;
 632	struct bpf_iter__bpf_map_elem ctx = {};
 633	struct bpf_map *map = info->map;
 634	struct bpf_array *array = container_of(map, struct bpf_array, map);
 635	struct bpf_iter_meta meta;
 636	struct bpf_prog *prog;
 637	int off = 0, cpu = 0;
 638	void __percpu *pptr;
 639	u32 size;
 640
 641	meta.seq = seq;
 642	prog = bpf_iter_get_info(&meta, v == NULL);
 643	if (!prog)
 644		return 0;
 645
 646	ctx.meta = &meta;
 647	ctx.map = info->map;
 648	if (v) {
 649		ctx.key = &info->index;
 650
 651		if (!info->percpu_value_buf) {
 652			ctx.value = v;
 653		} else {
 654			pptr = (void __percpu *)(uintptr_t)v;
 655			size = array->elem_size;
 656			for_each_possible_cpu(cpu) {
 657				copy_map_value_long(map, info->percpu_value_buf + off,
 658						    per_cpu_ptr(pptr, cpu));
 659				check_and_init_map_value(map, info->percpu_value_buf + off);
 660				off += size;
 661			}
 662			ctx.value = info->percpu_value_buf;
 663		}
 664	}
 665
 666	return bpf_iter_run_prog(prog, &ctx);
 667}
 668
 669static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
 670{
 671	return __bpf_array_map_seq_show(seq, v);
 672}
 673
 674static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
 675{
 676	if (!v)
 677		(void)__bpf_array_map_seq_show(seq, NULL);
 678}
 679
 680static int bpf_iter_init_array_map(void *priv_data,
 681				   struct bpf_iter_aux_info *aux)
 682{
 683	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
 684	struct bpf_map *map = aux->map;
 685	struct bpf_array *array = container_of(map, struct bpf_array, map);
 686	void *value_buf;
 687	u32 buf_size;
 688
 689	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 690		buf_size = array->elem_size * num_possible_cpus();
 691		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
 692		if (!value_buf)
 693			return -ENOMEM;
 694
 695		seq_info->percpu_value_buf = value_buf;
 696	}
 697
 698	/* bpf_iter_attach_map() acquires a map uref, and the uref may be
 699	 * released before or in the middle of iterating map elements, so
 700	 * acquire an extra map uref for the iterator.
 701	 */
 702	bpf_map_inc_with_uref(map);
 703	seq_info->map = map;
 704	return 0;
 705}
 706
 707static void bpf_iter_fini_array_map(void *priv_data)
 708{
 709	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
 710
 711	bpf_map_put_with_uref(seq_info->map);
 712	kfree(seq_info->percpu_value_buf);
 713}
 714
 715static const struct seq_operations bpf_array_map_seq_ops = {
 716	.start	= bpf_array_map_seq_start,
 717	.next	= bpf_array_map_seq_next,
 718	.stop	= bpf_array_map_seq_stop,
 719	.show	= bpf_array_map_seq_show,
 720};
 721
 722static const struct bpf_iter_seq_info iter_seq_info = {
 723	.seq_ops		= &bpf_array_map_seq_ops,
 724	.init_seq_private	= bpf_iter_init_array_map,
 725	.fini_seq_private	= bpf_iter_fini_array_map,
 726	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
 727};
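
/* Usage sketch, not part of this file: the seq ops above back the
 * "bpf_map_elem" iterator. A matching BPF program (assuming vmlinux.h, a
 * recent libbpf for BPF_SEQ_PRINTF, and a __u64-valued array attached at
 * link-creation time) sees one key/value pair per call and NULL pointers
 * on the final stop call:
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("iter/bpf_map_elem")
int dump_array_elem(struct bpf_iter__bpf_map_elem *ctx)
{
	__u32 *key = ctx->key;
	__u64 *val = ctx->value;

	if (!key || !val)	/* end of iteration */
		return 0;
	BPF_SEQ_PRINTF(ctx->meta->seq, "%u: %llu\n", *key, *val);
	return 0;
}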
 728
 729static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
 730				    void *callback_ctx, u64 flags)
 731{
 732	u32 i, key, num_elems = 0;
 733	struct bpf_array *array;
 734	bool is_percpu;
 735	u64 ret = 0;
 736	void *val;
 737
 738	if (flags != 0)
 739		return -EINVAL;
 740
 741	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
 742	array = container_of(map, struct bpf_array, map);
 743	if (is_percpu)
 744		migrate_disable();
 745	for (i = 0; i < map->max_entries; i++) {
 746		if (is_percpu)
 747			val = this_cpu_ptr(array->pptrs[i]);
 748		else
 749			val = array_map_elem_ptr(array, i);
 750		num_elems++;
 751		key = i;
 752		ret = callback_fn((u64)(long)map, (u64)(long)&key,
 753				  (u64)(long)val, (u64)(long)callback_ctx, 0);
 754		/* return value: 0 - continue, 1 - stop and return */
 755		if (ret)
 756			break;
 757	}
 758
 759	if (is_percpu)
 760		migrate_enable();
 761	return num_elems;
 762}
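
/* Usage sketch, not part of this file: this function backs the
 * bpf_for_each_map_elem() helper. A BPF-side example with a callback
 * (hypothetical map and section names; vmlinux.h/bpf_helpers.h assumed):
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 256);
	__type(key, __u32);
	__type(value, __u64);
} my_array SEC(".maps");

static __u64 sum_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
{
	*(__u64 *)ctx += *val;
	return 0;	/* 0 - continue, 1 - stop */
}

SEC("tc")
int sum_array(struct __sk_buff *skb)
{
	__u64 sum = 0;

	/* returns the number of elements visited, 256 here */
	bpf_for_each_map_elem(&my_array, sum_cb, &sum, 0);
	return 0;
}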
 763
 764static u64 array_map_mem_usage(const struct bpf_map *map)
 765{
 766	struct bpf_array *array = container_of(map, struct bpf_array, map);
 767	bool percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
 768	u32 elem_size = array->elem_size;
 769	u64 entries = map->max_entries;
 770	u64 usage = sizeof(*array);
 771
 772	if (percpu) {
 773		usage += entries * sizeof(void *);
 774		usage += entries * elem_size * num_possible_cpus();
 775	} else {
 776		if (map->map_flags & BPF_F_MMAPABLE) {
 777			usage = PAGE_ALIGN(usage);
 778			usage += PAGE_ALIGN(entries * elem_size);
 779		} else {
 780			usage += entries * elem_size;
 781		}
 782	}
 783	return usage;
 784}
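
/* Worked example for the accounting above: a non-mmapable
 * BPF_MAP_TYPE_ARRAY with max_entries = 1024 and value_size = 12 has
 * elem_size = round_up(12, 8) = 16, so usage = sizeof(struct bpf_array) +
 * 1024 * 16 bytes. The per-cpu variant on a 64-bit machine with 4
 * possible CPUs instead adds 1024 * 8 pointer bytes plus 1024 * 16 * 4
 * value bytes.
 */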
 785
 786BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array)
 787const struct bpf_map_ops array_map_ops = {
 788	.map_meta_equal = array_map_meta_equal,
 789	.map_alloc_check = array_map_alloc_check,
 790	.map_alloc = array_map_alloc,
 791	.map_free = array_map_free,
 792	.map_get_next_key = array_map_get_next_key,
 793	.map_release_uref = array_map_free_timers_wq,
 794	.map_lookup_elem = array_map_lookup_elem,
 795	.map_update_elem = array_map_update_elem,
 796	.map_delete_elem = array_map_delete_elem,
 797	.map_gen_lookup = array_map_gen_lookup,
 798	.map_direct_value_addr = array_map_direct_value_addr,
 799	.map_direct_value_meta = array_map_direct_value_meta,
 800	.map_mmap = array_map_mmap,
 801	.map_seq_show_elem = array_map_seq_show_elem,
 802	.map_check_btf = array_map_check_btf,
 803	.map_lookup_batch = generic_map_lookup_batch,
 804	.map_update_batch = generic_map_update_batch,
 805	.map_set_for_each_callback_args = map_set_for_each_callback_args,
 806	.map_for_each_callback = bpf_for_each_array_elem,
 807	.map_mem_usage = array_map_mem_usage,
 808	.map_btf_id = &array_map_btf_ids[0],
 809	.iter_seq_info = &iter_seq_info,
 810};
 811
 812const struct bpf_map_ops percpu_array_map_ops = {
 813	.map_meta_equal = bpf_map_meta_equal,
 814	.map_alloc_check = array_map_alloc_check,
 815	.map_alloc = array_map_alloc,
 816	.map_free = array_map_free,
 817	.map_get_next_key = array_map_get_next_key,
 818	.map_lookup_elem = percpu_array_map_lookup_elem,
 819	.map_gen_lookup = percpu_array_map_gen_lookup,
 820	.map_update_elem = array_map_update_elem,
 821	.map_delete_elem = array_map_delete_elem,
 822	.map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
 823	.map_seq_show_elem = percpu_array_map_seq_show_elem,
 824	.map_check_btf = array_map_check_btf,
 825	.map_lookup_batch = generic_map_lookup_batch,
 826	.map_update_batch = generic_map_update_batch,
 827	.map_set_for_each_callback_args = map_set_for_each_callback_args,
 828	.map_for_each_callback = bpf_for_each_array_elem,
 829	.map_mem_usage = array_map_mem_usage,
 830	.map_btf_id = &array_map_btf_ids[0],
 831	.iter_seq_info = &iter_seq_info,
 832};
 833
 834static int fd_array_map_alloc_check(union bpf_attr *attr)
 835{
 836	/* only file descriptors can be stored in this type of map */
 837	if (attr->value_size != sizeof(u32))
 838		return -EINVAL;
 839	/* Program read-only/write-only not supported for special maps yet. */
 840	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
 841		return -EINVAL;
 842	return array_map_alloc_check(attr);
 843}
 844
 845static void fd_array_map_free(struct bpf_map *map)
 846{
 847	struct bpf_array *array = container_of(map, struct bpf_array, map);
 848	int i;
 849
 850	/* make sure it's empty */
 851	for (i = 0; i < array->map.max_entries; i++)
 852		BUG_ON(array->ptrs[i] != NULL);
 853
 854	bpf_map_area_free(array);
 855}
 856
 857static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
 858{
 859	return ERR_PTR(-EOPNOTSUPP);
 860}
 861
 862/* only called from syscall */
 863int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
 864{
 865	void **elem, *ptr;
 866	int ret = 0;
 867
 868	if (!map->ops->map_fd_sys_lookup_elem)
 869		return -ENOTSUPP;
 870
 871	rcu_read_lock();
 872	elem = array_map_lookup_elem(map, key);
 873	if (elem && (ptr = READ_ONCE(*elem)))
 874		*value = map->ops->map_fd_sys_lookup_elem(ptr);
 875	else
 876		ret = -ENOENT;
 877	rcu_read_unlock();
 878
 879	return ret;
 880}
 881
 882/* only called from syscall */
 883int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
 884				 void *key, void *value, u64 map_flags)
 885{
 886	struct bpf_array *array = container_of(map, struct bpf_array, map);
 887	void *new_ptr, *old_ptr;
 888	u32 index = *(u32 *)key, ufd;
 889
 890	if (map_flags != BPF_ANY)
 891		return -EINVAL;
 892
 893	if (index >= array->map.max_entries)
 894		return -E2BIG;
 895
 896	ufd = *(u32 *)value;
 897	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
 898	if (IS_ERR(new_ptr))
 899		return PTR_ERR(new_ptr);
 900
 901	if (map->ops->map_poke_run) {
 902		mutex_lock(&array->aux->poke_mutex);
 903		old_ptr = xchg(array->ptrs + index, new_ptr);
 904		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
 905		mutex_unlock(&array->aux->poke_mutex);
 906	} else {
 907		old_ptr = xchg(array->ptrs + index, new_ptr);
 908	}
 909
 910	if (old_ptr)
 911		map->ops->map_fd_put_ptr(map, old_ptr, true);
 912	return 0;
 913}
 914
 915static long __fd_array_map_delete_elem(struct bpf_map *map, void *key, bool need_defer)
 916{
 917	struct bpf_array *array = container_of(map, struct bpf_array, map);
 918	void *old_ptr;
 919	u32 index = *(u32 *)key;
 920
 921	if (index >= array->map.max_entries)
 922		return -E2BIG;
 923
 924	if (map->ops->map_poke_run) {
 925		mutex_lock(&array->aux->poke_mutex);
 926		old_ptr = xchg(array->ptrs + index, NULL);
 927		map->ops->map_poke_run(map, index, old_ptr, NULL);
 928		mutex_unlock(&array->aux->poke_mutex);
 929	} else {
 930		old_ptr = xchg(array->ptrs + index, NULL);
 931	}
 932
 933	if (old_ptr) {
 934		map->ops->map_fd_put_ptr(map, old_ptr, need_defer);
 935		return 0;
 936	} else {
 937		return -ENOENT;
 938	}
 939}
 940
 941static long fd_array_map_delete_elem(struct bpf_map *map, void *key)
 942{
 943	return __fd_array_map_delete_elem(map, key, true);
 944}
 945
 946static void *prog_fd_array_get_ptr(struct bpf_map *map,
 947				   struct file *map_file, int fd)
 948{
 949	struct bpf_prog *prog = bpf_prog_get(fd);
 950	bool is_extended;
 951
 952	if (IS_ERR(prog))
 953		return prog;
 954
 955	if (prog->type == BPF_PROG_TYPE_EXT ||
 956	    !bpf_prog_map_compatible(map, prog)) {
 957		bpf_prog_put(prog);
 958		return ERR_PTR(-EINVAL);
 959	}
 960
 961	mutex_lock(&prog->aux->ext_mutex);
 962	is_extended = prog->aux->is_extended;
 963	if (!is_extended)
 964		prog->aux->prog_array_member_cnt++;
 965	mutex_unlock(&prog->aux->ext_mutex);
 966	if (is_extended) {
 967		/* An extended prog cannot be a tail callee; this prevents a
 968		 * potential infinite loop like:
 969		 * tail callee prog entry -> tail callee prog subprog ->
 970		 * freplace prog entry --tailcall-> tail callee prog entry.
 971		 */
 972		bpf_prog_put(prog);
 973		return ERR_PTR(-EBUSY);
 974	}
 975
 976	return prog;
 977}
 978
 979static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
 980{
 981	struct bpf_prog *prog = ptr;
 982
 983	mutex_lock(&prog->aux->ext_mutex);
 984	prog->aux->prog_array_member_cnt--;
 985	mutex_unlock(&prog->aux->ext_mutex);
 986	/* bpf_prog is freed after one RCU or tasks trace grace period */
 987	bpf_prog_put(prog);
 988}
 989
 990static u32 prog_fd_array_sys_lookup_elem(void *ptr)
 991{
 992	return ((struct bpf_prog *)ptr)->aux->id;
 993}
 994
 995/* decrement refcnt of all bpf_progs that are stored in this map */
 996static void bpf_fd_array_map_clear(struct bpf_map *map, bool need_defer)
 997{
 998	struct bpf_array *array = container_of(map, struct bpf_array, map);
 999	int i;
1000
1001	for (i = 0; i < array->map.max_entries; i++)
1002		__fd_array_map_delete_elem(map, &i, need_defer);
1003}
1004
1005static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
1006					 struct seq_file *m)
1007{
1008	void **elem, *ptr;
1009	u32 prog_id;
1010
1011	rcu_read_lock();
1012
1013	elem = array_map_lookup_elem(map, key);
1014	if (elem) {
1015		ptr = READ_ONCE(*elem);
1016		if (ptr) {
1017			seq_printf(m, "%u: ", *(u32 *)key);
1018			prog_id = prog_fd_array_sys_lookup_elem(ptr);
1019			btf_type_seq_show(map->btf, map->btf_value_type_id,
1020					  &prog_id, m);
1021			seq_putc(m, '\n');
1022		}
1023	}
1024
1025	rcu_read_unlock();
1026}
1027
1028struct prog_poke_elem {
1029	struct list_head list;
1030	struct bpf_prog_aux *aux;
1031};
1032
1033static int prog_array_map_poke_track(struct bpf_map *map,
1034				     struct bpf_prog_aux *prog_aux)
1035{
1036	struct prog_poke_elem *elem;
1037	struct bpf_array_aux *aux;
1038	int ret = 0;
1039
1040	aux = container_of(map, struct bpf_array, map)->aux;
1041	mutex_lock(&aux->poke_mutex);
1042	list_for_each_entry(elem, &aux->poke_progs, list) {
1043		if (elem->aux == prog_aux)
1044			goto out;
1045	}
1046
1047	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
1048	if (!elem) {
1049		ret = -ENOMEM;
1050		goto out;
1051	}
1052
1053	INIT_LIST_HEAD(&elem->list);
1054	/* We must track the program's aux info at this point in time
1055	 * since the program pointer itself may not be stable yet, see
1056	 * also comment in prog_array_map_poke_run().
1057	 */
1058	elem->aux = prog_aux;
1059
1060	list_add_tail(&elem->list, &aux->poke_progs);
1061out:
1062	mutex_unlock(&aux->poke_mutex);
1063	return ret;
1064}
1065
1066static void prog_array_map_poke_untrack(struct bpf_map *map,
1067					struct bpf_prog_aux *prog_aux)
1068{
1069	struct prog_poke_elem *elem, *tmp;
1070	struct bpf_array_aux *aux;
1071
1072	aux = container_of(map, struct bpf_array, map)->aux;
1073	mutex_lock(&aux->poke_mutex);
1074	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
1075		if (elem->aux == prog_aux) {
1076			list_del_init(&elem->list);
1077			kfree(elem);
1078			break;
1079		}
1080	}
1081	mutex_unlock(&aux->poke_mutex);
1082}
1083
1084void __weak bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
1085				      struct bpf_prog *new, struct bpf_prog *old)
1086{
1087	WARN_ON_ONCE(1);
1088}
1089
1090static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
1091				    struct bpf_prog *old,
1092				    struct bpf_prog *new)
1093{
1094	struct prog_poke_elem *elem;
1095	struct bpf_array_aux *aux;
1096
1097	aux = container_of(map, struct bpf_array, map)->aux;
1098	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));
1099
1100	list_for_each_entry(elem, &aux->poke_progs, list) {
1101		struct bpf_jit_poke_descriptor *poke;
1102		int i;
1103
1104		for (i = 0; i < elem->aux->size_poke_tab; i++) {
1105			poke = &elem->aux->poke_tab[i];
1106
1107			/* A few things to be aware of:
1108			 *
1109			 * 1) We can only ever access aux in this context, but
1110			 *    not aux->prog since it might not be stable yet and
1111			 *    there could be danger of use after free otherwise.
1112			 * 2) Initially when we start tracking aux, the program
1113			 *    is not JITed yet and also does not have a kallsyms
1114			 *    entry. We skip these as poke->tailcall_target_stable
1115			 *    is not active yet. The JIT will do the final fixup
1116			 *    before setting it stable. The various
1117			 *    poke->tailcall_target_stable are successively
1118			 *    activated, so tail call updates can arrive from here
1119			 *    while JIT is still finishing its final fixup for
1120			 *    non-activated poke entries.
1121			 * 3) Programs reaching a refcount of zero while patching
1122			 *    is in progress are fine, since we're protected under
1123			 *    poke_mutex and untrack the programs before the JIT
1124			 *    buffer is freed.
1125			 */
1126			if (!READ_ONCE(poke->tailcall_target_stable))
1127				continue;
1128			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
1129				continue;
1130			if (poke->tail_call.map != map ||
1131			    poke->tail_call.key != key)
1132				continue;
1133
1134			bpf_arch_poke_desc_update(poke, new, old);
1135		}
1136	}
1137}
1138
1139static void prog_array_map_clear_deferred(struct work_struct *work)
1140{
1141	struct bpf_map *map = container_of(work, struct bpf_array_aux,
1142					   work)->map;
1143	bpf_fd_array_map_clear(map, true);
1144	bpf_map_put(map);
1145}
1146
1147static void prog_array_map_clear(struct bpf_map *map)
1148{
1149	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
1150						 map)->aux;
1151	bpf_map_inc(map);
1152	schedule_work(&aux->work);
1153}
1154
1155static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
1156{
1157	struct bpf_array_aux *aux;
1158	struct bpf_map *map;
1159
1160	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
1161	if (!aux)
1162		return ERR_PTR(-ENOMEM);
1163
1164	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
1165	INIT_LIST_HEAD(&aux->poke_progs);
1166	mutex_init(&aux->poke_mutex);
1167
1168	map = array_map_alloc(attr);
1169	if (IS_ERR(map)) {
1170		kfree(aux);
1171		return map;
1172	}
1173
1174	container_of(map, struct bpf_array, map)->aux = aux;
1175	aux->map = map;
1176
1177	return map;
1178}
1179
1180static void prog_array_map_free(struct bpf_map *map)
1181{
1182	struct prog_poke_elem *elem, *tmp;
1183	struct bpf_array_aux *aux;
1184
1185	aux = container_of(map, struct bpf_array, map)->aux;
1186	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
1187		list_del_init(&elem->list);
1188		kfree(elem);
1189	}
1190	kfree(aux);
1191	fd_array_map_free(map);
1192}
1193
1194/* prog_array->aux->{type,jited} is a runtime binding.
1195 * A static check alone in the verifier is not enough.
1196 * Thus, prog_array_map cannot be used as an inner_map
1197 * and map_meta_equal is not implemented.
1198 */
1199const struct bpf_map_ops prog_array_map_ops = {
1200	.map_alloc_check = fd_array_map_alloc_check,
1201	.map_alloc = prog_array_map_alloc,
1202	.map_free = prog_array_map_free,
1203	.map_poke_track = prog_array_map_poke_track,
1204	.map_poke_untrack = prog_array_map_poke_untrack,
1205	.map_poke_run = prog_array_map_poke_run,
1206	.map_get_next_key = array_map_get_next_key,
1207	.map_lookup_elem = fd_array_map_lookup_elem,
1208	.map_delete_elem = fd_array_map_delete_elem,
1209	.map_fd_get_ptr = prog_fd_array_get_ptr,
1210	.map_fd_put_ptr = prog_fd_array_put_ptr,
1211	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
1212	.map_release_uref = prog_array_map_clear,
1213	.map_seq_show_elem = prog_array_map_seq_show_elem,
1214	.map_mem_usage = array_map_mem_usage,
1215	.map_btf_id = &array_map_btf_ids[0],
1216};
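
/* Usage sketch, not part of this file: the main consumer of
 * prog_array_map_ops is the bpf_tail_call() helper; an empty or
 * out-of-range slot makes the call fall through to the next instruction
 * (hypothetical section and map names; vmlinux.h/bpf_helpers.h assumed):
 */
struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 8);
	__type(key, __u32);
	__type(value, __u32);
} jmp_table SEC(".maps");

SEC("xdp")
int dispatcher(struct xdp_md *ctx)
{
	bpf_tail_call(ctx, &jmp_table, 0);
	/* reached only if slot 0 is empty or the tail call failed */
	return XDP_PASS;
}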
1217
1218static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
1219						   struct file *map_file)
1220{
1221	struct bpf_event_entry *ee;
1222
1223	ee = kzalloc(sizeof(*ee), GFP_KERNEL);
1224	if (ee) {
1225		ee->event = perf_file->private_data;
1226		ee->perf_file = perf_file;
1227		ee->map_file = map_file;
1228	}
1229
1230	return ee;
1231}
1232
1233static void __bpf_event_entry_free(struct rcu_head *rcu)
1234{
1235	struct bpf_event_entry *ee;
1236
1237	ee = container_of(rcu, struct bpf_event_entry, rcu);
1238	fput(ee->perf_file);
1239	kfree(ee);
1240}
1241
1242static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
1243{
1244	call_rcu(&ee->rcu, __bpf_event_entry_free);
1245}
1246
1247static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
1248					 struct file *map_file, int fd)
1249{
1250	struct bpf_event_entry *ee;
1251	struct perf_event *event;
1252	struct file *perf_file;
1253	u64 value;
1254
1255	perf_file = perf_event_get(fd);
1256	if (IS_ERR(perf_file))
1257		return perf_file;
1258
1259	ee = ERR_PTR(-EOPNOTSUPP);
1260	event = perf_file->private_data;
1261	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
1262		goto err_out;
1263
1264	ee = bpf_event_entry_gen(perf_file, map_file);
1265	if (ee)
1266		return ee;
1267	ee = ERR_PTR(-ENOMEM);
1268err_out:
1269	fput(perf_file);
1270	return ee;
1271}
1272
1273static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
1274{
1275	/* bpf_perf_event is freed after one RCU grace period */
1276	bpf_event_entry_free_rcu(ptr);
1277}
1278
1279static void perf_event_fd_array_release(struct bpf_map *map,
1280					struct file *map_file)
1281{
1282	struct bpf_array *array = container_of(map, struct bpf_array, map);
1283	struct bpf_event_entry *ee;
1284	int i;
1285
1286	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1287		return;
1288
1289	rcu_read_lock();
1290	for (i = 0; i < array->map.max_entries; i++) {
1291		ee = READ_ONCE(array->ptrs[i]);
1292		if (ee && ee->map_file == map_file)
1293			__fd_array_map_delete_elem(map, &i, true);
1294	}
1295	rcu_read_unlock();
1296}
1297
1298static void perf_event_fd_array_map_free(struct bpf_map *map)
1299{
1300	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1301		bpf_fd_array_map_clear(map, false);
1302	fd_array_map_free(map);
1303}
1304
1305const struct bpf_map_ops perf_event_array_map_ops = {
1306	.map_meta_equal = bpf_map_meta_equal,
1307	.map_alloc_check = fd_array_map_alloc_check,
1308	.map_alloc = array_map_alloc,
1309	.map_free = perf_event_fd_array_map_free,
1310	.map_get_next_key = array_map_get_next_key,
1311	.map_lookup_elem = fd_array_map_lookup_elem,
1312	.map_delete_elem = fd_array_map_delete_elem,
1313	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
1314	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
1315	.map_release = perf_event_fd_array_release,
1316	.map_check_btf = map_check_no_btf,
1317	.map_mem_usage = array_map_mem_usage,
1318	.map_btf_id = &array_map_btf_ids[0],
1319};
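
/* Usage sketch, not part of this file: a BPF_MAP_TYPE_PERF_EVENT_ARRAY is
 * typically consumed through bpf_perf_event_output(), which resolves the
 * bpf_event_entry installed above for the chosen CPU index (hypothetical
 * probe target; vmlinux.h/bpf_helpers.h assumed):
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} events SEC(".maps");

SEC("kprobe/do_sys_openat2")
int report_open(struct pt_regs *ctx)
{
	__u32 pid = bpf_get_current_pid_tgid() >> 32;

	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
			      &pid, sizeof(pid));
	return 0;
}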
1320
1321#ifdef CONFIG_CGROUPS
1322static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
1323				     struct file *map_file /* not used */,
1324				     int fd)
1325{
1326	return cgroup_get_from_fd(fd);
1327}
1328
1329static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
1330{
1331	/* cgroup_put() frees the cgrp after an RCU grace period */
1332	cgroup_put(ptr);
1333}
1334
1335static void cgroup_fd_array_free(struct bpf_map *map)
1336{
1337	bpf_fd_array_map_clear(map, false);
1338	fd_array_map_free(map);
1339}
1340
1341const struct bpf_map_ops cgroup_array_map_ops = {
1342	.map_meta_equal = bpf_map_meta_equal,
1343	.map_alloc_check = fd_array_map_alloc_check,
1344	.map_alloc = array_map_alloc,
1345	.map_free = cgroup_fd_array_free,
1346	.map_get_next_key = array_map_get_next_key,
1347	.map_lookup_elem = fd_array_map_lookup_elem,
1348	.map_delete_elem = fd_array_map_delete_elem,
1349	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
1350	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
1351	.map_check_btf = map_check_no_btf,
1352	.map_mem_usage = array_map_mem_usage,
1353	.map_btf_id = &array_map_btf_ids[0],
1354};
1355#endif
1356
1357static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
1358{
1359	struct bpf_map *map, *inner_map_meta;
1360
1361	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
1362	if (IS_ERR(inner_map_meta))
1363		return inner_map_meta;
1364
1365	map = array_map_alloc(attr);
1366	if (IS_ERR(map)) {
1367		bpf_map_meta_free(inner_map_meta);
1368		return map;
1369	}
1370
1371	map->inner_map_meta = inner_map_meta;
1372
1373	return map;
1374}
1375
1376static void array_of_map_free(struct bpf_map *map)
1377{
1378	/* map->inner_map_meta is only accessed by syscall which
1379	 * is protected by fdget/fdput.
1380	 */
1381	bpf_map_meta_free(map->inner_map_meta);
1382	bpf_fd_array_map_clear(map, false);
1383	fd_array_map_free(map);
1384}
1385
1386static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
1387{
1388	struct bpf_map **inner_map = array_map_lookup_elem(map, key);
1389
1390	if (!inner_map)
1391		return NULL;
1392
1393	return READ_ONCE(*inner_map);
1394}
1395
1396static int array_of_map_gen_lookup(struct bpf_map *map,
1397				   struct bpf_insn *insn_buf)
1398{
1399	struct bpf_array *array = container_of(map, struct bpf_array, map);
1400	u32 elem_size = array->elem_size;
1401	struct bpf_insn *insn = insn_buf;
1402	const int ret = BPF_REG_0;
1403	const int map_ptr = BPF_REG_1;
1404	const int index = BPF_REG_2;
1405
1406	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
1407	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
1408	if (!map->bypass_spec_v1) {
1409		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
1410		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
1411	} else {
1412		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
1413	}
1414	if (is_power_of_2(elem_size))
1415		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
1416	else
1417		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
1418	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
1419	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
1420	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
1421	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
1422	*insn++ = BPF_MOV64_IMM(ret, 0);
1423
1424	return insn - insn_buf;
1425}
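
/* The generated sequence above is, modulo the Spectre index mask, the
 * inlined equivalent of:
 *
 *	elem = array->value + (u64)elem_size * index;
 *	return index < max_entries ? *(void **)elem : NULL;
 */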
1426
1427const struct bpf_map_ops array_of_maps_map_ops = {
1428	.map_alloc_check = fd_array_map_alloc_check,
1429	.map_alloc = array_of_map_alloc,
1430	.map_free = array_of_map_free,
1431	.map_get_next_key = array_map_get_next_key,
1432	.map_lookup_elem = array_of_map_lookup_elem,
1433	.map_delete_elem = fd_array_map_delete_elem,
1434	.map_fd_get_ptr = bpf_map_fd_get_ptr,
1435	.map_fd_put_ptr = bpf_map_fd_put_ptr,
1436	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
1437	.map_gen_lookup = array_of_map_gen_lookup,
1438	.map_lookup_batch = generic_map_lookup_batch,
1439	.map_update_batch = generic_map_update_batch,
1440	.map_check_btf = map_check_no_btf,
1441	.map_mem_usage = array_map_mem_usage,
1442	.map_btf_id = &array_map_btf_ids[0],
1443};
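
/* Usage sketch, not part of this file: with libbpf, an array-of-maps is
 * declared by pointing __array(values, ...) at an inner map template and
 * dereferenced in two steps (hypothetical names; vmlinux.h and
 * bpf_helpers.h assumed):
 */
struct inner_array {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, 4);
	__type(key, __u32);
	__array(values, struct inner_array);
} outer SEC(".maps");

SEC("tc")
int read_inner(struct __sk_buff *skb)
{
	__u32 zero = 0;
	__u64 *val;
	void *inner;

	inner = bpf_map_lookup_elem(&outer, &zero);	/* gen_lookup above */
	if (!inner)
		return 0;
	val = bpf_map_lookup_elem(inner, &zero);
	return val ? (int)*val : 0;
}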