kernel/bpf/arraymap.c (Linux v6.2)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   3 * Copyright (c) 2016,2017 Facebook
   4 */
   5#include <linux/bpf.h>
   6#include <linux/btf.h>
   7#include <linux/err.h>
   8#include <linux/slab.h>
   9#include <linux/mm.h>
  10#include <linux/filter.h>
  11#include <linux/perf_event.h>
  12#include <uapi/linux/btf.h>
  13#include <linux/rcupdate_trace.h>
  14#include <linux/btf_ids.h>
  15
  16#include "map_in_map.h"
  17
  18#define ARRAY_CREATE_FLAG_MASK \
  19	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
  20	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)
  21
  22static void bpf_array_free_percpu(struct bpf_array *array)
  23{
  24	int i;
  25
  26	for (i = 0; i < array->map.max_entries; i++) {
  27		free_percpu(array->pptrs[i]);
  28		cond_resched();
  29	}
  30}
  31
  32static int bpf_array_alloc_percpu(struct bpf_array *array)
  33{
  34	void __percpu *ptr;
  35	int i;
  36
  37	for (i = 0; i < array->map.max_entries; i++) {
  38		ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
  39					   GFP_USER | __GFP_NOWARN);
  40		if (!ptr) {
  41			bpf_array_free_percpu(array);
  42			return -ENOMEM;
  43		}
  44		array->pptrs[i] = ptr;
  45		cond_resched();
  46	}
  47
  48	return 0;
  49}
  50
  51/* Called from syscall */
  52int array_map_alloc_check(union bpf_attr *attr)
  53{
  54	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
  55	int numa_node = bpf_map_attr_numa_node(attr);
  56
  57	/* check sanity of attributes */
  58	if (attr->max_entries == 0 || attr->key_size != 4 ||
  59	    attr->value_size == 0 ||
  60	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
  61	    !bpf_map_flags_access_ok(attr->map_flags) ||
  62	    (percpu && numa_node != NUMA_NO_NODE))
  63		return -EINVAL;
  64
  65	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
  66	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
  67		return -EINVAL;
  68
  69	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
  70	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
  71		return -EINVAL;
  72
  73	/* avoid overflow on round_up(map->value_size) */
  74	if (attr->value_size > INT_MAX)
  75		return -E2BIG;
  76
  77	return 0;
  78}
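
/*
 * Editorial sketch, not part of the original file: the checks above are
 * what user space runs into at BPF_MAP_CREATE time. A minimal creation
 * call using libbpf's bpf_map_create() (libbpf >= 0.7 assumed) could look
 * like the block below; key_size must be 4 and value_size non-zero, or
 * the kernel returns -EINVAL here. The helper name is illustrative.
 */
#if 0 /* user-space illustration only */
#include <bpf/bpf.h>

static int create_example_array(void)
{
	return bpf_map_create(BPF_MAP_TYPE_ARRAY, "example_array",
			      sizeof(__u32),	/* key_size, must be 4 */
			      sizeof(__u64),	/* value_size */
			      256,		/* max_entries */
			      NULL);		/* default opts */
}
#endif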
  79
  80static struct bpf_map *array_map_alloc(union bpf_attr *attr)
  81{
  82	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
  83	int numa_node = bpf_map_attr_numa_node(attr);
  84	u32 elem_size, index_mask, max_entries;
  85	bool bypass_spec_v1 = bpf_bypass_spec_v1();
  86	u64 array_size, mask64;
  87	struct bpf_array *array;
  88
  89	elem_size = round_up(attr->value_size, 8);
  90
  91	max_entries = attr->max_entries;
  92
  93	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
   94	 * uppermost bit set in u32 space is undefined behavior due to
  95	 * resulting 1U << 32, so do it manually here in u64 space.
  96	 */
  97	mask64 = fls_long(max_entries - 1);
  98	mask64 = 1ULL << mask64;
  99	mask64 -= 1;
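	/*
	 * Editorial note, not in the original source: for example, with
	 * attr->max_entries == 5, fls_long(4) == 3 and mask64 becomes 0x7,
	 * so a non-bypass map is padded to index_mask + 1 == 8 slots below.
	 * With the top bit set (say 0x80000001 entries) the padded size no
	 * longer fits in u32 and the allocation fails with -E2BIG.
	 */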
 100
 101	index_mask = mask64;
 102	if (!bypass_spec_v1) {
 103		/* round up array size to nearest power of 2,
 104		 * since cpu will speculate within index_mask limits
 105		 */
 106		max_entries = index_mask + 1;
 107		/* Check for overflows. */
 108		if (max_entries < attr->max_entries)
 109			return ERR_PTR(-E2BIG);
 110	}
 111
 112	array_size = sizeof(*array);
 113	if (percpu) {
 114		array_size += (u64) max_entries * sizeof(void *);
 115	} else {
 116		/* rely on vmalloc() to return page-aligned memory and
 117		 * ensure array->value is exactly page-aligned
 118		 */
 119		if (attr->map_flags & BPF_F_MMAPABLE) {
 120			array_size = PAGE_ALIGN(array_size);
 121			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
 122		} else {
 123			array_size += (u64) max_entries * elem_size;
 124		}
 125	}
 126
 127	/* allocate all map elements and zero-initialize them */
 128	if (attr->map_flags & BPF_F_MMAPABLE) {
 129		void *data;
 130
 131		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
 132		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
 133		if (!data)
 134			return ERR_PTR(-ENOMEM);
 135		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
 136			- offsetof(struct bpf_array, value);
 137	} else {
 138		array = bpf_map_area_alloc(array_size, numa_node);
 139	}
 140	if (!array)
 141		return ERR_PTR(-ENOMEM);
 142	array->index_mask = index_mask;
 143	array->map.bypass_spec_v1 = bypass_spec_v1;
 144
 145	/* copy mandatory map attributes */
 146	bpf_map_init_from_attr(&array->map, attr);
 147	array->elem_size = elem_size;
 148
 149	if (percpu && bpf_array_alloc_percpu(array)) {
 150		bpf_map_area_free(array);
 151		return ERR_PTR(-ENOMEM);
 152	}
 153
 154	return &array->map;
 155}
 156
 157static void *array_map_elem_ptr(struct bpf_array* array, u32 index)
 158{
 159	return array->value + (u64)array->elem_size * index;
 160}
 161
 162/* Called from syscall or from eBPF program */
 163static void *array_map_lookup_elem(struct bpf_map *map, void *key)
 164{
 165	struct bpf_array *array = container_of(map, struct bpf_array, map);
 166	u32 index = *(u32 *)key;
 167
 168	if (unlikely(index >= array->map.max_entries))
 169		return NULL;
 170
 171	return array->value + (u64)array->elem_size * (index & array->index_mask);
 172}
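
/*
 * Editorial sketch, not part of the original file: this is the lookup a
 * BPF program performs on a plain array map, although the verifier
 * normally inlines it via array_map_gen_lookup() below instead of calling
 * this function. Assumes libbpf's bpf_helpers.h and BTF-defined map
 * syntax; map, function, and section names are illustrative.
 */
#if 0 /* BPF program side illustration only */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 256);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");

SEC("tracepoint/raw_syscalls/sys_enter")
int count_enter(void *ctx)
{
	__u32 key = 0;
	__u64 *val = bpf_map_lookup_elem(&counters, &key);

	if (val)
		__sync_fetch_and_add(val, 1);
	return 0;
}
#endif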
 173
 174static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
 175				       u32 off)
 176{
 177	struct bpf_array *array = container_of(map, struct bpf_array, map);
 178
 179	if (map->max_entries != 1)
 180		return -ENOTSUPP;
 181	if (off >= map->value_size)
 182		return -EINVAL;
 183
 184	*imm = (unsigned long)array->value;
 185	return 0;
 186}
 187
 188static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
 189				       u32 *off)
 190{
 191	struct bpf_array *array = container_of(map, struct bpf_array, map);
 192	u64 base = (unsigned long)array->value;
 193	u64 range = array->elem_size;
 194
 195	if (map->max_entries != 1)
 196		return -ENOTSUPP;
 197	if (imm < base || imm >= base + range)
 198		return -ENOENT;
 199
 200	*off = imm - base;
 201	return 0;
 202}
 203
 204/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
 205static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 206{
 207	struct bpf_array *array = container_of(map, struct bpf_array, map);
 208	struct bpf_insn *insn = insn_buf;
 209	u32 elem_size = array->elem_size;
 210	const int ret = BPF_REG_0;
 211	const int map_ptr = BPF_REG_1;
 212	const int index = BPF_REG_2;
 213
 214	if (map->map_flags & BPF_F_INNER_MAP)
 215		return -EOPNOTSUPP;
 216
 217	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
 218	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
 219	if (!map->bypass_spec_v1) {
 220		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
 221		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
 222	} else {
 223		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
 224	}
 225
 226	if (is_power_of_2(elem_size)) {
 227		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
 228	} else {
 229		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
 230	}
 231	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
 232	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
 233	*insn++ = BPF_MOV64_IMM(ret, 0);
 234	return insn - insn_buf;
 235}
 236
 237/* Called from eBPF program */
 238static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
 239{
 240	struct bpf_array *array = container_of(map, struct bpf_array, map);
 241	u32 index = *(u32 *)key;
 242
 243	if (unlikely(index >= array->map.max_entries))
 244		return NULL;
 245
 246	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
 247}
 248
 249static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
 250{
 251	struct bpf_array *array = container_of(map, struct bpf_array, map);
 252	u32 index = *(u32 *)key;
 253
 254	if (cpu >= nr_cpu_ids)
 255		return NULL;
 256
 257	if (unlikely(index >= array->map.max_entries))
 258		return NULL;
 259
 260	return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
 261}
 262
 263int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
 264{
 265	struct bpf_array *array = container_of(map, struct bpf_array, map);
 266	u32 index = *(u32 *)key;
 267	void __percpu *pptr;
 268	int cpu, off = 0;
 269	u32 size;
 270
 271	if (unlikely(index >= array->map.max_entries))
 272		return -ENOENT;
 273
 274	/* per_cpu areas are zero-filled and bpf programs can only
 275	 * access 'value_size' of them, so copying rounded areas
 276	 * will not leak any kernel data
 277	 */
 278	size = array->elem_size;
 279	rcu_read_lock();
 280	pptr = array->pptrs[index & array->index_mask];
 281	for_each_possible_cpu(cpu) {
 282		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
 283		check_and_init_map_value(map, value + off);
 284		off += size;
 285	}
 286	rcu_read_unlock();
 287	return 0;
 288}
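
/*
 * Editorial sketch, not part of the original file: from user space, a
 * lookup on a BPF_MAP_TYPE_PERCPU_ARRAY fills one round_up(value_size, 8)
 * slot per possible CPU, exactly as copied above. A hedged example using
 * libbpf (the helper name is illustrative):
 */
#if 0 /* user-space illustration only */
#include <bpf/bpf.h>
#include <bpf/libbpf.h>	/* libbpf_num_possible_cpus() */
#include <stdlib.h>

static int read_percpu_slot(int map_fd, __u32 key, __u32 value_size)
{
	int ncpus = libbpf_num_possible_cpus();
	size_t slot = (value_size + 7) & ~(size_t)7; /* round_up(value_size, 8) */
	void *buf;
	int err;

	if (ncpus < 0)
		return ncpus;
	buf = malloc(slot * ncpus);
	if (!buf)
		return -1;
	err = bpf_map_lookup_elem(map_fd, &key, buf);
	/* on success, buf + cpu * slot holds the value seen by each CPU */
	free(buf);
	return err;
}
#endif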
 289
 290/* Called from syscall */
 291static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 292{
 293	struct bpf_array *array = container_of(map, struct bpf_array, map);
 294	u32 index = key ? *(u32 *)key : U32_MAX;
 295	u32 *next = (u32 *)next_key;
 296
 297	if (index >= array->map.max_entries) {
 298		*next = 0;
 299		return 0;
 300	}
 301
 302	if (index == array->map.max_entries - 1)
 303		return -ENOENT;
 304
 305	*next = index + 1;
 306	return 0;
 307}
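
/*
 * Editorial sketch, not part of the original file: the semantics above
 * (a NULL or out-of-range key restarts at index 0, -ENOENT marks the last
 * element) give the usual user-space iteration loop via libbpf:
 */
#if 0 /* user-space illustration only */
#include <bpf/bpf.h>

static void walk_array_keys(int map_fd)
{
	__u32 key, next;

	if (bpf_map_get_next_key(map_fd, NULL, &next))
		return;
	do {
		key = next;
		/* ... bpf_map_lookup_elem(map_fd, &key, value_buf) ... */
	} while (!bpf_map_get_next_key(map_fd, &key, &next));
}
#endif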
 308
 309/* Called from syscall or from eBPF program */
 310static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
 311				 u64 map_flags)
 312{
 313	struct bpf_array *array = container_of(map, struct bpf_array, map);
 314	u32 index = *(u32 *)key;
 315	char *val;
 316
 317	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
 318		/* unknown flags */
 319		return -EINVAL;
 320
 321	if (unlikely(index >= array->map.max_entries))
 322		/* all elements were pre-allocated, cannot insert a new one */
 323		return -E2BIG;
 324
 325	if (unlikely(map_flags & BPF_NOEXIST))
 326		/* all elements already exist */
 327		return -EEXIST;
 328
 329	if (unlikely((map_flags & BPF_F_LOCK) &&
 330		     !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
 331		return -EINVAL;
 332
 333	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 334		val = this_cpu_ptr(array->pptrs[index & array->index_mask]);
 335		copy_map_value(map, val, value);
 336		bpf_obj_free_fields(array->map.record, val);
 337	} else {
 338		val = array->value +
 339			(u64)array->elem_size * (index & array->index_mask);
 340		if (map_flags & BPF_F_LOCK)
 341			copy_map_value_locked(map, val, value, false);
 342		else
 343			copy_map_value(map, val, value);
 344		bpf_obj_free_fields(array->map.record, val);
 345	}
 346	return 0;
 347}
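
/*
 * Editorial note, not part of the original file: since every slot of an
 * array map exists from creation time, user space should update with
 * BPF_ANY or BPF_EXIST; BPF_NOEXIST always fails with -EEXIST as above,
 * e.g. bpf_map_update_elem(map_fd, &key, &value, BPF_ANY) via libbpf.
 */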
 348
 349int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
 350			    u64 map_flags)
 351{
 352	struct bpf_array *array = container_of(map, struct bpf_array, map);
 353	u32 index = *(u32 *)key;
 354	void __percpu *pptr;
 355	int cpu, off = 0;
 356	u32 size;
 357
 358	if (unlikely(map_flags > BPF_EXIST))
 359		/* unknown flags */
 360		return -EINVAL;
 361
 362	if (unlikely(index >= array->map.max_entries))
 363		/* all elements were pre-allocated, cannot insert a new one */
 364		return -E2BIG;
 365
 366	if (unlikely(map_flags == BPF_NOEXIST))
 367		/* all elements already exist */
 368		return -EEXIST;
 369
 370	/* the user space will provide round_up(value_size, 8) bytes that
 371	 * will be copied into per-cpu area. bpf programs can only access
 372	 * value_size of it. During lookup the same extra bytes will be
 373	 * returned or zeros which were zero-filled by percpu_alloc,
  374	 * so no kernel data leak is possible
 375	 */
 376	size = array->elem_size;
 377	rcu_read_lock();
 378	pptr = array->pptrs[index & array->index_mask];
 379	for_each_possible_cpu(cpu) {
 380		copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
 381		bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu));
 382		off += size;
 383	}
 384	rcu_read_unlock();
 385	return 0;
 386}
 387
 388/* Called from syscall or from eBPF program */
 389static int array_map_delete_elem(struct bpf_map *map, void *key)
 390{
 391	return -EINVAL;
 392}
 393
 394static void *array_map_vmalloc_addr(struct bpf_array *array)
 395{
 396	return (void *)round_down((unsigned long)array, PAGE_SIZE);
 397}
 398
 399static void array_map_free_timers(struct bpf_map *map)
 400{
 401	struct bpf_array *array = container_of(map, struct bpf_array, map);
 402	int i;
 403
 404	/* We don't reset or free fields other than timer on uref dropping to zero. */
 405	if (!btf_record_has_field(map->record, BPF_TIMER))
 406		return;
 407
 408	for (i = 0; i < array->map.max_entries; i++)
 409		bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i));
 410}
 411
 412/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
 413static void array_map_free(struct bpf_map *map)
 414{
 415	struct bpf_array *array = container_of(map, struct bpf_array, map);
 416	int i;
 417
 418	if (!IS_ERR_OR_NULL(map->record)) {
 419		if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 420			for (i = 0; i < array->map.max_entries; i++) {
 421				void __percpu *pptr = array->pptrs[i & array->index_mask];
 422				int cpu;
 423
 424				for_each_possible_cpu(cpu) {
 425					bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu));
 426					cond_resched();
 427				}
 428			}
 429		} else {
 430			for (i = 0; i < array->map.max_entries; i++)
 431				bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i));
 432		}
 433	}
 434
 435	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
 436		bpf_array_free_percpu(array);
 437
 438	if (array->map.map_flags & BPF_F_MMAPABLE)
 439		bpf_map_area_free(array_map_vmalloc_addr(array));
 440	else
 441		bpf_map_area_free(array);
 442}
 443
 444static void array_map_seq_show_elem(struct bpf_map *map, void *key,
 445				    struct seq_file *m)
 446{
 447	void *value;
 448
 449	rcu_read_lock();
 450
 451	value = array_map_lookup_elem(map, key);
 452	if (!value) {
 453		rcu_read_unlock();
 454		return;
 455	}
 456
 457	if (map->btf_key_type_id)
 458		seq_printf(m, "%u: ", *(u32 *)key);
 459	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
 460	seq_puts(m, "\n");
 461
 462	rcu_read_unlock();
 463}
 464
 465static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
 466					   struct seq_file *m)
 467{
 468	struct bpf_array *array = container_of(map, struct bpf_array, map);
 469	u32 index = *(u32 *)key;
 470	void __percpu *pptr;
 471	int cpu;
 472
 473	rcu_read_lock();
 474
 475	seq_printf(m, "%u: {\n", *(u32 *)key);
 476	pptr = array->pptrs[index & array->index_mask];
 477	for_each_possible_cpu(cpu) {
 478		seq_printf(m, "\tcpu%d: ", cpu);
 479		btf_type_seq_show(map->btf, map->btf_value_type_id,
 480				  per_cpu_ptr(pptr, cpu), m);
 481		seq_puts(m, "\n");
 482	}
 483	seq_puts(m, "}\n");
 484
 485	rcu_read_unlock();
 486}
 487
 488static int array_map_check_btf(const struct bpf_map *map,
 489			       const struct btf *btf,
 490			       const struct btf_type *key_type,
 491			       const struct btf_type *value_type)
 492{
 493	u32 int_data;
 494
 495	/* One exception for keyless BTF: .bss/.data/.rodata map */
 496	if (btf_type_is_void(key_type)) {
 497		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
 498		    map->max_entries != 1)
 499			return -EINVAL;
 500
 501		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
 502			return -EINVAL;
 503
 504		return 0;
 505	}
 506
 507	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
 508		return -EINVAL;
 509
 510	int_data = *(u32 *)(key_type + 1);
 511	/* bpf array can only take a u32 key. This check makes sure
 512	 * that the btf matches the attr used during map_create.
 513	 */
 514	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
 515		return -EINVAL;
 516
 517	return 0;
 518}
 519
 520static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
 521{
 522	struct bpf_array *array = container_of(map, struct bpf_array, map);
 523	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;
 524
 525	if (!(map->map_flags & BPF_F_MMAPABLE))
 526		return -EINVAL;
 527
 528	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
 529	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
 530		return -EINVAL;
 531
 532	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
 533				   vma->vm_pgoff + pgoff);
 534}
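
/*
 * Editorial sketch, not part of the original file: a BPF_F_MMAPABLE array
 * created as above can be mapped straight into user space; the requested
 * length must stay within PAGE_ALIGN(max_entries * round_up(value_size, 8))
 * or the check above rejects it. The helper name is illustrative.
 */
#if 0 /* user-space illustration only */
#include <sys/mman.h>

static void *mmap_array_values(int map_fd, size_t value_size, size_t max_entries)
{
	size_t len = ((value_size + 7) & ~(size_t)7) * max_entries;

	return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
}
#endif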
 535
 536static bool array_map_meta_equal(const struct bpf_map *meta0,
 537				 const struct bpf_map *meta1)
 538{
 539	if (!bpf_map_meta_equal(meta0, meta1))
 540		return false;
 541	return meta0->map_flags & BPF_F_INNER_MAP ? true :
 542	       meta0->max_entries == meta1->max_entries;
 543}
 544
 545struct bpf_iter_seq_array_map_info {
 546	struct bpf_map *map;
 547	void *percpu_value_buf;
 548	u32 index;
 549};
 550
 551static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
 552{
 553	struct bpf_iter_seq_array_map_info *info = seq->private;
 554	struct bpf_map *map = info->map;
 555	struct bpf_array *array;
 556	u32 index;
 557
 558	if (info->index >= map->max_entries)
 559		return NULL;
 560
 561	if (*pos == 0)
 562		++*pos;
 563	array = container_of(map, struct bpf_array, map);
 564	index = info->index & array->index_mask;
 565	if (info->percpu_value_buf)
 566	       return array->pptrs[index];
 567	return array_map_elem_ptr(array, index);
 568}
 569
 570static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 571{
 572	struct bpf_iter_seq_array_map_info *info = seq->private;
 573	struct bpf_map *map = info->map;
 574	struct bpf_array *array;
 575	u32 index;
 576
 577	++*pos;
 578	++info->index;
 579	if (info->index >= map->max_entries)
 580		return NULL;
 581
 582	array = container_of(map, struct bpf_array, map);
 583	index = info->index & array->index_mask;
 584	if (info->percpu_value_buf)
 585	       return array->pptrs[index];
 586	return array_map_elem_ptr(array, index);
 587}
 588
 589static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
 590{
 591	struct bpf_iter_seq_array_map_info *info = seq->private;
 592	struct bpf_iter__bpf_map_elem ctx = {};
 593	struct bpf_map *map = info->map;
 594	struct bpf_array *array = container_of(map, struct bpf_array, map);
 595	struct bpf_iter_meta meta;
 596	struct bpf_prog *prog;
 597	int off = 0, cpu = 0;
 598	void __percpu **pptr;
 599	u32 size;
 600
 601	meta.seq = seq;
 602	prog = bpf_iter_get_info(&meta, v == NULL);
 603	if (!prog)
 604		return 0;
 605
 606	ctx.meta = &meta;
 607	ctx.map = info->map;
 608	if (v) {
 609		ctx.key = &info->index;
 610
 611		if (!info->percpu_value_buf) {
 612			ctx.value = v;
 613		} else {
 614			pptr = v;
 615			size = array->elem_size;
 616			for_each_possible_cpu(cpu) {
 617				copy_map_value_long(map, info->percpu_value_buf + off,
 618						    per_cpu_ptr(pptr, cpu));
 619				check_and_init_map_value(map, info->percpu_value_buf + off);
 620				off += size;
 621			}
 622			ctx.value = info->percpu_value_buf;
 623		}
 624	}
 625
 626	return bpf_iter_run_prog(prog, &ctx);
 627}
 628
 629static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
 630{
 631	return __bpf_array_map_seq_show(seq, v);
 632}
 633
 634static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
 635{
 636	if (!v)
 637		(void)__bpf_array_map_seq_show(seq, NULL);
 638}
 639
 640static int bpf_iter_init_array_map(void *priv_data,
 641				   struct bpf_iter_aux_info *aux)
 642{
 643	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
 644	struct bpf_map *map = aux->map;
 645	struct bpf_array *array = container_of(map, struct bpf_array, map);
 646	void *value_buf;
 647	u32 buf_size;
 648
 649	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 650		buf_size = array->elem_size * num_possible_cpus();
 651		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
 652		if (!value_buf)
 653			return -ENOMEM;
 654
 655		seq_info->percpu_value_buf = value_buf;
 656	}
 657
 658	/* bpf_iter_attach_map() acquires a map uref, and the uref may be
 659	 * released before or in the middle of iterating map elements, so
  660	 * acquire an extra map uref for the iterator.
 661	 */
 662	bpf_map_inc_with_uref(map);
 663	seq_info->map = map;
 664	return 0;
 665}
 666
 667static void bpf_iter_fini_array_map(void *priv_data)
 668{
 669	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
 670
 671	bpf_map_put_with_uref(seq_info->map);
 672	kfree(seq_info->percpu_value_buf);
 673}
 674
 675static const struct seq_operations bpf_array_map_seq_ops = {
 676	.start	= bpf_array_map_seq_start,
 677	.next	= bpf_array_map_seq_next,
 678	.stop	= bpf_array_map_seq_stop,
 679	.show	= bpf_array_map_seq_show,
 680};
 681
 682static const struct bpf_iter_seq_info iter_seq_info = {
 683	.seq_ops		= &bpf_array_map_seq_ops,
 684	.init_seq_private	= bpf_iter_init_array_map,
 685	.fini_seq_private	= bpf_iter_fini_array_map,
 686	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
 687};
 688
 689static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
 690				   void *callback_ctx, u64 flags)
 691{
 692	u32 i, key, num_elems = 0;
 693	struct bpf_array *array;
 694	bool is_percpu;
 695	u64 ret = 0;
 696	void *val;
 697
 698	if (flags != 0)
 699		return -EINVAL;
 700
 701	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
 702	array = container_of(map, struct bpf_array, map);
 703	if (is_percpu)
 704		migrate_disable();
 705	for (i = 0; i < map->max_entries; i++) {
 706		if (is_percpu)
 707			val = this_cpu_ptr(array->pptrs[i]);
 708		else
 709			val = array_map_elem_ptr(array, i);
 710		num_elems++;
 711		key = i;
 712		ret = callback_fn((u64)(long)map, (u64)(long)&key,
 713				  (u64)(long)val, (u64)(long)callback_ctx, 0);
 714		/* return value: 0 - continue, 1 - stop and return */
 715		if (ret)
 716			break;
 717	}
 718
 719	if (is_percpu)
 720		migrate_enable();
 721	return num_elems;
 722}
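
/*
 * Editorial sketch, not part of the original file: the callback invoked by
 * the loop above is what a BPF program passes to bpf_for_each_map_elem().
 * Returning 0 continues the walk, returning 1 stops it. Assumes an array
 * whose values are __u64 and libbpf's bpf_helpers.h; names are illustrative.
 */
#if 0 /* BPF program side illustration only */
static long count_nonzero(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
{
	if (*val)
		(*(__u64 *)ctx)++;
	return 0;	/* 0 - continue, 1 - stop */
}

/* in a program: __u64 sum = 0; bpf_for_each_map_elem(&my_array, count_nonzero, &sum, 0); */
#endif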
  723
 724BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array)
 725const struct bpf_map_ops array_map_ops = {
 726	.map_meta_equal = array_map_meta_equal,
 727	.map_alloc_check = array_map_alloc_check,
 728	.map_alloc = array_map_alloc,
 729	.map_free = array_map_free,
 730	.map_get_next_key = array_map_get_next_key,
 731	.map_release_uref = array_map_free_timers,
 732	.map_lookup_elem = array_map_lookup_elem,
 733	.map_update_elem = array_map_update_elem,
 734	.map_delete_elem = array_map_delete_elem,
 735	.map_gen_lookup = array_map_gen_lookup,
 736	.map_direct_value_addr = array_map_direct_value_addr,
 737	.map_direct_value_meta = array_map_direct_value_meta,
 738	.map_mmap = array_map_mmap,
 739	.map_seq_show_elem = array_map_seq_show_elem,
 740	.map_check_btf = array_map_check_btf,
 741	.map_lookup_batch = generic_map_lookup_batch,
 742	.map_update_batch = generic_map_update_batch,
 743	.map_set_for_each_callback_args = map_set_for_each_callback_args,
  744	.map_for_each_callback = bpf_for_each_array_elem,
 745	.map_btf_id = &array_map_btf_ids[0],
 746	.iter_seq_info = &iter_seq_info,
 747};
 748
 749const struct bpf_map_ops percpu_array_map_ops = {
 750	.map_meta_equal = bpf_map_meta_equal,
 751	.map_alloc_check = array_map_alloc_check,
 752	.map_alloc = array_map_alloc,
 753	.map_free = array_map_free,
 754	.map_get_next_key = array_map_get_next_key,
 755	.map_lookup_elem = percpu_array_map_lookup_elem,
 756	.map_update_elem = array_map_update_elem,
 757	.map_delete_elem = array_map_delete_elem,
 758	.map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
 759	.map_seq_show_elem = percpu_array_map_seq_show_elem,
 760	.map_check_btf = array_map_check_btf,
 761	.map_lookup_batch = generic_map_lookup_batch,
 762	.map_update_batch = generic_map_update_batch,
 763	.map_set_for_each_callback_args = map_set_for_each_callback_args,
  764	.map_for_each_callback = bpf_for_each_array_elem,
 765	.map_btf_id = &array_map_btf_ids[0],
 766	.iter_seq_info = &iter_seq_info,
 767};
 768
 769static int fd_array_map_alloc_check(union bpf_attr *attr)
 770{
 771	/* only file descriptors can be stored in this type of map */
 772	if (attr->value_size != sizeof(u32))
 773		return -EINVAL;
 774	/* Program read-only/write-only not supported for special maps yet. */
 775	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
 776		return -EINVAL;
 777	return array_map_alloc_check(attr);
 778}
 779
 780static void fd_array_map_free(struct bpf_map *map)
 781{
 782	struct bpf_array *array = container_of(map, struct bpf_array, map);
 783	int i;
 784
 785	/* make sure it's empty */
 786	for (i = 0; i < array->map.max_entries; i++)
 787		BUG_ON(array->ptrs[i] != NULL);
 788
 789	bpf_map_area_free(array);
 790}
 791
 792static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
 793{
 794	return ERR_PTR(-EOPNOTSUPP);
 795}
 796
 797/* only called from syscall */
 798int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
 799{
 800	void **elem, *ptr;
 801	int ret =  0;
 802
 803	if (!map->ops->map_fd_sys_lookup_elem)
 804		return -ENOTSUPP;
 805
 806	rcu_read_lock();
 807	elem = array_map_lookup_elem(map, key);
 808	if (elem && (ptr = READ_ONCE(*elem)))
 809		*value = map->ops->map_fd_sys_lookup_elem(ptr);
 810	else
 811		ret = -ENOENT;
 812	rcu_read_unlock();
 813
 814	return ret;
 815}
 816
 817/* only called from syscall */
 818int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
 819				 void *key, void *value, u64 map_flags)
 820{
 821	struct bpf_array *array = container_of(map, struct bpf_array, map);
 822	void *new_ptr, *old_ptr;
 823	u32 index = *(u32 *)key, ufd;
 824
 825	if (map_flags != BPF_ANY)
 826		return -EINVAL;
 827
 828	if (index >= array->map.max_entries)
 829		return -E2BIG;
 830
 831	ufd = *(u32 *)value;
 832	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
 833	if (IS_ERR(new_ptr))
 834		return PTR_ERR(new_ptr);
 835
 836	if (map->ops->map_poke_run) {
 837		mutex_lock(&array->aux->poke_mutex);
 838		old_ptr = xchg(array->ptrs + index, new_ptr);
 839		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
 840		mutex_unlock(&array->aux->poke_mutex);
 841	} else {
 842		old_ptr = xchg(array->ptrs + index, new_ptr);
 843	}
 844
 845	if (old_ptr)
 846		map->ops->map_fd_put_ptr(old_ptr);
 847	return 0;
 848}
 849
 850static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
 851{
 852	struct bpf_array *array = container_of(map, struct bpf_array, map);
 853	void *old_ptr;
 854	u32 index = *(u32 *)key;
 855
 856	if (index >= array->map.max_entries)
 857		return -E2BIG;
 858
 859	if (map->ops->map_poke_run) {
 860		mutex_lock(&array->aux->poke_mutex);
 861		old_ptr = xchg(array->ptrs + index, NULL);
 862		map->ops->map_poke_run(map, index, old_ptr, NULL);
 863		mutex_unlock(&array->aux->poke_mutex);
 864	} else {
 865		old_ptr = xchg(array->ptrs + index, NULL);
 866	}
 867
 868	if (old_ptr) {
 869		map->ops->map_fd_put_ptr(old_ptr);
 870		return 0;
 871	} else {
 872		return -ENOENT;
 873	}
 874}
  875
 876static void *prog_fd_array_get_ptr(struct bpf_map *map,
 877				   struct file *map_file, int fd)
 878{
 879	struct bpf_prog *prog = bpf_prog_get(fd);
 880
 881	if (IS_ERR(prog))
 882		return prog;
 883
 884	if (!bpf_prog_map_compatible(map, prog)) {
 885		bpf_prog_put(prog);
 886		return ERR_PTR(-EINVAL);
 887	}
 888
 889	return prog;
 890}
 891
 892static void prog_fd_array_put_ptr(void *ptr)
  893{
 894	bpf_prog_put(ptr);
 895}
 896
 897static u32 prog_fd_array_sys_lookup_elem(void *ptr)
 898{
 899	return ((struct bpf_prog *)ptr)->aux->id;
 900}
 901
 902/* decrement refcnt of all bpf_progs that are stored in this map */
 903static void bpf_fd_array_map_clear(struct bpf_map *map)
 904{
 905	struct bpf_array *array = container_of(map, struct bpf_array, map);
 906	int i;
 907
 908	for (i = 0; i < array->map.max_entries; i++)
 909		fd_array_map_delete_elem(map, &i);
 910}
 911
 912static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
 913					 struct seq_file *m)
 914{
 915	void **elem, *ptr;
 916	u32 prog_id;
 917
 918	rcu_read_lock();
 919
 920	elem = array_map_lookup_elem(map, key);
 921	if (elem) {
 922		ptr = READ_ONCE(*elem);
 923		if (ptr) {
 924			seq_printf(m, "%u: ", *(u32 *)key);
 925			prog_id = prog_fd_array_sys_lookup_elem(ptr);
 926			btf_type_seq_show(map->btf, map->btf_value_type_id,
 927					  &prog_id, m);
 928			seq_puts(m, "\n");
 929		}
 930	}
 931
 932	rcu_read_unlock();
 933}
 934
 935struct prog_poke_elem {
 936	struct list_head list;
 937	struct bpf_prog_aux *aux;
 938};
 939
 940static int prog_array_map_poke_track(struct bpf_map *map,
 941				     struct bpf_prog_aux *prog_aux)
 942{
 943	struct prog_poke_elem *elem;
 944	struct bpf_array_aux *aux;
 945	int ret = 0;
 946
 947	aux = container_of(map, struct bpf_array, map)->aux;
 948	mutex_lock(&aux->poke_mutex);
 949	list_for_each_entry(elem, &aux->poke_progs, list) {
 950		if (elem->aux == prog_aux)
 951			goto out;
 952	}
 953
 954	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
 955	if (!elem) {
 956		ret = -ENOMEM;
 957		goto out;
 958	}
 959
 960	INIT_LIST_HEAD(&elem->list);
 961	/* We must track the program's aux info at this point in time
 962	 * since the program pointer itself may not be stable yet, see
 963	 * also comment in prog_array_map_poke_run().
 964	 */
 965	elem->aux = prog_aux;
 966
 967	list_add_tail(&elem->list, &aux->poke_progs);
 968out:
 969	mutex_unlock(&aux->poke_mutex);
 970	return ret;
 971}
 972
 973static void prog_array_map_poke_untrack(struct bpf_map *map,
 974					struct bpf_prog_aux *prog_aux)
 975{
 976	struct prog_poke_elem *elem, *tmp;
 977	struct bpf_array_aux *aux;
 978
 979	aux = container_of(map, struct bpf_array, map)->aux;
 980	mutex_lock(&aux->poke_mutex);
 981	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
 982		if (elem->aux == prog_aux) {
 983			list_del_init(&elem->list);
 984			kfree(elem);
 985			break;
 986		}
 987	}
 988	mutex_unlock(&aux->poke_mutex);
 989}
  990
 991static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
 992				    struct bpf_prog *old,
 993				    struct bpf_prog *new)
 994{
 995	u8 *old_addr, *new_addr, *old_bypass_addr;
 996	struct prog_poke_elem *elem;
 997	struct bpf_array_aux *aux;
 998
 999	aux = container_of(map, struct bpf_array, map)->aux;
1000	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));
1001
1002	list_for_each_entry(elem, &aux->poke_progs, list) {
1003		struct bpf_jit_poke_descriptor *poke;
1004		int i, ret;
1005
1006		for (i = 0; i < elem->aux->size_poke_tab; i++) {
1007			poke = &elem->aux->poke_tab[i];
1008
1009			/* Few things to be aware of:
1010			 *
1011			 * 1) We can only ever access aux in this context, but
1012			 *    not aux->prog since it might not be stable yet and
1013			 *    there could be danger of use after free otherwise.
1014			 * 2) Initially when we start tracking aux, the program
1015			 *    is not JITed yet and also does not have a kallsyms
1016			 *    entry. We skip these as poke->tailcall_target_stable
1017			 *    is not active yet. The JIT will do the final fixup
1018			 *    before setting it stable. The various
1019			 *    poke->tailcall_target_stable are successively
1020			 *    activated, so tail call updates can arrive from here
1021			 *    while JIT is still finishing its final fixup for
1022			 *    non-activated poke entries.
1023			 * 3) On program teardown, the program's kallsym entry gets
1024			 *    removed out of RCU callback, but we can only untrack
1025			 *    from sleepable context, therefore bpf_arch_text_poke()
1026			 *    might not see that this is in BPF text section and
1027			 *    bails out with -EINVAL. As these are unreachable since
1028			 *    RCU grace period already passed, we simply skip them.
 1029			 * 4) Also, programs reaching a refcount of zero while patching
 1030			 *    is in progress are okay since we're protected under
1031			 *    poke_mutex and untrack the programs before the JIT
1032			 *    buffer is freed. When we're still in the middle of
1033			 *    patching and suddenly kallsyms entry of the program
1034			 *    gets evicted, we just skip the rest which is fine due
1035			 *    to point 3).
1036			 * 5) Any other error happening below from bpf_arch_text_poke()
 1037			 *    is an unexpected bug.
1038			 */
1039			if (!READ_ONCE(poke->tailcall_target_stable))
1040				continue;
1041			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
1042				continue;
1043			if (poke->tail_call.map != map ||
1044			    poke->tail_call.key != key)
1045				continue;
1046
1047			old_bypass_addr = old ? NULL : poke->bypass_addr;
1048			old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
1049			new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
1050
1051			if (new) {
1052				ret = bpf_arch_text_poke(poke->tailcall_target,
1053							 BPF_MOD_JUMP,
1054							 old_addr, new_addr);
1055				BUG_ON(ret < 0 && ret != -EINVAL);
1056				if (!old) {
1057					ret = bpf_arch_text_poke(poke->tailcall_bypass,
1058								 BPF_MOD_JUMP,
1059								 poke->bypass_addr,
1060								 NULL);
1061					BUG_ON(ret < 0 && ret != -EINVAL);
1062				}
1063			} else {
1064				ret = bpf_arch_text_poke(poke->tailcall_bypass,
1065							 BPF_MOD_JUMP,
1066							 old_bypass_addr,
1067							 poke->bypass_addr);
1068				BUG_ON(ret < 0 && ret != -EINVAL);
 1069				/* let other CPUs finish executing the program so that it
 1070				 * will not be possible to expose them to an invalid nop,
 1071				 * stack unwind, or nop state
1072				 */
1073				if (!ret)
1074					synchronize_rcu();
1075				ret = bpf_arch_text_poke(poke->tailcall_target,
1076							 BPF_MOD_JUMP,
1077							 old_addr, NULL);
1078				BUG_ON(ret < 0 && ret != -EINVAL);
1079			}
1080		}
1081	}
1082}
1083
1084static void prog_array_map_clear_deferred(struct work_struct *work)
1085{
1086	struct bpf_map *map = container_of(work, struct bpf_array_aux,
1087					   work)->map;
1088	bpf_fd_array_map_clear(map);
1089	bpf_map_put(map);
1090}
1091
1092static void prog_array_map_clear(struct bpf_map *map)
1093{
1094	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
1095						 map)->aux;
1096	bpf_map_inc(map);
1097	schedule_work(&aux->work);
1098}
1099
1100static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
1101{
1102	struct bpf_array_aux *aux;
1103	struct bpf_map *map;
1104
1105	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
1106	if (!aux)
1107		return ERR_PTR(-ENOMEM);
1108
1109	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
1110	INIT_LIST_HEAD(&aux->poke_progs);
1111	mutex_init(&aux->poke_mutex);
1112
1113	map = array_map_alloc(attr);
1114	if (IS_ERR(map)) {
1115		kfree(aux);
1116		return map;
1117	}
1118
1119	container_of(map, struct bpf_array, map)->aux = aux;
1120	aux->map = map;
1121
1122	return map;
1123}
1124
1125static void prog_array_map_free(struct bpf_map *map)
1126{
1127	struct prog_poke_elem *elem, *tmp;
1128	struct bpf_array_aux *aux;
1129
1130	aux = container_of(map, struct bpf_array, map)->aux;
1131	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
1132		list_del_init(&elem->list);
1133		kfree(elem);
1134	}
1135	kfree(aux);
1136	fd_array_map_free(map);
1137}
1138
1139/* prog_array->aux->{type,jited} is a runtime binding.
1140 * Doing static check alone in the verifier is not enough.
1141 * Thus, prog_array_map cannot be used as an inner_map
1142 * and map_meta_equal is not implemented.
1143 */
1144const struct bpf_map_ops prog_array_map_ops = {
1145	.map_alloc_check = fd_array_map_alloc_check,
1146	.map_alloc = prog_array_map_alloc,
1147	.map_free = prog_array_map_free,
1148	.map_poke_track = prog_array_map_poke_track,
1149	.map_poke_untrack = prog_array_map_poke_untrack,
1150	.map_poke_run = prog_array_map_poke_run,
1151	.map_get_next_key = array_map_get_next_key,
1152	.map_lookup_elem = fd_array_map_lookup_elem,
1153	.map_delete_elem = fd_array_map_delete_elem,
1154	.map_fd_get_ptr = prog_fd_array_get_ptr,
1155	.map_fd_put_ptr = prog_fd_array_put_ptr,
1156	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
1157	.map_release_uref = prog_array_map_clear,
 1158	.map_seq_show_elem = prog_array_map_seq_show_elem,
1159	.map_btf_id = &array_map_btf_ids[0],
1160};
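
/*
 * Editorial sketch, not part of the original file: a prog array populated
 * through the fd_array update path above is normally consumed with
 * bpf_tail_call(), which jumps to the program stored at the given index
 * and does not return on success. Assumes libbpf's bpf_helpers.h and
 * BTF-defined map syntax; names and the XDP section are illustrative.
 */
#if 0 /* BPF program side illustration only */
struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 8);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("xdp")
int dispatcher(struct xdp_md *ctx)
{
	bpf_tail_call(ctx, &jmp_table, 0);
	/* only reached if slot 0 is empty or the tail call fails */
	return XDP_PASS;
}
#endif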
1161
1162static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
1163						   struct file *map_file)
1164{
1165	struct bpf_event_entry *ee;
1166
1167	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
1168	if (ee) {
1169		ee->event = perf_file->private_data;
1170		ee->perf_file = perf_file;
1171		ee->map_file = map_file;
1172	}
1173
1174	return ee;
1175}
1176
1177static void __bpf_event_entry_free(struct rcu_head *rcu)
1178{
1179	struct bpf_event_entry *ee;
1180
1181	ee = container_of(rcu, struct bpf_event_entry, rcu);
1182	fput(ee->perf_file);
1183	kfree(ee);
1184}
1185
1186static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
1187{
1188	call_rcu(&ee->rcu, __bpf_event_entry_free);
1189}
1190
1191static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
1192					 struct file *map_file, int fd)
1193{
1194	struct bpf_event_entry *ee;
1195	struct perf_event *event;
1196	struct file *perf_file;
1197	u64 value;
1198
1199	perf_file = perf_event_get(fd);
1200	if (IS_ERR(perf_file))
1201		return perf_file;
1202
1203	ee = ERR_PTR(-EOPNOTSUPP);
1204	event = perf_file->private_data;
1205	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
1206		goto err_out;
1207
1208	ee = bpf_event_entry_gen(perf_file, map_file);
1209	if (ee)
1210		return ee;
1211	ee = ERR_PTR(-ENOMEM);
1212err_out:
1213	fput(perf_file);
1214	return ee;
1215}
1216
1217static void perf_event_fd_array_put_ptr(void *ptr)
 1218{
1219	bpf_event_entry_free_rcu(ptr);
1220}
1221
1222static void perf_event_fd_array_release(struct bpf_map *map,
1223					struct file *map_file)
1224{
1225	struct bpf_array *array = container_of(map, struct bpf_array, map);
1226	struct bpf_event_entry *ee;
1227	int i;
1228
1229	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1230		return;
1231
1232	rcu_read_lock();
1233	for (i = 0; i < array->map.max_entries; i++) {
1234		ee = READ_ONCE(array->ptrs[i]);
1235		if (ee && ee->map_file == map_file)
1236			fd_array_map_delete_elem(map, &i);
1237	}
1238	rcu_read_unlock();
1239}
1240
1241static void perf_event_fd_array_map_free(struct bpf_map *map)
1242{
1243	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1244		bpf_fd_array_map_clear(map);
1245	fd_array_map_free(map);
1246}
1247
1248const struct bpf_map_ops perf_event_array_map_ops = {
1249	.map_meta_equal = bpf_map_meta_equal,
1250	.map_alloc_check = fd_array_map_alloc_check,
1251	.map_alloc = array_map_alloc,
1252	.map_free = perf_event_fd_array_map_free,
1253	.map_get_next_key = array_map_get_next_key,
1254	.map_lookup_elem = fd_array_map_lookup_elem,
1255	.map_delete_elem = fd_array_map_delete_elem,
1256	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
1257	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
1258	.map_release = perf_event_fd_array_release,
 1259	.map_check_btf = map_check_no_btf,
1260	.map_btf_id = &array_map_btf_ids[0],
1261};
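
/*
 * Editorial sketch, not part of the original file: each slot of a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY holds a perf event installed through the
 * fd_array update path above, and programs usually write to the current
 * CPU's slot with BPF_F_CURRENT_CPU. Assumes libbpf's bpf_helpers.h and
 * that libbpf sizes max_entries to the CPU count when left at zero;
 * names and the attach point are illustrative.
 */
#if 0 /* BPF program side illustration only */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} events SEC(".maps");

SEC("kprobe/do_sys_openat2")
int report_open(struct pt_regs *ctx)
{
	__u32 payload = 1;

	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
			      &payload, sizeof(payload));
	return 0;
}
#endif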
1262
1263#ifdef CONFIG_CGROUPS
1264static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
1265				     struct file *map_file /* not used */,
1266				     int fd)
1267{
1268	return cgroup_get_from_fd(fd);
1269}
1270
1271static void cgroup_fd_array_put_ptr(void *ptr)
1272{
 1273	/* cgroup_put() frees the cgroup after an RCU grace period */
1274	cgroup_put(ptr);
1275}
1276
1277static void cgroup_fd_array_free(struct bpf_map *map)
1278{
1279	bpf_fd_array_map_clear(map);
1280	fd_array_map_free(map);
1281}
1282
1283const struct bpf_map_ops cgroup_array_map_ops = {
1284	.map_meta_equal = bpf_map_meta_equal,
1285	.map_alloc_check = fd_array_map_alloc_check,
1286	.map_alloc = array_map_alloc,
1287	.map_free = cgroup_fd_array_free,
1288	.map_get_next_key = array_map_get_next_key,
1289	.map_lookup_elem = fd_array_map_lookup_elem,
1290	.map_delete_elem = fd_array_map_delete_elem,
1291	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
1292	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
 1293	.map_check_btf = map_check_no_btf,
1294	.map_btf_id = &array_map_btf_ids[0],
1295};
1296#endif
1297
1298static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
1299{
1300	struct bpf_map *map, *inner_map_meta;
1301
1302	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
1303	if (IS_ERR(inner_map_meta))
1304		return inner_map_meta;
1305
1306	map = array_map_alloc(attr);
1307	if (IS_ERR(map)) {
1308		bpf_map_meta_free(inner_map_meta);
1309		return map;
1310	}
1311
1312	map->inner_map_meta = inner_map_meta;
1313
1314	return map;
1315}
1316
1317static void array_of_map_free(struct bpf_map *map)
1318{
1319	/* map->inner_map_meta is only accessed by syscall which
1320	 * is protected by fdget/fdput.
1321	 */
1322	bpf_map_meta_free(map->inner_map_meta);
1323	bpf_fd_array_map_clear(map);
1324	fd_array_map_free(map);
1325}
1326
1327static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
1328{
1329	struct bpf_map **inner_map = array_map_lookup_elem(map, key);
1330
1331	if (!inner_map)
1332		return NULL;
1333
1334	return READ_ONCE(*inner_map);
1335}
1336
1337static int array_of_map_gen_lookup(struct bpf_map *map,
1338				   struct bpf_insn *insn_buf)
1339{
1340	struct bpf_array *array = container_of(map, struct bpf_array, map);
1341	u32 elem_size = array->elem_size;
1342	struct bpf_insn *insn = insn_buf;
1343	const int ret = BPF_REG_0;
1344	const int map_ptr = BPF_REG_1;
1345	const int index = BPF_REG_2;
1346
1347	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
1348	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
1349	if (!map->bypass_spec_v1) {
1350		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
1351		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
1352	} else {
1353		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
1354	}
1355	if (is_power_of_2(elem_size))
1356		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
1357	else
1358		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
1359	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
1360	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
1361	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
1362	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
1363	*insn++ = BPF_MOV64_IMM(ret, 0);
1364
1365	return insn - insn_buf;
1366}
1367
1368const struct bpf_map_ops array_of_maps_map_ops = {
1369	.map_alloc_check = fd_array_map_alloc_check,
1370	.map_alloc = array_of_map_alloc,
1371	.map_free = array_of_map_free,
1372	.map_get_next_key = array_map_get_next_key,
1373	.map_lookup_elem = array_of_map_lookup_elem,
1374	.map_delete_elem = fd_array_map_delete_elem,
1375	.map_fd_get_ptr = bpf_map_fd_get_ptr,
1376	.map_fd_put_ptr = bpf_map_fd_put_ptr,
1377	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
1378	.map_gen_lookup = array_of_map_gen_lookup,
1379	.map_lookup_batch = generic_map_lookup_batch,
1380	.map_update_batch = generic_map_update_batch,
 1381	.map_check_btf = map_check_no_btf,
1382	.map_btf_id = &array_map_btf_ids[0],
1383};
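
/*
 * Editorial sketch, not part of the original file: with
 * BPF_MAP_TYPE_ARRAY_OF_MAPS handled by array_of_maps_map_ops above, the
 * outer array stores inner map fds and array_of_map_lookup_elem() hands
 * the program an inner map pointer it can use like any other map. Assumes
 * libbpf's BTF-defined map-in-map syntax and bpf_helpers.h; names and the
 * attach point are illustrative.
 */
#if 0 /* BPF program side illustration only */
struct inner_array {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, 4);
	__type(key, __u32);
	__array(values, struct inner_array);
} outer SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_getpid")
int touch_inner(void *ctx)
{
	__u32 zero = 0;
	void *inner = bpf_map_lookup_elem(&outer, &zero);
	__u64 *val;

	if (!inner)
		return 0;
	val = bpf_map_lookup_elem(inner, &zero);
	if (val)
		__sync_fetch_and_add(val, 1);
	return 0;
}
#endif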
 556	u32 index;
 557
 558	if (info->index >= map->max_entries)
 559		return NULL;
 560
 561	if (*pos == 0)
 562		++*pos;
 563	array = container_of(map, struct bpf_array, map);
 564	index = info->index & array->index_mask;
 565	if (info->percpu_value_buf)
 566	       return array->pptrs[index];
 567	return array_map_elem_ptr(array, index);
 568}
 569
 570static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 571{
 572	struct bpf_iter_seq_array_map_info *info = seq->private;
 573	struct bpf_map *map = info->map;
 574	struct bpf_array *array;
 575	u32 index;
 576
 577	++*pos;
 578	++info->index;
 579	if (info->index >= map->max_entries)
 580		return NULL;
 581
 582	array = container_of(map, struct bpf_array, map);
 583	index = info->index & array->index_mask;
 584	if (info->percpu_value_buf)
 585	       return array->pptrs[index];
 586	return array_map_elem_ptr(array, index);
 587}
 588
 589static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
 590{
 591	struct bpf_iter_seq_array_map_info *info = seq->private;
 592	struct bpf_iter__bpf_map_elem ctx = {};
 593	struct bpf_map *map = info->map;
 594	struct bpf_array *array = container_of(map, struct bpf_array, map);
 595	struct bpf_iter_meta meta;
 596	struct bpf_prog *prog;
 597	int off = 0, cpu = 0;
 598	void __percpu **pptr;
 599	u32 size;
 600
 601	meta.seq = seq;
 602	prog = bpf_iter_get_info(&meta, v == NULL);
 603	if (!prog)
 604		return 0;
 605
 606	ctx.meta = &meta;
 607	ctx.map = info->map;
 608	if (v) {
 609		ctx.key = &info->index;
 610
 611		if (!info->percpu_value_buf) {
 612			ctx.value = v;
 613		} else {
 614			pptr = v;
 615			size = array->elem_size;
 616			for_each_possible_cpu(cpu) {
 617				copy_map_value_long(map, info->percpu_value_buf + off,
 618						    per_cpu_ptr(pptr, cpu));
 619				check_and_init_map_value(map, info->percpu_value_buf + off);
 620				off += size;
 621			}
 622			ctx.value = info->percpu_value_buf;
 623		}
 624	}
 625
 626	return bpf_iter_run_prog(prog, &ctx);
 627}
 628
 629static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
 630{
 631	return __bpf_array_map_seq_show(seq, v);
 632}
 633
 634static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
 635{
 636	if (!v)
 637		(void)__bpf_array_map_seq_show(seq, NULL);
 638}
 639
 640static int bpf_iter_init_array_map(void *priv_data,
 641				   struct bpf_iter_aux_info *aux)
 642{
 643	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
 644	struct bpf_map *map = aux->map;
 645	struct bpf_array *array = container_of(map, struct bpf_array, map);
 646	void *value_buf;
 647	u32 buf_size;
 648
 649	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 650		buf_size = array->elem_size * num_possible_cpus();
 651		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
 652		if (!value_buf)
 653			return -ENOMEM;
 654
 655		seq_info->percpu_value_buf = value_buf;
 656	}
 657
 658	/* bpf_iter_attach_map() acquires a map uref, and the uref may be
 659	 * released before or in the middle of iterating map elements, so
 660	 * acquire an extra map uref for the iterator.
 661	 */
 662	bpf_map_inc_with_uref(map);
 663	seq_info->map = map;
 664	return 0;
 665}
 666
 667static void bpf_iter_fini_array_map(void *priv_data)
 668{
 669	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
 670
 671	bpf_map_put_with_uref(seq_info->map);
 672	kfree(seq_info->percpu_value_buf);
 673}
 674
 675static const struct seq_operations bpf_array_map_seq_ops = {
 676	.start	= bpf_array_map_seq_start,
 677	.next	= bpf_array_map_seq_next,
 678	.stop	= bpf_array_map_seq_stop,
 679	.show	= bpf_array_map_seq_show,
 680};
 681
 682static const struct bpf_iter_seq_info iter_seq_info = {
 683	.seq_ops		= &bpf_array_map_seq_ops,
 684	.init_seq_private	= bpf_iter_init_array_map,
 685	.fini_seq_private	= bpf_iter_fini_array_map,
 686	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
 687};
 688
 689static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
 690				    void *callback_ctx, u64 flags)
 691{
 692	u32 i, key, num_elems = 0;
 693	struct bpf_array *array;
 694	bool is_percpu;
 695	u64 ret = 0;
 696	void *val;
 697
 698	if (flags != 0)
 699		return -EINVAL;
 700
 701	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
 702	array = container_of(map, struct bpf_array, map);
 703	if (is_percpu)
 704		migrate_disable();
 705	for (i = 0; i < map->max_entries; i++) {
 706		if (is_percpu)
 707			val = this_cpu_ptr(array->pptrs[i]);
 708		else
 709			val = array_map_elem_ptr(array, i);
 710		num_elems++;
 711		key = i;
 712		ret = callback_fn((u64)(long)map, (u64)(long)&key,
 713				  (u64)(long)val, (u64)(long)callback_ctx, 0);
 714		/* return value: 0 - continue, 1 - stop and return */
 715		if (ret)
 716			break;
 717	}
 718
 719	if (is_percpu)
 720		migrate_enable();
 721	return num_elems;
 722}
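
/*
 * Hypothetical BPF-program-side sketch of the bpf_for_each_map_elem() helper
 * backed by the function above for array maps ("my_array" and the callback
 * name are illustrative assumptions; flags must be 0):
 *
 *	static long count_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
 *	{
 *		(*(__u64 *)ctx)++;
 *		return 0;	// 0 - continue, 1 - stop and return
 *	}
 *
 *	__u64 count = 0;
 *	bpf_for_each_map_elem(&my_array, count_cb, &count, 0);
 */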
 723
 724static u64 array_map_mem_usage(const struct bpf_map *map)
 725{
 726	struct bpf_array *array = container_of(map, struct bpf_array, map);
 727	bool percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
 728	u32 elem_size = array->elem_size;
 729	u64 entries = map->max_entries;
 730	u64 usage = sizeof(*array);
 731
 732	if (percpu) {
 733		usage += entries * sizeof(void *);
 734		usage += entries * elem_size * num_possible_cpus();
 735	} else {
 736		if (map->map_flags & BPF_F_MMAPABLE) {
 737			usage = PAGE_ALIGN(usage);
 738			usage += PAGE_ALIGN(entries * elem_size);
 739		} else {
 740			usage += entries * elem_size;
 741		}
 742	}
 743	return usage;
 744}
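
/*
 * Worked example with assumed parameters: a BPF_F_MMAPABLE array with
 * max_entries = 1000 and value_size = 12 has elem_size = 16, so on a 4K-page
 * system (and with sizeof(struct bpf_array) well under one page):
 *
 *	usage = PAGE_ALIGN(sizeof(struct bpf_array)) + PAGE_ALIGN(1000 * 16)
 *	      = 4096 + 16384 = 20480 bytes
 */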
 745
 746BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array)
 747const struct bpf_map_ops array_map_ops = {
 748	.map_meta_equal = array_map_meta_equal,
 749	.map_alloc_check = array_map_alloc_check,
 750	.map_alloc = array_map_alloc,
 751	.map_free = array_map_free,
 752	.map_get_next_key = array_map_get_next_key,
 753	.map_release_uref = array_map_free_timers,
 754	.map_lookup_elem = array_map_lookup_elem,
 755	.map_update_elem = array_map_update_elem,
 756	.map_delete_elem = array_map_delete_elem,
 757	.map_gen_lookup = array_map_gen_lookup,
 758	.map_direct_value_addr = array_map_direct_value_addr,
 759	.map_direct_value_meta = array_map_direct_value_meta,
 760	.map_mmap = array_map_mmap,
 761	.map_seq_show_elem = array_map_seq_show_elem,
 762	.map_check_btf = array_map_check_btf,
 763	.map_lookup_batch = generic_map_lookup_batch,
 764	.map_update_batch = generic_map_update_batch,
 765	.map_set_for_each_callback_args = map_set_for_each_callback_args,
 766	.map_for_each_callback = bpf_for_each_array_elem,
 767	.map_mem_usage = array_map_mem_usage,
 768	.map_btf_id = &array_map_btf_ids[0],
 769	.iter_seq_info = &iter_seq_info,
 770};
 771
 772const struct bpf_map_ops percpu_array_map_ops = {
 773	.map_meta_equal = bpf_map_meta_equal,
 774	.map_alloc_check = array_map_alloc_check,
 775	.map_alloc = array_map_alloc,
 776	.map_free = array_map_free,
 777	.map_get_next_key = array_map_get_next_key,
 778	.map_lookup_elem = percpu_array_map_lookup_elem,
 779	.map_update_elem = array_map_update_elem,
 780	.map_delete_elem = array_map_delete_elem,
 781	.map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
 782	.map_seq_show_elem = percpu_array_map_seq_show_elem,
 783	.map_check_btf = array_map_check_btf,
 784	.map_lookup_batch = generic_map_lookup_batch,
 785	.map_update_batch = generic_map_update_batch,
 786	.map_set_for_each_callback_args = map_set_for_each_callback_args,
 787	.map_for_each_callback = bpf_for_each_array_elem,
 788	.map_mem_usage = array_map_mem_usage,
 789	.map_btf_id = &array_map_btf_ids[0],
 790	.iter_seq_info = &iter_seq_info,
 791};
 792
 793static int fd_array_map_alloc_check(union bpf_attr *attr)
 794{
 795	/* only file descriptors can be stored in this type of map */
 796	if (attr->value_size != sizeof(u32))
 797		return -EINVAL;
 798	/* Program read-only/write-only not supported for special maps yet. */
 799	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
 800		return -EINVAL;
 801	return array_map_alloc_check(attr);
 802}
 803
 804static void fd_array_map_free(struct bpf_map *map)
 805{
 806	struct bpf_array *array = container_of(map, struct bpf_array, map);
 807	int i;
 808
 809	/* make sure it's empty */
 810	for (i = 0; i < array->map.max_entries; i++)
 811		BUG_ON(array->ptrs[i] != NULL);
 812
 813	bpf_map_area_free(array);
 814}
 815
 816static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
 817{
 818	return ERR_PTR(-EOPNOTSUPP);
 819}
 820
 821/* only called from syscall */
 822int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
 823{
 824	void **elem, *ptr;
 825	int ret = 0;
 826
 827	if (!map->ops->map_fd_sys_lookup_elem)
 828		return -ENOTSUPP;
 829
 830	rcu_read_lock();
 831	elem = array_map_lookup_elem(map, key);
 832	if (elem && (ptr = READ_ONCE(*elem)))
 833		*value = map->ops->map_fd_sys_lookup_elem(ptr);
 834	else
 835		ret = -ENOENT;
 836	rcu_read_unlock();
 837
 838	return ret;
 839}
 840
 841/* only called from syscall */
 842int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
 843				 void *key, void *value, u64 map_flags)
 844{
 845	struct bpf_array *array = container_of(map, struct bpf_array, map);
 846	void *new_ptr, *old_ptr;
 847	u32 index = *(u32 *)key, ufd;
 848
 849	if (map_flags != BPF_ANY)
 850		return -EINVAL;
 851
 852	if (index >= array->map.max_entries)
 853		return -E2BIG;
 854
 855	ufd = *(u32 *)value;
 856	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
 857	if (IS_ERR(new_ptr))
 858		return PTR_ERR(new_ptr);
 859
 860	if (map->ops->map_poke_run) {
 861		mutex_lock(&array->aux->poke_mutex);
 862		old_ptr = xchg(array->ptrs + index, new_ptr);
 863		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
 864		mutex_unlock(&array->aux->poke_mutex);
 865	} else {
 866		old_ptr = xchg(array->ptrs + index, new_ptr);
 867	}
 868
 869	if (old_ptr)
 870		map->ops->map_fd_put_ptr(map, old_ptr, true);
 871	return 0;
 872}
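
/*
 * Illustrative, hypothetical user-space sketch of one common use of this
 * path, populating a prog array (tail-call map); "prog_array_fd" and
 * "prog_fd" are assumptions for the example. The value is the program's
 * file descriptor and only BPF_ANY is accepted, as checked above:
 *
 *	__u32 idx = 0;
 *	__u32 prog_fd_val = prog_fd;
 *	bpf_map_update_elem(prog_array_fd, &idx, &prog_fd_val, BPF_ANY);
 */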
 873
 874static long __fd_array_map_delete_elem(struct bpf_map *map, void *key, bool need_defer)
 875{
 876	struct bpf_array *array = container_of(map, struct bpf_array, map);
 877	void *old_ptr;
 878	u32 index = *(u32 *)key;
 879
 880	if (index >= array->map.max_entries)
 881		return -E2BIG;
 882
 883	if (map->ops->map_poke_run) {
 884		mutex_lock(&array->aux->poke_mutex);
 885		old_ptr = xchg(array->ptrs + index, NULL);
 886		map->ops->map_poke_run(map, index, old_ptr, NULL);
 887		mutex_unlock(&array->aux->poke_mutex);
 888	} else {
 889		old_ptr = xchg(array->ptrs + index, NULL);
 890	}
 891
 892	if (old_ptr) {
 893		map->ops->map_fd_put_ptr(map, old_ptr, need_defer);
 894		return 0;
 895	} else {
 896		return -ENOENT;
 897	}
 898}
 899
 900static long fd_array_map_delete_elem(struct bpf_map *map, void *key)
 901{
 902	return __fd_array_map_delete_elem(map, key, true);
 903}
 904
 905static void *prog_fd_array_get_ptr(struct bpf_map *map,
 906				   struct file *map_file, int fd)
 907{
 908	struct bpf_prog *prog = bpf_prog_get(fd);
 909
 910	if (IS_ERR(prog))
 911		return prog;
 912
 913	if (!bpf_prog_map_compatible(map, prog)) {
 914		bpf_prog_put(prog);
 915		return ERR_PTR(-EINVAL);
 916	}
 917
 918	return prog;
 919}
 920
 921static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
 922{
 923	/* bpf_prog is freed after one RCU or tasks trace grace period */
 924	bpf_prog_put(ptr);
 925}
 926
 927static u32 prog_fd_array_sys_lookup_elem(void *ptr)
 928{
 929	return ((struct bpf_prog *)ptr)->aux->id;
 930}
 931
 932/* decrement refcnt of all bpf_progs that are stored in this map */
 933static void bpf_fd_array_map_clear(struct bpf_map *map, bool need_defer)
 934{
 935	struct bpf_array *array = container_of(map, struct bpf_array, map);
 936	int i;
 937
 938	for (i = 0; i < array->map.max_entries; i++)
 939		__fd_array_map_delete_elem(map, &i, need_defer);
 940}
 941
 942static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
 943					 struct seq_file *m)
 944{
 945	void **elem, *ptr;
 946	u32 prog_id;
 947
 948	rcu_read_lock();
 949
 950	elem = array_map_lookup_elem(map, key);
 951	if (elem) {
 952		ptr = READ_ONCE(*elem);
 953		if (ptr) {
 954			seq_printf(m, "%u: ", *(u32 *)key);
 955			prog_id = prog_fd_array_sys_lookup_elem(ptr);
 956			btf_type_seq_show(map->btf, map->btf_value_type_id,
 957					  &prog_id, m);
 958			seq_puts(m, "\n");
 959		}
 960	}
 961
 962	rcu_read_unlock();
 963}
 964
 965struct prog_poke_elem {
 966	struct list_head list;
 967	struct bpf_prog_aux *aux;
 968};
 969
 970static int prog_array_map_poke_track(struct bpf_map *map,
 971				     struct bpf_prog_aux *prog_aux)
 972{
 973	struct prog_poke_elem *elem;
 974	struct bpf_array_aux *aux;
 975	int ret = 0;
 976
 977	aux = container_of(map, struct bpf_array, map)->aux;
 978	mutex_lock(&aux->poke_mutex);
 979	list_for_each_entry(elem, &aux->poke_progs, list) {
 980		if (elem->aux == prog_aux)
 981			goto out;
 982	}
 983
 984	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
 985	if (!elem) {
 986		ret = -ENOMEM;
 987		goto out;
 988	}
 989
 990	INIT_LIST_HEAD(&elem->list);
 991	/* We must track the program's aux info at this point in time
 992	 * since the program pointer itself may not be stable yet, see
 993	 * also comment in prog_array_map_poke_run().
 994	 */
 995	elem->aux = prog_aux;
 996
 997	list_add_tail(&elem->list, &aux->poke_progs);
 998out:
 999	mutex_unlock(&aux->poke_mutex);
1000	return ret;
1001}
1002
1003static void prog_array_map_poke_untrack(struct bpf_map *map,
1004					struct bpf_prog_aux *prog_aux)
1005{
1006	struct prog_poke_elem *elem, *tmp;
1007	struct bpf_array_aux *aux;
1008
1009	aux = container_of(map, struct bpf_array, map)->aux;
1010	mutex_lock(&aux->poke_mutex);
1011	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
1012		if (elem->aux == prog_aux) {
1013			list_del_init(&elem->list);
1014			kfree(elem);
1015			break;
1016		}
1017	}
1018	mutex_unlock(&aux->poke_mutex);
1019}
1020
1021void __weak bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
1022				      struct bpf_prog *new, struct bpf_prog *old)
1023{
1024	WARN_ON_ONCE(1);
1025}
1026
1027static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
1028				    struct bpf_prog *old,
1029				    struct bpf_prog *new)
1030	{
1031	struct prog_poke_elem *elem;
1032	struct bpf_array_aux *aux;
1033
1034	aux = container_of(map, struct bpf_array, map)->aux;
1035	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));
1036
1037	list_for_each_entry(elem, &aux->poke_progs, list) {
1038		struct bpf_jit_poke_descriptor *poke;
1039		int i;
1040
1041		for (i = 0; i < elem->aux->size_poke_tab; i++) {
1042			poke = &elem->aux->poke_tab[i];
1043
1044			/* Few things to be aware of:
1045			 *
1046			 * 1) We can only ever access aux in this context, but
1047			 *    not aux->prog since it might not be stable yet and
1048			 *    there could be danger of use after free otherwise.
1049			 * 2) Initially when we start tracking aux, the program
1050			 *    is not JITed yet and also does not have a kallsyms
1051			 *    entry. We skip these as poke->tailcall_target_stable
1052			 *    is not active yet. The JIT will do the final fixup
1053			 *    before setting it stable. The various
1054			 *    poke->tailcall_target_stable are successively
1055			 *    activated, so tail call updates can arrive from here
1056			 *    while JIT is still finishing its final fixup for
1057			 *    non-activated poke entries.
1058			 * 3) Also, programs reaching a refcount of zero while
1059			 *    patching is in progress are okay since we're protected
1060			 *    under poke_mutex and untrack the programs before the
1061			 *    JIT buffer is freed.
1062			 */
1063			if (!READ_ONCE(poke->tailcall_target_stable))
1064				continue;
1065			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
1066				continue;
1067			if (poke->tail_call.map != map ||
1068			    poke->tail_call.key != key)
1069				continue;
1070
1071			bpf_arch_poke_desc_update(poke, new, old);
1072		}
1073	}
1074}
1075
1076static void prog_array_map_clear_deferred(struct work_struct *work)
1077{
1078	struct bpf_map *map = container_of(work, struct bpf_array_aux,
1079					   work)->map;
1080	bpf_fd_array_map_clear(map, true);
1081	bpf_map_put(map);
1082}
1083
1084static void prog_array_map_clear(struct bpf_map *map)
1085{
1086	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
1087						 map)->aux;
1088	bpf_map_inc(map);
1089	schedule_work(&aux->work);
1090}
1091
1092static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
1093{
1094	struct bpf_array_aux *aux;
1095	struct bpf_map *map;
1096
1097	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
1098	if (!aux)
1099		return ERR_PTR(-ENOMEM);
1100
1101	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
1102	INIT_LIST_HEAD(&aux->poke_progs);
1103	mutex_init(&aux->poke_mutex);
1104
1105	map = array_map_alloc(attr);
1106	if (IS_ERR(map)) {
1107		kfree(aux);
1108		return map;
1109	}
1110
1111	container_of(map, struct bpf_array, map)->aux = aux;
1112	aux->map = map;
1113
1114	return map;
1115}
1116
1117static void prog_array_map_free(struct bpf_map *map)
1118{
1119	struct prog_poke_elem *elem, *tmp;
1120	struct bpf_array_aux *aux;
1121
1122	aux = container_of(map, struct bpf_array, map)->aux;
1123	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
1124		list_del_init(&elem->list);
1125		kfree(elem);
1126	}
1127	kfree(aux);
1128	fd_array_map_free(map);
1129}
1130
1131/* prog_array->aux->{type,jited} is a runtime binding.
1132 * A static check in the verifier alone is not enough.
1133 * Thus, prog_array_map cannot be used as an inner_map
1134 * and map_meta_equal is not implemented.
1135 */
1136const struct bpf_map_ops prog_array_map_ops = {
1137	.map_alloc_check = fd_array_map_alloc_check,
1138	.map_alloc = prog_array_map_alloc,
1139	.map_free = prog_array_map_free,
1140	.map_poke_track = prog_array_map_poke_track,
1141	.map_poke_untrack = prog_array_map_poke_untrack,
1142	.map_poke_run = prog_array_map_poke_run,
1143	.map_get_next_key = array_map_get_next_key,
1144	.map_lookup_elem = fd_array_map_lookup_elem,
1145	.map_delete_elem = fd_array_map_delete_elem,
1146	.map_fd_get_ptr = prog_fd_array_get_ptr,
1147	.map_fd_put_ptr = prog_fd_array_put_ptr,
1148	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
1149	.map_release_uref = prog_array_map_clear,
1150	.map_seq_show_elem = prog_array_map_seq_show_elem,
1151	.map_mem_usage = array_map_mem_usage,
1152	.map_btf_id = &array_map_btf_ids[0],
1153};
1154
1155static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
1156						   struct file *map_file)
1157{
1158	struct bpf_event_entry *ee;
1159
1160	ee = kzalloc(sizeof(*ee), GFP_KERNEL);
1161	if (ee) {
1162		ee->event = perf_file->private_data;
1163		ee->perf_file = perf_file;
1164		ee->map_file = map_file;
1165	}
1166
1167	return ee;
1168}
1169
1170static void __bpf_event_entry_free(struct rcu_head *rcu)
1171{
1172	struct bpf_event_entry *ee;
1173
1174	ee = container_of(rcu, struct bpf_event_entry, rcu);
1175	fput(ee->perf_file);
1176	kfree(ee);
1177}
1178
1179static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
1180{
1181	call_rcu(&ee->rcu, __bpf_event_entry_free);
1182}
1183
1184static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
1185					 struct file *map_file, int fd)
1186{
1187	struct bpf_event_entry *ee;
1188	struct perf_event *event;
1189	struct file *perf_file;
1190	u64 value;
1191
1192	perf_file = perf_event_get(fd);
1193	if (IS_ERR(perf_file))
1194		return perf_file;
1195
1196	ee = ERR_PTR(-EOPNOTSUPP);
1197	event = perf_file->private_data;
1198	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
1199		goto err_out;
1200
1201	ee = bpf_event_entry_gen(perf_file, map_file);
1202	if (ee)
1203		return ee;
1204	ee = ERR_PTR(-ENOMEM);
1205err_out:
1206	fput(perf_file);
1207	return ee;
1208}
1209
1210static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
1211{
1212	/* bpf_perf_event is freed after one RCU grace period */
1213	bpf_event_entry_free_rcu(ptr);
1214}
1215
1216static void perf_event_fd_array_release(struct bpf_map *map,
1217					struct file *map_file)
1218{
1219	struct bpf_array *array = container_of(map, struct bpf_array, map);
1220	struct bpf_event_entry *ee;
1221	int i;
1222
1223	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1224		return;
1225
1226	rcu_read_lock();
1227	for (i = 0; i < array->map.max_entries; i++) {
1228		ee = READ_ONCE(array->ptrs[i]);
1229		if (ee && ee->map_file == map_file)
1230			__fd_array_map_delete_elem(map, &i, true);
1231	}
1232	rcu_read_unlock();
1233}
1234
1235static void perf_event_fd_array_map_free(struct bpf_map *map)
1236{
1237	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1238		bpf_fd_array_map_clear(map, false);
1239	fd_array_map_free(map);
1240}
1241
1242const struct bpf_map_ops perf_event_array_map_ops = {
1243	.map_meta_equal = bpf_map_meta_equal,
1244	.map_alloc_check = fd_array_map_alloc_check,
1245	.map_alloc = array_map_alloc,
1246	.map_free = perf_event_fd_array_map_free,
1247	.map_get_next_key = array_map_get_next_key,
1248	.map_lookup_elem = fd_array_map_lookup_elem,
1249	.map_delete_elem = fd_array_map_delete_elem,
1250	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
1251	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
1252	.map_release = perf_event_fd_array_release,
1253	.map_check_btf = map_check_no_btf,
1254	.map_mem_usage = array_map_mem_usage,
1255	.map_btf_id = &array_map_btf_ids[0],
1256};
1257
1258#ifdef CONFIG_CGROUPS
1259static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
1260				     struct file *map_file /* not used */,
1261				     int fd)
1262{
1263	return cgroup_get_from_fd(fd);
1264}
1265
1266static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
1267{
1268	/* cgroup_put() frees the cgrp after an RCU grace period */
1269	cgroup_put(ptr);
1270}
1271
1272static void cgroup_fd_array_free(struct bpf_map *map)
1273{
1274	bpf_fd_array_map_clear(map, false);
1275	fd_array_map_free(map);
1276}
1277
1278const struct bpf_map_ops cgroup_array_map_ops = {
1279	.map_meta_equal = bpf_map_meta_equal,
1280	.map_alloc_check = fd_array_map_alloc_check,
1281	.map_alloc = array_map_alloc,
1282	.map_free = cgroup_fd_array_free,
1283	.map_get_next_key = array_map_get_next_key,
1284	.map_lookup_elem = fd_array_map_lookup_elem,
1285	.map_delete_elem = fd_array_map_delete_elem,
1286	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
1287	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
1288	.map_check_btf = map_check_no_btf,
1289	.map_mem_usage = array_map_mem_usage,
1290	.map_btf_id = &array_map_btf_ids[0],
1291};
1292#endif
1293
1294static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
1295{
1296	struct bpf_map *map, *inner_map_meta;
1297
1298	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
1299	if (IS_ERR(inner_map_meta))
1300		return inner_map_meta;
1301
1302	map = array_map_alloc(attr);
1303	if (IS_ERR(map)) {
1304		bpf_map_meta_free(inner_map_meta);
1305		return map;
1306	}
1307
1308	map->inner_map_meta = inner_map_meta;
1309
1310	return map;
1311}
1312
1313static void array_of_map_free(struct bpf_map *map)
1314{
1315	/* map->inner_map_meta is only accessed by syscall which
1316	 * is protected by fdget/fdput.
1317	 */
1318	bpf_map_meta_free(map->inner_map_meta);
1319	bpf_fd_array_map_clear(map, false);
1320	fd_array_map_free(map);
1321}
1322
1323static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
1324{
1325	struct bpf_map **inner_map = array_map_lookup_elem(map, key);
1326
1327	if (!inner_map)
1328		return NULL;
1329
1330	return READ_ONCE(*inner_map);
1331}
1332
1333static int array_of_map_gen_lookup(struct bpf_map *map,
1334				   struct bpf_insn *insn_buf)
1335{
1336	struct bpf_array *array = container_of(map, struct bpf_array, map);
1337	u32 elem_size = array->elem_size;
1338	struct bpf_insn *insn = insn_buf;
1339	const int ret = BPF_REG_0;
1340	const int map_ptr = BPF_REG_1;
1341	const int index = BPF_REG_2;
1342
1343	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
1344	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
1345	if (!map->bypass_spec_v1) {
1346		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
1347		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
1348	} else {
1349		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
1350	}
1351	if (is_power_of_2(elem_size))
1352		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
1353	else
1354		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
1355	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
1356	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
1357	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
1358	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
1359	*insn++ = BPF_MOV64_IMM(ret, 0);
1360
1361	return insn - insn_buf;
1362}
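
/*
 * Rough C equivalent of the instruction sequence emitted above (illustrative
 * only; "index" stands for the key pointer the verifier passes in R2):
 *
 *	u32 i = *(u32 *)index;
 *
 *	if (i >= map->max_entries)
 *		return NULL;
 *	if (!map->bypass_spec_v1)
 *		i &= array->index_mask;
 *	// returns the stored inner map pointer, or NULL for an empty slot
 *	return *(void **)(array->value + (u64)elem_size * i);
 */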
1363
1364const struct bpf_map_ops array_of_maps_map_ops = {
1365	.map_alloc_check = fd_array_map_alloc_check,
1366	.map_alloc = array_of_map_alloc,
1367	.map_free = array_of_map_free,
1368	.map_get_next_key = array_map_get_next_key,
1369	.map_lookup_elem = array_of_map_lookup_elem,
1370	.map_delete_elem = fd_array_map_delete_elem,
1371	.map_fd_get_ptr = bpf_map_fd_get_ptr,
1372	.map_fd_put_ptr = bpf_map_fd_put_ptr,
1373	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
1374	.map_gen_lookup = array_of_map_gen_lookup,
1375	.map_lookup_batch = generic_map_lookup_batch,
1376	.map_update_batch = generic_map_update_batch,
1377	.map_check_btf = map_check_no_btf,
1378	.map_mem_usage = array_map_mem_usage,
1379	.map_btf_id = &array_map_btf_ids[0],
1380};