v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   3 * Copyright (c) 2016,2017 Facebook
   4 */
   5#include <linux/bpf.h>
   6#include <linux/btf.h>
   7#include <linux/err.h>
   8#include <linux/slab.h>
   9#include <linux/mm.h>
  10#include <linux/filter.h>
  11#include <linux/perf_event.h>
  12#include <uapi/linux/btf.h>
  13
  14#include "map_in_map.h"
  15
  16#define ARRAY_CREATE_FLAG_MASK \
  17	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK)
  18
  19static void bpf_array_free_percpu(struct bpf_array *array)
  20{
  21	int i;
  22
  23	for (i = 0; i < array->map.max_entries; i++) {
  24		free_percpu(array->pptrs[i]);
  25		cond_resched();
  26	}
  27}
  28
  29static int bpf_array_alloc_percpu(struct bpf_array *array)
  30{
  31	void __percpu *ptr;
  32	int i;
  33
  34	for (i = 0; i < array->map.max_entries; i++) {
  35		ptr = __alloc_percpu_gfp(array->elem_size, 8,
  36					 GFP_USER | __GFP_NOWARN);
  37		if (!ptr) {
  38			bpf_array_free_percpu(array);
  39			return -ENOMEM;
  40		}
  41		array->pptrs[i] = ptr;
  42		cond_resched();
  43	}
  44
  45	return 0;
  46}
  47
  48/* Called from syscall */
  49int array_map_alloc_check(union bpf_attr *attr)
  50{
  51	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
  52	int numa_node = bpf_map_attr_numa_node(attr);
  53
  54	/* check sanity of attributes */
  55	if (attr->max_entries == 0 || attr->key_size != 4 ||
  56	    attr->value_size == 0 ||
  57	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
  58	    !bpf_map_flags_access_ok(attr->map_flags) ||
  59	    (percpu && numa_node != NUMA_NO_NODE))
  60		return -EINVAL;
  61
  62	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
  63	    attr->map_flags & BPF_F_MMAPABLE)
  64		return -EINVAL;
  65
  66	if (attr->value_size > KMALLOC_MAX_SIZE)
  67		/* if value_size is bigger, the user space won't be able to
  68		 * access the elements.
  69		 */
  70		return -E2BIG;
  71
  72	return 0;
  73}
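
The checks above map one-to-one onto what user space has to pass to the BPF_MAP_CREATE command. Below is a minimal, illustrative sketch using the raw bpf(2) syscall; the sys_bpf() wrapper and the chosen sizes are assumptions for the example, not part of this file:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* illustrative wrapper around the raw bpf(2) syscall */
static int sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

/* returns a map fd on success, -1 with errno set on failure */
static int create_array_map(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_ARRAY;
	attr.key_size    = 4;     /* must be exactly 4 bytes (a u32 index) */
	attr.value_size  = 64;    /* non-zero and at most KMALLOC_MAX_SIZE */
	attr.max_entries = 256;   /* must be non-zero */

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}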
  74
  75static struct bpf_map *array_map_alloc(union bpf_attr *attr)
  76{
  77	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
  78	int ret, numa_node = bpf_map_attr_numa_node(attr);
  79	u32 elem_size, index_mask, max_entries;
  80	bool bypass_spec_v1 = bpf_bypass_spec_v1();
  81	u64 cost, array_size, mask64;
  82	struct bpf_map_memory mem;
  83	struct bpf_array *array;
  84
  85	elem_size = round_up(attr->value_size, 8);
  86
  87	max_entries = attr->max_entries;
  88
  89	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
  90	 * upper most bit set in u32 space is undefined behavior due to
  91	 * resulting 1U << 32, so do it manually here in u64 space.
  92	 */
  93	mask64 = fls_long(max_entries - 1);
  94	mask64 = 1ULL << mask64;
  95	mask64 -= 1;
  96
  97	index_mask = mask64;
  98	if (!bypass_spec_v1) {
  99		/* round up array size to nearest power of 2,
 100		 * since cpu will speculate within index_mask limits
 101		 */
 102		max_entries = index_mask + 1;
 103		/* Check for overflows. */
 104		if (max_entries < attr->max_entries)
 105			return ERR_PTR(-E2BIG);
 106	}
 107
 108	array_size = sizeof(*array);
 109	if (percpu) {
 110		array_size += (u64) max_entries * sizeof(void *);
 111	} else {
 112		/* rely on vmalloc() to return page-aligned memory and
 113		 * ensure array->value is exactly page-aligned
 114		 */
 115		if (attr->map_flags & BPF_F_MMAPABLE) {
 116			array_size = PAGE_ALIGN(array_size);
 117			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
 118		} else {
 119			array_size += (u64) max_entries * elem_size;
 120		}
 121	}
 122
 123	/* make sure there is no u32 overflow later in round_up() */
 124	cost = array_size;
 125	if (percpu)
 126		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
 127
 128	ret = bpf_map_charge_init(&mem, cost);
 129	if (ret < 0)
 130		return ERR_PTR(ret);
 131
 132	/* allocate all map elements and zero-initialize them */
 133	if (attr->map_flags & BPF_F_MMAPABLE) {
 134		void *data;
 135
 136		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
 137		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
 138		if (!data) {
 139			bpf_map_charge_finish(&mem);
 140			return ERR_PTR(-ENOMEM);
 141		}
 142		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
 143			- offsetof(struct bpf_array, value);
 144	} else {
 145		array = bpf_map_area_alloc(array_size, numa_node);
 146	}
 147	if (!array) {
 148		bpf_map_charge_finish(&mem);
 149		return ERR_PTR(-ENOMEM);
 150	}
 151	array->index_mask = index_mask;
 152	array->map.bypass_spec_v1 = bypass_spec_v1;
 153
 154	/* copy mandatory map attributes */
 155	bpf_map_init_from_attr(&array->map, attr);
 156	bpf_map_charge_move(&array->map.memory, &mem);
 157	array->elem_size = elem_size;
 158
 159	if (percpu && bpf_array_alloc_percpu(array)) {
 160		bpf_map_charge_finish(&array->map.memory);
 161		bpf_map_area_free(array);
 162		return ERR_PTR(-ENOMEM);
 163	}
 164
 165	return &array->map;
 166}
 167
 168/* Called from syscall or from eBPF program */
 169static void *array_map_lookup_elem(struct bpf_map *map, void *key)
 170{
 171	struct bpf_array *array = container_of(map, struct bpf_array, map);
 172	u32 index = *(u32 *)key;
 173
 174	if (unlikely(index >= array->map.max_entries))
 175		return NULL;
 176
 177	return array->value + array->elem_size * (index & array->index_mask);
 178}
 179
 180static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
 181				       u32 off)
 182{
 183	struct bpf_array *array = container_of(map, struct bpf_array, map);
 184
 185	if (map->max_entries != 1)
 186		return -ENOTSUPP;
 187	if (off >= map->value_size)
 188		return -EINVAL;
 189
 190	*imm = (unsigned long)array->value;
 191	return 0;
 192}
 193
 194static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
 195				       u32 *off)
 196{
 197	struct bpf_array *array = container_of(map, struct bpf_array, map);
 198	u64 base = (unsigned long)array->value;
 199	u64 range = array->elem_size;
 200
 201	if (map->max_entries != 1)
 202		return -ENOTSUPP;
 203	if (imm < base || imm >= base + range)
 204		return -ENOENT;
 205
 206	*off = imm - base;
 207	return 0;
 208}
 209
 210/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
 211static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 212{
 213	struct bpf_array *array = container_of(map, struct bpf_array, map);
 214	struct bpf_insn *insn = insn_buf;
 215	u32 elem_size = round_up(map->value_size, 8);
 216	const int ret = BPF_REG_0;
 217	const int map_ptr = BPF_REG_1;
 218	const int index = BPF_REG_2;
 219
 220	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
 221	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
 222	if (!map->bypass_spec_v1) {
 223		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
 224		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
 225	} else {
 226		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
 227	}
 228
 229	if (is_power_of_2(elem_size)) {
 230		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
 231	} else {
 232		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
 233	}
 234	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
 235	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
 236	*insn++ = BPF_MOV64_IMM(ret, 0);
 237	return insn - insn_buf;
 238}
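
For readers following the emitted instructions, the sequence above behaves roughly like the C below once inlined into the calling program. This is a sketch of the generated logic expressed against the kernel's struct bpf_array fields, not code from this file:

static void *inlined_array_lookup(struct bpf_array *array, const u32 *key)
{
	u64 index = *key;                     /* BPF_LDX_MEM(BPF_W, ...) */

	if (index >= array->map.max_entries)  /* BPF_JMP_IMM(BPF_JGE, ...) */
		return NULL;                  /* BPF_MOV64_IMM(ret, 0) */
	/* the AND is only emitted when Spectre v1 mitigation is active */
	index &= array->index_mask;
	/* shift by ilog2(elem_size) when elem_size is a power of two,
	 * otherwise a 64-bit multiply
	 */
	return array->value + index * array->elem_size;
}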
 239
 240/* Called from eBPF program */
 241static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
 242{
 243	struct bpf_array *array = container_of(map, struct bpf_array, map);
 244	u32 index = *(u32 *)key;
 245
 246	if (unlikely(index >= array->map.max_entries))
 247		return NULL;
 248
 249	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
 250}
 251
 252int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
 253{
 254	struct bpf_array *array = container_of(map, struct bpf_array, map);
 255	u32 index = *(u32 *)key;
 256	void __percpu *pptr;
 257	int cpu, off = 0;
 258	u32 size;
 259
 260	if (unlikely(index >= array->map.max_entries))
 261		return -ENOENT;
 262
 263	/* per_cpu areas are zero-filled and bpf programs can only
 264	 * access 'value_size' of them, so copying rounded areas
 265	 * will not leak any kernel data
 266	 */
 267	size = round_up(map->value_size, 8);
 268	rcu_read_lock();
 269	pptr = array->pptrs[index & array->index_mask];
 270	for_each_possible_cpu(cpu) {
 271		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
 272		off += size;
 273	}
 274	rcu_read_unlock();
 275	return 0;
 276}
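
Seen from user space, a lookup in a per-CPU array returns one value per possible CPU, each slot padded to 8 bytes. A hedged sketch of the syscall side, reusing the illustrative sys_bpf() wrapper from the earlier example; the caller is assumed to have sized buf as round_up(value_size, 8) times the number of possible CPUs (e.g. derived from /sys/devices/system/cpu/possible):

/* buf must hold round_up(value_size, 8) * nr_possible_cpus bytes */
static int read_percpu_value(int map_fd, __u32 index, void *buf)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key    = (__u64)(unsigned long)&index;
	attr.value  = (__u64)(unsigned long)buf;

	return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}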
 277
 278/* Called from syscall */
 279static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 280{
 281	struct bpf_array *array = container_of(map, struct bpf_array, map);
 282	u32 index = key ? *(u32 *)key : U32_MAX;
 283	u32 *next = (u32 *)next_key;
 284
 285	if (index >= array->map.max_entries) {
 286		*next = 0;
 287		return 0;
 288	}
 289
 290	if (index == array->map.max_entries - 1)
 291		return -ENOENT;
 292
 293	*next = index + 1;
 294	return 0;
 295}
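
Iteration from user space therefore just walks indices 0 .. max_entries-1: an out-of-range or missing key restarts at index 0, and asking for the successor of the last index returns -ENOENT. A short sketch with the same illustrative sys_bpf() wrapper:

/* pass index == NULL to start from the first slot */
static int next_index(int map_fd, const __u32 *index, __u32 *next)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd   = map_fd;
	attr.key      = (__u64)(unsigned long)index;
	attr.next_key = (__u64)(unsigned long)next;

	return sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}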
 296
 297/* Called from syscall or from eBPF program */
 298static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
 299				 u64 map_flags)
 300{
 301	struct bpf_array *array = container_of(map, struct bpf_array, map);
 302	u32 index = *(u32 *)key;
 303	char *val;
 304
 305	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
 306		/* unknown flags */
 307		return -EINVAL;
 308
 309	if (unlikely(index >= array->map.max_entries))
 310		/* all elements were pre-allocated, cannot insert a new one */
 311		return -E2BIG;
 312
 313	if (unlikely(map_flags & BPF_NOEXIST))
 314		/* all elements already exist */
 315		return -EEXIST;
 316
 317	if (unlikely((map_flags & BPF_F_LOCK) &&
 318		     !map_value_has_spin_lock(map)))
 319		return -EINVAL;
 320
 321	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 322		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
 323		       value, map->value_size);
 324	} else {
 325		val = array->value +
 326			array->elem_size * (index & array->index_mask);
 327		if (map_flags & BPF_F_LOCK)
 328			copy_map_value_locked(map, val, value, false);
 329		else
 330			copy_map_value(map, val, value);
 331	}
 332	return 0;
 333}
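
Since every slot of an array map exists from creation, the update flags behave differently than for hash maps: BPF_ANY and BPF_EXIST succeed for any in-range index, BPF_NOEXIST always fails with EEXIST, and an out-of-range index fails with E2BIG. A small user-space sketch, again using the illustrative sys_bpf() wrapper:

static int update_slot(int map_fd, __u32 index, const void *value, __u64 flags)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key    = (__u64)(unsigned long)&index;
	attr.value  = (__u64)(unsigned long)value;
	attr.flags  = flags;   /* BPF_ANY, BPF_NOEXIST or BPF_EXIST */

	return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}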
 334
 335int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
 336			    u64 map_flags)
 337{
 338	struct bpf_array *array = container_of(map, struct bpf_array, map);
 339	u32 index = *(u32 *)key;
 340	void __percpu *pptr;
 341	int cpu, off = 0;
 342	u32 size;
 343
 344	if (unlikely(map_flags > BPF_EXIST))
 345		/* unknown flags */
 346		return -EINVAL;
 347
 348	if (unlikely(index >= array->map.max_entries))
 349		/* all elements were pre-allocated, cannot insert a new one */
 350		return -E2BIG;
 351
 352	if (unlikely(map_flags == BPF_NOEXIST))
 353		/* all elements already exist */
 354		return -EEXIST;
 355
 356	/* the user space will provide round_up(value_size, 8) bytes that
 357	 * will be copied into per-cpu area. bpf programs can only access
 358	 * value_size of it. During lookup the same extra bytes will be
 359	 * returned or zeros which were zero-filled by percpu_alloc,
 360	 * so no kernel data leaks possible
 361	 */
 362	size = round_up(map->value_size, 8);
 363	rcu_read_lock();
 364	pptr = array->pptrs[index & array->index_mask];
 365	for_each_possible_cpu(cpu) {
 366		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
 367		off += size;
 368	}
 369	rcu_read_unlock();
 370	return 0;
 371}
 372
 373/* Called from syscall or from eBPF program */
 374static int array_map_delete_elem(struct bpf_map *map, void *key)
 375{
 376	return -EINVAL;
 377}
 378
 379static void *array_map_vmalloc_addr(struct bpf_array *array)
 380{
 381	return (void *)round_down((unsigned long)array, PAGE_SIZE);
 382}
 383
 384/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
 385static void array_map_free(struct bpf_map *map)
 386{
 387	struct bpf_array *array = container_of(map, struct bpf_array, map);
 388
 389	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
 390		bpf_array_free_percpu(array);
 391
 392	if (array->map.map_flags & BPF_F_MMAPABLE)
 393		bpf_map_area_free(array_map_vmalloc_addr(array));
 394	else
 395		bpf_map_area_free(array);
 396}
 397
 398static void array_map_seq_show_elem(struct bpf_map *map, void *key,
 399				    struct seq_file *m)
 400{
 401	void *value;
 402
 403	rcu_read_lock();
 404
 405	value = array_map_lookup_elem(map, key);
 406	if (!value) {
 407		rcu_read_unlock();
 408		return;
 409	}
 410
 411	if (map->btf_key_type_id)
 412		seq_printf(m, "%u: ", *(u32 *)key);
 413	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
 414	seq_puts(m, "\n");
 415
 416	rcu_read_unlock();
 417}
 418
 419static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
 420					   struct seq_file *m)
 421{
 422	struct bpf_array *array = container_of(map, struct bpf_array, map);
 423	u32 index = *(u32 *)key;
 424	void __percpu *pptr;
 425	int cpu;
 426
 427	rcu_read_lock();
 428
 429	seq_printf(m, "%u: {\n", *(u32 *)key);
 430	pptr = array->pptrs[index & array->index_mask];
 431	for_each_possible_cpu(cpu) {
 432		seq_printf(m, "\tcpu%d: ", cpu);
 433		btf_type_seq_show(map->btf, map->btf_value_type_id,
 434				  per_cpu_ptr(pptr, cpu), m);
 435		seq_puts(m, "\n");
 436	}
 437	seq_puts(m, "}\n");
 438
 439	rcu_read_unlock();
 440}
 441
 442static int array_map_check_btf(const struct bpf_map *map,
 443			       const struct btf *btf,
 444			       const struct btf_type *key_type,
 445			       const struct btf_type *value_type)
 446{
 447	u32 int_data;
 448
 449	/* One exception for keyless BTF: .bss/.data/.rodata map */
 450	if (btf_type_is_void(key_type)) {
 451		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
 452		    map->max_entries != 1)
 453			return -EINVAL;
 454
 455		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
 456			return -EINVAL;
 457
 458		return 0;
 459	}
 460
 461	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
 462		return -EINVAL;
 463
 464	int_data = *(u32 *)(key_type + 1);
 465	/* bpf array can only take a u32 key. This check makes sure
 466	 * that the btf matches the attr used during map_create.
 467	 */
 468	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
 469		return -EINVAL;
 470
 471	return 0;
 472}
 473
 474static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
 475{
 476	struct bpf_array *array = container_of(map, struct bpf_array, map);
 477	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;
 478
 479	if (!(map->map_flags & BPF_F_MMAPABLE))
 480		return -EINVAL;
 481
 482	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
 483	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
 484		return -EINVAL;
 485
 486	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
 487				   vma->vm_pgoff + pgoff);
 488}
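
An array created with BPF_F_MMAPABLE can be mapped directly through the map file descriptor; offset 0 of the mapping corresponds to element 0, and the value area spans PAGE_ALIGN(max_entries * round_up(value_size, 8)) bytes. A minimal sketch, assuming map_fd refers to such a map and value_area_size was computed by the caller:

#include <sys/mman.h>

static void *map_array_values(int map_fd, size_t value_area_size)
{
	/* element i then starts at mem + i * round_up(value_size, 8) */
	void *mem = mmap(NULL, value_area_size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, map_fd, 0);

	return mem == MAP_FAILED ? NULL : mem;
}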
 489
 490struct bpf_iter_seq_array_map_info {
 491	struct bpf_map *map;
 492	void *percpu_value_buf;
 493	u32 index;
 494};
 495
 496static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
 497{
 498	struct bpf_iter_seq_array_map_info *info = seq->private;
 499	struct bpf_map *map = info->map;
 500	struct bpf_array *array;
 501	u32 index;
 502
 503	if (info->index >= map->max_entries)
 504		return NULL;
 505
 506	if (*pos == 0)
 507		++*pos;
 508	array = container_of(map, struct bpf_array, map);
 509	index = info->index & array->index_mask;
 510	if (info->percpu_value_buf)
 511	       return array->pptrs[index];
 512	return array->value + array->elem_size * index;
 513}
 514
 515static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 516{
 517	struct bpf_iter_seq_array_map_info *info = seq->private;
 518	struct bpf_map *map = info->map;
 519	struct bpf_array *array;
 520	u32 index;
 521
 522	++*pos;
 523	++info->index;
 524	if (info->index >= map->max_entries)
 525		return NULL;
 526
 527	array = container_of(map, struct bpf_array, map);
 528	index = info->index & array->index_mask;
 529	if (info->percpu_value_buf)
 530	       return array->pptrs[index];
 531	return array->value + array->elem_size * index;
 532}
 533
 534static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
 535{
 536	struct bpf_iter_seq_array_map_info *info = seq->private;
 537	struct bpf_iter__bpf_map_elem ctx = {};
 538	struct bpf_map *map = info->map;
 539	struct bpf_iter_meta meta;
 540	struct bpf_prog *prog;
 541	int off = 0, cpu = 0;
 542	void __percpu **pptr;
 543	u32 size;
 544
 545	meta.seq = seq;
 546	prog = bpf_iter_get_info(&meta, v == NULL);
 547	if (!prog)
 548		return 0;
 549
 550	ctx.meta = &meta;
 551	ctx.map = info->map;
 552	if (v) {
 553		ctx.key = &info->index;
 554
 555		if (!info->percpu_value_buf) {
 556			ctx.value = v;
 557		} else {
 558			pptr = v;
 559			size = round_up(map->value_size, 8);
 560			for_each_possible_cpu(cpu) {
 561				bpf_long_memcpy(info->percpu_value_buf + off,
 562						per_cpu_ptr(pptr, cpu),
 563						size);
 564				off += size;
 565			}
 566			ctx.value = info->percpu_value_buf;
 567		}
 568	}
 569
 570	return bpf_iter_run_prog(prog, &ctx);
 571}
 572
 573static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
 574{
 575	return __bpf_array_map_seq_show(seq, v);
 576}
 577
 578static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
 579{
 580	if (!v)
 581		(void)__bpf_array_map_seq_show(seq, NULL);
 582}
 583
 584static int bpf_iter_init_array_map(void *priv_data,
 585				   struct bpf_iter_aux_info *aux)
 586{
 587	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
 588	struct bpf_map *map = aux->map;
 589	void *value_buf;
 590	u32 buf_size;
 591
 592	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 593		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
 594		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
 595		if (!value_buf)
 596			return -ENOMEM;
 597
 598		seq_info->percpu_value_buf = value_buf;
 599	}
 600
 601	seq_info->map = map;
 602	return 0;
 603}
 604
 605static void bpf_iter_fini_array_map(void *priv_data)
 606{
 607	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
 608
 609	kfree(seq_info->percpu_value_buf);
 610}
 611
 612static const struct seq_operations bpf_array_map_seq_ops = {
 613	.start	= bpf_array_map_seq_start,
 614	.next	= bpf_array_map_seq_next,
 615	.stop	= bpf_array_map_seq_stop,
 616	.show	= bpf_array_map_seq_show,
 617};
 618
 619static const struct bpf_iter_seq_info iter_seq_info = {
 620	.seq_ops		= &bpf_array_map_seq_ops,
 621	.init_seq_private	= bpf_iter_init_array_map,
 622	.fini_seq_private	= bpf_iter_fini_array_map,
 623	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
 624};
 625
 626static int array_map_btf_id;
 627const struct bpf_map_ops array_map_ops = {
 628	.map_alloc_check = array_map_alloc_check,
 629	.map_alloc = array_map_alloc,
 630	.map_free = array_map_free,
 631	.map_get_next_key = array_map_get_next_key,
 632	.map_lookup_elem = array_map_lookup_elem,
 633	.map_update_elem = array_map_update_elem,
 634	.map_delete_elem = array_map_delete_elem,
 635	.map_gen_lookup = array_map_gen_lookup,
 636	.map_direct_value_addr = array_map_direct_value_addr,
 637	.map_direct_value_meta = array_map_direct_value_meta,
 638	.map_mmap = array_map_mmap,
 639	.map_seq_show_elem = array_map_seq_show_elem,
 640	.map_check_btf = array_map_check_btf,
 641	.map_lookup_batch = generic_map_lookup_batch,
 642	.map_update_batch = generic_map_update_batch,
 643	.map_btf_name = "bpf_array",
 644	.map_btf_id = &array_map_btf_id,
 645	.iter_seq_info = &iter_seq_info,
 646};
 647
 648static int percpu_array_map_btf_id;
 649const struct bpf_map_ops percpu_array_map_ops = {
 650	.map_alloc_check = array_map_alloc_check,
 651	.map_alloc = array_map_alloc,
 652	.map_free = array_map_free,
 653	.map_get_next_key = array_map_get_next_key,
 654	.map_lookup_elem = percpu_array_map_lookup_elem,
 655	.map_update_elem = array_map_update_elem,
 656	.map_delete_elem = array_map_delete_elem,
 657	.map_seq_show_elem = percpu_array_map_seq_show_elem,
 658	.map_check_btf = array_map_check_btf,
 659	.map_btf_name = "bpf_array",
 660	.map_btf_id = &percpu_array_map_btf_id,
 661	.iter_seq_info = &iter_seq_info,
 662};
 663
 664static int fd_array_map_alloc_check(union bpf_attr *attr)
 665{
 666	/* only file descriptors can be stored in this type of map */
 667	if (attr->value_size != sizeof(u32))
 668		return -EINVAL;
 669	/* Program read-only/write-only not supported for special maps yet. */
 670	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
 671		return -EINVAL;
 672	return array_map_alloc_check(attr);
 673}
 674
 675static void fd_array_map_free(struct bpf_map *map)
 676{
 677	struct bpf_array *array = container_of(map, struct bpf_array, map);
 678	int i;
 679
 680	/* make sure it's empty */
 681	for (i = 0; i < array->map.max_entries; i++)
 682		BUG_ON(array->ptrs[i] != NULL);
 683
 684	bpf_map_area_free(array);
 685}
 686
 687static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
 688{
 689	return ERR_PTR(-EOPNOTSUPP);
 690}
 691
 692/* only called from syscall */
 693int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
 694{
 695	void **elem, *ptr;
 696	int ret =  0;
 697
 698	if (!map->ops->map_fd_sys_lookup_elem)
 699		return -ENOTSUPP;
 700
 701	rcu_read_lock();
 702	elem = array_map_lookup_elem(map, key);
 703	if (elem && (ptr = READ_ONCE(*elem)))
 704		*value = map->ops->map_fd_sys_lookup_elem(ptr);
 705	else
 706		ret = -ENOENT;
 707	rcu_read_unlock();
 708
 709	return ret;
 710}
 711
 712/* only called from syscall */
 713int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
 714				 void *key, void *value, u64 map_flags)
 715{
 716	struct bpf_array *array = container_of(map, struct bpf_array, map);
 717	void *new_ptr, *old_ptr;
 718	u32 index = *(u32 *)key, ufd;
 719
 720	if (map_flags != BPF_ANY)
 721		return -EINVAL;
 722
 723	if (index >= array->map.max_entries)
 724		return -E2BIG;
 725
 726	ufd = *(u32 *)value;
 727	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
 728	if (IS_ERR(new_ptr))
 729		return PTR_ERR(new_ptr);
 730
 731	if (map->ops->map_poke_run) {
 732		mutex_lock(&array->aux->poke_mutex);
 733		old_ptr = xchg(array->ptrs + index, new_ptr);
 734		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
 735		mutex_unlock(&array->aux->poke_mutex);
 736	} else {
 737		old_ptr = xchg(array->ptrs + index, new_ptr);
 738	}
 739
 740	if (old_ptr)
 741		map->ops->map_fd_put_ptr(old_ptr);
 742	return 0;
 743}
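
User space populates a program array by storing a BPF program file descriptor as the element value; programs of the same type can then jump to the stored program with the bpf_tail_call() helper. A hedged sketch of the user-space half (prog_fd is assumed to come from an earlier BPF_PROG_LOAD; sys_bpf() is the illustrative wrapper used above):

static int set_tail_call_slot(int prog_array_fd, __u32 index, __u32 prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = prog_array_fd;
	attr.key    = (__u64)(unsigned long)&index;
	attr.value  = (__u64)(unsigned long)&prog_fd;
	attr.flags  = BPF_ANY;   /* the only flag accepted by this map type */

	return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}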
 744
 745static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
 746{
 747	struct bpf_array *array = container_of(map, struct bpf_array, map);
 748	void *old_ptr;
 749	u32 index = *(u32 *)key;
 750
 751	if (index >= array->map.max_entries)
 752		return -E2BIG;
 753
 754	if (map->ops->map_poke_run) {
 755		mutex_lock(&array->aux->poke_mutex);
 756		old_ptr = xchg(array->ptrs + index, NULL);
 757		map->ops->map_poke_run(map, index, old_ptr, NULL);
 758		mutex_unlock(&array->aux->poke_mutex);
 759	} else {
 760		old_ptr = xchg(array->ptrs + index, NULL);
 761	}
 762
 763	if (old_ptr) {
 764		map->ops->map_fd_put_ptr(old_ptr);
 765		return 0;
 766	} else {
 767		return -ENOENT;
 768	}
 769}
 770
 771static void *prog_fd_array_get_ptr(struct bpf_map *map,
 772				   struct file *map_file, int fd)
 773{
 774	struct bpf_array *array = container_of(map, struct bpf_array, map);
 775	struct bpf_prog *prog = bpf_prog_get(fd);
 776
 777	if (IS_ERR(prog))
 778		return prog;
 779
 780	if (!bpf_prog_array_compatible(array, prog)) {
 781		bpf_prog_put(prog);
 782		return ERR_PTR(-EINVAL);
 783	}
 784
 785	return prog;
 786}
 787
 788static void prog_fd_array_put_ptr(void *ptr)
 789{
 790	bpf_prog_put(ptr);
 791}
 792
 793static u32 prog_fd_array_sys_lookup_elem(void *ptr)
 794{
 795	return ((struct bpf_prog *)ptr)->aux->id;
 796}
 797
 798/* decrement refcnt of all bpf_progs that are stored in this map */
 799static void bpf_fd_array_map_clear(struct bpf_map *map)
 800{
 801	struct bpf_array *array = container_of(map, struct bpf_array, map);
 802	int i;
 803
 804	for (i = 0; i < array->map.max_entries; i++)
 805		fd_array_map_delete_elem(map, &i);
 806}
 807
 808static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
 809					 struct seq_file *m)
 810{
 811	void **elem, *ptr;
 812	u32 prog_id;
 813
 814	rcu_read_lock();
 815
 816	elem = array_map_lookup_elem(map, key);
 817	if (elem) {
 818		ptr = READ_ONCE(*elem);
 819		if (ptr) {
 820			seq_printf(m, "%u: ", *(u32 *)key);
 821			prog_id = prog_fd_array_sys_lookup_elem(ptr);
 822			btf_type_seq_show(map->btf, map->btf_value_type_id,
 823					  &prog_id, m);
 824			seq_puts(m, "\n");
 825		}
 826	}
 827
 828	rcu_read_unlock();
 829}
 830
 831struct prog_poke_elem {
 832	struct list_head list;
 833	struct bpf_prog_aux *aux;
 834};
 835
 836static int prog_array_map_poke_track(struct bpf_map *map,
 837				     struct bpf_prog_aux *prog_aux)
 838{
 839	struct prog_poke_elem *elem;
 840	struct bpf_array_aux *aux;
 841	int ret = 0;
 842
 843	aux = container_of(map, struct bpf_array, map)->aux;
 844	mutex_lock(&aux->poke_mutex);
 845	list_for_each_entry(elem, &aux->poke_progs, list) {
 846		if (elem->aux == prog_aux)
 847			goto out;
 848	}
 849
 850	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
 851	if (!elem) {
 852		ret = -ENOMEM;
 853		goto out;
 854	}
 855
 856	INIT_LIST_HEAD(&elem->list);
 857	/* We must track the program's aux info at this point in time
 858	 * since the program pointer itself may not be stable yet, see
 859	 * also comment in prog_array_map_poke_run().
 860	 */
 861	elem->aux = prog_aux;
 862
 863	list_add_tail(&elem->list, &aux->poke_progs);
 864out:
 865	mutex_unlock(&aux->poke_mutex);
 866	return ret;
 867}
 868
 869static void prog_array_map_poke_untrack(struct bpf_map *map,
 870					struct bpf_prog_aux *prog_aux)
 871{
 872	struct prog_poke_elem *elem, *tmp;
 873	struct bpf_array_aux *aux;
 874
 875	aux = container_of(map, struct bpf_array, map)->aux;
 876	mutex_lock(&aux->poke_mutex);
 877	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
 878		if (elem->aux == prog_aux) {
 879			list_del_init(&elem->list);
 880			kfree(elem);
 881			break;
 882		}
 883	}
 884	mutex_unlock(&aux->poke_mutex);
 885}
 886
 887static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
 888				    struct bpf_prog *old,
 889				    struct bpf_prog *new)
 890{
 891	struct prog_poke_elem *elem;
 892	struct bpf_array_aux *aux;
 893
 894	aux = container_of(map, struct bpf_array, map)->aux;
 895	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));
 896
 897	list_for_each_entry(elem, &aux->poke_progs, list) {
 898		struct bpf_jit_poke_descriptor *poke;
 899		int i, ret;
 900
 901		for (i = 0; i < elem->aux->size_poke_tab; i++) {
 902			poke = &elem->aux->poke_tab[i];
 903
 904			/* Few things to be aware of:
 905			 *
 906			 * 1) We can only ever access aux in this context, but
 907			 *    not aux->prog since it might not be stable yet and
 908			 *    there could be danger of use after free otherwise.
 909			 * 2) Initially when we start tracking aux, the program
 910			 *    is not JITed yet and also does not have a kallsyms
 911			 *    entry. We skip these as poke->ip_stable is not
 912			 *    active yet. The JIT will do the final fixup before
 913			 *    setting it stable. The various poke->ip_stable are
 914			 *    successively activated, so tail call updates can
 915			 *    arrive from here while JIT is still finishing its
 916			 *    final fixup for non-activated poke entries.
 917			 * 3) On program teardown, the program's kallsym entry gets
 918			 *    removed out of RCU callback, but we can only untrack
 919			 *    from sleepable context, therefore bpf_arch_text_poke()
 920			 *    might not see that this is in BPF text section and
 921			 *    bails out with -EINVAL. As these are unreachable since
 922			 *    RCU grace period already passed, we simply skip them.
 923			 * 4) Also programs reaching refcount of zero while patching
 924			 *    is in progress is okay since we're protected under
 925			 *    poke_mutex and untrack the programs before the JIT
 926			 *    buffer is freed. When we're still in the middle of
 927			 *    patching and suddenly kallsyms entry of the program
 928			 *    gets evicted, we just skip the rest which is fine due
 929			 *    to point 3).
 930			 * 5) Any other error happening below from bpf_arch_text_poke()
  931			 *    is an unexpected bug.
 932			 */
 933			if (!READ_ONCE(poke->ip_stable))
 934				continue;
 935			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
 936				continue;
 937			if (poke->tail_call.map != map ||
 938			    poke->tail_call.key != key)
 939				continue;
 940
 941			ret = bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP,
 942						 old ? (u8 *)old->bpf_func +
 943						 poke->adj_off : NULL,
 944						 new ? (u8 *)new->bpf_func +
 945						 poke->adj_off : NULL);
 946			BUG_ON(ret < 0 && ret != -EINVAL);
 947		}
 948	}
 949}
 950
 951static void prog_array_map_clear_deferred(struct work_struct *work)
 952{
 953	struct bpf_map *map = container_of(work, struct bpf_array_aux,
 954					   work)->map;
 955	bpf_fd_array_map_clear(map);
 956	bpf_map_put(map);
 957}
 958
 959static void prog_array_map_clear(struct bpf_map *map)
 960{
 961	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
 962						 map)->aux;
 963	bpf_map_inc(map);
 964	schedule_work(&aux->work);
 965}
 966
 967static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
 968{
 969	struct bpf_array_aux *aux;
 970	struct bpf_map *map;
 971
 972	aux = kzalloc(sizeof(*aux), GFP_KERNEL);
 973	if (!aux)
 974		return ERR_PTR(-ENOMEM);
 975
 976	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
 977	INIT_LIST_HEAD(&aux->poke_progs);
 978	mutex_init(&aux->poke_mutex);
 979
 980	map = array_map_alloc(attr);
 981	if (IS_ERR(map)) {
 982		kfree(aux);
 983		return map;
 984	}
 985
 986	container_of(map, struct bpf_array, map)->aux = aux;
 987	aux->map = map;
 988
 989	return map;
 990}
 991
 992static void prog_array_map_free(struct bpf_map *map)
 993{
 994	struct prog_poke_elem *elem, *tmp;
 995	struct bpf_array_aux *aux;
 996
 997	aux = container_of(map, struct bpf_array, map)->aux;
 998	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
 999		list_del_init(&elem->list);
1000		kfree(elem);
1001	}
1002	kfree(aux);
1003	fd_array_map_free(map);
1004}
1005
1006static int prog_array_map_btf_id;
1007const struct bpf_map_ops prog_array_map_ops = {
1008	.map_alloc_check = fd_array_map_alloc_check,
1009	.map_alloc = prog_array_map_alloc,
1010	.map_free = prog_array_map_free,
1011	.map_poke_track = prog_array_map_poke_track,
1012	.map_poke_untrack = prog_array_map_poke_untrack,
1013	.map_poke_run = prog_array_map_poke_run,
1014	.map_get_next_key = array_map_get_next_key,
1015	.map_lookup_elem = fd_array_map_lookup_elem,
1016	.map_delete_elem = fd_array_map_delete_elem,
1017	.map_fd_get_ptr = prog_fd_array_get_ptr,
1018	.map_fd_put_ptr = prog_fd_array_put_ptr,
1019	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
1020	.map_release_uref = prog_array_map_clear,
1021	.map_seq_show_elem = prog_array_map_seq_show_elem,
1022	.map_btf_name = "bpf_array",
1023	.map_btf_id = &prog_array_map_btf_id,
1024};
1025
1026static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
1027						   struct file *map_file)
1028{
1029	struct bpf_event_entry *ee;
1030
1031	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
1032	if (ee) {
1033		ee->event = perf_file->private_data;
1034		ee->perf_file = perf_file;
1035		ee->map_file = map_file;
1036	}
1037
1038	return ee;
1039}
1040
1041static void __bpf_event_entry_free(struct rcu_head *rcu)
1042{
1043	struct bpf_event_entry *ee;
1044
1045	ee = container_of(rcu, struct bpf_event_entry, rcu);
1046	fput(ee->perf_file);
1047	kfree(ee);
1048}
1049
1050static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
1051{
1052	call_rcu(&ee->rcu, __bpf_event_entry_free);
1053}
1054
1055static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
1056					 struct file *map_file, int fd)
1057{
1058	struct bpf_event_entry *ee;
1059	struct perf_event *event;
1060	struct file *perf_file;
1061	u64 value;
1062
1063	perf_file = perf_event_get(fd);
1064	if (IS_ERR(perf_file))
1065		return perf_file;
1066
1067	ee = ERR_PTR(-EOPNOTSUPP);
1068	event = perf_file->private_data;
1069	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
1070		goto err_out;
1071
1072	ee = bpf_event_entry_gen(perf_file, map_file);
1073	if (ee)
1074		return ee;
1075	ee = ERR_PTR(-ENOMEM);
1076err_out:
1077	fput(perf_file);
1078	return ee;
1079}
1080
1081static void perf_event_fd_array_put_ptr(void *ptr)
1082{
1083	bpf_event_entry_free_rcu(ptr);
1084}
1085
1086static void perf_event_fd_array_release(struct bpf_map *map,
1087					struct file *map_file)
1088{
1089	struct bpf_array *array = container_of(map, struct bpf_array, map);
1090	struct bpf_event_entry *ee;
1091	int i;
1092
1093	rcu_read_lock();
1094	for (i = 0; i < array->map.max_entries; i++) {
1095		ee = READ_ONCE(array->ptrs[i]);
1096		if (ee && ee->map_file == map_file)
1097			fd_array_map_delete_elem(map, &i);
1098	}
1099	rcu_read_unlock();
1100}
1101
1102static int perf_event_array_map_btf_id;
1103const struct bpf_map_ops perf_event_array_map_ops = {
1104	.map_alloc_check = fd_array_map_alloc_check,
1105	.map_alloc = array_map_alloc,
1106	.map_free = fd_array_map_free,
1107	.map_get_next_key = array_map_get_next_key,
1108	.map_lookup_elem = fd_array_map_lookup_elem,
1109	.map_delete_elem = fd_array_map_delete_elem,
1110	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
1111	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
1112	.map_release = perf_event_fd_array_release,
1113	.map_check_btf = map_check_no_btf,
1114	.map_btf_name = "bpf_array",
1115	.map_btf_id = &perf_event_array_map_btf_id,
1116};
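
The typical consumer of a perf event array is a BPF program calling bpf_perf_event_output(), with user space having stored one perf event fd per CPU beforehand. A rough BPF-side sketch using the libbpf map-definition macros from bpf_helpers.h; the map name events, struct event_t and the chosen tracepoint are illustrative assumptions:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} events SEC(".maps");

struct event_t {
	__u32 pid;
	__u64 ts;
};

SEC("tracepoint/syscalls/sys_enter_execve")
int report_exec(void *ctx)
{
	struct event_t e = {};

	e.pid = bpf_get_current_pid_tgid() >> 32;
	e.ts  = bpf_ktime_get_ns();
	/* BPF_F_CURRENT_CPU picks the perf event stored for this CPU */
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";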
1117
1118#ifdef CONFIG_CGROUPS
1119static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
1120				     struct file *map_file /* not used */,
1121				     int fd)
1122{
1123	return cgroup_get_from_fd(fd);
1124}
1125
1126static void cgroup_fd_array_put_ptr(void *ptr)
1127{
 1128	/* cgroup_put() frees cgrp after an RCU grace period */
1129	cgroup_put(ptr);
1130}
1131
1132static void cgroup_fd_array_free(struct bpf_map *map)
1133{
1134	bpf_fd_array_map_clear(map);
1135	fd_array_map_free(map);
1136}
1137
1138static int cgroup_array_map_btf_id;
1139const struct bpf_map_ops cgroup_array_map_ops = {
1140	.map_alloc_check = fd_array_map_alloc_check,
1141	.map_alloc = array_map_alloc,
1142	.map_free = cgroup_fd_array_free,
1143	.map_get_next_key = array_map_get_next_key,
1144	.map_lookup_elem = fd_array_map_lookup_elem,
1145	.map_delete_elem = fd_array_map_delete_elem,
1146	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
1147	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
1148	.map_check_btf = map_check_no_btf,
1149	.map_btf_name = "bpf_array",
1150	.map_btf_id = &cgroup_array_map_btf_id,
1151};
1152#endif
1153
1154static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
1155{
1156	struct bpf_map *map, *inner_map_meta;
1157
1158	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
1159	if (IS_ERR(inner_map_meta))
1160		return inner_map_meta;
1161
1162	map = array_map_alloc(attr);
1163	if (IS_ERR(map)) {
1164		bpf_map_meta_free(inner_map_meta);
1165		return map;
1166	}
1167
1168	map->inner_map_meta = inner_map_meta;
1169
1170	return map;
1171}
1172
1173static void array_of_map_free(struct bpf_map *map)
1174{
1175	/* map->inner_map_meta is only accessed by syscall which
1176	 * is protected by fdget/fdput.
1177	 */
1178	bpf_map_meta_free(map->inner_map_meta);
1179	bpf_fd_array_map_clear(map);
1180	fd_array_map_free(map);
1181}
1182
1183static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
1184{
1185	struct bpf_map **inner_map = array_map_lookup_elem(map, key);
1186
1187	if (!inner_map)
1188		return NULL;
1189
1190	return READ_ONCE(*inner_map);
1191}
1192
1193static u32 array_of_map_gen_lookup(struct bpf_map *map,
1194				   struct bpf_insn *insn_buf)
1195{
1196	struct bpf_array *array = container_of(map, struct bpf_array, map);
1197	u32 elem_size = round_up(map->value_size, 8);
1198	struct bpf_insn *insn = insn_buf;
1199	const int ret = BPF_REG_0;
1200	const int map_ptr = BPF_REG_1;
1201	const int index = BPF_REG_2;
1202
1203	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
1204	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
1205	if (!map->bypass_spec_v1) {
1206		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
1207		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
1208	} else {
1209		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
1210	}
1211	if (is_power_of_2(elem_size))
1212		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
1213	else
1214		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
1215	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
1216	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
1217	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
1218	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
1219	*insn++ = BPF_MOV64_IMM(ret, 0);
1220
1221	return insn - insn_buf;
1222}
1223
1224static int array_of_maps_map_btf_id;
1225const struct bpf_map_ops array_of_maps_map_ops = {
1226	.map_alloc_check = fd_array_map_alloc_check,
1227	.map_alloc = array_of_map_alloc,
1228	.map_free = array_of_map_free,
1229	.map_get_next_key = array_map_get_next_key,
1230	.map_lookup_elem = array_of_map_lookup_elem,
1231	.map_delete_elem = fd_array_map_delete_elem,
1232	.map_fd_get_ptr = bpf_map_fd_get_ptr,
1233	.map_fd_put_ptr = bpf_map_fd_put_ptr,
1234	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
1235	.map_gen_lookup = array_of_map_gen_lookup,
1236	.map_check_btf = map_check_no_btf,
1237	.map_btf_name = "bpf_array",
1238	.map_btf_id = &array_of_maps_map_btf_id,
1239};
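
An array of maps is created like a plain array except that each value is a 32-bit map fd slot and attr.inner_map_fd must reference an already created map that acts as the template (meta) for all inner maps. A minimal sketch with the illustrative sys_bpf() wrapper; inner_fd is assumed to be a valid map fd:

static int create_array_of_maps(int inner_fd, __u32 max_entries)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type     = BPF_MAP_TYPE_ARRAY_OF_MAPS;
	attr.key_size     = 4;
	attr.value_size   = 4;            /* each element holds a map fd */
	attr.max_entries  = max_entries;
	attr.inner_map_fd = inner_fd;     /* template for the inner maps */

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}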
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   3 * Copyright (c) 2016,2017 Facebook
   4 */
   5#include <linux/bpf.h>
   6#include <linux/btf.h>
   7#include <linux/err.h>
   8#include <linux/slab.h>
   9#include <linux/mm.h>
  10#include <linux/filter.h>
  11#include <linux/perf_event.h>
  12#include <uapi/linux/btf.h>
  13#include <linux/rcupdate_trace.h>
  14
  15#include "map_in_map.h"
  16
  17#define ARRAY_CREATE_FLAG_MASK \
  18	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
  19	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)
  20
  21static void bpf_array_free_percpu(struct bpf_array *array)
  22{
  23	int i;
  24
  25	for (i = 0; i < array->map.max_entries; i++) {
  26		free_percpu(array->pptrs[i]);
  27		cond_resched();
  28	}
  29}
  30
  31static int bpf_array_alloc_percpu(struct bpf_array *array)
  32{
  33	void __percpu *ptr;
  34	int i;
  35
  36	for (i = 0; i < array->map.max_entries; i++) {
  37		ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
  38					   GFP_USER | __GFP_NOWARN);
  39		if (!ptr) {
  40			bpf_array_free_percpu(array);
  41			return -ENOMEM;
  42		}
  43		array->pptrs[i] = ptr;
  44		cond_resched();
  45	}
  46
  47	return 0;
  48}
  49
  50/* Called from syscall */
  51int array_map_alloc_check(union bpf_attr *attr)
  52{
  53	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
  54	int numa_node = bpf_map_attr_numa_node(attr);
  55
  56	/* check sanity of attributes */
  57	if (attr->max_entries == 0 || attr->key_size != 4 ||
  58	    attr->value_size == 0 ||
  59	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
  60	    !bpf_map_flags_access_ok(attr->map_flags) ||
  61	    (percpu && numa_node != NUMA_NO_NODE))
  62		return -EINVAL;
  63
  64	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
  65	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
  66		return -EINVAL;
  67
  68	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
  69	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
  70		return -EINVAL;
  71
  72	if (attr->value_size > KMALLOC_MAX_SIZE)
  73		/* if value_size is bigger, the user space won't be able to
  74		 * access the elements.
  75		 */
  76		return -E2BIG;
  77
  78	return 0;
  79}
  80
  81static struct bpf_map *array_map_alloc(union bpf_attr *attr)
  82{
  83	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
  84	int numa_node = bpf_map_attr_numa_node(attr);
  85	u32 elem_size, index_mask, max_entries;
  86	bool bypass_spec_v1 = bpf_bypass_spec_v1();
  87	u64 array_size, mask64;
  88	struct bpf_array *array;
  89
  90	elem_size = round_up(attr->value_size, 8);
  91
  92	max_entries = attr->max_entries;
  93
  94	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
  95	 * upper most bit set in u32 space is undefined behavior due to
  96	 * resulting 1U << 32, so do it manually here in u64 space.
  97	 */
  98	mask64 = fls_long(max_entries - 1);
  99	mask64 = 1ULL << mask64;
 100	mask64 -= 1;
 101
 102	index_mask = mask64;
 103	if (!bypass_spec_v1) {
 104		/* round up array size to nearest power of 2,
 105		 * since cpu will speculate within index_mask limits
 106		 */
 107		max_entries = index_mask + 1;
 108		/* Check for overflows. */
 109		if (max_entries < attr->max_entries)
 110			return ERR_PTR(-E2BIG);
 111	}
 112
 113	array_size = sizeof(*array);
 114	if (percpu) {
 115		array_size += (u64) max_entries * sizeof(void *);
 116	} else {
 117		/* rely on vmalloc() to return page-aligned memory and
 118		 * ensure array->value is exactly page-aligned
 119		 */
 120		if (attr->map_flags & BPF_F_MMAPABLE) {
 121			array_size = PAGE_ALIGN(array_size);
 122			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
 123		} else {
 124			array_size += (u64) max_entries * elem_size;
 125		}
 126	}
 127
 128	/* allocate all map elements and zero-initialize them */
 129	if (attr->map_flags & BPF_F_MMAPABLE) {
 130		void *data;
 131
 132		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
 133		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
 134		if (!data)
 135			return ERR_PTR(-ENOMEM);
 136		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
 137			- offsetof(struct bpf_array, value);
 138	} else {
 139		array = bpf_map_area_alloc(array_size, numa_node);
 140	}
 141	if (!array)
 142		return ERR_PTR(-ENOMEM);
 143	array->index_mask = index_mask;
 144	array->map.bypass_spec_v1 = bypass_spec_v1;
 145
 146	/* copy mandatory map attributes */
 147	bpf_map_init_from_attr(&array->map, attr);
 148	array->elem_size = elem_size;
 149
 150	if (percpu && bpf_array_alloc_percpu(array)) {
 151		bpf_map_area_free(array);
 152		return ERR_PTR(-ENOMEM);
 153	}
 154
 155	return &array->map;
 156}
 157
 158/* Called from syscall or from eBPF program */
 159static void *array_map_lookup_elem(struct bpf_map *map, void *key)
 160{
 161	struct bpf_array *array = container_of(map, struct bpf_array, map);
 162	u32 index = *(u32 *)key;
 163
 164	if (unlikely(index >= array->map.max_entries))
 165		return NULL;
 166
 167	return array->value + array->elem_size * (index & array->index_mask);
 168}
 169
 170static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
 171				       u32 off)
 172{
 173	struct bpf_array *array = container_of(map, struct bpf_array, map);
 174
 175	if (map->max_entries != 1)
 176		return -ENOTSUPP;
 177	if (off >= map->value_size)
 178		return -EINVAL;
 179
 180	*imm = (unsigned long)array->value;
 181	return 0;
 182}
 183
 184static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
 185				       u32 *off)
 186{
 187	struct bpf_array *array = container_of(map, struct bpf_array, map);
 188	u64 base = (unsigned long)array->value;
 189	u64 range = array->elem_size;
 190
 191	if (map->max_entries != 1)
 192		return -ENOTSUPP;
 193	if (imm < base || imm >= base + range)
 194		return -ENOENT;
 195
 196	*off = imm - base;
 197	return 0;
 198}
 199
 200/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
 201static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 202{
 203	struct bpf_array *array = container_of(map, struct bpf_array, map);
 204	struct bpf_insn *insn = insn_buf;
 205	u32 elem_size = round_up(map->value_size, 8);
 206	const int ret = BPF_REG_0;
 207	const int map_ptr = BPF_REG_1;
 208	const int index = BPF_REG_2;
 209
 210	if (map->map_flags & BPF_F_INNER_MAP)
 211		return -EOPNOTSUPP;
 212
 213	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
 214	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
 215	if (!map->bypass_spec_v1) {
 216		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
 217		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
 218	} else {
 219		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
 220	}
 221
 222	if (is_power_of_2(elem_size)) {
 223		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
 224	} else {
 225		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
 226	}
 227	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
 228	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
 229	*insn++ = BPF_MOV64_IMM(ret, 0);
 230	return insn - insn_buf;
 231}
 232
 233/* Called from eBPF program */
 234static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
 235{
 236	struct bpf_array *array = container_of(map, struct bpf_array, map);
 237	u32 index = *(u32 *)key;
 238
 239	if (unlikely(index >= array->map.max_entries))
 240		return NULL;
 241
 242	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
 243}
 244
 245int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
 246{
 247	struct bpf_array *array = container_of(map, struct bpf_array, map);
 248	u32 index = *(u32 *)key;
 249	void __percpu *pptr;
 250	int cpu, off = 0;
 251	u32 size;
 252
 253	if (unlikely(index >= array->map.max_entries))
 254		return -ENOENT;
 255
 256	/* per_cpu areas are zero-filled and bpf programs can only
 257	 * access 'value_size' of them, so copying rounded areas
 258	 * will not leak any kernel data
 259	 */
 260	size = round_up(map->value_size, 8);
 261	rcu_read_lock();
 262	pptr = array->pptrs[index & array->index_mask];
 263	for_each_possible_cpu(cpu) {
 264		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
 265		off += size;
 266	}
 267	rcu_read_unlock();
 268	return 0;
 269}
 270
 271/* Called from syscall */
 272static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 273{
 274	struct bpf_array *array = container_of(map, struct bpf_array, map);
 275	u32 index = key ? *(u32 *)key : U32_MAX;
 276	u32 *next = (u32 *)next_key;
 277
 278	if (index >= array->map.max_entries) {
 279		*next = 0;
 280		return 0;
 281	}
 282
 283	if (index == array->map.max_entries - 1)
 284		return -ENOENT;
 285
 286	*next = index + 1;
 287	return 0;
 288}
 289
 290/* Called from syscall or from eBPF program */
 291static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
 292				 u64 map_flags)
 293{
 294	struct bpf_array *array = container_of(map, struct bpf_array, map);
 295	u32 index = *(u32 *)key;
 296	char *val;
 297
 298	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
 299		/* unknown flags */
 300		return -EINVAL;
 301
 302	if (unlikely(index >= array->map.max_entries))
 303		/* all elements were pre-allocated, cannot insert a new one */
 304		return -E2BIG;
 305
 306	if (unlikely(map_flags & BPF_NOEXIST))
 307		/* all elements already exist */
 308		return -EEXIST;
 309
 310	if (unlikely((map_flags & BPF_F_LOCK) &&
 311		     !map_value_has_spin_lock(map)))
 312		return -EINVAL;
 313
 314	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 315		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
 316		       value, map->value_size);
 317	} else {
 318		val = array->value +
 319			array->elem_size * (index & array->index_mask);
 320		if (map_flags & BPF_F_LOCK)
 321			copy_map_value_locked(map, val, value, false);
 322		else
 323			copy_map_value(map, val, value);
 324	}
 325	return 0;
 326}
 327
 328int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
 329			    u64 map_flags)
 330{
 331	struct bpf_array *array = container_of(map, struct bpf_array, map);
 332	u32 index = *(u32 *)key;
 333	void __percpu *pptr;
 334	int cpu, off = 0;
 335	u32 size;
 336
 337	if (unlikely(map_flags > BPF_EXIST))
 338		/* unknown flags */
 339		return -EINVAL;
 340
 341	if (unlikely(index >= array->map.max_entries))
 342		/* all elements were pre-allocated, cannot insert a new one */
 343		return -E2BIG;
 344
 345	if (unlikely(map_flags == BPF_NOEXIST))
 346		/* all elements already exist */
 347		return -EEXIST;
 348
 349	/* the user space will provide round_up(value_size, 8) bytes that
 350	 * will be copied into per-cpu area. bpf programs can only access
 351	 * value_size of it. During lookup the same extra bytes will be
 352	 * returned or zeros which were zero-filled by percpu_alloc,
 353	 * so no kernel data leaks possible
 354	 */
 355	size = round_up(map->value_size, 8);
 356	rcu_read_lock();
 357	pptr = array->pptrs[index & array->index_mask];
 358	for_each_possible_cpu(cpu) {
 359		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
 360		off += size;
 361	}
 362	rcu_read_unlock();
 363	return 0;
 364}
 365
 366/* Called from syscall or from eBPF program */
 367static int array_map_delete_elem(struct bpf_map *map, void *key)
 368{
 369	return -EINVAL;
 370}
 371
 372static void *array_map_vmalloc_addr(struct bpf_array *array)
 373{
 374	return (void *)round_down((unsigned long)array, PAGE_SIZE);
 375}
 376
 377/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
 378static void array_map_free(struct bpf_map *map)
 379{
 380	struct bpf_array *array = container_of(map, struct bpf_array, map);
 381
 382	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
 383		bpf_array_free_percpu(array);
 384
 385	if (array->map.map_flags & BPF_F_MMAPABLE)
 386		bpf_map_area_free(array_map_vmalloc_addr(array));
 387	else
 388		bpf_map_area_free(array);
 389}
 390
 391static void array_map_seq_show_elem(struct bpf_map *map, void *key,
 392				    struct seq_file *m)
 393{
 394	void *value;
 395
 396	rcu_read_lock();
 397
 398	value = array_map_lookup_elem(map, key);
 399	if (!value) {
 400		rcu_read_unlock();
 401		return;
 402	}
 403
 404	if (map->btf_key_type_id)
 405		seq_printf(m, "%u: ", *(u32 *)key);
 406	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
 407	seq_puts(m, "\n");
 408
 409	rcu_read_unlock();
 410}
 411
 412static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
 413					   struct seq_file *m)
 414{
 415	struct bpf_array *array = container_of(map, struct bpf_array, map);
 416	u32 index = *(u32 *)key;
 417	void __percpu *pptr;
 418	int cpu;
 419
 420	rcu_read_lock();
 421
 422	seq_printf(m, "%u: {\n", *(u32 *)key);
 423	pptr = array->pptrs[index & array->index_mask];
 424	for_each_possible_cpu(cpu) {
 425		seq_printf(m, "\tcpu%d: ", cpu);
 426		btf_type_seq_show(map->btf, map->btf_value_type_id,
 427				  per_cpu_ptr(pptr, cpu), m);
 428		seq_puts(m, "\n");
 429	}
 430	seq_puts(m, "}\n");
 431
 432	rcu_read_unlock();
 433}
 434
 435static int array_map_check_btf(const struct bpf_map *map,
 436			       const struct btf *btf,
 437			       const struct btf_type *key_type,
 438			       const struct btf_type *value_type)
 439{
 440	u32 int_data;
 441
 442	/* One exception for keyless BTF: .bss/.data/.rodata map */
 443	if (btf_type_is_void(key_type)) {
 444		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
 445		    map->max_entries != 1)
 446			return -EINVAL;
 447
 448		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
 449			return -EINVAL;
 450
 451		return 0;
 452	}
 453
 454	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
 455		return -EINVAL;
 456
 457	int_data = *(u32 *)(key_type + 1);
 458	/* bpf array can only take a u32 key. This check makes sure
 459	 * that the btf matches the attr used during map_create.
 460	 */
 461	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
 462		return -EINVAL;
 463
 464	return 0;
 465}
 466
 467static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
 468{
 469	struct bpf_array *array = container_of(map, struct bpf_array, map);
 470	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;
 471
 472	if (!(map->map_flags & BPF_F_MMAPABLE))
 473		return -EINVAL;
 474
 475	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
 476	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
 477		return -EINVAL;
 478
 479	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
 480				   vma->vm_pgoff + pgoff);
 481}
 482
 483static bool array_map_meta_equal(const struct bpf_map *meta0,
 484				 const struct bpf_map *meta1)
 485{
 486	if (!bpf_map_meta_equal(meta0, meta1))
 487		return false;
 488	return meta0->map_flags & BPF_F_INNER_MAP ? true :
 489	       meta0->max_entries == meta1->max_entries;
 490}
 491
 492struct bpf_iter_seq_array_map_info {
 493	struct bpf_map *map;
 494	void *percpu_value_buf;
 495	u32 index;
 496};
 497
 498static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
 499{
 500	struct bpf_iter_seq_array_map_info *info = seq->private;
 501	struct bpf_map *map = info->map;
 502	struct bpf_array *array;
 503	u32 index;
 504
 505	if (info->index >= map->max_entries)
 506		return NULL;
 507
 508	if (*pos == 0)
 509		++*pos;
 510	array = container_of(map, struct bpf_array, map);
 511	index = info->index & array->index_mask;
 512	if (info->percpu_value_buf)
 513	       return array->pptrs[index];
 514	return array->value + array->elem_size * index;
 515}
 516
 517static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 518{
 519	struct bpf_iter_seq_array_map_info *info = seq->private;
 520	struct bpf_map *map = info->map;
 521	struct bpf_array *array;
 522	u32 index;
 523
 524	++*pos;
 525	++info->index;
 526	if (info->index >= map->max_entries)
 527		return NULL;
 528
 529	array = container_of(map, struct bpf_array, map);
 530	index = info->index & array->index_mask;
 531	if (info->percpu_value_buf)
 532	       return array->pptrs[index];
 533	return array->value + array->elem_size * index;
 534}
 535
 536static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
 537{
 538	struct bpf_iter_seq_array_map_info *info = seq->private;
 539	struct bpf_iter__bpf_map_elem ctx = {};
 540	struct bpf_map *map = info->map;
 541	struct bpf_iter_meta meta;
 542	struct bpf_prog *prog;
 543	int off = 0, cpu = 0;
 544	void __percpu **pptr;
 545	u32 size;
 546
 547	meta.seq = seq;
 548	prog = bpf_iter_get_info(&meta, v == NULL);
 549	if (!prog)
 550		return 0;
 551
 552	ctx.meta = &meta;
 553	ctx.map = info->map;
 554	if (v) {
 555		ctx.key = &info->index;
 556
 557		if (!info->percpu_value_buf) {
 558			ctx.value = v;
 559		} else {
 560			pptr = v;
 561			size = round_up(map->value_size, 8);
 562			for_each_possible_cpu(cpu) {
 563				bpf_long_memcpy(info->percpu_value_buf + off,
 564						per_cpu_ptr(pptr, cpu),
 565						size);
 566				off += size;
 567			}
 568			ctx.value = info->percpu_value_buf;
 569		}
 570	}
 571
 572	return bpf_iter_run_prog(prog, &ctx);
 573}
 574
 575static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
 576{
 577	return __bpf_array_map_seq_show(seq, v);
 578}
 579
 580static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
 581{
 582	if (!v)
 583		(void)__bpf_array_map_seq_show(seq, NULL);
 584}
 585
 586static int bpf_iter_init_array_map(void *priv_data,
 587				   struct bpf_iter_aux_info *aux)
 588{
 589	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
 590	struct bpf_map *map = aux->map;
 591	void *value_buf;
 592	u32 buf_size;
 593
 594	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 595		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
 596		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
 597		if (!value_buf)
 598			return -ENOMEM;
 599
 600		seq_info->percpu_value_buf = value_buf;
 601	}
 602
 603	seq_info->map = map;
 604	return 0;
 605}
 606
 607static void bpf_iter_fini_array_map(void *priv_data)
 608{
 609	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
 610
 611	kfree(seq_info->percpu_value_buf);
 612}
 613
 614static const struct seq_operations bpf_array_map_seq_ops = {
 615	.start	= bpf_array_map_seq_start,
 616	.next	= bpf_array_map_seq_next,
 617	.stop	= bpf_array_map_seq_stop,
 618	.show	= bpf_array_map_seq_show,
 619};
 620
 621static const struct bpf_iter_seq_info iter_seq_info = {
 622	.seq_ops		= &bpf_array_map_seq_ops,
 623	.init_seq_private	= bpf_iter_init_array_map,
 624	.fini_seq_private	= bpf_iter_fini_array_map,
 625	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
 626};
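The seq_ops above drive BPF map-element iterators for array maps: start/next hand back a pointer to each element's value (or to the per-CPU snapshot assembled into percpu_value_buf), and __bpf_array_map_seq_show() exposes it to the iterator program as ctx->value, with ctx->key pointing at the current index. A minimal, illustrative sketch of such an iterator program, assuming libbpf's bpf_helpers.h/bpf_tracing.h macros, an array map with u64 values, and a loader that creates the iterator link with this map's fd:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("iter/bpf_map_elem")
int dump_array_elem(struct bpf_iter__bpf_map_elem *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	__u32 *key = ctx->key;
	__u64 *val = ctx->value;

	/* key/value are NULL on the final call, i.e. when v == NULL above */
	if (!key || !val)
		return 0;

	BPF_SEQ_PRINTF(seq, "%u: %llu\n", *key, *val);
	return 0;
}

Reading the resulting iterator fd from user space (for instance via bpf_iter_create() on the link fd, then read()) would then yield one formatted line per element.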
 627
 628static int bpf_for_each_array_elem(struct bpf_map *map, void *callback_fn,
 629				   void *callback_ctx, u64 flags)
 630{
 631	u32 i, key, num_elems = 0;
 632	struct bpf_array *array;
 633	bool is_percpu;
 634	u64 ret = 0;
 635	void *val;
 636
 637	if (flags != 0)
 638		return -EINVAL;
 639
 640	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
 641	array = container_of(map, struct bpf_array, map);
 642	if (is_percpu)
 643		migrate_disable();
 644	for (i = 0; i < map->max_entries; i++) {
 645		if (is_percpu)
 646			val = this_cpu_ptr(array->pptrs[i]);
 647		else
 648			val = array->value + array->elem_size * i;
 649		num_elems++;
 650		key = i;
 651		ret = BPF_CAST_CALL(callback_fn)((u64)(long)map,
 652					(u64)(long)&key, (u64)(long)val,
 653					(u64)(long)callback_ctx, 0);
 654		/* return value: 0 - continue, 1 - stop and return */
 655		if (ret)
 656			break;
 657	}
 658
 659	if (is_percpu)
 660		migrate_enable();
 661	return num_elems;
 662}
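On the BPF program side this path is reached through the bpf_for_each_map_elem() helper: the callback receives (map, key, value, callback_ctx) and, as the comment above notes, returns 0 to keep iterating or 1 to stop, while the helper itself returns the number of elements visited. A minimal sketch, assuming an ordinary array map of u64 counters and illustrative program/section names:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 16);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");

struct sum_ctx {
	__u64 total;
};

static long sum_one(struct bpf_map *map, __u32 *key, __u64 *val,
		    struct sum_ctx *ctx)
{
	ctx->total += *val;
	return 0;	/* 0 - continue, 1 - stop, matching the contract above */
}

SEC("tp/syscalls/sys_enter_getpid")
int sum_counters(void *unused)
{
	struct sum_ctx ctx = {};

	/* returns the number of traversed elements, or a negative error */
	bpf_for_each_map_elem(&counters, sum_one, &ctx, 0);
	return 0;
}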
 663
 664static int array_map_btf_id;
 665const struct bpf_map_ops array_map_ops = {
 666	.map_meta_equal = array_map_meta_equal,
 667	.map_alloc_check = array_map_alloc_check,
 668	.map_alloc = array_map_alloc,
 669	.map_free = array_map_free,
 670	.map_get_next_key = array_map_get_next_key,
 671	.map_lookup_elem = array_map_lookup_elem,
 672	.map_update_elem = array_map_update_elem,
 673	.map_delete_elem = array_map_delete_elem,
 674	.map_gen_lookup = array_map_gen_lookup,
 675	.map_direct_value_addr = array_map_direct_value_addr,
 676	.map_direct_value_meta = array_map_direct_value_meta,
 677	.map_mmap = array_map_mmap,
 678	.map_seq_show_elem = array_map_seq_show_elem,
 679	.map_check_btf = array_map_check_btf,
 680	.map_lookup_batch = generic_map_lookup_batch,
 681	.map_update_batch = generic_map_update_batch,
 682	.map_set_for_each_callback_args = map_set_for_each_callback_args,
 683	.map_for_each_callback = bpf_for_each_array_elem,
 684	.map_btf_name = "bpf_array",
 685	.map_btf_id = &array_map_btf_id,
 686	.iter_seq_info = &iter_seq_info,
 687};
 688
 689static int percpu_array_map_btf_id;
 690const struct bpf_map_ops percpu_array_map_ops = {
 691	.map_meta_equal = bpf_map_meta_equal,
 692	.map_alloc_check = array_map_alloc_check,
 693	.map_alloc = array_map_alloc,
 694	.map_free = array_map_free,
 695	.map_get_next_key = array_map_get_next_key,
 696	.map_lookup_elem = percpu_array_map_lookup_elem,
 697	.map_update_elem = array_map_update_elem,
 698	.map_delete_elem = array_map_delete_elem,
 699	.map_seq_show_elem = percpu_array_map_seq_show_elem,
 700	.map_check_btf = array_map_check_btf,
 701	.map_lookup_batch = generic_map_lookup_batch,
 702	.map_update_batch = generic_map_update_batch,
 703	.map_set_for_each_callback_args = map_set_for_each_callback_args,
 704	.map_for_each_callback = bpf_for_each_array_elem,
 705	.map_btf_name = "bpf_array",
 706	.map_btf_id = &percpu_array_map_btf_id,
 707	.iter_seq_info = &iter_seq_info,
 708};
 709
 710static int fd_array_map_alloc_check(union bpf_attr *attr)
 711{
 712	/* only file descriptors can be stored in this type of map */
 713	if (attr->value_size != sizeof(u32))
 714		return -EINVAL;
 715	/* Program read-only/write-only not supported for special maps yet. */
 716	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
 717		return -EINVAL;
 718	return array_map_alloc_check(attr);
 719}
 720
 721static void fd_array_map_free(struct bpf_map *map)
 722{
 723	struct bpf_array *array = container_of(map, struct bpf_array, map);
 724	int i;
 725
 726	/* make sure it's empty */
 727	for (i = 0; i < array->map.max_entries; i++)
 728		BUG_ON(array->ptrs[i] != NULL);
 729
 730	bpf_map_area_free(array);
 731}
 732
 733static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
 734{
 735	return ERR_PTR(-EOPNOTSUPP);
 736}
 737
 738/* only called from syscall */
 739int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
 740{
 741	void **elem, *ptr;
  742	int ret = 0;
 743
 744	if (!map->ops->map_fd_sys_lookup_elem)
 745		return -ENOTSUPP;
 746
 747	rcu_read_lock();
 748	elem = array_map_lookup_elem(map, key);
 749	if (elem && (ptr = READ_ONCE(*elem)))
 750		*value = map->ops->map_fd_sys_lookup_elem(ptr);
 751	else
 752		ret = -ENOENT;
 753	rcu_read_unlock();
 754
 755	return ret;
 756}
 757
 758/* only called from syscall */
 759int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
 760				 void *key, void *value, u64 map_flags)
 761{
 762	struct bpf_array *array = container_of(map, struct bpf_array, map);
 763	void *new_ptr, *old_ptr;
 764	u32 index = *(u32 *)key, ufd;
 765
 766	if (map_flags != BPF_ANY)
 767		return -EINVAL;
 768
 769	if (index >= array->map.max_entries)
 770		return -E2BIG;
 771
 772	ufd = *(u32 *)value;
 773	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
 774	if (IS_ERR(new_ptr))
 775		return PTR_ERR(new_ptr);
 776
 777	if (map->ops->map_poke_run) {
 778		mutex_lock(&array->aux->poke_mutex);
 779		old_ptr = xchg(array->ptrs + index, new_ptr);
 780		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
 781		mutex_unlock(&array->aux->poke_mutex);
 782	} else {
 783		old_ptr = xchg(array->ptrs + index, new_ptr);
 784	}
 785
 786	if (old_ptr)
 787		map->ops->map_fd_put_ptr(old_ptr);
 788	return 0;
 789}
 790
 791static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
 792{
 793	struct bpf_array *array = container_of(map, struct bpf_array, map);
 794	void *old_ptr;
 795	u32 index = *(u32 *)key;
 796
 797	if (index >= array->map.max_entries)
 798		return -E2BIG;
 799
 800	if (map->ops->map_poke_run) {
 801		mutex_lock(&array->aux->poke_mutex);
 802		old_ptr = xchg(array->ptrs + index, NULL);
 803		map->ops->map_poke_run(map, index, old_ptr, NULL);
 804		mutex_unlock(&array->aux->poke_mutex);
 805	} else {
 806		old_ptr = xchg(array->ptrs + index, NULL);
 807	}
 808
 809	if (old_ptr) {
 810		map->ops->map_fd_put_ptr(old_ptr);
 811		return 0;
 812	} else {
 813		return -ENOENT;
 814	}
 815}
 816
 817static void *prog_fd_array_get_ptr(struct bpf_map *map,
 818				   struct file *map_file, int fd)
 819{
 820	struct bpf_array *array = container_of(map, struct bpf_array, map);
 821	struct bpf_prog *prog = bpf_prog_get(fd);
 822
 823	if (IS_ERR(prog))
 824		return prog;
 825
 826	if (!bpf_prog_array_compatible(array, prog)) {
 827		bpf_prog_put(prog);
 828		return ERR_PTR(-EINVAL);
 829	}
 830
 831	return prog;
 832}
 833
 834static void prog_fd_array_put_ptr(void *ptr)
 835{
 836	bpf_prog_put(ptr);
 837}
 838
 839static u32 prog_fd_array_sys_lookup_elem(void *ptr)
 840{
 841	return ((struct bpf_prog *)ptr)->aux->id;
 842}
 843
 844/* decrement refcnt of all bpf_progs that are stored in this map */
 845static void bpf_fd_array_map_clear(struct bpf_map *map)
 846{
 847	struct bpf_array *array = container_of(map, struct bpf_array, map);
 848	int i;
 849
 850	for (i = 0; i < array->map.max_entries; i++)
 851		fd_array_map_delete_elem(map, &i);
 852}
 853
 854static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
 855					 struct seq_file *m)
 856{
 857	void **elem, *ptr;
 858	u32 prog_id;
 859
 860	rcu_read_lock();
 861
 862	elem = array_map_lookup_elem(map, key);
 863	if (elem) {
 864		ptr = READ_ONCE(*elem);
 865		if (ptr) {
 866			seq_printf(m, "%u: ", *(u32 *)key);
 867			prog_id = prog_fd_array_sys_lookup_elem(ptr);
 868			btf_type_seq_show(map->btf, map->btf_value_type_id,
 869					  &prog_id, m);
 870			seq_puts(m, "\n");
 871		}
 872	}
 873
 874	rcu_read_unlock();
 875}
 876
 877struct prog_poke_elem {
 878	struct list_head list;
 879	struct bpf_prog_aux *aux;
 880};
 881
 882static int prog_array_map_poke_track(struct bpf_map *map,
 883				     struct bpf_prog_aux *prog_aux)
 884{
 885	struct prog_poke_elem *elem;
 886	struct bpf_array_aux *aux;
 887	int ret = 0;
 888
 889	aux = container_of(map, struct bpf_array, map)->aux;
 890	mutex_lock(&aux->poke_mutex);
 891	list_for_each_entry(elem, &aux->poke_progs, list) {
 892		if (elem->aux == prog_aux)
 893			goto out;
 894	}
 895
 896	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
 897	if (!elem) {
 898		ret = -ENOMEM;
 899		goto out;
 900	}
 901
 902	INIT_LIST_HEAD(&elem->list);
 903	/* We must track the program's aux info at this point in time
 904	 * since the program pointer itself may not be stable yet, see
 905	 * also comment in prog_array_map_poke_run().
 906	 */
 907	elem->aux = prog_aux;
 908
 909	list_add_tail(&elem->list, &aux->poke_progs);
 910out:
 911	mutex_unlock(&aux->poke_mutex);
 912	return ret;
 913}
 914
 915static void prog_array_map_poke_untrack(struct bpf_map *map,
 916					struct bpf_prog_aux *prog_aux)
 917{
 918	struct prog_poke_elem *elem, *tmp;
 919	struct bpf_array_aux *aux;
 920
 921	aux = container_of(map, struct bpf_array, map)->aux;
 922	mutex_lock(&aux->poke_mutex);
 923	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
 924		if (elem->aux == prog_aux) {
 925			list_del_init(&elem->list);
 926			kfree(elem);
 927			break;
 928		}
 929	}
 930	mutex_unlock(&aux->poke_mutex);
 931}
 932
 933static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
 934				    struct bpf_prog *old,
 935				    struct bpf_prog *new)
 936{
 937	u8 *old_addr, *new_addr, *old_bypass_addr;
 938	struct prog_poke_elem *elem;
 939	struct bpf_array_aux *aux;
 940
 941	aux = container_of(map, struct bpf_array, map)->aux;
 942	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));
 943
 944	list_for_each_entry(elem, &aux->poke_progs, list) {
 945		struct bpf_jit_poke_descriptor *poke;
 946		int i, ret;
 947
 948		for (i = 0; i < elem->aux->size_poke_tab; i++) {
 949			poke = &elem->aux->poke_tab[i];
 950
 951			/* Few things to be aware of:
 952			 *
 953			 * 1) We can only ever access aux in this context, but
 954			 *    not aux->prog since it might not be stable yet and
 955			 *    there could be danger of use after free otherwise.
 956			 * 2) Initially when we start tracking aux, the program
 957			 *    is not JITed yet and also does not have a kallsyms
 958			 *    entry. We skip these as poke->tailcall_target_stable
 959			 *    is not active yet. The JIT will do the final fixup
 960			 *    before setting it stable. The various
 961			 *    poke->tailcall_target_stable are successively
 962			 *    activated, so tail call updates can arrive from here
 963			 *    while JIT is still finishing its final fixup for
 964			 *    non-activated poke entries.
  965			 * 3) On program teardown, the program's kallsyms entry gets
  966			 *    removed from an RCU callback, but we can only untrack
  967			 *    from a sleepable context, therefore bpf_arch_text_poke()
  968			 *    might not see that this is in the BPF text section and
  969			 *    bails out with -EINVAL. As these entries are unreachable
  970			 *    once the RCU grace period has passed, we simply skip them.
  971			 * 4) Programs reaching a refcount of zero while patching
  972			 *    is in progress are also fine, since we're protected
  973			 *    under poke_mutex and untrack the programs before the
  974			 *    JIT buffer is freed. If the program's kallsyms entry
  975			 *    gets evicted while we're still in the middle of
  976			 *    patching, we just skip the rest, which is fine due
  977			 *    to point 3).
  978			 * 5) Any other error returned below by bpf_arch_text_poke()
  979			 *    is an unexpected bug.
 980			 */
 981			if (!READ_ONCE(poke->tailcall_target_stable))
 982				continue;
 983			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
 984				continue;
 985			if (poke->tail_call.map != map ||
 986			    poke->tail_call.key != key)
 987				continue;
 988
 989			old_bypass_addr = old ? NULL : poke->bypass_addr;
 990			old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
 991			new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
 992
 993			if (new) {
 994				ret = bpf_arch_text_poke(poke->tailcall_target,
 995							 BPF_MOD_JUMP,
 996							 old_addr, new_addr);
 997				BUG_ON(ret < 0 && ret != -EINVAL);
 998				if (!old) {
 999					ret = bpf_arch_text_poke(poke->tailcall_bypass,
1000								 BPF_MOD_JUMP,
1001								 poke->bypass_addr,
1002								 NULL);
1003					BUG_ON(ret < 0 && ret != -EINVAL);
1004				}
1005			} else {
1006				ret = bpf_arch_text_poke(poke->tailcall_bypass,
1007							 BPF_MOD_JUMP,
1008							 old_bypass_addr,
1009							 poke->bypass_addr);
1010				BUG_ON(ret < 0 && ret != -EINVAL);
 1011				/* Let other CPUs finish executing the program
 1012				 * so that they cannot be exposed to an invalid
 1013				 * nop, stack unwind or nop state.
 1014				 */
1015				if (!ret)
1016					synchronize_rcu();
1017				ret = bpf_arch_text_poke(poke->tailcall_target,
1018							 BPF_MOD_JUMP,
1019							 old_addr, NULL);
1020				BUG_ON(ret < 0 && ret != -EINVAL);
1021			}
1022		}
1023	}
1024}
1025
1026static void prog_array_map_clear_deferred(struct work_struct *work)
1027{
1028	struct bpf_map *map = container_of(work, struct bpf_array_aux,
1029					   work)->map;
1030	bpf_fd_array_map_clear(map);
1031	bpf_map_put(map);
1032}
1033
1034static void prog_array_map_clear(struct bpf_map *map)
1035{
1036	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
1037						 map)->aux;
1038	bpf_map_inc(map);
1039	schedule_work(&aux->work);
1040}
1041
1042static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
1043{
1044	struct bpf_array_aux *aux;
1045	struct bpf_map *map;
1046
1047	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
1048	if (!aux)
1049		return ERR_PTR(-ENOMEM);
1050
1051	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
1052	INIT_LIST_HEAD(&aux->poke_progs);
1053	mutex_init(&aux->poke_mutex);
1054
1055	map = array_map_alloc(attr);
1056	if (IS_ERR(map)) {
1057		kfree(aux);
1058		return map;
1059	}
1060
1061	container_of(map, struct bpf_array, map)->aux = aux;
1062	aux->map = map;
1063
1064	return map;
1065}
1066
1067static void prog_array_map_free(struct bpf_map *map)
1068{
1069	struct prog_poke_elem *elem, *tmp;
1070	struct bpf_array_aux *aux;
1071
1072	aux = container_of(map, struct bpf_array, map)->aux;
1073	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
1074		list_del_init(&elem->list);
1075		kfree(elem);
1076	}
1077	kfree(aux);
1078	fd_array_map_free(map);
1079}
1080
1081/* prog_array->aux->{type,jited} is a runtime binding.
 1082 * A static check alone in the verifier is not enough.
1083 * Thus, prog_array_map cannot be used as an inner_map
1084 * and map_meta_equal is not implemented.
1085 */
1086static int prog_array_map_btf_id;
1087const struct bpf_map_ops prog_array_map_ops = {
1088	.map_alloc_check = fd_array_map_alloc_check,
1089	.map_alloc = prog_array_map_alloc,
1090	.map_free = prog_array_map_free,
1091	.map_poke_track = prog_array_map_poke_track,
1092	.map_poke_untrack = prog_array_map_poke_untrack,
1093	.map_poke_run = prog_array_map_poke_run,
1094	.map_get_next_key = array_map_get_next_key,
1095	.map_lookup_elem = fd_array_map_lookup_elem,
1096	.map_delete_elem = fd_array_map_delete_elem,
1097	.map_fd_get_ptr = prog_fd_array_get_ptr,
1098	.map_fd_put_ptr = prog_fd_array_put_ptr,
1099	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
1100	.map_release_uref = prog_array_map_clear,
1101	.map_seq_show_elem = prog_array_map_seq_show_elem,
1102	.map_btf_name = "bpf_array",
1103	.map_btf_id = &prog_array_map_btf_id,
1104};
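Prog arrays back tail calls: user space stores program fds through the fd-array update path above (note that only BPF_ANY is accepted), and map_poke_run() patches the jump targets of callers that the JIT registered via map_poke_track(). A hedged BPF-side sketch with illustrative names:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 4);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("xdp")
int dispatcher(struct xdp_md *ctx)
{
	/* jumps to the program stored at slot 0; falls through if empty */
	bpf_tail_call(ctx, &jmp_table, 0);
	return XDP_PASS;
}

User space would then populate slot 0 with something like bpf_map_update_elem(map_fd, &slot, &prog_fd, BPF_ANY), which lands in bpf_fd_array_map_update_elem() above and, for already-JITed callers, in prog_array_map_poke_run().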
1105
1106static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
1107						   struct file *map_file)
1108{
1109	struct bpf_event_entry *ee;
1110
1111	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
1112	if (ee) {
1113		ee->event = perf_file->private_data;
1114		ee->perf_file = perf_file;
1115		ee->map_file = map_file;
1116	}
1117
1118	return ee;
1119}
1120
1121static void __bpf_event_entry_free(struct rcu_head *rcu)
1122{
1123	struct bpf_event_entry *ee;
1124
1125	ee = container_of(rcu, struct bpf_event_entry, rcu);
1126	fput(ee->perf_file);
1127	kfree(ee);
1128}
1129
1130static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
1131{
1132	call_rcu(&ee->rcu, __bpf_event_entry_free);
1133}
1134
1135static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
1136					 struct file *map_file, int fd)
1137{
1138	struct bpf_event_entry *ee;
1139	struct perf_event *event;
1140	struct file *perf_file;
1141	u64 value;
1142
1143	perf_file = perf_event_get(fd);
1144	if (IS_ERR(perf_file))
1145		return perf_file;
1146
1147	ee = ERR_PTR(-EOPNOTSUPP);
1148	event = perf_file->private_data;
1149	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
1150		goto err_out;
1151
1152	ee = bpf_event_entry_gen(perf_file, map_file);
1153	if (ee)
1154		return ee;
1155	ee = ERR_PTR(-ENOMEM);
1156err_out:
1157	fput(perf_file);
1158	return ee;
1159}
1160
1161static void perf_event_fd_array_put_ptr(void *ptr)
1162{
1163	bpf_event_entry_free_rcu(ptr);
1164}
1165
1166static void perf_event_fd_array_release(struct bpf_map *map,
1167					struct file *map_file)
1168{
1169	struct bpf_array *array = container_of(map, struct bpf_array, map);
1170	struct bpf_event_entry *ee;
1171	int i;
1172
1173	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1174		return;
1175
1176	rcu_read_lock();
1177	for (i = 0; i < array->map.max_entries; i++) {
1178		ee = READ_ONCE(array->ptrs[i]);
1179		if (ee && ee->map_file == map_file)
1180			fd_array_map_delete_elem(map, &i);
1181	}
1182	rcu_read_unlock();
1183}
1184
1185static void perf_event_fd_array_map_free(struct bpf_map *map)
1186{
1187	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1188		bpf_fd_array_map_clear(map);
1189	fd_array_map_free(map);
1190}
1191
1192static int perf_event_array_map_btf_id;
1193const struct bpf_map_ops perf_event_array_map_ops = {
1194	.map_meta_equal = bpf_map_meta_equal,
1195	.map_alloc_check = fd_array_map_alloc_check,
1196	.map_alloc = array_map_alloc,
1197	.map_free = perf_event_fd_array_map_free,
1198	.map_get_next_key = array_map_get_next_key,
1199	.map_lookup_elem = fd_array_map_lookup_elem,
1200	.map_delete_elem = fd_array_map_delete_elem,
1201	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
1202	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
1203	.map_release = perf_event_fd_array_release,
1204	.map_check_btf = map_check_no_btf,
1205	.map_btf_name = "bpf_array",
1206	.map_btf_id = &perf_event_array_map_btf_id,
1207};
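A perf event array holds one perf event reference per slot (conventionally one per CPU); BPF programs emit records through bpf_perf_event_output(), and perf_event_fd_array_release() above drops the entries created through a given map file unless BPF_F_PRESERVE_ELEMS was set at map creation. A minimal, illustrative BPF-side sketch (probe target and struct layout are assumptions):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} events SEC(".maps");

struct event {
	__u32 pid;
};

SEC("kprobe/do_sys_openat2")
int report_open(struct pt_regs *ctx)
{
	struct event e = {
		.pid = bpf_get_current_pid_tgid() >> 32,
	};

	/* writes the record to the perf event stored at the current CPU's slot */
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
	return 0;
}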
1208
1209#ifdef CONFIG_CGROUPS
1210static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
1211				     struct file *map_file /* not used */,
1212				     int fd)
1213{
1214	return cgroup_get_from_fd(fd);
1215}
1216
1217static void cgroup_fd_array_put_ptr(void *ptr)
1218{
 1219	/* cgroup_put() frees cgrp after an RCU grace period */
1220	cgroup_put(ptr);
1221}
1222
1223static void cgroup_fd_array_free(struct bpf_map *map)
1224{
1225	bpf_fd_array_map_clear(map);
1226	fd_array_map_free(map);
1227}
1228
1229static int cgroup_array_map_btf_id;
1230const struct bpf_map_ops cgroup_array_map_ops = {
1231	.map_meta_equal = bpf_map_meta_equal,
1232	.map_alloc_check = fd_array_map_alloc_check,
1233	.map_alloc = array_map_alloc,
1234	.map_free = cgroup_fd_array_free,
1235	.map_get_next_key = array_map_get_next_key,
1236	.map_lookup_elem = fd_array_map_lookup_elem,
1237	.map_delete_elem = fd_array_map_delete_elem,
1238	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
1239	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
1240	.map_check_btf = map_check_no_btf,
1241	.map_btf_name = "bpf_array",
1242	.map_btf_id = &cgroup_array_map_btf_id,
1243};
1244#endif
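Cgroup arrays store references taken via cgroup_get_from_fd(); on the BPF side they are consumed by helpers such as bpf_current_task_under_cgroup(), which tests the current task against the cgroup stored at a given index (returning 1 when the task is within that cgroup, 0 when it is not, negative on error). A sketch under those assumptions, with illustrative names; user space is expected to have stored a cgroup directory fd at slot 0 beforehand:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
	__uint(max_entries, 1);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} cgroups SEC(".maps");

SEC("kprobe/do_sys_openat2")
int trace_in_cgroup(struct pt_regs *ctx)
{
	/* only trace tasks that belong to the cgroup held at slot 0 */
	if (bpf_current_task_under_cgroup(&cgroups, 0) != 1)
		return 0;

	bpf_printk("open() from the traced cgroup\n");
	return 0;
}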
1245
1246static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
1247{
1248	struct bpf_map *map, *inner_map_meta;
1249
1250	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
1251	if (IS_ERR(inner_map_meta))
1252		return inner_map_meta;
1253
1254	map = array_map_alloc(attr);
1255	if (IS_ERR(map)) {
1256		bpf_map_meta_free(inner_map_meta);
1257		return map;
1258	}
1259
1260	map->inner_map_meta = inner_map_meta;
1261
1262	return map;
1263}
1264
1265static void array_of_map_free(struct bpf_map *map)
1266{
 1267	/* map->inner_map_meta is only accessed from the syscall path,
 1268	 * which is protected by fdget/fdput.
 1269	 */
1270	bpf_map_meta_free(map->inner_map_meta);
1271	bpf_fd_array_map_clear(map);
1272	fd_array_map_free(map);
1273}
1274
1275static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
1276{
1277	struct bpf_map **inner_map = array_map_lookup_elem(map, key);
1278
1279	if (!inner_map)
1280		return NULL;
1281
1282	return READ_ONCE(*inner_map);
1283}
1284
1285static int array_of_map_gen_lookup(struct bpf_map *map,
1286				   struct bpf_insn *insn_buf)
1287{
1288	struct bpf_array *array = container_of(map, struct bpf_array, map);
1289	u32 elem_size = round_up(map->value_size, 8);
1290	struct bpf_insn *insn = insn_buf;
1291	const int ret = BPF_REG_0;
1292	const int map_ptr = BPF_REG_1;
1293	const int index = BPF_REG_2;
1294
1295	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
1296	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
1297	if (!map->bypass_spec_v1) {
1298		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
1299		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
1300	} else {
1301		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
1302	}
1303	if (is_power_of_2(elem_size))
1304		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
1305	else
1306		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
1307	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
1308	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
1309	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
1310	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
1311	*insn++ = BPF_MOV64_IMM(ret, 0);
1312
1313	return insn - insn_buf;
1314}
1315
1316static int array_of_maps_map_btf_id;
1317const struct bpf_map_ops array_of_maps_map_ops = {
1318	.map_alloc_check = fd_array_map_alloc_check,
1319	.map_alloc = array_of_map_alloc,
1320	.map_free = array_of_map_free,
1321	.map_get_next_key = array_map_get_next_key,
1322	.map_lookup_elem = array_of_map_lookup_elem,
1323	.map_delete_elem = fd_array_map_delete_elem,
1324	.map_fd_get_ptr = bpf_map_fd_get_ptr,
1325	.map_fd_put_ptr = bpf_map_fd_put_ptr,
1326	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
1327	.map_gen_lookup = array_of_map_gen_lookup,
1328	.map_check_btf = map_check_no_btf,
1329	.map_btf_name = "bpf_array",
1330	.map_btf_id = &array_of_maps_map_btf_id,
1331};
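Lookups in an array of maps are inlined by array_of_map_gen_lookup() above: the emitted instructions bounds-check (and, without bypass_spec_v1, mask) the index, load the stored inner-map pointer and return NULL for empty slots. A hedged sketch of declaring and using such a map with libbpf's BTF-defined map syntax (names, sizes and the traced section are illustrative):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

struct inner_array {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} inner_a SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, 4);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
	__array(values, struct inner_array);
} outer SEC(".maps") = {
	.values = { [0] = &inner_a },
};

SEC("tp/syscalls/sys_enter_getpid")
int use_inner(void *unused)
{
	__u32 okey = 0, ikey = 0;
	__u64 *val;
	void *inner;

	/* this outer lookup is what gets inlined by array_of_map_gen_lookup() */
	inner = bpf_map_lookup_elem(&outer, &okey);
	if (!inner)
		return 0;

	val = bpf_map_lookup_elem(inner, &ikey);
	if (val)
		__sync_fetch_and_add(val, 1);
	return 0;
}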