   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   3 */
   4#include <linux/bpf.h>
   5#include <linux/bpf-cgroup.h>
   6#include <linux/bpf_trace.h>
   7#include <linux/bpf_lirc.h>
   8#include <linux/bpf_verifier.h>
   9#include <linux/bsearch.h>
  10#include <linux/btf.h>
  11#include <linux/syscalls.h>
  12#include <linux/slab.h>
  13#include <linux/sched/signal.h>
  14#include <linux/vmalloc.h>
  15#include <linux/mmzone.h>
  16#include <linux/anon_inodes.h>
  17#include <linux/fdtable.h>
  18#include <linux/file.h>
  19#include <linux/fs.h>
  20#include <linux/license.h>
  21#include <linux/filter.h>
  22#include <linux/kernel.h>
  23#include <linux/idr.h>
  24#include <linux/cred.h>
  25#include <linux/timekeeping.h>
  26#include <linux/ctype.h>
  27#include <linux/nospec.h>
  28#include <linux/audit.h>
  29#include <uapi/linux/btf.h>
  30#include <linux/pgtable.h>
  31#include <linux/bpf_lsm.h>
  32#include <linux/poll.h>
  33#include <linux/sort.h>
  34#include <linux/bpf-netns.h>
  35#include <linux/rcupdate_trace.h>
  36#include <linux/memcontrol.h>
  37#include <linux/trace_events.h>
  38
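/* The "FD map" types matched below hold references to other kernel objects
 * (perf events, cgroups, other maps, BPF programs). From the syscall side
 * their value is a 32-bit quantity: updates take a file descriptor, and
 * lookups return an object id where the map type supports it (see
 * bpf_map_value_size() further down).
 */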
  39#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
  40			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
  41			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
  42#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
  43#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
  44#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
  45			IS_FD_HASH(map))
  46
  47#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)
  48
  49DEFINE_PER_CPU(int, bpf_prog_active);
  50static DEFINE_IDR(prog_idr);
  51static DEFINE_SPINLOCK(prog_idr_lock);
  52static DEFINE_IDR(map_idr);
  53static DEFINE_SPINLOCK(map_idr_lock);
  54static DEFINE_IDR(link_idr);
  55static DEFINE_SPINLOCK(link_idr_lock);
  56
  57int sysctl_unprivileged_bpf_disabled __read_mostly =
  58	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;
  59
  60static const struct bpf_map_ops * const bpf_map_types[] = {
  61#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
  62#define BPF_MAP_TYPE(_id, _ops) \
  63	[_id] = &_ops,
  64#define BPF_LINK_TYPE(_id, _name)
  65#include <linux/bpf_types.h>
  66#undef BPF_PROG_TYPE
  67#undef BPF_MAP_TYPE
  68#undef BPF_LINK_TYPE
  69};
  70
  71/*
  72 * If we're handed a bigger struct than we know of, ensure all the unknown bits
  73 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
  74 * we don't know about yet.
  75 *
  76 * There is a ToCToU between this function call and the following
  77 * copy_from_user() call. However, this is not a concern since this function is
   78 * only meant to future-proof against new, unknown bits.
  79 */
  80int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
  81			     size_t expected_size,
  82			     size_t actual_size)
  83{
  84	int res;
  85
  86	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
  87		return -E2BIG;
  88
  89	if (actual_size <= expected_size)
  90		return 0;
  91
  92	if (uaddr.is_kernel)
  93		res = memchr_inv(uaddr.kernel + expected_size, 0,
  94				 actual_size - expected_size) == NULL;
  95	else
  96		res = check_zeroed_user(uaddr.user + expected_size,
  97					actual_size - expected_size);
  98	if (res < 0)
  99		return res;
 100	return res ? 0 : -E2BIG;
 101}
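
/* Typical use, sketched from how __sys_bpf() later in this file consumes it:
 *
 *	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
 *	if (err)
 *		return err;
 *	size = min_t(u32, size, sizeof(attr));
 *	memset(&attr, 0, sizeof(attr));
 *	if (copy_from_bpfptr(&attr, uattr, size))
 *		return -EFAULT;
 *
 * i.e. a newer user space may pass a larger struct as long as every byte the
 * kernel does not understand is zero.
 */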
 102
 103const struct bpf_map_ops bpf_map_offload_ops = {
 104	.map_meta_equal = bpf_map_meta_equal,
 105	.map_alloc = bpf_map_offload_map_alloc,
 106	.map_free = bpf_map_offload_map_free,
 107	.map_check_btf = map_check_no_btf,
 108};
 109
 110static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 111{
 112	const struct bpf_map_ops *ops;
 113	u32 type = attr->map_type;
 114	struct bpf_map *map;
 115	int err;
 116
 117	if (type >= ARRAY_SIZE(bpf_map_types))
 118		return ERR_PTR(-EINVAL);
 119	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
 120	ops = bpf_map_types[type];
 121	if (!ops)
 122		return ERR_PTR(-EINVAL);
 123
 124	if (ops->map_alloc_check) {
 125		err = ops->map_alloc_check(attr);
 126		if (err)
 127			return ERR_PTR(err);
 128	}
 129	if (attr->map_ifindex)
 130		ops = &bpf_map_offload_ops;
 131	map = ops->map_alloc(attr);
 132	if (IS_ERR(map))
 133		return map;
 134	map->ops = ops;
 135	map->map_type = type;
 136	return map;
 137}
 138
 139static void bpf_map_write_active_inc(struct bpf_map *map)
 140{
 141	atomic64_inc(&map->writecnt);
 142}
 143
 144static void bpf_map_write_active_dec(struct bpf_map *map)
 145{
 146	atomic64_dec(&map->writecnt);
 147}
 148
 149bool bpf_map_write_active(const struct bpf_map *map)
 150{
 151	return atomic64_read(&map->writecnt) != 0;
 152}
 153
 154static u32 bpf_map_value_size(const struct bpf_map *map)
 155{
 156	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 157	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
 158	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
 159	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
 160		return round_up(map->value_size, 8) * num_possible_cpus();
 161	else if (IS_FD_MAP(map))
 162		return sizeof(u32);
 163	else
 164		return  map->value_size;
 165}
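
/* Example: for a BPF_MAP_TYPE_PERCPU_ARRAY with value_size == 12 on a
 * machine with 4 possible CPUs, the syscall-visible value buffer is
 * round_up(12, 8) * 4 = 64 bytes, one 8-byte-aligned slot per possible CPU.
 */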
 166
 167static void maybe_wait_bpf_programs(struct bpf_map *map)
 168{
 169	/* Wait for any running BPF programs to complete so that
 170	 * userspace, when we return to it, knows that all programs
 171	 * that could be running use the new map value.
 172	 */
 173	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
 174	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
 175		synchronize_rcu();
 176}
 177
 178static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
 179				void *key, void *value, __u64 flags)
 180{
 181	int err;
 182
 183	/* Need to create a kthread, thus must support schedule */
 184	if (bpf_map_is_dev_bound(map)) {
 185		return bpf_map_offload_update_elem(map, key, value, flags);
 186	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
 187		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
 188		return map->ops->map_update_elem(map, key, value, flags);
 189	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
 190		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
 191		return sock_map_update_elem_sys(map, key, value, flags);
 192	} else if (IS_FD_PROG_ARRAY(map)) {
 193		return bpf_fd_array_map_update_elem(map, map_file, key, value,
 194						    flags);
 195	}
 196
 197	bpf_disable_instrumentation();
 198	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 199	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 200		err = bpf_percpu_hash_update(map, key, value, flags);
 201	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 202		err = bpf_percpu_array_update(map, key, value, flags);
 203	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
 204		err = bpf_percpu_cgroup_storage_update(map, key, value,
 205						       flags);
 206	} else if (IS_FD_ARRAY(map)) {
 207		rcu_read_lock();
 208		err = bpf_fd_array_map_update_elem(map, map_file, key, value,
 209						   flags);
 210		rcu_read_unlock();
 211	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
 212		rcu_read_lock();
 213		err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
 214						  flags);
 215		rcu_read_unlock();
 216	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
 217		/* rcu_read_lock() is not needed */
 218		err = bpf_fd_reuseport_array_update_elem(map, key, value,
 219							 flags);
 220	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
 221		   map->map_type == BPF_MAP_TYPE_STACK ||
 222		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
 223		err = map->ops->map_push_elem(map, value, flags);
 224	} else {
 225		rcu_read_lock();
 226		err = map->ops->map_update_elem(map, key, value, flags);
 227		rcu_read_unlock();
 228	}
 229	bpf_enable_instrumentation();
 230	maybe_wait_bpf_programs(map);
 231
 232	return err;
 233}
 234
 235static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
 236			      __u64 flags)
 237{
 238	void *ptr;
 239	int err;
 240
 241	if (bpf_map_is_dev_bound(map))
 242		return bpf_map_offload_lookup_elem(map, key, value);
 243
 244	bpf_disable_instrumentation();
 245	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 246	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 247		err = bpf_percpu_hash_copy(map, key, value);
 248	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 249		err = bpf_percpu_array_copy(map, key, value);
 250	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
 251		err = bpf_percpu_cgroup_storage_copy(map, key, value);
 252	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
 253		err = bpf_stackmap_copy(map, key, value);
 254	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
 255		err = bpf_fd_array_map_lookup_elem(map, key, value);
 256	} else if (IS_FD_HASH(map)) {
 257		err = bpf_fd_htab_map_lookup_elem(map, key, value);
 258	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
 259		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
 260	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
 261		   map->map_type == BPF_MAP_TYPE_STACK ||
 262		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
 263		err = map->ops->map_peek_elem(map, value);
 264	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
 265		/* struct_ops map requires directly updating "value" */
 266		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
 267	} else {
 268		rcu_read_lock();
 269		if (map->ops->map_lookup_elem_sys_only)
 270			ptr = map->ops->map_lookup_elem_sys_only(map, key);
 271		else
 272			ptr = map->ops->map_lookup_elem(map, key);
 273		if (IS_ERR(ptr)) {
 274			err = PTR_ERR(ptr);
 275		} else if (!ptr) {
 276			err = -ENOENT;
 277		} else {
 278			err = 0;
 279			if (flags & BPF_F_LOCK)
 280				/* lock 'ptr' and copy everything but lock */
 281				copy_map_value_locked(map, value, ptr, true);
 282			else
 283				copy_map_value(map, value, ptr);
 284			/* mask lock and timer, since value wasn't zero inited */
 285			check_and_init_map_value(map, value);
 286		}
 287		rcu_read_unlock();
 288	}
 289
 290	bpf_enable_instrumentation();
 291	maybe_wait_bpf_programs(map);
 292
 293	return err;
 294}
 295
  296/* Please, do not use this function outside of the map creation path
 297 * (e.g. in map update path) without taking care of setting the active
 298 * memory cgroup (see at bpf_map_kmalloc_node() for example).
 299 */
 300static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
 301{
 302	/* We really just want to fail instead of triggering OOM killer
 303	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
 304	 * which is used for lower order allocation requests.
 305	 *
 306	 * It has been observed that higher order allocation requests done by
 307	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
 308	 * to reclaim memory from the page cache, thus we set
 309	 * __GFP_RETRY_MAYFAIL to avoid such situations.
 310	 */
 311
 312	const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_ACCOUNT;
 313	unsigned int flags = 0;
 314	unsigned long align = 1;
 315	void *area;
 316
 317	if (size >= SIZE_MAX)
 318		return NULL;
 319
 320	/* kmalloc()'ed memory can't be mmap()'ed */
 321	if (mmapable) {
 322		BUG_ON(!PAGE_ALIGNED(size));
 323		align = SHMLBA;
 324		flags = VM_USERMAP;
 325	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
 326		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
 327				    numa_node);
 328		if (area != NULL)
 329			return area;
 330	}
 331
 332	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
 333			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
 334			flags, numa_node, __builtin_return_address(0));
 335}
 336
 337void *bpf_map_area_alloc(u64 size, int numa_node)
 338{
 339	return __bpf_map_area_alloc(size, numa_node, false);
 340}
 341
 342void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
 343{
 344	return __bpf_map_area_alloc(size, numa_node, true);
 345}
 346
 347void bpf_map_area_free(void *area)
 348{
 349	kvfree(area);
 350}
 351
 352static u32 bpf_map_flags_retain_permanent(u32 flags)
 353{
 354	/* Some map creation flags are not tied to the map object but
 355	 * rather to the map fd instead, so they have no meaning upon
 356	 * map object inspection since multiple file descriptors with
 357	 * different (access) properties can exist here. Thus, given
  358	 * this has zero meaning for the map itself, let's clear these
 359	 * from here.
 360	 */
 361	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
 362}
 363
 364void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
 365{
 366	map->map_type = attr->map_type;
 367	map->key_size = attr->key_size;
 368	map->value_size = attr->value_size;
 369	map->max_entries = attr->max_entries;
 370	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
 371	map->numa_node = bpf_map_attr_numa_node(attr);
 372	map->map_extra = attr->map_extra;
 373}
 374
 375static int bpf_map_alloc_id(struct bpf_map *map)
 376{
 377	int id;
 378
 379	idr_preload(GFP_KERNEL);
 380	spin_lock_bh(&map_idr_lock);
 381	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
 382	if (id > 0)
 383		map->id = id;
 384	spin_unlock_bh(&map_idr_lock);
 385	idr_preload_end();
 386
 387	if (WARN_ON_ONCE(!id))
 388		return -ENOSPC;
 389
 390	return id > 0 ? 0 : id;
 391}
 392
 393void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
 394{
 395	unsigned long flags;
 396
 397	/* Offloaded maps are removed from the IDR store when their device
 398	 * disappears - even if someone holds an fd to them they are unusable,
 399	 * the memory is gone, all ops will fail; they are simply waiting for
 400	 * refcnt to drop to be freed.
 401	 */
 402	if (!map->id)
 403		return;
 404
 405	if (do_idr_lock)
 406		spin_lock_irqsave(&map_idr_lock, flags);
 407	else
 408		__acquire(&map_idr_lock);
 409
 410	idr_remove(&map_idr, map->id);
 411	map->id = 0;
 412
 413	if (do_idr_lock)
 414		spin_unlock_irqrestore(&map_idr_lock, flags);
 415	else
 416		__release(&map_idr_lock);
 417}
 418
 419#ifdef CONFIG_MEMCG_KMEM
 420static void bpf_map_save_memcg(struct bpf_map *map)
 421{
 422	/* Currently if a map is created by a process belonging to the root
 423	 * memory cgroup, get_obj_cgroup_from_current() will return NULL.
 424	 * So we have to check map->objcg for being NULL each time it's
 425	 * being used.
 426	 */
 427	map->objcg = get_obj_cgroup_from_current();
 428}
 429
 430static void bpf_map_release_memcg(struct bpf_map *map)
 431{
 432	if (map->objcg)
 433		obj_cgroup_put(map->objcg);
 434}
 435
 436static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
 437{
 438	if (map->objcg)
 439		return get_mem_cgroup_from_objcg(map->objcg);
 440
 441	return root_mem_cgroup;
 442}
 443
 444void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
 445			   int node)
 446{
 447	struct mem_cgroup *memcg, *old_memcg;
 448	void *ptr;
 449
 450	memcg = bpf_map_get_memcg(map);
 451	old_memcg = set_active_memcg(memcg);
 452	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
 453	set_active_memcg(old_memcg);
 454	mem_cgroup_put(memcg);
 455
 456	return ptr;
 457}
 458
 459void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
 460{
 461	struct mem_cgroup *memcg, *old_memcg;
 462	void *ptr;
 463
 464	memcg = bpf_map_get_memcg(map);
 465	old_memcg = set_active_memcg(memcg);
 466	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
 467	set_active_memcg(old_memcg);
 468	mem_cgroup_put(memcg);
 469
 470	return ptr;
 471}
 472
 473void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
 474				    size_t align, gfp_t flags)
 475{
 476	struct mem_cgroup *memcg, *old_memcg;
 477	void __percpu *ptr;
 478
 479	memcg = bpf_map_get_memcg(map);
 480	old_memcg = set_active_memcg(memcg);
 481	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
 482	set_active_memcg(old_memcg);
 483	mem_cgroup_put(memcg);
 484
 485	return ptr;
 486}
 487
 488#else
 489static void bpf_map_save_memcg(struct bpf_map *map)
 490{
 491}
 492
 493static void bpf_map_release_memcg(struct bpf_map *map)
 494{
 495}
 496#endif
 497
 498static int btf_field_cmp(const void *a, const void *b)
 499{
 500	const struct btf_field *f1 = a, *f2 = b;
 501
 502	if (f1->offset < f2->offset)
 503		return -1;
 504	else if (f1->offset > f2->offset)
 505		return 1;
 506	return 0;
 507}
 508
 509struct btf_field *btf_record_find(const struct btf_record *rec, u32 offset,
 510				  enum btf_field_type type)
 511{
 512	struct btf_field *field;
 513
 514	if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & type))
 515		return NULL;
 516	field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp);
 517	if (!field || !(field->type & type))
 518		return NULL;
 519	return field;
 520}
 521
 522void btf_record_free(struct btf_record *rec)
 523{
 524	int i;
 525
 526	if (IS_ERR_OR_NULL(rec))
 527		return;
 528	for (i = 0; i < rec->cnt; i++) {
 529		switch (rec->fields[i].type) {
 530		case BPF_SPIN_LOCK:
 531		case BPF_TIMER:
 532			break;
 533		case BPF_KPTR_UNREF:
 534		case BPF_KPTR_REF:
 535			if (rec->fields[i].kptr.module)
 536				module_put(rec->fields[i].kptr.module);
 537			btf_put(rec->fields[i].kptr.btf);
 538			break;
 539		case BPF_LIST_HEAD:
 540		case BPF_LIST_NODE:
 541			/* Nothing to release for bpf_list_head */
 542			break;
 543		default:
 544			WARN_ON_ONCE(1);
 545			continue;
 546		}
 547	}
 548	kfree(rec);
 549}
 550
 551void bpf_map_free_record(struct bpf_map *map)
 552{
 553	btf_record_free(map->record);
 554	map->record = NULL;
 555}
 556
 557struct btf_record *btf_record_dup(const struct btf_record *rec)
 558{
 559	const struct btf_field *fields;
 560	struct btf_record *new_rec;
 561	int ret, size, i;
 562
 563	if (IS_ERR_OR_NULL(rec))
 564		return NULL;
 565	size = offsetof(struct btf_record, fields[rec->cnt]);
 566	new_rec = kmemdup(rec, size, GFP_KERNEL | __GFP_NOWARN);
 567	if (!new_rec)
 568		return ERR_PTR(-ENOMEM);
 569	/* Do a deep copy of the btf_record */
 570	fields = rec->fields;
 571	new_rec->cnt = 0;
 572	for (i = 0; i < rec->cnt; i++) {
 573		switch (fields[i].type) {
 574		case BPF_SPIN_LOCK:
 575		case BPF_TIMER:
 576			break;
 577		case BPF_KPTR_UNREF:
 578		case BPF_KPTR_REF:
 579			btf_get(fields[i].kptr.btf);
 580			if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) {
 581				ret = -ENXIO;
 582				goto free;
 583			}
 584			break;
 585		case BPF_LIST_HEAD:
 586		case BPF_LIST_NODE:
 587			/* Nothing to acquire for bpf_list_head */
 588			break;
 589		default:
 590			ret = -EFAULT;
 591			WARN_ON_ONCE(1);
 592			goto free;
 593		}
 594		new_rec->cnt++;
 595	}
 596	return new_rec;
 597free:
 598	btf_record_free(new_rec);
 599	return ERR_PTR(ret);
 600}
 601
 602bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b)
 603{
 604	bool a_has_fields = !IS_ERR_OR_NULL(rec_a), b_has_fields = !IS_ERR_OR_NULL(rec_b);
 605	int size;
 606
 607	if (!a_has_fields && !b_has_fields)
 608		return true;
 609	if (a_has_fields != b_has_fields)
 610		return false;
 611	if (rec_a->cnt != rec_b->cnt)
 612		return false;
 613	size = offsetof(struct btf_record, fields[rec_a->cnt]);
 614	/* btf_parse_fields uses kzalloc to allocate a btf_record, so unused
 615	 * members are zeroed out. So memcmp is safe to do without worrying
 616	 * about padding/unused fields.
 617	 *
 618	 * While spin_lock, timer, and kptr have no relation to map BTF,
 619	 * list_head metadata is specific to map BTF, the btf and value_rec
 620	 * members in particular. btf is the map BTF, while value_rec points to
 621	 * btf_record in that map BTF.
 622	 *
 623	 * So while by default, we don't rely on the map BTF (which the records
 624	 * were parsed from) matching for both records, which is not backwards
 625	 * compatible, in case list_head is part of it, we implicitly rely on
 626	 * that by way of depending on memcmp succeeding for it.
 627	 */
 628	return !memcmp(rec_a, rec_b, size);
 629}
 630
 631void bpf_obj_free_timer(const struct btf_record *rec, void *obj)
 632{
 633	if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_TIMER)))
 634		return;
 635	bpf_timer_cancel_and_free(obj + rec->timer_off);
 636}
 637
 638void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
 639{
 640	const struct btf_field *fields;
 641	int i;
 642
 643	if (IS_ERR_OR_NULL(rec))
 644		return;
 645	fields = rec->fields;
 646	for (i = 0; i < rec->cnt; i++) {
 647		const struct btf_field *field = &fields[i];
 648		void *field_ptr = obj + field->offset;
 649
 650		switch (fields[i].type) {
 651		case BPF_SPIN_LOCK:
 652			break;
 653		case BPF_TIMER:
 654			bpf_timer_cancel_and_free(field_ptr);
 655			break;
 656		case BPF_KPTR_UNREF:
 657			WRITE_ONCE(*(u64 *)field_ptr, 0);
 658			break;
 659		case BPF_KPTR_REF:
 660			field->kptr.dtor((void *)xchg((unsigned long *)field_ptr, 0));
 661			break;
 662		case BPF_LIST_HEAD:
 663			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
 664				continue;
 665			bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off);
 666			break;
 667		case BPF_LIST_NODE:
 668			break;
 669		default:
 670			WARN_ON_ONCE(1);
 671			continue;
 672		}
 673	}
 674}
 675
 676/* called from workqueue */
 677static void bpf_map_free_deferred(struct work_struct *work)
 678{
 679	struct bpf_map *map = container_of(work, struct bpf_map, work);
 680	struct btf_field_offs *foffs = map->field_offs;
 681	struct btf_record *rec = map->record;
 682
 683	security_bpf_map_free(map);
 684	bpf_map_release_memcg(map);
 685	/* implementation dependent freeing */
 686	map->ops->map_free(map);
 687	/* Delay freeing of field_offs and btf_record for maps, as map_free
 688	 * callback usually needs access to them. It is better to do it here
 689	 * than require each callback to do the free itself manually.
 690	 *
 691	 * Note that the btf_record stashed in map->inner_map_meta->record was
 692	 * already freed using the map_free callback for map in map case which
 693	 * eventually calls bpf_map_free_meta, since inner_map_meta is only a
 694	 * template bpf_map struct used during verification.
 695	 */
 696	kfree(foffs);
 697	btf_record_free(rec);
 698}
 699
 700static void bpf_map_put_uref(struct bpf_map *map)
 701{
 702	if (atomic64_dec_and_test(&map->usercnt)) {
 703		if (map->ops->map_release_uref)
 704			map->ops->map_release_uref(map);
 705	}
 706}
 707
 708/* decrement map refcnt and schedule it for freeing via workqueue
  709 * (underlying map implementation ops->map_free() might sleep)
 710 */
 711static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
 712{
 713	if (atomic64_dec_and_test(&map->refcnt)) {
 714		/* bpf_map_free_id() must be called first */
 715		bpf_map_free_id(map, do_idr_lock);
 716		btf_put(map->btf);
 717		INIT_WORK(&map->work, bpf_map_free_deferred);
 718		/* Avoid spawning kworkers, since they all might contend
 719		 * for the same mutex like slab_mutex.
 720		 */
 721		queue_work(system_unbound_wq, &map->work);
 722	}
 723}
 724
 725void bpf_map_put(struct bpf_map *map)
 726{
 727	__bpf_map_put(map, true);
 728}
 729EXPORT_SYMBOL_GPL(bpf_map_put);
 730
 731void bpf_map_put_with_uref(struct bpf_map *map)
 732{
 733	bpf_map_put_uref(map);
 734	bpf_map_put(map);
 735}
 736
 737static int bpf_map_release(struct inode *inode, struct file *filp)
 738{
 739	struct bpf_map *map = filp->private_data;
 740
 741	if (map->ops->map_release)
 742		map->ops->map_release(map, filp);
 743
 744	bpf_map_put_with_uref(map);
 745	return 0;
 746}
 747
 748static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
 749{
 750	fmode_t mode = f.file->f_mode;
 751
 752	/* Our file permissions may have been overridden by global
 753	 * map permissions facing syscall side.
 754	 */
 755	if (READ_ONCE(map->frozen))
 756		mode &= ~FMODE_CAN_WRITE;
 757	return mode;
 758}
 759
 760#ifdef CONFIG_PROC_FS
 761/* Provides an approximation of the map's memory footprint.
  762 * Used only to provide backward compatibility and to display
  763 * a reasonable "memlock" value.
 764 */
 765static unsigned long bpf_map_memory_footprint(const struct bpf_map *map)
 766{
 767	unsigned long size;
 768
 769	size = round_up(map->key_size + bpf_map_value_size(map), 8);
 770
 771	return round_up(map->max_entries * size, PAGE_SIZE);
 772}
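
/* Example: a hash map with key_size == 4, value_size == 8 and
 * max_entries == 1000 reports round_up(1000 * round_up(4 + 8, 8), PAGE_SIZE),
 * i.e. 16384 bytes with 4 KiB pages - an estimate, not an exact charge.
 */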
 773
 774static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
 775{
 776	struct bpf_map *map = filp->private_data;
 777	u32 type = 0, jited = 0;
 778
 779	if (map_type_contains_progs(map)) {
 780		spin_lock(&map->owner.lock);
 781		type  = map->owner.type;
 782		jited = map->owner.jited;
 783		spin_unlock(&map->owner.lock);
 784	}
 785
 786	seq_printf(m,
 787		   "map_type:\t%u\n"
 788		   "key_size:\t%u\n"
 789		   "value_size:\t%u\n"
 790		   "max_entries:\t%u\n"
 791		   "map_flags:\t%#x\n"
 792		   "map_extra:\t%#llx\n"
 793		   "memlock:\t%lu\n"
 794		   "map_id:\t%u\n"
 795		   "frozen:\t%u\n",
 796		   map->map_type,
 797		   map->key_size,
 798		   map->value_size,
 799		   map->max_entries,
 800		   map->map_flags,
 801		   (unsigned long long)map->map_extra,
 802		   bpf_map_memory_footprint(map),
 803		   map->id,
 804		   READ_ONCE(map->frozen));
 805	if (type) {
 806		seq_printf(m, "owner_prog_type:\t%u\n", type);
 807		seq_printf(m, "owner_jited:\t%u\n", jited);
 808	}
 809}
 810#endif
 811
 812static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
 813			      loff_t *ppos)
 814{
 815	/* We need this handler such that alloc_file() enables
 816	 * f_mode with FMODE_CAN_READ.
 817	 */
 818	return -EINVAL;
 819}
 820
 821static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
 822			       size_t siz, loff_t *ppos)
 823{
 824	/* We need this handler such that alloc_file() enables
 825	 * f_mode with FMODE_CAN_WRITE.
 826	 */
 827	return -EINVAL;
 828}
 829
 830/* called for any extra memory-mapped regions (except initial) */
 831static void bpf_map_mmap_open(struct vm_area_struct *vma)
 832{
 833	struct bpf_map *map = vma->vm_file->private_data;
 834
 835	if (vma->vm_flags & VM_MAYWRITE)
 836		bpf_map_write_active_inc(map);
 837}
 838
  839/* called for all unmapped memory regions (including initial) */
 840static void bpf_map_mmap_close(struct vm_area_struct *vma)
 841{
 842	struct bpf_map *map = vma->vm_file->private_data;
 843
 844	if (vma->vm_flags & VM_MAYWRITE)
 845		bpf_map_write_active_dec(map);
 846}
 847
 848static const struct vm_operations_struct bpf_map_default_vmops = {
 849	.open		= bpf_map_mmap_open,
 850	.close		= bpf_map_mmap_close,
 851};
 852
 853static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
 854{
 855	struct bpf_map *map = filp->private_data;
 856	int err;
 857
 858	if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
 859		return -ENOTSUPP;
 860
 861	if (!(vma->vm_flags & VM_SHARED))
 862		return -EINVAL;
 863
 864	mutex_lock(&map->freeze_mutex);
 865
 866	if (vma->vm_flags & VM_WRITE) {
 867		if (map->frozen) {
 868			err = -EPERM;
 869			goto out;
 870		}
 871		/* map is meant to be read-only, so do not allow mapping as
 872		 * writable, because it's possible to leak a writable page
  873		 * reference and allow user space to keep modifying it after
  874		 * freezing, while the verifier assumes the contents do not change
 875		 */
 876		if (map->map_flags & BPF_F_RDONLY_PROG) {
 877			err = -EACCES;
 878			goto out;
 879		}
 880	}
 881
 882	/* set default open/close callbacks */
 883	vma->vm_ops = &bpf_map_default_vmops;
 884	vma->vm_private_data = map;
 885	vma->vm_flags &= ~VM_MAYEXEC;
 886	if (!(vma->vm_flags & VM_WRITE))
 887		/* disallow re-mapping with PROT_WRITE */
 888		vma->vm_flags &= ~VM_MAYWRITE;
 889
 890	err = map->ops->map_mmap(map, vma);
 891	if (err)
 892		goto out;
 893
 894	if (vma->vm_flags & VM_MAYWRITE)
 895		bpf_map_write_active_inc(map);
 896out:
 897	mutex_unlock(&map->freeze_mutex);
 898	return err;
 899}
 900
 901static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
 902{
 903	struct bpf_map *map = filp->private_data;
 904
 905	if (map->ops->map_poll)
 906		return map->ops->map_poll(map, filp, pts);
 907
 908	return EPOLLERR;
 909}
 910
 911const struct file_operations bpf_map_fops = {
 912#ifdef CONFIG_PROC_FS
 913	.show_fdinfo	= bpf_map_show_fdinfo,
 914#endif
 915	.release	= bpf_map_release,
 916	.read		= bpf_dummy_read,
 917	.write		= bpf_dummy_write,
 918	.mmap		= bpf_map_mmap,
 919	.poll		= bpf_map_poll,
 920};
 921
 922int bpf_map_new_fd(struct bpf_map *map, int flags)
 923{
 924	int ret;
 925
 926	ret = security_bpf_map(map, OPEN_FMODE(flags));
 927	if (ret < 0)
 928		return ret;
 929
 930	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
 931				flags | O_CLOEXEC);
 932}
 933
 934int bpf_get_file_flag(int flags)
 935{
 936	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
 937		return -EINVAL;
 938	if (flags & BPF_F_RDONLY)
 939		return O_RDONLY;
 940	if (flags & BPF_F_WRONLY)
 941		return O_WRONLY;
 942	return O_RDWR;
 943}
 944
  945/* helper macro to check that unused fields of 'union bpf_attr' are zero */
 946#define CHECK_ATTR(CMD) \
 947	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
 948		   sizeof(attr->CMD##_LAST_FIELD), 0, \
 949		   sizeof(*attr) - \
 950		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
 951		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
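
/* For example, CHECK_ATTR(BPF_MAP_CREATE), whose last used field is
 * map_extra (see BPF_MAP_CREATE_LAST_FIELD below), evaluates to true when
 * any byte of 'union bpf_attr' past 'map_extra' is non-zero, i.e. the caller
 * filled in fields this command does not consume.
 */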
 952
 953/* dst and src must have at least "size" number of bytes.
 954 * Return strlen on success and < 0 on error.
 955 */
 956int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
 957{
 958	const char *end = src + size;
 959	const char *orig_src = src;
 960
 961	memset(dst, 0, size);
 962	/* Copy all isalnum(), '_' and '.' chars. */
 963	while (src < end && *src) {
 964		if (!isalnum(*src) &&
 965		    *src != '_' && *src != '.')
 966			return -EINVAL;
 967		*dst++ = *src++;
 968	}
 969
 970	/* No '\0' found in "size" number of bytes */
 971	if (src == end)
 972		return -EINVAL;
 973
 974	return src - orig_src;
 975}
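
/* Example: with size == 16, src == "my_map" copies six characters and
 * returns 6; a name containing '-' fails with -EINVAL, as does a name with
 * no terminating NUL within the first 16 bytes.
 */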
 976
 977int map_check_no_btf(const struct bpf_map *map,
 978		     const struct btf *btf,
 979		     const struct btf_type *key_type,
 980		     const struct btf_type *value_type)
 981{
 982	return -ENOTSUPP;
 983}
 984
 985static int map_check_btf(struct bpf_map *map, const struct btf *btf,
 986			 u32 btf_key_id, u32 btf_value_id)
 987{
 988	const struct btf_type *key_type, *value_type;
 989	u32 key_size, value_size;
 990	int ret = 0;
 991
 992	/* Some maps allow key to be unspecified. */
 993	if (btf_key_id) {
 994		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
 995		if (!key_type || key_size != map->key_size)
 996			return -EINVAL;
 997	} else {
 998		key_type = btf_type_by_id(btf, 0);
 999		if (!map->ops->map_check_btf)
1000			return -EINVAL;
1001	}
1002
1003	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
1004	if (!value_type || value_size != map->value_size)
1005		return -EINVAL;
1006
1007	map->record = btf_parse_fields(btf, value_type,
1008				       BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD,
1009				       map->value_size);
1010	if (!IS_ERR_OR_NULL(map->record)) {
1011		int i;
1012
1013		if (!bpf_capable()) {
1014			ret = -EPERM;
1015			goto free_map_tab;
1016		}
1017		if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) {
1018			ret = -EACCES;
1019			goto free_map_tab;
1020		}
1021		for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) {
1022			switch (map->record->field_mask & (1 << i)) {
1023			case 0:
1024				continue;
1025			case BPF_SPIN_LOCK:
1026				if (map->map_type != BPF_MAP_TYPE_HASH &&
1027				    map->map_type != BPF_MAP_TYPE_ARRAY &&
1028				    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
1029				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
1030				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
1031				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
1032				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
1033					ret = -EOPNOTSUPP;
1034					goto free_map_tab;
1035				}
1036				break;
1037			case BPF_TIMER:
1038				if (map->map_type != BPF_MAP_TYPE_HASH &&
1039				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1040				    map->map_type != BPF_MAP_TYPE_ARRAY) {
1041					ret = -EOPNOTSUPP;
1042					goto free_map_tab;
1043				}
1044				break;
1045			case BPF_KPTR_UNREF:
1046			case BPF_KPTR_REF:
1047				if (map->map_type != BPF_MAP_TYPE_HASH &&
1048				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1049				    map->map_type != BPF_MAP_TYPE_ARRAY &&
1050				    map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY) {
1051					ret = -EOPNOTSUPP;
1052					goto free_map_tab;
1053				}
1054				break;
1055			case BPF_LIST_HEAD:
1056				if (map->map_type != BPF_MAP_TYPE_HASH &&
1057				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1058				    map->map_type != BPF_MAP_TYPE_ARRAY) {
1059					ret = -EOPNOTSUPP;
1060					goto free_map_tab;
1061				}
1062				break;
1063			default:
1064				/* Fail if map_type checks are missing for a field type */
1065				ret = -EOPNOTSUPP;
1066				goto free_map_tab;
1067			}
1068		}
1069	}
1070
1071	ret = btf_check_and_fixup_fields(btf, map->record);
1072	if (ret < 0)
1073		goto free_map_tab;
1074
1075	if (map->ops->map_check_btf) {
1076		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
1077		if (ret < 0)
1078			goto free_map_tab;
1079	}
1080
1081	return ret;
1082free_map_tab:
1083	bpf_map_free_record(map);
1084	return ret;
1085}
1086
1087#define BPF_MAP_CREATE_LAST_FIELD map_extra
1088/* called via syscall */
1089static int map_create(union bpf_attr *attr)
1090{
1091	int numa_node = bpf_map_attr_numa_node(attr);
1092	struct btf_field_offs *foffs;
1093	struct bpf_map *map;
1094	int f_flags;
1095	int err;
1096
1097	err = CHECK_ATTR(BPF_MAP_CREATE);
1098	if (err)
1099		return -EINVAL;
1100
1101	if (attr->btf_vmlinux_value_type_id) {
1102		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
1103		    attr->btf_key_type_id || attr->btf_value_type_id)
1104			return -EINVAL;
1105	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
1106		return -EINVAL;
1107	}
1108
1109	if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
1110	    attr->map_extra != 0)
1111		return -EINVAL;
1112
1113	f_flags = bpf_get_file_flag(attr->map_flags);
1114	if (f_flags < 0)
1115		return f_flags;
1116
1117	if (numa_node != NUMA_NO_NODE &&
1118	    ((unsigned int)numa_node >= nr_node_ids ||
1119	     !node_online(numa_node)))
1120		return -EINVAL;
1121
1122	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
1123	map = find_and_alloc_map(attr);
1124	if (IS_ERR(map))
1125		return PTR_ERR(map);
1126
1127	err = bpf_obj_name_cpy(map->name, attr->map_name,
1128			       sizeof(attr->map_name));
1129	if (err < 0)
1130		goto free_map;
1131
1132	atomic64_set(&map->refcnt, 1);
1133	atomic64_set(&map->usercnt, 1);
1134	mutex_init(&map->freeze_mutex);
1135	spin_lock_init(&map->owner.lock);
1136
1137	if (attr->btf_key_type_id || attr->btf_value_type_id ||
 1138	    /* Even if the map's value is a kernel struct,
 1139	     * the bpf_prog.o must have BTF to begin with
 1140	     * to figure out the corresponding kernel
 1141	     * counterpart.  Thus, attr->btf_fd has
1142	     * to be valid also.
1143	     */
1144	    attr->btf_vmlinux_value_type_id) {
1145		struct btf *btf;
1146
1147		btf = btf_get_by_fd(attr->btf_fd);
1148		if (IS_ERR(btf)) {
1149			err = PTR_ERR(btf);
1150			goto free_map;
1151		}
1152		if (btf_is_kernel(btf)) {
1153			btf_put(btf);
1154			err = -EACCES;
1155			goto free_map;
1156		}
1157		map->btf = btf;
1158
1159		if (attr->btf_value_type_id) {
1160			err = map_check_btf(map, btf, attr->btf_key_type_id,
1161					    attr->btf_value_type_id);
1162			if (err)
1163				goto free_map;
1164		}
1165
1166		map->btf_key_type_id = attr->btf_key_type_id;
1167		map->btf_value_type_id = attr->btf_value_type_id;
1168		map->btf_vmlinux_value_type_id =
1169			attr->btf_vmlinux_value_type_id;
1170	}
1171
1172
1173	foffs = btf_parse_field_offs(map->record);
1174	if (IS_ERR(foffs)) {
1175		err = PTR_ERR(foffs);
1176		goto free_map;
1177	}
1178	map->field_offs = foffs;
1179
1180	err = security_bpf_map_alloc(map);
1181	if (err)
1182		goto free_map_field_offs;
1183
1184	err = bpf_map_alloc_id(map);
1185	if (err)
1186		goto free_map_sec;
1187
1188	bpf_map_save_memcg(map);
1189
1190	err = bpf_map_new_fd(map, f_flags);
1191	if (err < 0) {
1192		/* failed to allocate fd.
1193		 * bpf_map_put_with_uref() is needed because the above
1194		 * bpf_map_alloc_id() has published the map
1195		 * to the userspace and the userspace may
1196		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
1197		 */
1198		bpf_map_put_with_uref(map);
1199		return err;
1200	}
1201
1202	return err;
1203
1204free_map_sec:
1205	security_bpf_map_free(map);
1206free_map_field_offs:
1207	kfree(map->field_offs);
1208free_map:
1209	btf_put(map->btf);
1210	map->ops->map_free(map);
1211	return err;
1212}
1213
1214/* if error is returned, fd is released.
1215 * On success caller should complete fd access with matching fdput()
1216 */
1217struct bpf_map *__bpf_map_get(struct fd f)
1218{
1219	if (!f.file)
1220		return ERR_PTR(-EBADF);
1221	if (f.file->f_op != &bpf_map_fops) {
1222		fdput(f);
1223		return ERR_PTR(-EINVAL);
1224	}
1225
1226	return f.file->private_data;
1227}
1228
1229void bpf_map_inc(struct bpf_map *map)
1230{
1231	atomic64_inc(&map->refcnt);
1232}
1233EXPORT_SYMBOL_GPL(bpf_map_inc);
1234
1235void bpf_map_inc_with_uref(struct bpf_map *map)
1236{
1237	atomic64_inc(&map->refcnt);
1238	atomic64_inc(&map->usercnt);
1239}
1240EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);
1241
1242struct bpf_map *bpf_map_get(u32 ufd)
1243{
1244	struct fd f = fdget(ufd);
1245	struct bpf_map *map;
1246
1247	map = __bpf_map_get(f);
1248	if (IS_ERR(map))
1249		return map;
1250
1251	bpf_map_inc(map);
1252	fdput(f);
1253
1254	return map;
1255}
1256EXPORT_SYMBOL(bpf_map_get);
1257
1258struct bpf_map *bpf_map_get_with_uref(u32 ufd)
1259{
1260	struct fd f = fdget(ufd);
1261	struct bpf_map *map;
1262
1263	map = __bpf_map_get(f);
1264	if (IS_ERR(map))
1265		return map;
1266
1267	bpf_map_inc_with_uref(map);
1268	fdput(f);
1269
1270	return map;
1271}
1272
1273/* map_idr_lock should have been held */
1274static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
1275{
1276	int refold;
1277
1278	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
1279	if (!refold)
1280		return ERR_PTR(-ENOENT);
1281	if (uref)
1282		atomic64_inc(&map->usercnt);
1283
1284	return map;
1285}
1286
1287struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
1288{
1289	spin_lock_bh(&map_idr_lock);
1290	map = __bpf_map_inc_not_zero(map, false);
1291	spin_unlock_bh(&map_idr_lock);
1292
1293	return map;
1294}
1295EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);
1296
1297int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
1298{
1299	return -ENOTSUPP;
1300}
1301
1302static void *__bpf_copy_key(void __user *ukey, u64 key_size)
1303{
1304	if (key_size)
1305		return vmemdup_user(ukey, key_size);
1306
1307	if (ukey)
1308		return ERR_PTR(-EINVAL);
1309
1310	return NULL;
1311}
1312
1313static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
1314{
1315	if (key_size)
1316		return kvmemdup_bpfptr(ukey, key_size);
1317
1318	if (!bpfptr_is_null(ukey))
1319		return ERR_PTR(-EINVAL);
1320
1321	return NULL;
1322}
1323
1324/* last field in 'union bpf_attr' used by this command */
1325#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
1326
1327static int map_lookup_elem(union bpf_attr *attr)
1328{
1329	void __user *ukey = u64_to_user_ptr(attr->key);
1330	void __user *uvalue = u64_to_user_ptr(attr->value);
1331	int ufd = attr->map_fd;
1332	struct bpf_map *map;
1333	void *key, *value;
1334	u32 value_size;
1335	struct fd f;
1336	int err;
1337
1338	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
1339		return -EINVAL;
1340
1341	if (attr->flags & ~BPF_F_LOCK)
1342		return -EINVAL;
1343
1344	f = fdget(ufd);
1345	map = __bpf_map_get(f);
1346	if (IS_ERR(map))
1347		return PTR_ERR(map);
1348	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1349		err = -EPERM;
1350		goto err_put;
1351	}
1352
1353	if ((attr->flags & BPF_F_LOCK) &&
1354	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1355		err = -EINVAL;
1356		goto err_put;
1357	}
1358
1359	key = __bpf_copy_key(ukey, map->key_size);
1360	if (IS_ERR(key)) {
1361		err = PTR_ERR(key);
1362		goto err_put;
1363	}
1364
1365	value_size = bpf_map_value_size(map);
1366
1367	err = -ENOMEM;
1368	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1369	if (!value)
1370		goto free_key;
1371
1372	if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
1373		if (copy_from_user(value, uvalue, value_size))
1374			err = -EFAULT;
1375		else
1376			err = bpf_map_copy_value(map, key, value, attr->flags);
1377		goto free_value;
1378	}
1379
1380	err = bpf_map_copy_value(map, key, value, attr->flags);
1381	if (err)
1382		goto free_value;
1383
1384	err = -EFAULT;
1385	if (copy_to_user(uvalue, value, value_size) != 0)
1386		goto free_value;
1387
1388	err = 0;
1389
1390free_value:
1391	kvfree(value);
1392free_key:
1393	kvfree(key);
1394err_put:
1395	fdput(f);
1396	return err;
1397}
1398
1399
1400#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
1401
1402static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
1403{
1404	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
1405	bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
1406	int ufd = attr->map_fd;
1407	struct bpf_map *map;
1408	void *key, *value;
1409	u32 value_size;
1410	struct fd f;
1411	int err;
1412
1413	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
1414		return -EINVAL;
1415
1416	f = fdget(ufd);
1417	map = __bpf_map_get(f);
1418	if (IS_ERR(map))
1419		return PTR_ERR(map);
1420	bpf_map_write_active_inc(map);
1421	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1422		err = -EPERM;
1423		goto err_put;
1424	}
1425
1426	if ((attr->flags & BPF_F_LOCK) &&
1427	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1428		err = -EINVAL;
1429		goto err_put;
1430	}
1431
1432	key = ___bpf_copy_key(ukey, map->key_size);
1433	if (IS_ERR(key)) {
1434		err = PTR_ERR(key);
1435		goto err_put;
1436	}
1437
1438	value_size = bpf_map_value_size(map);
1439	value = kvmemdup_bpfptr(uvalue, value_size);
1440	if (IS_ERR(value)) {
1441		err = PTR_ERR(value);
1442		goto free_key;
1443	}
1444
1445	err = bpf_map_update_value(map, f.file, key, value, attr->flags);
1446
1447	kvfree(value);
1448free_key:
1449	kvfree(key);
1450err_put:
1451	bpf_map_write_active_dec(map);
1452	fdput(f);
1453	return err;
1454}
1455
1456#define BPF_MAP_DELETE_ELEM_LAST_FIELD key
1457
1458static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
1459{
1460	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
1461	int ufd = attr->map_fd;
1462	struct bpf_map *map;
1463	struct fd f;
1464	void *key;
1465	int err;
1466
1467	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
1468		return -EINVAL;
1469
1470	f = fdget(ufd);
1471	map = __bpf_map_get(f);
1472	if (IS_ERR(map))
1473		return PTR_ERR(map);
1474	bpf_map_write_active_inc(map);
1475	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1476		err = -EPERM;
1477		goto err_put;
1478	}
1479
1480	key = ___bpf_copy_key(ukey, map->key_size);
1481	if (IS_ERR(key)) {
1482		err = PTR_ERR(key);
1483		goto err_put;
1484	}
1485
1486	if (bpf_map_is_dev_bound(map)) {
1487		err = bpf_map_offload_delete_elem(map, key);
1488		goto out;
1489	} else if (IS_FD_PROG_ARRAY(map) ||
1490		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
1491		/* These maps require sleepable context */
1492		err = map->ops->map_delete_elem(map, key);
1493		goto out;
1494	}
1495
1496	bpf_disable_instrumentation();
1497	rcu_read_lock();
1498	err = map->ops->map_delete_elem(map, key);
1499	rcu_read_unlock();
1500	bpf_enable_instrumentation();
1501	maybe_wait_bpf_programs(map);
1502out:
1503	kvfree(key);
1504err_put:
1505	bpf_map_write_active_dec(map);
1506	fdput(f);
1507	return err;
1508}
1509
1510/* last field in 'union bpf_attr' used by this command */
1511#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
1512
1513static int map_get_next_key(union bpf_attr *attr)
1514{
1515	void __user *ukey = u64_to_user_ptr(attr->key);
1516	void __user *unext_key = u64_to_user_ptr(attr->next_key);
1517	int ufd = attr->map_fd;
1518	struct bpf_map *map;
1519	void *key, *next_key;
1520	struct fd f;
1521	int err;
1522
1523	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
1524		return -EINVAL;
1525
1526	f = fdget(ufd);
1527	map = __bpf_map_get(f);
1528	if (IS_ERR(map))
1529		return PTR_ERR(map);
1530	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1531		err = -EPERM;
1532		goto err_put;
1533	}
1534
1535	if (ukey) {
1536		key = __bpf_copy_key(ukey, map->key_size);
1537		if (IS_ERR(key)) {
1538			err = PTR_ERR(key);
1539			goto err_put;
1540		}
1541	} else {
1542		key = NULL;
1543	}
1544
1545	err = -ENOMEM;
1546	next_key = kvmalloc(map->key_size, GFP_USER);
1547	if (!next_key)
1548		goto free_key;
1549
1550	if (bpf_map_is_dev_bound(map)) {
1551		err = bpf_map_offload_get_next_key(map, key, next_key);
1552		goto out;
1553	}
1554
1555	rcu_read_lock();
1556	err = map->ops->map_get_next_key(map, key, next_key);
1557	rcu_read_unlock();
1558out:
1559	if (err)
1560		goto free_next_key;
1561
1562	err = -EFAULT;
1563	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
1564		goto free_next_key;
1565
1566	err = 0;
1567
1568free_next_key:
1569	kvfree(next_key);
1570free_key:
1571	kvfree(key);
1572err_put:
1573	fdput(f);
1574	return err;
1575}
1576
1577int generic_map_delete_batch(struct bpf_map *map,
1578			     const union bpf_attr *attr,
1579			     union bpf_attr __user *uattr)
1580{
1581	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1582	u32 cp, max_count;
1583	int err = 0;
1584	void *key;
1585
1586	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1587		return -EINVAL;
1588
1589	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1590	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1591		return -EINVAL;
1592	}
1593
1594	max_count = attr->batch.count;
1595	if (!max_count)
1596		return 0;
1597
1598	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1599	if (!key)
1600		return -ENOMEM;
1601
1602	for (cp = 0; cp < max_count; cp++) {
1603		err = -EFAULT;
1604		if (copy_from_user(key, keys + cp * map->key_size,
1605				   map->key_size))
1606			break;
1607
1608		if (bpf_map_is_dev_bound(map)) {
1609			err = bpf_map_offload_delete_elem(map, key);
1610			break;
1611		}
1612
1613		bpf_disable_instrumentation();
1614		rcu_read_lock();
1615		err = map->ops->map_delete_elem(map, key);
1616		rcu_read_unlock();
1617		bpf_enable_instrumentation();
1618		if (err)
1619			break;
1620		cond_resched();
1621	}
1622	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1623		err = -EFAULT;
1624
1625	kvfree(key);
1626
1627	maybe_wait_bpf_programs(map);
1628	return err;
1629}
1630
1631int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
1632			     const union bpf_attr *attr,
1633			     union bpf_attr __user *uattr)
1634{
1635	void __user *values = u64_to_user_ptr(attr->batch.values);
1636	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1637	u32 value_size, cp, max_count;
1638	void *key, *value;
1639	int err = 0;
1640
1641	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1642		return -EINVAL;
1643
1644	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1645	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1646		return -EINVAL;
1647	}
1648
1649	value_size = bpf_map_value_size(map);
1650
1651	max_count = attr->batch.count;
1652	if (!max_count)
1653		return 0;
1654
1655	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1656	if (!key)
1657		return -ENOMEM;
1658
1659	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1660	if (!value) {
1661		kvfree(key);
1662		return -ENOMEM;
1663	}
1664
1665	for (cp = 0; cp < max_count; cp++) {
1666		err = -EFAULT;
1667		if (copy_from_user(key, keys + cp * map->key_size,
1668		    map->key_size) ||
1669		    copy_from_user(value, values + cp * value_size, value_size))
1670			break;
1671
1672		err = bpf_map_update_value(map, map_file, key, value,
1673					   attr->batch.elem_flags);
1674
1675		if (err)
1676			break;
1677		cond_resched();
1678	}
1679
1680	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1681		err = -EFAULT;
1682
1683	kvfree(value);
1684	kvfree(key);
1685	return err;
1686}
1687
1688#define MAP_LOOKUP_RETRIES 3
1689
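/* A batched lookup can race with deletions: map_get_next_key() may return a
 * key whose element is gone by the time bpf_map_copy_value() runs, yielding
 * -ENOENT. In that case the loop below retries from the same prev_key up to
 * MAP_LOOKUP_RETRIES times before giving up with -EINTR.
 */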
1690int generic_map_lookup_batch(struct bpf_map *map,
1691				    const union bpf_attr *attr,
1692				    union bpf_attr __user *uattr)
1693{
1694	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
1695	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1696	void __user *values = u64_to_user_ptr(attr->batch.values);
1697	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1698	void *buf, *buf_prevkey, *prev_key, *key, *value;
1699	int err, retry = MAP_LOOKUP_RETRIES;
1700	u32 value_size, cp, max_count;
1701
1702	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1703		return -EINVAL;
1704
1705	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1706	    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
1707		return -EINVAL;
1708
1709	value_size = bpf_map_value_size(map);
1710
1711	max_count = attr->batch.count;
1712	if (!max_count)
1713		return 0;
1714
1715	if (put_user(0, &uattr->batch.count))
1716		return -EFAULT;
1717
1718	buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1719	if (!buf_prevkey)
1720		return -ENOMEM;
1721
1722	buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
1723	if (!buf) {
1724		kvfree(buf_prevkey);
1725		return -ENOMEM;
1726	}
1727
1728	err = -EFAULT;
1729	prev_key = NULL;
1730	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
1731		goto free_buf;
1732	key = buf;
1733	value = key + map->key_size;
1734	if (ubatch)
1735		prev_key = buf_prevkey;
1736
1737	for (cp = 0; cp < max_count;) {
1738		rcu_read_lock();
1739		err = map->ops->map_get_next_key(map, prev_key, key);
1740		rcu_read_unlock();
1741		if (err)
1742			break;
1743		err = bpf_map_copy_value(map, key, value,
1744					 attr->batch.elem_flags);
1745
1746		if (err == -ENOENT) {
1747			if (retry) {
1748				retry--;
1749				continue;
1750			}
1751			err = -EINTR;
1752			break;
1753		}
1754
1755		if (err)
1756			goto free_buf;
1757
1758		if (copy_to_user(keys + cp * map->key_size, key,
1759				 map->key_size)) {
1760			err = -EFAULT;
1761			goto free_buf;
1762		}
1763		if (copy_to_user(values + cp * value_size, value, value_size)) {
1764			err = -EFAULT;
1765			goto free_buf;
1766		}
1767
1768		if (!prev_key)
1769			prev_key = buf_prevkey;
1770
1771		swap(prev_key, key);
1772		retry = MAP_LOOKUP_RETRIES;
1773		cp++;
1774		cond_resched();
1775	}
1776
1777	if (err == -EFAULT)
1778		goto free_buf;
1779
1780	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
1781		    (cp && copy_to_user(uobatch, prev_key, map->key_size))))
1782		err = -EFAULT;
1783
1784free_buf:
1785	kvfree(buf_prevkey);
1786	kvfree(buf);
1787	return err;
1788}
1789
1790#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags
1791
1792static int map_lookup_and_delete_elem(union bpf_attr *attr)
1793{
1794	void __user *ukey = u64_to_user_ptr(attr->key);
1795	void __user *uvalue = u64_to_user_ptr(attr->value);
1796	int ufd = attr->map_fd;
1797	struct bpf_map *map;
1798	void *key, *value;
1799	u32 value_size;
1800	struct fd f;
1801	int err;
1802
1803	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
1804		return -EINVAL;
1805
1806	if (attr->flags & ~BPF_F_LOCK)
1807		return -EINVAL;
1808
1809	f = fdget(ufd);
1810	map = __bpf_map_get(f);
1811	if (IS_ERR(map))
1812		return PTR_ERR(map);
1813	bpf_map_write_active_inc(map);
1814	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
1815	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1816		err = -EPERM;
1817		goto err_put;
1818	}
1819
1820	if (attr->flags &&
1821	    (map->map_type == BPF_MAP_TYPE_QUEUE ||
1822	     map->map_type == BPF_MAP_TYPE_STACK)) {
1823		err = -EINVAL;
1824		goto err_put;
1825	}
1826
1827	if ((attr->flags & BPF_F_LOCK) &&
1828	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1829		err = -EINVAL;
1830		goto err_put;
1831	}
1832
1833	key = __bpf_copy_key(ukey, map->key_size);
1834	if (IS_ERR(key)) {
1835		err = PTR_ERR(key);
1836		goto err_put;
1837	}
1838
1839	value_size = bpf_map_value_size(map);
1840
1841	err = -ENOMEM;
1842	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1843	if (!value)
1844		goto free_key;
1845
1846	err = -ENOTSUPP;
1847	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
1848	    map->map_type == BPF_MAP_TYPE_STACK) {
1849		err = map->ops->map_pop_elem(map, value);
1850	} else if (map->map_type == BPF_MAP_TYPE_HASH ||
1851		   map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
1852		   map->map_type == BPF_MAP_TYPE_LRU_HASH ||
1853		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
1854		if (!bpf_map_is_dev_bound(map)) {
1855			bpf_disable_instrumentation();
1856			rcu_read_lock();
1857			err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
1858			rcu_read_unlock();
1859			bpf_enable_instrumentation();
1860		}
1861	}
1862
1863	if (err)
1864		goto free_value;
1865
1866	if (copy_to_user(uvalue, value, value_size) != 0) {
1867		err = -EFAULT;
1868		goto free_value;
1869	}
1870
1871	err = 0;
1872
1873free_value:
1874	kvfree(value);
1875free_key:
1876	kvfree(key);
1877err_put:
1878	bpf_map_write_active_dec(map);
1879	fdput(f);
1880	return err;
1881}
1882
1883#define BPF_MAP_FREEZE_LAST_FIELD map_fd
1884
1885static int map_freeze(const union bpf_attr *attr)
1886{
1887	int err = 0, ufd = attr->map_fd;
1888	struct bpf_map *map;
1889	struct fd f;
1890
1891	if (CHECK_ATTR(BPF_MAP_FREEZE))
1892		return -EINVAL;
1893
1894	f = fdget(ufd);
1895	map = __bpf_map_get(f);
1896	if (IS_ERR(map))
1897		return PTR_ERR(map);
1898
1899	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) {
1900		fdput(f);
1901		return -ENOTSUPP;
1902	}
1903
1904	mutex_lock(&map->freeze_mutex);
1905	if (bpf_map_write_active(map)) {
1906		err = -EBUSY;
1907		goto err_put;
1908	}
1909	if (READ_ONCE(map->frozen)) {
1910		err = -EBUSY;
1911		goto err_put;
1912	}
1913	if (!bpf_capable()) {
1914		err = -EPERM;
1915		goto err_put;
1916	}
1917
1918	WRITE_ONCE(map->frozen, true);
1919err_put:
1920	mutex_unlock(&map->freeze_mutex);
1921	fdput(f);
1922	return err;
1923}
1924
1925static const struct bpf_prog_ops * const bpf_prog_types[] = {
1926#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
1927	[_id] = & _name ## _prog_ops,
1928#define BPF_MAP_TYPE(_id, _ops)
1929#define BPF_LINK_TYPE(_id, _name)
1930#include <linux/bpf_types.h>
1931#undef BPF_PROG_TYPE
1932#undef BPF_MAP_TYPE
1933#undef BPF_LINK_TYPE
1934};
1935
1936static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
1937{
1938	const struct bpf_prog_ops *ops;
1939
1940	if (type >= ARRAY_SIZE(bpf_prog_types))
1941		return -EINVAL;
1942	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
1943	ops = bpf_prog_types[type];
1944	if (!ops)
1945		return -EINVAL;
1946
1947	if (!bpf_prog_is_dev_bound(prog->aux))
1948		prog->aux->ops = ops;
1949	else
1950		prog->aux->ops = &bpf_offload_prog_ops;
1951	prog->type = type;
1952	return 0;
1953}
1954
1955enum bpf_audit {
1956	BPF_AUDIT_LOAD,
1957	BPF_AUDIT_UNLOAD,
1958	BPF_AUDIT_MAX,
1959};
1960
1961static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
1962	[BPF_AUDIT_LOAD]   = "LOAD",
1963	[BPF_AUDIT_UNLOAD] = "UNLOAD",
1964};
1965
1966static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
1967{
1968	struct audit_context *ctx = NULL;
1969	struct audit_buffer *ab;
1970
1971	if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
1972		return;
1973	if (audit_enabled == AUDIT_OFF)
1974		return;
1975	if (!in_irq() && !irqs_disabled())
1976		ctx = audit_context();
1977	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
1978	if (unlikely(!ab))
1979		return;
1980	audit_log_format(ab, "prog-id=%u op=%s",
1981			 prog->aux->id, bpf_audit_str[op]);
1982	audit_log_end(ab);
1983}
1984
1985static int bpf_prog_alloc_id(struct bpf_prog *prog)
1986{
1987	int id;
1988
1989	idr_preload(GFP_KERNEL);
1990	spin_lock_bh(&prog_idr_lock);
1991	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
1992	if (id > 0)
1993		prog->aux->id = id;
1994	spin_unlock_bh(&prog_idr_lock);
1995	idr_preload_end();
1996
1997	/* id is in [1, INT_MAX) */
1998	if (WARN_ON_ONCE(!id))
1999		return -ENOSPC;
2000
2001	return id > 0 ? 0 : id;
2002}
2003
2004void bpf_prog_free_id(struct bpf_prog *prog)
2005{
2006	unsigned long flags;
2007
2008	/* cBPF to eBPF migrations are currently not in the idr store.
2009	 * Offloaded programs are removed from the store when their device
2010	 * disappears - even if someone grabs an fd to them they are unusable,
2011	 * simply waiting for refcnt to drop to be freed.
2012	 */
2013	if (!prog->aux->id)
2014		return;
2015
2016	spin_lock_irqsave(&prog_idr_lock, flags);
2017	idr_remove(&prog_idr, prog->aux->id);
2018	prog->aux->id = 0;
2019	spin_unlock_irqrestore(&prog_idr_lock, flags);
2020}
2021
2022static void __bpf_prog_put_rcu(struct rcu_head *rcu)
2023{
2024	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
2025
2026	kvfree(aux->func_info);
2027	kfree(aux->func_info_aux);
2028	free_uid(aux->user);
2029	security_bpf_prog_free(aux);
2030	bpf_prog_free(aux->prog);
2031}
2032
2033static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
2034{
2035	bpf_prog_kallsyms_del_all(prog);
2036	btf_put(prog->aux->btf);
2037	kvfree(prog->aux->jited_linfo);
2038	kvfree(prog->aux->linfo);
2039	kfree(prog->aux->kfunc_tab);
2040	if (prog->aux->attach_btf)
2041		btf_put(prog->aux->attach_btf);
2042
2043	if (deferred) {
2044		if (prog->aux->sleepable)
2045			call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
2046		else
2047			call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
2048	} else {
2049		__bpf_prog_put_rcu(&prog->aux->rcu);
2050	}
2051}
2052
2053static void bpf_prog_put_deferred(struct work_struct *work)
2054{
2055	struct bpf_prog_aux *aux;
2056	struct bpf_prog *prog;
2057
2058	aux = container_of(work, struct bpf_prog_aux, work);
2059	prog = aux->prog;
2060	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
2061	bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
2062	bpf_prog_free_id(prog);
2063	__bpf_prog_put_noref(prog, true);
2064}
2065
2066static void __bpf_prog_put(struct bpf_prog *prog)
2067{
2068	struct bpf_prog_aux *aux = prog->aux;
2069
2070	if (atomic64_dec_and_test(&aux->refcnt)) {
2071		if (in_irq() || irqs_disabled()) {
2072			INIT_WORK(&aux->work, bpf_prog_put_deferred);
2073			schedule_work(&aux->work);
2074		} else {
2075			bpf_prog_put_deferred(&aux->work);
2076		}
2077	}
2078}
2079
2080void bpf_prog_put(struct bpf_prog *prog)
2081{
2082	__bpf_prog_put(prog);
2083}
2084EXPORT_SYMBOL_GPL(bpf_prog_put);
2085
2086static int bpf_prog_release(struct inode *inode, struct file *filp)
2087{
2088	struct bpf_prog *prog = filp->private_data;
2089
2090	bpf_prog_put(prog);
2091	return 0;
2092}
2093
2094struct bpf_prog_kstats {
2095	u64 nsecs;
2096	u64 cnt;
2097	u64 misses;
2098};
2099
2100void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog)
2101{
2102	struct bpf_prog_stats *stats;
2103	unsigned int flags;
2104
2105	stats = this_cpu_ptr(prog->stats);
2106	flags = u64_stats_update_begin_irqsave(&stats->syncp);
2107	u64_stats_inc(&stats->misses);
2108	u64_stats_update_end_irqrestore(&stats->syncp, flags);
2109}
2110
2111static void bpf_prog_get_stats(const struct bpf_prog *prog,
2112			       struct bpf_prog_kstats *stats)
2113{
2114	u64 nsecs = 0, cnt = 0, misses = 0;
2115	int cpu;
2116
2117	for_each_possible_cpu(cpu) {
2118		const struct bpf_prog_stats *st;
2119		unsigned int start;
2120		u64 tnsecs, tcnt, tmisses;
2121
2122		st = per_cpu_ptr(prog->stats, cpu);
2123		do {
2124			start = u64_stats_fetch_begin(&st->syncp);
2125			tnsecs = u64_stats_read(&st->nsecs);
2126			tcnt = u64_stats_read(&st->cnt);
2127			tmisses = u64_stats_read(&st->misses);
2128		} while (u64_stats_fetch_retry(&st->syncp, start));
2129		nsecs += tnsecs;
2130		cnt += tcnt;
2131		misses += tmisses;
2132	}
2133	stats->nsecs = nsecs;
2134	stats->cnt = cnt;
2135	stats->misses = misses;
2136}
2137
2138#ifdef CONFIG_PROC_FS
2139static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
2140{
2141	const struct bpf_prog *prog = filp->private_data;
2142	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2143	struct bpf_prog_kstats stats;
2144
2145	bpf_prog_get_stats(prog, &stats);
2146	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2147	seq_printf(m,
2148		   "prog_type:\t%u\n"
2149		   "prog_jited:\t%u\n"
2150		   "prog_tag:\t%s\n"
2151		   "memlock:\t%llu\n"
2152		   "prog_id:\t%u\n"
2153		   "run_time_ns:\t%llu\n"
2154		   "run_cnt:\t%llu\n"
2155		   "recursion_misses:\t%llu\n"
2156		   "verified_insns:\t%u\n",
2157		   prog->type,
2158		   prog->jited,
2159		   prog_tag,
2160		   prog->pages * 1ULL << PAGE_SHIFT,
2161		   prog->aux->id,
2162		   stats.nsecs,
2163		   stats.cnt,
2164		   stats.misses,
2165		   prog->aux->verified_insns);
2166}
2167#endif
2168
2169const struct file_operations bpf_prog_fops = {
2170#ifdef CONFIG_PROC_FS
2171	.show_fdinfo	= bpf_prog_show_fdinfo,
2172#endif
2173	.release	= bpf_prog_release,
2174	.read		= bpf_dummy_read,
2175	.write		= bpf_dummy_write,
2176};
2177
2178int bpf_prog_new_fd(struct bpf_prog *prog)
2179{
2180	int ret;
2181
2182	ret = security_bpf_prog(prog);
2183	if (ret < 0)
2184		return ret;
2185
2186	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
2187				O_RDWR | O_CLOEXEC);
2188}
2189
2190static struct bpf_prog *____bpf_prog_get(struct fd f)
2191{
2192	if (!f.file)
2193		return ERR_PTR(-EBADF);
2194	if (f.file->f_op != &bpf_prog_fops) {
2195		fdput(f);
2196		return ERR_PTR(-EINVAL);
2197	}
2198
2199	return f.file->private_data;
2200}
2201
2202void bpf_prog_add(struct bpf_prog *prog, int i)
2203{
2204	atomic64_add(i, &prog->aux->refcnt);
2205}
2206EXPORT_SYMBOL_GPL(bpf_prog_add);
2207
2208void bpf_prog_sub(struct bpf_prog *prog, int i)
2209{
2210	/* Only to be used for undoing previous bpf_prog_add() in some
2211	 * error path. We still know that another entity in our call
2212	 * path holds a reference to the program, thus atomic_sub() can
2213	 * be safely used in such cases!
2214	 */
2215	WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
2216}
2217EXPORT_SYMBOL_GPL(bpf_prog_sub);
2218
2219void bpf_prog_inc(struct bpf_prog *prog)
2220{
2221	atomic64_inc(&prog->aux->refcnt);
2222}
2223EXPORT_SYMBOL_GPL(bpf_prog_inc);
2224
2225/* prog_idr_lock should have been held */
2226struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
2227{
2228	int refold;
2229
2230	refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
2231
2232	if (!refold)
2233		return ERR_PTR(-ENOENT);
2234
2235	return prog;
2236}
2237EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
2238
2239bool bpf_prog_get_ok(struct bpf_prog *prog,
2240			    enum bpf_prog_type *attach_type, bool attach_drv)
2241{
2242	/* not an attachment, just a refcount inc, always allow */
2243	if (!attach_type)
2244		return true;
2245
2246	if (prog->type != *attach_type)
2247		return false;
2248	if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
2249		return false;
2250
2251	return true;
2252}
2253
2254static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
2255				       bool attach_drv)
2256{
2257	struct fd f = fdget(ufd);
2258	struct bpf_prog *prog;
2259
2260	prog = ____bpf_prog_get(f);
2261	if (IS_ERR(prog))
2262		return prog;
2263	if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
2264		prog = ERR_PTR(-EINVAL);
2265		goto out;
2266	}
2267
2268	bpf_prog_inc(prog);
2269out:
2270	fdput(f);
2271	return prog;
2272}
2273
2274struct bpf_prog *bpf_prog_get(u32 ufd)
2275{
2276	return __bpf_prog_get(ufd, NULL, false);
2277}
2278
2279struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
2280				       bool attach_drv)
2281{
2282	return __bpf_prog_get(ufd, &type, attach_drv);
2283}
2284EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
2285
2286/* Initially all BPF programs could be loaded w/o specifying
2287 * expected_attach_type. Later for some of them specifying expected_attach_type
2288 * at load time became required so that program could be validated properly.
2289 * Programs of types that are allowed to be loaded both w/ and w/o (for
2290 * backward compatibility) expected_attach_type should have the default attach
2291 * type assigned to expected_attach_type for the latter case, so that it can be
2292 * validated later at attach time.
2293 *
2294 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
2295 * prog type requires it but has some attach types that have to be backward
2296 * compatible.
2297 */
2298static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
2299{
2300	switch (attr->prog_type) {
2301	case BPF_PROG_TYPE_CGROUP_SOCK:
2302		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
2303		 * exist so checking for non-zero is the way to go here.
2304		 */
2305		if (!attr->expected_attach_type)
2306			attr->expected_attach_type =
2307				BPF_CGROUP_INET_SOCK_CREATE;
2308		break;
2309	case BPF_PROG_TYPE_SK_REUSEPORT:
2310		if (!attr->expected_attach_type)
2311			attr->expected_attach_type =
2312				BPF_SK_REUSEPORT_SELECT;
2313		break;
2314	}
2315}
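/* Example (illustrative): an older loader that leaves expected_attach_type at
 * zero for a BPF_PROG_TYPE_CGROUP_SOCK program still loads; the fixup above
 * makes that equivalent to explicitly requesting
 *
 *	attr.prog_type            = BPF_PROG_TYPE_CGROUP_SOCK;
 *	attr.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE;
 *
 * so that bpf_prog_load_check_attach() below always sees a concrete type.
 */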
2316
2317static int
2318bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
2319			   enum bpf_attach_type expected_attach_type,
2320			   struct btf *attach_btf, u32 btf_id,
2321			   struct bpf_prog *dst_prog)
2322{
2323	if (btf_id) {
2324		if (btf_id > BTF_MAX_TYPE)
2325			return -EINVAL;
2326
2327		if (!attach_btf && !dst_prog)
2328			return -EINVAL;
2329
2330		switch (prog_type) {
2331		case BPF_PROG_TYPE_TRACING:
2332		case BPF_PROG_TYPE_LSM:
2333		case BPF_PROG_TYPE_STRUCT_OPS:
2334		case BPF_PROG_TYPE_EXT:
2335			break;
2336		default:
2337			return -EINVAL;
2338		}
2339	}
2340
2341	if (attach_btf && (!btf_id || dst_prog))
2342		return -EINVAL;
2343
2344	if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING &&
2345	    prog_type != BPF_PROG_TYPE_EXT)
2346		return -EINVAL;
2347
2348	switch (prog_type) {
2349	case BPF_PROG_TYPE_CGROUP_SOCK:
2350		switch (expected_attach_type) {
2351		case BPF_CGROUP_INET_SOCK_CREATE:
2352		case BPF_CGROUP_INET_SOCK_RELEASE:
2353		case BPF_CGROUP_INET4_POST_BIND:
2354		case BPF_CGROUP_INET6_POST_BIND:
2355			return 0;
2356		default:
2357			return -EINVAL;
2358		}
2359	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2360		switch (expected_attach_type) {
2361		case BPF_CGROUP_INET4_BIND:
2362		case BPF_CGROUP_INET6_BIND:
2363		case BPF_CGROUP_INET4_CONNECT:
2364		case BPF_CGROUP_INET6_CONNECT:
2365		case BPF_CGROUP_INET4_GETPEERNAME:
2366		case BPF_CGROUP_INET6_GETPEERNAME:
2367		case BPF_CGROUP_INET4_GETSOCKNAME:
2368		case BPF_CGROUP_INET6_GETSOCKNAME:
2369		case BPF_CGROUP_UDP4_SENDMSG:
2370		case BPF_CGROUP_UDP6_SENDMSG:
2371		case BPF_CGROUP_UDP4_RECVMSG:
2372		case BPF_CGROUP_UDP6_RECVMSG:
2373			return 0;
2374		default:
2375			return -EINVAL;
2376		}
2377	case BPF_PROG_TYPE_CGROUP_SKB:
2378		switch (expected_attach_type) {
2379		case BPF_CGROUP_INET_INGRESS:
2380		case BPF_CGROUP_INET_EGRESS:
2381			return 0;
2382		default:
2383			return -EINVAL;
2384		}
2385	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2386		switch (expected_attach_type) {
2387		case BPF_CGROUP_SETSOCKOPT:
2388		case BPF_CGROUP_GETSOCKOPT:
2389			return 0;
2390		default:
2391			return -EINVAL;
2392		}
2393	case BPF_PROG_TYPE_SK_LOOKUP:
2394		if (expected_attach_type == BPF_SK_LOOKUP)
2395			return 0;
2396		return -EINVAL;
2397	case BPF_PROG_TYPE_SK_REUSEPORT:
2398		switch (expected_attach_type) {
2399		case BPF_SK_REUSEPORT_SELECT:
2400		case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE:
2401			return 0;
2402		default:
2403			return -EINVAL;
2404		}
2405	case BPF_PROG_TYPE_SYSCALL:
2406	case BPF_PROG_TYPE_EXT:
2407		if (expected_attach_type)
2408			return -EINVAL;
2409		fallthrough;
2410	default:
2411		return 0;
2412	}
2413}
2414
2415static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
2416{
2417	switch (prog_type) {
2418	case BPF_PROG_TYPE_SCHED_CLS:
2419	case BPF_PROG_TYPE_SCHED_ACT:
2420	case BPF_PROG_TYPE_XDP:
2421	case BPF_PROG_TYPE_LWT_IN:
2422	case BPF_PROG_TYPE_LWT_OUT:
2423	case BPF_PROG_TYPE_LWT_XMIT:
2424	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2425	case BPF_PROG_TYPE_SK_SKB:
2426	case BPF_PROG_TYPE_SK_MSG:
2427	case BPF_PROG_TYPE_LIRC_MODE2:
2428	case BPF_PROG_TYPE_FLOW_DISSECTOR:
2429	case BPF_PROG_TYPE_CGROUP_DEVICE:
2430	case BPF_PROG_TYPE_CGROUP_SOCK:
2431	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2432	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2433	case BPF_PROG_TYPE_CGROUP_SYSCTL:
2434	case BPF_PROG_TYPE_SOCK_OPS:
2435	case BPF_PROG_TYPE_EXT: /* extends any prog */
2436		return true;
2437	case BPF_PROG_TYPE_CGROUP_SKB:
2438		/* always unpriv */
2439	case BPF_PROG_TYPE_SK_REUSEPORT:
2440		/* equivalent to SOCKET_FILTER. need CAP_BPF only */
2441	default:
2442		return false;
2443	}
2444}
2445
2446static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
2447{
2448	switch (prog_type) {
2449	case BPF_PROG_TYPE_KPROBE:
2450	case BPF_PROG_TYPE_TRACEPOINT:
2451	case BPF_PROG_TYPE_PERF_EVENT:
2452	case BPF_PROG_TYPE_RAW_TRACEPOINT:
2453	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2454	case BPF_PROG_TYPE_TRACING:
2455	case BPF_PROG_TYPE_LSM:
2456	case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
2457	case BPF_PROG_TYPE_EXT: /* extends any prog */
2458		return true;
2459	default:
2460		return false;
2461	}
2462}
2463
2464/* last field in 'union bpf_attr' used by this command */
2465#define	BPF_PROG_LOAD_LAST_FIELD core_relo_rec_size
2466
2467static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr)
2468{
2469	enum bpf_prog_type type = attr->prog_type;
2470	struct bpf_prog *prog, *dst_prog = NULL;
2471	struct btf *attach_btf = NULL;
2472	int err;
2473	char license[128];
2474	bool is_gpl;
2475
2476	if (CHECK_ATTR(BPF_PROG_LOAD))
2477		return -EINVAL;
2478
2479	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
2480				 BPF_F_ANY_ALIGNMENT |
2481				 BPF_F_TEST_STATE_FREQ |
2482				 BPF_F_SLEEPABLE |
2483				 BPF_F_TEST_RND_HI32 |
2484				 BPF_F_XDP_HAS_FRAGS))
2485		return -EINVAL;
2486
2487	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
2488	    (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
2489	    !bpf_capable())
2490		return -EPERM;
2491
2492	/* copy eBPF program license from user space */
2493	if (strncpy_from_bpfptr(license,
2494				make_bpfptr(attr->license, uattr.is_kernel),
2495				sizeof(license) - 1) < 0)
2496		return -EFAULT;
2497	license[sizeof(license) - 1] = 0;
2498
2499	/* eBPF programs must be GPL compatible to use GPL-ed functions */
2500	is_gpl = license_is_gpl_compatible(license);
2501
2502	if (attr->insn_cnt == 0 ||
2503	    attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
2504		return -E2BIG;
2505	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
2506	    type != BPF_PROG_TYPE_CGROUP_SKB &&
2507	    !bpf_capable())
2508		return -EPERM;
2509
2510	if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
2511		return -EPERM;
2512	if (is_perfmon_prog_type(type) && !perfmon_capable())
2513		return -EPERM;
2514
2515	/* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog
2516	 * or btf, so we need to check which one it is
2517	 */
2518	if (attr->attach_prog_fd) {
2519		dst_prog = bpf_prog_get(attr->attach_prog_fd);
2520		if (IS_ERR(dst_prog)) {
2521			dst_prog = NULL;
2522			attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
2523			if (IS_ERR(attach_btf))
2524				return -EINVAL;
2525			if (!btf_is_kernel(attach_btf)) {
2526				/* attaching through specifying bpf_prog's BTF
2527				 * objects directly might be supported eventually
2528				 */
2529				btf_put(attach_btf);
2530				return -ENOTSUPP;
2531			}
2532		}
2533	} else if (attr->attach_btf_id) {
2534		/* fall back to vmlinux BTF, if BTF type ID is specified */
2535		attach_btf = bpf_get_btf_vmlinux();
2536		if (IS_ERR(attach_btf))
2537			return PTR_ERR(attach_btf);
2538		if (!attach_btf)
2539			return -EINVAL;
2540		btf_get(attach_btf);
2541	}
2542
2543	bpf_prog_load_fixup_attach_type(attr);
2544	if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
2545				       attach_btf, attr->attach_btf_id,
2546				       dst_prog)) {
2547		if (dst_prog)
2548			bpf_prog_put(dst_prog);
2549		if (attach_btf)
2550			btf_put(attach_btf);
2551		return -EINVAL;
2552	}
2553
2554	/* plain bpf_prog allocation */
2555	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
2556	if (!prog) {
2557		if (dst_prog)
2558			bpf_prog_put(dst_prog);
2559		if (attach_btf)
2560			btf_put(attach_btf);
2561		return -ENOMEM;
2562	}
2563
2564	prog->expected_attach_type = attr->expected_attach_type;
2565	prog->aux->attach_btf = attach_btf;
2566	prog->aux->attach_btf_id = attr->attach_btf_id;
2567	prog->aux->dst_prog = dst_prog;
2568	prog->aux->offload_requested = !!attr->prog_ifindex;
2569	prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
2570	prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS;
2571
2572	err = security_bpf_prog_alloc(prog->aux);
2573	if (err)
2574		goto free_prog;
2575
2576	prog->aux->user = get_current_user();
2577	prog->len = attr->insn_cnt;
2578
2579	err = -EFAULT;
2580	if (copy_from_bpfptr(prog->insns,
2581			     make_bpfptr(attr->insns, uattr.is_kernel),
2582			     bpf_prog_insn_size(prog)) != 0)
2583		goto free_prog_sec;
2584
2585	prog->orig_prog = NULL;
2586	prog->jited = 0;
2587
2588	atomic64_set(&prog->aux->refcnt, 1);
2589	prog->gpl_compatible = is_gpl ? 1 : 0;
2590
2591	if (bpf_prog_is_dev_bound(prog->aux)) {
2592		err = bpf_prog_offload_init(prog, attr);
2593		if (err)
2594			goto free_prog_sec;
2595	}
2596
2597	/* find program type: socket_filter vs tracing_filter */
2598	err = find_prog_type(type, prog);
2599	if (err < 0)
2600		goto free_prog_sec;
2601
2602	prog->aux->load_time = ktime_get_boottime_ns();
2603	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
2604			       sizeof(attr->prog_name));
2605	if (err < 0)
2606		goto free_prog_sec;
2607
2608	/* run eBPF verifier */
2609	err = bpf_check(&prog, attr, uattr);
2610	if (err < 0)
2611		goto free_used_maps;
2612
2613	prog = bpf_prog_select_runtime(prog, &err);
2614	if (err < 0)
2615		goto free_used_maps;
2616
2617	err = bpf_prog_alloc_id(prog);
2618	if (err)
2619		goto free_used_maps;
2620
2621	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
2622	 * effectively publicly exposed. However, retrieving via
2623	 * bpf_prog_get_fd_by_id() will take another reference,
2624	 * therefore it cannot be gone underneath us.
2625	 *
2626	 * Only for the time /after/ successful bpf_prog_new_fd()
2627	 * and before returning to userspace, we might just hold
2628	 * one reference and any parallel close on that fd could
2629	 * rip everything out. Hence, below notifications must
2630	 * happen before bpf_prog_new_fd().
2631	 *
2632	 * Also, any failure handling from this point onwards must
2633	 * be using bpf_prog_put() given the program is exposed.
2634	 */
2635	bpf_prog_kallsyms_add(prog);
2636	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
2637	bpf_audit_prog(prog, BPF_AUDIT_LOAD);
2638
2639	err = bpf_prog_new_fd(prog);
2640	if (err < 0)
2641		bpf_prog_put(prog);
2642	return err;
2643
2644free_used_maps:
2645	/* In case we have subprogs, we need to wait for a grace
2646	 * period before we can tear down JIT memory since symbols
2647	 * are already exposed under kallsyms.
2648	 */
2649	__bpf_prog_put_noref(prog, prog->aux->func_cnt);
2650	return err;
2651free_prog_sec:
2652	free_uid(prog->aux->user);
2653	security_bpf_prog_free(prog->aux);
2654free_prog:
2655	if (prog->aux->attach_btf)
2656		btf_put(prog->aux->attach_btf);
2657	bpf_prog_free(prog);
2658	return err;
2659}
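/* Example (illustrative user-space sketch): a minimal BPF_PROG_LOAD of a
 * two-instruction socket filter ("r0 = 0; exit"), the smallest program the
 * checks above will accept.  The bpf() wrapper and headers are as in the
 * earlier sketch; the optional log_buf/log_level fields for verifier output
 * are left out.
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 0 },
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 *	union bpf_attr attr;
 *	int prog_fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *	attr.insns     = (__u64)(unsigned long)insns;
 *	attr.insn_cnt  = 2;
 *	attr.license   = (__u64)(unsigned long)"GPL";
 *	prog_fd = bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
 */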
2660
2661#define BPF_OBJ_LAST_FIELD file_flags
2662
2663static int bpf_obj_pin(const union bpf_attr *attr)
2664{
2665	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
2666		return -EINVAL;
2667
2668	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
2669}
2670
2671static int bpf_obj_get(const union bpf_attr *attr)
2672{
2673	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
2674	    attr->file_flags & ~BPF_OBJ_FLAG_MASK)
2675		return -EINVAL;
2676
2677	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
2678				attr->file_flags);
2679}
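/* Example (illustrative sketch): pinning a program in the bpf filesystem and
 * re-opening it later by path, exercising the two handlers above.  The mount
 * point /sys/fs/bpf and the name "my_prog" are assumptions; prog_fd comes
 * from an earlier BPF_PROG_LOAD.
 *
 *	union bpf_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_prog";
 *	attr.bpf_fd   = prog_fd;
 *	bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_prog";
 *	prog_fd = bpf(BPF_OBJ_GET, &attr, sizeof(attr));
 */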
2680
2681void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2682		   const struct bpf_link_ops *ops, struct bpf_prog *prog)
2683{
2684	atomic64_set(&link->refcnt, 1);
2685	link->type = type;
2686	link->id = 0;
2687	link->ops = ops;
2688	link->prog = prog;
2689}
2690
2691static void bpf_link_free_id(int id)
2692{
2693	if (!id)
2694		return;
2695
2696	spin_lock_bh(&link_idr_lock);
2697	idr_remove(&link_idr, id);
2698	spin_unlock_bh(&link_idr_lock);
2699}
2700
2701/* Clean up bpf_link and corresponding anon_inode file and FD. After
2702 * anon_inode is created, bpf_link can't be just kfree()'d due to deferred
2703 * anon_inode's release() call. This helper marks bpf_link as
2704 * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt
2705 * is not decremented, it's the responsibility of the calling code that failed
2706 * to complete bpf_link initialization.
2707 */
2708void bpf_link_cleanup(struct bpf_link_primer *primer)
2709{
2710	primer->link->prog = NULL;
2711	bpf_link_free_id(primer->id);
2712	fput(primer->file);
2713	put_unused_fd(primer->fd);
2714}
2715
2716void bpf_link_inc(struct bpf_link *link)
2717{
2718	atomic64_inc(&link->refcnt);
2719}
2720
2721/* bpf_link_free is guaranteed to be called from process context */
2722static void bpf_link_free(struct bpf_link *link)
2723{
2724	bpf_link_free_id(link->id);
2725	if (link->prog) {
2726		/* detach BPF program, clean up used resources */
2727		link->ops->release(link);
2728		bpf_prog_put(link->prog);
2729	}
2730	/* free bpf_link and its containing memory */
2731	link->ops->dealloc(link);
2732}
2733
2734static void bpf_link_put_deferred(struct work_struct *work)
2735{
2736	struct bpf_link *link = container_of(work, struct bpf_link, work);
2737
2738	bpf_link_free(link);
2739}
2740
2741/* bpf_link_put can be called from atomic context, but ensures that resources
2742 * are freed from process context
2743 */
2744void bpf_link_put(struct bpf_link *link)
2745{
2746	if (!atomic64_dec_and_test(&link->refcnt))
2747		return;
2748
2749	if (in_atomic()) {
2750		INIT_WORK(&link->work, bpf_link_put_deferred);
2751		schedule_work(&link->work);
2752	} else {
2753		bpf_link_free(link);
2754	}
2755}
2756EXPORT_SYMBOL(bpf_link_put);
2757
2758static int bpf_link_release(struct inode *inode, struct file *filp)
2759{
2760	struct bpf_link *link = filp->private_data;
2761
2762	bpf_link_put(link);
2763	return 0;
2764}
2765
2766#ifdef CONFIG_PROC_FS
2767#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
2768#define BPF_MAP_TYPE(_id, _ops)
2769#define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
2770static const char *bpf_link_type_strs[] = {
2771	[BPF_LINK_TYPE_UNSPEC] = "<invalid>",
2772#include <linux/bpf_types.h>
2773};
2774#undef BPF_PROG_TYPE
2775#undef BPF_MAP_TYPE
2776#undef BPF_LINK_TYPE
2777
2778static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
2779{
2780	const struct bpf_link *link = filp->private_data;
2781	const struct bpf_prog *prog = link->prog;
2782	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2783
2784	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2785	seq_printf(m,
2786		   "link_type:\t%s\n"
2787		   "link_id:\t%u\n"
2788		   "prog_tag:\t%s\n"
2789		   "prog_id:\t%u\n",
2790		   bpf_link_type_strs[link->type],
2791		   link->id,
2792		   prog_tag,
2793		   prog->aux->id);
2794	if (link->ops->show_fdinfo)
2795		link->ops->show_fdinfo(link, m);
2796}
2797#endif
2798
2799static const struct file_operations bpf_link_fops = {
2800#ifdef CONFIG_PROC_FS
2801	.show_fdinfo	= bpf_link_show_fdinfo,
2802#endif
2803	.release	= bpf_link_release,
2804	.read		= bpf_dummy_read,
2805	.write		= bpf_dummy_write,
2806};
2807
2808static int bpf_link_alloc_id(struct bpf_link *link)
2809{
2810	int id;
2811
2812	idr_preload(GFP_KERNEL);
2813	spin_lock_bh(&link_idr_lock);
2814	id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
2815	spin_unlock_bh(&link_idr_lock);
2816	idr_preload_end();
2817
2818	return id;
2819}
2820
2821/* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
2822 * reserving unused FD and allocating ID from link_idr. This is to be paired
2823 * with bpf_link_settle() to install FD and ID and expose bpf_link to
2824 * user-space, if bpf_link is successfully attached. If not, bpf_link and
2825 * pre-allocated resources are to be freed with a bpf_link_cleanup() call. All the
2826 * transient state is passed around in struct bpf_link_primer.
2827 * This is the preferred way to create and initialize bpf_link, especially when
2828 * there are complicated and expensive operations in between creating bpf_link
2829 * itself and attaching it to a BPF hook. By using bpf_link_prime() and
2830 * bpf_link_settle(), kernel code using bpf_link doesn't have to perform
2831 * expensive (and potentially failing) rollback operations in the rare case
2832 * that the file, FD, or ID can't be allocated.
2833 */
2834int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
2835{
2836	struct file *file;
2837	int fd, id;
2838
2839	fd = get_unused_fd_flags(O_CLOEXEC);
2840	if (fd < 0)
2841		return fd;
2842
2843
2844	id = bpf_link_alloc_id(link);
2845	if (id < 0) {
2846		put_unused_fd(fd);
2847		return id;
2848	}
2849
2850	file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
2851	if (IS_ERR(file)) {
2852		bpf_link_free_id(id);
2853		put_unused_fd(fd);
2854		return PTR_ERR(file);
2855	}
2856
2857	primer->link = link;
2858	primer->file = file;
2859	primer->fd = fd;
2860	primer->id = id;
2861	return 0;
2862}
2863
2864int bpf_link_settle(struct bpf_link_primer *primer)
2865{
2866	/* make bpf_link fetchable by ID */
2867	spin_lock_bh(&link_idr_lock);
2868	primer->link->id = primer->id;
2869	spin_unlock_bh(&link_idr_lock);
2870	/* make bpf_link fetchable by FD */
2871	fd_install(primer->fd, primer->file);
2872	/* pass through installed FD */
2873	return primer->fd;
2874}
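/* Example (illustrative kernel-side sketch): the intended prime/attach/settle
 * sequence for a link type.  bpf_foo_link, bpf_foo_link_lops and foo_attach()
 * are made-up names used only to show the ordering; real users of this
 * pattern are e.g. bpf_tracing_prog_attach() and bpf_raw_tp_link_attach()
 * below.
 *
 *	struct bpf_link_primer primer;
 *	struct bpf_foo_link *link;
 *	int err;
 *
 *	link = kzalloc(sizeof(*link), GFP_USER);
 *	if (!link)
 *		return -ENOMEM;
 *	bpf_link_init(&link->link, BPF_LINK_TYPE_UNSPEC, &bpf_foo_link_lops, prog);
 *
 *	err = bpf_link_prime(&link->link, &primer);
 *	if (err) {
 *		kfree(link);			// not primed yet, plain kfree is fine
 *		return err;
 *	}
 *
 *	err = foo_attach(link);			// the expensive, possibly failing step
 *	if (err) {
 *		bpf_link_cleanup(&primer);	// drops FD/ID/file; link freed via dealloc
 *		return err;
 *	}
 *
 *	return bpf_link_settle(&primer);	// install FD and publish ID
 */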
2875
2876int bpf_link_new_fd(struct bpf_link *link)
2877{
2878	return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
2879}
2880
2881struct bpf_link *bpf_link_get_from_fd(u32 ufd)
2882{
2883	struct fd f = fdget(ufd);
2884	struct bpf_link *link;
2885
2886	if (!f.file)
2887		return ERR_PTR(-EBADF);
2888	if (f.file->f_op != &bpf_link_fops) {
2889		fdput(f);
2890		return ERR_PTR(-EINVAL);
2891	}
2892
2893	link = f.file->private_data;
2894	bpf_link_inc(link);
2895	fdput(f);
2896
2897	return link;
2898}
2899EXPORT_SYMBOL(bpf_link_get_from_fd);
2900
2901static void bpf_tracing_link_release(struct bpf_link *link)
2902{
2903	struct bpf_tracing_link *tr_link =
2904		container_of(link, struct bpf_tracing_link, link.link);
2905
2906	WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link,
2907						tr_link->trampoline));
2908
2909	bpf_trampoline_put(tr_link->trampoline);
2910
2911	/* tgt_prog is NULL if target is a kernel function */
2912	if (tr_link->tgt_prog)
2913		bpf_prog_put(tr_link->tgt_prog);
2914}
2915
2916static void bpf_tracing_link_dealloc(struct bpf_link *link)
2917{
2918	struct bpf_tracing_link *tr_link =
2919		container_of(link, struct bpf_tracing_link, link.link);
2920
2921	kfree(tr_link);
2922}
2923
2924static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
2925					 struct seq_file *seq)
2926{
2927	struct bpf_tracing_link *tr_link =
2928		container_of(link, struct bpf_tracing_link, link.link);
2929
2930	seq_printf(seq,
2931		   "attach_type:\t%d\n",
2932		   tr_link->attach_type);
2933}
2934
2935static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
2936					   struct bpf_link_info *info)
2937{
2938	struct bpf_tracing_link *tr_link =
2939		container_of(link, struct bpf_tracing_link, link.link);
2940
2941	info->tracing.attach_type = tr_link->attach_type;
2942	bpf_trampoline_unpack_key(tr_link->trampoline->key,
2943				  &info->tracing.target_obj_id,
2944				  &info->tracing.target_btf_id);
2945
2946	return 0;
2947}
2948
2949static const struct bpf_link_ops bpf_tracing_link_lops = {
2950	.release = bpf_tracing_link_release,
2951	.dealloc = bpf_tracing_link_dealloc,
2952	.show_fdinfo = bpf_tracing_link_show_fdinfo,
2953	.fill_link_info = bpf_tracing_link_fill_link_info,
2954};
2955
2956static int bpf_tracing_prog_attach(struct bpf_prog *prog,
2957				   int tgt_prog_fd,
2958				   u32 btf_id,
2959				   u64 bpf_cookie)
2960{
2961	struct bpf_link_primer link_primer;
2962	struct bpf_prog *tgt_prog = NULL;
2963	struct bpf_trampoline *tr = NULL;
2964	struct bpf_tracing_link *link;
2965	u64 key = 0;
2966	int err;
2967
2968	switch (prog->type) {
2969	case BPF_PROG_TYPE_TRACING:
2970		if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
2971		    prog->expected_attach_type != BPF_TRACE_FEXIT &&
2972		    prog->expected_attach_type != BPF_MODIFY_RETURN) {
2973			err = -EINVAL;
2974			goto out_put_prog;
2975		}
2976		break;
2977	case BPF_PROG_TYPE_EXT:
2978		if (prog->expected_attach_type != 0) {
2979			err = -EINVAL;
2980			goto out_put_prog;
2981		}
2982		break;
2983	case BPF_PROG_TYPE_LSM:
2984		if (prog->expected_attach_type != BPF_LSM_MAC) {
2985			err = -EINVAL;
2986			goto out_put_prog;
2987		}
2988		break;
2989	default:
2990		err = -EINVAL;
2991		goto out_put_prog;
2992	}
2993
2994	if (!!tgt_prog_fd != !!btf_id) {
2995		err = -EINVAL;
2996		goto out_put_prog;
2997	}
2998
2999	if (tgt_prog_fd) {
3000		/* For now we only allow new targets for BPF_PROG_TYPE_EXT */
3001		if (prog->type != BPF_PROG_TYPE_EXT) {
3002			err = -EINVAL;
3003			goto out_put_prog;
3004		}
3005
3006		tgt_prog = bpf_prog_get(tgt_prog_fd);
3007		if (IS_ERR(tgt_prog)) {
3008			err = PTR_ERR(tgt_prog);
3009			tgt_prog = NULL;
3010			goto out_put_prog;
3011		}
3012
3013		key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id);
3014	}
3015
3016	link = kzalloc(sizeof(*link), GFP_USER);
3017	if (!link) {
3018		err = -ENOMEM;
3019		goto out_put_prog;
3020	}
3021	bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING,
3022		      &bpf_tracing_link_lops, prog);
3023	link->attach_type = prog->expected_attach_type;
3024	link->link.cookie = bpf_cookie;
3025
3026	mutex_lock(&prog->aux->dst_mutex);
3027
3028	/* There are a few possible cases here:
3029	 *
3030	 * - if prog->aux->dst_trampoline is set, the program was just loaded
3031	 *   and not yet attached to anything, so we can use the values stored
3032	 *   in prog->aux
3033	 *
3034	 * - if prog->aux->dst_trampoline is NULL, the program has already been
3035	 *   attached to a target and its initial target was cleared (below)
3036	 *
3037	 * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
3038	 *   target_btf_id using the link_create API.
3039	 *
3040	 * - if tgt_prog == NULL, this function was called using the old
3041	 *   raw_tracepoint_open API, and we need a target from prog->aux
3042	 *
3043	 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program
3044	 *   was detached and is going for re-attachment.
3045	 */
3046	if (!prog->aux->dst_trampoline && !tgt_prog) {
3047		/*
3048		 * Allow re-attach for TRACING and LSM programs. If it's
3049		 * currently linked, bpf_trampoline_link_prog will fail.
3050		 * EXT programs need to specify tgt_prog_fd, so they
3051		 * re-attach in a separate code path.
3052		 */
3053		if (prog->type != BPF_PROG_TYPE_TRACING &&
3054		    prog->type != BPF_PROG_TYPE_LSM) {
3055			err = -EINVAL;
3056			goto out_unlock;
3057		}
3058		btf_id = prog->aux->attach_btf_id;
3059		key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id);
3060	}
3061
3062	if (!prog->aux->dst_trampoline ||
3063	    (key && key != prog->aux->dst_trampoline->key)) {
3064		/* If there is no saved target, or the specified target is
3065		 * different from the destination specified at load time, we
3066		 * need a new trampoline and a check for compatibility
3067		 */
3068		struct bpf_attach_target_info tgt_info = {};
3069
3070		err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
3071					      &tgt_info);
3072		if (err)
3073			goto out_unlock;
3074
3075		tr = bpf_trampoline_get(key, &tgt_info);
3076		if (!tr) {
3077			err = -ENOMEM;
3078			goto out_unlock;
3079		}
3080	} else {
3081		/* The caller didn't specify a target, or the target was the
3082		 * same as the destination supplied during program load. This
3083		 * means we can reuse the trampoline and reference from program
3084		 * load time, and there is no need to allocate a new one. This
3085		 * can only happen once for any program, as the saved values in
3086		 * prog->aux are cleared below.
3087		 */
3088		tr = prog->aux->dst_trampoline;
3089		tgt_prog = prog->aux->dst_prog;
3090	}
3091
3092	err = bpf_link_prime(&link->link.link, &link_primer);
3093	if (err)
3094		goto out_unlock;
3095
3096	err = bpf_trampoline_link_prog(&link->link, tr);
3097	if (err) {
3098		bpf_link_cleanup(&link_primer);
3099		link = NULL;
3100		goto out_unlock;
3101	}
3102
3103	link->tgt_prog = tgt_prog;
3104	link->trampoline = tr;
3105
3106	/* Always clear the trampoline and target prog from prog->aux to make
3107	 * sure the original attach destination is not kept alive after a
3108	 * program is (re-)attached to another target.
3109	 */
3110	if (prog->aux->dst_prog &&
3111	    (tgt_prog_fd || tr != prog->aux->dst_trampoline))
3112		/* got extra prog ref from syscall, or attaching to different prog */
3113		bpf_prog_put(prog->aux->dst_prog);
3114	if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
3115		/* we allocated a new trampoline, so free the old one */
3116		bpf_trampoline_put(prog->aux->dst_trampoline);
3117
3118	prog->aux->dst_prog = NULL;
3119	prog->aux->dst_trampoline = NULL;
3120	mutex_unlock(&prog->aux->dst_mutex);
3121
3122	return bpf_link_settle(&link_primer);
3123out_unlock:
3124	if (tr && tr != prog->aux->dst_trampoline)
3125		bpf_trampoline_put(tr);
3126	mutex_unlock(&prog->aux->dst_mutex);
3127	kfree(link);
3128out_put_prog:
3129	if (tgt_prog_fd && tgt_prog)
3130		bpf_prog_put(tgt_prog);
3131	return err;
3132}
3133
3134struct bpf_raw_tp_link {
3135	struct bpf_link link;
3136	struct bpf_raw_event_map *btp;
3137};
3138
3139static void bpf_raw_tp_link_release(struct bpf_link *link)
3140{
3141	struct bpf_raw_tp_link *raw_tp =
3142		container_of(link, struct bpf_raw_tp_link, link);
3143
3144	bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
3145	bpf_put_raw_tracepoint(raw_tp->btp);
3146}
3147
3148static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
3149{
3150	struct bpf_raw_tp_link *raw_tp =
3151		container_of(link, struct bpf_raw_tp_link, link);
3152
3153	kfree(raw_tp);
3154}
3155
3156static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
3157					struct seq_file *seq)
3158{
3159	struct bpf_raw_tp_link *raw_tp_link =
3160		container_of(link, struct bpf_raw_tp_link, link);
3161
3162	seq_printf(seq,
3163		   "tp_name:\t%s\n",
3164		   raw_tp_link->btp->tp->name);
3165}
3166
3167static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
3168					  struct bpf_link_info *info)
3169{
3170	struct bpf_raw_tp_link *raw_tp_link =
3171		container_of(link, struct bpf_raw_tp_link, link);
3172	char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
3173	const char *tp_name = raw_tp_link->btp->tp->name;
3174	u32 ulen = info->raw_tracepoint.tp_name_len;
3175	size_t tp_len = strlen(tp_name);
3176
3177	if (!ulen ^ !ubuf)
3178		return -EINVAL;
3179
3180	info->raw_tracepoint.tp_name_len = tp_len + 1;
3181
3182	if (!ubuf)
3183		return 0;
3184
3185	if (ulen >= tp_len + 1) {
3186		if (copy_to_user(ubuf, tp_name, tp_len + 1))
3187			return -EFAULT;
3188	} else {
3189		char zero = '\0';
3190
3191		if (copy_to_user(ubuf, tp_name, ulen - 1))
3192			return -EFAULT;
3193		if (put_user(zero, ubuf + ulen - 1))
3194			return -EFAULT;
3195		return -ENOSPC;
3196	}
3197
3198	return 0;
3199}
3200
3201static const struct bpf_link_ops bpf_raw_tp_link_lops = {
3202	.release = bpf_raw_tp_link_release,
3203	.dealloc = bpf_raw_tp_link_dealloc,
3204	.show_fdinfo = bpf_raw_tp_link_show_fdinfo,
3205	.fill_link_info = bpf_raw_tp_link_fill_link_info,
3206};
3207
3208#ifdef CONFIG_PERF_EVENTS
3209struct bpf_perf_link {
3210	struct bpf_link link;
3211	struct file *perf_file;
3212};
3213
3214static void bpf_perf_link_release(struct bpf_link *link)
3215{
3216	struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3217	struct perf_event *event = perf_link->perf_file->private_data;
3218
3219	perf_event_free_bpf_prog(event);
3220	fput(perf_link->perf_file);
3221}
3222
3223static void bpf_perf_link_dealloc(struct bpf_link *link)
3224{
3225	struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3226
3227	kfree(perf_link);
3228}
3229
3230static const struct bpf_link_ops bpf_perf_link_lops = {
3231	.release = bpf_perf_link_release,
3232	.dealloc = bpf_perf_link_dealloc,
3233};
3234
3235static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3236{
3237	struct bpf_link_primer link_primer;
3238	struct bpf_perf_link *link;
3239	struct perf_event *event;
3240	struct file *perf_file;
3241	int err;
3242
3243	if (attr->link_create.flags)
3244		return -EINVAL;
3245
3246	perf_file = perf_event_get(attr->link_create.target_fd);
3247	if (IS_ERR(perf_file))
3248		return PTR_ERR(perf_file);
3249
3250	link = kzalloc(sizeof(*link), GFP_USER);
3251	if (!link) {
3252		err = -ENOMEM;
3253		goto out_put_file;
3254	}
3255	bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog);
3256	link->perf_file = perf_file;
3257
3258	err = bpf_link_prime(&link->link, &link_primer);
3259	if (err) {
3260		kfree(link);
3261		goto out_put_file;
3262	}
3263
3264	event = perf_file->private_data;
3265	err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie);
3266	if (err) {
3267		bpf_link_cleanup(&link_primer);
3268		goto out_put_file;
3269	}
3270	/* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */
3271	bpf_prog_inc(prog);
3272
3273	return bpf_link_settle(&link_primer);
3274
3275out_put_file:
3276	fput(perf_file);
3277	return err;
3278}
3279#else
3280static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3281{
3282	return -EOPNOTSUPP;
3283}
3284#endif /* CONFIG_PERF_EVENTS */
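/* Example (illustrative user-space sketch): attaching an already loaded
 * perf-event-capable program (e.g. a kprobe program) to a perf event via
 * BPF_LINK_CREATE, which reaches bpf_perf_link_attach() above when
 * CONFIG_PERF_EVENTS is enabled.  prog_fd and perf_fd (from
 * perf_event_open()) are assumptions.
 *
 *	union bpf_attr attr;
 *	int link_fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.link_create.prog_fd     = prog_fd;
 *	attr.link_create.target_fd   = perf_fd;
 *	attr.link_create.attach_type = BPF_PERF_EVENT;
 *	link_fd = bpf(BPF_LINK_CREATE, &attr, sizeof(attr));
 */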
3285
3286static int bpf_raw_tp_link_attach(struct bpf_prog *prog,
3287				  const char __user *user_tp_name)
3288{
3289	struct bpf_link_primer link_primer;
3290	struct bpf_raw_tp_link *link;
3291	struct bpf_raw_event_map *btp;
3292	const char *tp_name;
3293	char buf[128];
3294	int err;
3295
3296	switch (prog->type) {
3297	case BPF_PROG_TYPE_TRACING:
3298	case BPF_PROG_TYPE_EXT:
3299	case BPF_PROG_TYPE_LSM:
3300		if (user_tp_name)
3301			/* The attach point for this category of programs
3302			 * should be specified via btf_id during program load.
3303			 */
3304			return -EINVAL;
3305		if (prog->type == BPF_PROG_TYPE_TRACING &&
3306		    prog->expected_attach_type == BPF_TRACE_RAW_TP) {
3307			tp_name = prog->aux->attach_func_name;
3308			break;
3309		}
3310		return bpf_tracing_prog_attach(prog, 0, 0, 0);
3311	case BPF_PROG_TYPE_RAW_TRACEPOINT:
3312	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
3313		if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0)
3314			return -EFAULT;
3315		buf[sizeof(buf) - 1] = 0;
3316		tp_name = buf;
3317		break;
3318	default:
3319		return -EINVAL;
3320	}
3321
3322	btp = bpf_get_raw_tracepoint(tp_name);
3323	if (!btp)
3324		return -ENOENT;
3325
3326	link = kzalloc(sizeof(*link), GFP_USER);
3327	if (!link) {
3328		err = -ENOMEM;
3329		goto out_put_btp;
3330	}
3331	bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
3332		      &bpf_raw_tp_link_lops, prog);
3333	link->btp = btp;
3334
3335	err = bpf_link_prime(&link->link, &link_primer);
3336	if (err) {
3337		kfree(link);
3338		goto out_put_btp;
3339	}
3340
3341	err = bpf_probe_register(link->btp, prog);
3342	if (err) {
3343		bpf_link_cleanup(&link_primer);
3344		goto out_put_btp;
3345	}
3346
3347	return bpf_link_settle(&link_primer);
3348
3349out_put_btp:
3350	bpf_put_raw_tracepoint(btp);
3351	return err;
3352}
3353
3354#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
3355
3356static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
3357{
3358	struct bpf_prog *prog;
3359	int fd;
3360
3361	if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
3362		return -EINVAL;
3363
3364	prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
3365	if (IS_ERR(prog))
3366		return PTR_ERR(prog);
3367
3368	fd = bpf_raw_tp_link_attach(prog, u64_to_user_ptr(attr->raw_tracepoint.name));
3369	if (fd < 0)
3370		bpf_prog_put(prog);
3371	return fd;
3372}
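/* Example (illustrative sketch): the legacy BPF_RAW_TRACEPOINT_OPEN command,
 * attaching a BPF_PROG_TYPE_RAW_TRACEPOINT program to a named tracepoint.
 * "sched_switch" is just an example name; it is resolved by
 * bpf_get_raw_tracepoint() in bpf_raw_tp_link_attach() above, and the
 * returned fd keeps the attachment alive.
 *
 *	union bpf_attr attr;
 *	int link_fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.raw_tracepoint.name    = (__u64)(unsigned long)"sched_switch";
 *	attr.raw_tracepoint.prog_fd = prog_fd;
 *	link_fd = bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
 */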
3373
3374static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
3375					     enum bpf_attach_type attach_type)
3376{
3377	switch (prog->type) {
3378	case BPF_PROG_TYPE_CGROUP_SOCK:
3379	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3380	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3381	case BPF_PROG_TYPE_SK_LOOKUP:
3382		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
3383	case BPF_PROG_TYPE_CGROUP_SKB:
3384		if (!capable(CAP_NET_ADMIN))
3385			/* cg-skb progs can be loaded by unpriv user.
3386			 * check permissions at attach time.
3387			 */
3388			return -EPERM;
3389		return prog->enforce_expected_attach_type &&
3390			prog->expected_attach_type != attach_type ?
3391			-EINVAL : 0;
3392	default:
3393		return 0;
3394	}
3395}
3396
3397static enum bpf_prog_type
3398attach_type_to_prog_type(enum bpf_attach_type attach_type)
3399{
3400	switch (attach_type) {
3401	case BPF_CGROUP_INET_INGRESS:
3402	case BPF_CGROUP_INET_EGRESS:
3403		return BPF_PROG_TYPE_CGROUP_SKB;
3404	case BPF_CGROUP_INET_SOCK_CREATE:
3405	case BPF_CGROUP_INET_SOCK_RELEASE:
3406	case BPF_CGROUP_INET4_POST_BIND:
3407	case BPF_CGROUP_INET6_POST_BIND:
3408		return BPF_PROG_TYPE_CGROUP_SOCK;
3409	case BPF_CGROUP_INET4_BIND:
3410	case BPF_CGROUP_INET6_BIND:
3411	case BPF_CGROUP_INET4_CONNECT:
3412	case BPF_CGROUP_INET6_CONNECT:
3413	case BPF_CGROUP_INET4_GETPEERNAME:
3414	case BPF_CGROUP_INET6_GETPEERNAME:
3415	case BPF_CGROUP_INET4_GETSOCKNAME:
3416	case BPF_CGROUP_INET6_GETSOCKNAME:
3417	case BPF_CGROUP_UDP4_SENDMSG:
3418	case BPF_CGROUP_UDP6_SENDMSG:
3419	case BPF_CGROUP_UDP4_RECVMSG:
3420	case BPF_CGROUP_UDP6_RECVMSG:
3421		return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
3422	case BPF_CGROUP_SOCK_OPS:
3423		return BPF_PROG_TYPE_SOCK_OPS;
3424	case BPF_CGROUP_DEVICE:
3425		return BPF_PROG_TYPE_CGROUP_DEVICE;
3426	case BPF_SK_MSG_VERDICT:
3427		return BPF_PROG_TYPE_SK_MSG;
3428	case BPF_SK_SKB_STREAM_PARSER:
3429	case BPF_SK_SKB_STREAM_VERDICT:
3430	case BPF_SK_SKB_VERDICT:
3431		return BPF_PROG_TYPE_SK_SKB;
3432	case BPF_LIRC_MODE2:
3433		return BPF_PROG_TYPE_LIRC_MODE2;
3434	case BPF_FLOW_DISSECTOR:
3435		return BPF_PROG_TYPE_FLOW_DISSECTOR;
3436	case BPF_CGROUP_SYSCTL:
3437		return BPF_PROG_TYPE_CGROUP_SYSCTL;
3438	case BPF_CGROUP_GETSOCKOPT:
3439	case BPF_CGROUP_SETSOCKOPT:
3440		return BPF_PROG_TYPE_CGROUP_SOCKOPT;
3441	case BPF_TRACE_ITER:
3442	case BPF_TRACE_RAW_TP:
3443	case BPF_TRACE_FENTRY:
3444	case BPF_TRACE_FEXIT:
3445	case BPF_MODIFY_RETURN:
3446		return BPF_PROG_TYPE_TRACING;
3447	case BPF_LSM_MAC:
3448		return BPF_PROG_TYPE_LSM;
3449	case BPF_SK_LOOKUP:
3450		return BPF_PROG_TYPE_SK_LOOKUP;
3451	case BPF_XDP:
3452		return BPF_PROG_TYPE_XDP;
3453	case BPF_LSM_CGROUP:
3454		return BPF_PROG_TYPE_LSM;
3455	default:
3456		return BPF_PROG_TYPE_UNSPEC;
3457	}
3458}
3459
3460#define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd
3461
3462#define BPF_F_ATTACH_MASK \
3463	(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE)
3464
3465static int bpf_prog_attach(const union bpf_attr *attr)
3466{
3467	enum bpf_prog_type ptype;
3468	struct bpf_prog *prog;
3469	int ret;
3470
3471	if (CHECK_ATTR(BPF_PROG_ATTACH))
3472		return -EINVAL;
3473
3474	if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
3475		return -EINVAL;
3476
3477	ptype = attach_type_to_prog_type(attr->attach_type);
3478	if (ptype == BPF_PROG_TYPE_UNSPEC)
3479		return -EINVAL;
3480
3481	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
3482	if (IS_ERR(prog))
3483		return PTR_ERR(prog);
3484
3485	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
3486		bpf_prog_put(prog);
3487		return -EINVAL;
3488	}
3489
3490	switch (ptype) {
3491	case BPF_PROG_TYPE_SK_SKB:
3492	case BPF_PROG_TYPE_SK_MSG:
3493		ret = sock_map_get_from_fd(attr, prog);
3494		break;
3495	case BPF_PROG_TYPE_LIRC_MODE2:
3496		ret = lirc_prog_attach(attr, prog);
3497		break;
3498	case BPF_PROG_TYPE_FLOW_DISSECTOR:
3499		ret = netns_bpf_prog_attach(attr, prog);
3500		break;
3501	case BPF_PROG_TYPE_CGROUP_DEVICE:
3502	case BPF_PROG_TYPE_CGROUP_SKB:
3503	case BPF_PROG_TYPE_CGROUP_SOCK:
3504	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3505	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3506	case BPF_PROG_TYPE_CGROUP_SYSCTL:
3507	case BPF_PROG_TYPE_SOCK_OPS:
3508	case BPF_PROG_TYPE_LSM:
3509		if (ptype == BPF_PROG_TYPE_LSM &&
3510		    prog->expected_attach_type != BPF_LSM_CGROUP)
3511			ret = -EINVAL;
3512		else
3513			ret = cgroup_bpf_prog_attach(attr, ptype, prog);
3514		break;
3515	default:
3516		ret = -EINVAL;
3517	}
3518
3519	if (ret)
3520		bpf_prog_put(prog);
3521	return ret;
3522}
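/* Example (illustrative sketch): attaching a cgroup skb program to a cgroup's
 * ingress hook with BPF_PROG_ATTACH.  cgroup_fd (an open cgroup v2 directory)
 * and prog_fd are assumptions; BPF_F_ALLOW_MULTI lets other programs coexist
 * on the same hook.
 *
 *	union bpf_attr attr;
 *	int err;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.target_fd     = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	err = bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
 */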
3523
3524#define BPF_PROG_DETACH_LAST_FIELD attach_type
3525
3526static int bpf_prog_detach(const union bpf_attr *attr)
3527{
3528	enum bpf_prog_type ptype;
3529
3530	if (CHECK_ATTR(BPF_PROG_DETACH))
3531		return -EINVAL;
3532
3533	ptype = attach_type_to_prog_type(attr->attach_type);
3534
3535	switch (ptype) {
3536	case BPF_PROG_TYPE_SK_MSG:
3537	case BPF_PROG_TYPE_SK_SKB:
3538		return sock_map_prog_detach(attr, ptype);
3539	case BPF_PROG_TYPE_LIRC_MODE2:
3540		return lirc_prog_detach(attr);
3541	case BPF_PROG_TYPE_FLOW_DISSECTOR:
3542		return netns_bpf_prog_detach(attr, ptype);
3543	case BPF_PROG_TYPE_CGROUP_DEVICE:
3544	case BPF_PROG_TYPE_CGROUP_SKB:
3545	case BPF_PROG_TYPE_CGROUP_SOCK:
3546	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3547	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3548	case BPF_PROG_TYPE_CGROUP_SYSCTL:
3549	case BPF_PROG_TYPE_SOCK_OPS:
3550	case BPF_PROG_TYPE_LSM:
3551		return cgroup_bpf_prog_detach(attr, ptype);
3552	default:
3553		return -EINVAL;
3554	}
3555}
3556
3557#define BPF_PROG_QUERY_LAST_FIELD query.prog_attach_flags
3558
3559static int bpf_prog_query(const union bpf_attr *attr,
3560			  union bpf_attr __user *uattr)
3561{
3562	if (!capable(CAP_NET_ADMIN))
3563		return -EPERM;
3564	if (CHECK_ATTR(BPF_PROG_QUERY))
3565		return -EINVAL;
3566	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
3567		return -EINVAL;
3568
3569	switch (attr->query.attach_type) {
3570	case BPF_CGROUP_INET_INGRESS:
3571	case BPF_CGROUP_INET_EGRESS:
3572	case BPF_CGROUP_INET_SOCK_CREATE:
3573	case BPF_CGROUP_INET_SOCK_RELEASE:
3574	case BPF_CGROUP_INET4_BIND:
3575	case BPF_CGROUP_INET6_BIND:
3576	case BPF_CGROUP_INET4_POST_BIND:
3577	case BPF_CGROUP_INET6_POST_BIND:
3578	case BPF_CGROUP_INET4_CONNECT:
3579	case BPF_CGROUP_INET6_CONNECT:
3580	case BPF_CGROUP_INET4_GETPEERNAME:
3581	case BPF_CGROUP_INET6_GETPEERNAME:
3582	case BPF_CGROUP_INET4_GETSOCKNAME:
3583	case BPF_CGROUP_INET6_GETSOCKNAME:
3584	case BPF_CGROUP_UDP4_SENDMSG:
3585	case BPF_CGROUP_UDP6_SENDMSG:
3586	case BPF_CGROUP_UDP4_RECVMSG:
3587	case BPF_CGROUP_UDP6_RECVMSG:
3588	case BPF_CGROUP_SOCK_OPS:
3589	case BPF_CGROUP_DEVICE:
3590	case BPF_CGROUP_SYSCTL:
3591	case BPF_CGROUP_GETSOCKOPT:
3592	case BPF_CGROUP_SETSOCKOPT:
3593	case BPF_LSM_CGROUP:
3594		return cgroup_bpf_prog_query(attr, uattr);
3595	case BPF_LIRC_MODE2:
3596		return lirc_prog_query(attr, uattr);
3597	case BPF_FLOW_DISSECTOR:
3598	case BPF_SK_LOOKUP:
3599		return netns_bpf_prog_query(attr, uattr);
3600	case BPF_SK_SKB_STREAM_PARSER:
3601	case BPF_SK_SKB_STREAM_VERDICT:
3602	case BPF_SK_MSG_VERDICT:
3603	case BPF_SK_SKB_VERDICT:
3604		return sock_map_bpf_prog_query(attr, uattr);
3605	default:
3606		return -EINVAL;
3607	}
3608}
3609
3610#define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size
3611
3612static int bpf_prog_test_run(const union bpf_attr *attr,
3613			     union bpf_attr __user *uattr)
3614{
3615	struct bpf_prog *prog;
3616	int ret = -ENOTSUPP;
3617
3618	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
3619		return -EINVAL;
3620
3621	if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
3622	    (!attr->test.ctx_size_in && attr->test.ctx_in))
3623		return -EINVAL;
3624
3625	if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
3626	    (!attr->test.ctx_size_out && attr->test.ctx_out))
3627		return -EINVAL;
3628
3629	prog = bpf_prog_get(attr->test.prog_fd);
3630	if (IS_ERR(prog))
3631		return PTR_ERR(prog);
3632
3633	if (prog->aux->ops->test_run)
3634		ret = prog->aux->ops->test_run(prog, attr, uattr);
3635
3636	bpf_prog_put(prog);
3637	return ret;
3638}
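/* Example (illustrative sketch): one BPF_PROG_TEST_RUN invocation over a
 * caller-provided buffer, for a program type whose ops implement ->test_run
 * (checked above).  pkt and prog_fd are assumptions; retval and duration are
 * filled in by the kernel on success.
 *
 *	union bpf_attr attr;
 *	__u8 pkt[64] = {};
 *	int err;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.test.prog_fd      = prog_fd;
 *	attr.test.data_in      = (__u64)(unsigned long)pkt;
 *	attr.test.data_size_in = sizeof(pkt);
 *	attr.test.repeat       = 1;
 *	err = bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 *	// on success: attr.test.retval, attr.test.duration
 */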
3639
3640#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
3641
3642static int bpf_obj_get_next_id(const union bpf_attr *attr,
3643			       union bpf_attr __user *uattr,
3644			       struct idr *idr,
3645			       spinlock_t *lock)
3646{
3647	u32 next_id = attr->start_id;
3648	int err = 0;
3649
3650	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
3651		return -EINVAL;
3652
3653	if (!capable(CAP_SYS_ADMIN))
3654		return -EPERM;
3655
3656	next_id++;
3657	spin_lock_bh(lock);
3658	if (!idr_get_next(idr, &next_id))
3659		err = -ENOENT;
3660	spin_unlock_bh(lock);
3661
3662	if (!err)
3663		err = put_user(next_id, &uattr->next_id);
3664
3665	return err;
3666}
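/* Example (illustrative sketch): walking all loaded programs by ID, the way
 * bpftool does, by pairing BPF_PROG_GET_NEXT_ID with BPF_PROG_GET_FD_BY_ID
 * (both require CAP_SYS_ADMIN, as enforced here and below).  Program IDs can
 * disappear between the two calls, so a failed second step is normal.
 *
 *	union bpf_attr attr;
 *	__u32 id = 0;
 *	int fd;
 *
 *	for (;;) {
 *		memset(&attr, 0, sizeof(attr));
 *		attr.start_id = id;
 *		if (bpf(BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr)))
 *			break;				// no more programs
 *		id = attr.next_id;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.prog_id = id;
 *		fd = bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
 *		if (fd < 0)
 *			continue;			// raced with unload
 *		// inspect via BPF_OBJ_GET_INFO_BY_FD, then close(fd)
 *	}
 */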
3667
3668struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
3669{
3670	struct bpf_map *map;
3671
3672	spin_lock_bh(&map_idr_lock);
3673again:
3674	map = idr_get_next(&map_idr, id);
3675	if (map) {
3676		map = __bpf_map_inc_not_zero(map, false);
3677		if (IS_ERR(map)) {
3678			(*id)++;
3679			goto again;
3680		}
3681	}
3682	spin_unlock_bh(&map_idr_lock);
3683
3684	return map;
3685}
3686
3687struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
3688{
3689	struct bpf_prog *prog;
3690
3691	spin_lock_bh(&prog_idr_lock);
3692again:
3693	prog = idr_get_next(&prog_idr, id);
3694	if (prog) {
3695		prog = bpf_prog_inc_not_zero(prog);
3696		if (IS_ERR(prog)) {
3697			(*id)++;
3698			goto again;
3699		}
3700	}
3701	spin_unlock_bh(&prog_idr_lock);
3702
3703	return prog;
3704}
3705
3706#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
3707
3708struct bpf_prog *bpf_prog_by_id(u32 id)
3709{
3710	struct bpf_prog *prog;
3711
3712	if (!id)
3713		return ERR_PTR(-ENOENT);
3714
3715	spin_lock_bh(&prog_idr_lock);
3716	prog = idr_find(&prog_idr, id);
3717	if (prog)
3718		prog = bpf_prog_inc_not_zero(prog);
3719	else
3720		prog = ERR_PTR(-ENOENT);
3721	spin_unlock_bh(&prog_idr_lock);
3722	return prog;
3723}
3724
3725static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
3726{
3727	struct bpf_prog *prog;
3728	u32 id = attr->prog_id;
3729	int fd;
3730
3731	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
3732		return -EINVAL;
3733
3734	if (!capable(CAP_SYS_ADMIN))
3735		return -EPERM;
3736
3737	prog = bpf_prog_by_id(id);
3738	if (IS_ERR(prog))
3739		return PTR_ERR(prog);
3740
3741	fd = bpf_prog_new_fd(prog);
3742	if (fd < 0)
3743		bpf_prog_put(prog);
3744
3745	return fd;
3746}
3747
3748#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
3749
3750static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
3751{
3752	struct bpf_map *map;
3753	u32 id = attr->map_id;
3754	int f_flags;
3755	int fd;
3756
3757	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
3758	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
3759		return -EINVAL;
3760
3761	if (!capable(CAP_SYS_ADMIN))
3762		return -EPERM;
3763
3764	f_flags = bpf_get_file_flag(attr->open_flags);
3765	if (f_flags < 0)
3766		return f_flags;
3767
3768	spin_lock_bh(&map_idr_lock);
3769	map = idr_find(&map_idr, id);
3770	if (map)
3771		map = __bpf_map_inc_not_zero(map, true);
3772	else
3773		map = ERR_PTR(-ENOENT);
3774	spin_unlock_bh(&map_idr_lock);
3775
3776	if (IS_ERR(map))
3777		return PTR_ERR(map);
3778
3779	fd = bpf_map_new_fd(map, f_flags);
3780	if (fd < 0)
3781		bpf_map_put_with_uref(map);
3782
3783	return fd;
3784}
3785
3786static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
3787					      unsigned long addr, u32 *off,
3788					      u32 *type)
3789{
3790	const struct bpf_map *map;
3791	int i;
3792
3793	mutex_lock(&prog->aux->used_maps_mutex);
3794	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
3795		map = prog->aux->used_maps[i];
3796		if (map == (void *)addr) {
3797			*type = BPF_PSEUDO_MAP_FD;
3798			goto out;
3799		}
3800		if (!map->ops->map_direct_value_meta)
3801			continue;
3802		if (!map->ops->map_direct_value_meta(map, addr, off)) {
3803			*type = BPF_PSEUDO_MAP_VALUE;
3804			goto out;
3805		}
3806	}
3807	map = NULL;
3808
3809out:
3810	mutex_unlock(&prog->aux->used_maps_mutex);
3811	return map;
3812}
3813
3814static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
3815					      const struct cred *f_cred)
3816{
3817	const struct bpf_map *map;
3818	struct bpf_insn *insns;
3819	u32 off, type;
3820	u64 imm;
3821	u8 code;
3822	int i;
3823
3824	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
3825			GFP_USER);
3826	if (!insns)
3827		return insns;
3828
3829	for (i = 0; i < prog->len; i++) {
3830		code = insns[i].code;
3831
3832		if (code == (BPF_JMP | BPF_TAIL_CALL)) {
3833			insns[i].code = BPF_JMP | BPF_CALL;
3834			insns[i].imm = BPF_FUNC_tail_call;
3835			/* fall-through */
3836		}
3837		if (code == (BPF_JMP | BPF_CALL) ||
3838		    code == (BPF_JMP | BPF_CALL_ARGS)) {
3839			if (code == (BPF_JMP | BPF_CALL_ARGS))
3840				insns[i].code = BPF_JMP | BPF_CALL;
3841			if (!bpf_dump_raw_ok(f_cred))
3842				insns[i].imm = 0;
3843			continue;
3844		}
3845		if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
3846			insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
3847			continue;
3848		}
3849
3850		if (code != (BPF_LD | BPF_IMM | BPF_DW))
3851			continue;
3852
3853		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
3854		map = bpf_map_from_imm(prog, imm, &off, &type);
3855		if (map) {
3856			insns[i].src_reg = type;
3857			insns[i].imm = map->id;
3858			insns[i + 1].imm = off;
3859			continue;
3860		}
3861	}
3862
3863	return insns;
3864}
3865
3866static int set_info_rec_size(struct bpf_prog_info *info)
3867{
3868	/*
3869	 * Ensure info.*_rec_size is the same as kernel expected size
3870	 *
3871	 * or
3872	 *
3873	 * Only allow zero *_rec_size if both _rec_size and _cnt are
3874	 * zero.  In this case, the kernel will set the expected
3875	 * _rec_size back to the info.
3876	 */
3877
3878	if ((info->nr_func_info || info->func_info_rec_size) &&
3879	    info->func_info_rec_size != sizeof(struct bpf_func_info))
3880		return -EINVAL;
3881
3882	if ((info->nr_line_info || info->line_info_rec_size) &&
3883	    info->line_info_rec_size != sizeof(struct bpf_line_info))
3884		return -EINVAL;
3885
3886	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
3887	    info->jited_line_info_rec_size != sizeof(__u64))
3888		return -EINVAL;
3889
3890	info->func_info_rec_size = sizeof(struct bpf_func_info);
3891	info->line_info_rec_size = sizeof(struct bpf_line_info);
3892	info->jited_line_info_rec_size = sizeof(__u64);
3893
3894	return 0;
3895}
3896
3897static int bpf_prog_get_info_by_fd(struct file *file,
3898				   struct bpf_prog *prog,
3899				   const union bpf_attr *attr,
3900				   union bpf_attr __user *uattr)
3901{
3902	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3903	struct btf *attach_btf = bpf_prog_get_target_btf(prog);
3904	struct bpf_prog_info info;
3905	u32 info_len = attr->info.info_len;
3906	struct bpf_prog_kstats stats;
3907	char __user *uinsns;
3908	u32 ulen;
3909	int err;
3910
3911	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
3912	if (err)
3913		return err;
3914	info_len = min_t(u32, sizeof(info), info_len);
3915
3916	memset(&info, 0, sizeof(info));
3917	if (copy_from_user(&info, uinfo, info_len))
3918		return -EFAULT;
3919
3920	info.type = prog->type;
3921	info.id = prog->aux->id;
3922	info.load_time = prog->aux->load_time;
3923	info.created_by_uid = from_kuid_munged(current_user_ns(),
3924					       prog->aux->user->uid);
3925	info.gpl_compatible = prog->gpl_compatible;
3926
3927	memcpy(info.tag, prog->tag, sizeof(prog->tag));
3928	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
3929
3930	mutex_lock(&prog->aux->used_maps_mutex);
3931	ulen = info.nr_map_ids;
3932	info.nr_map_ids = prog->aux->used_map_cnt;
3933	ulen = min_t(u32, info.nr_map_ids, ulen);
3934	if (ulen) {
3935		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
3936		u32 i;
3937
3938		for (i = 0; i < ulen; i++)
3939			if (put_user(prog->aux->used_maps[i]->id,
3940				     &user_map_ids[i])) {
3941				mutex_unlock(&prog->aux->used_maps_mutex);
3942				return -EFAULT;
3943			}
3944	}
3945	mutex_unlock(&prog->aux->used_maps_mutex);
3946
3947	err = set_info_rec_size(&info);
3948	if (err)
3949		return err;
3950
3951	bpf_prog_get_stats(prog, &stats);
3952	info.run_time_ns = stats.nsecs;
3953	info.run_cnt = stats.cnt;
3954	info.recursion_misses = stats.misses;
3955
3956	info.verified_insns = prog->aux->verified_insns;
3957
3958	if (!bpf_capable()) {
3959		info.jited_prog_len = 0;
3960		info.xlated_prog_len = 0;
3961		info.nr_jited_ksyms = 0;
3962		info.nr_jited_func_lens = 0;
3963		info.nr_func_info = 0;
3964		info.nr_line_info = 0;
3965		info.nr_jited_line_info = 0;
3966		goto done;
3967	}
3968
3969	ulen = info.xlated_prog_len;
3970	info.xlated_prog_len = bpf_prog_insn_size(prog);
3971	if (info.xlated_prog_len && ulen) {
3972		struct bpf_insn *insns_sanitized;
3973		bool fault;
3974
3975		if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
3976			info.xlated_prog_insns = 0;
3977			goto done;
3978		}
3979		insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
3980		if (!insns_sanitized)
3981			return -ENOMEM;
3982		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
3983		ulen = min_t(u32, info.xlated_prog_len, ulen);
3984		fault = copy_to_user(uinsns, insns_sanitized, ulen);
3985		kfree(insns_sanitized);
3986		if (fault)
3987			return -EFAULT;
3988	}
3989
3990	if (bpf_prog_is_dev_bound(prog->aux)) {
3991		err = bpf_prog_offload_info_fill(&info, prog);
3992		if (err)
3993			return err;
3994		goto done;
3995	}
3996
3997	/* NOTE: the following code is supposed to be skipped for offload.
3998	 * bpf_prog_offload_info_fill() is the place to fill similar fields
3999	 * for offload.
4000	 */
4001	ulen = info.jited_prog_len;
4002	if (prog->aux->func_cnt) {
4003		u32 i;
4004
4005		info.jited_prog_len = 0;
4006		for (i = 0; i < prog->aux->func_cnt; i++)
4007			info.jited_prog_len += prog->aux->func[i]->jited_len;
4008	} else {
4009		info.jited_prog_len = prog->jited_len;
4010	}
4011
4012	if (info.jited_prog_len && ulen) {
4013		if (bpf_dump_raw_ok(file->f_cred)) {
4014			uinsns = u64_to_user_ptr(info.jited_prog_insns);
4015			ulen = min_t(u32, info.jited_prog_len, ulen);
4016
4017			/* for multi-function programs, copy the JITed
4018			 * instructions for all the functions
4019			 */
4020			if (prog->aux->func_cnt) {
4021				u32 len, free, i;
4022				u8 *img;
4023
4024				free = ulen;
4025				for (i = 0; i < prog->aux->func_cnt; i++) {
4026					len = prog->aux->func[i]->jited_len;
4027					len = min_t(u32, len, free);
4028					img = (u8 *) prog->aux->func[i]->bpf_func;
4029					if (copy_to_user(uinsns, img, len))
4030						return -EFAULT;
4031					uinsns += len;
4032					free -= len;
4033					if (!free)
4034						break;
4035				}
4036			} else {
4037				if (copy_to_user(uinsns, prog->bpf_func, ulen))
4038					return -EFAULT;
4039			}
4040		} else {
4041			info.jited_prog_insns = 0;
4042		}
4043	}
4044
4045	ulen = info.nr_jited_ksyms;
4046	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
4047	if (ulen) {
4048		if (bpf_dump_raw_ok(file->f_cred)) {
4049			unsigned long ksym_addr;
4050			u64 __user *user_ksyms;
4051			u32 i;
4052
4053			/* copy the address of the kernel symbol
4054			 * corresponding to each function
4055			 */
4056			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
4057			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
4058			if (prog->aux->func_cnt) {
4059				for (i = 0; i < ulen; i++) {
4060					ksym_addr = (unsigned long)
4061						prog->aux->func[i]->bpf_func;
4062					if (put_user((u64) ksym_addr,
4063						     &user_ksyms[i]))
4064						return -EFAULT;
4065				}
4066			} else {
4067				ksym_addr = (unsigned long) prog->bpf_func;
4068				if (put_user((u64) ksym_addr, &user_ksyms[0]))
4069					return -EFAULT;
4070			}
4071		} else {
4072			info.jited_ksyms = 0;
4073		}
4074	}
4075
4076	ulen = info.nr_jited_func_lens;
4077	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
4078	if (ulen) {
4079		if (bpf_dump_raw_ok(file->f_cred)) {
4080			u32 __user *user_lens;
4081			u32 func_len, i;
4082
4083			/* copy the JITed image lengths for each function */
4084			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
4085			user_lens = u64_to_user_ptr(info.jited_func_lens);
4086			if (prog->aux->func_cnt) {
4087				for (i = 0; i < ulen; i++) {
4088					func_len =
4089						prog->aux->func[i]->jited_len;
4090					if (put_user(func_len, &user_lens[i]))
4091						return -EFAULT;
4092				}
4093			} else {
4094				func_len = prog->jited_len;
4095				if (put_user(func_len, &user_lens[0]))
4096					return -EFAULT;
4097			}
4098		} else {
4099			info.jited_func_lens = 0;
4100		}
4101	}
4102
4103	if (prog->aux->btf)
4104		info.btf_id = btf_obj_id(prog->aux->btf);
4105	info.attach_btf_id = prog->aux->attach_btf_id;
4106	if (attach_btf)
4107		info.attach_btf_obj_id = btf_obj_id(attach_btf);
4108
4109	ulen = info.nr_func_info;
4110	info.nr_func_info = prog->aux->func_info_cnt;
4111	if (info.nr_func_info && ulen) {
4112		char __user *user_finfo;
4113
4114		user_finfo = u64_to_user_ptr(info.func_info);
4115		ulen = min_t(u32, info.nr_func_info, ulen);
4116		if (copy_to_user(user_finfo, prog->aux->func_info,
4117				 info.func_info_rec_size * ulen))
4118			return -EFAULT;
4119	}
4120
4121	ulen = info.nr_line_info;
4122	info.nr_line_info = prog->aux->nr_linfo;
4123	if (info.nr_line_info && ulen) {
4124		__u8 __user *user_linfo;
4125
4126		user_linfo = u64_to_user_ptr(info.line_info);
4127		ulen = min_t(u32, info.nr_line_info, ulen);
4128		if (copy_to_user(user_linfo, prog->aux->linfo,
4129				 info.line_info_rec_size * ulen))
4130			return -EFAULT;
4131	}
4132
4133	ulen = info.nr_jited_line_info;
4134	if (prog->aux->jited_linfo)
4135		info.nr_jited_line_info = prog->aux->nr_linfo;
4136	else
4137		info.nr_jited_line_info = 0;
4138	if (info.nr_jited_line_info && ulen) {
4139		if (bpf_dump_raw_ok(file->f_cred)) {
4140			unsigned long line_addr;
4141			__u64 __user *user_linfo;
4142			u32 i;
4143
4144			user_linfo = u64_to_user_ptr(info.jited_line_info);
4145			ulen = min_t(u32, info.nr_jited_line_info, ulen);
4146			for (i = 0; i < ulen; i++) {
4147				line_addr = (unsigned long)prog->aux->jited_linfo[i];
4148				if (put_user((__u64)line_addr, &user_linfo[i]))
4149					return -EFAULT;
4150			}
4151		} else {
4152			info.jited_line_info = 0;
4153		}
4154	}
4155
4156	ulen = info.nr_prog_tags;
4157	info.nr_prog_tags = prog->aux->func_cnt ? : 1;
4158	if (ulen) {
4159		__u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
4160		u32 i;
4161
4162		user_prog_tags = u64_to_user_ptr(info.prog_tags);
4163		ulen = min_t(u32, info.nr_prog_tags, ulen);
4164		if (prog->aux->func_cnt) {
4165			for (i = 0; i < ulen; i++) {
4166				if (copy_to_user(user_prog_tags[i],
4167						 prog->aux->func[i]->tag,
4168						 BPF_TAG_SIZE))
4169					return -EFAULT;
4170			}
4171		} else {
4172			if (copy_to_user(user_prog_tags[0],
4173					 prog->tag, BPF_TAG_SIZE))
4174				return -EFAULT;
4175		}
4176	}
4177
4178done:
4179	if (copy_to_user(uinfo, &info, info_len) ||
4180	    put_user(info_len, &uattr->info.info_len))
4181		return -EFAULT;
4182
4183	return 0;
4184}
4185
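/*
 * Illustrative user-space sketch (not part of this kernel source): the usual
 * two-call pattern run against bpf_prog_get_info_by_fd() above.  The first
 * call learns the real array counts, the second supplies buffers sized from
 * them; re-zeroing the info struct between calls avoids EFAULT on arrays we
 * do not request.  Assumes a raw bpf(2) syscall; prog_get_info() and
 * dump_prog_map_ids() are hypothetical helper names and prog_fd is an
 * already-open program fd.
 */
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int prog_get_info(int fd, void *info, __u32 *info_len)
{
	union bpf_attr attr;
	long err;

	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = fd;
	attr.info.info_len = *info_len;
	attr.info.info = (__u64)(unsigned long)info;

	err = syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (!err)
		*info_len = attr.info.info_len;
	return err ? -1 : 0;
}

static int dump_prog_map_ids(int prog_fd)
{
	struct bpf_prog_info info;
	__u32 len = sizeof(info), nr, *ids;

	/* First call: the kernel reports the real count in nr_map_ids. */
	memset(&info, 0, sizeof(info));
	if (prog_get_info(prog_fd, &info, &len))
		return -1;
	nr = info.nr_map_ids;
	if (!nr)
		return 0;

	ids = calloc(nr, sizeof(*ids));
	if (!ids)
		return -1;

	/* Second call: zero the struct and only ask for the map ID array. */
	memset(&info, 0, sizeof(info));
	info.nr_map_ids = nr;
	info.map_ids = (__u64)(unsigned long)ids;
	len = sizeof(info);
	if (prog_get_info(prog_fd, &info, &len)) {
		free(ids);
		return -1;
	}
	/* ids[0..info.nr_map_ids - 1] now hold the IDs of the used maps. */
	free(ids);
	return 0;
}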
4186static int bpf_map_get_info_by_fd(struct file *file,
4187				  struct bpf_map *map,
4188				  const union bpf_attr *attr,
4189				  union bpf_attr __user *uattr)
4190{
4191	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4192	struct bpf_map_info info;
4193	u32 info_len = attr->info.info_len;
4194	int err;
4195
4196	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4197	if (err)
4198		return err;
4199	info_len = min_t(u32, sizeof(info), info_len);
4200
4201	memset(&info, 0, sizeof(info));
4202	info.type = map->map_type;
4203	info.id = map->id;
4204	info.key_size = map->key_size;
4205	info.value_size = map->value_size;
4206	info.max_entries = map->max_entries;
4207	info.map_flags = map->map_flags;
4208	info.map_extra = map->map_extra;
4209	memcpy(info.name, map->name, sizeof(map->name));
4210
4211	if (map->btf) {
4212		info.btf_id = btf_obj_id(map->btf);
4213		info.btf_key_type_id = map->btf_key_type_id;
4214		info.btf_value_type_id = map->btf_value_type_id;
4215	}
4216	info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
4217
4218	if (bpf_map_is_dev_bound(map)) {
4219		err = bpf_map_offload_info_fill(&info, map);
4220		if (err)
4221			return err;
4222	}
4223
4224	if (copy_to_user(uinfo, &info, info_len) ||
4225	    put_user(info_len, &uattr->info.info_len))
4226		return -EFAULT;
4227
4228	return 0;
4229}
4230
4231static int bpf_btf_get_info_by_fd(struct file *file,
4232				  struct btf *btf,
4233				  const union bpf_attr *attr,
4234				  union bpf_attr __user *uattr)
4235{
4236	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4237	u32 info_len = attr->info.info_len;
4238	int err;
4239
4240	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len);
4241	if (err)
4242		return err;
4243
4244	return btf_get_info_by_fd(btf, attr, uattr);
4245}
4246
4247static int bpf_link_get_info_by_fd(struct file *file,
4248				  struct bpf_link *link,
4249				  const union bpf_attr *attr,
4250				  union bpf_attr __user *uattr)
4251{
4252	struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4253	struct bpf_link_info info;
4254	u32 info_len = attr->info.info_len;
4255	int err;
4256
4257	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4258	if (err)
4259		return err;
4260	info_len = min_t(u32, sizeof(info), info_len);
4261
4262	memset(&info, 0, sizeof(info));
4263	if (copy_from_user(&info, uinfo, info_len))
4264		return -EFAULT;
4265
4266	info.type = link->type;
4267	info.id = link->id;
4268	info.prog_id = link->prog->aux->id;
4269
4270	if (link->ops->fill_link_info) {
4271		err = link->ops->fill_link_info(link, &info);
4272		if (err)
4273			return err;
4274	}
4275
4276	if (copy_to_user(uinfo, &info, info_len) ||
4277	    put_user(info_len, &uattr->info.info_len))
4278		return -EFAULT;
4279
4280	return 0;
4281}
4282
4283
4284#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
4285
4286static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
4287				  union bpf_attr __user *uattr)
4288{
4289	int ufd = attr->info.bpf_fd;
4290	struct fd f;
4291	int err;
4292
4293	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
4294		return -EINVAL;
4295
4296	f = fdget(ufd);
4297	if (!f.file)
4298		return -EBADFD;
4299
4300	if (f.file->f_op == &bpf_prog_fops)
4301		err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
4302					      uattr);
4303	else if (f.file->f_op == &bpf_map_fops)
4304		err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
4305					     uattr);
4306	else if (f.file->f_op == &btf_fops)
4307		err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
4308	else if (f.file->f_op == &bpf_link_fops)
4309		err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
4310					      attr, uattr);
4311	else
4312		err = -EINVAL;
4313
4314	fdput(f);
4315	return err;
4316}
4317
4318#define BPF_BTF_LOAD_LAST_FIELD btf_log_level
4319
4320static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr)
4321{
4322	if (CHECK_ATTR(BPF_BTF_LOAD))
4323		return -EINVAL;
4324
4325	if (!bpf_capable())
4326		return -EPERM;
4327
4328	return btf_new_fd(attr, uattr);
4329}
4330
4331#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
4332
4333static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
4334{
4335	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
4336		return -EINVAL;
4337
4338	if (!capable(CAP_SYS_ADMIN))
4339		return -EPERM;
4340
4341	return btf_get_fd_by_id(attr->btf_id);
4342}
4343
4344static int bpf_task_fd_query_copy(const union bpf_attr *attr,
4345				    union bpf_attr __user *uattr,
4346				    u32 prog_id, u32 fd_type,
4347				    const char *buf, u64 probe_offset,
4348				    u64 probe_addr)
4349{
4350	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
4351	u32 len = buf ? strlen(buf) : 0, input_len;
4352	int err = 0;
4353
4354	if (put_user(len, &uattr->task_fd_query.buf_len))
4355		return -EFAULT;
4356	input_len = attr->task_fd_query.buf_len;
4357	if (input_len && ubuf) {
4358		if (!len) {
4359			/* nothing to copy, just make ubuf NULL terminated */
4360			char zero = '\0';
4361
4362			if (put_user(zero, ubuf))
4363				return -EFAULT;
4364		} else if (input_len >= len + 1) {
4365			/* ubuf can hold the string with NULL terminator */
4366			if (copy_to_user(ubuf, buf, len + 1))
4367				return -EFAULT;
4368		} else {
4369			/* ubuf cannot hold the string with NULL terminator,
4370			 * do a partial copy with NULL terminator.
4371			 */
4372			char zero = '\0';
4373
4374			err = -ENOSPC;
4375			if (copy_to_user(ubuf, buf, input_len - 1))
4376				return -EFAULT;
4377			if (put_user(zero, ubuf + input_len - 1))
4378				return -EFAULT;
4379		}
4380	}
4381
4382	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
4383	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
4384	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
4385	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
4386		return -EFAULT;
4387
4388	return err;
4389}
4390
4391#define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
4392
4393static int bpf_task_fd_query(const union bpf_attr *attr,
4394			     union bpf_attr __user *uattr)
4395{
4396	pid_t pid = attr->task_fd_query.pid;
4397	u32 fd = attr->task_fd_query.fd;
4398	u32 fd = attr->task_fd_query.fd;
4399	struct task_struct *task;
4400	struct file *file;
4401	int err;
4402
4403	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
4404		return -EINVAL;
4405
4406	if (!capable(CAP_SYS_ADMIN))
4407		return -EPERM;
4408
4409	if (attr->task_fd_query.flags != 0)
4410		return -EINVAL;
4411
4412	rcu_read_lock();
4413	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
4414	rcu_read_unlock();
4415	if (!task)
4416		return -ENOENT;
4417
4418	err = 0;
4419	file = fget_task(task, fd);
4420	put_task_struct(task);
4421	if (!file)
4422		return -EBADF;
4423
4424	if (file->f_op == &bpf_link_fops) {
4425		struct bpf_link *link = file->private_data;
4426
4427		if (link->ops == &bpf_raw_tp_link_lops) {
4428			struct bpf_raw_tp_link *raw_tp =
4429				container_of(link, struct bpf_raw_tp_link, link);
4430			struct bpf_raw_event_map *btp = raw_tp->btp;
4431
4432			err = bpf_task_fd_query_copy(attr, uattr,
4433						     raw_tp->link.prog->aux->id,
4434						     BPF_FD_TYPE_RAW_TRACEPOINT,
4435						     btp->tp->name, 0, 0);
4436			goto put_file;
4437		}
4438		goto out_not_supp;
4439	}
4440
4441	event = perf_get_event(file);
4442	if (!IS_ERR(event)) {
4443		u64 probe_offset, probe_addr;
4444		u32 prog_id, fd_type;
4445		const char *buf;
4446
4447		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
4448					      &buf, &probe_offset,
4449					      &probe_addr);
4450		if (!err)
4451			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
4452						     fd_type, buf,
4453						     probe_offset,
4454						     probe_addr);
4455		goto put_file;
4456	}
4457
4458out_not_supp:
4459	err = -ENOTSUPP;
4460put_file:
4461	fput(file);
4462	return err;
4463}
4464
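/*
 * Illustrative user-space sketch (not part of this kernel source): the caller
 * side of bpf_task_fd_query() above - asking which BPF program sits behind a
 * perf_event or raw tracepoint fd owned by another task.  Assumes a raw
 * bpf(2) syscall; query_task_fd() is a hypothetical helper and pid/target_fd
 * are example values.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int query_task_fd(int pid, int target_fd)
{
	char name[256];
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.task_fd_query.pid = pid;
	attr.task_fd_query.fd = target_fd;
	attr.task_fd_query.buf = (__u64)(unsigned long)name;
	attr.task_fd_query.buf_len = sizeof(name);

	if (syscall(__NR_bpf, BPF_TASK_FD_QUERY, &attr, sizeof(attr)))
		return -1;

	/* fd_type is one of BPF_FD_TYPE_*; buf holds e.g. the tracepoint name. */
	printf("prog_id=%u fd_type=%u name=%s offset=%llu addr=%llu\n",
	       attr.task_fd_query.prog_id, attr.task_fd_query.fd_type, name,
	       (unsigned long long)attr.task_fd_query.probe_offset,
	       (unsigned long long)attr.task_fd_query.probe_addr);
	return 0;
}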
4465#define BPF_MAP_BATCH_LAST_FIELD batch.flags
4466
4467#define BPF_DO_BATCH(fn, ...)			\
4468	do {					\
4469		if (!fn) {			\
4470			err = -ENOTSUPP;	\
4471			goto err_put;		\
4472		}				\
4473		err = fn(__VA_ARGS__);		\
4474	} while (0)
4475
4476static int bpf_map_do_batch(const union bpf_attr *attr,
4477			    union bpf_attr __user *uattr,
4478			    int cmd)
4479{
4480	bool has_read  = cmd == BPF_MAP_LOOKUP_BATCH ||
4481			 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
4482	bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
4483	struct bpf_map *map;
4484	int err, ufd;
4485	struct fd f;
4486
4487	if (CHECK_ATTR(BPF_MAP_BATCH))
4488		return -EINVAL;
4489
4490	ufd = attr->batch.map_fd;
4491	f = fdget(ufd);
4492	map = __bpf_map_get(f);
4493	if (IS_ERR(map))
4494		return PTR_ERR(map);
4495	if (has_write)
4496		bpf_map_write_active_inc(map);
4497	if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
4498		err = -EPERM;
4499		goto err_put;
4500	}
4501	if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
4502		err = -EPERM;
4503		goto err_put;
4504	}
4505
4506	if (cmd == BPF_MAP_LOOKUP_BATCH)
4507		BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr);
4508	else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
4509		BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr);
4510	else if (cmd == BPF_MAP_UPDATE_BATCH)
4511		BPF_DO_BATCH(map->ops->map_update_batch, map, f.file, attr, uattr);
4512	else
4513		BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr);
4514err_put:
4515	if (has_write)
4516		bpf_map_write_active_dec(map);
4517	fdput(f);
4518	return err;
4519}
4520
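/*
 * Illustrative user-space sketch (not part of this kernel source): one step
 * of the batched lookup interface dispatched by bpf_map_do_batch() above.
 * On the first call in_batch is NULL; each call returns an out_batch cookie
 * to pass back as in_batch, and attr.batch.count is updated with the number
 * of elements actually copied (the walk ends when the kernel returns
 * -ENOENT).  Assumes a raw bpf(2) syscall and a map with 4-byte keys and
 * 8-byte values; lookup_batch_once() and BATCH_SZ are example names.
 */
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

#define BATCH_SZ 64

static int lookup_batch_once(int map_fd, void *in_batch, void *out_batch,
			     __u32 *keys, __u64 *vals, __u32 *count)
{
	union bpf_attr attr;
	long err;

	memset(&attr, 0, sizeof(attr));
	attr.batch.map_fd = map_fd;
	attr.batch.in_batch = (__u64)(unsigned long)in_batch;
	attr.batch.out_batch = (__u64)(unsigned long)out_batch;
	attr.batch.keys = (__u64)(unsigned long)keys;
	attr.batch.values = (__u64)(unsigned long)vals;
	attr.batch.count = BATCH_SZ;

	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
	*count = attr.batch.count;	/* elements copied into keys/vals */
	return err ? -errno : 0;
}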
4521#define BPF_LINK_CREATE_LAST_FIELD link_create.kprobe_multi.cookies
4522static int link_create(union bpf_attr *attr, bpfptr_t uattr)
4523{
4524	enum bpf_prog_type ptype;
4525	struct bpf_prog *prog;
4526	int ret;
4527
4528	if (CHECK_ATTR(BPF_LINK_CREATE))
4529		return -EINVAL;
4530
4531	prog = bpf_prog_get(attr->link_create.prog_fd);
4532	if (IS_ERR(prog))
4533		return PTR_ERR(prog);
4534
4535	ret = bpf_prog_attach_check_attach_type(prog,
4536						attr->link_create.attach_type);
4537	if (ret)
4538		goto out;
4539
4540	switch (prog->type) {
4541	case BPF_PROG_TYPE_EXT:
4542		break;
4543	case BPF_PROG_TYPE_PERF_EVENT:
4544	case BPF_PROG_TYPE_TRACEPOINT:
4545		if (attr->link_create.attach_type != BPF_PERF_EVENT) {
4546			ret = -EINVAL;
4547			goto out;
4548		}
4549		break;
4550	case BPF_PROG_TYPE_KPROBE:
4551		if (attr->link_create.attach_type != BPF_PERF_EVENT &&
4552		    attr->link_create.attach_type != BPF_TRACE_KPROBE_MULTI) {
4553			ret = -EINVAL;
4554			goto out;
4555		}
4556		break;
4557	default:
4558		ptype = attach_type_to_prog_type(attr->link_create.attach_type);
4559		if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) {
4560			ret = -EINVAL;
4561			goto out;
4562		}
4563		break;
4564	}
4565
4566	switch (prog->type) {
4567	case BPF_PROG_TYPE_CGROUP_SKB:
4568	case BPF_PROG_TYPE_CGROUP_SOCK:
4569	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4570	case BPF_PROG_TYPE_SOCK_OPS:
4571	case BPF_PROG_TYPE_CGROUP_DEVICE:
4572	case BPF_PROG_TYPE_CGROUP_SYSCTL:
4573	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4574		ret = cgroup_bpf_link_attach(attr, prog);
4575		break;
4576	case BPF_PROG_TYPE_EXT:
4577		ret = bpf_tracing_prog_attach(prog,
4578					      attr->link_create.target_fd,
4579					      attr->link_create.target_btf_id,
4580					      attr->link_create.tracing.cookie);
4581		break;
4582	case BPF_PROG_TYPE_LSM:
4583	case BPF_PROG_TYPE_TRACING:
4584		if (attr->link_create.attach_type != prog->expected_attach_type) {
4585			ret = -EINVAL;
4586			goto out;
4587		}
4588		if (prog->expected_attach_type == BPF_TRACE_RAW_TP)
4589			ret = bpf_raw_tp_link_attach(prog, NULL);
4590		else if (prog->expected_attach_type == BPF_TRACE_ITER)
4591			ret = bpf_iter_link_attach(attr, uattr, prog);
4592		else if (prog->expected_attach_type == BPF_LSM_CGROUP)
4593			ret = cgroup_bpf_link_attach(attr, prog);
4594		else
4595			ret = bpf_tracing_prog_attach(prog,
4596						      attr->link_create.target_fd,
4597						      attr->link_create.target_btf_id,
4598						      attr->link_create.tracing.cookie);
4599		break;
4600	case BPF_PROG_TYPE_FLOW_DISSECTOR:
4601	case BPF_PROG_TYPE_SK_LOOKUP:
4602		ret = netns_bpf_link_create(attr, prog);
4603		break;
4604#ifdef CONFIG_NET
4605	case BPF_PROG_TYPE_XDP:
4606		ret = bpf_xdp_link_attach(attr, prog);
4607		break;
4608#endif
4609	case BPF_PROG_TYPE_PERF_EVENT:
4610	case BPF_PROG_TYPE_TRACEPOINT:
4611		ret = bpf_perf_link_attach(attr, prog);
4612		break;
4613	case BPF_PROG_TYPE_KPROBE:
4614		if (attr->link_create.attach_type == BPF_PERF_EVENT)
4615			ret = bpf_perf_link_attach(attr, prog);
4616		else
4617			ret = bpf_kprobe_multi_link_attach(attr, prog);
4618		break;
4619	default:
4620		ret = -EINVAL;
4621	}
4622
4623out:
4624	if (ret < 0)
4625		bpf_prog_put(prog);
4626	return ret;
4627}
4628
4629#define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
4630
4631static int link_update(union bpf_attr *attr)
4632{
4633	struct bpf_prog *old_prog = NULL, *new_prog;
4634	struct bpf_link *link;
4635	u32 flags;
4636	int ret;
4637
4638	if (CHECK_ATTR(BPF_LINK_UPDATE))
4639		return -EINVAL;
4640
4641	flags = attr->link_update.flags;
4642	if (flags & ~BPF_F_REPLACE)
4643		return -EINVAL;
4644
4645	link = bpf_link_get_from_fd(attr->link_update.link_fd);
4646	if (IS_ERR(link))
4647		return PTR_ERR(link);
4648
4649	new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
4650	if (IS_ERR(new_prog)) {
4651		ret = PTR_ERR(new_prog);
4652		goto out_put_link;
4653	}
4654
4655	if (flags & BPF_F_REPLACE) {
4656		old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
4657		if (IS_ERR(old_prog)) {
4658			ret = PTR_ERR(old_prog);
4659			old_prog = NULL;
4660			goto out_put_progs;
4661		}
4662	} else if (attr->link_update.old_prog_fd) {
4663		ret = -EINVAL;
4664		goto out_put_progs;
4665	}
4666
4667	if (link->ops->update_prog)
4668		ret = link->ops->update_prog(link, new_prog, old_prog);
4669	else
4670		ret = -EINVAL;
4671
4672out_put_progs:
4673	if (old_prog)
4674		bpf_prog_put(old_prog);
4675	if (ret)
4676		bpf_prog_put(new_prog);
4677out_put_link:
4678	bpf_link_put(link);
4679	return ret;
4680}
4681
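/*
 * Illustrative user-space sketch (not part of this kernel source): the caller
 * side of link_update() above - atomically swapping the program behind an
 * existing link.  With BPF_F_REPLACE the kernel performs the swap only if the
 * link currently runs old_prog_fd.  Assumes a raw bpf(2) syscall; all three
 * fds are hypothetical.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int link_replace_prog(int link_fd, int new_prog_fd, int old_prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_update.link_fd = link_fd;
	attr.link_update.new_prog_fd = new_prog_fd;
	attr.link_update.old_prog_fd = old_prog_fd;
	attr.link_update.flags = BPF_F_REPLACE;

	return syscall(__NR_bpf, BPF_LINK_UPDATE, &attr, sizeof(attr));
}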
4682#define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd
4683
4684static int link_detach(union bpf_attr *attr)
4685{
4686	struct bpf_link *link;
4687	int ret;
4688
4689	if (CHECK_ATTR(BPF_LINK_DETACH))
4690		return -EINVAL;
4691
4692	link = bpf_link_get_from_fd(attr->link_detach.link_fd);
4693	if (IS_ERR(link))
4694		return PTR_ERR(link);
4695
4696	if (link->ops->detach)
4697		ret = link->ops->detach(link);
4698	else
4699		ret = -EOPNOTSUPP;
4700
4701	bpf_link_put(link);
4702	return ret;
4703}
4704
4705static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
4706{
4707	return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
4708}
4709
4710struct bpf_link *bpf_link_by_id(u32 id)
4711{
4712	struct bpf_link *link;
4713
4714	if (!id)
4715		return ERR_PTR(-ENOENT);
4716
4717	spin_lock_bh(&link_idr_lock);
4718	/* before link is "settled", ID is 0, pretend it doesn't exist yet */
4719	link = idr_find(&link_idr, id);
4720	if (link) {
4721		if (link->id)
4722			link = bpf_link_inc_not_zero(link);
4723		else
4724			link = ERR_PTR(-EAGAIN);
4725	} else {
4726		link = ERR_PTR(-ENOENT);
4727	}
4728	spin_unlock_bh(&link_idr_lock);
4729	return link;
4730}
4731
4732struct bpf_link *bpf_link_get_curr_or_next(u32 *id)
4733{
4734	struct bpf_link *link;
4735
4736	spin_lock_bh(&link_idr_lock);
4737again:
4738	link = idr_get_next(&link_idr, id);
4739	if (link) {
4740		link = bpf_link_inc_not_zero(link);
4741		if (IS_ERR(link)) {
4742			(*id)++;
4743			goto again;
4744		}
4745	}
4746	spin_unlock_bh(&link_idr_lock);
4747
4748	return link;
4749}
4750
4751#define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id
4752
4753static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
4754{
4755	struct bpf_link *link;
4756	u32 id = attr->link_id;
4757	int fd;
4758
4759	if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
4760		return -EINVAL;
4761
4762	if (!capable(CAP_SYS_ADMIN))
4763		return -EPERM;
4764
4765	link = bpf_link_by_id(id);
4766	if (IS_ERR(link))
4767		return PTR_ERR(link);
4768
4769	fd = bpf_link_new_fd(link);
4770	if (fd < 0)
4771		bpf_link_put(link);
4772
4773	return fd;
4774}
4775
4776DEFINE_MUTEX(bpf_stats_enabled_mutex);
4777
4778static int bpf_stats_release(struct inode *inode, struct file *file)
4779{
4780	mutex_lock(&bpf_stats_enabled_mutex);
4781	static_key_slow_dec(&bpf_stats_enabled_key.key);
4782	mutex_unlock(&bpf_stats_enabled_mutex);
4783	return 0;
4784}
4785
4786static const struct file_operations bpf_stats_fops = {
4787	.release = bpf_stats_release,
4788};
4789
4790static int bpf_enable_runtime_stats(void)
4791{
4792	int fd;
4793
4794	mutex_lock(&bpf_stats_enabled_mutex);
4795
4796	/* Set a very high limit to avoid overflow */
4797	if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
4798		mutex_unlock(&bpf_stats_enabled_mutex);
4799		return -EBUSY;
4800	}
4801
4802	fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
4803	if (fd >= 0)
4804		static_key_slow_inc(&bpf_stats_enabled_key.key);
4805
4806	mutex_unlock(&bpf_stats_enabled_mutex);
4807	return fd;
4808}
4809
4810#define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type
4811
4812static int bpf_enable_stats(union bpf_attr *attr)
4813{
4814
4815	if (CHECK_ATTR(BPF_ENABLE_STATS))
4816		return -EINVAL;
4817
4818	if (!capable(CAP_SYS_ADMIN))
4819		return -EPERM;
4820
4821	switch (attr->enable_stats.type) {
4822	case BPF_STATS_RUN_TIME:
4823		return bpf_enable_runtime_stats();
4824	default:
4825		break;
4826	}
4827	return -EINVAL;
4828}
4829
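/*
 * Illustrative user-space sketch (not part of this kernel source): the
 * fd-based interface backed by bpf_enable_runtime_stats() above.  Run-time
 * stats stay enabled for as long as the returned fd is held open; once it is
 * closed, bpf_stats_release() drops the static key again.  Assumes a raw
 * bpf(2) syscall.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int enable_run_time_stats(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.enable_stats.type = BPF_STATS_RUN_TIME;

	/* run_time_ns/run_cnt in prog info accumulate while this fd lives. */
	return syscall(__NR_bpf, BPF_ENABLE_STATS, &attr, sizeof(attr));
}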
4830#define BPF_ITER_CREATE_LAST_FIELD iter_create.flags
4831
4832static int bpf_iter_create(union bpf_attr *attr)
4833{
4834	struct bpf_link *link;
4835	int err;
4836
4837	if (CHECK_ATTR(BPF_ITER_CREATE))
4838		return -EINVAL;
4839
4840	if (attr->iter_create.flags)
4841		return -EINVAL;
4842
4843	link = bpf_link_get_from_fd(attr->iter_create.link_fd);
4844	if (IS_ERR(link))
4845		return PTR_ERR(link);
4846
4847	err = bpf_iter_new_fd(link);
4848	bpf_link_put(link);
4849
4850	return err;
4851}
4852
4853#define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags
4854
4855static int bpf_prog_bind_map(union bpf_attr *attr)
4856{
4857	struct bpf_prog *prog;
4858	struct bpf_map *map;
4859	struct bpf_map **used_maps_old, **used_maps_new;
4860	int i, ret = 0;
4861
4862	if (CHECK_ATTR(BPF_PROG_BIND_MAP))
4863		return -EINVAL;
4864
4865	if (attr->prog_bind_map.flags)
4866		return -EINVAL;
4867
4868	prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
4869	if (IS_ERR(prog))
4870		return PTR_ERR(prog);
4871
4872	map = bpf_map_get(attr->prog_bind_map.map_fd);
4873	if (IS_ERR(map)) {
4874		ret = PTR_ERR(map);
4875		goto out_prog_put;
4876	}
4877
4878	mutex_lock(&prog->aux->used_maps_mutex);
4879
4880	used_maps_old = prog->aux->used_maps;
4881
4882	for (i = 0; i < prog->aux->used_map_cnt; i++)
4883		if (used_maps_old[i] == map) {
4884			bpf_map_put(map);
4885			goto out_unlock;
4886		}
4887
4888	used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
4889				      sizeof(used_maps_new[0]),
4890				      GFP_KERNEL);
4891	if (!used_maps_new) {
4892		ret = -ENOMEM;
4893		goto out_unlock;
4894	}
4895
4896	memcpy(used_maps_new, used_maps_old,
4897	       sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
4898	used_maps_new[prog->aux->used_map_cnt] = map;
4899
4900	prog->aux->used_map_cnt++;
4901	prog->aux->used_maps = used_maps_new;
4902
4903	kfree(used_maps_old);
4904
4905out_unlock:
4906	mutex_unlock(&prog->aux->used_maps_mutex);
4907
4908	if (ret)
4909		bpf_map_put(map);
4910out_prog_put:
4911	bpf_prog_put(prog);
4912	return ret;
4913}
4914
4915static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
4916{
4917	union bpf_attr attr;
4918	bool capable;
4919	int err;
4920
4921	capable = bpf_capable() || !sysctl_unprivileged_bpf_disabled;
4922
4923	/* Intent here is for unprivileged_bpf_disabled to block key object
4924	 * creation commands for unprivileged users; other actions depend
4925	 * on fd availability and access to bpffs, so are dependent on
4926	 * object creation success.  Capabilities are later verified for
4927	 * operations such as load and map create, so even with unprivileged
4928	 * BPF disabled, capability checks are still carried out for these
4929	 * and other operations.
4930	 */
4931	if (!capable &&
4932	    (cmd == BPF_MAP_CREATE || cmd == BPF_PROG_LOAD))
4933		return -EPERM;
4934
4935	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
4936	if (err)
4937		return err;
4938	size = min_t(u32, size, sizeof(attr));
4939
4940	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
4941	memset(&attr, 0, sizeof(attr));
4942	if (copy_from_bpfptr(&attr, uattr, size) != 0)
4943		return -EFAULT;
4944
4945	err = security_bpf(cmd, &attr, size);
4946	if (err < 0)
4947		return err;
4948
4949	switch (cmd) {
4950	case BPF_MAP_CREATE:
4951		err = map_create(&attr);
4952		break;
4953	case BPF_MAP_LOOKUP_ELEM:
4954		err = map_lookup_elem(&attr);
4955		break;
4956	case BPF_MAP_UPDATE_ELEM:
4957		err = map_update_elem(&attr, uattr);
4958		break;
4959	case BPF_MAP_DELETE_ELEM:
4960		err = map_delete_elem(&attr, uattr);
4961		break;
4962	case BPF_MAP_GET_NEXT_KEY:
4963		err = map_get_next_key(&attr);
4964		break;
4965	case BPF_MAP_FREEZE:
4966		err = map_freeze(&attr);
4967		break;
4968	case BPF_PROG_LOAD:
4969		err = bpf_prog_load(&attr, uattr);
4970		break;
4971	case BPF_OBJ_PIN:
4972		err = bpf_obj_pin(&attr);
4973		break;
4974	case BPF_OBJ_GET:
4975		err = bpf_obj_get(&attr);
4976		break;
4977	case BPF_PROG_ATTACH:
4978		err = bpf_prog_attach(&attr);
4979		break;
4980	case BPF_PROG_DETACH:
4981		err = bpf_prog_detach(&attr);
4982		break;
4983	case BPF_PROG_QUERY:
4984		err = bpf_prog_query(&attr, uattr.user);
4985		break;
4986	case BPF_PROG_TEST_RUN:
4987		err = bpf_prog_test_run(&attr, uattr.user);
4988		break;
4989	case BPF_PROG_GET_NEXT_ID:
4990		err = bpf_obj_get_next_id(&attr, uattr.user,
4991					  &prog_idr, &prog_idr_lock);
4992		break;
4993	case BPF_MAP_GET_NEXT_ID:
4994		err = bpf_obj_get_next_id(&attr, uattr.user,
4995					  &map_idr, &map_idr_lock);
4996		break;
4997	case BPF_BTF_GET_NEXT_ID:
4998		err = bpf_obj_get_next_id(&attr, uattr.user,
4999					  &btf_idr, &btf_idr_lock);
5000		break;
5001	case BPF_PROG_GET_FD_BY_ID:
5002		err = bpf_prog_get_fd_by_id(&attr);
5003		break;
5004	case BPF_MAP_GET_FD_BY_ID:
5005		err = bpf_map_get_fd_by_id(&attr);
5006		break;
5007	case BPF_OBJ_GET_INFO_BY_FD:
5008		err = bpf_obj_get_info_by_fd(&attr, uattr.user);
5009		break;
5010	case BPF_RAW_TRACEPOINT_OPEN:
5011		err = bpf_raw_tracepoint_open(&attr);
5012		break;
5013	case BPF_BTF_LOAD:
5014		err = bpf_btf_load(&attr, uattr);
5015		break;
5016	case BPF_BTF_GET_FD_BY_ID:
5017		err = bpf_btf_get_fd_by_id(&attr);
5018		break;
5019	case BPF_TASK_FD_QUERY:
5020		err = bpf_task_fd_query(&attr, uattr.user);
5021		break;
5022	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
5023		err = map_lookup_and_delete_elem(&attr);
5024		break;
5025	case BPF_MAP_LOOKUP_BATCH:
5026		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
5027		break;
5028	case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
5029		err = bpf_map_do_batch(&attr, uattr.user,
5030				       BPF_MAP_LOOKUP_AND_DELETE_BATCH);
5031		break;
5032	case BPF_MAP_UPDATE_BATCH:
5033		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
5034		break;
5035	case BPF_MAP_DELETE_BATCH:
5036		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
5037		break;
5038	case BPF_LINK_CREATE:
5039		err = link_create(&attr, uattr);
5040		break;
5041	case BPF_LINK_UPDATE:
5042		err = link_update(&attr);
5043		break;
5044	case BPF_LINK_GET_FD_BY_ID:
5045		err = bpf_link_get_fd_by_id(&attr);
5046		break;
5047	case BPF_LINK_GET_NEXT_ID:
5048		err = bpf_obj_get_next_id(&attr, uattr.user,
5049					  &link_idr, &link_idr_lock);
5050		break;
5051	case BPF_ENABLE_STATS:
5052		err = bpf_enable_stats(&attr);
5053		break;
5054	case BPF_ITER_CREATE:
5055		err = bpf_iter_create(&attr);
5056		break;
5057	case BPF_LINK_DETACH:
5058		err = link_detach(&attr);
5059		break;
5060	case BPF_PROG_BIND_MAP:
5061		err = bpf_prog_bind_map(&attr);
5062		break;
5063	default:
5064		err = -EINVAL;
5065		break;
5066	}
5067
5068	return err;
5069}
5070
5071SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
5072{
5073	return __sys_bpf(cmd, USER_BPFPTR(uattr), size);
5074}
5075
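/*
 * Illustrative user-space sketch (not part of this kernel source): the
 * caller side of the syscall entry point above.  glibc ships no bpf()
 * wrapper, so callers issue the raw syscall; the size argument tells the
 * kernel how much of union bpf_attr the caller knows about, and
 * bpf_check_uarg_tail_zero() validates any trailing bytes beyond that.
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static inline long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}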
5076static bool syscall_prog_is_valid_access(int off, int size,
5077					 enum bpf_access_type type,
5078					 const struct bpf_prog *prog,
5079					 struct bpf_insn_access_aux *info)
5080{
5081	if (off < 0 || off >= U16_MAX)
5082		return false;
5083	if (off % size != 0)
5084		return false;
5085	return true;
5086}
5087
5088BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
5089{
5090	switch (cmd) {
5091	case BPF_MAP_CREATE:
5092	case BPF_MAP_DELETE_ELEM:
5093	case BPF_MAP_UPDATE_ELEM:
5094	case BPF_MAP_FREEZE:
5095	case BPF_MAP_GET_FD_BY_ID:
5096	case BPF_PROG_LOAD:
5097	case BPF_BTF_LOAD:
5098	case BPF_LINK_CREATE:
5099	case BPF_RAW_TRACEPOINT_OPEN:
5100		break;
5101	default:
5102		return -EINVAL;
5103	}
5104	return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
5105}
5106
5107
5108/* To shut up -Wmissing-prototypes.
5109 * This function is used by the kernel light skeleton
5110 * to load bpf programs when modules are loaded or during kernel boot.
5111 * See tools/lib/bpf/skel_internal.h
5112 */
5113int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);
5114
5115int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
5116{
5117	struct bpf_prog * __maybe_unused prog;
5118	struct bpf_tramp_run_ctx __maybe_unused run_ctx;
5119
5120	switch (cmd) {
5121#ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */
5122	case BPF_PROG_TEST_RUN:
5123		if (attr->test.data_in || attr->test.data_out ||
5124		    attr->test.ctx_out || attr->test.duration ||
5125		    attr->test.repeat || attr->test.flags)
5126			return -EINVAL;
5127
5128		prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL);
5129		if (IS_ERR(prog))
5130			return PTR_ERR(prog);
5131
5132		if (attr->test.ctx_size_in < prog->aux->max_ctx_offset ||
5133		    attr->test.ctx_size_in > U16_MAX) {
5134			bpf_prog_put(prog);
5135			return -EINVAL;
5136		}
5137
5138		run_ctx.bpf_cookie = 0;
5139		run_ctx.saved_run_ctx = NULL;
5140		if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
5141			/* recursion detected */
5142			bpf_prog_put(prog);
5143			return -EBUSY;
5144		}
5145		attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
5146		__bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
5147						&run_ctx);
5148		bpf_prog_put(prog);
5149		return 0;
5150#endif
5151	default:
5152		return ____bpf_sys_bpf(cmd, attr, size);
5153	}
5154}
5155EXPORT_SYMBOL(kern_sys_bpf);
5156
5157static const struct bpf_func_proto bpf_sys_bpf_proto = {
5158	.func		= bpf_sys_bpf,
5159	.gpl_only	= false,
5160	.ret_type	= RET_INTEGER,
5161	.arg1_type	= ARG_ANYTHING,
5162	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
5163	.arg3_type	= ARG_CONST_SIZE,
5164};
5165
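/*
 * Illustrative sketch (not part of this kernel source): a minimal
 * BPF_PROG_TYPE_SYSCALL program driving the bpf() syscall from BPF through
 * the bpf_sys_bpf() helper wired up above.  Assumes a libbpf-style build
 * where <bpf/bpf_helpers.h> declares bpf_sys_bpf(); the map parameters are
 * arbitrary example values.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("syscall")
int create_array_map(void *ctx)
{
	union bpf_attr attr = {
		.map_type    = BPF_MAP_TYPE_ARRAY,
		.key_size    = 4,
		.value_size  = 8,
		.max_entries = 1,
	};

	/* On success, returns the new map fd in the task running the program. */
	return bpf_sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}

char LICENSE[] SEC("license") = "GPL";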
5166const struct bpf_func_proto * __weak
5167tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5168{
5169	return bpf_base_func_proto(func_id);
5170}
5171
5172BPF_CALL_1(bpf_sys_close, u32, fd)
5173{
5174	/* When bpf program calls this helper there should not be
5175	 * an fdget() without matching completed fdput().
5176	 * This helper is allowed in the following callchain only:
5177	 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
5178	 */
5179	return close_fd(fd);
5180}
5181
5182static const struct bpf_func_proto bpf_sys_close_proto = {
5183	.func		= bpf_sys_close,
5184	.gpl_only	= false,
5185	.ret_type	= RET_INTEGER,
5186	.arg1_type	= ARG_ANYTHING,
5187};
5188
5189BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
5190{
5191	if (flags)
5192		return -EINVAL;
5193
5194	if (name_sz <= 1 || name[name_sz - 1])
5195		return -EINVAL;
5196
5197	if (!bpf_dump_raw_ok(current_cred()))
5198		return -EPERM;
5199
5200	*res = kallsyms_lookup_name(name);
5201	return *res ? 0 : -ENOENT;
5202}
5203
5204static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
5205	.func		= bpf_kallsyms_lookup_name,
5206	.gpl_only	= false,
5207	.ret_type	= RET_INTEGER,
5208	.arg1_type	= ARG_PTR_TO_MEM,
5209	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
5210	.arg3_type	= ARG_ANYTHING,
5211	.arg4_type	= ARG_PTR_TO_LONG,
5212};
5213
5214static const struct bpf_func_proto *
5215syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5216{
5217	switch (func_id) {
5218	case BPF_FUNC_sys_bpf:
5219		return !perfmon_capable() ? NULL : &bpf_sys_bpf_proto;
5220	case BPF_FUNC_btf_find_by_name_kind:
5221		return &bpf_btf_find_by_name_kind_proto;
5222	case BPF_FUNC_sys_close:
5223		return &bpf_sys_close_proto;
5224	case BPF_FUNC_kallsyms_lookup_name:
5225		return &bpf_kallsyms_lookup_name_proto;
5226	default:
5227		return tracing_prog_func_proto(func_id, prog);
5228	}
5229}
5230
5231const struct bpf_verifier_ops bpf_syscall_verifier_ops = {
5232	.get_func_proto  = syscall_prog_func_proto,
5233	.is_valid_access = syscall_prog_is_valid_access,
5234};
5235
5236const struct bpf_prog_ops bpf_syscall_prog_ops = {
5237	.test_run = bpf_prog_test_run_syscall,
5238};
5239
5240#ifdef CONFIG_SYSCTL
5241static int bpf_stats_handler(struct ctl_table *table, int write,
5242			     void *buffer, size_t *lenp, loff_t *ppos)
5243{
5244	struct static_key *key = (struct static_key *)table->data;
5245	static int saved_val;
5246	int val, ret;
5247	struct ctl_table tmp = {
5248		.data   = &val,
5249		.maxlen = sizeof(val),
5250		.mode   = table->mode,
5251		.extra1 = SYSCTL_ZERO,
5252		.extra2 = SYSCTL_ONE,
5253	};
5254
5255	if (write && !capable(CAP_SYS_ADMIN))
5256		return -EPERM;
5257
5258	mutex_lock(&bpf_stats_enabled_mutex);
5259	val = saved_val;
5260	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
5261	if (write && !ret && val != saved_val) {
5262		if (val)
5263			static_key_slow_inc(key);
5264		else
5265			static_key_slow_dec(key);
5266		saved_val = val;
5267	}
5268	mutex_unlock(&bpf_stats_enabled_mutex);
5269	return ret;
5270}
5271
5272void __weak unpriv_ebpf_notify(int new_state)
5273{
5274}
5275
5276static int bpf_unpriv_handler(struct ctl_table *table, int write,
5277			      void *buffer, size_t *lenp, loff_t *ppos)
5278{
5279	int ret, unpriv_enable = *(int *)table->data;
5280	bool locked_state = unpriv_enable == 1;
5281	struct ctl_table tmp = *table;
5282
5283	if (write && !capable(CAP_SYS_ADMIN))
5284		return -EPERM;
5285
5286	tmp.data = &unpriv_enable;
5287	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
5288	if (write && !ret) {
5289		if (locked_state && unpriv_enable != 1)
5290			return -EPERM;
5291		*(int *)table->data = unpriv_enable;
5292	}
5293
5294	unpriv_ebpf_notify(unpriv_enable);
5295
5296	return ret;
5297}
5298
5299static struct ctl_table bpf_syscall_table[] = {
5300	{
5301		.procname	= "unprivileged_bpf_disabled",
5302		.data		= &sysctl_unprivileged_bpf_disabled,
5303		.maxlen		= sizeof(sysctl_unprivileged_bpf_disabled),
5304		.mode		= 0644,
5305		.proc_handler	= bpf_unpriv_handler,
5306		.extra1		= SYSCTL_ZERO,
5307		.extra2		= SYSCTL_TWO,
5308	},
5309	{
5310		.procname	= "bpf_stats_enabled",
5311		.data		= &bpf_stats_enabled_key.key,
5312		.maxlen		= sizeof(bpf_stats_enabled_key),
5313		.mode		= 0644,
5314		.proc_handler	= bpf_stats_handler,
5315	},
5316	{ }
5317};
5318
5319static int __init bpf_syscall_sysctl_init(void)
5320{
5321	register_sysctl_init("kernel", bpf_syscall_table);
5322	return 0;
5323}
5324late_initcall(bpf_syscall_sysctl_init);
5325#endif /* CONFIG_SYSCTL */
v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   3 */
   4#include <linux/bpf.h>
 
   5#include <linux/bpf_trace.h>
   6#include <linux/bpf_lirc.h>
 
 
   7#include <linux/btf.h>
   8#include <linux/syscalls.h>
   9#include <linux/slab.h>
  10#include <linux/sched/signal.h>
  11#include <linux/vmalloc.h>
  12#include <linux/mmzone.h>
  13#include <linux/anon_inodes.h>
  14#include <linux/fdtable.h>
  15#include <linux/file.h>
  16#include <linux/fs.h>
  17#include <linux/license.h>
  18#include <linux/filter.h>
  19#include <linux/version.h>
  20#include <linux/kernel.h>
  21#include <linux/idr.h>
  22#include <linux/cred.h>
  23#include <linux/timekeeping.h>
  24#include <linux/ctype.h>
  25#include <linux/nospec.h>
  26
  27#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
  28			   (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
  29			   (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
  30			   (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
 
 
 
 
 
 
 
 
 
 
  31#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
  32#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map))
 
  33
  34#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)
  35
  36DEFINE_PER_CPU(int, bpf_prog_active);
  37static DEFINE_IDR(prog_idr);
  38static DEFINE_SPINLOCK(prog_idr_lock);
  39static DEFINE_IDR(map_idr);
  40static DEFINE_SPINLOCK(map_idr_lock);
 
 
  41
  42int sysctl_unprivileged_bpf_disabled __read_mostly;
 
  43
  44static const struct bpf_map_ops * const bpf_map_types[] = {
  45#define BPF_PROG_TYPE(_id, _ops)
  46#define BPF_MAP_TYPE(_id, _ops) \
  47	[_id] = &_ops,
 
  48#include <linux/bpf_types.h>
  49#undef BPF_PROG_TYPE
  50#undef BPF_MAP_TYPE
 
  51};
  52
  53/*
  54 * If we're handed a bigger struct than we know of, ensure all the unknown bits
  55 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
  56 * we don't know about yet.
  57 *
  58 * There is a ToCToU between this function call and the following
  59 * copy_from_user() call. However, this is not a concern since this function is
  60 * meant to be a future-proofing of bits.
  61 */
  62int bpf_check_uarg_tail_zero(void __user *uaddr,
  63			     size_t expected_size,
  64			     size_t actual_size)
  65{
  66	unsigned char __user *addr;
  67	unsigned char __user *end;
  68	unsigned char val;
  69	int err;
  70
  71	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
  72		return -E2BIG;
  73
  74	if (unlikely(!access_ok(uaddr, actual_size)))
  75		return -EFAULT;
  76
  77	if (actual_size <= expected_size)
  78		return 0;
  79
  80	addr = uaddr + expected_size;
  81	end  = uaddr + actual_size;
  82
  83	for (; addr < end; addr++) {
  84		err = get_user(val, addr);
  85		if (err)
  86			return err;
  87		if (val)
  88			return -E2BIG;
  89	}
  90
  91	return 0;
  92}
  93
  94const struct bpf_map_ops bpf_map_offload_ops = {
 
  95	.map_alloc = bpf_map_offload_map_alloc,
  96	.map_free = bpf_map_offload_map_free,
  97	.map_check_btf = map_check_no_btf,
  98};
  99
 100static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 101{
 102	const struct bpf_map_ops *ops;
 103	u32 type = attr->map_type;
 104	struct bpf_map *map;
 105	int err;
 106
 107	if (type >= ARRAY_SIZE(bpf_map_types))
 108		return ERR_PTR(-EINVAL);
 109	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
 110	ops = bpf_map_types[type];
 111	if (!ops)
 112		return ERR_PTR(-EINVAL);
 113
 114	if (ops->map_alloc_check) {
 115		err = ops->map_alloc_check(attr);
 116		if (err)
 117			return ERR_PTR(err);
 118	}
 119	if (attr->map_ifindex)
 120		ops = &bpf_map_offload_ops;
 121	map = ops->map_alloc(attr);
 122	if (IS_ERR(map))
 123		return map;
 124	map->ops = ops;
 125	map->map_type = type;
 126	return map;
 127}
 128
 129void *bpf_map_area_alloc(u64 size, int numa_node)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 130{
 131	/* We really just want to fail instead of triggering OOM killer
 132	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
 133	 * which is used for lower order allocation requests.
 134	 *
 135	 * It has been observed that higher order allocation requests done by
 136	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
 137	 * to reclaim memory from the page cache, thus we set
 138	 * __GFP_RETRY_MAYFAIL to avoid such situations.
 139	 */
 140
 141	const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;
 
 
 142	void *area;
 143
 144	if (size >= SIZE_MAX)
 145		return NULL;
 146
 147	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
 148		area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
 
 
 
 
 
 149				    numa_node);
 150		if (area != NULL)
 151			return area;
 152	}
 153
 154	return __vmalloc_node_flags_caller(size, numa_node,
 155					   GFP_KERNEL | __GFP_RETRY_MAYFAIL |
 156					   flags, __builtin_return_address(0));
 
 
 
 
 
 
 
 
 
 
 157}
 158
 159void bpf_map_area_free(void *area)
 160{
 161	kvfree(area);
 162}
 163
 164static u32 bpf_map_flags_retain_permanent(u32 flags)
 165{
 166	/* Some map creation flags are not tied to the map object but
 167	 * rather to the map fd instead, so they have no meaning upon
 168	 * map object inspection since multiple file descriptors with
 169	 * different (access) properties can exist here. Thus, given
 170	 * this has zero meaning for the map itself, lets clear these
 171	 * from here.
 172	 */
 173	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
 174}
 175
 176void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
 177{
 178	map->map_type = attr->map_type;
 179	map->key_size = attr->key_size;
 180	map->value_size = attr->value_size;
 181	map->max_entries = attr->max_entries;
 182	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
 183	map->numa_node = bpf_map_attr_numa_node(attr);
 184}
 185
 186static int bpf_charge_memlock(struct user_struct *user, u32 pages)
 187{
 188	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 189
 190	if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) {
 191		atomic_long_sub(pages, &user->locked_vm);
 192		return -EPERM;
 193	}
 194	return 0;
 195}
 196
 197static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
 198{
 199	if (user)
 200		atomic_long_sub(pages, &user->locked_vm);
 201}
 202
 203int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
 204{
 205	u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
 206	struct user_struct *user;
 207	int ret;
 208
 209	if (size >= U32_MAX - PAGE_SIZE)
 210		return -E2BIG;
 211
 212	user = get_current_user();
 213	ret = bpf_charge_memlock(user, pages);
 214	if (ret) {
 215		free_uid(user);
 216		return ret;
 217	}
 218
 219	mem->pages = pages;
 220	mem->user = user;
 221
 222	return 0;
 223}
 224
 225void bpf_map_charge_finish(struct bpf_map_memory *mem)
 226{
 227	bpf_uncharge_memlock(mem->user, mem->pages);
 228	free_uid(mem->user);
 229}
 230
 231void bpf_map_charge_move(struct bpf_map_memory *dst,
 232			 struct bpf_map_memory *src)
 233{
 234	*dst = *src;
 235
 236	/* Make sure src will not be used for the redundant uncharging. */
 237	memset(src, 0, sizeof(struct bpf_map_memory));
 238}
 239
 240int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
 241{
 242	int ret;
 243
 244	ret = bpf_charge_memlock(map->memory.user, pages);
 245	if (ret)
 246		return ret;
 247	map->memory.pages += pages;
 248	return ret;
 249}
 250
 251void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
 252{
 253	bpf_uncharge_memlock(map->memory.user, pages);
 254	map->memory.pages -= pages;
 255}
 256
 257static int bpf_map_alloc_id(struct bpf_map *map)
 258{
 259	int id;
 260
 261	idr_preload(GFP_KERNEL);
 262	spin_lock_bh(&map_idr_lock);
 263	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
 264	if (id > 0)
 265		map->id = id;
 266	spin_unlock_bh(&map_idr_lock);
 267	idr_preload_end();
 268
 269	if (WARN_ON_ONCE(!id))
 270		return -ENOSPC;
 271
 272	return id > 0 ? 0 : id;
 273}
 274
 275void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
 276{
 277	unsigned long flags;
 278
 279	/* Offloaded maps are removed from the IDR store when their device
 280	 * disappears - even if someone holds an fd to them they are unusable,
 281	 * the memory is gone, all ops will fail; they are simply waiting for
 282	 * refcnt to drop to be freed.
 283	 */
 284	if (!map->id)
 285		return;
 286
 287	if (do_idr_lock)
 288		spin_lock_irqsave(&map_idr_lock, flags);
 289	else
 290		__acquire(&map_idr_lock);
 291
 292	idr_remove(&map_idr, map->id);
 293	map->id = 0;
 294
 295	if (do_idr_lock)
 296		spin_unlock_irqrestore(&map_idr_lock, flags);
 297	else
 298		__release(&map_idr_lock);
 299}
 300
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 301/* called from workqueue */
 302static void bpf_map_free_deferred(struct work_struct *work)
 303{
 304	struct bpf_map *map = container_of(work, struct bpf_map, work);
 305	struct bpf_map_memory mem;
 
 306
 307	bpf_map_charge_move(&mem, &map->memory);
 308	security_bpf_map_free(map);
 
 309	/* implementation dependent freeing */
 310	map->ops->map_free(map);
 311	bpf_map_charge_finish(&mem);
 
 
 
 
 
 
 
 
 
 
 312}
 313
 314static void bpf_map_put_uref(struct bpf_map *map)
 315{
 316	if (atomic_dec_and_test(&map->usercnt)) {
 317		if (map->ops->map_release_uref)
 318			map->ops->map_release_uref(map);
 319	}
 320}
 321
 322/* decrement map refcnt and schedule it for freeing via workqueue
 323 * (unrelying map implementation ops->map_free() might sleep)
 324 */
 325static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
 326{
 327	if (atomic_dec_and_test(&map->refcnt)) {
 328		/* bpf_map_free_id() must be called first */
 329		bpf_map_free_id(map, do_idr_lock);
 330		btf_put(map->btf);
 331		INIT_WORK(&map->work, bpf_map_free_deferred);
 332		schedule_work(&map->work);
 
 
 
 333	}
 334}
 335
 336void bpf_map_put(struct bpf_map *map)
 337{
 338	__bpf_map_put(map, true);
 339}
 340EXPORT_SYMBOL_GPL(bpf_map_put);
 341
 342void bpf_map_put_with_uref(struct bpf_map *map)
 343{
 344	bpf_map_put_uref(map);
 345	bpf_map_put(map);
 346}
 347
 348static int bpf_map_release(struct inode *inode, struct file *filp)
 349{
 350	struct bpf_map *map = filp->private_data;
 351
 352	if (map->ops->map_release)
 353		map->ops->map_release(map, filp);
 354
 355	bpf_map_put_with_uref(map);
 356	return 0;
 357}
 358
 359static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
 360{
 361	fmode_t mode = f.file->f_mode;
 362
 363	/* Our file permissions may have been overridden by global
 364	 * map permissions facing syscall side.
 365	 */
 366	if (READ_ONCE(map->frozen))
 367		mode &= ~FMODE_CAN_WRITE;
 368	return mode;
 369}
 370
 371#ifdef CONFIG_PROC_FS
 
 
 
 
 
 
 
 
 
 
 
 
 
 372static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
 373{
 374	const struct bpf_map *map = filp->private_data;
 375	const struct bpf_array *array;
 376	u32 owner_prog_type = 0;
 377	u32 owner_jited = 0;
 378
 379	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
 380		array = container_of(map, struct bpf_array, map);
 381		owner_prog_type = array->owner_prog_type;
 382		owner_jited = array->owner_jited;
 383	}
 384
 385	seq_printf(m,
 386		   "map_type:\t%u\n"
 387		   "key_size:\t%u\n"
 388		   "value_size:\t%u\n"
 389		   "max_entries:\t%u\n"
 390		   "map_flags:\t%#x\n"
 391		   "memlock:\t%llu\n"
 
 392		   "map_id:\t%u\n"
 393		   "frozen:\t%u\n",
 394		   map->map_type,
 395		   map->key_size,
 396		   map->value_size,
 397		   map->max_entries,
 398		   map->map_flags,
 399		   map->memory.pages * 1ULL << PAGE_SHIFT,
 
 400		   map->id,
 401		   READ_ONCE(map->frozen));
 402
 403	if (owner_prog_type) {
 404		seq_printf(m, "owner_prog_type:\t%u\n",
 405			   owner_prog_type);
 406		seq_printf(m, "owner_jited:\t%u\n",
 407			   owner_jited);
 408	}
 409}
 410#endif
 411
 412static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
 413			      loff_t *ppos)
 414{
 415	/* We need this handler such that alloc_file() enables
 416	 * f_mode with FMODE_CAN_READ.
 417	 */
 418	return -EINVAL;
 419}
 420
 421static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
 422			       size_t siz, loff_t *ppos)
 423{
 424	/* We need this handler such that alloc_file() enables
 425	 * f_mode with FMODE_CAN_WRITE.
 426	 */
 427	return -EINVAL;
 428}
 429
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 430const struct file_operations bpf_map_fops = {
 431#ifdef CONFIG_PROC_FS
 432	.show_fdinfo	= bpf_map_show_fdinfo,
 433#endif
 434	.release	= bpf_map_release,
 435	.read		= bpf_dummy_read,
 436	.write		= bpf_dummy_write,
 
 
 437};
 438
 439int bpf_map_new_fd(struct bpf_map *map, int flags)
 440{
 441	int ret;
 442
 443	ret = security_bpf_map(map, OPEN_FMODE(flags));
 444	if (ret < 0)
 445		return ret;
 446
 447	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
 448				flags | O_CLOEXEC);
 449}
 450
 451int bpf_get_file_flag(int flags)
 452{
 453	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
 454		return -EINVAL;
 455	if (flags & BPF_F_RDONLY)
 456		return O_RDONLY;
 457	if (flags & BPF_F_WRONLY)
 458		return O_WRONLY;
 459	return O_RDWR;
 460}
 461
 462/* helper macro to check that unused fields 'union bpf_attr' are zero */
 463#define CHECK_ATTR(CMD) \
 464	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
 465		   sizeof(attr->CMD##_LAST_FIELD), 0, \
 466		   sizeof(*attr) - \
 467		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
 468		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
 469
 470/* dst and src must have at least BPF_OBJ_NAME_LEN number of bytes.
 471 * Return 0 on success and < 0 on error.
 472 */
 473static int bpf_obj_name_cpy(char *dst, const char *src)
 474{
 475	const char *end = src + BPF_OBJ_NAME_LEN;
 
 476
 477	memset(dst, 0, BPF_OBJ_NAME_LEN);
 478	/* Copy all isalnum(), '_' and '.' chars. */
 479	while (src < end && *src) {
 480		if (!isalnum(*src) &&
 481		    *src != '_' && *src != '.')
 482			return -EINVAL;
 483		*dst++ = *src++;
 484	}
 485
 486	/* No '\0' found in BPF_OBJ_NAME_LEN number of bytes */
 487	if (src == end)
 488		return -EINVAL;
 489
 490	return 0;
 491}
 492
 493int map_check_no_btf(const struct bpf_map *map,
 494		     const struct btf *btf,
 495		     const struct btf_type *key_type,
 496		     const struct btf_type *value_type)
 497{
 498	return -ENOTSUPP;
 499}
 500
 501static int map_check_btf(struct bpf_map *map, const struct btf *btf,
 502			 u32 btf_key_id, u32 btf_value_id)
 503{
 504	const struct btf_type *key_type, *value_type;
 505	u32 key_size, value_size;
 506	int ret = 0;
 507
 508	/* Some maps allow key to be unspecified. */
 509	if (btf_key_id) {
 510		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
 511		if (!key_type || key_size != map->key_size)
 512			return -EINVAL;
 513	} else {
 514		key_type = btf_type_by_id(btf, 0);
 515		if (!map->ops->map_check_btf)
 516			return -EINVAL;
 517	}
 518
 519	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
 520	if (!value_type || value_size != map->value_size)
 521		return -EINVAL;
 522
 523	map->spin_lock_off = btf_find_spin_lock(btf, value_type);
 524
 525	if (map_value_has_spin_lock(map)) {
 526		if (map->map_flags & BPF_F_RDONLY_PROG)
 527			return -EACCES;
 528		if (map->map_type != BPF_MAP_TYPE_HASH &&
 529		    map->map_type != BPF_MAP_TYPE_ARRAY &&
 530		    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
 531		    map->map_type != BPF_MAP_TYPE_SK_STORAGE)
 532			return -ENOTSUPP;
 533		if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
 534		    map->value_size) {
 535			WARN_ONCE(1,
 536				  "verifier bug spin_lock_off %d value_size %d\n",
 537				  map->spin_lock_off, map->value_size);
 538			return -EFAULT;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 539		}
 540	}
 541
 542	if (map->ops->map_check_btf)
 
 
 
 
 543		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
 
 
 
 544
 545	return ret;
 
 
 
 546}
 547
 548#define BPF_MAP_CREATE_LAST_FIELD btf_value_type_id
 549/* called via syscall */
 550static int map_create(union bpf_attr *attr)
 551{
 552	int numa_node = bpf_map_attr_numa_node(attr);
 553	struct bpf_map_memory mem;
 554	struct bpf_map *map;
 555	int f_flags;
 556	int err;
 557
 558	err = CHECK_ATTR(BPF_MAP_CREATE);
 559	if (err)
 560		return -EINVAL;
 561
 
 
 
 
 
 
 
 
 
 
 
 
 562	f_flags = bpf_get_file_flag(attr->map_flags);
 563	if (f_flags < 0)
 564		return f_flags;
 565
 566	if (numa_node != NUMA_NO_NODE &&
 567	    ((unsigned int)numa_node >= nr_node_ids ||
 568	     !node_online(numa_node)))
 569		return -EINVAL;
 570
 571	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
 572	map = find_and_alloc_map(attr);
 573	if (IS_ERR(map))
 574		return PTR_ERR(map);
 575
 576	err = bpf_obj_name_cpy(map->name, attr->map_name);
 577	if (err)
 578		goto free_map;
 579
 580	atomic_set(&map->refcnt, 1);
 581	atomic_set(&map->usercnt, 1);
 582
 583	if (attr->btf_key_type_id || attr->btf_value_type_id) {
 584		struct btf *btf;
 585
 586		if (!attr->btf_value_type_id) {
 587			err = -EINVAL;
 588			goto free_map;
 589		}
 590
 591		btf = btf_get_by_fd(attr->btf_fd);
 592		if (IS_ERR(btf)) {
 593			err = PTR_ERR(btf);
 594			goto free_map;
 595		}
 596
 597		err = map_check_btf(map, btf, attr->btf_key_type_id,
 598				    attr->btf_value_type_id);
 599		if (err) {
 600			btf_put(btf);
 601			goto free_map;
 602		}
 603
 604		map->btf = btf;
 605		map->btf_key_type_id = attr->btf_key_type_id;
 606		map->btf_value_type_id = attr->btf_value_type_id;
 607	} else {
 608		map->spin_lock_off = -EINVAL;
 609	}
 610
 611	err = security_bpf_map_alloc(map);
 612	if (err)
 613		goto free_map;
 614
 615	err = bpf_map_alloc_id(map);
 616	if (err)
 617		goto free_map_sec;
 618
 619	err = bpf_map_new_fd(map, f_flags);
 620	if (err < 0) {
 621		/* failed to allocate fd.
 622		 * bpf_map_put_with_uref() is needed because the above
 623		 * bpf_map_alloc_id() has published the map
 624		 * to the userspace and the userspace may
 625		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
 626		 */
 627		bpf_map_put_with_uref(map);
 628		return err;
 629	}
 630
 631	return err;
 632
 633free_map_sec:
 634	security_bpf_map_free(map);
 635free_map:
 636	btf_put(map->btf);
 637	bpf_map_charge_move(&mem, &map->memory);
 638	map->ops->map_free(map);
 639	bpf_map_charge_finish(&mem);
 640	return err;
 641}
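/*
 * Illustrative userspace usage (annotation, not in the original file): a
 * minimal BPF_MAP_CREATE call through the raw bpf(2) syscall. The map
 * name and sizes are made-up examples.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.map_type    = BPF_MAP_TYPE_ARRAY;
 *	attr.key_size    = sizeof(__u32);
 *	attr.value_size  = sizeof(__u64);
 *	attr.max_entries = 256;
 *	strncpy(attr.map_name, "example_array", sizeof(attr.map_name) - 1);
 *
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	if (map_fd < 0)
 *		perror("BPF_MAP_CREATE");
 */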
 642
 643/* if error is returned, fd is released.
 644 * On success caller should complete fd access with matching fdput()
 645 */
 646struct bpf_map *__bpf_map_get(struct fd f)
 647{
 648	if (!f.file)
 649		return ERR_PTR(-EBADF);
 650	if (f.file->f_op != &bpf_map_fops) {
 651		fdput(f);
 652		return ERR_PTR(-EINVAL);
 653	}
 654
 655	return f.file->private_data;
 656}
 657
 658/* prog's and map's refcnt limit */
 659#define BPF_MAX_REFCNT 32768
 660
 661struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
 662{
 663	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
 664		atomic_dec(&map->refcnt);
 665		return ERR_PTR(-EBUSY);
 666	}
 667	if (uref)
 668		atomic_inc(&map->usercnt);
 669	return map;
 670}
 671EXPORT_SYMBOL_GPL(bpf_map_inc);
 672
 673struct bpf_map *bpf_map_get_with_uref(u32 ufd)
 674{
 675	struct fd f = fdget(ufd);
 676	struct bpf_map *map;
 677
 678	map = __bpf_map_get(f);
 679	if (IS_ERR(map))
 680		return map;
 681
 682	map = bpf_map_inc(map, true);
 683	fdput(f);
 684
 685	return map;
 686}
 687
 688/* map_idr_lock should have been held */
 689static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map,
 690					      bool uref)
 691{
 692	int refold;
 693
 694	refold = atomic_fetch_add_unless(&map->refcnt, 1, 0);
 695
 696	if (refold >= BPF_MAX_REFCNT) {
 697		__bpf_map_put(map, false);
 698		return ERR_PTR(-EBUSY);
 699	}
 700
 701	if (!refold)
 702		return ERR_PTR(-ENOENT);
 703
 704	if (uref)
 705		atomic_inc(&map->usercnt);
 706
 707	return map;
 708}
 709
 710struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
 711{
 712	spin_lock_bh(&map_idr_lock);
 713	map = __bpf_map_inc_not_zero(map, uref);
 714	spin_unlock_bh(&map_idr_lock);
 715
 716	return map;
 717}
 718EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);
 719
 720int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
 721{
 722	return -ENOTSUPP;
 723}
 724
 725static void *__bpf_copy_key(void __user *ukey, u64 key_size)
 726{
 727	if (key_size)
 728		return memdup_user(ukey, key_size);
 729
 730	if (ukey)
 731		return ERR_PTR(-EINVAL);
 732
 733	return NULL;
 734}
 735
 736/* last field in 'union bpf_attr' used by this command */
 737#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
 738
 739static int map_lookup_elem(union bpf_attr *attr)
 740{
 741	void __user *ukey = u64_to_user_ptr(attr->key);
 742	void __user *uvalue = u64_to_user_ptr(attr->value);
 743	int ufd = attr->map_fd;
 744	struct bpf_map *map;
 745	void *key, *value, *ptr;
 746	u32 value_size;
 747	struct fd f;
 748	int err;
 749
 750	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
 751		return -EINVAL;
 752
 753	if (attr->flags & ~BPF_F_LOCK)
 754		return -EINVAL;
 755
 756	f = fdget(ufd);
 757	map = __bpf_map_get(f);
 758	if (IS_ERR(map))
 759		return PTR_ERR(map);
 760	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
 761		err = -EPERM;
 762		goto err_put;
 763	}
 764
 765	if ((attr->flags & BPF_F_LOCK) &&
 766	    !map_value_has_spin_lock(map)) {
 767		err = -EINVAL;
 768		goto err_put;
 769	}
 770
 771	key = __bpf_copy_key(ukey, map->key_size);
 772	if (IS_ERR(key)) {
 773		err = PTR_ERR(key);
 774		goto err_put;
 775	}
 776
 777	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 778	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
 779	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
 780	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
 781		value_size = round_up(map->value_size, 8) * num_possible_cpus();
 782	else if (IS_FD_MAP(map))
 783		value_size = sizeof(u32);
 784	else
 785		value_size = map->value_size;
 786
 787	err = -ENOMEM;
 788	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
 789	if (!value)
 790		goto free_key;
 791
 792	if (bpf_map_is_dev_bound(map)) {
 793		err = bpf_map_offload_lookup_elem(map, key, value);
 794		goto done;
 795	}
 796
 797	preempt_disable();
 798	this_cpu_inc(bpf_prog_active);
 799	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 800	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 801		err = bpf_percpu_hash_copy(map, key, value);
 802	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 803		err = bpf_percpu_array_copy(map, key, value);
 804	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
 805		err = bpf_percpu_cgroup_storage_copy(map, key, value);
 806	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
 807		err = bpf_stackmap_copy(map, key, value);
 808	} else if (IS_FD_ARRAY(map)) {
 809		err = bpf_fd_array_map_lookup_elem(map, key, value);
 810	} else if (IS_FD_HASH(map)) {
 811		err = bpf_fd_htab_map_lookup_elem(map, key, value);
 812	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
 813		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
 814	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
 815		   map->map_type == BPF_MAP_TYPE_STACK) {
 816		err = map->ops->map_peek_elem(map, value);
 817	} else {
 818		rcu_read_lock();
 819		if (map->ops->map_lookup_elem_sys_only)
 820			ptr = map->ops->map_lookup_elem_sys_only(map, key);
 821		else
 822			ptr = map->ops->map_lookup_elem(map, key);
 823		if (IS_ERR(ptr)) {
 824			err = PTR_ERR(ptr);
 825		} else if (!ptr) {
 826			err = -ENOENT;
 827		} else {
 828			err = 0;
 829			if (attr->flags & BPF_F_LOCK)
 830				/* lock 'ptr' and copy everything but lock */
 831				copy_map_value_locked(map, value, ptr, true);
 832			else
 833				copy_map_value(map, value, ptr);
 834			/* mask lock, since value wasn't zero inited */
 835			check_and_init_map_lock(map, value);
 836		}
 837		rcu_read_unlock();
 838	}
 839	this_cpu_dec(bpf_prog_active);
 840	preempt_enable();
 841
 842done:
 843	if (err)
 844		goto free_value;
 845
 846	err = -EFAULT;
 847	if (copy_to_user(uvalue, value, value_size) != 0)
 848		goto free_value;
 849
 850	err = 0;
 851
 852free_value:
 853	kfree(value);
 854free_key:
 855	kfree(key);
 856err_put:
 857	fdput(f);
 858	return err;
 859}
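/*
 * Illustrative userspace usage (annotation, not in the original file):
 * looking up one element of the hypothetical map_fd created above. For
 * per-CPU map types the value buffer must instead be
 * round_up(value_size, 8) * num_possible_cpus() bytes, mirroring the
 * value_size computation in map_lookup_elem().
 *
 *	union bpf_attr attr = {};
 *	__u32 key = 7;
 *	__u64 value;
 *
 *	attr.map_fd = map_fd;
 *	attr.key    = (__u64)(unsigned long)&key;
 *	attr.value  = (__u64)(unsigned long)&value;
 *
 *	int err = syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 */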
 860
 861static void maybe_wait_bpf_programs(struct bpf_map *map)
 862{
 863	/* Wait for any running BPF programs to complete so that
 864	 * userspace, when we return to it, knows that all programs
 865	 * that could be running use the new map value.
 866	 */
 867	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
 868	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
 869		synchronize_rcu();
 870}
 871
 872#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
 873
 874static int map_update_elem(union bpf_attr *attr)
 875{
 876	void __user *ukey = u64_to_user_ptr(attr->key);
 877	void __user *uvalue = u64_to_user_ptr(attr->value);
 878	int ufd = attr->map_fd;
 879	struct bpf_map *map;
 880	void *key, *value;
 881	u32 value_size;
 882	struct fd f;
 883	int err;
 884
 885	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
 886		return -EINVAL;
 887
 888	f = fdget(ufd);
 889	map = __bpf_map_get(f);
 890	if (IS_ERR(map))
 891		return PTR_ERR(map);
 892	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
 893		err = -EPERM;
 894		goto err_put;
 895	}
 896
 897	if ((attr->flags & BPF_F_LOCK) &&
 898	    !map_value_has_spin_lock(map)) {
 899		err = -EINVAL;
 900		goto err_put;
 901	}
 902
 903	key = __bpf_copy_key(ukey, map->key_size);
 904	if (IS_ERR(key)) {
 905		err = PTR_ERR(key);
 906		goto err_put;
 907	}
 908
 909	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 910	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
 911	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
 912	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
 913		value_size = round_up(map->value_size, 8) * num_possible_cpus();
 914	else
 915		value_size = map->value_size;
 916
 917	err = -ENOMEM;
 918	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
 919	if (!value)
 920		goto free_key;
 921
 922	err = -EFAULT;
 923	if (copy_from_user(value, uvalue, value_size) != 0)
 924		goto free_value;
 925
 926	/* Need to create a kthread, thus must support schedule */
 927	if (bpf_map_is_dev_bound(map)) {
 928		err = bpf_map_offload_update_elem(map, key, value, attr->flags);
 929		goto out;
 930	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
 931		   map->map_type == BPF_MAP_TYPE_SOCKHASH ||
 932		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
 933		err = map->ops->map_update_elem(map, key, value, attr->flags);
 934		goto out;
 935	}
 936
 937	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
 938	 * inside bpf map update or delete, otherwise deadlocks are possible
 939	 */
 940	preempt_disable();
 941	__this_cpu_inc(bpf_prog_active);
 942	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 943	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 944		err = bpf_percpu_hash_update(map, key, value, attr->flags);
 945	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 946		err = bpf_percpu_array_update(map, key, value, attr->flags);
 947	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
 948		err = bpf_percpu_cgroup_storage_update(map, key, value,
 949						       attr->flags);
 950	} else if (IS_FD_ARRAY(map)) {
 951		rcu_read_lock();
 952		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
 953						   attr->flags);
 954		rcu_read_unlock();
 955	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
 956		rcu_read_lock();
 957		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
 958						  attr->flags);
 959		rcu_read_unlock();
 960	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
 961		/* rcu_read_lock() is not needed */
 962		err = bpf_fd_reuseport_array_update_elem(map, key, value,
 963							 attr->flags);
 964	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
 965		   map->map_type == BPF_MAP_TYPE_STACK) {
 966		err = map->ops->map_push_elem(map, value, attr->flags);
 967	} else {
 968		rcu_read_lock();
 969		err = map->ops->map_update_elem(map, key, value, attr->flags);
 970		rcu_read_unlock();
 971	}
 972	__this_cpu_dec(bpf_prog_active);
 973	preempt_enable();
 974	maybe_wait_bpf_programs(map);
 975out:
 976free_value:
 977	kfree(value);
 978free_key:
 979	kfree(key);
 980err_put:
 981	fdput(f);
 982	return err;
 983}
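/*
 * Illustrative userspace usage (annotation, not in the original file):
 * updating the same hypothetical element. attr.flags takes
 * BPF_ANY/BPF_NOEXIST/BPF_EXIST, optionally combined with BPF_F_LOCK
 * when the value contains a struct bpf_spin_lock.
 *
 *	union bpf_attr attr = {};
 *	__u32 key = 7;
 *	__u64 value = 42;
 *
 *	attr.map_fd = map_fd;
 *	attr.key    = (__u64)(unsigned long)&key;
 *	attr.value  = (__u64)(unsigned long)&value;
 *	attr.flags  = BPF_ANY;
 *
 *	int err = syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 */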
 984
 985#define BPF_MAP_DELETE_ELEM_LAST_FIELD key
 986
 987static int map_delete_elem(union bpf_attr *attr)
 988{
 989	void __user *ukey = u64_to_user_ptr(attr->key);
 990	int ufd = attr->map_fd;
 991	struct bpf_map *map;
 992	struct fd f;
 993	void *key;
 994	int err;
 995
 996	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
 997		return -EINVAL;
 998
 999	f = fdget(ufd);
1000	map = __bpf_map_get(f);
1001	if (IS_ERR(map))
1002		return PTR_ERR(map);
1003	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1004		err = -EPERM;
1005		goto err_put;
1006	}
1007
1008	key = __bpf_copy_key(ukey, map->key_size);
1009	if (IS_ERR(key)) {
1010		err = PTR_ERR(key);
1011		goto err_put;
1012	}
1013
1014	if (bpf_map_is_dev_bound(map)) {
1015		err = bpf_map_offload_delete_elem(map, key);
1016		goto out;
1017	}
1018
1019	preempt_disable();
1020	__this_cpu_inc(bpf_prog_active);
1021	rcu_read_lock();
1022	err = map->ops->map_delete_elem(map, key);
1023	rcu_read_unlock();
1024	__this_cpu_dec(bpf_prog_active);
1025	preempt_enable();
1026	maybe_wait_bpf_programs(map);
1027out:
1028	kfree(key);
1029err_put:
1030	fdput(f);
1031	return err;
1032}
1033
1034/* last field in 'union bpf_attr' used by this command */
1035#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
1036
1037static int map_get_next_key(union bpf_attr *attr)
1038{
1039	void __user *ukey = u64_to_user_ptr(attr->key);
1040	void __user *unext_key = u64_to_user_ptr(attr->next_key);
1041	int ufd = attr->map_fd;
1042	struct bpf_map *map;
1043	void *key, *next_key;
1044	struct fd f;
1045	int err;
1046
1047	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
1048		return -EINVAL;
1049
1050	f = fdget(ufd);
1051	map = __bpf_map_get(f);
1052	if (IS_ERR(map))
1053		return PTR_ERR(map);
1054	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1055		err = -EPERM;
1056		goto err_put;
1057	}
1058
1059	if (ukey) {
1060		key = __bpf_copy_key(ukey, map->key_size);
1061		if (IS_ERR(key)) {
1062			err = PTR_ERR(key);
1063			goto err_put;
1064		}
1065	} else {
1066		key = NULL;
1067	}
1068
1069	err = -ENOMEM;
1070	next_key = kmalloc(map->key_size, GFP_USER);
1071	if (!next_key)
1072		goto free_key;
1073
1074	if (bpf_map_is_dev_bound(map)) {
1075		err = bpf_map_offload_get_next_key(map, key, next_key);
1076		goto out;
1077	}
1078
1079	rcu_read_lock();
1080	err = map->ops->map_get_next_key(map, key, next_key);
1081	rcu_read_unlock();
1082out:
1083	if (err)
1084		goto free_next_key;
1085
1086	err = -EFAULT;
1087	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
1088		goto free_next_key;
1089
1090	err = 0;
1091
1092free_next_key:
1093	kfree(next_key);
1094free_key:
1095	kfree(key);
1096err_put:
1097	fdput(f);
1098	return err;
1099}
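/*
 * Illustrative userspace usage (annotation, not in the original file):
 * iterating over all keys by passing a NULL key to fetch the first key
 * and then feeding each returned key back in until the call fails with
 * -ENOENT.
 *
 *	union bpf_attr attr = {};
 *	__u32 key, next_key;
 *	void *kp = NULL;
 *
 *	for (;;) {
 *		attr.map_fd   = map_fd;
 *		attr.key      = (__u64)(unsigned long)kp;
 *		attr.next_key = (__u64)(unsigned long)&next_key;
 *		if (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)))
 *			break;
 *		key = next_key;
 *		kp = &key;
 *	}
 */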
1100
1101#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value
1102
1103static int map_lookup_and_delete_elem(union bpf_attr *attr)
1104{
1105	void __user *ukey = u64_to_user_ptr(attr->key);
1106	void __user *uvalue = u64_to_user_ptr(attr->value);
1107	int ufd = attr->map_fd;
1108	struct bpf_map *map;
1109	void *key, *value;
1110	u32 value_size;
1111	struct fd f;
1112	int err;
1113
1114	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
1115		return -EINVAL;
1116
1117	f = fdget(ufd);
1118	map = __bpf_map_get(f);
1119	if (IS_ERR(map))
1120		return PTR_ERR(map);
1121	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1122		err = -EPERM;
1123		goto err_put;
1124	}
1125
1126	key = __bpf_copy_key(ukey, map->key_size);
1127	if (IS_ERR(key)) {
1128		err = PTR_ERR(key);
1129		goto err_put;
1130	}
1131
1132	value_size = map->value_size;
1133
1134	err = -ENOMEM;
1135	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
1136	if (!value)
1137		goto free_key;
1138
1139	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
1140	    map->map_type == BPF_MAP_TYPE_STACK) {
1141		err = map->ops->map_pop_elem(map, value);
1142	} else {
1143		err = -ENOTSUPP;
1144	}
1145
1146	if (err)
1147		goto free_value;
1148
	err = -EFAULT;
1149	if (copy_to_user(uvalue, value, value_size) != 0)
1150		goto free_value;
1151
1152	err = 0;
1153
1154free_value:
1155	kfree(value);
1156free_key:
1157	kfree(key);
1158err_put:
1159	fdput(f);
1160	return err;
1161}
1162
1163#define BPF_MAP_FREEZE_LAST_FIELD map_fd
1164
1165static int map_freeze(const union bpf_attr *attr)
1166{
1167	int err = 0, ufd = attr->map_fd;
1168	struct bpf_map *map;
1169	struct fd f;
1170
1171	if (CHECK_ATTR(BPF_MAP_FREEZE))
1172		return -EINVAL;
1173
1174	f = fdget(ufd);
1175	map = __bpf_map_get(f);
1176	if (IS_ERR(map))
1177		return PTR_ERR(map);
1178	if (READ_ONCE(map->frozen)) {
1179		err = -EBUSY;
1180		goto err_put;
1181	}
1182	if (!capable(CAP_SYS_ADMIN)) {
1183		err = -EPERM;
1184		goto err_put;
1185	}
1186
1187	WRITE_ONCE(map->frozen, true);
1188err_put:
1189	fdput(f);
1190	return err;
1191}
1192
1193static const struct bpf_prog_ops * const bpf_prog_types[] = {
1194#define BPF_PROG_TYPE(_id, _name) \
1195	[_id] = & _name ## _prog_ops,
1196#define BPF_MAP_TYPE(_id, _ops)
1197#include <linux/bpf_types.h>
1198#undef BPF_PROG_TYPE
1199#undef BPF_MAP_TYPE
1200};
1201
1202static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
1203{
1204	const struct bpf_prog_ops *ops;
1205
1206	if (type >= ARRAY_SIZE(bpf_prog_types))
1207		return -EINVAL;
1208	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
1209	ops = bpf_prog_types[type];
1210	if (!ops)
1211		return -EINVAL;
1212
1213	if (!bpf_prog_is_dev_bound(prog->aux))
1214		prog->aux->ops = ops;
1215	else
1216		prog->aux->ops = &bpf_offload_prog_ops;
1217	prog->type = type;
1218	return 0;
1219}
1220
1221	/* drop refcnt on maps used by eBPF program and free auxiliary data */
1222static void free_used_maps(struct bpf_prog_aux *aux)
1223{
1224	enum bpf_cgroup_storage_type stype;
1225	int i;
1226
1227	for_each_cgroup_storage_type(stype) {
1228		if (!aux->cgroup_storage[stype])
1229			continue;
1230		bpf_cgroup_storage_release(aux->prog,
1231					   aux->cgroup_storage[stype]);
1232	}
1233
1234	for (i = 0; i < aux->used_map_cnt; i++)
1235		bpf_map_put(aux->used_maps[i]);
1236
1237	kfree(aux->used_maps);
1238}
1239
1240int __bpf_prog_charge(struct user_struct *user, u32 pages)
1241{
1242	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1243	unsigned long user_bufs;
1244
1245	if (user) {
1246		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
1247		if (user_bufs > memlock_limit) {
1248			atomic_long_sub(pages, &user->locked_vm);
1249			return -EPERM;
1250		}
1251	}
1252
1253	return 0;
1254}
1255
1256void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
1257{
1258	if (user)
1259		atomic_long_sub(pages, &user->locked_vm);
1260}
1261
1262static int bpf_prog_charge_memlock(struct bpf_prog *prog)
1263{
1264	struct user_struct *user = get_current_user();
1265	int ret;
1266
1267	ret = __bpf_prog_charge(user, prog->pages);
1268	if (ret) {
1269		free_uid(user);
1270		return ret;
1271	}
1272
1273	prog->aux->user = user;
1274	return 0;
1275}
1276
1277static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
1278{
1279	struct user_struct *user = prog->aux->user;
1280
1281	__bpf_prog_uncharge(user, prog->pages);
1282	free_uid(user);
1283}
1284
1285static int bpf_prog_alloc_id(struct bpf_prog *prog)
1286{
1287	int id;
1288
1289	idr_preload(GFP_KERNEL);
1290	spin_lock_bh(&prog_idr_lock);
1291	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
1292	if (id > 0)
1293		prog->aux->id = id;
1294	spin_unlock_bh(&prog_idr_lock);
1295	idr_preload_end();
1296
1297	/* id is in [1, INT_MAX) */
1298	if (WARN_ON_ONCE(!id))
1299		return -ENOSPC;
1300
1301	return id > 0 ? 0 : id;
1302}
1303
1304void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
1305{
1306	/* cBPF to eBPF migrations are currently not in the idr store.
1307	 * Offloaded programs are removed from the store when their device
1308	 * disappears - even if someone grabs an fd to them they are unusable,
1309	 * simply waiting for refcnt to drop to be freed.
1310	 */
1311	if (!prog->aux->id)
1312		return;
1313
1314	if (do_idr_lock)
1315		spin_lock_bh(&prog_idr_lock);
1316	else
1317		__acquire(&prog_idr_lock);
1318
1319	idr_remove(&prog_idr, prog->aux->id);
1320	prog->aux->id = 0;
1321
1322	if (do_idr_lock)
1323		spin_unlock_bh(&prog_idr_lock);
1324	else
1325		__release(&prog_idr_lock);
1326}
1327
1328static void __bpf_prog_put_rcu(struct rcu_head *rcu)
1329{
1330	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
1331
1332	kvfree(aux->func_info);
1333	free_used_maps(aux);
1334	bpf_prog_uncharge_memlock(aux->prog);
1335	security_bpf_prog_free(aux);
1336	bpf_prog_free(aux->prog);
1337}
1338
1339static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
1340{
1341	bpf_prog_kallsyms_del_all(prog);
1342	btf_put(prog->aux->btf);
1343	bpf_prog_free_linfo(prog);
1344
1345	if (deferred)
1346		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
1347	else
1348		__bpf_prog_put_rcu(&prog->aux->rcu);
1349}
1350
1351static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
1352{
1353	if (atomic_dec_and_test(&prog->aux->refcnt)) {
1354		perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
1355		/* bpf_prog_free_id() must be called first */
1356		bpf_prog_free_id(prog, do_idr_lock);
1357		__bpf_prog_put_noref(prog, true);
1358	}
1359}
1360
1361void bpf_prog_put(struct bpf_prog *prog)
1362{
1363	__bpf_prog_put(prog, true);
1364}
1365EXPORT_SYMBOL_GPL(bpf_prog_put);
1366
1367static int bpf_prog_release(struct inode *inode, struct file *filp)
1368{
1369	struct bpf_prog *prog = filp->private_data;
1370
1371	bpf_prog_put(prog);
1372	return 0;
1373}
1374
1375static void bpf_prog_get_stats(const struct bpf_prog *prog,
1376			       struct bpf_prog_stats *stats)
1377{
1378	u64 nsecs = 0, cnt = 0;
1379	int cpu;
1380
1381	for_each_possible_cpu(cpu) {
1382		const struct bpf_prog_stats *st;
1383		unsigned int start;
1384		u64 tnsecs, tcnt;
1385
1386		st = per_cpu_ptr(prog->aux->stats, cpu);
1387		do {
1388			start = u64_stats_fetch_begin_irq(&st->syncp);
1389			tnsecs = st->nsecs;
1390			tcnt = st->cnt;
1391		} while (u64_stats_fetch_retry_irq(&st->syncp, start));
1392		nsecs += tnsecs;
1393		cnt += tcnt;
1394	}
1395	stats->nsecs = nsecs;
1396	stats->cnt = cnt;
1397}
1398
1399#ifdef CONFIG_PROC_FS
1400static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
1401{
1402	const struct bpf_prog *prog = filp->private_data;
1403	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
1404	struct bpf_prog_stats stats;
1405
1406	bpf_prog_get_stats(prog, &stats);
1407	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
1408	seq_printf(m,
1409		   "prog_type:\t%u\n"
1410		   "prog_jited:\t%u\n"
1411		   "prog_tag:\t%s\n"
1412		   "memlock:\t%llu\n"
1413		   "prog_id:\t%u\n"
1414		   "run_time_ns:\t%llu\n"
1415		   "run_cnt:\t%llu\n",
1416		   prog->type,
1417		   prog->jited,
1418		   prog_tag,
1419		   prog->pages * 1ULL << PAGE_SHIFT,
1420		   prog->aux->id,
1421		   stats.nsecs,
1422		   stats.cnt);
1423}
1424#endif
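/*
 * Illustrative output (annotation, not in the original file): with
 * CONFIG_PROC_FS the fdinfo produced above can be read from
 * /proc/<pid>/fdinfo/<prog-fd> and looks roughly like this (all values
 * made up):
 *
 *	prog_type:	1
 *	prog_jited:	1
 *	prog_tag:	a04f5eef06a7f555
 *	memlock:	4096
 *	prog_id:	12
 *	run_time_ns:	83422
 *	run_cnt:	17
 */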
1425
1426const struct file_operations bpf_prog_fops = {
1427#ifdef CONFIG_PROC_FS
1428	.show_fdinfo	= bpf_prog_show_fdinfo,
1429#endif
1430	.release	= bpf_prog_release,
1431	.read		= bpf_dummy_read,
1432	.write		= bpf_dummy_write,
1433};
1434
1435int bpf_prog_new_fd(struct bpf_prog *prog)
1436{
1437	int ret;
1438
1439	ret = security_bpf_prog(prog);
1440	if (ret < 0)
1441		return ret;
1442
1443	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
1444				O_RDWR | O_CLOEXEC);
1445}
1446
1447static struct bpf_prog *____bpf_prog_get(struct fd f)
1448{
1449	if (!f.file)
1450		return ERR_PTR(-EBADF);
1451	if (f.file->f_op != &bpf_prog_fops) {
1452		fdput(f);
1453		return ERR_PTR(-EINVAL);
1454	}
1455
1456	return f.file->private_data;
1457}
1458
1459struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
1460{
1461	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
1462		atomic_sub(i, &prog->aux->refcnt);
1463		return ERR_PTR(-EBUSY);
1464	}
1465	return prog;
1466}
1467EXPORT_SYMBOL_GPL(bpf_prog_add);
1468
1469void bpf_prog_sub(struct bpf_prog *prog, int i)
1470{
1471	/* Only to be used for undoing previous bpf_prog_add() in some
1472	 * error path. We still know that another entity in our call
1473	 * path holds a reference to the program, thus atomic_sub() can
1474	 * be safely used in such cases!
1475	 */
1476	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
1477}
1478EXPORT_SYMBOL_GPL(bpf_prog_sub);
1479
1480struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
1481{
1482	return bpf_prog_add(prog, 1);
1483}
1484EXPORT_SYMBOL_GPL(bpf_prog_inc);
1485
1486/* prog_idr_lock should have been held */
1487struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
1488{
1489	int refold;
1490
1491	refold = atomic_fetch_add_unless(&prog->aux->refcnt, 1, 0);
1492
1493	if (refold >= BPF_MAX_REFCNT) {
1494		__bpf_prog_put(prog, false);
1495		return ERR_PTR(-EBUSY);
1496	}
1497
1498	if (!refold)
1499		return ERR_PTR(-ENOENT);
1500
1501	return prog;
1502}
1503EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
1504
1505bool bpf_prog_get_ok(struct bpf_prog *prog,
1506			    enum bpf_prog_type *attach_type, bool attach_drv)
1507{
1508	/* not an attachment, just a refcount inc, always allow */
1509	if (!attach_type)
1510		return true;
1511
1512	if (prog->type != *attach_type)
1513		return false;
1514	if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
1515		return false;
1516
1517	return true;
1518}
1519
1520static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
1521				       bool attach_drv)
1522{
1523	struct fd f = fdget(ufd);
1524	struct bpf_prog *prog;
1525
1526	prog = ____bpf_prog_get(f);
1527	if (IS_ERR(prog))
1528		return prog;
1529	if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
1530		prog = ERR_PTR(-EINVAL);
1531		goto out;
1532	}
1533
1534	prog = bpf_prog_inc(prog);
1535out:
1536	fdput(f);
1537	return prog;
1538}
1539
1540struct bpf_prog *bpf_prog_get(u32 ufd)
1541{
1542	return __bpf_prog_get(ufd, NULL, false);
1543}
1544
1545struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
1546				       bool attach_drv)
1547{
1548	return __bpf_prog_get(ufd, &type, attach_drv);
1549}
1550EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
1551
1552/* Initially all BPF programs could be loaded w/o specifying
1553 * expected_attach_type. Later for some of them specifying expected_attach_type
1554 * at load time became required so that program could be validated properly.
1555 * Programs of types that are allowed to be loaded both w/ and w/o (for
1556 * backward compatibility) expected_attach_type, should have the default attach
1557 * type assigned to expected_attach_type for the latter case, so that it can be
1558 * validated later at attach time.
1559 *
1560 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
1561 * prog type requires it but has some attach types that have to be backward
1562 * compatible.
1563 */
1564static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
1565{
1566	switch (attr->prog_type) {
1567	case BPF_PROG_TYPE_CGROUP_SOCK:
1568		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
1569		 * exist so checking for non-zero is the way to go here.
1570		 */
1571		if (!attr->expected_attach_type)
1572			attr->expected_attach_type =
1573				BPF_CGROUP_INET_SOCK_CREATE;
1574		break;
1575	}
1576}
1577
1578static int
1579bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type,
1580				enum bpf_attach_type expected_attach_type)
1581{
1582	switch (prog_type) {
1583	case BPF_PROG_TYPE_CGROUP_SOCK:
1584		switch (expected_attach_type) {
1585		case BPF_CGROUP_INET_SOCK_CREATE:
1586		case BPF_CGROUP_INET4_POST_BIND:
1587		case BPF_CGROUP_INET6_POST_BIND:
1588			return 0;
1589		default:
1590			return -EINVAL;
1591		}
1592	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
1593		switch (expected_attach_type) {
1594		case BPF_CGROUP_INET4_BIND:
1595		case BPF_CGROUP_INET6_BIND:
1596		case BPF_CGROUP_INET4_CONNECT:
1597		case BPF_CGROUP_INET6_CONNECT:
1598		case BPF_CGROUP_UDP4_SENDMSG:
1599		case BPF_CGROUP_UDP6_SENDMSG:
1600		case BPF_CGROUP_UDP4_RECVMSG:
1601		case BPF_CGROUP_UDP6_RECVMSG:
1602			return 0;
1603		default:
1604			return -EINVAL;
1605		}
1606	case BPF_PROG_TYPE_CGROUP_SKB:
1607		switch (expected_attach_type) {
1608		case BPF_CGROUP_INET_INGRESS:
1609		case BPF_CGROUP_INET_EGRESS:
1610			return 0;
1611		default:
1612			return -EINVAL;
1613		}
1614	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
1615		switch (expected_attach_type) {
1616		case BPF_CGROUP_SETSOCKOPT:
1617		case BPF_CGROUP_GETSOCKOPT:
1618			return 0;
1619		default:
1620			return -EINVAL;
1621		}
1622	default:
1623		return 0;
1624	}
1625}
1626
1627/* last field in 'union bpf_attr' used by this command */
1628#define	BPF_PROG_LOAD_LAST_FIELD line_info_cnt
1629
1630static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
1631{
1632	enum bpf_prog_type type = attr->prog_type;
1633	struct bpf_prog *prog;
1634	int err;
1635	char license[128];
1636	bool is_gpl;
1637
1638	if (CHECK_ATTR(BPF_PROG_LOAD))
1639		return -EINVAL;
1640
1641	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
1642				 BPF_F_ANY_ALIGNMENT |
1643				 BPF_F_TEST_STATE_FREQ |
1644				 BPF_F_TEST_RND_HI32))
1645		return -EINVAL;
1646
1647	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
1648	    (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
1649	    !capable(CAP_SYS_ADMIN))
1650		return -EPERM;
1651
1652	/* copy eBPF program license from user space */
1653	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
1654			      sizeof(license) - 1) < 0)
1655		return -EFAULT;
1656	license[sizeof(license) - 1] = 0;
1657
1658	/* eBPF programs must be GPL compatible to use GPL-ed functions */
1659	is_gpl = license_is_gpl_compatible(license);
1660
1661	if (attr->insn_cnt == 0 ||
1662	    attr->insn_cnt > (capable(CAP_SYS_ADMIN) ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
1663		return -E2BIG;
1664	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
1665	    type != BPF_PROG_TYPE_CGROUP_SKB &&
1666	    !capable(CAP_SYS_ADMIN))
1667		return -EPERM;
1668
1669	bpf_prog_load_fixup_attach_type(attr);
1670	if (bpf_prog_load_check_attach_type(type, attr->expected_attach_type))
1671		return -EINVAL;
1672
1673	/* plain bpf_prog allocation */
1674	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
1675	if (!prog)
1676		return -ENOMEM;
1677
1678	prog->expected_attach_type = attr->expected_attach_type;
1679
1680	prog->aux->offload_requested = !!attr->prog_ifindex;
1681
1682	err = security_bpf_prog_alloc(prog->aux);
1683	if (err)
1684		goto free_prog_nouncharge;
1685
1686	err = bpf_prog_charge_memlock(prog);
1687	if (err)
1688		goto free_prog_sec;
1689
1690	prog->len = attr->insn_cnt;
1691
1692	err = -EFAULT;
1693	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
1694			   bpf_prog_insn_size(prog)) != 0)
1695		goto free_prog;
1696
1697	prog->orig_prog = NULL;
1698	prog->jited = 0;
1699
1700	atomic_set(&prog->aux->refcnt, 1);
1701	prog->gpl_compatible = is_gpl ? 1 : 0;
1702
1703	if (bpf_prog_is_dev_bound(prog->aux)) {
1704		err = bpf_prog_offload_init(prog, attr);
1705		if (err)
1706			goto free_prog;
1707	}
1708
1709	/* find program type: socket_filter vs tracing_filter */
1710	err = find_prog_type(type, prog);
1711	if (err < 0)
1712		goto free_prog;
1713
1714	prog->aux->load_time = ktime_get_boottime_ns();
1715	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name);
1716	if (err)
1717		goto free_prog;
1718
1719	/* run eBPF verifier */
1720	err = bpf_check(&prog, attr, uattr);
1721	if (err < 0)
1722		goto free_used_maps;
1723
1724	prog = bpf_prog_select_runtime(prog, &err);
1725	if (err < 0)
1726		goto free_used_maps;
1727
1728	err = bpf_prog_alloc_id(prog);
1729	if (err)
1730		goto free_used_maps;
1731
1732	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
1733	 * effectively publicly exposed. However, retrieving via
1734	 * bpf_prog_get_fd_by_id() will take another reference,
1735	 * therefore it cannot be gone underneath us.
1736	 *
1737	 * Only for the time /after/ successful bpf_prog_new_fd()
1738	 * and before returning to userspace, we might just hold
1739	 * one reference and any parallel close on that fd could
1740	 * rip everything out. Hence, below notifications must
1741	 * happen before bpf_prog_new_fd().
1742	 *
1743	 * Also, any failure handling from this point onwards must
1744	 * be using bpf_prog_put() given the program is exposed.
1745	 */
1746	bpf_prog_kallsyms_add(prog);
1747	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
1748
1749	err = bpf_prog_new_fd(prog);
1750	if (err < 0)
1751		bpf_prog_put(prog);
1752	return err;
1753
1754free_used_maps:
1755	/* In case we have subprogs, we need to wait for a grace
1756	 * period before we can tear down JIT memory since symbols
1757	 * are already exposed under kallsyms.
1758	 */
1759	__bpf_prog_put_noref(prog, prog->aux->func_cnt);
1760	return err;
1761free_prog:
1762	bpf_prog_uncharge_memlock(prog);
1763free_prog_sec:
1764	security_bpf_prog_free(prog->aux);
1765free_prog_nouncharge:
1766	bpf_prog_free(prog);
1767	return err;
1768}
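/*
 * Illustrative userspace usage (annotation, not in the original file):
 * loading a trivial two-instruction socket filter ("mov r0, 0; exit")
 * through BPF_PROG_LOAD. A real loader would also pass log_buf/log_size
 * to capture verifier output.
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 *	union bpf_attr attr = {};
 *
 *	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *	attr.insns     = (__u64)(unsigned long)insns;
 *	attr.insn_cnt  = 2;
 *	attr.license   = (__u64)(unsigned long)"GPL";
 *
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */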
1769
1770#define BPF_OBJ_LAST_FIELD file_flags
1771
1772static int bpf_obj_pin(const union bpf_attr *attr)
1773{
1774	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
1775		return -EINVAL;
1776
1777	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
1778}
1779
1780static int bpf_obj_get(const union bpf_attr *attr)
1781{
1782	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
1783	    attr->file_flags & ~BPF_OBJ_FLAG_MASK)
1784		return -EINVAL;
1785
1786	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
1787				attr->file_flags);
1788}
1789
1790struct bpf_raw_tracepoint {
1791	struct bpf_raw_event_map *btp;
1792	struct bpf_prog *prog;
1793};
1794
1795static int bpf_raw_tracepoint_release(struct inode *inode, struct file *filp)
1796{
1797	struct bpf_raw_tracepoint *raw_tp = filp->private_data;
1798
1799	if (raw_tp->prog) {
1800		bpf_probe_unregister(raw_tp->btp, raw_tp->prog);
1801		bpf_prog_put(raw_tp->prog);
1802	}
1803	bpf_put_raw_tracepoint(raw_tp->btp);
1804	kfree(raw_tp);
1805	return 0;
1806}
1807
1808static const struct file_operations bpf_raw_tp_fops = {
1809	.release	= bpf_raw_tracepoint_release,
1810	.read		= bpf_dummy_read,
1811	.write		= bpf_dummy_write,
1812};
1813
1814#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
1815
1816static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
1817{
1818	struct bpf_raw_tracepoint *raw_tp;
1819	struct bpf_raw_event_map *btp;
1820	struct bpf_prog *prog;
1821	char tp_name[128];
1822	int tp_fd, err;
1823
1824	if (strncpy_from_user(tp_name, u64_to_user_ptr(attr->raw_tracepoint.name),
1825			      sizeof(tp_name) - 1) < 0)
1826		return -EFAULT;
1827	tp_name[sizeof(tp_name) - 1] = 0;
1828
1829	btp = bpf_get_raw_tracepoint(tp_name);
1830	if (!btp)
1831		return -ENOENT;
1832
1833	raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER);
1834	if (!raw_tp) {
1835		err = -ENOMEM;
1836		goto out_put_btp;
1837	}
1838	raw_tp->btp = btp;
1839
1840	prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
1841	if (IS_ERR(prog)) {
1842		err = PTR_ERR(prog);
1843		goto out_free_tp;
1844	}
1845	if (prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT &&
1846	    prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE) {
1847		err = -EINVAL;
1848		goto out_put_prog;
1849	}
1850
1851	err = bpf_probe_register(raw_tp->btp, prog);
1852	if (err)
1853		goto out_put_prog;
1854
1855	raw_tp->prog = prog;
1856	tp_fd = anon_inode_getfd("bpf-raw-tracepoint", &bpf_raw_tp_fops, raw_tp,
1857				 O_CLOEXEC);
1858	if (tp_fd < 0) {
1859		bpf_probe_unregister(raw_tp->btp, prog);
1860		err = tp_fd;
1861		goto out_put_prog;
1862	}
1863	return tp_fd;
1864
1865out_put_prog:
1866	bpf_prog_put(prog);
1867out_free_tp:
1868	kfree(raw_tp);
1869out_put_btp:
1870	bpf_put_raw_tracepoint(btp);
1871	return err;
1872}
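/*
 * Illustrative userspace usage (annotation, not in the original file):
 * attaching an already loaded BPF_PROG_TYPE_RAW_TRACEPOINT program
 * (hypothetical prog_fd) to the sched_switch tracepoint. The returned fd
 * keeps both the tracepoint and the program referenced.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.raw_tracepoint.name    = (__u64)(unsigned long)"sched_switch";
 *	attr.raw_tracepoint.prog_fd = prog_fd;
 *
 *	int tp_fd = syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr,
 *			    sizeof(attr));
 */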
1873
1874static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
1875					     enum bpf_attach_type attach_type)
1876{
1877	switch (prog->type) {
1878	case BPF_PROG_TYPE_CGROUP_SOCK:
1879	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
1880	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
1881		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
1882	case BPF_PROG_TYPE_CGROUP_SKB:
1883		return prog->enforce_expected_attach_type &&
1884			prog->expected_attach_type != attach_type ?
1885			-EINVAL : 0;
1886	default:
1887		return 0;
1888	}
1889}
1890
1891#define BPF_PROG_ATTACH_LAST_FIELD attach_flags
1892
1893#define BPF_F_ATTACH_MASK \
1894	(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI)
1895
1896static int bpf_prog_attach(const union bpf_attr *attr)
1897{
1898	enum bpf_prog_type ptype;
1899	struct bpf_prog *prog;
1900	int ret;
1901
1902	if (!capable(CAP_NET_ADMIN))
1903		return -EPERM;
1904
1905	if (CHECK_ATTR(BPF_PROG_ATTACH))
1906		return -EINVAL;
1907
1908	if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
1909		return -EINVAL;
1910
1911	switch (attr->attach_type) {
1912	case BPF_CGROUP_INET_INGRESS:
1913	case BPF_CGROUP_INET_EGRESS:
1914		ptype = BPF_PROG_TYPE_CGROUP_SKB;
1915		break;
1916	case BPF_CGROUP_INET_SOCK_CREATE:
1917	case BPF_CGROUP_INET4_POST_BIND:
1918	case BPF_CGROUP_INET6_POST_BIND:
1919		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
1920		break;
1921	case BPF_CGROUP_INET4_BIND:
1922	case BPF_CGROUP_INET6_BIND:
1923	case BPF_CGROUP_INET4_CONNECT:
1924	case BPF_CGROUP_INET6_CONNECT:
1925	case BPF_CGROUP_UDP4_SENDMSG:
1926	case BPF_CGROUP_UDP6_SENDMSG:
1927	case BPF_CGROUP_UDP4_RECVMSG:
1928	case BPF_CGROUP_UDP6_RECVMSG:
1929		ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
1930		break;
1931	case BPF_CGROUP_SOCK_OPS:
1932		ptype = BPF_PROG_TYPE_SOCK_OPS;
1933		break;
1934	case BPF_CGROUP_DEVICE:
1935		ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
1936		break;
1937	case BPF_SK_MSG_VERDICT:
1938		ptype = BPF_PROG_TYPE_SK_MSG;
1939		break;
1940	case BPF_SK_SKB_STREAM_PARSER:
1941	case BPF_SK_SKB_STREAM_VERDICT:
1942		ptype = BPF_PROG_TYPE_SK_SKB;
1943		break;
1944	case BPF_LIRC_MODE2:
1945		ptype = BPF_PROG_TYPE_LIRC_MODE2;
1946		break;
1947	case BPF_FLOW_DISSECTOR:
1948		ptype = BPF_PROG_TYPE_FLOW_DISSECTOR;
1949		break;
1950	case BPF_CGROUP_SYSCTL:
1951		ptype = BPF_PROG_TYPE_CGROUP_SYSCTL;
1952		break;
1953	case BPF_CGROUP_GETSOCKOPT:
1954	case BPF_CGROUP_SETSOCKOPT:
1955		ptype = BPF_PROG_TYPE_CGROUP_SOCKOPT;
1956		break;
1957	default:
1958		return -EINVAL;
1959	}
1960
1961	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
1962	if (IS_ERR(prog))
1963		return PTR_ERR(prog);
1964
1965	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
1966		bpf_prog_put(prog);
1967		return -EINVAL;
1968	}
1969
1970	switch (ptype) {
1971	case BPF_PROG_TYPE_SK_SKB:
1972	case BPF_PROG_TYPE_SK_MSG:
1973		ret = sock_map_get_from_fd(attr, prog);
1974		break;
1975	case BPF_PROG_TYPE_LIRC_MODE2:
1976		ret = lirc_prog_attach(attr, prog);
1977		break;
1978	case BPF_PROG_TYPE_FLOW_DISSECTOR:
1979		ret = skb_flow_dissector_bpf_prog_attach(attr, prog);
1980		break;
1981	default:
1982		ret = cgroup_bpf_prog_attach(attr, ptype, prog);
1983	}
1984
1985	if (ret)
1986		bpf_prog_put(prog);
1987	return ret;
1988}
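/*
 * Illustrative userspace usage (annotation, not in the original file):
 * attaching a cgroup/skb egress program to a cgroup, which lands in
 * cgroup_bpf_prog_attach() above. cgroup_fd is a hypothetical fd from
 * open()ing a cgroup v2 directory; prog_fd is the loaded program.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd     = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *
 *	int err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */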
1989
1990#define BPF_PROG_DETACH_LAST_FIELD attach_type
1991
1992static int bpf_prog_detach(const union bpf_attr *attr)
1993{
1994	enum bpf_prog_type ptype;
1995
1996	if (!capable(CAP_NET_ADMIN))
1997		return -EPERM;
1998
1999	if (CHECK_ATTR(BPF_PROG_DETACH))
2000		return -EINVAL;
2001
2002	switch (attr->attach_type) {
2003	case BPF_CGROUP_INET_INGRESS:
2004	case BPF_CGROUP_INET_EGRESS:
2005		ptype = BPF_PROG_TYPE_CGROUP_SKB;
2006		break;
2007	case BPF_CGROUP_INET_SOCK_CREATE:
2008	case BPF_CGROUP_INET4_POST_BIND:
2009	case BPF_CGROUP_INET6_POST_BIND:
2010		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
2011		break;
2012	case BPF_CGROUP_INET4_BIND:
2013	case BPF_CGROUP_INET6_BIND:
2014	case BPF_CGROUP_INET4_CONNECT:
2015	case BPF_CGROUP_INET6_CONNECT:
2016	case BPF_CGROUP_UDP4_SENDMSG:
2017	case BPF_CGROUP_UDP6_SENDMSG:
2018	case BPF_CGROUP_UDP4_RECVMSG:
2019	case BPF_CGROUP_UDP6_RECVMSG:
2020		ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
2021		break;
2022	case BPF_CGROUP_SOCK_OPS:
2023		ptype = BPF_PROG_TYPE_SOCK_OPS;
2024		break;
2025	case BPF_CGROUP_DEVICE:
2026		ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
2027		break;
2028	case BPF_SK_MSG_VERDICT:
2029		return sock_map_get_from_fd(attr, NULL);
2030	case BPF_SK_SKB_STREAM_PARSER:
2031	case BPF_SK_SKB_STREAM_VERDICT:
2032		return sock_map_get_from_fd(attr, NULL);
2033	case BPF_LIRC_MODE2:
2034		return lirc_prog_detach(attr);
2035	case BPF_FLOW_DISSECTOR:
2036		return skb_flow_dissector_bpf_prog_detach(attr);
2037	case BPF_CGROUP_SYSCTL:
2038		ptype = BPF_PROG_TYPE_CGROUP_SYSCTL;
2039		break;
2040	case BPF_CGROUP_GETSOCKOPT:
2041	case BPF_CGROUP_SETSOCKOPT:
2042		ptype = BPF_PROG_TYPE_CGROUP_SOCKOPT;
2043		break;
2044	default:
2045		return -EINVAL;
2046	}
2047
2048	return cgroup_bpf_prog_detach(attr, ptype);
2049}
2050
2051#define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
2052
2053static int bpf_prog_query(const union bpf_attr *attr,
2054			  union bpf_attr __user *uattr)
2055{
2056	if (!capable(CAP_NET_ADMIN))
2057		return -EPERM;
2058	if (CHECK_ATTR(BPF_PROG_QUERY))
2059		return -EINVAL;
2060	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
2061		return -EINVAL;
2062
2063	switch (attr->query.attach_type) {
2064	case BPF_CGROUP_INET_INGRESS:
2065	case BPF_CGROUP_INET_EGRESS:
2066	case BPF_CGROUP_INET_SOCK_CREATE:
2067	case BPF_CGROUP_INET4_BIND:
2068	case BPF_CGROUP_INET6_BIND:
2069	case BPF_CGROUP_INET4_POST_BIND:
2070	case BPF_CGROUP_INET6_POST_BIND:
2071	case BPF_CGROUP_INET4_CONNECT:
2072	case BPF_CGROUP_INET6_CONNECT:
2073	case BPF_CGROUP_UDP4_SENDMSG:
2074	case BPF_CGROUP_UDP6_SENDMSG:
2075	case BPF_CGROUP_UDP4_RECVMSG:
2076	case BPF_CGROUP_UDP6_RECVMSG:
2077	case BPF_CGROUP_SOCK_OPS:
2078	case BPF_CGROUP_DEVICE:
2079	case BPF_CGROUP_SYSCTL:
2080	case BPF_CGROUP_GETSOCKOPT:
2081	case BPF_CGROUP_SETSOCKOPT:
2082		break;
2083	case BPF_LIRC_MODE2:
2084		return lirc_prog_query(attr, uattr);
2085	case BPF_FLOW_DISSECTOR:
2086		return skb_flow_dissector_prog_query(attr, uattr);
2087	default:
2088		return -EINVAL;
2089	}
2090
2091	return cgroup_bpf_prog_query(attr, uattr);
2092}
2093
2094#define BPF_PROG_TEST_RUN_LAST_FIELD test.ctx_out
2095
2096static int bpf_prog_test_run(const union bpf_attr *attr,
2097			     union bpf_attr __user *uattr)
2098{
2099	struct bpf_prog *prog;
2100	int ret = -ENOTSUPP;
2101
2102	if (!capable(CAP_SYS_ADMIN))
2103		return -EPERM;
2104	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
2105		return -EINVAL;
2106
2107	if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
2108	    (!attr->test.ctx_size_in && attr->test.ctx_in))
2109		return -EINVAL;
2110
2111	if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
2112	    (!attr->test.ctx_size_out && attr->test.ctx_out))
2113		return -EINVAL;
2114
2115	prog = bpf_prog_get(attr->test.prog_fd);
2116	if (IS_ERR(prog))
2117		return PTR_ERR(prog);
2118
2119	if (prog->aux->ops->test_run)
2120		ret = prog->aux->ops->test_run(prog, attr, uattr);
2121
2122	bpf_prog_put(prog);
2123	return ret;
2124}
2125
2126#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
2127
2128static int bpf_obj_get_next_id(const union bpf_attr *attr,
2129			       union bpf_attr __user *uattr,
2130			       struct idr *idr,
2131			       spinlock_t *lock)
2132{
2133	u32 next_id = attr->start_id;
2134	int err = 0;
2135
2136	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
2137		return -EINVAL;
2138
2139	if (!capable(CAP_SYS_ADMIN))
2140		return -EPERM;
2141
2142	next_id++;
2143	spin_lock_bh(lock);
2144	if (!idr_get_next(idr, &next_id))
2145		err = -ENOENT;
2146	spin_unlock_bh(lock);
2147
2148	if (!err)
2149		err = put_user(next_id, &uattr->next_id);
2150
2151	return err;
2152}
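/*
 * Illustrative userspace usage (annotation, not in the original file):
 * walking all loaded program IDs with BPF_PROG_GET_NEXT_ID (requires
 * CAP_SYS_ADMIN). Each returned id can then be turned into an fd with
 * BPF_PROG_GET_FD_BY_ID below.
 *
 *	union bpf_attr attr = {};
 *	__u32 id;
 *
 *	while (!syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr))) {
 *		id = attr.next_id;
 *		attr.start_id = id;
 *	}
 */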
2153
2154#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
2155
2156static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
2157{
2158	struct bpf_prog *prog;
2159	u32 id = attr->prog_id;
2160	int fd;
2161
2162	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
2163		return -EINVAL;
2164
2165	if (!capable(CAP_SYS_ADMIN))
2166		return -EPERM;
2167
2168	spin_lock_bh(&prog_idr_lock);
2169	prog = idr_find(&prog_idr, id);
2170	if (prog)
2171		prog = bpf_prog_inc_not_zero(prog);
2172	else
2173		prog = ERR_PTR(-ENOENT);
2174	spin_unlock_bh(&prog_idr_lock);
2175
2176	if (IS_ERR(prog))
2177		return PTR_ERR(prog);
2178
2179	fd = bpf_prog_new_fd(prog);
2180	if (fd < 0)
2181		bpf_prog_put(prog);
2182
2183	return fd;
2184}
2185
2186#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
2187
2188static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
2189{
2190	struct bpf_map *map;
2191	u32 id = attr->map_id;
2192	int f_flags;
2193	int fd;
2194
2195	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
2196	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
2197		return -EINVAL;
2198
2199	if (!capable(CAP_SYS_ADMIN))
2200		return -EPERM;
2201
2202	f_flags = bpf_get_file_flag(attr->open_flags);
2203	if (f_flags < 0)
2204		return f_flags;
2205
2206	spin_lock_bh(&map_idr_lock);
2207	map = idr_find(&map_idr, id);
2208	if (map)
2209		map = __bpf_map_inc_not_zero(map, true);
2210	else
2211		map = ERR_PTR(-ENOENT);
2212	spin_unlock_bh(&map_idr_lock);
2213
2214	if (IS_ERR(map))
2215		return PTR_ERR(map);
2216
2217	fd = bpf_map_new_fd(map, f_flags);
2218	if (fd < 0)
2219		bpf_map_put_with_uref(map);
2220
2221	return fd;
2222}
2223
2224static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
2225					      unsigned long addr, u32 *off,
2226					      u32 *type)
2227{
2228	const struct bpf_map *map;
2229	int i;
2230
2231	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
2232		map = prog->aux->used_maps[i];
2233		if (map == (void *)addr) {
2234			*type = BPF_PSEUDO_MAP_FD;
2235			return map;
2236		}
2237		if (!map->ops->map_direct_value_meta)
2238			continue;
2239		if (!map->ops->map_direct_value_meta(map, addr, off)) {
2240			*type = BPF_PSEUDO_MAP_VALUE;
2241			return map;
2242		}
2243	}
2244
2245	return NULL;
2246}
2247
2248static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
2249{
2250	const struct bpf_map *map;
2251	struct bpf_insn *insns;
2252	u32 off, type;
2253	u64 imm;
2254	int i;
2255
2256	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
2257			GFP_USER);
2258	if (!insns)
2259		return insns;
2260
2261	for (i = 0; i < prog->len; i++) {
2262		if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) {
2263			insns[i].code = BPF_JMP | BPF_CALL;
2264			insns[i].imm = BPF_FUNC_tail_call;
2265			/* fall-through */
2266		}
2267		if (insns[i].code == (BPF_JMP | BPF_CALL) ||
2268		    insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
2269			if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
2270				insns[i].code = BPF_JMP | BPF_CALL;
2271			if (!bpf_dump_raw_ok())
2272				insns[i].imm = 0;
2273			continue;
2274		}
2275
2276		if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW))
2277			continue;
2278
2279		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
2280		map = bpf_map_from_imm(prog, imm, &off, &type);
2281		if (map) {
2282			insns[i].src_reg = type;
2283			insns[i].imm = map->id;
2284			insns[i + 1].imm = off;
2285			continue;
2286		}
2287	}
2288
2289	return insns;
2290}
2291
2292static int set_info_rec_size(struct bpf_prog_info *info)
2293{
2294	/*
2295	 * Ensure info.*_rec_size is the same as kernel expected size
2296	 *
2297	 * or
2298	 *
2299	 * Only allow zero *_rec_size if both _rec_size and _cnt are
2300	 * zero.  In this case, the kernel will set the expected
2301	 * _rec_size back to the info.
2302	 */
2303
2304	if ((info->nr_func_info || info->func_info_rec_size) &&
2305	    info->func_info_rec_size != sizeof(struct bpf_func_info))
2306		return -EINVAL;
2307
2308	if ((info->nr_line_info || info->line_info_rec_size) &&
2309	    info->line_info_rec_size != sizeof(struct bpf_line_info))
2310		return -EINVAL;
2311
2312	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
2313	    info->jited_line_info_rec_size != sizeof(__u64))
2314		return -EINVAL;
2315
2316	info->func_info_rec_size = sizeof(struct bpf_func_info);
2317	info->line_info_rec_size = sizeof(struct bpf_line_info);
2318	info->jited_line_info_rec_size = sizeof(__u64);
2319
2320	return 0;
2321}
2322
2323static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
2324				   const union bpf_attr *attr,
2325				   union bpf_attr __user *uattr)
2326{
2327	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
2328	struct bpf_prog_info info = {};
2329	u32 info_len = attr->info.info_len;
2330	struct bpf_prog_stats stats;
2331	char __user *uinsns;
2332	u32 ulen;
2333	int err;
2334
2335	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
2336	if (err)
2337		return err;
2338	info_len = min_t(u32, sizeof(info), info_len);
2339
2340	if (copy_from_user(&info, uinfo, info_len))
2341		return -EFAULT;
2342
2343	info.type = prog->type;
2344	info.id = prog->aux->id;
2345	info.load_time = prog->aux->load_time;
2346	info.created_by_uid = from_kuid_munged(current_user_ns(),
2347					       prog->aux->user->uid);
2348	info.gpl_compatible = prog->gpl_compatible;
2349
2350	memcpy(info.tag, prog->tag, sizeof(prog->tag));
2351	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
2352
2353	ulen = info.nr_map_ids;
2354	info.nr_map_ids = prog->aux->used_map_cnt;
2355	ulen = min_t(u32, info.nr_map_ids, ulen);
2356	if (ulen) {
2357		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
2358		u32 i;
2359
2360		for (i = 0; i < ulen; i++)
2361			if (put_user(prog->aux->used_maps[i]->id,
2362				     &user_map_ids[i]))
2363				return -EFAULT;
2364	}
2365
2366	err = set_info_rec_size(&info);
2367	if (err)
2368		return err;
2369
2370	bpf_prog_get_stats(prog, &stats);
2371	info.run_time_ns = stats.nsecs;
2372	info.run_cnt = stats.cnt;
2373
2374	if (!capable(CAP_SYS_ADMIN)) {
2375		info.jited_prog_len = 0;
2376		info.xlated_prog_len = 0;
2377		info.nr_jited_ksyms = 0;
2378		info.nr_jited_func_lens = 0;
2379		info.nr_func_info = 0;
2380		info.nr_line_info = 0;
2381		info.nr_jited_line_info = 0;
2382		goto done;
2383	}
2384
2385	ulen = info.xlated_prog_len;
2386	info.xlated_prog_len = bpf_prog_insn_size(prog);
2387	if (info.xlated_prog_len && ulen) {
2388		struct bpf_insn *insns_sanitized;
2389		bool fault;
2390
2391		if (prog->blinded && !bpf_dump_raw_ok()) {
2392			info.xlated_prog_insns = 0;
2393			goto done;
2394		}
2395		insns_sanitized = bpf_insn_prepare_dump(prog);
2396		if (!insns_sanitized)
2397			return -ENOMEM;
2398		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
2399		ulen = min_t(u32, info.xlated_prog_len, ulen);
2400		fault = copy_to_user(uinsns, insns_sanitized, ulen);
2401		kfree(insns_sanitized);
2402		if (fault)
2403			return -EFAULT;
2404	}
2405
2406	if (bpf_prog_is_dev_bound(prog->aux)) {
2407		err = bpf_prog_offload_info_fill(&info, prog);
2408		if (err)
2409			return err;
2410		goto done;
2411	}
2412
2413	/* NOTE: the following code is supposed to be skipped for offload.
2414	 * bpf_prog_offload_info_fill() is the place to fill similar fields
2415	 * for offload.
2416	 */
2417	ulen = info.jited_prog_len;
2418	if (prog->aux->func_cnt) {
2419		u32 i;
2420
2421		info.jited_prog_len = 0;
2422		for (i = 0; i < prog->aux->func_cnt; i++)
2423			info.jited_prog_len += prog->aux->func[i]->jited_len;
2424	} else {
2425		info.jited_prog_len = prog->jited_len;
2426	}
2427
2428	if (info.jited_prog_len && ulen) {
2429		if (bpf_dump_raw_ok()) {
2430			uinsns = u64_to_user_ptr(info.jited_prog_insns);
2431			ulen = min_t(u32, info.jited_prog_len, ulen);
2432
2433			/* for multi-function programs, copy the JITed
2434			 * instructions for all the functions
2435			 */
2436			if (prog->aux->func_cnt) {
2437				u32 len, free, i;
2438				u8 *img;
2439
2440				free = ulen;
2441				for (i = 0; i < prog->aux->func_cnt; i++) {
2442					len = prog->aux->func[i]->jited_len;
2443					len = min_t(u32, len, free);
2444					img = (u8 *) prog->aux->func[i]->bpf_func;
2445					if (copy_to_user(uinsns, img, len))
2446						return -EFAULT;
2447					uinsns += len;
2448					free -= len;
2449					if (!free)
2450						break;
2451				}
2452			} else {
2453				if (copy_to_user(uinsns, prog->bpf_func, ulen))
2454					return -EFAULT;
2455			}
2456		} else {
2457			info.jited_prog_insns = 0;
2458		}
2459	}
2460
2461	ulen = info.nr_jited_ksyms;
2462	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
2463	if (ulen) {
2464		if (bpf_dump_raw_ok()) {
2465			unsigned long ksym_addr;
2466			u64 __user *user_ksyms;
2467			u32 i;
2468
2469			/* copy the address of the kernel symbol
2470			 * corresponding to each function
2471			 */
2472			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
2473			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
2474			if (prog->aux->func_cnt) {
2475				for (i = 0; i < ulen; i++) {
2476					ksym_addr = (unsigned long)
2477						prog->aux->func[i]->bpf_func;
2478					if (put_user((u64) ksym_addr,
2479						     &user_ksyms[i]))
2480						return -EFAULT;
2481				}
2482			} else {
2483				ksym_addr = (unsigned long) prog->bpf_func;
2484				if (put_user((u64) ksym_addr, &user_ksyms[0]))
2485					return -EFAULT;
2486			}
2487		} else {
2488			info.jited_ksyms = 0;
2489		}
2490	}
2491
2492	ulen = info.nr_jited_func_lens;
2493	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
2494	if (ulen) {
2495		if (bpf_dump_raw_ok()) {
2496			u32 __user *user_lens;
2497			u32 func_len, i;
2498
2499			/* copy the JITed image lengths for each function */
2500			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
2501			user_lens = u64_to_user_ptr(info.jited_func_lens);
2502			if (prog->aux->func_cnt) {
2503				for (i = 0; i < ulen; i++) {
2504					func_len =
2505						prog->aux->func[i]->jited_len;
2506					if (put_user(func_len, &user_lens[i]))
2507						return -EFAULT;
2508				}
2509			} else {
2510				func_len = prog->jited_len;
2511				if (put_user(func_len, &user_lens[0]))
2512					return -EFAULT;
2513			}
2514		} else {
2515			info.jited_func_lens = 0;
2516		}
2517	}
2518
2519	if (prog->aux->btf)
2520		info.btf_id = btf_id(prog->aux->btf);
2521
2522	ulen = info.nr_func_info;
2523	info.nr_func_info = prog->aux->func_info_cnt;
2524	if (info.nr_func_info && ulen) {
2525		char __user *user_finfo;
2526
2527		user_finfo = u64_to_user_ptr(info.func_info);
2528		ulen = min_t(u32, info.nr_func_info, ulen);
2529		if (copy_to_user(user_finfo, prog->aux->func_info,
2530				 info.func_info_rec_size * ulen))
2531			return -EFAULT;
2532	}
2533
2534	ulen = info.nr_line_info;
2535	info.nr_line_info = prog->aux->nr_linfo;
2536	if (info.nr_line_info && ulen) {
2537		__u8 __user *user_linfo;
2538
2539		user_linfo = u64_to_user_ptr(info.line_info);
2540		ulen = min_t(u32, info.nr_line_info, ulen);
2541		if (copy_to_user(user_linfo, prog->aux->linfo,
2542				 info.line_info_rec_size * ulen))
2543			return -EFAULT;
2544	}
2545
2546	ulen = info.nr_jited_line_info;
2547	if (prog->aux->jited_linfo)
2548		info.nr_jited_line_info = prog->aux->nr_linfo;
2549	else
2550		info.nr_jited_line_info = 0;
2551	if (info.nr_jited_line_info && ulen) {
2552		if (bpf_dump_raw_ok()) {
2553			__u64 __user *user_linfo;
2554			u32 i;
2555
2556			user_linfo = u64_to_user_ptr(info.jited_line_info);
2557			ulen = min_t(u32, info.nr_jited_line_info, ulen);
2558			for (i = 0; i < ulen; i++) {
2559				if (put_user((__u64)(long)prog->aux->jited_linfo[i],
2560					     &user_linfo[i]))
2561					return -EFAULT;
2562			}
2563		} else {
2564			info.jited_line_info = 0;
2565		}
2566	}
2567
2568	ulen = info.nr_prog_tags;
2569	info.nr_prog_tags = prog->aux->func_cnt ? : 1;
2570	if (ulen) {
2571		__u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
2572		u32 i;
2573
2574		user_prog_tags = u64_to_user_ptr(info.prog_tags);
2575		ulen = min_t(u32, info.nr_prog_tags, ulen);
2576		if (prog->aux->func_cnt) {
2577			for (i = 0; i < ulen; i++) {
2578				if (copy_to_user(user_prog_tags[i],
2579						 prog->aux->func[i]->tag,
2580						 BPF_TAG_SIZE))
2581					return -EFAULT;
2582			}
2583		} else {
2584			if (copy_to_user(user_prog_tags[0],
2585					 prog->tag, BPF_TAG_SIZE))
2586				return -EFAULT;
2587		}
2588	}
2589
2590done:
2591	if (copy_to_user(uinfo, &info, info_len) ||
2592	    put_user(info_len, &uattr->info.info_len))
2593		return -EFAULT;
2594
2595	return 0;
2596}
2597
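/*
 * Illustrative userspace sketch (not part of this file): the two-call
 * pattern bpf_prog_get_info_by_fd() above is written for.  The first
 * BPF_OBJ_GET_INFO_BY_FD call only reports sizes such as jited_prog_len;
 * the second call passes buffers of (at least) that size.  The helper name
 * dump_jited_image() is hypothetical; only UAPI types from <linux/bpf.h>
 * are assumed.  Note the kernel clears jited_prog_insns instead of copying
 * when bpf_dump_raw_ok() denies a raw dump.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int dump_jited_image(int prog_fd)
{
	struct bpf_prog_info info = {};
	union bpf_attr attr = {};
	unsigned char *img;
	__u32 len;

	attr.info.bpf_fd = prog_fd;
	attr.info.info_len = sizeof(info);
	attr.info.info = (__u64)(unsigned long)&info;

	/* first call: sizes only, no output buffers provided */
	if (syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
		return -1;

	len = info.jited_prog_len;
	if (!len)
		return 0;	/* not JITed */

	img = malloc(len);
	if (!img)
		return -1;

	/* second call: hand the kernel a buffer of the advertised size */
	memset(&info, 0, sizeof(info));
	info.jited_prog_len = len;
	info.jited_prog_insns = (__u64)(unsigned long)img;
	if (syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr))) {
		free(img);
		return -1;
	}

	printf("prog id %u: %u bytes of JITed code\n",
	       info.id, info.jited_prog_len);
	free(img);
	return 0;
}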
2598static int bpf_map_get_info_by_fd(struct bpf_map *map,
2599				  const union bpf_attr *attr,
2600				  union bpf_attr __user *uattr)
2601{
2602	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
2603	struct bpf_map_info info = {};
2604	u32 info_len = attr->info.info_len;
2605	int err;
2606
2607	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
2608	if (err)
2609		return err;
2610	info_len = min_t(u32, sizeof(info), info_len);
2611
2612	info.type = map->map_type;
2613	info.id = map->id;
2614	info.key_size = map->key_size;
2615	info.value_size = map->value_size;
2616	info.max_entries = map->max_entries;
2617	info.map_flags = map->map_flags;
2618	memcpy(info.name, map->name, sizeof(map->name));
2619
2620	if (map->btf) {
2621		info.btf_id = btf_id(map->btf);
2622		info.btf_key_type_id = map->btf_key_type_id;
2623		info.btf_value_type_id = map->btf_value_type_id;
2624	}
2625
2626	if (bpf_map_is_dev_bound(map)) {
2627		err = bpf_map_offload_info_fill(&info, map);
2628		if (err)
2629			return err;
2630	}
2631
2632	if (copy_to_user(uinfo, &info, info_len) ||
2633	    put_user(info_len, &uattr->info.info_len))
2634		return -EFAULT;
2635
2636	return 0;
2637}
2638
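/*
 * Illustrative userspace sketch (not part of this file): read back the map
 * properties that bpf_map_get_info_by_fd() above fills in.  The helper name
 * print_map_info() is hypothetical; only UAPI types from <linux/bpf.h> are
 * assumed.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int print_map_info(int map_fd)
{
	struct bpf_map_info info = {};
	union bpf_attr attr = {};

	attr.info.bpf_fd = map_fd;
	attr.info.info_len = sizeof(info);
	attr.info.info = (__u64)(unsigned long)&info;

	if (syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
		return -1;

	printf("map '%s': type %u id %u key %u value %u max_entries %u flags 0x%x\n",
	       info.name, info.type, info.id, info.key_size,
	       info.value_size, info.max_entries, info.map_flags);
	return 0;
}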
2639static int bpf_btf_get_info_by_fd(struct btf *btf,
2640				  const union bpf_attr *attr,
2641				  union bpf_attr __user *uattr)
2642{
2643	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
2644	u32 info_len = attr->info.info_len;
2645	int err;
2646
2647	err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len);
2648	if (err)
2649		return err;
2650
2651	return btf_get_info_by_fd(btf, attr, uattr);
2652}
2653
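/*
 * Illustrative userspace sketch (not part of this file): fetching the raw
 * BTF blob behind a BTF fd with the same size-probe-then-copy pattern.
 * bpf_btf_get_info_by_fd() above only validates the trailing bytes of the
 * user struct and delegates to btf_get_info_by_fd(), which reports btf_size
 * and copies at most that many bytes into the user-supplied info.btf
 * buffer.  The helper name fetch_raw_btf() is hypothetical.
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int fetch_raw_btf(int btf_fd, void *buf, unsigned int buf_size)
{
	struct bpf_btf_info info = {};
	union bpf_attr attr = {};

	attr.info.bpf_fd = btf_fd;
	attr.info.info_len = sizeof(info);
	attr.info.info = (__u64)(unsigned long)&info;

	/* first call with info.btf == 0: only btf_size and id come back */
	if (syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
		return -1;
	if (info.btf_size > buf_size)
		return -1;	/* caller's buffer is too small */

	/* second call: info.btf_size still holds the size learned above */
	info.btf = (__u64)(unsigned long)buf;
	return syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
}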
2654#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
2655
2656static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
2657				  union bpf_attr __user *uattr)
2658{
2659	int ufd = attr->info.bpf_fd;
2660	struct fd f;
2661	int err;
2662
2663	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
2664		return -EINVAL;
2665
2666	f = fdget(ufd);
2667	if (!f.file)
2668		return -EBADFD;
2669
2670	if (f.file->f_op == &bpf_prog_fops)
2671		err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
2672					      uattr);
2673	else if (f.file->f_op == &bpf_map_fops)
2674		err = bpf_map_get_info_by_fd(f.file->private_data, attr,
2675					     uattr);
2676	else if (f.file->f_op == &btf_fops)
2677		err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr);
2678	else
2679		err = -EINVAL;
2680
2681	fdput(f);
2682	return err;
2683}
2684
2685#define BPF_BTF_LOAD_LAST_FIELD btf_log_level
2686
2687static int bpf_btf_load(const union bpf_attr *attr)
2688{
2689	if (CHECK_ATTR(BPF_BTF_LOAD))
2690		return -EINVAL;
2691
2692	if (!capable(CAP_SYS_ADMIN))
2693		return -EPERM;
2694
2695	return btf_new_fd(attr);
2696}
2697
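/*
 * Illustrative userspace sketch (not part of this file): loading a raw BTF
 * blob with BPF_BTF_LOAD, which per bpf_btf_load() above requires
 * CAP_SYS_ADMIN and returns a new BTF fd.  The helper name load_btf_blob()
 * is hypothetical; the attribute fields are the UAPI ones ending at
 * btf_log_level (see BPF_BTF_LOAD_LAST_FIELD).
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int load_btf_blob(const void *raw, unsigned int size,
			 char *log_buf, unsigned int log_size)
{
	union bpf_attr attr = {};

	attr.btf = (__u64)(unsigned long)raw;
	attr.btf_size = size;
	if (log_buf) {
		/* ask for a verifier log on failure */
		attr.btf_log_buf = (__u64)(unsigned long)log_buf;
		attr.btf_log_size = log_size;
		attr.btf_log_level = 1;
	}

	/* returns a BTF fd on success, -1 with errno set on failure */
	return syscall(__NR_bpf, BPF_BTF_LOAD, &attr, sizeof(attr));
}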
2698#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
2699
2700static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
2701{
2702	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
2703		return -EINVAL;
2704
2705	if (!capable(CAP_SYS_ADMIN))
2706		return -EPERM;
2707
2708	return btf_get_fd_by_id(attr->btf_id);
2709}
2710
2711static int bpf_task_fd_query_copy(const union bpf_attr *attr,
2712				    union bpf_attr __user *uattr,
2713				    u32 prog_id, u32 fd_type,
2714				    const char *buf, u64 probe_offset,
2715				    u64 probe_addr)
2716{
2717	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
2718	u32 len = buf ? strlen(buf) : 0, input_len;
2719	int err = 0;
2720
2721	if (put_user(len, &uattr->task_fd_query.buf_len))
2722		return -EFAULT;
2723	input_len = attr->task_fd_query.buf_len;
2724	if (input_len && ubuf) {
2725		if (!len) {
2726			/* nothing to copy, just make ubuf NULL terminated */
2727			char zero = '\0';
2728
2729			if (put_user(zero, ubuf))
2730				return -EFAULT;
2731		} else if (input_len >= len + 1) {
2732			/* ubuf can hold the string with NULL terminator */
2733			if (copy_to_user(ubuf, buf, len + 1))
2734				return -EFAULT;
2735		} else {
2736			/* ubuf cannot hold the string with NULL terminator,
2737			 * do a partial copy with NULL terminator.
2738			 */
2739			char zero = '\0';
2740
2741			err = -ENOSPC;
2742			if (copy_to_user(ubuf, buf, input_len - 1))
2743				return -EFAULT;
2744			if (put_user(zero, ubuf + input_len - 1))
2745				return -EFAULT;
2746		}
2747	}
2748
2749	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
2750	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
2751	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
2752	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
2753		return -EFAULT;
2754
2755	return err;
2756}
2757
2758#define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
2759
2760static int bpf_task_fd_query(const union bpf_attr *attr,
2761			     union bpf_attr __user *uattr)
2762{
2763	pid_t pid = attr->task_fd_query.pid;
2764	u32 fd = attr->task_fd_query.fd;
2765	const struct perf_event *event;
2766	struct files_struct *files;
2767	struct task_struct *task;
2768	struct file *file;
2769	int err;
2770
2771	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
2772		return -EINVAL;
2773
2774	if (!capable(CAP_SYS_ADMIN))
2775		return -EPERM;
2776
2777	if (attr->task_fd_query.flags != 0)
2778		return -EINVAL;
2779
2780	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
2781	if (!task)
2782		return -ENOENT;
2783
2784	files = get_files_struct(task);
2785	put_task_struct(task);
2786	if (!files)
2787		return -ENOENT;
2788
2789	err = 0;
2790	spin_lock(&files->file_lock);
2791	file = fcheck_files(files, fd);
2792	if (!file)
2793		err = -EBADF;
2794	else
2795		get_file(file);
2796	spin_unlock(&files->file_lock);
2797	put_files_struct(files);
2798
2799	if (err)
2800		goto out;
2801
2802	if (file->f_op == &bpf_raw_tp_fops) {
2803		struct bpf_raw_tracepoint *raw_tp = file->private_data;
2804		struct bpf_raw_event_map *btp = raw_tp->btp;
2805
2806		err = bpf_task_fd_query_copy(attr, uattr,
2807					     raw_tp->prog->aux->id,
2808					     BPF_FD_TYPE_RAW_TRACEPOINT,
2809					     btp->tp->name, 0, 0);
2810		goto put_file;
2811	}
2812
2813	event = perf_get_event(file);
2814	if (!IS_ERR(event)) {
2815		u64 probe_offset, probe_addr;
2816		u32 prog_id, fd_type;
2817		const char *buf;
2818
2819		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
2820					      &buf, &probe_offset,
2821					      &probe_addr);
2822		if (!err)
2823			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
2824						     fd_type, buf,
2825						     probe_offset,
2826						     probe_addr);
2827		goto put_file;
2828	}
2829
2830	err = -ENOTSUPP;
2831put_file:
2832	fput(file);
2833out:
2834	return err;
2835}
2836
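/*
 * Illustrative userspace sketch (not part of this file): asking which BPF
 * program sits behind a perf_event or raw_tracepoint fd of another task,
 * as implemented by bpf_task_fd_query() above (CAP_SYS_ADMIN required,
 * flags must be 0).  The helper name query_task_fd() is hypothetical; a
 * truncated name is reported via errno (ENOSPC) as in
 * bpf_task_fd_query_copy().
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int query_task_fd(int pid, int target_fd)
{
	union bpf_attr attr = {};
	char name[256];

	attr.task_fd_query.pid = pid;
	attr.task_fd_query.fd = target_fd;
	attr.task_fd_query.buf = (__u64)(unsigned long)name;
	attr.task_fd_query.buf_len = sizeof(name);

	if (syscall(__NR_bpf, BPF_TASK_FD_QUERY, &attr, sizeof(attr)))
		return -1;

	printf("prog id %u, fd_type %u, name '%s', offset 0x%llx, addr 0x%llx\n",
	       attr.task_fd_query.prog_id, attr.task_fd_query.fd_type, name,
	       (unsigned long long)attr.task_fd_query.probe_offset,
	       (unsigned long long)attr.task_fd_query.probe_addr);
	return 0;
}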
2837SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
2838{
2839	union bpf_attr attr = {};
2840	int err;
2841
2842	if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
2843		return -EPERM;
2844
2845	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
2846	if (err)
2847		return err;
2848	size = min_t(u32, size, sizeof(attr));
2849
2850	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
2851	if (copy_from_user(&attr, uattr, size) != 0)
2852		return -EFAULT;
2853
2854	err = security_bpf(cmd, &attr, size);
2855	if (err < 0)
2856		return err;
2857
2858	switch (cmd) {
2859	case BPF_MAP_CREATE:
2860		err = map_create(&attr);
2861		break;
2862	case BPF_MAP_LOOKUP_ELEM:
2863		err = map_lookup_elem(&attr);
2864		break;
2865	case BPF_MAP_UPDATE_ELEM:
2866		err = map_update_elem(&attr);
2867		break;
2868	case BPF_MAP_DELETE_ELEM:
2869		err = map_delete_elem(&attr);
2870		break;
2871	case BPF_MAP_GET_NEXT_KEY:
2872		err = map_get_next_key(&attr);
2873		break;
2874	case BPF_MAP_FREEZE:
2875		err = map_freeze(&attr);
2876		break;
2877	case BPF_PROG_LOAD:
2878		err = bpf_prog_load(&attr, uattr);
2879		break;
2880	case BPF_OBJ_PIN:
2881		err = bpf_obj_pin(&attr);
2882		break;
2883	case BPF_OBJ_GET:
2884		err = bpf_obj_get(&attr);
2885		break;
2886	case BPF_PROG_ATTACH:
2887		err = bpf_prog_attach(&attr);
2888		break;
2889	case BPF_PROG_DETACH:
2890		err = bpf_prog_detach(&attr);
2891		break;
2892	case BPF_PROG_QUERY:
2893		err = bpf_prog_query(&attr, uattr);
2894		break;
2895	case BPF_PROG_TEST_RUN:
2896		err = bpf_prog_test_run(&attr, uattr);
2897		break;
2898	case BPF_PROG_GET_NEXT_ID:
2899		err = bpf_obj_get_next_id(&attr, uattr,
2900					  &prog_idr, &prog_idr_lock);
2901		break;
2902	case BPF_MAP_GET_NEXT_ID:
2903		err = bpf_obj_get_next_id(&attr, uattr,
2904					  &map_idr, &map_idr_lock);
2905		break;
2906	case BPF_BTF_GET_NEXT_ID:
2907		err = bpf_obj_get_next_id(&attr, uattr,
2908					  &btf_idr, &btf_idr_lock);
2909		break;
2910	case BPF_PROG_GET_FD_BY_ID:
2911		err = bpf_prog_get_fd_by_id(&attr);
2912		break;
2913	case BPF_MAP_GET_FD_BY_ID:
2914		err = bpf_map_get_fd_by_id(&attr);
2915		break;
2916	case BPF_OBJ_GET_INFO_BY_FD:
2917		err = bpf_obj_get_info_by_fd(&attr, uattr);
2918		break;
2919	case BPF_RAW_TRACEPOINT_OPEN:
2920		err = bpf_raw_tracepoint_open(&attr);
2921		break;
2922	case BPF_BTF_LOAD:
2923		err = bpf_btf_load(&attr);
2924		break;
2925	case BPF_BTF_GET_FD_BY_ID:
2926		err = bpf_btf_get_fd_by_id(&attr);
2927		break;
2928	case BPF_TASK_FD_QUERY:
2929		err = bpf_task_fd_query(&attr, uattr);
2930		break;
2931	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
2932		err = map_lookup_and_delete_elem(&attr);
2933		break;
2934	default:
2935		err = -EINVAL;
2936		break;
2937	}
2938
2939	return err;
2940}
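/*
 * Illustrative userspace sketch (not part of this file): the bpf(2) entry
 * point above has no dedicated libc wrapper, so callers typically go
 * through syscall(2).  Because the dispatcher runs
 * bpf_check_uarg_tail_zero() before copy_from_user(), a binary built
 * against older headers may pass a smaller union bpf_attr, and a newer one
 * may pass a larger attr as long as the bytes the kernel does not know
 * about are zero.  The wrapper name sys_bpf() is hypothetical.
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			  unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}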