Linux Audio

Check our new training course

Loading...
v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   3 */
   4#include <linux/bpf.h>
   5#include <linux/bpf_trace.h>
   6#include <linux/bpf_lirc.h>
   7#include <linux/btf.h>
   8#include <linux/syscalls.h>
   9#include <linux/slab.h>
  10#include <linux/sched/signal.h>
  11#include <linux/vmalloc.h>
  12#include <linux/mmzone.h>
  13#include <linux/anon_inodes.h>
  14#include <linux/fdtable.h>
  15#include <linux/file.h>
  16#include <linux/fs.h>
  17#include <linux/license.h>
  18#include <linux/filter.h>
  19#include <linux/version.h>
  20#include <linux/kernel.h>
  21#include <linux/idr.h>
  22#include <linux/cred.h>
  23#include <linux/timekeeping.h>
  24#include <linux/ctype.h>
  25#include <linux/nospec.h>
  26
  27#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
  28			   (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
  29			   (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
  30			   (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
 
 
 
 
 
 
  31#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
  32#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map))
 
  33
  34#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)
  35
  36DEFINE_PER_CPU(int, bpf_prog_active);
  37static DEFINE_IDR(prog_idr);
  38static DEFINE_SPINLOCK(prog_idr_lock);
  39static DEFINE_IDR(map_idr);
  40static DEFINE_SPINLOCK(map_idr_lock);
 
 
  41
  42int sysctl_unprivileged_bpf_disabled __read_mostly;
  43
  44static const struct bpf_map_ops * const bpf_map_types[] = {
  45#define BPF_PROG_TYPE(_id, _ops)
  46#define BPF_MAP_TYPE(_id, _ops) \
  47	[_id] = &_ops,
 
  48#include <linux/bpf_types.h>
  49#undef BPF_PROG_TYPE
  50#undef BPF_MAP_TYPE
 
  51};
  52
  53/*
  54 * If we're handed a bigger struct than we know of, ensure all the unknown bits
  55 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
  56 * we don't know about yet.
  57 *
  58 * There is a ToCToU between this function call and the following
  59 * copy_from_user() call. However, this is not a concern since this function is
  60 * meant to be a future-proofing of bits.
  61 */
  62int bpf_check_uarg_tail_zero(void __user *uaddr,
  63			     size_t expected_size,
  64			     size_t actual_size)
  65{
  66	unsigned char __user *addr;
  67	unsigned char __user *end;
  68	unsigned char val;
  69	int err;
  70
  71	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
  72		return -E2BIG;
  73
  74	if (unlikely(!access_ok(uaddr, actual_size)))
  75		return -EFAULT;
  76
  77	if (actual_size <= expected_size)
  78		return 0;
  79
  80	addr = uaddr + expected_size;
  81	end  = uaddr + actual_size;
  82
  83	for (; addr < end; addr++) {
  84		err = get_user(val, addr);
  85		if (err)
  86			return err;
  87		if (val)
  88			return -E2BIG;
  89	}
  90
  91	return 0;
  92}
  93
  94const struct bpf_map_ops bpf_map_offload_ops = {
  95	.map_alloc = bpf_map_offload_map_alloc,
  96	.map_free = bpf_map_offload_map_free,
  97	.map_check_btf = map_check_no_btf,
  98};
  99
 100static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 101{
 102	const struct bpf_map_ops *ops;
 103	u32 type = attr->map_type;
 104	struct bpf_map *map;
 105	int err;
 106
 107	if (type >= ARRAY_SIZE(bpf_map_types))
 108		return ERR_PTR(-EINVAL);
 109	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
 110	ops = bpf_map_types[type];
 111	if (!ops)
 112		return ERR_PTR(-EINVAL);
 113
 114	if (ops->map_alloc_check) {
 115		err = ops->map_alloc_check(attr);
 116		if (err)
 117			return ERR_PTR(err);
 118	}
 119	if (attr->map_ifindex)
 120		ops = &bpf_map_offload_ops;
 121	map = ops->map_alloc(attr);
 122	if (IS_ERR(map))
 123		return map;
 124	map->ops = ops;
 125	map->map_type = type;
 126	return map;
 127}
 128
 129void *bpf_map_area_alloc(u64 size, int numa_node)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 130{
 131	/* We really just want to fail instead of triggering OOM killer
 132	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
 133	 * which is used for lower order allocation requests.
 134	 *
 135	 * It has been observed that higher order allocation requests done by
 136	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
 137	 * to reclaim memory from the page cache, thus we set
 138	 * __GFP_RETRY_MAYFAIL to avoid such situations.
 139	 */
 140
 141	const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;
 
 
 142	void *area;
 143
 144	if (size >= SIZE_MAX)
 145		return NULL;
 146
 147	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
 148		area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
 
 
 
 
 
 149				    numa_node);
 150		if (area != NULL)
 151			return area;
 152	}
 153
 154	return __vmalloc_node_flags_caller(size, numa_node,
 155					   GFP_KERNEL | __GFP_RETRY_MAYFAIL |
 156					   flags, __builtin_return_address(0));
 
 
 
 
 
 
 
 
 
 
 157}
 158
 159void bpf_map_area_free(void *area)
 160{
 161	kvfree(area);
 162}
 163
 164static u32 bpf_map_flags_retain_permanent(u32 flags)
 165{
 166	/* Some map creation flags are not tied to the map object but
 167	 * rather to the map fd instead, so they have no meaning upon
 168	 * map object inspection since multiple file descriptors with
 169	 * different (access) properties can exist here. Thus, given
 170	 * this has zero meaning for the map itself, lets clear these
 171	 * from here.
 172	 */
 173	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
 174}
 175
 176void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
 177{
 178	map->map_type = attr->map_type;
 179	map->key_size = attr->key_size;
 180	map->value_size = attr->value_size;
 181	map->max_entries = attr->max_entries;
 182	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
 183	map->numa_node = bpf_map_attr_numa_node(attr);
 184}
 185
 186static int bpf_charge_memlock(struct user_struct *user, u32 pages)
 187{
 188	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 189
 190	if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) {
 191		atomic_long_sub(pages, &user->locked_vm);
 192		return -EPERM;
 193	}
 194	return 0;
 195}
 196
 197static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
 198{
 199	if (user)
 200		atomic_long_sub(pages, &user->locked_vm);
 201}
 202
 203int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
 204{
 205	u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
 206	struct user_struct *user;
 207	int ret;
 208
 209	if (size >= U32_MAX - PAGE_SIZE)
 210		return -E2BIG;
 211
 212	user = get_current_user();
 213	ret = bpf_charge_memlock(user, pages);
 214	if (ret) {
 215		free_uid(user);
 216		return ret;
 217	}
 218
 219	mem->pages = pages;
 220	mem->user = user;
 221
 222	return 0;
 223}
 224
 225void bpf_map_charge_finish(struct bpf_map_memory *mem)
 226{
 227	bpf_uncharge_memlock(mem->user, mem->pages);
 228	free_uid(mem->user);
 229}
 230
 231void bpf_map_charge_move(struct bpf_map_memory *dst,
 232			 struct bpf_map_memory *src)
 233{
 234	*dst = *src;
 235
 236	/* Make sure src will not be used for the redundant uncharging. */
 237	memset(src, 0, sizeof(struct bpf_map_memory));
 238}
 239
 240int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
 241{
 242	int ret;
 243
 244	ret = bpf_charge_memlock(map->memory.user, pages);
 245	if (ret)
 246		return ret;
 247	map->memory.pages += pages;
 248	return ret;
 249}
 250
 251void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
 252{
 253	bpf_uncharge_memlock(map->memory.user, pages);
 254	map->memory.pages -= pages;
 255}
 256
 257static int bpf_map_alloc_id(struct bpf_map *map)
 258{
 259	int id;
 260
 261	idr_preload(GFP_KERNEL);
 262	spin_lock_bh(&map_idr_lock);
 263	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
 264	if (id > 0)
 265		map->id = id;
 266	spin_unlock_bh(&map_idr_lock);
 267	idr_preload_end();
 268
 269	if (WARN_ON_ONCE(!id))
 270		return -ENOSPC;
 271
 272	return id > 0 ? 0 : id;
 273}
 274
 275void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
 276{
 277	unsigned long flags;
 278
 279	/* Offloaded maps are removed from the IDR store when their device
 280	 * disappears - even if someone holds an fd to them they are unusable,
 281	 * the memory is gone, all ops will fail; they are simply waiting for
 282	 * refcnt to drop to be freed.
 283	 */
 284	if (!map->id)
 285		return;
 286
 287	if (do_idr_lock)
 288		spin_lock_irqsave(&map_idr_lock, flags);
 289	else
 290		__acquire(&map_idr_lock);
 291
 292	idr_remove(&map_idr, map->id);
 293	map->id = 0;
 294
 295	if (do_idr_lock)
 296		spin_unlock_irqrestore(&map_idr_lock, flags);
 297	else
 298		__release(&map_idr_lock);
 299}
 300
 301/* called from workqueue */
 302static void bpf_map_free_deferred(struct work_struct *work)
 303{
 304	struct bpf_map *map = container_of(work, struct bpf_map, work);
 305	struct bpf_map_memory mem;
 306
 307	bpf_map_charge_move(&mem, &map->memory);
 308	security_bpf_map_free(map);
 309	/* implementation dependent freeing */
 310	map->ops->map_free(map);
 311	bpf_map_charge_finish(&mem);
 312}
 313
 314static void bpf_map_put_uref(struct bpf_map *map)
 315{
 316	if (atomic_dec_and_test(&map->usercnt)) {
 317		if (map->ops->map_release_uref)
 318			map->ops->map_release_uref(map);
 319	}
 320}
 321
 322/* decrement map refcnt and schedule it for freeing via workqueue
 323 * (unrelying map implementation ops->map_free() might sleep)
 324 */
 325static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
 326{
 327	if (atomic_dec_and_test(&map->refcnt)) {
 328		/* bpf_map_free_id() must be called first */
 329		bpf_map_free_id(map, do_idr_lock);
 330		btf_put(map->btf);
 331		INIT_WORK(&map->work, bpf_map_free_deferred);
 332		schedule_work(&map->work);
 333	}
 334}
 335
 336void bpf_map_put(struct bpf_map *map)
 337{
 338	__bpf_map_put(map, true);
 339}
 340EXPORT_SYMBOL_GPL(bpf_map_put);
 341
 342void bpf_map_put_with_uref(struct bpf_map *map)
 343{
 344	bpf_map_put_uref(map);
 345	bpf_map_put(map);
 346}
 347
 348static int bpf_map_release(struct inode *inode, struct file *filp)
 349{
 350	struct bpf_map *map = filp->private_data;
 351
 352	if (map->ops->map_release)
 353		map->ops->map_release(map, filp);
 354
 355	bpf_map_put_with_uref(map);
 356	return 0;
 357}
 358
 359static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
 360{
 361	fmode_t mode = f.file->f_mode;
 362
 363	/* Our file permissions may have been overridden by global
 364	 * map permissions facing syscall side.
 365	 */
 366	if (READ_ONCE(map->frozen))
 367		mode &= ~FMODE_CAN_WRITE;
 368	return mode;
 369}
 370
 371#ifdef CONFIG_PROC_FS
 372static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
 373{
 374	const struct bpf_map *map = filp->private_data;
 375	const struct bpf_array *array;
 376	u32 owner_prog_type = 0;
 377	u32 owner_jited = 0;
 378
 379	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
 380		array = container_of(map, struct bpf_array, map);
 381		owner_prog_type = array->owner_prog_type;
 382		owner_jited = array->owner_jited;
 383	}
 384
 385	seq_printf(m,
 386		   "map_type:\t%u\n"
 387		   "key_size:\t%u\n"
 388		   "value_size:\t%u\n"
 389		   "max_entries:\t%u\n"
 390		   "map_flags:\t%#x\n"
 391		   "memlock:\t%llu\n"
 392		   "map_id:\t%u\n"
 393		   "frozen:\t%u\n",
 394		   map->map_type,
 395		   map->key_size,
 396		   map->value_size,
 397		   map->max_entries,
 398		   map->map_flags,
 399		   map->memory.pages * 1ULL << PAGE_SHIFT,
 400		   map->id,
 401		   READ_ONCE(map->frozen));
 402
 403	if (owner_prog_type) {
 404		seq_printf(m, "owner_prog_type:\t%u\n",
 405			   owner_prog_type);
 406		seq_printf(m, "owner_jited:\t%u\n",
 407			   owner_jited);
 408	}
 409}
 410#endif
 411
 412static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
 413			      loff_t *ppos)
 414{
 415	/* We need this handler such that alloc_file() enables
 416	 * f_mode with FMODE_CAN_READ.
 417	 */
 418	return -EINVAL;
 419}
 420
 421static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
 422			       size_t siz, loff_t *ppos)
 423{
 424	/* We need this handler such that alloc_file() enables
 425	 * f_mode with FMODE_CAN_WRITE.
 426	 */
 427	return -EINVAL;
 428}
 429
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 430const struct file_operations bpf_map_fops = {
 431#ifdef CONFIG_PROC_FS
 432	.show_fdinfo	= bpf_map_show_fdinfo,
 433#endif
 434	.release	= bpf_map_release,
 435	.read		= bpf_dummy_read,
 436	.write		= bpf_dummy_write,
 
 
 437};
 438
 439int bpf_map_new_fd(struct bpf_map *map, int flags)
 440{
 441	int ret;
 442
 443	ret = security_bpf_map(map, OPEN_FMODE(flags));
 444	if (ret < 0)
 445		return ret;
 446
 447	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
 448				flags | O_CLOEXEC);
 449}
 450
 451int bpf_get_file_flag(int flags)
 452{
 453	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
 454		return -EINVAL;
 455	if (flags & BPF_F_RDONLY)
 456		return O_RDONLY;
 457	if (flags & BPF_F_WRONLY)
 458		return O_WRONLY;
 459	return O_RDWR;
 460}
 461
 462/* helper macro to check that unused fields 'union bpf_attr' are zero */
 463#define CHECK_ATTR(CMD) \
 464	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
 465		   sizeof(attr->CMD##_LAST_FIELD), 0, \
 466		   sizeof(*attr) - \
 467		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
 468		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
 469
 470/* dst and src must have at least BPF_OBJ_NAME_LEN number of bytes.
 471 * Return 0 on success and < 0 on error.
 472 */
 473static int bpf_obj_name_cpy(char *dst, const char *src)
 474{
 475	const char *end = src + BPF_OBJ_NAME_LEN;
 
 476
 477	memset(dst, 0, BPF_OBJ_NAME_LEN);
 478	/* Copy all isalnum(), '_' and '.' chars. */
 479	while (src < end && *src) {
 480		if (!isalnum(*src) &&
 481		    *src != '_' && *src != '.')
 482			return -EINVAL;
 483		*dst++ = *src++;
 484	}
 485
 486	/* No '\0' found in BPF_OBJ_NAME_LEN number of bytes */
 487	if (src == end)
 488		return -EINVAL;
 489
 490	return 0;
 491}
 492
 493int map_check_no_btf(const struct bpf_map *map,
 494		     const struct btf *btf,
 495		     const struct btf_type *key_type,
 496		     const struct btf_type *value_type)
 497{
 498	return -ENOTSUPP;
 499}
 500
 501static int map_check_btf(struct bpf_map *map, const struct btf *btf,
 502			 u32 btf_key_id, u32 btf_value_id)
 503{
 504	const struct btf_type *key_type, *value_type;
 505	u32 key_size, value_size;
 506	int ret = 0;
 507
 508	/* Some maps allow key to be unspecified. */
 509	if (btf_key_id) {
 510		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
 511		if (!key_type || key_size != map->key_size)
 512			return -EINVAL;
 513	} else {
 514		key_type = btf_type_by_id(btf, 0);
 515		if (!map->ops->map_check_btf)
 516			return -EINVAL;
 517	}
 518
 519	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
 520	if (!value_type || value_size != map->value_size)
 521		return -EINVAL;
 522
 523	map->spin_lock_off = btf_find_spin_lock(btf, value_type);
 524
 525	if (map_value_has_spin_lock(map)) {
 526		if (map->map_flags & BPF_F_RDONLY_PROG)
 527			return -EACCES;
 528		if (map->map_type != BPF_MAP_TYPE_HASH &&
 529		    map->map_type != BPF_MAP_TYPE_ARRAY &&
 530		    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
 531		    map->map_type != BPF_MAP_TYPE_SK_STORAGE)
 532			return -ENOTSUPP;
 533		if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
 534		    map->value_size) {
 535			WARN_ONCE(1,
 536				  "verifier bug spin_lock_off %d value_size %d\n",
 537				  map->spin_lock_off, map->value_size);
 538			return -EFAULT;
 539		}
 540	}
 541
 542	if (map->ops->map_check_btf)
 543		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
 544
 545	return ret;
 546}
 547
 548#define BPF_MAP_CREATE_LAST_FIELD btf_value_type_id
 549/* called via syscall */
 550static int map_create(union bpf_attr *attr)
 551{
 552	int numa_node = bpf_map_attr_numa_node(attr);
 553	struct bpf_map_memory mem;
 554	struct bpf_map *map;
 555	int f_flags;
 556	int err;
 557
 558	err = CHECK_ATTR(BPF_MAP_CREATE);
 559	if (err)
 560		return -EINVAL;
 561
 
 
 
 
 
 
 
 
 562	f_flags = bpf_get_file_flag(attr->map_flags);
 563	if (f_flags < 0)
 564		return f_flags;
 565
 566	if (numa_node != NUMA_NO_NODE &&
 567	    ((unsigned int)numa_node >= nr_node_ids ||
 568	     !node_online(numa_node)))
 569		return -EINVAL;
 570
 571	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
 572	map = find_and_alloc_map(attr);
 573	if (IS_ERR(map))
 574		return PTR_ERR(map);
 575
 576	err = bpf_obj_name_cpy(map->name, attr->map_name);
 577	if (err)
 
 578		goto free_map;
 579
 580	atomic_set(&map->refcnt, 1);
 581	atomic_set(&map->usercnt, 1);
 582
 583	if (attr->btf_key_type_id || attr->btf_value_type_id) {
 
 
 
 
 
 
 
 
 
 584		struct btf *btf;
 585
 586		if (!attr->btf_value_type_id) {
 587			err = -EINVAL;
 588			goto free_map;
 589		}
 590
 591		btf = btf_get_by_fd(attr->btf_fd);
 592		if (IS_ERR(btf)) {
 593			err = PTR_ERR(btf);
 594			goto free_map;
 595		}
 
 596
 597		err = map_check_btf(map, btf, attr->btf_key_type_id,
 598				    attr->btf_value_type_id);
 599		if (err) {
 600			btf_put(btf);
 601			goto free_map;
 602		}
 603
 604		map->btf = btf;
 605		map->btf_key_type_id = attr->btf_key_type_id;
 606		map->btf_value_type_id = attr->btf_value_type_id;
 607	} else {
 608		map->spin_lock_off = -EINVAL;
 609	}
 610
 611	err = security_bpf_map_alloc(map);
 612	if (err)
 613		goto free_map;
 614
 615	err = bpf_map_alloc_id(map);
 616	if (err)
 617		goto free_map_sec;
 618
 619	err = bpf_map_new_fd(map, f_flags);
 620	if (err < 0) {
 621		/* failed to allocate fd.
 622		 * bpf_map_put_with_uref() is needed because the above
 623		 * bpf_map_alloc_id() has published the map
 624		 * to the userspace and the userspace may
 625		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
 626		 */
 627		bpf_map_put_with_uref(map);
 628		return err;
 629	}
 630
 631	return err;
 632
 633free_map_sec:
 634	security_bpf_map_free(map);
 635free_map:
 636	btf_put(map->btf);
 637	bpf_map_charge_move(&mem, &map->memory);
 638	map->ops->map_free(map);
 639	bpf_map_charge_finish(&mem);
 640	return err;
 641}
 642
 643/* if error is returned, fd is released.
 644 * On success caller should complete fd access with matching fdput()
 645 */
 646struct bpf_map *__bpf_map_get(struct fd f)
 647{
 648	if (!f.file)
 649		return ERR_PTR(-EBADF);
 650	if (f.file->f_op != &bpf_map_fops) {
 651		fdput(f);
 652		return ERR_PTR(-EINVAL);
 653	}
 654
 655	return f.file->private_data;
 656}
 657
 658/* prog's and map's refcnt limit */
 659#define BPF_MAX_REFCNT 32768
 
 
 
 
 
 
 
 
 
 
 660
 661struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
 662{
 663	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
 664		atomic_dec(&map->refcnt);
 665		return ERR_PTR(-EBUSY);
 666	}
 667	if (uref)
 668		atomic_inc(&map->usercnt);
 
 
 
 
 669	return map;
 670}
 671EXPORT_SYMBOL_GPL(bpf_map_inc);
 672
 673struct bpf_map *bpf_map_get_with_uref(u32 ufd)
 674{
 675	struct fd f = fdget(ufd);
 676	struct bpf_map *map;
 677
 678	map = __bpf_map_get(f);
 679	if (IS_ERR(map))
 680		return map;
 681
 682	map = bpf_map_inc(map, true);
 683	fdput(f);
 684
 685	return map;
 686}
 687
 688/* map_idr_lock should have been held */
 689static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map,
 690					      bool uref)
 691{
 692	int refold;
 693
 694	refold = atomic_fetch_add_unless(&map->refcnt, 1, 0);
 695
 696	if (refold >= BPF_MAX_REFCNT) {
 697		__bpf_map_put(map, false);
 698		return ERR_PTR(-EBUSY);
 699	}
 700
 701	if (!refold)
 702		return ERR_PTR(-ENOENT);
 703
 704	if (uref)
 705		atomic_inc(&map->usercnt);
 706
 707	return map;
 708}
 709
 710struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
 711{
 712	spin_lock_bh(&map_idr_lock);
 713	map = __bpf_map_inc_not_zero(map, uref);
 714	spin_unlock_bh(&map_idr_lock);
 715
 716	return map;
 717}
 718EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);
 719
 720int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
 721{
 722	return -ENOTSUPP;
 723}
 724
 725static void *__bpf_copy_key(void __user *ukey, u64 key_size)
 726{
 727	if (key_size)
 728		return memdup_user(ukey, key_size);
 729
 730	if (ukey)
 731		return ERR_PTR(-EINVAL);
 732
 733	return NULL;
 734}
 735
 736/* last field in 'union bpf_attr' used by this command */
 737#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
 738
 739static int map_lookup_elem(union bpf_attr *attr)
 740{
 741	void __user *ukey = u64_to_user_ptr(attr->key);
 742	void __user *uvalue = u64_to_user_ptr(attr->value);
 743	int ufd = attr->map_fd;
 744	struct bpf_map *map;
 745	void *key, *value, *ptr;
 746	u32 value_size;
 747	struct fd f;
 748	int err;
 749
 750	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
 751		return -EINVAL;
 752
 753	if (attr->flags & ~BPF_F_LOCK)
 754		return -EINVAL;
 755
 756	f = fdget(ufd);
 757	map = __bpf_map_get(f);
 758	if (IS_ERR(map))
 759		return PTR_ERR(map);
 760	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
 761		err = -EPERM;
 762		goto err_put;
 763	}
 764
 765	if ((attr->flags & BPF_F_LOCK) &&
 766	    !map_value_has_spin_lock(map)) {
 767		err = -EINVAL;
 768		goto err_put;
 769	}
 770
 771	key = __bpf_copy_key(ukey, map->key_size);
 772	if (IS_ERR(key)) {
 773		err = PTR_ERR(key);
 774		goto err_put;
 775	}
 776
 777	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 778	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
 779	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
 780	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
 781		value_size = round_up(map->value_size, 8) * num_possible_cpus();
 782	else if (IS_FD_MAP(map))
 783		value_size = sizeof(u32);
 784	else
 785		value_size = map->value_size;
 786
 787	err = -ENOMEM;
 788	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
 789	if (!value)
 790		goto free_key;
 791
 792	if (bpf_map_is_dev_bound(map)) {
 793		err = bpf_map_offload_lookup_elem(map, key, value);
 794		goto done;
 795	}
 796
 797	preempt_disable();
 798	this_cpu_inc(bpf_prog_active);
 799	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 800	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 801		err = bpf_percpu_hash_copy(map, key, value);
 802	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 803		err = bpf_percpu_array_copy(map, key, value);
 804	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
 805		err = bpf_percpu_cgroup_storage_copy(map, key, value);
 806	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
 807		err = bpf_stackmap_copy(map, key, value);
 808	} else if (IS_FD_ARRAY(map)) {
 809		err = bpf_fd_array_map_lookup_elem(map, key, value);
 810	} else if (IS_FD_HASH(map)) {
 811		err = bpf_fd_htab_map_lookup_elem(map, key, value);
 812	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
 813		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
 814	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
 815		   map->map_type == BPF_MAP_TYPE_STACK) {
 816		err = map->ops->map_peek_elem(map, value);
 817	} else {
 818		rcu_read_lock();
 819		if (map->ops->map_lookup_elem_sys_only)
 820			ptr = map->ops->map_lookup_elem_sys_only(map, key);
 821		else
 822			ptr = map->ops->map_lookup_elem(map, key);
 823		if (IS_ERR(ptr)) {
 824			err = PTR_ERR(ptr);
 825		} else if (!ptr) {
 826			err = -ENOENT;
 827		} else {
 828			err = 0;
 829			if (attr->flags & BPF_F_LOCK)
 830				/* lock 'ptr' and copy everything but lock */
 831				copy_map_value_locked(map, value, ptr, true);
 832			else
 833				copy_map_value(map, value, ptr);
 834			/* mask lock, since value wasn't zero inited */
 835			check_and_init_map_lock(map, value);
 836		}
 837		rcu_read_unlock();
 838	}
 839	this_cpu_dec(bpf_prog_active);
 840	preempt_enable();
 841
 842done:
 843	if (err)
 844		goto free_value;
 845
 846	err = -EFAULT;
 847	if (copy_to_user(uvalue, value, value_size) != 0)
 848		goto free_value;
 849
 850	err = 0;
 851
 852free_value:
 853	kfree(value);
 854free_key:
 855	kfree(key);
 856err_put:
 857	fdput(f);
 858	return err;
 859}
 860
 861static void maybe_wait_bpf_programs(struct bpf_map *map)
 862{
 863	/* Wait for any running BPF programs to complete so that
 864	 * userspace, when we return to it, knows that all programs
 865	 * that could be running use the new map value.
 866	 */
 867	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
 868	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
 869		synchronize_rcu();
 870}
 871
 872#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
 873
 874static int map_update_elem(union bpf_attr *attr)
 875{
 876	void __user *ukey = u64_to_user_ptr(attr->key);
 877	void __user *uvalue = u64_to_user_ptr(attr->value);
 878	int ufd = attr->map_fd;
 879	struct bpf_map *map;
 880	void *key, *value;
 881	u32 value_size;
 882	struct fd f;
 883	int err;
 884
 885	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
 886		return -EINVAL;
 887
 888	f = fdget(ufd);
 889	map = __bpf_map_get(f);
 890	if (IS_ERR(map))
 891		return PTR_ERR(map);
 892	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
 893		err = -EPERM;
 894		goto err_put;
 895	}
 896
 897	if ((attr->flags & BPF_F_LOCK) &&
 898	    !map_value_has_spin_lock(map)) {
 899		err = -EINVAL;
 900		goto err_put;
 901	}
 902
 903	key = __bpf_copy_key(ukey, map->key_size);
 904	if (IS_ERR(key)) {
 905		err = PTR_ERR(key);
 906		goto err_put;
 907	}
 908
 909	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 910	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
 911	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
 912	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
 913		value_size = round_up(map->value_size, 8) * num_possible_cpus();
 914	else
 915		value_size = map->value_size;
 916
 917	err = -ENOMEM;
 918	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
 919	if (!value)
 920		goto free_key;
 921
 922	err = -EFAULT;
 923	if (copy_from_user(value, uvalue, value_size) != 0)
 924		goto free_value;
 925
 926	/* Need to create a kthread, thus must support schedule */
 927	if (bpf_map_is_dev_bound(map)) {
 928		err = bpf_map_offload_update_elem(map, key, value, attr->flags);
 929		goto out;
 930	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
 931		   map->map_type == BPF_MAP_TYPE_SOCKHASH ||
 932		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
 933		err = map->ops->map_update_elem(map, key, value, attr->flags);
 934		goto out;
 935	}
 936
 937	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
 938	 * inside bpf map update or delete otherwise deadlocks are possible
 939	 */
 940	preempt_disable();
 941	__this_cpu_inc(bpf_prog_active);
 942	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 943	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 944		err = bpf_percpu_hash_update(map, key, value, attr->flags);
 945	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 946		err = bpf_percpu_array_update(map, key, value, attr->flags);
 947	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
 948		err = bpf_percpu_cgroup_storage_update(map, key, value,
 949						       attr->flags);
 950	} else if (IS_FD_ARRAY(map)) {
 951		rcu_read_lock();
 952		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
 953						   attr->flags);
 954		rcu_read_unlock();
 955	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
 956		rcu_read_lock();
 957		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
 958						  attr->flags);
 959		rcu_read_unlock();
 960	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
 961		/* rcu_read_lock() is not needed */
 962		err = bpf_fd_reuseport_array_update_elem(map, key, value,
 963							 attr->flags);
 964	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
 965		   map->map_type == BPF_MAP_TYPE_STACK) {
 966		err = map->ops->map_push_elem(map, value, attr->flags);
 967	} else {
 968		rcu_read_lock();
 969		err = map->ops->map_update_elem(map, key, value, attr->flags);
 970		rcu_read_unlock();
 971	}
 972	__this_cpu_dec(bpf_prog_active);
 973	preempt_enable();
 974	maybe_wait_bpf_programs(map);
 975out:
 976free_value:
 977	kfree(value);
 978free_key:
 979	kfree(key);
 980err_put:
 981	fdput(f);
 982	return err;
 983}
 984
 985#define BPF_MAP_DELETE_ELEM_LAST_FIELD key
 986
 987static int map_delete_elem(union bpf_attr *attr)
 988{
 989	void __user *ukey = u64_to_user_ptr(attr->key);
 990	int ufd = attr->map_fd;
 991	struct bpf_map *map;
 992	struct fd f;
 993	void *key;
 994	int err;
 995
 996	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
 997		return -EINVAL;
 998
 999	f = fdget(ufd);
1000	map = __bpf_map_get(f);
1001	if (IS_ERR(map))
1002		return PTR_ERR(map);
1003	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1004		err = -EPERM;
1005		goto err_put;
1006	}
1007
1008	key = __bpf_copy_key(ukey, map->key_size);
1009	if (IS_ERR(key)) {
1010		err = PTR_ERR(key);
1011		goto err_put;
1012	}
1013
1014	if (bpf_map_is_dev_bound(map)) {
1015		err = bpf_map_offload_delete_elem(map, key);
1016		goto out;
 
 
 
 
 
1017	}
1018
1019	preempt_disable();
1020	__this_cpu_inc(bpf_prog_active);
1021	rcu_read_lock();
1022	err = map->ops->map_delete_elem(map, key);
1023	rcu_read_unlock();
1024	__this_cpu_dec(bpf_prog_active);
1025	preempt_enable();
1026	maybe_wait_bpf_programs(map);
1027out:
1028	kfree(key);
1029err_put:
1030	fdput(f);
1031	return err;
1032}
1033
1034/* last field in 'union bpf_attr' used by this command */
1035#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
1036
1037static int map_get_next_key(union bpf_attr *attr)
1038{
1039	void __user *ukey = u64_to_user_ptr(attr->key);
1040	void __user *unext_key = u64_to_user_ptr(attr->next_key);
1041	int ufd = attr->map_fd;
1042	struct bpf_map *map;
1043	void *key, *next_key;
1044	struct fd f;
1045	int err;
1046
1047	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
1048		return -EINVAL;
1049
1050	f = fdget(ufd);
1051	map = __bpf_map_get(f);
1052	if (IS_ERR(map))
1053		return PTR_ERR(map);
1054	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1055		err = -EPERM;
1056		goto err_put;
1057	}
1058
1059	if (ukey) {
1060		key = __bpf_copy_key(ukey, map->key_size);
1061		if (IS_ERR(key)) {
1062			err = PTR_ERR(key);
1063			goto err_put;
1064		}
1065	} else {
1066		key = NULL;
1067	}
1068
1069	err = -ENOMEM;
1070	next_key = kmalloc(map->key_size, GFP_USER);
1071	if (!next_key)
1072		goto free_key;
1073
1074	if (bpf_map_is_dev_bound(map)) {
1075		err = bpf_map_offload_get_next_key(map, key, next_key);
1076		goto out;
1077	}
1078
1079	rcu_read_lock();
1080	err = map->ops->map_get_next_key(map, key, next_key);
1081	rcu_read_unlock();
1082out:
1083	if (err)
1084		goto free_next_key;
1085
1086	err = -EFAULT;
1087	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
1088		goto free_next_key;
1089
1090	err = 0;
1091
1092free_next_key:
1093	kfree(next_key);
1094free_key:
1095	kfree(key);
1096err_put:
1097	fdput(f);
1098	return err;
1099}
1100
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1101#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value
1102
1103static int map_lookup_and_delete_elem(union bpf_attr *attr)
1104{
1105	void __user *ukey = u64_to_user_ptr(attr->key);
1106	void __user *uvalue = u64_to_user_ptr(attr->value);
1107	int ufd = attr->map_fd;
1108	struct bpf_map *map;
1109	void *key, *value;
1110	u32 value_size;
1111	struct fd f;
1112	int err;
1113
1114	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
1115		return -EINVAL;
1116
1117	f = fdget(ufd);
1118	map = __bpf_map_get(f);
1119	if (IS_ERR(map))
1120		return PTR_ERR(map);
1121	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
 
1122		err = -EPERM;
1123		goto err_put;
1124	}
1125
1126	key = __bpf_copy_key(ukey, map->key_size);
1127	if (IS_ERR(key)) {
1128		err = PTR_ERR(key);
1129		goto err_put;
1130	}
1131
1132	value_size = map->value_size;
1133
1134	err = -ENOMEM;
1135	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
1136	if (!value)
1137		goto free_key;
1138
1139	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
1140	    map->map_type == BPF_MAP_TYPE_STACK) {
1141		err = map->ops->map_pop_elem(map, value);
1142	} else {
1143		err = -ENOTSUPP;
1144	}
1145
1146	if (err)
1147		goto free_value;
1148
1149	if (copy_to_user(uvalue, value, value_size) != 0)
 
1150		goto free_value;
 
1151
1152	err = 0;
1153
1154free_value:
1155	kfree(value);
1156free_key:
1157	kfree(key);
1158err_put:
1159	fdput(f);
1160	return err;
1161}
1162
1163#define BPF_MAP_FREEZE_LAST_FIELD map_fd
1164
1165static int map_freeze(const union bpf_attr *attr)
1166{
1167	int err = 0, ufd = attr->map_fd;
1168	struct bpf_map *map;
1169	struct fd f;
1170
1171	if (CHECK_ATTR(BPF_MAP_FREEZE))
1172		return -EINVAL;
1173
1174	f = fdget(ufd);
1175	map = __bpf_map_get(f);
1176	if (IS_ERR(map))
1177		return PTR_ERR(map);
 
 
 
 
 
 
 
 
 
 
 
 
1178	if (READ_ONCE(map->frozen)) {
1179		err = -EBUSY;
1180		goto err_put;
1181	}
1182	if (!capable(CAP_SYS_ADMIN)) {
1183		err = -EPERM;
1184		goto err_put;
1185	}
1186
1187	WRITE_ONCE(map->frozen, true);
1188err_put:
 
1189	fdput(f);
1190	return err;
1191}
1192
1193static const struct bpf_prog_ops * const bpf_prog_types[] = {
1194#define BPF_PROG_TYPE(_id, _name) \
1195	[_id] = & _name ## _prog_ops,
1196#define BPF_MAP_TYPE(_id, _ops)
 
1197#include <linux/bpf_types.h>
1198#undef BPF_PROG_TYPE
1199#undef BPF_MAP_TYPE
 
1200};
1201
1202static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
1203{
1204	const struct bpf_prog_ops *ops;
1205
1206	if (type >= ARRAY_SIZE(bpf_prog_types))
1207		return -EINVAL;
1208	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
1209	ops = bpf_prog_types[type];
1210	if (!ops)
1211		return -EINVAL;
1212
1213	if (!bpf_prog_is_dev_bound(prog->aux))
1214		prog->aux->ops = ops;
1215	else
1216		prog->aux->ops = &bpf_offload_prog_ops;
1217	prog->type = type;
1218	return 0;
1219}
1220
1221/* drop refcnt on maps used by eBPF program and free auxilary data */
1222static void free_used_maps(struct bpf_prog_aux *aux)
1223{
1224	enum bpf_cgroup_storage_type stype;
1225	int i;
1226
1227	for_each_cgroup_storage_type(stype) {
1228		if (!aux->cgroup_storage[stype])
1229			continue;
1230		bpf_cgroup_storage_release(aux->prog,
1231					   aux->cgroup_storage[stype]);
1232	}
1233
1234	for (i = 0; i < aux->used_map_cnt; i++)
1235		bpf_map_put(aux->used_maps[i]);
 
 
1236
1237	kfree(aux->used_maps);
 
 
 
 
 
 
 
 
 
 
 
1238}
1239
1240int __bpf_prog_charge(struct user_struct *user, u32 pages)
1241{
1242	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1243	unsigned long user_bufs;
1244
1245	if (user) {
1246		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
1247		if (user_bufs > memlock_limit) {
1248			atomic_long_sub(pages, &user->locked_vm);
1249			return -EPERM;
1250		}
1251	}
1252
1253	return 0;
1254}
1255
1256void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
1257{
1258	if (user)
1259		atomic_long_sub(pages, &user->locked_vm);
1260}
1261
1262static int bpf_prog_charge_memlock(struct bpf_prog *prog)
1263{
1264	struct user_struct *user = get_current_user();
1265	int ret;
1266
1267	ret = __bpf_prog_charge(user, prog->pages);
1268	if (ret) {
1269		free_uid(user);
1270		return ret;
1271	}
1272
1273	prog->aux->user = user;
1274	return 0;
1275}
1276
1277static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
1278{
1279	struct user_struct *user = prog->aux->user;
1280
1281	__bpf_prog_uncharge(user, prog->pages);
1282	free_uid(user);
1283}
1284
1285static int bpf_prog_alloc_id(struct bpf_prog *prog)
1286{
1287	int id;
1288
1289	idr_preload(GFP_KERNEL);
1290	spin_lock_bh(&prog_idr_lock);
1291	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
1292	if (id > 0)
1293		prog->aux->id = id;
1294	spin_unlock_bh(&prog_idr_lock);
1295	idr_preload_end();
1296
1297	/* id is in [1, INT_MAX) */
1298	if (WARN_ON_ONCE(!id))
1299		return -ENOSPC;
1300
1301	return id > 0 ? 0 : id;
1302}
1303
1304void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
1305{
1306	/* cBPF to eBPF migrations are currently not in the idr store.
1307	 * Offloaded programs are removed from the store when their device
1308	 * disappears - even if someone grabs an fd to them they are unusable,
1309	 * simply waiting for refcnt to drop to be freed.
1310	 */
1311	if (!prog->aux->id)
1312		return;
1313
1314	if (do_idr_lock)
1315		spin_lock_bh(&prog_idr_lock);
1316	else
1317		__acquire(&prog_idr_lock);
1318
1319	idr_remove(&prog_idr, prog->aux->id);
1320	prog->aux->id = 0;
1321
1322	if (do_idr_lock)
1323		spin_unlock_bh(&prog_idr_lock);
1324	else
1325		__release(&prog_idr_lock);
1326}
1327
1328static void __bpf_prog_put_rcu(struct rcu_head *rcu)
1329{
1330	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
1331
1332	kvfree(aux->func_info);
1333	free_used_maps(aux);
1334	bpf_prog_uncharge_memlock(aux->prog);
1335	security_bpf_prog_free(aux);
1336	bpf_prog_free(aux->prog);
1337}
1338
1339static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
1340{
1341	bpf_prog_kallsyms_del_all(prog);
1342	btf_put(prog->aux->btf);
1343	bpf_prog_free_linfo(prog);
1344
1345	if (deferred)
1346		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
1347	else
1348		__bpf_prog_put_rcu(&prog->aux->rcu);
1349}
1350
1351static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
1352{
1353	if (atomic_dec_and_test(&prog->aux->refcnt)) {
1354		perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
 
1355		/* bpf_prog_free_id() must be called first */
1356		bpf_prog_free_id(prog, do_idr_lock);
1357		__bpf_prog_put_noref(prog, true);
1358	}
1359}
1360
1361void bpf_prog_put(struct bpf_prog *prog)
1362{
1363	__bpf_prog_put(prog, true);
1364}
1365EXPORT_SYMBOL_GPL(bpf_prog_put);
1366
1367static int bpf_prog_release(struct inode *inode, struct file *filp)
1368{
1369	struct bpf_prog *prog = filp->private_data;
1370
1371	bpf_prog_put(prog);
1372	return 0;
1373}
1374
1375static void bpf_prog_get_stats(const struct bpf_prog *prog,
1376			       struct bpf_prog_stats *stats)
1377{
1378	u64 nsecs = 0, cnt = 0;
1379	int cpu;
1380
1381	for_each_possible_cpu(cpu) {
1382		const struct bpf_prog_stats *st;
1383		unsigned int start;
1384		u64 tnsecs, tcnt;
1385
1386		st = per_cpu_ptr(prog->aux->stats, cpu);
1387		do {
1388			start = u64_stats_fetch_begin_irq(&st->syncp);
1389			tnsecs = st->nsecs;
1390			tcnt = st->cnt;
1391		} while (u64_stats_fetch_retry_irq(&st->syncp, start));
1392		nsecs += tnsecs;
1393		cnt += tcnt;
1394	}
1395	stats->nsecs = nsecs;
1396	stats->cnt = cnt;
1397}
1398
1399#ifdef CONFIG_PROC_FS
1400static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
1401{
1402	const struct bpf_prog *prog = filp->private_data;
1403	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
1404	struct bpf_prog_stats stats;
1405
1406	bpf_prog_get_stats(prog, &stats);
1407	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
1408	seq_printf(m,
1409		   "prog_type:\t%u\n"
1410		   "prog_jited:\t%u\n"
1411		   "prog_tag:\t%s\n"
1412		   "memlock:\t%llu\n"
1413		   "prog_id:\t%u\n"
1414		   "run_time_ns:\t%llu\n"
1415		   "run_cnt:\t%llu\n",
1416		   prog->type,
1417		   prog->jited,
1418		   prog_tag,
1419		   prog->pages * 1ULL << PAGE_SHIFT,
1420		   prog->aux->id,
1421		   stats.nsecs,
1422		   stats.cnt);
1423}
1424#endif
1425
1426const struct file_operations bpf_prog_fops = {
1427#ifdef CONFIG_PROC_FS
1428	.show_fdinfo	= bpf_prog_show_fdinfo,
1429#endif
1430	.release	= bpf_prog_release,
1431	.read		= bpf_dummy_read,
1432	.write		= bpf_dummy_write,
1433};
1434
1435int bpf_prog_new_fd(struct bpf_prog *prog)
1436{
1437	int ret;
1438
1439	ret = security_bpf_prog(prog);
1440	if (ret < 0)
1441		return ret;
1442
1443	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
1444				O_RDWR | O_CLOEXEC);
1445}
1446
1447static struct bpf_prog *____bpf_prog_get(struct fd f)
1448{
1449	if (!f.file)
1450		return ERR_PTR(-EBADF);
1451	if (f.file->f_op != &bpf_prog_fops) {
1452		fdput(f);
1453		return ERR_PTR(-EINVAL);
1454	}
1455
1456	return f.file->private_data;
1457}
1458
1459struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
1460{
1461	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
1462		atomic_sub(i, &prog->aux->refcnt);
1463		return ERR_PTR(-EBUSY);
1464	}
1465	return prog;
1466}
1467EXPORT_SYMBOL_GPL(bpf_prog_add);
1468
1469void bpf_prog_sub(struct bpf_prog *prog, int i)
1470{
1471	/* Only to be used for undoing previous bpf_prog_add() in some
1472	 * error path. We still know that another entity in our call
1473	 * path holds a reference to the program, thus atomic_sub() can
1474	 * be safely used in such cases!
1475	 */
1476	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
1477}
1478EXPORT_SYMBOL_GPL(bpf_prog_sub);
1479
1480struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
1481{
1482	return bpf_prog_add(prog, 1);
1483}
1484EXPORT_SYMBOL_GPL(bpf_prog_inc);
1485
1486/* prog_idr_lock should have been held */
1487struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
1488{
1489	int refold;
1490
1491	refold = atomic_fetch_add_unless(&prog->aux->refcnt, 1, 0);
1492
1493	if (refold >= BPF_MAX_REFCNT) {
1494		__bpf_prog_put(prog, false);
1495		return ERR_PTR(-EBUSY);
1496	}
1497
1498	if (!refold)
1499		return ERR_PTR(-ENOENT);
1500
1501	return prog;
1502}
1503EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
1504
1505bool bpf_prog_get_ok(struct bpf_prog *prog,
1506			    enum bpf_prog_type *attach_type, bool attach_drv)
1507{
1508	/* not an attachment, just a refcount inc, always allow */
1509	if (!attach_type)
1510		return true;
1511
1512	if (prog->type != *attach_type)
1513		return false;
1514	if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
1515		return false;
1516
1517	return true;
1518}
1519
1520static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
1521				       bool attach_drv)
1522{
1523	struct fd f = fdget(ufd);
1524	struct bpf_prog *prog;
1525
1526	prog = ____bpf_prog_get(f);
1527	if (IS_ERR(prog))
1528		return prog;
1529	if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
1530		prog = ERR_PTR(-EINVAL);
1531		goto out;
1532	}
1533
1534	prog = bpf_prog_inc(prog);
1535out:
1536	fdput(f);
1537	return prog;
1538}
1539
1540struct bpf_prog *bpf_prog_get(u32 ufd)
1541{
1542	return __bpf_prog_get(ufd, NULL, false);
1543}
1544
1545struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
1546				       bool attach_drv)
1547{
1548	return __bpf_prog_get(ufd, &type, attach_drv);
1549}
1550EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
1551
1552/* Initially all BPF programs could be loaded w/o specifying
1553 * expected_attach_type. Later for some of them specifying expected_attach_type
1554 * at load time became required so that program could be validated properly.
1555 * Programs of types that are allowed to be loaded both w/ and w/o (for
1556 * backward compatibility) expected_attach_type, should have the default attach
1557 * type assigned to expected_attach_type for the latter case, so that it can be
1558 * validated later at attach time.
1559 *
1560 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
1561 * prog type requires it but has some attach types that have to be backward
1562 * compatible.
1563 */
1564static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
1565{
1566	switch (attr->prog_type) {
1567	case BPF_PROG_TYPE_CGROUP_SOCK:
1568		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
1569		 * exist so checking for non-zero is the way to go here.
1570		 */
1571		if (!attr->expected_attach_type)
1572			attr->expected_attach_type =
1573				BPF_CGROUP_INET_SOCK_CREATE;
1574		break;
1575	}
1576}
1577
1578static int
1579bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type,
1580				enum bpf_attach_type expected_attach_type)
 
1581{
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1582	switch (prog_type) {
1583	case BPF_PROG_TYPE_CGROUP_SOCK:
1584		switch (expected_attach_type) {
1585		case BPF_CGROUP_INET_SOCK_CREATE:
 
1586		case BPF_CGROUP_INET4_POST_BIND:
1587		case BPF_CGROUP_INET6_POST_BIND:
1588			return 0;
1589		default:
1590			return -EINVAL;
1591		}
1592	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
1593		switch (expected_attach_type) {
1594		case BPF_CGROUP_INET4_BIND:
1595		case BPF_CGROUP_INET6_BIND:
1596		case BPF_CGROUP_INET4_CONNECT:
1597		case BPF_CGROUP_INET6_CONNECT:
 
 
 
 
1598		case BPF_CGROUP_UDP4_SENDMSG:
1599		case BPF_CGROUP_UDP6_SENDMSG:
1600		case BPF_CGROUP_UDP4_RECVMSG:
1601		case BPF_CGROUP_UDP6_RECVMSG:
1602			return 0;
1603		default:
1604			return -EINVAL;
1605		}
1606	case BPF_PROG_TYPE_CGROUP_SKB:
1607		switch (expected_attach_type) {
1608		case BPF_CGROUP_INET_INGRESS:
1609		case BPF_CGROUP_INET_EGRESS:
1610			return 0;
1611		default:
1612			return -EINVAL;
1613		}
1614	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
1615		switch (expected_attach_type) {
1616		case BPF_CGROUP_SETSOCKOPT:
1617		case BPF_CGROUP_GETSOCKOPT:
1618			return 0;
1619		default:
1620			return -EINVAL;
1621		}
 
 
 
 
 
 
 
 
1622	default:
1623		return 0;
1624	}
1625}
1626
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1627/* last field in 'union bpf_attr' used by this command */
1628#define	BPF_PROG_LOAD_LAST_FIELD line_info_cnt
1629
1630static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
1631{
1632	enum bpf_prog_type type = attr->prog_type;
1633	struct bpf_prog *prog;
1634	int err;
1635	char license[128];
1636	bool is_gpl;
1637
1638	if (CHECK_ATTR(BPF_PROG_LOAD))
1639		return -EINVAL;
1640
1641	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
1642				 BPF_F_ANY_ALIGNMENT |
1643				 BPF_F_TEST_STATE_FREQ |
1644				 BPF_F_TEST_RND_HI32))
1645		return -EINVAL;
1646
1647	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
1648	    (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
1649	    !capable(CAP_SYS_ADMIN))
1650		return -EPERM;
1651
1652	/* copy eBPF program license from user space */
1653	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
1654			      sizeof(license) - 1) < 0)
1655		return -EFAULT;
1656	license[sizeof(license) - 1] = 0;
1657
1658	/* eBPF programs must be GPL compatible to use GPL-ed functions */
1659	is_gpl = license_is_gpl_compatible(license);
1660
1661	if (attr->insn_cnt == 0 ||
1662	    attr->insn_cnt > (capable(CAP_SYS_ADMIN) ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
1663		return -E2BIG;
1664	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
1665	    type != BPF_PROG_TYPE_CGROUP_SKB &&
1666	    !capable(CAP_SYS_ADMIN))
 
 
 
 
 
1667		return -EPERM;
1668
1669	bpf_prog_load_fixup_attach_type(attr);
1670	if (bpf_prog_load_check_attach_type(type, attr->expected_attach_type))
 
 
1671		return -EINVAL;
1672
1673	/* plain bpf_prog allocation */
1674	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
1675	if (!prog)
1676		return -ENOMEM;
1677
1678	prog->expected_attach_type = attr->expected_attach_type;
 
 
 
 
 
 
 
 
 
 
 
1679
1680	prog->aux->offload_requested = !!attr->prog_ifindex;
1681
1682	err = security_bpf_prog_alloc(prog->aux);
1683	if (err)
1684		goto free_prog_nouncharge;
1685
1686	err = bpf_prog_charge_memlock(prog);
1687	if (err)
1688		goto free_prog_sec;
1689
1690	prog->len = attr->insn_cnt;
1691
1692	err = -EFAULT;
1693	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
1694			   bpf_prog_insn_size(prog)) != 0)
1695		goto free_prog;
1696
1697	prog->orig_prog = NULL;
1698	prog->jited = 0;
1699
1700	atomic_set(&prog->aux->refcnt, 1);
1701	prog->gpl_compatible = is_gpl ? 1 : 0;
1702
1703	if (bpf_prog_is_dev_bound(prog->aux)) {
1704		err = bpf_prog_offload_init(prog, attr);
1705		if (err)
1706			goto free_prog;
1707	}
1708
1709	/* find program type: socket_filter vs tracing_filter */
1710	err = find_prog_type(type, prog);
1711	if (err < 0)
1712		goto free_prog;
1713
1714	prog->aux->load_time = ktime_get_boottime_ns();
1715	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name);
1716	if (err)
 
1717		goto free_prog;
1718
1719	/* run eBPF verifier */
1720	err = bpf_check(&prog, attr, uattr);
1721	if (err < 0)
1722		goto free_used_maps;
1723
1724	prog = bpf_prog_select_runtime(prog, &err);
1725	if (err < 0)
1726		goto free_used_maps;
1727
1728	err = bpf_prog_alloc_id(prog);
1729	if (err)
1730		goto free_used_maps;
1731
1732	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
1733	 * effectively publicly exposed. However, retrieving via
1734	 * bpf_prog_get_fd_by_id() will take another reference,
1735	 * therefore it cannot be gone underneath us.
1736	 *
1737	 * Only for the time /after/ successful bpf_prog_new_fd()
1738	 * and before returning to userspace, we might just hold
1739	 * one reference and any parallel close on that fd could
1740	 * rip everything out. Hence, below notifications must
1741	 * happen before bpf_prog_new_fd().
1742	 *
1743	 * Also, any failure handling from this point onwards must
1744	 * be using bpf_prog_put() given the program is exposed.
1745	 */
1746	bpf_prog_kallsyms_add(prog);
1747	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
 
1748
1749	err = bpf_prog_new_fd(prog);
1750	if (err < 0)
1751		bpf_prog_put(prog);
1752	return err;
1753
1754free_used_maps:
1755	/* In case we have subprogs, we need to wait for a grace
1756	 * period before we can tear down JIT memory since symbols
1757	 * are already exposed under kallsyms.
1758	 */
1759	__bpf_prog_put_noref(prog, prog->aux->func_cnt);
1760	return err;
1761free_prog:
1762	bpf_prog_uncharge_memlock(prog);
1763free_prog_sec:
1764	security_bpf_prog_free(prog->aux);
1765free_prog_nouncharge:
1766	bpf_prog_free(prog);
1767	return err;
1768}
1769
1770#define BPF_OBJ_LAST_FIELD file_flags
1771
1772static int bpf_obj_pin(const union bpf_attr *attr)
1773{
1774	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
1775		return -EINVAL;
1776
1777	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
1778}
1779
1780static int bpf_obj_get(const union bpf_attr *attr)
1781{
1782	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
1783	    attr->file_flags & ~BPF_OBJ_FLAG_MASK)
1784		return -EINVAL;
1785
1786	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
1787				attr->file_flags);
1788}
1789
1790struct bpf_raw_tracepoint {
1791	struct bpf_raw_event_map *btp;
1792	struct bpf_prog *prog;
1793};
 
 
 
 
 
1794
1795static int bpf_raw_tracepoint_release(struct inode *inode, struct file *filp)
1796{
1797	struct bpf_raw_tracepoint *raw_tp = filp->private_data;
 
1798
1799	if (raw_tp->prog) {
1800		bpf_probe_unregister(raw_tp->btp, raw_tp->prog);
1801		bpf_prog_put(raw_tp->prog);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1802	}
1803	bpf_put_raw_tracepoint(raw_tp->btp);
1804	kfree(raw_tp);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1805	return 0;
1806}
1807
1808static const struct file_operations bpf_raw_tp_fops = {
1809	.release	= bpf_raw_tracepoint_release,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1810	.read		= bpf_dummy_read,
1811	.write		= bpf_dummy_write,
1812};
1813
1814#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
 
 
1815
1816static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1817{
1818	struct bpf_raw_tracepoint *raw_tp;
1819	struct bpf_raw_event_map *btp;
1820	struct bpf_prog *prog;
1821	char tp_name[128];
1822	int tp_fd, err;
1823
1824	if (strncpy_from_user(tp_name, u64_to_user_ptr(attr->raw_tracepoint.name),
1825			      sizeof(tp_name) - 1) < 0)
1826		return -EFAULT;
1827	tp_name[sizeof(tp_name) - 1] = 0;
1828
1829	btp = bpf_get_raw_tracepoint(tp_name);
1830	if (!btp)
1831		return -ENOENT;
1832
1833	raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER);
1834	if (!raw_tp) {
1835		err = -ENOMEM;
1836		goto out_put_btp;
1837	}
1838	raw_tp->btp = btp;
1839
1840	prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
1841	if (IS_ERR(prog)) {
1842		err = PTR_ERR(prog);
1843		goto out_free_tp;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1844	}
1845	if (prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT &&
1846	    prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE) {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1847		err = -EINVAL;
1848		goto out_put_prog;
1849	}
1850
1851	err = bpf_probe_register(raw_tp->btp, prog);
1852	if (err)
 
 
 
 
 
 
 
 
 
 
1853		goto out_put_prog;
 
1854
1855	raw_tp->prog = prog;
1856	tp_fd = anon_inode_getfd("bpf-raw-tracepoint", &bpf_raw_tp_fops, raw_tp,
1857				 O_CLOEXEC);
1858	if (tp_fd < 0) {
1859		bpf_probe_unregister(raw_tp->btp, prog);
1860		err = tp_fd;
1861		goto out_put_prog;
1862	}
1863	return tp_fd;
1864
 
1865out_put_prog:
1866	bpf_prog_put(prog);
1867out_free_tp:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1868	kfree(raw_tp);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1869out_put_btp:
1870	bpf_put_raw_tracepoint(btp);
 
 
1871	return err;
1872}
1873
1874static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
1875					     enum bpf_attach_type attach_type)
1876{
1877	switch (prog->type) {
1878	case BPF_PROG_TYPE_CGROUP_SOCK:
1879	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
1880	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
 
1881		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
1882	case BPF_PROG_TYPE_CGROUP_SKB:
 
 
 
 
 
1883		return prog->enforce_expected_attach_type &&
1884			prog->expected_attach_type != attach_type ?
1885			-EINVAL : 0;
1886	default:
1887		return 0;
1888	}
1889}
1890
1891#define BPF_PROG_ATTACH_LAST_FIELD attach_flags
1892
1893#define BPF_F_ATTACH_MASK \
1894	(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI)
1895
1896static int bpf_prog_attach(const union bpf_attr *attr)
1897{
1898	enum bpf_prog_type ptype;
1899	struct bpf_prog *prog;
1900	int ret;
1901
1902	if (!capable(CAP_NET_ADMIN))
1903		return -EPERM;
1904
1905	if (CHECK_ATTR(BPF_PROG_ATTACH))
1906		return -EINVAL;
1907
1908	if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
1909		return -EINVAL;
1910
1911	switch (attr->attach_type) {
1912	case BPF_CGROUP_INET_INGRESS:
1913	case BPF_CGROUP_INET_EGRESS:
1914		ptype = BPF_PROG_TYPE_CGROUP_SKB;
1915		break;
1916	case BPF_CGROUP_INET_SOCK_CREATE:
 
1917	case BPF_CGROUP_INET4_POST_BIND:
1918	case BPF_CGROUP_INET6_POST_BIND:
1919		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
1920		break;
1921	case BPF_CGROUP_INET4_BIND:
1922	case BPF_CGROUP_INET6_BIND:
1923	case BPF_CGROUP_INET4_CONNECT:
1924	case BPF_CGROUP_INET6_CONNECT:
 
 
 
 
1925	case BPF_CGROUP_UDP4_SENDMSG:
1926	case BPF_CGROUP_UDP6_SENDMSG:
1927	case BPF_CGROUP_UDP4_RECVMSG:
1928	case BPF_CGROUP_UDP6_RECVMSG:
1929		ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
1930		break;
1931	case BPF_CGROUP_SOCK_OPS:
1932		ptype = BPF_PROG_TYPE_SOCK_OPS;
1933		break;
1934	case BPF_CGROUP_DEVICE:
1935		ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
1936		break;
1937	case BPF_SK_MSG_VERDICT:
1938		ptype = BPF_PROG_TYPE_SK_MSG;
1939		break;
1940	case BPF_SK_SKB_STREAM_PARSER:
1941	case BPF_SK_SKB_STREAM_VERDICT:
1942		ptype = BPF_PROG_TYPE_SK_SKB;
1943		break;
1944	case BPF_LIRC_MODE2:
1945		ptype = BPF_PROG_TYPE_LIRC_MODE2;
1946		break;
1947	case BPF_FLOW_DISSECTOR:
1948		ptype = BPF_PROG_TYPE_FLOW_DISSECTOR;
1949		break;
1950	case BPF_CGROUP_SYSCTL:
1951		ptype = BPF_PROG_TYPE_CGROUP_SYSCTL;
1952		break;
1953	case BPF_CGROUP_GETSOCKOPT:
1954	case BPF_CGROUP_SETSOCKOPT:
1955		ptype = BPF_PROG_TYPE_CGROUP_SOCKOPT;
1956		break;
 
 
 
 
 
1957	default:
1958		return -EINVAL;
1959	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1960
1961	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
1962	if (IS_ERR(prog))
1963		return PTR_ERR(prog);
1964
1965	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
1966		bpf_prog_put(prog);
1967		return -EINVAL;
1968	}
1969
1970	switch (ptype) {
1971	case BPF_PROG_TYPE_SK_SKB:
1972	case BPF_PROG_TYPE_SK_MSG:
1973		ret = sock_map_get_from_fd(attr, prog);
1974		break;
1975	case BPF_PROG_TYPE_LIRC_MODE2:
1976		ret = lirc_prog_attach(attr, prog);
1977		break;
1978	case BPF_PROG_TYPE_FLOW_DISSECTOR:
1979		ret = skb_flow_dissector_bpf_prog_attach(attr, prog);
1980		break;
1981	default:
 
 
 
 
 
 
1982		ret = cgroup_bpf_prog_attach(attr, ptype, prog);
 
 
 
1983	}
1984
1985	if (ret)
1986		bpf_prog_put(prog);
1987	return ret;
1988}
1989
1990#define BPF_PROG_DETACH_LAST_FIELD attach_type
1991
1992static int bpf_prog_detach(const union bpf_attr *attr)
1993{
1994	enum bpf_prog_type ptype;
1995
1996	if (!capable(CAP_NET_ADMIN))
1997		return -EPERM;
1998
1999	if (CHECK_ATTR(BPF_PROG_DETACH))
2000		return -EINVAL;
2001
2002	switch (attr->attach_type) {
2003	case BPF_CGROUP_INET_INGRESS:
2004	case BPF_CGROUP_INET_EGRESS:
2005		ptype = BPF_PROG_TYPE_CGROUP_SKB;
2006		break;
2007	case BPF_CGROUP_INET_SOCK_CREATE:
2008	case BPF_CGROUP_INET4_POST_BIND:
2009	case BPF_CGROUP_INET6_POST_BIND:
2010		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
2011		break;
2012	case BPF_CGROUP_INET4_BIND:
2013	case BPF_CGROUP_INET6_BIND:
2014	case BPF_CGROUP_INET4_CONNECT:
2015	case BPF_CGROUP_INET6_CONNECT:
2016	case BPF_CGROUP_UDP4_SENDMSG:
2017	case BPF_CGROUP_UDP6_SENDMSG:
2018	case BPF_CGROUP_UDP4_RECVMSG:
2019	case BPF_CGROUP_UDP6_RECVMSG:
2020		ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
2021		break;
2022	case BPF_CGROUP_SOCK_OPS:
2023		ptype = BPF_PROG_TYPE_SOCK_OPS;
2024		break;
2025	case BPF_CGROUP_DEVICE:
2026		ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
2027		break;
2028	case BPF_SK_MSG_VERDICT:
2029		return sock_map_get_from_fd(attr, NULL);
2030	case BPF_SK_SKB_STREAM_PARSER:
2031	case BPF_SK_SKB_STREAM_VERDICT:
2032		return sock_map_get_from_fd(attr, NULL);
2033	case BPF_LIRC_MODE2:
2034		return lirc_prog_detach(attr);
2035	case BPF_FLOW_DISSECTOR:
2036		return skb_flow_dissector_bpf_prog_detach(attr);
2037	case BPF_CGROUP_SYSCTL:
2038		ptype = BPF_PROG_TYPE_CGROUP_SYSCTL;
2039		break;
2040	case BPF_CGROUP_GETSOCKOPT:
2041	case BPF_CGROUP_SETSOCKOPT:
2042		ptype = BPF_PROG_TYPE_CGROUP_SOCKOPT;
2043		break;
 
2044	default:
2045		return -EINVAL;
2046	}
2047
2048	return cgroup_bpf_prog_detach(attr, ptype);
2049}
2050
2051#define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
2052
2053static int bpf_prog_query(const union bpf_attr *attr,
2054			  union bpf_attr __user *uattr)
2055{
2056	if (!capable(CAP_NET_ADMIN))
2057		return -EPERM;
2058	if (CHECK_ATTR(BPF_PROG_QUERY))
2059		return -EINVAL;
2060	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
2061		return -EINVAL;
2062
2063	switch (attr->query.attach_type) {
2064	case BPF_CGROUP_INET_INGRESS:
2065	case BPF_CGROUP_INET_EGRESS:
2066	case BPF_CGROUP_INET_SOCK_CREATE:
 
2067	case BPF_CGROUP_INET4_BIND:
2068	case BPF_CGROUP_INET6_BIND:
2069	case BPF_CGROUP_INET4_POST_BIND:
2070	case BPF_CGROUP_INET6_POST_BIND:
2071	case BPF_CGROUP_INET4_CONNECT:
2072	case BPF_CGROUP_INET6_CONNECT:
 
 
 
 
2073	case BPF_CGROUP_UDP4_SENDMSG:
2074	case BPF_CGROUP_UDP6_SENDMSG:
2075	case BPF_CGROUP_UDP4_RECVMSG:
2076	case BPF_CGROUP_UDP6_RECVMSG:
2077	case BPF_CGROUP_SOCK_OPS:
2078	case BPF_CGROUP_DEVICE:
2079	case BPF_CGROUP_SYSCTL:
2080	case BPF_CGROUP_GETSOCKOPT:
2081	case BPF_CGROUP_SETSOCKOPT:
2082		break;
2083	case BPF_LIRC_MODE2:
2084		return lirc_prog_query(attr, uattr);
2085	case BPF_FLOW_DISSECTOR:
2086		return skb_flow_dissector_prog_query(attr, uattr);
 
2087	default:
2088		return -EINVAL;
2089	}
2090
2091	return cgroup_bpf_prog_query(attr, uattr);
2092}
2093
2094#define BPF_PROG_TEST_RUN_LAST_FIELD test.ctx_out
2095
2096static int bpf_prog_test_run(const union bpf_attr *attr,
2097			     union bpf_attr __user *uattr)
2098{
2099	struct bpf_prog *prog;
2100	int ret = -ENOTSUPP;
2101
2102	if (!capable(CAP_SYS_ADMIN))
2103		return -EPERM;
2104	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
2105		return -EINVAL;
2106
2107	if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
2108	    (!attr->test.ctx_size_in && attr->test.ctx_in))
2109		return -EINVAL;
2110
2111	if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
2112	    (!attr->test.ctx_size_out && attr->test.ctx_out))
2113		return -EINVAL;
2114
2115	prog = bpf_prog_get(attr->test.prog_fd);
2116	if (IS_ERR(prog))
2117		return PTR_ERR(prog);
2118
2119	if (prog->aux->ops->test_run)
2120		ret = prog->aux->ops->test_run(prog, attr, uattr);
2121
2122	bpf_prog_put(prog);
2123	return ret;
2124}
2125
2126#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
2127
2128static int bpf_obj_get_next_id(const union bpf_attr *attr,
2129			       union bpf_attr __user *uattr,
2130			       struct idr *idr,
2131			       spinlock_t *lock)
2132{
2133	u32 next_id = attr->start_id;
2134	int err = 0;
2135
2136	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
2137		return -EINVAL;
2138
2139	if (!capable(CAP_SYS_ADMIN))
2140		return -EPERM;
2141
2142	next_id++;
2143	spin_lock_bh(lock);
2144	if (!idr_get_next(idr, &next_id))
2145		err = -ENOENT;
2146	spin_unlock_bh(lock);
2147
2148	if (!err)
2149		err = put_user(next_id, &uattr->next_id);
2150
2151	return err;
2152}
2153
2154#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
 
 
2155
2156static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2157{
2158	struct bpf_prog *prog;
2159	u32 id = attr->prog_id;
2160	int fd;
2161
2162	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
2163		return -EINVAL;
 
 
 
 
 
 
 
 
 
2164
2165	if (!capable(CAP_SYS_ADMIN))
2166		return -EPERM;
 
 
 
 
 
 
 
 
 
2167
2168	spin_lock_bh(&prog_idr_lock);
2169	prog = idr_find(&prog_idr, id);
2170	if (prog)
2171		prog = bpf_prog_inc_not_zero(prog);
2172	else
2173		prog = ERR_PTR(-ENOENT);
2174	spin_unlock_bh(&prog_idr_lock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2175
 
2176	if (IS_ERR(prog))
2177		return PTR_ERR(prog);
2178
2179	fd = bpf_prog_new_fd(prog);
2180	if (fd < 0)
2181		bpf_prog_put(prog);
2182
2183	return fd;
2184}
2185
2186#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
2187
2188static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
2189{
2190	struct bpf_map *map;
2191	u32 id = attr->map_id;
2192	int f_flags;
2193	int fd;
2194
2195	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
2196	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
2197		return -EINVAL;
2198
2199	if (!capable(CAP_SYS_ADMIN))
2200		return -EPERM;
2201
2202	f_flags = bpf_get_file_flag(attr->open_flags);
2203	if (f_flags < 0)
2204		return f_flags;
2205
2206	spin_lock_bh(&map_idr_lock);
2207	map = idr_find(&map_idr, id);
2208	if (map)
2209		map = __bpf_map_inc_not_zero(map, true);
2210	else
2211		map = ERR_PTR(-ENOENT);
2212	spin_unlock_bh(&map_idr_lock);
2213
2214	if (IS_ERR(map))
2215		return PTR_ERR(map);
2216
2217	fd = bpf_map_new_fd(map, f_flags);
2218	if (fd < 0)
2219		bpf_map_put_with_uref(map);
2220
2221	return fd;
2222}
2223
2224static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
2225					      unsigned long addr, u32 *off,
2226					      u32 *type)
2227{
2228	const struct bpf_map *map;
2229	int i;
2230
2231	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
2232		map = prog->aux->used_maps[i];
2233		if (map == (void *)addr) {
2234			*type = BPF_PSEUDO_MAP_FD;
2235			return map;
2236		}
2237		if (!map->ops->map_direct_value_meta)
2238			continue;
2239		if (!map->ops->map_direct_value_meta(map, addr, off)) {
2240			*type = BPF_PSEUDO_MAP_VALUE;
2241			return map;
2242		}
2243	}
2244
2245	return NULL;
2246}
2247
2248static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
 
2249{
2250	const struct bpf_map *map;
2251	struct bpf_insn *insns;
2252	u32 off, type;
2253	u64 imm;
 
2254	int i;
2255
2256	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
2257			GFP_USER);
2258	if (!insns)
2259		return insns;
2260
2261	for (i = 0; i < prog->len; i++) {
2262		if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) {
 
 
2263			insns[i].code = BPF_JMP | BPF_CALL;
2264			insns[i].imm = BPF_FUNC_tail_call;
2265			/* fall-through */
2266		}
2267		if (insns[i].code == (BPF_JMP | BPF_CALL) ||
2268		    insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
2269			if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
2270				insns[i].code = BPF_JMP | BPF_CALL;
2271			if (!bpf_dump_raw_ok())
2272				insns[i].imm = 0;
2273			continue;
2274		}
 
 
 
 
2275
2276		if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW))
2277			continue;
2278
2279		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
2280		map = bpf_map_from_imm(prog, imm, &off, &type);
2281		if (map) {
2282			insns[i].src_reg = type;
2283			insns[i].imm = map->id;
2284			insns[i + 1].imm = off;
2285			continue;
2286		}
2287	}
2288
2289	return insns;
2290}
2291
2292static int set_info_rec_size(struct bpf_prog_info *info)
2293{
2294	/*
2295	 * Ensure info.*_rec_size is the same as kernel expected size
2296	 *
2297	 * or
2298	 *
2299	 * Only allow zero *_rec_size if both _rec_size and _cnt are
2300	 * zero.  In this case, the kernel will set the expected
2301	 * _rec_size back to the info.
2302	 */
2303
2304	if ((info->nr_func_info || info->func_info_rec_size) &&
2305	    info->func_info_rec_size != sizeof(struct bpf_func_info))
2306		return -EINVAL;
2307
2308	if ((info->nr_line_info || info->line_info_rec_size) &&
2309	    info->line_info_rec_size != sizeof(struct bpf_line_info))
2310		return -EINVAL;
2311
2312	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
2313	    info->jited_line_info_rec_size != sizeof(__u64))
2314		return -EINVAL;
2315
2316	info->func_info_rec_size = sizeof(struct bpf_func_info);
2317	info->line_info_rec_size = sizeof(struct bpf_line_info);
2318	info->jited_line_info_rec_size = sizeof(__u64);
2319
2320	return 0;
2321}
2322
2323static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 
2324				   const union bpf_attr *attr,
2325				   union bpf_attr __user *uattr)
2326{
2327	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
2328	struct bpf_prog_info info = {};
2329	u32 info_len = attr->info.info_len;
2330	struct bpf_prog_stats stats;
2331	char __user *uinsns;
2332	u32 ulen;
2333	int err;
2334
2335	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
2336	if (err)
2337		return err;
2338	info_len = min_t(u32, sizeof(info), info_len);
2339
 
2340	if (copy_from_user(&info, uinfo, info_len))
2341		return -EFAULT;
2342
2343	info.type = prog->type;
2344	info.id = prog->aux->id;
2345	info.load_time = prog->aux->load_time;
2346	info.created_by_uid = from_kuid_munged(current_user_ns(),
2347					       prog->aux->user->uid);
2348	info.gpl_compatible = prog->gpl_compatible;
2349
2350	memcpy(info.tag, prog->tag, sizeof(prog->tag));
2351	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
2352
2353	ulen = info.nr_map_ids;
2354	info.nr_map_ids = prog->aux->used_map_cnt;
2355	ulen = min_t(u32, info.nr_map_ids, ulen);
2356	if (ulen) {
2357		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
2358		u32 i;
2359
2360		for (i = 0; i < ulen; i++)
2361			if (put_user(prog->aux->used_maps[i]->id,
2362				     &user_map_ids[i]))
2363				return -EFAULT;
2364	}
2365
2366	err = set_info_rec_size(&info);
2367	if (err)
2368		return err;
2369
2370	bpf_prog_get_stats(prog, &stats);
2371	info.run_time_ns = stats.nsecs;
2372	info.run_cnt = stats.cnt;
2373
2374	if (!capable(CAP_SYS_ADMIN)) {
2375		info.jited_prog_len = 0;
2376		info.xlated_prog_len = 0;
2377		info.nr_jited_ksyms = 0;
2378		info.nr_jited_func_lens = 0;
2379		info.nr_func_info = 0;
2380		info.nr_line_info = 0;
2381		info.nr_jited_line_info = 0;
2382		goto done;
2383	}
2384
2385	ulen = info.xlated_prog_len;
2386	info.xlated_prog_len = bpf_prog_insn_size(prog);
2387	if (info.xlated_prog_len && ulen) {
2388		struct bpf_insn *insns_sanitized;
2389		bool fault;
2390
2391		if (prog->blinded && !bpf_dump_raw_ok()) {
2392			info.xlated_prog_insns = 0;
2393			goto done;
2394		}
2395		insns_sanitized = bpf_insn_prepare_dump(prog);
2396		if (!insns_sanitized)
2397			return -ENOMEM;
2398		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
2399		ulen = min_t(u32, info.xlated_prog_len, ulen);
2400		fault = copy_to_user(uinsns, insns_sanitized, ulen);
2401		kfree(insns_sanitized);
2402		if (fault)
2403			return -EFAULT;
2404	}
2405
2406	if (bpf_prog_is_dev_bound(prog->aux)) {
2407		err = bpf_prog_offload_info_fill(&info, prog);
2408		if (err)
2409			return err;
2410		goto done;
2411	}
2412
2413	/* NOTE: the following code is supposed to be skipped for offload.
2414	 * bpf_prog_offload_info_fill() is the place to fill similar fields
2415	 * for offload.
2416	 */
2417	ulen = info.jited_prog_len;
2418	if (prog->aux->func_cnt) {
2419		u32 i;
2420
2421		info.jited_prog_len = 0;
2422		for (i = 0; i < prog->aux->func_cnt; i++)
2423			info.jited_prog_len += prog->aux->func[i]->jited_len;
2424	} else {
2425		info.jited_prog_len = prog->jited_len;
2426	}
2427
2428	if (info.jited_prog_len && ulen) {
2429		if (bpf_dump_raw_ok()) {
2430			uinsns = u64_to_user_ptr(info.jited_prog_insns);
2431			ulen = min_t(u32, info.jited_prog_len, ulen);
2432
2433			/* for multi-function programs, copy the JITed
2434			 * instructions for all the functions
2435			 */
2436			if (prog->aux->func_cnt) {
2437				u32 len, free, i;
2438				u8 *img;
2439
2440				free = ulen;
2441				for (i = 0; i < prog->aux->func_cnt; i++) {
2442					len = prog->aux->func[i]->jited_len;
2443					len = min_t(u32, len, free);
2444					img = (u8 *) prog->aux->func[i]->bpf_func;
2445					if (copy_to_user(uinsns, img, len))
2446						return -EFAULT;
2447					uinsns += len;
2448					free -= len;
2449					if (!free)
2450						break;
2451				}
2452			} else {
2453				if (copy_to_user(uinsns, prog->bpf_func, ulen))
2454					return -EFAULT;
2455			}
2456		} else {
2457			info.jited_prog_insns = 0;
2458		}
2459	}
2460
2461	ulen = info.nr_jited_ksyms;
2462	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
2463	if (ulen) {
2464		if (bpf_dump_raw_ok()) {
2465			unsigned long ksym_addr;
2466			u64 __user *user_ksyms;
2467			u32 i;
2468
2469			/* copy the address of the kernel symbol
2470			 * corresponding to each function
2471			 */
2472			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
2473			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
2474			if (prog->aux->func_cnt) {
2475				for (i = 0; i < ulen; i++) {
2476					ksym_addr = (unsigned long)
2477						prog->aux->func[i]->bpf_func;
2478					if (put_user((u64) ksym_addr,
2479						     &user_ksyms[i]))
2480						return -EFAULT;
2481				}
2482			} else {
2483				ksym_addr = (unsigned long) prog->bpf_func;
2484				if (put_user((u64) ksym_addr, &user_ksyms[0]))
2485					return -EFAULT;
2486			}
2487		} else {
2488			info.jited_ksyms = 0;
2489		}
2490	}
2491
2492	ulen = info.nr_jited_func_lens;
2493	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
2494	if (ulen) {
2495		if (bpf_dump_raw_ok()) {
2496			u32 __user *user_lens;
2497			u32 func_len, i;
2498
2499			/* copy the JITed image lengths for each function */
2500			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
2501			user_lens = u64_to_user_ptr(info.jited_func_lens);
2502			if (prog->aux->func_cnt) {
2503				for (i = 0; i < ulen; i++) {
2504					func_len =
2505						prog->aux->func[i]->jited_len;
2506					if (put_user(func_len, &user_lens[i]))
2507						return -EFAULT;
2508				}
2509			} else {
2510				func_len = prog->jited_len;
2511				if (put_user(func_len, &user_lens[0]))
2512					return -EFAULT;
2513			}
2514		} else {
2515			info.jited_func_lens = 0;
2516		}
2517	}
2518
2519	if (prog->aux->btf)
2520		info.btf_id = btf_id(prog->aux->btf);
2521
2522	ulen = info.nr_func_info;
2523	info.nr_func_info = prog->aux->func_info_cnt;
2524	if (info.nr_func_info && ulen) {
2525		char __user *user_finfo;
2526
2527		user_finfo = u64_to_user_ptr(info.func_info);
2528		ulen = min_t(u32, info.nr_func_info, ulen);
2529		if (copy_to_user(user_finfo, prog->aux->func_info,
2530				 info.func_info_rec_size * ulen))
2531			return -EFAULT;
2532	}
2533
2534	ulen = info.nr_line_info;
2535	info.nr_line_info = prog->aux->nr_linfo;
2536	if (info.nr_line_info && ulen) {
2537		__u8 __user *user_linfo;
2538
2539		user_linfo = u64_to_user_ptr(info.line_info);
2540		ulen = min_t(u32, info.nr_line_info, ulen);
2541		if (copy_to_user(user_linfo, prog->aux->linfo,
2542				 info.line_info_rec_size * ulen))
2543			return -EFAULT;
2544	}
2545
2546	ulen = info.nr_jited_line_info;
2547	if (prog->aux->jited_linfo)
2548		info.nr_jited_line_info = prog->aux->nr_linfo;
2549	else
2550		info.nr_jited_line_info = 0;
2551	if (info.nr_jited_line_info && ulen) {
2552		if (bpf_dump_raw_ok()) {
2553			__u64 __user *user_linfo;
2554			u32 i;
2555
2556			user_linfo = u64_to_user_ptr(info.jited_line_info);
2557			ulen = min_t(u32, info.nr_jited_line_info, ulen);
2558			for (i = 0; i < ulen; i++) {
2559				if (put_user((__u64)(long)prog->aux->jited_linfo[i],
2560					     &user_linfo[i]))
2561					return -EFAULT;
2562			}
2563		} else {
2564			info.jited_line_info = 0;
2565		}
2566	}
2567
2568	ulen = info.nr_prog_tags;
2569	info.nr_prog_tags = prog->aux->func_cnt ? : 1;
2570	if (ulen) {
2571		__u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
2572		u32 i;
2573
2574		user_prog_tags = u64_to_user_ptr(info.prog_tags);
2575		ulen = min_t(u32, info.nr_prog_tags, ulen);
2576		if (prog->aux->func_cnt) {
2577			for (i = 0; i < ulen; i++) {
2578				if (copy_to_user(user_prog_tags[i],
2579						 prog->aux->func[i]->tag,
2580						 BPF_TAG_SIZE))
2581					return -EFAULT;
2582			}
2583		} else {
2584			if (copy_to_user(user_prog_tags[0],
2585					 prog->tag, BPF_TAG_SIZE))
2586				return -EFAULT;
2587		}
2588	}
2589
2590done:
2591	if (copy_to_user(uinfo, &info, info_len) ||
2592	    put_user(info_len, &uattr->info.info_len))
2593		return -EFAULT;
2594
2595	return 0;
2596}
2597
2598static int bpf_map_get_info_by_fd(struct bpf_map *map,
 
2599				  const union bpf_attr *attr,
2600				  union bpf_attr __user *uattr)
2601{
2602	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
2603	struct bpf_map_info info = {};
2604	u32 info_len = attr->info.info_len;
2605	int err;
2606
2607	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
2608	if (err)
2609		return err;
2610	info_len = min_t(u32, sizeof(info), info_len);
2611
 
2612	info.type = map->map_type;
2613	info.id = map->id;
2614	info.key_size = map->key_size;
2615	info.value_size = map->value_size;
2616	info.max_entries = map->max_entries;
2617	info.map_flags = map->map_flags;
2618	memcpy(info.name, map->name, sizeof(map->name));
2619
2620	if (map->btf) {
2621		info.btf_id = btf_id(map->btf);
2622		info.btf_key_type_id = map->btf_key_type_id;
2623		info.btf_value_type_id = map->btf_value_type_id;
2624	}
 
2625
2626	if (bpf_map_is_dev_bound(map)) {
2627		err = bpf_map_offload_info_fill(&info, map);
2628		if (err)
2629			return err;
2630	}
2631
2632	if (copy_to_user(uinfo, &info, info_len) ||
2633	    put_user(info_len, &uattr->info.info_len))
2634		return -EFAULT;
2635
2636	return 0;
2637}
2638
2639static int bpf_btf_get_info_by_fd(struct btf *btf,
 
2640				  const union bpf_attr *attr,
2641				  union bpf_attr __user *uattr)
2642{
2643	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
2644	u32 info_len = attr->info.info_len;
2645	int err;
2646
2647	err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len);
2648	if (err)
2649		return err;
2650
2651	return btf_get_info_by_fd(btf, attr, uattr);
2652}
2653
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2654#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
2655
2656static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
2657				  union bpf_attr __user *uattr)
2658{
2659	int ufd = attr->info.bpf_fd;
2660	struct fd f;
2661	int err;
2662
2663	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
2664		return -EINVAL;
2665
2666	f = fdget(ufd);
2667	if (!f.file)
2668		return -EBADFD;
2669
2670	if (f.file->f_op == &bpf_prog_fops)
2671		err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
2672					      uattr);
2673	else if (f.file->f_op == &bpf_map_fops)
2674		err = bpf_map_get_info_by_fd(f.file->private_data, attr,
2675					     uattr);
2676	else if (f.file->f_op == &btf_fops)
2677		err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr);
 
 
 
2678	else
2679		err = -EINVAL;
2680
2681	fdput(f);
2682	return err;
2683}
2684
2685#define BPF_BTF_LOAD_LAST_FIELD btf_log_level
2686
2687static int bpf_btf_load(const union bpf_attr *attr)
2688{
2689	if (CHECK_ATTR(BPF_BTF_LOAD))
2690		return -EINVAL;
2691
2692	if (!capable(CAP_SYS_ADMIN))
2693		return -EPERM;
2694
2695	return btf_new_fd(attr);
2696}
2697
2698#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
2699
2700static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
2701{
2702	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
2703		return -EINVAL;
2704
2705	if (!capable(CAP_SYS_ADMIN))
2706		return -EPERM;
2707
2708	return btf_get_fd_by_id(attr->btf_id);
2709}
2710
2711static int bpf_task_fd_query_copy(const union bpf_attr *attr,
2712				    union bpf_attr __user *uattr,
2713				    u32 prog_id, u32 fd_type,
2714				    const char *buf, u64 probe_offset,
2715				    u64 probe_addr)
2716{
2717	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
2718	u32 len = buf ? strlen(buf) : 0, input_len;
2719	int err = 0;
2720
2721	if (put_user(len, &uattr->task_fd_query.buf_len))
2722		return -EFAULT;
2723	input_len = attr->task_fd_query.buf_len;
2724	if (input_len && ubuf) {
2725		if (!len) {
2726			/* nothing to copy, just make ubuf NULL terminated */
2727			char zero = '\0';
2728
2729			if (put_user(zero, ubuf))
2730				return -EFAULT;
2731		} else if (input_len >= len + 1) {
2732			/* ubuf can hold the string with NULL terminator */
2733			if (copy_to_user(ubuf, buf, len + 1))
2734				return -EFAULT;
2735		} else {
2736			/* ubuf cannot hold the string with NULL terminator,
2737			 * do a partial copy with NULL terminator.
2738			 */
2739			char zero = '\0';
2740
2741			err = -ENOSPC;
2742			if (copy_to_user(ubuf, buf, input_len - 1))
2743				return -EFAULT;
2744			if (put_user(zero, ubuf + input_len - 1))
2745				return -EFAULT;
2746		}
2747	}
2748
2749	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
2750	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
2751	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
2752	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
2753		return -EFAULT;
2754
2755	return err;
2756}
2757
2758#define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
2759
2760static int bpf_task_fd_query(const union bpf_attr *attr,
2761			     union bpf_attr __user *uattr)
2762{
2763	pid_t pid = attr->task_fd_query.pid;
2764	u32 fd = attr->task_fd_query.fd;
2765	const struct perf_event *event;
2766	struct files_struct *files;
2767	struct task_struct *task;
2768	struct file *file;
2769	int err;
2770
2771	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
2772		return -EINVAL;
2773
2774	if (!capable(CAP_SYS_ADMIN))
2775		return -EPERM;
2776
2777	if (attr->task_fd_query.flags != 0)
2778		return -EINVAL;
2779
2780	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
2781	if (!task)
2782		return -ENOENT;
2783
2784	files = get_files_struct(task);
2785	put_task_struct(task);
2786	if (!files)
2787		return -ENOENT;
2788
2789	err = 0;
2790	spin_lock(&files->file_lock);
2791	file = fcheck_files(files, fd);
2792	if (!file)
2793		err = -EBADF;
2794	else
2795		get_file(file);
2796	spin_unlock(&files->file_lock);
2797	put_files_struct(files);
2798
2799	if (err)
2800		goto out;
2801
2802	if (file->f_op == &bpf_raw_tp_fops) {
2803		struct bpf_raw_tracepoint *raw_tp = file->private_data;
2804		struct bpf_raw_event_map *btp = raw_tp->btp;
2805
2806		err = bpf_task_fd_query_copy(attr, uattr,
2807					     raw_tp->prog->aux->id,
2808					     BPF_FD_TYPE_RAW_TRACEPOINT,
2809					     btp->tp->name, 0, 0);
2810		goto put_file;
 
 
 
 
 
 
2811	}
2812
2813	event = perf_get_event(file);
2814	if (!IS_ERR(event)) {
2815		u64 probe_offset, probe_addr;
2816		u32 prog_id, fd_type;
2817		const char *buf;
2818
2819		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
2820					      &buf, &probe_offset,
2821					      &probe_addr);
2822		if (!err)
2823			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
2824						     fd_type, buf,
2825						     probe_offset,
2826						     probe_addr);
2827		goto put_file;
2828	}
2829
 
2830	err = -ENOTSUPP;
2831put_file:
2832	fput(file);
2833out:
2834	return err;
2835}
2836
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2837SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
2838{
2839	union bpf_attr attr = {};
2840	int err;
2841
2842	if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
2843		return -EPERM;
2844
2845	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
2846	if (err)
2847		return err;
2848	size = min_t(u32, size, sizeof(attr));
2849
2850	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
 
2851	if (copy_from_user(&attr, uattr, size) != 0)
2852		return -EFAULT;
2853
2854	err = security_bpf(cmd, &attr, size);
2855	if (err < 0)
2856		return err;
2857
2858	switch (cmd) {
2859	case BPF_MAP_CREATE:
2860		err = map_create(&attr);
2861		break;
2862	case BPF_MAP_LOOKUP_ELEM:
2863		err = map_lookup_elem(&attr);
2864		break;
2865	case BPF_MAP_UPDATE_ELEM:
2866		err = map_update_elem(&attr);
2867		break;
2868	case BPF_MAP_DELETE_ELEM:
2869		err = map_delete_elem(&attr);
2870		break;
2871	case BPF_MAP_GET_NEXT_KEY:
2872		err = map_get_next_key(&attr);
2873		break;
2874	case BPF_MAP_FREEZE:
2875		err = map_freeze(&attr);
2876		break;
2877	case BPF_PROG_LOAD:
2878		err = bpf_prog_load(&attr, uattr);
2879		break;
2880	case BPF_OBJ_PIN:
2881		err = bpf_obj_pin(&attr);
2882		break;
2883	case BPF_OBJ_GET:
2884		err = bpf_obj_get(&attr);
2885		break;
2886	case BPF_PROG_ATTACH:
2887		err = bpf_prog_attach(&attr);
2888		break;
2889	case BPF_PROG_DETACH:
2890		err = bpf_prog_detach(&attr);
2891		break;
2892	case BPF_PROG_QUERY:
2893		err = bpf_prog_query(&attr, uattr);
2894		break;
2895	case BPF_PROG_TEST_RUN:
2896		err = bpf_prog_test_run(&attr, uattr);
2897		break;
2898	case BPF_PROG_GET_NEXT_ID:
2899		err = bpf_obj_get_next_id(&attr, uattr,
2900					  &prog_idr, &prog_idr_lock);
2901		break;
2902	case BPF_MAP_GET_NEXT_ID:
2903		err = bpf_obj_get_next_id(&attr, uattr,
2904					  &map_idr, &map_idr_lock);
2905		break;
2906	case BPF_BTF_GET_NEXT_ID:
2907		err = bpf_obj_get_next_id(&attr, uattr,
2908					  &btf_idr, &btf_idr_lock);
2909		break;
2910	case BPF_PROG_GET_FD_BY_ID:
2911		err = bpf_prog_get_fd_by_id(&attr);
2912		break;
2913	case BPF_MAP_GET_FD_BY_ID:
2914		err = bpf_map_get_fd_by_id(&attr);
2915		break;
2916	case BPF_OBJ_GET_INFO_BY_FD:
2917		err = bpf_obj_get_info_by_fd(&attr, uattr);
2918		break;
2919	case BPF_RAW_TRACEPOINT_OPEN:
2920		err = bpf_raw_tracepoint_open(&attr);
2921		break;
2922	case BPF_BTF_LOAD:
2923		err = bpf_btf_load(&attr);
2924		break;
2925	case BPF_BTF_GET_FD_BY_ID:
2926		err = bpf_btf_get_fd_by_id(&attr);
2927		break;
2928	case BPF_TASK_FD_QUERY:
2929		err = bpf_task_fd_query(&attr, uattr);
2930		break;
2931	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
2932		err = map_lookup_and_delete_elem(&attr);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2933		break;
2934	default:
2935		err = -EINVAL;
2936		break;
2937	}
2938
2939	return err;
2940}
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   3 */
   4#include <linux/bpf.h>
   5#include <linux/bpf_trace.h>
   6#include <linux/bpf_lirc.h>
   7#include <linux/btf.h>
   8#include <linux/syscalls.h>
   9#include <linux/slab.h>
  10#include <linux/sched/signal.h>
  11#include <linux/vmalloc.h>
  12#include <linux/mmzone.h>
  13#include <linux/anon_inodes.h>
  14#include <linux/fdtable.h>
  15#include <linux/file.h>
  16#include <linux/fs.h>
  17#include <linux/license.h>
  18#include <linux/filter.h>
  19#include <linux/version.h>
  20#include <linux/kernel.h>
  21#include <linux/idr.h>
  22#include <linux/cred.h>
  23#include <linux/timekeeping.h>
  24#include <linux/ctype.h>
  25#include <linux/nospec.h>
  26#include <linux/audit.h>
  27#include <uapi/linux/btf.h>
  28#include <linux/pgtable.h>
  29#include <linux/bpf_lsm.h>
  30#include <linux/poll.h>
  31#include <linux/bpf-netns.h>
  32
  33#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
  34			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
  35			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
  36#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
  37#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
  38#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
  39			IS_FD_HASH(map))
  40
  41#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)
  42
  43DEFINE_PER_CPU(int, bpf_prog_active);
  44static DEFINE_IDR(prog_idr);
  45static DEFINE_SPINLOCK(prog_idr_lock);
  46static DEFINE_IDR(map_idr);
  47static DEFINE_SPINLOCK(map_idr_lock);
  48static DEFINE_IDR(link_idr);
  49static DEFINE_SPINLOCK(link_idr_lock);
  50
  51int sysctl_unprivileged_bpf_disabled __read_mostly;
  52
  53static const struct bpf_map_ops * const bpf_map_types[] = {
  54#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
  55#define BPF_MAP_TYPE(_id, _ops) \
  56	[_id] = &_ops,
  57#define BPF_LINK_TYPE(_id, _name)
  58#include <linux/bpf_types.h>
  59#undef BPF_PROG_TYPE
  60#undef BPF_MAP_TYPE
  61#undef BPF_LINK_TYPE
  62};
  63
  64/*
  65 * If we're handed a bigger struct than we know of, ensure all the unknown bits
  66 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
  67 * we don't know about yet.
  68 *
  69 * There is a ToCToU between this function call and the following
  70 * copy_from_user() call. However, this is not a concern since this function is
  71 * meant to be a future-proofing of bits.
  72 */
  73int bpf_check_uarg_tail_zero(void __user *uaddr,
  74			     size_t expected_size,
  75			     size_t actual_size)
  76{
  77	unsigned char __user *addr = uaddr + expected_size;
  78	int res;
 
 
  79
  80	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
  81		return -E2BIG;
  82
 
 
 
  83	if (actual_size <= expected_size)
  84		return 0;
  85
  86	res = check_zeroed_user(addr, actual_size - expected_size);
  87	if (res < 0)
  88		return res;
  89	return res ? 0 : -E2BIG;
 
 
 
 
 
 
 
 
  90}
  91
  92const struct bpf_map_ops bpf_map_offload_ops = {
  93	.map_alloc = bpf_map_offload_map_alloc,
  94	.map_free = bpf_map_offload_map_free,
  95	.map_check_btf = map_check_no_btf,
  96};
  97
  98static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
  99{
 100	const struct bpf_map_ops *ops;
 101	u32 type = attr->map_type;
 102	struct bpf_map *map;
 103	int err;
 104
 105	if (type >= ARRAY_SIZE(bpf_map_types))
 106		return ERR_PTR(-EINVAL);
 107	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
 108	ops = bpf_map_types[type];
 109	if (!ops)
 110		return ERR_PTR(-EINVAL);
 111
 112	if (ops->map_alloc_check) {
 113		err = ops->map_alloc_check(attr);
 114		if (err)
 115			return ERR_PTR(err);
 116	}
 117	if (attr->map_ifindex)
 118		ops = &bpf_map_offload_ops;
 119	map = ops->map_alloc(attr);
 120	if (IS_ERR(map))
 121		return map;
 122	map->ops = ops;
 123	map->map_type = type;
 124	return map;
 125}
 126
 127static u32 bpf_map_value_size(struct bpf_map *map)
 128{
 129	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 130	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
 131	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
 132	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
 133		return round_up(map->value_size, 8) * num_possible_cpus();
 134	else if (IS_FD_MAP(map))
 135		return sizeof(u32);
 136	else
 137		return  map->value_size;
 138}
 139
 140static void maybe_wait_bpf_programs(struct bpf_map *map)
 141{
 142	/* Wait for any running BPF programs to complete so that
 143	 * userspace, when we return to it, knows that all programs
 144	 * that could be running use the new map value.
 145	 */
 146	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
 147	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
 148		synchronize_rcu();
 149}
 150
 151static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
 152				void *value, __u64 flags)
 153{
 154	int err;
 155
 156	/* Need to create a kthread, thus must support schedule */
 157	if (bpf_map_is_dev_bound(map)) {
 158		return bpf_map_offload_update_elem(map, key, value, flags);
 159	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
 160		   map->map_type == BPF_MAP_TYPE_SOCKHASH ||
 161		   map->map_type == BPF_MAP_TYPE_SOCKMAP ||
 162		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
 163		return map->ops->map_update_elem(map, key, value, flags);
 164	} else if (IS_FD_PROG_ARRAY(map)) {
 165		return bpf_fd_array_map_update_elem(map, f.file, key, value,
 166						    flags);
 167	}
 168
 169	bpf_disable_instrumentation();
 170	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 171	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 172		err = bpf_percpu_hash_update(map, key, value, flags);
 173	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 174		err = bpf_percpu_array_update(map, key, value, flags);
 175	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
 176		err = bpf_percpu_cgroup_storage_update(map, key, value,
 177						       flags);
 178	} else if (IS_FD_ARRAY(map)) {
 179		rcu_read_lock();
 180		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
 181						   flags);
 182		rcu_read_unlock();
 183	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
 184		rcu_read_lock();
 185		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
 186						  flags);
 187		rcu_read_unlock();
 188	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
 189		/* rcu_read_lock() is not needed */
 190		err = bpf_fd_reuseport_array_update_elem(map, key, value,
 191							 flags);
 192	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
 193		   map->map_type == BPF_MAP_TYPE_STACK) {
 194		err = map->ops->map_push_elem(map, value, flags);
 195	} else {
 196		rcu_read_lock();
 197		err = map->ops->map_update_elem(map, key, value, flags);
 198		rcu_read_unlock();
 199	}
 200	bpf_enable_instrumentation();
 201	maybe_wait_bpf_programs(map);
 202
 203	return err;
 204}
 205
 206static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
 207			      __u64 flags)
 208{
 209	void *ptr;
 210	int err;
 211
 212	if (bpf_map_is_dev_bound(map))
 213		return bpf_map_offload_lookup_elem(map, key, value);
 214
 215	bpf_disable_instrumentation();
 216	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 217	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 218		err = bpf_percpu_hash_copy(map, key, value);
 219	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 220		err = bpf_percpu_array_copy(map, key, value);
 221	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
 222		err = bpf_percpu_cgroup_storage_copy(map, key, value);
 223	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
 224		err = bpf_stackmap_copy(map, key, value);
 225	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
 226		err = bpf_fd_array_map_lookup_elem(map, key, value);
 227	} else if (IS_FD_HASH(map)) {
 228		err = bpf_fd_htab_map_lookup_elem(map, key, value);
 229	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
 230		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
 231	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
 232		   map->map_type == BPF_MAP_TYPE_STACK) {
 233		err = map->ops->map_peek_elem(map, value);
 234	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
 235		/* struct_ops map requires directly updating "value" */
 236		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
 237	} else {
 238		rcu_read_lock();
 239		if (map->ops->map_lookup_elem_sys_only)
 240			ptr = map->ops->map_lookup_elem_sys_only(map, key);
 241		else
 242			ptr = map->ops->map_lookup_elem(map, key);
 243		if (IS_ERR(ptr)) {
 244			err = PTR_ERR(ptr);
 245		} else if (!ptr) {
 246			err = -ENOENT;
 247		} else {
 248			err = 0;
 249			if (flags & BPF_F_LOCK)
 250				/* lock 'ptr' and copy everything but lock */
 251				copy_map_value_locked(map, value, ptr, true);
 252			else
 253				copy_map_value(map, value, ptr);
 254			/* mask lock, since value wasn't zero inited */
 255			check_and_init_map_lock(map, value);
 256		}
 257		rcu_read_unlock();
 258	}
 259
 260	bpf_enable_instrumentation();
 261	maybe_wait_bpf_programs(map);
 262
 263	return err;
 264}
 265
 266static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
 267{
 268	/* We really just want to fail instead of triggering OOM killer
 269	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
 270	 * which is used for lower order allocation requests.
 271	 *
 272	 * It has been observed that higher order allocation requests done by
 273	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
 274	 * to reclaim memory from the page cache, thus we set
 275	 * __GFP_RETRY_MAYFAIL to avoid such situations.
 276	 */
 277
 278	const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO;
 279	unsigned int flags = 0;
 280	unsigned long align = 1;
 281	void *area;
 282
 283	if (size >= SIZE_MAX)
 284		return NULL;
 285
 286	/* kmalloc()'ed memory can't be mmap()'ed */
 287	if (mmapable) {
 288		BUG_ON(!PAGE_ALIGNED(size));
 289		align = SHMLBA;
 290		flags = VM_USERMAP;
 291	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
 292		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
 293				    numa_node);
 294		if (area != NULL)
 295			return area;
 296	}
 297
 298	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
 299			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
 300			flags, numa_node, __builtin_return_address(0));
 301}
 302
 303void *bpf_map_area_alloc(u64 size, int numa_node)
 304{
 305	return __bpf_map_area_alloc(size, numa_node, false);
 306}
 307
 308void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
 309{
 310	return __bpf_map_area_alloc(size, numa_node, true);
 311}
 312
 313void bpf_map_area_free(void *area)
 314{
 315	kvfree(area);
 316}
 317
 318static u32 bpf_map_flags_retain_permanent(u32 flags)
 319{
 320	/* Some map creation flags are not tied to the map object but
 321	 * rather to the map fd instead, so they have no meaning upon
 322	 * map object inspection since multiple file descriptors with
 323	 * different (access) properties can exist here. Thus, given
 324	 * this has zero meaning for the map itself, lets clear these
 325	 * from here.
 326	 */
 327	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
 328}
 329
 330void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
 331{
 332	map->map_type = attr->map_type;
 333	map->key_size = attr->key_size;
 334	map->value_size = attr->value_size;
 335	map->max_entries = attr->max_entries;
 336	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
 337	map->numa_node = bpf_map_attr_numa_node(attr);
 338}
 339
 340static int bpf_charge_memlock(struct user_struct *user, u32 pages)
 341{
 342	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 343
 344	if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) {
 345		atomic_long_sub(pages, &user->locked_vm);
 346		return -EPERM;
 347	}
 348	return 0;
 349}
 350
 351static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
 352{
 353	if (user)
 354		atomic_long_sub(pages, &user->locked_vm);
 355}
 356
 357int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
 358{
 359	u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
 360	struct user_struct *user;
 361	int ret;
 362
 363	if (size >= U32_MAX - PAGE_SIZE)
 364		return -E2BIG;
 365
 366	user = get_current_user();
 367	ret = bpf_charge_memlock(user, pages);
 368	if (ret) {
 369		free_uid(user);
 370		return ret;
 371	}
 372
 373	mem->pages = pages;
 374	mem->user = user;
 375
 376	return 0;
 377}
 378
 379void bpf_map_charge_finish(struct bpf_map_memory *mem)
 380{
 381	bpf_uncharge_memlock(mem->user, mem->pages);
 382	free_uid(mem->user);
 383}
 384
 385void bpf_map_charge_move(struct bpf_map_memory *dst,
 386			 struct bpf_map_memory *src)
 387{
 388	*dst = *src;
 389
 390	/* Make sure src will not be used for the redundant uncharging. */
 391	memset(src, 0, sizeof(struct bpf_map_memory));
 392}
 393
 394int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
 395{
 396	int ret;
 397
 398	ret = bpf_charge_memlock(map->memory.user, pages);
 399	if (ret)
 400		return ret;
 401	map->memory.pages += pages;
 402	return ret;
 403}
 404
 405void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
 406{
 407	bpf_uncharge_memlock(map->memory.user, pages);
 408	map->memory.pages -= pages;
 409}
 410
 411static int bpf_map_alloc_id(struct bpf_map *map)
 412{
 413	int id;
 414
 415	idr_preload(GFP_KERNEL);
 416	spin_lock_bh(&map_idr_lock);
 417	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
 418	if (id > 0)
 419		map->id = id;
 420	spin_unlock_bh(&map_idr_lock);
 421	idr_preload_end();
 422
 423	if (WARN_ON_ONCE(!id))
 424		return -ENOSPC;
 425
 426	return id > 0 ? 0 : id;
 427}
 428
 429void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
 430{
 431	unsigned long flags;
 432
 433	/* Offloaded maps are removed from the IDR store when their device
 434	 * disappears - even if someone holds an fd to them they are unusable,
 435	 * the memory is gone, all ops will fail; they are simply waiting for
 436	 * refcnt to drop to be freed.
 437	 */
 438	if (!map->id)
 439		return;
 440
 441	if (do_idr_lock)
 442		spin_lock_irqsave(&map_idr_lock, flags);
 443	else
 444		__acquire(&map_idr_lock);
 445
 446	idr_remove(&map_idr, map->id);
 447	map->id = 0;
 448
 449	if (do_idr_lock)
 450		spin_unlock_irqrestore(&map_idr_lock, flags);
 451	else
 452		__release(&map_idr_lock);
 453}
 454
 455/* called from workqueue */
 456static void bpf_map_free_deferred(struct work_struct *work)
 457{
 458	struct bpf_map *map = container_of(work, struct bpf_map, work);
 459	struct bpf_map_memory mem;
 460
 461	bpf_map_charge_move(&mem, &map->memory);
 462	security_bpf_map_free(map);
 463	/* implementation dependent freeing */
 464	map->ops->map_free(map);
 465	bpf_map_charge_finish(&mem);
 466}
 467
 468static void bpf_map_put_uref(struct bpf_map *map)
 469{
 470	if (atomic64_dec_and_test(&map->usercnt)) {
 471		if (map->ops->map_release_uref)
 472			map->ops->map_release_uref(map);
 473	}
 474}
 475
 476/* decrement map refcnt and schedule it for freeing via workqueue
 477 * (unrelying map implementation ops->map_free() might sleep)
 478 */
 479static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
 480{
 481	if (atomic64_dec_and_test(&map->refcnt)) {
 482		/* bpf_map_free_id() must be called first */
 483		bpf_map_free_id(map, do_idr_lock);
 484		btf_put(map->btf);
 485		INIT_WORK(&map->work, bpf_map_free_deferred);
 486		schedule_work(&map->work);
 487	}
 488}
 489
 490void bpf_map_put(struct bpf_map *map)
 491{
 492	__bpf_map_put(map, true);
 493}
 494EXPORT_SYMBOL_GPL(bpf_map_put);
 495
 496void bpf_map_put_with_uref(struct bpf_map *map)
 497{
 498	bpf_map_put_uref(map);
 499	bpf_map_put(map);
 500}
 501
 502static int bpf_map_release(struct inode *inode, struct file *filp)
 503{
 504	struct bpf_map *map = filp->private_data;
 505
 506	if (map->ops->map_release)
 507		map->ops->map_release(map, filp);
 508
 509	bpf_map_put_with_uref(map);
 510	return 0;
 511}
 512
 513static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
 514{
 515	fmode_t mode = f.file->f_mode;
 516
 517	/* Our file permissions may have been overridden by global
 518	 * map permissions facing syscall side.
 519	 */
 520	if (READ_ONCE(map->frozen))
 521		mode &= ~FMODE_CAN_WRITE;
 522	return mode;
 523}
 524
 525#ifdef CONFIG_PROC_FS
 526static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
 527{
 528	const struct bpf_map *map = filp->private_data;
 529	const struct bpf_array *array;
 530	u32 type = 0, jited = 0;
 
 531
 532	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
 533		array = container_of(map, struct bpf_array, map);
 534		type  = array->aux->type;
 535		jited = array->aux->jited;
 536	}
 537
 538	seq_printf(m,
 539		   "map_type:\t%u\n"
 540		   "key_size:\t%u\n"
 541		   "value_size:\t%u\n"
 542		   "max_entries:\t%u\n"
 543		   "map_flags:\t%#x\n"
 544		   "memlock:\t%llu\n"
 545		   "map_id:\t%u\n"
 546		   "frozen:\t%u\n",
 547		   map->map_type,
 548		   map->key_size,
 549		   map->value_size,
 550		   map->max_entries,
 551		   map->map_flags,
 552		   map->memory.pages * 1ULL << PAGE_SHIFT,
 553		   map->id,
 554		   READ_ONCE(map->frozen));
 555	if (type) {
 556		seq_printf(m, "owner_prog_type:\t%u\n", type);
 557		seq_printf(m, "owner_jited:\t%u\n", jited);
 
 
 
 558	}
 559}
 560#endif
 561
 562static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
 563			      loff_t *ppos)
 564{
 565	/* We need this handler such that alloc_file() enables
 566	 * f_mode with FMODE_CAN_READ.
 567	 */
 568	return -EINVAL;
 569}
 570
 571static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
 572			       size_t siz, loff_t *ppos)
 573{
 574	/* We need this handler such that alloc_file() enables
 575	 * f_mode with FMODE_CAN_WRITE.
 576	 */
 577	return -EINVAL;
 578}
 579
 580/* called for any extra memory-mapped regions (except initial) */
 581static void bpf_map_mmap_open(struct vm_area_struct *vma)
 582{
 583	struct bpf_map *map = vma->vm_file->private_data;
 584
 585	if (vma->vm_flags & VM_MAYWRITE) {
 586		mutex_lock(&map->freeze_mutex);
 587		map->writecnt++;
 588		mutex_unlock(&map->freeze_mutex);
 589	}
 590}
 591
 592/* called for all unmapped memory region (including initial) */
 593static void bpf_map_mmap_close(struct vm_area_struct *vma)
 594{
 595	struct bpf_map *map = vma->vm_file->private_data;
 596
 597	if (vma->vm_flags & VM_MAYWRITE) {
 598		mutex_lock(&map->freeze_mutex);
 599		map->writecnt--;
 600		mutex_unlock(&map->freeze_mutex);
 601	}
 602}
 603
 604static const struct vm_operations_struct bpf_map_default_vmops = {
 605	.open		= bpf_map_mmap_open,
 606	.close		= bpf_map_mmap_close,
 607};
 608
 609static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
 610{
 611	struct bpf_map *map = filp->private_data;
 612	int err;
 613
 614	if (!map->ops->map_mmap || map_value_has_spin_lock(map))
 615		return -ENOTSUPP;
 616
 617	if (!(vma->vm_flags & VM_SHARED))
 618		return -EINVAL;
 619
 620	mutex_lock(&map->freeze_mutex);
 621
 622	if (vma->vm_flags & VM_WRITE) {
 623		if (map->frozen) {
 624			err = -EPERM;
 625			goto out;
 626		}
 627		/* map is meant to be read-only, so do not allow mapping as
 628		 * writable, because it's possible to leak a writable page
 629		 * reference and allows user-space to still modify it after
 630		 * freezing, while verifier will assume contents do not change
 631		 */
 632		if (map->map_flags & BPF_F_RDONLY_PROG) {
 633			err = -EACCES;
 634			goto out;
 635		}
 636	}
 637
 638	/* set default open/close callbacks */
 639	vma->vm_ops = &bpf_map_default_vmops;
 640	vma->vm_private_data = map;
 641	vma->vm_flags &= ~VM_MAYEXEC;
 642	if (!(vma->vm_flags & VM_WRITE))
 643		/* disallow re-mapping with PROT_WRITE */
 644		vma->vm_flags &= ~VM_MAYWRITE;
 645
 646	err = map->ops->map_mmap(map, vma);
 647	if (err)
 648		goto out;
 649
 650	if (vma->vm_flags & VM_MAYWRITE)
 651		map->writecnt++;
 652out:
 653	mutex_unlock(&map->freeze_mutex);
 654	return err;
 655}
 656
 657static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
 658{
 659	struct bpf_map *map = filp->private_data;
 660
 661	if (map->ops->map_poll)
 662		return map->ops->map_poll(map, filp, pts);
 663
 664	return EPOLLERR;
 665}
 666
 667const struct file_operations bpf_map_fops = {
 668#ifdef CONFIG_PROC_FS
 669	.show_fdinfo	= bpf_map_show_fdinfo,
 670#endif
 671	.release	= bpf_map_release,
 672	.read		= bpf_dummy_read,
 673	.write		= bpf_dummy_write,
 674	.mmap		= bpf_map_mmap,
 675	.poll		= bpf_map_poll,
 676};
 677
 678int bpf_map_new_fd(struct bpf_map *map, int flags)
 679{
 680	int ret;
 681
 682	ret = security_bpf_map(map, OPEN_FMODE(flags));
 683	if (ret < 0)
 684		return ret;
 685
 686	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
 687				flags | O_CLOEXEC);
 688}
 689
 690int bpf_get_file_flag(int flags)
 691{
 692	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
 693		return -EINVAL;
 694	if (flags & BPF_F_RDONLY)
 695		return O_RDONLY;
 696	if (flags & BPF_F_WRONLY)
 697		return O_WRONLY;
 698	return O_RDWR;
 699}
 700
 701/* helper macro to check that unused fields 'union bpf_attr' are zero */
 702#define CHECK_ATTR(CMD) \
 703	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
 704		   sizeof(attr->CMD##_LAST_FIELD), 0, \
 705		   sizeof(*attr) - \
 706		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
 707		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
 708
 709/* dst and src must have at least "size" number of bytes.
 710 * Return strlen on success and < 0 on error.
 711 */
 712int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
 713{
 714	const char *end = src + size;
 715	const char *orig_src = src;
 716
 717	memset(dst, 0, size);
 718	/* Copy all isalnum(), '_' and '.' chars. */
 719	while (src < end && *src) {
 720		if (!isalnum(*src) &&
 721		    *src != '_' && *src != '.')
 722			return -EINVAL;
 723		*dst++ = *src++;
 724	}
 725
 726	/* No '\0' found in "size" number of bytes */
 727	if (src == end)
 728		return -EINVAL;
 729
 730	return src - orig_src;
 731}
 732
 733int map_check_no_btf(const struct bpf_map *map,
 734		     const struct btf *btf,
 735		     const struct btf_type *key_type,
 736		     const struct btf_type *value_type)
 737{
 738	return -ENOTSUPP;
 739}
 740
 741static int map_check_btf(struct bpf_map *map, const struct btf *btf,
 742			 u32 btf_key_id, u32 btf_value_id)
 743{
 744	const struct btf_type *key_type, *value_type;
 745	u32 key_size, value_size;
 746	int ret = 0;
 747
 748	/* Some maps allow key to be unspecified. */
 749	if (btf_key_id) {
 750		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
 751		if (!key_type || key_size != map->key_size)
 752			return -EINVAL;
 753	} else {
 754		key_type = btf_type_by_id(btf, 0);
 755		if (!map->ops->map_check_btf)
 756			return -EINVAL;
 757	}
 758
 759	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
 760	if (!value_type || value_size != map->value_size)
 761		return -EINVAL;
 762
 763	map->spin_lock_off = btf_find_spin_lock(btf, value_type);
 764
 765	if (map_value_has_spin_lock(map)) {
 766		if (map->map_flags & BPF_F_RDONLY_PROG)
 767			return -EACCES;
 768		if (map->map_type != BPF_MAP_TYPE_HASH &&
 769		    map->map_type != BPF_MAP_TYPE_ARRAY &&
 770		    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
 771		    map->map_type != BPF_MAP_TYPE_SK_STORAGE)
 772			return -ENOTSUPP;
 773		if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
 774		    map->value_size) {
 775			WARN_ONCE(1,
 776				  "verifier bug spin_lock_off %d value_size %d\n",
 777				  map->spin_lock_off, map->value_size);
 778			return -EFAULT;
 779		}
 780	}
 781
 782	if (map->ops->map_check_btf)
 783		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
 784
 785	return ret;
 786}
 787
 788#define BPF_MAP_CREATE_LAST_FIELD btf_vmlinux_value_type_id
 789/* called via syscall */
 790static int map_create(union bpf_attr *attr)
 791{
 792	int numa_node = bpf_map_attr_numa_node(attr);
 793	struct bpf_map_memory mem;
 794	struct bpf_map *map;
 795	int f_flags;
 796	int err;
 797
 798	err = CHECK_ATTR(BPF_MAP_CREATE);
 799	if (err)
 800		return -EINVAL;
 801
 802	if (attr->btf_vmlinux_value_type_id) {
 803		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
 804		    attr->btf_key_type_id || attr->btf_value_type_id)
 805			return -EINVAL;
 806	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
 807		return -EINVAL;
 808	}
 809
 810	f_flags = bpf_get_file_flag(attr->map_flags);
 811	if (f_flags < 0)
 812		return f_flags;
 813
 814	if (numa_node != NUMA_NO_NODE &&
 815	    ((unsigned int)numa_node >= nr_node_ids ||
 816	     !node_online(numa_node)))
 817		return -EINVAL;
 818
 819	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
 820	map = find_and_alloc_map(attr);
 821	if (IS_ERR(map))
 822		return PTR_ERR(map);
 823
 824	err = bpf_obj_name_cpy(map->name, attr->map_name,
 825			       sizeof(attr->map_name));
 826	if (err < 0)
 827		goto free_map;
 828
 829	atomic64_set(&map->refcnt, 1);
 830	atomic64_set(&map->usercnt, 1);
 831	mutex_init(&map->freeze_mutex);
 832
 833	map->spin_lock_off = -EINVAL;
 834	if (attr->btf_key_type_id || attr->btf_value_type_id ||
 835	    /* Even the map's value is a kernel's struct,
 836	     * the bpf_prog.o must have BTF to begin with
 837	     * to figure out the corresponding kernel's
 838	     * counter part.  Thus, attr->btf_fd has
 839	     * to be valid also.
 840	     */
 841	    attr->btf_vmlinux_value_type_id) {
 842		struct btf *btf;
 843
 
 
 
 
 
 844		btf = btf_get_by_fd(attr->btf_fd);
 845		if (IS_ERR(btf)) {
 846			err = PTR_ERR(btf);
 847			goto free_map;
 848		}
 849		map->btf = btf;
 850
 851		if (attr->btf_value_type_id) {
 852			err = map_check_btf(map, btf, attr->btf_key_type_id,
 853					    attr->btf_value_type_id);
 854			if (err)
 855				goto free_map;
 856		}
 857
 
 858		map->btf_key_type_id = attr->btf_key_type_id;
 859		map->btf_value_type_id = attr->btf_value_type_id;
 860		map->btf_vmlinux_value_type_id =
 861			attr->btf_vmlinux_value_type_id;
 862	}
 863
 864	err = security_bpf_map_alloc(map);
 865	if (err)
 866		goto free_map;
 867
 868	err = bpf_map_alloc_id(map);
 869	if (err)
 870		goto free_map_sec;
 871
 872	err = bpf_map_new_fd(map, f_flags);
 873	if (err < 0) {
 874		/* failed to allocate fd.
 875		 * bpf_map_put_with_uref() is needed because the above
 876		 * bpf_map_alloc_id() has published the map
 877		 * to the userspace and the userspace may
 878		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
 879		 */
 880		bpf_map_put_with_uref(map);
 881		return err;
 882	}
 883
 884	return err;
 885
 886free_map_sec:
 887	security_bpf_map_free(map);
 888free_map:
 889	btf_put(map->btf);
 890	bpf_map_charge_move(&mem, &map->memory);
 891	map->ops->map_free(map);
 892	bpf_map_charge_finish(&mem);
 893	return err;
 894}
 895
 896/* if error is returned, fd is released.
 897 * On success caller should complete fd access with matching fdput()
 898 */
 899struct bpf_map *__bpf_map_get(struct fd f)
 900{
 901	if (!f.file)
 902		return ERR_PTR(-EBADF);
 903	if (f.file->f_op != &bpf_map_fops) {
 904		fdput(f);
 905		return ERR_PTR(-EINVAL);
 906	}
 907
 908	return f.file->private_data;
 909}
 910
 911void bpf_map_inc(struct bpf_map *map)
 912{
 913	atomic64_inc(&map->refcnt);
 914}
 915EXPORT_SYMBOL_GPL(bpf_map_inc);
 916
 917void bpf_map_inc_with_uref(struct bpf_map *map)
 918{
 919	atomic64_inc(&map->refcnt);
 920	atomic64_inc(&map->usercnt);
 921}
 922EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);
 923
 924struct bpf_map *bpf_map_get(u32 ufd)
 925{
 926	struct fd f = fdget(ufd);
 927	struct bpf_map *map;
 928
 929	map = __bpf_map_get(f);
 930	if (IS_ERR(map))
 931		return map;
 932
 933	bpf_map_inc(map);
 934	fdput(f);
 935
 936	return map;
 937}
 
 938
 939struct bpf_map *bpf_map_get_with_uref(u32 ufd)
 940{
 941	struct fd f = fdget(ufd);
 942	struct bpf_map *map;
 943
 944	map = __bpf_map_get(f);
 945	if (IS_ERR(map))
 946		return map;
 947
 948	bpf_map_inc_with_uref(map);
 949	fdput(f);
 950
 951	return map;
 952}
 953
 954/* map_idr_lock should have been held */
 955static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
 
 956{
 957	int refold;
 958
 959	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
 
 
 
 
 
 
 960	if (!refold)
 961		return ERR_PTR(-ENOENT);
 
 962	if (uref)
 963		atomic64_inc(&map->usercnt);
 964
 965	return map;
 966}
 967
 968struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
 969{
 970	spin_lock_bh(&map_idr_lock);
 971	map = __bpf_map_inc_not_zero(map, false);
 972	spin_unlock_bh(&map_idr_lock);
 973
 974	return map;
 975}
 976EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);
 977
 978int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
 979{
 980	return -ENOTSUPP;
 981}
 982
 983static void *__bpf_copy_key(void __user *ukey, u64 key_size)
 984{
 985	if (key_size)
 986		return memdup_user(ukey, key_size);
 987
 988	if (ukey)
 989		return ERR_PTR(-EINVAL);
 990
 991	return NULL;
 992}
 993
 994/* last field in 'union bpf_attr' used by this command */
 995#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
 996
 997static int map_lookup_elem(union bpf_attr *attr)
 998{
 999	void __user *ukey = u64_to_user_ptr(attr->key);
1000	void __user *uvalue = u64_to_user_ptr(attr->value);
1001	int ufd = attr->map_fd;
1002	struct bpf_map *map;
1003	void *key, *value;
1004	u32 value_size;
1005	struct fd f;
1006	int err;
1007
1008	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
1009		return -EINVAL;
1010
1011	if (attr->flags & ~BPF_F_LOCK)
1012		return -EINVAL;
1013
1014	f = fdget(ufd);
1015	map = __bpf_map_get(f);
1016	if (IS_ERR(map))
1017		return PTR_ERR(map);
1018	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1019		err = -EPERM;
1020		goto err_put;
1021	}
1022
1023	if ((attr->flags & BPF_F_LOCK) &&
1024	    !map_value_has_spin_lock(map)) {
1025		err = -EINVAL;
1026		goto err_put;
1027	}
1028
1029	key = __bpf_copy_key(ukey, map->key_size);
1030	if (IS_ERR(key)) {
1031		err = PTR_ERR(key);
1032		goto err_put;
1033	}
1034
1035	value_size = bpf_map_value_size(map);
 
 
 
 
 
 
 
 
1036
1037	err = -ENOMEM;
1038	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
1039	if (!value)
1040		goto free_key;
1041
1042	err = bpf_map_copy_value(map, key, value, attr->flags);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1043	if (err)
1044		goto free_value;
1045
1046	err = -EFAULT;
1047	if (copy_to_user(uvalue, value, value_size) != 0)
1048		goto free_value;
1049
1050	err = 0;
1051
1052free_value:
1053	kfree(value);
1054free_key:
1055	kfree(key);
1056err_put:
1057	fdput(f);
1058	return err;
1059}
1060
 
 
 
 
 
 
 
 
 
 
1061
1062#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
1063
1064static int map_update_elem(union bpf_attr *attr)
1065{
1066	void __user *ukey = u64_to_user_ptr(attr->key);
1067	void __user *uvalue = u64_to_user_ptr(attr->value);
1068	int ufd = attr->map_fd;
1069	struct bpf_map *map;
1070	void *key, *value;
1071	u32 value_size;
1072	struct fd f;
1073	int err;
1074
1075	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
1076		return -EINVAL;
1077
1078	f = fdget(ufd);
1079	map = __bpf_map_get(f);
1080	if (IS_ERR(map))
1081		return PTR_ERR(map);
1082	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1083		err = -EPERM;
1084		goto err_put;
1085	}
1086
1087	if ((attr->flags & BPF_F_LOCK) &&
1088	    !map_value_has_spin_lock(map)) {
1089		err = -EINVAL;
1090		goto err_put;
1091	}
1092
1093	key = __bpf_copy_key(ukey, map->key_size);
1094	if (IS_ERR(key)) {
1095		err = PTR_ERR(key);
1096		goto err_put;
1097	}
1098
1099	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
1100	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
1101	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
1102	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
1103		value_size = round_up(map->value_size, 8) * num_possible_cpus();
1104	else
1105		value_size = map->value_size;
1106
1107	err = -ENOMEM;
1108	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
1109	if (!value)
1110		goto free_key;
1111
1112	err = -EFAULT;
1113	if (copy_from_user(value, uvalue, value_size) != 0)
1114		goto free_value;
1115
1116	err = bpf_map_update_value(map, f, key, value, attr->flags);
 
 
 
 
 
 
 
 
 
1117
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1118free_value:
1119	kfree(value);
1120free_key:
1121	kfree(key);
1122err_put:
1123	fdput(f);
1124	return err;
1125}
1126
1127#define BPF_MAP_DELETE_ELEM_LAST_FIELD key
1128
1129static int map_delete_elem(union bpf_attr *attr)
1130{
1131	void __user *ukey = u64_to_user_ptr(attr->key);
1132	int ufd = attr->map_fd;
1133	struct bpf_map *map;
1134	struct fd f;
1135	void *key;
1136	int err;
1137
1138	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
1139		return -EINVAL;
1140
1141	f = fdget(ufd);
1142	map = __bpf_map_get(f);
1143	if (IS_ERR(map))
1144		return PTR_ERR(map);
1145	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1146		err = -EPERM;
1147		goto err_put;
1148	}
1149
1150	key = __bpf_copy_key(ukey, map->key_size);
1151	if (IS_ERR(key)) {
1152		err = PTR_ERR(key);
1153		goto err_put;
1154	}
1155
1156	if (bpf_map_is_dev_bound(map)) {
1157		err = bpf_map_offload_delete_elem(map, key);
1158		goto out;
1159	} else if (IS_FD_PROG_ARRAY(map) ||
1160		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
1161		/* These maps require sleepable context */
1162		err = map->ops->map_delete_elem(map, key);
1163		goto out;
1164	}
1165
1166	bpf_disable_instrumentation();
 
1167	rcu_read_lock();
1168	err = map->ops->map_delete_elem(map, key);
1169	rcu_read_unlock();
1170	bpf_enable_instrumentation();
 
1171	maybe_wait_bpf_programs(map);
1172out:
1173	kfree(key);
1174err_put:
1175	fdput(f);
1176	return err;
1177}
1178
1179/* last field in 'union bpf_attr' used by this command */
1180#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
1181
1182static int map_get_next_key(union bpf_attr *attr)
1183{
1184	void __user *ukey = u64_to_user_ptr(attr->key);
1185	void __user *unext_key = u64_to_user_ptr(attr->next_key);
1186	int ufd = attr->map_fd;
1187	struct bpf_map *map;
1188	void *key, *next_key;
1189	struct fd f;
1190	int err;
1191
1192	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
1193		return -EINVAL;
1194
1195	f = fdget(ufd);
1196	map = __bpf_map_get(f);
1197	if (IS_ERR(map))
1198		return PTR_ERR(map);
1199	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1200		err = -EPERM;
1201		goto err_put;
1202	}
1203
1204	if (ukey) {
1205		key = __bpf_copy_key(ukey, map->key_size);
1206		if (IS_ERR(key)) {
1207			err = PTR_ERR(key);
1208			goto err_put;
1209		}
1210	} else {
1211		key = NULL;
1212	}
1213
1214	err = -ENOMEM;
1215	next_key = kmalloc(map->key_size, GFP_USER);
1216	if (!next_key)
1217		goto free_key;
1218
1219	if (bpf_map_is_dev_bound(map)) {
1220		err = bpf_map_offload_get_next_key(map, key, next_key);
1221		goto out;
1222	}
1223
1224	rcu_read_lock();
1225	err = map->ops->map_get_next_key(map, key, next_key);
1226	rcu_read_unlock();
1227out:
1228	if (err)
1229		goto free_next_key;
1230
1231	err = -EFAULT;
1232	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
1233		goto free_next_key;
1234
1235	err = 0;
1236
1237free_next_key:
1238	kfree(next_key);
1239free_key:
1240	kfree(key);
1241err_put:
1242	fdput(f);
1243	return err;
1244}
1245
1246int generic_map_delete_batch(struct bpf_map *map,
1247			     const union bpf_attr *attr,
1248			     union bpf_attr __user *uattr)
1249{
1250	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1251	u32 cp, max_count;
1252	int err = 0;
1253	void *key;
1254
1255	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1256		return -EINVAL;
1257
1258	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1259	    !map_value_has_spin_lock(map)) {
1260		return -EINVAL;
1261	}
1262
1263	max_count = attr->batch.count;
1264	if (!max_count)
1265		return 0;
1266
1267	key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1268	if (!key)
1269		return -ENOMEM;
1270
1271	for (cp = 0; cp < max_count; cp++) {
1272		err = -EFAULT;
1273		if (copy_from_user(key, keys + cp * map->key_size,
1274				   map->key_size))
1275			break;
1276
1277		if (bpf_map_is_dev_bound(map)) {
1278			err = bpf_map_offload_delete_elem(map, key);
1279			break;
1280		}
1281
1282		bpf_disable_instrumentation();
1283		rcu_read_lock();
1284		err = map->ops->map_delete_elem(map, key);
1285		rcu_read_unlock();
1286		bpf_enable_instrumentation();
1287		maybe_wait_bpf_programs(map);
1288		if (err)
1289			break;
1290	}
1291	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1292		err = -EFAULT;
1293
1294	kfree(key);
1295	return err;
1296}
1297
1298int generic_map_update_batch(struct bpf_map *map,
1299			     const union bpf_attr *attr,
1300			     union bpf_attr __user *uattr)
1301{
1302	void __user *values = u64_to_user_ptr(attr->batch.values);
1303	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1304	u32 value_size, cp, max_count;
1305	int ufd = attr->map_fd;
1306	void *key, *value;
1307	struct fd f;
1308	int err = 0;
1309
1310	f = fdget(ufd);
1311	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1312		return -EINVAL;
1313
1314	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1315	    !map_value_has_spin_lock(map)) {
1316		return -EINVAL;
1317	}
1318
1319	value_size = bpf_map_value_size(map);
1320
1321	max_count = attr->batch.count;
1322	if (!max_count)
1323		return 0;
1324
1325	key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1326	if (!key)
1327		return -ENOMEM;
1328
1329	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
1330	if (!value) {
1331		kfree(key);
1332		return -ENOMEM;
1333	}
1334
1335	for (cp = 0; cp < max_count; cp++) {
1336		err = -EFAULT;
1337		if (copy_from_user(key, keys + cp * map->key_size,
1338		    map->key_size) ||
1339		    copy_from_user(value, values + cp * value_size, value_size))
1340			break;
1341
1342		err = bpf_map_update_value(map, f, key, value,
1343					   attr->batch.elem_flags);
1344
1345		if (err)
1346			break;
1347	}
1348
1349	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1350		err = -EFAULT;
1351
1352	kfree(value);
1353	kfree(key);
1354	return err;
1355}
1356
1357#define MAP_LOOKUP_RETRIES 3
1358
1359int generic_map_lookup_batch(struct bpf_map *map,
1360				    const union bpf_attr *attr,
1361				    union bpf_attr __user *uattr)
1362{
1363	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
1364	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1365	void __user *values = u64_to_user_ptr(attr->batch.values);
1366	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1367	void *buf, *buf_prevkey, *prev_key, *key, *value;
1368	int err, retry = MAP_LOOKUP_RETRIES;
1369	u32 value_size, cp, max_count;
1370
1371	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1372		return -EINVAL;
1373
1374	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1375	    !map_value_has_spin_lock(map))
1376		return -EINVAL;
1377
1378	value_size = bpf_map_value_size(map);
1379
1380	max_count = attr->batch.count;
1381	if (!max_count)
1382		return 0;
1383
1384	if (put_user(0, &uattr->batch.count))
1385		return -EFAULT;
1386
1387	buf_prevkey = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1388	if (!buf_prevkey)
1389		return -ENOMEM;
1390
1391	buf = kmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
1392	if (!buf) {
1393		kfree(buf_prevkey);
1394		return -ENOMEM;
1395	}
1396
1397	err = -EFAULT;
1398	prev_key = NULL;
1399	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
1400		goto free_buf;
1401	key = buf;
1402	value = key + map->key_size;
1403	if (ubatch)
1404		prev_key = buf_prevkey;
1405
1406	for (cp = 0; cp < max_count;) {
1407		rcu_read_lock();
1408		err = map->ops->map_get_next_key(map, prev_key, key);
1409		rcu_read_unlock();
1410		if (err)
1411			break;
1412		err = bpf_map_copy_value(map, key, value,
1413					 attr->batch.elem_flags);
1414
1415		if (err == -ENOENT) {
1416			if (retry) {
1417				retry--;
1418				continue;
1419			}
1420			err = -EINTR;
1421			break;
1422		}
1423
1424		if (err)
1425			goto free_buf;
1426
1427		if (copy_to_user(keys + cp * map->key_size, key,
1428				 map->key_size)) {
1429			err = -EFAULT;
1430			goto free_buf;
1431		}
1432		if (copy_to_user(values + cp * value_size, value, value_size)) {
1433			err = -EFAULT;
1434			goto free_buf;
1435		}
1436
1437		if (!prev_key)
1438			prev_key = buf_prevkey;
1439
1440		swap(prev_key, key);
1441		retry = MAP_LOOKUP_RETRIES;
1442		cp++;
1443	}
1444
1445	if (err == -EFAULT)
1446		goto free_buf;
1447
1448	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
1449		    (cp && copy_to_user(uobatch, prev_key, map->key_size))))
1450		err = -EFAULT;
1451
1452free_buf:
1453	kfree(buf_prevkey);
1454	kfree(buf);
1455	return err;
1456}
1457
1458#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value
1459
1460static int map_lookup_and_delete_elem(union bpf_attr *attr)
1461{
1462	void __user *ukey = u64_to_user_ptr(attr->key);
1463	void __user *uvalue = u64_to_user_ptr(attr->value);
1464	int ufd = attr->map_fd;
1465	struct bpf_map *map;
1466	void *key, *value;
1467	u32 value_size;
1468	struct fd f;
1469	int err;
1470
1471	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
1472		return -EINVAL;
1473
1474	f = fdget(ufd);
1475	map = __bpf_map_get(f);
1476	if (IS_ERR(map))
1477		return PTR_ERR(map);
1478	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
1479	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1480		err = -EPERM;
1481		goto err_put;
1482	}
1483
1484	key = __bpf_copy_key(ukey, map->key_size);
1485	if (IS_ERR(key)) {
1486		err = PTR_ERR(key);
1487		goto err_put;
1488	}
1489
1490	value_size = map->value_size;
1491
1492	err = -ENOMEM;
1493	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
1494	if (!value)
1495		goto free_key;
1496
1497	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
1498	    map->map_type == BPF_MAP_TYPE_STACK) {
1499		err = map->ops->map_pop_elem(map, value);
1500	} else {
1501		err = -ENOTSUPP;
1502	}
1503
1504	if (err)
1505		goto free_value;
1506
1507	if (copy_to_user(uvalue, value, value_size) != 0) {
1508		err = -EFAULT;
1509		goto free_value;
1510	}
1511
1512	err = 0;
1513
1514free_value:
1515	kfree(value);
1516free_key:
1517	kfree(key);
1518err_put:
1519	fdput(f);
1520	return err;
1521}
1522
1523#define BPF_MAP_FREEZE_LAST_FIELD map_fd
1524
1525static int map_freeze(const union bpf_attr *attr)
1526{
1527	int err = 0, ufd = attr->map_fd;
1528	struct bpf_map *map;
1529	struct fd f;
1530
1531	if (CHECK_ATTR(BPF_MAP_FREEZE))
1532		return -EINVAL;
1533
1534	f = fdget(ufd);
1535	map = __bpf_map_get(f);
1536	if (IS_ERR(map))
1537		return PTR_ERR(map);
1538
1539	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
1540		fdput(f);
1541		return -ENOTSUPP;
1542	}
1543
1544	mutex_lock(&map->freeze_mutex);
1545
1546	if (map->writecnt) {
1547		err = -EBUSY;
1548		goto err_put;
1549	}
1550	if (READ_ONCE(map->frozen)) {
1551		err = -EBUSY;
1552		goto err_put;
1553	}
1554	if (!bpf_capable()) {
1555		err = -EPERM;
1556		goto err_put;
1557	}
1558
1559	WRITE_ONCE(map->frozen, true);
1560err_put:
1561	mutex_unlock(&map->freeze_mutex);
1562	fdput(f);
1563	return err;
1564}
1565
1566static const struct bpf_prog_ops * const bpf_prog_types[] = {
1567#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
1568	[_id] = & _name ## _prog_ops,
1569#define BPF_MAP_TYPE(_id, _ops)
1570#define BPF_LINK_TYPE(_id, _name)
1571#include <linux/bpf_types.h>
1572#undef BPF_PROG_TYPE
1573#undef BPF_MAP_TYPE
1574#undef BPF_LINK_TYPE
1575};
1576
1577static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
1578{
1579	const struct bpf_prog_ops *ops;
1580
1581	if (type >= ARRAY_SIZE(bpf_prog_types))
1582		return -EINVAL;
1583	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
1584	ops = bpf_prog_types[type];
1585	if (!ops)
1586		return -EINVAL;
1587
1588	if (!bpf_prog_is_dev_bound(prog->aux))
1589		prog->aux->ops = ops;
1590	else
1591		prog->aux->ops = &bpf_offload_prog_ops;
1592	prog->type = type;
1593	return 0;
1594}
1595
1596enum bpf_audit {
1597	BPF_AUDIT_LOAD,
1598	BPF_AUDIT_UNLOAD,
1599	BPF_AUDIT_MAX,
1600};
1601
1602static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
1603	[BPF_AUDIT_LOAD]   = "LOAD",
1604	[BPF_AUDIT_UNLOAD] = "UNLOAD",
1605};
 
 
1606
1607static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
1608{
1609	struct audit_context *ctx = NULL;
1610	struct audit_buffer *ab;
1611
1612	if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
1613		return;
1614	if (audit_enabled == AUDIT_OFF)
1615		return;
1616	if (op == BPF_AUDIT_LOAD)
1617		ctx = audit_context();
1618	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
1619	if (unlikely(!ab))
1620		return;
1621	audit_log_format(ab, "prog-id=%u op=%s",
1622			 prog->aux->id, bpf_audit_str[op]);
1623	audit_log_end(ab);
1624}
1625
1626int __bpf_prog_charge(struct user_struct *user, u32 pages)
1627{
1628	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1629	unsigned long user_bufs;
1630
1631	if (user) {
1632		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
1633		if (user_bufs > memlock_limit) {
1634			atomic_long_sub(pages, &user->locked_vm);
1635			return -EPERM;
1636		}
1637	}
1638
1639	return 0;
1640}
1641
1642void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
1643{
1644	if (user)
1645		atomic_long_sub(pages, &user->locked_vm);
1646}
1647
1648static int bpf_prog_charge_memlock(struct bpf_prog *prog)
1649{
1650	struct user_struct *user = get_current_user();
1651	int ret;
1652
1653	ret = __bpf_prog_charge(user, prog->pages);
1654	if (ret) {
1655		free_uid(user);
1656		return ret;
1657	}
1658
1659	prog->aux->user = user;
1660	return 0;
1661}
1662
1663static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
1664{
1665	struct user_struct *user = prog->aux->user;
1666
1667	__bpf_prog_uncharge(user, prog->pages);
1668	free_uid(user);
1669}
1670
1671static int bpf_prog_alloc_id(struct bpf_prog *prog)
1672{
1673	int id;
1674
1675	idr_preload(GFP_KERNEL);
1676	spin_lock_bh(&prog_idr_lock);
1677	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
1678	if (id > 0)
1679		prog->aux->id = id;
1680	spin_unlock_bh(&prog_idr_lock);
1681	idr_preload_end();
1682
1683	/* id is in [1, INT_MAX) */
1684	if (WARN_ON_ONCE(!id))
1685		return -ENOSPC;
1686
1687	return id > 0 ? 0 : id;
1688}
1689
1690void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
1691{
1692	/* cBPF to eBPF migrations are currently not in the idr store.
1693	 * Offloaded programs are removed from the store when their device
1694	 * disappears - even if someone grabs an fd to them they are unusable,
1695	 * simply waiting for refcnt to drop to be freed.
1696	 */
1697	if (!prog->aux->id)
1698		return;
1699
1700	if (do_idr_lock)
1701		spin_lock_bh(&prog_idr_lock);
1702	else
1703		__acquire(&prog_idr_lock);
1704
1705	idr_remove(&prog_idr, prog->aux->id);
1706	prog->aux->id = 0;
1707
1708	if (do_idr_lock)
1709		spin_unlock_bh(&prog_idr_lock);
1710	else
1711		__release(&prog_idr_lock);
1712}
1713
1714static void __bpf_prog_put_rcu(struct rcu_head *rcu)
1715{
1716	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
1717
1718	kvfree(aux->func_info);
1719	kfree(aux->func_info_aux);
1720	bpf_prog_uncharge_memlock(aux->prog);
1721	security_bpf_prog_free(aux);
1722	bpf_prog_free(aux->prog);
1723}
1724
1725static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
1726{
1727	bpf_prog_kallsyms_del_all(prog);
1728	btf_put(prog->aux->btf);
1729	bpf_prog_free_linfo(prog);
1730
1731	if (deferred)
1732		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
1733	else
1734		__bpf_prog_put_rcu(&prog->aux->rcu);
1735}
1736
1737static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
1738{
1739	if (atomic64_dec_and_test(&prog->aux->refcnt)) {
1740		perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
1741		bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
1742		/* bpf_prog_free_id() must be called first */
1743		bpf_prog_free_id(prog, do_idr_lock);
1744		__bpf_prog_put_noref(prog, true);
1745	}
1746}
1747
1748void bpf_prog_put(struct bpf_prog *prog)
1749{
1750	__bpf_prog_put(prog, true);
1751}
1752EXPORT_SYMBOL_GPL(bpf_prog_put);
1753
1754static int bpf_prog_release(struct inode *inode, struct file *filp)
1755{
1756	struct bpf_prog *prog = filp->private_data;
1757
1758	bpf_prog_put(prog);
1759	return 0;
1760}
1761
1762static void bpf_prog_get_stats(const struct bpf_prog *prog,
1763			       struct bpf_prog_stats *stats)
1764{
1765	u64 nsecs = 0, cnt = 0;
1766	int cpu;
1767
1768	for_each_possible_cpu(cpu) {
1769		const struct bpf_prog_stats *st;
1770		unsigned int start;
1771		u64 tnsecs, tcnt;
1772
1773		st = per_cpu_ptr(prog->aux->stats, cpu);
1774		do {
1775			start = u64_stats_fetch_begin_irq(&st->syncp);
1776			tnsecs = st->nsecs;
1777			tcnt = st->cnt;
1778		} while (u64_stats_fetch_retry_irq(&st->syncp, start));
1779		nsecs += tnsecs;
1780		cnt += tcnt;
1781	}
1782	stats->nsecs = nsecs;
1783	stats->cnt = cnt;
1784}
1785
1786#ifdef CONFIG_PROC_FS
1787static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
1788{
1789	const struct bpf_prog *prog = filp->private_data;
1790	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
1791	struct bpf_prog_stats stats;
1792
1793	bpf_prog_get_stats(prog, &stats);
1794	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
1795	seq_printf(m,
1796		   "prog_type:\t%u\n"
1797		   "prog_jited:\t%u\n"
1798		   "prog_tag:\t%s\n"
1799		   "memlock:\t%llu\n"
1800		   "prog_id:\t%u\n"
1801		   "run_time_ns:\t%llu\n"
1802		   "run_cnt:\t%llu\n",
1803		   prog->type,
1804		   prog->jited,
1805		   prog_tag,
1806		   prog->pages * 1ULL << PAGE_SHIFT,
1807		   prog->aux->id,
1808		   stats.nsecs,
1809		   stats.cnt);
1810}
1811#endif
1812
1813const struct file_operations bpf_prog_fops = {
1814#ifdef CONFIG_PROC_FS
1815	.show_fdinfo	= bpf_prog_show_fdinfo,
1816#endif
1817	.release	= bpf_prog_release,
1818	.read		= bpf_dummy_read,
1819	.write		= bpf_dummy_write,
1820};
1821
1822int bpf_prog_new_fd(struct bpf_prog *prog)
1823{
1824	int ret;
1825
1826	ret = security_bpf_prog(prog);
1827	if (ret < 0)
1828		return ret;
1829
1830	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
1831				O_RDWR | O_CLOEXEC);
1832}
1833
1834static struct bpf_prog *____bpf_prog_get(struct fd f)
1835{
1836	if (!f.file)
1837		return ERR_PTR(-EBADF);
1838	if (f.file->f_op != &bpf_prog_fops) {
1839		fdput(f);
1840		return ERR_PTR(-EINVAL);
1841	}
1842
1843	return f.file->private_data;
1844}
1845
1846void bpf_prog_add(struct bpf_prog *prog, int i)
1847{
1848	atomic64_add(i, &prog->aux->refcnt);
 
 
 
 
1849}
1850EXPORT_SYMBOL_GPL(bpf_prog_add);
1851
1852void bpf_prog_sub(struct bpf_prog *prog, int i)
1853{
1854	/* Only to be used for undoing previous bpf_prog_add() in some
1855	 * error path. We still know that another entity in our call
1856	 * path holds a reference to the program, thus atomic_sub() can
1857	 * be safely used in such cases!
1858	 */
1859	WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
1860}
1861EXPORT_SYMBOL_GPL(bpf_prog_sub);
1862
1863void bpf_prog_inc(struct bpf_prog *prog)
1864{
1865	atomic64_inc(&prog->aux->refcnt);
1866}
1867EXPORT_SYMBOL_GPL(bpf_prog_inc);
1868
1869/* prog_idr_lock should have been held */
1870struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
1871{
1872	int refold;
1873
1874	refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
 
 
 
 
 
1875
1876	if (!refold)
1877		return ERR_PTR(-ENOENT);
1878
1879	return prog;
1880}
1881EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
1882
1883bool bpf_prog_get_ok(struct bpf_prog *prog,
1884			    enum bpf_prog_type *attach_type, bool attach_drv)
1885{
1886	/* not an attachment, just a refcount inc, always allow */
1887	if (!attach_type)
1888		return true;
1889
1890	if (prog->type != *attach_type)
1891		return false;
1892	if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
1893		return false;
1894
1895	return true;
1896}
1897
1898static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
1899				       bool attach_drv)
1900{
1901	struct fd f = fdget(ufd);
1902	struct bpf_prog *prog;
1903
1904	prog = ____bpf_prog_get(f);
1905	if (IS_ERR(prog))
1906		return prog;
1907	if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
1908		prog = ERR_PTR(-EINVAL);
1909		goto out;
1910	}
1911
1912	bpf_prog_inc(prog);
1913out:
1914	fdput(f);
1915	return prog;
1916}
1917
1918struct bpf_prog *bpf_prog_get(u32 ufd)
1919{
1920	return __bpf_prog_get(ufd, NULL, false);
1921}
1922
1923struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
1924				       bool attach_drv)
1925{
1926	return __bpf_prog_get(ufd, &type, attach_drv);
1927}
1928EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
1929
1930/* Initially all BPF programs could be loaded w/o specifying
1931 * expected_attach_type. Later for some of them specifying expected_attach_type
1932 * at load time became required so that program could be validated properly.
1933 * Programs of types that are allowed to be loaded both w/ and w/o (for
1934 * backward compatibility) expected_attach_type, should have the default attach
1935 * type assigned to expected_attach_type for the latter case, so that it can be
1936 * validated later at attach time.
1937 *
1938 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
1939 * prog type requires it but has some attach types that have to be backward
1940 * compatible.
1941 */
1942static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
1943{
1944	switch (attr->prog_type) {
1945	case BPF_PROG_TYPE_CGROUP_SOCK:
1946		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
1947		 * exist so checking for non-zero is the way to go here.
1948		 */
1949		if (!attr->expected_attach_type)
1950			attr->expected_attach_type =
1951				BPF_CGROUP_INET_SOCK_CREATE;
1952		break;
1953	}
1954}
1955
1956static int
1957bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
1958			   enum bpf_attach_type expected_attach_type,
1959			   u32 btf_id, u32 prog_fd)
1960{
1961	if (btf_id) {
1962		if (btf_id > BTF_MAX_TYPE)
1963			return -EINVAL;
1964
1965		switch (prog_type) {
1966		case BPF_PROG_TYPE_TRACING:
1967		case BPF_PROG_TYPE_LSM:
1968		case BPF_PROG_TYPE_STRUCT_OPS:
1969		case BPF_PROG_TYPE_EXT:
1970			break;
1971		default:
1972			return -EINVAL;
1973		}
1974	}
1975
1976	if (prog_fd && prog_type != BPF_PROG_TYPE_TRACING &&
1977	    prog_type != BPF_PROG_TYPE_EXT)
1978		return -EINVAL;
1979
1980	switch (prog_type) {
1981	case BPF_PROG_TYPE_CGROUP_SOCK:
1982		switch (expected_attach_type) {
1983		case BPF_CGROUP_INET_SOCK_CREATE:
1984		case BPF_CGROUP_INET_SOCK_RELEASE:
1985		case BPF_CGROUP_INET4_POST_BIND:
1986		case BPF_CGROUP_INET6_POST_BIND:
1987			return 0;
1988		default:
1989			return -EINVAL;
1990		}
1991	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
1992		switch (expected_attach_type) {
1993		case BPF_CGROUP_INET4_BIND:
1994		case BPF_CGROUP_INET6_BIND:
1995		case BPF_CGROUP_INET4_CONNECT:
1996		case BPF_CGROUP_INET6_CONNECT:
1997		case BPF_CGROUP_INET4_GETPEERNAME:
1998		case BPF_CGROUP_INET6_GETPEERNAME:
1999		case BPF_CGROUP_INET4_GETSOCKNAME:
2000		case BPF_CGROUP_INET6_GETSOCKNAME:
2001		case BPF_CGROUP_UDP4_SENDMSG:
2002		case BPF_CGROUP_UDP6_SENDMSG:
2003		case BPF_CGROUP_UDP4_RECVMSG:
2004		case BPF_CGROUP_UDP6_RECVMSG:
2005			return 0;
2006		default:
2007			return -EINVAL;
2008		}
2009	case BPF_PROG_TYPE_CGROUP_SKB:
2010		switch (expected_attach_type) {
2011		case BPF_CGROUP_INET_INGRESS:
2012		case BPF_CGROUP_INET_EGRESS:
2013			return 0;
2014		default:
2015			return -EINVAL;
2016		}
2017	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2018		switch (expected_attach_type) {
2019		case BPF_CGROUP_SETSOCKOPT:
2020		case BPF_CGROUP_GETSOCKOPT:
2021			return 0;
2022		default:
2023			return -EINVAL;
2024		}
2025	case BPF_PROG_TYPE_SK_LOOKUP:
2026		if (expected_attach_type == BPF_SK_LOOKUP)
2027			return 0;
2028		return -EINVAL;
2029	case BPF_PROG_TYPE_EXT:
2030		if (expected_attach_type)
2031			return -EINVAL;
2032		fallthrough;
2033	default:
2034		return 0;
2035	}
2036}
2037
2038static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
2039{
2040	switch (prog_type) {
2041	case BPF_PROG_TYPE_SCHED_CLS:
2042	case BPF_PROG_TYPE_SCHED_ACT:
2043	case BPF_PROG_TYPE_XDP:
2044	case BPF_PROG_TYPE_LWT_IN:
2045	case BPF_PROG_TYPE_LWT_OUT:
2046	case BPF_PROG_TYPE_LWT_XMIT:
2047	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2048	case BPF_PROG_TYPE_SK_SKB:
2049	case BPF_PROG_TYPE_SK_MSG:
2050	case BPF_PROG_TYPE_LIRC_MODE2:
2051	case BPF_PROG_TYPE_FLOW_DISSECTOR:
2052	case BPF_PROG_TYPE_CGROUP_DEVICE:
2053	case BPF_PROG_TYPE_CGROUP_SOCK:
2054	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2055	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2056	case BPF_PROG_TYPE_CGROUP_SYSCTL:
2057	case BPF_PROG_TYPE_SOCK_OPS:
2058	case BPF_PROG_TYPE_EXT: /* extends any prog */
2059		return true;
2060	case BPF_PROG_TYPE_CGROUP_SKB:
2061		/* always unpriv */
2062	case BPF_PROG_TYPE_SK_REUSEPORT:
2063		/* equivalent to SOCKET_FILTER. need CAP_BPF only */
2064	default:
2065		return false;
2066	}
2067}
2068
2069static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
2070{
2071	switch (prog_type) {
2072	case BPF_PROG_TYPE_KPROBE:
2073	case BPF_PROG_TYPE_TRACEPOINT:
2074	case BPF_PROG_TYPE_PERF_EVENT:
2075	case BPF_PROG_TYPE_RAW_TRACEPOINT:
2076	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2077	case BPF_PROG_TYPE_TRACING:
2078	case BPF_PROG_TYPE_LSM:
2079	case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
2080	case BPF_PROG_TYPE_EXT: /* extends any prog */
2081		return true;
2082	default:
2083		return false;
2084	}
2085}
2086
2087/* last field in 'union bpf_attr' used by this command */
2088#define	BPF_PROG_LOAD_LAST_FIELD attach_prog_fd
2089
2090static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
2091{
2092	enum bpf_prog_type type = attr->prog_type;
2093	struct bpf_prog *prog;
2094	int err;
2095	char license[128];
2096	bool is_gpl;
2097
2098	if (CHECK_ATTR(BPF_PROG_LOAD))
2099		return -EINVAL;
2100
2101	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
2102				 BPF_F_ANY_ALIGNMENT |
2103				 BPF_F_TEST_STATE_FREQ |
2104				 BPF_F_TEST_RND_HI32))
2105		return -EINVAL;
2106
2107	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
2108	    (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
2109	    !bpf_capable())
2110		return -EPERM;
2111
2112	/* copy eBPF program license from user space */
2113	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
2114			      sizeof(license) - 1) < 0)
2115		return -EFAULT;
2116	license[sizeof(license) - 1] = 0;
2117
2118	/* eBPF programs must be GPL compatible to use GPL-ed functions */
2119	is_gpl = license_is_gpl_compatible(license);
2120
2121	if (attr->insn_cnt == 0 ||
2122	    attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
2123		return -E2BIG;
2124	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
2125	    type != BPF_PROG_TYPE_CGROUP_SKB &&
2126	    !bpf_capable())
2127		return -EPERM;
2128
2129	if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
2130		return -EPERM;
2131	if (is_perfmon_prog_type(type) && !perfmon_capable())
2132		return -EPERM;
2133
2134	bpf_prog_load_fixup_attach_type(attr);
2135	if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
2136				       attr->attach_btf_id,
2137				       attr->attach_prog_fd))
2138		return -EINVAL;
2139
2140	/* plain bpf_prog allocation */
2141	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
2142	if (!prog)
2143		return -ENOMEM;
2144
2145	prog->expected_attach_type = attr->expected_attach_type;
2146	prog->aux->attach_btf_id = attr->attach_btf_id;
2147	if (attr->attach_prog_fd) {
2148		struct bpf_prog *tgt_prog;
2149
2150		tgt_prog = bpf_prog_get(attr->attach_prog_fd);
2151		if (IS_ERR(tgt_prog)) {
2152			err = PTR_ERR(tgt_prog);
2153			goto free_prog_nouncharge;
2154		}
2155		prog->aux->linked_prog = tgt_prog;
2156	}
2157
2158	prog->aux->offload_requested = !!attr->prog_ifindex;
2159
2160	err = security_bpf_prog_alloc(prog->aux);
2161	if (err)
2162		goto free_prog_nouncharge;
2163
2164	err = bpf_prog_charge_memlock(prog);
2165	if (err)
2166		goto free_prog_sec;
2167
2168	prog->len = attr->insn_cnt;
2169
2170	err = -EFAULT;
2171	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
2172			   bpf_prog_insn_size(prog)) != 0)
2173		goto free_prog;
2174
2175	prog->orig_prog = NULL;
2176	prog->jited = 0;
2177
2178	atomic64_set(&prog->aux->refcnt, 1);
2179	prog->gpl_compatible = is_gpl ? 1 : 0;
2180
2181	if (bpf_prog_is_dev_bound(prog->aux)) {
2182		err = bpf_prog_offload_init(prog, attr);
2183		if (err)
2184			goto free_prog;
2185	}
2186
2187	/* find program type: socket_filter vs tracing_filter */
2188	err = find_prog_type(type, prog);
2189	if (err < 0)
2190		goto free_prog;
2191
2192	prog->aux->load_time = ktime_get_boottime_ns();
2193	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
2194			       sizeof(attr->prog_name));
2195	if (err < 0)
2196		goto free_prog;
2197
2198	/* run eBPF verifier */
2199	err = bpf_check(&prog, attr, uattr);
2200	if (err < 0)
2201		goto free_used_maps;
2202
2203	prog = bpf_prog_select_runtime(prog, &err);
2204	if (err < 0)
2205		goto free_used_maps;
2206
2207	err = bpf_prog_alloc_id(prog);
2208	if (err)
2209		goto free_used_maps;
2210
2211	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
2212	 * effectively publicly exposed. However, retrieving via
2213	 * bpf_prog_get_fd_by_id() will take another reference,
2214	 * therefore it cannot be gone underneath us.
2215	 *
2216	 * Only for the time /after/ successful bpf_prog_new_fd()
2217	 * and before returning to userspace, we might just hold
2218	 * one reference and any parallel close on that fd could
2219	 * rip everything out. Hence, below notifications must
2220	 * happen before bpf_prog_new_fd().
2221	 *
2222	 * Also, any failure handling from this point onwards must
2223	 * be using bpf_prog_put() given the program is exposed.
2224	 */
2225	bpf_prog_kallsyms_add(prog);
2226	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
2227	bpf_audit_prog(prog, BPF_AUDIT_LOAD);
2228
2229	err = bpf_prog_new_fd(prog);
2230	if (err < 0)
2231		bpf_prog_put(prog);
2232	return err;
2233
2234free_used_maps:
2235	/* In case we have subprogs, we need to wait for a grace
2236	 * period before we can tear down JIT memory since symbols
2237	 * are already exposed under kallsyms.
2238	 */
2239	__bpf_prog_put_noref(prog, prog->aux->func_cnt);
2240	return err;
2241free_prog:
2242	bpf_prog_uncharge_memlock(prog);
2243free_prog_sec:
2244	security_bpf_prog_free(prog->aux);
2245free_prog_nouncharge:
2246	bpf_prog_free(prog);
2247	return err;
2248}
2249
2250#define BPF_OBJ_LAST_FIELD file_flags
2251
2252static int bpf_obj_pin(const union bpf_attr *attr)
2253{
2254	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
2255		return -EINVAL;
2256
2257	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
2258}
2259
2260static int bpf_obj_get(const union bpf_attr *attr)
2261{
2262	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
2263	    attr->file_flags & ~BPF_OBJ_FLAG_MASK)
2264		return -EINVAL;
2265
2266	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
2267				attr->file_flags);
2268}
2269
2270void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2271		   const struct bpf_link_ops *ops, struct bpf_prog *prog)
2272{
2273	atomic64_set(&link->refcnt, 1);
2274	link->type = type;
2275	link->id = 0;
2276	link->ops = ops;
2277	link->prog = prog;
2278}
2279
2280static void bpf_link_free_id(int id)
2281{
2282	if (!id)
2283		return;
2284
2285	spin_lock_bh(&link_idr_lock);
2286	idr_remove(&link_idr, id);
2287	spin_unlock_bh(&link_idr_lock);
2288}
2289
2290/* Clean up bpf_link and corresponding anon_inode file and FD. After
2291 * anon_inode is created, bpf_link can't be just kfree()'d due to deferred
2292 * anon_inode's release() call. This helper marksbpf_link as
2293 * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt
2294 * is not decremented, it's the responsibility of a calling code that failed
2295 * to complete bpf_link initialization.
2296 */
2297void bpf_link_cleanup(struct bpf_link_primer *primer)
2298{
2299	primer->link->prog = NULL;
2300	bpf_link_free_id(primer->id);
2301	fput(primer->file);
2302	put_unused_fd(primer->fd);
2303}
2304
2305void bpf_link_inc(struct bpf_link *link)
2306{
2307	atomic64_inc(&link->refcnt);
2308}
2309
2310/* bpf_link_free is guaranteed to be called from process context */
2311static void bpf_link_free(struct bpf_link *link)
2312{
2313	bpf_link_free_id(link->id);
2314	if (link->prog) {
2315		/* detach BPF program, clean up used resources */
2316		link->ops->release(link);
2317		bpf_prog_put(link->prog);
2318	}
2319	/* free bpf_link and its containing memory */
2320	link->ops->dealloc(link);
2321}
2322
2323static void bpf_link_put_deferred(struct work_struct *work)
2324{
2325	struct bpf_link *link = container_of(work, struct bpf_link, work);
2326
2327	bpf_link_free(link);
2328}
2329
2330/* bpf_link_put can be called from atomic context, but ensures that resources
2331 * are freed from process context
2332 */
2333void bpf_link_put(struct bpf_link *link)
2334{
2335	if (!atomic64_dec_and_test(&link->refcnt))
2336		return;
2337
2338	if (in_atomic()) {
2339		INIT_WORK(&link->work, bpf_link_put_deferred);
2340		schedule_work(&link->work);
2341	} else {
2342		bpf_link_free(link);
2343	}
2344}
2345
2346static int bpf_link_release(struct inode *inode, struct file *filp)
2347{
2348	struct bpf_link *link = filp->private_data;
2349
2350	bpf_link_put(link);
2351	return 0;
2352}
2353
2354#ifdef CONFIG_PROC_FS
2355#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
2356#define BPF_MAP_TYPE(_id, _ops)
2357#define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
2358static const char *bpf_link_type_strs[] = {
2359	[BPF_LINK_TYPE_UNSPEC] = "<invalid>",
2360#include <linux/bpf_types.h>
2361};
2362#undef BPF_PROG_TYPE
2363#undef BPF_MAP_TYPE
2364#undef BPF_LINK_TYPE
2365
2366static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
2367{
2368	const struct bpf_link *link = filp->private_data;
2369	const struct bpf_prog *prog = link->prog;
2370	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2371
2372	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2373	seq_printf(m,
2374		   "link_type:\t%s\n"
2375		   "link_id:\t%u\n"
2376		   "prog_tag:\t%s\n"
2377		   "prog_id:\t%u\n",
2378		   bpf_link_type_strs[link->type],
2379		   link->id,
2380		   prog_tag,
2381		   prog->aux->id);
2382	if (link->ops->show_fdinfo)
2383		link->ops->show_fdinfo(link, m);
2384}
2385#endif
2386
2387static const struct file_operations bpf_link_fops = {
2388#ifdef CONFIG_PROC_FS
2389	.show_fdinfo	= bpf_link_show_fdinfo,
2390#endif
2391	.release	= bpf_link_release,
2392	.read		= bpf_dummy_read,
2393	.write		= bpf_dummy_write,
2394};
2395
2396static int bpf_link_alloc_id(struct bpf_link *link)
2397{
2398	int id;
2399
2400	idr_preload(GFP_KERNEL);
2401	spin_lock_bh(&link_idr_lock);
2402	id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
2403	spin_unlock_bh(&link_idr_lock);
2404	idr_preload_end();
2405
2406	return id;
2407}
2408
2409/* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
2410 * reserving unused FD and allocating ID from link_idr. This is to be paired
2411 * with bpf_link_settle() to install FD and ID and expose bpf_link to
2412 * user-space, if bpf_link is successfully attached. If not, bpf_link and
2413 * pre-allocated resources are to be freed with bpf_cleanup() call. All the
2414 * transient state is passed around in struct bpf_link_primer.
2415 * This is preferred way to create and initialize bpf_link, especially when
2416 * there are complicated and expensive operations inbetween creating bpf_link
2417 * itself and attaching it to BPF hook. By using bpf_link_prime() and
2418 * bpf_link_settle() kernel code using bpf_link doesn't have to perform
2419 * expensive (and potentially failing) roll back operations in a rare case
2420 * that file, FD, or ID can't be allocated.
2421 */
2422int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
2423{
2424	struct file *file;
2425	int fd, id;
 
 
 
2426
2427	fd = get_unused_fd_flags(O_CLOEXEC);
2428	if (fd < 0)
2429		return fd;
 
2430
 
 
 
2431
2432	id = bpf_link_alloc_id(link);
2433	if (id < 0) {
2434		put_unused_fd(fd);
2435		return id;
2436	}
 
2437
2438	file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
2439	if (IS_ERR(file)) {
2440		bpf_link_free_id(id);
2441		put_unused_fd(fd);
2442		return PTR_ERR(file);
2443	}
2444
2445	primer->link = link;
2446	primer->file = file;
2447	primer->fd = fd;
2448	primer->id = id;
2449	return 0;
2450}
2451
2452int bpf_link_settle(struct bpf_link_primer *primer)
2453{
2454	/* make bpf_link fetchable by ID */
2455	spin_lock_bh(&link_idr_lock);
2456	primer->link->id = primer->id;
2457	spin_unlock_bh(&link_idr_lock);
2458	/* make bpf_link fetchable by FD */
2459	fd_install(primer->fd, primer->file);
2460	/* pass through installed FD */
2461	return primer->fd;
2462}
2463
2464int bpf_link_new_fd(struct bpf_link *link)
2465{
2466	return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
2467}
2468
2469struct bpf_link *bpf_link_get_from_fd(u32 ufd)
2470{
2471	struct fd f = fdget(ufd);
2472	struct bpf_link *link;
2473
2474	if (!f.file)
2475		return ERR_PTR(-EBADF);
2476	if (f.file->f_op != &bpf_link_fops) {
2477		fdput(f);
2478		return ERR_PTR(-EINVAL);
2479	}
2480
2481	link = f.file->private_data;
2482	bpf_link_inc(link);
2483	fdput(f);
2484
2485	return link;
2486}
2487
2488struct bpf_tracing_link {
2489	struct bpf_link link;
2490	enum bpf_attach_type attach_type;
2491};
2492
2493static void bpf_tracing_link_release(struct bpf_link *link)
2494{
2495	WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog));
2496}
2497
2498static void bpf_tracing_link_dealloc(struct bpf_link *link)
2499{
2500	struct bpf_tracing_link *tr_link =
2501		container_of(link, struct bpf_tracing_link, link);
2502
2503	kfree(tr_link);
2504}
2505
2506static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
2507					 struct seq_file *seq)
2508{
2509	struct bpf_tracing_link *tr_link =
2510		container_of(link, struct bpf_tracing_link, link);
2511
2512	seq_printf(seq,
2513		   "attach_type:\t%d\n",
2514		   tr_link->attach_type);
2515}
2516
2517static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
2518					   struct bpf_link_info *info)
2519{
2520	struct bpf_tracing_link *tr_link =
2521		container_of(link, struct bpf_tracing_link, link);
2522
2523	info->tracing.attach_type = tr_link->attach_type;
2524
2525	return 0;
2526}
2527
2528static const struct bpf_link_ops bpf_tracing_link_lops = {
2529	.release = bpf_tracing_link_release,
2530	.dealloc = bpf_tracing_link_dealloc,
2531	.show_fdinfo = bpf_tracing_link_show_fdinfo,
2532	.fill_link_info = bpf_tracing_link_fill_link_info,
2533};
2534
2535static int bpf_tracing_prog_attach(struct bpf_prog *prog)
2536{
2537	struct bpf_link_primer link_primer;
2538	struct bpf_tracing_link *link;
2539	int err;
2540
2541	switch (prog->type) {
2542	case BPF_PROG_TYPE_TRACING:
2543		if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
2544		    prog->expected_attach_type != BPF_TRACE_FEXIT &&
2545		    prog->expected_attach_type != BPF_MODIFY_RETURN) {
2546			err = -EINVAL;
2547			goto out_put_prog;
2548		}
2549		break;
2550	case BPF_PROG_TYPE_EXT:
2551		if (prog->expected_attach_type != 0) {
2552			err = -EINVAL;
2553			goto out_put_prog;
2554		}
2555		break;
2556	case BPF_PROG_TYPE_LSM:
2557		if (prog->expected_attach_type != BPF_LSM_MAC) {
2558			err = -EINVAL;
2559			goto out_put_prog;
2560		}
2561		break;
2562	default:
2563		err = -EINVAL;
2564		goto out_put_prog;
2565	}
2566
2567	link = kzalloc(sizeof(*link), GFP_USER);
2568	if (!link) {
2569		err = -ENOMEM;
2570		goto out_put_prog;
2571	}
2572	bpf_link_init(&link->link, BPF_LINK_TYPE_TRACING,
2573		      &bpf_tracing_link_lops, prog);
2574	link->attach_type = prog->expected_attach_type;
2575
2576	err = bpf_link_prime(&link->link, &link_primer);
2577	if (err) {
2578		kfree(link);
2579		goto out_put_prog;
2580	}
2581
2582	err = bpf_trampoline_link_prog(prog);
2583	if (err) {
2584		bpf_link_cleanup(&link_primer);
 
 
 
2585		goto out_put_prog;
2586	}
 
2587
2588	return bpf_link_settle(&link_primer);
2589out_put_prog:
2590	bpf_prog_put(prog);
2591	return err;
2592}
2593
2594struct bpf_raw_tp_link {
2595	struct bpf_link link;
2596	struct bpf_raw_event_map *btp;
2597};
2598
2599static void bpf_raw_tp_link_release(struct bpf_link *link)
2600{
2601	struct bpf_raw_tp_link *raw_tp =
2602		container_of(link, struct bpf_raw_tp_link, link);
2603
2604	bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
2605	bpf_put_raw_tracepoint(raw_tp->btp);
2606}
2607
2608static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
2609{
2610	struct bpf_raw_tp_link *raw_tp =
2611		container_of(link, struct bpf_raw_tp_link, link);
2612
2613	kfree(raw_tp);
2614}
2615
2616static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
2617					struct seq_file *seq)
2618{
2619	struct bpf_raw_tp_link *raw_tp_link =
2620		container_of(link, struct bpf_raw_tp_link, link);
2621
2622	seq_printf(seq,
2623		   "tp_name:\t%s\n",
2624		   raw_tp_link->btp->tp->name);
2625}
2626
2627static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
2628					  struct bpf_link_info *info)
2629{
2630	struct bpf_raw_tp_link *raw_tp_link =
2631		container_of(link, struct bpf_raw_tp_link, link);
2632	char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
2633	const char *tp_name = raw_tp_link->btp->tp->name;
2634	u32 ulen = info->raw_tracepoint.tp_name_len;
2635	size_t tp_len = strlen(tp_name);
2636
2637	if (!ulen ^ !ubuf)
2638		return -EINVAL;
2639
2640	info->raw_tracepoint.tp_name_len = tp_len + 1;
2641
2642	if (!ubuf)
2643		return 0;
2644
2645	if (ulen >= tp_len + 1) {
2646		if (copy_to_user(ubuf, tp_name, tp_len + 1))
2647			return -EFAULT;
2648	} else {
2649		char zero = '\0';
2650
2651		if (copy_to_user(ubuf, tp_name, ulen - 1))
2652			return -EFAULT;
2653		if (put_user(zero, ubuf + ulen - 1))
2654			return -EFAULT;
2655		return -ENOSPC;
2656	}
2657
2658	return 0;
2659}
2660
2661static const struct bpf_link_ops bpf_raw_tp_link_lops = {
2662	.release = bpf_raw_tp_link_release,
2663	.dealloc = bpf_raw_tp_link_dealloc,
2664	.show_fdinfo = bpf_raw_tp_link_show_fdinfo,
2665	.fill_link_info = bpf_raw_tp_link_fill_link_info,
2666};
2667
2668#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
2669
2670static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
2671{
2672	struct bpf_link_primer link_primer;
2673	struct bpf_raw_tp_link *link;
2674	struct bpf_raw_event_map *btp;
2675	struct bpf_prog *prog;
2676	const char *tp_name;
2677	char buf[128];
2678	int err;
2679
2680	if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
2681		return -EINVAL;
2682
2683	prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
2684	if (IS_ERR(prog))
2685		return PTR_ERR(prog);
2686
2687	switch (prog->type) {
2688	case BPF_PROG_TYPE_TRACING:
2689	case BPF_PROG_TYPE_EXT:
2690	case BPF_PROG_TYPE_LSM:
2691		if (attr->raw_tracepoint.name) {
2692			/* The attach point for this category of programs
2693			 * should be specified via btf_id during program load.
2694			 */
2695			err = -EINVAL;
2696			goto out_put_prog;
2697		}
2698		if (prog->type == BPF_PROG_TYPE_TRACING &&
2699		    prog->expected_attach_type == BPF_TRACE_RAW_TP) {
2700			tp_name = prog->aux->attach_func_name;
2701			break;
2702		}
2703		return bpf_tracing_prog_attach(prog);
2704	case BPF_PROG_TYPE_RAW_TRACEPOINT:
2705	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2706		if (strncpy_from_user(buf,
2707				      u64_to_user_ptr(attr->raw_tracepoint.name),
2708				      sizeof(buf) - 1) < 0) {
2709			err = -EFAULT;
2710			goto out_put_prog;
2711		}
2712		buf[sizeof(buf) - 1] = 0;
2713		tp_name = buf;
2714		break;
2715	default:
2716		err = -EINVAL;
2717		goto out_put_prog;
2718	}
2719
2720	btp = bpf_get_raw_tracepoint(tp_name);
2721	if (!btp) {
2722		err = -ENOENT;
2723		goto out_put_prog;
2724	}
2725
2726	link = kzalloc(sizeof(*link), GFP_USER);
2727	if (!link) {
2728		err = -ENOMEM;
2729		goto out_put_btp;
2730	}
2731	bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
2732		      &bpf_raw_tp_link_lops, prog);
2733	link->btp = btp;
2734
2735	err = bpf_link_prime(&link->link, &link_primer);
2736	if (err) {
2737		kfree(link);
2738		goto out_put_btp;
2739	}
2740
2741	err = bpf_probe_register(link->btp, prog);
2742	if (err) {
2743		bpf_link_cleanup(&link_primer);
2744		goto out_put_btp;
2745	}
2746
2747	return bpf_link_settle(&link_primer);
2748
2749out_put_btp:
2750	bpf_put_raw_tracepoint(btp);
2751out_put_prog:
2752	bpf_prog_put(prog);
2753	return err;
2754}
2755
2756static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
2757					     enum bpf_attach_type attach_type)
2758{
2759	switch (prog->type) {
2760	case BPF_PROG_TYPE_CGROUP_SOCK:
2761	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2762	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2763	case BPF_PROG_TYPE_SK_LOOKUP:
2764		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
2765	case BPF_PROG_TYPE_CGROUP_SKB:
2766		if (!capable(CAP_NET_ADMIN))
2767			/* cg-skb progs can be loaded by unpriv user.
2768			 * check permissions at attach time.
2769			 */
2770			return -EPERM;
2771		return prog->enforce_expected_attach_type &&
2772			prog->expected_attach_type != attach_type ?
2773			-EINVAL : 0;
2774	default:
2775		return 0;
2776	}
2777}
2778
2779static enum bpf_prog_type
2780attach_type_to_prog_type(enum bpf_attach_type attach_type)
 
 
 
 
2781{
2782	switch (attach_type) {
 
 
 
 
 
 
 
 
 
 
 
 
 
2783	case BPF_CGROUP_INET_INGRESS:
2784	case BPF_CGROUP_INET_EGRESS:
2785		return BPF_PROG_TYPE_CGROUP_SKB;
2786		break;
2787	case BPF_CGROUP_INET_SOCK_CREATE:
2788	case BPF_CGROUP_INET_SOCK_RELEASE:
2789	case BPF_CGROUP_INET4_POST_BIND:
2790	case BPF_CGROUP_INET6_POST_BIND:
2791		return BPF_PROG_TYPE_CGROUP_SOCK;
 
2792	case BPF_CGROUP_INET4_BIND:
2793	case BPF_CGROUP_INET6_BIND:
2794	case BPF_CGROUP_INET4_CONNECT:
2795	case BPF_CGROUP_INET6_CONNECT:
2796	case BPF_CGROUP_INET4_GETPEERNAME:
2797	case BPF_CGROUP_INET6_GETPEERNAME:
2798	case BPF_CGROUP_INET4_GETSOCKNAME:
2799	case BPF_CGROUP_INET6_GETSOCKNAME:
2800	case BPF_CGROUP_UDP4_SENDMSG:
2801	case BPF_CGROUP_UDP6_SENDMSG:
2802	case BPF_CGROUP_UDP4_RECVMSG:
2803	case BPF_CGROUP_UDP6_RECVMSG:
2804		return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
 
2805	case BPF_CGROUP_SOCK_OPS:
2806		return BPF_PROG_TYPE_SOCK_OPS;
 
2807	case BPF_CGROUP_DEVICE:
2808		return BPF_PROG_TYPE_CGROUP_DEVICE;
 
2809	case BPF_SK_MSG_VERDICT:
2810		return BPF_PROG_TYPE_SK_MSG;
 
2811	case BPF_SK_SKB_STREAM_PARSER:
2812	case BPF_SK_SKB_STREAM_VERDICT:
2813		return BPF_PROG_TYPE_SK_SKB;
 
2814	case BPF_LIRC_MODE2:
2815		return BPF_PROG_TYPE_LIRC_MODE2;
 
2816	case BPF_FLOW_DISSECTOR:
2817		return BPF_PROG_TYPE_FLOW_DISSECTOR;
 
2818	case BPF_CGROUP_SYSCTL:
2819		return BPF_PROG_TYPE_CGROUP_SYSCTL;
 
2820	case BPF_CGROUP_GETSOCKOPT:
2821	case BPF_CGROUP_SETSOCKOPT:
2822		return BPF_PROG_TYPE_CGROUP_SOCKOPT;
2823	case BPF_TRACE_ITER:
2824		return BPF_PROG_TYPE_TRACING;
2825	case BPF_SK_LOOKUP:
2826		return BPF_PROG_TYPE_SK_LOOKUP;
2827	case BPF_XDP:
2828		return BPF_PROG_TYPE_XDP;
2829	default:
2830		return BPF_PROG_TYPE_UNSPEC;
2831	}
2832}
2833
2834#define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd
2835
2836#define BPF_F_ATTACH_MASK \
2837	(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE)
2838
2839static int bpf_prog_attach(const union bpf_attr *attr)
2840{
2841	enum bpf_prog_type ptype;
2842	struct bpf_prog *prog;
2843	int ret;
2844
2845	if (CHECK_ATTR(BPF_PROG_ATTACH))
2846		return -EINVAL;
2847
2848	if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
2849		return -EINVAL;
2850
2851	ptype = attach_type_to_prog_type(attr->attach_type);
2852	if (ptype == BPF_PROG_TYPE_UNSPEC)
2853		return -EINVAL;
2854
2855	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
2856	if (IS_ERR(prog))
2857		return PTR_ERR(prog);
2858
2859	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
2860		bpf_prog_put(prog);
2861		return -EINVAL;
2862	}
2863
2864	switch (ptype) {
2865	case BPF_PROG_TYPE_SK_SKB:
2866	case BPF_PROG_TYPE_SK_MSG:
2867		ret = sock_map_get_from_fd(attr, prog);
2868		break;
2869	case BPF_PROG_TYPE_LIRC_MODE2:
2870		ret = lirc_prog_attach(attr, prog);
2871		break;
2872	case BPF_PROG_TYPE_FLOW_DISSECTOR:
2873		ret = netns_bpf_prog_attach(attr, prog);
2874		break;
2875	case BPF_PROG_TYPE_CGROUP_DEVICE:
2876	case BPF_PROG_TYPE_CGROUP_SKB:
2877	case BPF_PROG_TYPE_CGROUP_SOCK:
2878	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2879	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2880	case BPF_PROG_TYPE_CGROUP_SYSCTL:
2881	case BPF_PROG_TYPE_SOCK_OPS:
2882		ret = cgroup_bpf_prog_attach(attr, ptype, prog);
2883		break;
2884	default:
2885		ret = -EINVAL;
2886	}
2887
2888	if (ret)
2889		bpf_prog_put(prog);
2890	return ret;
2891}
2892
2893#define BPF_PROG_DETACH_LAST_FIELD attach_type
2894
2895static int bpf_prog_detach(const union bpf_attr *attr)
2896{
2897	enum bpf_prog_type ptype;
2898
 
 
 
2899	if (CHECK_ATTR(BPF_PROG_DETACH))
2900		return -EINVAL;
2901
2902	ptype = attach_type_to_prog_type(attr->attach_type);
2903
2904	switch (ptype) {
2905	case BPF_PROG_TYPE_SK_MSG:
2906	case BPF_PROG_TYPE_SK_SKB:
2907		return sock_map_prog_detach(attr, ptype);
2908	case BPF_PROG_TYPE_LIRC_MODE2:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2909		return lirc_prog_detach(attr);
2910	case BPF_PROG_TYPE_FLOW_DISSECTOR:
2911		return netns_bpf_prog_detach(attr, ptype);
2912	case BPF_PROG_TYPE_CGROUP_DEVICE:
2913	case BPF_PROG_TYPE_CGROUP_SKB:
2914	case BPF_PROG_TYPE_CGROUP_SOCK:
2915	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2916	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2917	case BPF_PROG_TYPE_CGROUP_SYSCTL:
2918	case BPF_PROG_TYPE_SOCK_OPS:
2919		return cgroup_bpf_prog_detach(attr, ptype);
2920	default:
2921		return -EINVAL;
2922	}
 
 
2923}
2924
2925#define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
2926
2927static int bpf_prog_query(const union bpf_attr *attr,
2928			  union bpf_attr __user *uattr)
2929{
2930	if (!capable(CAP_NET_ADMIN))
2931		return -EPERM;
2932	if (CHECK_ATTR(BPF_PROG_QUERY))
2933		return -EINVAL;
2934	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
2935		return -EINVAL;
2936
2937	switch (attr->query.attach_type) {
2938	case BPF_CGROUP_INET_INGRESS:
2939	case BPF_CGROUP_INET_EGRESS:
2940	case BPF_CGROUP_INET_SOCK_CREATE:
2941	case BPF_CGROUP_INET_SOCK_RELEASE:
2942	case BPF_CGROUP_INET4_BIND:
2943	case BPF_CGROUP_INET6_BIND:
2944	case BPF_CGROUP_INET4_POST_BIND:
2945	case BPF_CGROUP_INET6_POST_BIND:
2946	case BPF_CGROUP_INET4_CONNECT:
2947	case BPF_CGROUP_INET6_CONNECT:
2948	case BPF_CGROUP_INET4_GETPEERNAME:
2949	case BPF_CGROUP_INET6_GETPEERNAME:
2950	case BPF_CGROUP_INET4_GETSOCKNAME:
2951	case BPF_CGROUP_INET6_GETSOCKNAME:
2952	case BPF_CGROUP_UDP4_SENDMSG:
2953	case BPF_CGROUP_UDP6_SENDMSG:
2954	case BPF_CGROUP_UDP4_RECVMSG:
2955	case BPF_CGROUP_UDP6_RECVMSG:
2956	case BPF_CGROUP_SOCK_OPS:
2957	case BPF_CGROUP_DEVICE:
2958	case BPF_CGROUP_SYSCTL:
2959	case BPF_CGROUP_GETSOCKOPT:
2960	case BPF_CGROUP_SETSOCKOPT:
2961		return cgroup_bpf_prog_query(attr, uattr);
2962	case BPF_LIRC_MODE2:
2963		return lirc_prog_query(attr, uattr);
2964	case BPF_FLOW_DISSECTOR:
2965	case BPF_SK_LOOKUP:
2966		return netns_bpf_prog_query(attr, uattr);
2967	default:
2968		return -EINVAL;
2969	}
 
 
2970}
2971
2972#define BPF_PROG_TEST_RUN_LAST_FIELD test.ctx_out
2973
2974static int bpf_prog_test_run(const union bpf_attr *attr,
2975			     union bpf_attr __user *uattr)
2976{
2977	struct bpf_prog *prog;
2978	int ret = -ENOTSUPP;
2979
 
 
2980	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
2981		return -EINVAL;
2982
2983	if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
2984	    (!attr->test.ctx_size_in && attr->test.ctx_in))
2985		return -EINVAL;
2986
2987	if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
2988	    (!attr->test.ctx_size_out && attr->test.ctx_out))
2989		return -EINVAL;
2990
2991	prog = bpf_prog_get(attr->test.prog_fd);
2992	if (IS_ERR(prog))
2993		return PTR_ERR(prog);
2994
2995	if (prog->aux->ops->test_run)
2996		ret = prog->aux->ops->test_run(prog, attr, uattr);
2997
2998	bpf_prog_put(prog);
2999	return ret;
3000}
3001
3002#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
3003
3004static int bpf_obj_get_next_id(const union bpf_attr *attr,
3005			       union bpf_attr __user *uattr,
3006			       struct idr *idr,
3007			       spinlock_t *lock)
3008{
3009	u32 next_id = attr->start_id;
3010	int err = 0;
3011
3012	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
3013		return -EINVAL;
3014
3015	if (!capable(CAP_SYS_ADMIN))
3016		return -EPERM;
3017
3018	next_id++;
3019	spin_lock_bh(lock);
3020	if (!idr_get_next(idr, &next_id))
3021		err = -ENOENT;
3022	spin_unlock_bh(lock);
3023
3024	if (!err)
3025		err = put_user(next_id, &uattr->next_id);
3026
3027	return err;
3028}
3029
3030struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
3031{
3032	struct bpf_map *map;
3033
3034	spin_lock_bh(&map_idr_lock);
3035again:
3036	map = idr_get_next(&map_idr, id);
3037	if (map) {
3038		map = __bpf_map_inc_not_zero(map, false);
3039		if (IS_ERR(map)) {
3040			(*id)++;
3041			goto again;
3042		}
3043	}
3044	spin_unlock_bh(&map_idr_lock);
3045
3046	return map;
3047}
3048
3049struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
3050{
3051	struct bpf_prog *prog;
 
 
3052
3053	spin_lock_bh(&prog_idr_lock);
3054again:
3055	prog = idr_get_next(&prog_idr, id);
3056	if (prog) {
3057		prog = bpf_prog_inc_not_zero(prog);
3058		if (IS_ERR(prog)) {
3059			(*id)++;
3060			goto again;
3061		}
3062	}
3063	spin_unlock_bh(&prog_idr_lock);
3064
3065	return prog;
3066}
3067
3068#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
3069
3070struct bpf_prog *bpf_prog_by_id(u32 id)
3071{
3072	struct bpf_prog *prog;
3073
3074	if (!id)
3075		return ERR_PTR(-ENOENT);
3076
3077	spin_lock_bh(&prog_idr_lock);
3078	prog = idr_find(&prog_idr, id);
3079	if (prog)
3080		prog = bpf_prog_inc_not_zero(prog);
3081	else
3082		prog = ERR_PTR(-ENOENT);
3083	spin_unlock_bh(&prog_idr_lock);
3084	return prog;
3085}
3086
3087static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
3088{
3089	struct bpf_prog *prog;
3090	u32 id = attr->prog_id;
3091	int fd;
3092
3093	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
3094		return -EINVAL;
3095
3096	if (!capable(CAP_SYS_ADMIN))
3097		return -EPERM;
3098
3099	prog = bpf_prog_by_id(id);
3100	if (IS_ERR(prog))
3101		return PTR_ERR(prog);
3102
3103	fd = bpf_prog_new_fd(prog);
3104	if (fd < 0)
3105		bpf_prog_put(prog);
3106
3107	return fd;
3108}
3109
3110#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
3111
3112static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
3113{
3114	struct bpf_map *map;
3115	u32 id = attr->map_id;
3116	int f_flags;
3117	int fd;
3118
3119	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
3120	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
3121		return -EINVAL;
3122
3123	if (!capable(CAP_SYS_ADMIN))
3124		return -EPERM;
3125
3126	f_flags = bpf_get_file_flag(attr->open_flags);
3127	if (f_flags < 0)
3128		return f_flags;
3129
3130	spin_lock_bh(&map_idr_lock);
3131	map = idr_find(&map_idr, id);
3132	if (map)
3133		map = __bpf_map_inc_not_zero(map, true);
3134	else
3135		map = ERR_PTR(-ENOENT);
3136	spin_unlock_bh(&map_idr_lock);
3137
3138	if (IS_ERR(map))
3139		return PTR_ERR(map);
3140
3141	fd = bpf_map_new_fd(map, f_flags);
3142	if (fd < 0)
3143		bpf_map_put_with_uref(map);
3144
3145	return fd;
3146}
3147
3148static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
3149					      unsigned long addr, u32 *off,
3150					      u32 *type)
3151{
3152	const struct bpf_map *map;
3153	int i;
3154
3155	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
3156		map = prog->aux->used_maps[i];
3157		if (map == (void *)addr) {
3158			*type = BPF_PSEUDO_MAP_FD;
3159			return map;
3160		}
3161		if (!map->ops->map_direct_value_meta)
3162			continue;
3163		if (!map->ops->map_direct_value_meta(map, addr, off)) {
3164			*type = BPF_PSEUDO_MAP_VALUE;
3165			return map;
3166		}
3167	}
3168
3169	return NULL;
3170}
3171
3172static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
3173					      const struct cred *f_cred)
3174{
3175	const struct bpf_map *map;
3176	struct bpf_insn *insns;
3177	u32 off, type;
3178	u64 imm;
3179	u8 code;
3180	int i;
3181
3182	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
3183			GFP_USER);
3184	if (!insns)
3185		return insns;
3186
3187	for (i = 0; i < prog->len; i++) {
3188		code = insns[i].code;
3189
3190		if (code == (BPF_JMP | BPF_TAIL_CALL)) {
3191			insns[i].code = BPF_JMP | BPF_CALL;
3192			insns[i].imm = BPF_FUNC_tail_call;
3193			/* fall-through */
3194		}
3195		if (code == (BPF_JMP | BPF_CALL) ||
3196		    code == (BPF_JMP | BPF_CALL_ARGS)) {
3197			if (code == (BPF_JMP | BPF_CALL_ARGS))
3198				insns[i].code = BPF_JMP | BPF_CALL;
3199			if (!bpf_dump_raw_ok(f_cred))
3200				insns[i].imm = 0;
3201			continue;
3202		}
3203		if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
3204			insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
3205			continue;
3206		}
3207
3208		if (code != (BPF_LD | BPF_IMM | BPF_DW))
3209			continue;
3210
3211		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
3212		map = bpf_map_from_imm(prog, imm, &off, &type);
3213		if (map) {
3214			insns[i].src_reg = type;
3215			insns[i].imm = map->id;
3216			insns[i + 1].imm = off;
3217			continue;
3218		}
3219	}
3220
3221	return insns;
3222}
3223
3224static int set_info_rec_size(struct bpf_prog_info *info)
3225{
3226	/*
3227	 * Ensure info.*_rec_size is the same as kernel expected size
3228	 *
3229	 * or
3230	 *
3231	 * Only allow zero *_rec_size if both _rec_size and _cnt are
3232	 * zero.  In this case, the kernel will set the expected
3233	 * _rec_size back to the info.
3234	 */
3235
3236	if ((info->nr_func_info || info->func_info_rec_size) &&
3237	    info->func_info_rec_size != sizeof(struct bpf_func_info))
3238		return -EINVAL;
3239
3240	if ((info->nr_line_info || info->line_info_rec_size) &&
3241	    info->line_info_rec_size != sizeof(struct bpf_line_info))
3242		return -EINVAL;
3243
3244	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
3245	    info->jited_line_info_rec_size != sizeof(__u64))
3246		return -EINVAL;
3247
3248	info->func_info_rec_size = sizeof(struct bpf_func_info);
3249	info->line_info_rec_size = sizeof(struct bpf_line_info);
3250	info->jited_line_info_rec_size = sizeof(__u64);
3251
3252	return 0;
3253}
3254
3255static int bpf_prog_get_info_by_fd(struct file *file,
3256				   struct bpf_prog *prog,
3257				   const union bpf_attr *attr,
3258				   union bpf_attr __user *uattr)
3259{
3260	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3261	struct bpf_prog_info info;
3262	u32 info_len = attr->info.info_len;
3263	struct bpf_prog_stats stats;
3264	char __user *uinsns;
3265	u32 ulen;
3266	int err;
3267
3268	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3269	if (err)
3270		return err;
3271	info_len = min_t(u32, sizeof(info), info_len);
3272
3273	memset(&info, 0, sizeof(info));
3274	if (copy_from_user(&info, uinfo, info_len))
3275		return -EFAULT;
3276
3277	info.type = prog->type;
3278	info.id = prog->aux->id;
3279	info.load_time = prog->aux->load_time;
3280	info.created_by_uid = from_kuid_munged(current_user_ns(),
3281					       prog->aux->user->uid);
3282	info.gpl_compatible = prog->gpl_compatible;
3283
3284	memcpy(info.tag, prog->tag, sizeof(prog->tag));
3285	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
3286
3287	ulen = info.nr_map_ids;
3288	info.nr_map_ids = prog->aux->used_map_cnt;
3289	ulen = min_t(u32, info.nr_map_ids, ulen);
3290	if (ulen) {
3291		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
3292		u32 i;
3293
3294		for (i = 0; i < ulen; i++)
3295			if (put_user(prog->aux->used_maps[i]->id,
3296				     &user_map_ids[i]))
3297				return -EFAULT;
3298	}
3299
3300	err = set_info_rec_size(&info);
3301	if (err)
3302		return err;
3303
3304	bpf_prog_get_stats(prog, &stats);
3305	info.run_time_ns = stats.nsecs;
3306	info.run_cnt = stats.cnt;
3307
3308	if (!bpf_capable()) {
3309		info.jited_prog_len = 0;
3310		info.xlated_prog_len = 0;
3311		info.nr_jited_ksyms = 0;
3312		info.nr_jited_func_lens = 0;
3313		info.nr_func_info = 0;
3314		info.nr_line_info = 0;
3315		info.nr_jited_line_info = 0;
3316		goto done;
3317	}
3318
3319	ulen = info.xlated_prog_len;
3320	info.xlated_prog_len = bpf_prog_insn_size(prog);
3321	if (info.xlated_prog_len && ulen) {
3322		struct bpf_insn *insns_sanitized;
3323		bool fault;
3324
3325		if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
3326			info.xlated_prog_insns = 0;
3327			goto done;
3328		}
3329		insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
3330		if (!insns_sanitized)
3331			return -ENOMEM;
3332		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
3333		ulen = min_t(u32, info.xlated_prog_len, ulen);
3334		fault = copy_to_user(uinsns, insns_sanitized, ulen);
3335		kfree(insns_sanitized);
3336		if (fault)
3337			return -EFAULT;
3338	}
3339
3340	if (bpf_prog_is_dev_bound(prog->aux)) {
3341		err = bpf_prog_offload_info_fill(&info, prog);
3342		if (err)
3343			return err;
3344		goto done;
3345	}
3346
3347	/* NOTE: the following code is supposed to be skipped for offload.
3348	 * bpf_prog_offload_info_fill() is the place to fill similar fields
3349	 * for offload.
3350	 */
3351	ulen = info.jited_prog_len;
3352	if (prog->aux->func_cnt) {
3353		u32 i;
3354
3355		info.jited_prog_len = 0;
3356		for (i = 0; i < prog->aux->func_cnt; i++)
3357			info.jited_prog_len += prog->aux->func[i]->jited_len;
3358	} else {
3359		info.jited_prog_len = prog->jited_len;
3360	}
3361
3362	if (info.jited_prog_len && ulen) {
3363		if (bpf_dump_raw_ok(file->f_cred)) {
3364			uinsns = u64_to_user_ptr(info.jited_prog_insns);
3365			ulen = min_t(u32, info.jited_prog_len, ulen);
3366
3367			/* for multi-function programs, copy the JITed
3368			 * instructions for all the functions
3369			 */
3370			if (prog->aux->func_cnt) {
3371				u32 len, free, i;
3372				u8 *img;
3373
3374				free = ulen;
3375				for (i = 0; i < prog->aux->func_cnt; i++) {
3376					len = prog->aux->func[i]->jited_len;
3377					len = min_t(u32, len, free);
3378					img = (u8 *) prog->aux->func[i]->bpf_func;
3379					if (copy_to_user(uinsns, img, len))
3380						return -EFAULT;
3381					uinsns += len;
3382					free -= len;
3383					if (!free)
3384						break;
3385				}
3386			} else {
3387				if (copy_to_user(uinsns, prog->bpf_func, ulen))
3388					return -EFAULT;
3389			}
3390		} else {
3391			info.jited_prog_insns = 0;
3392		}
3393	}
3394
3395	ulen = info.nr_jited_ksyms;
3396	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
3397	if (ulen) {
3398		if (bpf_dump_raw_ok(file->f_cred)) {
3399			unsigned long ksym_addr;
3400			u64 __user *user_ksyms;
3401			u32 i;
3402
3403			/* copy the address of the kernel symbol
3404			 * corresponding to each function
3405			 */
3406			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
3407			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
3408			if (prog->aux->func_cnt) {
3409				for (i = 0; i < ulen; i++) {
3410					ksym_addr = (unsigned long)
3411						prog->aux->func[i]->bpf_func;
3412					if (put_user((u64) ksym_addr,
3413						     &user_ksyms[i]))
3414						return -EFAULT;
3415				}
3416			} else {
3417				ksym_addr = (unsigned long) prog->bpf_func;
3418				if (put_user((u64) ksym_addr, &user_ksyms[0]))
3419					return -EFAULT;
3420			}
3421		} else {
3422			info.jited_ksyms = 0;
3423		}
3424	}
3425
3426	ulen = info.nr_jited_func_lens;
3427	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
3428	if (ulen) {
3429		if (bpf_dump_raw_ok(file->f_cred)) {
3430			u32 __user *user_lens;
3431			u32 func_len, i;
3432
3433			/* copy the JITed image lengths for each function */
3434			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
3435			user_lens = u64_to_user_ptr(info.jited_func_lens);
3436			if (prog->aux->func_cnt) {
3437				for (i = 0; i < ulen; i++) {
3438					func_len =
3439						prog->aux->func[i]->jited_len;
3440					if (put_user(func_len, &user_lens[i]))
3441						return -EFAULT;
3442				}
3443			} else {
3444				func_len = prog->jited_len;
3445				if (put_user(func_len, &user_lens[0]))
3446					return -EFAULT;
3447			}
3448		} else {
3449			info.jited_func_lens = 0;
3450		}
3451	}
3452
3453	if (prog->aux->btf)
3454		info.btf_id = btf_id(prog->aux->btf);
3455
3456	ulen = info.nr_func_info;
3457	info.nr_func_info = prog->aux->func_info_cnt;
3458	if (info.nr_func_info && ulen) {
3459		char __user *user_finfo;
3460
3461		user_finfo = u64_to_user_ptr(info.func_info);
3462		ulen = min_t(u32, info.nr_func_info, ulen);
3463		if (copy_to_user(user_finfo, prog->aux->func_info,
3464				 info.func_info_rec_size * ulen))
3465			return -EFAULT;
3466	}
3467
3468	ulen = info.nr_line_info;
3469	info.nr_line_info = prog->aux->nr_linfo;
3470	if (info.nr_line_info && ulen) {
3471		__u8 __user *user_linfo;
3472
3473		user_linfo = u64_to_user_ptr(info.line_info);
3474		ulen = min_t(u32, info.nr_line_info, ulen);
3475		if (copy_to_user(user_linfo, prog->aux->linfo,
3476				 info.line_info_rec_size * ulen))
3477			return -EFAULT;
3478	}
3479
3480	ulen = info.nr_jited_line_info;
3481	if (prog->aux->jited_linfo)
3482		info.nr_jited_line_info = prog->aux->nr_linfo;
3483	else
3484		info.nr_jited_line_info = 0;
3485	if (info.nr_jited_line_info && ulen) {
3486		if (bpf_dump_raw_ok(file->f_cred)) {
3487			__u64 __user *user_linfo;
3488			u32 i;
3489
3490			user_linfo = u64_to_user_ptr(info.jited_line_info);
3491			ulen = min_t(u32, info.nr_jited_line_info, ulen);
3492			for (i = 0; i < ulen; i++) {
3493				if (put_user((__u64)(long)prog->aux->jited_linfo[i],
3494					     &user_linfo[i]))
3495					return -EFAULT;
3496			}
3497		} else {
3498			info.jited_line_info = 0;
3499		}
3500	}
3501
3502	ulen = info.nr_prog_tags;
3503	info.nr_prog_tags = prog->aux->func_cnt ? : 1;
3504	if (ulen) {
3505		__u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
3506		u32 i;
3507
3508		user_prog_tags = u64_to_user_ptr(info.prog_tags);
3509		ulen = min_t(u32, info.nr_prog_tags, ulen);
3510		if (prog->aux->func_cnt) {
3511			for (i = 0; i < ulen; i++) {
3512				if (copy_to_user(user_prog_tags[i],
3513						 prog->aux->func[i]->tag,
3514						 BPF_TAG_SIZE))
3515					return -EFAULT;
3516			}
3517		} else {
3518			if (copy_to_user(user_prog_tags[0],
3519					 prog->tag, BPF_TAG_SIZE))
3520				return -EFAULT;
3521		}
3522	}
3523
3524done:
3525	if (copy_to_user(uinfo, &info, info_len) ||
3526	    put_user(info_len, &uattr->info.info_len))
3527		return -EFAULT;
3528
3529	return 0;
3530}
3531
3532static int bpf_map_get_info_by_fd(struct file *file,
3533				  struct bpf_map *map,
3534				  const union bpf_attr *attr,
3535				  union bpf_attr __user *uattr)
3536{
3537	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3538	struct bpf_map_info info;
3539	u32 info_len = attr->info.info_len;
3540	int err;
3541
3542	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3543	if (err)
3544		return err;
3545	info_len = min_t(u32, sizeof(info), info_len);
3546
3547	memset(&info, 0, sizeof(info));
3548	info.type = map->map_type;
3549	info.id = map->id;
3550	info.key_size = map->key_size;
3551	info.value_size = map->value_size;
3552	info.max_entries = map->max_entries;
3553	info.map_flags = map->map_flags;
3554	memcpy(info.name, map->name, sizeof(map->name));
3555
3556	if (map->btf) {
3557		info.btf_id = btf_id(map->btf);
3558		info.btf_key_type_id = map->btf_key_type_id;
3559		info.btf_value_type_id = map->btf_value_type_id;
3560	}
3561	info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
3562
3563	if (bpf_map_is_dev_bound(map)) {
3564		err = bpf_map_offload_info_fill(&info, map);
3565		if (err)
3566			return err;
3567	}
3568
3569	if (copy_to_user(uinfo, &info, info_len) ||
3570	    put_user(info_len, &uattr->info.info_len))
3571		return -EFAULT;
3572
3573	return 0;
3574}
3575
3576static int bpf_btf_get_info_by_fd(struct file *file,
3577				  struct btf *btf,
3578				  const union bpf_attr *attr,
3579				  union bpf_attr __user *uattr)
3580{
3581	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3582	u32 info_len = attr->info.info_len;
3583	int err;
3584
3585	err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len);
3586	if (err)
3587		return err;
3588
3589	return btf_get_info_by_fd(btf, attr, uattr);
3590}
3591
3592static int bpf_link_get_info_by_fd(struct file *file,
3593				  struct bpf_link *link,
3594				  const union bpf_attr *attr,
3595				  union bpf_attr __user *uattr)
3596{
3597	struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3598	struct bpf_link_info info;
3599	u32 info_len = attr->info.info_len;
3600	int err;
3601
3602	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3603	if (err)
3604		return err;
3605	info_len = min_t(u32, sizeof(info), info_len);
3606
3607	memset(&info, 0, sizeof(info));
3608	if (copy_from_user(&info, uinfo, info_len))
3609		return -EFAULT;
3610
3611	info.type = link->type;
3612	info.id = link->id;
3613	info.prog_id = link->prog->aux->id;
3614
3615	if (link->ops->fill_link_info) {
3616		err = link->ops->fill_link_info(link, &info);
3617		if (err)
3618			return err;
3619	}
3620
3621	if (copy_to_user(uinfo, &info, info_len) ||
3622	    put_user(info_len, &uattr->info.info_len))
3623		return -EFAULT;
3624
3625	return 0;
3626}
3627
3628
3629#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
3630
3631static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
3632				  union bpf_attr __user *uattr)
3633{
3634	int ufd = attr->info.bpf_fd;
3635	struct fd f;
3636	int err;
3637
3638	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
3639		return -EINVAL;
3640
3641	f = fdget(ufd);
3642	if (!f.file)
3643		return -EBADFD;
3644
3645	if (f.file->f_op == &bpf_prog_fops)
3646		err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
3647					      uattr);
3648	else if (f.file->f_op == &bpf_map_fops)
3649		err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
3650					     uattr);
3651	else if (f.file->f_op == &btf_fops)
3652		err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
3653	else if (f.file->f_op == &bpf_link_fops)
3654		err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
3655					      attr, uattr);
3656	else
3657		err = -EINVAL;
3658
3659	fdput(f);
3660	return err;
3661}
3662
3663#define BPF_BTF_LOAD_LAST_FIELD btf_log_level
3664
3665static int bpf_btf_load(const union bpf_attr *attr)
3666{
3667	if (CHECK_ATTR(BPF_BTF_LOAD))
3668		return -EINVAL;
3669
3670	if (!bpf_capable())
3671		return -EPERM;
3672
3673	return btf_new_fd(attr);
3674}
3675
3676#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
3677
3678static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
3679{
3680	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
3681		return -EINVAL;
3682
3683	if (!capable(CAP_SYS_ADMIN))
3684		return -EPERM;
3685
3686	return btf_get_fd_by_id(attr->btf_id);
3687}
3688
3689static int bpf_task_fd_query_copy(const union bpf_attr *attr,
3690				    union bpf_attr __user *uattr,
3691				    u32 prog_id, u32 fd_type,
3692				    const char *buf, u64 probe_offset,
3693				    u64 probe_addr)
3694{
3695	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
3696	u32 len = buf ? strlen(buf) : 0, input_len;
3697	int err = 0;
3698
3699	if (put_user(len, &uattr->task_fd_query.buf_len))
3700		return -EFAULT;
3701	input_len = attr->task_fd_query.buf_len;
3702	if (input_len && ubuf) {
3703		if (!len) {
3704			/* nothing to copy, just make ubuf NULL terminated */
3705			char zero = '\0';
3706
3707			if (put_user(zero, ubuf))
3708				return -EFAULT;
3709		} else if (input_len >= len + 1) {
3710			/* ubuf can hold the string with NULL terminator */
3711			if (copy_to_user(ubuf, buf, len + 1))
3712				return -EFAULT;
3713		} else {
3714			/* ubuf cannot hold the string with NULL terminator,
3715			 * do a partial copy with NULL terminator.
3716			 */
3717			char zero = '\0';
3718
3719			err = -ENOSPC;
3720			if (copy_to_user(ubuf, buf, input_len - 1))
3721				return -EFAULT;
3722			if (put_user(zero, ubuf + input_len - 1))
3723				return -EFAULT;
3724		}
3725	}
3726
3727	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
3728	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
3729	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
3730	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
3731		return -EFAULT;
3732
3733	return err;
3734}
3735
3736#define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
3737
3738static int bpf_task_fd_query(const union bpf_attr *attr,
3739			     union bpf_attr __user *uattr)
3740{
3741	pid_t pid = attr->task_fd_query.pid;
3742	u32 fd = attr->task_fd_query.fd;
3743	const struct perf_event *event;
3744	struct files_struct *files;
3745	struct task_struct *task;
3746	struct file *file;
3747	int err;
3748
3749	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
3750		return -EINVAL;
3751
3752	if (!capable(CAP_SYS_ADMIN))
3753		return -EPERM;
3754
3755	if (attr->task_fd_query.flags != 0)
3756		return -EINVAL;
3757
3758	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
3759	if (!task)
3760		return -ENOENT;
3761
3762	files = get_files_struct(task);
3763	put_task_struct(task);
3764	if (!files)
3765		return -ENOENT;
3766
3767	err = 0;
3768	spin_lock(&files->file_lock);
3769	file = fcheck_files(files, fd);
3770	if (!file)
3771		err = -EBADF;
3772	else
3773		get_file(file);
3774	spin_unlock(&files->file_lock);
3775	put_files_struct(files);
3776
3777	if (err)
3778		goto out;
3779
3780	if (file->f_op == &bpf_link_fops) {
3781		struct bpf_link *link = file->private_data;
3782
3783		if (link->ops == &bpf_raw_tp_link_lops) {
3784			struct bpf_raw_tp_link *raw_tp =
3785				container_of(link, struct bpf_raw_tp_link, link);
3786			struct bpf_raw_event_map *btp = raw_tp->btp;
3787
3788			err = bpf_task_fd_query_copy(attr, uattr,
3789						     raw_tp->link.prog->aux->id,
3790						     BPF_FD_TYPE_RAW_TRACEPOINT,
3791						     btp->tp->name, 0, 0);
3792			goto put_file;
3793		}
3794		goto out_not_supp;
3795	}
3796
3797	event = perf_get_event(file);
3798	if (!IS_ERR(event)) {
3799		u64 probe_offset, probe_addr;
3800		u32 prog_id, fd_type;
3801		const char *buf;
3802
3803		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
3804					      &buf, &probe_offset,
3805					      &probe_addr);
3806		if (!err)
3807			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
3808						     fd_type, buf,
3809						     probe_offset,
3810						     probe_addr);
3811		goto put_file;
3812	}
3813
3814out_not_supp:
3815	err = -ENOTSUPP;
3816put_file:
3817	fput(file);
3818out:
3819	return err;
3820}
3821
3822#define BPF_MAP_BATCH_LAST_FIELD batch.flags
3823
3824#define BPF_DO_BATCH(fn)			\
3825	do {					\
3826		if (!fn) {			\
3827			err = -ENOTSUPP;	\
3828			goto err_put;		\
3829		}				\
3830		err = fn(map, attr, uattr);	\
3831	} while (0)
3832
3833static int bpf_map_do_batch(const union bpf_attr *attr,
3834			    union bpf_attr __user *uattr,
3835			    int cmd)
3836{
3837	struct bpf_map *map;
3838	int err, ufd;
3839	struct fd f;
3840
3841	if (CHECK_ATTR(BPF_MAP_BATCH))
3842		return -EINVAL;
3843
3844	ufd = attr->batch.map_fd;
3845	f = fdget(ufd);
3846	map = __bpf_map_get(f);
3847	if (IS_ERR(map))
3848		return PTR_ERR(map);
3849
3850	if ((cmd == BPF_MAP_LOOKUP_BATCH ||
3851	     cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) &&
3852	    !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
3853		err = -EPERM;
3854		goto err_put;
3855	}
3856
3857	if (cmd != BPF_MAP_LOOKUP_BATCH &&
3858	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
3859		err = -EPERM;
3860		goto err_put;
3861	}
3862
3863	if (cmd == BPF_MAP_LOOKUP_BATCH)
3864		BPF_DO_BATCH(map->ops->map_lookup_batch);
3865	else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
3866		BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch);
3867	else if (cmd == BPF_MAP_UPDATE_BATCH)
3868		BPF_DO_BATCH(map->ops->map_update_batch);
3869	else
3870		BPF_DO_BATCH(map->ops->map_delete_batch);
3871
3872err_put:
3873	fdput(f);
3874	return err;
3875}
3876
3877static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3878{
3879	if (attr->link_create.attach_type == BPF_TRACE_ITER &&
3880	    prog->expected_attach_type == BPF_TRACE_ITER)
3881		return bpf_iter_link_attach(attr, prog);
3882
3883	return -EINVAL;
3884}
3885
3886#define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len
3887static int link_create(union bpf_attr *attr)
3888{
3889	enum bpf_prog_type ptype;
3890	struct bpf_prog *prog;
3891	int ret;
3892
3893	if (CHECK_ATTR(BPF_LINK_CREATE))
3894		return -EINVAL;
3895
3896	ptype = attach_type_to_prog_type(attr->link_create.attach_type);
3897	if (ptype == BPF_PROG_TYPE_UNSPEC)
3898		return -EINVAL;
3899
3900	prog = bpf_prog_get_type(attr->link_create.prog_fd, ptype);
3901	if (IS_ERR(prog))
3902		return PTR_ERR(prog);
3903
3904	ret = bpf_prog_attach_check_attach_type(prog,
3905						attr->link_create.attach_type);
3906	if (ret)
3907		goto err_out;
3908
3909	switch (ptype) {
3910	case BPF_PROG_TYPE_CGROUP_SKB:
3911	case BPF_PROG_TYPE_CGROUP_SOCK:
3912	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3913	case BPF_PROG_TYPE_SOCK_OPS:
3914	case BPF_PROG_TYPE_CGROUP_DEVICE:
3915	case BPF_PROG_TYPE_CGROUP_SYSCTL:
3916	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3917		ret = cgroup_bpf_link_attach(attr, prog);
3918		break;
3919	case BPF_PROG_TYPE_TRACING:
3920		ret = tracing_bpf_link_attach(attr, prog);
3921		break;
3922	case BPF_PROG_TYPE_FLOW_DISSECTOR:
3923	case BPF_PROG_TYPE_SK_LOOKUP:
3924		ret = netns_bpf_link_create(attr, prog);
3925		break;
3926#ifdef CONFIG_NET
3927	case BPF_PROG_TYPE_XDP:
3928		ret = bpf_xdp_link_attach(attr, prog);
3929		break;
3930#endif
3931	default:
3932		ret = -EINVAL;
3933	}
3934
3935err_out:
3936	if (ret < 0)
3937		bpf_prog_put(prog);
3938	return ret;
3939}
3940
3941#define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
3942
3943static int link_update(union bpf_attr *attr)
3944{
3945	struct bpf_prog *old_prog = NULL, *new_prog;
3946	struct bpf_link *link;
3947	u32 flags;
3948	int ret;
3949
3950	if (CHECK_ATTR(BPF_LINK_UPDATE))
3951		return -EINVAL;
3952
3953	flags = attr->link_update.flags;
3954	if (flags & ~BPF_F_REPLACE)
3955		return -EINVAL;
3956
3957	link = bpf_link_get_from_fd(attr->link_update.link_fd);
3958	if (IS_ERR(link))
3959		return PTR_ERR(link);
3960
3961	new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
3962	if (IS_ERR(new_prog)) {
3963		ret = PTR_ERR(new_prog);
3964		goto out_put_link;
3965	}
3966
3967	if (flags & BPF_F_REPLACE) {
3968		old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
3969		if (IS_ERR(old_prog)) {
3970			ret = PTR_ERR(old_prog);
3971			old_prog = NULL;
3972			goto out_put_progs;
3973		}
3974	} else if (attr->link_update.old_prog_fd) {
3975		ret = -EINVAL;
3976		goto out_put_progs;
3977	}
3978
3979	if (link->ops->update_prog)
3980		ret = link->ops->update_prog(link, new_prog, old_prog);
3981	else
3982		ret = -EINVAL;
3983
3984out_put_progs:
3985	if (old_prog)
3986		bpf_prog_put(old_prog);
3987	if (ret)
3988		bpf_prog_put(new_prog);
3989out_put_link:
3990	bpf_link_put(link);
3991	return ret;
3992}
3993
3994#define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd
3995
3996static int link_detach(union bpf_attr *attr)
3997{
3998	struct bpf_link *link;
3999	int ret;
4000
4001	if (CHECK_ATTR(BPF_LINK_DETACH))
4002		return -EINVAL;
4003
4004	link = bpf_link_get_from_fd(attr->link_detach.link_fd);
4005	if (IS_ERR(link))
4006		return PTR_ERR(link);
4007
4008	if (link->ops->detach)
4009		ret = link->ops->detach(link);
4010	else
4011		ret = -EOPNOTSUPP;
4012
4013	bpf_link_put(link);
4014	return ret;
4015}
4016
4017static int bpf_link_inc_not_zero(struct bpf_link *link)
4018{
4019	return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? 0 : -ENOENT;
4020}
4021
4022#define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id
4023
4024static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
4025{
4026	struct bpf_link *link;
4027	u32 id = attr->link_id;
4028	int fd, err;
4029
4030	if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
4031		return -EINVAL;
4032
4033	if (!capable(CAP_SYS_ADMIN))
4034		return -EPERM;
4035
4036	spin_lock_bh(&link_idr_lock);
4037	link = idr_find(&link_idr, id);
4038	/* before link is "settled", ID is 0, pretend it doesn't exist yet */
4039	if (link) {
4040		if (link->id)
4041			err = bpf_link_inc_not_zero(link);
4042		else
4043			err = -EAGAIN;
4044	} else {
4045		err = -ENOENT;
4046	}
4047	spin_unlock_bh(&link_idr_lock);
4048
4049	if (err)
4050		return err;
4051
4052	fd = bpf_link_new_fd(link);
4053	if (fd < 0)
4054		bpf_link_put(link);
4055
4056	return fd;
4057}
4058
4059DEFINE_MUTEX(bpf_stats_enabled_mutex);
4060
4061static int bpf_stats_release(struct inode *inode, struct file *file)
4062{
4063	mutex_lock(&bpf_stats_enabled_mutex);
4064	static_key_slow_dec(&bpf_stats_enabled_key.key);
4065	mutex_unlock(&bpf_stats_enabled_mutex);
4066	return 0;
4067}
4068
4069static const struct file_operations bpf_stats_fops = {
4070	.release = bpf_stats_release,
4071};
4072
4073static int bpf_enable_runtime_stats(void)
4074{
4075	int fd;
4076
4077	mutex_lock(&bpf_stats_enabled_mutex);
4078
4079	/* Set a very high limit to avoid overflow */
4080	if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
4081		mutex_unlock(&bpf_stats_enabled_mutex);
4082		return -EBUSY;
4083	}
4084
4085	fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
4086	if (fd >= 0)
4087		static_key_slow_inc(&bpf_stats_enabled_key.key);
4088
4089	mutex_unlock(&bpf_stats_enabled_mutex);
4090	return fd;
4091}
4092
4093#define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type
4094
4095static int bpf_enable_stats(union bpf_attr *attr)
4096{
4097
4098	if (CHECK_ATTR(BPF_ENABLE_STATS))
4099		return -EINVAL;
4100
4101	if (!capable(CAP_SYS_ADMIN))
4102		return -EPERM;
4103
4104	switch (attr->enable_stats.type) {
4105	case BPF_STATS_RUN_TIME:
4106		return bpf_enable_runtime_stats();
4107	default:
4108		break;
4109	}
4110	return -EINVAL;
4111}
4112
4113#define BPF_ITER_CREATE_LAST_FIELD iter_create.flags
4114
4115static int bpf_iter_create(union bpf_attr *attr)
4116{
4117	struct bpf_link *link;
4118	int err;
4119
4120	if (CHECK_ATTR(BPF_ITER_CREATE))
4121		return -EINVAL;
4122
4123	if (attr->iter_create.flags)
4124		return -EINVAL;
4125
4126	link = bpf_link_get_from_fd(attr->iter_create.link_fd);
4127	if (IS_ERR(link))
4128		return PTR_ERR(link);
4129
4130	err = bpf_iter_new_fd(link);
4131	bpf_link_put(link);
4132
4133	return err;
4134}
4135
4136SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
4137{
4138	union bpf_attr attr;
4139	int err;
4140
4141	if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
4142		return -EPERM;
4143
4144	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
4145	if (err)
4146		return err;
4147	size = min_t(u32, size, sizeof(attr));
4148
4149	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
4150	memset(&attr, 0, sizeof(attr));
4151	if (copy_from_user(&attr, uattr, size) != 0)
4152		return -EFAULT;
4153
4154	err = security_bpf(cmd, &attr, size);
4155	if (err < 0)
4156		return err;
4157
4158	switch (cmd) {
4159	case BPF_MAP_CREATE:
4160		err = map_create(&attr);
4161		break;
4162	case BPF_MAP_LOOKUP_ELEM:
4163		err = map_lookup_elem(&attr);
4164		break;
4165	case BPF_MAP_UPDATE_ELEM:
4166		err = map_update_elem(&attr);
4167		break;
4168	case BPF_MAP_DELETE_ELEM:
4169		err = map_delete_elem(&attr);
4170		break;
4171	case BPF_MAP_GET_NEXT_KEY:
4172		err = map_get_next_key(&attr);
4173		break;
4174	case BPF_MAP_FREEZE:
4175		err = map_freeze(&attr);
4176		break;
4177	case BPF_PROG_LOAD:
4178		err = bpf_prog_load(&attr, uattr);
4179		break;
4180	case BPF_OBJ_PIN:
4181		err = bpf_obj_pin(&attr);
4182		break;
4183	case BPF_OBJ_GET:
4184		err = bpf_obj_get(&attr);
4185		break;
4186	case BPF_PROG_ATTACH:
4187		err = bpf_prog_attach(&attr);
4188		break;
4189	case BPF_PROG_DETACH:
4190		err = bpf_prog_detach(&attr);
4191		break;
4192	case BPF_PROG_QUERY:
4193		err = bpf_prog_query(&attr, uattr);
4194		break;
4195	case BPF_PROG_TEST_RUN:
4196		err = bpf_prog_test_run(&attr, uattr);
4197		break;
4198	case BPF_PROG_GET_NEXT_ID:
4199		err = bpf_obj_get_next_id(&attr, uattr,
4200					  &prog_idr, &prog_idr_lock);
4201		break;
4202	case BPF_MAP_GET_NEXT_ID:
4203		err = bpf_obj_get_next_id(&attr, uattr,
4204					  &map_idr, &map_idr_lock);
4205		break;
4206	case BPF_BTF_GET_NEXT_ID:
4207		err = bpf_obj_get_next_id(&attr, uattr,
4208					  &btf_idr, &btf_idr_lock);
4209		break;
4210	case BPF_PROG_GET_FD_BY_ID:
4211		err = bpf_prog_get_fd_by_id(&attr);
4212		break;
4213	case BPF_MAP_GET_FD_BY_ID:
4214		err = bpf_map_get_fd_by_id(&attr);
4215		break;
4216	case BPF_OBJ_GET_INFO_BY_FD:
4217		err = bpf_obj_get_info_by_fd(&attr, uattr);
4218		break;
4219	case BPF_RAW_TRACEPOINT_OPEN:
4220		err = bpf_raw_tracepoint_open(&attr);
4221		break;
4222	case BPF_BTF_LOAD:
4223		err = bpf_btf_load(&attr);
4224		break;
4225	case BPF_BTF_GET_FD_BY_ID:
4226		err = bpf_btf_get_fd_by_id(&attr);
4227		break;
4228	case BPF_TASK_FD_QUERY:
4229		err = bpf_task_fd_query(&attr, uattr);
4230		break;
4231	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
4232		err = map_lookup_and_delete_elem(&attr);
4233		break;
4234	case BPF_MAP_LOOKUP_BATCH:
4235		err = bpf_map_do_batch(&attr, uattr, BPF_MAP_LOOKUP_BATCH);
4236		break;
4237	case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
4238		err = bpf_map_do_batch(&attr, uattr,
4239				       BPF_MAP_LOOKUP_AND_DELETE_BATCH);
4240		break;
4241	case BPF_MAP_UPDATE_BATCH:
4242		err = bpf_map_do_batch(&attr, uattr, BPF_MAP_UPDATE_BATCH);
4243		break;
4244	case BPF_MAP_DELETE_BATCH:
4245		err = bpf_map_do_batch(&attr, uattr, BPF_MAP_DELETE_BATCH);
4246		break;
4247	case BPF_LINK_CREATE:
4248		err = link_create(&attr);
4249		break;
4250	case BPF_LINK_UPDATE:
4251		err = link_update(&attr);
4252		break;
4253	case BPF_LINK_GET_FD_BY_ID:
4254		err = bpf_link_get_fd_by_id(&attr);
4255		break;
4256	case BPF_LINK_GET_NEXT_ID:
4257		err = bpf_obj_get_next_id(&attr, uattr,
4258					  &link_idr, &link_idr_lock);
4259		break;
4260	case BPF_ENABLE_STATS:
4261		err = bpf_enable_stats(&attr);
4262		break;
4263	case BPF_ITER_CREATE:
4264		err = bpf_iter_create(&attr);
4265		break;
4266	case BPF_LINK_DETACH:
4267		err = link_detach(&attr);
4268		break;
4269	default:
4270		err = -EINVAL;
4271		break;
4272	}
4273
4274	return err;
4275}