net/core/sock_map.c (v5.9)
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
   3
   4#include <linux/bpf.h>
   5#include <linux/filter.h>
   6#include <linux/errno.h>
   7#include <linux/file.h>
   8#include <linux/net.h>
   9#include <linux/workqueue.h>
  10#include <linux/skmsg.h>
  11#include <linux/list.h>
  12#include <linux/jhash.h>
  13#include <linux/sock_diag.h>
  14#include <net/udp.h>
  15
  16struct bpf_stab {
  17	struct bpf_map map;
  18	struct sock **sks;
  19	struct sk_psock_progs progs;
  20	raw_spinlock_t lock;
  21};
  22
  23#define SOCK_CREATE_FLAG_MASK				\
  24	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
  25
  26static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
  27{
  28	struct bpf_stab *stab;
  29	u64 cost;
  30	int err;
  31
  32	if (!capable(CAP_NET_ADMIN))
  33		return ERR_PTR(-EPERM);
  34	if (attr->max_entries == 0 ||
  35	    attr->key_size    != 4 ||
  36	    (attr->value_size != sizeof(u32) &&
  37	     attr->value_size != sizeof(u64)) ||
  38	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
  39		return ERR_PTR(-EINVAL);
  40
  41	stab = kzalloc(sizeof(*stab), GFP_USER);
  42	if (!stab)
  43		return ERR_PTR(-ENOMEM);
  44
  45	bpf_map_init_from_attr(&stab->map, attr);
  46	raw_spin_lock_init(&stab->lock);
  47
  48	/* Make sure page count doesn't overflow. */
  49	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
  50	err = bpf_map_charge_init(&stab->map.memory, cost);
  51	if (err)
  52		goto free_stab;
  53
  54	stab->sks = bpf_map_area_alloc(stab->map.max_entries *
  55				       sizeof(struct sock *),
  56				       stab->map.numa_node);
  57	if (stab->sks)
  58		return &stab->map;
  59	err = -ENOMEM;
  60	bpf_map_charge_finish(&stab->map.memory);
  61free_stab:
  62	kfree(stab);
  63	return ERR_PTR(err);
  64}
  65
  66int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
  67{
  68	u32 ufd = attr->target_fd;
  69	struct bpf_map *map;
  70	struct fd f;
  71	int ret;
  72
  73	if (attr->attach_flags || attr->replace_bpf_fd)
  74		return -EINVAL;
  75
  76	f = fdget(ufd);
  77	map = __bpf_map_get(f);
  78	if (IS_ERR(map))
  79		return PTR_ERR(map);
  80	ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
  81	fdput(f);
  82	return ret;
  83}
  84
  85int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
  86{
  87	u32 ufd = attr->target_fd;
  88	struct bpf_prog *prog;
  89	struct bpf_map *map;
  90	struct fd f;
  91	int ret;
  92
  93	if (attr->attach_flags || attr->replace_bpf_fd)
  94		return -EINVAL;
  95
  96	f = fdget(ufd);
  97	map = __bpf_map_get(f);
  98	if (IS_ERR(map))
  99		return PTR_ERR(map);
 100
 101	prog = bpf_prog_get(attr->attach_bpf_fd);
 102	if (IS_ERR(prog)) {
 103		ret = PTR_ERR(prog);
 104		goto put_map;
 105	}
 106
 107	if (prog->type != ptype) {
 108		ret = -EINVAL;
 109		goto put_prog;
 110	}
 111
 112	ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
 113put_prog:
 114	bpf_prog_put(prog);
 115put_map:
 116	fdput(f);
 117	return ret;
 118}
 119
 120static void sock_map_sk_acquire(struct sock *sk)
 121	__acquires(&sk->sk_lock.slock)
 122{
 123	lock_sock(sk);
 124	preempt_disable();
 125	rcu_read_lock();
 126}
 127
 128static void sock_map_sk_release(struct sock *sk)
 129	__releases(&sk->sk_lock.slock)
 130{
 131	rcu_read_unlock();
 132	preempt_enable();
 133	release_sock(sk);
 134}
 135
 136static void sock_map_add_link(struct sk_psock *psock,
 137			      struct sk_psock_link *link,
 138			      struct bpf_map *map, void *link_raw)
 139{
 140	link->link_raw = link_raw;
 141	link->map = map;
 142	spin_lock_bh(&psock->link_lock);
 143	list_add_tail(&link->list, &psock->link);
 144	spin_unlock_bh(&psock->link_lock);
 145}
 146
 147static void sock_map_del_link(struct sock *sk,
 148			      struct sk_psock *psock, void *link_raw)
 149{
 150	struct sk_psock_link *link, *tmp;
 151	bool strp_stop = false;
 152
 153	spin_lock_bh(&psock->link_lock);
 154	list_for_each_entry_safe(link, tmp, &psock->link, list) {
 155		if (link->link_raw == link_raw) {
 156			struct bpf_map *map = link->map;
 157			struct bpf_stab *stab = container_of(map, struct bpf_stab,
 158							     map);
 159			if (psock->parser.enabled && stab->progs.skb_parser)
 160				strp_stop = true;
 161			list_del(&link->list);
 162			sk_psock_free_link(link);
 163		}
 164	}
 165	spin_unlock_bh(&psock->link_lock);
 166	if (strp_stop) {
 167		write_lock_bh(&sk->sk_callback_lock);
 168		sk_psock_stop_strp(sk, psock);
 169		write_unlock_bh(&sk->sk_callback_lock);
 170	}
 171}
 172
 173static void sock_map_unref(struct sock *sk, void *link_raw)
 174{
 175	struct sk_psock *psock = sk_psock(sk);
 176
 177	if (likely(psock)) {
 178		sock_map_del_link(sk, psock, link_raw);
 179		sk_psock_put(sk, psock);
 180	}
 181}
 182
 183static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
 184{
 185	struct proto *prot;
 186
 187	sock_owned_by_me(sk);
 188
 189	switch (sk->sk_type) {
 190	case SOCK_STREAM:
 191		prot = tcp_bpf_get_proto(sk, psock);
 192		break;
 193
 194	case SOCK_DGRAM:
 195		prot = udp_bpf_get_proto(sk, psock);
 196		break;
 197
 198	default:
 199		return -EINVAL;
 200	}
 201
 202	if (IS_ERR(prot))
 203		return PTR_ERR(prot);
 204
 205	sk_psock_update_proto(sk, psock, prot);
 206	return 0;
 207}
 208
 209static struct sk_psock *sock_map_psock_get_checked(struct sock *sk)
 210{
 211	struct sk_psock *psock;
 212
 213	rcu_read_lock();
 214	psock = sk_psock(sk);
 215	if (psock) {
 216		if (sk->sk_prot->close != sock_map_close) {
 217			psock = ERR_PTR(-EBUSY);
 218			goto out;
 219		}
 220
 221		if (!refcount_inc_not_zero(&psock->refcnt))
 222			psock = ERR_PTR(-EBUSY);
 223	}
 224out:
 225	rcu_read_unlock();
 226	return psock;
 227}
 228
 229static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs,
 230			 struct sock *sk)
 231{
 232	struct bpf_prog *msg_parser, *skb_parser, *skb_verdict;
 233	struct sk_psock *psock;
 234	bool skb_progs;
 235	int ret;
 236
 237	skb_verdict = READ_ONCE(progs->skb_verdict);
 238	skb_parser = READ_ONCE(progs->skb_parser);
 239	skb_progs = skb_parser && skb_verdict;
 240	if (skb_progs) {
 241		skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
 242		if (IS_ERR(skb_verdict))
 243			return PTR_ERR(skb_verdict);
 244		skb_parser = bpf_prog_inc_not_zero(skb_parser);
 245		if (IS_ERR(skb_parser)) {
 246			bpf_prog_put(skb_verdict);
 247			return PTR_ERR(skb_parser);
 248		}
 249	}
 250
 251	msg_parser = READ_ONCE(progs->msg_parser);
 252	if (msg_parser) {
 253		msg_parser = bpf_prog_inc_not_zero(msg_parser);
 254		if (IS_ERR(msg_parser)) {
 255			ret = PTR_ERR(msg_parser);
 256			goto out;
 257		}
 258	}
 259
 260	psock = sock_map_psock_get_checked(sk);
 261	if (IS_ERR(psock)) {
 262		ret = PTR_ERR(psock);
 263		goto out_progs;
 264	}
 265
 266	if (psock) {
 267		if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
 268		    (skb_progs  && READ_ONCE(psock->progs.skb_parser))) {
 269			sk_psock_put(sk, psock);
 270			ret = -EBUSY;
 271			goto out_progs;
 272		}
 273	} else {
 274		psock = sk_psock_init(sk, map->numa_node);
 275		if (!psock) {
 276			ret = -ENOMEM;
 277			goto out_progs;
 278		}
 279	}
 280
 281	if (msg_parser)
 282		psock_set_prog(&psock->progs.msg_parser, msg_parser);
 283
 284	ret = sock_map_init_proto(sk, psock);
 285	if (ret < 0)
 286		goto out_drop;
 287
 288	write_lock_bh(&sk->sk_callback_lock);
 289	if (skb_progs && !psock->parser.enabled) {
 290		ret = sk_psock_init_strp(sk, psock);
 291		if (ret) {
 292			write_unlock_bh(&sk->sk_callback_lock);
 293			goto out_drop;
 294		}
 295		psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
 296		psock_set_prog(&psock->progs.skb_parser, skb_parser);
 297		sk_psock_start_strp(sk, psock);
 298	}
 299	write_unlock_bh(&sk->sk_callback_lock);
 300	return 0;
 301out_drop:
 302	sk_psock_put(sk, psock);
 303out_progs:
 304	if (msg_parser)
 305		bpf_prog_put(msg_parser);
 306out:
 307	if (skb_progs) {
 308		bpf_prog_put(skb_verdict);
 309		bpf_prog_put(skb_parser);
 310	}
 311	return ret;
 312}
 313
 314static int sock_map_link_no_progs(struct bpf_map *map, struct sock *sk)
 315{
 316	struct sk_psock *psock;
 317	int ret;
 318
 319	psock = sock_map_psock_get_checked(sk);
 320	if (IS_ERR(psock))
 321		return PTR_ERR(psock);
 322
 323	if (!psock) {
 324		psock = sk_psock_init(sk, map->numa_node);
 325		if (!psock)
 326			return -ENOMEM;
 327	}
 328
 329	ret = sock_map_init_proto(sk, psock);
 330	if (ret < 0)
 331		sk_psock_put(sk, psock);
 332	return ret;
 333}
 334
 335static void sock_map_free(struct bpf_map *map)
 336{
 337	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 338	int i;
 339
  340	/* After the sync no updates or deletes will be in-flight, so it
  341	 * is safe to walk the map and remove entries without risking a
  342	 * race in the EEXIST update case.
  343	 */
 344	synchronize_rcu();
 345	for (i = 0; i < stab->map.max_entries; i++) {
 346		struct sock **psk = &stab->sks[i];
 347		struct sock *sk;
 348
 349		sk = xchg(psk, NULL);
 350		if (sk) {
 351			lock_sock(sk);
 352			rcu_read_lock();
 353			sock_map_unref(sk, psk);
 354			rcu_read_unlock();
 355			release_sock(sk);
 356		}
 357	}
 358
 359	/* wait for psock readers accessing its map link */
 360	synchronize_rcu();
 361
 362	bpf_map_area_free(stab->sks);
 363	kfree(stab);
 364}
 365
 366static void sock_map_release_progs(struct bpf_map *map)
 367{
 368	psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
 369}
 370
 371static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
 372{
 373	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 374
 375	WARN_ON_ONCE(!rcu_read_lock_held());
 376
 377	if (unlikely(key >= map->max_entries))
 378		return NULL;
 379	return READ_ONCE(stab->sks[key]);
 380}
 381
 382static void *sock_map_lookup(struct bpf_map *map, void *key)
 383{
 384	struct sock *sk;
 385
 386	sk = __sock_map_lookup_elem(map, *(u32 *)key);
 387	if (!sk || !sk_fullsock(sk))
 388		return NULL;
 389	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
 390		return NULL;
 391	return sk;
 392}
 393
 394static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
 395{
 396	struct sock *sk;
 397
 398	if (map->value_size != sizeof(u64))
 399		return ERR_PTR(-ENOSPC);
 400
 401	sk = __sock_map_lookup_elem(map, *(u32 *)key);
 402	if (!sk)
 403		return ERR_PTR(-ENOENT);
 404
 405	sock_gen_cookie(sk);
 406	return &sk->sk_cookie;
 407}
 408
 409static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
 410			     struct sock **psk)
 411{
 412	struct sock *sk;
 413	int err = 0;
 414
 415	raw_spin_lock_bh(&stab->lock);
 416	sk = *psk;
 417	if (!sk_test || sk_test == sk)
 418		sk = xchg(psk, NULL);
 419
 420	if (likely(sk))
 421		sock_map_unref(sk, psk);
 422	else
 423		err = -EINVAL;
 424
 425	raw_spin_unlock_bh(&stab->lock);
 426	return err;
 427}
 428
 429static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
 430				      void *link_raw)
 431{
 432	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 433
 434	__sock_map_delete(stab, sk, link_raw);
 435}
 436
 437static int sock_map_delete_elem(struct bpf_map *map, void *key)
 438{
 439	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 440	u32 i = *(u32 *)key;
 441	struct sock **psk;
 442
 443	if (unlikely(i >= map->max_entries))
 444		return -EINVAL;
 445
 446	psk = &stab->sks[i];
 447	return __sock_map_delete(stab, NULL, psk);
 448}
 449
 450static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
 451{
 452	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 453	u32 i = key ? *(u32 *)key : U32_MAX;
 454	u32 *key_next = next;
 455
 456	if (i == stab->map.max_entries - 1)
 457		return -ENOENT;
 458	if (i >= stab->map.max_entries)
 459		*key_next = 0;
 460	else
 461		*key_next = i + 1;
 462	return 0;
 463}
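
/*
 * Editor's example (not part of sock_map.c): walking a sockmap's key space
 * from user space with libbpf. A minimal sketch, assuming an already-created
 * map fd whose value_size is 8, so lookups return the socket cookie exposed
 * by sock_map_lookup_sys() above; dump_sockmap() is a hypothetical helper.
 */
#include <stdio.h>
#include <bpf/bpf.h>

static void dump_sockmap(int map_fd)
{
	__u32 key, next_key;
	__u64 cookie;

	/* A NULL current key yields the first key; see the U32_MAX case in
	 * sock_map_get_next_key() above. */
	if (bpf_map_get_next_key(map_fd, NULL, &next_key))
		return;
	do {
		key = next_key;
		/* get_next_key walks all indices; only occupied slots hold a
		 * socket, so lookup fails with -ENOENT on empty ones. */
		if (!bpf_map_lookup_elem(map_fd, &key, &cookie))
			printf("slot %u -> cookie %llu\n", key,
			       (unsigned long long)cookie);
	} while (!bpf_map_get_next_key(map_fd, &key, &next_key));
}
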
 464
 465static bool sock_map_redirect_allowed(const struct sock *sk);
 466
 467static int sock_map_update_common(struct bpf_map *map, u32 idx,
 468				  struct sock *sk, u64 flags)
 469{
 470	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 471	struct sk_psock_link *link;
 472	struct sk_psock *psock;
 473	struct sock *osk;
 474	int ret;
 475
 476	WARN_ON_ONCE(!rcu_read_lock_held());
 477	if (unlikely(flags > BPF_EXIST))
 478		return -EINVAL;
 479	if (unlikely(idx >= map->max_entries))
 480		return -E2BIG;
 481	if (inet_csk_has_ulp(sk))
 482		return -EINVAL;
 483
 484	link = sk_psock_init_link();
 485	if (!link)
 486		return -ENOMEM;
 487
 488	/* Only sockets we can redirect into/from in BPF need to hold
 489	 * refs to parser/verdict progs and have their sk_data_ready
 490	 * and sk_write_space callbacks overridden.
 491	 */
 492	if (sock_map_redirect_allowed(sk))
 493		ret = sock_map_link(map, &stab->progs, sk);
 494	else
 495		ret = sock_map_link_no_progs(map, sk);
 496	if (ret < 0)
 497		goto out_free;
 498
 499	psock = sk_psock(sk);
 500	WARN_ON_ONCE(!psock);
 501
 502	raw_spin_lock_bh(&stab->lock);
 503	osk = stab->sks[idx];
 504	if (osk && flags == BPF_NOEXIST) {
 505		ret = -EEXIST;
 506		goto out_unlock;
 507	} else if (!osk && flags == BPF_EXIST) {
 508		ret = -ENOENT;
 509		goto out_unlock;
 510	}
 511
 512	sock_map_add_link(psock, link, map, &stab->sks[idx]);
 513	stab->sks[idx] = sk;
 514	if (osk)
 515		sock_map_unref(osk, &stab->sks[idx]);
 516	raw_spin_unlock_bh(&stab->lock);
 517	return 0;
 518out_unlock:
 519	raw_spin_unlock_bh(&stab->lock);
 520	if (psock)
 521		sk_psock_put(sk, psock);
 522out_free:
 523	sk_psock_free_link(link);
 524	return ret;
 525}
 526
 527static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
 528{
 529	return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
 530	       ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
 531	       ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
 532}
 533
 534static bool sk_is_tcp(const struct sock *sk)
 535{
 536	return sk->sk_type == SOCK_STREAM &&
 537	       sk->sk_protocol == IPPROTO_TCP;
 538}
 539
 540static bool sk_is_udp(const struct sock *sk)
 541{
 542	return sk->sk_type == SOCK_DGRAM &&
 543	       sk->sk_protocol == IPPROTO_UDP;
 544}
 545
 546static bool sock_map_redirect_allowed(const struct sock *sk)
 547{
 548	return sk_is_tcp(sk) && sk->sk_state != TCP_LISTEN;
 549}
 550
 551static bool sock_map_sk_is_suitable(const struct sock *sk)
 552{
 553	return sk_is_tcp(sk) || sk_is_udp(sk);
 554}
 555
 556static bool sock_map_sk_state_allowed(const struct sock *sk)
 557{
 558	if (sk_is_tcp(sk))
 559		return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
 560	else if (sk_is_udp(sk))
 561		return sk_hashed(sk);
 562
 563	return false;
 564}
 565
 566static int sock_map_update_elem(struct bpf_map *map, void *key,
 567				void *value, u64 flags)
 568{
 569	u32 idx = *(u32 *)key;
 570	struct socket *sock;
 571	struct sock *sk;
 572	int ret;
 573	u64 ufd;
 574
 575	if (map->value_size == sizeof(u64))
 576		ufd = *(u64 *)value;
 577	else
 578		ufd = *(u32 *)value;
 579	if (ufd > S32_MAX)
 580		return -EINVAL;
 581
 582	sock = sockfd_lookup(ufd, &ret);
 583	if (!sock)
 584		return ret;
 585	sk = sock->sk;
 586	if (!sk) {
 587		ret = -EINVAL;
 588		goto out;
 589	}
 590	if (!sock_map_sk_is_suitable(sk)) {
 591		ret = -EOPNOTSUPP;
 592		goto out;
 593	}
 594
 595	sock_map_sk_acquire(sk);
 596	if (!sock_map_sk_state_allowed(sk))
 597		ret = -EOPNOTSUPP;
 598	else
 599		ret = sock_map_update_common(map, idx, sk, flags);
 600	sock_map_sk_release(sk);
 601out:
 602	fput(sock->file);
 603	return ret;
 604}
 605
 606BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
 607	   struct bpf_map *, map, void *, key, u64, flags)
 608{
 609	WARN_ON_ONCE(!rcu_read_lock_held());
 610
 611	if (likely(sock_map_sk_is_suitable(sops->sk) &&
 612		   sock_map_op_okay(sops)))
 613		return sock_map_update_common(map, *(u32 *)key, sops->sk,
 614					      flags);
 615	return -EOPNOTSUPP;
 616}
 617
 618const struct bpf_func_proto bpf_sock_map_update_proto = {
 619	.func		= bpf_sock_map_update,
 620	.gpl_only	= false,
 621	.pkt_access	= true,
 622	.ret_type	= RET_INTEGER,
 623	.arg1_type	= ARG_PTR_TO_CTX,
 624	.arg2_type	= ARG_CONST_MAP_PTR,
 625	.arg3_type	= ARG_PTR_TO_MAP_KEY,
 626	.arg4_type	= ARG_ANYTHING,
 627};
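
/*
 * Editor's example (not part of sock_map.c): a BPF sockops program feeding
 * established TCP connections into a sockmap through the bpf_sock_map_update()
 * helper validated above. A sketch only; the map name, the single-slot key
 * scheme, and the two-entry sizing are assumptions.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 2);
	__type(key, __u32);
	__type(value, __u64);
} sock_map SEC(".maps");

SEC("sockops")
int sockmap_add(struct bpf_sock_ops *skops)
{
	__u32 key = 0;

	/* Only the ops accepted by sock_map_op_okay() may update the map. */
	switch (skops->op) {
	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		bpf_sock_map_update(skops, &sock_map, &key, BPF_NOEXIST);
		break;
	}
	return 0;
}

char _license[] SEC("license") = "GPL";
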
 628
 629BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
 630	   struct bpf_map *, map, u32, key, u64, flags)
 631{
 632	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 633	struct sock *sk;
 634
 635	if (unlikely(flags & ~(BPF_F_INGRESS)))
 636		return SK_DROP;
 637
 638	sk = __sock_map_lookup_elem(map, key);
 639	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
 640		return SK_DROP;
 641
 642	tcb->bpf.flags = flags;
 643	tcb->bpf.sk_redir = sk;
 644	return SK_PASS;
 645}
 646
 647const struct bpf_func_proto bpf_sk_redirect_map_proto = {
 648	.func           = bpf_sk_redirect_map,
 649	.gpl_only       = false,
 650	.ret_type       = RET_INTEGER,
 651	.arg1_type	= ARG_PTR_TO_CTX,
 652	.arg2_type      = ARG_CONST_MAP_PTR,
 653	.arg3_type      = ARG_ANYTHING,
 654	.arg4_type      = ARG_ANYTHING,
 655};
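
/*
 * Editor's example (not part of sock_map.c): a stream verdict program that
 * redirects received skbs to the socket stored in the map, exercising
 * bpf_sk_redirect_map() above. Continues the sockops sketch (same includes
 * and sock_map declaration); key 0 is an assumption.
 */
SEC("sk_skb/stream_verdict")
int sockmap_redirect_skb(struct __sk_buff *skb)
{
	__u32 key = 0;

	/* flags may only carry BPF_F_INGRESS; 0 redirects to the egress
	 * path of the target socket. */
	return bpf_sk_redirect_map(skb, &sock_map, key, 0);
}
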
 656
 657BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
 658	   struct bpf_map *, map, u32, key, u64, flags)
 659{
 660	struct sock *sk;
 661
 662	if (unlikely(flags & ~(BPF_F_INGRESS)))
 663		return SK_DROP;
 664
 665	sk = __sock_map_lookup_elem(map, key);
 666	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
 667		return SK_DROP;
 668
 669	msg->flags = flags;
 670	msg->sk_redir = sk;
 671	return SK_PASS;
 672}
 673
 674const struct bpf_func_proto bpf_msg_redirect_map_proto = {
 675	.func           = bpf_msg_redirect_map,
 676	.gpl_only       = false,
 677	.ret_type       = RET_INTEGER,
 678	.arg1_type	= ARG_PTR_TO_CTX,
 679	.arg2_type      = ARG_CONST_MAP_PTR,
 680	.arg3_type      = ARG_ANYTHING,
 681	.arg4_type      = ARG_ANYTHING,
 682};
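
/*
 * Editor's example (not part of sock_map.c): the sendmsg-side counterpart,
 * redirecting sk_msg data through bpf_msg_redirect_map() above. Again a
 * sketch reusing the sock_map from the sockops example.
 */
SEC("sk_msg")
int sockmap_redirect_msg(struct sk_msg_md *msg)
{
	__u32 key = 0;

	/* BPF_F_INGRESS queues the data on the target's receive side. */
	return bpf_msg_redirect_map(msg, &sock_map, key, BPF_F_INGRESS);
}
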
 683
 684static int sock_map_btf_id;
 685const struct bpf_map_ops sock_map_ops = {
 686	.map_alloc		= sock_map_alloc,
 687	.map_free		= sock_map_free,
 688	.map_get_next_key	= sock_map_get_next_key,
 689	.map_lookup_elem_sys_only = sock_map_lookup_sys,
 690	.map_update_elem	= sock_map_update_elem,
 691	.map_delete_elem	= sock_map_delete_elem,
 692	.map_lookup_elem	= sock_map_lookup,
 693	.map_release_uref	= sock_map_release_progs,
 694	.map_check_btf		= map_check_no_btf,
 695	.map_btf_name		= "bpf_stab",
 696	.map_btf_id		= &sock_map_btf_id,
 697};
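
/*
 * Editor's example (not part of sock_map.c): creating a sockmap and inserting
 * a socket from user space, the path that lands in sock_map_update_elem()
 * above. A minimal sketch assuming libbpf v0.7+ (for bpf_map_create()); the
 * value written is the socket's file descriptor.
 */
#include <bpf/bpf.h>

int sockmap_create_and_add(int sock_fd)
{
	__u32 key = 0;
	__u64 value = sock_fd;
	int map_fd;

	map_fd = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, "sock_map",
				sizeof(__u32), sizeof(__u64), 1024, NULL);
	if (map_fd < 0)
		return map_fd;
	/* The socket must pass sock_map_sk_state_allowed() above: an
	 * established or listening TCP socket, or a hashed UDP socket. */
	return bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
}
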
 698
 699struct bpf_shtab_elem {
 700	struct rcu_head rcu;
 701	u32 hash;
 702	struct sock *sk;
 703	struct hlist_node node;
 704	u8 key[];
 705};
 706
 707struct bpf_shtab_bucket {
 708	struct hlist_head head;
 709	raw_spinlock_t lock;
 710};
 711
 712struct bpf_shtab {
 713	struct bpf_map map;
 714	struct bpf_shtab_bucket *buckets;
 715	u32 buckets_num;
 716	u32 elem_size;
 717	struct sk_psock_progs progs;
 718	atomic_t count;
 719};
 720
 721static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
 722{
 723	return jhash(key, len, 0);
 724}
 725
 726static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab,
 727							u32 hash)
 728{
 729	return &htab->buckets[hash & (htab->buckets_num - 1)];
 730}
 731
 732static struct bpf_shtab_elem *
 733sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
 734			  u32 key_size)
 735{
 736	struct bpf_shtab_elem *elem;
 737
 738	hlist_for_each_entry_rcu(elem, head, node) {
 739		if (elem->hash == hash &&
 740		    !memcmp(&elem->key, key, key_size))
 741			return elem;
 742	}
 743
 744	return NULL;
 745}
 746
 747static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
 748{
 749	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 750	u32 key_size = map->key_size, hash;
 751	struct bpf_shtab_bucket *bucket;
 752	struct bpf_shtab_elem *elem;
 753
 754	WARN_ON_ONCE(!rcu_read_lock_held());
 755
 756	hash = sock_hash_bucket_hash(key, key_size);
 757	bucket = sock_hash_select_bucket(htab, hash);
 758	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
 759
 760	return elem ? elem->sk : NULL;
 761}
 762
 763static void sock_hash_free_elem(struct bpf_shtab *htab,
 764				struct bpf_shtab_elem *elem)
 765{
 766	atomic_dec(&htab->count);
 767	kfree_rcu(elem, rcu);
 768}
 769
 770static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
 771				       void *link_raw)
 772{
 773	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 774	struct bpf_shtab_elem *elem_probe, *elem = link_raw;
 775	struct bpf_shtab_bucket *bucket;
 776
 777	WARN_ON_ONCE(!rcu_read_lock_held());
 778	bucket = sock_hash_select_bucket(htab, elem->hash);
 779
 780	/* elem may be deleted in parallel from the map, but access here
 781	 * is okay since it's going away only after RCU grace period.
 782	 * However, we need to check whether it's still present.
 783	 */
 784	raw_spin_lock_bh(&bucket->lock);
 785	elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
 786					       elem->key, map->key_size);
 787	if (elem_probe && elem_probe == elem) {
 788		hlist_del_rcu(&elem->node);
 789		sock_map_unref(elem->sk, elem);
 790		sock_hash_free_elem(htab, elem);
 791	}
 792	raw_spin_unlock_bh(&bucket->lock);
 793}
 794
 795static int sock_hash_delete_elem(struct bpf_map *map, void *key)
 796{
 797	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 798	u32 hash, key_size = map->key_size;
 799	struct bpf_shtab_bucket *bucket;
 800	struct bpf_shtab_elem *elem;
 801	int ret = -ENOENT;
 802
 803	hash = sock_hash_bucket_hash(key, key_size);
 804	bucket = sock_hash_select_bucket(htab, hash);
 805
 806	raw_spin_lock_bh(&bucket->lock);
 807	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
 808	if (elem) {
 809		hlist_del_rcu(&elem->node);
 810		sock_map_unref(elem->sk, elem);
 811		sock_hash_free_elem(htab, elem);
 812		ret = 0;
 813	}
 814	raw_spin_unlock_bh(&bucket->lock);
 815	return ret;
 816}
 817
 818static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
 819						   void *key, u32 key_size,
 820						   u32 hash, struct sock *sk,
 821						   struct bpf_shtab_elem *old)
 822{
 823	struct bpf_shtab_elem *new;
 824
 825	if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
 826		if (!old) {
 827			atomic_dec(&htab->count);
 828			return ERR_PTR(-E2BIG);
 829		}
 830	}
 831
 832	new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
 833			   htab->map.numa_node);
 834	if (!new) {
 835		atomic_dec(&htab->count);
 836		return ERR_PTR(-ENOMEM);
 837	}
 838	memcpy(new->key, key, key_size);
 839	new->sk = sk;
 840	new->hash = hash;
 841	return new;
 842}
 843
 844static int sock_hash_update_common(struct bpf_map *map, void *key,
 845				   struct sock *sk, u64 flags)
 846{
 847	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 848	u32 key_size = map->key_size, hash;
 849	struct bpf_shtab_elem *elem, *elem_new;
 850	struct bpf_shtab_bucket *bucket;
 851	struct sk_psock_link *link;
 852	struct sk_psock *psock;
 853	int ret;
 854
 855	WARN_ON_ONCE(!rcu_read_lock_held());
 856	if (unlikely(flags > BPF_EXIST))
 857		return -EINVAL;
 858	if (inet_csk_has_ulp(sk))
 859		return -EINVAL;
 860
 861	link = sk_psock_init_link();
 862	if (!link)
 863		return -ENOMEM;
 864
 865	/* Only sockets we can redirect into/from in BPF need to hold
 866	 * refs to parser/verdict progs and have their sk_data_ready
 867	 * and sk_write_space callbacks overridden.
 868	 */
 869	if (sock_map_redirect_allowed(sk))
 870		ret = sock_map_link(map, &htab->progs, sk);
 871	else
 872		ret = sock_map_link_no_progs(map, sk);
 873	if (ret < 0)
 874		goto out_free;
 875
 876	psock = sk_psock(sk);
 877	WARN_ON_ONCE(!psock);
 878
 879	hash = sock_hash_bucket_hash(key, key_size);
 880	bucket = sock_hash_select_bucket(htab, hash);
 881
 882	raw_spin_lock_bh(&bucket->lock);
 883	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
 884	if (elem && flags == BPF_NOEXIST) {
 885		ret = -EEXIST;
 886		goto out_unlock;
 887	} else if (!elem && flags == BPF_EXIST) {
 888		ret = -ENOENT;
 889		goto out_unlock;
 890	}
 891
 892	elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
 893	if (IS_ERR(elem_new)) {
 894		ret = PTR_ERR(elem_new);
 895		goto out_unlock;
 896	}
 897
 898	sock_map_add_link(psock, link, map, elem_new);
 899	/* Add new element to the head of the list, so that
 900	 * concurrent search will find it before old elem.
 901	 */
 902	hlist_add_head_rcu(&elem_new->node, &bucket->head);
 903	if (elem) {
 904		hlist_del_rcu(&elem->node);
 905		sock_map_unref(elem->sk, elem);
 906		sock_hash_free_elem(htab, elem);
 907	}
 908	raw_spin_unlock_bh(&bucket->lock);
 909	return 0;
 910out_unlock:
 911	raw_spin_unlock_bh(&bucket->lock);
 912	sk_psock_put(sk, psock);
 913out_free:
 914	sk_psock_free_link(link);
 915	return ret;
 916}
 917
 918static int sock_hash_update_elem(struct bpf_map *map, void *key,
 919				 void *value, u64 flags)
 920{
 921	struct socket *sock;
 922	struct sock *sk;
 923	int ret;
 924	u64 ufd;
 925
 926	if (map->value_size == sizeof(u64))
 927		ufd = *(u64 *)value;
 928	else
 929		ufd = *(u32 *)value;
 930	if (ufd > S32_MAX)
 931		return -EINVAL;
 932
 933	sock = sockfd_lookup(ufd, &ret);
 934	if (!sock)
 935		return ret;
 936	sk = sock->sk;
 937	if (!sk) {
 938		ret = -EINVAL;
 939		goto out;
 940	}
 941	if (!sock_map_sk_is_suitable(sk)) {
 942		ret = -EOPNOTSUPP;
 943		goto out;
 944	}
 945
 946	sock_map_sk_acquire(sk);
 947	if (!sock_map_sk_state_allowed(sk))
 948		ret = -EOPNOTSUPP;
 949	else
 950		ret = sock_hash_update_common(map, key, sk, flags);
 951	sock_map_sk_release(sk);
 952out:
 953	fput(sock->file);
 954	return ret;
 955}
 956
 957static int sock_hash_get_next_key(struct bpf_map *map, void *key,
 958				  void *key_next)
 959{
 960	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 961	struct bpf_shtab_elem *elem, *elem_next;
 962	u32 hash, key_size = map->key_size;
 963	struct hlist_head *head;
 964	int i = 0;
 965
 966	if (!key)
 967		goto find_first_elem;
 968	hash = sock_hash_bucket_hash(key, key_size);
 969	head = &sock_hash_select_bucket(htab, hash)->head;
 970	elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
 971	if (!elem)
 972		goto find_first_elem;
 973
 974	elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&elem->node)),
 975				     struct bpf_shtab_elem, node);
 976	if (elem_next) {
 977		memcpy(key_next, elem_next->key, key_size);
 978		return 0;
 979	}
 980
 981	i = hash & (htab->buckets_num - 1);
 982	i++;
 983find_first_elem:
 984	for (; i < htab->buckets_num; i++) {
 985		head = &sock_hash_select_bucket(htab, i)->head;
 986		elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
 987					     struct bpf_shtab_elem, node);
 988		if (elem_next) {
 989			memcpy(key_next, elem_next->key, key_size);
 990			return 0;
 991		}
 992	}
 993
 994	return -ENOENT;
 995}
 996
 997static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
 998{
 999	struct bpf_shtab *htab;
1000	int i, err;
1001	u64 cost;
1002
1003	if (!capable(CAP_NET_ADMIN))
1004		return ERR_PTR(-EPERM);
1005	if (attr->max_entries == 0 ||
1006	    attr->key_size    == 0 ||
1007	    (attr->value_size != sizeof(u32) &&
1008	     attr->value_size != sizeof(u64)) ||
1009	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
1010		return ERR_PTR(-EINVAL);
1011	if (attr->key_size > MAX_BPF_STACK)
1012		return ERR_PTR(-E2BIG);
1013
1014	htab = kzalloc(sizeof(*htab), GFP_USER);
1015	if (!htab)
1016		return ERR_PTR(-ENOMEM);
1017
1018	bpf_map_init_from_attr(&htab->map, attr);
1019
1020	htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
1021	htab->elem_size = sizeof(struct bpf_shtab_elem) +
1022			  round_up(htab->map.key_size, 8);
1023	if (htab->buckets_num == 0 ||
1024	    htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) {
1025		err = -EINVAL;
1026		goto free_htab;
1027	}
1028
1029	cost = (u64) htab->buckets_num * sizeof(struct bpf_shtab_bucket) +
1030	       (u64) htab->elem_size * htab->map.max_entries;
1031	if (cost >= U32_MAX - PAGE_SIZE) {
1032		err = -EINVAL;
1033		goto free_htab;
1034	}
1035	err = bpf_map_charge_init(&htab->map.memory, cost);
1036	if (err)
1037		goto free_htab;
1038
1039	htab->buckets = bpf_map_area_alloc(htab->buckets_num *
1040					   sizeof(struct bpf_shtab_bucket),
1041					   htab->map.numa_node);
1042	if (!htab->buckets) {
1043		bpf_map_charge_finish(&htab->map.memory);
1044		err = -ENOMEM;
1045		goto free_htab;
1046	}
1047
1048	for (i = 0; i < htab->buckets_num; i++) {
1049		INIT_HLIST_HEAD(&htab->buckets[i].head);
1050		raw_spin_lock_init(&htab->buckets[i].lock);
1051	}
1052
1053	return &htab->map;
1054free_htab:
1055	kfree(htab);
1056	return ERR_PTR(err);
1057}
1058
1059static void sock_hash_free(struct bpf_map *map)
1060{
1061	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1062	struct bpf_shtab_bucket *bucket;
1063	struct hlist_head unlink_list;
1064	struct bpf_shtab_elem *elem;
1065	struct hlist_node *node;
1066	int i;
1067
 1068	/* After the sync no updates or deletes will be in-flight, so it
 1069	 * is safe to walk the map and remove entries without risking a
 1070	 * race in the EEXIST update case.
 1071	 */
1072	synchronize_rcu();
1073	for (i = 0; i < htab->buckets_num; i++) {
1074		bucket = sock_hash_select_bucket(htab, i);
1075
 1076		/* We are racing with sock_hash_delete_from_link to
 1077		 * enter the spin-lock critical section. Every socket on
 1078		 * the list is still linked to the sockhash. Since the
 1079		 * link exists, the psock exists and holds a ref to the
 1080		 * socket. That lets us grab a socket ref too.
 1081		 */
1082		raw_spin_lock_bh(&bucket->lock);
1083		hlist_for_each_entry(elem, &bucket->head, node)
1084			sock_hold(elem->sk);
1085		hlist_move_list(&bucket->head, &unlink_list);
1086		raw_spin_unlock_bh(&bucket->lock);
1087
1088		/* Process removed entries out of atomic context to
1089		 * block for socket lock before deleting the psock's
1090		 * link to sockhash.
1091		 */
1092		hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
1093			hlist_del(&elem->node);
1094			lock_sock(elem->sk);
1095			rcu_read_lock();
1096			sock_map_unref(elem->sk, elem);
1097			rcu_read_unlock();
1098			release_sock(elem->sk);
1099			sock_put(elem->sk);
1100			sock_hash_free_elem(htab, elem);
1101		}
1102	}
1103
1104	/* wait for psock readers accessing its map link */
1105	synchronize_rcu();
1106
1107	bpf_map_area_free(htab->buckets);
1108	kfree(htab);
1109}
1110
1111static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
1112{
1113	struct sock *sk;
1114
1115	if (map->value_size != sizeof(u64))
1116		return ERR_PTR(-ENOSPC);
1117
1118	sk = __sock_hash_lookup_elem(map, key);
1119	if (!sk)
1120		return ERR_PTR(-ENOENT);
1121
1122	sock_gen_cookie(sk);
1123	return &sk->sk_cookie;
1124}
1125
1126static void *sock_hash_lookup(struct bpf_map *map, void *key)
1127{
1128	struct sock *sk;
1129
1130	sk = __sock_hash_lookup_elem(map, key);
1131	if (!sk || !sk_fullsock(sk))
1132		return NULL;
1133	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
1134		return NULL;
1135	return sk;
1136}
1137
1138static void sock_hash_release_progs(struct bpf_map *map)
1139{
1140	psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs);
1141}
1142
1143BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
1144	   struct bpf_map *, map, void *, key, u64, flags)
1145{
1146	WARN_ON_ONCE(!rcu_read_lock_held());
1147
1148	if (likely(sock_map_sk_is_suitable(sops->sk) &&
1149		   sock_map_op_okay(sops)))
1150		return sock_hash_update_common(map, key, sops->sk, flags);
1151	return -EOPNOTSUPP;
1152}
1153
1154const struct bpf_func_proto bpf_sock_hash_update_proto = {
1155	.func		= bpf_sock_hash_update,
1156	.gpl_only	= false,
1157	.pkt_access	= true,
1158	.ret_type	= RET_INTEGER,
1159	.arg1_type	= ARG_PTR_TO_CTX,
1160	.arg2_type	= ARG_CONST_MAP_PTR,
1161	.arg3_type	= ARG_PTR_TO_MAP_KEY,
1162	.arg4_type	= ARG_ANYTHING,
1163};
1164
1165BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
1166	   struct bpf_map *, map, void *, key, u64, flags)
1167{
1168	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
1169	struct sock *sk;
1170
1171	if (unlikely(flags & ~(BPF_F_INGRESS)))
1172		return SK_DROP;
1173
1174	sk = __sock_hash_lookup_elem(map, key);
1175	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1176		return SK_DROP;
1177
1178	tcb->bpf.flags = flags;
1179	tcb->bpf.sk_redir = sk;
1180	return SK_PASS;
1181}
1182
1183const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
1184	.func           = bpf_sk_redirect_hash,
1185	.gpl_only       = false,
1186	.ret_type       = RET_INTEGER,
1187	.arg1_type	= ARG_PTR_TO_CTX,
1188	.arg2_type      = ARG_CONST_MAP_PTR,
1189	.arg3_type      = ARG_PTR_TO_MAP_KEY,
1190	.arg4_type      = ARG_ANYTHING,
1191};
1192
1193BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
1194	   struct bpf_map *, map, void *, key, u64, flags)
1195{
1196	struct sock *sk;
1197
1198	if (unlikely(flags & ~(BPF_F_INGRESS)))
1199		return SK_DROP;
1200
1201	sk = __sock_hash_lookup_elem(map, key);
1202	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1203		return SK_DROP;
1204
1205	msg->flags = flags;
1206	msg->sk_redir = sk;
1207	return SK_PASS;
1208}
1209
1210const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
1211	.func           = bpf_msg_redirect_hash,
1212	.gpl_only       = false,
1213	.ret_type       = RET_INTEGER,
1214	.arg1_type	= ARG_PTR_TO_CTX,
1215	.arg2_type      = ARG_CONST_MAP_PTR,
1216	.arg3_type      = ARG_PTR_TO_MAP_KEY,
1217	.arg4_type      = ARG_ANYTHING,
1218};
1219
1220static int sock_hash_map_btf_id;
1221const struct bpf_map_ops sock_hash_ops = {
1222	.map_alloc		= sock_hash_alloc,
1223	.map_free		= sock_hash_free,
1224	.map_get_next_key	= sock_hash_get_next_key,
1225	.map_update_elem	= sock_hash_update_elem,
1226	.map_delete_elem	= sock_hash_delete_elem,
1227	.map_lookup_elem	= sock_hash_lookup,
1228	.map_lookup_elem_sys_only = sock_hash_lookup_sys,
1229	.map_release_uref	= sock_hash_release_progs,
1230	.map_check_btf		= map_check_no_btf,
1231	.map_btf_name		= "bpf_shtab",
1232	.map_btf_id		= &sock_hash_map_btf_id,
1233};
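
/*
 * Editor's example (not part of sock_map.c): a sockhash differs from a
 * sockmap in taking arbitrary keys (up to MAX_BPF_STACK bytes, enforced in
 * sock_hash_alloc() above) instead of a u32 index. A sketch assuming libbpf
 * v0.7+ as before; the 4-tuple key layout is an assumption for illustration,
 * not a kernel-defined structure.
 */
#include <bpf/bpf.h>

struct sock_key {
	__u32 remote_ip4;
	__u32 local_ip4;
	__u32 remote_port;
	__u32 local_port;
};

int sockhash_create(void)
{
	return bpf_map_create(BPF_MAP_TYPE_SOCKHASH, "sock_hash",
			      sizeof(struct sock_key), sizeof(__u64),
			      65536, NULL);
}
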
1234
1235static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
1236{
1237	switch (map->map_type) {
1238	case BPF_MAP_TYPE_SOCKMAP:
1239		return &container_of(map, struct bpf_stab, map)->progs;
1240	case BPF_MAP_TYPE_SOCKHASH:
1241		return &container_of(map, struct bpf_shtab, map)->progs;
1242	default:
1243		break;
1244	}
1245
1246	return NULL;
1247}
1248
1249int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
1250			 struct bpf_prog *old, u32 which)
1251{
1252	struct sk_psock_progs *progs = sock_map_progs(map);
1253	struct bpf_prog **pprog;
1254
1255	if (!progs)
1256		return -EOPNOTSUPP;
1257
1258	switch (which) {
1259	case BPF_SK_MSG_VERDICT:
1260		pprog = &progs->msg_parser;
1261		break;
1262	case BPF_SK_SKB_STREAM_PARSER:
1263		pprog = &progs->skb_parser;
1264		break;
1265	case BPF_SK_SKB_STREAM_VERDICT:
1266		pprog = &progs->skb_verdict;
1267		break;
1268	default:
1269		return -EOPNOTSUPP;
1270	}
1271
1272	if (old)
1273		return psock_replace_prog(pprog, prog, old);
1274
1275	psock_set_prog(pprog, prog);
1276	return 0;
1277}
1278
1279static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link)
1280{
1281	switch (link->map->map_type) {
1282	case BPF_MAP_TYPE_SOCKMAP:
1283		return sock_map_delete_from_link(link->map, sk,
1284						 link->link_raw);
1285	case BPF_MAP_TYPE_SOCKHASH:
1286		return sock_hash_delete_from_link(link->map, sk,
1287						  link->link_raw);
1288	default:
1289		break;
1290	}
1291}
1292
1293static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock)
1294{
1295	struct sk_psock_link *link;
1296
1297	while ((link = sk_psock_link_pop(psock))) {
1298		sock_map_unlink(sk, link);
1299		sk_psock_free_link(link);
1300	}
1301}
1302
1303void sock_map_unhash(struct sock *sk)
1304{
1305	void (*saved_unhash)(struct sock *sk);
1306	struct sk_psock *psock;
1307
1308	rcu_read_lock();
1309	psock = sk_psock(sk);
1310	if (unlikely(!psock)) {
1311		rcu_read_unlock();
1312		if (sk->sk_prot->unhash)
1313			sk->sk_prot->unhash(sk);
1314		return;
1315	}
1316
1317	saved_unhash = psock->saved_unhash;
1318	sock_map_remove_links(sk, psock);
1319	rcu_read_unlock();
1320	saved_unhash(sk);
1321}
1322
1323void sock_map_close(struct sock *sk, long timeout)
1324{
1325	void (*saved_close)(struct sock *sk, long timeout);
1326	struct sk_psock *psock;
1327
1328	lock_sock(sk);
1329	rcu_read_lock();
1330	psock = sk_psock(sk);
1331	if (unlikely(!psock)) {
1332		rcu_read_unlock();
1333		release_sock(sk);
1334		return sk->sk_prot->close(sk, timeout);
1335	}
1336
1337	saved_close = psock->saved_close;
1338	sock_map_remove_links(sk, psock);
1339	rcu_read_unlock();
1340	release_sock(sk);
1341	saved_close(sk, timeout);
1342}
net/core/sock_map.c (v6.9.4)
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
   3
   4#include <linux/bpf.h>
   5#include <linux/btf_ids.h>
   6#include <linux/filter.h>
   7#include <linux/errno.h>
   8#include <linux/file.h>
   9#include <linux/net.h>
  10#include <linux/workqueue.h>
  11#include <linux/skmsg.h>
  12#include <linux/list.h>
  13#include <linux/jhash.h>
  14#include <linux/sock_diag.h>
  15#include <net/udp.h>
  16
  17struct bpf_stab {
  18	struct bpf_map map;
  19	struct sock **sks;
  20	struct sk_psock_progs progs;
  21	spinlock_t lock;
  22};
  23
  24#define SOCK_CREATE_FLAG_MASK				\
  25	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
  26
  27static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
  28				struct bpf_prog *old, u32 which);
  29static struct sk_psock_progs *sock_map_progs(struct bpf_map *map);
  30
  31static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
  32{
  33	struct bpf_stab *stab;
  34
  35	if (attr->max_entries == 0 ||
  36	    attr->key_size    != 4 ||
  37	    (attr->value_size != sizeof(u32) &&
  38	     attr->value_size != sizeof(u64)) ||
  39	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
  40		return ERR_PTR(-EINVAL);
  41
  42	stab = bpf_map_area_alloc(sizeof(*stab), NUMA_NO_NODE);
  43	if (!stab)
  44		return ERR_PTR(-ENOMEM);
  45
  46	bpf_map_init_from_attr(&stab->map, attr);
  47	spin_lock_init(&stab->lock);
  48
  49	stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
  50				       sizeof(struct sock *),
  51				       stab->map.numa_node);
  52	if (!stab->sks) {
  53		bpf_map_area_free(stab);
  54		return ERR_PTR(-ENOMEM);
  55	}
  56
  57	return &stab->map;
  58}
  59
  60int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
  61{
  62	u32 ufd = attr->target_fd;
  63	struct bpf_map *map;
  64	struct fd f;
  65	int ret;
  66
  67	if (attr->attach_flags || attr->replace_bpf_fd)
  68		return -EINVAL;
  69
  70	f = fdget(ufd);
  71	map = __bpf_map_get(f);
  72	if (IS_ERR(map))
  73		return PTR_ERR(map);
  74	ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
  75	fdput(f);
  76	return ret;
  77}
  78
  79int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
  80{
  81	u32 ufd = attr->target_fd;
  82	struct bpf_prog *prog;
  83	struct bpf_map *map;
  84	struct fd f;
  85	int ret;
  86
  87	if (attr->attach_flags || attr->replace_bpf_fd)
  88		return -EINVAL;
  89
  90	f = fdget(ufd);
  91	map = __bpf_map_get(f);
  92	if (IS_ERR(map))
  93		return PTR_ERR(map);
  94
  95	prog = bpf_prog_get(attr->attach_bpf_fd);
  96	if (IS_ERR(prog)) {
  97		ret = PTR_ERR(prog);
  98		goto put_map;
  99	}
 100
 101	if (prog->type != ptype) {
 102		ret = -EINVAL;
 103		goto put_prog;
 104	}
 105
 106	ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
 107put_prog:
 108	bpf_prog_put(prog);
 109put_map:
 110	fdput(f);
 111	return ret;
 112}
 113
 114static void sock_map_sk_acquire(struct sock *sk)
 115	__acquires(&sk->sk_lock.slock)
 116{
 117	lock_sock(sk);
 118	rcu_read_lock();
 119}
 120
 121static void sock_map_sk_release(struct sock *sk)
 122	__releases(&sk->sk_lock.slock)
 123{
 124	rcu_read_unlock();
 125	release_sock(sk);
 126}
 127
 128static void sock_map_add_link(struct sk_psock *psock,
 129			      struct sk_psock_link *link,
 130			      struct bpf_map *map, void *link_raw)
 131{
 132	link->link_raw = link_raw;
 133	link->map = map;
 134	spin_lock_bh(&psock->link_lock);
 135	list_add_tail(&link->list, &psock->link);
 136	spin_unlock_bh(&psock->link_lock);
 137}
 138
 139static void sock_map_del_link(struct sock *sk,
 140			      struct sk_psock *psock, void *link_raw)
 141{
 142	bool strp_stop = false, verdict_stop = false;
 143	struct sk_psock_link *link, *tmp;
 144
 145	spin_lock_bh(&psock->link_lock);
 146	list_for_each_entry_safe(link, tmp, &psock->link, list) {
 147		if (link->link_raw == link_raw) {
 148			struct bpf_map *map = link->map;
 149			struct sk_psock_progs *progs = sock_map_progs(map);
 150
 151			if (psock->saved_data_ready && progs->stream_parser)
 152				strp_stop = true;
 153			if (psock->saved_data_ready && progs->stream_verdict)
 154				verdict_stop = true;
 155			if (psock->saved_data_ready && progs->skb_verdict)
 156				verdict_stop = true;
 157			list_del(&link->list);
 158			sk_psock_free_link(link);
 159		}
 160	}
 161	spin_unlock_bh(&psock->link_lock);
 162	if (strp_stop || verdict_stop) {
 163		write_lock_bh(&sk->sk_callback_lock);
 164		if (strp_stop)
 165			sk_psock_stop_strp(sk, psock);
 166		if (verdict_stop)
 167			sk_psock_stop_verdict(sk, psock);
 168
 169		if (psock->psock_update_sk_prot)
 170			psock->psock_update_sk_prot(sk, psock, false);
 171		write_unlock_bh(&sk->sk_callback_lock);
 172	}
 173}
 174
 175static void sock_map_unref(struct sock *sk, void *link_raw)
 176{
 177	struct sk_psock *psock = sk_psock(sk);
 178
 179	if (likely(psock)) {
 180		sock_map_del_link(sk, psock, link_raw);
 181		sk_psock_put(sk, psock);
 182	}
 183}
 184
 185static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
 186{
 187	if (!sk->sk_prot->psock_update_sk_prot)
 188		return -EINVAL;
 189	psock->psock_update_sk_prot = sk->sk_prot->psock_update_sk_prot;
 190	return sk->sk_prot->psock_update_sk_prot(sk, psock, false);
 191}
 192
 193static struct sk_psock *sock_map_psock_get_checked(struct sock *sk)
 194{
 195	struct sk_psock *psock;
 196
 197	rcu_read_lock();
 198	psock = sk_psock(sk);
 199	if (psock) {
 200		if (sk->sk_prot->close != sock_map_close) {
 201			psock = ERR_PTR(-EBUSY);
 202			goto out;
 203		}
 204
 205		if (!refcount_inc_not_zero(&psock->refcnt))
 206			psock = ERR_PTR(-EBUSY);
 207	}
 208out:
 209	rcu_read_unlock();
 210	return psock;
 211}
 212
 213static int sock_map_link(struct bpf_map *map, struct sock *sk)
 214{
 215	struct sk_psock_progs *progs = sock_map_progs(map);
 216	struct bpf_prog *stream_verdict = NULL;
 217	struct bpf_prog *stream_parser = NULL;
 218	struct bpf_prog *skb_verdict = NULL;
 219	struct bpf_prog *msg_parser = NULL;
 220	struct sk_psock *psock;
 221	int ret;
 222
 223	stream_verdict = READ_ONCE(progs->stream_verdict);
 224	if (stream_verdict) {
 225		stream_verdict = bpf_prog_inc_not_zero(stream_verdict);
 226		if (IS_ERR(stream_verdict))
 227			return PTR_ERR(stream_verdict);
 228	}
 229
 230	stream_parser = READ_ONCE(progs->stream_parser);
 231	if (stream_parser) {
 232		stream_parser = bpf_prog_inc_not_zero(stream_parser);
 233		if (IS_ERR(stream_parser)) {
 234			ret = PTR_ERR(stream_parser);
 235			goto out_put_stream_verdict;
 236		}
 237	}
 238
 239	msg_parser = READ_ONCE(progs->msg_parser);
 240	if (msg_parser) {
 241		msg_parser = bpf_prog_inc_not_zero(msg_parser);
 242		if (IS_ERR(msg_parser)) {
 243			ret = PTR_ERR(msg_parser);
 244			goto out_put_stream_parser;
 245		}
 246	}
 247
 248	skb_verdict = READ_ONCE(progs->skb_verdict);
 249	if (skb_verdict) {
 250		skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
 251		if (IS_ERR(skb_verdict)) {
 252			ret = PTR_ERR(skb_verdict);
 253			goto out_put_msg_parser;
 254		}
 255	}
 256
 257	psock = sock_map_psock_get_checked(sk);
 258	if (IS_ERR(psock)) {
 259		ret = PTR_ERR(psock);
 260		goto out_progs;
 261	}
 262
 263	if (psock) {
 264		if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
 265		    (stream_parser  && READ_ONCE(psock->progs.stream_parser)) ||
 266		    (skb_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
 267		    (skb_verdict && READ_ONCE(psock->progs.stream_verdict)) ||
 268		    (stream_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
 269		    (stream_verdict && READ_ONCE(psock->progs.stream_verdict))) {
 270			sk_psock_put(sk, psock);
 271			ret = -EBUSY;
 272			goto out_progs;
 273		}
 274	} else {
 275		psock = sk_psock_init(sk, map->numa_node);
 276		if (IS_ERR(psock)) {
 277			ret = PTR_ERR(psock);
 278			goto out_progs;
 279		}
 280	}
 281
 282	if (msg_parser)
 283		psock_set_prog(&psock->progs.msg_parser, msg_parser);
 284	if (stream_parser)
 285		psock_set_prog(&psock->progs.stream_parser, stream_parser);
 286	if (stream_verdict)
 287		psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
 288	if (skb_verdict)
 289		psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
 290
  291	/* msg_* and stream_* program references are tracked in psock after this
  292	 * point. Reference drop and cleanup will occur through the psock destructor.
  293	 */
 294	ret = sock_map_init_proto(sk, psock);
 295	if (ret < 0) {
 296		sk_psock_put(sk, psock);
 297		goto out;
 298	}
 299
 300	write_lock_bh(&sk->sk_callback_lock);
 301	if (stream_parser && stream_verdict && !psock->saved_data_ready) {
 302		ret = sk_psock_init_strp(sk, psock);
 303		if (ret) {
 304			write_unlock_bh(&sk->sk_callback_lock);
 305			sk_psock_put(sk, psock);
 306			goto out;
 307		}
 308		sk_psock_start_strp(sk, psock);
 309	} else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
  310		sk_psock_start_verdict(sk, psock);
 311	} else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) {
 312		sk_psock_start_verdict(sk, psock);
 313	}
 314	write_unlock_bh(&sk->sk_callback_lock);
 315	return 0;
 316out_progs:
 317	if (skb_verdict)
 318		bpf_prog_put(skb_verdict);
 319out_put_msg_parser:
 320	if (msg_parser)
 321		bpf_prog_put(msg_parser);
 322out_put_stream_parser:
 323	if (stream_parser)
 324		bpf_prog_put(stream_parser);
 325out_put_stream_verdict:
 326	if (stream_verdict)
 327		bpf_prog_put(stream_verdict);
 328out:
 329	return ret;
 330}
 331
 332static void sock_map_free(struct bpf_map *map)
 333{
 334	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 335	int i;
 336
  337	/* After the sync no updates or deletes will be in-flight, so it
  338	 * is safe to walk the map and remove entries without risking a
  339	 * race in the EEXIST update case.
  340	 */
 341	synchronize_rcu();
 342	for (i = 0; i < stab->map.max_entries; i++) {
 343		struct sock **psk = &stab->sks[i];
 344		struct sock *sk;
 345
 346		sk = xchg(psk, NULL);
 347		if (sk) {
 348			sock_hold(sk);
 349			lock_sock(sk);
 350			rcu_read_lock();
 351			sock_map_unref(sk, psk);
 352			rcu_read_unlock();
 353			release_sock(sk);
 354			sock_put(sk);
 355		}
 356	}
 357
 358	/* wait for psock readers accessing its map link */
 359	synchronize_rcu();
 360
 361	bpf_map_area_free(stab->sks);
 362	bpf_map_area_free(stab);
 363}
 364
 365static void sock_map_release_progs(struct bpf_map *map)
 366{
 367	psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
 368}
 369
 370static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
 371{
 372	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 373
 374	WARN_ON_ONCE(!rcu_read_lock_held());
 375
 376	if (unlikely(key >= map->max_entries))
 377		return NULL;
 378	return READ_ONCE(stab->sks[key]);
 379}
 380
 381static void *sock_map_lookup(struct bpf_map *map, void *key)
 382{
 383	struct sock *sk;
 384
 385	sk = __sock_map_lookup_elem(map, *(u32 *)key);
 386	if (!sk)
 387		return NULL;
 388	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
 389		return NULL;
 390	return sk;
 391}
 392
 393static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
 394{
 395	struct sock *sk;
 396
 397	if (map->value_size != sizeof(u64))
 398		return ERR_PTR(-ENOSPC);
 399
 400	sk = __sock_map_lookup_elem(map, *(u32 *)key);
 401	if (!sk)
 402		return ERR_PTR(-ENOENT);
 403
 404	__sock_gen_cookie(sk);
 405	return &sk->sk_cookie;
 406}
 407
 408static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
 409			     struct sock **psk)
 410{
 411	struct sock *sk;
 412	int err = 0;
 413
 414	if (irqs_disabled())
 415		return -EOPNOTSUPP; /* locks here are hardirq-unsafe */
 416
 417	spin_lock_bh(&stab->lock);
 418	sk = *psk;
 419	if (!sk_test || sk_test == sk)
 420		sk = xchg(psk, NULL);
 421
 422	if (likely(sk))
 423		sock_map_unref(sk, psk);
 424	else
 425		err = -EINVAL;
 426
 427	spin_unlock_bh(&stab->lock);
 428	return err;
 429}
 430
 431static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
 432				      void *link_raw)
 433{
 434	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 435
 436	__sock_map_delete(stab, sk, link_raw);
 437}
 438
 439static long sock_map_delete_elem(struct bpf_map *map, void *key)
 440{
 441	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 442	u32 i = *(u32 *)key;
 443	struct sock **psk;
 444
 445	if (unlikely(i >= map->max_entries))
 446		return -EINVAL;
 447
 448	psk = &stab->sks[i];
 449	return __sock_map_delete(stab, NULL, psk);
 450}
 451
 452static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
 453{
 454	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 455	u32 i = key ? *(u32 *)key : U32_MAX;
 456	u32 *key_next = next;
 457
 458	if (i == stab->map.max_entries - 1)
 459		return -ENOENT;
 460	if (i >= stab->map.max_entries)
 461		*key_next = 0;
 462	else
 463		*key_next = i + 1;
 464	return 0;
 465}
 466
 467static int sock_map_update_common(struct bpf_map *map, u32 idx,
 468				  struct sock *sk, u64 flags)
 469{
 470	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 471	struct sk_psock_link *link;
 472	struct sk_psock *psock;
 473	struct sock *osk;
 474	int ret;
 475
 476	WARN_ON_ONCE(!rcu_read_lock_held());
 477	if (unlikely(flags > BPF_EXIST))
 478		return -EINVAL;
 479	if (unlikely(idx >= map->max_entries))
 480		return -E2BIG;
 481
 482	link = sk_psock_init_link();
 483	if (!link)
 484		return -ENOMEM;
 485
 486	ret = sock_map_link(map, sk);
 487	if (ret < 0)
 488		goto out_free;
 489
 490	psock = sk_psock(sk);
 491	WARN_ON_ONCE(!psock);
 492
 493	spin_lock_bh(&stab->lock);
 494	osk = stab->sks[idx];
 495	if (osk && flags == BPF_NOEXIST) {
 496		ret = -EEXIST;
 497		goto out_unlock;
 498	} else if (!osk && flags == BPF_EXIST) {
 499		ret = -ENOENT;
 500		goto out_unlock;
 501	}
 502
 503	sock_map_add_link(psock, link, map, &stab->sks[idx]);
 504	stab->sks[idx] = sk;
 505	if (osk)
 506		sock_map_unref(osk, &stab->sks[idx]);
 507	spin_unlock_bh(&stab->lock);
 508	return 0;
 509out_unlock:
 510	spin_unlock_bh(&stab->lock);
 511	if (psock)
 512		sk_psock_put(sk, psock);
 513out_free:
 514	sk_psock_free_link(link);
 515	return ret;
 516}
 517
 518static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
 519{
 520	return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
 521	       ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
 522	       ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
 523}
 524
 525static bool sock_map_redirect_allowed(const struct sock *sk)
 526{
 527	if (sk_is_tcp(sk))
 528		return sk->sk_state != TCP_LISTEN;
 529	else
 530		return sk->sk_state == TCP_ESTABLISHED;
 531}
 532
 533static bool sock_map_sk_is_suitable(const struct sock *sk)
 534{
 535	return !!sk->sk_prot->psock_update_sk_prot;
 536}
 537
 538static bool sock_map_sk_state_allowed(const struct sock *sk)
 539{
 540	if (sk_is_tcp(sk))
 541		return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
 542	if (sk_is_stream_unix(sk))
 543		return (1 << sk->sk_state) & TCPF_ESTABLISHED;
 544	return true;
 545}
 546
 547static int sock_hash_update_common(struct bpf_map *map, void *key,
 548				   struct sock *sk, u64 flags);
 549
 550int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
 551			     u64 flags)
 552{
 553	struct socket *sock;
 554	struct sock *sk;
 555	int ret;
 556	u64 ufd;
 557
 558	if (map->value_size == sizeof(u64))
 559		ufd = *(u64 *)value;
 560	else
 561		ufd = *(u32 *)value;
 562	if (ufd > S32_MAX)
 563		return -EINVAL;
 564
 565	sock = sockfd_lookup(ufd, &ret);
 566	if (!sock)
 567		return ret;
 568	sk = sock->sk;
 569	if (!sk) {
 570		ret = -EINVAL;
 571		goto out;
 572	}
 573	if (!sock_map_sk_is_suitable(sk)) {
 574		ret = -EOPNOTSUPP;
 575		goto out;
 576	}
 577
 578	sock_map_sk_acquire(sk);
 579	if (!sock_map_sk_state_allowed(sk))
 580		ret = -EOPNOTSUPP;
 581	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
 582		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
 583	else
 584		ret = sock_hash_update_common(map, key, sk, flags);
 585	sock_map_sk_release(sk);
 586out:
 587	sockfd_put(sock);
 588	return ret;
 589}
 590
 591static long sock_map_update_elem(struct bpf_map *map, void *key,
 592				 void *value, u64 flags)
 593{
 594	struct sock *sk = (struct sock *)value;
 595	int ret;
 596
 597	if (unlikely(!sk || !sk_fullsock(sk)))
 598		return -EINVAL;
 599
 600	if (!sock_map_sk_is_suitable(sk))
 601		return -EOPNOTSUPP;
 602
 603	local_bh_disable();
 604	bh_lock_sock(sk);
 605	if (!sock_map_sk_state_allowed(sk))
 606		ret = -EOPNOTSUPP;
 607	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
 608		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
 609	else
 610		ret = sock_hash_update_common(map, key, sk, flags);
 611	bh_unlock_sock(sk);
 612	local_bh_enable();
 613	return ret;
 614}
 615
 616BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
 617	   struct bpf_map *, map, void *, key, u64, flags)
 618{
 619	WARN_ON_ONCE(!rcu_read_lock_held());
 620
 621	if (likely(sock_map_sk_is_suitable(sops->sk) &&
 622		   sock_map_op_okay(sops)))
 623		return sock_map_update_common(map, *(u32 *)key, sops->sk,
 624					      flags);
 625	return -EOPNOTSUPP;
 626}
 627
 628const struct bpf_func_proto bpf_sock_map_update_proto = {
 629	.func		= bpf_sock_map_update,
 630	.gpl_only	= false,
 631	.pkt_access	= true,
 632	.ret_type	= RET_INTEGER,
 633	.arg1_type	= ARG_PTR_TO_CTX,
 634	.arg2_type	= ARG_CONST_MAP_PTR,
 635	.arg3_type	= ARG_PTR_TO_MAP_KEY,
 636	.arg4_type	= ARG_ANYTHING,
 637};
 638
 639BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
 640	   struct bpf_map *, map, u32, key, u64, flags)
 641{
 642	struct sock *sk;
 643
 644	if (unlikely(flags & ~(BPF_F_INGRESS)))
 645		return SK_DROP;
 646
 647	sk = __sock_map_lookup_elem(map, key);
 648	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
 649		return SK_DROP;
 650
 651	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
 652	return SK_PASS;
 653}
 654
 655const struct bpf_func_proto bpf_sk_redirect_map_proto = {
 656	.func           = bpf_sk_redirect_map,
 657	.gpl_only       = false,
 658	.ret_type       = RET_INTEGER,
 659	.arg1_type	= ARG_PTR_TO_CTX,
 660	.arg2_type      = ARG_CONST_MAP_PTR,
 661	.arg3_type      = ARG_ANYTHING,
 662	.arg4_type      = ARG_ANYTHING,
 663};
 664
 665BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
 666	   struct bpf_map *, map, u32, key, u64, flags)
 667{
 668	struct sock *sk;
 669
 670	if (unlikely(flags & ~(BPF_F_INGRESS)))
 671		return SK_DROP;
 672
 673	sk = __sock_map_lookup_elem(map, key);
 674	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
 675		return SK_DROP;
 676	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
 677		return SK_DROP;
 678
 679	msg->flags = flags;
 680	msg->sk_redir = sk;
 681	return SK_PASS;
 682}
 683
 684const struct bpf_func_proto bpf_msg_redirect_map_proto = {
 685	.func           = bpf_msg_redirect_map,
 686	.gpl_only       = false,
 687	.ret_type       = RET_INTEGER,
 688	.arg1_type	= ARG_PTR_TO_CTX,
 689	.arg2_type      = ARG_CONST_MAP_PTR,
 690	.arg3_type      = ARG_ANYTHING,
 691	.arg4_type      = ARG_ANYTHING,
 692};
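/*
 * Sketch of the sendmsg-side counterpart using the same illustrative
 * "sock_map"; BPF_F_INGRESS redirects into the target socket's
 * receive queue instead of its transmit path (egress redirect is
 * additionally restricted to TCP sockets, per the check above):
 *
 *	SEC("sk_msg")
 *	int msg_prog(struct sk_msg_md *msg)
 *	{
 *		__u32 key = 0;
 *
 *		return bpf_msg_redirect_map(msg, &sock_map, key,
 *					    BPF_F_INGRESS);
 *	}
 */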
 693
 694struct sock_map_seq_info {
 695	struct bpf_map *map;
 696	struct sock *sk;
 697	u32 index;
 698};
 699
 700struct bpf_iter__sockmap {
 701	__bpf_md_ptr(struct bpf_iter_meta *, meta);
 702	__bpf_md_ptr(struct bpf_map *, map);
 703	__bpf_md_ptr(void *, key);
 704	__bpf_md_ptr(struct sock *, sk);
 705};
 706
 707DEFINE_BPF_ITER_FUNC(sockmap, struct bpf_iter_meta *meta,
 708		     struct bpf_map *map, void *key,
 709		     struct sock *sk)
 710
 711static void *sock_map_seq_lookup_elem(struct sock_map_seq_info *info)
 712{
 713	if (unlikely(info->index >= info->map->max_entries))
 714		return NULL;
 715
 716	info->sk = __sock_map_lookup_elem(info->map, info->index);
 717
 718	/* can't return sk directly, since that might be NULL */
 719	return info;
 720}
 721
 722static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos)
 723	__acquires(rcu)
 724{
 725	struct sock_map_seq_info *info = seq->private;
 726
 727	if (*pos == 0)
 728		++*pos;
 729
 730	/* pairs with sock_map_seq_stop */
 731	rcu_read_lock();
 732	return sock_map_seq_lookup_elem(info);
 733}
 734
 735static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 736	__must_hold(rcu)
 737{
 738	struct sock_map_seq_info *info = seq->private;
 739
 740	++*pos;
 741	++info->index;
 742
 743	return sock_map_seq_lookup_elem(info);
 744}
 745
 746static int sock_map_seq_show(struct seq_file *seq, void *v)
 747	__must_hold(rcu)
 748{
 749	struct sock_map_seq_info *info = seq->private;
 750	struct bpf_iter__sockmap ctx = {};
 751	struct bpf_iter_meta meta;
 752	struct bpf_prog *prog;
 753
 754	meta.seq = seq;
 755	prog = bpf_iter_get_info(&meta, !v);
 756	if (!prog)
 757		return 0;
 758
 759	ctx.meta = &meta;
 760	ctx.map = info->map;
 761	if (v) {
 762		ctx.key = &info->index;
 763		ctx.sk = info->sk;
 764	}
 765
 766	return bpf_iter_run_prog(prog, &ctx);
 767}
 768
 769static void sock_map_seq_stop(struct seq_file *seq, void *v)
 770	__releases(rcu)
 771{
 772	if (!v)
 773		(void)sock_map_seq_show(seq, NULL);
 774
 775	/* pairs with sock_map_seq_start */
 776	rcu_read_unlock();
 777}
 778
 779static const struct seq_operations sock_map_seq_ops = {
 780	.start	= sock_map_seq_start,
 781	.next	= sock_map_seq_next,
 782	.stop	= sock_map_seq_stop,
 783	.show	= sock_map_seq_show,
 784};
 785
 786static int sock_map_init_seq_private(void *priv_data,
 787				     struct bpf_iter_aux_info *aux)
 788{
 789	struct sock_map_seq_info *info = priv_data;
 790
 791	bpf_map_inc_with_uref(aux->map);
 792	info->map = aux->map;
 793	return 0;
 794}
 795
 796static void sock_map_fini_seq_private(void *priv_data)
 797{
 798	struct sock_map_seq_info *info = priv_data;
 799
 800	bpf_map_put_with_uref(info->map);
 801}
 802
 803static u64 sock_map_mem_usage(const struct bpf_map *map)
 804{
 805	u64 usage = sizeof(struct bpf_stab);
 806
 807	usage += (u64)map->max_entries * sizeof(struct sock *);
 808	return usage;
 809}
 810
 811static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
 812	.seq_ops		= &sock_map_seq_ops,
 813	.init_seq_private	= sock_map_init_seq_private,
 814	.fini_seq_private	= sock_map_fini_seq_private,
 815	.seq_priv_size		= sizeof(struct sock_map_seq_info),
 816};
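/*
 * Sketch of an iterator program consuming the seq interface above,
 * modeled on the selftests; the global counter is an illustrative
 * assumption:
 *
 *	__u32 elems;
 *
 *	SEC("iter/sockmap")
 *	int count_elems(struct bpf_iter__sockmap *ctx)
 *	{
 *		// ctx->key and ctx->sk are NULL on the final call
 *		// made from sock_map_seq_stop()
 *		if (ctx->sk)
 *			__sync_fetch_and_add(&elems, 1);
 *		return 0;
 *	}
 */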
 817
 818BTF_ID_LIST_SINGLE(sock_map_btf_ids, struct, bpf_stab)
 819const struct bpf_map_ops sock_map_ops = {
 820	.map_meta_equal		= bpf_map_meta_equal,
 821	.map_alloc		= sock_map_alloc,
 822	.map_free		= sock_map_free,
 823	.map_get_next_key	= sock_map_get_next_key,
 824	.map_lookup_elem_sys_only = sock_map_lookup_sys,
 825	.map_update_elem	= sock_map_update_elem,
 826	.map_delete_elem	= sock_map_delete_elem,
 827	.map_lookup_elem	= sock_map_lookup,
 828	.map_release_uref	= sock_map_release_progs,
 829	.map_check_btf		= map_check_no_btf,
 830	.map_mem_usage		= sock_map_mem_usage,
 831	.map_btf_id		= &sock_map_btf_ids[0],
 832	.iter_seq_info		= &sock_map_iter_seq_info,
 833};
 834
 835struct bpf_shtab_elem {
 836	struct rcu_head rcu;
 837	u32 hash;
 838	struct sock *sk;
 839	struct hlist_node node;
 840	u8 key[];
 841};
 842
 843struct bpf_shtab_bucket {
 844	struct hlist_head head;
 845	spinlock_t lock;
 846};
 847
 848struct bpf_shtab {
 849	struct bpf_map map;
 850	struct bpf_shtab_bucket *buckets;
 851	u32 buckets_num;
 852	u32 elem_size;
 853	struct sk_psock_progs progs;
 854	atomic_t count;
 855};
 856
 857static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
 858{
 859	return jhash(key, len, 0);
 860}
 861
 862static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab,
 863							u32 hash)
 864{
 865	return &htab->buckets[hash & (htab->buckets_num - 1)];
 866}
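/*
 * buckets_num is rounded up to a power of two in sock_hash_alloc(),
 * so the mask above is a cheap "hash % buckets_num": with 16 buckets,
 * for example, hash & 15 selects the bucket.
 */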
 867
 868static struct bpf_shtab_elem *
 869sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
 870			  u32 key_size)
 871{
 872	struct bpf_shtab_elem *elem;
 873
 874	hlist_for_each_entry_rcu(elem, head, node) {
 875		if (elem->hash == hash &&
 876		    !memcmp(&elem->key, key, key_size))
 877			return elem;
 878	}
 879
 880	return NULL;
 881}
 882
 883static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
 884{
 885	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 886	u32 key_size = map->key_size, hash;
 887	struct bpf_shtab_bucket *bucket;
 888	struct bpf_shtab_elem *elem;
 889
 890	WARN_ON_ONCE(!rcu_read_lock_held());
 891
 892	hash = sock_hash_bucket_hash(key, key_size);
 893	bucket = sock_hash_select_bucket(htab, hash);
 894	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
 895
 896	return elem ? elem->sk : NULL;
 897}
 898
 899static void sock_hash_free_elem(struct bpf_shtab *htab,
 900				struct bpf_shtab_elem *elem)
 901{
 902	atomic_dec(&htab->count);
 903	kfree_rcu(elem, rcu);
 904}
 905
 906static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
 907				       void *link_raw)
 908{
 909	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 910	struct bpf_shtab_elem *elem_probe, *elem = link_raw;
 911	struct bpf_shtab_bucket *bucket;
 912
 913	WARN_ON_ONCE(!rcu_read_lock_held());
 914	bucket = sock_hash_select_bucket(htab, elem->hash);
 915
 916	/* elem may be deleted in parallel from the map, but access here
 917	 * is okay since it's going away only after an RCU grace period.
 918	 * However, we need to check whether it's still present.
 919	 */
 920	spin_lock_bh(&bucket->lock);
 921	elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
 922					       elem->key, map->key_size);
 923	if (elem_probe && elem_probe == elem) {
 924		hlist_del_rcu(&elem->node);
 925		sock_map_unref(elem->sk, elem);
 926		sock_hash_free_elem(htab, elem);
 927	}
 928	spin_unlock_bh(&bucket->lock);
 929}
 930
 931static long sock_hash_delete_elem(struct bpf_map *map, void *key)
 932{
 933	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 934	u32 hash, key_size = map->key_size;
 935	struct bpf_shtab_bucket *bucket;
 936	struct bpf_shtab_elem *elem;
 937	int ret = -ENOENT;
 938
 939	if (irqs_disabled())
 940		return -EOPNOTSUPP; /* locks here are hardirq-unsafe */
 941
 942	hash = sock_hash_bucket_hash(key, key_size);
 943	bucket = sock_hash_select_bucket(htab, hash);
 944
 945	spin_lock_bh(&bucket->lock);
 946	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
 947	if (elem) {
 948		hlist_del_rcu(&elem->node);
 949		sock_map_unref(elem->sk, elem);
 950		sock_hash_free_elem(htab, elem);
 951		ret = 0;
 952	}
 953	spin_unlock_bh(&bucket->lock);
 954	return ret;
 955}
 956
 957static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
 958						   void *key, u32 key_size,
 959						   u32 hash, struct sock *sk,
 960						   struct bpf_shtab_elem *old)
 961{
 962	struct bpf_shtab_elem *new;
 963
 964	if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
 965		if (!old) {
 966			atomic_dec(&htab->count);
 967			return ERR_PTR(-E2BIG);
 968		}
 969	}
 970
 971	new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
 972				   GFP_ATOMIC | __GFP_NOWARN,
 973				   htab->map.numa_node);
 974	if (!new) {
 975		atomic_dec(&htab->count);
 976		return ERR_PTR(-ENOMEM);
 977	}
 978	memcpy(new->key, key, key_size);
 979	new->sk = sk;
 980	new->hash = hash;
 981	return new;
 982}
 983
 984static int sock_hash_update_common(struct bpf_map *map, void *key,
 985				   struct sock *sk, u64 flags)
 986{
 987	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 988	u32 key_size = map->key_size, hash;
 989	struct bpf_shtab_elem *elem, *elem_new;
 990	struct bpf_shtab_bucket *bucket;
 991	struct sk_psock_link *link;
 992	struct sk_psock *psock;
 993	int ret;
 994
 995	WARN_ON_ONCE(!rcu_read_lock_held());
 996	if (unlikely(flags > BPF_EXIST))
 997		return -EINVAL;
 998
 999	link = sk_psock_init_link();
1000	if (!link)
1001		return -ENOMEM;
1002
1003	ret = sock_map_link(map, sk);
1004	if (ret < 0)
1005		goto out_free;
1006
1007	psock = sk_psock(sk);
1008	WARN_ON_ONCE(!psock);
1009
1010	hash = sock_hash_bucket_hash(key, key_size);
1011	bucket = sock_hash_select_bucket(htab, hash);
1012
1013	spin_lock_bh(&bucket->lock);
1014	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
1015	if (elem && flags == BPF_NOEXIST) {
1016		ret = -EEXIST;
1017		goto out_unlock;
1018	} else if (!elem && flags == BPF_EXIST) {
1019		ret = -ENOENT;
1020		goto out_unlock;
1021	}
1022
1023	elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
1024	if (IS_ERR(elem_new)) {
1025		ret = PTR_ERR(elem_new);
1026		goto out_unlock;
1027	}
1028
1029	sock_map_add_link(psock, link, map, elem_new);
1030	/* Add the new element to the head of the list, so that a
1031	 * concurrent search will find it before the old element.
1032	 */
1033	hlist_add_head_rcu(&elem_new->node, &bucket->head);
1034	if (elem) {
1035		hlist_del_rcu(&elem->node);
1036		sock_map_unref(elem->sk, elem);
1037		sock_hash_free_elem(htab, elem);
1038	}
1039	spin_unlock_bh(&bucket->lock);
1040	return 0;
1041out_unlock:
1042	spin_unlock_bh(&bucket->lock);
1043	sk_psock_put(sk, psock);
1044out_free:
1045	sk_psock_free_link(link);
1046	return ret;
1047}
1048
1049static int sock_hash_get_next_key(struct bpf_map *map, void *key,
1050				  void *key_next)
1051{
1052	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1053	struct bpf_shtab_elem *elem, *elem_next;
1054	u32 hash, key_size = map->key_size;
1055	struct hlist_head *head;
1056	int i = 0;
1057
1058	if (!key)
1059		goto find_first_elem;
1060	hash = sock_hash_bucket_hash(key, key_size);
1061	head = &sock_hash_select_bucket(htab, hash)->head;
1062	elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
1063	if (!elem)
1064		goto find_first_elem;
1065
1066	elem_next = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&elem->node)),
1067				     struct bpf_shtab_elem, node);
1068	if (elem_next) {
1069		memcpy(key_next, elem_next->key, key_size);
1070		return 0;
1071	}
1072
1073	i = hash & (htab->buckets_num - 1);
1074	i++;
1075find_first_elem:
1076	for (; i < htab->buckets_num; i++) {
1077		head = &sock_hash_select_bucket(htab, i)->head;
1078		elem_next = hlist_entry_safe(rcu_dereference(hlist_first_rcu(head)),
1079					     struct bpf_shtab_elem, node);
1080		if (elem_next) {
1081			memcpy(key_next, elem_next->key, key_size);
1082			return 0;
1083		}
1084	}
1085
1086	return -ENOENT;
1087}
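/*
 * Usage sketch for walking a sockhash from userspace via this
 * callback, assuming libbpf, a 4-byte key, and a placeholder
 * "map_fd":
 *
 *	__u32 key, next_key;
 *	int err = bpf_map_get_next_key(map_fd, NULL, &next_key);
 *
 *	while (!err) {
 *		key = next_key;
 *		// ... process "key" here ...
 *		err = bpf_map_get_next_key(map_fd, &key, &next_key);
 *	}
 *	// err ends up as -ENOENT after the last element
 */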
1088
1089static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
1090{
1091	struct bpf_shtab *htab;
1092	int i, err;
1093
1094	if (attr->max_entries == 0 ||
1095	    attr->key_size    == 0 ||
1096	    (attr->value_size != sizeof(u32) &&
1097	     attr->value_size != sizeof(u64)) ||
1098	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
1099		return ERR_PTR(-EINVAL);
1100	if (attr->key_size > MAX_BPF_STACK)
1101		return ERR_PTR(-E2BIG);
1102
1103	htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
1104	if (!htab)
1105		return ERR_PTR(-ENOMEM);
1106
1107	bpf_map_init_from_attr(&htab->map, attr);
1108
1109	htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
1110	htab->elem_size = sizeof(struct bpf_shtab_elem) +
1111			  round_up(htab->map.key_size, 8);
1112	if (htab->buckets_num == 0 ||
1113	    htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) {
1114		err = -EINVAL;
1115		goto free_htab;
1116	}
1117
1118	htab->buckets = bpf_map_area_alloc(htab->buckets_num *
1119					   sizeof(struct bpf_shtab_bucket),
1120					   htab->map.numa_node);
1121	if (!htab->buckets) {
1122		err = -ENOMEM;
1123		goto free_htab;
1124	}
1125
1126	for (i = 0; i < htab->buckets_num; i++) {
1127		INIT_HLIST_HEAD(&htab->buckets[i].head);
1128		spin_lock_init(&htab->buckets[i].lock);
1129	}
1130
1131	return &htab->map;
1132free_htab:
1133	bpf_map_area_free(htab);
1134	return ERR_PTR(err);
1135}
1136
1137static void sock_hash_free(struct bpf_map *map)
1138{
1139	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1140	struct bpf_shtab_bucket *bucket;
1141	struct hlist_head unlink_list;
1142	struct bpf_shtab_elem *elem;
1143	struct hlist_node *node;
1144	int i;
1145
1146	/* After the sync no updates or deletes will be in flight, so
1147	 * it is safe to walk the map and remove entries without
1148	 * risking a race in the EEXIST update case.
1149	 */
1150	synchronize_rcu();
1151	for (i = 0; i < htab->buckets_num; i++) {
1152		bucket = sock_hash_select_bucket(htab, i);
1153
1154		/* We are racing with sock_hash_delete_from_link to
1155		 * enter the spin-lock critical section. Every socket on
1156		 * the list is still linked to the sockhash. Since the
1157		 * link exists, the psock exists and holds a ref to the
1158		 * socket. That lets us grab a socket ref too.
1159		 */
1160		spin_lock_bh(&bucket->lock);
1161		hlist_for_each_entry(elem, &bucket->head, node)
1162			sock_hold(elem->sk);
1163		hlist_move_list(&bucket->head, &unlink_list);
1164		spin_unlock_bh(&bucket->lock);
1165
1166		/* Process removed entries out of atomic context so we
1167		 * can block on the socket lock before deleting the
1168		 * psock's link to the sockhash.
1169		 */
1170		hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
1171			hlist_del(&elem->node);
1172			lock_sock(elem->sk);
1173			rcu_read_lock();
1174			sock_map_unref(elem->sk, elem);
1175			rcu_read_unlock();
1176			release_sock(elem->sk);
1177			sock_put(elem->sk);
1178			sock_hash_free_elem(htab, elem);
1179		}
1180	}
1181
1182	/* Wait for any psock readers still accessing their map links. */
1183	synchronize_rcu();
1184
1185	bpf_map_area_free(htab->buckets);
1186	bpf_map_area_free(htab);
1187}
1188
1189static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
1190{
1191	struct sock *sk;
1192
1193	if (map->value_size != sizeof(u64))
1194		return ERR_PTR(-ENOSPC);
1195
1196	sk = __sock_hash_lookup_elem(map, key);
1197	if (!sk)
1198		return ERR_PTR(-ENOENT);
1199
1200	__sock_gen_cookie(sk);
1201	return &sk->sk_cookie;
1202}
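/*
 * Usage sketch: a syscall-side lookup yields the socket cookie, not
 * a socket reference, and requires the map's value_size to be 8; the
 * 4-byte key and "map_fd" below are placeholder assumptions:
 *
 *	__u32 key = 0;
 *	__u64 cookie;
 *
 *	if (!bpf_map_lookup_elem(map_fd, &key, &cookie))
 *		printf("cookie: %llu\n", cookie);
 */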
1203
1204static void *sock_hash_lookup(struct bpf_map *map, void *key)
1205{
1206	struct sock *sk;
1207
1208	sk = __sock_hash_lookup_elem(map, key);
1209	if (!sk)
1210		return NULL;
1211	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
1212		return NULL;
1213	return sk;
1214}
1215
1216static void sock_hash_release_progs(struct bpf_map *map)
1217{
1218	psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs);
1219}
1220
1221BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
1222	   struct bpf_map *, map, void *, key, u64, flags)
1223{
1224	WARN_ON_ONCE(!rcu_read_lock_held());
1225
1226	if (likely(sock_map_sk_is_suitable(sops->sk) &&
1227		   sock_map_op_okay(sops)))
1228		return sock_hash_update_common(map, key, sops->sk, flags);
1229	return -EOPNOTSUPP;
1230}
1231
1232const struct bpf_func_proto bpf_sock_hash_update_proto = {
1233	.func		= bpf_sock_hash_update,
1234	.gpl_only	= false,
1235	.pkt_access	= true,
1236	.ret_type	= RET_INTEGER,
1237	.arg1_type	= ARG_PTR_TO_CTX,
1238	.arg2_type	= ARG_CONST_MAP_PTR,
1239	.arg3_type	= ARG_PTR_TO_MAP_KEY,
1240	.arg4_type	= ARG_ANYTHING,
1241};
1242
1243BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
1244	   struct bpf_map *, map, void *, key, u64, flags)
1245{
1246	struct sock *sk;
1247
1248	if (unlikely(flags & ~(BPF_F_INGRESS)))
1249		return SK_DROP;
1250
1251	sk = __sock_hash_lookup_elem(map, key);
1252	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1253		return SK_DROP;
1254
1255	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
1256	return SK_PASS;
1257}
1258
1259const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
1260	.func           = bpf_sk_redirect_hash,
1261	.gpl_only       = false,
1262	.ret_type       = RET_INTEGER,
1263	.arg1_type	= ARG_PTR_TO_CTX,
1264	.arg2_type      = ARG_CONST_MAP_PTR,
1265	.arg3_type      = ARG_PTR_TO_MAP_KEY,
1266	.arg4_type      = ARG_ANYTHING,
1267};
1268
1269BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
1270	   struct bpf_map *, map, void *, key, u64, flags)
1271{
1272	struct sock *sk;
1273
1274	if (unlikely(flags & ~(BPF_F_INGRESS)))
1275		return SK_DROP;
1276
1277	sk = __sock_hash_lookup_elem(map, key);
1278	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1279		return SK_DROP;
1280	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
1281		return SK_DROP;
1282
1283	msg->flags = flags;
1284	msg->sk_redir = sk;
1285	return SK_PASS;
1286}
1287
1288const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
1289	.func           = bpf_msg_redirect_hash,
1290	.gpl_only       = false,
1291	.ret_type       = RET_INTEGER,
1292	.arg1_type	= ARG_PTR_TO_CTX,
1293	.arg2_type      = ARG_CONST_MAP_PTR,
1294	.arg3_type      = ARG_PTR_TO_MAP_KEY,
1295	.arg4_type      = ARG_ANYTHING,
1296};
1297
1298struct sock_hash_seq_info {
1299	struct bpf_map *map;
1300	struct bpf_shtab *htab;
1301	u32 bucket_id;
1302};
1303
1304static void *sock_hash_seq_find_next(struct sock_hash_seq_info *info,
1305				     struct bpf_shtab_elem *prev_elem)
1306{
1307	const struct bpf_shtab *htab = info->htab;
1308	struct bpf_shtab_bucket *bucket;
1309	struct bpf_shtab_elem *elem;
1310	struct hlist_node *node;
1311
1312	/* try to find next elem in the same bucket */
1313	if (prev_elem) {
1314		node = rcu_dereference(hlist_next_rcu(&prev_elem->node));
1315		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
1316		if (elem)
1317			return elem;
1318
1319		/* no more elements, continue in the next bucket */
1320		info->bucket_id++;
1321	}
1322
1323	for (; info->bucket_id < htab->buckets_num; info->bucket_id++) {
1324		bucket = &htab->buckets[info->bucket_id];
1325		node = rcu_dereference(hlist_first_rcu(&bucket->head));
1326		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
1327		if (elem)
1328			return elem;
1329	}
1330
1331	return NULL;
1332}
1333
1334static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos)
1335	__acquires(rcu)
1336{
1337	struct sock_hash_seq_info *info = seq->private;
1338
1339	if (*pos == 0)
1340		++*pos;
1341
1342	/* pairs with sock_hash_seq_stop */
1343	rcu_read_lock();
1344	return sock_hash_seq_find_next(info, NULL);
1345}
1346
1347static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1348	__must_hold(rcu)
1349{
1350	struct sock_hash_seq_info *info = seq->private;
1351
1352	++*pos;
1353	return sock_hash_seq_find_next(info, v);
1354}
1355
1356static int sock_hash_seq_show(struct seq_file *seq, void *v)
1357	__must_hold(rcu)
1358{
1359	struct sock_hash_seq_info *info = seq->private;
1360	struct bpf_iter__sockmap ctx = {};
1361	struct bpf_shtab_elem *elem = v;
1362	struct bpf_iter_meta meta;
1363	struct bpf_prog *prog;
1364
1365	meta.seq = seq;
1366	prog = bpf_iter_get_info(&meta, !elem);
1367	if (!prog)
1368		return 0;
1369
1370	ctx.meta = &meta;
1371	ctx.map = info->map;
1372	if (elem) {
1373		ctx.key = elem->key;
1374		ctx.sk = elem->sk;
1375	}
1376
1377	return bpf_iter_run_prog(prog, &ctx);
1378}
1379
1380static void sock_hash_seq_stop(struct seq_file *seq, void *v)
1381	__releases(rcu)
1382{
1383	if (!v)
1384		(void)sock_hash_seq_show(seq, NULL);
1385
1386	/* pairs with sock_hash_seq_start */
1387	rcu_read_unlock();
1388}
1389
1390static const struct seq_operations sock_hash_seq_ops = {
1391	.start	= sock_hash_seq_start,
1392	.next	= sock_hash_seq_next,
1393	.stop	= sock_hash_seq_stop,
1394	.show	= sock_hash_seq_show,
1395};
1396
1397static int sock_hash_init_seq_private(void *priv_data,
1398				      struct bpf_iter_aux_info *aux)
1399{
1400	struct sock_hash_seq_info *info = priv_data;
1401
1402	bpf_map_inc_with_uref(aux->map);
1403	info->map = aux->map;
1404	info->htab = container_of(aux->map, struct bpf_shtab, map);
1405	return 0;
1406}
1407
1408static void sock_hash_fini_seq_private(void *priv_data)
1409{
1410	struct sock_hash_seq_info *info = priv_data;
1411
1412	bpf_map_put_with_uref(info->map);
1413}
1414
1415static u64 sock_hash_mem_usage(const struct bpf_map *map)
1416{
1417	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1418	u64 usage = sizeof(*htab);
1419
1420	usage += htab->buckets_num * sizeof(struct bpf_shtab_bucket);
1421	usage += atomic_read(&htab->count) * (u64)htab->elem_size;
1422	return usage;
1423}
1424
1425static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
1426	.seq_ops		= &sock_hash_seq_ops,
1427	.init_seq_private	= sock_hash_init_seq_private,
1428	.fini_seq_private	= sock_hash_fini_seq_private,
1429	.seq_priv_size		= sizeof(struct sock_hash_seq_info),
1430};
1431
1432BTF_ID_LIST_SINGLE(sock_hash_map_btf_ids, struct, bpf_shtab)
1433const struct bpf_map_ops sock_hash_ops = {
1434	.map_meta_equal		= bpf_map_meta_equal,
1435	.map_alloc		= sock_hash_alloc,
1436	.map_free		= sock_hash_free,
1437	.map_get_next_key	= sock_hash_get_next_key,
1438	.map_update_elem	= sock_map_update_elem,
1439	.map_delete_elem	= sock_hash_delete_elem,
1440	.map_lookup_elem	= sock_hash_lookup,
1441	.map_lookup_elem_sys_only = sock_hash_lookup_sys,
1442	.map_release_uref	= sock_hash_release_progs,
1443	.map_check_btf		= map_check_no_btf,
1444	.map_mem_usage		= sock_hash_mem_usage,
1445	.map_btf_id		= &sock_hash_map_btf_ids[0],
1446	.iter_seq_info		= &sock_hash_iter_seq_info,
1447};
1448
1449static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
1450{
1451	switch (map->map_type) {
1452	case BPF_MAP_TYPE_SOCKMAP:
1453		return &container_of(map, struct bpf_stab, map)->progs;
1454	case BPF_MAP_TYPE_SOCKHASH:
1455		return &container_of(map, struct bpf_shtab, map)->progs;
1456	default:
1457		break;
1458	}
1459
1460	return NULL;
1461}
1462
1463static int sock_map_prog_lookup(struct bpf_map *map, struct bpf_prog ***pprog,
1464				u32 which)
1465{
1466	struct sk_psock_progs *progs = sock_map_progs(map);
1467
1468	if (!progs)
1469		return -EOPNOTSUPP;
1470
1471	switch (which) {
1472	case BPF_SK_MSG_VERDICT:
1473		*pprog = &progs->msg_parser;
1474		break;
1475#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
1476	case BPF_SK_SKB_STREAM_PARSER:
1477		*pprog = &progs->stream_parser;
1478		break;
1479#endif
1480	case BPF_SK_SKB_STREAM_VERDICT:
1481		if (progs->skb_verdict)
1482			return -EBUSY;
1483		*pprog = &progs->stream_verdict;
1484		break;
1485	case BPF_SK_SKB_VERDICT:
1486		if (progs->stream_verdict)
1487			return -EBUSY;
1488		*pprog = &progs->skb_verdict;
1489		break;
1490	default:
1491		return -EOPNOTSUPP;
1492	}
1493
1494	return 0;
1495}
1496
1497static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
1498				struct bpf_prog *old, u32 which)
1499{
1500	struct bpf_prog **pprog;
1501	int ret;
1502
1503	ret = sock_map_prog_lookup(map, &pprog, which);
1504	if (ret)
1505		return ret;
1506
1507	if (old)
1508		return psock_replace_prog(pprog, prog, old);
1509
1510	psock_set_prog(pprog, prog);
1511	return 0;
1512}
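/*
 * This backs BPF_PROG_ATTACH/BPF_PROG_DETACH on a sockmap or
 * sockhash fd. A minimal libbpf sketch with placeholder fds:
 *
 *	bpf_prog_attach(verdict_fd, map_fd,
 *			BPF_SK_SKB_STREAM_VERDICT, 0);
 *	// ... later, detach the same program:
 *	bpf_prog_detach2(verdict_fd, map_fd,
 *			 BPF_SK_SKB_STREAM_VERDICT);
 */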
1513
1514int sock_map_bpf_prog_query(const union bpf_attr *attr,
1515			    union bpf_attr __user *uattr)
1516{
1517	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
1518	u32 prog_cnt = 0, flags = 0, ufd = attr->target_fd;
1519	struct bpf_prog **pprog;
1520	struct bpf_prog *prog;
1521	struct bpf_map *map;
1522	struct fd f;
1523	u32 id = 0;
1524	int ret;
1525
1526	if (attr->query.query_flags)
1527		return -EINVAL;
1528
1529	f = fdget(ufd);
1530	map = __bpf_map_get(f);
1531	if (IS_ERR(map))
1532		return PTR_ERR(map);
1533
1534	rcu_read_lock();
1535
1536	ret = sock_map_prog_lookup(map, &pprog, attr->query.attach_type);
1537	if (ret)
1538		goto end;
1539
1540	prog = *pprog;
1541	prog_cnt = !prog ? 0 : 1;
1542
1543	if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
1544		goto end;
1545
1546	/* We do not hold the refcnt, so the bpf prog may be released
1547	 * asynchronously, in which case its id is set to 0.
1548	 */
1549	id = data_race(prog->aux->id);
1550	if (id == 0)
1551		prog_cnt = 0;
1552
1553end:
1554	rcu_read_unlock();
1555
1556	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)) ||
1557	    (id != 0 && copy_to_user(prog_ids, &id, sizeof(u32))) ||
1558	    copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
1559		ret = -EFAULT;
1560
1561	fdput(f);
1562	return ret;
1563}
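/*
 * Usage sketch for this query path; a map holds at most one program
 * per attach point, so a single-slot id array suffices ("map_fd" is
 * a placeholder):
 *
 *	__u32 prog_ids[1], prog_cnt = 1;
 *
 *	if (!bpf_prog_query(map_fd, BPF_SK_SKB_STREAM_VERDICT, 0, NULL,
 *			    prog_ids, &prog_cnt) && prog_cnt)
 *		printf("attached prog id: %u\n", prog_ids[0]);
 */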
1564
1565static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link)
1566{
1567	switch (link->map->map_type) {
1568	case BPF_MAP_TYPE_SOCKMAP:
1569		return sock_map_delete_from_link(link->map, sk,
1570						 link->link_raw);
1571	case BPF_MAP_TYPE_SOCKHASH:
1572		return sock_hash_delete_from_link(link->map, sk,
1573						  link->link_raw);
1574	default:
1575		break;
1576	}
1577}
1578
1579static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock)
1580{
1581	struct sk_psock_link *link;
1582
1583	while ((link = sk_psock_link_pop(psock))) {
1584		sock_map_unlink(sk, link);
1585		sk_psock_free_link(link);
1586	}
1587}
1588
1589void sock_map_unhash(struct sock *sk)
1590{
1591	void (*saved_unhash)(struct sock *sk);
1592	struct sk_psock *psock;
1593
1594	rcu_read_lock();
1595	psock = sk_psock(sk);
1596	if (unlikely(!psock)) {
1597		rcu_read_unlock();
1598		saved_unhash = READ_ONCE(sk->sk_prot)->unhash;
1599	} else {
1600		saved_unhash = psock->saved_unhash;
1601		sock_map_remove_links(sk, psock);
1602		rcu_read_unlock();
1603	}
1604	if (WARN_ON_ONCE(saved_unhash == sock_map_unhash))
1605		return;
1606	if (saved_unhash)
1607		saved_unhash(sk);
1608}
1609EXPORT_SYMBOL_GPL(sock_map_unhash);
1610
1611void sock_map_destroy(struct sock *sk)
1612{
1613	void (*saved_destroy)(struct sock *sk);
1614	struct sk_psock *psock;
1615
1616	rcu_read_lock();
1617	psock = sk_psock_get(sk);
1618	if (unlikely(!psock)) {
1619		rcu_read_unlock();
1620		saved_destroy = READ_ONCE(sk->sk_prot)->destroy;
1621	} else {
1622		saved_destroy = psock->saved_destroy;
1623		sock_map_remove_links(sk, psock);
1624		rcu_read_unlock();
1625		sk_psock_stop(psock);
1626		sk_psock_put(sk, psock);
1627	}
1628	if (WARN_ON_ONCE(saved_destroy == sock_map_destroy))
1629		return;
1630	if (saved_destroy)
1631		saved_destroy(sk);
1632}
1633EXPORT_SYMBOL_GPL(sock_map_destroy);
1634
1635void sock_map_close(struct sock *sk, long timeout)
1636{
1637	void (*saved_close)(struct sock *sk, long timeout);
1638	struct sk_psock *psock;
1639
1640	lock_sock(sk);
1641	rcu_read_lock();
1642	psock = sk_psock_get(sk);
1643	if (unlikely(!psock)) {
1644		rcu_read_unlock();
1645		release_sock(sk);
1646		saved_close = READ_ONCE(sk->sk_prot)->close;
1647	} else {
1648		saved_close = psock->saved_close;
1649		sock_map_remove_links(sk, psock);
1650		rcu_read_unlock();
1651		sk_psock_stop(psock);
1652		release_sock(sk);
1653		cancel_delayed_work_sync(&psock->work);
1654		sk_psock_put(sk, psock);
1655	}
1656
1657	/* Make sure we do not recurse; recursion here would be a bug.
1658	 * Leak the socket instead of crashing on a stack overflow.
1659	 */
1660	if (WARN_ON_ONCE(saved_close == sock_map_close))
1661		return;
1662	saved_close(sk, timeout);
1663}
1664EXPORT_SYMBOL_GPL(sock_map_close);
1665
1666static int sock_map_iter_attach_target(struct bpf_prog *prog,
1667				       union bpf_iter_link_info *linfo,
1668				       struct bpf_iter_aux_info *aux)
1669{
1670	struct bpf_map *map;
1671	int err = -EINVAL;
1672
1673	if (!linfo->map.map_fd)
1674		return -EBADF;
1675
1676	map = bpf_map_get_with_uref(linfo->map.map_fd);
1677	if (IS_ERR(map))
1678		return PTR_ERR(map);
1679
1680	if (map->map_type != BPF_MAP_TYPE_SOCKMAP &&
1681	    map->map_type != BPF_MAP_TYPE_SOCKHASH)
1682		goto put_map;
1683
1684	if (prog->aux->max_rdonly_access > map->key_size) {
1685		err = -EACCES;
1686		goto put_map;
1687	}
1688
1689	aux->map = map;
1690	return 0;
1691
1692put_map:
1693	bpf_map_put_with_uref(map);
1694	return err;
1695}
1696
1697static void sock_map_iter_detach_target(struct bpf_iter_aux_info *aux)
1698{
1699	bpf_map_put_with_uref(aux->map);
1700}
1701
1702static struct bpf_iter_reg sock_map_iter_reg = {
1703	.target			= "sockmap",
1704	.attach_target		= sock_map_iter_attach_target,
1705	.detach_target		= sock_map_iter_detach_target,
1706	.show_fdinfo		= bpf_iter_map_show_fdinfo,
1707	.fill_link_info		= bpf_iter_map_fill_link_info,
1708	.ctx_arg_info_size	= 2,
1709	.ctx_arg_info		= {
1710		{ offsetof(struct bpf_iter__sockmap, key),
1711		  PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY },
1712		{ offsetof(struct bpf_iter__sockmap, sk),
1713		  PTR_TO_BTF_ID_OR_NULL },
1714	},
1715};
1716
1717static int __init bpf_sockmap_iter_init(void)
1718{
1719	sock_map_iter_reg.ctx_arg_info[1].btf_id =
1720		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
1721	return bpf_iter_reg_target(&sock_map_iter_reg);
1722}
1723late_initcall(bpf_sockmap_iter_init);
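/*
 * Usage sketch for instantiating this iterator target, assuming
 * libbpf and a loaded "iter/sockmap" program ("prog" and "map_fd"
 * are placeholders):
 *
 *	union bpf_iter_link_info linfo = { .map.map_fd = map_fd };
 *	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts,
 *			    .link_info = &linfo,
 *			    .link_info_len = sizeof(linfo));
 *	struct bpf_link *link = bpf_program__attach_iter(prog, &opts);
 *	int iter_fd = bpf_iter_create(bpf_link__fd(link));
 *
 *	// read(2) on iter_fd now drives the seq_ops defined above
 */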