v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
   3
   4#include <linux/bpf.h>
   5#include <linux/btf_ids.h>
   6#include <linux/filter.h>
   7#include <linux/errno.h>
   8#include <linux/file.h>
   9#include <linux/net.h>
  10#include <linux/workqueue.h>
  11#include <linux/skmsg.h>
  12#include <linux/list.h>
  13#include <linux/jhash.h>
  14#include <linux/sock_diag.h>
  15#include <net/udp.h>
  16
  17struct bpf_stab {
  18	struct bpf_map map;
  19	struct sock **sks;
  20	struct sk_psock_progs progs;
  21	spinlock_t lock;
  22};
  23
  24#define SOCK_CREATE_FLAG_MASK				\
  25	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
  26
  27static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
  28				struct bpf_prog *old, u32 which);
  29static struct sk_psock_progs *sock_map_progs(struct bpf_map *map);
  30
  31static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
  32{
  33	struct bpf_stab *stab;
  34
  35	if (attr->max_entries == 0 ||
  36	    attr->key_size    != 4 ||
  37	    (attr->value_size != sizeof(u32) &&
  38	     attr->value_size != sizeof(u64)) ||
  39	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
  40		return ERR_PTR(-EINVAL);
  41
  42	stab = bpf_map_area_alloc(sizeof(*stab), NUMA_NO_NODE);
  43	if (!stab)
  44		return ERR_PTR(-ENOMEM);
  45
  46	bpf_map_init_from_attr(&stab->map, attr);
  47	spin_lock_init(&stab->lock);
  48
  49	stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
  50				       sizeof(struct sock *),
  51				       stab->map.numa_node);
  52	if (!stab->sks) {
  53		bpf_map_area_free(stab);
  54		return ERR_PTR(-ENOMEM);
  55	}
  56
  57	return &stab->map;
  58}
  59
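/* Illustrative user-space counterpart (not part of this file): creating a
 * BPF_MAP_TYPE_SOCKMAP that is allocated by sock_map_alloc() above. A
 * sketch assuming libbpf >= 0.7 for bpf_map_create(); per the checks
 * above, key_size must be 4 and value_size must be 4 or 8 bytes.
 */
#include <bpf/bpf.h>
#include <stdio.h>

static int create_sockmap(void)
{
	/* key: u32 slot index, value: u64 (socket fd on update), 16 slots */
	int map_fd = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, "example_map",
				    sizeof(__u32), sizeof(__u64), 16, NULL);

	if (map_fd < 0)
		perror("bpf_map_create");
	return map_fd;
}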
  60int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
  61{
  62	u32 ufd = attr->target_fd;
  63	struct bpf_map *map;
  64	struct fd f;
  65	int ret;
  66
  67	if (attr->attach_flags || attr->replace_bpf_fd)
  68		return -EINVAL;
  69
  70	f = fdget(ufd);
  71	map = __bpf_map_get(f);
  72	if (IS_ERR(map))
  73		return PTR_ERR(map);
  74	ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
  75	fdput(f);
  76	return ret;
  77}
  78
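/* Illustrative sketch of the user-space attach that reaches
 * sock_map_get_from_fd(): a libbpf caller passes the map fd as the
 * attach target. Hypothetical helper; note attach_flags must be 0 to
 * pass the check above.
 */
#include <bpf/bpf.h>

static int attach_stream_verdict(int prog_fd, int map_fd)
{
	return bpf_prog_attach(prog_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0);
}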
  79int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
  80{
  81	u32 ufd = attr->target_fd;
  82	struct bpf_prog *prog;
  83	struct bpf_map *map;
  84	struct fd f;
  85	int ret;
  86
  87	if (attr->attach_flags || attr->replace_bpf_fd)
  88		return -EINVAL;
  89
  90	f = fdget(ufd);
  91	map = __bpf_map_get(f);
  92	if (IS_ERR(map))
  93		return PTR_ERR(map);
  94
  95	prog = bpf_prog_get(attr->attach_bpf_fd);
  96	if (IS_ERR(prog)) {
  97		ret = PTR_ERR(prog);
  98		goto put_map;
  99	}
 100
 101	if (prog->type != ptype) {
 102		ret = -EINVAL;
 103		goto put_prog;
 104	}
 105
 106	ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
 107put_prog:
 108	bpf_prog_put(prog);
 109put_map:
 110	fdput(f);
 111	return ret;
 112}
 113
 114static void sock_map_sk_acquire(struct sock *sk)
 115	__acquires(&sk->sk_lock.slock)
 116{
 117	lock_sock(sk);
 118	rcu_read_lock();
 119}
 120
 121static void sock_map_sk_release(struct sock *sk)
 122	__releases(&sk->sk_lock.slock)
 123{
 124	rcu_read_unlock();
 125	release_sock(sk);
 126}
 127
 128static void sock_map_add_link(struct sk_psock *psock,
 129			      struct sk_psock_link *link,
 130			      struct bpf_map *map, void *link_raw)
 131{
 132	link->link_raw = link_raw;
 133	link->map = map;
 134	spin_lock_bh(&psock->link_lock);
 135	list_add_tail(&link->list, &psock->link);
 136	spin_unlock_bh(&psock->link_lock);
 137}
 138
 139static void sock_map_del_link(struct sock *sk,
 140			      struct sk_psock *psock, void *link_raw)
 141{
 142	bool strp_stop = false, verdict_stop = false;
 143	struct sk_psock_link *link, *tmp;
 144
 145	spin_lock_bh(&psock->link_lock);
 146	list_for_each_entry_safe(link, tmp, &psock->link, list) {
 147		if (link->link_raw == link_raw) {
 148			struct bpf_map *map = link->map;
 149			struct sk_psock_progs *progs = sock_map_progs(map);
 150
 151			if (psock->saved_data_ready && progs->stream_parser)
 152				strp_stop = true;
 153			if (psock->saved_data_ready && progs->stream_verdict)
 154				verdict_stop = true;
 155			if (psock->saved_data_ready && progs->skb_verdict)
 156				verdict_stop = true;
 157			list_del(&link->list);
 158			sk_psock_free_link(link);
 159		}
 160	}
 161	spin_unlock_bh(&psock->link_lock);
 162	if (strp_stop || verdict_stop) {
 163		write_lock_bh(&sk->sk_callback_lock);
 164		if (strp_stop)
 165			sk_psock_stop_strp(sk, psock);
 166		if (verdict_stop)
 167			sk_psock_stop_verdict(sk, psock);
 168
 169		if (psock->psock_update_sk_prot)
 170			psock->psock_update_sk_prot(sk, psock, false);
 171		write_unlock_bh(&sk->sk_callback_lock);
 172	}
 173}
 174
 175static void sock_map_unref(struct sock *sk, void *link_raw)
 176{
 177	struct sk_psock *psock = sk_psock(sk);
 178
 179	if (likely(psock)) {
 180		sock_map_del_link(sk, psock, link_raw);
 181		sk_psock_put(sk, psock);
 182	}
 183}
 184
 185static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
 186{
 187	if (!sk->sk_prot->psock_update_sk_prot)
 188		return -EINVAL;
 189	psock->psock_update_sk_prot = sk->sk_prot->psock_update_sk_prot;
 190	return sk->sk_prot->psock_update_sk_prot(sk, psock, false);
 191}
 192
 193static struct sk_psock *sock_map_psock_get_checked(struct sock *sk)
 194{
 195	struct sk_psock *psock;
 196
 197	rcu_read_lock();
 198	psock = sk_psock(sk);
 199	if (psock) {
 200		if (sk->sk_prot->close != sock_map_close) {
 201			psock = ERR_PTR(-EBUSY);
 202			goto out;
 203		}
 204
 205		if (!refcount_inc_not_zero(&psock->refcnt))
 206			psock = ERR_PTR(-EBUSY);
 207	}
 208out:
 209	rcu_read_unlock();
 210	return psock;
 211}
 212
 213static int sock_map_link(struct bpf_map *map, struct sock *sk)
 214{
 215	struct sk_psock_progs *progs = sock_map_progs(map);
 216	struct bpf_prog *stream_verdict = NULL;
 217	struct bpf_prog *stream_parser = NULL;
 218	struct bpf_prog *skb_verdict = NULL;
 219	struct bpf_prog *msg_parser = NULL;
 220	struct sk_psock *psock;
 221	int ret;
 222
 223	stream_verdict = READ_ONCE(progs->stream_verdict);
 224	if (stream_verdict) {
 225		stream_verdict = bpf_prog_inc_not_zero(stream_verdict);
 226		if (IS_ERR(stream_verdict))
 227			return PTR_ERR(stream_verdict);
 228	}
 229
 230	stream_parser = READ_ONCE(progs->stream_parser);
 231	if (stream_parser) {
 232		stream_parser = bpf_prog_inc_not_zero(stream_parser);
 233		if (IS_ERR(stream_parser)) {
 234			ret = PTR_ERR(stream_parser);
 235			goto out_put_stream_verdict;
 236		}
 237	}
 238
 239	msg_parser = READ_ONCE(progs->msg_parser);
 240	if (msg_parser) {
 241		msg_parser = bpf_prog_inc_not_zero(msg_parser);
 242		if (IS_ERR(msg_parser)) {
 243			ret = PTR_ERR(msg_parser);
 244			goto out_put_stream_parser;
 245		}
 246	}
 247
 248	skb_verdict = READ_ONCE(progs->skb_verdict);
 249	if (skb_verdict) {
 250		skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
 251		if (IS_ERR(skb_verdict)) {
 252			ret = PTR_ERR(skb_verdict);
 253			goto out_put_msg_parser;
 254		}
 255	}
 256
 257	psock = sock_map_psock_get_checked(sk);
 258	if (IS_ERR(psock)) {
 259		ret = PTR_ERR(psock);
 260		goto out_progs;
 261	}
 262
 263	if (psock) {
 264		if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
 265		    (stream_parser  && READ_ONCE(psock->progs.stream_parser)) ||
 266		    (skb_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
 267		    (skb_verdict && READ_ONCE(psock->progs.stream_verdict)) ||
 268		    (stream_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
 269		    (stream_verdict && READ_ONCE(psock->progs.stream_verdict))) {
 270			sk_psock_put(sk, psock);
 271			ret = -EBUSY;
 272			goto out_progs;
 273		}
 274	} else {
 275		psock = sk_psock_init(sk, map->numa_node);
 276		if (IS_ERR(psock)) {
 277			ret = PTR_ERR(psock);
 278			goto out_progs;
 279		}
 280	}
 281
 282	if (msg_parser)
 283		psock_set_prog(&psock->progs.msg_parser, msg_parser);
 284	if (stream_parser)
 285		psock_set_prog(&psock->progs.stream_parser, stream_parser);
 286	if (stream_verdict)
 287		psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
 288	if (skb_verdict)
 289		psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
 290
  291	/* msg_* and stream_* program references are tracked in psock after this
  292	 * point. Reference dec and cleanup will occur through the psock destructor.
  293	 */
 294	ret = sock_map_init_proto(sk, psock);
 295	if (ret < 0) {
 296		sk_psock_put(sk, psock);
 297		goto out;
 298	}
 299
 300	write_lock_bh(&sk->sk_callback_lock);
 301	if (stream_parser && stream_verdict && !psock->saved_data_ready) {
 302		ret = sk_psock_init_strp(sk, psock);
 303		if (ret) {
 304			write_unlock_bh(&sk->sk_callback_lock);
 305			sk_psock_put(sk, psock);
 306			goto out;
 307		}
 308		sk_psock_start_strp(sk, psock);
 309	} else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
  310		sk_psock_start_verdict(sk, psock);
 311	} else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) {
 312		sk_psock_start_verdict(sk, psock);
 313	}
 314	write_unlock_bh(&sk->sk_callback_lock);
 315	return 0;
 316out_progs:
 317	if (skb_verdict)
 318		bpf_prog_put(skb_verdict);
 319out_put_msg_parser:
 320	if (msg_parser)
 321		bpf_prog_put(msg_parser);
 322out_put_stream_parser:
 323	if (stream_parser)
 324		bpf_prog_put(stream_parser);
 325out_put_stream_verdict:
 326	if (stream_verdict)
 327		bpf_prog_put(stream_verdict);
 328out:
 329	return ret;
 330}
 331
 332static void sock_map_free(struct bpf_map *map)
 333{
 334	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 335	int i;
 336
  337	/* After the sync no updates or deletes will be in-flight, so it
  338	 * is safe to walk the map and remove entries without risking a
  339	 * race in the EEXIST update case.
  340	 */
 341	synchronize_rcu();
 342	for (i = 0; i < stab->map.max_entries; i++) {
 343		struct sock **psk = &stab->sks[i];
 344		struct sock *sk;
 345
 346		sk = xchg(psk, NULL);
 347		if (sk) {
 348			sock_hold(sk);
 349			lock_sock(sk);
 350			rcu_read_lock();
 351			sock_map_unref(sk, psk);
 352			rcu_read_unlock();
 353			release_sock(sk);
 354			sock_put(sk);
 355		}
 356	}
 357
 358	/* wait for psock readers accessing its map link */
 359	synchronize_rcu();
 360
 361	bpf_map_area_free(stab->sks);
 362	bpf_map_area_free(stab);
 363}
 364
 365static void sock_map_release_progs(struct bpf_map *map)
 366{
 367	psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
 368}
 369
 370static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
 371{
 372	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 373
 374	WARN_ON_ONCE(!rcu_read_lock_held());
 375
 376	if (unlikely(key >= map->max_entries))
 377		return NULL;
 378	return READ_ONCE(stab->sks[key]);
 379}
 380
 381static void *sock_map_lookup(struct bpf_map *map, void *key)
 382{
 383	struct sock *sk;
 384
 385	sk = __sock_map_lookup_elem(map, *(u32 *)key);
 386	if (!sk)
 387		return NULL;
 388	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
 389		return NULL;
 390	return sk;
 391}
 392
 393static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
 394{
 395	struct sock *sk;
 396
 397	if (map->value_size != sizeof(u64))
 398		return ERR_PTR(-ENOSPC);
 399
 400	sk = __sock_map_lookup_elem(map, *(u32 *)key);
 401	if (!sk)
 402		return ERR_PTR(-ENOENT);
 403
 404	__sock_gen_cookie(sk);
 405	return &sk->sk_cookie;
 406}
 407
 408static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
 409			     struct sock **psk)
 410{
 411	struct sock *sk;
 412	int err = 0;
 413
 414	spin_lock_bh(&stab->lock);
 415	sk = *psk;
 416	if (!sk_test || sk_test == sk)
 417		sk = xchg(psk, NULL);
 418
 419	if (likely(sk))
 420		sock_map_unref(sk, psk);
 421	else
 422		err = -EINVAL;
 423
 424	spin_unlock_bh(&stab->lock);
 425	return err;
 426}
 427
 428static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
 429				      void *link_raw)
 430{
 431	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 432
 433	__sock_map_delete(stab, sk, link_raw);
 434}
 435
 436static long sock_map_delete_elem(struct bpf_map *map, void *key)
 437{
 438	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 439	u32 i = *(u32 *)key;
 440	struct sock **psk;
 441
 442	if (unlikely(i >= map->max_entries))
 443		return -EINVAL;
 444
 445	psk = &stab->sks[i];
 446	return __sock_map_delete(stab, NULL, psk);
 447}
 448
 449static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
 450{
 451	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 452	u32 i = key ? *(u32 *)key : U32_MAX;
 453	u32 *key_next = next;
 454
 455	if (i == stab->map.max_entries - 1)
 456		return -ENOENT;
 457	if (i >= stab->map.max_entries)
 458		*key_next = 0;
 459	else
 460		*key_next = i + 1;
 461	return 0;
 462}
 463
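/* Illustrative user-space walk over sock_map_get_next_key() (a sketch
 * with a hypothetical helper): a NULL or out-of-range key restarts from
 * slot 0, -ENOENT marks the end, and note the walk enumerates every
 * index up to max_entries - 1, occupied or not.
 */
#include <bpf/bpf.h>
#include <stdio.h>

static void dump_slots(int map_fd)
{
	__u32 key, next;
	int err = bpf_map_get_next_key(map_fd, NULL, &next);

	while (!err) {
		printf("slot %u\n", next);
		key = next;
		err = bpf_map_get_next_key(map_fd, &key, &next);
	}
}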
 464static int sock_map_update_common(struct bpf_map *map, u32 idx,
 465				  struct sock *sk, u64 flags)
 466{
 467	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 468	struct sk_psock_link *link;
 469	struct sk_psock *psock;
 470	struct sock *osk;
 471	int ret;
 472
 473	WARN_ON_ONCE(!rcu_read_lock_held());
 474	if (unlikely(flags > BPF_EXIST))
 475		return -EINVAL;
 476	if (unlikely(idx >= map->max_entries))
 477		return -E2BIG;
 478
 479	link = sk_psock_init_link();
 480	if (!link)
 481		return -ENOMEM;
 482
 483	ret = sock_map_link(map, sk);
 484	if (ret < 0)
 485		goto out_free;
 486
 487	psock = sk_psock(sk);
 488	WARN_ON_ONCE(!psock);
 489
 490	spin_lock_bh(&stab->lock);
 491	osk = stab->sks[idx];
 492	if (osk && flags == BPF_NOEXIST) {
 493		ret = -EEXIST;
 494		goto out_unlock;
 495	} else if (!osk && flags == BPF_EXIST) {
 496		ret = -ENOENT;
 497		goto out_unlock;
 498	}
 499
 500	sock_map_add_link(psock, link, map, &stab->sks[idx]);
 501	stab->sks[idx] = sk;
 502	if (osk)
 503		sock_map_unref(osk, &stab->sks[idx]);
 504	spin_unlock_bh(&stab->lock);
 505	return 0;
 506out_unlock:
 507	spin_unlock_bh(&stab->lock);
 508	if (psock)
 509		sk_psock_put(sk, psock);
 510out_free:
 511	sk_psock_free_link(link);
 512	return ret;
 513}
 514
 515static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
 516{
 517	return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
 518	       ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
 519	       ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
 520}
 521
 522static bool sock_map_redirect_allowed(const struct sock *sk)
 523{
 524	if (sk_is_tcp(sk))
 525		return sk->sk_state != TCP_LISTEN;
 526	else
 527		return sk->sk_state == TCP_ESTABLISHED;
 528}
 529
 530static bool sock_map_sk_is_suitable(const struct sock *sk)
 531{
 532	return !!sk->sk_prot->psock_update_sk_prot;
 533}
 534
 535static bool sock_map_sk_state_allowed(const struct sock *sk)
 536{
 537	if (sk_is_tcp(sk))
 538		return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
 539	if (sk_is_stream_unix(sk))
 540		return (1 << sk->sk_state) & TCPF_ESTABLISHED;
 541	return true;
 542}
 543
 544static int sock_hash_update_common(struct bpf_map *map, void *key,
 545				   struct sock *sk, u64 flags);
 546
 547int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
 548			     u64 flags)
 549{
 550	struct socket *sock;
 551	struct sock *sk;
 552	int ret;
 553	u64 ufd;
 554
 555	if (map->value_size == sizeof(u64))
 556		ufd = *(u64 *)value;
 557	else
 558		ufd = *(u32 *)value;
 559	if (ufd > S32_MAX)
 560		return -EINVAL;
 561
 562	sock = sockfd_lookup(ufd, &ret);
 563	if (!sock)
 564		return ret;
 565	sk = sock->sk;
 566	if (!sk) {
 567		ret = -EINVAL;
 568		goto out;
 569	}
 570	if (!sock_map_sk_is_suitable(sk)) {
 571		ret = -EOPNOTSUPP;
 572		goto out;
 573	}
 574
 575	sock_map_sk_acquire(sk);
 576	if (!sock_map_sk_state_allowed(sk))
 577		ret = -EOPNOTSUPP;
 578	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
 579		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
 580	else
 581		ret = sock_hash_update_common(map, key, sk, flags);
 582	sock_map_sk_release(sk);
 583out:
 584	sockfd_put(sock);
 585	return ret;
 586}
 587
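/* Illustrative syscall-side update handled by sock_map_update_elem_sys()
 * above (a sketch with hypothetical names): the value written from user
 * space is the socket's fd, and per sock_map_sk_state_allowed() a TCP
 * socket must be ESTABLISHED or LISTEN or the update fails -EOPNOTSUPP.
 */
#include <bpf/bpf.h>

static int sockmap_add(int map_fd, __u32 slot, int sock_fd)
{
	__u64 value = sock_fd;	/* fd widened to the map's 8-byte value */

	return bpf_map_update_elem(map_fd, &slot, &value, BPF_ANY);
}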
 588static long sock_map_update_elem(struct bpf_map *map, void *key,
 589				 void *value, u64 flags)
 590{
 591	struct sock *sk = (struct sock *)value;
 592	int ret;
 593
 594	if (unlikely(!sk || !sk_fullsock(sk)))
 595		return -EINVAL;
 596
 597	if (!sock_map_sk_is_suitable(sk))
 598		return -EOPNOTSUPP;
 599
 600	local_bh_disable();
 601	bh_lock_sock(sk);
 602	if (!sock_map_sk_state_allowed(sk))
 603		ret = -EOPNOTSUPP;
 604	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
 605		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
 606	else
 607		ret = sock_hash_update_common(map, key, sk, flags);
 608	bh_unlock_sock(sk);
 609	local_bh_enable();
 610	return ret;
 611}
 612
 613BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
 614	   struct bpf_map *, map, void *, key, u64, flags)
 615{
 616	WARN_ON_ONCE(!rcu_read_lock_held());
 617
 618	if (likely(sock_map_sk_is_suitable(sops->sk) &&
 619		   sock_map_op_okay(sops)))
 620		return sock_map_update_common(map, *(u32 *)key, sops->sk,
 621					      flags);
 622	return -EOPNOTSUPP;
 623}
 624
 625const struct bpf_func_proto bpf_sock_map_update_proto = {
 626	.func		= bpf_sock_map_update,
 627	.gpl_only	= false,
 628	.pkt_access	= true,
 629	.ret_type	= RET_INTEGER,
 630	.arg1_type	= ARG_PTR_TO_CTX,
 631	.arg2_type	= ARG_CONST_MAP_PTR,
 632	.arg3_type	= ARG_PTR_TO_MAP_KEY,
 633	.arg4_type	= ARG_ANYTHING,
 634};
 635
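/* Illustrative sockops user of bpf_sock_map_update() above (a sketch;
 * the map name and fixed slot are assumptions): inserts each newly
 * established TCP connection into the map.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 16);
	__type(key, __u32);
	__type(value, __u64);
} sock_ops_map SEC(".maps");

SEC("sockops")
int prog_sockops(struct bpf_sock_ops *skops)
{
	__u32 slot = 0;

	switch (skops->op) {
	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		/* these ops pass the sock_map_op_okay() check */
		bpf_sock_map_update(skops, &sock_ops_map, &slot, BPF_ANY);
		break;
	}
	return 0;
}

char _license[] SEC("license") = "GPL";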
 636BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
 637	   struct bpf_map *, map, u32, key, u64, flags)
 638{
 639	struct sock *sk;
 640
 641	if (unlikely(flags & ~(BPF_F_INGRESS)))
 642		return SK_DROP;
 643
 644	sk = __sock_map_lookup_elem(map, key);
 645	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
 646		return SK_DROP;
 647
 648	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
 649	return SK_PASS;
 650}
 651
 652const struct bpf_func_proto bpf_sk_redirect_map_proto = {
 653	.func           = bpf_sk_redirect_map,
 654	.gpl_only       = false,
 655	.ret_type       = RET_INTEGER,
 656	.arg1_type	= ARG_PTR_TO_CTX,
 657	.arg2_type      = ARG_CONST_MAP_PTR,
 658	.arg3_type      = ARG_ANYTHING,
 659	.arg4_type      = ARG_ANYTHING,
 660};
 661
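/* Illustrative BPF-side user of the helper above: a minimal stream
 * verdict program (a sketch; the map name and slot choice are
 * assumptions, not part of this file).
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 16);
	__type(key, __u32);
	__type(value, __u64);
} sock_map SEC(".maps");

SEC("sk_skb/stream_verdict")
int prog_stream_verdict(struct __sk_buff *skb)
{
	__u32 slot = 0;

	/* redirect every skb to the socket in slot 0 (egress; pass
	 * BPF_F_INGRESS as the last argument for the ingress path)
	 */
	return bpf_sk_redirect_map(skb, &sock_map, slot, 0);
}

char _license[] SEC("license") = "GPL";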
 662BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
 663	   struct bpf_map *, map, u32, key, u64, flags)
 664{
 665	struct sock *sk;
 666
 667	if (unlikely(flags & ~(BPF_F_INGRESS)))
 668		return SK_DROP;
 669
 670	sk = __sock_map_lookup_elem(map, key);
 671	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
 672		return SK_DROP;
 673	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
 674		return SK_DROP;
 675
 676	msg->flags = flags;
 677	msg->sk_redir = sk;
 678	return SK_PASS;
 679}
 680
 681const struct bpf_func_proto bpf_msg_redirect_map_proto = {
 682	.func           = bpf_msg_redirect_map,
 683	.gpl_only       = false,
 684	.ret_type       = RET_INTEGER,
 685	.arg1_type	= ARG_PTR_TO_CTX,
 686	.arg2_type      = ARG_CONST_MAP_PTR,
 687	.arg3_type      = ARG_ANYTHING,
 688	.arg4_type      = ARG_ANYTHING,
 689};
 690
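/* Illustrative sk_msg counterpart (a sketch extending the verdict
 * example above, which supplies the includes, the assumed sock_map and
 * the license): it redirects sendmsg() payloads rather than received
 * skbs, mirroring bpf_msg_redirect_map() above.
 */
SEC("sk_msg")
int prog_msg_verdict(struct sk_msg_md *msg)
{
	__u32 slot = 0;

	return bpf_msg_redirect_map(msg, &sock_map, slot, 0);
}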
 691struct sock_map_seq_info {
 692	struct bpf_map *map;
 693	struct sock *sk;
 694	u32 index;
 695};
 696
 697struct bpf_iter__sockmap {
 698	__bpf_md_ptr(struct bpf_iter_meta *, meta);
 699	__bpf_md_ptr(struct bpf_map *, map);
 700	__bpf_md_ptr(void *, key);
 701	__bpf_md_ptr(struct sock *, sk);
 702};
 703
 704DEFINE_BPF_ITER_FUNC(sockmap, struct bpf_iter_meta *meta,
 705		     struct bpf_map *map, void *key,
 706		     struct sock *sk)
 707
 708static void *sock_map_seq_lookup_elem(struct sock_map_seq_info *info)
 709{
 710	if (unlikely(info->index >= info->map->max_entries))
 711		return NULL;
 712
 713	info->sk = __sock_map_lookup_elem(info->map, info->index);
 714
 715	/* can't return sk directly, since that might be NULL */
 716	return info;
 717}
 718
 719static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos)
 720	__acquires(rcu)
 721{
 722	struct sock_map_seq_info *info = seq->private;
 723
 724	if (*pos == 0)
 725		++*pos;
 726
 727	/* pairs with sock_map_seq_stop */
 728	rcu_read_lock();
 729	return sock_map_seq_lookup_elem(info);
 730}
 731
 732static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 733	__must_hold(rcu)
 734{
 735	struct sock_map_seq_info *info = seq->private;
 736
 737	++*pos;
 738	++info->index;
 739
 740	return sock_map_seq_lookup_elem(info);
 741}
 742
 743static int sock_map_seq_show(struct seq_file *seq, void *v)
 744	__must_hold(rcu)
 745{
 746	struct sock_map_seq_info *info = seq->private;
 747	struct bpf_iter__sockmap ctx = {};
 748	struct bpf_iter_meta meta;
 749	struct bpf_prog *prog;
 750
 751	meta.seq = seq;
 752	prog = bpf_iter_get_info(&meta, !v);
 753	if (!prog)
 754		return 0;
 755
 756	ctx.meta = &meta;
 757	ctx.map = info->map;
 758	if (v) {
 759		ctx.key = &info->index;
 760		ctx.sk = info->sk;
 761	}
 762
 763	return bpf_iter_run_prog(prog, &ctx);
 764}
 765
 766static void sock_map_seq_stop(struct seq_file *seq, void *v)
 767	__releases(rcu)
 768{
 769	if (!v)
 770		(void)sock_map_seq_show(seq, NULL);
 771
 772	/* pairs with sock_map_seq_start */
 773	rcu_read_unlock();
 774}
 775
 776static const struct seq_operations sock_map_seq_ops = {
 777	.start	= sock_map_seq_start,
 778	.next	= sock_map_seq_next,
 779	.stop	= sock_map_seq_stop,
 780	.show	= sock_map_seq_show,
 781};
 782
 783static int sock_map_init_seq_private(void *priv_data,
 784				     struct bpf_iter_aux_info *aux)
 785{
 786	struct sock_map_seq_info *info = priv_data;
 787
 788	bpf_map_inc_with_uref(aux->map);
 789	info->map = aux->map;
 790	return 0;
 791}
 792
 793static void sock_map_fini_seq_private(void *priv_data)
 794{
 795	struct sock_map_seq_info *info = priv_data;
 796
 797	bpf_map_put_with_uref(info->map);
 798}
 799
 800static u64 sock_map_mem_usage(const struct bpf_map *map)
 801{
 802	u64 usage = sizeof(struct bpf_stab);
 803
 804	usage += (u64)map->max_entries * sizeof(struct sock *);
 805	return usage;
 806}
 807
 808static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
 809	.seq_ops		= &sock_map_seq_ops,
 810	.init_seq_private	= sock_map_init_seq_private,
 811	.fini_seq_private	= sock_map_fini_seq_private,
 812	.seq_priv_size		= sizeof(struct sock_map_seq_info),
 813};
 814
 815BTF_ID_LIST_SINGLE(sock_map_btf_ids, struct, bpf_stab)
 816const struct bpf_map_ops sock_map_ops = {
 817	.map_meta_equal		= bpf_map_meta_equal,
 818	.map_alloc		= sock_map_alloc,
 819	.map_free		= sock_map_free,
 820	.map_get_next_key	= sock_map_get_next_key,
 821	.map_lookup_elem_sys_only = sock_map_lookup_sys,
 822	.map_update_elem	= sock_map_update_elem,
 823	.map_delete_elem	= sock_map_delete_elem,
 824	.map_lookup_elem	= sock_map_lookup,
 825	.map_release_uref	= sock_map_release_progs,
 826	.map_check_btf		= map_check_no_btf,
 827	.map_mem_usage		= sock_map_mem_usage,
 828	.map_btf_id		= &sock_map_btf_ids[0],
 829	.iter_seq_info		= &sock_map_iter_seq_info,
 830};
 831
 832struct bpf_shtab_elem {
 833	struct rcu_head rcu;
 834	u32 hash;
 835	struct sock *sk;
 836	struct hlist_node node;
 837	u8 key[];
 838};
 839
 840struct bpf_shtab_bucket {
 841	struct hlist_head head;
 842	spinlock_t lock;
 843};
 844
 845struct bpf_shtab {
 846	struct bpf_map map;
 847	struct bpf_shtab_bucket *buckets;
 848	u32 buckets_num;
 849	u32 elem_size;
 850	struct sk_psock_progs progs;
 851	atomic_t count;
 852};
 853
 854static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
 855{
 856	return jhash(key, len, 0);
 857}
 858
 859static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab,
 860							u32 hash)
 861{
 862	return &htab->buckets[hash & (htab->buckets_num - 1)];
 863}
 864
 865static struct bpf_shtab_elem *
 866sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
 867			  u32 key_size)
 868{
 869	struct bpf_shtab_elem *elem;
 870
 871	hlist_for_each_entry_rcu(elem, head, node) {
 872		if (elem->hash == hash &&
 873		    !memcmp(&elem->key, key, key_size))
 874			return elem;
 875	}
 876
 877	return NULL;
 878}
 879
 880static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
 881{
 882	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 883	u32 key_size = map->key_size, hash;
 884	struct bpf_shtab_bucket *bucket;
 885	struct bpf_shtab_elem *elem;
 886
 887	WARN_ON_ONCE(!rcu_read_lock_held());
 888
 889	hash = sock_hash_bucket_hash(key, key_size);
 890	bucket = sock_hash_select_bucket(htab, hash);
 891	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
 892
 893	return elem ? elem->sk : NULL;
 894}
 895
 896static void sock_hash_free_elem(struct bpf_shtab *htab,
 897				struct bpf_shtab_elem *elem)
 898{
 899	atomic_dec(&htab->count);
 900	kfree_rcu(elem, rcu);
 901}
 902
 903static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
 904				       void *link_raw)
 905{
 906	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 907	struct bpf_shtab_elem *elem_probe, *elem = link_raw;
 908	struct bpf_shtab_bucket *bucket;
 909
 910	WARN_ON_ONCE(!rcu_read_lock_held());
 911	bucket = sock_hash_select_bucket(htab, elem->hash);
 912
  913	/* elem may be deleted in parallel from the map, but access here
  914	 * is okay since it's going away only after an RCU grace period.
  915	 * However, we need to check whether it's still present.
  916	 */
 917	spin_lock_bh(&bucket->lock);
 918	elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
 919					       elem->key, map->key_size);
 920	if (elem_probe && elem_probe == elem) {
 921		hlist_del_rcu(&elem->node);
 922		sock_map_unref(elem->sk, elem);
 923		sock_hash_free_elem(htab, elem);
 924	}
 925	spin_unlock_bh(&bucket->lock);
 926}
 927
 928static long sock_hash_delete_elem(struct bpf_map *map, void *key)
 929{
 930	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 931	u32 hash, key_size = map->key_size;
 932	struct bpf_shtab_bucket *bucket;
 933	struct bpf_shtab_elem *elem;
 934	int ret = -ENOENT;
 935
 936	hash = sock_hash_bucket_hash(key, key_size);
 937	bucket = sock_hash_select_bucket(htab, hash);
 938
 939	spin_lock_bh(&bucket->lock);
 940	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
 941	if (elem) {
 942		hlist_del_rcu(&elem->node);
 943		sock_map_unref(elem->sk, elem);
 944		sock_hash_free_elem(htab, elem);
 945		ret = 0;
 946	}
 947	spin_unlock_bh(&bucket->lock);
 948	return ret;
 949}
 950
 951static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
 952						   void *key, u32 key_size,
 953						   u32 hash, struct sock *sk,
 954						   struct bpf_shtab_elem *old)
 955{
 956	struct bpf_shtab_elem *new;
 957
 958	if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
 959		if (!old) {
 960			atomic_dec(&htab->count);
 961			return ERR_PTR(-E2BIG);
 962		}
 963	}
 964
 965	new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
 966				   GFP_ATOMIC | __GFP_NOWARN,
 967				   htab->map.numa_node);
 968	if (!new) {
 969		atomic_dec(&htab->count);
 970		return ERR_PTR(-ENOMEM);
 971	}
 972	memcpy(new->key, key, key_size);
 973	new->sk = sk;
 974	new->hash = hash;
 975	return new;
 976}
 977
 978static int sock_hash_update_common(struct bpf_map *map, void *key,
 979				   struct sock *sk, u64 flags)
 980{
 981	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 982	u32 key_size = map->key_size, hash;
 983	struct bpf_shtab_elem *elem, *elem_new;
 984	struct bpf_shtab_bucket *bucket;
 985	struct sk_psock_link *link;
 986	struct sk_psock *psock;
 987	int ret;
 988
 989	WARN_ON_ONCE(!rcu_read_lock_held());
 990	if (unlikely(flags > BPF_EXIST))
 991		return -EINVAL;
 992
 993	link = sk_psock_init_link();
 994	if (!link)
 995		return -ENOMEM;
 996
 997	ret = sock_map_link(map, sk);
 998	if (ret < 0)
 999		goto out_free;
1000
1001	psock = sk_psock(sk);
1002	WARN_ON_ONCE(!psock);
1003
1004	hash = sock_hash_bucket_hash(key, key_size);
1005	bucket = sock_hash_select_bucket(htab, hash);
1006
1007	spin_lock_bh(&bucket->lock);
1008	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
1009	if (elem && flags == BPF_NOEXIST) {
1010		ret = -EEXIST;
1011		goto out_unlock;
1012	} else if (!elem && flags == BPF_EXIST) {
1013		ret = -ENOENT;
1014		goto out_unlock;
1015	}
1016
1017	elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
1018	if (IS_ERR(elem_new)) {
1019		ret = PTR_ERR(elem_new);
1020		goto out_unlock;
1021	}
1022
1023	sock_map_add_link(psock, link, map, elem_new);
 1024	/* Add the new element to the head of the list, so that a
 1025	 * concurrent search will find it before the old element.
 1026	 */
1027	hlist_add_head_rcu(&elem_new->node, &bucket->head);
1028	if (elem) {
1029		hlist_del_rcu(&elem->node);
1030		sock_map_unref(elem->sk, elem);
1031		sock_hash_free_elem(htab, elem);
1032	}
1033	spin_unlock_bh(&bucket->lock);
1034	return 0;
1035out_unlock:
1036	spin_unlock_bh(&bucket->lock);
1037	sk_psock_put(sk, psock);
1038out_free:
1039	sk_psock_free_link(link);
1040	return ret;
1041}
1042
1043static int sock_hash_get_next_key(struct bpf_map *map, void *key,
1044				  void *key_next)
1045{
1046	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1047	struct bpf_shtab_elem *elem, *elem_next;
1048	u32 hash, key_size = map->key_size;
1049	struct hlist_head *head;
1050	int i = 0;
1051
1052	if (!key)
1053		goto find_first_elem;
1054	hash = sock_hash_bucket_hash(key, key_size);
1055	head = &sock_hash_select_bucket(htab, hash)->head;
1056	elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
1057	if (!elem)
1058		goto find_first_elem;
1059
1060	elem_next = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&elem->node)),
1061				     struct bpf_shtab_elem, node);
1062	if (elem_next) {
1063		memcpy(key_next, elem_next->key, key_size);
1064		return 0;
1065	}
1066
1067	i = hash & (htab->buckets_num - 1);
1068	i++;
1069find_first_elem:
1070	for (; i < htab->buckets_num; i++) {
1071		head = &sock_hash_select_bucket(htab, i)->head;
1072		elem_next = hlist_entry_safe(rcu_dereference(hlist_first_rcu(head)),
1073					     struct bpf_shtab_elem, node);
1074		if (elem_next) {
1075			memcpy(key_next, elem_next->key, key_size);
1076			return 0;
1077		}
1078	}
1079
1080	return -ENOENT;
1081}
1082
1083static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
1084{
1085	struct bpf_shtab *htab;
1086	int i, err;
1087
1088	if (attr->max_entries == 0 ||
1089	    attr->key_size    == 0 ||
1090	    (attr->value_size != sizeof(u32) &&
1091	     attr->value_size != sizeof(u64)) ||
1092	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
1093		return ERR_PTR(-EINVAL);
1094	if (attr->key_size > MAX_BPF_STACK)
1095		return ERR_PTR(-E2BIG);
1096
1097	htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
1098	if (!htab)
1099		return ERR_PTR(-ENOMEM);
1100
1101	bpf_map_init_from_attr(&htab->map, attr);
1102
1103	htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
1104	htab->elem_size = sizeof(struct bpf_shtab_elem) +
1105			  round_up(htab->map.key_size, 8);
1106	if (htab->buckets_num == 0 ||
1107	    htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) {
1108		err = -EINVAL;
1109		goto free_htab;
1110	}
1111
1112	htab->buckets = bpf_map_area_alloc(htab->buckets_num *
1113					   sizeof(struct bpf_shtab_bucket),
1114					   htab->map.numa_node);
1115	if (!htab->buckets) {
1116		err = -ENOMEM;
1117		goto free_htab;
1118	}
1119
1120	for (i = 0; i < htab->buckets_num; i++) {
1121		INIT_HLIST_HEAD(&htab->buckets[i].head);
1122		spin_lock_init(&htab->buckets[i].lock);
1123	}
1124
1125	return &htab->map;
1126free_htab:
1127	bpf_map_area_free(htab);
1128	return ERR_PTR(err);
1129}
1130
1131static void sock_hash_free(struct bpf_map *map)
1132{
1133	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1134	struct bpf_shtab_bucket *bucket;
1135	struct hlist_head unlink_list;
1136	struct bpf_shtab_elem *elem;
1137	struct hlist_node *node;
1138	int i;
1139
 1140	/* After the sync no updates or deletes will be in-flight, so it
 1141	 * is safe to walk the map and remove entries without risking a
 1142	 * race in the EEXIST update case.
 1143	 */
1144	synchronize_rcu();
1145	for (i = 0; i < htab->buckets_num; i++) {
1146		bucket = sock_hash_select_bucket(htab, i);
1147
 1148		/* We are racing with sock_hash_delete_from_link to
 1149		 * enter the spin-lock critical section. Every socket on
 1150		 * the list is still linked to the sockhash. Since the
 1151		 * link exists, the psock exists and holds a ref to the
 1152		 * socket. That lets us grab a socket ref too.
 1153		 */
1154		spin_lock_bh(&bucket->lock);
1155		hlist_for_each_entry(elem, &bucket->head, node)
1156			sock_hold(elem->sk);
1157		hlist_move_list(&bucket->head, &unlink_list);
1158		spin_unlock_bh(&bucket->lock);
1159
 1160		/* Process removed entries out of atomic context so we
 1161		 * can block on the socket lock before deleting the
 1162		 * psock's link to the sockhash.
 1163		 */
1164		hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
1165			hlist_del(&elem->node);
1166			lock_sock(elem->sk);
1167			rcu_read_lock();
1168			sock_map_unref(elem->sk, elem);
1169			rcu_read_unlock();
1170			release_sock(elem->sk);
1171			sock_put(elem->sk);
1172			sock_hash_free_elem(htab, elem);
1173		}
1174	}
1175
1176	/* wait for psock readers accessing its map link */
1177	synchronize_rcu();
1178
1179	bpf_map_area_free(htab->buckets);
1180	bpf_map_area_free(htab);
1181}
1182
1183static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
1184{
1185	struct sock *sk;
1186
1187	if (map->value_size != sizeof(u64))
1188		return ERR_PTR(-ENOSPC);
1189
1190	sk = __sock_hash_lookup_elem(map, key);
1191	if (!sk)
1192		return ERR_PTR(-ENOENT);
1193
1194	__sock_gen_cookie(sk);
1195	return &sk->sk_cookie;
1196}
1197
1198static void *sock_hash_lookup(struct bpf_map *map, void *key)
1199{
1200	struct sock *sk;
1201
1202	sk = __sock_hash_lookup_elem(map, key);
1203	if (!sk)
1204		return NULL;
1205	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
1206		return NULL;
1207	return sk;
1208}
1209
1210static void sock_hash_release_progs(struct bpf_map *map)
1211{
1212	psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs);
1213}
1214
1215BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
1216	   struct bpf_map *, map, void *, key, u64, flags)
1217{
1218	WARN_ON_ONCE(!rcu_read_lock_held());
1219
1220	if (likely(sock_map_sk_is_suitable(sops->sk) &&
1221		   sock_map_op_okay(sops)))
1222		return sock_hash_update_common(map, key, sops->sk, flags);
1223	return -EOPNOTSUPP;
1224}
1225
1226const struct bpf_func_proto bpf_sock_hash_update_proto = {
1227	.func		= bpf_sock_hash_update,
1228	.gpl_only	= false,
1229	.pkt_access	= true,
1230	.ret_type	= RET_INTEGER,
1231	.arg1_type	= ARG_PTR_TO_CTX,
1232	.arg2_type	= ARG_CONST_MAP_PTR,
1233	.arg3_type	= ARG_PTR_TO_MAP_KEY,
1234	.arg4_type	= ARG_ANYTHING,
1235};
1236
1237BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
1238	   struct bpf_map *, map, void *, key, u64, flags)
1239{
1240	struct sock *sk;
1241
1242	if (unlikely(flags & ~(BPF_F_INGRESS)))
1243		return SK_DROP;
1244
1245	sk = __sock_hash_lookup_elem(map, key);
1246	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1247		return SK_DROP;
1248
1249	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
1250	return SK_PASS;
1251}
1252
1253const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
1254	.func           = bpf_sk_redirect_hash,
1255	.gpl_only       = false,
1256	.ret_type       = RET_INTEGER,
1257	.arg1_type	= ARG_PTR_TO_CTX,
1258	.arg2_type      = ARG_CONST_MAP_PTR,
1259	.arg3_type      = ARG_PTR_TO_MAP_KEY,
1260	.arg4_type      = ARG_ANYTHING,
1261};
1262
1263BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
1264	   struct bpf_map *, map, void *, key, u64, flags)
1265{
1266	struct sock *sk;
1267
1268	if (unlikely(flags & ~(BPF_F_INGRESS)))
1269		return SK_DROP;
1270
1271	sk = __sock_hash_lookup_elem(map, key);
1272	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1273		return SK_DROP;
1274	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
1275		return SK_DROP;
1276
1277	msg->flags = flags;
1278	msg->sk_redir = sk;
1279	return SK_PASS;
1280}
1281
1282const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
1283	.func           = bpf_msg_redirect_hash,
1284	.gpl_only       = false,
1285	.ret_type       = RET_INTEGER,
1286	.arg1_type	= ARG_PTR_TO_CTX,
1287	.arg2_type      = ARG_CONST_MAP_PTR,
1288	.arg3_type      = ARG_PTR_TO_MAP_KEY,
1289	.arg4_type      = ARG_ANYTHING,
1290};
1291
1292struct sock_hash_seq_info {
1293	struct bpf_map *map;
1294	struct bpf_shtab *htab;
1295	u32 bucket_id;
1296};
1297
1298static void *sock_hash_seq_find_next(struct sock_hash_seq_info *info,
1299				     struct bpf_shtab_elem *prev_elem)
1300{
1301	const struct bpf_shtab *htab = info->htab;
1302	struct bpf_shtab_bucket *bucket;
1303	struct bpf_shtab_elem *elem;
1304	struct hlist_node *node;
1305
1306	/* try to find next elem in the same bucket */
1307	if (prev_elem) {
1308		node = rcu_dereference(hlist_next_rcu(&prev_elem->node));
1309		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
1310		if (elem)
1311			return elem;
1312
1313		/* no more elements, continue in the next bucket */
1314		info->bucket_id++;
1315	}
1316
1317	for (; info->bucket_id < htab->buckets_num; info->bucket_id++) {
1318		bucket = &htab->buckets[info->bucket_id];
1319		node = rcu_dereference(hlist_first_rcu(&bucket->head));
1320		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
1321		if (elem)
1322			return elem;
1323	}
1324
1325	return NULL;
1326}
1327
1328static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos)
1329	__acquires(rcu)
1330{
1331	struct sock_hash_seq_info *info = seq->private;
1332
1333	if (*pos == 0)
1334		++*pos;
1335
1336	/* pairs with sock_hash_seq_stop */
1337	rcu_read_lock();
1338	return sock_hash_seq_find_next(info, NULL);
1339}
1340
1341static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1342	__must_hold(rcu)
1343{
1344	struct sock_hash_seq_info *info = seq->private;
1345
1346	++*pos;
1347	return sock_hash_seq_find_next(info, v);
1348}
1349
1350static int sock_hash_seq_show(struct seq_file *seq, void *v)
1351	__must_hold(rcu)
1352{
1353	struct sock_hash_seq_info *info = seq->private;
1354	struct bpf_iter__sockmap ctx = {};
1355	struct bpf_shtab_elem *elem = v;
1356	struct bpf_iter_meta meta;
1357	struct bpf_prog *prog;
1358
1359	meta.seq = seq;
1360	prog = bpf_iter_get_info(&meta, !elem);
1361	if (!prog)
1362		return 0;
1363
1364	ctx.meta = &meta;
1365	ctx.map = info->map;
1366	if (elem) {
1367		ctx.key = elem->key;
1368		ctx.sk = elem->sk;
1369	}
1370
1371	return bpf_iter_run_prog(prog, &ctx);
1372}
1373
1374static void sock_hash_seq_stop(struct seq_file *seq, void *v)
1375	__releases(rcu)
1376{
1377	if (!v)
1378		(void)sock_hash_seq_show(seq, NULL);
1379
1380	/* pairs with sock_hash_seq_start */
1381	rcu_read_unlock();
1382}
1383
1384static const struct seq_operations sock_hash_seq_ops = {
1385	.start	= sock_hash_seq_start,
1386	.next	= sock_hash_seq_next,
1387	.stop	= sock_hash_seq_stop,
1388	.show	= sock_hash_seq_show,
1389};
1390
1391static int sock_hash_init_seq_private(void *priv_data,
1392				      struct bpf_iter_aux_info *aux)
1393{
1394	struct sock_hash_seq_info *info = priv_data;
1395
1396	bpf_map_inc_with_uref(aux->map);
1397	info->map = aux->map;
1398	info->htab = container_of(aux->map, struct bpf_shtab, map);
1399	return 0;
1400}
1401
1402static void sock_hash_fini_seq_private(void *priv_data)
1403{
1404	struct sock_hash_seq_info *info = priv_data;
1405
1406	bpf_map_put_with_uref(info->map);
1407}
1408
1409static u64 sock_hash_mem_usage(const struct bpf_map *map)
1410{
1411	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1412	u64 usage = sizeof(*htab);
1413
1414	usage += htab->buckets_num * sizeof(struct bpf_shtab_bucket);
1415	usage += atomic_read(&htab->count) * (u64)htab->elem_size;
1416	return usage;
1417}
1418
1419static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
1420	.seq_ops		= &sock_hash_seq_ops,
1421	.init_seq_private	= sock_hash_init_seq_private,
1422	.fini_seq_private	= sock_hash_fini_seq_private,
1423	.seq_priv_size		= sizeof(struct sock_hash_seq_info),
1424};
1425
1426BTF_ID_LIST_SINGLE(sock_hash_map_btf_ids, struct, bpf_shtab)
1427const struct bpf_map_ops sock_hash_ops = {
1428	.map_meta_equal		= bpf_map_meta_equal,
1429	.map_alloc		= sock_hash_alloc,
1430	.map_free		= sock_hash_free,
1431	.map_get_next_key	= sock_hash_get_next_key,
1432	.map_update_elem	= sock_map_update_elem,
1433	.map_delete_elem	= sock_hash_delete_elem,
1434	.map_lookup_elem	= sock_hash_lookup,
1435	.map_lookup_elem_sys_only = sock_hash_lookup_sys,
1436	.map_release_uref	= sock_hash_release_progs,
1437	.map_check_btf		= map_check_no_btf,
1438	.map_mem_usage		= sock_hash_mem_usage,
1439	.map_btf_id		= &sock_hash_map_btf_ids[0],
1440	.iter_seq_info		= &sock_hash_iter_seq_info,
1441};
1442
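/* Illustrative sockhash creation (a sketch with a hypothetical key
 * struct): unlike sockmap's fixed 4-byte index, the key may be any blob
 * from 1 byte up to MAX_BPF_STACK bytes, e.g. a connection 4-tuple.
 * Assumes libbpf >= 0.7 for bpf_map_create().
 */
#include <bpf/bpf.h>

struct flow_key {
	__u32 saddr, daddr;
	__u16 sport, dport;
};

static int create_sockhash(void)
{
	return bpf_map_create(BPF_MAP_TYPE_SOCKHASH, "example_hash",
			      sizeof(struct flow_key), sizeof(__u64),
			      1024, NULL);
}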
1443static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
1444{
1445	switch (map->map_type) {
1446	case BPF_MAP_TYPE_SOCKMAP:
1447		return &container_of(map, struct bpf_stab, map)->progs;
1448	case BPF_MAP_TYPE_SOCKHASH:
1449		return &container_of(map, struct bpf_shtab, map)->progs;
1450	default:
1451		break;
1452	}
1453
1454	return NULL;
1455}
1456
1457static int sock_map_prog_lookup(struct bpf_map *map, struct bpf_prog ***pprog,
1458				u32 which)
1459{
1460	struct sk_psock_progs *progs = sock_map_progs(map);
1461
1462	if (!progs)
1463		return -EOPNOTSUPP;
1464
1465	switch (which) {
1466	case BPF_SK_MSG_VERDICT:
1467		*pprog = &progs->msg_parser;
1468		break;
1469#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
1470	case BPF_SK_SKB_STREAM_PARSER:
1471		*pprog = &progs->stream_parser;
1472		break;
1473#endif
1474	case BPF_SK_SKB_STREAM_VERDICT:
1475		if (progs->skb_verdict)
1476			return -EBUSY;
1477		*pprog = &progs->stream_verdict;
1478		break;
1479	case BPF_SK_SKB_VERDICT:
1480		if (progs->stream_verdict)
1481			return -EBUSY;
1482		*pprog = &progs->skb_verdict;
1483		break;
1484	default:
1485		return -EOPNOTSUPP;
1486	}
1487
1488	return 0;
1489}
1490
1491static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
1492				struct bpf_prog *old, u32 which)
1493{
1494	struct bpf_prog **pprog;
1495	int ret;
1496
1497	ret = sock_map_prog_lookup(map, &pprog, which);
1498	if (ret)
1499		return ret;
1500
1501	if (old)
1502		return psock_replace_prog(pprog, prog, old);
1503
1504	psock_set_prog(pprog, prog);
1505	return 0;
1506}
1507
1508int sock_map_bpf_prog_query(const union bpf_attr *attr,
1509			    union bpf_attr __user *uattr)
1510{
1511	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
1512	u32 prog_cnt = 0, flags = 0, ufd = attr->target_fd;
1513	struct bpf_prog **pprog;
1514	struct bpf_prog *prog;
1515	struct bpf_map *map;
1516	struct fd f;
1517	u32 id = 0;
1518	int ret;
1519
1520	if (attr->query.query_flags)
1521		return -EINVAL;
1522
1523	f = fdget(ufd);
1524	map = __bpf_map_get(f);
1525	if (IS_ERR(map))
1526		return PTR_ERR(map);
1527
1528	rcu_read_lock();
1529
1530	ret = sock_map_prog_lookup(map, &pprog, attr->query.attach_type);
1531	if (ret)
1532		goto end;
1533
1534	prog = *pprog;
1535	prog_cnt = !prog ? 0 : 1;
1536
1537	if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
1538		goto end;
1539
 1540	/* We do not hold the refcnt, so the bpf prog may be released
 1541	 * asynchronously, in which case the id is set to 0.
 1542	 */
1543	id = data_race(prog->aux->id);
1544	if (id == 0)
1545		prog_cnt = 0;
1546
1547end:
1548	rcu_read_unlock();
1549
1550	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)) ||
1551	    (id != 0 && copy_to_user(prog_ids, &id, sizeof(u32))) ||
1552	    copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
1553		ret = -EFAULT;
1554
1555	fdput(f);
1556	return ret;
1557}
1558
1559static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link)
1560{
1561	switch (link->map->map_type) {
1562	case BPF_MAP_TYPE_SOCKMAP:
1563		return sock_map_delete_from_link(link->map, sk,
1564						 link->link_raw);
1565	case BPF_MAP_TYPE_SOCKHASH:
1566		return sock_hash_delete_from_link(link->map, sk,
1567						  link->link_raw);
1568	default:
1569		break;
1570	}
1571}
1572
1573static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock)
1574{
1575	struct sk_psock_link *link;
1576
1577	while ((link = sk_psock_link_pop(psock))) {
1578		sock_map_unlink(sk, link);
1579		sk_psock_free_link(link);
1580	}
1581}
1582
1583void sock_map_unhash(struct sock *sk)
1584{
1585	void (*saved_unhash)(struct sock *sk);
1586	struct sk_psock *psock;
1587
1588	rcu_read_lock();
1589	psock = sk_psock(sk);
1590	if (unlikely(!psock)) {
1591		rcu_read_unlock();
1592		saved_unhash = READ_ONCE(sk->sk_prot)->unhash;
1593	} else {
1594		saved_unhash = psock->saved_unhash;
1595		sock_map_remove_links(sk, psock);
1596		rcu_read_unlock();
1597	}
1598	if (WARN_ON_ONCE(saved_unhash == sock_map_unhash))
1599		return;
1600	if (saved_unhash)
1601		saved_unhash(sk);
1602}
1603EXPORT_SYMBOL_GPL(sock_map_unhash);
1604
1605void sock_map_destroy(struct sock *sk)
1606{
1607	void (*saved_destroy)(struct sock *sk);
1608	struct sk_psock *psock;
1609
1610	rcu_read_lock();
1611	psock = sk_psock_get(sk);
1612	if (unlikely(!psock)) {
1613		rcu_read_unlock();
1614		saved_destroy = READ_ONCE(sk->sk_prot)->destroy;
1615	} else {
1616		saved_destroy = psock->saved_destroy;
1617		sock_map_remove_links(sk, psock);
1618		rcu_read_unlock();
1619		sk_psock_stop(psock);
1620		sk_psock_put(sk, psock);
1621	}
1622	if (WARN_ON_ONCE(saved_destroy == sock_map_destroy))
1623		return;
1624	if (saved_destroy)
1625		saved_destroy(sk);
1626}
1627EXPORT_SYMBOL_GPL(sock_map_destroy);
1628
1629void sock_map_close(struct sock *sk, long timeout)
1630{
1631	void (*saved_close)(struct sock *sk, long timeout);
1632	struct sk_psock *psock;
1633
1634	lock_sock(sk);
1635	rcu_read_lock();
1636	psock = sk_psock_get(sk);
1637	if (unlikely(!psock)) {
1638		rcu_read_unlock();
1639		release_sock(sk);
1640		saved_close = READ_ONCE(sk->sk_prot)->close;
1641	} else {
1642		saved_close = psock->saved_close;
1643		sock_map_remove_links(sk, psock);
1644		rcu_read_unlock();
1645		sk_psock_stop(psock);
1646		release_sock(sk);
1647		cancel_delayed_work_sync(&psock->work);
1648		sk_psock_put(sk, psock);
1649	}
1650
 1651	/* Make sure we do not recurse; recursion here is a bug.
 1652	 * Leak the socket instead of crashing on a stack overflow.
 1653	 */
1654	if (WARN_ON_ONCE(saved_close == sock_map_close))
1655		return;
1656	saved_close(sk, timeout);
1657}
1658EXPORT_SYMBOL_GPL(sock_map_close);
1659
1660static int sock_map_iter_attach_target(struct bpf_prog *prog,
1661				       union bpf_iter_link_info *linfo,
1662				       struct bpf_iter_aux_info *aux)
1663{
1664	struct bpf_map *map;
1665	int err = -EINVAL;
1666
1667	if (!linfo->map.map_fd)
1668		return -EBADF;
1669
1670	map = bpf_map_get_with_uref(linfo->map.map_fd);
1671	if (IS_ERR(map))
1672		return PTR_ERR(map);
1673
1674	if (map->map_type != BPF_MAP_TYPE_SOCKMAP &&
1675	    map->map_type != BPF_MAP_TYPE_SOCKHASH)
1676		goto put_map;
1677
1678	if (prog->aux->max_rdonly_access > map->key_size) {
1679		err = -EACCES;
1680		goto put_map;
1681	}
1682
1683	aux->map = map;
1684	return 0;
1685
1686put_map:
1687	bpf_map_put_with_uref(map);
1688	return err;
1689}
1690
1691static void sock_map_iter_detach_target(struct bpf_iter_aux_info *aux)
1692{
1693	bpf_map_put_with_uref(aux->map);
1694}
1695
1696static struct bpf_iter_reg sock_map_iter_reg = {
1697	.target			= "sockmap",
1698	.attach_target		= sock_map_iter_attach_target,
1699	.detach_target		= sock_map_iter_detach_target,
1700	.show_fdinfo		= bpf_iter_map_show_fdinfo,
1701	.fill_link_info		= bpf_iter_map_fill_link_info,
1702	.ctx_arg_info_size	= 2,
1703	.ctx_arg_info		= {
1704		{ offsetof(struct bpf_iter__sockmap, key),
1705		  PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY },
1706		{ offsetof(struct bpf_iter__sockmap, sk),
1707		  PTR_TO_BTF_ID_OR_NULL },
1708	},
1709};
1710
1711static int __init bpf_sockmap_iter_init(void)
1712{
1713	sock_map_iter_reg.ctx_arg_info[1].btf_id =
1714		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
1715	return bpf_iter_reg_target(&sock_map_iter_reg);
1716}
1717late_initcall(bpf_sockmap_iter_init);
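/* Illustrative sockmap iterator (a sketch) matching the
 * bpf_iter__sockmap context registered above; it is attached through a
 * bpf link whose link_info carries a sockmap or sockhash fd. Assumes a
 * BTF-generated vmlinux.h for the context and sock types.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("iter/sockmap")
int dump_socks(struct bpf_iter__sockmap *ctx)
{
	struct sock *sk = ctx->sk;

	if (!sk)	/* empty slot, or the end-of-iteration callback */
		return 0;
	/* ctx->key points at the slot index (sockmap) or hash key */
	bpf_printk("sk_state=%d", sk->sk_state);
	return 0;
}

char _license[] SEC("license") = "GPL";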
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
   3
   4#include <linux/bpf.h>
 
   5#include <linux/filter.h>
   6#include <linux/errno.h>
   7#include <linux/file.h>
   8#include <linux/net.h>
   9#include <linux/workqueue.h>
  10#include <linux/skmsg.h>
  11#include <linux/list.h>
  12#include <linux/jhash.h>
  13#include <linux/sock_diag.h>
  14#include <net/udp.h>
  15
  16struct bpf_stab {
  17	struct bpf_map map;
  18	struct sock **sks;
  19	struct sk_psock_progs progs;
  20	raw_spinlock_t lock;
  21};
  22
  23#define SOCK_CREATE_FLAG_MASK				\
  24	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
  25
 
 
 
 
  26static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
  27{
  28	struct bpf_stab *stab;
  29	u64 cost;
  30	int err;
  31
  32	if (!capable(CAP_NET_ADMIN))
  33		return ERR_PTR(-EPERM);
  34	if (attr->max_entries == 0 ||
  35	    attr->key_size    != 4 ||
  36	    (attr->value_size != sizeof(u32) &&
  37	     attr->value_size != sizeof(u64)) ||
  38	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
  39		return ERR_PTR(-EINVAL);
  40
  41	stab = kzalloc(sizeof(*stab), GFP_USER);
  42	if (!stab)
  43		return ERR_PTR(-ENOMEM);
  44
  45	bpf_map_init_from_attr(&stab->map, attr);
  46	raw_spin_lock_init(&stab->lock);
  47
  48	/* Make sure page count doesn't overflow. */
  49	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
  50	err = bpf_map_charge_init(&stab->map.memory, cost);
  51	if (err)
  52		goto free_stab;
  53
  54	stab->sks = bpf_map_area_alloc(stab->map.max_entries *
  55				       sizeof(struct sock *),
  56				       stab->map.numa_node);
  57	if (stab->sks)
  58		return &stab->map;
  59	err = -ENOMEM;
  60	bpf_map_charge_finish(&stab->map.memory);
  61free_stab:
  62	kfree(stab);
  63	return ERR_PTR(err);
  64}
  65
  66int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
  67{
  68	u32 ufd = attr->target_fd;
  69	struct bpf_map *map;
  70	struct fd f;
  71	int ret;
  72
  73	if (attr->attach_flags || attr->replace_bpf_fd)
  74		return -EINVAL;
  75
  76	f = fdget(ufd);
  77	map = __bpf_map_get(f);
  78	if (IS_ERR(map))
  79		return PTR_ERR(map);
  80	ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
  81	fdput(f);
  82	return ret;
  83}
  84
  85int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
  86{
  87	u32 ufd = attr->target_fd;
  88	struct bpf_prog *prog;
  89	struct bpf_map *map;
  90	struct fd f;
  91	int ret;
  92
  93	if (attr->attach_flags || attr->replace_bpf_fd)
  94		return -EINVAL;
  95
  96	f = fdget(ufd);
  97	map = __bpf_map_get(f);
  98	if (IS_ERR(map))
  99		return PTR_ERR(map);
 100
 101	prog = bpf_prog_get(attr->attach_bpf_fd);
 102	if (IS_ERR(prog)) {
 103		ret = PTR_ERR(prog);
 104		goto put_map;
 105	}
 106
 107	if (prog->type != ptype) {
 108		ret = -EINVAL;
 109		goto put_prog;
 110	}
 111
 112	ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
 113put_prog:
 114	bpf_prog_put(prog);
 115put_map:
 116	fdput(f);
 117	return ret;
 118}
 119
 120static void sock_map_sk_acquire(struct sock *sk)
 121	__acquires(&sk->sk_lock.slock)
 122{
 123	lock_sock(sk);
 124	preempt_disable();
 125	rcu_read_lock();
 126}
 127
 128static void sock_map_sk_release(struct sock *sk)
 129	__releases(&sk->sk_lock.slock)
 130{
 131	rcu_read_unlock();
 132	preempt_enable();
 133	release_sock(sk);
 134}
 135
 136static void sock_map_add_link(struct sk_psock *psock,
 137			      struct sk_psock_link *link,
 138			      struct bpf_map *map, void *link_raw)
 139{
 140	link->link_raw = link_raw;
 141	link->map = map;
 142	spin_lock_bh(&psock->link_lock);
 143	list_add_tail(&link->list, &psock->link);
 144	spin_unlock_bh(&psock->link_lock);
 145}
 146
 147static void sock_map_del_link(struct sock *sk,
 148			      struct sk_psock *psock, void *link_raw)
 149{
 
 150	struct sk_psock_link *link, *tmp;
 151	bool strp_stop = false;
 152
 153	spin_lock_bh(&psock->link_lock);
 154	list_for_each_entry_safe(link, tmp, &psock->link, list) {
 155		if (link->link_raw == link_raw) {
 156			struct bpf_map *map = link->map;
 157			struct bpf_stab *stab = container_of(map, struct bpf_stab,
 158							     map);
 159			if (psock->parser.enabled && stab->progs.skb_parser)
 160				strp_stop = true;
 
 
 
 
 161			list_del(&link->list);
 162			sk_psock_free_link(link);
 163		}
 164	}
 165	spin_unlock_bh(&psock->link_lock);
 166	if (strp_stop) {
 167		write_lock_bh(&sk->sk_callback_lock);
 168		sk_psock_stop_strp(sk, psock);
 
 
 
 
 
 
 169		write_unlock_bh(&sk->sk_callback_lock);
 170	}
 171}
 172
 173static void sock_map_unref(struct sock *sk, void *link_raw)
 174{
 175	struct sk_psock *psock = sk_psock(sk);
 176
 177	if (likely(psock)) {
 178		sock_map_del_link(sk, psock, link_raw);
 179		sk_psock_put(sk, psock);
 180	}
 181}
 182
 183static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
 184{
 185	struct proto *prot;
 186
 187	sock_owned_by_me(sk);
 188
 189	switch (sk->sk_type) {
 190	case SOCK_STREAM:
 191		prot = tcp_bpf_get_proto(sk, psock);
 192		break;
 193
 194	case SOCK_DGRAM:
 195		prot = udp_bpf_get_proto(sk, psock);
 196		break;
 197
 198	default:
 199		return -EINVAL;
 200	}
 201
 202	if (IS_ERR(prot))
 203		return PTR_ERR(prot);
 204
 205	sk_psock_update_proto(sk, psock, prot);
 206	return 0;
 207}
 208
 209static struct sk_psock *sock_map_psock_get_checked(struct sock *sk)
 210{
 211	struct sk_psock *psock;
 212
 213	rcu_read_lock();
 214	psock = sk_psock(sk);
 215	if (psock) {
 216		if (sk->sk_prot->close != sock_map_close) {
 217			psock = ERR_PTR(-EBUSY);
 218			goto out;
 219		}
 220
 221		if (!refcount_inc_not_zero(&psock->refcnt))
 222			psock = ERR_PTR(-EBUSY);
 223	}
 224out:
 225	rcu_read_unlock();
 226	return psock;
 227}
 228
 229static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs,
 230			 struct sock *sk)
 231{
 232	struct bpf_prog *msg_parser, *skb_parser, *skb_verdict;
 
 
 
 
 233	struct sk_psock *psock;
 234	bool skb_progs;
 235	int ret;
 236
 237	skb_verdict = READ_ONCE(progs->skb_verdict);
 238	skb_parser = READ_ONCE(progs->skb_parser);
 239	skb_progs = skb_parser && skb_verdict;
 240	if (skb_progs) {
 241		skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
 242		if (IS_ERR(skb_verdict))
 243			return PTR_ERR(skb_verdict);
 244		skb_parser = bpf_prog_inc_not_zero(skb_parser);
 245		if (IS_ERR(skb_parser)) {
 246			bpf_prog_put(skb_verdict);
 247			return PTR_ERR(skb_parser);
 
 
 248		}
 249	}
 250
 251	msg_parser = READ_ONCE(progs->msg_parser);
 252	if (msg_parser) {
 253		msg_parser = bpf_prog_inc_not_zero(msg_parser);
 254		if (IS_ERR(msg_parser)) {
 255			ret = PTR_ERR(msg_parser);
 256			goto out;
 
 
 
 
 
 
 
 
 
 257		}
 258	}
 259
 260	psock = sock_map_psock_get_checked(sk);
 261	if (IS_ERR(psock)) {
 262		ret = PTR_ERR(psock);
 263		goto out_progs;
 264	}
 265
 266	if (psock) {
 267		if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
 268		    (skb_progs  && READ_ONCE(psock->progs.skb_parser))) {
 
 
 
 
 269			sk_psock_put(sk, psock);
 270			ret = -EBUSY;
 271			goto out_progs;
 272		}
 273	} else {
 274		psock = sk_psock_init(sk, map->numa_node);
 275		if (!psock) {
 276			ret = -ENOMEM;
 277			goto out_progs;
 278		}
 279	}
 280
 281	if (msg_parser)
 282		psock_set_prog(&psock->progs.msg_parser, msg_parser);
 
 
 
 
 
 
 283
 
 
 
 284	ret = sock_map_init_proto(sk, psock);
 285	if (ret < 0)
 286		goto out_drop;
 
 
 287
 288	write_lock_bh(&sk->sk_callback_lock);
 289	if (skb_progs && !psock->parser.enabled) {
 290		ret = sk_psock_init_strp(sk, psock);
 291		if (ret) {
 292			write_unlock_bh(&sk->sk_callback_lock);
 293			goto out_drop;
 
 294		}
 295		psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
 296		psock_set_prog(&psock->progs.skb_parser, skb_parser);
 297		sk_psock_start_strp(sk, psock);
 
 
 
 
 298	}
 299	write_unlock_bh(&sk->sk_callback_lock);
 300	return 0;
 301out_drop:
 302	sk_psock_put(sk, psock);
 303out_progs:
 304	if (msg_parser)
 305		bpf_prog_put(msg_parser);
 306out:
 307	if (skb_progs) {
 308		bpf_prog_put(skb_verdict);
 309		bpf_prog_put(skb_parser);
 310	}
 311	return ret;
 312}
 313
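    	/* Like sock_map_link(), but for sockets that cannot be a
    	 * redirect target: only create the psock and switch protos; no
    	 * programs are attached.
    	 */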
 314static int sock_map_link_no_progs(struct bpf_map *map, struct sock *sk)
 315{
 316	struct sk_psock *psock;
 317	int ret;
 318
 319	psock = sock_map_psock_get_checked(sk);
 320	if (IS_ERR(psock))
 321		return PTR_ERR(psock);
 322
 323	if (!psock) {
 324		psock = sk_psock_init(sk, map->numa_node);
 325		if (!psock)
 326			return -ENOMEM;
 327	}
 328
 329	ret = sock_map_init_proto(sk, psock);
 330	if (ret < 0)
 331		sk_psock_put(sk, psock);
 332	return ret;
 333}
 334
 335static void sock_map_free(struct bpf_map *map)
 336{
 337	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 338	int i;
 339
 340	/* After the sync no updates or deletes will be in-flight so it
 341	 * is safe to walk the map and remove entries without risking a
 342	 * race in the EEXIST update case.
 343	 */
 344	synchronize_rcu();
 345	for (i = 0; i < stab->map.max_entries; i++) {
 346		struct sock **psk = &stab->sks[i];
 347		struct sock *sk;
 348
 349		sk = xchg(psk, NULL);
 350		if (sk) {
 351			lock_sock(sk);
 352			rcu_read_lock();
 353			sock_map_unref(sk, psk);
 354			rcu_read_unlock();
 355			release_sock(sk);
 356		}
 357	}
 358
 359	/* wait for psock readers still accessing their map link */
 360	synchronize_rcu();
 361
 362	bpf_map_area_free(stab->sks);
 363	kfree(stab);
 364}
 365
 366static void sock_map_release_progs(struct bpf_map *map)
 367{
 368	psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
 369}
 370
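    	/* Raw slot lookup; the caller must be in an RCU read-side
    	 * critical section.
    	 */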
 371static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
 372{
 373	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 374
 375	WARN_ON_ONCE(!rcu_read_lock_held());
 376
 377	if (unlikely(key >= map->max_entries))
 378		return NULL;
 379	return READ_ONCE(stab->sks[key]);
 380}
 381
 382static void *sock_map_lookup(struct bpf_map *map, void *key)
 383{
 384	struct sock *sk;
 385
 386	sk = __sock_map_lookup_elem(map, *(u32 *)key);
 387	if (!sk || !sk_fullsock(sk))
 388		return NULL;
 389	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
 390		return NULL;
 391	return sk;
 392}
 393
 394static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
 395{
 396	struct sock *sk;
 397
 398	if (map->value_size != sizeof(u64))
 399		return ERR_PTR(-ENOSPC);
 400
 401	sk = __sock_map_lookup_elem(map, *(u32 *)key);
 402	if (!sk)
 403		return ERR_PTR(-ENOENT);
 404
 405	sock_gen_cookie(sk);
 406	return &sk->sk_cookie;
 407}
 408
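    	/* Clear a map slot and drop its reference. When sk_test is
    	 * non-NULL, only clear the slot if it still holds that exact
    	 * socket; used by the link teardown path.
    	 */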
 409static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
 410			     struct sock **psk)
 411{
 412	struct sock *sk;
 413	int err = 0;
 414
 415	spin_lock_bh(&stab->lock);
 416	sk = *psk;
 417	if (!sk_test || sk_test == sk)
 418		sk = xchg(psk, NULL);
 419
 420	if (likely(sk))
 421		sock_map_unref(sk, psk);
 422	else
 423		err = -EINVAL;
 424
 425	spin_unlock_bh(&stab->lock);
 426	return err;
 427}
 428
 429static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
 430				      void *link_raw)
 431{
 432	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 433
 434	__sock_map_delete(stab, sk, link_raw);
 435}
 436
 437static int sock_map_delete_elem(struct bpf_map *map, void *key)
 438{
 439	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 440	u32 i = *(u32 *)key;
 441	struct sock **psk;
 442
 443	if (unlikely(i >= map->max_entries))
 444		return -EINVAL;
 445
 446	psk = &stab->sks[i];
 447	return __sock_map_delete(stab, NULL, psk);
 448}
 449
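    	/* Keys are plain indices: iterate in order, restarting from 0
    	 * for an out-of-range key, and stop after the last slot.
    	 */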
 450static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
 451{
 452	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 453	u32 i = key ? *(u32 *)key : U32_MAX;
 454	u32 *key_next = next;
 455
 456	if (i == stab->map.max_entries - 1)
 457		return -ENOENT;
 458	if (i >= stab->map.max_entries)
 459		*key_next = 0;
 460	else
 461		*key_next = i + 1;
 462	return 0;
 463}
 464
 465static bool sock_map_redirect_allowed(const struct sock *sk);
 466
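    	/* Core insert path: link the socket's psock to the map, then
    	 * install it at idx under stab->lock, honouring the
    	 * BPF_NOEXIST/BPF_EXIST flags and unref'ing any displaced
    	 * socket.
    	 */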
 467static int sock_map_update_common(struct bpf_map *map, u32 idx,
 468				  struct sock *sk, u64 flags)
 469{
 470	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 471	struct sk_psock_link *link;
 472	struct sk_psock *psock;
 473	struct sock *osk;
 474	int ret;
 475
 476	WARN_ON_ONCE(!rcu_read_lock_held());
 477	if (unlikely(flags > BPF_EXIST))
 478		return -EINVAL;
 479	if (unlikely(idx >= map->max_entries))
 480		return -E2BIG;
 481	if (inet_csk_has_ulp(sk))
 482		return -EINVAL;
 483
 484	link = sk_psock_init_link();
 485	if (!link)
 486		return -ENOMEM;
 487
 488	/* Only sockets we can redirect into/from in BPF need to hold
 489	 * refs to parser/verdict progs and have their sk_data_ready
 490	 * and sk_write_space callbacks overridden.
 491	 */
 492	if (sock_map_redirect_allowed(sk))
 493		ret = sock_map_link(map, &stab->progs, sk);
 494	else
 495		ret = sock_map_link_no_progs(map, sk);
 496	if (ret < 0)
 497		goto out_free;
 498
 499	psock = sk_psock(sk);
 500	WARN_ON_ONCE(!psock);
 501
 502	spin_lock_bh(&stab->lock);
 503	osk = stab->sks[idx];
 504	if (osk && flags == BPF_NOEXIST) {
 505		ret = -EEXIST;
 506		goto out_unlock;
 507	} else if (!osk && flags == BPF_EXIST) {
 508		ret = -ENOENT;
 509		goto out_unlock;
 510	}
 511
 512	sock_map_add_link(psock, link, map, &stab->sks[idx]);
 513	stab->sks[idx] = sk;
 514	if (osk)
 515		sock_map_unref(osk, &stab->sks[idx]);
 516	spin_unlock_bh(&stab->lock);
 517	return 0;
 518out_unlock:
 519	spin_unlock_bh(&stab->lock);
 520	if (psock)
 521		sk_psock_put(sk, psock);
 522out_free:
 523	sk_psock_free_link(link);
 524	return ret;
 525}
 526
 527static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
 528{
 529	return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
 530	       ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
 531	       ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
 532}
 533
 534static bool sk_is_tcp(const struct sock *sk)
 535{
 536	return sk->sk_type == SOCK_STREAM &&
 537	       sk->sk_protocol == IPPROTO_TCP;
 538}
 539
 540static bool sk_is_udp(const struct sock *sk)
 541{
 542	return sk->sk_type == SOCK_DGRAM &&
 543	       sk->sk_protocol == IPPROTO_UDP;
 544}
 545
 546static bool sock_map_redirect_allowed(const struct sock *sk)
 547{
 548	return sk_is_tcp(sk) && sk->sk_state != TCP_LISTEN;
 549}
 550
 551static bool sock_map_sk_is_suitable(const struct sock *sk)
 552{
 553	return sk_is_tcp(sk) || sk_is_udp(sk);
 554}
 555
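    	/* Updates from the syscall path can race with socket state
    	 * changes, so only admit established/listening TCP sockets and
    	 * hashed UDP sockets.
    	 */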
 556static bool sock_map_sk_state_allowed(const struct sock *sk)
 557{
 558	if (sk_is_tcp(sk))
 559		return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
 560	else if (sk_is_udp(sk))
 561		return sk_hashed(sk);
 562
 563	return false;
 564}
 565
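    	/* Syscall-side update: the map value is a socket file descriptor
    	 * (u32 or u64 depending on value_size). A minimal user-space
    	 * sketch, assuming map_fd and sock_fd already exist:
    	 *
    	 *	__u32 idx = 0;
    	 *	__u64 value = sock_fd;
    	 *	bpf_map_update_elem(map_fd, &idx, &value, BPF_ANY);
    	 */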
 566static int sock_map_update_elem(struct bpf_map *map, void *key,
 567				void *value, u64 flags)
 568{
 569	u32 idx = *(u32 *)key;
 570	struct socket *sock;
 571	struct sock *sk;
 572	int ret;
 573	u64 ufd;
 574
 575	if (map->value_size == sizeof(u64))
 576		ufd = *(u64 *)value;
 577	else
 578		ufd = *(u32 *)value;
 579	if (ufd > S32_MAX)
 580		return -EINVAL;
 581
 582	sock = sockfd_lookup(ufd, &ret);
 583	if (!sock)
 584		return ret;
 585	sk = sock->sk;
 586	if (!sk) {
 587		ret = -EINVAL;
 588		goto out;
 589	}
 590	if (!sock_map_sk_is_suitable(sk)) {
 591		ret = -EOPNOTSUPP;
 592		goto out;
 593	}
 594
 595	sock_map_sk_acquire(sk);
 596	if (!sock_map_sk_state_allowed(sk))
 597		ret = -EOPNOTSUPP;
 598	else
 599		ret = sock_map_update_common(map, idx, sk, flags);
 600	sock_map_sk_release(sk);
 601out:
 602	fput(sock->file);
 603	return ret;
 604}
 605
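    	/* In-kernel update helper for sock_ops programs; restricted to
    	 * the socket states checked by sock_map_op_okay().
    	 */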
 606BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
 607	   struct bpf_map *, map, void *, key, u64, flags)
 608{
 609	WARN_ON_ONCE(!rcu_read_lock_held());
 610
 611	if (likely(sock_map_sk_is_suitable(sops->sk) &&
 612		   sock_map_op_okay(sops)))
 613		return sock_map_update_common(map, *(u32 *)key, sops->sk,
 614					      flags);
 615	return -EOPNOTSUPP;
 616}
 617
 618const struct bpf_func_proto bpf_sock_map_update_proto = {
 619	.func		= bpf_sock_map_update,
 620	.gpl_only	= false,
 621	.pkt_access	= true,
 622	.ret_type	= RET_INTEGER,
 623	.arg1_type	= ARG_PTR_TO_CTX,
 624	.arg2_type	= ARG_CONST_MAP_PTR,
 625	.arg3_type	= ARG_PTR_TO_MAP_KEY,
 626	.arg4_type	= ARG_ANYTHING,
 627};
 628
 629BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
 630	   struct bpf_map *, map, u32, key, u64, flags)
 631{
 632	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 633	struct sock *sk;
 634
 635	if (unlikely(flags & ~(BPF_F_INGRESS)))
 636		return SK_DROP;
 637
 638	sk = __sock_map_lookup_elem(map, key);
 639	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
 640		return SK_DROP;
 641
 642	tcb->bpf.flags = flags;
 643	tcb->bpf.sk_redir = sk;
 644	return SK_PASS;
 645}
 646
 647const struct bpf_func_proto bpf_sk_redirect_map_proto = {
 648	.func           = bpf_sk_redirect_map,
 649	.gpl_only       = false,
 650	.ret_type       = RET_INTEGER,
 651	.arg1_type	= ARG_PTR_TO_CTX,
 652	.arg2_type      = ARG_CONST_MAP_PTR,
 653	.arg3_type      = ARG_ANYTHING,
 654	.arg4_type      = ARG_ANYTHING,
 655};
 656
 657BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
 658	   struct bpf_map *, map, u32, key, u64, flags)
 659{
 660	struct sock *sk;
 661
 662	if (unlikely(flags & ~(BPF_F_INGRESS)))
 663		return SK_DROP;
 664
 665	sk = __sock_map_lookup_elem(map, key);
 666	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
 667		return SK_DROP;
 668
 669	msg->flags = flags;
 670	msg->sk_redir = sk;
 671	return SK_PASS;
 672}
 673
 674const struct bpf_func_proto bpf_msg_redirect_map_proto = {
 675	.func           = bpf_msg_redirect_map,
 676	.gpl_only       = false,
 677	.ret_type       = RET_INTEGER,
 678	.arg1_type	= ARG_PTR_TO_CTX,
 679	.arg2_type      = ARG_CONST_MAP_PTR,
 680	.arg3_type      = ARG_ANYTHING,
 681	.arg4_type      = ARG_ANYTHING,
 682};
 683
 684static int sock_map_btf_id;
 685const struct bpf_map_ops sock_map_ops = {
 686	.map_alloc		= sock_map_alloc,
 687	.map_free		= sock_map_free,
 688	.map_get_next_key	= sock_map_get_next_key,
 689	.map_lookup_elem_sys_only = sock_map_lookup_sys,
 690	.map_update_elem	= sock_map_update_elem,
 691	.map_delete_elem	= sock_map_delete_elem,
 692	.map_lookup_elem	= sock_map_lookup,
 693	.map_release_uref	= sock_map_release_progs,
 694	.map_check_btf		= map_check_no_btf,
 695	.map_btf_name		= "bpf_stab",
 696	.map_btf_id		= &sock_map_btf_id,
 697};
 698
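    	/* sockhash: same psock plumbing as sockmap, but entries are
    	 * keyed by an arbitrary fixed-size key hashed into power-of-two
    	 * buckets.
    	 */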
 699struct bpf_shtab_elem {
 700	struct rcu_head rcu;
 701	u32 hash;
 702	struct sock *sk;
 703	struct hlist_node node;
 704	u8 key[];
 705};
 706
 707struct bpf_shtab_bucket {
 708	struct hlist_head head;
 709	raw_spinlock_t lock;
 710};
 711
 712struct bpf_shtab {
 713	struct bpf_map map;
 714	struct bpf_shtab_bucket *buckets;
 715	u32 buckets_num;
 716	u32 elem_size;
 717	struct sk_psock_progs progs;
 718	atomic_t count;
 719};
 720
 721static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
 722{
 723	return jhash(key, len, 0);
 724}
 725
 726static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab,
 727							u32 hash)
 728{
 729	return &htab->buckets[hash & (htab->buckets_num - 1)];
 730}
 731
 732static struct bpf_shtab_elem *
 733sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
 734			  u32 key_size)
 735{
 736	struct bpf_shtab_elem *elem;
 737
 738	hlist_for_each_entry_rcu(elem, head, node) {
 739		if (elem->hash == hash &&
 740		    !memcmp(&elem->key, key, key_size))
 741			return elem;
 742	}
 743
 744	return NULL;
 745}
 746
 747static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
 748{
 749	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 750	u32 key_size = map->key_size, hash;
 751	struct bpf_shtab_bucket *bucket;
 752	struct bpf_shtab_elem *elem;
 753
 754	WARN_ON_ONCE(!rcu_read_lock_held());
 755
 756	hash = sock_hash_bucket_hash(key, key_size);
 757	bucket = sock_hash_select_bucket(htab, hash);
 758	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
 759
 760	return elem ? elem->sk : NULL;
 761}
 762
 763static void sock_hash_free_elem(struct bpf_shtab *htab,
 764				struct bpf_shtab_elem *elem)
 765{
 766	atomic_dec(&htab->count);
 767	kfree_rcu(elem, rcu);
 768}
 769
 770static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
 771				       void *link_raw)
 772{
 773	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 774	struct bpf_shtab_elem *elem_probe, *elem = link_raw;
 775	struct bpf_shtab_bucket *bucket;
 776
 777	WARN_ON_ONCE(!rcu_read_lock_held());
 778	bucket = sock_hash_select_bucket(htab, elem->hash);
 779
 780	/* elem may be deleted in parallel from the map, but access here
 781	 * is okay since it's going away only after an RCU grace period.
 782	 * However, we need to check whether it's still present.
 783	 */
 784	raw_spin_lock_bh(&bucket->lock);
 785	elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
 786					       elem->key, map->key_size);
 787	if (elem_probe && elem_probe == elem) {
 788		hlist_del_rcu(&elem->node);
 789		sock_map_unref(elem->sk, elem);
 790		sock_hash_free_elem(htab, elem);
 791	}
 792	raw_spin_unlock_bh(&bucket->lock);
 793}
 794
 795static int sock_hash_delete_elem(struct bpf_map *map, void *key)
 796{
 797	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 798	u32 hash, key_size = map->key_size;
 799	struct bpf_shtab_bucket *bucket;
 800	struct bpf_shtab_elem *elem;
 801	int ret = -ENOENT;
 802
 803	hash = sock_hash_bucket_hash(key, key_size);
 804	bucket = sock_hash_select_bucket(htab, hash);
 805
 806	raw_spin_lock_bh(&bucket->lock);
 807	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
 808	if (elem) {
 809		hlist_del_rcu(&elem->node);
 810		sock_map_unref(elem->sk, elem);
 811		sock_hash_free_elem(htab, elem);
 812		ret = 0;
 813	}
 814	raw_spin_unlock_bh(&bucket->lock);
 815	return ret;
 816}
 817
 818static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
 819						   void *key, u32 key_size,
 820						   u32 hash, struct sock *sk,
 821						   struct bpf_shtab_elem *old)
 822{
 823	struct bpf_shtab_elem *new;
 824
 825	if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
 826		if (!old) {
 827			atomic_dec(&htab->count);
 828			return ERR_PTR(-E2BIG);
 829		}
 830	}
 831
 832	new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
 833			   htab->map.numa_node);
 834	if (!new) {
 835		atomic_dec(&htab->count);
 836		return ERR_PTR(-ENOMEM);
 837	}
 838	memcpy(new->key, key, key_size);
 839	new->sk = sk;
 840	new->hash = hash;
 841	return new;
 842}
 843
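    	/* Hash-table counterpart of sock_map_update_common(): new
    	 * elements are inserted at the bucket head so concurrent RCU
    	 * readers see them before any element they replace.
    	 */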
 844static int sock_hash_update_common(struct bpf_map *map, void *key,
 845				   struct sock *sk, u64 flags)
 846{
 847	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 848	u32 key_size = map->key_size, hash;
 849	struct bpf_shtab_elem *elem, *elem_new;
 850	struct bpf_shtab_bucket *bucket;
 851	struct sk_psock_link *link;
 852	struct sk_psock *psock;
 853	int ret;
 854
 855	WARN_ON_ONCE(!rcu_read_lock_held());
 856	if (unlikely(flags > BPF_EXIST))
 857		return -EINVAL;
 858	if (inet_csk_has_ulp(sk))
 859		return -EINVAL;
 860
 861	link = sk_psock_init_link();
 862	if (!link)
 863		return -ENOMEM;
 864
 865	/* Only sockets we can redirect into/from in BPF need to hold
 866	 * refs to parser/verdict progs and have their sk_data_ready
 867	 * and sk_write_space callbacks overridden.
 868	 */
 869	if (sock_map_redirect_allowed(sk))
 870		ret = sock_map_link(map, &htab->progs, sk);
 871	else
 872		ret = sock_map_link_no_progs(map, sk);
 873	if (ret < 0)
 874		goto out_free;
 875
 876	psock = sk_psock(sk);
 877	WARN_ON_ONCE(!psock);
 878
 879	hash = sock_hash_bucket_hash(key, key_size);
 880	bucket = sock_hash_select_bucket(htab, hash);
 881
 882	raw_spin_lock_bh(&bucket->lock);
 883	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
 884	if (elem && flags == BPF_NOEXIST) {
 885		ret = -EEXIST;
 886		goto out_unlock;
 887	} else if (!elem && flags == BPF_EXIST) {
 888		ret = -ENOENT;
 889		goto out_unlock;
 890	}
 891
 892	elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
 893	if (IS_ERR(elem_new)) {
 894		ret = PTR_ERR(elem_new);
 895		goto out_unlock;
 896	}
 897
 898	sock_map_add_link(psock, link, map, elem_new);
 899	/* Add the new element to the head of the list, so that a
 900	 * concurrent search will find it before the old element.
 901	 */
 902	hlist_add_head_rcu(&elem_new->node, &bucket->head);
 903	if (elem) {
 904		hlist_del_rcu(&elem->node);
 905		sock_map_unref(elem->sk, elem);
 906		sock_hash_free_elem(htab, elem);
 907	}
 908	raw_spin_unlock_bh(&bucket->lock);
 909	return 0;
 910out_unlock:
 911	raw_spin_unlock_bh(&bucket->lock);
 912	sk_psock_put(sk, psock);
 913out_free:
 914	sk_psock_free_link(link);
 915	return ret;
 916}
 917
 918static int sock_hash_update_elem(struct bpf_map *map, void *key,
 919				 void *value, u64 flags)
 920{
 921	struct socket *sock;
 922	struct sock *sk;
 923	int ret;
 924	u64 ufd;
 925
 926	if (map->value_size == sizeof(u64))
 927		ufd = *(u64 *)value;
 928	else
 929		ufd = *(u32 *)value;
 930	if (ufd > S32_MAX)
 931		return -EINVAL;
 932
 933	sock = sockfd_lookup(ufd, &ret);
 934	if (!sock)
 935		return ret;
 936	sk = sock->sk;
 937	if (!sk) {
 938		ret = -EINVAL;
 939		goto out;
 940	}
 941	if (!sock_map_sk_is_suitable(sk)) {
 942		ret = -EOPNOTSUPP;
 943		goto out;
 944	}
 945
 946	sock_map_sk_acquire(sk);
 947	if (!sock_map_sk_state_allowed(sk))
 948		ret = -EOPNOTSUPP;
 949	else
 950		ret = sock_hash_update_common(map, key, sk, flags);
 951	sock_map_sk_release(sk);
 952out:
 953	fput(sock->file);
 954	return ret;
 955}
 956
 957static int sock_hash_get_next_key(struct bpf_map *map, void *key,
 958				  void *key_next)
 959{
 960	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 961	struct bpf_shtab_elem *elem, *elem_next;
 962	u32 hash, key_size = map->key_size;
 963	struct hlist_head *head;
 964	int i = 0;
 965
 966	if (!key)
 967		goto find_first_elem;
 968	hash = sock_hash_bucket_hash(key, key_size);
 969	head = &sock_hash_select_bucket(htab, hash)->head;
 970	elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
 971	if (!elem)
 972		goto find_first_elem;
 973
 974	elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&elem->node)),
 975				     struct bpf_shtab_elem, node);
 976	if (elem_next) {
 977		memcpy(key_next, elem_next->key, key_size);
 978		return 0;
 979	}
 980
 981	i = hash & (htab->buckets_num - 1);
 982	i++;
 983find_first_elem:
 984	for (; i < htab->buckets_num; i++) {
 985		head = &sock_hash_select_bucket(htab, i)->head;
 986		elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
 987					     struct bpf_shtab_elem, node);
 988		if (elem_next) {
 989			memcpy(key_next, elem_next->key, key_size);
 990			return 0;
 991		}
 992	}
 993
 994	return -ENOENT;
 995}
 996
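    	/* The bucket count is max_entries rounded up to a power of two,
    	 * letting sock_hash_select_bucket() mask the hash instead of
    	 * dividing.
    	 */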
 997static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
 998{
 999	struct bpf_shtab *htab;
1000	int i, err;
1001	u64 cost;
1002
1003	if (!capable(CAP_NET_ADMIN))
1004		return ERR_PTR(-EPERM);
1005	if (attr->max_entries == 0 ||
1006	    attr->key_size    == 0 ||
1007	    (attr->value_size != sizeof(u32) &&
1008	     attr->value_size != sizeof(u64)) ||
1009	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
1010		return ERR_PTR(-EINVAL);
1011	if (attr->key_size > MAX_BPF_STACK)
1012		return ERR_PTR(-E2BIG);
1013
1014	htab = kzalloc(sizeof(*htab), GFP_USER);
1015	if (!htab)
1016		return ERR_PTR(-ENOMEM);
1017
1018	bpf_map_init_from_attr(&htab->map, attr);
1019
1020	htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
1021	htab->elem_size = sizeof(struct bpf_shtab_elem) +
1022			  round_up(htab->map.key_size, 8);
1023	if (htab->buckets_num == 0 ||
1024	    htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) {
1025		err = -EINVAL;
1026		goto free_htab;
1027	}
1028
1029	cost = (u64) htab->buckets_num * sizeof(struct bpf_shtab_bucket) +
1030	       (u64) htab->elem_size * htab->map.max_entries;
1031	if (cost >= U32_MAX - PAGE_SIZE) {
1032		err = -EINVAL;
1033		goto free_htab;
1034	}
1035	err = bpf_map_charge_init(&htab->map.memory, cost);
1036	if (err)
1037		goto free_htab;
1038
1039	htab->buckets = bpf_map_area_alloc(htab->buckets_num *
1040					   sizeof(struct bpf_shtab_bucket),
1041					   htab->map.numa_node);
1042	if (!htab->buckets) {
1043		bpf_map_charge_finish(&htab->map.memory);
1044		err = -ENOMEM;
1045		goto free_htab;
1046	}
1047
1048	for (i = 0; i < htab->buckets_num; i++) {
1049		INIT_HLIST_HEAD(&htab->buckets[i].head);
1050		raw_spin_lock_init(&htab->buckets[i].lock);
1051	}
1052
1053	return &htab->map;
1054free_htab:
1055	kfree(htab);
1056	return ERR_PTR(err);
1057}
1058
1059static void sock_hash_free(struct bpf_map *map)
1060{
1061	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1062	struct bpf_shtab_bucket *bucket;
1063	struct hlist_head unlink_list;
1064	struct bpf_shtab_elem *elem;
1065	struct hlist_node *node;
1066	int i;
1067
1068	/* After the sync no updates or deletes will be in-flight so it
1069	 * is safe to walk the map and remove entries without risking a
1070	 * race in the EEXIST update case.
1071	 */
1072	synchronize_rcu();
1073	for (i = 0; i < htab->buckets_num; i++) {
1074		bucket = sock_hash_select_bucket(htab, i);
1075
1076		/* We are racing with sock_hash_delete_from_link to
1077		 * enter the spin-lock critical section. Every socket on
1078		 * the list is still linked to the sockhash. Since the
1079		 * link exists, the psock exists and holds a ref to the
1080		 * socket. That lets us grab a socket ref too.
1081		 */
1082		raw_spin_lock_bh(&bucket->lock);
1083		hlist_for_each_entry(elem, &bucket->head, node)
1084			sock_hold(elem->sk);
1085		hlist_move_list(&bucket->head, &unlink_list);
1086		raw_spin_unlock_bh(&bucket->lock);
1087
1088		/* Process removed entries out of atomic context to
1089		 * block for socket lock before deleting the psock's
1090		 * link to sockhash.
1091		 */
1092		hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
1093			hlist_del(&elem->node);
1094			lock_sock(elem->sk);
1095			rcu_read_lock();
1096			sock_map_unref(elem->sk, elem);
1097			rcu_read_unlock();
1098			release_sock(elem->sk);
1099			sock_put(elem->sk);
1100			sock_hash_free_elem(htab, elem);
1101		}
1102	}
1103
1104	/* wait for psock readers still accessing their map link */
1105	synchronize_rcu();
1106
1107	bpf_map_area_free(htab->buckets);
1108	kfree(htab);
1109}
1110
1111static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
1112{
1113	struct sock *sk;
1114
1115	if (map->value_size != sizeof(u64))
1116		return ERR_PTR(-ENOSPC);
1117
1118	sk = __sock_hash_lookup_elem(map, key);
1119	if (!sk)
1120		return ERR_PTR(-ENOENT);
1121
1122	sock_gen_cookie(sk);
1123	return &sk->sk_cookie;
1124}
1125
1126static void *sock_hash_lookup(struct bpf_map *map, void *key)
1127{
1128	struct sock *sk;
1129
1130	sk = __sock_hash_lookup_elem(map, key);
1131	if (!sk || !sk_fullsock(sk))
1132		return NULL;
1133	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
1134		return NULL;
1135	return sk;
1136}
1137
1138static void sock_hash_release_progs(struct bpf_map *map)
1139{
1140	psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs);
1141}
1142
1143BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
1144	   struct bpf_map *, map, void *, key, u64, flags)
1145{
1146	WARN_ON_ONCE(!rcu_read_lock_held());
1147
1148	if (likely(sock_map_sk_is_suitable(sops->sk) &&
1149		   sock_map_op_okay(sops)))
1150		return sock_hash_update_common(map, key, sops->sk, flags);
1151	return -EOPNOTSUPP;
1152}
1153
1154const struct bpf_func_proto bpf_sock_hash_update_proto = {
1155	.func		= bpf_sock_hash_update,
1156	.gpl_only	= false,
1157	.pkt_access	= true,
1158	.ret_type	= RET_INTEGER,
1159	.arg1_type	= ARG_PTR_TO_CTX,
1160	.arg2_type	= ARG_CONST_MAP_PTR,
1161	.arg3_type	= ARG_PTR_TO_MAP_KEY,
1162	.arg4_type	= ARG_ANYTHING,
1163};
1164
1165BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
1166	   struct bpf_map *, map, void *, key, u64, flags)
1167{
1168	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
1169	struct sock *sk;
1170
1171	if (unlikely(flags & ~(BPF_F_INGRESS)))
1172		return SK_DROP;
1173
1174	sk = __sock_hash_lookup_elem(map, key);
1175	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1176		return SK_DROP;
1177
1178	tcb->bpf.flags = flags;
1179	tcb->bpf.sk_redir = sk;
1180	return SK_PASS;
1181}
1182
1183const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
1184	.func           = bpf_sk_redirect_hash,
1185	.gpl_only       = false,
1186	.ret_type       = RET_INTEGER,
1187	.arg1_type	= ARG_PTR_TO_CTX,
1188	.arg2_type      = ARG_CONST_MAP_PTR,
1189	.arg3_type      = ARG_PTR_TO_MAP_KEY,
1190	.arg4_type      = ARG_ANYTHING,
1191};
1192
1193BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
1194	   struct bpf_map *, map, void *, key, u64, flags)
1195{
1196	struct sock *sk;
1197
1198	if (unlikely(flags & ~(BPF_F_INGRESS)))
1199		return SK_DROP;
1200
1201	sk = __sock_hash_lookup_elem(map, key);
1202	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1203		return SK_DROP;
1204
1205	msg->flags = flags;
1206	msg->sk_redir = sk;
1207	return SK_PASS;
1208}
1209
1210const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
1211	.func           = bpf_msg_redirect_hash,
1212	.gpl_only       = false,
1213	.ret_type       = RET_INTEGER,
1214	.arg1_type	= ARG_PTR_TO_CTX,
1215	.arg2_type      = ARG_CONST_MAP_PTR,
1216	.arg3_type      = ARG_PTR_TO_MAP_KEY,
1217	.arg4_type      = ARG_ANYTHING,
1218};
1219
1220static int sock_hash_map_btf_id;
1221const struct bpf_map_ops sock_hash_ops = {
1222	.map_alloc		= sock_hash_alloc,
1223	.map_free		= sock_hash_free,
1224	.map_get_next_key	= sock_hash_get_next_key,
1225	.map_update_elem	= sock_hash_update_elem,
1226	.map_delete_elem	= sock_hash_delete_elem,
1227	.map_lookup_elem	= sock_hash_lookup,
1228	.map_lookup_elem_sys_only = sock_hash_lookup_sys,
1229	.map_release_uref	= sock_hash_release_progs,
1230	.map_check_btf		= map_check_no_btf,
1231	.map_btf_name		= "bpf_shtab",
1232	.map_btf_id		= &sock_hash_map_btf_id,
1233};
1234
1235static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
1236{
1237	switch (map->map_type) {
1238	case BPF_MAP_TYPE_SOCKMAP:
1239		return &container_of(map, struct bpf_stab, map)->progs;
1240	case BPF_MAP_TYPE_SOCKHASH:
1241		return &container_of(map, struct bpf_shtab, map)->progs;
1242	default:
1243		break;
1244	}
1245
1246	return NULL;
1247}
1248
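    	/* Attach, replace or detach a program in one of the map's three
    	 * slots (msg parser, skb stream parser, skb stream verdict);
    	 * called from the prog attach/detach syscall paths.
    	 */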
1249int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
1250			 struct bpf_prog *old, u32 which)
1251{
1252	struct sk_psock_progs *progs = sock_map_progs(map);
1253	struct bpf_prog **pprog;
1254
1255	if (!progs)
1256		return -EOPNOTSUPP;
1257
1258	switch (which) {
1259	case BPF_SK_MSG_VERDICT:
1260		pprog = &progs->msg_parser;
1261		break;
1262	case BPF_SK_SKB_STREAM_PARSER:
1263		pprog = &progs->skb_parser;
1264		break;
1265	case BPF_SK_SKB_STREAM_VERDICT:
1266		pprog = &progs->skb_verdict;
1267		break;
1268	default:
1269		return -EOPNOTSUPP;
1270	}
1271
1272	if (old)
1273		return psock_replace_prog(pprog, prog, old);
1274
1275	psock_set_prog(pprog, prog);
1276	return 0;
1277}
1278
1279static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link)
1280{
1281	switch (link->map->map_type) {
1282	case BPF_MAP_TYPE_SOCKMAP:
1283		return sock_map_delete_from_link(link->map, sk,
1284						 link->link_raw);
1285	case BPF_MAP_TYPE_SOCKHASH:
1286		return sock_hash_delete_from_link(link->map, sk,
1287						  link->link_raw);
1288	default:
1289		break;
1290	}
1291}
1292
1293static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock)
1294{
1295	struct sk_psock_link *link;
1296
1297	while ((link = sk_psock_link_pop(psock))) {
1298		sock_map_unlink(sk, link);
1299		sk_psock_free_link(link);
1300	}
1301}
1302
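    	/* Wrappers around the protocol's saved callbacks: when a socket
    	 * is unhashed or closed, remove all of its map links first, then
    	 * call the original handler.
    	 */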
1303void sock_map_unhash(struct sock *sk)
1304{
1305	void (*saved_unhash)(struct sock *sk);
1306	struct sk_psock *psock;
1307
1308	rcu_read_lock();
1309	psock = sk_psock(sk);
1310	if (unlikely(!psock)) {
1311		rcu_read_unlock();
1312		if (sk->sk_prot->unhash)
1313			sk->sk_prot->unhash(sk);
1314		return;
1315	}
1316
1317	saved_unhash = psock->saved_unhash;
1318	sock_map_remove_links(sk, psock);
1319	rcu_read_unlock();
1320	saved_unhash(sk);
1321}
1322
1323void sock_map_close(struct sock *sk, long timeout)
1324{
1325	void (*saved_close)(struct sock *sk, long timeout);
1326	struct sk_psock *psock;
1327
1328	lock_sock(sk);
1329	rcu_read_lock();
1330	psock = sk_psock(sk);
1331	if (unlikely(!psock)) {
1332		rcu_read_unlock();
1333		release_sock(sk);
1334		return sk->sk_prot->close(sk, timeout);
1335	}
1336
1337	saved_close = psock->saved_close;
1338	sock_map_remove_links(sk, psock);
1339	rcu_read_unlock();
1340	release_sock(sk);
1341	saved_close(sk, timeout);
1342}