net/core/sock_map.c (Linux v6.13.7)
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
   3
   4#include <linux/bpf.h>
   5#include <linux/btf_ids.h>
   6#include <linux/filter.h>
   7#include <linux/errno.h>
   8#include <linux/file.h>
   9#include <linux/net.h>
  10#include <linux/workqueue.h>
  11#include <linux/skmsg.h>
  12#include <linux/list.h>
  13#include <linux/jhash.h>
  14#include <linux/sock_diag.h>
  15#include <net/udp.h>
  16
  17struct bpf_stab {
  18	struct bpf_map map;
  19	struct sock **sks;
  20	struct sk_psock_progs progs;
  21	spinlock_t lock;
  22};
  23
  24#define SOCK_CREATE_FLAG_MASK				\
  25	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
  26
   27/* This mutex is used to
   28 *  - protect against races between prog/link attach/detach and link prog update, and
   29 *  - protect against races between releasing and accessing a map in bpf_link.
   30 * A single global mutex is used since contention is expected to be low.
   31 */
  32static DEFINE_MUTEX(sockmap_mutex);
  33
  34static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
  35				struct bpf_prog *old, struct bpf_link *link,
  36				u32 which);
  37static struct sk_psock_progs *sock_map_progs(struct bpf_map *map);
  38
  39static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
  40{
  41	struct bpf_stab *stab;
  42
  43	if (attr->max_entries == 0 ||
  44	    attr->key_size    != 4 ||
  45	    (attr->value_size != sizeof(u32) &&
  46	     attr->value_size != sizeof(u64)) ||
  47	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
  48		return ERR_PTR(-EINVAL);
  49
  50	stab = bpf_map_area_alloc(sizeof(*stab), NUMA_NO_NODE);
  51	if (!stab)
  52		return ERR_PTR(-ENOMEM);
  53
  54	bpf_map_init_from_attr(&stab->map, attr);
  55	spin_lock_init(&stab->lock);
  56
  57	stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
  58				       sizeof(struct sock *),
  59				       stab->map.numa_node);
  60	if (!stab->sks) {
  61		bpf_map_area_free(stab);
  62		return ERR_PTR(-ENOMEM);
  63	}
  64
  65	return &stab->map;
  66}
  67
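A minimal user-space sketch of creating a map that satisfies the checks in sock_map_alloc() above. This is an editor's illustration, not part of this file; it assumes libbpf v0.7+ for bpf_map_create(), and the map name, sizes, and helper name are arbitrary.

#include <bpf/bpf.h>	/* bpf_map_create(), assumed libbpf >= 0.7 */
#include <stdio.h>

int create_sockmap(void)	/* illustrative helper, not a kernel symbol */
{
	/* Per sock_map_alloc(): key_size must be 4, value_size must be
	 * sizeof(u32) or sizeof(u64), and max_entries must be non-zero.
	 */
	int map_fd = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, "example_sockmap",
				    sizeof(__u32), sizeof(__u64), 64, NULL);

	if (map_fd < 0)
		perror("bpf_map_create");
	return map_fd;
}
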
  68int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
  69{
  70	struct bpf_map *map;
  71	int ret;
  72
  73	if (attr->attach_flags || attr->replace_bpf_fd)
  74		return -EINVAL;
  75
  76	CLASS(fd, f)(attr->target_fd);
  77	map = __bpf_map_get(f);
  78	if (IS_ERR(map))
  79		return PTR_ERR(map);
  80	mutex_lock(&sockmap_mutex);
  81	ret = sock_map_prog_update(map, prog, NULL, NULL, attr->attach_type);
  82	mutex_unlock(&sockmap_mutex);
  83	return ret;
  84}
  85
  86int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
  87{
  88	struct bpf_prog *prog;
  89	struct bpf_map *map;
  90	int ret;
  91
  92	if (attr->attach_flags || attr->replace_bpf_fd)
  93		return -EINVAL;
  94
  95	CLASS(fd, f)(attr->target_fd);
  96	map = __bpf_map_get(f);
  97	if (IS_ERR(map))
  98		return PTR_ERR(map);
  99
 100	prog = bpf_prog_get(attr->attach_bpf_fd);
 101	if (IS_ERR(prog))
 102		return PTR_ERR(prog);
 103
 104	if (prog->type != ptype) {
 105		ret = -EINVAL;
 106		goto put_prog;
 107	}
 108
 109	mutex_lock(&sockmap_mutex);
 110	ret = sock_map_prog_update(map, NULL, prog, NULL, attr->attach_type);
 111	mutex_unlock(&sockmap_mutex);
 112put_prog:
 113	bpf_prog_put(prog);
 114	return ret;
 115}
 116
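Both entry points above are reached from user space through the attach/detach syscall commands, with the map fd (not a cgroup fd) as the target. A hedged sketch using libbpf's wrappers; the fds and the helper name are illustrative:

#include <bpf/bpf.h>

/* map_fd: a SOCKMAP/SOCKHASH fd; verdict_fd: a loaded BPF_PROG_TYPE_SK_SKB
 * program. Both are assumed to already exist.
 */
int attach_then_detach(int map_fd, int verdict_fd)
{
	int err;

	/* Reaches sock_map_get_from_fd() via BPF_PROG_ATTACH. */
	err = bpf_prog_attach(verdict_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (err)
		return err;

	/* Symmetric teardown; reaches sock_map_prog_detach(). */
	return bpf_prog_detach2(verdict_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT);
}
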
 117static void sock_map_sk_acquire(struct sock *sk)
 118	__acquires(&sk->sk_lock.slock)
 119{
 120	lock_sock(sk);
 121	rcu_read_lock();
 122}
 123
 124static void sock_map_sk_release(struct sock *sk)
 125	__releases(&sk->sk_lock.slock)
 126{
 127	rcu_read_unlock();
 128	release_sock(sk);
 129}
 130
 131static void sock_map_add_link(struct sk_psock *psock,
 132			      struct sk_psock_link *link,
 133			      struct bpf_map *map, void *link_raw)
 134{
 135	link->link_raw = link_raw;
 136	link->map = map;
 137	spin_lock_bh(&psock->link_lock);
 138	list_add_tail(&link->list, &psock->link);
 139	spin_unlock_bh(&psock->link_lock);
 140}
 141
 142static void sock_map_del_link(struct sock *sk,
 143			      struct sk_psock *psock, void *link_raw)
 144{
 145	bool strp_stop = false, verdict_stop = false;
 146	struct sk_psock_link *link, *tmp;
 147
 148	spin_lock_bh(&psock->link_lock);
 149	list_for_each_entry_safe(link, tmp, &psock->link, list) {
 150		if (link->link_raw == link_raw) {
 151			struct bpf_map *map = link->map;
 152			struct sk_psock_progs *progs = sock_map_progs(map);
 153
 154			if (psock->saved_data_ready && progs->stream_parser)
 155				strp_stop = true;
 156			if (psock->saved_data_ready && progs->stream_verdict)
 157				verdict_stop = true;
 158			if (psock->saved_data_ready && progs->skb_verdict)
 159				verdict_stop = true;
 160			list_del(&link->list);
 161			sk_psock_free_link(link);
 162			break;
 163		}
 164	}
 165	spin_unlock_bh(&psock->link_lock);
 166	if (strp_stop || verdict_stop) {
 167		write_lock_bh(&sk->sk_callback_lock);
 168		if (strp_stop)
 169			sk_psock_stop_strp(sk, psock);
 170		if (verdict_stop)
 171			sk_psock_stop_verdict(sk, psock);
 172
 173		if (psock->psock_update_sk_prot)
 174			psock->psock_update_sk_prot(sk, psock, false);
 175		write_unlock_bh(&sk->sk_callback_lock);
 176	}
 177}
 178
 179static void sock_map_unref(struct sock *sk, void *link_raw)
 180{
 181	struct sk_psock *psock = sk_psock(sk);
 182
 183	if (likely(psock)) {
 184		sock_map_del_link(sk, psock, link_raw);
 185		sk_psock_put(sk, psock);
 186	}
 187}
 188
 189static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
 190{
 191	if (!sk->sk_prot->psock_update_sk_prot)
 192		return -EINVAL;
 193	psock->psock_update_sk_prot = sk->sk_prot->psock_update_sk_prot;
 194	return sk->sk_prot->psock_update_sk_prot(sk, psock, false);
 195}
 196
 197static struct sk_psock *sock_map_psock_get_checked(struct sock *sk)
 198{
 199	struct sk_psock *psock;
 200
 201	rcu_read_lock();
 202	psock = sk_psock(sk);
 203	if (psock) {
 204		if (sk->sk_prot->close != sock_map_close) {
 205			psock = ERR_PTR(-EBUSY);
 206			goto out;
 207		}
 208
 209		if (!refcount_inc_not_zero(&psock->refcnt))
 210			psock = ERR_PTR(-EBUSY);
 211	}
 212out:
 213	rcu_read_unlock();
 214	return psock;
 215}
 216
 217static int sock_map_link(struct bpf_map *map, struct sock *sk)
 218{
 219	struct sk_psock_progs *progs = sock_map_progs(map);
 220	struct bpf_prog *stream_verdict = NULL;
 221	struct bpf_prog *stream_parser = NULL;
 222	struct bpf_prog *skb_verdict = NULL;
 223	struct bpf_prog *msg_parser = NULL;
 224	struct sk_psock *psock;
 225	int ret;
 226
 227	stream_verdict = READ_ONCE(progs->stream_verdict);
 228	if (stream_verdict) {
 229		stream_verdict = bpf_prog_inc_not_zero(stream_verdict);
 230		if (IS_ERR(stream_verdict))
 231			return PTR_ERR(stream_verdict);
 232	}
 233
 234	stream_parser = READ_ONCE(progs->stream_parser);
 235	if (stream_parser) {
 236		stream_parser = bpf_prog_inc_not_zero(stream_parser);
 237		if (IS_ERR(stream_parser)) {
 238			ret = PTR_ERR(stream_parser);
 239			goto out_put_stream_verdict;
 240		}
 241	}
 242
 243	msg_parser = READ_ONCE(progs->msg_parser);
 244	if (msg_parser) {
 245		msg_parser = bpf_prog_inc_not_zero(msg_parser);
 246		if (IS_ERR(msg_parser)) {
 247			ret = PTR_ERR(msg_parser);
 248			goto out_put_stream_parser;
 249		}
 250	}
 251
 252	skb_verdict = READ_ONCE(progs->skb_verdict);
 253	if (skb_verdict) {
 254		skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
 255		if (IS_ERR(skb_verdict)) {
 256			ret = PTR_ERR(skb_verdict);
 257			goto out_put_msg_parser;
 258		}
 259	}
 260
 261	psock = sock_map_psock_get_checked(sk);
 262	if (IS_ERR(psock)) {
 263		ret = PTR_ERR(psock);
 264		goto out_progs;
 265	}
 266
 267	if (psock) {
 268		if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
 269		    (stream_parser  && READ_ONCE(psock->progs.stream_parser)) ||
 270		    (skb_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
 271		    (skb_verdict && READ_ONCE(psock->progs.stream_verdict)) ||
 272		    (stream_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
 273		    (stream_verdict && READ_ONCE(psock->progs.stream_verdict))) {
 274			sk_psock_put(sk, psock);
 275			ret = -EBUSY;
 276			goto out_progs;
 277		}
 278	} else {
 279		psock = sk_psock_init(sk, map->numa_node);
 280		if (IS_ERR(psock)) {
 281			ret = PTR_ERR(psock);
 282			goto out_progs;
 283		}
 284	}
 285
 286	if (msg_parser)
 287		psock_set_prog(&psock->progs.msg_parser, msg_parser);
 288	if (stream_parser)
 289		psock_set_prog(&psock->progs.stream_parser, stream_parser);
 290	if (stream_verdict)
 291		psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
 292	if (skb_verdict)
 293		psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
 294
  295	/* msg_* and stream_* program references are tracked in psock after this
  296	 * point. Reference drop and cleanup occur through the psock destructor.
  297	 */
 298	ret = sock_map_init_proto(sk, psock);
 299	if (ret < 0) {
 300		sk_psock_put(sk, psock);
 301		goto out;
 302	}
 303
 304	write_lock_bh(&sk->sk_callback_lock);
 305	if (stream_parser && stream_verdict && !psock->saved_data_ready) {
 306		if (sk_is_tcp(sk))
 307			ret = sk_psock_init_strp(sk, psock);
 308		else
 309			ret = -EOPNOTSUPP;
 310		if (ret) {
 311			write_unlock_bh(&sk->sk_callback_lock);
 312			sk_psock_put(sk, psock);
 313			goto out;
 314		}
 315		sk_psock_start_strp(sk, psock);
 316	} else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
  317		sk_psock_start_verdict(sk, psock);
 318	} else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) {
 319		sk_psock_start_verdict(sk, psock);
 320	}
 321	write_unlock_bh(&sk->sk_callback_lock);
 322	return 0;
 323out_progs:
 324	if (skb_verdict)
 325		bpf_prog_put(skb_verdict);
 326out_put_msg_parser:
 327	if (msg_parser)
 328		bpf_prog_put(msg_parser);
 329out_put_stream_parser:
 330	if (stream_parser)
 331		bpf_prog_put(stream_parser);
 332out_put_stream_verdict:
 333	if (stream_verdict)
 334		bpf_prog_put(stream_verdict);
 335out:
 336	return ret;
 337}
 338
 339static void sock_map_free(struct bpf_map *map)
 340{
 341	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 342	int i;
 343
  344	/* After the sync, no updates or deletes will be in flight, so it
  345	 * is safe to walk the map and remove entries without risking a
  346	 * race in the EEXIST update case.
  347	 */
 348	synchronize_rcu();
 349	for (i = 0; i < stab->map.max_entries; i++) {
 350		struct sock **psk = &stab->sks[i];
 351		struct sock *sk;
 352
 353		sk = xchg(psk, NULL);
 354		if (sk) {
 355			sock_hold(sk);
 356			lock_sock(sk);
 357			rcu_read_lock();
 358			sock_map_unref(sk, psk);
 359			rcu_read_unlock();
 360			release_sock(sk);
 361			sock_put(sk);
 362		}
 363	}
 364
 365	/* wait for psock readers accessing its map link */
 366	synchronize_rcu();
 367
 368	bpf_map_area_free(stab->sks);
 369	bpf_map_area_free(stab);
 370}
 371
 372static void sock_map_release_progs(struct bpf_map *map)
 373{
 374	psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
 375}
 376
 377static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
 378{
 379	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 380
 381	WARN_ON_ONCE(!rcu_read_lock_held());
 382
 383	if (unlikely(key >= map->max_entries))
 384		return NULL;
 385	return READ_ONCE(stab->sks[key]);
 386}
 387
 388static void *sock_map_lookup(struct bpf_map *map, void *key)
 389{
 390	struct sock *sk;
 391
 392	sk = __sock_map_lookup_elem(map, *(u32 *)key);
 393	if (!sk)
 394		return NULL;
 395	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
 396		return NULL;
 397	return sk;
 398}
 399
 400static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
 401{
 402	struct sock *sk;
 403
 404	if (map->value_size != sizeof(u64))
 405		return ERR_PTR(-ENOSPC);
 406
 407	sk = __sock_map_lookup_elem(map, *(u32 *)key);
 408	if (!sk)
 409		return ERR_PTR(-ENOENT);
 410
 411	__sock_gen_cookie(sk);
 412	return &sk->sk_cookie;
 413}
 414
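From the syscall side, lookups are served by sock_map_lookup_sys(), which requires value_size == 8 and returns the socket cookie rather than a pointer. A sketch of reading slot 0; the helper name is illustrative:

#include <bpf/bpf.h>

int read_slot_cookie(int map_fd)
{
	__u32 key = 0;
	__u64 cookie;

	/* Fails with -ENOSPC if the map was created with value_size == 4,
	 * and -ENOENT if the slot is empty.
	 */
	if (bpf_map_lookup_elem(map_fd, &key, &cookie))
		return -1;

	/* cookie holds the value generated by __sock_gen_cookie() */
	return 0;
}
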
 415static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
 416			     struct sock **psk)
 417{
 418	struct sock *sk = NULL;
 419	int err = 0;
 420
 421	spin_lock_bh(&stab->lock);
 422	if (!sk_test || sk_test == *psk)
 423		sk = xchg(psk, NULL);
 424
 425	if (likely(sk))
 426		sock_map_unref(sk, psk);
 427	else
 428		err = -EINVAL;
 429
 430	spin_unlock_bh(&stab->lock);
 431	return err;
 432}
 433
 434static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
 435				      void *link_raw)
 436{
 437	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 438
 439	__sock_map_delete(stab, sk, link_raw);
 440}
 441
 442static long sock_map_delete_elem(struct bpf_map *map, void *key)
 443{
 444	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 445	u32 i = *(u32 *)key;
 446	struct sock **psk;
 447
 448	if (unlikely(i >= map->max_entries))
 449		return -EINVAL;
 450
 451	psk = &stab->sks[i];
 452	return __sock_map_delete(stab, NULL, psk);
 453}
 454
 455static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
 456{
 457	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 458	u32 i = key ? *(u32 *)key : U32_MAX;
 459	u32 *key_next = next;
 460
 461	if (i == stab->map.max_entries - 1)
 462		return -ENOENT;
 463	if (i >= stab->map.max_entries)
 464		*key_next = 0;
 465	else
 466		*key_next = i + 1;
 467	return 0;
 468}
 469
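sock_map_get_next_key() implements the usual get-next contract: a NULL or out-of-range key restarts the walk at index 0, and the last slot yields -ENOENT. The standard user-space iteration, sketched with an illustrative helper:

#include <bpf/bpf.h>

void walk_sockmap(int map_fd)
{
	__u32 key, next;
	__u32 *cur = NULL;	/* NULL key: start from the first slot */

	while (!bpf_map_get_next_key(map_fd, cur, &next)) {
		/* inspect slot 'next' here, e.g. via bpf_map_lookup_elem() */
		key = next;
		cur = &key;
	}
	/* loop terminates once get_next_key reports -ENOENT */
}
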
 470static int sock_map_update_common(struct bpf_map *map, u32 idx,
 471				  struct sock *sk, u64 flags)
 472{
 473	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 474	struct sk_psock_link *link;
 475	struct sk_psock *psock;
 476	struct sock *osk;
 477	int ret;
 478
 479	WARN_ON_ONCE(!rcu_read_lock_held());
 480	if (unlikely(flags > BPF_EXIST))
 481		return -EINVAL;
 482	if (unlikely(idx >= map->max_entries))
 483		return -E2BIG;
 484
 485	link = sk_psock_init_link();
 486	if (!link)
 487		return -ENOMEM;
 488
 489	ret = sock_map_link(map, sk);
 490	if (ret < 0)
 491		goto out_free;
 492
 493	psock = sk_psock(sk);
 494	WARN_ON_ONCE(!psock);
 495
 496	spin_lock_bh(&stab->lock);
 497	osk = stab->sks[idx];
 498	if (osk && flags == BPF_NOEXIST) {
 499		ret = -EEXIST;
 500		goto out_unlock;
 501	} else if (!osk && flags == BPF_EXIST) {
 502		ret = -ENOENT;
 503		goto out_unlock;
 504	}
 505
 506	sock_map_add_link(psock, link, map, &stab->sks[idx]);
 507	stab->sks[idx] = sk;
 508	if (osk)
 509		sock_map_unref(osk, &stab->sks[idx]);
 510	spin_unlock_bh(&stab->lock);
 511	return 0;
 512out_unlock:
 513	spin_unlock_bh(&stab->lock);
 514	if (psock)
 515		sk_psock_put(sk, psock);
 516out_free:
 517	sk_psock_free_link(link);
 518	return ret;
 519}
 520
 521static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
 522{
 523	return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
 524	       ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
 525	       ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
 526}
 527
 528static bool sock_map_redirect_allowed(const struct sock *sk)
 529{
 530	if (sk_is_tcp(sk))
 531		return sk->sk_state != TCP_LISTEN;
 532	else
 533		return sk->sk_state == TCP_ESTABLISHED;
 534}
 535
 536static bool sock_map_sk_is_suitable(const struct sock *sk)
 537{
 538	return !!sk->sk_prot->psock_update_sk_prot;
 539}
 540
 541static bool sock_map_sk_state_allowed(const struct sock *sk)
 542{
 543	if (sk_is_tcp(sk))
 544		return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
 545	if (sk_is_stream_unix(sk))
 546		return (1 << sk->sk_state) & TCPF_ESTABLISHED;
 547	if (sk_is_vsock(sk) &&
 548	    (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET))
 549		return (1 << sk->sk_state) & TCPF_ESTABLISHED;
 550	return true;
 551}
 552
 553static int sock_hash_update_common(struct bpf_map *map, void *key,
 554				   struct sock *sk, u64 flags);
 555
 556int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
 557			     u64 flags)
 558{
 559	struct socket *sock;
 560	struct sock *sk;
 561	int ret;
 562	u64 ufd;
 563
 564	if (map->value_size == sizeof(u64))
 565		ufd = *(u64 *)value;
 566	else
 567		ufd = *(u32 *)value;
 568	if (ufd > S32_MAX)
 569		return -EINVAL;
 570
 571	sock = sockfd_lookup(ufd, &ret);
 572	if (!sock)
 573		return ret;
 574	sk = sock->sk;
 575	if (!sk) {
 576		ret = -EINVAL;
 577		goto out;
 578	}
 579	if (!sock_map_sk_is_suitable(sk)) {
 580		ret = -EOPNOTSUPP;
 581		goto out;
 582	}
 583
 584	sock_map_sk_acquire(sk);
 585	if (!sock_map_sk_state_allowed(sk))
 586		ret = -EOPNOTSUPP;
 587	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
 588		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
 589	else
 590		ret = sock_hash_update_common(map, key, sk, flags);
 591	sock_map_sk_release(sk);
 592out:
 593	sockfd_put(sock);
 594	return ret;
 595}
 596
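From user space the update value is a socket file descriptor, which sock_map_update_elem_sys() resolves via sockfd_lookup() and vets with sock_map_sk_is_suitable() and sock_map_sk_state_allowed(). A sketch assuming an already-established TCP socket; the helper name is illustrative:

#include <bpf/bpf.h>

/* sock_fd: an established or listening TCP socket; idx: the slot to fill. */
int add_socket(int map_fd, __u32 idx, int sock_fd)
{
	__u64 value = sock_fd;	/* use a __u32 if the map's value_size is 4 */

	return bpf_map_update_elem(map_fd, &idx, &value, BPF_ANY);
}
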
 597static long sock_map_update_elem(struct bpf_map *map, void *key,
 598				 void *value, u64 flags)
 599{
 600	struct sock *sk = (struct sock *)value;
 601	int ret;
 602
 603	if (unlikely(!sk || !sk_fullsock(sk)))
 604		return -EINVAL;
 605
 606	if (!sock_map_sk_is_suitable(sk))
 607		return -EOPNOTSUPP;
 608
 609	local_bh_disable();
 610	bh_lock_sock(sk);
 611	if (!sock_map_sk_state_allowed(sk))
 612		ret = -EOPNOTSUPP;
 613	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
 614		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
 615	else
 616		ret = sock_hash_update_common(map, key, sk, flags);
 617	bh_unlock_sock(sk);
 618	local_bh_enable();
 619	return ret;
 620}
 621
 622BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
 623	   struct bpf_map *, map, void *, key, u64, flags)
 624{
 625	WARN_ON_ONCE(!rcu_read_lock_held());
 626
 627	if (likely(sock_map_sk_is_suitable(sops->sk) &&
 628		   sock_map_op_okay(sops)))
 629		return sock_map_update_common(map, *(u32 *)key, sops->sk,
 630					      flags);
 631	return -EOPNOTSUPP;
 632}
 633
 634const struct bpf_func_proto bpf_sock_map_update_proto = {
 635	.func		= bpf_sock_map_update,
 636	.gpl_only	= false,
 637	.pkt_access	= true,
 638	.ret_type	= RET_INTEGER,
 639	.arg1_type	= ARG_PTR_TO_CTX,
 640	.arg2_type	= ARG_CONST_MAP_PTR,
 641	.arg3_type	= ARG_PTR_TO_MAP_KEY,
 642	.arg4_type	= ARG_ANYTHING,
 643};
 644
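The helper above is available to BPF_PROG_TYPE_SOCK_OPS programs, and sock_map_op_okay() limits it to the established/listen callbacks. An illustrative program sketch; the map layout and key choice are arbitrary:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u64);
} sock_map SEC(".maps");

SEC("sockops")
int add_established(struct bpf_sock_ops *skops)
{
	__u32 key = 0;	/* illustrative fixed slot */

	switch (skops->op) {
	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		bpf_sock_map_update(skops, &sock_map, &key, BPF_NOEXIST);
		break;
	}
	return 0;
}

char _license[] SEC("license") = "GPL";
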
 645BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
 646	   struct bpf_map *, map, u32, key, u64, flags)
 647{
 648	struct sock *sk;
 649
 650	if (unlikely(flags & ~(BPF_F_INGRESS)))
 651		return SK_DROP;
 652
 653	sk = __sock_map_lookup_elem(map, key);
 654	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
 655		return SK_DROP;
 656	if ((flags & BPF_F_INGRESS) && sk_is_vsock(sk))
 657		return SK_DROP;
 658
 659	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
 660	return SK_PASS;
 661}
 662
 663const struct bpf_func_proto bpf_sk_redirect_map_proto = {
 664	.func           = bpf_sk_redirect_map,
 665	.gpl_only       = false,
 666	.ret_type       = RET_INTEGER,
 667	.arg1_type	= ARG_PTR_TO_CTX,
 668	.arg2_type      = ARG_CONST_MAP_PTR,
 669	.arg3_type      = ARG_ANYTHING,
 670	.arg4_type      = ARG_ANYTHING,
 671};
 672
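On the skb verdict path the helper records the redirect target in the skb and returns SK_PASS; only BPF_F_INGRESS is accepted in flags. A sketch of a stream verdict program, reusing the includes and sock_map declaration from the sockops sketch above:

SEC("sk_skb/stream_verdict")
int redirect_to_slot0(struct __sk_buff *skb)
{
	__u32 key = 0;	/* illustrative: always target slot 0 */

	/* SK_PASS on success; SK_DROP if the slot is empty or the
	 * target socket cannot accept a redirect.
	 */
	return bpf_sk_redirect_map(skb, &sock_map, key, 0);
}
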
 673BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
 674	   struct bpf_map *, map, u32, key, u64, flags)
 675{
 676	struct sock *sk;
 677
 678	if (unlikely(flags & ~(BPF_F_INGRESS)))
 679		return SK_DROP;
 680
 681	sk = __sock_map_lookup_elem(map, key);
 682	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
 683		return SK_DROP;
 684	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
 685		return SK_DROP;
 686	if (sk_is_vsock(sk))
 687		return SK_DROP;
 688
 689	msg->flags = flags;
 690	msg->sk_redir = sk;
 691	return SK_PASS;
 692}
 693
 694const struct bpf_func_proto bpf_msg_redirect_map_proto = {
 695	.func           = bpf_msg_redirect_map,
 696	.gpl_only       = false,
 697	.ret_type       = RET_INTEGER,
 698	.arg1_type	= ARG_PTR_TO_CTX,
 699	.arg2_type      = ARG_CONST_MAP_PTR,
 700	.arg3_type      = ARG_ANYTHING,
 701	.arg4_type      = ARG_ANYTHING,
 702};
 703
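The msg variant serves BPF_PROG_TYPE_SK_MSG programs attached as BPF_SK_MSG_VERDICT; note the extra checks above (egress redirects only to TCP, never to vsock). A matching sketch, again reusing the earlier map declaration:

SEC("sk_msg")
int msg_redirect_to_slot0(struct sk_msg_md *msg)
{
	__u32 key = 0;	/* illustrative target slot */

	return bpf_msg_redirect_map(msg, &sock_map, key, 0);
}
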
 704struct sock_map_seq_info {
 705	struct bpf_map *map;
 706	struct sock *sk;
 707	u32 index;
 708};
 709
 710struct bpf_iter__sockmap {
 711	__bpf_md_ptr(struct bpf_iter_meta *, meta);
 712	__bpf_md_ptr(struct bpf_map *, map);
 713	__bpf_md_ptr(void *, key);
 714	__bpf_md_ptr(struct sock *, sk);
 715};
 716
 717DEFINE_BPF_ITER_FUNC(sockmap, struct bpf_iter_meta *meta,
 718		     struct bpf_map *map, void *key,
 719		     struct sock *sk)
 720
 721static void *sock_map_seq_lookup_elem(struct sock_map_seq_info *info)
 722{
 723	if (unlikely(info->index >= info->map->max_entries))
 724		return NULL;
 725
 726	info->sk = __sock_map_lookup_elem(info->map, info->index);
 727
 728	/* can't return sk directly, since that might be NULL */
 729	return info;
 730}
 731
 732static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos)
 733	__acquires(rcu)
 734{
 735	struct sock_map_seq_info *info = seq->private;
 736
 737	if (*pos == 0)
 738		++*pos;
 739
 740	/* pairs with sock_map_seq_stop */
 741	rcu_read_lock();
 742	return sock_map_seq_lookup_elem(info);
 743}
 744
 745static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 746	__must_hold(rcu)
 747{
 748	struct sock_map_seq_info *info = seq->private;
 749
 750	++*pos;
 751	++info->index;
 752
 753	return sock_map_seq_lookup_elem(info);
 754}
 755
 756static int sock_map_seq_show(struct seq_file *seq, void *v)
 757	__must_hold(rcu)
 758{
 759	struct sock_map_seq_info *info = seq->private;
 760	struct bpf_iter__sockmap ctx = {};
 761	struct bpf_iter_meta meta;
 762	struct bpf_prog *prog;
 763
 764	meta.seq = seq;
 765	prog = bpf_iter_get_info(&meta, !v);
 766	if (!prog)
 767		return 0;
 768
 769	ctx.meta = &meta;
 770	ctx.map = info->map;
 771	if (v) {
 772		ctx.key = &info->index;
 773		ctx.sk = info->sk;
 774	}
 775
 776	return bpf_iter_run_prog(prog, &ctx);
 777}
 778
 779static void sock_map_seq_stop(struct seq_file *seq, void *v)
 780	__releases(rcu)
 781{
 782	if (!v)
 783		(void)sock_map_seq_show(seq, NULL);
 784
 785	/* pairs with sock_map_seq_start */
 786	rcu_read_unlock();
 787}
 788
 789static const struct seq_operations sock_map_seq_ops = {
 790	.start	= sock_map_seq_start,
 791	.next	= sock_map_seq_next,
 792	.stop	= sock_map_seq_stop,
 793	.show	= sock_map_seq_show,
 794};
 795
 796static int sock_map_init_seq_private(void *priv_data,
 797				     struct bpf_iter_aux_info *aux)
 798{
 799	struct sock_map_seq_info *info = priv_data;
 800
 801	bpf_map_inc_with_uref(aux->map);
 802	info->map = aux->map;
 803	return 0;
 804}
 805
 806static void sock_map_fini_seq_private(void *priv_data)
 807{
 808	struct sock_map_seq_info *info = priv_data;
 809
 810	bpf_map_put_with_uref(info->map);
 811}
 812
 813static u64 sock_map_mem_usage(const struct bpf_map *map)
 814{
 815	u64 usage = sizeof(struct bpf_stab);
 816
 817	usage += (u64)map->max_entries * sizeof(struct sock *);
 818	return usage;
 819}
 820
 821static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
 822	.seq_ops		= &sock_map_seq_ops,
 823	.init_seq_private	= sock_map_init_seq_private,
 824	.fini_seq_private	= sock_map_fini_seq_private,
 825	.seq_priv_size		= sizeof(struct sock_map_seq_info),
 826};
 827
 828BTF_ID_LIST_SINGLE(sock_map_btf_ids, struct, bpf_stab)
 829const struct bpf_map_ops sock_map_ops = {
 830	.map_meta_equal		= bpf_map_meta_equal,
 831	.map_alloc		= sock_map_alloc,
 832	.map_free		= sock_map_free,
 833	.map_get_next_key	= sock_map_get_next_key,
 834	.map_lookup_elem_sys_only = sock_map_lookup_sys,
 835	.map_update_elem	= sock_map_update_elem,
 836	.map_delete_elem	= sock_map_delete_elem,
 837	.map_lookup_elem	= sock_map_lookup,
 838	.map_release_uref	= sock_map_release_progs,
 839	.map_check_btf		= map_check_no_btf,
 840	.map_mem_usage		= sock_map_mem_usage,
 841	.map_btf_id		= &sock_map_btf_ids[0],
 842	.iter_seq_info		= &sock_map_iter_seq_info,
 843};
 844
 845struct bpf_shtab_elem {
 846	struct rcu_head rcu;
 847	u32 hash;
 848	struct sock *sk;
 849	struct hlist_node node;
 850	u8 key[];
 851};
 852
 853struct bpf_shtab_bucket {
 854	struct hlist_head head;
 855	spinlock_t lock;
 856};
 857
 858struct bpf_shtab {
 859	struct bpf_map map;
 860	struct bpf_shtab_bucket *buckets;
 861	u32 buckets_num;
 862	u32 elem_size;
 863	struct sk_psock_progs progs;
 864	atomic_t count;
 865};
 866
 867static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
 868{
 869	return jhash(key, len, 0);
 870}
 871
 872static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab,
 873							u32 hash)
 874{
 875	return &htab->buckets[hash & (htab->buckets_num - 1)];
 876}
 877
 878static struct bpf_shtab_elem *
 879sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
 880			  u32 key_size)
 881{
 882	struct bpf_shtab_elem *elem;
 883
 884	hlist_for_each_entry_rcu(elem, head, node) {
 885		if (elem->hash == hash &&
 886		    !memcmp(&elem->key, key, key_size))
 887			return elem;
 888	}
 889
 890	return NULL;
 891}
 892
 893static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
 894{
 895	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 896	u32 key_size = map->key_size, hash;
 897	struct bpf_shtab_bucket *bucket;
 898	struct bpf_shtab_elem *elem;
 899
 900	WARN_ON_ONCE(!rcu_read_lock_held());
 901
 902	hash = sock_hash_bucket_hash(key, key_size);
 903	bucket = sock_hash_select_bucket(htab, hash);
 904	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
 905
 906	return elem ? elem->sk : NULL;
 907}
 908
 909static void sock_hash_free_elem(struct bpf_shtab *htab,
 910				struct bpf_shtab_elem *elem)
 911{
 912	atomic_dec(&htab->count);
 913	kfree_rcu(elem, rcu);
 914}
 915
 916static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
 917				       void *link_raw)
 918{
 919	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 920	struct bpf_shtab_elem *elem_probe, *elem = link_raw;
 921	struct bpf_shtab_bucket *bucket;
 922
 923	WARN_ON_ONCE(!rcu_read_lock_held());
 924	bucket = sock_hash_select_bucket(htab, elem->hash);
 925
  926	/* elem may be deleted in parallel from the map, but access here
  927	 * is okay since it's going away only after an RCU grace period.
  928	 * However, we need to check whether it's still present.
  929	 */
 930	spin_lock_bh(&bucket->lock);
 931	elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
 932					       elem->key, map->key_size);
 933	if (elem_probe && elem_probe == elem) {
 934		hlist_del_rcu(&elem->node);
 935		sock_map_unref(elem->sk, elem);
 936		sock_hash_free_elem(htab, elem);
 937	}
 938	spin_unlock_bh(&bucket->lock);
 939}
 940
 941static long sock_hash_delete_elem(struct bpf_map *map, void *key)
 942{
 943	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 944	u32 hash, key_size = map->key_size;
 945	struct bpf_shtab_bucket *bucket;
 946	struct bpf_shtab_elem *elem;
 947	int ret = -ENOENT;
 948
 949	hash = sock_hash_bucket_hash(key, key_size);
 950	bucket = sock_hash_select_bucket(htab, hash);
 951
 952	spin_lock_bh(&bucket->lock);
 953	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
 954	if (elem) {
 955		hlist_del_rcu(&elem->node);
 956		sock_map_unref(elem->sk, elem);
 957		sock_hash_free_elem(htab, elem);
 958		ret = 0;
 959	}
 960	spin_unlock_bh(&bucket->lock);
 961	return ret;
 962}
 963
 964static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
 965						   void *key, u32 key_size,
 966						   u32 hash, struct sock *sk,
 967						   struct bpf_shtab_elem *old)
 968{
 969	struct bpf_shtab_elem *new;
 970
 971	if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
 972		if (!old) {
 973			atomic_dec(&htab->count);
 974			return ERR_PTR(-E2BIG);
 975		}
 976	}
 977
 978	new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
 979				   GFP_ATOMIC | __GFP_NOWARN,
 980				   htab->map.numa_node);
 981	if (!new) {
 982		atomic_dec(&htab->count);
 983		return ERR_PTR(-ENOMEM);
 984	}
 985	memcpy(new->key, key, key_size);
 986	new->sk = sk;
 987	new->hash = hash;
 988	return new;
 989}
 990
 991static int sock_hash_update_common(struct bpf_map *map, void *key,
 992				   struct sock *sk, u64 flags)
 993{
 994	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 995	u32 key_size = map->key_size, hash;
 996	struct bpf_shtab_elem *elem, *elem_new;
 997	struct bpf_shtab_bucket *bucket;
 998	struct sk_psock_link *link;
 999	struct sk_psock *psock;
1000	int ret;
1001
1002	WARN_ON_ONCE(!rcu_read_lock_held());
1003	if (unlikely(flags > BPF_EXIST))
1004		return -EINVAL;
1005
1006	link = sk_psock_init_link();
1007	if (!link)
1008		return -ENOMEM;
1009
1010	ret = sock_map_link(map, sk);
1011	if (ret < 0)
1012		goto out_free;
1013
1014	psock = sk_psock(sk);
1015	WARN_ON_ONCE(!psock);
1016
1017	hash = sock_hash_bucket_hash(key, key_size);
1018	bucket = sock_hash_select_bucket(htab, hash);
1019
1020	spin_lock_bh(&bucket->lock);
1021	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
1022	if (elem && flags == BPF_NOEXIST) {
1023		ret = -EEXIST;
1024		goto out_unlock;
1025	} else if (!elem && flags == BPF_EXIST) {
1026		ret = -ENOENT;
1027		goto out_unlock;
1028	}
1029
1030	elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
1031	if (IS_ERR(elem_new)) {
1032		ret = PTR_ERR(elem_new);
1033		goto out_unlock;
1034	}
1035
1036	sock_map_add_link(psock, link, map, elem_new);
1037	/* Add the new element to the head of the list, so that a
1038	 * concurrent search finds it before the old element.
1039	 */
1040	hlist_add_head_rcu(&elem_new->node, &bucket->head);
1041	if (elem) {
1042		hlist_del_rcu(&elem->node);
1043		sock_map_unref(elem->sk, elem);
1044		sock_hash_free_elem(htab, elem);
1045	}
1046	spin_unlock_bh(&bucket->lock);
1047	return 0;
1048out_unlock:
1049	spin_unlock_bh(&bucket->lock);
1050	sk_psock_put(sk, psock);
1051out_free:
1052	sk_psock_free_link(link);
1053	return ret;
1054}
1055
1056static int sock_hash_get_next_key(struct bpf_map *map, void *key,
1057				  void *key_next)
1058{
1059	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1060	struct bpf_shtab_elem *elem, *elem_next;
1061	u32 hash, key_size = map->key_size;
1062	struct hlist_head *head;
1063	int i = 0;
1064
1065	if (!key)
1066		goto find_first_elem;
1067	hash = sock_hash_bucket_hash(key, key_size);
1068	head = &sock_hash_select_bucket(htab, hash)->head;
1069	elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
1070	if (!elem)
1071		goto find_first_elem;
1072
1073	elem_next = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&elem->node)),
1074				     struct bpf_shtab_elem, node);
1075	if (elem_next) {
1076		memcpy(key_next, elem_next->key, key_size);
1077		return 0;
1078	}
1079
1080	i = hash & (htab->buckets_num - 1);
1081	i++;
1082find_first_elem:
1083	for (; i < htab->buckets_num; i++) {
1084		head = &sock_hash_select_bucket(htab, i)->head;
1085		elem_next = hlist_entry_safe(rcu_dereference(hlist_first_rcu(head)),
1086					     struct bpf_shtab_elem, node);
1087		if (elem_next) {
1088			memcpy(key_next, elem_next->key, key_size);
1089			return 0;
1090		}
1091	}
1092
1093	return -ENOENT;
1094}
1095
1096static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
1097{
1098	struct bpf_shtab *htab;
1099	int i, err;
1100
1101	if (attr->max_entries == 0 ||
1102	    attr->key_size    == 0 ||
1103	    (attr->value_size != sizeof(u32) &&
1104	     attr->value_size != sizeof(u64)) ||
1105	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
1106		return ERR_PTR(-EINVAL);
1107	if (attr->key_size > MAX_BPF_STACK)
1108		return ERR_PTR(-E2BIG);
1109
1110	htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
1111	if (!htab)
1112		return ERR_PTR(-ENOMEM);
1113
1114	bpf_map_init_from_attr(&htab->map, attr);
1115
1116	htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
1117	htab->elem_size = sizeof(struct bpf_shtab_elem) +
1118			  round_up(htab->map.key_size, 8);
1119	if (htab->buckets_num == 0 ||
1120	    htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) {
1121		err = -EINVAL;
1122		goto free_htab;
1123	}
1124
1125	htab->buckets = bpf_map_area_alloc(htab->buckets_num *
1126					   sizeof(struct bpf_shtab_bucket),
1127					   htab->map.numa_node);
1128	if (!htab->buckets) {
1129		err = -ENOMEM;
1130		goto free_htab;
1131	}
1132
1133	for (i = 0; i < htab->buckets_num; i++) {
1134		INIT_HLIST_HEAD(&htab->buckets[i].head);
1135		spin_lock_init(&htab->buckets[i].lock);
1136	}
1137
1138	return &htab->map;
1139free_htab:
1140	bpf_map_area_free(htab);
1141	return ERR_PTR(err);
1142}
1143
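Unlike sockmap, sockhash takes an arbitrary fixed-size key of up to MAX_BPF_STACK bytes, so for example a connection tuple can index the table directly. A user-space sketch; the key layout and names are hypothetical:

#include <bpf/bpf.h>

/* Hypothetical key; any fixed-size layout up to MAX_BPF_STACK bytes works. */
struct tuple_key {
	__u32 saddr, daddr;
	__u16 sport, dport;
};

int create_sockhash(void)
{
	return bpf_map_create(BPF_MAP_TYPE_SOCKHASH, "example_sockhash",
			      sizeof(struct tuple_key), sizeof(__u64),
			      1024, NULL);
}
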
1144static void sock_hash_free(struct bpf_map *map)
1145{
1146	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1147	struct bpf_shtab_bucket *bucket;
1148	struct hlist_head unlink_list;
1149	struct bpf_shtab_elem *elem;
1150	struct hlist_node *node;
1151	int i;
1152
1153	/* After the sync, no updates or deletes will be in flight, so it
1154	 * is safe to walk the map and remove entries without risking a
1155	 * race in the EEXIST update case.
1156	 */
1157	synchronize_rcu();
1158	for (i = 0; i < htab->buckets_num; i++) {
1159		bucket = sock_hash_select_bucket(htab, i);
1160
1161		/* We are racing with sock_hash_delete_from_link to
1162		 * enter the spin-lock critical section. Every socket on
1163		 * the list is still linked to sockhash. Since link
1164		 * exists, psock exists and holds a ref to the socket. That
1165		 * lets us grab a socket ref too.
1166		 */
1167		spin_lock_bh(&bucket->lock);
1168		hlist_for_each_entry(elem, &bucket->head, node)
1169			sock_hold(elem->sk);
1170		hlist_move_list(&bucket->head, &unlink_list);
1171		spin_unlock_bh(&bucket->lock);
1172
1173		/* Process removed entries outside of atomic context so we
1174		 * can block on the socket lock before deleting the psock's
1175		 * link to the sockhash.
1176		 */
1177		hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
1178			hlist_del(&elem->node);
1179			lock_sock(elem->sk);
1180			rcu_read_lock();
1181			sock_map_unref(elem->sk, elem);
1182			rcu_read_unlock();
1183			release_sock(elem->sk);
1184			sock_put(elem->sk);
1185			sock_hash_free_elem(htab, elem);
1186		}
1187		cond_resched();
1188	}
1189
1190	/* wait for psock readers accessing its map link */
1191	synchronize_rcu();
1192
1193	bpf_map_area_free(htab->buckets);
1194	bpf_map_area_free(htab);
1195}
1196
1197static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
1198{
1199	struct sock *sk;
1200
1201	if (map->value_size != sizeof(u64))
1202		return ERR_PTR(-ENOSPC);
1203
1204	sk = __sock_hash_lookup_elem(map, key);
1205	if (!sk)
1206		return ERR_PTR(-ENOENT);
1207
1208	__sock_gen_cookie(sk);
1209	return &sk->sk_cookie;
1210}
1211
1212static void *sock_hash_lookup(struct bpf_map *map, void *key)
1213{
1214	struct sock *sk;
1215
1216	sk = __sock_hash_lookup_elem(map, key);
1217	if (!sk)
1218		return NULL;
1219	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
1220		return NULL;
1221	return sk;
1222}
1223
1224static void sock_hash_release_progs(struct bpf_map *map)
1225{
1226	psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs);
1227}
1228
1229BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
1230	   struct bpf_map *, map, void *, key, u64, flags)
1231{
1232	WARN_ON_ONCE(!rcu_read_lock_held());
1233
1234	if (likely(sock_map_sk_is_suitable(sops->sk) &&
1235		   sock_map_op_okay(sops)))
1236		return sock_hash_update_common(map, key, sops->sk, flags);
1237	return -EOPNOTSUPP;
1238}
1239
1240const struct bpf_func_proto bpf_sock_hash_update_proto = {
1241	.func		= bpf_sock_hash_update,
1242	.gpl_only	= false,
1243	.pkt_access	= true,
1244	.ret_type	= RET_INTEGER,
1245	.arg1_type	= ARG_PTR_TO_CTX,
1246	.arg2_type	= ARG_CONST_MAP_PTR,
1247	.arg3_type	= ARG_PTR_TO_MAP_KEY,
1248	.arg4_type	= ARG_ANYTHING,
1249};
1250
1251BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
1252	   struct bpf_map *, map, void *, key, u64, flags)
1253{
1254	struct sock *sk;
1255
1256	if (unlikely(flags & ~(BPF_F_INGRESS)))
1257		return SK_DROP;
1258
1259	sk = __sock_hash_lookup_elem(map, key);
1260	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1261		return SK_DROP;
1262	if ((flags & BPF_F_INGRESS) && sk_is_vsock(sk))
1263		return SK_DROP;
1264
1265	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
1266	return SK_PASS;
1267}
1268
1269const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
1270	.func           = bpf_sk_redirect_hash,
1271	.gpl_only       = false,
1272	.ret_type       = RET_INTEGER,
1273	.arg1_type	= ARG_PTR_TO_CTX,
1274	.arg2_type      = ARG_CONST_MAP_PTR,
1275	.arg3_type      = ARG_PTR_TO_MAP_KEY,
1276	.arg4_type      = ARG_ANYTHING,
1277};
1278
1279BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
1280	   struct bpf_map *, map, void *, key, u64, flags)
1281{
1282	struct sock *sk;
1283
1284	if (unlikely(flags & ~(BPF_F_INGRESS)))
1285		return SK_DROP;
1286
1287	sk = __sock_hash_lookup_elem(map, key);
1288	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1289		return SK_DROP;
1290	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
1291		return SK_DROP;
1292	if (sk_is_vsock(sk))
1293		return SK_DROP;
1294
1295	msg->flags = flags;
1296	msg->sk_redir = sk;
1297	return SK_PASS;
1298}
1299
1300const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
1301	.func           = bpf_msg_redirect_hash,
1302	.gpl_only       = false,
1303	.ret_type       = RET_INTEGER,
1304	.arg1_type	= ARG_PTR_TO_CTX,
1305	.arg2_type      = ARG_CONST_MAP_PTR,
1306	.arg3_type      = ARG_PTR_TO_MAP_KEY,
1307	.arg4_type      = ARG_ANYTHING,
1308};
1309
1310struct sock_hash_seq_info {
1311	struct bpf_map *map;
1312	struct bpf_shtab *htab;
1313	u32 bucket_id;
1314};
1315
1316static void *sock_hash_seq_find_next(struct sock_hash_seq_info *info,
1317				     struct bpf_shtab_elem *prev_elem)
1318{
1319	const struct bpf_shtab *htab = info->htab;
1320	struct bpf_shtab_bucket *bucket;
1321	struct bpf_shtab_elem *elem;
1322	struct hlist_node *node;
1323
1324	/* try to find next elem in the same bucket */
1325	if (prev_elem) {
1326		node = rcu_dereference(hlist_next_rcu(&prev_elem->node));
1327		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
1328		if (elem)
1329			return elem;
1330
1331		/* no more elements, continue in the next bucket */
1332		info->bucket_id++;
1333	}
1334
1335	for (; info->bucket_id < htab->buckets_num; info->bucket_id++) {
1336		bucket = &htab->buckets[info->bucket_id];
1337		node = rcu_dereference(hlist_first_rcu(&bucket->head));
1338		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
1339		if (elem)
1340			return elem;
1341	}
1342
1343	return NULL;
1344}
1345
1346static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos)
1347	__acquires(rcu)
1348{
1349	struct sock_hash_seq_info *info = seq->private;
1350
1351	if (*pos == 0)
1352		++*pos;
1353
1354	/* pairs with sock_hash_seq_stop */
1355	rcu_read_lock();
1356	return sock_hash_seq_find_next(info, NULL);
1357}
1358
1359static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1360	__must_hold(rcu)
1361{
1362	struct sock_hash_seq_info *info = seq->private;
1363
1364	++*pos;
1365	return sock_hash_seq_find_next(info, v);
1366}
1367
1368static int sock_hash_seq_show(struct seq_file *seq, void *v)
1369	__must_hold(rcu)
1370{
1371	struct sock_hash_seq_info *info = seq->private;
1372	struct bpf_iter__sockmap ctx = {};
1373	struct bpf_shtab_elem *elem = v;
1374	struct bpf_iter_meta meta;
1375	struct bpf_prog *prog;
1376
1377	meta.seq = seq;
1378	prog = bpf_iter_get_info(&meta, !elem);
1379	if (!prog)
1380		return 0;
1381
1382	ctx.meta = &meta;
1383	ctx.map = info->map;
1384	if (elem) {
1385		ctx.key = elem->key;
1386		ctx.sk = elem->sk;
1387	}
1388
1389	return bpf_iter_run_prog(prog, &ctx);
1390}
1391
1392static void sock_hash_seq_stop(struct seq_file *seq, void *v)
1393	__releases(rcu)
1394{
1395	if (!v)
1396		(void)sock_hash_seq_show(seq, NULL);
1397
1398	/* pairs with sock_hash_seq_start */
1399	rcu_read_unlock();
1400}
1401
1402static const struct seq_operations sock_hash_seq_ops = {
1403	.start	= sock_hash_seq_start,
1404	.next	= sock_hash_seq_next,
1405	.stop	= sock_hash_seq_stop,
1406	.show	= sock_hash_seq_show,
1407};
1408
1409static int sock_hash_init_seq_private(void *priv_data,
1410				      struct bpf_iter_aux_info *aux)
1411{
1412	struct sock_hash_seq_info *info = priv_data;
1413
1414	bpf_map_inc_with_uref(aux->map);
1415	info->map = aux->map;
1416	info->htab = container_of(aux->map, struct bpf_shtab, map);
1417	return 0;
1418}
1419
1420static void sock_hash_fini_seq_private(void *priv_data)
1421{
1422	struct sock_hash_seq_info *info = priv_data;
1423
1424	bpf_map_put_with_uref(info->map);
1425}
1426
1427static u64 sock_hash_mem_usage(const struct bpf_map *map)
1428{
1429	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1430	u64 usage = sizeof(*htab);
1431
1432	usage += htab->buckets_num * sizeof(struct bpf_shtab_bucket);
1433	usage += atomic_read(&htab->count) * (u64)htab->elem_size;
1434	return usage;
1435}
1436
1437static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
1438	.seq_ops		= &sock_hash_seq_ops,
1439	.init_seq_private	= sock_hash_init_seq_private,
1440	.fini_seq_private	= sock_hash_fini_seq_private,
1441	.seq_priv_size		= sizeof(struct sock_hash_seq_info),
1442};
1443
1444BTF_ID_LIST_SINGLE(sock_hash_map_btf_ids, struct, bpf_shtab)
1445const struct bpf_map_ops sock_hash_ops = {
1446	.map_meta_equal		= bpf_map_meta_equal,
1447	.map_alloc		= sock_hash_alloc,
1448	.map_free		= sock_hash_free,
1449	.map_get_next_key	= sock_hash_get_next_key,
1450	.map_update_elem	= sock_map_update_elem,
1451	.map_delete_elem	= sock_hash_delete_elem,
1452	.map_lookup_elem	= sock_hash_lookup,
1453	.map_lookup_elem_sys_only = sock_hash_lookup_sys,
1454	.map_release_uref	= sock_hash_release_progs,
1455	.map_check_btf		= map_check_no_btf,
1456	.map_mem_usage		= sock_hash_mem_usage,
1457	.map_btf_id		= &sock_hash_map_btf_ids[0],
1458	.iter_seq_info		= &sock_hash_iter_seq_info,
1459};
1460
1461static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
1462{
1463	switch (map->map_type) {
1464	case BPF_MAP_TYPE_SOCKMAP:
1465		return &container_of(map, struct bpf_stab, map)->progs;
1466	case BPF_MAP_TYPE_SOCKHASH:
1467		return &container_of(map, struct bpf_shtab, map)->progs;
1468	default:
1469		break;
1470	}
1471
1472	return NULL;
1473}
1474
1475static int sock_map_prog_link_lookup(struct bpf_map *map, struct bpf_prog ***pprog,
1476				     struct bpf_link ***plink, u32 which)
1477{
1478	struct sk_psock_progs *progs = sock_map_progs(map);
1479	struct bpf_prog **cur_pprog;
1480	struct bpf_link **cur_plink;
1481
1482	if (!progs)
1483		return -EOPNOTSUPP;
1484
1485	switch (which) {
1486	case BPF_SK_MSG_VERDICT:
1487		cur_pprog = &progs->msg_parser;
1488		cur_plink = &progs->msg_parser_link;
1489		break;
1490#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
1491	case BPF_SK_SKB_STREAM_PARSER:
1492		cur_pprog = &progs->stream_parser;
1493		cur_plink = &progs->stream_parser_link;
1494		break;
1495#endif
1496	case BPF_SK_SKB_STREAM_VERDICT:
1497		if (progs->skb_verdict)
1498			return -EBUSY;
1499		cur_pprog = &progs->stream_verdict;
1500		cur_plink = &progs->stream_verdict_link;
1501		break;
1502	case BPF_SK_SKB_VERDICT:
1503		if (progs->stream_verdict)
1504			return -EBUSY;
1505		cur_pprog = &progs->skb_verdict;
1506		cur_plink = &progs->skb_verdict_link;
1507		break;
1508	default:
1509		return -EOPNOTSUPP;
1510	}
1511
1512	*pprog = cur_pprog;
1513	if (plink)
1514		*plink = cur_plink;
1515	return 0;
1516}
1517
1518/* Handle the following four cases:
1519 * prog_attach: prog != NULL, old == NULL, link == NULL
1520 * prog_detach: prog == NULL, old != NULL, link == NULL
1521 * link_attach: prog != NULL, old == NULL, link != NULL
1522 * link_detach: prog == NULL, old != NULL, link != NULL
1523 */
1524static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
1525				struct bpf_prog *old, struct bpf_link *link,
1526				u32 which)
1527{
1528	struct bpf_prog **pprog;
1529	struct bpf_link **plink;
1530	int ret;
1531
1532	ret = sock_map_prog_link_lookup(map, &pprog, &plink, which);
1533	if (ret)
1534		return ret;
1535
1536	/* for prog_attach/prog_detach/link_attach, return error if a bpf_link
1537	 * exists for that prog.
1538	 */
1539	if ((!link || prog) && *plink)
1540		return -EBUSY;
1541
1542	if (old) {
1543		ret = psock_replace_prog(pprog, prog, old);
1544		if (!ret)
1545			*plink = NULL;
1546	} else {
1547		psock_set_prog(pprog, prog);
1548		if (link)
1549			*plink = link;
1550	}
1551
1552	return ret;
1553}
1554
1555int sock_map_bpf_prog_query(const union bpf_attr *attr,
1556			    union bpf_attr __user *uattr)
1557{
1558	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
1559	u32 prog_cnt = 0, flags = 0;
1560	struct bpf_prog **pprog;
1561	struct bpf_prog *prog;
1562	struct bpf_map *map;
1563	u32 id = 0;
1564	int ret;
1565
1566	if (attr->query.query_flags)
1567		return -EINVAL;
1568
1569	CLASS(fd, f)(attr->target_fd);
1570	map = __bpf_map_get(f);
1571	if (IS_ERR(map))
1572		return PTR_ERR(map);
1573
1574	rcu_read_lock();
1575
1576	ret = sock_map_prog_link_lookup(map, &pprog, NULL, attr->query.attach_type);
1577	if (ret)
1578		goto end;
1579
1580	prog = *pprog;
1581	prog_cnt = !prog ? 0 : 1;
1582
1583	if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
1584		goto end;
1585
1586	/* We do not hold the refcnt; the bpf prog may be released
1587	 * asynchronously, in which case its id is set to 0.
1588	 */
1589	id = data_race(prog->aux->id);
1590	if (id == 0)
1591		prog_cnt = 0;
1592
1593end:
1594	rcu_read_unlock();
1595
1596	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)) ||
1597	    (id != 0 && copy_to_user(prog_ids, &id, sizeof(u32))) ||
1598	    copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
1599		ret = -EFAULT;
1600
1601	return ret;
1602}
1603
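sock_map_bpf_prog_query() backs BPF_PROG_QUERY on a map fd, reporting at most one program id per attach point. A sketch using libbpf's wrapper; the helper name is illustrative:

#include <bpf/bpf.h>

int query_stream_verdict(int map_fd)
{
	__u32 prog_ids[1], prog_cnt = 1, attach_flags = 0;

	if (bpf_prog_query(map_fd, BPF_SK_SKB_STREAM_VERDICT, 0,
			   &attach_flags, prog_ids, &prog_cnt))
		return -1;

	/* prog_cnt is 0 or 1; prog_ids[0] is valid only when it is 1 */
	return prog_cnt ? (int)prog_ids[0] : 0;
}
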
1604static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link)
1605{
1606	switch (link->map->map_type) {
1607	case BPF_MAP_TYPE_SOCKMAP:
1608		return sock_map_delete_from_link(link->map, sk,
1609						 link->link_raw);
1610	case BPF_MAP_TYPE_SOCKHASH:
1611		return sock_hash_delete_from_link(link->map, sk,
1612						  link->link_raw);
1613	default:
1614		break;
1615	}
1616}
1617
1618static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock)
1619{
1620	struct sk_psock_link *link;
1621
1622	while ((link = sk_psock_link_pop(psock))) {
1623		sock_map_unlink(sk, link);
1624		sk_psock_free_link(link);
1625	}
1626}
1627
1628void sock_map_unhash(struct sock *sk)
1629{
1630	void (*saved_unhash)(struct sock *sk);
1631	struct sk_psock *psock;
1632
1633	rcu_read_lock();
1634	psock = sk_psock(sk);
1635	if (unlikely(!psock)) {
1636		rcu_read_unlock();
1637		saved_unhash = READ_ONCE(sk->sk_prot)->unhash;
1638	} else {
1639		saved_unhash = psock->saved_unhash;
1640		sock_map_remove_links(sk, psock);
1641		rcu_read_unlock();
1642	}
1643	if (WARN_ON_ONCE(saved_unhash == sock_map_unhash))
1644		return;
1645	if (saved_unhash)
1646		saved_unhash(sk);
1647}
1648EXPORT_SYMBOL_GPL(sock_map_unhash);
1649
1650void sock_map_destroy(struct sock *sk)
1651{
1652	void (*saved_destroy)(struct sock *sk);
1653	struct sk_psock *psock;
1654
1655	rcu_read_lock();
1656	psock = sk_psock_get(sk);
1657	if (unlikely(!psock)) {
1658		rcu_read_unlock();
1659		saved_destroy = READ_ONCE(sk->sk_prot)->destroy;
1660	} else {
1661		saved_destroy = psock->saved_destroy;
1662		sock_map_remove_links(sk, psock);
1663		rcu_read_unlock();
1664		sk_psock_stop(psock);
1665		sk_psock_put(sk, psock);
1666	}
1667	if (WARN_ON_ONCE(saved_destroy == sock_map_destroy))
1668		return;
1669	if (saved_destroy)
1670		saved_destroy(sk);
1671}
1672EXPORT_SYMBOL_GPL(sock_map_destroy);
1673
1674void sock_map_close(struct sock *sk, long timeout)
1675{
1676	void (*saved_close)(struct sock *sk, long timeout);
1677	struct sk_psock *psock;
1678
1679	lock_sock(sk);
1680	rcu_read_lock();
1681	psock = sk_psock(sk);
1682	if (likely(psock)) {
1683		saved_close = psock->saved_close;
1684		sock_map_remove_links(sk, psock);
1685		psock = sk_psock_get(sk);
1686		if (unlikely(!psock))
1687			goto no_psock;
1688		rcu_read_unlock();
1689		sk_psock_stop(psock);
1690		release_sock(sk);
1691		cancel_delayed_work_sync(&psock->work);
1692		sk_psock_put(sk, psock);
1693	} else {
1694		saved_close = READ_ONCE(sk->sk_prot)->close;
1695no_psock:
1696		rcu_read_unlock();
1697		release_sock(sk);
1698	}
1699
1700	/* Make sure we do not recurse; recursion here would be a bug.
1701	 * Leak the socket instead of crashing on a stack overflow.
1702	 */
1703	if (WARN_ON_ONCE(saved_close == sock_map_close))
1704		return;
1705	saved_close(sk, timeout);
1706}
1707EXPORT_SYMBOL_GPL(sock_map_close);
1708
1709struct sockmap_link {
1710	struct bpf_link link;
1711	struct bpf_map *map;
1712	enum bpf_attach_type attach_type;
1713};
1714
1715static void sock_map_link_release(struct bpf_link *link)
1716{
1717	struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link);
1718
1719	mutex_lock(&sockmap_mutex);
1720	if (!sockmap_link->map)
1721		goto out;
1722
1723	WARN_ON_ONCE(sock_map_prog_update(sockmap_link->map, NULL, link->prog, link,
1724					  sockmap_link->attach_type));
1725
1726	bpf_map_put_with_uref(sockmap_link->map);
1727	sockmap_link->map = NULL;
1728out:
1729	mutex_unlock(&sockmap_mutex);
1730}
1731
1732static int sock_map_link_detach(struct bpf_link *link)
1733{
1734	sock_map_link_release(link);
1735	return 0;
1736}
1737
1738static void sock_map_link_dealloc(struct bpf_link *link)
1739{
1740	kfree(link);
1741}
1742
1743/* Handle the following two cases:
1744 * case 1: link != NULL, prog != NULL, old != NULL
1745 * case 2: link != NULL, prog != NULL, old == NULL
1746 */
1747static int sock_map_link_update_prog(struct bpf_link *link,
1748				     struct bpf_prog *prog,
1749				     struct bpf_prog *old)
1750{
1751	const struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link);
1752	struct bpf_prog **pprog, *old_link_prog;
1753	struct bpf_link **plink;
1754	int ret = 0;
1755
1756	mutex_lock(&sockmap_mutex);
1757
1758	/* If old prog is not NULL, ensure old prog is the same as link->prog. */
1759	if (old && link->prog != old) {
1760		ret = -EPERM;
1761		goto out;
1762	}
1763	/* Ensure link->prog has the same type/attach_type as the new prog. */
1764	if (link->prog->type != prog->type ||
1765	    link->prog->expected_attach_type != prog->expected_attach_type) {
1766		ret = -EINVAL;
1767		goto out;
1768	}
1769	if (!sockmap_link->map) {
1770		ret = -ENOLINK;
1771		goto out;
1772	}
1773
1774	ret = sock_map_prog_link_lookup(sockmap_link->map, &pprog, &plink,
1775					sockmap_link->attach_type);
1776	if (ret)
1777		goto out;
1778
1779	/* return error if the stored bpf_link does not match the incoming bpf_link. */
1780	if (link != *plink) {
1781		ret = -EBUSY;
1782		goto out;
1783	}
1784
1785	if (old) {
1786		ret = psock_replace_prog(pprog, prog, old);
1787		if (ret)
1788			goto out;
1789	} else {
1790		psock_set_prog(pprog, prog);
1791	}
1792
1793	bpf_prog_inc(prog);
1794	old_link_prog = xchg(&link->prog, prog);
1795	bpf_prog_put(old_link_prog);
1796
1797out:
1798	mutex_unlock(&sockmap_mutex);
1799	return ret;
1800}
1801
1802static u32 sock_map_link_get_map_id(const struct sockmap_link *sockmap_link)
1803{
1804	u32 map_id = 0;
1805
1806	mutex_lock(&sockmap_mutex);
1807	if (sockmap_link->map)
1808		map_id = sockmap_link->map->id;
1809	mutex_unlock(&sockmap_mutex);
1810	return map_id;
1811}
1812
1813static int sock_map_link_fill_info(const struct bpf_link *link,
1814				   struct bpf_link_info *info)
1815{
1816	const struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link);
1817	u32 map_id = sock_map_link_get_map_id(sockmap_link);
1818
1819	info->sockmap.map_id = map_id;
1820	info->sockmap.attach_type = sockmap_link->attach_type;
1821	return 0;
1822}
1823
1824static void sock_map_link_show_fdinfo(const struct bpf_link *link,
1825				      struct seq_file *seq)
1826{
1827	const struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link);
1828	u32 map_id = sock_map_link_get_map_id(sockmap_link);
1829
1830	seq_printf(seq, "map_id:\t%u\n", map_id);
1831	seq_printf(seq, "attach_type:\t%u\n", sockmap_link->attach_type);
1832}
1833
1834static const struct bpf_link_ops sock_map_link_ops = {
1835	.release = sock_map_link_release,
1836	.dealloc = sock_map_link_dealloc,
1837	.detach = sock_map_link_detach,
1838	.update_prog = sock_map_link_update_prog,
1839	.fill_link_info = sock_map_link_fill_info,
1840	.show_fdinfo = sock_map_link_show_fdinfo,
1841};
1842
1843int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog)
1844{
1845	struct bpf_link_primer link_primer;
1846	struct sockmap_link *sockmap_link;
1847	enum bpf_attach_type attach_type;
1848	struct bpf_map *map;
1849	int ret;
1850
1851	if (attr->link_create.flags)
1852		return -EINVAL;
1853
1854	map = bpf_map_get_with_uref(attr->link_create.target_fd);
1855	if (IS_ERR(map))
1856		return PTR_ERR(map);
1857	if (map->map_type != BPF_MAP_TYPE_SOCKMAP && map->map_type != BPF_MAP_TYPE_SOCKHASH) {
1858		ret = -EINVAL;
1859		goto out;
1860	}
1861
1862	sockmap_link = kzalloc(sizeof(*sockmap_link), GFP_USER);
1863	if (!sockmap_link) {
1864		ret = -ENOMEM;
1865		goto out;
1866	}
1867
1868	attach_type = attr->link_create.attach_type;
1869	bpf_link_init(&sockmap_link->link, BPF_LINK_TYPE_SOCKMAP, &sock_map_link_ops, prog);
1870	sockmap_link->map = map;
1871	sockmap_link->attach_type = attach_type;
1872
1873	ret = bpf_link_prime(&sockmap_link->link, &link_primer);
1874	if (ret) {
1875		kfree(sockmap_link);
1876		goto out;
1877	}
1878
1879	mutex_lock(&sockmap_mutex);
1880	ret = sock_map_prog_update(map, prog, NULL, &sockmap_link->link, attach_type);
1881	mutex_unlock(&sockmap_mutex);
1882	if (ret) {
1883		bpf_link_cleanup(&link_primer);
1884		goto out;
1885	}
1886
1887	/* Take an extra refcnt on the prog: when the old prog is later
1888	 * replaced via psock_replace_prog() or psock_set_prog(), its refcnt
1889	 * is dropped.
1890	 * Strictly, this is not needed since the bpf_link already holds a
1891	 * reference. But taking one here keeps the replace/set paths
1892	 * simpler, at the cost of one redundant refcnt.
1893	 */
1894	bpf_prog_inc(prog);
1895
1896	return bpf_link_settle(&link_primer);
1897
1898out:
1899	bpf_map_put_with_uref(map);
1900	return ret;
1901}
1902
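The link-based path corresponds to bpf_link_create() with the map as the target fd. The returned link fd pins the attachment: closing it (or calling bpf_link_detach()) drives sock_map_link_release() above. A hedged sketch; the helper name is illustrative:

#include <bpf/bpf.h>

int attach_with_link(int map_fd, int verdict_fd)
{
	/* NULL opts: no flags; reaches sock_map_link_create() */
	return bpf_link_create(verdict_fd, map_fd,
			       BPF_SK_SKB_STREAM_VERDICT, NULL);
}
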
1903static int sock_map_iter_attach_target(struct bpf_prog *prog,
1904				       union bpf_iter_link_info *linfo,
1905				       struct bpf_iter_aux_info *aux)
1906{
1907	struct bpf_map *map;
1908	int err = -EINVAL;
1909
1910	if (!linfo->map.map_fd)
1911		return -EBADF;
1912
1913	map = bpf_map_get_with_uref(linfo->map.map_fd);
1914	if (IS_ERR(map))
1915		return PTR_ERR(map);
1916
1917	if (map->map_type != BPF_MAP_TYPE_SOCKMAP &&
1918	    map->map_type != BPF_MAP_TYPE_SOCKHASH)
1919		goto put_map;
1920
1921	if (prog->aux->max_rdonly_access > map->key_size) {
1922		err = -EACCES;
1923		goto put_map;
1924	}
1925
1926	aux->map = map;
1927	return 0;
1928
1929put_map:
1930	bpf_map_put_with_uref(map);
1931	return err;
1932}
1933
1934static void sock_map_iter_detach_target(struct bpf_iter_aux_info *aux)
1935{
1936	bpf_map_put_with_uref(aux->map);
1937}
1938
1939static struct bpf_iter_reg sock_map_iter_reg = {
1940	.target			= "sockmap",
1941	.attach_target		= sock_map_iter_attach_target,
1942	.detach_target		= sock_map_iter_detach_target,
1943	.show_fdinfo		= bpf_iter_map_show_fdinfo,
1944	.fill_link_info		= bpf_iter_map_fill_link_info,
1945	.ctx_arg_info_size	= 2,
1946	.ctx_arg_info		= {
1947		{ offsetof(struct bpf_iter__sockmap, key),
1948		  PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY },
1949		{ offsetof(struct bpf_iter__sockmap, sk),
1950		  PTR_TO_BTF_ID_OR_NULL },
1951	},
1952};
1953
1954static int __init bpf_sockmap_iter_init(void)
1955{
1956	sock_map_iter_reg.ctx_arg_info[1].btf_id =
1957		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
1958	return bpf_iter_reg_target(&sock_map_iter_reg);
1959}
1960late_initcall(bpf_sockmap_iter_init);
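The registration above makes map entries visible to programs in the iter/sockmap section; ctx->key and ctx->sk are NULL on the final end-of-map invocation. An illustrative counter, patterned on the kernel selftests and assuming a BTF-generated vmlinux.h:

#include "vmlinux.h"	/* assumed to provide struct bpf_iter__sockmap */
#include <bpf/bpf_helpers.h>

__u64 elems;	/* read from user space once the iteration completes */

SEC("iter/sockmap")
int count_elems(struct bpf_iter__sockmap *ctx)
{
	/* both key and sk are NULL on the end-of-map call */
	if (ctx->key && ctx->sk)
		elems++;
	return 0;
}

char _license[] SEC("license") = "GPL";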
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
   3
   4#include <linux/bpf.h>
 
   5#include <linux/filter.h>
   6#include <linux/errno.h>
   7#include <linux/file.h>
   8#include <linux/net.h>
   9#include <linux/workqueue.h>
  10#include <linux/skmsg.h>
  11#include <linux/list.h>
  12#include <linux/jhash.h>
  13#include <linux/sock_diag.h>
  14#include <net/udp.h>
  15
  16struct bpf_stab {
  17	struct bpf_map map;
  18	struct sock **sks;
  19	struct sk_psock_progs progs;
  20	raw_spinlock_t lock;
  21};
  22
  23#define SOCK_CREATE_FLAG_MASK				\
  24	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
  25
  26static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
  27{
  28	struct bpf_stab *stab;
  29	u64 cost;
  30	int err;
  31
  32	if (!capable(CAP_NET_ADMIN))
  33		return ERR_PTR(-EPERM);
  34	if (attr->max_entries == 0 ||
  35	    attr->key_size    != 4 ||
  36	    (attr->value_size != sizeof(u32) &&
  37	     attr->value_size != sizeof(u64)) ||
  38	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
  39		return ERR_PTR(-EINVAL);
  40
  41	stab = kzalloc(sizeof(*stab), GFP_USER);
  42	if (!stab)
  43		return ERR_PTR(-ENOMEM);
  44
  45	bpf_map_init_from_attr(&stab->map, attr);
  46	raw_spin_lock_init(&stab->lock);
  47
  48	/* Make sure page count doesn't overflow. */
  49	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
  50	err = bpf_map_charge_init(&stab->map.memory, cost);
  51	if (err)
  52		goto free_stab;
  53
  54	stab->sks = bpf_map_area_alloc(stab->map.max_entries *
  55				       sizeof(struct sock *),
  56				       stab->map.numa_node);
  57	if (stab->sks)
  58		return &stab->map;
  59	err = -ENOMEM;
  60	bpf_map_charge_finish(&stab->map.memory);
  61free_stab:
  62	kfree(stab);
  63	return ERR_PTR(err);
  64}
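/* In this v5.9 version, map memory is pre-charged against the caller's
 * RLIMIT_MEMLOCK via bpf_map_charge_init(); the only cost is the socket
 * pointer array. For example, max_entries = 1024 on a 64-bit kernel
 * charges 1024 * sizeof(struct sock *) = 8192 bytes, i.e. two pages.
 */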
  65
  66int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
  67{
  68	u32 ufd = attr->target_fd;
  69	struct bpf_map *map;
  70	struct fd f;
  71	int ret;
  72
  73	if (attr->attach_flags || attr->replace_bpf_fd)
  74		return -EINVAL;
  75
  76	f = fdget(ufd);
  77	map = __bpf_map_get(f);
  78	if (IS_ERR(map))
  79		return PTR_ERR(map);
  80	ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
  81	fdput(f);
  82	return ret;
  83}
  84
  85int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
  86{
  87	u32 ufd = attr->target_fd;
  88	struct bpf_prog *prog;
  89	struct bpf_map *map;
  90	struct fd f;
  91	int ret;
  92
  93	if (attr->attach_flags || attr->replace_bpf_fd)
  94		return -EINVAL;
  95
  96	f = fdget(ufd);
  97	map = __bpf_map_get(f);
  98	if (IS_ERR(map))
  99		return PTR_ERR(map);
 100
 101	prog = bpf_prog_get(attr->attach_bpf_fd);
 102	if (IS_ERR(prog)) {
 103		ret = PTR_ERR(prog);
 104		goto put_map;
 105	}
 106
 107	if (prog->type != ptype) {
 108		ret = -EINVAL;
 109		goto put_prog;
 110	}
 111
 112	ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
 113put_prog:
 114	bpf_prog_put(prog);
 115put_map:
 116	fdput(f);
 117	return ret;
 118}
 119
 120static void sock_map_sk_acquire(struct sock *sk)
 121	__acquires(&sk->sk_lock.slock)
 122{
 123	lock_sock(sk);
 124	preempt_disable();
 125	rcu_read_lock();
 126}
 127
 128static void sock_map_sk_release(struct sock *sk)
 129	__releases(&sk->sk_lock.slock)
 130{
 131	rcu_read_unlock();
 132	preempt_enable();
 133	release_sock(sk);
 134}
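/* Note on the pairing above: syscall-side updates take the socket lock
 * first, then disable preemption and enter the RCU read side, and
 * sock_map_sk_release() unwinds in the opposite order. This is what
 * lets the update paths below assume rcu_read_lock_held().
 */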
 135
 136static void sock_map_add_link(struct sk_psock *psock,
 137			      struct sk_psock_link *link,
 138			      struct bpf_map *map, void *link_raw)
 139{
 140	link->link_raw = link_raw;
 141	link->map = map;
 142	spin_lock_bh(&psock->link_lock);
 143	list_add_tail(&link->list, &psock->link);
 144	spin_unlock_bh(&psock->link_lock);
 145}
 146
 147static void sock_map_del_link(struct sock *sk,
 148			      struct sk_psock *psock, void *link_raw)
 149{
 150	struct sk_psock_link *link, *tmp;
 151	bool strp_stop = false;
 152
 153	spin_lock_bh(&psock->link_lock);
 154	list_for_each_entry_safe(link, tmp, &psock->link, list) {
 155		if (link->link_raw == link_raw) {
 156			struct bpf_map *map = link->map;
 157			struct bpf_stab *stab = container_of(map, struct bpf_stab,
 158							     map);
 159			if (psock->parser.enabled && stab->progs.skb_parser)
 160				strp_stop = true;
 161			list_del(&link->list);
 162			sk_psock_free_link(link);
 163		}
 164	}
 165	spin_unlock_bh(&psock->link_lock);
 166	if (strp_stop) {
 167		write_lock_bh(&sk->sk_callback_lock);
 168		sk_psock_stop_strp(sk, psock);
 169		write_unlock_bh(&sk->sk_callback_lock);
 170	}
 171}
 172
 173static void sock_map_unref(struct sock *sk, void *link_raw)
 174{
 175	struct sk_psock *psock = sk_psock(sk);
 176
 177	if (likely(psock)) {
 178		sock_map_del_link(sk, psock, link_raw);
 179		sk_psock_put(sk, psock);
 180	}
 181}
 182
 183static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
 184{
 185	struct proto *prot;
 186
 187	sock_owned_by_me(sk);
 188
 189	switch (sk->sk_type) {
 190	case SOCK_STREAM:
 191		prot = tcp_bpf_get_proto(sk, psock);
 192		break;
 193
 194	case SOCK_DGRAM:
 195		prot = udp_bpf_get_proto(sk, psock);
 196		break;
 197
 198	default:
 199		return -EINVAL;
 200	}
 201
 202	if (IS_ERR(prot))
 203		return PTR_ERR(prot);
 204
 205	sk_psock_update_proto(sk, psock, prot);
 206	return 0;
 207}
 208
 209static struct sk_psock *sock_map_psock_get_checked(struct sock *sk)
 210{
 211	struct sk_psock *psock;
 212
 213	rcu_read_lock();
 214	psock = sk_psock(sk);
 215	if (psock) {
 216		if (sk->sk_prot->close != sock_map_close) {
 217			psock = ERR_PTR(-EBUSY);
 218			goto out;
 219		}
 220
 221		if (!refcount_inc_not_zero(&psock->refcnt))
 222			psock = ERR_PTR(-EBUSY);
 223	}
 224out:
 225	rcu_read_unlock();
 226	return psock;
 227}
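/* If a psock already exists but the socket's close handler is not
 * sock_map_close, the psock belongs to an incompatible user of the
 * proto-override machinery, so -EBUSY is returned rather than sharing
 * it; likewise if the psock's refcnt has already dropped to zero.
 */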
 228
 229static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs,
 230			 struct sock *sk)
 231{
 232	struct bpf_prog *msg_parser, *skb_parser, *skb_verdict;
 233	struct sk_psock *psock;
 234	bool skb_progs;
 235	int ret;
 236
 237	skb_verdict = READ_ONCE(progs->skb_verdict);
 238	skb_parser = READ_ONCE(progs->skb_parser);
 239	skb_progs = skb_parser && skb_verdict;
 240	if (skb_progs) {
 241		skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
 242		if (IS_ERR(skb_verdict))
 243			return PTR_ERR(skb_verdict);
 244		skb_parser = bpf_prog_inc_not_zero(skb_parser);
 245		if (IS_ERR(skb_parser)) {
 246			bpf_prog_put(skb_verdict);
 247			return PTR_ERR(skb_parser);
 248		}
 249	}
 250
 251	msg_parser = READ_ONCE(progs->msg_parser);
 252	if (msg_parser) {
 253		msg_parser = bpf_prog_inc_not_zero(msg_parser);
 254		if (IS_ERR(msg_parser)) {
 255			ret = PTR_ERR(msg_parser);
 256			goto out;
 257		}
 258	}
 259
 260	psock = sock_map_psock_get_checked(sk);
 261	if (IS_ERR(psock)) {
 262		ret = PTR_ERR(psock);
 263		goto out_progs;
 264	}
 265
 266	if (psock) {
 267		if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
 268		    (skb_progs  && READ_ONCE(psock->progs.skb_parser))) {
 269			sk_psock_put(sk, psock);
 270			ret = -EBUSY;
 271			goto out_progs;
 272		}
 273	} else {
 274		psock = sk_psock_init(sk, map->numa_node);
 275		if (!psock) {
 276			ret = -ENOMEM;
 277			goto out_progs;
 278		}
 279	}
 280
 281	if (msg_parser)
 282		psock_set_prog(&psock->progs.msg_parser, msg_parser);
 283
 284	ret = sock_map_init_proto(sk, psock);
 285	if (ret < 0)
 286		goto out_drop;
 287
 288	write_lock_bh(&sk->sk_callback_lock);
 289	if (skb_progs && !psock->parser.enabled) {
 290		ret = sk_psock_init_strp(sk, psock);
 291		if (ret) {
 292			write_unlock_bh(&sk->sk_callback_lock);
 293			goto out_drop;
 294		}
 295		psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
 296		psock_set_prog(&psock->progs.skb_parser, skb_parser);
 297		sk_psock_start_strp(sk, psock);
 298	}
 299	write_unlock_bh(&sk->sk_callback_lock);
 300	return 0;
 301out_drop:
 302	sk_psock_put(sk, psock);
 303out_progs:
 304	if (msg_parser)
 305		bpf_prog_put(msg_parser);
 306out:
 307	if (skb_progs) {
 308		bpf_prog_put(skb_verdict);
 309		bpf_prog_put(skb_parser);
 310	}
 311	return ret;
 312}
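/* Reference flow in sock_map_link() above: each prog pointer read from
 * progs is pinned with bpf_prog_inc_not_zero() before use, and on the
 * error paths (out_drop/out_progs/out) every reference taken is dropped
 * again, so the map's own prog references stay balanced.
 */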
 313
 314static int sock_map_link_no_progs(struct bpf_map *map, struct sock *sk)
 315{
 316	struct sk_psock *psock;
 317	int ret;
 318
 319	psock = sock_map_psock_get_checked(sk);
 320	if (IS_ERR(psock))
 321		return PTR_ERR(psock);
 322
 323	if (!psock) {
 324		psock = sk_psock_init(sk, map->numa_node);
 325		if (!psock)
 326			return -ENOMEM;
 327	}
 328
 329	ret = sock_map_init_proto(sk, psock);
 330	if (ret < 0)
 331		sk_psock_put(sk, psock);
 332	return ret;
 333}
 334
 335static void sock_map_free(struct bpf_map *map)
 336{
 337	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 338	int i;
 339
 340	/* After the sync no updates or deletes will be in-flight, so it
 341	 * is safe to walk the map and remove entries without risking a
 342	 * race with the EEXIST update case.
 343	 */
 344	synchronize_rcu();
 345	for (i = 0; i < stab->map.max_entries; i++) {
 346		struct sock **psk = &stab->sks[i];
 347		struct sock *sk;
 348
 349		sk = xchg(psk, NULL);
 350		if (sk) {
 351			lock_sock(sk);
 352			rcu_read_lock();
 353			sock_map_unref(sk, psk);
 354			rcu_read_unlock();
 355			release_sock(sk);
 356		}
 357	}
 358
 359	/* wait for psock readers accessing its map link */
 360	synchronize_rcu();
 361
 362	bpf_map_area_free(stab->sks);
 363	kfree(stab);
 364}
 365
 366static void sock_map_release_progs(struct bpf_map *map)
 367{
 368	psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
 369}
 370
 371static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
 372{
 373	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 374
 375	WARN_ON_ONCE(!rcu_read_lock_held());
 376
 377	if (unlikely(key >= map->max_entries))
 378		return NULL;
 379	return READ_ONCE(stab->sks[key]);
 380}
 381
 382static void *sock_map_lookup(struct bpf_map *map, void *key)
 383{
 384	struct sock *sk;
 385
 386	sk = __sock_map_lookup_elem(map, *(u32 *)key);
 387	if (!sk || !sk_fullsock(sk))
 388		return NULL;
 389	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
 390		return NULL;
 391	return sk;
 392}
 393
 394static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
 395{
 396	struct sock *sk;
 397
 398	if (map->value_size != sizeof(u64))
 399		return ERR_PTR(-ENOSPC);
 400
 401	sk = __sock_map_lookup_elem(map, *(u32 *)key);
 402	if (!sk)
 403		return ERR_PTR(-ENOENT);
 404
 405	sock_gen_cookie(sk);
 406	return &sk->sk_cookie;
 407}
 408
 409static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
 410			     struct sock **psk)
 411{
 412	struct sock *sk;
 413	int err = 0;
 414
 415	raw_spin_lock_bh(&stab->lock);
 416	sk = *psk;
 417	if (!sk_test || sk_test == sk)
 418		sk = xchg(psk, NULL);
 419
 420	if (likely(sk))
 421		sock_map_unref(sk, psk);
 422	else
 423		err = -EINVAL;
 424
 425	raw_spin_unlock_bh(&stab->lock);
 426	return err;
 427}
 428
 429static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
 430				      void *link_raw)
 431{
 432	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 433
 434	__sock_map_delete(stab, sk, link_raw);
 435}
 436
 437static int sock_map_delete_elem(struct bpf_map *map, void *key)
 438{
 439	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 440	u32 i = *(u32 *)key;
 441	struct sock **psk;
 442
 443	if (unlikely(i >= map->max_entries))
 444		return -EINVAL;
 445
 446	psk = &stab->sks[i];
 447	return __sock_map_delete(stab, NULL, psk);
 448}
 449
 450static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
 451{
 452	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 453	u32 i = key ? *(u32 *)key : U32_MAX;
 454	u32 *key_next = next;
 455
 456	if (i == stab->map.max_entries - 1)
 457		return -ENOENT;
 458	if (i >= stab->map.max_entries)
 459		*key_next = 0;
 460	else
 461		*key_next = i + 1;
 462	return 0;
 463}
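/* Example (userspace sketch, map_fd being a placeholder): these
 * semantics support the usual get-next-key walk, where a NULL key, or
 * any key >= max_entries, restarts the walk at index 0:
 *
 *	__u32 key, next;
 *	int err;
 *
 *	for (err = bpf_map_get_next_key(map_fd, NULL, &next); !err;
 *	     key = next, err = bpf_map_get_next_key(map_fd, &key, &next))
 *		printf("key %u\n", next);
 *
 * The loop ends once the last index has been handed out.
 */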
 464
 465static bool sock_map_redirect_allowed(const struct sock *sk);
 466
 467static int sock_map_update_common(struct bpf_map *map, u32 idx,
 468				  struct sock *sk, u64 flags)
 469{
 470	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 471	struct sk_psock_link *link;
 472	struct sk_psock *psock;
 473	struct sock *osk;
 474	int ret;
 475
 476	WARN_ON_ONCE(!rcu_read_lock_held());
 477	if (unlikely(flags > BPF_EXIST))
 478		return -EINVAL;
 479	if (unlikely(idx >= map->max_entries))
 480		return -E2BIG;
 481	if (inet_csk_has_ulp(sk))
 482		return -EINVAL;
 483
 484	link = sk_psock_init_link();
 485	if (!link)
 486		return -ENOMEM;
 487
 488	/* Only sockets we can redirect into/from in BPF need to hold
 489	 * refs to parser/verdict progs and have their sk_data_ready
 490	 * and sk_write_space callbacks overridden.
 491	 */
 492	if (sock_map_redirect_allowed(sk))
 493		ret = sock_map_link(map, &stab->progs, sk);
 494	else
 495		ret = sock_map_link_no_progs(map, sk);
 496	if (ret < 0)
 497		goto out_free;
 498
 499	psock = sk_psock(sk);
 500	WARN_ON_ONCE(!psock);
 501
 502	raw_spin_lock_bh(&stab->lock);
 503	osk = stab->sks[idx];
 504	if (osk && flags == BPF_NOEXIST) {
 505		ret = -EEXIST;
 506		goto out_unlock;
 507	} else if (!osk && flags == BPF_EXIST) {
 508		ret = -ENOENT;
 509		goto out_unlock;
 510	}
 511
 512	sock_map_add_link(psock, link, map, &stab->sks[idx]);
 513	stab->sks[idx] = sk;
 514	if (osk)
 515		sock_map_unref(osk, &stab->sks[idx]);
 516	raw_spin_unlock_bh(&stab->lock);
 517	return 0;
 518out_unlock:
 519	raw_spin_unlock_bh(&stab->lock);
 520	if (psock)
 521		sk_psock_put(sk, psock);
 522out_free:
 523	sk_psock_free_link(link);
 524	return ret;
 525}
 526
 527static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
 528{
 529	return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
 530	       ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
 531	       ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
 532}
 533
 534static bool sk_is_tcp(const struct sock *sk)
 535{
 536	return sk->sk_type == SOCK_STREAM &&
 537	       sk->sk_protocol == IPPROTO_TCP;
 538}
 539
 540static bool sk_is_udp(const struct sock *sk)
 541{
 542	return sk->sk_type == SOCK_DGRAM &&
 543	       sk->sk_protocol == IPPROTO_UDP;
 544}
 545
 546static bool sock_map_redirect_allowed(const struct sock *sk)
 547{
 548	return sk_is_tcp(sk) && sk->sk_state != TCP_LISTEN;
 549}
 550
 551static bool sock_map_sk_is_suitable(const struct sock *sk)
 552{
 553	return sk_is_tcp(sk) || sk_is_udp(sk);
 554}
 555
 556static bool sock_map_sk_state_allowed(const struct sock *sk)
 557{
 558	if (sk_is_tcp(sk))
 559		return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
 560	else if (sk_is_udp(sk))
 561		return sk_hashed(sk);
 562
 563	return false;
 564}
 565
 566static int sock_map_update_elem(struct bpf_map *map, void *key,
 567				void *value, u64 flags)
 568{
 569	u32 idx = *(u32 *)key;
 570	struct socket *sock;
 571	struct sock *sk;
 572	int ret;
 573	u64 ufd;
 574
 575	if (map->value_size == sizeof(u64))
 576		ufd = *(u64 *)value;
 577	else
 578		ufd = *(u32 *)value;
 579	if (ufd > S32_MAX)
 580		return -EINVAL;
 581
 582	sock = sockfd_lookup(ufd, &ret);
 583	if (!sock)
 584		return ret;
 585	sk = sock->sk;
 586	if (!sk) {
 587		ret = -EINVAL;
 588		goto out;
 589	}
 590	if (!sock_map_sk_is_suitable(sk)) {
 591		ret = -EOPNOTSUPP;
 592		goto out;
 593	}
 594
 595	sock_map_sk_acquire(sk);
 596	if (!sock_map_sk_state_allowed(sk))
 597		ret = -EOPNOTSUPP;
 598	else
 599		ret = sock_map_update_common(map, idx, sk, flags);
 600	sock_map_sk_release(sk);
 601out:
 602	fput(sock->file);
 603	return ret;
 604}
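/* Example (userspace sketch, map_fd/sock_fd placeholders): the value
 * written into the map is the socket's fd, sized to match value_size:
 *
 *	__u32 key = 0;
 *	__u64 value = sock_fd;	(for a map created with value_size == 8)
 *
 *	bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
 *
 * The fd must refer to a TCP or UDP socket in an allowed state
 * (ESTABLISHED or LISTEN for TCP, hashed for UDP), per the checks
 * above.
 */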
 605
 606BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
 607	   struct bpf_map *, map, void *, key, u64, flags)
 608{
 609	WARN_ON_ONCE(!rcu_read_lock_held());
 610
 611	if (likely(sock_map_sk_is_suitable(sops->sk) &&
 612		   sock_map_op_okay(sops)))
 613		return sock_map_update_common(map, *(u32 *)key, sops->sk,
 614					      flags);
 615	return -EOPNOTSUPP;
 616}
 617
 618const struct bpf_func_proto bpf_sock_map_update_proto = {
 619	.func		= bpf_sock_map_update,
 620	.gpl_only	= false,
 621	.pkt_access	= true,
 622	.ret_type	= RET_INTEGER,
 623	.arg1_type	= ARG_PTR_TO_CTX,
 624	.arg2_type	= ARG_CONST_MAP_PTR,
 625	.arg3_type	= ARG_PTR_TO_MAP_KEY,
 626	.arg4_type	= ARG_ANYTHING,
 627};
 628
 629BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
 630	   struct bpf_map *, map, u32, key, u64, flags)
 631{
 632	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 633	struct sock *sk;
 634
 635	if (unlikely(flags & ~(BPF_F_INGRESS)))
 636		return SK_DROP;
 637
 638	sk = __sock_map_lookup_elem(map, key);
 639	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
 640		return SK_DROP;
 641
 642	tcb->bpf.flags = flags;
 643	tcb->bpf.sk_redir = sk;
 644	return SK_PASS;
 645}
 646
 647const struct bpf_func_proto bpf_sk_redirect_map_proto = {
 648	.func           = bpf_sk_redirect_map,
 649	.gpl_only       = false,
 650	.ret_type       = RET_INTEGER,
 651	.arg1_type	= ARG_PTR_TO_CTX,
 652	.arg2_type      = ARG_CONST_MAP_PTR,
 653	.arg3_type      = ARG_ANYTHING,
 654	.arg4_type      = ARG_ANYTHING,
 655};
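/* Example (BPF-side sketch; sock_map is a placeholder
 * BPF_MAP_TYPE_SOCKMAP definition): a minimal stream verdict program
 * built on this helper:
 *
 *	SEC("sk_skb/stream_verdict")
 *	int verdict(struct __sk_buff *skb)
 *	{
 *		return bpf_sk_redirect_map(skb, &sock_map, 0, 0);
 *	}
 *
 * On SK_PASS the skb verdict machinery consumes tcb->bpf.sk_redir to
 * steer the skb to the selected socket.
 */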
 656
 657BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
 658	   struct bpf_map *, map, u32, key, u64, flags)
 659{
 660	struct sock *sk;
 661
 662	if (unlikely(flags & ~(BPF_F_INGRESS)))
 663		return SK_DROP;
 664
 665	sk = __sock_map_lookup_elem(map, key);
 666	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
 667		return SK_DROP;
 668
 669	msg->flags = flags;
 670	msg->sk_redir = sk;
 671	return SK_PASS;
 672}
 673
 674const struct bpf_func_proto bpf_msg_redirect_map_proto = {
 675	.func           = bpf_msg_redirect_map,
 676	.gpl_only       = false,
 677	.ret_type       = RET_INTEGER,
 678	.arg1_type	= ARG_PTR_TO_CTX,
 679	.arg2_type      = ARG_CONST_MAP_PTR,
 680	.arg3_type      = ARG_ANYTHING,
 681	.arg4_type      = ARG_ANYTHING,
 682};
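/* Example (BPF-side sketch, sock_map again a placeholder map): the
 * sk_msg counterpart; BPF_F_INGRESS queues the data to the target
 * socket's receive queue instead of its transmit path:
 *
 *	SEC("sk_msg")
 *	int msg_verdict(struct sk_msg_md *msg)
 *	{
 *		return bpf_msg_redirect_map(msg, &sock_map, 0, BPF_F_INGRESS);
 *	}
 */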
 683
 684static int sock_map_btf_id;
 685const struct bpf_map_ops sock_map_ops = {
 686	.map_alloc		= sock_map_alloc,
 687	.map_free		= sock_map_free,
 688	.map_get_next_key	= sock_map_get_next_key,
 689	.map_lookup_elem_sys_only = sock_map_lookup_sys,
 690	.map_update_elem	= sock_map_update_elem,
 691	.map_delete_elem	= sock_map_delete_elem,
 692	.map_lookup_elem	= sock_map_lookup,
 693	.map_release_uref	= sock_map_release_progs,
 694	.map_check_btf		= map_check_no_btf,
 695	.map_btf_name		= "bpf_stab",
 696	.map_btf_id		= &sock_map_btf_id,
 697};
 698
 699struct bpf_shtab_elem {
 700	struct rcu_head rcu;
 701	u32 hash;
 702	struct sock *sk;
 703	struct hlist_node node;
 704	u8 key[];
 705};
 706
 707struct bpf_shtab_bucket {
 708	struct hlist_head head;
 709	raw_spinlock_t lock;
 710};
 711
 712struct bpf_shtab {
 713	struct bpf_map map;
 714	struct bpf_shtab_bucket *buckets;
 715	u32 buckets_num;
 716	u32 elem_size;
 717	struct sk_psock_progs progs;
 718	atomic_t count;
 719};
 720
 721static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
 722{
 723	return jhash(key, len, 0);
 724}
 725
 726static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab,
 727							u32 hash)
 728{
 729	return &htab->buckets[hash & (htab->buckets_num - 1)];
 730}
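/* buckets_num is rounded up to a power of two in sock_hash_alloc(), so
 * the "hash & (buckets_num - 1)" mask above is equivalent to
 * hash % buckets_num; e.g. with 8 buckets the mask is 0x7.
 */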
 731
 732static struct bpf_shtab_elem *
 733sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
 734			  u32 key_size)
 735{
 736	struct bpf_shtab_elem *elem;
 737
 738	hlist_for_each_entry_rcu(elem, head, node) {
 739		if (elem->hash == hash &&
 740		    !memcmp(&elem->key, key, key_size))
 741			return elem;
 742	}
 743
 744	return NULL;
 745}
 746
 747static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
 748{
 749	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 750	u32 key_size = map->key_size, hash;
 751	struct bpf_shtab_bucket *bucket;
 752	struct bpf_shtab_elem *elem;
 753
 754	WARN_ON_ONCE(!rcu_read_lock_held());
 755
 756	hash = sock_hash_bucket_hash(key, key_size);
 757	bucket = sock_hash_select_bucket(htab, hash);
 758	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
 759
 760	return elem ? elem->sk : NULL;
 761}
 762
 763static void sock_hash_free_elem(struct bpf_shtab *htab,
 764				struct bpf_shtab_elem *elem)
 765{
 766	atomic_dec(&htab->count);
 767	kfree_rcu(elem, rcu);
 768}
 769
 770static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
 771				       void *link_raw)
 772{
 773	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 774	struct bpf_shtab_elem *elem_probe, *elem = link_raw;
 775	struct bpf_shtab_bucket *bucket;
 776
 777	WARN_ON_ONCE(!rcu_read_lock_held());
 778	bucket = sock_hash_select_bucket(htab, elem->hash);
 779
 780	/* elem may be deleted from the map in parallel, but access here
 781	 * is okay since it goes away only after an RCU grace period.
 782	 * However, we need to check whether it's still present.
 783	 */
 784	raw_spin_lock_bh(&bucket->lock);
 785	elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
 786					       elem->key, map->key_size);
 787	if (elem_probe && elem_probe == elem) {
 788		hlist_del_rcu(&elem->node);
 789		sock_map_unref(elem->sk, elem);
 790		sock_hash_free_elem(htab, elem);
 791	}
 792	raw_spin_unlock_bh(&bucket->lock);
 793}
 794
 795static int sock_hash_delete_elem(struct bpf_map *map, void *key)
 796{
 797	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 798	u32 hash, key_size = map->key_size;
 799	struct bpf_shtab_bucket *bucket;
 800	struct bpf_shtab_elem *elem;
 801	int ret = -ENOENT;
 802
 803	hash = sock_hash_bucket_hash(key, key_size);
 804	bucket = sock_hash_select_bucket(htab, hash);
 805
 806	raw_spin_lock_bh(&bucket->lock);
 807	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
 808	if (elem) {
 809		hlist_del_rcu(&elem->node);
 810		sock_map_unref(elem->sk, elem);
 811		sock_hash_free_elem(htab, elem);
 812		ret = 0;
 813	}
 814	raw_spin_unlock_bh(&bucket->lock);
 815	return ret;
 816}
 817
 818static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
 819						   void *key, u32 key_size,
 820						   u32 hash, struct sock *sk,
 821						   struct bpf_shtab_elem *old)
 822{
 823	struct bpf_shtab_elem *new;
 824
 825	if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
 826		if (!old) {
 827			atomic_dec(&htab->count);
 828			return ERR_PTR(-E2BIG);
 829		}
 830	}
 831
 832	new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
 833			   htab->map.numa_node);
 834	if (!new) {
 835		atomic_dec(&htab->count);
 836		return ERR_PTR(-ENOMEM);
 837	}
 838	memcpy(new->key, key, key_size);
 839	new->sk = sk;
 840	new->hash = hash;
 841	return new;
 842}
 843
 844static int sock_hash_update_common(struct bpf_map *map, void *key,
 845				   struct sock *sk, u64 flags)
 846{
 847	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 848	u32 key_size = map->key_size, hash;
 849	struct bpf_shtab_elem *elem, *elem_new;
 850	struct bpf_shtab_bucket *bucket;
 851	struct sk_psock_link *link;
 852	struct sk_psock *psock;
 853	int ret;
 854
 855	WARN_ON_ONCE(!rcu_read_lock_held());
 856	if (unlikely(flags > BPF_EXIST))
 857		return -EINVAL;
 858	if (inet_csk_has_ulp(sk))
 859		return -EINVAL;
 860
 861	link = sk_psock_init_link();
 862	if (!link)
 863		return -ENOMEM;
 864
 865	/* Only sockets we can redirect into/from in BPF need to hold
 866	 * refs to parser/verdict progs and have their sk_data_ready
 867	 * and sk_write_space callbacks overridden.
 868	 */
 869	if (sock_map_redirect_allowed(sk))
 870		ret = sock_map_link(map, &htab->progs, sk);
 871	else
 872		ret = sock_map_link_no_progs(map, sk);
 873	if (ret < 0)
 874		goto out_free;
 875
 876	psock = sk_psock(sk);
 877	WARN_ON_ONCE(!psock);
 878
 879	hash = sock_hash_bucket_hash(key, key_size);
 880	bucket = sock_hash_select_bucket(htab, hash);
 881
 882	raw_spin_lock_bh(&bucket->lock);
 883	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
 884	if (elem && flags == BPF_NOEXIST) {
 885		ret = -EEXIST;
 886		goto out_unlock;
 887	} else if (!elem && flags == BPF_EXIST) {
 888		ret = -ENOENT;
 889		goto out_unlock;
 890	}
 891
 892	elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
 893	if (IS_ERR(elem_new)) {
 894		ret = PTR_ERR(elem_new);
 895		goto out_unlock;
 896	}
 897
 898	sock_map_add_link(psock, link, map, elem_new);
 899	/* Add the new element to the head of the list, so that a
 900	 * concurrent search finds it before the old elem.
 901	 */
 902	hlist_add_head_rcu(&elem_new->node, &bucket->head);
 903	if (elem) {
 904		hlist_del_rcu(&elem->node);
 905		sock_map_unref(elem->sk, elem);
 906		sock_hash_free_elem(htab, elem);
 907	}
 908	raw_spin_unlock_bh(&bucket->lock);
 909	return 0;
 910out_unlock:
 911	raw_spin_unlock_bh(&bucket->lock);
 912	sk_psock_put(sk, psock);
 913out_free:
 914	sk_psock_free_link(link);
 915	return ret;
 916}
 917
 918static int sock_hash_update_elem(struct bpf_map *map, void *key,
 919				 void *value, u64 flags)
 920{
 921	struct socket *sock;
 922	struct sock *sk;
 923	int ret;
 924	u64 ufd;
 925
 926	if (map->value_size == sizeof(u64))
 927		ufd = *(u64 *)value;
 928	else
 929		ufd = *(u32 *)value;
 930	if (ufd > S32_MAX)
 931		return -EINVAL;
 932
 933	sock = sockfd_lookup(ufd, &ret);
 934	if (!sock)
 935		return ret;
 936	sk = sock->sk;
 937	if (!sk) {
 938		ret = -EINVAL;
 939		goto out;
 940	}
 941	if (!sock_map_sk_is_suitable(sk)) {
 942		ret = -EOPNOTSUPP;
 943		goto out;
 944	}
 945
 946	sock_map_sk_acquire(sk);
 947	if (!sock_map_sk_state_allowed(sk))
 948		ret = -EOPNOTSUPP;
 949	else
 950		ret = sock_hash_update_common(map, key, sk, flags);
 951	sock_map_sk_release(sk);
 952out:
 953	fput(sock->file);
 954	return ret;
 955}
 956
 957static int sock_hash_get_next_key(struct bpf_map *map, void *key,
 958				  void *key_next)
 959{
 960	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 961	struct bpf_shtab_elem *elem, *elem_next;
 962	u32 hash, key_size = map->key_size;
 963	struct hlist_head *head;
 964	int i = 0;
 965
 966	if (!key)
 967		goto find_first_elem;
 968	hash = sock_hash_bucket_hash(key, key_size);
 969	head = &sock_hash_select_bucket(htab, hash)->head;
 970	elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
 971	if (!elem)
 972		goto find_first_elem;
 973
 974	elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&elem->node)),
 975				     struct bpf_shtab_elem, node);
 976	if (elem_next) {
 977		memcpy(key_next, elem_next->key, key_size);
 978		return 0;
 979	}
 980
 981	i = hash & (htab->buckets_num - 1);
 982	i++;
 983find_first_elem:
 984	for (; i < htab->buckets_num; i++) {
 985		head = &sock_hash_select_bucket(htab, i)->head;
 986		elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
 987					     struct bpf_shtab_elem, node);
 988		if (elem_next) {
 989			memcpy(key_next, elem_next->key, key_size);
 990			return 0;
 991		}
 992	}
 993
 994	return -ENOENT;
 995}
 996
 997static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
 998{
 999	struct bpf_shtab *htab;
1000	int i, err;
1001	u64 cost;
1002
1003	if (!capable(CAP_NET_ADMIN))
1004		return ERR_PTR(-EPERM);
1005	if (attr->max_entries == 0 ||
1006	    attr->key_size    == 0 ||
1007	    (attr->value_size != sizeof(u32) &&
1008	     attr->value_size != sizeof(u64)) ||
1009	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
1010		return ERR_PTR(-EINVAL);
1011	if (attr->key_size > MAX_BPF_STACK)
1012		return ERR_PTR(-E2BIG);
1013
1014	htab = kzalloc(sizeof(*htab), GFP_USER);
1015	if (!htab)
1016		return ERR_PTR(-ENOMEM);
1017
1018	bpf_map_init_from_attr(&htab->map, attr);
1019
1020	htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
1021	htab->elem_size = sizeof(struct bpf_shtab_elem) +
1022			  round_up(htab->map.key_size, 8);
1023	if (htab->buckets_num == 0 ||
1024	    htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) {
1025		err = -EINVAL;
1026		goto free_htab;
1027	}
1028
1029	cost = (u64) htab->buckets_num * sizeof(struct bpf_shtab_bucket) +
1030	       (u64) htab->elem_size * htab->map.max_entries;
1031	if (cost >= U32_MAX - PAGE_SIZE) {
1032		err = -EINVAL;
1033		goto free_htab;
1034	}
1035	err = bpf_map_charge_init(&htab->map.memory, cost);
1036	if (err)
1037		goto free_htab;
1038
1039	htab->buckets = bpf_map_area_alloc(htab->buckets_num *
1040					   sizeof(struct bpf_shtab_bucket),
1041					   htab->map.numa_node);
1042	if (!htab->buckets) {
1043		bpf_map_charge_finish(&htab->map.memory);
1044		err = -ENOMEM;
1045		goto free_htab;
1046	}
1047
1048	for (i = 0; i < htab->buckets_num; i++) {
1049		INIT_HLIST_HEAD(&htab->buckets[i].head);
1050		raw_spin_lock_init(&htab->buckets[i].lock);
1051	}
1052
1053	return &htab->map;
1054free_htab:
1055	kfree(htab);
1056	return ERR_PTR(err);
1057}
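/* Sizing notes for the above: roundup_pow_of_two() can wrap to 0 for
 * very large max_entries, which the buckets_num == 0 check catches, and
 * the charged cost covers both the bucket array and the worst case of
 * max_entries fully allocated elements.
 */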
1058
1059static void sock_hash_free(struct bpf_map *map)
1060{
1061	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1062	struct bpf_shtab_bucket *bucket;
1063	struct hlist_head unlink_list;
1064	struct bpf_shtab_elem *elem;
1065	struct hlist_node *node;
1066	int i;
1067
1068	/* After the sync no updates or deletes will be in-flight, so it
1069	 * is safe to walk the map and remove entries without risking a
1070	 * race with the EEXIST update case.
1071	 */
1072	synchronize_rcu();
1073	for (i = 0; i < htab->buckets_num; i++) {
1074		bucket = sock_hash_select_bucket(htab, i);
1075
1076		/* We are racing with sock_hash_delete_from_link to
1077		 * enter the spin-lock critical section. Every socket on
1078		 * the list is still linked to the sockhash. Since the
1079		 * link exists, the psock exists and holds a ref to the
1080		 * socket, which lets us grab a socket ref too.
1081		 */
1082		raw_spin_lock_bh(&bucket->lock);
1083		hlist_for_each_entry(elem, &bucket->head, node)
1084			sock_hold(elem->sk);
1085		hlist_move_list(&bucket->head, &unlink_list);
1086		raw_spin_unlock_bh(&bucket->lock);
1087
1088		/* Process removed entries out of atomic context so we
1089		 * can block on the socket lock before deleting the
1090		 * psock's link to the sockhash.
1091		 */
1092		hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
1093			hlist_del(&elem->node);
1094			lock_sock(elem->sk);
1095			rcu_read_lock();
1096			sock_map_unref(elem->sk, elem);
1097			rcu_read_unlock();
1098			release_sock(elem->sk);
1099			sock_put(elem->sk);
1100			sock_hash_free_elem(htab, elem);
1101		}
1102	}
1103
1104	/* wait for psock readers accessing its map link */
1105	synchronize_rcu();
1106
1107	bpf_map_area_free(htab->buckets);
1108	kfree(htab);
1109}
1110
1111static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
1112{
1113	struct sock *sk;
1114
1115	if (map->value_size != sizeof(u64))
1116		return ERR_PTR(-ENOSPC);
1117
1118	sk = __sock_hash_lookup_elem(map, key);
1119	if (!sk)
1120		return ERR_PTR(-ENOENT);
1121
1122	sock_gen_cookie(sk);
1123	return &sk->sk_cookie;
1124}
1125
1126static void *sock_hash_lookup(struct bpf_map *map, void *key)
1127{
1128	struct sock *sk;
1129
1130	sk = __sock_hash_lookup_elem(map, key);
1131	if (!sk || !sk_fullsock(sk))
1132		return NULL;
1133	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
1134		return NULL;
1135	return sk;
1136}
1137
1138static void sock_hash_release_progs(struct bpf_map *map)
1139{
1140	psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs);
1141}
1142
1143BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
1144	   struct bpf_map *, map, void *, key, u64, flags)
1145{
1146	WARN_ON_ONCE(!rcu_read_lock_held());
1147
1148	if (likely(sock_map_sk_is_suitable(sops->sk) &&
1149		   sock_map_op_okay(sops)))
1150		return sock_hash_update_common(map, key, sops->sk, flags);
1151	return -EOPNOTSUPP;
1152}
1153
1154const struct bpf_func_proto bpf_sock_hash_update_proto = {
1155	.func		= bpf_sock_hash_update,
1156	.gpl_only	= false,
1157	.pkt_access	= true,
1158	.ret_type	= RET_INTEGER,
1159	.arg1_type	= ARG_PTR_TO_CTX,
1160	.arg2_type	= ARG_CONST_MAP_PTR,
1161	.arg3_type	= ARG_PTR_TO_MAP_KEY,
1162	.arg4_type	= ARG_ANYTHING,
1163};
1164
1165BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
1166	   struct bpf_map *, map, void *, key, u64, flags)
1167{
1168	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
1169	struct sock *sk;
1170
1171	if (unlikely(flags & ~(BPF_F_INGRESS)))
1172		return SK_DROP;
1173
1174	sk = __sock_hash_lookup_elem(map, key);
1175	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1176		return SK_DROP;
1177
1178	tcb->bpf.flags = flags;
1179	tcb->bpf.sk_redir = sk;
1180	return SK_PASS;
1181}
1182
1183const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
1184	.func           = bpf_sk_redirect_hash,
1185	.gpl_only       = false,
1186	.ret_type       = RET_INTEGER,
1187	.arg1_type	= ARG_PTR_TO_CTX,
1188	.arg2_type      = ARG_CONST_MAP_PTR,
1189	.arg3_type      = ARG_PTR_TO_MAP_KEY,
1190	.arg4_type      = ARG_ANYTHING,
1191};
1192
1193BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
1194	   struct bpf_map *, map, void *, key, u64, flags)
1195{
1196	struct sock *sk;
1197
1198	if (unlikely(flags & ~(BPF_F_INGRESS)))
1199		return SK_DROP;
1200
1201	sk = __sock_hash_lookup_elem(map, key);
1202	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1203		return SK_DROP;
1204
1205	msg->flags = flags;
1206	msg->sk_redir = sk;
1207	return SK_PASS;
1208}
1209
1210const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
1211	.func           = bpf_msg_redirect_hash,
1212	.gpl_only       = false,
1213	.ret_type       = RET_INTEGER,
1214	.arg1_type	= ARG_PTR_TO_CTX,
1215	.arg2_type      = ARG_CONST_MAP_PTR,
1216	.arg3_type      = ARG_PTR_TO_MAP_KEY,
1217	.arg4_type      = ARG_ANYTHING,
1218};
1219
1220static int sock_hash_map_btf_id;
1221const struct bpf_map_ops sock_hash_ops = {
1222	.map_alloc		= sock_hash_alloc,
1223	.map_free		= sock_hash_free,
1224	.map_get_next_key	= sock_hash_get_next_key,
1225	.map_update_elem	= sock_hash_update_elem,
1226	.map_delete_elem	= sock_hash_delete_elem,
1227	.map_lookup_elem	= sock_hash_lookup,
1228	.map_lookup_elem_sys_only = sock_hash_lookup_sys,
1229	.map_release_uref	= sock_hash_release_progs,
1230	.map_check_btf		= map_check_no_btf,
1231	.map_btf_name		= "bpf_shtab",
1232	.map_btf_id		= &sock_hash_map_btf_id,
1233};
1234
1235static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
1236{
1237	switch (map->map_type) {
1238	case BPF_MAP_TYPE_SOCKMAP:
1239		return &container_of(map, struct bpf_stab, map)->progs;
1240	case BPF_MAP_TYPE_SOCKHASH:
1241		return &container_of(map, struct bpf_shtab, map)->progs;
1242	default:
1243		break;
1244	}
1245
1246	return NULL;
1247}
1248
1249int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
1250			 struct bpf_prog *old, u32 which)
1251{
1252	struct sk_psock_progs *progs = sock_map_progs(map);
1253	struct bpf_prog **pprog;
1254
1255	if (!progs)
1256		return -EOPNOTSUPP;
1257
1258	switch (which) {
1259	case BPF_SK_MSG_VERDICT:
1260		pprog = &progs->msg_parser;
1261		break;
1262	case BPF_SK_SKB_STREAM_PARSER:
1263		pprog = &progs->skb_parser;
1264		break;
1265	case BPF_SK_SKB_STREAM_VERDICT:
1266		pprog = &progs->skb_verdict;
1267		break;
1268	default:
1269		return -EOPNOTSUPP;
1270	}
1271
1272	if (old)
1273		return psock_replace_prog(pprog, prog, old);
1274
1275	psock_set_prog(pprog, prog);
1276	return 0;
1277}
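/* Example (userspace sketch, prog_fd/map_fd placeholders): this update
 * path is reached via BPF_PROG_ATTACH with the map fd as the target:
 *
 *	bpf_prog_attach(prog_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0);
 *
 * When an old prog is given (the detach/replace case), the swap goes
 * through psock_replace_prog() above and fails if the currently
 * attached prog is not the expected one.
 */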
1278
1279static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link)
1280{
1281	switch (link->map->map_type) {
1282	case BPF_MAP_TYPE_SOCKMAP:
1283		return sock_map_delete_from_link(link->map, sk,
1284						 link->link_raw);
1285	case BPF_MAP_TYPE_SOCKHASH:
1286		return sock_hash_delete_from_link(link->map, sk,
1287						  link->link_raw);
1288	default:
1289		break;
1290	}
1291}
1292
1293static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock)
1294{
1295	struct sk_psock_link *link;
1296
1297	while ((link = sk_psock_link_pop(psock))) {
1298		sock_map_unlink(sk, link);
1299		sk_psock_free_link(link);
1300	}
1301}
1302
1303void sock_map_unhash(struct sock *sk)
1304{
1305	void (*saved_unhash)(struct sock *sk);
1306	struct sk_psock *psock;
1307
1308	rcu_read_lock();
1309	psock = sk_psock(sk);
1310	if (unlikely(!psock)) {
1311		rcu_read_unlock();
1312		if (sk->sk_prot->unhash)
1313			sk->sk_prot->unhash(sk);
1314		return;
1315	}
1316
1317	saved_unhash = psock->saved_unhash;
1318	sock_map_remove_links(sk, psock);
1319	rcu_read_unlock();
1320	saved_unhash(sk);
1321}
1322
1323void sock_map_close(struct sock *sk, long timeout)
1324{
1325	void (*saved_close)(struct sock *sk, long timeout);
1326	struct sk_psock *psock;
1327
1328	lock_sock(sk);
1329	rcu_read_lock();
1330	psock = sk_psock(sk);
1331	if (unlikely(!psock)) {
1332		rcu_read_unlock();
1333		release_sock(sk);
1334		return sk->sk_prot->close(sk, timeout);
1335	}
1336
1337	saved_close = psock->saved_close;
1338	sock_map_remove_links(sk, psock);
1339	rcu_read_unlock();
1340	release_sock(sk);
1341	saved_close(sk, timeout);
1342}
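/* In both sock_map_unhash() and sock_map_close() above, the saved
 * protocol callback runs only after the psock's map links are removed
 * and the locks taken here are released, so the original proto handler
 * sees the socket with its sockmap state already torn down.
 */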