v6.2 (net/core/sock_map.c)
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
   3
   4#include <linux/bpf.h>
   5#include <linux/btf_ids.h>
   6#include <linux/filter.h>
   7#include <linux/errno.h>
   8#include <linux/file.h>
   9#include <linux/net.h>
  10#include <linux/workqueue.h>
  11#include <linux/skmsg.h>
  12#include <linux/list.h>
  13#include <linux/jhash.h>
  14#include <linux/sock_diag.h>
  15#include <net/udp.h>
  16
  17struct bpf_stab {
  18	struct bpf_map map;
  19	struct sock **sks;
  20	struct sk_psock_progs progs;
  21	raw_spinlock_t lock;
  22};
  23
  24#define SOCK_CREATE_FLAG_MASK				\
  25	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
  26
  27static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
  28				struct bpf_prog *old, u32 which);
  29static struct sk_psock_progs *sock_map_progs(struct bpf_map *map);
  30
  31static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
  32{
  33	struct bpf_stab *stab;
  34
  35	if (!capable(CAP_NET_ADMIN))
  36		return ERR_PTR(-EPERM);
  37	if (attr->max_entries == 0 ||
  38	    attr->key_size    != 4 ||
  39	    (attr->value_size != sizeof(u32) &&
  40	     attr->value_size != sizeof(u64)) ||
  41	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
  42		return ERR_PTR(-EINVAL);
  43
  44	stab = bpf_map_area_alloc(sizeof(*stab), NUMA_NO_NODE);
  45	if (!stab)
  46		return ERR_PTR(-ENOMEM);
  47
  48	bpf_map_init_from_attr(&stab->map, attr);
  49	raw_spin_lock_init(&stab->lock);
  50
  51	stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
  52				       sizeof(struct sock *),
  53				       stab->map.numa_node);
  54	if (!stab->sks) {
  55		bpf_map_area_free(stab);
  56		return ERR_PTR(-ENOMEM);
  57	}
  58
  59	return &stab->map;
  60}
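
/*
 * A minimal user-space sketch (libbpf assumed) of how sock_map_alloc() is
 * reached: creating a BPF_MAP_TYPE_SOCKMAP. Per the checks above, key_size
 * must be 4 and value_size must be sizeof(u32) or sizeof(u64); the map name
 * and max_entries below are illustrative.
 */
#include <bpf/bpf.h>

static int create_sockmap(void)
{
	/* key: u32 index, value: u32 socket fd, 64 slots */
	return bpf_map_create(BPF_MAP_TYPE_SOCKMAP, "example_sockmap",
			      sizeof(__u32), sizeof(__u32), 64, NULL);
}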
  61
  62int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
  63{
  64	u32 ufd = attr->target_fd;
  65	struct bpf_map *map;
  66	struct fd f;
  67	int ret;
  68
  69	if (attr->attach_flags || attr->replace_bpf_fd)
  70		return -EINVAL;
  71
  72	f = fdget(ufd);
  73	map = __bpf_map_get(f);
  74	if (IS_ERR(map))
  75		return PTR_ERR(map);
  76	ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
  77	fdput(f);
  78	return ret;
  79}
  80
  81int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
  82{
  83	u32 ufd = attr->target_fd;
  84	struct bpf_prog *prog;
  85	struct bpf_map *map;
  86	struct fd f;
  87	int ret;
  88
  89	if (attr->attach_flags || attr->replace_bpf_fd)
  90		return -EINVAL;
  91
  92	f = fdget(ufd);
  93	map = __bpf_map_get(f);
  94	if (IS_ERR(map))
  95		return PTR_ERR(map);
  96
  97	prog = bpf_prog_get(attr->attach_bpf_fd);
  98	if (IS_ERR(prog)) {
  99		ret = PTR_ERR(prog);
 100		goto put_map;
 101	}
 102
 103	if (prog->type != ptype) {
 104		ret = -EINVAL;
 105		goto put_prog;
 106	}
 107
 108	ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
 109put_prog:
 110	bpf_prog_put(prog);
 111put_map:
 112	fdput(f);
 113	return ret;
 114}
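
/*
 * A hedged sketch of the user-space calls that reach
 * sock_map_get_from_fd() and sock_map_prog_detach(): attaching and
 * detaching a stream verdict program on the map fd. prog_fd and map_fd
 * are assumed to come from earlier bpf_prog_load()/bpf_map_create() calls.
 */
#include <bpf/bpf.h>

static int attach_verdict(int prog_fd, int map_fd)
{
	return bpf_prog_attach(prog_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0);
}

static int detach_verdict(int prog_fd, int map_fd)
{
	return bpf_prog_detach2(prog_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT);
}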
 115
 116static void sock_map_sk_acquire(struct sock *sk)
 117	__acquires(&sk->sk_lock.slock)
 118{
 119	lock_sock(sk);
 120	preempt_disable();
 121	rcu_read_lock();
 122}
 123
 124static void sock_map_sk_release(struct sock *sk)
 125	__releases(&sk->sk_lock.slock)
 126{
 127	rcu_read_unlock();
 128	preempt_enable();
 129	release_sock(sk);
 130}
 131
 132static void sock_map_add_link(struct sk_psock *psock,
 133			      struct sk_psock_link *link,
 134			      struct bpf_map *map, void *link_raw)
 135{
 136	link->link_raw = link_raw;
 137	link->map = map;
 138	spin_lock_bh(&psock->link_lock);
 139	list_add_tail(&link->list, &psock->link);
 140	spin_unlock_bh(&psock->link_lock);
 141}
 142
 143static void sock_map_del_link(struct sock *sk,
 144			      struct sk_psock *psock, void *link_raw)
 145{
 146	bool strp_stop = false, verdict_stop = false;
 147	struct sk_psock_link *link, *tmp;
 148
 149	spin_lock_bh(&psock->link_lock);
 150	list_for_each_entry_safe(link, tmp, &psock->link, list) {
 151		if (link->link_raw == link_raw) {
 152			struct bpf_map *map = link->map;
 153			struct bpf_stab *stab = container_of(map, struct bpf_stab,
 154							     map);
 155			if (psock->saved_data_ready && stab->progs.stream_parser)
 156				strp_stop = true;
 157			if (psock->saved_data_ready && stab->progs.stream_verdict)
 158				verdict_stop = true;
 159			if (psock->saved_data_ready && stab->progs.skb_verdict)
 160				verdict_stop = true;
 161			list_del(&link->list);
 162			sk_psock_free_link(link);
 163		}
 164	}
 165	spin_unlock_bh(&psock->link_lock);
 166	if (strp_stop || verdict_stop) {
 167		write_lock_bh(&sk->sk_callback_lock);
 168		if (strp_stop)
 169			sk_psock_stop_strp(sk, psock);
 170		if (verdict_stop)
 171			sk_psock_stop_verdict(sk, psock);
 172
 173		if (psock->psock_update_sk_prot)
 174			psock->psock_update_sk_prot(sk, psock, false);
 175		write_unlock_bh(&sk->sk_callback_lock);
 176	}
 177}
 178
 179static void sock_map_unref(struct sock *sk, void *link_raw)
 180{
 181	struct sk_psock *psock = sk_psock(sk);
 182
 183	if (likely(psock)) {
 184		sock_map_del_link(sk, psock, link_raw);
 185		sk_psock_put(sk, psock);
 186	}
 187}
 188
 189static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
 190{
 191	if (!sk->sk_prot->psock_update_sk_prot)
 192		return -EINVAL;
 193	psock->psock_update_sk_prot = sk->sk_prot->psock_update_sk_prot;
 194	return sk->sk_prot->psock_update_sk_prot(sk, psock, false);
 195}
 196
 197static struct sk_psock *sock_map_psock_get_checked(struct sock *sk)
 198{
 199	struct sk_psock *psock;
 200
 201	rcu_read_lock();
 202	psock = sk_psock(sk);
 203	if (psock) {
 204		if (sk->sk_prot->close != sock_map_close) {
 205			psock = ERR_PTR(-EBUSY);
 206			goto out;
 207		}
 208
 209		if (!refcount_inc_not_zero(&psock->refcnt))
 210			psock = ERR_PTR(-EBUSY);
 211	}
 212out:
 213	rcu_read_unlock();
 214	return psock;
 215}
 216
 217static int sock_map_link(struct bpf_map *map, struct sock *sk)
 218{
 219	struct sk_psock_progs *progs = sock_map_progs(map);
 220	struct bpf_prog *stream_verdict = NULL;
 221	struct bpf_prog *stream_parser = NULL;
 222	struct bpf_prog *skb_verdict = NULL;
 223	struct bpf_prog *msg_parser = NULL;
 224	struct sk_psock *psock;
 225	int ret;
 226
 227	stream_verdict = READ_ONCE(progs->stream_verdict);
 228	if (stream_verdict) {
 229		stream_verdict = bpf_prog_inc_not_zero(stream_verdict);
 230		if (IS_ERR(stream_verdict))
 231			return PTR_ERR(stream_verdict);
 232	}
 233
 234	stream_parser = READ_ONCE(progs->stream_parser);
 235	if (stream_parser) {
 236		stream_parser = bpf_prog_inc_not_zero(stream_parser);
 237		if (IS_ERR(stream_parser)) {
 238			ret = PTR_ERR(stream_parser);
 239			goto out_put_stream_verdict;
 240		}
 241	}
 242
 243	msg_parser = READ_ONCE(progs->msg_parser);
 244	if (msg_parser) {
 245		msg_parser = bpf_prog_inc_not_zero(msg_parser);
 246		if (IS_ERR(msg_parser)) {
 247			ret = PTR_ERR(msg_parser);
 248			goto out_put_stream_parser;
 249		}
 250	}
 251
 252	skb_verdict = READ_ONCE(progs->skb_verdict);
 253	if (skb_verdict) {
 254		skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
 255		if (IS_ERR(skb_verdict)) {
 256			ret = PTR_ERR(skb_verdict);
 257			goto out_put_msg_parser;
 258		}
 259	}
 260
 261	psock = sock_map_psock_get_checked(sk);
 262	if (IS_ERR(psock)) {
 263		ret = PTR_ERR(psock);
 264		goto out_progs;
 265	}
 266
 267	if (psock) {
 268		if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
 269		    (stream_parser && READ_ONCE(psock->progs.stream_parser)) ||
 270		    (skb_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
 271		    (skb_verdict && READ_ONCE(psock->progs.stream_verdict)) ||
 272		    (stream_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
 273		    (stream_verdict && READ_ONCE(psock->progs.stream_verdict))) {
 274			sk_psock_put(sk, psock);
 275			ret = -EBUSY;
 276			goto out_progs;
 277		}
 278	} else {
 279		psock = sk_psock_init(sk, map->numa_node);
 280		if (IS_ERR(psock)) {
 281			ret = PTR_ERR(psock);
 282			goto out_progs;
 283		}
 284	}
 285
 286	if (msg_parser)
 287		psock_set_prog(&psock->progs.msg_parser, msg_parser);
 288	if (stream_parser)
 289		psock_set_prog(&psock->progs.stream_parser, stream_parser);
 290	if (stream_verdict)
 291		psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
 292	if (skb_verdict)
 293		psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
 294
 295	/* msg_* and stream_* program references are tracked in psock after this
 296	 * point. Reference dec and cleanup will occur through the psock destructor.
 297	 */
 298	ret = sock_map_init_proto(sk, psock);
 299	if (ret < 0) {
 300		sk_psock_put(sk, psock);
 301		goto out;
 302	}
 303
 304	write_lock_bh(&sk->sk_callback_lock);
 305	if (stream_parser && stream_verdict && !psock->saved_data_ready) {
 306		ret = sk_psock_init_strp(sk, psock);
 307		if (ret) {
 308			write_unlock_bh(&sk->sk_callback_lock);
 309			sk_psock_put(sk, psock);
 310			goto out;
 311		}
 312		sk_psock_start_strp(sk, psock);
 313	} else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
 314		sk_psock_start_verdict(sk, psock);
 315	} else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) {
 316		sk_psock_start_verdict(sk, psock);
 317	}
 318	write_unlock_bh(&sk->sk_callback_lock);
 319	return 0;
 320out_progs:
 321	if (skb_verdict)
 322		bpf_prog_put(skb_verdict);
 323out_put_msg_parser:
 324	if (msg_parser)
 325		bpf_prog_put(msg_parser);
 326out_put_stream_parser:
 327	if (stream_parser)
 328		bpf_prog_put(stream_parser);
 329out_put_stream_verdict:
 330	if (stream_verdict)
 331		bpf_prog_put(stream_verdict);
 332out:
 333	return ret;
 334}
 335
 336static void sock_map_free(struct bpf_map *map)
 337{
 338	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 339	int i;
 340
 341	/* After the sync no updates or deletes will be in-flight so it
 342	 * is safe to walk the map and remove entries without risking a
 343	 * race in the EEXIST update case.
 344	 */
 345	synchronize_rcu();
 346	for (i = 0; i < stab->map.max_entries; i++) {
 347		struct sock **psk = &stab->sks[i];
 348		struct sock *sk;
 349
 350		sk = xchg(psk, NULL);
 351		if (sk) {
 352			sock_hold(sk);
 353			lock_sock(sk);
 354			rcu_read_lock();
 355			sock_map_unref(sk, psk);
 356			rcu_read_unlock();
 357			release_sock(sk);
 358			sock_put(sk);
 359		}
 360	}
 361
 362	/* wait for psock readers accessing its map link */
 363	synchronize_rcu();
 364
 365	bpf_map_area_free(stab->sks);
 366	bpf_map_area_free(stab);
 367}
 368
 369static void sock_map_release_progs(struct bpf_map *map)
 370{
 371	psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
 372}
 373
 374static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
 375{
 376	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 377
 378	WARN_ON_ONCE(!rcu_read_lock_held());
 379
 380	if (unlikely(key >= map->max_entries))
 381		return NULL;
 382	return READ_ONCE(stab->sks[key]);
 383}
 384
 385static void *sock_map_lookup(struct bpf_map *map, void *key)
 386{
 387	struct sock *sk;
 388
 389	sk = __sock_map_lookup_elem(map, *(u32 *)key);
 390	if (!sk)
 391		return NULL;
 392	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
 393		return NULL;
 394	return sk;
 395}
 396
 397static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
 398{
 399	struct sock *sk;
 400
 401	if (map->value_size != sizeof(u64))
 402		return ERR_PTR(-ENOSPC);
 403
 404	sk = __sock_map_lookup_elem(map, *(u32 *)key);
 405	if (!sk)
 406		return ERR_PTR(-ENOENT);
 407
 408	__sock_gen_cookie(sk);
 409	return &sk->sk_cookie;
 410}
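
/*
 * User-space lookup sketch: BPF_MAP_LOOKUP_ELEM on a sockmap goes through
 * sock_map_lookup_sys() above and returns the socket cookie, not an fd,
 * and only for maps created with an 8-byte value (else -ENOSPC). Assumes
 * libbpf and a suitable map_fd.
 */
#include <bpf/bpf.h>

static int get_cookie(int map_fd, __u32 idx, __u64 *cookie)
{
	return bpf_map_lookup_elem(map_fd, &idx, cookie);
}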
 411
 412static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
 413			     struct sock **psk)
 414{
 415	struct sock *sk;
 416	int err = 0;
 417
 418	raw_spin_lock_bh(&stab->lock);
 419	sk = *psk;
 420	if (!sk_test || sk_test == sk)
 421		sk = xchg(psk, NULL);
 422
 423	if (likely(sk))
 424		sock_map_unref(sk, psk);
 425	else
 426		err = -EINVAL;
 427
 428	raw_spin_unlock_bh(&stab->lock);
 429	return err;
 430}
 431
 432static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
 433				      void *link_raw)
 434{
 435	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 436
 437	__sock_map_delete(stab, sk, link_raw);
 438}
 439
 440static int sock_map_delete_elem(struct bpf_map *map, void *key)
 441{
 442	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 443	u32 i = *(u32 *)key;
 444	struct sock **psk;
 445
 446	if (unlikely(i >= map->max_entries))
 447		return -EINVAL;
 448
 449	psk = &stab->sks[i];
 450	return __sock_map_delete(stab, NULL, psk);
 451}
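
/*
 * Deletion sketch: bpf_map_delete_elem() from user space lands in
 * sock_map_delete_elem() above; only the index is needed, the slot is
 * cleared unconditionally. Assumes libbpf.
 */
#include <bpf/bpf.h>

static int del_sock(int map_fd, __u32 idx)
{
	return bpf_map_delete_elem(map_fd, &idx);
}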
 452
 453static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
 454{
 455	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 456	u32 i = key ? *(u32 *)key : U32_MAX;
 457	u32 *key_next = next;
 458
 459	if (i == stab->map.max_entries - 1)
 460		return -ENOENT;
 461	if (i >= stab->map.max_entries)
 462		*key_next = 0;
 463	else
 464		*key_next = i + 1;
 465	return 0;
 466}
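
/*
 * Iteration sketch matching the semantics above: a NULL or out-of-range
 * key restarts at index 0, each step yields key + 1, and -ENOENT marks
 * the last slot. Note the indices walked may still hold NULL sockets.
 * Assumes libbpf and a sockmap map_fd.
 */
#include <bpf/bpf.h>

static void walk_sockmap_keys(int map_fd)
{
	__u32 key, next;
	void *cur = NULL;	/* NULL key: start from index 0 */

	while (!bpf_map_get_next_key(map_fd, cur, &next)) {
		key = next;	/* process index "key" here */
		cur = &key;
	}
}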
 467
 468static int sock_map_update_common(struct bpf_map *map, u32 idx,
 469				  struct sock *sk, u64 flags)
 470{
 471	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 472	struct sk_psock_link *link;
 473	struct sk_psock *psock;
 474	struct sock *osk;
 475	int ret;
 476
 477	WARN_ON_ONCE(!rcu_read_lock_held());
 478	if (unlikely(flags > BPF_EXIST))
 479		return -EINVAL;
 480	if (unlikely(idx >= map->max_entries))
 481		return -E2BIG;
 482
 483	link = sk_psock_init_link();
 484	if (!link)
 485		return -ENOMEM;
 486
 487	ret = sock_map_link(map, sk);
 488	if (ret < 0)
 489		goto out_free;
 490
 491	psock = sk_psock(sk);
 492	WARN_ON_ONCE(!psock);
 493
 494	raw_spin_lock_bh(&stab->lock);
 495	osk = stab->sks[idx];
 496	if (osk && flags == BPF_NOEXIST) {
 497		ret = -EEXIST;
 498		goto out_unlock;
 499	} else if (!osk && flags == BPF_EXIST) {
 500		ret = -ENOENT;
 501		goto out_unlock;
 502	}
 503
 504	sock_map_add_link(psock, link, map, &stab->sks[idx]);
 505	stab->sks[idx] = sk;
 506	if (osk)
 507		sock_map_unref(osk, &stab->sks[idx]);
 508	raw_spin_unlock_bh(&stab->lock);
 509	return 0;
 510out_unlock:
 511	raw_spin_unlock_bh(&stab->lock);
 512	if (psock)
 513		sk_psock_put(sk, psock);
 514out_free:
 515	sk_psock_free_link(link);
 516	return ret;
 517}
 518
 519static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
 520{
 521	return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
 522	       ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
 523	       ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
 524}
 525
 526static bool sock_map_redirect_allowed(const struct sock *sk)
 527{
 528	if (sk_is_tcp(sk))
 529		return sk->sk_state != TCP_LISTEN;
 530	else
 531		return sk->sk_state == TCP_ESTABLISHED;
 532}
 533
 534static bool sock_map_sk_is_suitable(const struct sock *sk)
 535{
 536	return !!sk->sk_prot->psock_update_sk_prot;
 537}
 538
 539static bool sock_map_sk_state_allowed(const struct sock *sk)
 540{
 541	if (sk_is_tcp(sk))
 542		return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
 543	return true;
 544}
 545
 546static int sock_hash_update_common(struct bpf_map *map, void *key,
 547				   struct sock *sk, u64 flags);
 548
 549int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
 550			     u64 flags)
 551{
 552	struct socket *sock;
 553	struct sock *sk;
 554	int ret;
 555	u64 ufd;
 556
 557	if (map->value_size == sizeof(u64))
 558		ufd = *(u64 *)value;
 559	else
 560		ufd = *(u32 *)value;
 561	if (ufd > S32_MAX)
 562		return -EINVAL;
 563
 564	sock = sockfd_lookup(ufd, &ret);
 565	if (!sock)
 566		return ret;
 567	sk = sock->sk;
 568	if (!sk) {
 569		ret = -EINVAL;
 570		goto out;
 571	}
 572	if (!sock_map_sk_is_suitable(sk)) {
 573		ret = -EOPNOTSUPP;
 574		goto out;
 575	}
 576
 577	sock_map_sk_acquire(sk);
 578	if (!sock_map_sk_state_allowed(sk))
 579		ret = -EOPNOTSUPP;
 580	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
 581		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
 582	else
 583		ret = sock_hash_update_common(map, key, sk, flags);
 584	sock_map_sk_release(sk);
 585out:
 586	sockfd_put(sock);
 587	return ret;
 588}
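
/*
 * User-space counterpart of sock_map_update_elem_sys(): the map value is
 * a socket file descriptor. The socket's protocol must support psock
 * (sock_map_sk_is_suitable) and be in an allowed state, e.g. a connected
 * TCP socket. sock_fd and idx below are illustrative. Assumes libbpf.
 */
#include <bpf/bpf.h>

static int add_sock(int map_fd, __u32 idx, int sock_fd)
{
	/* BPF_ANY: insert or replace the slot at idx */
	return bpf_map_update_elem(map_fd, &idx, &sock_fd, BPF_ANY);
}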
 589
 590static int sock_map_update_elem(struct bpf_map *map, void *key,
 591				void *value, u64 flags)
 592{
 593	struct sock *sk = (struct sock *)value;
 594	int ret;
 595
 596	if (unlikely(!sk || !sk_fullsock(sk)))
 597		return -EINVAL;
 598
 599	if (!sock_map_sk_is_suitable(sk))
 600		return -EOPNOTSUPP;
 601
 602	local_bh_disable();
 603	bh_lock_sock(sk);
 604	if (!sock_map_sk_state_allowed(sk))
 605		ret = -EOPNOTSUPP;
 606	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
 607		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
 608	else
 609		ret = sock_hash_update_common(map, key, sk, flags);
 610	bh_unlock_sock(sk);
 611	local_bh_enable();
 612	return ret;
 613}
 614
 615BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
 616	   struct bpf_map *, map, void *, key, u64, flags)
 617{
 618	WARN_ON_ONCE(!rcu_read_lock_held());
 619
 620	if (likely(sock_map_sk_is_suitable(sops->sk) &&
 621		   sock_map_op_okay(sops)))
 622		return sock_map_update_common(map, *(u32 *)key, sops->sk,
 623					      flags);
 624	return -EOPNOTSUPP;
 625}
 626
 627const struct bpf_func_proto bpf_sock_map_update_proto = {
 628	.func		= bpf_sock_map_update,
 629	.gpl_only	= false,
 630	.pkt_access	= true,
 631	.ret_type	= RET_INTEGER,
 632	.arg1_type	= ARG_PTR_TO_CTX,
 633	.arg2_type	= ARG_CONST_MAP_PTR,
 634	.arg3_type	= ARG_PTR_TO_MAP_KEY,
 635	.arg4_type	= ARG_ANYTHING,
 636};
 637
 638BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
 639	   struct bpf_map *, map, u32, key, u64, flags)
 640{
 641	struct sock *sk;
 642
 643	if (unlikely(flags & ~(BPF_F_INGRESS)))
 644		return SK_DROP;
 645
 646	sk = __sock_map_lookup_elem(map, key);
 647	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
 648		return SK_DROP;
 649
 650	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
 651	return SK_PASS;
 652}
 653
 654const struct bpf_func_proto bpf_sk_redirect_map_proto = {
 655	.func           = bpf_sk_redirect_map,
 656	.gpl_only       = false,
 657	.ret_type       = RET_INTEGER,
 658	.arg1_type	= ARG_PTR_TO_CTX,
 659	.arg2_type      = ARG_CONST_MAP_PTR,
 660	.arg3_type      = ARG_ANYTHING,
 661	.arg4_type      = ARG_ANYTHING,
 662};
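
/*
 * A BPF-side sketch of bpf_sk_redirect_map() in a stream verdict program.
 * The "sock_map" definition and the fixed key are illustrative; flags of 0
 * redirect to the egress path, BPF_F_INGRESS to the ingress queue.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u32);
} sock_map SEC(".maps");

SEC("sk_skb/stream_verdict")
int prog_stream_verdict(struct __sk_buff *skb)
{
	__u32 key = 0;

	return bpf_sk_redirect_map(skb, &sock_map, key, 0);
}

char _license[] SEC("license") = "GPL";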
 663
 664BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
 665	   struct bpf_map *, map, u32, key, u64, flags)
 666{
 667	struct sock *sk;
 668
 669	if (unlikely(flags & ~(BPF_F_INGRESS)))
 670		return SK_DROP;
 671
 672	sk = __sock_map_lookup_elem(map, key);
 673	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
 674		return SK_DROP;
 675
 676	msg->flags = flags;
 677	msg->sk_redir = sk;
 678	return SK_PASS;
 679}
 680
 681const struct bpf_func_proto bpf_msg_redirect_map_proto = {
 682	.func           = bpf_msg_redirect_map,
 683	.gpl_only       = false,
 684	.ret_type       = RET_INTEGER,
 685	.arg1_type	= ARG_PTR_TO_CTX,
 686	.arg2_type      = ARG_CONST_MAP_PTR,
 687	.arg3_type      = ARG_ANYTHING,
 688	.arg4_type      = ARG_ANYTHING,
 689};
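
/*
 * The sendmsg-path analogue: an sk_msg program that redirects message data
 * to the socket stored at a fixed key. Assumes the sock_map definition and
 * headers from the sk_skb sketch above.
 */
SEC("sk_msg")
int prog_msg_redirect(struct sk_msg_md *msg)
{
	__u32 key = 0;

	return bpf_msg_redirect_map(msg, &sock_map, key, 0);
}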
 690
 691struct sock_map_seq_info {
 692	struct bpf_map *map;
 693	struct sock *sk;
 694	u32 index;
 695};
 696
 697struct bpf_iter__sockmap {
 698	__bpf_md_ptr(struct bpf_iter_meta *, meta);
 699	__bpf_md_ptr(struct bpf_map *, map);
 700	__bpf_md_ptr(void *, key);
 701	__bpf_md_ptr(struct sock *, sk);
 702};
 703
 704DEFINE_BPF_ITER_FUNC(sockmap, struct bpf_iter_meta *meta,
 705		     struct bpf_map *map, void *key,
 706		     struct sock *sk)
 707
 708static void *sock_map_seq_lookup_elem(struct sock_map_seq_info *info)
 709{
 710	if (unlikely(info->index >= info->map->max_entries))
 711		return NULL;
 712
 713	info->sk = __sock_map_lookup_elem(info->map, info->index);
 714
 715	/* can't return sk directly, since that might be NULL */
 716	return info;
 717}
 718
 719static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos)
 720	__acquires(rcu)
 721{
 722	struct sock_map_seq_info *info = seq->private;
 723
 724	if (*pos == 0)
 725		++*pos;
 726
 727	/* pairs with sock_map_seq_stop */
 728	rcu_read_lock();
 729	return sock_map_seq_lookup_elem(info);
 730}
 731
 732static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 733	__must_hold(rcu)
 734{
 735	struct sock_map_seq_info *info = seq->private;
 736
 737	++*pos;
 738	++info->index;
 739
 740	return sock_map_seq_lookup_elem(info);
 741}
 742
 743static int sock_map_seq_show(struct seq_file *seq, void *v)
 744	__must_hold(rcu)
 745{
 746	struct sock_map_seq_info *info = seq->private;
 747	struct bpf_iter__sockmap ctx = {};
 748	struct bpf_iter_meta meta;
 749	struct bpf_prog *prog;
 750
 751	meta.seq = seq;
 752	prog = bpf_iter_get_info(&meta, !v);
 753	if (!prog)
 754		return 0;
 755
 756	ctx.meta = &meta;
 757	ctx.map = info->map;
 758	if (v) {
 759		ctx.key = &info->index;
 760		ctx.sk = info->sk;
 761	}
 762
 763	return bpf_iter_run_prog(prog, &ctx);
 764}
 765
 766static void sock_map_seq_stop(struct seq_file *seq, void *v)
 767	__releases(rcu)
 768{
 769	if (!v)
 770		(void)sock_map_seq_show(seq, NULL);
 771
 772	/* pairs with sock_map_seq_start */
 773	rcu_read_unlock();
 774}
 775
 776static const struct seq_operations sock_map_seq_ops = {
 777	.start	= sock_map_seq_start,
 778	.next	= sock_map_seq_next,
 779	.stop	= sock_map_seq_stop,
 780	.show	= sock_map_seq_show,
 781};
 782
 783static int sock_map_init_seq_private(void *priv_data,
 784				     struct bpf_iter_aux_info *aux)
 785{
 786	struct sock_map_seq_info *info = priv_data;
 787
 788	bpf_map_inc_with_uref(aux->map);
 789	info->map = aux->map;
 790	return 0;
 791}
 792
 793static void sock_map_fini_seq_private(void *priv_data)
 794{
 795	struct sock_map_seq_info *info = priv_data;
 796
 797	bpf_map_put_with_uref(info->map);
 798}
 799
 799
 800static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
 801	.seq_ops		= &sock_map_seq_ops,
 802	.init_seq_private	= sock_map_init_seq_private,
 803	.fini_seq_private	= sock_map_fini_seq_private,
 804	.seq_priv_size		= sizeof(struct sock_map_seq_info),
 805};
 806
 807BTF_ID_LIST_SINGLE(sock_map_btf_ids, struct, bpf_stab)
 808const struct bpf_map_ops sock_map_ops = {
 809	.map_meta_equal		= bpf_map_meta_equal,
 810	.map_alloc		= sock_map_alloc,
 811	.map_free		= sock_map_free,
 812	.map_get_next_key	= sock_map_get_next_key,
 813	.map_lookup_elem_sys_only = sock_map_lookup_sys,
 814	.map_update_elem	= sock_map_update_elem,
 815	.map_delete_elem	= sock_map_delete_elem,
 816	.map_lookup_elem	= sock_map_lookup,
 817	.map_release_uref	= sock_map_release_progs,
 818	.map_check_btf		= map_check_no_btf,
 819	.map_btf_id		= &sock_map_btf_ids[0],
 820	.iter_seq_info		= &sock_map_iter_seq_info,
 821};
 822
 823struct bpf_shtab_elem {
 824	struct rcu_head rcu;
 825	u32 hash;
 826	struct sock *sk;
 827	struct hlist_node node;
 828	u8 key[];
 829};
 830
 831struct bpf_shtab_bucket {
 832	struct hlist_head head;
 833	raw_spinlock_t lock;
 834};
 835
 836struct bpf_shtab {
 837	struct bpf_map map;
 838	struct bpf_shtab_bucket *buckets;
 839	u32 buckets_num;
 840	u32 elem_size;
 841	struct sk_psock_progs progs;
 842	atomic_t count;
 843};
 844
 845static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
 846{
 847	return jhash(key, len, 0);
 848}
 849
 850static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab,
 851							u32 hash)
 852{
 853	return &htab->buckets[hash & (htab->buckets_num - 1)];
 854}
 855
 856static struct bpf_shtab_elem *
 857sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
 858			  u32 key_size)
 859{
 860	struct bpf_shtab_elem *elem;
 861
 862	hlist_for_each_entry_rcu(elem, head, node) {
 863		if (elem->hash == hash &&
 864		    !memcmp(&elem->key, key, key_size))
 865			return elem;
 866	}
 867
 868	return NULL;
 869}
 870
 871static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
 872{
 873	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 874	u32 key_size = map->key_size, hash;
 875	struct bpf_shtab_bucket *bucket;
 876	struct bpf_shtab_elem *elem;
 877
 878	WARN_ON_ONCE(!rcu_read_lock_held());
 879
 880	hash = sock_hash_bucket_hash(key, key_size);
 881	bucket = sock_hash_select_bucket(htab, hash);
 882	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
 883
 884	return elem ? elem->sk : NULL;
 885}
 886
 887static void sock_hash_free_elem(struct bpf_shtab *htab,
 888				struct bpf_shtab_elem *elem)
 889{
 890	atomic_dec(&htab->count);
 891	kfree_rcu(elem, rcu);
 892}
 893
 894static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
 895				       void *link_raw)
 896{
 897	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 898	struct bpf_shtab_elem *elem_probe, *elem = link_raw;
 899	struct bpf_shtab_bucket *bucket;
 900
 901	WARN_ON_ONCE(!rcu_read_lock_held());
 902	bucket = sock_hash_select_bucket(htab, elem->hash);
 903
 904	/* elem may be deleted in parallel from the map, but access here
 905	 * is okay since it's going away only after an RCU grace period.
 906	 * However, we need to check whether it's still present.
 907	 */
 908	raw_spin_lock_bh(&bucket->lock);
 909	elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
 910					       elem->key, map->key_size);
 911	if (elem_probe && elem_probe == elem) {
 912		hlist_del_rcu(&elem->node);
 913		sock_map_unref(elem->sk, elem);
 914		sock_hash_free_elem(htab, elem);
 915	}
 916	raw_spin_unlock_bh(&bucket->lock);
 917}
 918
 919static int sock_hash_delete_elem(struct bpf_map *map, void *key)
 920{
 921	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 922	u32 hash, key_size = map->key_size;
 923	struct bpf_shtab_bucket *bucket;
 924	struct bpf_shtab_elem *elem;
 925	int ret = -ENOENT;
 926
 927	hash = sock_hash_bucket_hash(key, key_size);
 928	bucket = sock_hash_select_bucket(htab, hash);
 929
 930	raw_spin_lock_bh(&bucket->lock);
 931	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
 932	if (elem) {
 933		hlist_del_rcu(&elem->node);
 934		sock_map_unref(elem->sk, elem);
 935		sock_hash_free_elem(htab, elem);
 936		ret = 0;
 937	}
 938	raw_spin_unlock_bh(&bucket->lock);
 939	return ret;
 940}
 941
 942static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
 943						   void *key, u32 key_size,
 944						   u32 hash, struct sock *sk,
 945						   struct bpf_shtab_elem *old)
 946{
 947	struct bpf_shtab_elem *new;
 948
 949	if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
 950		if (!old) {
 951			atomic_dec(&htab->count);
 952			return ERR_PTR(-E2BIG);
 953		}
 954	}
 955
 956	new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
 957				   GFP_ATOMIC | __GFP_NOWARN,
 958				   htab->map.numa_node);
 959	if (!new) {
 960		atomic_dec(&htab->count);
 961		return ERR_PTR(-ENOMEM);
 962	}
 963	memcpy(new->key, key, key_size);
 964	new->sk = sk;
 965	new->hash = hash;
 966	return new;
 967}
 968
 969static int sock_hash_update_common(struct bpf_map *map, void *key,
 970				   struct sock *sk, u64 flags)
 971{
 972	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 973	u32 key_size = map->key_size, hash;
 974	struct bpf_shtab_elem *elem, *elem_new;
 975	struct bpf_shtab_bucket *bucket;
 976	struct sk_psock_link *link;
 977	struct sk_psock *psock;
 978	int ret;
 979
 980	WARN_ON_ONCE(!rcu_read_lock_held());
 981	if (unlikely(flags > BPF_EXIST))
 982		return -EINVAL;
 983
 984	link = sk_psock_init_link();
 985	if (!link)
 986		return -ENOMEM;
 987
 988	ret = sock_map_link(map, sk);
 989	if (ret < 0)
 990		goto out_free;
 991
 992	psock = sk_psock(sk);
 993	WARN_ON_ONCE(!psock);
 994
 995	hash = sock_hash_bucket_hash(key, key_size);
 996	bucket = sock_hash_select_bucket(htab, hash);
 997
 998	raw_spin_lock_bh(&bucket->lock);
 999	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
1000	if (elem && flags == BPF_NOEXIST) {
1001		ret = -EEXIST;
1002		goto out_unlock;
1003	} else if (!elem && flags == BPF_EXIST) {
1004		ret = -ENOENT;
1005		goto out_unlock;
1006	}
1007
1008	elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
1009	if (IS_ERR(elem_new)) {
1010		ret = PTR_ERR(elem_new);
1011		goto out_unlock;
1012	}
1013
1014	sock_map_add_link(psock, link, map, elem_new);
1015	/* Add the new element to the head of the list, so that a
1016	 * concurrent search will find it before the old elem.
1017	 */
1018	hlist_add_head_rcu(&elem_new->node, &bucket->head);
1019	if (elem) {
1020		hlist_del_rcu(&elem->node);
1021		sock_map_unref(elem->sk, elem);
1022		sock_hash_free_elem(htab, elem);
1023	}
1024	raw_spin_unlock_bh(&bucket->lock);
1025	return 0;
1026out_unlock:
1027	raw_spin_unlock_bh(&bucket->lock);
1028	sk_psock_put(sk, psock);
1029out_free:
1030	sk_psock_free_link(link);
1031	return ret;
1032}
1033
1034static int sock_hash_get_next_key(struct bpf_map *map, void *key,
1035				  void *key_next)
1036{
1037	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1038	struct bpf_shtab_elem *elem, *elem_next;
1039	u32 hash, key_size = map->key_size;
1040	struct hlist_head *head;
1041	int i = 0;
1042
1043	if (!key)
1044		goto find_first_elem;
1045	hash = sock_hash_bucket_hash(key, key_size);
1046	head = &sock_hash_select_bucket(htab, hash)->head;
1047	elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
1048	if (!elem)
1049		goto find_first_elem;
1050
1051	elem_next = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&elem->node)),
1052				     struct bpf_shtab_elem, node);
1053	if (elem_next) {
1054		memcpy(key_next, elem_next->key, key_size);
1055		return 0;
1056	}
1057
1058	i = hash & (htab->buckets_num - 1);
1059	i++;
1060find_first_elem:
1061	for (; i < htab->buckets_num; i++) {
1062		head = &sock_hash_select_bucket(htab, i)->head;
1063		elem_next = hlist_entry_safe(rcu_dereference(hlist_first_rcu(head)),
1064					     struct bpf_shtab_elem, node);
1065		if (elem_next) {
1066			memcpy(key_next, elem_next->key, key_size);
1067			return 0;
1068		}
1069	}
1070
1071	return -ENOENT;
1072}
1073
1074static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
1075{
1076	struct bpf_shtab *htab;
1077	int i, err;
1078
1079	if (!capable(CAP_NET_ADMIN))
1080		return ERR_PTR(-EPERM);
1081	if (attr->max_entries == 0 ||
1082	    attr->key_size    == 0 ||
1083	    (attr->value_size != sizeof(u32) &&
1084	     attr->value_size != sizeof(u64)) ||
1085	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
1086		return ERR_PTR(-EINVAL);
1087	if (attr->key_size > MAX_BPF_STACK)
1088		return ERR_PTR(-E2BIG);
1089
1090	htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
1091	if (!htab)
1092		return ERR_PTR(-ENOMEM);
1093
1094	bpf_map_init_from_attr(&htab->map, attr);
1095
1096	htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
1097	htab->elem_size = sizeof(struct bpf_shtab_elem) +
1098			  round_up(htab->map.key_size, 8);
1099	if (htab->buckets_num == 0 ||
1100	    htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) {
1101		err = -EINVAL;
1102		goto free_htab;
1103	}
1104
1105	htab->buckets = bpf_map_area_alloc(htab->buckets_num *
1106					   sizeof(struct bpf_shtab_bucket),
1107					   htab->map.numa_node);
1108	if (!htab->buckets) {
1109		err = -ENOMEM;
1110		goto free_htab;
1111	}
1112
1113	for (i = 0; i < htab->buckets_num; i++) {
1114		INIT_HLIST_HEAD(&htab->buckets[i].head);
1115		raw_spin_lock_init(&htab->buckets[i].lock);
1116	}
1117
1118	return &htab->map;
1119free_htab:
1120	bpf_map_area_free(htab);
1121	return ERR_PTR(err);
1122}
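
/*
 * User-space sketch for the hash variant: unlike sockmap, key_size is only
 * bounded by MAX_BPF_STACK, so richer keys work. The 4-tuple-style struct
 * below is illustrative. Assumes libbpf.
 */
#include <bpf/bpf.h>

struct example_key {
	__u32 ip4_src;
	__u32 ip4_dst;
	__u16 sport;
	__u16 dport;
};

static int create_sockhash(void)
{
	return bpf_map_create(BPF_MAP_TYPE_SOCKHASH, "example_sockhash",
			      sizeof(struct example_key), sizeof(__u64),
			      1024, NULL);
}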
1123
1124static void sock_hash_free(struct bpf_map *map)
1125{
1126	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1127	struct bpf_shtab_bucket *bucket;
1128	struct hlist_head unlink_list;
1129	struct bpf_shtab_elem *elem;
1130	struct hlist_node *node;
1131	int i;
1132
1133	/* After the sync no updates or deletes will be in-flight so it
1134	 * is safe to walk the map and remove entries without risking a
1135	 * race in the EEXIST update case.
1136	 */
1137	synchronize_rcu();
1138	for (i = 0; i < htab->buckets_num; i++) {
1139		bucket = sock_hash_select_bucket(htab, i);
1140
1141		/* We are racing with sock_hash_delete_from_link to
1142		 * enter the spin-lock critical section. Every socket on
1143		 * the list is still linked to sockhash. Since link
1144		 * exists, psock exists and holds a ref to socket. That
1145		 * lets us grab a socket ref too.
1146		 */
1147		raw_spin_lock_bh(&bucket->lock);
1148		hlist_for_each_entry(elem, &bucket->head, node)
1149			sock_hold(elem->sk);
1150		hlist_move_list(&bucket->head, &unlink_list);
1151		raw_spin_unlock_bh(&bucket->lock);
1152
1153		/* Process removed entries out of atomic context to
1154		 * block for socket lock before deleting the psock's
1155		 * link to sockhash.
1156		 */
1157		hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
1158			hlist_del(&elem->node);
1159			lock_sock(elem->sk);
1160			rcu_read_lock();
1161			sock_map_unref(elem->sk, elem);
1162			rcu_read_unlock();
1163			release_sock(elem->sk);
1164			sock_put(elem->sk);
1165			sock_hash_free_elem(htab, elem);
1166		}
1167	}
1168
1169	/* wait for psock readers accessing its map link */
1170	synchronize_rcu();
1171
1172	bpf_map_area_free(htab->buckets);
1173	bpf_map_area_free(htab);
1174}
1175
1176static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
1177{
1178	struct sock *sk;
1179
1180	if (map->value_size != sizeof(u64))
1181		return ERR_PTR(-ENOSPC);
1182
1183	sk = __sock_hash_lookup_elem(map, key);
1184	if (!sk)
1185		return ERR_PTR(-ENOENT);
1186
1187	__sock_gen_cookie(sk);
1188	return &sk->sk_cookie;
1189}
1190
1191static void *sock_hash_lookup(struct bpf_map *map, void *key)
1192{
1193	struct sock *sk;
1194
1195	sk = __sock_hash_lookup_elem(map, key);
1196	if (!sk)
1197		return NULL;
1198	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
1199		return NULL;
1200	return sk;
1201}
1202
1203static void sock_hash_release_progs(struct bpf_map *map)
1204{
1205	psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs);
1206}
1207
1208BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
1209	   struct bpf_map *, map, void *, key, u64, flags)
1210{
1211	WARN_ON_ONCE(!rcu_read_lock_held());
1212
1213	if (likely(sock_map_sk_is_suitable(sops->sk) &&
1214		   sock_map_op_okay(sops)))
1215		return sock_hash_update_common(map, key, sops->sk, flags);
1216	return -EOPNOTSUPP;
1217}
1218
1219const struct bpf_func_proto bpf_sock_hash_update_proto = {
1220	.func		= bpf_sock_hash_update,
1221	.gpl_only	= false,
1222	.pkt_access	= true,
1223	.ret_type	= RET_INTEGER,
1224	.arg1_type	= ARG_PTR_TO_CTX,
1225	.arg2_type	= ARG_CONST_MAP_PTR,
1226	.arg3_type	= ARG_PTR_TO_MAP_KEY,
1227	.arg4_type	= ARG_ANYTHING,
1228};
1229
1230BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
1231	   struct bpf_map *, map, void *, key, u64, flags)
1232{
1233	struct sock *sk;
1234
1235	if (unlikely(flags & ~(BPF_F_INGRESS)))
1236		return SK_DROP;
1237
1238	sk = __sock_hash_lookup_elem(map, key);
1239	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1240		return SK_DROP;
1241
1242	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
1243	return SK_PASS;
1244}
1245
1246const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
1247	.func           = bpf_sk_redirect_hash,
1248	.gpl_only       = false,
1249	.ret_type       = RET_INTEGER,
1250	.arg1_type	= ARG_PTR_TO_CTX,
1251	.arg2_type      = ARG_CONST_MAP_PTR,
1252	.arg3_type      = ARG_PTR_TO_MAP_KEY,
1253	.arg4_type      = ARG_ANYTHING,
1254};
1255
1256BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
1257	   struct bpf_map *, map, void *, key, u64, flags)
1258{
1259	struct sock *sk;
1260
1261	if (unlikely(flags & ~(BPF_F_INGRESS)))
1262		return SK_DROP;
1263
1264	sk = __sock_hash_lookup_elem(map, key);
1265	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1266		return SK_DROP;
1267
1268	msg->flags = flags;
1269	msg->sk_redir = sk;
1270	return SK_PASS;
1271}
1272
1273const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
1274	.func           = bpf_msg_redirect_hash,
1275	.gpl_only       = false,
1276	.ret_type       = RET_INTEGER,
1277	.arg1_type	= ARG_PTR_TO_CTX,
1278	.arg2_type      = ARG_CONST_MAP_PTR,
1279	.arg3_type      = ARG_PTR_TO_MAP_KEY,
1280	.arg4_type      = ARG_ANYTHING,
1281};
1282
1283struct sock_hash_seq_info {
1284	struct bpf_map *map;
1285	struct bpf_shtab *htab;
1286	u32 bucket_id;
1287};
1288
1289static void *sock_hash_seq_find_next(struct sock_hash_seq_info *info,
1290				     struct bpf_shtab_elem *prev_elem)
1291{
1292	const struct bpf_shtab *htab = info->htab;
1293	struct bpf_shtab_bucket *bucket;
1294	struct bpf_shtab_elem *elem;
1295	struct hlist_node *node;
1296
1297	/* try to find next elem in the same bucket */
1298	if (prev_elem) {
1299		node = rcu_dereference(hlist_next_rcu(&prev_elem->node));
1300		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
1301		if (elem)
1302			return elem;
1303
1304		/* no more elements, continue in the next bucket */
1305		info->bucket_id++;
1306	}
1307
1308	for (; info->bucket_id < htab->buckets_num; info->bucket_id++) {
1309		bucket = &htab->buckets[info->bucket_id];
1310		node = rcu_dereference(hlist_first_rcu(&bucket->head));
1311		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
1312		if (elem)
1313			return elem;
1314	}
1315
1316	return NULL;
1317}
1318
1319static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos)
1320	__acquires(rcu)
1321{
1322	struct sock_hash_seq_info *info = seq->private;
1323
1324	if (*pos == 0)
1325		++*pos;
1326
1327	/* pairs with sock_hash_seq_stop */
1328	rcu_read_lock();
1329	return sock_hash_seq_find_next(info, NULL);
1330}
1331
1332static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1333	__must_hold(rcu)
1334{
1335	struct sock_hash_seq_info *info = seq->private;
1336
1337	++*pos;
1338	return sock_hash_seq_find_next(info, v);
1339}
1340
1341static int sock_hash_seq_show(struct seq_file *seq, void *v)
1342	__must_hold(rcu)
1343{
1344	struct sock_hash_seq_info *info = seq->private;
1345	struct bpf_iter__sockmap ctx = {};
1346	struct bpf_shtab_elem *elem = v;
1347	struct bpf_iter_meta meta;
1348	struct bpf_prog *prog;
1349
1350	meta.seq = seq;
1351	prog = bpf_iter_get_info(&meta, !elem);
1352	if (!prog)
1353		return 0;
1354
1355	ctx.meta = &meta;
1356	ctx.map = info->map;
1357	if (elem) {
1358		ctx.key = elem->key;
1359		ctx.sk = elem->sk;
1360	}
1361
1362	return bpf_iter_run_prog(prog, &ctx);
1363}
1364
1365static void sock_hash_seq_stop(struct seq_file *seq, void *v)
1366	__releases(rcu)
1367{
1368	if (!v)
1369		(void)sock_hash_seq_show(seq, NULL);
1370
1371	/* pairs with sock_hash_seq_start */
1372	rcu_read_unlock();
1373}
1374
1375static const struct seq_operations sock_hash_seq_ops = {
1376	.start	= sock_hash_seq_start,
1377	.next	= sock_hash_seq_next,
1378	.stop	= sock_hash_seq_stop,
1379	.show	= sock_hash_seq_show,
1380};
1381
1382static int sock_hash_init_seq_private(void *priv_data,
1383				      struct bpf_iter_aux_info *aux)
1384{
1385	struct sock_hash_seq_info *info = priv_data;
1386
1387	bpf_map_inc_with_uref(aux->map);
1388	info->map = aux->map;
1389	info->htab = container_of(aux->map, struct bpf_shtab, map);
1390	return 0;
1391}
1392
1393static void sock_hash_fini_seq_private(void *priv_data)
1394{
1395	struct sock_hash_seq_info *info = priv_data;
1396
1397	bpf_map_put_with_uref(info->map);
1398}
1399
1400static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
1401	.seq_ops		= &sock_hash_seq_ops,
1402	.init_seq_private	= sock_hash_init_seq_private,
1403	.fini_seq_private	= sock_hash_fini_seq_private,
1404	.seq_priv_size		= sizeof(struct sock_hash_seq_info),
1405};
1406
1407BTF_ID_LIST_SINGLE(sock_hash_map_btf_ids, struct, bpf_shtab)
1408const struct bpf_map_ops sock_hash_ops = {
1409	.map_meta_equal		= bpf_map_meta_equal,
1410	.map_alloc		= sock_hash_alloc,
1411	.map_free		= sock_hash_free,
1412	.map_get_next_key	= sock_hash_get_next_key,
1413	.map_update_elem	= sock_map_update_elem,
1414	.map_delete_elem	= sock_hash_delete_elem,
1415	.map_lookup_elem	= sock_hash_lookup,
1416	.map_lookup_elem_sys_only = sock_hash_lookup_sys,
1417	.map_release_uref	= sock_hash_release_progs,
1418	.map_check_btf		= map_check_no_btf,
1419	.map_btf_id		= &sock_hash_map_btf_ids[0],
1420	.iter_seq_info		= &sock_hash_iter_seq_info,
1421};
1422
1423static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
1424{
1425	switch (map->map_type) {
1426	case BPF_MAP_TYPE_SOCKMAP:
1427		return &container_of(map, struct bpf_stab, map)->progs;
1428	case BPF_MAP_TYPE_SOCKHASH:
1429		return &container_of(map, struct bpf_shtab, map)->progs;
1430	default:
1431		break;
1432	}
1433
1434	return NULL;
1435}
1436
1437static int sock_map_prog_lookup(struct bpf_map *map, struct bpf_prog ***pprog,
1438				u32 which)
1439{
1440	struct sk_psock_progs *progs = sock_map_progs(map);
1441
1442	if (!progs)
1443		return -EOPNOTSUPP;
1444
1445	switch (which) {
1446	case BPF_SK_MSG_VERDICT:
1447		*pprog = &progs->msg_parser;
1448		break;
1449#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
1450	case BPF_SK_SKB_STREAM_PARSER:
1451		*pprog = &progs->stream_parser;
1452		break;
1453#endif
1454	case BPF_SK_SKB_STREAM_VERDICT:
1455		if (progs->skb_verdict)
1456			return -EBUSY;
1457		*pprog = &progs->stream_verdict;
1458		break;
1459	case BPF_SK_SKB_VERDICT:
1460		if (progs->stream_verdict)
1461			return -EBUSY;
1462		*pprog = &progs->skb_verdict;
1463		break;
1464	default:
1465		return -EOPNOTSUPP;
1466	}
1467
1468	return 0;
1469}
1470
1471static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
1472				struct bpf_prog *old, u32 which)
1473{
1474	struct bpf_prog **pprog;
1475	int ret;
1476
1477	ret = sock_map_prog_lookup(map, &pprog, which);
1478	if (ret)
1479		return ret;
1480
1481	if (old)
1482		return psock_replace_prog(pprog, prog, old);
1483
1484	psock_set_prog(pprog, prog);
1485	return 0;
1486}
1487
1488int sock_map_bpf_prog_query(const union bpf_attr *attr,
1489			    union bpf_attr __user *uattr)
1490{
1491	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
1492	u32 prog_cnt = 0, flags = 0, ufd = attr->target_fd;
1493	struct bpf_prog **pprog;
1494	struct bpf_prog *prog;
1495	struct bpf_map *map;
1496	struct fd f;
1497	u32 id = 0;
1498	int ret;
1499
1500	if (attr->query.query_flags)
1501		return -EINVAL;
1502
1503	f = fdget(ufd);
1504	map = __bpf_map_get(f);
1505	if (IS_ERR(map))
1506		return PTR_ERR(map);
1507
1508	rcu_read_lock();
1509
1510	ret = sock_map_prog_lookup(map, &pprog, attr->query.attach_type);
1511	if (ret)
1512		goto end;
1513
1514	prog = *pprog;
1515	prog_cnt = !prog ? 0 : 1;
1516
1517	if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
1518		goto end;
1519
1520	/* we do not hold the refcnt, the bpf prog may be released
1521	 * asynchronously and the id would be set to 0.
1522	 */
1523	id = data_race(prog->aux->id);
1524	if (id == 0)
1525		prog_cnt = 0;
1526
1527end:
1528	rcu_read_unlock();
1529
1530	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)) ||
1531	    (id != 0 && copy_to_user(prog_ids, &id, sizeof(u32))) ||
1532	    copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
1533		ret = -EFAULT;
1534
1535	fdput(f);
1536	return ret;
1537}
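
/*
 * Query sketch: retrieving the id of the program attached to a sockmap
 * for one attach type via libbpf's bpf_prog_query(), which lands in
 * sock_map_bpf_prog_query() above. query_flags must be 0.
 */
#include <bpf/bpf.h>

static int query_verdict_id(int map_fd, __u32 *id)
{
	__u32 attach_flags = 0, cnt = 1;

	return bpf_prog_query(map_fd, BPF_SK_SKB_STREAM_VERDICT, 0,
			      &attach_flags, id, &cnt);
}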
1538
1539static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link)
1540{
1541	switch (link->map->map_type) {
1542	case BPF_MAP_TYPE_SOCKMAP:
1543		return sock_map_delete_from_link(link->map, sk,
1544						 link->link_raw);
1545	case BPF_MAP_TYPE_SOCKHASH:
1546		return sock_hash_delete_from_link(link->map, sk,
1547						  link->link_raw);
1548	default:
1549		break;
1550	}
1551}
1552
1553static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock)
1554{
1555	struct sk_psock_link *link;
1556
1557	while ((link = sk_psock_link_pop(psock))) {
1558		sock_map_unlink(sk, link);
1559		sk_psock_free_link(link);
1560	}
1561}
1562
1563void sock_map_unhash(struct sock *sk)
1564{
1565	void (*saved_unhash)(struct sock *sk);
1566	struct sk_psock *psock;
1567
1568	rcu_read_lock();
1569	psock = sk_psock(sk);
1570	if (unlikely(!psock)) {
1571		rcu_read_unlock();
1572		saved_unhash = READ_ONCE(sk->sk_prot)->unhash;
1573	} else {
1574		saved_unhash = psock->saved_unhash;
1575		sock_map_remove_links(sk, psock);
1576		rcu_read_unlock();
1577	}
1578	if (WARN_ON_ONCE(saved_unhash == sock_map_unhash))
1579		return;
1580	if (saved_unhash)
1581		saved_unhash(sk);
1582}
1583EXPORT_SYMBOL_GPL(sock_map_unhash);
1584
1585void sock_map_destroy(struct sock *sk)
1586{
1587	void (*saved_destroy)(struct sock *sk);
1588	struct sk_psock *psock;
1589
1590	rcu_read_lock();
1591	psock = sk_psock_get(sk);
1592	if (unlikely(!psock)) {
1593		rcu_read_unlock();
1594		saved_destroy = READ_ONCE(sk->sk_prot)->destroy;
1595	} else {
1596		saved_destroy = psock->saved_destroy;
1597		sock_map_remove_links(sk, psock);
1598		rcu_read_unlock();
1599		sk_psock_stop(psock);
1600		sk_psock_put(sk, psock);
1601	}
1602	if (WARN_ON_ONCE(saved_destroy == sock_map_destroy))
1603		return;
1604	if (saved_destroy)
1605		saved_destroy(sk);
1606}
1607EXPORT_SYMBOL_GPL(sock_map_destroy);
1608
1609void sock_map_close(struct sock *sk, long timeout)
1610{
1611	void (*saved_close)(struct sock *sk, long timeout);
1612	struct sk_psock *psock;
1613
1614	lock_sock(sk);
1615	rcu_read_lock();
1616	psock = sk_psock_get(sk);
1617	if (unlikely(!psock)) {
1618		rcu_read_unlock();
1619		release_sock(sk);
1620		saved_close = READ_ONCE(sk->sk_prot)->close;
1621	} else {
1622		saved_close = psock->saved_close;
1623		sock_map_remove_links(sk, psock);
1624		rcu_read_unlock();
1625		sk_psock_stop(psock);
1626		release_sock(sk);
1627		cancel_work_sync(&psock->work);
1628		sk_psock_put(sk, psock);
1629	}
1630	/* Make sure we do not recurse. This is a bug.
1631	 * Leak the socket instead of crashing on a stack overflow.
1632	 */
1633	if (WARN_ON_ONCE(saved_close == sock_map_close))
1634		return;
1635	saved_close(sk, timeout);
1636}
1637EXPORT_SYMBOL_GPL(sock_map_close);
1638
1639static int sock_map_iter_attach_target(struct bpf_prog *prog,
1640				       union bpf_iter_link_info *linfo,
1641				       struct bpf_iter_aux_info *aux)
1642{
1643	struct bpf_map *map;
1644	int err = -EINVAL;
1645
1646	if (!linfo->map.map_fd)
1647		return -EBADF;
1648
1649	map = bpf_map_get_with_uref(linfo->map.map_fd);
1650	if (IS_ERR(map))
1651		return PTR_ERR(map);
1652
1653	if (map->map_type != BPF_MAP_TYPE_SOCKMAP &&
1654	    map->map_type != BPF_MAP_TYPE_SOCKHASH)
1655		goto put_map;
1656
1657	if (prog->aux->max_rdonly_access > map->key_size) {
1658		err = -EACCES;
1659		goto put_map;
1660	}
1661
1662	aux->map = map;
1663	return 0;
1664
1665put_map:
1666	bpf_map_put_with_uref(map);
1667	return err;
1668}
1669
1670static void sock_map_iter_detach_target(struct bpf_iter_aux_info *aux)
1671{
1672	bpf_map_put_with_uref(aux->map);
1673}
1674
1675static struct bpf_iter_reg sock_map_iter_reg = {
1676	.target			= "sockmap",
1677	.attach_target		= sock_map_iter_attach_target,
1678	.detach_target		= sock_map_iter_detach_target,
1679	.show_fdinfo		= bpf_iter_map_show_fdinfo,
1680	.fill_link_info		= bpf_iter_map_fill_link_info,
1681	.ctx_arg_info_size	= 2,
1682	.ctx_arg_info		= {
1683		{ offsetof(struct bpf_iter__sockmap, key),
1684		  PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY },
1685		{ offsetof(struct bpf_iter__sockmap, sk),
1686		  PTR_TO_BTF_ID_OR_NULL },
1687	},
1688};
1689
1690static int __init bpf_sockmap_iter_init(void)
1691{
1692	sock_map_iter_reg.ctx_arg_info[1].btf_id =
1693		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
1694	return bpf_iter_reg_target(&sock_map_iter_reg);
1695}
1696late_initcall(bpf_sockmap_iter_init);
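
/*
 * A sketch of a BPF iterator over this target: SEC("iter/sockmap")
 * programs receive the bpf_iter__sockmap context defined above; key and
 * sk are NULL on the final call. Assumes vmlinux.h and libbpf headers.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("iter/sockmap")
int iter_dump(struct bpf_iter__sockmap *ctx)
{
	__u32 *key = ctx->key;

	if (!key || !ctx->sk)
		return 0;

	BPF_SEQ_PRINTF(ctx->meta->seq, "index %u\n", *key);
	return 0;
}

char _license[] SEC("license") = "GPL";
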
v6.9.4 (net/core/sock_map.c)
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
   3
   4#include <linux/bpf.h>
   5#include <linux/btf_ids.h>
   6#include <linux/filter.h>
   7#include <linux/errno.h>
   8#include <linux/file.h>
   9#include <linux/net.h>
  10#include <linux/workqueue.h>
  11#include <linux/skmsg.h>
  12#include <linux/list.h>
  13#include <linux/jhash.h>
  14#include <linux/sock_diag.h>
  15#include <net/udp.h>
  16
  17struct bpf_stab {
  18	struct bpf_map map;
  19	struct sock **sks;
  20	struct sk_psock_progs progs;
  21	spinlock_t lock;
  22};
  23
  24#define SOCK_CREATE_FLAG_MASK				\
  25	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
  26
  27static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
  28				struct bpf_prog *old, u32 which);
  29static struct sk_psock_progs *sock_map_progs(struct bpf_map *map);
  30
  31static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
  32{
  33	struct bpf_stab *stab;
  34
  35	if (attr->max_entries == 0 ||
  36	    attr->key_size    != 4 ||
  37	    (attr->value_size != sizeof(u32) &&
  38	     attr->value_size != sizeof(u64)) ||
  39	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
  40		return ERR_PTR(-EINVAL);
  41
  42	stab = bpf_map_area_alloc(sizeof(*stab), NUMA_NO_NODE);
  43	if (!stab)
  44		return ERR_PTR(-ENOMEM);
  45
  46	bpf_map_init_from_attr(&stab->map, attr);
  47	spin_lock_init(&stab->lock);
  48
  49	stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
  50				       sizeof(struct sock *),
  51				       stab->map.numa_node);
  52	if (!stab->sks) {
  53		bpf_map_area_free(stab);
  54		return ERR_PTR(-ENOMEM);
  55	}
  56
  57	return &stab->map;
  58}
  59
  60int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
  61{
  62	u32 ufd = attr->target_fd;
  63	struct bpf_map *map;
  64	struct fd f;
  65	int ret;
  66
  67	if (attr->attach_flags || attr->replace_bpf_fd)
  68		return -EINVAL;
  69
  70	f = fdget(ufd);
  71	map = __bpf_map_get(f);
  72	if (IS_ERR(map))
  73		return PTR_ERR(map);
  74	ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
  75	fdput(f);
  76	return ret;
  77}
  78
  79int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
  80{
  81	u32 ufd = attr->target_fd;
  82	struct bpf_prog *prog;
  83	struct bpf_map *map;
  84	struct fd f;
  85	int ret;
  86
  87	if (attr->attach_flags || attr->replace_bpf_fd)
  88		return -EINVAL;
  89
  90	f = fdget(ufd);
  91	map = __bpf_map_get(f);
  92	if (IS_ERR(map))
  93		return PTR_ERR(map);
  94
  95	prog = bpf_prog_get(attr->attach_bpf_fd);
  96	if (IS_ERR(prog)) {
  97		ret = PTR_ERR(prog);
  98		goto put_map;
  99	}
 100
 101	if (prog->type != ptype) {
 102		ret = -EINVAL;
 103		goto put_prog;
 104	}
 105
 106	ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
 107put_prog:
 108	bpf_prog_put(prog);
 109put_map:
 110	fdput(f);
 111	return ret;
 112}
 113
 114static void sock_map_sk_acquire(struct sock *sk)
 115	__acquires(&sk->sk_lock.slock)
 116{
 117	lock_sock(sk);
 118	rcu_read_lock();
 119}
 120
 121static void sock_map_sk_release(struct sock *sk)
 122	__releases(&sk->sk_lock.slock)
 123{
 124	rcu_read_unlock();
 125	release_sock(sk);
 126}
 127
 128static void sock_map_add_link(struct sk_psock *psock,
 129			      struct sk_psock_link *link,
 130			      struct bpf_map *map, void *link_raw)
 131{
 132	link->link_raw = link_raw;
 133	link->map = map;
 134	spin_lock_bh(&psock->link_lock);
 135	list_add_tail(&link->list, &psock->link);
 136	spin_unlock_bh(&psock->link_lock);
 137}
 138
 139static void sock_map_del_link(struct sock *sk,
 140			      struct sk_psock *psock, void *link_raw)
 141{
 142	bool strp_stop = false, verdict_stop = false;
 143	struct sk_psock_link *link, *tmp;
 144
 145	spin_lock_bh(&psock->link_lock);
 146	list_for_each_entry_safe(link, tmp, &psock->link, list) {
 147		if (link->link_raw == link_raw) {
 148			struct bpf_map *map = link->map;
 149			struct sk_psock_progs *progs = sock_map_progs(map);
 150
 151			if (psock->saved_data_ready && progs->stream_parser)
 152				strp_stop = true;
 153			if (psock->saved_data_ready && progs->stream_verdict)
 154				verdict_stop = true;
 155			if (psock->saved_data_ready && progs->skb_verdict)
 156				verdict_stop = true;
 157			list_del(&link->list);
 158			sk_psock_free_link(link);
 159		}
 160	}
 161	spin_unlock_bh(&psock->link_lock);
 162	if (strp_stop || verdict_stop) {
 163		write_lock_bh(&sk->sk_callback_lock);
 164		if (strp_stop)
 165			sk_psock_stop_strp(sk, psock);
 166		if (verdict_stop)
 167			sk_psock_stop_verdict(sk, psock);
 168
 169		if (psock->psock_update_sk_prot)
 170			psock->psock_update_sk_prot(sk, psock, false);
 171		write_unlock_bh(&sk->sk_callback_lock);
 172	}
 173}
 174
 175static void sock_map_unref(struct sock *sk, void *link_raw)
 176{
 177	struct sk_psock *psock = sk_psock(sk);
 178
 179	if (likely(psock)) {
 180		sock_map_del_link(sk, psock, link_raw);
 181		sk_psock_put(sk, psock);
 182	}
 183}
 184
 185static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
 186{
 187	if (!sk->sk_prot->psock_update_sk_prot)
 188		return -EINVAL;
 189	psock->psock_update_sk_prot = sk->sk_prot->psock_update_sk_prot;
 190	return sk->sk_prot->psock_update_sk_prot(sk, psock, false);
 191}
 192
 193static struct sk_psock *sock_map_psock_get_checked(struct sock *sk)
 194{
 195	struct sk_psock *psock;
 196
 197	rcu_read_lock();
 198	psock = sk_psock(sk);
 199	if (psock) {
 200		if (sk->sk_prot->close != sock_map_close) {
 201			psock = ERR_PTR(-EBUSY);
 202			goto out;
 203		}
 204
 205		if (!refcount_inc_not_zero(&psock->refcnt))
 206			psock = ERR_PTR(-EBUSY);
 207	}
 208out:
 209	rcu_read_unlock();
 210	return psock;
 211}
 212
 213static int sock_map_link(struct bpf_map *map, struct sock *sk)
 214{
 215	struct sk_psock_progs *progs = sock_map_progs(map);
 216	struct bpf_prog *stream_verdict = NULL;
 217	struct bpf_prog *stream_parser = NULL;
 218	struct bpf_prog *skb_verdict = NULL;
 219	struct bpf_prog *msg_parser = NULL;
 220	struct sk_psock *psock;
 221	int ret;
 222
 223	stream_verdict = READ_ONCE(progs->stream_verdict);
 224	if (stream_verdict) {
 225		stream_verdict = bpf_prog_inc_not_zero(stream_verdict);
 226		if (IS_ERR(stream_verdict))
 227			return PTR_ERR(stream_verdict);
 228	}
 229
 230	stream_parser = READ_ONCE(progs->stream_parser);
 231	if (stream_parser) {
 232		stream_parser = bpf_prog_inc_not_zero(stream_parser);
 233		if (IS_ERR(stream_parser)) {
 234			ret = PTR_ERR(stream_parser);
 235			goto out_put_stream_verdict;
 236		}
 237	}
 238
 239	msg_parser = READ_ONCE(progs->msg_parser);
 240	if (msg_parser) {
 241		msg_parser = bpf_prog_inc_not_zero(msg_parser);
 242		if (IS_ERR(msg_parser)) {
 243			ret = PTR_ERR(msg_parser);
 244			goto out_put_stream_parser;
 245		}
 246	}
 247
 248	skb_verdict = READ_ONCE(progs->skb_verdict);
 249	if (skb_verdict) {
 250		skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
 251		if (IS_ERR(skb_verdict)) {
 252			ret = PTR_ERR(skb_verdict);
 253			goto out_put_msg_parser;
 254		}
 255	}
 256
 257	psock = sock_map_psock_get_checked(sk);
 258	if (IS_ERR(psock)) {
 259		ret = PTR_ERR(psock);
 260		goto out_progs;
 261	}
 262
 263	if (psock) {
 264		if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
 265		    (stream_parser && READ_ONCE(psock->progs.stream_parser)) ||
 266		    (skb_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
 267		    (skb_verdict && READ_ONCE(psock->progs.stream_verdict)) ||
 268		    (stream_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
 269		    (stream_verdict && READ_ONCE(psock->progs.stream_verdict))) {
 270			sk_psock_put(sk, psock);
 271			ret = -EBUSY;
 272			goto out_progs;
 273		}
 274	} else {
 275		psock = sk_psock_init(sk, map->numa_node);
 276		if (IS_ERR(psock)) {
 277			ret = PTR_ERR(psock);
 278			goto out_progs;
 279		}
 280	}
 281
 282	if (msg_parser)
 283		psock_set_prog(&psock->progs.msg_parser, msg_parser);
 284	if (stream_parser)
 285		psock_set_prog(&psock->progs.stream_parser, stream_parser);
 286	if (stream_verdict)
 287		psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
 288	if (skb_verdict)
 289		psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
 290
 291	/* msg_* and stream_* program references are tracked in psock after this
 292	 * point. Reference dec and cleanup will occur through the psock destructor.
 293	 */
 294	ret = sock_map_init_proto(sk, psock);
 295	if (ret < 0) {
 296		sk_psock_put(sk, psock);
 297		goto out;
 298	}
 299
 300	write_lock_bh(&sk->sk_callback_lock);
 301	if (stream_parser && stream_verdict && !psock->saved_data_ready) {
 302		ret = sk_psock_init_strp(sk, psock);
 303		if (ret) {
 304			write_unlock_bh(&sk->sk_callback_lock);
 305			sk_psock_put(sk, psock);
 306			goto out;
 307		}
 308		sk_psock_start_strp(sk, psock);
 309	} else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
 310		sk_psock_start_verdict(sk, psock);
 311	} else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) {
 312		sk_psock_start_verdict(sk, psock);
 313	}
 314	write_unlock_bh(&sk->sk_callback_lock);
 315	return 0;
 316out_progs:
 317	if (skb_verdict)
 318		bpf_prog_put(skb_verdict);
 319out_put_msg_parser:
 320	if (msg_parser)
 321		bpf_prog_put(msg_parser);
 322out_put_stream_parser:
 323	if (stream_parser)
 324		bpf_prog_put(stream_parser);
 325out_put_stream_verdict:
 326	if (stream_verdict)
 327		bpf_prog_put(stream_verdict);
 328out:
 329	return ret;
 330}
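
/* Illustrative sketch, not part of this file: sock_map_link() runs when
 * a socket is added to a map and copies whichever programs are attached
 * to the map into the socket's psock. A minimal userspace attach step,
 * assuming libbpf and already-loaded prog_fd/map_fd descriptors:
 *
 *	#include <bpf/bpf.h>
 *
 *	int attach_stream_verdict(int prog_fd, int map_fd)
 *	{
 *		return bpf_prog_attach(prog_fd, map_fd,
 *				       BPF_SK_SKB_STREAM_VERDICT, 0);
 *	}
 */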
 331
 332static void sock_map_free(struct bpf_map *map)
 333{
 334	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 335	int i;
 336
 337	/* After the sync no updates or deletes will be in-flight so it
 338	 * is safe to walk the map and remove entries without risking a
 339	 * race in the EEXIST update case.
 340	 */
 341	synchronize_rcu();
 342	for (i = 0; i < stab->map.max_entries; i++) {
 343		struct sock **psk = &stab->sks[i];
 344		struct sock *sk;
 345
 346		sk = xchg(psk, NULL);
 347		if (sk) {
 348			sock_hold(sk);
 349			lock_sock(sk);
 350			rcu_read_lock();
 351			sock_map_unref(sk, psk);
 352			rcu_read_unlock();
 353			release_sock(sk);
 354			sock_put(sk);
 355		}
 356	}
 357
 358	/* wait for psock readers accessing its map link */
 359	synchronize_rcu();
 360
 361	bpf_map_area_free(stab->sks);
 362	bpf_map_area_free(stab);
 363}
 364
 365static void sock_map_release_progs(struct bpf_map *map)
 366{
 367	psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
 368}
 369
 370static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
 371{
 372	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 373
 374	WARN_ON_ONCE(!rcu_read_lock_held());
 375
 376	if (unlikely(key >= map->max_entries))
 377		return NULL;
 378	return READ_ONCE(stab->sks[key]);
 379}
 380
 381static void *sock_map_lookup(struct bpf_map *map, void *key)
 382{
 383	struct sock *sk;
 384
 385	sk = __sock_map_lookup_elem(map, *(u32 *)key);
 386	if (!sk)
 387		return NULL;
 388	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
 389		return NULL;
 390	return sk;
 391}
 392
 393static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
 394{
 395	struct sock *sk;
 396
 397	if (map->value_size != sizeof(u64))
 398		return ERR_PTR(-ENOSPC);
 399
 400	sk = __sock_map_lookup_elem(map, *(u32 *)key);
 401	if (!sk)
 402		return ERR_PTR(-ENOENT);
 403
 404	__sock_gen_cookie(sk);
 405	return &sk->sk_cookie;
 406}
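
/* Illustrative sketch, not part of this file: syscall-side lookups
 * cannot hand the socket itself to userspace, so they return the u64
 * socket cookie instead, and only for maps created with an 8-byte
 * value size. Assuming libbpf and a hypothetical map_fd:
 *
 *	__u32 key = 0;
 *	__u64 cookie;
 *
 *	if (!bpf_map_lookup_elem(map_fd, &key, &cookie))
 *		printf("slot 0 holds socket cookie %llu\n", cookie);
 */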
 407
 408static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
 409			     struct sock **psk)
 410{
 411	struct sock *sk;
 412	int err = 0;
 413
 414	if (irqs_disabled())
 415		return -EOPNOTSUPP; /* locks here are hardirq-unsafe */
 416
 417	raw_spin_lock_bh(&stab->lock);
 418	sk = *psk;
 419	if (!sk_test || sk_test == sk)
 420		sk = xchg(psk, NULL);
 421
 422	if (likely(sk))
 423		sock_map_unref(sk, psk);
 424	else
 425		err = -EINVAL;
 426
 427	raw_spin_unlock_bh(&stab->lock);
 428	return err;
 429}
 430
 431static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
 432				      void *link_raw)
 433{
 434	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 435
 436	__sock_map_delete(stab, sk, link_raw);
 437}
 438
 439static long sock_map_delete_elem(struct bpf_map *map, void *key)
 440{
 441	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 442	u32 i = *(u32 *)key;
 443	struct sock **psk;
 444
 445	if (unlikely(i >= map->max_entries))
 446		return -EINVAL;
 447
 448	psk = &stab->sks[i];
 449	return __sock_map_delete(stab, NULL, psk);
 450}
 451
 452static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
 453{
 454	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 455	u32 i = key ? *(u32 *)key : U32_MAX;
 456	u32 *key_next = next;
 457
 458	if (i == stab->map.max_entries - 1)
 459		return -ENOENT;
 460	if (i >= stab->map.max_entries)
 461		*key_next = 0;
 462	else
 463		*key_next = i + 1;
 464	return 0;
 465}
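
/* Illustrative sketch, not part of this file: as with other array-like
 * maps, a NULL or out-of-range key restarts iteration at index 0 and
 * the last index returns -ENOENT. A typical userspace walk, assuming
 * libbpf and a hypothetical map_fd:
 *
 *	__u32 key, next;
 *	int err;
 *
 *	for (err = bpf_map_get_next_key(map_fd, NULL, &next); !err;
 *	     err = bpf_map_get_next_key(map_fd, &key, &next))
 *		key = next;	// visit "key" here
 */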
 466
 467static int sock_map_update_common(struct bpf_map *map, u32 idx,
 468				  struct sock *sk, u64 flags)
 469{
 470	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 471	struct sk_psock_link *link;
 472	struct sk_psock *psock;
 473	struct sock *osk;
 474	int ret;
 475
 476	WARN_ON_ONCE(!rcu_read_lock_held());
 477	if (unlikely(flags > BPF_EXIST))
 478		return -EINVAL;
 479	if (unlikely(idx >= map->max_entries))
 480		return -E2BIG;
 481
 482	link = sk_psock_init_link();
 483	if (!link)
 484		return -ENOMEM;
 485
 486	ret = sock_map_link(map, sk);
 487	if (ret < 0)
 488		goto out_free;
 489
 490	psock = sk_psock(sk);
 491	WARN_ON_ONCE(!psock);
 492
 493	raw_spin_lock_bh(&stab->lock);
 494	osk = stab->sks[idx];
 495	if (osk && flags == BPF_NOEXIST) {
 496		ret = -EEXIST;
 497		goto out_unlock;
 498	} else if (!osk && flags == BPF_EXIST) {
 499		ret = -ENOENT;
 500		goto out_unlock;
 501	}
 502
 503	sock_map_add_link(psock, link, map, &stab->sks[idx]);
 504	stab->sks[idx] = sk;
 505	if (osk)
 506		sock_map_unref(osk, &stab->sks[idx]);
 507	raw_spin_unlock_bh(&stab->lock);
 508	return 0;
 509out_unlock:
 510	raw_spin_unlock_bh(&stab->lock);
 511	if (psock)
 512		sk_psock_put(sk, psock);
 513out_free:
 514	sk_psock_free_link(link);
 515	return ret;
 516}
 517
 518static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
 519{
 520	return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
 521	       ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
 522	       ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
 523}
 524
 525static bool sock_map_redirect_allowed(const struct sock *sk)
 526{
 527	if (sk_is_tcp(sk))
 528		return sk->sk_state != TCP_LISTEN;
 529	else
 530		return sk->sk_state == TCP_ESTABLISHED;
 531}
 532
 533static bool sock_map_sk_is_suitable(const struct sock *sk)
 534{
 535	return !!sk->sk_prot->psock_update_sk_prot;
 536}
 537
 538static bool sock_map_sk_state_allowed(const struct sock *sk)
 539{
 540	if (sk_is_tcp(sk))
 541		return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
 542	if (sk_is_stream_unix(sk))
 543		return (1 << sk->sk_state) & TCPF_ESTABLISHED;
 544	return true;
 545}
 546
 547static int sock_hash_update_common(struct bpf_map *map, void *key,
 548				   struct sock *sk, u64 flags);
 549
 550int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
 551			     u64 flags)
 552{
 553	struct socket *sock;
 554	struct sock *sk;
 555	int ret;
 556	u64 ufd;
 557
 558	if (map->value_size == sizeof(u64))
 559		ufd = *(u64 *)value;
 560	else
 561		ufd = *(u32 *)value;
 562	if (ufd > S32_MAX)
 563		return -EINVAL;
 564
 565	sock = sockfd_lookup(ufd, &ret);
 566	if (!sock)
 567		return ret;
 568	sk = sock->sk;
 569	if (!sk) {
 570		ret = -EINVAL;
 571		goto out;
 572	}
 573	if (!sock_map_sk_is_suitable(sk)) {
 574		ret = -EOPNOTSUPP;
 575		goto out;
 576	}
 577
 578	sock_map_sk_acquire(sk);
 579	if (!sock_map_sk_state_allowed(sk))
 580		ret = -EOPNOTSUPP;
 581	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
 582		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
 583	else
 584		ret = sock_hash_update_common(map, key, sk, flags);
 585	sock_map_sk_release(sk);
 586out:
 587	sockfd_put(sock);
 588	return ret;
 589}
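
/* Illustrative sketch, not part of this file: through the syscall path
 * the map value is a socket file descriptor. Inserting a connected TCP
 * socket sock_fd at index 0, assuming libbpf, a hypothetical map_fd and
 * a map created with an 8-byte value size:
 *
 *	__u32 idx = 0;
 *	__u64 value = sock_fd;
 *
 *	int err = bpf_map_update_elem(map_fd, &idx, &value, BPF_ANY);
 */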
 590
 591static long sock_map_update_elem(struct bpf_map *map, void *key,
 592				 void *value, u64 flags)
 593{
 594	struct sock *sk = (struct sock *)value;
 595	int ret;
 596
 597	if (unlikely(!sk || !sk_fullsock(sk)))
 598		return -EINVAL;
 599
 600	if (!sock_map_sk_is_suitable(sk))
 601		return -EOPNOTSUPP;
 602
 603	local_bh_disable();
 604	bh_lock_sock(sk);
 605	if (!sock_map_sk_state_allowed(sk))
 606		ret = -EOPNOTSUPP;
 607	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
 608		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
 609	else
 610		ret = sock_hash_update_common(map, key, sk, flags);
 611	bh_unlock_sock(sk);
 612	local_bh_enable();
 613	return ret;
 614}
 615
 616BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
 617	   struct bpf_map *, map, void *, key, u64, flags)
 618{
 619	WARN_ON_ONCE(!rcu_read_lock_held());
 620
 621	if (likely(sock_map_sk_is_suitable(sops->sk) &&
 622		   sock_map_op_okay(sops)))
 623		return sock_map_update_common(map, *(u32 *)key, sops->sk,
 624					      flags);
 625	return -EOPNOTSUPP;
 626}
 627
 628const struct bpf_func_proto bpf_sock_map_update_proto = {
 629	.func		= bpf_sock_map_update,
 630	.gpl_only	= false,
 631	.pkt_access	= true,
 632	.ret_type	= RET_INTEGER,
 633	.arg1_type	= ARG_PTR_TO_CTX,
 634	.arg2_type	= ARG_CONST_MAP_PTR,
 635	.arg3_type	= ARG_PTR_TO_MAP_KEY,
 636	.arg4_type	= ARG_ANYTHING,
 637};
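
/* Illustrative sketch, not part of this file: bpf_sock_map_update() is
 * called from sockops programs, and sock_map_op_okay() above restricts
 * it to the established and listen callbacks. A hedged BPF-side sketch:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_SOCKMAP);
 *		__uint(max_entries, 64);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} sock_map SEC(".maps");
 *
 *	SEC("sockops")
 *	int add_established(struct bpf_sock_ops *skops)
 *	{
 *		__u32 idx = 0;
 *
 *		if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *			bpf_sock_map_update(skops, &sock_map, &idx,
 *					    BPF_NOEXIST);
 *		return 1;
 *	}
 */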
 638
 639BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
 640	   struct bpf_map *, map, u32, key, u64, flags)
 641{
 642	struct sock *sk;
 643
 644	if (unlikely(flags & ~(BPF_F_INGRESS)))
 645		return SK_DROP;
 646
 647	sk = __sock_map_lookup_elem(map, key);
 648	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
 649		return SK_DROP;
 650
 651	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
 652	return SK_PASS;
 653}
 654
 655const struct bpf_func_proto bpf_sk_redirect_map_proto = {
 656	.func           = bpf_sk_redirect_map,
 657	.gpl_only       = false,
 658	.ret_type       = RET_INTEGER,
 659	.arg1_type	= ARG_PTR_TO_CTX,
 660	.arg2_type      = ARG_CONST_MAP_PTR,
 661	.arg3_type      = ARG_ANYTHING,
 662	.arg4_type      = ARG_ANYTHING,
 663};
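
/* Illustrative sketch, not part of this file: a minimal stream verdict
 * program forwarding every skb to the socket stored at index 0, reusing
 * the sock_map declaration from the sketch above:
 *
 *	SEC("sk_skb/stream_verdict")
 *	int redirect_all(struct __sk_buff *skb)
 *	{
 *		return bpf_sk_redirect_map(skb, &sock_map, 0, 0);
 *	}
 *
 * Passing BPF_F_INGRESS as the flags argument queues the skb on the
 * target's receive path instead of its transmit path.
 */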
 664
 665BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
 666	   struct bpf_map *, map, u32, key, u64, flags)
 667{
 668	struct sock *sk;
 669
 670	if (unlikely(flags & ~(BPF_F_INGRESS)))
 671		return SK_DROP;
 672
 673	sk = __sock_map_lookup_elem(map, key);
 674	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
 675		return SK_DROP;
 676	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
 677		return SK_DROP;
 678
 679	msg->flags = flags;
 680	msg->sk_redir = sk;
 681	return SK_PASS;
 682}
 683
 684const struct bpf_func_proto bpf_msg_redirect_map_proto = {
 685	.func           = bpf_msg_redirect_map,
 686	.gpl_only       = false,
 687	.ret_type       = RET_INTEGER,
 688	.arg1_type	= ARG_PTR_TO_CTX,
 689	.arg2_type      = ARG_CONST_MAP_PTR,
 690	.arg3_type      = ARG_ANYTHING,
 691	.arg4_type      = ARG_ANYTHING,
 692};
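
/* Illustrative sketch, not part of this file: the sk_msg counterpart,
 * run on the sendmsg path. Note the check above: an egress redirect
 * (no BPF_F_INGRESS) is only permitted when the target socket is TCP.
 * Again reusing the sock_map declaration sketched earlier:
 *
 *	SEC("sk_msg")
 *	int msg_redirect(struct sk_msg_md *msg)
 *	{
 *		return bpf_msg_redirect_map(msg, &sock_map, 0,
 *					    BPF_F_INGRESS);
 *	}
 */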
 693
 694struct sock_map_seq_info {
 695	struct bpf_map *map;
 696	struct sock *sk;
 697	u32 index;
 698};
 699
 700struct bpf_iter__sockmap {
 701	__bpf_md_ptr(struct bpf_iter_meta *, meta);
 702	__bpf_md_ptr(struct bpf_map *, map);
 703	__bpf_md_ptr(void *, key);
 704	__bpf_md_ptr(struct sock *, sk);
 705};
 706
 707DEFINE_BPF_ITER_FUNC(sockmap, struct bpf_iter_meta *meta,
 708		     struct bpf_map *map, void *key,
 709		     struct sock *sk)
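
/* Illustrative sketch, not part of this file: the context declared
 * above is what an iter/sockmap program receives; key and sk may be
 * NULL on the final call. A hedged BPF-side sketch using vmlinux.h
 * types:
 *
 *	SEC("iter/sockmap")
 *	int dump_elem(struct bpf_iter__sockmap *ctx)
 *	{
 *		__u32 *key = ctx->key;
 *
 *		if (key && ctx->sk)
 *			BPF_SEQ_PRINTF(ctx->meta->seq, "key %u\n", *key);
 *		return 0;
 *	}
 */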
 710
 711static void *sock_map_seq_lookup_elem(struct sock_map_seq_info *info)
 712{
 713	if (unlikely(info->index >= info->map->max_entries))
 714		return NULL;
 715
 716	info->sk = __sock_map_lookup_elem(info->map, info->index);
 717
 718	/* can't return sk directly, since that might be NULL */
 719	return info;
 720}
 721
 722static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos)
 723	__acquires(rcu)
 724{
 725	struct sock_map_seq_info *info = seq->private;
 726
 727	if (*pos == 0)
 728		++*pos;
 729
 730	/* pairs with sock_map_seq_stop */
 731	rcu_read_lock();
 732	return sock_map_seq_lookup_elem(info);
 733}
 734
 735static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 736	__must_hold(rcu)
 737{
 738	struct sock_map_seq_info *info = seq->private;
 739
 740	++*pos;
 741	++info->index;
 742
 743	return sock_map_seq_lookup_elem(info);
 744}
 745
 746static int sock_map_seq_show(struct seq_file *seq, void *v)
 747	__must_hold(rcu)
 748{
 749	struct sock_map_seq_info *info = seq->private;
 750	struct bpf_iter__sockmap ctx = {};
 751	struct bpf_iter_meta meta;
 752	struct bpf_prog *prog;
 753
 754	meta.seq = seq;
 755	prog = bpf_iter_get_info(&meta, !v);
 756	if (!prog)
 757		return 0;
 758
 759	ctx.meta = &meta;
 760	ctx.map = info->map;
 761	if (v) {
 762		ctx.key = &info->index;
 763		ctx.sk = info->sk;
 764	}
 765
 766	return bpf_iter_run_prog(prog, &ctx);
 767}
 768
 769static void sock_map_seq_stop(struct seq_file *seq, void *v)
 770	__releases(rcu)
 771{
 772	if (!v)
 773		(void)sock_map_seq_show(seq, NULL);
 774
 775	/* pairs with sock_map_seq_start */
 776	rcu_read_unlock();
 777}
 778
 779static const struct seq_operations sock_map_seq_ops = {
 780	.start	= sock_map_seq_start,
 781	.next	= sock_map_seq_next,
 782	.stop	= sock_map_seq_stop,
 783	.show	= sock_map_seq_show,
 784};
 785
 786static int sock_map_init_seq_private(void *priv_data,
 787				     struct bpf_iter_aux_info *aux)
 788{
 789	struct sock_map_seq_info *info = priv_data;
 790
 791	bpf_map_inc_with_uref(aux->map);
 792	info->map = aux->map;
 793	return 0;
 794}
 795
 796static void sock_map_fini_seq_private(void *priv_data)
 797{
 798	struct sock_map_seq_info *info = priv_data;
 799
 800	bpf_map_put_with_uref(info->map);
 801}
 802
 803static u64 sock_map_mem_usage(const struct bpf_map *map)
 804{
 805	u64 usage = sizeof(struct bpf_stab);
 806
 807	usage += (u64)map->max_entries * sizeof(struct sock *);
 808	return usage;
 809}
 810
 811static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
 812	.seq_ops		= &sock_map_seq_ops,
 813	.init_seq_private	= sock_map_init_seq_private,
 814	.fini_seq_private	= sock_map_fini_seq_private,
 815	.seq_priv_size		= sizeof(struct sock_map_seq_info),
 816};
 817
 818BTF_ID_LIST_SINGLE(sock_map_btf_ids, struct, bpf_stab)
 819const struct bpf_map_ops sock_map_ops = {
 820	.map_meta_equal		= bpf_map_meta_equal,
 821	.map_alloc		= sock_map_alloc,
 822	.map_free		= sock_map_free,
 823	.map_get_next_key	= sock_map_get_next_key,
 824	.map_lookup_elem_sys_only = sock_map_lookup_sys,
 825	.map_update_elem	= sock_map_update_elem,
 826	.map_delete_elem	= sock_map_delete_elem,
 827	.map_lookup_elem	= sock_map_lookup,
 828	.map_release_uref	= sock_map_release_progs,
 829	.map_check_btf		= map_check_no_btf,
 830	.map_mem_usage		= sock_map_mem_usage,
 831	.map_btf_id		= &sock_map_btf_ids[0],
 832	.iter_seq_info		= &sock_map_iter_seq_info,
 833};
 834
 835struct bpf_shtab_elem {
 836	struct rcu_head rcu;
 837	u32 hash;
 838	struct sock *sk;
 839	struct hlist_node node;
 840	u8 key[];
 841};
 842
 843struct bpf_shtab_bucket {
 844	struct hlist_head head;
 845	spinlock_t lock;
 846};
 847
 848struct bpf_shtab {
 849	struct bpf_map map;
 850	struct bpf_shtab_bucket *buckets;
 851	u32 buckets_num;
 852	u32 elem_size;
 853	struct sk_psock_progs progs;
 854	atomic_t count;
 855};
 856
 857static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
 858{
 859	return jhash(key, len, 0);
 860}
 861
 862static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab,
 863							u32 hash)
 864{
 865	return &htab->buckets[hash & (htab->buckets_num - 1)];
 866}
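
/* The mask above relies on buckets_num being a power of two, which
 * sock_hash_alloc() below guarantees by rounding max_entries up with
 * roundup_pow_of_two().
 */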
 867
 868static struct bpf_shtab_elem *
 869sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
 870			  u32 key_size)
 871{
 872	struct bpf_shtab_elem *elem;
 873
 874	hlist_for_each_entry_rcu(elem, head, node) {
 875		if (elem->hash == hash &&
 876		    !memcmp(&elem->key, key, key_size))
 877			return elem;
 878	}
 879
 880	return NULL;
 881}
 882
 883static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
 884{
 885	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 886	u32 key_size = map->key_size, hash;
 887	struct bpf_shtab_bucket *bucket;
 888	struct bpf_shtab_elem *elem;
 889
 890	WARN_ON_ONCE(!rcu_read_lock_held());
 891
 892	hash = sock_hash_bucket_hash(key, key_size);
 893	bucket = sock_hash_select_bucket(htab, hash);
 894	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
 895
 896	return elem ? elem->sk : NULL;
 897}
 898
 899static void sock_hash_free_elem(struct bpf_shtab *htab,
 900				struct bpf_shtab_elem *elem)
 901{
 902	atomic_dec(&htab->count);
 903	kfree_rcu(elem, rcu);
 904}
 905
 906static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
 907				       void *link_raw)
 908{
 909	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 910	struct bpf_shtab_elem *elem_probe, *elem = link_raw;
 911	struct bpf_shtab_bucket *bucket;
 912
 913	WARN_ON_ONCE(!rcu_read_lock_held());
 914	bucket = sock_hash_select_bucket(htab, elem->hash);
 915
 916	/* elem may be deleted from the map in parallel, but access here
 917	 * is okay since it goes away only after an RCU grace period.
 918	 * However, we need to check whether it's still present.
 919	 */
 920	spin_lock_bh(&bucket->lock);
 921	elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
 922					       elem->key, map->key_size);
 923	if (elem_probe && elem_probe == elem) {
 924		hlist_del_rcu(&elem->node);
 925		sock_map_unref(elem->sk, elem);
 926		sock_hash_free_elem(htab, elem);
 927	}
 928	spin_unlock_bh(&bucket->lock);
 929}
 930
 931static long sock_hash_delete_elem(struct bpf_map *map, void *key)
 932{
 933	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 934	u32 hash, key_size = map->key_size;
 935	struct bpf_shtab_bucket *bucket;
 936	struct bpf_shtab_elem *elem;
 937	int ret = -ENOENT;
 938
 939	if (irqs_disabled())
 940		return -EOPNOTSUPP; /* locks here are hardirq-unsafe */
 941
 942	hash = sock_hash_bucket_hash(key, key_size);
 943	bucket = sock_hash_select_bucket(htab, hash);
 944
 945	spin_lock_bh(&bucket->lock);
 946	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
 947	if (elem) {
 948		hlist_del_rcu(&elem->node);
 949		sock_map_unref(elem->sk, elem);
 950		sock_hash_free_elem(htab, elem);
 951		ret = 0;
 952	}
 953	spin_unlock_bh(&bucket->lock);
 954	return ret;
 955}
 956
 957static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
 958						   void *key, u32 key_size,
 959						   u32 hash, struct sock *sk,
 960						   struct bpf_shtab_elem *old)
 961{
 962	struct bpf_shtab_elem *new;
 963
 964	if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
 965		if (!old) {
 966			atomic_dec(&htab->count);
 967			return ERR_PTR(-E2BIG);
 968		}
 969	}
 970
 971	new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
 972				   GFP_ATOMIC | __GFP_NOWARN,
 973				   htab->map.numa_node);
 974	if (!new) {
 975		atomic_dec(&htab->count);
 976		return ERR_PTR(-ENOMEM);
 977	}
 978	memcpy(new->key, key, key_size);
 979	new->sk = sk;
 980	new->hash = hash;
 981	return new;
 982}
 983
 984static int sock_hash_update_common(struct bpf_map *map, void *key,
 985				   struct sock *sk, u64 flags)
 986{
 987	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
 988	u32 key_size = map->key_size, hash;
 989	struct bpf_shtab_elem *elem, *elem_new;
 990	struct bpf_shtab_bucket *bucket;
 991	struct sk_psock_link *link;
 992	struct sk_psock *psock;
 993	int ret;
 994
 995	WARN_ON_ONCE(!rcu_read_lock_held());
 996	if (unlikely(flags > BPF_EXIST))
 997		return -EINVAL;
 998
 999	link = sk_psock_init_link();
1000	if (!link)
1001		return -ENOMEM;
1002
1003	ret = sock_map_link(map, sk);
1004	if (ret < 0)
1005		goto out_free;
1006
1007	psock = sk_psock(sk);
1008	WARN_ON_ONCE(!psock);
1009
1010	hash = sock_hash_bucket_hash(key, key_size);
1011	bucket = sock_hash_select_bucket(htab, hash);
1012
1013	spin_lock_bh(&bucket->lock);
1014	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
1015	if (elem && flags == BPF_NOEXIST) {
1016		ret = -EEXIST;
1017		goto out_unlock;
1018	} else if (!elem && flags == BPF_EXIST) {
1019		ret = -ENOENT;
1020		goto out_unlock;
1021	}
1022
1023	elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
1024	if (IS_ERR(elem_new)) {
1025		ret = PTR_ERR(elem_new);
1026		goto out_unlock;
1027	}
1028
1029	sock_map_add_link(psock, link, map, elem_new);
1030	/* Add the new element to the head of the list, so that a
1031	 * concurrent search will find it before the old element.
1032	 */
1033	hlist_add_head_rcu(&elem_new->node, &bucket->head);
1034	if (elem) {
1035		hlist_del_rcu(&elem->node);
1036		sock_map_unref(elem->sk, elem);
1037		sock_hash_free_elem(htab, elem);
1038	}
1039	spin_unlock_bh(&bucket->lock);
1040	return 0;
1041out_unlock:
1042	spin_unlock_bh(&bucket->lock);
1043	sk_psock_put(sk, psock);
1044out_free:
1045	sk_psock_free_link(link);
1046	return ret;
1047}
1048
1049static int sock_hash_get_next_key(struct bpf_map *map, void *key,
1050				  void *key_next)
1051{
1052	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1053	struct bpf_shtab_elem *elem, *elem_next;
1054	u32 hash, key_size = map->key_size;
1055	struct hlist_head *head;
1056	int i = 0;
1057
1058	if (!key)
1059		goto find_first_elem;
1060	hash = sock_hash_bucket_hash(key, key_size);
1061	head = &sock_hash_select_bucket(htab, hash)->head;
1062	elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
1063	if (!elem)
1064		goto find_first_elem;
1065
1066	elem_next = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&elem->node)),
1067				     struct bpf_shtab_elem, node);
1068	if (elem_next) {
1069		memcpy(key_next, elem_next->key, key_size);
1070		return 0;
1071	}
1072
1073	i = hash & (htab->buckets_num - 1);
1074	i++;
1075find_first_elem:
1076	for (; i < htab->buckets_num; i++) {
1077		head = &sock_hash_select_bucket(htab, i)->head;
1078		elem_next = hlist_entry_safe(rcu_dereference(hlist_first_rcu(head)),
1079					     struct bpf_shtab_elem, node);
1080		if (elem_next) {
1081			memcpy(key_next, elem_next->key, key_size);
1082			return 0;
1083		}
1084	}
1085
1086	return -ENOENT;
1087}
1088
1089static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
1090{
1091	struct bpf_shtab *htab;
1092	int i, err;
1093
1094	if (attr->max_entries == 0 ||
1095	    attr->key_size    == 0 ||
1096	    (attr->value_size != sizeof(u32) &&
1097	     attr->value_size != sizeof(u64)) ||
1098	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
1099		return ERR_PTR(-EINVAL);
1100	if (attr->key_size > MAX_BPF_STACK)
1101		return ERR_PTR(-E2BIG);
1102
1103	htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
1104	if (!htab)
1105		return ERR_PTR(-ENOMEM);
1106
1107	bpf_map_init_from_attr(&htab->map, attr);
1108
1109	htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
1110	htab->elem_size = sizeof(struct bpf_shtab_elem) +
1111			  round_up(htab->map.key_size, 8);
1112	if (htab->buckets_num == 0 ||
1113	    htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) {
1114		err = -EINVAL;
1115		goto free_htab;
1116	}
1117
1118	htab->buckets = bpf_map_area_alloc(htab->buckets_num *
1119					   sizeof(struct bpf_shtab_bucket),
1120					   htab->map.numa_node);
1121	if (!htab->buckets) {
1122		err = -ENOMEM;
1123		goto free_htab;
1124	}
1125
1126	for (i = 0; i < htab->buckets_num; i++) {
1127		INIT_HLIST_HEAD(&htab->buckets[i].head);
1128		spin_lock_init(&htab->buckets[i].lock);
1129	}
1130
1131	return &htab->map;
1132free_htab:
1133	bpf_map_area_free(htab);
1134	return ERR_PTR(err);
1135}
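
/* Illustrative sketch, not part of this file: unlike the 4-byte-index
 * sockmap, a sockhash accepts any key size up to MAX_BPF_STACK, e.g. a
 * hypothetical 5-tuple struct. Creating one with libbpf might look
 * like:
 *
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_SOCKHASH, "sock_hash",
 *				    sizeof(struct tuple), sizeof(__u64),
 *				    1024, NULL);
 */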
1136
1137static void sock_hash_free(struct bpf_map *map)
1138{
1139	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1140	struct bpf_shtab_bucket *bucket;
1141	struct hlist_head unlink_list;
1142	struct bpf_shtab_elem *elem;
1143	struct hlist_node *node;
1144	int i;
1145
1146	/* After the sync no updates or deletes will be in-flight so it
1147	 * is safe to walk the map and remove entries without risking a
1148	 * race in the EEXIST update case.
1149	 */
1150	synchronize_rcu();
1151	for (i = 0; i < htab->buckets_num; i++) {
1152		bucket = sock_hash_select_bucket(htab, i);
1153
1154		/* We are racing with sock_hash_delete_from_link to
1155		 * enter the spin-lock critical section. Every socket on
1156		 * the list is still linked to the sockhash. Since the
1157		 * link exists, the psock exists and holds a ref to the
1158		 * socket. That lets us grab a socket ref too.
1159		 */
1160		spin_lock_bh(&bucket->lock);
1161		hlist_for_each_entry(elem, &bucket->head, node)
1162			sock_hold(elem->sk);
1163		hlist_move_list(&bucket->head, &unlink_list);
1164		spin_unlock_bh(&bucket->lock);
1165
1166		/* Process removed entries out of atomic context so we
1167		 * can block on the socket lock before deleting the
1168		 * psock's link to the sockhash.
1169		 */
1170		hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
1171			hlist_del(&elem->node);
1172			lock_sock(elem->sk);
1173			rcu_read_lock();
1174			sock_map_unref(elem->sk, elem);
1175			rcu_read_unlock();
1176			release_sock(elem->sk);
1177			sock_put(elem->sk);
1178			sock_hash_free_elem(htab, elem);
1179		}
1180	}
1181
1182	/* wait for psock readers accessing its map link */
1183	synchronize_rcu();
1184
1185	bpf_map_area_free(htab->buckets);
1186	bpf_map_area_free(htab);
1187}
1188
1189static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
1190{
1191	struct sock *sk;
1192
1193	if (map->value_size != sizeof(u64))
1194		return ERR_PTR(-ENOSPC);
1195
1196	sk = __sock_hash_lookup_elem(map, key);
1197	if (!sk)
1198		return ERR_PTR(-ENOENT);
1199
1200	__sock_gen_cookie(sk);
1201	return &sk->sk_cookie;
1202}
1203
1204static void *sock_hash_lookup(struct bpf_map *map, void *key)
1205{
1206	struct sock *sk;
1207
1208	sk = __sock_hash_lookup_elem(map, key);
1209	if (!sk)
1210		return NULL;
1211	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
1212		return NULL;
1213	return sk;
1214}
1215
1216static void sock_hash_release_progs(struct bpf_map *map)
1217{
1218	psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs);
1219}
1220
1221BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
1222	   struct bpf_map *, map, void *, key, u64, flags)
1223{
1224	WARN_ON_ONCE(!rcu_read_lock_held());
1225
1226	if (likely(sock_map_sk_is_suitable(sops->sk) &&
1227		   sock_map_op_okay(sops)))
1228		return sock_hash_update_common(map, key, sops->sk, flags);
1229	return -EOPNOTSUPP;
1230}
1231
1232const struct bpf_func_proto bpf_sock_hash_update_proto = {
1233	.func		= bpf_sock_hash_update,
1234	.gpl_only	= false,
1235	.pkt_access	= true,
1236	.ret_type	= RET_INTEGER,
1237	.arg1_type	= ARG_PTR_TO_CTX,
1238	.arg2_type	= ARG_CONST_MAP_PTR,
1239	.arg3_type	= ARG_PTR_TO_MAP_KEY,
1240	.arg4_type	= ARG_ANYTHING,
1241};
1242
1243BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
1244	   struct bpf_map *, map, void *, key, u64, flags)
1245{
1246	struct sock *sk;
1247
1248	if (unlikely(flags & ~(BPF_F_INGRESS)))
1249		return SK_DROP;
1250
1251	sk = __sock_hash_lookup_elem(map, key);
1252	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1253		return SK_DROP;
1254
1255	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
1256	return SK_PASS;
1257}
1258
1259const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
1260	.func           = bpf_sk_redirect_hash,
1261	.gpl_only       = false,
1262	.ret_type       = RET_INTEGER,
1263	.arg1_type	= ARG_PTR_TO_CTX,
1264	.arg2_type      = ARG_CONST_MAP_PTR,
1265	.arg3_type      = ARG_PTR_TO_MAP_KEY,
1266	.arg4_type      = ARG_ANYTHING,
1267};
1268
1269BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
1270	   struct bpf_map *, map, void *, key, u64, flags)
1271{
1272	struct sock *sk;
1273
1274	if (unlikely(flags & ~(BPF_F_INGRESS)))
1275		return SK_DROP;
1276
1277	sk = __sock_hash_lookup_elem(map, key);
1278	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1279		return SK_DROP;
1280	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
1281		return SK_DROP;
1282
1283	msg->flags = flags;
1284	msg->sk_redir = sk;
1285	return SK_PASS;
1286}
1287
1288const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
1289	.func           = bpf_msg_redirect_hash,
1290	.gpl_only       = false,
1291	.ret_type       = RET_INTEGER,
1292	.arg1_type	= ARG_PTR_TO_CTX,
1293	.arg2_type      = ARG_CONST_MAP_PTR,
1294	.arg3_type      = ARG_PTR_TO_MAP_KEY,
1295	.arg4_type      = ARG_ANYTHING,
1296};
1297
1298struct sock_hash_seq_info {
1299	struct bpf_map *map;
1300	struct bpf_shtab *htab;
1301	u32 bucket_id;
1302};
1303
1304static void *sock_hash_seq_find_next(struct sock_hash_seq_info *info,
1305				     struct bpf_shtab_elem *prev_elem)
1306{
1307	const struct bpf_shtab *htab = info->htab;
1308	struct bpf_shtab_bucket *bucket;
1309	struct bpf_shtab_elem *elem;
1310	struct hlist_node *node;
1311
1312	/* try to find next elem in the same bucket */
1313	if (prev_elem) {
1314		node = rcu_dereference(hlist_next_rcu(&prev_elem->node));
1315		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
1316		if (elem)
1317			return elem;
1318
1319		/* no more elements, continue in the next bucket */
1320		info->bucket_id++;
1321	}
1322
1323	for (; info->bucket_id < htab->buckets_num; info->bucket_id++) {
1324		bucket = &htab->buckets[info->bucket_id];
1325		node = rcu_dereference(hlist_first_rcu(&bucket->head));
1326		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
1327		if (elem)
1328			return elem;
1329	}
1330
1331	return NULL;
1332}
1333
1334static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos)
1335	__acquires(rcu)
1336{
1337	struct sock_hash_seq_info *info = seq->private;
1338
1339	if (*pos == 0)
1340		++*pos;
1341
1342	/* pairs with sock_hash_seq_stop */
1343	rcu_read_lock();
1344	return sock_hash_seq_find_next(info, NULL);
1345}
1346
1347static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1348	__must_hold(rcu)
1349{
1350	struct sock_hash_seq_info *info = seq->private;
1351
1352	++*pos;
1353	return sock_hash_seq_find_next(info, v);
1354}
1355
1356static int sock_hash_seq_show(struct seq_file *seq, void *v)
1357	__must_hold(rcu)
1358{
1359	struct sock_hash_seq_info *info = seq->private;
1360	struct bpf_iter__sockmap ctx = {};
1361	struct bpf_shtab_elem *elem = v;
1362	struct bpf_iter_meta meta;
1363	struct bpf_prog *prog;
1364
1365	meta.seq = seq;
1366	prog = bpf_iter_get_info(&meta, !elem);
1367	if (!prog)
1368		return 0;
1369
1370	ctx.meta = &meta;
1371	ctx.map = info->map;
1372	if (elem) {
1373		ctx.key = elem->key;
1374		ctx.sk = elem->sk;
1375	}
1376
1377	return bpf_iter_run_prog(prog, &ctx);
1378}
1379
1380static void sock_hash_seq_stop(struct seq_file *seq, void *v)
1381	__releases(rcu)
1382{
1383	if (!v)
1384		(void)sock_hash_seq_show(seq, NULL);
1385
1386	/* pairs with sock_hash_seq_start */
1387	rcu_read_unlock();
1388}
1389
1390static const struct seq_operations sock_hash_seq_ops = {
1391	.start	= sock_hash_seq_start,
1392	.next	= sock_hash_seq_next,
1393	.stop	= sock_hash_seq_stop,
1394	.show	= sock_hash_seq_show,
1395};
1396
1397static int sock_hash_init_seq_private(void *priv_data,
1398				      struct bpf_iter_aux_info *aux)
1399{
1400	struct sock_hash_seq_info *info = priv_data;
1401
1402	bpf_map_inc_with_uref(aux->map);
1403	info->map = aux->map;
1404	info->htab = container_of(aux->map, struct bpf_shtab, map);
1405	return 0;
1406}
1407
1408static void sock_hash_fini_seq_private(void *priv_data)
1409{
1410	struct sock_hash_seq_info *info = priv_data;
1411
1412	bpf_map_put_with_uref(info->map);
1413}
1414
1415static u64 sock_hash_mem_usage(const struct bpf_map *map)
1416{
1417	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1418	u64 usage = sizeof(*htab);
1419
1420	usage += htab->buckets_num * sizeof(struct bpf_shtab_bucket);
1421	usage += atomic_read(&htab->count) * (u64)htab->elem_size;
1422	return usage;
1423}
1424
1425static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
1426	.seq_ops		= &sock_hash_seq_ops,
1427	.init_seq_private	= sock_hash_init_seq_private,
1428	.fini_seq_private	= sock_hash_fini_seq_private,
1429	.seq_priv_size		= sizeof(struct sock_hash_seq_info),
1430};
1431
1432BTF_ID_LIST_SINGLE(sock_hash_map_btf_ids, struct, bpf_shtab)
1433const struct bpf_map_ops sock_hash_ops = {
1434	.map_meta_equal		= bpf_map_meta_equal,
1435	.map_alloc		= sock_hash_alloc,
1436	.map_free		= sock_hash_free,
1437	.map_get_next_key	= sock_hash_get_next_key,
1438	.map_update_elem	= sock_map_update_elem,
1439	.map_delete_elem	= sock_hash_delete_elem,
1440	.map_lookup_elem	= sock_hash_lookup,
1441	.map_lookup_elem_sys_only = sock_hash_lookup_sys,
1442	.map_release_uref	= sock_hash_release_progs,
1443	.map_check_btf		= map_check_no_btf,
1444	.map_mem_usage		= sock_hash_mem_usage,
1445	.map_btf_id		= &sock_hash_map_btf_ids[0],
1446	.iter_seq_info		= &sock_hash_iter_seq_info,
1447};
1448
1449static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
1450{
1451	switch (map->map_type) {
1452	case BPF_MAP_TYPE_SOCKMAP:
1453		return &container_of(map, struct bpf_stab, map)->progs;
1454	case BPF_MAP_TYPE_SOCKHASH:
1455		return &container_of(map, struct bpf_shtab, map)->progs;
1456	default:
1457		break;
1458	}
1459
1460	return NULL;
1461}
1462
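/* Map an attach type to the psock program slot it selects. Note that
 * BPF_SK_SKB_STREAM_VERDICT and BPF_SK_SKB_VERDICT are mutually
 * exclusive: attaching one while the other is set fails with -EBUSY.
 */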
1463static int sock_map_prog_lookup(struct bpf_map *map, struct bpf_prog ***pprog,
1464				u32 which)
1465{
1466	struct sk_psock_progs *progs = sock_map_progs(map);
1467
1468	if (!progs)
1469		return -EOPNOTSUPP;
1470
1471	switch (which) {
1472	case BPF_SK_MSG_VERDICT:
1473		*pprog = &progs->msg_parser;
1474		break;
1475#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
1476	case BPF_SK_SKB_STREAM_PARSER:
1477		*pprog = &progs->stream_parser;
1478		break;
1479#endif
1480	case BPF_SK_SKB_STREAM_VERDICT:
1481		if (progs->skb_verdict)
1482			return -EBUSY;
1483		*pprog = &progs->stream_verdict;
1484		break;
1485	case BPF_SK_SKB_VERDICT:
1486		if (progs->stream_verdict)
1487			return -EBUSY;
1488		*pprog = &progs->skb_verdict;
1489		break;
1490	default:
1491		return -EOPNOTSUPP;
1492	}
1493
1494	return 0;
1495}
1496
1497static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
1498				struct bpf_prog *old, u32 which)
1499{
1500	struct bpf_prog **pprog;
1501	int ret;
1502
1503	ret = sock_map_prog_lookup(map, &pprog, which);
1504	if (ret)
1505		return ret;
1506
1507	if (old)
1508		return psock_replace_prog(pprog, prog, old);
1509
1510	psock_set_prog(pprog, prog);
1511	return 0;
1512}
1513
1514int sock_map_bpf_prog_query(const union bpf_attr *attr,
1515			    union bpf_attr __user *uattr)
1516{
1517	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
1518	u32 prog_cnt = 0, flags = 0, ufd = attr->target_fd;
1519	struct bpf_prog **pprog;
1520	struct bpf_prog *prog;
1521	struct bpf_map *map;
1522	struct fd f;
1523	u32 id = 0;
1524	int ret;
1525
1526	if (attr->query.query_flags)
1527		return -EINVAL;
1528
1529	f = fdget(ufd);
1530	map = __bpf_map_get(f);
1531	if (IS_ERR(map))
1532		return PTR_ERR(map);
1533
1534	rcu_read_lock();
1535
1536	ret = sock_map_prog_lookup(map, &pprog, attr->query.attach_type);
1537	if (ret)
1538		goto end;
1539
1540	prog = *pprog;
1541	prog_cnt = !prog ? 0 : 1;
1542
1543	if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
1544		goto end;
1545
1546	/* We do not hold the refcnt; the bpf prog may be released
1547	 * asynchronously, in which case its id will read as 0.
1548	 */
1549	id = data_race(prog->aux->id);
1550	if (id == 0)
1551		prog_cnt = 0;
1552
1553end:
1554	rcu_read_unlock();
1555
1556	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)) ||
1557	    (id != 0 && copy_to_user(prog_ids, &id, sizeof(u32))) ||
1558	    copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
1559		ret = -EFAULT;
1560
1561	fdput(f);
1562	return ret;
1563}
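
/* Illustrative sketch, not part of this file: querying which program is
 * attached to a map, assuming libbpf and a hypothetical map_fd. At most
 * one program id is reported per attach type:
 *
 *	__u32 prog_ids[1], prog_cnt = 1;
 *	int err;
 *
 *	err = bpf_prog_query(map_fd, BPF_SK_SKB_STREAM_VERDICT, 0, NULL,
 *			     prog_ids, &prog_cnt);
 */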
1564
1565static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link)
1566{
1567	switch (link->map->map_type) {
1568	case BPF_MAP_TYPE_SOCKMAP:
1569		return sock_map_delete_from_link(link->map, sk,
1570						 link->link_raw);
1571	case BPF_MAP_TYPE_SOCKHASH:
1572		return sock_hash_delete_from_link(link->map, sk,
1573						  link->link_raw);
1574	default:
1575		break;
1576	}
1577}
1578
1579static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock)
1580{
1581	struct sk_psock_link *link;
1582
1583	while ((link = sk_psock_link_pop(psock))) {
1584		sock_map_unlink(sk, link);
1585		sk_psock_free_link(link);
1586	}
1587}
1588
1589void sock_map_unhash(struct sock *sk)
1590{
1591	void (*saved_unhash)(struct sock *sk);
1592	struct sk_psock *psock;
1593
1594	rcu_read_lock();
1595	psock = sk_psock(sk);
1596	if (unlikely(!psock)) {
1597		rcu_read_unlock();
1598		saved_unhash = READ_ONCE(sk->sk_prot)->unhash;
1599	} else {
1600		saved_unhash = psock->saved_unhash;
1601		sock_map_remove_links(sk, psock);
1602		rcu_read_unlock();
1603	}
1604	if (WARN_ON_ONCE(saved_unhash == sock_map_unhash))
1605		return;
1606	if (saved_unhash)
1607		saved_unhash(sk);
1608}
1609EXPORT_SYMBOL_GPL(sock_map_unhash);
1610
1611void sock_map_destroy(struct sock *sk)
1612{
1613	void (*saved_destroy)(struct sock *sk);
1614	struct sk_psock *psock;
1615
1616	rcu_read_lock();
1617	psock = sk_psock_get(sk);
1618	if (unlikely(!psock)) {
1619		rcu_read_unlock();
1620		saved_destroy = READ_ONCE(sk->sk_prot)->destroy;
1621	} else {
1622		saved_destroy = psock->saved_destroy;
1623		sock_map_remove_links(sk, psock);
1624		rcu_read_unlock();
1625		sk_psock_stop(psock);
1626		sk_psock_put(sk, psock);
1627	}
1628	if (WARN_ON_ONCE(saved_destroy == sock_map_destroy))
1629		return;
1630	if (saved_destroy)
1631		saved_destroy(sk);
1632}
1633EXPORT_SYMBOL_GPL(sock_map_destroy);
1634
1635void sock_map_close(struct sock *sk, long timeout)
1636{
1637	void (*saved_close)(struct sock *sk, long timeout);
1638	struct sk_psock *psock;
1639
1640	lock_sock(sk);
1641	rcu_read_lock();
1642	psock = sk_psock_get(sk);
1643	if (unlikely(!psock)) {
1644		rcu_read_unlock();
1645		release_sock(sk);
1646		saved_close = READ_ONCE(sk->sk_prot)->close;
1647	} else {
1648		saved_close = psock->saved_close;
1649		sock_map_remove_links(sk, psock);
1650		rcu_read_unlock();
1651		sk_psock_stop(psock);
1652		release_sock(sk);
1653		cancel_delayed_work_sync(&psock->work);
1654		sk_psock_put(sk, psock);
1655	}
1656
1657	/* Make sure we do not recurse; recursing here would be a bug.
1658	 * Leak the socket instead of crashing on a stack overflow.
1659	 */
1660	if (WARN_ON_ONCE(saved_close == sock_map_close))
1661		return;
1662	saved_close(sk, timeout);
1663}
1664EXPORT_SYMBOL_GPL(sock_map_close);
1665
1666static int sock_map_iter_attach_target(struct bpf_prog *prog,
1667				       union bpf_iter_link_info *linfo,
1668				       struct bpf_iter_aux_info *aux)
1669{
1670	struct bpf_map *map;
1671	int err = -EINVAL;
1672
1673	if (!linfo->map.map_fd)
1674		return -EBADF;
1675
1676	map = bpf_map_get_with_uref(linfo->map.map_fd);
1677	if (IS_ERR(map))
1678		return PTR_ERR(map);
1679
1680	if (map->map_type != BPF_MAP_TYPE_SOCKMAP &&
1681	    map->map_type != BPF_MAP_TYPE_SOCKHASH)
1682		goto put_map;
1683
1684	if (prog->aux->max_rdonly_access > map->key_size) {
1685		err = -EACCES;
1686		goto put_map;
1687	}
1688
1689	aux->map = map;
1690	return 0;
1691
1692put_map:
1693	bpf_map_put_with_uref(map);
1694	return err;
1695}
1696
1697static void sock_map_iter_detach_target(struct bpf_iter_aux_info *aux)
1698{
1699	bpf_map_put_with_uref(aux->map);
1700}
1701
1702static struct bpf_iter_reg sock_map_iter_reg = {
1703	.target			= "sockmap",
1704	.attach_target		= sock_map_iter_attach_target,
1705	.detach_target		= sock_map_iter_detach_target,
1706	.show_fdinfo		= bpf_iter_map_show_fdinfo,
1707	.fill_link_info		= bpf_iter_map_fill_link_info,
1708	.ctx_arg_info_size	= 2,
1709	.ctx_arg_info		= {
1710		{ offsetof(struct bpf_iter__sockmap, key),
1711		  PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY },
1712		{ offsetof(struct bpf_iter__sockmap, sk),
1713		  PTR_TO_BTF_ID_OR_NULL },
1714	},
1715};
1716
1717static int __init bpf_sockmap_iter_init(void)
1718{
1719	sock_map_iter_reg.ctx_arg_info[1].btf_id =
1720		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
1721	return bpf_iter_reg_target(&sock_map_iter_reg);
1722}
1723late_initcall(bpf_sockmap_iter_init);