net/ipv4/inet_connection_sock.c (v4.17)
 
   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		Support for INET connection oriented protocols.
   7 *
   8 * Authors:	See the TCP sources
   9 *
  10 *		This program is free software; you can redistribute it and/or
  11 *		modify it under the terms of the GNU General Public License
  12 *		as published by the Free Software Foundation; either version
  13 *		2 of the License, or (at your option) any later version.
  14 */
  15
  16#include <linux/module.h>
  17#include <linux/jhash.h>
  18
  19#include <net/inet_connection_sock.h>
  20#include <net/inet_hashtables.h>
  21#include <net/inet_timewait_sock.h>
  22#include <net/ip.h>
  23#include <net/route.h>
  24#include <net/tcp_states.h>
  25#include <net/xfrm.h>
  26#include <net/tcp.h>
  27#include <net/sock_reuseport.h>
  28#include <net/addrconf.h>
  29
  30#ifdef INET_CSK_DEBUG
  31const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
  32EXPORT_SYMBOL(inet_csk_timer_bug_msg);
  33#endif
  34
  35#if IS_ENABLED(CONFIG_IPV6)
  36/* match_wildcard == true:  IPV6_ADDR_ANY matches any IPv6 address if IPv6
  37 *                          only, and any IPv4 address if not IPv6 only
  38 * match_wildcard == false: addresses must be exactly the same, i.e.
  39 *                          IPV6_ADDR_ANY only matches IPV6_ADDR_ANY,
  40 *                          and 0.0.0.0 matches 0.0.0.0 only
  41 */
  42static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
  43				 const struct in6_addr *sk2_rcv_saddr6,
  44				 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
  45				 bool sk1_ipv6only, bool sk2_ipv6only,
  46				 bool match_wildcard)
  47{
  48	int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
  49	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
  50
  51	/* if both are mapped, treat as IPv4 */
  52	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
  53		if (!sk2_ipv6only) {
  54			if (sk1_rcv_saddr == sk2_rcv_saddr)
  55				return true;
  56			if (!sk1_rcv_saddr || !sk2_rcv_saddr)
  57				return match_wildcard;
  58		}
  59		return false;
  60	}
  61
  62	if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
  63		return true;
  64
  65	if (addr_type2 == IPV6_ADDR_ANY && match_wildcard &&
  66	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
  67		return true;
  68
  69	if (addr_type == IPV6_ADDR_ANY && match_wildcard &&
  70	    !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
  71		return true;
  72
  73	if (sk2_rcv_saddr6 &&
  74	    ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
  75		return true;
  76
  77	return false;
  78}
  79#endif
  80
  81/* match_wildcard == true:  0.0.0.0 matches any IPv4 address
  82 * match_wildcard == false: addresses must be exactly the same, i.e.
  83 *                          0.0.0.0 only matches 0.0.0.0
  84 */
  85static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
  86				 bool sk2_ipv6only, bool match_wildcard)
  87{
  88	if (!sk2_ipv6only) {
  89		if (sk1_rcv_saddr == sk2_rcv_saddr)
  90			return true;
  91		if (!sk1_rcv_saddr || !sk2_rcv_saddr)
  92			return match_wildcard;
  93	}
  94	return false;
  95}
  96
  97bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
  98			  bool match_wildcard)
  99{
 100#if IS_ENABLED(CONFIG_IPV6)
 101	if (sk->sk_family == AF_INET6)
 102		return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
 103					    inet6_rcv_saddr(sk2),
 104					    sk->sk_rcv_saddr,
 105					    sk2->sk_rcv_saddr,
 106					    ipv6_only_sock(sk),
 107					    ipv6_only_sock(sk2),
 108					    match_wildcard);
 109#endif
 110	return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
 111				    ipv6_only_sock(sk2), match_wildcard);
 112}
 113EXPORT_SYMBOL(inet_rcv_saddr_equal);
 114
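/*
 * Editorial note (not part of the kernel source): the wildcard rules
 * above are what make a second bind() to an overlapping address fail.
 * A minimal userspace sketch, assuming TCP and a free port 50000; the
 * second bind() is expected to fail with EADDRINUSE because 0.0.0.0
 * matches any local address. Sockets are deliberately left open for
 * the lifetime of the demo.
 */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>

static int bind_tcp(const char *addr, unsigned short port)
{
	struct sockaddr_in sin;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	inet_pton(AF_INET, addr, &sin.sin_addr);
	return bind(fd, (struct sockaddr *)&sin, sizeof(sin));
}

int main(void)
{
	printf("bind 0.0.0.0:50000   -> %d\n", bind_tcp("0.0.0.0", 50000));
	printf("bind 127.0.0.1:50000 -> %d\n", bind_tcp("127.0.0.1", 50000));
	return 0;
}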
 115void inet_get_local_port_range(struct net *net, int *low, int *high)
 116{
 117	unsigned int seq;
 118
 119	do {
 120		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
 121
 122		*low = net->ipv4.ip_local_ports.range[0];
 123		*high = net->ipv4.ip_local_ports.range[1];
 124	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
 125}
 126EXPORT_SYMBOL(inet_get_local_port_range);
 127
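/*
 * Editorial note: the [low, high] pair read above under the seqlock is
 * the same range exposed through procfs. A userspace sketch, assuming
 * the standard sysctl layout on Linux:
 */
#include <stdio.h>

int main(void)
{
	int low, high;
	FILE *f = fopen("/proc/sys/net/ipv4/ip_local_port_range", "r");

	if (f && fscanf(f, "%d %d", &low, &high) == 2)
		printf("ephemeral port range: [%d, %d]\n", low, high);
	if (f)
		fclose(f);
	return 0;
}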
 128static int inet_csk_bind_conflict(const struct sock *sk,
 129				  const struct inet_bind_bucket *tb,
 130				  bool relax, bool reuseport_ok)
 131{
 132	struct sock *sk2;
 133	bool reuse = sk->sk_reuse;
 134	bool reuseport = !!sk->sk_reuseport && reuseport_ok;
 135	kuid_t uid = sock_i_uid((struct sock *)sk);
 136
 137	/*
 138	 * Unlike other sk lookup places we do not check
 139	 * for sk_net here, since _all_ the socks listed
 140	 * in tb->owners list belong to the same net - the
 141	 * one this bucket belongs to.
 142	 */
 143
 144	sk_for_each_bound(sk2, &tb->owners) {
 145		if (sk != sk2 &&
 146		    (!sk->sk_bound_dev_if ||
 147		     !sk2->sk_bound_dev_if ||
 148		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
 149			if ((!reuse || !sk2->sk_reuse ||
 150			    sk2->sk_state == TCP_LISTEN) &&
 151			    (!reuseport || !sk2->sk_reuseport ||
 152			     rcu_access_pointer(sk->sk_reuseport_cb) ||
 153			     (sk2->sk_state != TCP_TIME_WAIT &&
 154			     !uid_eq(uid, sock_i_uid(sk2))))) {
 155				if (inet_rcv_saddr_equal(sk, sk2, true))
 156					break;
 157			}
 158			if (!relax && reuse && sk2->sk_reuse &&
 159			    sk2->sk_state != TCP_LISTEN) {
 160				if (inet_rcv_saddr_equal(sk, sk2, true))
 161					break;
 162			}
 163		}
 164	}
 165	return sk2 != NULL;
 166}
 167
 168/*
 169 * Find an open port number for the socket.  Returns with the
 170 * inet_bind_hashbucket lock held.
 171 */
 172static struct inet_bind_hashbucket *
 173inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret, int *port_ret)
 174{
 175	struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
 176	int port = 0;
 177	struct inet_bind_hashbucket *head;
 178	struct net *net = sock_net(sk);
 179	int i, low, high, attempt_half;
 180	struct inet_bind_bucket *tb;
 181	u32 remaining, offset;
 182
 183	attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
 184other_half_scan:
 185	inet_get_local_port_range(net, &low, &high);
 186	high++; /* [32768, 60999] -> [32768, 61000[ */
 187	if (high - low < 4)
 188		attempt_half = 0;
 189	if (attempt_half) {
 190		int half = low + (((high - low) >> 2) << 1);
 191
 192		if (attempt_half == 1)
 193			high = half;
 194		else
 195			low = half;
 196	}
 197	remaining = high - low;
 198	if (likely(remaining > 1))
 199		remaining &= ~1U;
 200
 201	offset = prandom_u32() % remaining;
 202	/* __inet_hash_connect() favors ports having @low parity
 203	 * We do the opposite so as not to pollute connect() users.
 204	 */
 205	offset |= 1U;
 206
 207other_parity_scan:
 208	port = low + offset;
 209	for (i = 0; i < remaining; i += 2, port += 2) {
 210		if (unlikely(port >= high))
 211			port -= remaining;
 212		if (inet_is_local_reserved_port(net, port))
 213			continue;
 214		head = &hinfo->bhash[inet_bhashfn(net, port,
 215						  hinfo->bhash_size)];
 216		spin_lock_bh(&head->lock);
 217		inet_bind_bucket_for_each(tb, &head->chain)
 218			if (net_eq(ib_net(tb), net) && tb->port == port) {
 219				if (!inet_csk_bind_conflict(sk, tb, false, false))
 220					goto success;
 221				goto next_port;
 222			}
 223		tb = NULL;
 224		goto success;
 225next_port:
 226		spin_unlock_bh(&head->lock);
 227		cond_resched();
 228	}
 229
 230	offset--;
 231	if (!(offset & 1))
 232		goto other_parity_scan;
 233
 234	if (attempt_half == 1) {
 235		/* OK we now try the upper half of the range */
 236		attempt_half = 2;
 237		goto other_half_scan;
 238	}
 239	return NULL;
 240success:
 241	*port_ret = port;
 242	*tb_ret = tb;
 243	return head;
 244}
 245
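/*
 * Editorial note: a standalone model of the scan above. Only ports of
 * one parity are visited per pass, odd first (offset |= 1U), leaving
 * even ports to __inet_hash_connect(); the second pass (offset--)
 * covers the other parity. Sketch under the same assumptions of an
 * even "remaining" and a half-open [low, high) range:
 */
#include <stdio.h>

static void scan_one_parity(int low, int high, unsigned int offset)
{
	unsigned int remaining = high - low;
	unsigned int i;
	int port;

	if (remaining > 1)
		remaining &= ~1U;
	offset |= 1U;				/* force odd parity */
	port = low + offset;
	for (i = 0; i < remaining; i += 2, port += 2) {
		if (port >= high)
			port -= remaining;	/* wrap within the range */
		printf("%d ", port);
	}
	printf("\n");
}

int main(void)
{
	scan_one_parity(32768, 32778, 4);	/* visits only the odd ports */
	return 0;
}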
 246static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
 247				     struct sock *sk)
 248{
 249	kuid_t uid = sock_i_uid(sk);
 250
 251	if (tb->fastreuseport <= 0)
 252		return 0;
 253	if (!sk->sk_reuseport)
 254		return 0;
 255	if (rcu_access_pointer(sk->sk_reuseport_cb))
 256		return 0;
 257	if (!uid_eq(tb->fastuid, uid))
 258		return 0;
 259	/* We only need to check the rcv_saddr if this tb was once marked
 260	 * without fastreuseport and then was reset, as we can only know that
 261	 * the fast_*rcv_saddr doesn't have any conflicts with the socks on the
 262	 * owners list.
 263	 */
 264	if (tb->fastreuseport == FASTREUSEPORT_ANY)
 265		return 1;
 266#if IS_ENABLED(CONFIG_IPV6)
 267	if (tb->fast_sk_family == AF_INET6)
 268		return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
 269					    inet6_rcv_saddr(sk),
 270					    tb->fast_rcv_saddr,
 271					    sk->sk_rcv_saddr,
 272					    tb->fast_ipv6_only,
 273					    ipv6_only_sock(sk), true);
 274#endif
 275	return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
 276				    ipv6_only_sock(sk), true);
 277}
 278
 279/* Obtain a reference to a local port for the given sock;
 280 * if snum is zero, select any available local port.
 281 * We try to allocate an odd port (and leave even ports for connect())
 282 */
 283int inet_csk_get_port(struct sock *sk, unsigned short snum)
 284{
 285	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
 286	struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
 287	int ret = 1, port = snum;
 288	struct inet_bind_hashbucket *head;
 289	struct net *net = sock_net(sk);
 290	struct inet_bind_bucket *tb = NULL;
 291	kuid_t uid = sock_i_uid(sk);
 292
 293	if (!port) {
 294		head = inet_csk_find_open_port(sk, &tb, &port);
 295		if (!head)
 296			return ret;
 297		if (!tb)
 298			goto tb_not_found;
 299		goto success;
 300	}
 301	head = &hinfo->bhash[inet_bhashfn(net, port,
 302					  hinfo->bhash_size)];
 303	spin_lock_bh(&head->lock);
 304	inet_bind_bucket_for_each(tb, &head->chain)
 305		if (net_eq(ib_net(tb), net) && tb->port == port)
 306			goto tb_found;
 307tb_not_found:
 308	tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
 309				     net, head, port);
 310	if (!tb)
 311		goto fail_unlock;
 312tb_found:
 313	if (!hlist_empty(&tb->owners)) {
 314		if (sk->sk_reuse == SK_FORCE_REUSE)
 315			goto success;
 316
 317		if ((tb->fastreuse > 0 && reuse) ||
 318		    sk_reuseport_match(tb, sk))
 319			goto success;
 320		if (inet_csk_bind_conflict(sk, tb, true, true))
 321			goto fail_unlock;
 322	}
 323success:
 324	if (hlist_empty(&tb->owners)) {
 325		tb->fastreuse = reuse;
 326		if (sk->sk_reuseport) {
 327			tb->fastreuseport = FASTREUSEPORT_ANY;
 328			tb->fastuid = uid;
 329			tb->fast_rcv_saddr = sk->sk_rcv_saddr;
 330			tb->fast_ipv6_only = ipv6_only_sock(sk);
 331			tb->fast_sk_family = sk->sk_family;
 332#if IS_ENABLED(CONFIG_IPV6)
 333			tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
 334#endif
 335		} else {
 336			tb->fastreuseport = 0;
 337		}
 338	} else {
 339		if (!reuse)
 340			tb->fastreuse = 0;
 341		if (sk->sk_reuseport) {
 342			/* We didn't match or we don't have fastreuseport set on
 343			 * the tb, but we have sk_reuseport set on this socket
 344			 * and we know that there are no bind conflicts with
 345			 * this socket in this tb, so reset our tb's reuseport
 346			 * settings so that any subsequent sockets that match
 347			 * our current socket will be put on the fast path.
 348			 *
 349			 * If we reset we need to set FASTREUSEPORT_STRICT so we
 350			 * do extra checking for all subsequent sk_reuseport
 351			 * socks.
 352			 */
 353			if (!sk_reuseport_match(tb, sk)) {
 354				tb->fastreuseport = FASTREUSEPORT_STRICT;
 355				tb->fastuid = uid;
 356				tb->fast_rcv_saddr = sk->sk_rcv_saddr;
 357				tb->fast_ipv6_only = ipv6_only_sock(sk);
 358				tb->fast_sk_family = sk->sk_family;
 359#if IS_ENABLED(CONFIG_IPV6)
 360				tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
 361#endif
 362			}
 363		} else {
 364			tb->fastreuseport = 0;
 365		}
 366	}
 367	if (!inet_csk(sk)->icsk_bind_hash)
 368		inet_bind_hash(sk, tb, port);
 369	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
 370	ret = 0;
 371
 372fail_unlock:
 373	spin_unlock_bh(&head->lock);
 374	return ret;
 375}
 376EXPORT_SYMBOL_GPL(inet_csk_get_port);
 377
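/*
 * Editorial note: the fastreuseport logic above is what allows several
 * sockets owned by the same uid to share one port when each sets
 * SO_REUSEPORT before bind(). Userspace sketch, assuming port 50001 is
 * free; both binds are expected to return 0:
 */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>

static int bind_reuseport(unsigned short port)
{
	struct sockaddr_in sin;
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	return bind(fd, (struct sockaddr *)&sin, sizeof(sin));
}

int main(void)
{
	printf("first bind  -> %d\n", bind_reuseport(50001));
	printf("second bind -> %d\n", bind_reuseport(50001));
	return 0;
}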
 378/*
 379 * Wait for an incoming connection, avoid race conditions. This must be called
 380 * with the socket locked.
 381 */
 382static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
 383{
 384	struct inet_connection_sock *icsk = inet_csk(sk);
 385	DEFINE_WAIT(wait);
 386	int err;
 387
 388	/*
 389	 * True wake-one mechanism for incoming connections: only
 390	 * one process gets woken up, not the 'whole herd'.
 391	 * Since we do not 'race & poll' for established sockets
 392	 * anymore, the common case will execute the loop only once.
 393	 *
 394	 * Subtle issue: "add_wait_queue_exclusive()" will be added
 395	 * after any current non-exclusive waiters, and we know that
 396	 * it will always _stay_ after any new non-exclusive waiters
 397	 * because all non-exclusive waiters are added at the
 398	 * beginning of the wait-queue. As such, it's ok to "drop"
 399	 * our exclusiveness temporarily when we get woken up without
 400	 * having to remove and re-insert us on the wait queue.
 401	 */
 402	for (;;) {
 403		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
 404					  TASK_INTERRUPTIBLE);
 405		release_sock(sk);
 406		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
 407			timeo = schedule_timeout(timeo);
 408		sched_annotate_sleep();
 409		lock_sock(sk);
 410		err = 0;
 411		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
 412			break;
 413		err = -EINVAL;
 414		if (sk->sk_state != TCP_LISTEN)
 415			break;
 416		err = sock_intr_errno(timeo);
 417		if (signal_pending(current))
 418			break;
 419		err = -EAGAIN;
 420		if (!timeo)
 421			break;
 422	}
 423	finish_wait(sk_sleep(sk), &wait);
 424	return err;
 425}
 426
 427/*
 428 * This will accept the next outstanding connection.
 429 */
 430struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
 431{
 432	struct inet_connection_sock *icsk = inet_csk(sk);
 433	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
 434	struct request_sock *req;
 435	struct sock *newsk;
 436	int error;
 437
 438	lock_sock(sk);
 439
 440	/* We need to make sure that this socket is listening,
 441	 * and that it has something pending.
 442	 */
 443	error = -EINVAL;
 444	if (sk->sk_state != TCP_LISTEN)
 445		goto out_err;
 446
 447	/* Find already established connection */
 448	if (reqsk_queue_empty(queue)) {
 449		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
 450
 451		/* If this is a non blocking socket don't sleep */
 452		error = -EAGAIN;
 453		if (!timeo)
 454			goto out_err;
 455
 456		error = inet_csk_wait_for_connect(sk, timeo);
 457		if (error)
 458			goto out_err;
 459	}
 460	req = reqsk_queue_remove(queue, sk);
 461	newsk = req->sk;
 462
 463	if (sk->sk_protocol == IPPROTO_TCP &&
 464	    tcp_rsk(req)->tfo_listener) {
 465		spin_lock_bh(&queue->fastopenq.lock);
 466		if (tcp_rsk(req)->tfo_listener) {
 467			/* We are still waiting for the final ACK from 3WHS
 468			 * so can't free req now. Instead, we set req->sk to
 469			 * NULL to signify that the child socket is taken
 470			 * so reqsk_fastopen_remove() will free the req
 471			 * when 3WHS finishes (or is aborted).
 472			 */
 473			req->sk = NULL;
 474			req = NULL;
 475		}
 476		spin_unlock_bh(&queue->fastopenq.lock);
 477	}
 478out:
 479	release_sock(sk);
 480	if (req)
 481		reqsk_put(req);
 482	return newsk;
 483out_err:
 484	newsk = NULL;
 485	req = NULL;
 486	*err = error;
 487	goto out;
 488}
 489EXPORT_SYMBOL(inet_csk_accept);
 490
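/*
 * Editorial note: the -EAGAIN return above (timeo == 0) is what a
 * non-blocking listener observes. Typical userspace handling, assuming
 * lfd is a listening socket opened with SOCK_NONBLOCK:
 */
#include <errno.h>
#include <sys/socket.h>

static int accept_one(int lfd)
{
	int cfd = accept(lfd, NULL, NULL);

	if (cfd < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
		return -1;	/* nothing queued yet; poll() and retry */
	return cfd;		/* connected fd, or < 0 on a real error */
}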
 491/*
 492 * Using different timers for retransmit, delayed acks and probes
 493 * We may wish to use just one timer maintaining a list of expiry jiffies
 494 * to optimize.
 495 */
 496void inet_csk_init_xmit_timers(struct sock *sk,
 497			       void (*retransmit_handler)(struct timer_list *t),
 498			       void (*delack_handler)(struct timer_list *t),
 499			       void (*keepalive_handler)(struct timer_list *t))
 500{
 501	struct inet_connection_sock *icsk = inet_csk(sk);
 502
 503	timer_setup(&icsk->icsk_retransmit_timer, retransmit_handler, 0);
 504	timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
 505	timer_setup(&sk->sk_timer, keepalive_handler, 0);
 506	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
 507}
 508EXPORT_SYMBOL(inet_csk_init_xmit_timers);
 509
 510void inet_csk_clear_xmit_timers(struct sock *sk)
 511{
 512	struct inet_connection_sock *icsk = inet_csk(sk);
 513
 514	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;
 515
 516	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
 517	sk_stop_timer(sk, &icsk->icsk_delack_timer);
 518	sk_stop_timer(sk, &sk->sk_timer);
 519}
 520EXPORT_SYMBOL(inet_csk_clear_xmit_timers);
 521
 522void inet_csk_delete_keepalive_timer(struct sock *sk)
 523{
 524	sk_stop_timer(sk, &sk->sk_timer);
 525}
 526EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);
 527
 528void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
 529{
 530	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
 531}
 532EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
 533
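/*
 * Editorial note: sk->sk_timer above is the keepalive timer. It is
 * armed from userspace with SO_KEEPALIVE; TCP_KEEPIDLE tunes when the
 * first probe fires. Sketch, assuming fd is a connected TCP socket:
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static void enable_keepalive(int fd)
{
	int on = 1;
	int idle = 60;	/* seconds of idleness before the first probe */

	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
}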
 534struct dst_entry *inet_csk_route_req(const struct sock *sk,
 535				     struct flowi4 *fl4,
 536				     const struct request_sock *req)
 537{
 538	const struct inet_request_sock *ireq = inet_rsk(req);
 539	struct net *net = read_pnet(&ireq->ireq_net);
 540	struct ip_options_rcu *opt;
 541	struct rtable *rt;
 542
 543	opt = ireq_opt_deref(ireq);
 544
 545	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
 546			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 547			   sk->sk_protocol, inet_sk_flowi_flags(sk),
 548			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
 549			   ireq->ir_loc_addr, ireq->ir_rmt_port,
 550			   htons(ireq->ir_num), sk->sk_uid);
 551	security_req_classify_flow(req, flowi4_to_flowi(fl4));
 552	rt = ip_route_output_flow(net, fl4, sk);
 553	if (IS_ERR(rt))
 554		goto no_route;
 555	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
 556		goto route_err;
 557	return &rt->dst;
 558
 559route_err:
 560	ip_rt_put(rt);
 561no_route:
 562	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 563	return NULL;
 564}
 565EXPORT_SYMBOL_GPL(inet_csk_route_req);
 566
 567struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
 568					    struct sock *newsk,
 569					    const struct request_sock *req)
 570{
 571	const struct inet_request_sock *ireq = inet_rsk(req);
 572	struct net *net = read_pnet(&ireq->ireq_net);
 573	struct inet_sock *newinet = inet_sk(newsk);
 574	struct ip_options_rcu *opt;
 575	struct flowi4 *fl4;
 576	struct rtable *rt;
 577
 578	opt = rcu_dereference(ireq->ireq_opt);
 579	fl4 = &newinet->cork.fl.u.ip4;
 580
 581	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
 582			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 583			   sk->sk_protocol, inet_sk_flowi_flags(sk),
 584			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
 585			   ireq->ir_loc_addr, ireq->ir_rmt_port,
 586			   htons(ireq->ir_num), sk->sk_uid);
 587	security_req_classify_flow(req, flowi4_to_flowi(fl4));
 588	rt = ip_route_output_flow(net, fl4, sk);
 589	if (IS_ERR(rt))
 590		goto no_route;
 591	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
 592		goto route_err;
 593	return &rt->dst;
 594
 595route_err:
 596	ip_rt_put(rt);
 597no_route:
 598	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 599	return NULL;
 600}
 601EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
 602
 603#if IS_ENABLED(CONFIG_IPV6)
 604#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
 605#else
 606#define AF_INET_FAMILY(fam) true
 607#endif
 608
 609/* Decide when to expire the request and when to resend SYN-ACK */
 610static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
 611				  const int max_retries,
 612				  const u8 rskq_defer_accept,
 613				  int *expire, int *resend)
 614{
 615	if (!rskq_defer_accept) {
 616		*expire = req->num_timeout >= thresh;
 617		*resend = 1;
 618		return;
 619	}
 620	*expire = req->num_timeout >= thresh &&
 621		  (!inet_rsk(req)->acked || req->num_timeout >= max_retries);
 622	/*
 623	 * Do not resend while waiting for data after ACK;
 624	 * start resending at the end of the deferring period to give
 625	 * a last chance for data or an ACK to create an established socket.
 626	 */
 627	*resend = !inet_rsk(req)->acked ||
 628		  req->num_timeout >= rskq_defer_accept - 1;
 629}
 630
 631int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
 632{
 633	int err = req->rsk_ops->rtx_syn_ack(parent, req);
 634
 635	if (!err)
 636		req->num_retrans++;
 637	return err;
 638}
 639EXPORT_SYMBOL(inet_rtx_syn_ack);
 640
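/*
 * Editorial note: the rskq_defer_accept value consumed by
 * syn_ack_recalc() above comes from the TCP_DEFER_ACCEPT socket
 * option: the listener then holds back a completed handshake until
 * data arrives or the deferral window lapses. Sketch, assuming lfd is
 * a listening TCP socket:
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static void defer_until_data(int lfd)
{
	int secs = 5;	/* approximate wait for the first data segment */

	setsockopt(lfd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &secs, sizeof(secs));
}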
 641/* return true if req was found in the ehash table */
 642static bool reqsk_queue_unlink(struct request_sock_queue *queue,
 643			       struct request_sock *req)
 644{
 645	struct inet_hashinfo *hashinfo = req_to_sk(req)->sk_prot->h.hashinfo;
 646	bool found = false;
 647
 648	if (sk_hashed(req_to_sk(req))) {
 649		spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);
 650
 651		spin_lock(lock);
 652		found = __sk_nulls_del_node_init_rcu(req_to_sk(req));
 653		spin_unlock(lock);
 654	}
 655	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
 656		reqsk_put(req);
 657	return found;
 658}
 659
 660void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
 661{
 662	if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) {
 663		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
 664		reqsk_put(req);
 665	}
 666}
 667EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
 668
 669void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
 670{
 671	inet_csk_reqsk_queue_drop(sk, req);
 672	reqsk_put(req);
 673}
 674EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);
 675
 676static void reqsk_timer_handler(struct timer_list *t)
 677{
 678	struct request_sock *req = from_timer(req, t, rsk_timer);
 679	struct sock *sk_listener = req->rsk_listener;
 680	struct net *net = sock_net(sk_listener);
 681	struct inet_connection_sock *icsk = inet_csk(sk_listener);
 682	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
 683	int qlen, expire = 0, resend = 0;
 684	int max_retries, thresh;
 685	u8 defer_accept;
 686
 687	if (inet_sk_state_load(sk_listener) != TCP_LISTEN)
 688		goto drop;
 689
 690	max_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
 691	thresh = max_retries;
 692	/* Normally all the openreqs are young and become mature
 693	 * (i.e. converted to an established socket) within the first timeout.
 694	 * If synack was not acknowledged for 1 second, it means
 695	 * one of the following things: synack was lost, ack was lost,
 696	 * rtt is high or nobody planned to ack (i.e. synflood).
 697	 * When the server is a bit loaded, the queue fills with old
 698	 * open requests, reducing the effective size of the queue.
 699	 * When the server is well loaded, the queue size drops to zero
 700	 * after several minutes of work. This is not a synflood;
 701	 * it is normal operation. The solution is to prune
 702	 * entries that are too old, overriding the normal timeout when
 703	 * the situation becomes dangerous.
 704	 *
 705	 * Essentially, we reserve half of the room for young
 706	 * embryos, and abort old ones without pity if the old
 707	 * ones are about to clog our table.
 708	 */
 709	qlen = reqsk_queue_len(queue);
 710	if ((qlen << 1) > max(8U, sk_listener->sk_max_ack_backlog)) {
 711		int young = reqsk_queue_len_young(queue) << 1;
 712
 713		while (thresh > 2) {
 714			if (qlen < young)
 715				break;
 716			thresh--;
 717			young <<= 1;
 718		}
 719	}
 720	defer_accept = READ_ONCE(queue->rskq_defer_accept);
 721	if (defer_accept)
 722		max_retries = defer_accept;
 723	syn_ack_recalc(req, thresh, max_retries, defer_accept,
 724		       &expire, &resend);
 725	req->rsk_ops->syn_ack_timeout(req);
 726	if (!expire &&
 727	    (!resend ||
 728	     !inet_rtx_syn_ack(sk_listener, req) ||
 729	     inet_rsk(req)->acked)) {
 730		unsigned long timeo;
 731
 732		if (req->num_timeout++ == 0)
 733			atomic_dec(&queue->young);
 734		timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
 735		mod_timer(&req->rsk_timer, jiffies + timeo);
 736		return;
 737	}
 738drop:
 739	inet_csk_reqsk_queue_drop_and_put(sk_listener, req);
 740}
 741
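/*
 * Editorial note: a standalone model of the pruning heuristic in
 * reqsk_timer_handler() above. While old requests outnumber twice the
 * young ones, the retry threshold shrinks so that stale embryonic
 * connections are aborted sooner:
 */
static int prune_thresh(int max_retries, int qlen, int qlen_young,
			int max_ack_backlog)
{
	int thresh = max_retries;
	int young = qlen_young << 1;
	int floor = max_ack_backlog > 8 ? max_ack_backlog : 8;

	if ((qlen << 1) > floor) {
		while (thresh > 2) {
			if (qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}
	return thresh;
}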
 742static void reqsk_queue_hash_req(struct request_sock *req,
 743				 unsigned long timeout)
 744{
 745	req->num_retrans = 0;
 746	req->num_timeout = 0;
 747	req->sk = NULL;
 748
 749	timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
 750	mod_timer(&req->rsk_timer, jiffies + timeout);
 751
 752	inet_ehash_insert(req_to_sk(req), NULL);
 753	/* before letting lookups find us, make sure all req fields
 754	 * are committed to memory and refcnt initialized.
 755	 */
 756	smp_wmb();
 757	refcount_set(&req->rsk_refcnt, 2 + 1);
 758}
 759
 760void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
 761				   unsigned long timeout)
 762{
 763	reqsk_queue_hash_req(req, timeout);
 764	inet_csk_reqsk_queue_added(sk);
 765}
 766EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
 767
 768/**
 769 *	inet_csk_clone_lock - clone an inet socket, and lock its clone
 770 *	@sk: the socket to clone
 771 *	@req: request_sock
 772 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 773 *
 774 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 775 */
 776struct sock *inet_csk_clone_lock(const struct sock *sk,
 777				 const struct request_sock *req,
 778				 const gfp_t priority)
 779{
 780	struct sock *newsk = sk_clone_lock(sk, priority);
 781
 782	if (newsk) {
 783		struct inet_connection_sock *newicsk = inet_csk(newsk);
 784
 785		inet_sk_set_state(newsk, TCP_SYN_RECV);
 786		newicsk->icsk_bind_hash = NULL;
 787
 788		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
 789		inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
 790		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
 791
 792		/* listeners have SOCK_RCU_FREE, not the children */
 793		sock_reset_flag(newsk, SOCK_RCU_FREE);
 794
 795		inet_sk(newsk)->mc_list = NULL;
 796
 797		newsk->sk_mark = inet_rsk(req)->ir_mark;
 798		atomic64_set(&newsk->sk_cookie,
 799			     atomic64_read(&inet_rsk(req)->ir_cookie));
 800
 801		newicsk->icsk_retransmits = 0;
 802		newicsk->icsk_backoff	  = 0;
 803		newicsk->icsk_probes_out  = 0;
 804
 805		/* Deinitialize accept_queue to trap illegal accesses. */
 806		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
 807
 808		security_inet_csk_clone(newsk, req);
 809	}
 810	return newsk;
 811}
 812EXPORT_SYMBOL_GPL(inet_csk_clone_lock);
 813
 814/*
 815 * At this point, there should be no process reference to this
 816 * socket, and thus no user references at all.  Therefore we
 817 * can assume the socket waitqueue is inactive and nobody will
 818 * try to jump onto it.
 819 */
 820void inet_csk_destroy_sock(struct sock *sk)
 821{
 822	WARN_ON(sk->sk_state != TCP_CLOSE);
 823	WARN_ON(!sock_flag(sk, SOCK_DEAD));
 824
 825	/* It cannot be in hash table! */
 826	WARN_ON(!sk_unhashed(sk));
 827
 828	/* If inet_sk(sk)->inet_num is non-zero, the socket must be bound */
 829	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);
 830
 831	sk->sk_prot->destroy(sk);
 832
 833	sk_stream_kill_queues(sk);
 834
 835	xfrm_sk_free_policy(sk);
 836
 837	sk_refcnt_debug_release(sk);
 838
 839	percpu_counter_dec(sk->sk_prot->orphan_count);
 840
 841	sock_put(sk);
 842}
 843EXPORT_SYMBOL(inet_csk_destroy_sock);
 844
 845/* This function makes it possible to force the closure of a socket after
 846 * the call to tcp/dccp_create_openreq_child().
 847 */
 848void inet_csk_prepare_forced_close(struct sock *sk)
 849	__releases(&sk->sk_lock.slock)
 850{
 851	/* sk_clone_lock locked the socket and set refcnt to 2 */
 852	bh_unlock_sock(sk);
 853	sock_put(sk);
 854
 855	/* The below has to be done to allow calling inet_csk_destroy_sock */
 856	sock_set_flag(sk, SOCK_DEAD);
 857	percpu_counter_inc(sk->sk_prot->orphan_count);
 858	inet_sk(sk)->inet_num = 0;
 859}
 860EXPORT_SYMBOL(inet_csk_prepare_forced_close);
 861
 862int inet_csk_listen_start(struct sock *sk, int backlog)
 863{
 864	struct inet_connection_sock *icsk = inet_csk(sk);
 865	struct inet_sock *inet = inet_sk(sk);
 866	int err = -EADDRINUSE;
 867
 868	reqsk_queue_alloc(&icsk->icsk_accept_queue);
 869
 870	sk->sk_max_ack_backlog = backlog;
 871	sk->sk_ack_backlog = 0;
 872	inet_csk_delack_init(sk);
 873
 874	/* There is a race window here: we announce ourselves listening,
 875	 * but this transition is still not validated by get_port().
 876	 * It is OK, because this socket enters the hash table only
 877	 * after validation is complete.
 878	 */
 879	inet_sk_state_store(sk, TCP_LISTEN);
 880	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
 881		inet->inet_sport = htons(inet->inet_num);
 882
 883		sk_dst_reset(sk);
 884		err = sk->sk_prot->hash(sk);
 885
 886		if (likely(!err))
 887			return 0;
 888	}
 889
 890	inet_sk_set_state(sk, TCP_CLOSE);
 891	return err;
 892}
 893EXPORT_SYMBOL_GPL(inet_csk_listen_start);
 894
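/*
 * Editorial note: inet_csk_listen_start() above is reached from the
 * listen() system call; its backlog argument becomes
 * sk_max_ack_backlog. Minimal userspace counterpart (port 50002 is an
 * arbitrary example):
 */
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>

static int make_listener(void)
{
	struct sockaddr_in sin;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(50002);
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0 ||
	    listen(fd, 128) < 0)	/* 128 becomes sk_max_ack_backlog */
		return -1;
	return fd;
}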
 895static void inet_child_forget(struct sock *sk, struct request_sock *req,
 896			      struct sock *child)
 897{
 898	sk->sk_prot->disconnect(child, O_NONBLOCK);
 899
 900	sock_orphan(child);
 901
 902	percpu_counter_inc(sk->sk_prot->orphan_count);
 903
 904	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
 905		BUG_ON(tcp_sk(child)->fastopen_rsk != req);
 906		BUG_ON(sk != req->rsk_listener);
 907
 908		/* Paranoid, to prevent race condition if
 909		 * an inbound pkt destined for child is
 910		 * blocked by sock lock in tcp_v4_rcv().
 911		 * Also to satisfy an assertion in
 912		 * tcp_v4_destroy_sock().
 913		 */
 914		tcp_sk(child)->fastopen_rsk = NULL;
 915	}
 916	inet_csk_destroy_sock(child);
 917}
 918
 919struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
 920				      struct request_sock *req,
 921				      struct sock *child)
 922{
 923	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
 924
 925	spin_lock(&queue->rskq_lock);
 926	if (unlikely(sk->sk_state != TCP_LISTEN)) {
 927		inet_child_forget(sk, req, child);
 928		child = NULL;
 929	} else {
 930		req->sk = child;
 931		req->dl_next = NULL;
 932		if (queue->rskq_accept_head == NULL)
 933			queue->rskq_accept_head = req;
 934		else
 935			queue->rskq_accept_tail->dl_next = req;
 936		queue->rskq_accept_tail = req;
 937		sk_acceptq_added(sk);
 938	}
 939	spin_unlock(&queue->rskq_lock);
 940	return child;
 941}
 942EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
 943
 944struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
 945					 struct request_sock *req, bool own_req)
 946{
 947	if (own_req) {
 948		inet_csk_reqsk_queue_drop(sk, req);
 949		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
 950		if (inet_csk_reqsk_queue_add(sk, req, child))
 951			return child;
 952	}
 953	/* Too bad, another child took ownership of the request, undo. */
 954	bh_unlock_sock(child);
 955	sock_put(child);
 956	return NULL;
 957}
 958EXPORT_SYMBOL(inet_csk_complete_hashdance);
 959
 960/*
 961 *	This routine closes sockets which have been at least partially
 962 *	opened, but not yet accepted.
 963 */
 964void inet_csk_listen_stop(struct sock *sk)
 965{
 966	struct inet_connection_sock *icsk = inet_csk(sk);
 967	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
 968	struct request_sock *next, *req;
 969
 970	/* Following the specs, it would be better either to send a FIN
 971	 * (and enter FIN-WAIT-1; that is the normal close)
 972	 * or to send an active reset (abort).
 973	 * Certainly, it is pretty dangerous during a synflood, but that
 974	 * is a bad justification for our negligence 8)
 975	 * To be honest, we are not able to implement either
 976	 * of the variants now.			--ANK
 977	 */
 978	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
 979		struct sock *child = req->sk;
 980
 981		local_bh_disable();
 982		bh_lock_sock(child);
 983		WARN_ON(sock_owned_by_user(child));
 984		sock_hold(child);
 985
 986		inet_child_forget(sk, req, child);
 987		reqsk_put(req);
 988		bh_unlock_sock(child);
 989		local_bh_enable();
 990		sock_put(child);
 991
 992		cond_resched();
 993	}
 994	if (queue->fastopenq.rskq_rst_head) {
 995		/* Free all the reqs queued in rskq_rst_head. */
 996		spin_lock_bh(&queue->fastopenq.lock);
 997		req = queue->fastopenq.rskq_rst_head;
 998		queue->fastopenq.rskq_rst_head = NULL;
 999		spin_unlock_bh(&queue->fastopenq.lock);
1000		while (req != NULL) {
1001			next = req->dl_next;
1002			reqsk_put(req);
1003			req = next;
1004		}
1005	}
1006	WARN_ON_ONCE(sk->sk_ack_backlog);
1007}
1008EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
1009
1010void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
1011{
1012	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
1013	const struct inet_sock *inet = inet_sk(sk);
1014
1015	sin->sin_family		= AF_INET;
1016	sin->sin_addr.s_addr	= inet->inet_daddr;
1017	sin->sin_port		= inet->inet_dport;
1018}
1019EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);
1020
1021#ifdef CONFIG_COMPAT
1022int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
1023			       char __user *optval, int __user *optlen)
1024{
1025	const struct inet_connection_sock *icsk = inet_csk(sk);
1026
1027	if (icsk->icsk_af_ops->compat_getsockopt)
1028		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
1029							    optval, optlen);
1030	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
1031					     optval, optlen);
1032}
1033EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);
1034
1035int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
1036			       char __user *optval, unsigned int optlen)
1037{
1038	const struct inet_connection_sock *icsk = inet_csk(sk);
1039
1040	if (icsk->icsk_af_ops->compat_setsockopt)
1041		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
1042							    optval, optlen);
1043	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
1044					     optval, optlen);
1045}
1046EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
1047#endif
1048
1049static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
1050{
1051	const struct inet_sock *inet = inet_sk(sk);
1052	const struct ip_options_rcu *inet_opt;
1053	__be32 daddr = inet->inet_daddr;
1054	struct flowi4 *fl4;
1055	struct rtable *rt;
1056
1057	rcu_read_lock();
1058	inet_opt = rcu_dereference(inet->inet_opt);
1059	if (inet_opt && inet_opt->opt.srr)
1060		daddr = inet_opt->opt.faddr;
1061	fl4 = &fl->u.ip4;
1062	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
1063				   inet->inet_saddr, inet->inet_dport,
1064				   inet->inet_sport, sk->sk_protocol,
1065				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
1066	if (IS_ERR(rt))
1067		rt = NULL;
1068	if (rt)
1069		sk_setup_caps(sk, &rt->dst);
1070	rcu_read_unlock();
1071
1072	return &rt->dst;
1073}
1074
1075struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
1076{
1077	struct dst_entry *dst = __sk_dst_check(sk, 0);
1078	struct inet_sock *inet = inet_sk(sk);
1079
1080	if (!dst) {
1081		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
1082		if (!dst)
1083			goto out;
1084	}
1085	dst->ops->update_pmtu(dst, sk, NULL, mtu);
1086
1087	dst = __sk_dst_check(sk, 0);
1088	if (!dst)
1089		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
1090out:
1091	return dst;
1092}
1093EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);
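/*
 * Editorial note: the path MTU maintained by inet_csk_update_pmtu()
 * above can be observed from userspace with the IP_MTU socket option
 * on a connected IPv4 socket. Sketch, assuming fd is such a socket:
 */
#include <netinet/in.h>
#include <sys/socket.h>

static int query_path_mtu(int fd)
{
	int mtu = 0;
	socklen_t len = sizeof(mtu);

	if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) < 0)
		return -1;
	return mtu;
}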
net/ipv4/inet_connection_sock.c (v6.2)
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the  BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		Support for INET connection oriented protocols.
   8 *
   9 * Authors:	See the TCP sources
  10 */
  11
  12#include <linux/module.h>
  13#include <linux/jhash.h>
  14
  15#include <net/inet_connection_sock.h>
  16#include <net/inet_hashtables.h>
  17#include <net/inet_timewait_sock.h>
  18#include <net/ip.h>
  19#include <net/route.h>
  20#include <net/tcp_states.h>
  21#include <net/xfrm.h>
  22#include <net/tcp.h>
  23#include <net/sock_reuseport.h>
  24#include <net/addrconf.h>
  25
  26#if IS_ENABLED(CONFIG_IPV6)
  27/* match_sk*_wildcard == true:  IPV6_ADDR_ANY matches any IPv6 address
  28 *				if IPv6 only, and any IPv4 address
  29 *				if not IPv6 only
  30 * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
  31 *				IPV6_ADDR_ANY only matches IPV6_ADDR_ANY,
  32 *				and 0.0.0.0 matches 0.0.0.0 only
  33 */
  34static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
  35				 const struct in6_addr *sk2_rcv_saddr6,
  36				 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
  37				 bool sk1_ipv6only, bool sk2_ipv6only,
  38				 bool match_sk1_wildcard,
  39				 bool match_sk2_wildcard)
  40{
  41	int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
  42	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
  43
  44	/* if both are mapped, treat as IPv4 */
  45	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
  46		if (!sk2_ipv6only) {
  47			if (sk1_rcv_saddr == sk2_rcv_saddr)
  48				return true;
  49			return (match_sk1_wildcard && !sk1_rcv_saddr) ||
  50				(match_sk2_wildcard && !sk2_rcv_saddr);
  51		}
  52		return false;
  53	}
  54
  55	if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
  56		return true;
  57
  58	if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard &&
  59	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
  60		return true;
  61
  62	if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard &&
  63	    !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
  64		return true;
  65
  66	if (sk2_rcv_saddr6 &&
  67	    ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
  68		return true;
  69
  70	return false;
  71}
  72#endif
  73
  74/* match_sk*_wildcard == true:  0.0.0.0 matches any IPv4 address
  75 * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
  76 *				0.0.0.0 only matches 0.0.0.0
  77 */
  78static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
  79				 bool sk2_ipv6only, bool match_sk1_wildcard,
  80				 bool match_sk2_wildcard)
  81{
  82	if (!sk2_ipv6only) {
  83		if (sk1_rcv_saddr == sk2_rcv_saddr)
  84			return true;
  85		return (match_sk1_wildcard && !sk1_rcv_saddr) ||
  86			(match_sk2_wildcard && !sk2_rcv_saddr);
  87	}
  88	return false;
  89}
  90
  91bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
  92			  bool match_wildcard)
  93{
  94#if IS_ENABLED(CONFIG_IPV6)
  95	if (sk->sk_family == AF_INET6)
  96		return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
  97					    inet6_rcv_saddr(sk2),
  98					    sk->sk_rcv_saddr,
  99					    sk2->sk_rcv_saddr,
 100					    ipv6_only_sock(sk),
 101					    ipv6_only_sock(sk2),
 102					    match_wildcard,
 103					    match_wildcard);
 104#endif
 105	return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
 106				    ipv6_only_sock(sk2), match_wildcard,
 107				    match_wildcard);
 108}
 109EXPORT_SYMBOL(inet_rcv_saddr_equal);
 110
 111bool inet_rcv_saddr_any(const struct sock *sk)
 112{
 113#if IS_ENABLED(CONFIG_IPV6)
 114	if (sk->sk_family == AF_INET6)
 115		return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
 116#endif
 117	return !sk->sk_rcv_saddr;
 118}
 119
 120void inet_get_local_port_range(struct net *net, int *low, int *high)
 121{
 122	unsigned int seq;
 123
 124	do {
 125		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
 126
 127		*low = net->ipv4.ip_local_ports.range[0];
 128		*high = net->ipv4.ip_local_ports.range[1];
 129	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
 130}
 131EXPORT_SYMBOL(inet_get_local_port_range);
 132
 133static bool inet_use_bhash2_on_bind(const struct sock *sk)
 134{
 135#if IS_ENABLED(CONFIG_IPV6)
 136	if (sk->sk_family == AF_INET6) {
 137		int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
 138
 139		return addr_type != IPV6_ADDR_ANY &&
 140			addr_type != IPV6_ADDR_MAPPED;
 141	}
 142#endif
 143	return sk->sk_rcv_saddr != htonl(INADDR_ANY);
 144}
 145
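/*
 * Editorial note: the bhash2 path introduced above keys the conflict
 * search by (address, port) rather than by port alone, which makes the
 * long-standing rule cheap to check: sockets bound to different
 * specific addresses never conflict on the same port. Userspace
 * sketch, assuming both loopback addresses exist and port 50003 is
 * free; both binds are expected to succeed:
 */
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>

static int bind_specific(const char *addr, unsigned short port)
{
	struct sockaddr_in sin;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	inet_pton(AF_INET, addr, &sin.sin_addr);
	return bind(fd, (struct sockaddr *)&sin, sizeof(sin));
}

int main(void)
{
	int a = bind_specific("127.0.0.1", 50003);
	int b = bind_specific("127.0.0.2", 50003);

	return (a == 0 && b == 0) ? 0 : 1;
}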
 146static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
 147			       kuid_t sk_uid, bool relax,
 148			       bool reuseport_cb_ok, bool reuseport_ok)
 149{
 150	int bound_dev_if2;
 151
 152	if (sk == sk2)
 153		return false;
 154
 155	bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if);
 156
 157	if (!sk->sk_bound_dev_if || !bound_dev_if2 ||
 158	    sk->sk_bound_dev_if == bound_dev_if2) {
 159		if (sk->sk_reuse && sk2->sk_reuse &&
 160		    sk2->sk_state != TCP_LISTEN) {
 161			if (!relax || (!reuseport_ok && sk->sk_reuseport &&
 162				       sk2->sk_reuseport && reuseport_cb_ok &&
 163				       (sk2->sk_state == TCP_TIME_WAIT ||
 164					uid_eq(sk_uid, sock_i_uid(sk2)))))
 165				return true;
 166		} else if (!reuseport_ok || !sk->sk_reuseport ||
 167			   !sk2->sk_reuseport || !reuseport_cb_ok ||
 168			   (sk2->sk_state != TCP_TIME_WAIT &&
 169			    !uid_eq(sk_uid, sock_i_uid(sk2)))) {
 170			return true;
 171		}
 172	}
 173	return false;
 174}
 175
 176static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
 177				   kuid_t sk_uid, bool relax,
 178				   bool reuseport_cb_ok, bool reuseport_ok)
 179{
 180	if (sk->sk_family == AF_INET && ipv6_only_sock(sk2))
 181		return false;
 182
 183	return inet_bind_conflict(sk, sk2, sk_uid, relax,
 184				  reuseport_cb_ok, reuseport_ok);
 185}
 186
 187static bool inet_bhash2_conflict(const struct sock *sk,
 188				 const struct inet_bind2_bucket *tb2,
 189				 kuid_t sk_uid,
 190				 bool relax, bool reuseport_cb_ok,
 191				 bool reuseport_ok)
 192{
 193	struct inet_timewait_sock *tw2;
 194	struct sock *sk2;
 195
 196	sk_for_each_bound_bhash2(sk2, &tb2->owners) {
 197		if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
 198					   reuseport_cb_ok, reuseport_ok))
 199			return true;
 200	}
 201
 202	twsk_for_each_bound_bhash2(tw2, &tb2->deathrow) {
 203		sk2 = (struct sock *)tw2;
 204
 205		if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
 206					   reuseport_cb_ok, reuseport_ok))
 207			return true;
 208	}
 209
 210	return false;
 211}
 212
 213/* This should be called only when the tb and tb2 hashbuckets' locks are held */
 214static int inet_csk_bind_conflict(const struct sock *sk,
 215				  const struct inet_bind_bucket *tb,
 216				  const struct inet_bind2_bucket *tb2, /* may be null */
 217				  bool relax, bool reuseport_ok)
 218{
 219	bool reuseport_cb_ok;
 220	struct sock_reuseport *reuseport_cb;
 221	kuid_t uid = sock_i_uid((struct sock *)sk);
 222
 223	rcu_read_lock();
 224	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
 225	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
 226	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
 227	rcu_read_unlock();
 228
 229	/*
 230	 * Unlike other sk lookup places we do not check
 231	 * for sk_net here, since _all_ the socks listed
 232	 * in tb->owners and tb2->owners list belong
 233	 * to the same net - the one this bucket belongs to.
 234	 */
 235
 236	if (!inet_use_bhash2_on_bind(sk)) {
 237		struct sock *sk2;
 238
 239		sk_for_each_bound(sk2, &tb->owners)
 240			if (inet_bind_conflict(sk, sk2, uid, relax,
 241					       reuseport_cb_ok, reuseport_ok) &&
 242			    inet_rcv_saddr_equal(sk, sk2, true))
 243				return true;
 244
 245		return false;
 246	}
 247
 248	/* Conflicts with an existing IPV6_ADDR_ANY (if ipv6) or INADDR_ANY (if
 249	 * ipv4) should have been checked already. We need to do these two
 250	 * checks separately because their spinlocks have to be acquired/released
 251	 * independently of each other, to prevent possible deadlocks
 252	 */
 253	return tb2 && inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
 254					   reuseport_ok);
 255}
 256
 257/* Determine if there is a bind conflict with an existing IPV6_ADDR_ANY (if ipv6) or
 258 * INADDR_ANY (if ipv4) socket.
 259 *
 260 * Caller must hold bhash hashbucket lock with local bh disabled, to protect
 261 * against concurrent binds on the port for addr any
 262 */
 263static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l3mdev,
 264					  bool relax, bool reuseport_ok)
 265{
 266	kuid_t uid = sock_i_uid((struct sock *)sk);
 267	const struct net *net = sock_net(sk);
 268	struct sock_reuseport *reuseport_cb;
 269	struct inet_bind_hashbucket *head2;
 270	struct inet_bind2_bucket *tb2;
 271	bool reuseport_cb_ok;
 272
 273	rcu_read_lock();
 274	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
 275	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
 276	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
 277	rcu_read_unlock();
 278
 279	head2 = inet_bhash2_addr_any_hashbucket(sk, net, port);
 280
 281	spin_lock(&head2->lock);
 282
 283	inet_bind_bucket_for_each(tb2, &head2->chain)
 284		if (inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
 285			break;
 286
 287	if (tb2 && inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
 288					reuseport_ok)) {
 289		spin_unlock(&head2->lock);
 290		return true;
 291	}
 292
 293	spin_unlock(&head2->lock);
 294	return false;
 295}
 296
 297/*
 298 * Find an open port number for the socket.  Returns with the
 299 * inet_bind_hashbucket locks held if successful.
 300 */
 301static struct inet_bind_hashbucket *
 302inet_csk_find_open_port(const struct sock *sk, struct inet_bind_bucket **tb_ret,
 303			struct inet_bind2_bucket **tb2_ret,
 304			struct inet_bind_hashbucket **head2_ret, int *port_ret)
 305{
 306	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
 307	int i, low, high, attempt_half, port, l3mdev;
 308	struct inet_bind_hashbucket *head, *head2;
 309	struct net *net = sock_net(sk);
 310	struct inet_bind2_bucket *tb2;
 311	struct inet_bind_bucket *tb;
 312	u32 remaining, offset;
 313	bool relax = false;
 314
 315	l3mdev = inet_sk_bound_l3mdev(sk);
 316ports_exhausted:
 317	attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
 318other_half_scan:
 319	inet_get_local_port_range(net, &low, &high);
 320	high++; /* [32768, 60999] -> [32768, 61000[ */
 321	if (high - low < 4)
 322		attempt_half = 0;
 323	if (attempt_half) {
 324		int half = low + (((high - low) >> 2) << 1);
 325
 326		if (attempt_half == 1)
 327			high = half;
 328		else
 329			low = half;
 330	}
 331	remaining = high - low;
 332	if (likely(remaining > 1))
 333		remaining &= ~1U;
 334
 335	offset = get_random_u32_below(remaining);
 336	/* __inet_hash_connect() favors ports having @low parity
 337	 * We do the opposite so as not to pollute connect() users.
 338	 */
 339	offset |= 1U;
 340
 341other_parity_scan:
 342	port = low + offset;
 343	for (i = 0; i < remaining; i += 2, port += 2) {
 344		if (unlikely(port >= high))
 345			port -= remaining;
 346		if (inet_is_local_reserved_port(net, port))
 347			continue;
 348		head = &hinfo->bhash[inet_bhashfn(net, port,
 349						  hinfo->bhash_size)];
 350		spin_lock_bh(&head->lock);
 351		if (inet_use_bhash2_on_bind(sk)) {
 352			if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, relax, false))
 353				goto next_port;
 354		}
 355
 356		head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
 357		spin_lock(&head2->lock);
 358		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
 359		inet_bind_bucket_for_each(tb, &head->chain)
 360			if (inet_bind_bucket_match(tb, net, port, l3mdev)) {
 361				if (!inet_csk_bind_conflict(sk, tb, tb2,
 362							    relax, false))
 363					goto success;
 364				spin_unlock(&head2->lock);
 365				goto next_port;
 366			}
 367		tb = NULL;
 368		goto success;
 369next_port:
 370		spin_unlock_bh(&head->lock);
 371		cond_resched();
 372	}
 373
 374	offset--;
 375	if (!(offset & 1))
 376		goto other_parity_scan;
 377
 378	if (attempt_half == 1) {
 379		/* OK we now try the upper half of the range */
 380		attempt_half = 2;
 381		goto other_half_scan;
 382	}
 383
 384	if (READ_ONCE(net->ipv4.sysctl_ip_autobind_reuse) && !relax) {
 385		/* We still have a chance to connect to different destinations */
 386		relax = true;
 387		goto ports_exhausted;
 388	}
 389	return NULL;
 390success:
 391	*port_ret = port;
 392	*tb_ret = tb;
 393	*tb2_ret = tb2;
 394	*head2_ret = head2;
 395	return head;
 396}
 397
 398static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
 399				     struct sock *sk)
 400{
 401	kuid_t uid = sock_i_uid(sk);
 402
 403	if (tb->fastreuseport <= 0)
 404		return 0;
 405	if (!sk->sk_reuseport)
 406		return 0;
 407	if (rcu_access_pointer(sk->sk_reuseport_cb))
 408		return 0;
 409	if (!uid_eq(tb->fastuid, uid))
 410		return 0;
 411	/* We only need to check the rcv_saddr if this tb was once marked
 412	 * without fastreuseport and then was reset, as we can only know that
 413	 * the fast_*rcv_saddr doesn't have any conflicts with the socks on the
 414	 * owners list.
 415	 */
 416	if (tb->fastreuseport == FASTREUSEPORT_ANY)
 417		return 1;
 418#if IS_ENABLED(CONFIG_IPV6)
 419	if (tb->fast_sk_family == AF_INET6)
 420		return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
 421					    inet6_rcv_saddr(sk),
 422					    tb->fast_rcv_saddr,
 423					    sk->sk_rcv_saddr,
 424					    tb->fast_ipv6_only,
 425					    ipv6_only_sock(sk), true, false);
 426#endif
 427	return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
 428				    ipv6_only_sock(sk), true, false);
 429}
 430
 431void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
 432			       struct sock *sk)
 433{
 434	kuid_t uid = sock_i_uid(sk);
 435	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
 436
 437	if (hlist_empty(&tb->owners)) {
 438		tb->fastreuse = reuse;
 439		if (sk->sk_reuseport) {
 440			tb->fastreuseport = FASTREUSEPORT_ANY;
 441			tb->fastuid = uid;
 442			tb->fast_rcv_saddr = sk->sk_rcv_saddr;
 443			tb->fast_ipv6_only = ipv6_only_sock(sk);
 444			tb->fast_sk_family = sk->sk_family;
 445#if IS_ENABLED(CONFIG_IPV6)
 446			tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
 447#endif
 448		} else {
 449			tb->fastreuseport = 0;
 450		}
 451	} else {
 452		if (!reuse)
 453			tb->fastreuse = 0;
 454		if (sk->sk_reuseport) {
 455			/* We didn't match or we don't have fastreuseport set on
 456			 * the tb, but we have sk_reuseport set on this socket
 457			 * and we know that there are no bind conflicts with
 458			 * this socket in this tb, so reset our tb's reuseport
 459			 * settings so that any subsequent sockets that match
 460			 * our current socket will be put on the fast path.
 461			 *
 462			 * If we reset we need to set FASTREUSEPORT_STRICT so we
 463			 * do extra checking for all subsequent sk_reuseport
 464			 * socks.
 465			 */
 466			if (!sk_reuseport_match(tb, sk)) {
 467				tb->fastreuseport = FASTREUSEPORT_STRICT;
 468				tb->fastuid = uid;
 469				tb->fast_rcv_saddr = sk->sk_rcv_saddr;
 470				tb->fast_ipv6_only = ipv6_only_sock(sk);
 471				tb->fast_sk_family = sk->sk_family;
 472#if IS_ENABLED(CONFIG_IPV6)
 473				tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
 474#endif
 475			}
 476		} else {
 477			tb->fastreuseport = 0;
 478		}
 479	}
 480}
 481
 482/* Obtain a reference to a local port for the given sock;
 483 * if snum is zero, select any available local port.
 484 * We try to allocate an odd port (and leave even ports for connect())
 485 */
 486int inet_csk_get_port(struct sock *sk, unsigned short snum)
 487{
 488	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
 489	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
 490	bool found_port = false, check_bind_conflict = true;
 491	bool bhash_created = false, bhash2_created = false;
 492	int ret = -EADDRINUSE, port = snum, l3mdev;
 493	struct inet_bind_hashbucket *head, *head2;
 494	struct inet_bind2_bucket *tb2 = NULL;
 495	struct inet_bind_bucket *tb = NULL;
 496	bool head2_lock_acquired = false;
 497	struct net *net = sock_net(sk);
 498
 499	l3mdev = inet_sk_bound_l3mdev(sk);
 500
 501	if (!port) {
 502		head = inet_csk_find_open_port(sk, &tb, &tb2, &head2, &port);
 503		if (!head)
 504			return ret;
 505
 506		head2_lock_acquired = true;
 507
 508		if (tb && tb2)
 509			goto success;
 510		found_port = true;
 511	} else {
 512		head = &hinfo->bhash[inet_bhashfn(net, port,
 513						  hinfo->bhash_size)];
 514		spin_lock_bh(&head->lock);
 515		inet_bind_bucket_for_each(tb, &head->chain)
 516			if (inet_bind_bucket_match(tb, net, port, l3mdev))
 517				break;
 518	}
 519
 520	if (!tb) {
 521		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, net,
 522					     head, port, l3mdev);
 523		if (!tb)
 524			goto fail_unlock;
 525		bhash_created = true;
 526	}
 527
 528	if (!found_port) {
 529		if (!hlist_empty(&tb->owners)) {
 530			if (sk->sk_reuse == SK_FORCE_REUSE ||
 531			    (tb->fastreuse > 0 && reuse) ||
 532			    sk_reuseport_match(tb, sk))
 533				check_bind_conflict = false;
 534		}
 535
 536		if (check_bind_conflict && inet_use_bhash2_on_bind(sk)) {
 537			if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, true, true))
 538				goto fail_unlock;
 539		}
 540
 541		head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
 542		spin_lock(&head2->lock);
 543		head2_lock_acquired = true;
 544		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
 545	}
 546
 547	if (!tb2) {
 548		tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep,
 549					       net, head2, port, l3mdev, sk);
 550		if (!tb2)
 551			goto fail_unlock;
 552		bhash2_created = true;
 553	}
 554
 555	if (!found_port && check_bind_conflict) {
 556		if (inet_csk_bind_conflict(sk, tb, tb2, true, true))
 557			goto fail_unlock;
 558	}
 559
 560success:
 561	inet_csk_update_fastreuse(tb, sk);
 562
 563	if (!inet_csk(sk)->icsk_bind_hash)
 564		inet_bind_hash(sk, tb, tb2, port);
 565	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
 566	WARN_ON(inet_csk(sk)->icsk_bind2_hash != tb2);
 567	ret = 0;
 568
 569fail_unlock:
 570	if (ret) {
 571		if (bhash_created)
 572			inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
 573		if (bhash2_created)
 574			inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep,
 575						  tb2);
 576	}
 577	if (head2_lock_acquired)
 578		spin_unlock(&head2->lock);
 579	spin_unlock_bh(&head->lock);
 580	return ret;
 581}
 582EXPORT_SYMBOL_GPL(inet_csk_get_port);
 583
 584/*
 585 * Wait for an incoming connection, avoid race conditions. This must be called
 586 * with the socket locked.
 587 */
 588static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
 589{
 590	struct inet_connection_sock *icsk = inet_csk(sk);
 591	DEFINE_WAIT(wait);
 592	int err;
 593
 594	/*
 595	 * True wake-one mechanism for incoming connections: only
 596	 * one process gets woken up, not the 'whole herd'.
 597	 * Since we do not 'race & poll' for established sockets
 598	 * anymore, the common case will execute the loop only once.
 599	 *
 600	 * Subtle issue: "add_wait_queue_exclusive()" will be added
 601	 * after any current non-exclusive waiters, and we know that
 602	 * it will always _stay_ after any new non-exclusive waiters
 603	 * because all non-exclusive waiters are added at the
 604	 * beginning of the wait-queue. As such, it's ok to "drop"
 605	 * our exclusiveness temporarily when we get woken up without
 606	 * having to remove and re-insert us on the wait queue.
 607	 */
 608	for (;;) {
 609		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
 610					  TASK_INTERRUPTIBLE);
 611		release_sock(sk);
 612		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
 613			timeo = schedule_timeout(timeo);
 614		sched_annotate_sleep();
 615		lock_sock(sk);
 616		err = 0;
 617		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
 618			break;
 619		err = -EINVAL;
 620		if (sk->sk_state != TCP_LISTEN)
 621			break;
 622		err = sock_intr_errno(timeo);
 623		if (signal_pending(current))
 624			break;
 625		err = -EAGAIN;
 626		if (!timeo)
 627			break;
 628	}
 629	finish_wait(sk_sleep(sk), &wait);
 630	return err;
 631}
 632
 633/*
 634 * This will accept the next outstanding connection.
 635 */
 636struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
 637{
 638	struct inet_connection_sock *icsk = inet_csk(sk);
 639	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
 640	struct request_sock *req;
 641	struct sock *newsk;
 642	int error;
 643
 644	lock_sock(sk);
 645
 646	/* We need to make sure that this socket is listening,
 647	 * and that it has something pending.
 648	 */
 649	error = -EINVAL;
 650	if (sk->sk_state != TCP_LISTEN)
 651		goto out_err;
 652
 653	/* Find already established connection */
 654	if (reqsk_queue_empty(queue)) {
 655		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
 656
 657		/* If this is a non-blocking socket, don't sleep */
 658		error = -EAGAIN;
 659		if (!timeo)
 660			goto out_err;
 661
 662		error = inet_csk_wait_for_connect(sk, timeo);
 663		if (error)
 664			goto out_err;
 665	}
 666	req = reqsk_queue_remove(queue, sk);
 667	newsk = req->sk;
 668
 669	if (sk->sk_protocol == IPPROTO_TCP &&
 670	    tcp_rsk(req)->tfo_listener) {
 671		spin_lock_bh(&queue->fastopenq.lock);
 672		if (tcp_rsk(req)->tfo_listener) {
 673			/* We are still waiting for the final ACK from 3WHS
 674			 * so can't free req now. Instead, we set req->sk to
 675			 * NULL to signify that the child socket is taken
 676			 * so reqsk_fastopen_remove() will free the req
 677			 * when 3WHS finishes (or is aborted).
 678			 */
 679			req->sk = NULL;
 680			req = NULL;
 681		}
 682		spin_unlock_bh(&queue->fastopenq.lock);
 683	}
 684
 685out:
 686	release_sock(sk);
 687	if (newsk && mem_cgroup_sockets_enabled) {
 688		int amt;
 689
 690		/* Atomically get the memory usage, then set and charge
 691		 * newsk->sk_memcg.
 692		 */
 693		lock_sock(newsk);
 694
 695		/* The socket has not been accepted yet, no need to look at
 696		 * newsk->sk_wmem_queued.
 697		 */
 698		amt = sk_mem_pages(newsk->sk_forward_alloc +
 699				   atomic_read(&newsk->sk_rmem_alloc));
 700		mem_cgroup_sk_alloc(newsk);
 701		if (newsk->sk_memcg && amt)
 702			mem_cgroup_charge_skmem(newsk->sk_memcg, amt,
 703						GFP_KERNEL | __GFP_NOFAIL);
 704
 705		release_sock(newsk);
 706	}
 707	if (req)
 708		reqsk_put(req);
 709	return newsk;
 710out_err:
 711	newsk = NULL;
 712	req = NULL;
 713	*err = error;
 714	goto out;
 715}
 716EXPORT_SYMBOL(inet_csk_accept);
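
/* For illustration, how the error paths above surface to userspace (a
 * sketch; lfd is assumed to be a listening TCP socket):
 *
 *	int cfd = accept(lfd, NULL, NULL);
 *
 * A blocking socket sleeps in inet_csk_wait_for_connect() until a child
 * is queued.  With O_NONBLOCK, sock_rcvtimeo() yields 0, the -EAGAIN
 * above becomes errno == EAGAIN/EWOULDBLOCK, and the caller is expected
 * to retry after poll()/epoll_wait() reports the listener readable.
 */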
 717
 718/*
 719 * Using different timers for retransmit, delayed acks and probes.
 720 * We may wish to use just one timer maintaining a list of expiry
 721 * jiffies to optimize.
 722 */
 723void inet_csk_init_xmit_timers(struct sock *sk,
 724			       void (*retransmit_handler)(struct timer_list *t),
 725			       void (*delack_handler)(struct timer_list *t),
 726			       void (*keepalive_handler)(struct timer_list *t))
 727{
 728	struct inet_connection_sock *icsk = inet_csk(sk);
 729
 730	timer_setup(&icsk->icsk_retransmit_timer, retransmit_handler, 0);
 731	timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
 732	timer_setup(&sk->sk_timer, keepalive_handler, 0);
 733	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
 734}
 735EXPORT_SYMBOL(inet_csk_init_xmit_timers);
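
/* For reference, TCP hooks its handlers in from tcp_init_xmit_timers()
 * (net/ipv4/tcp_timer.c) roughly as:
 *
 *	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
 *				  &tcp_keepalive_timer);
 */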
 736
 737void inet_csk_clear_xmit_timers(struct sock *sk)
 738{
 739	struct inet_connection_sock *icsk = inet_csk(sk);
 740
 741	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
 742
 743	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
 744	sk_stop_timer(sk, &icsk->icsk_delack_timer);
 745	sk_stop_timer(sk, &sk->sk_timer);
 746}
 747EXPORT_SYMBOL(inet_csk_clear_xmit_timers);
 748
 749void inet_csk_delete_keepalive_timer(struct sock *sk)
 750{
 751	sk_stop_timer(sk, &sk->sk_timer);
 752}
 753EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);
 754
 755void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
 756{
 757	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
 758}
 759EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
 760
 761struct dst_entry *inet_csk_route_req(const struct sock *sk,
 762				     struct flowi4 *fl4,
 763				     const struct request_sock *req)
 764{
 765	const struct inet_request_sock *ireq = inet_rsk(req);
 766	struct net *net = read_pnet(&ireq->ireq_net);
 767	struct ip_options_rcu *opt;
 768	struct rtable *rt;
 769
 770	rcu_read_lock();
 771	opt = rcu_dereference(ireq->ireq_opt);
 772
 773	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
 774			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 775			   sk->sk_protocol, inet_sk_flowi_flags(sk),
 776			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
 777			   ireq->ir_loc_addr, ireq->ir_rmt_port,
 778			   htons(ireq->ir_num), sk->sk_uid);
 779	security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
 780	rt = ip_route_output_flow(net, fl4, sk);
 781	if (IS_ERR(rt))
 782		goto no_route;
 783	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
 784		goto route_err;
 785	rcu_read_unlock();
 786	return &rt->dst;
 787
 788route_err:
 789	ip_rt_put(rt);
 790no_route:
 791	rcu_read_unlock();
 792	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 793	return NULL;
 794}
 795EXPORT_SYMBOL_GPL(inet_csk_route_req);
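
/* A typical caller (sketch, based on tcp_v4_send_synack()): route the
 * SYN-ACK for a not-yet-accepted request, then transmit along the dst:
 *
 *	struct flowi4 fl4;
 *	struct dst_entry *dst = inet_csk_route_req(sk, &fl4, req);
 *
 *	if (!dst)
 *		return -1;
 *	skb = tcp_make_synack(sk, dst, req, ...);
 */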
 796
 797struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
 798					    struct sock *newsk,
 799					    const struct request_sock *req)
 800{
 801	const struct inet_request_sock *ireq = inet_rsk(req);
 802	struct net *net = read_pnet(&ireq->ireq_net);
 803	struct inet_sock *newinet = inet_sk(newsk);
 804	struct ip_options_rcu *opt;
 805	struct flowi4 *fl4;
 806	struct rtable *rt;
 807
 808	opt = rcu_dereference(ireq->ireq_opt);
 809	fl4 = &newinet->cork.fl.u.ip4;
 810
 811	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
 812			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 813			   sk->sk_protocol, inet_sk_flowi_flags(sk),
 814			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
 815			   ireq->ir_loc_addr, ireq->ir_rmt_port,
 816			   htons(ireq->ir_num), sk->sk_uid);
 817	security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
 818	rt = ip_route_output_flow(net, fl4, sk);
 819	if (IS_ERR(rt))
 820		goto no_route;
 821	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
 822		goto route_err;
 823	return &rt->dst;
 824
 825route_err:
 826	ip_rt_put(rt);
 827no_route:
 828	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 829	return NULL;
 830}
 831EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
 832
 833/* Decide when to expire the request and when to resend SYN-ACK */
 834static void syn_ack_recalc(struct request_sock *req,
 835			   const int max_syn_ack_retries,
 836			   const u8 rskq_defer_accept,
 837			   int *expire, int *resend)
 838{
 839	if (!rskq_defer_accept) {
 840		*expire = req->num_timeout >= max_syn_ack_retries;
 841		*resend = 1;
 842		return;
 843	}
 844	*expire = req->num_timeout >= max_syn_ack_retries &&
 845		  (!inet_rsk(req)->acked || req->num_timeout >= rskq_defer_accept);
 846	/* Do not resend while waiting for data after ACK,
 847	 * start to resend at the end of the deferring period to give
 848	 * a last chance for data or an ACK to create an established socket.
 849	 */
 850	*resend = !inet_rsk(req)->acked ||
 851		  req->num_timeout >= rskq_defer_accept - 1;
 852}
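
/* Worked example (illustrative): with rskq_defer_accept == 3 (set via
 * TCP_DEFER_ACCEPT) and max_syn_ack_retries == 5:
 * - req not yet ACKed:  *resend is always 1 and *expire fires once
 *   req->num_timeout reaches 5;
 * - req ACKed, no data: *resend stays 0 until num_timeout reaches 2
 *   (rskq_defer_accept - 1), giving the peer the deferring period to
 *   send data; *expire still fires at num_timeout >= 5.
 */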
 853
 854int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
 855{
 856	int err = req->rsk_ops->rtx_syn_ack(parent, req);
 857
 858	if (!err)
 859		req->num_retrans++;
 860	return err;
 861}
 862EXPORT_SYMBOL(inet_rtx_syn_ack);
 863
 864static struct request_sock *inet_reqsk_clone(struct request_sock *req,
 865					     struct sock *sk)
 866{
 867	struct sock *req_sk, *nreq_sk;
 868	struct request_sock *nreq;
 869
 870	nreq = kmem_cache_alloc(req->rsk_ops->slab, GFP_ATOMIC | __GFP_NOWARN);
 871	if (!nreq) {
 872		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
 873
 874		/* paired with refcount_inc_not_zero() in reuseport_migrate_sock() */
 875		sock_put(sk);
 876		return NULL;
 877	}
 878
 879	req_sk = req_to_sk(req);
 880	nreq_sk = req_to_sk(nreq);
 881
 882	memcpy(nreq_sk, req_sk,
 883	       offsetof(struct sock, sk_dontcopy_begin));
 884	memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
 885	       req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end));
 886
 887	sk_node_init(&nreq_sk->sk_node);
 888	nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
 889#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
 890	nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
 891#endif
 892	nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu;
 893
 894	nreq->rsk_listener = sk;
 895
 896	/* We need not acquire fastopenq->lock
 897	 * because the child socket is locked in inet_csk_listen_stop().
 898	 */
 899	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(nreq)->tfo_listener)
 900		rcu_assign_pointer(tcp_sk(nreq->sk)->fastopen_rsk, nreq);
 901
 902	return nreq;
 903}
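
/* The pair of memcpy()s above mirrors sock_copy(): struct sock keeps a
 * "don't copy" window delimited by sk_dontcopy_begin/sk_dontcopy_end
 * (node linkage, queue mappings, refcount), so everything before and
 * after the window is copied wholesale while the window itself is
 * re-initialized by hand - hence the explicit sk_node_init() and the
 * sk_tx_queue_mapping/sk_rx_queue_mapping/sk_incoming_cpu copies.
 */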
 904
 905static void reqsk_queue_migrated(struct request_sock_queue *queue,
 906				 const struct request_sock *req)
 907{
 908	if (req->num_timeout == 0)
 909		atomic_inc(&queue->young);
 910	atomic_inc(&queue->qlen);
 911}
 912
 913static void reqsk_migrate_reset(struct request_sock *req)
 914{
 915	req->saved_syn = NULL;
 916#if IS_ENABLED(CONFIG_IPV6)
 917	inet_rsk(req)->ipv6_opt = NULL;
 918	inet_rsk(req)->pktopts = NULL;
 919#else
 920	inet_rsk(req)->ireq_opt = NULL;
 921#endif
 922}
 923
 924/* return true if req was found in the ehash table */
 925static bool reqsk_queue_unlink(struct request_sock *req)
 926{
 927	struct sock *sk = req_to_sk(req);
 928	bool found = false;
 929
 930	if (sk_hashed(sk)) {
 931		struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
 932		spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);
 933
 934		spin_lock(lock);
 935		found = __sk_nulls_del_node_init_rcu(sk);
 936		spin_unlock(lock);
 937	}
 938	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
 939		reqsk_put(req);
 940	return found;
 941}
 942
 943bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
 944{
 945	bool unlinked = reqsk_queue_unlink(req);
 946
 947	if (unlinked) {
 948		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
 949		reqsk_put(req);
 950	}
 951	return unlinked;
 952}
 953EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
 954
 955void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
 956{
 957	inet_csk_reqsk_queue_drop(sk, req);
 958	reqsk_put(req);
 959}
 960EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);
 961
 962static void reqsk_timer_handler(struct timer_list *t)
 963{
 964	struct request_sock *req = from_timer(req, t, rsk_timer);
 965	struct request_sock *nreq = NULL, *oreq = req;
 966	struct sock *sk_listener = req->rsk_listener;
 967	struct inet_connection_sock *icsk;
 968	struct request_sock_queue *queue;
 969	struct net *net;
 970	int max_syn_ack_retries, qlen, expire = 0, resend = 0;
 971
 972	if (inet_sk_state_load(sk_listener) != TCP_LISTEN) {
 973		struct sock *nsk;
 974
 975		nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL);
 976		if (!nsk)
 977			goto drop;
 978
 979		nreq = inet_reqsk_clone(req, nsk);
 980		if (!nreq)
 981			goto drop;
 982
 983		/* The new timer for the cloned req can decrease the 2
 984		 * refcounts by calling inet_csk_reqsk_queue_drop_and_put(),
 985		 * so hold another count to prevent use-after-free and
 986		 * call reqsk_put() just before return.
 987		 */
 988		refcount_set(&nreq->rsk_refcnt, 2 + 1);
 989		timer_setup(&nreq->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
 990		reqsk_queue_migrated(&inet_csk(nsk)->icsk_accept_queue, req);
 991
 992		req = nreq;
 993		sk_listener = nsk;
 994	}
 995
 996	icsk = inet_csk(sk_listener);
 997	net = sock_net(sk_listener);
 998	max_syn_ack_retries = icsk->icsk_syn_retries ? :
 999		READ_ONCE(net->ipv4.sysctl_tcp_synack_retries);
1000	/* Normally all the openreqs are young and become mature
1001	 * (i.e. converted to an established socket) within the first
1002	 * timeout. If the synack was not acknowledged for 1 second,
1003	 * it means one of the following: the synack was lost, the ack
1004	 * was lost, the rtt is high or nobody planned to ack (i.e. synflood).
1005	 * When the server is a bit loaded, the queue is populated with old
1006	 * open requests, reducing the effective size of the queue.
1007	 * When the server is well loaded, the queue size reduces to zero
1008	 * after several minutes of work. This is not a synflood, it is
1009	 * normal operation. The solution is pruning entries that are
1010	 * too old, overriding the normal timeout when the
1011	 * situation becomes dangerous.
1012	 *
1013	 * Essentially, we reserve half of the room for young
1014	 * embryos; and abort old ones without pity, if old
1015	 * ones are about to clog our table.
1016	 */
1017	queue = &icsk->icsk_accept_queue;
1018	qlen = reqsk_queue_len(queue);
1019	if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
1020		int young = reqsk_queue_len_young(queue) << 1;
1021
1022		while (max_syn_ack_retries > 2) {
1023			if (qlen < young)
1024				break;
1025			max_syn_ack_retries--;
1026			young <<= 1;
1027		}
1028	}
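
	/* Illustrative numbers, assuming the default tcp_synack_retries
	 * of 5: with sk_max_ack_backlog == 128, qlen == 96 and 8 young
	 * reqs (young == 16 after the shift), the loop runs three times
	 * (96 >= 16, 96 >= 32, 96 >= 64) and max_syn_ack_retries drops
	 * from 5 to 2, pruning old reqs early while young ones keep the
	 * full retry budget.
	 */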
1029	syn_ack_recalc(req, max_syn_ack_retries, READ_ONCE(queue->rskq_defer_accept),
1030		       &expire, &resend);
1031	req->rsk_ops->syn_ack_timeout(req);
1032	if (!expire &&
1033	    (!resend ||
1034	     !inet_rtx_syn_ack(sk_listener, req) ||
1035	     inet_rsk(req)->acked)) {
1036		if (req->num_timeout++ == 0)
1037			atomic_dec(&queue->young);
1038		mod_timer(&req->rsk_timer, jiffies + reqsk_timeout(req, TCP_RTO_MAX));
1039
1040		if (!nreq)
1041			return;
1042
1043		if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
1044			/* delete timer */
1045			inet_csk_reqsk_queue_drop(sk_listener, nreq);
1046			goto no_ownership;
1047		}
1048
1049		__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQSUCCESS);
1050		reqsk_migrate_reset(oreq);
1051		reqsk_queue_removed(&inet_csk(oreq->rsk_listener)->icsk_accept_queue, oreq);
1052		reqsk_put(oreq);
1053
1054		reqsk_put(nreq);
1055		return;
1056	}
1057
1058	/* Even if we can clone the req, we may not need to retransmit any more
1059	 * SYN+ACKs (nreq->num_timeout > max_syn_ack_retries, etc), or another
1060	 * CPU may win the "own_req" race so that inet_ehash_insert() fails.
1061	 */
1062	if (nreq) {
1063		__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQFAILURE);
1064no_ownership:
1065		reqsk_migrate_reset(nreq);
1066		reqsk_queue_removed(queue, nreq);
1067		__reqsk_free(nreq);
1068	}
1069
1070drop:
1071	inet_csk_reqsk_queue_drop_and_put(oreq->rsk_listener, oreq);
1072}
1073
1074static void reqsk_queue_hash_req(struct request_sock *req,
1075				 unsigned long timeout)
1076{
1077	timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
1078	mod_timer(&req->rsk_timer, jiffies + timeout);
1079
1080	inet_ehash_insert(req_to_sk(req), NULL, NULL);
1081	/* before letting lookups find us, make sure all req fields
1082	 * are committed to memory and refcnt initialized.
1083	 */
1084	smp_wmb();
1085	refcount_set(&req->rsk_refcnt, 2 + 1);
1086}
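
/* The refcount_set(..., 2 + 1) above is read as: one reference for the
 * ehash table, one for rsk_timer, plus one for the caller of
 * inet_csk_reqsk_queue_hash_add(), dropped with reqsk_put() once the
 * caller is done with req (an interpretation, not spelled out in the
 * original comment).
 */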
1087
1088void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
1089				   unsigned long timeout)
1090{
1091	reqsk_queue_hash_req(req, timeout);
1092	inet_csk_reqsk_queue_added(sk);
1093}
1094EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
1095
1096static void inet_clone_ulp(const struct request_sock *req, struct sock *newsk,
1097			   const gfp_t priority)
1098{
1099	struct inet_connection_sock *icsk = inet_csk(newsk);
1100
1101	if (!icsk->icsk_ulp_ops)
1102		return;
1103
1104	if (icsk->icsk_ulp_ops->clone)
1105		icsk->icsk_ulp_ops->clone(req, newsk, priority);
1106}
1107
1108/**
1109 *	inet_csk_clone_lock - clone an inet socket, and lock its clone
1110 *	@sk: the socket to clone
1111 *	@req: request_sock
1112 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1113 *
1114 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1115 */
1116struct sock *inet_csk_clone_lock(const struct sock *sk,
1117				 const struct request_sock *req,
1118				 const gfp_t priority)
1119{
1120	struct sock *newsk = sk_clone_lock(sk, priority);
1121
1122	if (newsk) {
1123		struct inet_connection_sock *newicsk = inet_csk(newsk);
1124
1125		inet_sk_set_state(newsk, TCP_SYN_RECV);
1126		newicsk->icsk_bind_hash = NULL;
1127		newicsk->icsk_bind2_hash = NULL;
1128
1129		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
1130		inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
1131		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
1132
1133		/* listeners have SOCK_RCU_FREE, not the children */
1134		sock_reset_flag(newsk, SOCK_RCU_FREE);
1135
1136		inet_sk(newsk)->mc_list = NULL;
1137
1138		newsk->sk_mark = inet_rsk(req)->ir_mark;
1139		atomic64_set(&newsk->sk_cookie,
1140			     atomic64_read(&inet_rsk(req)->ir_cookie));
1141
1142		newicsk->icsk_retransmits = 0;
1143		newicsk->icsk_backoff	  = 0;
1144		newicsk->icsk_probes_out  = 0;
1145		newicsk->icsk_probes_tstamp = 0;
1146
1147		/* Deinitialize accept_queue to trap illegal accesses. */
1148		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
1149
1150		inet_clone_ulp(req, newsk, priority);
1151
1152		security_inet_csk_clone(newsk, req);
1153	}
1154	return newsk;
1155}
1156EXPORT_SYMBOL_GPL(inet_csk_clone_lock);
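
/* Typical call chain (sketch): tcp_create_openreq_child() calls this with
 * GFP_ATOMIC from softirq context; the returned socket comes back locked
 * and, per the kernel-doc above, the caller must bh_unlock_sock() it even
 * on the error path.
 */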
1157
1158/*
1159 * At this point, there should be no process reference to this
1160 * socket, and thus no user references at all.  Therefore we
1161 * can assume the socket waitqueue is inactive and nobody will
1162 * try to jump onto it.
1163 */
1164void inet_csk_destroy_sock(struct sock *sk)
1165{
1166	WARN_ON(sk->sk_state != TCP_CLOSE);
1167	WARN_ON(!sock_flag(sk, SOCK_DEAD));
1168
1169	/* It cannot be in hash table! */
1170	WARN_ON(!sk_unhashed(sk));
1171
1172	/* If inet_sk(sk)->inet_num is nonzero, it must be bound */
1173	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);
1174
1175	sk->sk_prot->destroy(sk);
1176
1177	sk_stream_kill_queues(sk);
1178
1179	xfrm_sk_free_policy(sk);
1180
1181	sk_refcnt_debug_release(sk);
1182
1183	this_cpu_dec(*sk->sk_prot->orphan_count);
1184
1185	sock_put(sk);
1186}
1187EXPORT_SYMBOL(inet_csk_destroy_sock);
1188
1189/* This function allows forcing the closure of a socket after the call to
1190 * tcp/dccp_create_openreq_child().
1191 */
1192void inet_csk_prepare_forced_close(struct sock *sk)
1193	__releases(&sk->sk_lock.slock)
1194{
1195	/* sk_clone_lock locked the socket and set refcnt to 2 */
1196	bh_unlock_sock(sk);
1197	sock_put(sk);
1198	inet_csk_prepare_for_destroy_sock(sk);
1199	inet_sk(sk)->inet_num = 0;
1200}
1201EXPORT_SYMBOL(inet_csk_prepare_forced_close);
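
/* Usage pattern (sketch, as in the error path of tcp_v4_syn_recv_sock()):
 *
 *	inet_csk_prepare_forced_close(newsk);
 *	tcp_done(newsk);
 *
 * i.e. the still-locked, just-cloned child is unlocked, gives up one
 * reference and is then torn down.
 */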
1202
1203static int inet_ulp_can_listen(const struct sock *sk)
1204{
1205	const struct inet_connection_sock *icsk = inet_csk(sk);
1206
1207	if (icsk->icsk_ulp_ops && !icsk->icsk_ulp_ops->clone)
1208		return -EINVAL;
1209
1210	return 0;
1211}
1212
1213int inet_csk_listen_start(struct sock *sk)
1214{
1215	struct inet_connection_sock *icsk = inet_csk(sk);
1216	struct inet_sock *inet = inet_sk(sk);
1217	int err;
1218
1219	err = inet_ulp_can_listen(sk);
1220	if (unlikely(err))
1221		return err;
1222
1223	reqsk_queue_alloc(&icsk->icsk_accept_queue);
1224
1225	sk->sk_ack_backlog = 0;
1226	inet_csk_delack_init(sk);
1227
1228	/* There is a race window here: we announce ourselves listening,
1229	 * but this transition is still not validated by get_port().
1230	 * It is OK, because this socket enters the hash table only
1231	 * after validation is complete.
1232	 */
1233	inet_sk_state_store(sk, TCP_LISTEN);
1234	err = sk->sk_prot->get_port(sk, inet->inet_num);
1235	if (!err) {
1236		inet->inet_sport = htons(inet->inet_num);
1237
1238		sk_dst_reset(sk);
1239		err = sk->sk_prot->hash(sk);
1240
1241		if (likely(!err))
1242			return 0;
1243	}
1244
1245	inet_sk_set_state(sk, TCP_CLOSE);
1246	return err;
1247}
1248EXPORT_SYMBOL_GPL(inet_csk_listen_start);
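
/* For illustration, the userspace path that lands here (a sketch):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	bind(fd, ...);		reserves inet->inet_num
 *	listen(fd, 128);	reaches inet_csk_listen_start()
 *
 * If get_port() fails (e.g. a racing socket grabbed the port in a
 * conflicting way), the state is rolled back to TCP_CLOSE and listen()
 * returns the error.
 */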
1249
1250static void inet_child_forget(struct sock *sk, struct request_sock *req,
1251			      struct sock *child)
1252{
1253	sk->sk_prot->disconnect(child, O_NONBLOCK);
1254
1255	sock_orphan(child);
1256
1257	this_cpu_inc(*sk->sk_prot->orphan_count);
1258
1259	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
1260		BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
1261		BUG_ON(sk != req->rsk_listener);
1262
1263		/* Paranoid, to prevent a race condition if
1264		 * an inbound pkt destined for the child is
1265		 * blocked by the sock lock in tcp_v4_rcv().
1266		 * Also to satisfy an assertion in
1267		 * tcp_v4_destroy_sock().
1268		 */
1269		RCU_INIT_POINTER(tcp_sk(child)->fastopen_rsk, NULL);
1270	}
1271	inet_csk_destroy_sock(child);
1272}
1273
1274struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
1275				      struct request_sock *req,
1276				      struct sock *child)
1277{
1278	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
1279
1280	spin_lock(&queue->rskq_lock);
1281	if (unlikely(sk->sk_state != TCP_LISTEN)) {
1282		inet_child_forget(sk, req, child);
1283		child = NULL;
1284	} else {
1285		req->sk = child;
1286		req->dl_next = NULL;
1287		if (queue->rskq_accept_head == NULL)
1288			WRITE_ONCE(queue->rskq_accept_head, req);
1289		else
1290			queue->rskq_accept_tail->dl_next = req;
1291		queue->rskq_accept_tail = req;
1292		sk_acceptq_added(sk);
1293	}
1294	spin_unlock(&queue->rskq_lock);
1295	return child;
1296}
1297EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
1298
1299struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
1300					 struct request_sock *req, bool own_req)
1301{
1302	if (own_req) {
1303		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
1304		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
1305
1306		if (sk != req->rsk_listener) {
1307			/* another listening sk has been selected,
1308			 * migrate the req to it.
1309			 */
1310			struct request_sock *nreq;
1311
1312			/* hold a refcnt for the nreq->rsk_listener
1313			 * which is assigned in inet_reqsk_clone()
1314			 */
1315			sock_hold(sk);
1316			nreq = inet_reqsk_clone(req, sk);
1317			if (!nreq) {
1318				inet_child_forget(sk, req, child);
1319				goto child_put;
1320			}
1321
1322			refcount_set(&nreq->rsk_refcnt, 1);
1323			if (inet_csk_reqsk_queue_add(sk, nreq, child)) {
1324				__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQSUCCESS);
1325				reqsk_migrate_reset(req);
1326				reqsk_put(req);
1327				return child;
1328			}
1329
1330			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
1331			reqsk_migrate_reset(nreq);
1332			__reqsk_free(nreq);
1333		} else if (inet_csk_reqsk_queue_add(sk, req, child)) {
1334			return child;
1335		}
1336	}
1337	/* Too bad, another child took ownership of the request, undo. */
1338child_put:
1339	bh_unlock_sock(child);
1340	sock_put(child);
1341	return NULL;
1342}
1343EXPORT_SYMBOL(inet_csk_complete_hashdance);
1344
1345/*
1346 *	This routine closes sockets which have been at least partially
1347 *	opened, but not yet accepted.
1348 */
1349void inet_csk_listen_stop(struct sock *sk)
1350{
1351	struct inet_connection_sock *icsk = inet_csk(sk);
1352	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
1353	struct request_sock *next, *req;
1354
1355	/* Following the specs, it would be better either to send a FIN
1356	 * (and enter FIN-WAIT-1, i.e. a normal close)
1357	 * or to send an active reset (abort).
1358	 * Certainly, it is pretty dangerous during a synflood, but that is
1359	 * a bad justification for our negligence 8)
1360	 * To be honest, we are not able to implement either
1361	 * of the variants now.			--ANK
1362	 */
1363	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
1364		struct sock *child = req->sk, *nsk;
1365		struct request_sock *nreq;
1366
1367		local_bh_disable();
1368		bh_lock_sock(child);
1369		WARN_ON(sock_owned_by_user(child));
1370		sock_hold(child);
1371
1372		nsk = reuseport_migrate_sock(sk, child, NULL);
1373		if (nsk) {
1374			nreq = inet_reqsk_clone(req, nsk);
1375			if (nreq) {
1376				refcount_set(&nreq->rsk_refcnt, 1);
1377
1378				if (inet_csk_reqsk_queue_add(nsk, nreq, child)) {
1379					__NET_INC_STATS(sock_net(nsk),
1380							LINUX_MIB_TCPMIGRATEREQSUCCESS);
1381					reqsk_migrate_reset(req);
1382				} else {
1383					__NET_INC_STATS(sock_net(nsk),
1384							LINUX_MIB_TCPMIGRATEREQFAILURE);
1385					reqsk_migrate_reset(nreq);
1386					__reqsk_free(nreq);
1387				}
1388
1389				/* inet_csk_reqsk_queue_add() has already
1390				 * called inet_child_forget() on failure case.
1391				 */
1392				goto skip_child_forget;
1393			}
1394		}
1395
1396		inet_child_forget(sk, req, child);
1397skip_child_forget:
1398		reqsk_put(req);
1399		bh_unlock_sock(child);
1400		local_bh_enable();
1401		sock_put(child);
1402
1403		cond_resched();
1404	}
1405	if (queue->fastopenq.rskq_rst_head) {
1406		/* Free all the reqs queued in rskq_rst_head. */
1407		spin_lock_bh(&queue->fastopenq.lock);
1408		req = queue->fastopenq.rskq_rst_head;
1409		queue->fastopenq.rskq_rst_head = NULL;
1410		spin_unlock_bh(&queue->fastopenq.lock);
1411		while (req != NULL) {
1412			next = req->dl_next;
1413			reqsk_put(req);
1414			req = next;
1415		}
1416	}
1417	WARN_ON_ONCE(sk->sk_ack_backlog);
1418}
1419EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
1420
1421void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
1422{
1423	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
1424	const struct inet_sock *inet = inet_sk(sk);
1425
1426	sin->sin_family		= AF_INET;
1427	sin->sin_addr.s_addr	= inet->inet_daddr;
1428	sin->sin_port		= inet->inet_dport;
1429}
1430EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);
1431
1432static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
1433{
1434	const struct inet_sock *inet = inet_sk(sk);
1435	const struct ip_options_rcu *inet_opt;
1436	__be32 daddr = inet->inet_daddr;
1437	struct flowi4 *fl4;
1438	struct rtable *rt;
1439
1440	rcu_read_lock();
1441	inet_opt = rcu_dereference(inet->inet_opt);
1442	if (inet_opt && inet_opt->opt.srr)
1443		daddr = inet_opt->opt.faddr;
1444	fl4 = &fl->u.ip4;
1445	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
1446				   inet->inet_saddr, inet->inet_dport,
1447				   inet->inet_sport, sk->sk_protocol,
1448				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
1449	if (IS_ERR(rt))
1450		rt = NULL;
1451	if (rt)
1452		sk_setup_caps(sk, &rt->dst);
1453	rcu_read_unlock();
1454
1455	return &rt->dst;
1456}
1457
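/* inet_csk_update_pmtu() below feeds an ICMP-reported MTU into the cached
 * route: update_pmtu() may invalidate the dst, so the route is checked
 * again and, if necessary, rebuilt via inet_csk_rebuild_route() before
 * being handed back to the caller.
 */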
1458struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
1459{
1460	struct dst_entry *dst = __sk_dst_check(sk, 0);
1461	struct inet_sock *inet = inet_sk(sk);
1462
1463	if (!dst) {
1464		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
1465		if (!dst)
1466			goto out;
1467	}
1468	dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
1469
1470	dst = __sk_dst_check(sk, 0);
1471	if (!dst)
1472		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
1473out:
1474	return dst;
1475}
1476EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);