v4.17
 
   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		Support for INET connection oriented protocols.
   7 *
   8 * Authors:	See the TCP sources
   9 *
  10 *		This program is free software; you can redistribute it and/or
  11 *		modify it under the terms of the GNU General Public License
  12 *		as published by the Free Software Foundation; either version
  13 *		2 of the License, or(at your option) any later version.
  14 */
  15
  16#include <linux/module.h>
  17#include <linux/jhash.h>
  18
  19#include <net/inet_connection_sock.h>
  20#include <net/inet_hashtables.h>
  21#include <net/inet_timewait_sock.h>
  22#include <net/ip.h>
  23#include <net/route.h>
  24#include <net/tcp_states.h>
  25#include <net/xfrm.h>
  26#include <net/tcp.h>
  27#include <net/sock_reuseport.h>
  28#include <net/addrconf.h>
  29
  30#ifdef INET_CSK_DEBUG
  31const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
  32EXPORT_SYMBOL(inet_csk_timer_bug_msg);
  33#endif
  34
  35#if IS_ENABLED(CONFIG_IPV6)
  36/* match_wildcard == true:  IPV6_ADDR_ANY equals to any IPv6 addresses if IPv6
  37 *                          only, and any IPv4 addresses if not IPv6 only
  38 * match_wildcard == false: addresses must be exactly the same, i.e.
  39 *                          IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
  40 *                          and 0.0.0.0 equals to 0.0.0.0 only
  41 */
  42static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
  43				 const struct in6_addr *sk2_rcv_saddr6,
  44				 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
  45				 bool sk1_ipv6only, bool sk2_ipv6only,
  46				 bool match_wildcard)
  47{
  48	int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
  49	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
  50
  51	/* if both are mapped, treat as IPv4 */
  52	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
  53		if (!sk2_ipv6only) {
  54			if (sk1_rcv_saddr == sk2_rcv_saddr)
  55				return true;
  56			if (!sk1_rcv_saddr || !sk2_rcv_saddr)
  57				return match_wildcard;
  58		}
  59		return false;
  60	}
  61
  62	if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
  63		return true;
  64
  65	if (addr_type2 == IPV6_ADDR_ANY && match_wildcard &&
  66	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
  67		return true;
  68
  69	if (addr_type == IPV6_ADDR_ANY && match_wildcard &&
  70	    !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
  71		return true;
  72
  73	if (sk2_rcv_saddr6 &&
  74	    ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
  75		return true;
  76
  77	return false;
  78}
  79#endif
  80
  81/* match_wildcard == true:  0.0.0.0 equals to any IPv4 addresses
  82 * match_wildcard == false: addresses must be exactly the same, i.e.
  83 *                          0.0.0.0 only equals to 0.0.0.0
  84 */
  85static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
  86				 bool sk2_ipv6only, bool match_wildcard)
  87{
  88	if (!sk2_ipv6only) {
  89		if (sk1_rcv_saddr == sk2_rcv_saddr)
  90			return true;
  91		if (!sk1_rcv_saddr || !sk2_rcv_saddr)
  92			return match_wildcard;
  93	}
  94	return false;
  95}
  96
  97bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
  98			  bool match_wildcard)
  99{
 100#if IS_ENABLED(CONFIG_IPV6)
 101	if (sk->sk_family == AF_INET6)
 102		return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
 103					    inet6_rcv_saddr(sk2),
 104					    sk->sk_rcv_saddr,
 105					    sk2->sk_rcv_saddr,
 106					    ipv6_only_sock(sk),
 107					    ipv6_only_sock(sk2),
 108					    match_wildcard);
 109#endif
 110	return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
 111				    ipv6_only_sock(sk2), match_wildcard);
 112}
 113EXPORT_SYMBOL(inet_rcv_saddr_equal);
 114
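The wildcard rules implemented above decide whether two bound sockets conflict. A minimal userspace sketch of the observable behaviour (illustrative only; 192.0.2.1 and port 8080 are placeholder values, and the specific address must exist on the host): a wildcard bind and a specific-address bind on the same port conflict unless both sockets opt in with SO_REUSEADDR and neither is listening.

/* Illustrative userspace sketch: wildcard vs. specific address bind conflict. */
#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int a = socket(AF_INET, SOCK_STREAM, 0);
	int b = socket(AF_INET, SOCK_STREAM, 0);
	int one = 1;
	struct sockaddr_in any = { .sin_family = AF_INET,
				   .sin_port = htons(8080),
				   .sin_addr.s_addr = htonl(INADDR_ANY) };
	struct sockaddr_in spec = any;

	inet_pton(AF_INET, "192.0.2.1", &spec.sin_addr);

	/* Without SO_REUSEADDR the second bind() would conflict with the
	 * wildcard bind and fail with EADDRINUSE (the match_wildcard case).
	 */
	setsockopt(a, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
	setsockopt(b, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));

	if (bind(a, (struct sockaddr *)&any, sizeof(any)))
		perror("bind wildcard");
	if (bind(b, (struct sockaddr *)&spec, sizeof(spec)))
		perror("bind specific");	/* succeeds only due to SO_REUSEADDR */

	close(a);
	close(b);
	return 0;
}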
 115void inet_get_local_port_range(struct net *net, int *low, int *high)
 116{
 117	unsigned int seq;
 118
 119	do {
 120		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
 121
 122		*low = net->ipv4.ip_local_ports.range[0];
 123		*high = net->ipv4.ip_local_ports.range[1];
 124	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
 125}
 126EXPORT_SYMBOL(inet_get_local_port_range);
 127
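inet_get_local_port_range() publishes the ephemeral port range configured through the ip_local_port_range sysctl, read here under a seqlock so writers can update both bounds atomically. A small sketch that reads the same pair from userspace (assuming the conventional procfs path):

/* Read the ephemeral port range the kernel hands out for autobind/connect. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/ip_local_port_range", "r");
	int low, high;

	if (!f || fscanf(f, "%d %d", &low, &high) != 2) {
		perror("ip_local_port_range");
		return 1;
	}
	printf("local port range: [%d, %d]\n", low, high);
	fclose(f);
	return 0;
}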
 128static int inet_csk_bind_conflict(const struct sock *sk,
 129				  const struct inet_bind_bucket *tb,
 130				  bool relax, bool reuseport_ok)
 131{
 132	struct sock *sk2;
 133	bool reuse = sk->sk_reuse;
 134	bool reuseport = !!sk->sk_reuseport && reuseport_ok;
 135	kuid_t uid = sock_i_uid((struct sock *)sk);
 136
 137	/*
 138	 * Unlike other sk lookup places we do not check
 139	 * for sk_net here, since _all_ the socks listed
 140	 * in tb->owners list belong to the same net - the
 141	 * one this bucket belongs to.
 142	 */
 143
 144	sk_for_each_bound(sk2, &tb->owners) {
 145		if (sk != sk2 &&
 146		    (!sk->sk_bound_dev_if ||
 147		     !sk2->sk_bound_dev_if ||
 148		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
 149			if ((!reuse || !sk2->sk_reuse ||
 150			    sk2->sk_state == TCP_LISTEN) &&
 151			    (!reuseport || !sk2->sk_reuseport ||
 152			     rcu_access_pointer(sk->sk_reuseport_cb) ||
 153			     (sk2->sk_state != TCP_TIME_WAIT &&
 154			     !uid_eq(uid, sock_i_uid(sk2))))) {
 155				if (inet_rcv_saddr_equal(sk, sk2, true))
 156					break;
 157			}
 158			if (!relax && reuse && sk2->sk_reuse &&
 159			    sk2->sk_state != TCP_LISTEN) {
 160				if (inet_rcv_saddr_equal(sk, sk2, true))
 161					break;
 162			}
 163		}
 164	}
 165	return sk2 != NULL;
 166}
 167
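inet_csk_bind_conflict() lets several sockets share a port when all of them set SO_REUSEPORT and belong to the same user, which is what the sk_reuseport and uid checks above implement. A minimal userspace sketch (port 8080 is a placeholder):

/* Two listeners sharing one port via SO_REUSEPORT (same UID required). */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <unistd.h>

static int reuseport_listener(uint16_t port)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int one = 1;
	struct sockaddr_in addr = { .sin_family = AF_INET,
				    .sin_port = htons(port),
				    .sin_addr.s_addr = htonl(INADDR_ANY) };

	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) ||
	    listen(fd, 128)) {
		perror("reuseport_listener");
		close(fd);
		return -1;
	}
	return fd;
}

int main(void)
{
	int a = reuseport_listener(8080);
	int b = reuseport_listener(8080);	/* no EADDRINUSE: same port, same UID */

	printf("listeners: %d %d\n", a, b);
	return 0;
}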
 168/*
 169 * Find an open port number for the socket.  Returns with the
 170 * inet_bind_hashbucket lock held.
 171 */
 172static struct inet_bind_hashbucket *
 173inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret, int *port_ret)
 174{
 175	struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
 176	int port = 0;
 177	struct inet_bind_hashbucket *head;
 178	struct net *net = sock_net(sk);
 179	int i, low, high, attempt_half;
 180	struct inet_bind_bucket *tb;
 181	u32 remaining, offset;
 182
 183	attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
 184other_half_scan:
 185	inet_get_local_port_range(net, &low, &high);
 186	high++; /* [32768, 60999] -> [32768, 61000[ */
 187	if (high - low < 4)
 188		attempt_half = 0;
 189	if (attempt_half) {
 190		int half = low + (((high - low) >> 2) << 1);
 191
 192		if (attempt_half == 1)
 193			high = half;
 194		else
 195			low = half;
 196	}
 197	remaining = high - low;
 198	if (likely(remaining > 1))
 199		remaining &= ~1U;
 200
 201	offset = prandom_u32() % remaining;
 202	/* __inet_hash_connect() favors ports having @low parity
 203	 * We do the opposite to not pollute connect() users.
 204	 */
 205	offset |= 1U;
 206
 207other_parity_scan:
 208	port = low + offset;
 209	for (i = 0; i < remaining; i += 2, port += 2) {
 210		if (unlikely(port >= high))
 211			port -= remaining;
 212		if (inet_is_local_reserved_port(net, port))
 213			continue;
 214		head = &hinfo->bhash[inet_bhashfn(net, port,
 215						  hinfo->bhash_size)];
 216		spin_lock_bh(&head->lock);
 217		inet_bind_bucket_for_each(tb, &head->chain)
 218			if (net_eq(ib_net(tb), net) && tb->port == port) {
 219				if (!inet_csk_bind_conflict(sk, tb, false, false))
 220					goto success;
 221				goto next_port;
 222			}
 223		tb = NULL;
 224		goto success;
 225next_port:
 226		spin_unlock_bh(&head->lock);
 227		cond_resched();
 228	}
 229
 230	offset--;
 231	if (!(offset & 1))
 232		goto other_parity_scan;
 233
 234	if (attempt_half == 1) {
 235		/* OK we now try the upper half of the range */
 236		attempt_half = 2;
 237		goto other_half_scan;
 238	}
 239	return NULL;
 240success:
 241	*port_ret = port;
 242	*tb_ret = tb;
 243	return head;
 244}
 245
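For SK_CAN_REUSE sockets the search above first restricts itself to one half of the configured range, and it walks only odd ports so that connect()'s __inet_hash_connect() can keep the even ones. A standalone sketch of the same arithmetic with the default range (the sample "random" value is made up):

/* Reproduce the half-split and odd-parity offset arithmetic for [32768, 60999]. */
#include <stdio.h>

int main(void)
{
	int low = 32768, high = 60999 + 1;	/* [32768, 61000[ */
	int half = low + (((high - low) >> 2) << 1);
	unsigned int remaining = (high - low) & ~1U;
	unsigned int offset = (12345u % remaining) | 1u;	/* sample "random" value, forced odd */

	printf("lower half: [%d, %d[  upper half: [%d, %d[\n", low, half, half, high);
	printf("first candidate port (odd): %u\n", low + offset);
	return 0;
}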
 246static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
 247				     struct sock *sk)
 248{
 249	kuid_t uid = sock_i_uid(sk);
 250
 251	if (tb->fastreuseport <= 0)
 252		return 0;
 253	if (!sk->sk_reuseport)
 254		return 0;
 255	if (rcu_access_pointer(sk->sk_reuseport_cb))
 256		return 0;
 257	if (!uid_eq(tb->fastuid, uid))
 258		return 0;
 259	/* We only need to check the rcv_saddr if this tb was once marked
 260	 * without fastreuseport and then was reset, as we can only know that
 261	 * the fast_*rcv_saddr doesn't have any conflicts with the socks on the
 262	 * owners list.
 263	 */
 264	if (tb->fastreuseport == FASTREUSEPORT_ANY)
 265		return 1;
 266#if IS_ENABLED(CONFIG_IPV6)
 267	if (tb->fast_sk_family == AF_INET6)
 268		return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
 269					    inet6_rcv_saddr(sk),
 270					    tb->fast_rcv_saddr,
 271					    sk->sk_rcv_saddr,
 272					    tb->fast_ipv6_only,
 273					    ipv6_only_sock(sk), true);
 274#endif
 275	return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
 276				    ipv6_only_sock(sk), true);
 277}
 278
 279/* Obtain a reference to a local port for the given sock,
 280 * if snum is zero it means select any available local port.
 281 * We try to allocate an odd port (and leave even ports for connect())
 282 */
 283int inet_csk_get_port(struct sock *sk, unsigned short snum)
 284{
 285	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
 286	struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
 287	int ret = 1, port = snum;
 288	struct inet_bind_hashbucket *head;
 289	struct net *net = sock_net(sk);
 290	struct inet_bind_bucket *tb = NULL;
 291	kuid_t uid = sock_i_uid(sk);
 292
 293	if (!port) {
 294		head = inet_csk_find_open_port(sk, &tb, &port);
 295		if (!head)
 296			return ret;
 297		if (!tb)
 298			goto tb_not_found;
 299		goto success;
 300	}
 301	head = &hinfo->bhash[inet_bhashfn(net, port,
 302					  hinfo->bhash_size)];
 303	spin_lock_bh(&head->lock);
 304	inet_bind_bucket_for_each(tb, &head->chain)
 305		if (net_eq(ib_net(tb), net) && tb->port == port)
 306			goto tb_found;
 307tb_not_found:
 308	tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
 309				     net, head, port);
 310	if (!tb)
 311		goto fail_unlock;
 312tb_found:
 313	if (!hlist_empty(&tb->owners)) {
 314		if (sk->sk_reuse == SK_FORCE_REUSE)
 315			goto success;
 316
 317		if ((tb->fastreuse > 0 && reuse) ||
 318		    sk_reuseport_match(tb, sk))
 319			goto success;
 320		if (inet_csk_bind_conflict(sk, tb, true, true))
 321			goto fail_unlock;
 322	}
 323success:
 324	if (hlist_empty(&tb->owners)) {
 325		tb->fastreuse = reuse;
 326		if (sk->sk_reuseport) {
 327			tb->fastreuseport = FASTREUSEPORT_ANY;
 328			tb->fastuid = uid;
 329			tb->fast_rcv_saddr = sk->sk_rcv_saddr;
 330			tb->fast_ipv6_only = ipv6_only_sock(sk);
 331			tb->fast_sk_family = sk->sk_family;
 332#if IS_ENABLED(CONFIG_IPV6)
 333			tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
 334#endif
 335		} else {
 336			tb->fastreuseport = 0;
 337		}
 338	} else {
 339		if (!reuse)
 340			tb->fastreuse = 0;
 341		if (sk->sk_reuseport) {
 342			/* We didn't match or we don't have fastreuseport set on
 343			 * the tb, but we have sk_reuseport set on this socket
 344			 * and we know that there are no bind conflicts with
 345			 * this socket in this tb, so reset our tb's reuseport
 346			 * settings so that any subsequent sockets that match
 347			 * our current socket will be put on the fast path.
 348			 *
 349			 * If we reset we need to set FASTREUSEPORT_STRICT so we
 350			 * do extra checking for all subsequent sk_reuseport
 351			 * socks.
 352			 */
 353			if (!sk_reuseport_match(tb, sk)) {
 354				tb->fastreuseport = FASTREUSEPORT_STRICT;
 355				tb->fastuid = uid;
 356				tb->fast_rcv_saddr = sk->sk_rcv_saddr;
 357				tb->fast_ipv6_only = ipv6_only_sock(sk);
 358				tb->fast_sk_family = sk->sk_family;
 359#if IS_ENABLED(CONFIG_IPV6)
 360				tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
 361#endif
 362			}
 363		} else {
 364			tb->fastreuseport = 0;
 365		}
 366	}
 367	if (!inet_csk(sk)->icsk_bind_hash)
 368		inet_bind_hash(sk, tb, port);
 369	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
 370	ret = 0;
 371
 372fail_unlock:
 373	spin_unlock_bh(&head->lock);
 374	return ret;
 375}
 376EXPORT_SYMBOL_GPL(inet_csk_get_port);
 377
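With snum == 0, inet_csk_get_port() ends up in inet_csk_find_open_port() and hands back an ephemeral (preferably odd) port. From userspace that is simply a bind() to port 0 followed by getsockname(); a short sketch:

/* Let the kernel choose an ephemeral port (the snum == 0 path) and report it. */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in addr;
	socklen_t len = sizeof(addr);

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = 0;			/* port 0: kernel selects one */

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) ||
	    getsockname(fd, (struct sockaddr *)&addr, &len)) {
		perror("bind/getsockname");
		return 1;
	}
	printf("kernel chose port %u\n", ntohs(addr.sin_port));
	close(fd);
	return 0;
}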
 378/*
 379 * Wait for an incoming connection, avoid race conditions. This must be called
 380 * with the socket locked.
 381 */
 382static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
 383{
 384	struct inet_connection_sock *icsk = inet_csk(sk);
 385	DEFINE_WAIT(wait);
 386	int err;
 387
 388	/*
 389	 * True wake-one mechanism for incoming connections: only
 390	 * one process gets woken up, not the 'whole herd'.
 391	 * Since we do not 'race & poll' for established sockets
 392	 * anymore, the common case will execute the loop only once.
 393	 *
 394	 * Subtle issue: "add_wait_queue_exclusive()" will be added
 395	 * after any current non-exclusive waiters, and we know that
 396	 * it will always _stay_ after any new non-exclusive waiters
 397	 * because all non-exclusive waiters are added at the
 398	 * beginning of the wait-queue. As such, it's ok to "drop"
 399	 * our exclusiveness temporarily when we get woken up without
 400	 * having to remove and re-insert us on the wait queue.
 401	 */
 402	for (;;) {
 403		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
 404					  TASK_INTERRUPTIBLE);
 405		release_sock(sk);
 406		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
 407			timeo = schedule_timeout(timeo);
 408		sched_annotate_sleep();
 409		lock_sock(sk);
 410		err = 0;
 411		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
 412			break;
 413		err = -EINVAL;
 414		if (sk->sk_state != TCP_LISTEN)
 415			break;
 416		err = sock_intr_errno(timeo);
 417		if (signal_pending(current))
 418			break;
 419		err = -EAGAIN;
 420		if (!timeo)
 421			break;
 422	}
 423	finish_wait(sk_sleep(sk), &wait);
 424	return err;
 425}
 426
 427/*
 428 * This will accept the next outstanding connection.
 429 */
 430struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
 431{
 432	struct inet_connection_sock *icsk = inet_csk(sk);
 433	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
 434	struct request_sock *req;
 435	struct sock *newsk;
 436	int error;
 437
 438	lock_sock(sk);
 439
 440	/* We need to make sure that this socket is listening,
 441	 * and that it has something pending.
 442	 */
 443	error = -EINVAL;
 444	if (sk->sk_state != TCP_LISTEN)
 445		goto out_err;
 446
 447	/* Find already established connection */
 448	if (reqsk_queue_empty(queue)) {
 449		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
 450
 451		/* If this is a non blocking socket don't sleep */
 452		error = -EAGAIN;
 453		if (!timeo)
 454			goto out_err;
 455
 456		error = inet_csk_wait_for_connect(sk, timeo);
 457		if (error)
 458			goto out_err;
 459	}
 460	req = reqsk_queue_remove(queue, sk);
 461	newsk = req->sk;
 462
 463	if (sk->sk_protocol == IPPROTO_TCP &&
 464	    tcp_rsk(req)->tfo_listener) {
 465		spin_lock_bh(&queue->fastopenq.lock);
 466		if (tcp_rsk(req)->tfo_listener) {
 467			/* We are still waiting for the final ACK from 3WHS
 468			 * so can't free req now. Instead, we set req->sk to
 469			 * NULL to signify that the child socket is taken
 470			 * so reqsk_fastopen_remove() will free the req
 471			 * when 3WHS finishes (or is aborted).
 472			 */
 473			req->sk = NULL;
 474			req = NULL;
 475		}
 476		spin_unlock_bh(&queue->fastopenq.lock);
 477	}
 478out:
 479	release_sock(sk);
 480	if (req)
 481		reqsk_put(req);
 482	return newsk;
 483out_err:
 484	newsk = NULL;
 485	req = NULL;
 486	*err = error;
 487	goto out;
 488}
 489EXPORT_SYMBOL(inet_csk_accept);
 490
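The tfo_listener branch above only triggers for listeners that enabled TCP Fast Open. A hedged sketch of the userspace knob that creates such a listener (port 8080 and queue length 16 are arbitrary choices):

/* Enable TCP Fast Open on a listening socket so accept() can return
 * sockets whose 3WHS has not completed yet (the tfo_listener case above).
 */
#include <stdio.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int qlen = 16;				/* max pending TFO requests */
	struct sockaddr_in addr = { .sin_family = AF_INET,
				    .sin_port = htons(8080),
				    .sin_addr.s_addr = htonl(INADDR_ANY) };

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) ||
	    setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen)) ||
	    listen(fd, 128)) {
		perror("fastopen listener");
		return 1;
	}
	printf("TFO listener ready on port 8080\n");
	return 0;
}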
 491/*
 492 * Using different timers for retransmit, delayed acks and probes
 493 * We may wish use just one timer maintaining a list of expire jiffies
 494 * to optimize.
 495 */
 496void inet_csk_init_xmit_timers(struct sock *sk,
 497			       void (*retransmit_handler)(struct timer_list *t),
 498			       void (*delack_handler)(struct timer_list *t),
 499			       void (*keepalive_handler)(struct timer_list *t))
 500{
 501	struct inet_connection_sock *icsk = inet_csk(sk);
 502
 503	timer_setup(&icsk->icsk_retransmit_timer, retransmit_handler, 0);
 504	timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
 505	timer_setup(&sk->sk_timer, keepalive_handler, 0);
 506	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
 507}
 508EXPORT_SYMBOL(inet_csk_init_xmit_timers);
 509
 510void inet_csk_clear_xmit_timers(struct sock *sk)
 511{
 512	struct inet_connection_sock *icsk = inet_csk(sk);
 513
 514	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;
 515
 516	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
 517	sk_stop_timer(sk, &icsk->icsk_delack_timer);
 518	sk_stop_timer(sk, &sk->sk_timer);
 519}
 520EXPORT_SYMBOL(inet_csk_clear_xmit_timers);
 521
 522void inet_csk_delete_keepalive_timer(struct sock *sk)
 523{
 524	sk_stop_timer(sk, &sk->sk_timer);
 525}
 526EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);
 527
 528void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
 529{
 530	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
 531}
 532EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
 533
 534struct dst_entry *inet_csk_route_req(const struct sock *sk,
 535				     struct flowi4 *fl4,
 536				     const struct request_sock *req)
 537{
 538	const struct inet_request_sock *ireq = inet_rsk(req);
 539	struct net *net = read_pnet(&ireq->ireq_net);
 540	struct ip_options_rcu *opt;
 541	struct rtable *rt;
 542
 543	opt = ireq_opt_deref(ireq);
 544
 545	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
 546			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 547			   sk->sk_protocol, inet_sk_flowi_flags(sk),
 548			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
 549			   ireq->ir_loc_addr, ireq->ir_rmt_port,
 550			   htons(ireq->ir_num), sk->sk_uid);
 551	security_req_classify_flow(req, flowi4_to_flowi(fl4));
 552	rt = ip_route_output_flow(net, fl4, sk);
 553	if (IS_ERR(rt))
 554		goto no_route;
 555	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
 556		goto route_err;
 557	return &rt->dst;
 558
 559route_err:
 560	ip_rt_put(rt);
 561no_route:
 562	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 563	return NULL;
 564}
 565EXPORT_SYMBOL_GPL(inet_csk_route_req);
 566
 567struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
 568					    struct sock *newsk,
 569					    const struct request_sock *req)
 570{
 571	const struct inet_request_sock *ireq = inet_rsk(req);
 572	struct net *net = read_pnet(&ireq->ireq_net);
 573	struct inet_sock *newinet = inet_sk(newsk);
 574	struct ip_options_rcu *opt;
 575	struct flowi4 *fl4;
 576	struct rtable *rt;
 577
 578	opt = rcu_dereference(ireq->ireq_opt);
 579	fl4 = &newinet->cork.fl.u.ip4;
 580
 581	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
 582			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 583			   sk->sk_protocol, inet_sk_flowi_flags(sk),
 584			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
 585			   ireq->ir_loc_addr, ireq->ir_rmt_port,
 586			   htons(ireq->ir_num), sk->sk_uid);
 587	security_req_classify_flow(req, flowi4_to_flowi(fl4));
 588	rt = ip_route_output_flow(net, fl4, sk);
 589	if (IS_ERR(rt))
 590		goto no_route;
 591	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
 592		goto route_err;
 593	return &rt->dst;
 594
 595route_err:
 596	ip_rt_put(rt);
 597no_route:
 598	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 599	return NULL;
 600}
 601EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
 602
 603#if IS_ENABLED(CONFIG_IPV6)
 604#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
 605#else
 606#define AF_INET_FAMILY(fam) true
 607#endif
 608
 609/* Decide when to expire the request and when to resend SYN-ACK */
 610static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
 611				  const int max_retries,
 612				  const u8 rskq_defer_accept,
 613				  int *expire, int *resend)
 614{
 615	if (!rskq_defer_accept) {
 616		*expire = req->num_timeout >= thresh;
 617		*resend = 1;
 618		return;
 619	}
 620	*expire = req->num_timeout >= thresh &&
 621		  (!inet_rsk(req)->acked || req->num_timeout >= max_retries);
 622	/*
 623	 * Do not resend while waiting for data after ACK,
 624	 * start to resend on end of deferring period to give
 625	 * last chance for data or ACK to create established socket.
 626	 */
 627	*resend = !inet_rsk(req)->acked ||
 628		  req->num_timeout >= rskq_defer_accept - 1;
 629}
 630
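rskq_defer_accept is set through the TCP_DEFER_ACCEPT socket option: the listener keeps answering with SYN-ACKs but is not woken until data arrives or the deferring period runs out, which is the policy encoded above. A short userspace sketch (the 5 second value is arbitrary; the kernel rounds it to a number of SYN-ACK retries):

/* Ask the kernel to defer accept() wake-ups until data arrives,
 * which is what feeds rskq_defer_accept in syn_ack_recalc().
 */
#include <stdio.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int secs = 5;	/* defer up to ~5 seconds (rounded to SYN-ACK retries) */

	if (setsockopt(fd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &secs, sizeof(secs)))
		perror("TCP_DEFER_ACCEPT");
	return 0;
}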
 631int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
 632{
 633	int err = req->rsk_ops->rtx_syn_ack(parent, req);
 634
 635	if (!err)
 636		req->num_retrans++;
 637	return err;
 638}
 639EXPORT_SYMBOL(inet_rtx_syn_ack);
 640
 641/* return true if req was found in the ehash table */
 642static bool reqsk_queue_unlink(struct request_sock_queue *queue,
 643			       struct request_sock *req)
 644{
 645	struct inet_hashinfo *hashinfo = req_to_sk(req)->sk_prot->h.hashinfo;
 646	bool found = false;
 647
 648	if (sk_hashed(req_to_sk(req))) {
 649		spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);
 650
 651		spin_lock(lock);
 652		found = __sk_nulls_del_node_init_rcu(req_to_sk(req));
 653		spin_unlock(lock);
 654	}
 655	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
 656		reqsk_put(req);
 657	return found;
 658}
 659
 660void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
 661{
 662	if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) {
 663		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
 664		reqsk_put(req);
 665	}
 666}
 667EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
 668
 669void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
 670{
 671	inet_csk_reqsk_queue_drop(sk, req);
 672	reqsk_put(req);
 673}
 674EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);
 675
 676static void reqsk_timer_handler(struct timer_list *t)
 677{
 678	struct request_sock *req = from_timer(req, t, rsk_timer);
 679	struct sock *sk_listener = req->rsk_listener;
 680	struct net *net = sock_net(sk_listener);
 681	struct inet_connection_sock *icsk = inet_csk(sk_listener);
 682	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
 683	int qlen, expire = 0, resend = 0;
 684	int max_retries, thresh;
 685	u8 defer_accept;
 686
 687	if (inet_sk_state_load(sk_listener) != TCP_LISTEN)
 688		goto drop;
 689
 690	max_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
 691	thresh = max_retries;
 692	/* Normally all the openreqs are young and become mature
 693	 * (i.e. converted to established socket) for first timeout.
 694	 * If synack was not acknowledged for 1 second, it means
 695	 * one of the following things: synack was lost, ack was lost,
 696	 * rtt is high or nobody planned to ack (i.e. synflood).
 697	 * When server is a bit loaded, queue is populated with old
 698	 * open requests, reducing effective size of queue.
 699	 * When server is well loaded, queue size reduces to zero
 700	 * after several minutes of work. It is not synflood,
 701	 * it is normal operation. The solution is pruning
 702	 * too old entries overriding normal timeout, when
 703	 * situation becomes dangerous.
 704	 *
 705	 * Essentially, we reserve half of room for young
 706	 * embrions; and abort old ones without pity, if old
 707	 * ones are about to clog our table.
 708	 */
 709	qlen = reqsk_queue_len(queue);
 710	if ((qlen << 1) > max(8U, sk_listener->sk_max_ack_backlog)) {
 711		int young = reqsk_queue_len_young(queue) << 1;
 712
 713		while (thresh > 2) {
 714			if (qlen < young)
 715				break;
 716			thresh--;
 717			young <<= 1;
 718		}
 719	}
 720	defer_accept = READ_ONCE(queue->rskq_defer_accept);
 721	if (defer_accept)
 722		max_retries = defer_accept;
 723	syn_ack_recalc(req, thresh, max_retries, defer_accept,
 724		       &expire, &resend);
 725	req->rsk_ops->syn_ack_timeout(req);
 726	if (!expire &&
 727	    (!resend ||
 728	     !inet_rtx_syn_ack(sk_listener, req) ||
 729	     inet_rsk(req)->acked)) {
 730		unsigned long timeo;
 731
 732		if (req->num_timeout++ == 0)
 733			atomic_dec(&queue->young);
 734		timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
 735		mod_timer(&req->rsk_timer, jiffies + timeo);
 736		return;
 737	}
 738drop:
 739	inet_csk_reqsk_queue_drop_and_put(sk_listener, req);
 740}
 741
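The pruning heuristic above lowers the effective SYN-ACK retry threshold once old requests start to crowd out young ones. A standalone sketch of the same arithmetic with made-up numbers (backlog 128, 100 queued requests, 10 of them young) shows the threshold dropping from the tcp_synack_retries default of 5 down to 2:

/* Mirror the thresh-reduction loop from reqsk_timer_handler() with
 * illustrative numbers only.
 */
#include <stdio.h>

int main(void)
{
	unsigned int max_ack_backlog = 128;
	int qlen = 100, young = 10 << 1, thresh = 5;	/* tcp_synack_retries default */

	if ((unsigned int)(qlen << 1) > (max_ack_backlog > 8 ? max_ack_backlog : 8)) {
		while (thresh > 2) {
			if (qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}
	printf("effective SYN-ACK retry threshold: %d\n", thresh);
	return 0;
}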
 742static void reqsk_queue_hash_req(struct request_sock *req,
 743				 unsigned long timeout)
 744{
 745	req->num_retrans = 0;
 746	req->num_timeout = 0;
 747	req->sk = NULL;
 748
 749	timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
 750	mod_timer(&req->rsk_timer, jiffies + timeout);
 751
 752	inet_ehash_insert(req_to_sk(req), NULL);
 753	/* before letting lookups find us, make sure all req fields
 754	 * are committed to memory and refcnt initialized.
 755	 */
 756	smp_wmb();
 757	refcount_set(&req->rsk_refcnt, 2 + 1);
 758}
 759
 760void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
 761				   unsigned long timeout)
 762{
 763	reqsk_queue_hash_req(req, timeout);
 764	inet_csk_reqsk_queue_added(sk);
 765}
 766EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
 767
 768/**
 769 *	inet_csk_clone_lock - clone an inet socket, and lock its clone
 770 *	@sk: the socket to clone
 771 *	@req: request_sock
 772 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 773 *
 774 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 775 */
 776struct sock *inet_csk_clone_lock(const struct sock *sk,
 777				 const struct request_sock *req,
 778				 const gfp_t priority)
 779{
 780	struct sock *newsk = sk_clone_lock(sk, priority);
 781
 782	if (newsk) {
 783		struct inet_connection_sock *newicsk = inet_csk(newsk);
 784
 785		inet_sk_set_state(newsk, TCP_SYN_RECV);
 786		newicsk->icsk_bind_hash = NULL;
 787
 788		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
 789		inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
 790		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
 791
 792		/* listeners have SOCK_RCU_FREE, not the children */
 793		sock_reset_flag(newsk, SOCK_RCU_FREE);
 794
 795		inet_sk(newsk)->mc_list = NULL;
 796
 797		newsk->sk_mark = inet_rsk(req)->ir_mark;
 798		atomic64_set(&newsk->sk_cookie,
 799			     atomic64_read(&inet_rsk(req)->ir_cookie));
 800
 801		newicsk->icsk_retransmits = 0;
 802		newicsk->icsk_backoff	  = 0;
 803		newicsk->icsk_probes_out  = 0;
 804
 805		/* Deinitialize accept_queue to trap illegal accesses. */
 806		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
 807
 808		security_inet_csk_clone(newsk, req);
 809	}
 810	return newsk;
 811}
 812EXPORT_SYMBOL_GPL(inet_csk_clone_lock);
 813
 814/*
 815 * At this point, there should be no process reference to this
 816 * socket, and thus no user references at all.  Therefore we
 817 * can assume the socket waitqueue is inactive and nobody will
 818 * try to jump onto it.
 819 */
 820void inet_csk_destroy_sock(struct sock *sk)
 821{
 822	WARN_ON(sk->sk_state != TCP_CLOSE);
 823	WARN_ON(!sock_flag(sk, SOCK_DEAD));
 824
 825	/* It cannot be in hash table! */
 826	WARN_ON(!sk_unhashed(sk));
 827
 828	/* If it has not 0 inet_sk(sk)->inet_num, it must be bound */
 829	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);
 830
 831	sk->sk_prot->destroy(sk);
 832
 833	sk_stream_kill_queues(sk);
 834
 835	xfrm_sk_free_policy(sk);
 836
 837	sk_refcnt_debug_release(sk);
 838
 839	percpu_counter_dec(sk->sk_prot->orphan_count);
 840
 841	sock_put(sk);
 842}
 843EXPORT_SYMBOL(inet_csk_destroy_sock);
 844
 845/* This function allows to force a closure of a socket after the call to
 846 * tcp/dccp_create_openreq_child().
 847 */
 848void inet_csk_prepare_forced_close(struct sock *sk)
 849	__releases(&sk->sk_lock.slock)
 850{
 851	/* sk_clone_lock locked the socket and set refcnt to 2 */
 852	bh_unlock_sock(sk);
 853	sock_put(sk);
 854
 855	/* The below has to be done to allow calling inet_csk_destroy_sock */
 856	sock_set_flag(sk, SOCK_DEAD);
 857	percpu_counter_inc(sk->sk_prot->orphan_count);
 858	inet_sk(sk)->inet_num = 0;
 859}
 860EXPORT_SYMBOL(inet_csk_prepare_forced_close);
 861
 862int inet_csk_listen_start(struct sock *sk, int backlog)
 863{
 864	struct inet_connection_sock *icsk = inet_csk(sk);
 865	struct inet_sock *inet = inet_sk(sk);
 866	int err = -EADDRINUSE;
 867
 868	reqsk_queue_alloc(&icsk->icsk_accept_queue);
 869
 870	sk->sk_max_ack_backlog = backlog;
 871	sk->sk_ack_backlog = 0;
 872	inet_csk_delack_init(sk);
 873
 874	/* There is race window here: we announce ourselves listening,
 875	 * but this transition is still not validated by get_port().
 876	 * It is OK, because this socket enters to hash table only
 877	 * after validation is complete.
 878	 */
 879	inet_sk_state_store(sk, TCP_LISTEN);
 880	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
 881		inet->inet_sport = htons(inet->inet_num);
 882
 883		sk_dst_reset(sk);
 884		err = sk->sk_prot->hash(sk);
 885
 886		if (likely(!err))
 887			return 0;
 888	}
 889
 890	inet_sk_set_state(sk, TCP_CLOSE);
 891	return err;
 892}
 893EXPORT_SYMBOL_GPL(inet_csk_listen_start);
 894
 895static void inet_child_forget(struct sock *sk, struct request_sock *req,
 896			      struct sock *child)
 897{
 898	sk->sk_prot->disconnect(child, O_NONBLOCK);
 899
 900	sock_orphan(child);
 901
 902	percpu_counter_inc(sk->sk_prot->orphan_count);
 903
 904	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
 905		BUG_ON(tcp_sk(child)->fastopen_rsk != req);
 906		BUG_ON(sk != req->rsk_listener);
 907
 908		/* Paranoid, to prevent race condition if
 909		 * an inbound pkt destined for child is
 910		 * blocked by sock lock in tcp_v4_rcv().
 911		 * Also to satisfy an assertion in
 912		 * tcp_v4_destroy_sock().
 913		 */
 914		tcp_sk(child)->fastopen_rsk = NULL;
 915	}
 916	inet_csk_destroy_sock(child);
 917}
 918
 919struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
 920				      struct request_sock *req,
 921				      struct sock *child)
 922{
 923	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
 924
 925	spin_lock(&queue->rskq_lock);
 926	if (unlikely(sk->sk_state != TCP_LISTEN)) {
 927		inet_child_forget(sk, req, child);
 928		child = NULL;
 929	} else {
 930		req->sk = child;
 931		req->dl_next = NULL;
 932		if (queue->rskq_accept_head == NULL)
 933			queue->rskq_accept_head = req;
 934		else
 935			queue->rskq_accept_tail->dl_next = req;
 936		queue->rskq_accept_tail = req;
 937		sk_acceptq_added(sk);
 938	}
 939	spin_unlock(&queue->rskq_lock);
 940	return child;
 941}
 942EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
 943
 944struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
 945					 struct request_sock *req, bool own_req)
 946{
 947	if (own_req) {
 948		inet_csk_reqsk_queue_drop(sk, req);
 949		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
 950		if (inet_csk_reqsk_queue_add(sk, req, child))
 951			return child;
 952	}
 953	/* Too bad, another child took ownership of the request, undo. */
 954	bh_unlock_sock(child);
 955	sock_put(child);
 956	return NULL;
 957}
 958EXPORT_SYMBOL(inet_csk_complete_hashdance);
 959
 960/*
 961 *	This routine closes sockets which have been at least partially
 962 *	opened, but not yet accepted.
 963 */
 964void inet_csk_listen_stop(struct sock *sk)
 965{
 966	struct inet_connection_sock *icsk = inet_csk(sk);
 967	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
 968	struct request_sock *next, *req;
 969
 970	/* Following specs, it would be better either to send FIN
 971	 * (and enter FIN-WAIT-1, it is normal close)
 972	 * or to send active reset (abort).
 973	 * Certainly, it is pretty dangerous while synflood, but it is
 974	 * bad justification for our negligence 8)
 975	 * To be honest, we are not able to make either
 976	 * of the variants now.			--ANK
 977	 */
 978	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
 979		struct sock *child = req->sk;
 980
 981		local_bh_disable();
 982		bh_lock_sock(child);
 983		WARN_ON(sock_owned_by_user(child));
 984		sock_hold(child);
 985
 986		inet_child_forget(sk, req, child);
 987		reqsk_put(req);
 988		bh_unlock_sock(child);
 989		local_bh_enable();
 990		sock_put(child);
 991
 992		cond_resched();
 993	}
 994	if (queue->fastopenq.rskq_rst_head) {
 995		/* Free all the reqs queued in rskq_rst_head. */
 996		spin_lock_bh(&queue->fastopenq.lock);
 997		req = queue->fastopenq.rskq_rst_head;
 998		queue->fastopenq.rskq_rst_head = NULL;
 999		spin_unlock_bh(&queue->fastopenq.lock);
1000		while (req != NULL) {
1001			next = req->dl_next;
1002			reqsk_put(req);
1003			req = next;
1004		}
1005	}
1006	WARN_ON_ONCE(sk->sk_ack_backlog);
1007}
1008EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
1009
1010void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
1011{
1012	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
1013	const struct inet_sock *inet = inet_sk(sk);
1014
1015	sin->sin_family		= AF_INET;
1016	sin->sin_addr.s_addr	= inet->inet_daddr;
1017	sin->sin_port		= inet->inet_dport;
1018}
1019EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);
1020
1021#ifdef CONFIG_COMPAT
1022int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
1023			       char __user *optval, int __user *optlen)
1024{
1025	const struct inet_connection_sock *icsk = inet_csk(sk);
1026
1027	if (icsk->icsk_af_ops->compat_getsockopt)
1028		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
1029							    optval, optlen);
1030	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
1031					     optval, optlen);
1032}
1033EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);
1034
1035int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
1036			       char __user *optval, unsigned int optlen)
1037{
1038	const struct inet_connection_sock *icsk = inet_csk(sk);
1039
1040	if (icsk->icsk_af_ops->compat_setsockopt)
1041		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
1042							    optval, optlen);
1043	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
1044					     optval, optlen);
1045}
1046EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
1047#endif
1048
1049static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
1050{
1051	const struct inet_sock *inet = inet_sk(sk);
1052	const struct ip_options_rcu *inet_opt;
1053	__be32 daddr = inet->inet_daddr;
1054	struct flowi4 *fl4;
1055	struct rtable *rt;
1056
1057	rcu_read_lock();
1058	inet_opt = rcu_dereference(inet->inet_opt);
1059	if (inet_opt && inet_opt->opt.srr)
1060		daddr = inet_opt->opt.faddr;
1061	fl4 = &fl->u.ip4;
1062	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
1063				   inet->inet_saddr, inet->inet_dport,
1064				   inet->inet_sport, sk->sk_protocol,
1065				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
1066	if (IS_ERR(rt))
1067		rt = NULL;
1068	if (rt)
1069		sk_setup_caps(sk, &rt->dst);
1070	rcu_read_unlock();
1071
1072	return &rt->dst;
1073}
1074
1075struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
1076{
1077	struct dst_entry *dst = __sk_dst_check(sk, 0);
1078	struct inet_sock *inet = inet_sk(sk);
1079
1080	if (!dst) {
1081		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
1082		if (!dst)
1083			goto out;
1084	}
1085	dst->ops->update_pmtu(dst, sk, NULL, mtu);
1086
1087	dst = __sk_dst_check(sk, 0);
1088	if (!dst)
1089		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
1090out:
1091	return dst;
1092}
1093EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);
v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the  BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		Support for INET connection oriented protocols.
   8 *
   9 * Authors:	See the TCP sources
  10 */
  11
  12#include <linux/module.h>
  13#include <linux/jhash.h>
  14
  15#include <net/inet_connection_sock.h>
  16#include <net/inet_hashtables.h>
  17#include <net/inet_timewait_sock.h>
  18#include <net/ip.h>
  19#include <net/route.h>
  20#include <net/tcp_states.h>
  21#include <net/xfrm.h>
  22#include <net/tcp.h>
  23#include <net/sock_reuseport.h>
  24#include <net/addrconf.h>
  25
  26#if IS_ENABLED(CONFIG_IPV6)
  27/* match_sk*_wildcard == true:  IPV6_ADDR_ANY equals to any IPv6 addresses
  28 *				if IPv6 only, and any IPv4 addresses
  29 *				if not IPv6 only
  30 * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
  31 *				IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
  32 *				and 0.0.0.0 equals to 0.0.0.0 only
  33 */
  34static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
  35				 const struct in6_addr *sk2_rcv_saddr6,
  36				 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
  37				 bool sk1_ipv6only, bool sk2_ipv6only,
  38				 bool match_sk1_wildcard,
  39				 bool match_sk2_wildcard)
  40{
  41	int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
  42	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
  43
  44	/* if both are mapped, treat as IPv4 */
  45	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
  46		if (!sk2_ipv6only) {
  47			if (sk1_rcv_saddr == sk2_rcv_saddr)
  48				return true;
  49			return (match_sk1_wildcard && !sk1_rcv_saddr) ||
  50				(match_sk2_wildcard && !sk2_rcv_saddr);
  51		}
  52		return false;
  53	}
  54
  55	if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
  56		return true;
  57
  58	if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard &&
  59	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
  60		return true;
  61
  62	if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard &&
  63	    !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
  64		return true;
  65
  66	if (sk2_rcv_saddr6 &&
  67	    ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
  68		return true;
  69
  70	return false;
  71}
  72#endif
  73
  74/* match_sk*_wildcard == true:  0.0.0.0 equals to any IPv4 addresses
  75 * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
  76 *				0.0.0.0 only equals to 0.0.0.0
  77 */
  78static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
  79				 bool sk2_ipv6only, bool match_sk1_wildcard,
  80				 bool match_sk2_wildcard)
  81{
  82	if (!sk2_ipv6only) {
  83		if (sk1_rcv_saddr == sk2_rcv_saddr)
  84			return true;
  85		return (match_sk1_wildcard && !sk1_rcv_saddr) ||
  86			(match_sk2_wildcard && !sk2_rcv_saddr);
  87	}
  88	return false;
  89}
  90
  91bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
  92			  bool match_wildcard)
  93{
  94#if IS_ENABLED(CONFIG_IPV6)
  95	if (sk->sk_family == AF_INET6)
  96		return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
  97					    inet6_rcv_saddr(sk2),
  98					    sk->sk_rcv_saddr,
  99					    sk2->sk_rcv_saddr,
 100					    ipv6_only_sock(sk),
 101					    ipv6_only_sock(sk2),
 102					    match_wildcard,
 103					    match_wildcard);
 104#endif
 105	return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
 106				    ipv6_only_sock(sk2), match_wildcard,
 107				    match_wildcard);
 108}
 109EXPORT_SYMBOL(inet_rcv_saddr_equal);
 110
 111bool inet_rcv_saddr_any(const struct sock *sk)
 112{
 113#if IS_ENABLED(CONFIG_IPV6)
 114	if (sk->sk_family == AF_INET6)
 115		return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
 116#endif
 117	return !sk->sk_rcv_saddr;
 118}
 119
 120void inet_get_local_port_range(struct net *net, int *low, int *high)
 121{
 122	unsigned int seq;
 123
 124	do {
 125		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
 126
 127		*low = net->ipv4.ip_local_ports.range[0];
 128		*high = net->ipv4.ip_local_ports.range[1];
 129	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
 130}
 131EXPORT_SYMBOL(inet_get_local_port_range);
 132
 133static int inet_csk_bind_conflict(const struct sock *sk,
 134				  const struct inet_bind_bucket *tb,
 135				  bool relax, bool reuseport_ok)
 136{
 137	struct sock *sk2;
 138	bool reuse = sk->sk_reuse;
 139	bool reuseport = !!sk->sk_reuseport;
 140	kuid_t uid = sock_i_uid((struct sock *)sk);
 141
 142	/*
 143	 * Unlike other sk lookup places we do not check
 144	 * for sk_net here, since _all_ the socks listed
 145	 * in tb->owners list belong to the same net - the
 146	 * one this bucket belongs to.
 147	 */
 148
 149	sk_for_each_bound(sk2, &tb->owners) {
 150		if (sk != sk2 &&
 151		    (!sk->sk_bound_dev_if ||
 152		     !sk2->sk_bound_dev_if ||
 153		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
 154			if (reuse && sk2->sk_reuse &&
 155			    sk2->sk_state != TCP_LISTEN) {
 156				if ((!relax ||
 157				     (!reuseport_ok &&
 158				      reuseport && sk2->sk_reuseport &&
 159				      !rcu_access_pointer(sk->sk_reuseport_cb) &&
 160				      (sk2->sk_state == TCP_TIME_WAIT ||
 161				       uid_eq(uid, sock_i_uid(sk2))))) &&
 162				    inet_rcv_saddr_equal(sk, sk2, true))
 163					break;
 164			} else if (!reuseport_ok ||
 165				   !reuseport || !sk2->sk_reuseport ||
 166				   rcu_access_pointer(sk->sk_reuseport_cb) ||
 167				   (sk2->sk_state != TCP_TIME_WAIT &&
 168				    !uid_eq(uid, sock_i_uid(sk2)))) {
 169				if (inet_rcv_saddr_equal(sk, sk2, true))
 170					break;
 171			}
 172		}
 173	}
 174	return sk2 != NULL;
 175}
 176
 177/*
 178 * Find an open port number for the socket.  Returns with the
 179 * inet_bind_hashbucket lock held.
 180 */
 181static struct inet_bind_hashbucket *
 182inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret, int *port_ret)
 183{
 184	struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
 185	int port = 0;
 186	struct inet_bind_hashbucket *head;
 187	struct net *net = sock_net(sk);
 188	bool relax = false;
 189	int i, low, high, attempt_half;
 190	struct inet_bind_bucket *tb;
 191	u32 remaining, offset;
 192	int l3mdev;
 193
 194	l3mdev = inet_sk_bound_l3mdev(sk);
 195ports_exhausted:
 196	attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
 197other_half_scan:
 198	inet_get_local_port_range(net, &low, &high);
 199	high++; /* [32768, 60999] -> [32768, 61000[ */
 200	if (high - low < 4)
 201		attempt_half = 0;
 202	if (attempt_half) {
 203		int half = low + (((high - low) >> 2) << 1);
 204
 205		if (attempt_half == 1)
 206			high = half;
 207		else
 208			low = half;
 209	}
 210	remaining = high - low;
 211	if (likely(remaining > 1))
 212		remaining &= ~1U;
 213
 214	offset = prandom_u32() % remaining;
 215	/* __inet_hash_connect() favors ports having @low parity
 216	 * We do the opposite to not pollute connect() users.
 217	 */
 218	offset |= 1U;
 219
 220other_parity_scan:
 221	port = low + offset;
 222	for (i = 0; i < remaining; i += 2, port += 2) {
 223		if (unlikely(port >= high))
 224			port -= remaining;
 225		if (inet_is_local_reserved_port(net, port))
 226			continue;
 227		head = &hinfo->bhash[inet_bhashfn(net, port,
 228						  hinfo->bhash_size)];
 229		spin_lock_bh(&head->lock);
 230		inet_bind_bucket_for_each(tb, &head->chain)
 231			if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
 232			    tb->port == port) {
 233				if (!inet_csk_bind_conflict(sk, tb, relax, false))
 234					goto success;
 235				goto next_port;
 236			}
 237		tb = NULL;
 238		goto success;
 239next_port:
 240		spin_unlock_bh(&head->lock);
 241		cond_resched();
 242	}
 243
 244	offset--;
 245	if (!(offset & 1))
 246		goto other_parity_scan;
 247
 248	if (attempt_half == 1) {
 249		/* OK we now try the upper half of the range */
 250		attempt_half = 2;
 251		goto other_half_scan;
 252	}
 253
 254	if (net->ipv4.sysctl_ip_autobind_reuse && !relax) {
 255		/* We still have a chance to connect to different destinations */
 256		relax = true;
 257		goto ports_exhausted;
 258	}
 259	return NULL;
 260success:
 261	*port_ret = port;
 262	*tb_ret = tb;
 263	return head;
 264}
 265
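Compared with v4.17, this version adds a final relaxed pass gated by the ip_autobind_reuse sysctl, so an automatic bind may hand out an already used port to sockets that will connect() to different destinations. A hedged sketch for enabling it (the procfs path is assumed from the sysctl name used above; writing it needs the appropriate privileges):

/* Opt in to reusing bound ports on autobind (net.ipv4.ip_autobind_reuse). */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/ip_autobind_reuse", "w");

	if (!f || fputs("1\n", f) == EOF) {
		perror("ip_autobind_reuse");
		return 1;
	}
	fclose(f);
	return 0;
}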
 266static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
 267				     struct sock *sk)
 268{
 269	kuid_t uid = sock_i_uid(sk);
 270
 271	if (tb->fastreuseport <= 0)
 272		return 0;
 273	if (!sk->sk_reuseport)
 274		return 0;
 275	if (rcu_access_pointer(sk->sk_reuseport_cb))
 276		return 0;
 277	if (!uid_eq(tb->fastuid, uid))
 278		return 0;
 279	/* We only need to check the rcv_saddr if this tb was once marked
 280	 * without fastreuseport and then was reset, as we can only know that
 281	 * the fast_*rcv_saddr doesn't have any conflicts with the socks on the
 282	 * owners list.
 283	 */
 284	if (tb->fastreuseport == FASTREUSEPORT_ANY)
 285		return 1;
 286#if IS_ENABLED(CONFIG_IPV6)
 287	if (tb->fast_sk_family == AF_INET6)
 288		return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
 289					    inet6_rcv_saddr(sk),
 290					    tb->fast_rcv_saddr,
 291					    sk->sk_rcv_saddr,
 292					    tb->fast_ipv6_only,
 293					    ipv6_only_sock(sk), true, false);
 294#endif
 295	return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
 296				    ipv6_only_sock(sk), true, false);
 297}
 298
 299void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
 300			       struct sock *sk)
 301{
 302	kuid_t uid = sock_i_uid(sk);
 303	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
 304
 305	if (hlist_empty(&tb->owners)) {
 306		tb->fastreuse = reuse;
 307		if (sk->sk_reuseport) {
 308			tb->fastreuseport = FASTREUSEPORT_ANY;
 309			tb->fastuid = uid;
 310			tb->fast_rcv_saddr = sk->sk_rcv_saddr;
 311			tb->fast_ipv6_only = ipv6_only_sock(sk);
 312			tb->fast_sk_family = sk->sk_family;
 313#if IS_ENABLED(CONFIG_IPV6)
 314			tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
 315#endif
 316		} else {
 317			tb->fastreuseport = 0;
 318		}
 319	} else {
 320		if (!reuse)
 321			tb->fastreuse = 0;
 322		if (sk->sk_reuseport) {
 323			/* We didn't match or we don't have fastreuseport set on
 324			 * the tb, but we have sk_reuseport set on this socket
 325			 * and we know that there are no bind conflicts with
 326			 * this socket in this tb, so reset our tb's reuseport
 327			 * settings so that any subsequent sockets that match
 328			 * our current socket will be put on the fast path.
 329			 *
 330			 * If we reset we need to set FASTREUSEPORT_STRICT so we
 331			 * do extra checking for all subsequent sk_reuseport
 332			 * socks.
 333			 */
 334			if (!sk_reuseport_match(tb, sk)) {
 335				tb->fastreuseport = FASTREUSEPORT_STRICT;
 336				tb->fastuid = uid;
 337				tb->fast_rcv_saddr = sk->sk_rcv_saddr;
 338				tb->fast_ipv6_only = ipv6_only_sock(sk);
 339				tb->fast_sk_family = sk->sk_family;
 340#if IS_ENABLED(CONFIG_IPV6)
 341				tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
 342#endif
 343			}
 344		} else {
 345			tb->fastreuseport = 0;
 346		}
 347	}
 348}
 349
 350/* Obtain a reference to a local port for the given sock,
 351 * if snum is zero it means select any available local port.
 352 * We try to allocate an odd port (and leave even ports for connect())
 353 */
 354int inet_csk_get_port(struct sock *sk, unsigned short snum)
 355{
 356	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
 357	struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
 358	int ret = 1, port = snum;
 359	struct inet_bind_hashbucket *head;
 360	struct net *net = sock_net(sk);
 361	struct inet_bind_bucket *tb = NULL;
 362	int l3mdev;
 363
 364	l3mdev = inet_sk_bound_l3mdev(sk);
 365
 366	if (!port) {
 367		head = inet_csk_find_open_port(sk, &tb, &port);
 368		if (!head)
 369			return ret;
 370		if (!tb)
 371			goto tb_not_found;
 372		goto success;
 373	}
 374	head = &hinfo->bhash[inet_bhashfn(net, port,
 375					  hinfo->bhash_size)];
 376	spin_lock_bh(&head->lock);
 377	inet_bind_bucket_for_each(tb, &head->chain)
 378		if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
 379		    tb->port == port)
 380			goto tb_found;
 381tb_not_found:
 382	tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
 383				     net, head, port, l3mdev);
 384	if (!tb)
 385		goto fail_unlock;
 386tb_found:
 387	if (!hlist_empty(&tb->owners)) {
 388		if (sk->sk_reuse == SK_FORCE_REUSE)
 389			goto success;
 390
 391		if ((tb->fastreuse > 0 && reuse) ||
 392		    sk_reuseport_match(tb, sk))
 393			goto success;
 394		if (inet_csk_bind_conflict(sk, tb, true, true))
 395			goto fail_unlock;
 396	}
 397success:
 398	inet_csk_update_fastreuse(tb, sk);
 399
 400	if (!inet_csk(sk)->icsk_bind_hash)
 401		inet_bind_hash(sk, tb, port);
 402	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
 403	ret = 0;
 404
 405fail_unlock:
 406	spin_unlock_bh(&head->lock);
 407	return ret;
 408}
 409EXPORT_SYMBOL_GPL(inet_csk_get_port);
 410
 411/*
 412 * Wait for an incoming connection, avoid race conditions. This must be called
 413 * with the socket locked.
 414 */
 415static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
 416{
 417	struct inet_connection_sock *icsk = inet_csk(sk);
 418	DEFINE_WAIT(wait);
 419	int err;
 420
 421	/*
 422	 * True wake-one mechanism for incoming connections: only
 423	 * one process gets woken up, not the 'whole herd'.
 424	 * Since we do not 'race & poll' for established sockets
 425	 * anymore, the common case will execute the loop only once.
 426	 *
 427	 * Subtle issue: "add_wait_queue_exclusive()" will be added
 428	 * after any current non-exclusive waiters, and we know that
 429	 * it will always _stay_ after any new non-exclusive waiters
 430	 * because all non-exclusive waiters are added at the
 431	 * beginning of the wait-queue. As such, it's ok to "drop"
 432	 * our exclusiveness temporarily when we get woken up without
 433	 * having to remove and re-insert us on the wait queue.
 434	 */
 435	for (;;) {
 436		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
 437					  TASK_INTERRUPTIBLE);
 438		release_sock(sk);
 439		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
 440			timeo = schedule_timeout(timeo);
 441		sched_annotate_sleep();
 442		lock_sock(sk);
 443		err = 0;
 444		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
 445			break;
 446		err = -EINVAL;
 447		if (sk->sk_state != TCP_LISTEN)
 448			break;
 449		err = sock_intr_errno(timeo);
 450		if (signal_pending(current))
 451			break;
 452		err = -EAGAIN;
 453		if (!timeo)
 454			break;
 455	}
 456	finish_wait(sk_sleep(sk), &wait);
 457	return err;
 458}
 459
 460/*
 461 * This will accept the next outstanding connection.
 462 */
 463struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
 464{
 465	struct inet_connection_sock *icsk = inet_csk(sk);
 466	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
 467	struct request_sock *req;
 468	struct sock *newsk;
 469	int error;
 470
 471	lock_sock(sk);
 472
 473	/* We need to make sure that this socket is listening,
 474	 * and that it has something pending.
 475	 */
 476	error = -EINVAL;
 477	if (sk->sk_state != TCP_LISTEN)
 478		goto out_err;
 479
 480	/* Find already established connection */
 481	if (reqsk_queue_empty(queue)) {
 482		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
 483
 484		/* If this is a non blocking socket don't sleep */
 485		error = -EAGAIN;
 486		if (!timeo)
 487			goto out_err;
 488
 489		error = inet_csk_wait_for_connect(sk, timeo);
 490		if (error)
 491			goto out_err;
 492	}
 493	req = reqsk_queue_remove(queue, sk);
 494	newsk = req->sk;
 495
 496	if (sk->sk_protocol == IPPROTO_TCP &&
 497	    tcp_rsk(req)->tfo_listener) {
 498		spin_lock_bh(&queue->fastopenq.lock);
 499		if (tcp_rsk(req)->tfo_listener) {
 500			/* We are still waiting for the final ACK from 3WHS
 501			 * so can't free req now. Instead, we set req->sk to
 502			 * NULL to signify that the child socket is taken
 503			 * so reqsk_fastopen_remove() will free the req
 504			 * when 3WHS finishes (or is aborted).
 505			 */
 506			req->sk = NULL;
 507			req = NULL;
 508		}
 509		spin_unlock_bh(&queue->fastopenq.lock);
 510	}
 511
 512out:
 513	release_sock(sk);
 514	if (newsk && mem_cgroup_sockets_enabled) {
 515		int amt;
 516
 517		/* atomically get the memory usage, set and charge the
 518		 * newsk->sk_memcg.
 519		 */
 520		lock_sock(newsk);
 521
 522		/* The socket has not been accepted yet, no need to look at
 523		 * newsk->sk_wmem_queued.
 524		 */
 525		amt = sk_mem_pages(newsk->sk_forward_alloc +
 526				   atomic_read(&newsk->sk_rmem_alloc));
 527		mem_cgroup_sk_alloc(newsk);
 528		if (newsk->sk_memcg && amt)
 529			mem_cgroup_charge_skmem(newsk->sk_memcg, amt);
 530
 531		release_sock(newsk);
 532	}
 533	if (req)
 534		reqsk_put(req);
 535	return newsk;
 536out_err:
 537	newsk = NULL;
 538	req = NULL;
 539	*err = error;
 540	goto out;
 541}
 542EXPORT_SYMBOL(inet_csk_accept);
 543
 544/*
 545 * Using different timers for retransmit, delayed acks and probes
 546 * We may wish use just one timer maintaining a list of expire jiffies
 547 * to optimize.
 548 */
 549void inet_csk_init_xmit_timers(struct sock *sk,
 550			       void (*retransmit_handler)(struct timer_list *t),
 551			       void (*delack_handler)(struct timer_list *t),
 552			       void (*keepalive_handler)(struct timer_list *t))
 553{
 554	struct inet_connection_sock *icsk = inet_csk(sk);
 555
 556	timer_setup(&icsk->icsk_retransmit_timer, retransmit_handler, 0);
 557	timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
 558	timer_setup(&sk->sk_timer, keepalive_handler, 0);
 559	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
 560}
 561EXPORT_SYMBOL(inet_csk_init_xmit_timers);
 562
 563void inet_csk_clear_xmit_timers(struct sock *sk)
 564{
 565	struct inet_connection_sock *icsk = inet_csk(sk);
 566
 567	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;
 568
 569	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
 570	sk_stop_timer(sk, &icsk->icsk_delack_timer);
 571	sk_stop_timer(sk, &sk->sk_timer);
 572}
 573EXPORT_SYMBOL(inet_csk_clear_xmit_timers);
 574
 575void inet_csk_delete_keepalive_timer(struct sock *sk)
 576{
 577	sk_stop_timer(sk, &sk->sk_timer);
 578}
 579EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);
 580
 581void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
 582{
 583	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
 584}
 585EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
 586
 587struct dst_entry *inet_csk_route_req(const struct sock *sk,
 588				     struct flowi4 *fl4,
 589				     const struct request_sock *req)
 590{
 591	const struct inet_request_sock *ireq = inet_rsk(req);
 592	struct net *net = read_pnet(&ireq->ireq_net);
 593	struct ip_options_rcu *opt;
 594	struct rtable *rt;
 595
 596	rcu_read_lock();
 597	opt = rcu_dereference(ireq->ireq_opt);
 598
 599	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
 600			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 601			   sk->sk_protocol, inet_sk_flowi_flags(sk),
 602			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
 603			   ireq->ir_loc_addr, ireq->ir_rmt_port,
 604			   htons(ireq->ir_num), sk->sk_uid);
 605	security_req_classify_flow(req, flowi4_to_flowi(fl4));
 606	rt = ip_route_output_flow(net, fl4, sk);
 607	if (IS_ERR(rt))
 608		goto no_route;
 609	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
 610		goto route_err;
 611	rcu_read_unlock();
 612	return &rt->dst;
 613
 614route_err:
 615	ip_rt_put(rt);
 616no_route:
 617	rcu_read_unlock();
 618	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 619	return NULL;
 620}
 621EXPORT_SYMBOL_GPL(inet_csk_route_req);
 622
 623struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
 624					    struct sock *newsk,
 625					    const struct request_sock *req)
 626{
 627	const struct inet_request_sock *ireq = inet_rsk(req);
 628	struct net *net = read_pnet(&ireq->ireq_net);
 629	struct inet_sock *newinet = inet_sk(newsk);
 630	struct ip_options_rcu *opt;
 631	struct flowi4 *fl4;
 632	struct rtable *rt;
 633
 634	opt = rcu_dereference(ireq->ireq_opt);
 635	fl4 = &newinet->cork.fl.u.ip4;
 636
 637	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
 638			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 639			   sk->sk_protocol, inet_sk_flowi_flags(sk),
 640			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
 641			   ireq->ir_loc_addr, ireq->ir_rmt_port,
 642			   htons(ireq->ir_num), sk->sk_uid);
 643	security_req_classify_flow(req, flowi4_to_flowi(fl4));
 644	rt = ip_route_output_flow(net, fl4, sk);
 645	if (IS_ERR(rt))
 646		goto no_route;
 647	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
 648		goto route_err;
 649	return &rt->dst;
 650
 651route_err:
 652	ip_rt_put(rt);
 653no_route:
 654	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 655	return NULL;
 656}
 657EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
 658
 659/* Decide when to expire the request and when to resend SYN-ACK */
 660static void syn_ack_recalc(struct request_sock *req,
 661			   const int max_syn_ack_retries,
 662			   const u8 rskq_defer_accept,
 663			   int *expire, int *resend)
 664{
 665	if (!rskq_defer_accept) {
 666		*expire = req->num_timeout >= max_syn_ack_retries;
 667		*resend = 1;
 668		return;
 669	}
 670	*expire = req->num_timeout >= max_syn_ack_retries &&
 671		  (!inet_rsk(req)->acked || req->num_timeout >= rskq_defer_accept);
 672	/* Do not resend while waiting for data after ACK;
 673	 * start resending at the end of the deferring period to give
 674	 * a last chance for data or an ACK to create an established socket.
 675	 */
 676	*resend = !inet_rsk(req)->acked ||
 677		  req->num_timeout >= rskq_defer_accept - 1;
 678}
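/* Illustrative sketch (not part of this file): rskq_defer_accept is driven by
 * the TCP_DEFER_ACCEPT socket option. Userspace passes a timeout in seconds,
 * which the kernel converts into a number of SYN-ACK retransmission periods;
 * syn_ack_recalc() then keeps the request alive until data arrives or the
 * deferring period ends. listen_fd is a hypothetical listening socket.
 */
#if 0	/* userspace example only, not compiled here */
	int secs = 5;	/* defer accept() for roughly 5s worth of retransmits */

	setsockopt(listen_fd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
		   &secs, sizeof(secs));
#endif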
 679
 680int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
 681{
 682	int err = req->rsk_ops->rtx_syn_ack(parent, req);
 683
 684	if (!err)
 685		req->num_retrans++;
 686	return err;
 687}
 688EXPORT_SYMBOL(inet_rtx_syn_ack);
 689
 690/* return true if req was found in the ehash table */
 691static bool reqsk_queue_unlink(struct request_sock *req)
 692{
 693	struct inet_hashinfo *hashinfo = req_to_sk(req)->sk_prot->h.hashinfo;
 694	bool found = false;
 695
 696	if (sk_hashed(req_to_sk(req))) {
 697		spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);
 698
 699		spin_lock(lock);
 700		found = __sk_nulls_del_node_init_rcu(req_to_sk(req));
 701		spin_unlock(lock);
 702	}
 703	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
 704		reqsk_put(req);
 705	return found;
 706}
 707
 708void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
 709{
 710	if (reqsk_queue_unlink(req)) {
 711		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
 712		reqsk_put(req);
 713	}
 714}
 715EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
 716
 717void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
 718{
 719	inet_csk_reqsk_queue_drop(sk, req);
 720	reqsk_put(req);
 721}
 722EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);
 723
 724static void reqsk_timer_handler(struct timer_list *t)
 725{
 726	struct request_sock *req = from_timer(req, t, rsk_timer);
 727	struct sock *sk_listener = req->rsk_listener;
 728	struct net *net = sock_net(sk_listener);
 729	struct inet_connection_sock *icsk = inet_csk(sk_listener);
 730	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
 731	int max_syn_ack_retries, qlen, expire = 0, resend = 0;
 732
 733	if (inet_sk_state_load(sk_listener) != TCP_LISTEN)
 734		goto drop;
 735
 736	max_syn_ack_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
 737	/* Normally all the openreqs are young and become mature
 738	 * (i.e. converted to an established socket) within the first timeout.
 739	 * If the synack was not acknowledged for 1 second, it means
 740	 * one of the following: the synack was lost, the ack was lost,
 741	 * the rtt is high, or nobody planned to ack (i.e. a synflood).
 742	 * When the server is a bit loaded, the queue is populated with old
 743	 * open requests, reducing the effective size of the queue.
 744	 * When the server is heavily loaded, the queue size drops to zero
 745	 * after several minutes of work. This is not a synflood;
 746	 * it is normal operation. The solution is to prune entries
 747	 * that are too old, overriding the normal timeout, when the
 748	 * situation becomes dangerous.
 749	 *
 750	 * Essentially, we reserve half of the room for young
 751	 * embryos, and abort old ones without pity if the old
 752	 * ones are about to clog our table.
 753	 */
 754	qlen = reqsk_queue_len(queue);
 755	if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
 756		int young = reqsk_queue_len_young(queue) << 1;
 757
 758		while (max_syn_ack_retries > 2) {
 759			if (qlen < young)
 760				break;
 761			max_syn_ack_retries--;
 762			young <<= 1;
 763		}
 764	}
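	/* Worked example (illustrative numbers): with sk_max_ack_backlog = 128,
	 * qlen = 100 (so qlen * 2 = 200 > 128, pruning kicks in), 10 young
	 * requests (young = 20 after the shift) and the default of 5 SYN-ACK
	 * retries: 100 >= 20 -> retries 4, young 40; 100 >= 40 -> retries 3,
	 * young 80; 100 >= 80 -> retries 2, young 160; the loop stops at 2,
	 * so old requests are expired after only two timeouts. Had most of
	 * the queue been young (say 60 young requests, young = 120 > qlen),
	 * the retry budget would have been left untouched.
	 */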
 765	syn_ack_recalc(req, max_syn_ack_retries, READ_ONCE(queue->rskq_defer_accept),
 766		       &expire, &resend);
 767	req->rsk_ops->syn_ack_timeout(req);
 768	if (!expire &&
 769	    (!resend ||
 770	     !inet_rtx_syn_ack(sk_listener, req) ||
 771	     inet_rsk(req)->acked)) {
 772		unsigned long timeo;
 773
 774		if (req->num_timeout++ == 0)
 775			atomic_dec(&queue->young);
 776		timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
 777		mod_timer(&req->rsk_timer, jiffies + timeo);
 778		return;
 779	}
 780drop:
 781	inet_csk_reqsk_queue_drop_and_put(sk_listener, req);
 782}
 783
 784static void reqsk_queue_hash_req(struct request_sock *req,
 785				 unsigned long timeout)
 786{
 787	timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
 788	mod_timer(&req->rsk_timer, jiffies + timeout);
 789
 790	inet_ehash_insert(req_to_sk(req), NULL);
 791	/* before letting lookups find us, make sure all req fields
 792	 * are committed to memory and refcnt initialized.
 793	 */
 794	smp_wmb();
 795	refcount_set(&req->rsk_refcnt, 2 + 1);
 796}
 797
 798void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
 799				   unsigned long timeout)
 800{
 801	reqsk_queue_hash_req(req, timeout);
 802	inet_csk_reqsk_queue_added(sk);
 803}
 804EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
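/* Illustrative sketch (not part of this file): a listener hashes a freshly
 * allocated request with an initial timeout at the end of SYN processing,
 * roughly as TCP does in tcp_conn_request() (simplified; some kernels derive
 * the timeout dynamically rather than using the constant shown here).
 */
#if 0	/* example only, not compiled */
	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
#endif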
 805
 806static void inet_clone_ulp(const struct request_sock *req, struct sock *newsk,
 807			   const gfp_t priority)
 808{
 809	struct inet_connection_sock *icsk = inet_csk(newsk);
 810
 811	if (!icsk->icsk_ulp_ops)
 812		return;
 813
 814	if (icsk->icsk_ulp_ops->clone)
 815		icsk->icsk_ulp_ops->clone(req, newsk, priority);
 816}
 817
 818/**
 819 *	inet_csk_clone_lock - clone an inet socket, and lock its clone
 820 *	@sk: the socket to clone
 821 *	@req: request_sock
 822 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 823 *
 824 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 825 */
 826struct sock *inet_csk_clone_lock(const struct sock *sk,
 827				 const struct request_sock *req,
 828				 const gfp_t priority)
 829{
 830	struct sock *newsk = sk_clone_lock(sk, priority);
 831
 832	if (newsk) {
 833		struct inet_connection_sock *newicsk = inet_csk(newsk);
 834
 835		inet_sk_set_state(newsk, TCP_SYN_RECV);
 836		newicsk->icsk_bind_hash = NULL;
 837
 838		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
 839		inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
 840		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
 841
 842		/* listeners have SOCK_RCU_FREE, not the children */
 843		sock_reset_flag(newsk, SOCK_RCU_FREE);
 844
 845		inet_sk(newsk)->mc_list = NULL;
 846
 847		newsk->sk_mark = inet_rsk(req)->ir_mark;
 848		atomic64_set(&newsk->sk_cookie,
 849			     atomic64_read(&inet_rsk(req)->ir_cookie));
 850
 851		newicsk->icsk_retransmits = 0;
 852		newicsk->icsk_backoff	  = 0;
 853		newicsk->icsk_probes_out  = 0;
 854
 855		/* Deinitialize accept_queue to trap illegal accesses. */
 856		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
 857
 858		inet_clone_ulp(req, newsk, priority);
 859
 860		security_inet_csk_clone(newsk, req);
 861	}
 862	return newsk;
 863}
 864EXPORT_SYMBOL_GPL(inet_csk_clone_lock);
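/* Illustrative sketch (not part of this file): the protocol-specific minisock
 * constructors build their child socket on top of inet_csk_clone_lock(), e.g.
 * tcp_create_openreq_child() starts out approximately like this.
 */
#if 0	/* example only, not compiled */
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (!newsk)
		return NULL;
	/* ... copy protocol-specific state from req into newsk ... */
#endif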
 865
 866/*
 867 * At this point, there should be no process reference to this
 868 * socket, and thus no user references at all.  Therefore we
 869 * can assume the socket waitqueue is inactive and nobody will
 870 * try to jump onto it.
 871 */
 872void inet_csk_destroy_sock(struct sock *sk)
 873{
 874	WARN_ON(sk->sk_state != TCP_CLOSE);
 875	WARN_ON(!sock_flag(sk, SOCK_DEAD));
 876
 877	/* It cannot be in hash table! */
 878	WARN_ON(!sk_unhashed(sk));
 879
 880	/* If inet_sk(sk)->inet_num is not 0, the socket must be bound. */
 881	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);
 882
 883	sk->sk_prot->destroy(sk);
 884
 885	sk_stream_kill_queues(sk);
 886
 887	xfrm_sk_free_policy(sk);
 888
 889	sk_refcnt_debug_release(sk);
 890
 891	percpu_counter_dec(sk->sk_prot->orphan_count);
 892
 893	sock_put(sk);
 894}
 895EXPORT_SYMBOL(inet_csk_destroy_sock);
 896
 897/* This function makes it possible to force the closure of a socket after the
 898 * call to tcp/dccp_create_openreq_child().
 899 */
 900void inet_csk_prepare_forced_close(struct sock *sk)
 901	__releases(&sk->sk_lock.slock)
 902{
 903	/* sk_clone_lock locked the socket and set refcnt to 2 */
 904	bh_unlock_sock(sk);
 905	sock_put(sk);
 906	inet_csk_prepare_for_destroy_sock(sk);
 907	inet_sk(sk)->inet_num = 0;
 908}
 909EXPORT_SYMBOL(inet_csk_prepare_forced_close);
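/* Illustrative sketch (not part of this file): a typical error path in a
 * *_syn_recv_sock() implementation, approximately as TCP handles a child it
 * cannot inherit the port for (simplified from tcp_v4_syn_recv_sock()).
 */
#if 0	/* example only, not compiled */
	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		return NULL;
	}
#endif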
 910
 911int inet_csk_listen_start(struct sock *sk, int backlog)
 912{
 913	struct inet_connection_sock *icsk = inet_csk(sk);
 914	struct inet_sock *inet = inet_sk(sk);
 915	int err = -EADDRINUSE;
 916
 917	reqsk_queue_alloc(&icsk->icsk_accept_queue);
 918
 919	sk->sk_ack_backlog = 0;
 920	inet_csk_delack_init(sk);
 921
 922	/* There is a race window here: we announce ourselves as listening,
 923	 * but this transition is still not validated by get_port().
 924	 * It is OK, because this socket enters the hash table only
 925	 * after validation is complete.
 926	 */
 927	inet_sk_state_store(sk, TCP_LISTEN);
 928	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
 929		inet->inet_sport = htons(inet->inet_num);
 930
 931		sk_dst_reset(sk);
 932		err = sk->sk_prot->hash(sk);
 933
 934		if (likely(!err))
 935			return 0;
 936	}
 937
 938	inet_sk_set_state(sk, TCP_CLOSE);
 939	return err;
 940}
 941EXPORT_SYMBOL_GPL(inet_csk_listen_start);
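/* Illustrative sketch (not part of this file): inet_csk_listen_start() is
 * reached from the listen() system call path, roughly as in inet_listen()
 * (simplified; the caller records the maximum accept backlog separately).
 */
#if 0	/* example only, not compiled */
	err = inet_csk_listen_start(sk, backlog);
	if (err)
		return err;
#endif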
 942
 943static void inet_child_forget(struct sock *sk, struct request_sock *req,
 944			      struct sock *child)
 945{
 946	sk->sk_prot->disconnect(child, O_NONBLOCK);
 947
 948	sock_orphan(child);
 949
 950	percpu_counter_inc(sk->sk_prot->orphan_count);
 951
 952	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
 953		BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
 954		BUG_ON(sk != req->rsk_listener);
 955
 956		/* Paranoid, to prevent a race condition if
 957		 * an inbound pkt destined for the child is
 958		 * blocked by the sock lock in tcp_v4_rcv().
 959		 * Also to satisfy an assertion in
 960		 * tcp_v4_destroy_sock().
 961		 */
 962		RCU_INIT_POINTER(tcp_sk(child)->fastopen_rsk, NULL);
 963	}
 964	inet_csk_destroy_sock(child);
 965}
 966
 967struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
 968				      struct request_sock *req,
 969				      struct sock *child)
 970{
 971	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
 972
 973	spin_lock(&queue->rskq_lock);
 974	if (unlikely(sk->sk_state != TCP_LISTEN)) {
 975		inet_child_forget(sk, req, child);
 976		child = NULL;
 977	} else {
 978		req->sk = child;
 979		req->dl_next = NULL;
 980		if (queue->rskq_accept_head == NULL)
 981			WRITE_ONCE(queue->rskq_accept_head, req);
 982		else
 983			queue->rskq_accept_tail->dl_next = req;
 984		queue->rskq_accept_tail = req;
 985		sk_acceptq_added(sk);
 986	}
 987	spin_unlock(&queue->rskq_lock);
 988	return child;
 989}
 990EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
 991
 992struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
 993					 struct request_sock *req, bool own_req)
 994{
 995	if (own_req) {
 996		inet_csk_reqsk_queue_drop(sk, req);
 997		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
 998		if (inet_csk_reqsk_queue_add(sk, req, child))
 999			return child;
1000	}
1001	/* Too bad, another child took ownership of the request, undo. */
1002	bh_unlock_sock(child);
1003	sock_put(child);
1004	return NULL;
1005}
1006EXPORT_SYMBOL(inet_csk_complete_hashdance);
1007
1008/*
1009 *	This routine closes sockets which have been at least partially
1010 *	opened, but not yet accepted.
1011 */
1012void inet_csk_listen_stop(struct sock *sk)
1013{
1014	struct inet_connection_sock *icsk = inet_csk(sk);
1015	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
1016	struct request_sock *next, *req;
1017
1018	/* Following the specs, it would be better either to send a FIN
1019	 * (and enter FIN-WAIT-1; that is the normal close)
1020	 * or to send an active reset (abort).
1021	 * Certainly, it is pretty dangerous during a synflood, but that is
1022	 * a bad justification for our negligence 8)
1023	 * To be honest, we are not able to implement either
1024	 * of the variants now.			--ANK
1025	 */
1026	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
1027		struct sock *child = req->sk;
1028
1029		local_bh_disable();
1030		bh_lock_sock(child);
1031		WARN_ON(sock_owned_by_user(child));
1032		sock_hold(child);
1033
1034		inet_child_forget(sk, req, child);
1035		reqsk_put(req);
1036		bh_unlock_sock(child);
1037		local_bh_enable();
1038		sock_put(child);
1039
1040		cond_resched();
1041	}
1042	if (queue->fastopenq.rskq_rst_head) {
1043		/* Free all the reqs queued in rskq_rst_head. */
1044		spin_lock_bh(&queue->fastopenq.lock);
1045		req = queue->fastopenq.rskq_rst_head;
1046		queue->fastopenq.rskq_rst_head = NULL;
1047		spin_unlock_bh(&queue->fastopenq.lock);
1048		while (req != NULL) {
1049			next = req->dl_next;
1050			reqsk_put(req);
1051			req = next;
1052		}
1053	}
1054	WARN_ON_ONCE(sk->sk_ack_backlog);
1055}
1056EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
1057
1058void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
1059{
1060	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
1061	const struct inet_sock *inet = inet_sk(sk);
1062
1063	sin->sin_family		= AF_INET;
1064	sin->sin_addr.s_addr	= inet->inet_daddr;
1065	sin->sin_port		= inet->inet_dport;
1066}
1067EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);
1068
1069static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
1070{
1071	const struct inet_sock *inet = inet_sk(sk);
1072	const struct ip_options_rcu *inet_opt;
1073	__be32 daddr = inet->inet_daddr;
1074	struct flowi4 *fl4;
1075	struct rtable *rt;
1076
1077	rcu_read_lock();
1078	inet_opt = rcu_dereference(inet->inet_opt);
1079	if (inet_opt && inet_opt->opt.srr)
1080		daddr = inet_opt->opt.faddr;
1081	fl4 = &fl->u.ip4;
1082	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
1083				   inet->inet_saddr, inet->inet_dport,
1084				   inet->inet_sport, sk->sk_protocol,
1085				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
1086	if (IS_ERR(rt))
1087		rt = NULL;
1088	if (rt)
1089		sk_setup_caps(sk, &rt->dst);
1090	rcu_read_unlock();
1091
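	/* Note: if the route lookup failed, rt is NULL here. Since dst is the
	 * first member of struct rtable, &rt->dst is then NULL as well, which
	 * is exactly what the caller checks for.
	 */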
1092	return &rt->dst;
1093}
1094
1095struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
1096{
1097	struct dst_entry *dst = __sk_dst_check(sk, 0);
1098	struct inet_sock *inet = inet_sk(sk);
1099
1100	if (!dst) {
1101		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
1102		if (!dst)
1103			goto out;
1104	}
1105	dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
1106
1107	dst = __sk_dst_check(sk, 0);
1108	if (!dst)
1109		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
1110out:
1111	return dst;
1112}
1113EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);
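/* Illustrative sketch (not part of this file): PMTU handling in a protocol's
 * ICMP error path, approximately as tcp_v4_mtu_reduced() uses this helper.
 * Assumes mtu already holds the value reported by the ICMP message.
 */
#if 0	/* example only, not compiled */
	struct dst_entry *dst = inet_csk_update_pmtu(sk, mtu);

	if (!dst)
		return;
	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		/* the cached MSS must shrink to match the new path MTU */
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
#endif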