net/ipv4/tcp_metrics.c (v4.17)

   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/rcupdate.h>
   3#include <linux/spinlock.h>
   4#include <linux/jiffies.h>
   5#include <linux/module.h>
   6#include <linux/cache.h>
   7#include <linux/slab.h>
   8#include <linux/init.h>
   9#include <linux/tcp.h>
  10#include <linux/hash.h>
  11#include <linux/tcp_metrics.h>
  12#include <linux/vmalloc.h>
  13
  14#include <net/inet_connection_sock.h>
  15#include <net/net_namespace.h>
  16#include <net/request_sock.h>
  17#include <net/inetpeer.h>
  18#include <net/sock.h>
  19#include <net/ipv6.h>
  20#include <net/dst.h>
  21#include <net/tcp.h>
  22#include <net/genetlink.h>
   23
  24static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
  25						   const struct inetpeer_addr *daddr,
  26						   struct net *net, unsigned int hash);
  27
  28struct tcp_fastopen_metrics {
  29	u16	mss;
  30	u16	syn_loss:10,		/* Recurring Fast Open SYN losses */
  31		try_exp:2;		/* Request w/ exp. option (once) */
  32	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
  33	struct	tcp_fastopen_cookie	cookie;
  34};
  35
   36/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility.
   37 * The kernel only stores RTT and RTTVAR in usec resolution.
  38 */
  39#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)
  40
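
The two ids past TCP_METRIC_MAX_KERNEL are the usec-resolution RTT/RTTVAR
attributes, which exist only as netlink attribute numbers. A minimal
userspace sketch of the sizing, assuming the enum ordering of the uapi
<linux/tcp_metrics.h> header:

	#include <assert.h>

	enum tcp_metric_index {
		TCP_METRIC_RTT,		/* in ms units */
		TCP_METRIC_RTTVAR,	/* in ms units */
		TCP_METRIC_SSTHRESH,
		TCP_METRIC_CWND,
		TCP_METRIC_REORDERING,
		TCP_METRIC_RTT_US,	/* in usec units */
		TCP_METRIC_RTTVAR_US,	/* in usec units */
		__TCP_METRIC_MAX,
	};
	#define TCP_METRIC_MAX		(__TCP_METRIC_MAX - 1)
	#define TCP_METRIC_MAX_KERNEL	(TCP_METRIC_MAX - 2)

	int main(void)
	{
		/* The kernel array covers RTT..REORDERING: 5 slots. */
		assert(TCP_METRIC_MAX_KERNEL == TCP_METRIC_REORDERING);
		unsigned int tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];

		(void)tcpm_vals;
		return 0;
	}
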
  41struct tcp_metrics_block {
  42	struct tcp_metrics_block __rcu	*tcpm_next;
  43	possible_net_t			tcpm_net;
  44	struct inetpeer_addr		tcpm_saddr;
  45	struct inetpeer_addr		tcpm_daddr;
   46	unsigned long			tcpm_stamp;
  47	u32				tcpm_lock;
  48	u32				tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
  49	struct tcp_fastopen_metrics	tcpm_fastopen;
  50
  51	struct rcu_head			rcu_head;
  52};
  53
  54static inline struct net *tm_net(struct tcp_metrics_block *tm)
  55{
  56	return read_pnet(&tm->tcpm_net);
  57}
  58
  59static bool tcp_metric_locked(struct tcp_metrics_block *tm,
  60			      enum tcp_metric_index idx)
  61{
  62	return tm->tcpm_lock & (1 << idx);
  63}
  64
  65static u32 tcp_metric_get(struct tcp_metrics_block *tm,
  66			  enum tcp_metric_index idx)
  67{
  68	return tm->tcpm_vals[idx];
  69}
  70
  71static void tcp_metric_set(struct tcp_metrics_block *tm,
  72			   enum tcp_metric_index idx,
  73			   u32 val)
  74{
  75	tm->tcpm_vals[idx] = val;
  76}
  77
  78static bool addr_same(const struct inetpeer_addr *a,
  79		      const struct inetpeer_addr *b)
  80{
  81	return inetpeer_addr_cmp(a, b) == 0;
  82}
  83
  84struct tcpm_hash_bucket {
  85	struct tcp_metrics_block __rcu	*chain;
  86};
  87
  88static struct tcpm_hash_bucket	*tcp_metrics_hash __read_mostly;
  89static unsigned int		tcp_metrics_hash_log __read_mostly;
  90
  91static DEFINE_SPINLOCK(tcp_metrics_lock);
  92
  93static void tcpm_suck_dst(struct tcp_metrics_block *tm,
  94			  const struct dst_entry *dst,
  95			  bool fastopen_clear)
  96{
  97	u32 msval;
  98	u32 val;
  99
 100	tm->tcpm_stamp = jiffies;
 101
 102	val = 0;
 103	if (dst_metric_locked(dst, RTAX_RTT))
 104		val |= 1 << TCP_METRIC_RTT;
 105	if (dst_metric_locked(dst, RTAX_RTTVAR))
 106		val |= 1 << TCP_METRIC_RTTVAR;
 107	if (dst_metric_locked(dst, RTAX_SSTHRESH))
 108		val |= 1 << TCP_METRIC_SSTHRESH;
 109	if (dst_metric_locked(dst, RTAX_CWND))
 110		val |= 1 << TCP_METRIC_CWND;
 111	if (dst_metric_locked(dst, RTAX_REORDERING))
 112		val |= 1 << TCP_METRIC_REORDERING;
 113	tm->tcpm_lock = val;
 114
 115	msval = dst_metric_raw(dst, RTAX_RTT);
 116	tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;
 117
 118	msval = dst_metric_raw(dst, RTAX_RTTVAR);
 119	tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
 120	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
 121	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
  122	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
 123	if (fastopen_clear) {
 124		tm->tcpm_fastopen.mss = 0;
 125		tm->tcpm_fastopen.syn_loss = 0;
 126		tm->tcpm_fastopen.try_exp = 0;
 127		tm->tcpm_fastopen.cookie.exp = false;
 128		tm->tcpm_fastopen.cookie.len = 0;
 129	}
 130}
 131
 132#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)
 133
 134static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
 135{
 136	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
 137		tcpm_suck_dst(tm, dst, false);
 138}
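
tcpm_check_stamp() refreshes a block from the route once the block is older
than TCP_METRICS_TIMEOUT (one hour). time_after() stays correct across
jiffies wraparound by comparing through a signed difference; a standalone
sketch of that idiom:

	#include <stdbool.h>
	#include <stdio.h>

	/* true if a is later than b, even across unsigned wraparound */
	static bool after(unsigned long a, unsigned long b)
	{
		return (long)(b - a) < 0;
	}

	int main(void)
	{
		unsigned long timeout = 60UL * 60 * 100;  /* 1h at HZ=100 */
		unsigned long stamp = (unsigned long)-50; /* pre-wrap */

		/* "now" is past stamp + timeout even though it wrapped */
		printf("%d\n", after(stamp + timeout + 1, stamp + timeout));
		return 0;
	}
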
 139
 140#define TCP_METRICS_RECLAIM_DEPTH	5
 141#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL
 142
 143#define deref_locked(p)	\
 144	rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))
 145
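
TCP_METRICS_RECLAIM_PTR is a non-NULL pointer value (0x1) that is never
dereferenced: __tcp_get_metrics() returns it instead of NULL when the lookup
walked more than TCP_METRICS_RECLAIM_DEPTH entries, telling tcpm_new() to
recycle the oldest block in the bucket rather than lengthen the chain. A
hypothetical, self-contained sketch of the sentinel pattern:

	#include <stdio.h>

	struct node { struct node *next; int key; };

	#define RECLAIM_DEPTH	5
	#define RECLAIM_PTR	((struct node *)0x1UL)

	/* NULL: not found, chain short. RECLAIM_PTR: not found, chain
	 * too long, so the caller should recycle instead of adding. */
	static struct node *lookup(struct node *head, int key)
	{
		int depth = 0;
		struct node *n;

		for (n = head; n; n = n->next, depth++)
			if (n->key == key)
				return n;
		return depth > RECLAIM_DEPTH ? RECLAIM_PTR : NULL;
	}

	int main(void)
	{
		struct node a = { NULL, 1 };

		printf("%d %d\n", lookup(&a, 1) == &a,
		       lookup(&a, 2) == NULL);	/* 1 1 */
		return 0;
	}
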
 146static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
 147					  struct inetpeer_addr *saddr,
 148					  struct inetpeer_addr *daddr,
 149					  unsigned int hash)
 150{
 151	struct tcp_metrics_block *tm;
 152	struct net *net;
 153	bool reclaim = false;
 154
 155	spin_lock_bh(&tcp_metrics_lock);
 156	net = dev_net(dst->dev);
 157
 158	/* While waiting for the spin-lock the cache might have been populated
 159	 * with this entry and so we have to check again.
 160	 */
 161	tm = __tcp_get_metrics(saddr, daddr, net, hash);
 162	if (tm == TCP_METRICS_RECLAIM_PTR) {
 163		reclaim = true;
 164		tm = NULL;
 165	}
 166	if (tm) {
 167		tcpm_check_stamp(tm, dst);
 168		goto out_unlock;
 169	}
 170
 171	if (unlikely(reclaim)) {
 172		struct tcp_metrics_block *oldest;
 173
 174		oldest = deref_locked(tcp_metrics_hash[hash].chain);
 175		for (tm = deref_locked(oldest->tcpm_next); tm;
 176		     tm = deref_locked(tm->tcpm_next)) {
 177			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
 178				oldest = tm;
 179		}
 180		tm = oldest;
 181	} else {
 182		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
 183		if (!tm)
 184			goto out_unlock;
 185	}
 186	write_pnet(&tm->tcpm_net, net);
 187	tm->tcpm_saddr = *saddr;
 188	tm->tcpm_daddr = *daddr;
 189
 190	tcpm_suck_dst(tm, dst, true);
 191
 192	if (likely(!reclaim)) {
 193		tm->tcpm_next = tcp_metrics_hash[hash].chain;
 194		rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
 195	}
 196
 197out_unlock:
 198	spin_unlock_bh(&tcp_metrics_lock);
 199	return tm;
 200}
 201
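
Two details worth noting in tcpm_new(): the lookup is repeated under
tcp_metrics_lock because another CPU may have inserted the same entry while
this one waited for the lock (the classic check-then-recheck pattern), and
the reclaim path reuses the oldest block in place, while concurrent RCU
readers may still be traversing it; later kernels appear to harden this path
with explicit data-race annotations around these fields.
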
 202static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
 203{
 204	if (tm)
 205		return tm;
 206	if (depth > TCP_METRICS_RECLAIM_DEPTH)
 207		return TCP_METRICS_RECLAIM_PTR;
 208	return NULL;
 209}
 210
 211static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
 212						   const struct inetpeer_addr *daddr,
 213						   struct net *net, unsigned int hash)
 214{
 215	struct tcp_metrics_block *tm;
 216	int depth = 0;
 217
 218	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
 219	     tm = rcu_dereference(tm->tcpm_next)) {
 220		if (addr_same(&tm->tcpm_saddr, saddr) &&
 221		    addr_same(&tm->tcpm_daddr, daddr) &&
 222		    net_eq(tm_net(tm), net))
 223			break;
 224		depth++;
 225	}
 226	return tcp_get_encode(tm, depth);
 227}
 228
 229static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
 230						       struct dst_entry *dst)
 231{
 232	struct tcp_metrics_block *tm;
 233	struct inetpeer_addr saddr, daddr;
 234	unsigned int hash;
 235	struct net *net;
 236
 237	saddr.family = req->rsk_ops->family;
 238	daddr.family = req->rsk_ops->family;
 239	switch (daddr.family) {
 240	case AF_INET:
 241		inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
 242		inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
 243		hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
 244		break;
 245#if IS_ENABLED(CONFIG_IPV6)
 246	case AF_INET6:
 247		inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
 248		inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
 249		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
 250		break;
 251#endif
 252	default:
 253		return NULL;
 254	}
 255
 256	net = dev_net(dst->dev);
 257	hash ^= net_hash_mix(net);
 258	hash = hash_32(hash, tcp_metrics_hash_log);
 259
 260	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
 261	     tm = rcu_dereference(tm->tcpm_next)) {
 262		if (addr_same(&tm->tcpm_saddr, &saddr) &&
 263		    addr_same(&tm->tcpm_daddr, &daddr) &&
 264		    net_eq(tm_net(tm), net))
 265			break;
 266	}
 267	tcpm_check_stamp(tm, dst);
 268	return tm;
 269}
  270
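
Both lookup paths reduce an address hash to a bucket index the same way: mix
in a per-netns salt (net_hash_mix()), then fold the result down to
tcp_metrics_hash_log bits. A userspace sketch, assuming hash_32() is the
kernel's multiplicative hash with the GOLDEN_RATIO_32 constant:

	#include <stdint.h>
	#include <stdio.h>

	#define GOLDEN_RATIO_32 0x61C88647u

	static uint32_t hash_32(uint32_t val, unsigned int bits)
	{
		return (val * GOLDEN_RATIO_32) >> (32 - bits);
	}

	int main(void)
	{
		uint32_t addr_hash = 0xc0a80001; /* hypothetical addr hash */
		uint32_t net_mix = 0x12345678;   /* stand-in net_hash_mix() */
		unsigned int log = 14;           /* tcp_metrics_hash_log */

		printf("bucket %u of %u\n",
		       (unsigned)hash_32(addr_hash ^ net_mix, log),
		       1u << log);
		return 0;
	}
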
 271static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
 272						 struct dst_entry *dst,
 273						 bool create)
 274{
 275	struct tcp_metrics_block *tm;
 276	struct inetpeer_addr saddr, daddr;
 277	unsigned int hash;
 278	struct net *net;
 279
 280	if (sk->sk_family == AF_INET) {
 281		inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
 282		inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
 283		hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
 284	}
 285#if IS_ENABLED(CONFIG_IPV6)
 286	else if (sk->sk_family == AF_INET6) {
 287		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
 288			inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
 289			inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
 290			hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
 291		} else {
 292			inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
 293			inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
 294			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
 295		}
 296	}
 297#endif
 298	else
 299		return NULL;
 300
 301	net = dev_net(dst->dev);
 302	hash ^= net_hash_mix(net);
 303	hash = hash_32(hash, tcp_metrics_hash_log);
 304
 305	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
 306	if (tm == TCP_METRICS_RECLAIM_PTR)
 307		tm = NULL;
 308	if (!tm && create)
 309		tm = tcpm_new(dst, &saddr, &daddr, hash);
 310	else
 311		tcpm_check_stamp(tm, dst);
 312
 313	return tm;
 314}
 315
 316/* Save metrics learned by this TCP session.  This function is called
  317 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 318 * or goes from LAST-ACK to CLOSE.
 319 */
 320void tcp_update_metrics(struct sock *sk)
 321{
 322	const struct inet_connection_sock *icsk = inet_csk(sk);
 323	struct dst_entry *dst = __sk_dst_get(sk);
 324	struct tcp_sock *tp = tcp_sk(sk);
 325	struct net *net = sock_net(sk);
 326	struct tcp_metrics_block *tm;
 327	unsigned long rtt;
 328	u32 val;
 329	int m;
 330
 331	sk_dst_confirm(sk);
 332	if (net->ipv4.sysctl_tcp_nometrics_save || !dst)
 333		return;
  334
 335	rcu_read_lock();
 336	if (icsk->icsk_backoff || !tp->srtt_us) {
 337		/* This session failed to estimate rtt. Why?
 338		 * Probably, no packets returned in time.  Reset our
 339		 * results.
 340		 */
 341		tm = tcp_get_metrics(sk, dst, false);
 342		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
 343			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
 344		goto out_unlock;
 345	} else
 346		tm = tcp_get_metrics(sk, dst, true);
 347
 348	if (!tm)
 349		goto out_unlock;
 350
 351	rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
 352	m = rtt - tp->srtt_us;
 353
 354	/* If newly calculated rtt larger than stored one, store new
 355	 * one. Otherwise, use EWMA. Remember, rtt overestimation is
 356	 * always better than underestimation.
 357	 */
 358	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
 359		if (m <= 0)
 360			rtt = tp->srtt_us;
 361		else
 362			rtt -= (m >> 3);
 363		tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
 364	}
 365
 366	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
 367		unsigned long var;
 368
 369		if (m < 0)
 370			m = -m;
 371
 372		/* Scale deviation to rttvar fixed point */
 373		m >>= 1;
 374		if (m < tp->mdev_us)
 375			m = tp->mdev_us;
 376
 377		var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
 378		if (m >= var)
 379			var = m;
 380		else
 381			var -= (var - m) >> 2;
 382
 383		tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
 384	}
 385
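
Both updates are fixed-point EWMAs: the stored RTT is srtt scaled by 8, so
rtt -= m >> 3 is a gain-1/8 decay toward the new sample (overestimates are
adopted wholesale), and var -= (var - m) >> 2 is a gain-1/4 decay of the
variance toward the halved absolute error. A runnable arithmetic sketch:

	#include <stdio.h>

	int main(void)
	{
		long cached = 120000L << 3;  /* stored RTT: 120 ms, usec<<3 */
		long sample = 100000L << 3;  /* this session's srtt_us */
		long m = cached - sample;

		/* larger samples replace the cache; smaller ones decay it */
		long rtt = (m <= 0) ? sample : cached - (m >> 3);

		/* rttvar: gain-1/4 EWMA toward the (halved) deviation */
		long var = 20000L << 2;      /* stored RTTVAR */
		long err = (m < 0 ? -m : m) >> 1;

		if (err >= var)
			var = err;
		else
			var -= (var - err) >> 2;
		printf("rtt=%ld var=%ld\n", rtt, var);
		return 0;
	}
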
 386	if (tcp_in_initial_slowstart(tp)) {
 387		/* Slow start still did not finish. */
 388		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
 389			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
 390			if (val && (tp->snd_cwnd >> 1) > val)
 391				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
 392					       tp->snd_cwnd >> 1);
 393		}
 394		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
 395			val = tcp_metric_get(tm, TCP_METRIC_CWND);
 396			if (tp->snd_cwnd > val)
 397				tcp_metric_set(tm, TCP_METRIC_CWND,
 398					       tp->snd_cwnd);
 399		}
 400	} else if (!tcp_in_slow_start(tp) &&
 401		   icsk->icsk_ca_state == TCP_CA_Open) {
 402		/* Cong. avoidance phase, cwnd is reliable. */
 403		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
 404			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
 405				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
 406		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
 407			val = tcp_metric_get(tm, TCP_METRIC_CWND);
 408			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
 409		}
 410	} else {
  411		/* Else slow start did not finish, cwnd is nonsense and
  412		 * ssthresh may also be invalid.
 413		 */
 414		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
 415			val = tcp_metric_get(tm, TCP_METRIC_CWND);
 416			tcp_metric_set(tm, TCP_METRIC_CWND,
 417				       (val + tp->snd_ssthresh) >> 1);
 418		}
 419		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
 420			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
 421			if (val && tp->snd_ssthresh > val)
 422				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
 423					       tp->snd_ssthresh);
 424		}
 425		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
 426			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
 427			if (val < tp->reordering &&
 428			    tp->reordering != net->ipv4.sysctl_tcp_reordering)
 429				tcp_metric_set(tm, TCP_METRIC_REORDERING,
 430					       tp->reordering);
 431		}
 432	}
 433	tm->tcpm_stamp = jiffies;
 434out_unlock:
 435	rcu_read_unlock();
 436}
 437
 438/* Initialize metrics on socket. */
 439
 440void tcp_init_metrics(struct sock *sk)
 441{
 442	struct dst_entry *dst = __sk_dst_get(sk);
 443	struct tcp_sock *tp = tcp_sk(sk);
 444	struct tcp_metrics_block *tm;
 445	u32 val, crtt = 0; /* cached RTT scaled by 8 */
 446
 447	sk_dst_confirm(sk);
 448	if (!dst)
 449		goto reset;
  450
 451	rcu_read_lock();
 452	tm = tcp_get_metrics(sk, dst, true);
 453	if (!tm) {
 454		rcu_read_unlock();
 455		goto reset;
 456	}
 457
 458	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
 459		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);
 460
 461	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
 462	if (val) {
 463		tp->snd_ssthresh = val;
 464		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
 465			tp->snd_ssthresh = tp->snd_cwnd_clamp;
 466	} else {
  467		/* ssthresh may have been reduced unnecessarily during
 468		 * 3WHS. Restore it back to its initial default.
 469		 */
 470		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 471	}
 472	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
  473	if (val && tp->reordering != val)
  474		tp->reordering = val;
  475
 476	crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
 477	rcu_read_unlock();
 478reset:
 479	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
 480	 * to seed the RTO for later data packets because SYN packets are
 481	 * small. Use the per-dst cached values to seed the RTO but keep
 482	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
 483	 * Later the RTO will be updated immediately upon obtaining the first
 484	 * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
 485	 * influences the first RTO but not later RTT estimation.
 486	 *
 487	 * But if RTT is not available from the SYN (due to retransmits or
 488	 * syn cookies) or the cache, force a conservative 3secs timeout.
 489	 *
  490	 * A bit of theory. RTT is the time that passes after a "normal" sized
  491	 * packet is sent until it is ACKed. In normal circumstances sending
  492	 * small packets forces the peer to delay ACKs, and the calculation is
  493	 * correct then too. The algorithm is adaptive and, provided we follow
  494	 * the specs, it NEVER underestimates RTT. BUT! If the peer plays clever
  495	 * tricks, sort of "quick acks" for long enough to decrease RTT to a
  496	 * low value, and then abruptly stops doing so and starts to delay
  497	 * ACKs, expect trouble.
 498	 */
 499	if (crtt > tp->srtt_us) {
 500		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
 501		crtt /= 8 * USEC_PER_SEC / HZ;
 502		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
 503	} else if (tp->srtt_us == 0) {
 504		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
 505		 * 3WHS. This is most likely due to retransmission,
 506		 * including spurious one. Reset the RTO back to 3secs
 507		 * from the more aggressive 1sec to avoid more spurious
 508		 * retransmission.
 509		 */
 510		tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
 511		tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;
 512
 513		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
 514	}
 515	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
 516	 * retransmitted. In light of RFC6298 more aggressive 1sec
 517	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
 518	 * retransmission has occurred.
 519	 */
 520	if (tp->total_retrans > 1)
 521		tp->snd_cwnd = 1;
 522	else
 523		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
 524	tp->snd_cwnd_stamp = tcp_jiffies32;
 525}
 526
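
Concretely, with HZ=1000 and a 100 ms cached srtt (stored in usec and scaled
by 8), the divisor 8 * USEC_PER_SEC / HZ converts back to jiffies, and the
seeded RTO lands at 300 ms assuming the default 200 ms rto_min; a worked
sketch of that arithmetic:

	#include <stdio.h>

	int main(void)
	{
		const long hz = 1000, usec_per_sec = 1000000;
		long rto_min = hz / 5;       /* default 200 ms, in jiffies */
		long crtt = 100000L << 3;    /* cached RTT: 100 ms, usec<<3 */
		long rto;

		crtt /= 8 * usec_per_sec / hz;          /* -> 100 jiffies */
		rto = crtt + (2 * crtt > rto_min ? 2 * crtt : rto_min);
		printf("RTO = %ld jiffies (%ld ms)\n", rto, rto * 1000 / hz);
		return 0;
	}
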
  527bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
 528{
 529	struct tcp_metrics_block *tm;
 530	bool ret;
 531
 532	if (!dst)
 533		return false;
 534
 535	rcu_read_lock();
 536	tm = __tcp_get_metrics_req(req, dst);
  537	if (tm && tcp_metric_get(tm, TCP_METRIC_RTT))
 538		ret = true;
 539	else
 540		ret = false;
 541	rcu_read_unlock();
 542
 543	return ret;
 544}
 545
 546static DEFINE_SEQLOCK(fastopen_seqlock);
 547
 548void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
  549			    struct tcp_fastopen_cookie *cookie)
 550{
 551	struct tcp_metrics_block *tm;
 552
 553	rcu_read_lock();
 554	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
 555	if (tm) {
 556		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
 557		unsigned int seq;
 558
 559		do {
 560			seq = read_seqbegin(&fastopen_seqlock);
 561			if (tfom->mss)
 562				*mss = tfom->mss;
 563			*cookie = tfom->cookie;
 564			if (cookie->len <= 0 && tfom->try_exp == 1)
  565				cookie->exp = true;
 566		} while (read_seqretry(&fastopen_seqlock, seq));
 567	}
 568	rcu_read_unlock();
 569}
 570
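
The read side above is the standard seqlock pattern: snapshot the sequence,
copy the data, and retry if a writer bumped the sequence meanwhile. A minimal
userspace sketch of the idiom (it deliberately ignores the memory-barrier
details the kernel's seqlock handles for real):

	#include <stdatomic.h>

	struct cache { _Atomic unsigned int seq; unsigned short mss; };

	static unsigned short read_mss(struct cache *c)
	{
		unsigned int s;
		unsigned short v;

		do {
			while ((s = atomic_load(&c->seq)) & 1)
				;	/* writer in progress: odd seq */
			v = c->mss;
		} while (atomic_load(&c->seq) != s);	/* changed: retry */
		return v;
	}

	int main(void)
	{
		struct cache c = { 0, 1460 };

		return read_mss(&c) == 1460 ? 0 : 1;
	}
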
 571void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
 572			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
 573			    u16 try_exp)
 574{
 575	struct dst_entry *dst = __sk_dst_get(sk);
 576	struct tcp_metrics_block *tm;
 577
 578	if (!dst)
 579		return;
 580	rcu_read_lock();
 581	tm = tcp_get_metrics(sk, dst, true);
 582	if (tm) {
 583		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
 584
 585		write_seqlock_bh(&fastopen_seqlock);
 586		if (mss)
 587			tfom->mss = mss;
 588		if (cookie && cookie->len > 0)
 589			tfom->cookie = *cookie;
 590		else if (try_exp > tfom->try_exp &&
 591			 tfom->cookie.len <= 0 && !tfom->cookie.exp)
 592			tfom->try_exp = try_exp;
 593		if (syn_lost) {
 594			++tfom->syn_loss;
 595			tfom->last_syn_loss = jiffies;
 596		} else
 597			tfom->syn_loss = 0;
 598		write_sequnlock_bh(&fastopen_seqlock);
 599	}
 600	rcu_read_unlock();
 601}
 602
 603static struct genl_family tcp_metrics_nl_family;
 604
 605static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
 606	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
 607	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
 608					    .len = sizeof(struct in6_addr), },
  609	/* The following attributes are not received for GET/DEL;
  610	 * we keep them for reference.
 611	 */
 612#if 0
 613	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
 614	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
 615	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
 616	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
 617	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
 618	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
 619	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
 620	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
 621					    .len = TCP_FASTOPEN_COOKIE_MAX, },
 622#endif
 623};
 624
 625/* Add attributes, caller cancels its header on failure */
 626static int tcp_metrics_fill_info(struct sk_buff *msg,
 627				 struct tcp_metrics_block *tm)
 628{
 629	struct nlattr *nest;
 630	int i;
 631
 632	switch (tm->tcpm_daddr.family) {
 633	case AF_INET:
 634		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
 635				    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
 636			goto nla_put_failure;
 637		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
 638				    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
 639			goto nla_put_failure;
 640		break;
 641	case AF_INET6:
 642		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
 643				     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
 644			goto nla_put_failure;
 645		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
 646				     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
 647			goto nla_put_failure;
 648		break;
 649	default:
 650		return -EAFNOSUPPORT;
 651	}
 652
 653	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
 654			  jiffies - tm->tcpm_stamp,
 655			  TCP_METRICS_ATTR_PAD) < 0)
  656		goto nla_put_failure;
 657
 658	{
 659		int n = 0;
 660
 661		nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
 662		if (!nest)
 663			goto nla_put_failure;
 664		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
 665			u32 val = tm->tcpm_vals[i];
 666
 667			if (!val)
 668				continue;
 669			if (i == TCP_METRIC_RTT) {
 670				if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
 671						val) < 0)
 672					goto nla_put_failure;
 673				n++;
 674				val = max(val / 1000, 1U);
 675			}
 676			if (i == TCP_METRIC_RTTVAR) {
 677				if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
 678						val) < 0)
 679					goto nla_put_failure;
 680				n++;
 681				val = max(val / 1000, 1U);
 682			}
 683			if (nla_put_u32(msg, i + 1, val) < 0)
 684				goto nla_put_failure;
 685			n++;
 686		}
 687		if (n)
 688			nla_nest_end(msg, nest);
 689		else
 690			nla_nest_cancel(msg, nest);
 691	}
 692
 693	{
 694		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
 695		unsigned int seq;
 696
 697		do {
 698			seq = read_seqbegin(&fastopen_seqlock);
 699			tfom_copy[0] = tm->tcpm_fastopen;
 700		} while (read_seqretry(&fastopen_seqlock, seq));
 701
 702		tfom = tfom_copy;
 703		if (tfom->mss &&
 704		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
 705				tfom->mss) < 0)
 706			goto nla_put_failure;
 707		if (tfom->syn_loss &&
 708		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
 709				tfom->syn_loss) < 0 ||
 710		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
 711				jiffies - tfom->last_syn_loss,
 712				TCP_METRICS_ATTR_PAD) < 0))
 713			goto nla_put_failure;
 714		if (tfom->cookie.len > 0 &&
 715		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
 716			    tfom->cookie.len, tfom->cookie.val) < 0)
 717			goto nla_put_failure;
 718	}
 719
 720	return 0;
 721
 722nla_put_failure:
 723	return -EMSGSIZE;
 724}
 725
 726static int tcp_metrics_dump_info(struct sk_buff *skb,
 727				 struct netlink_callback *cb,
 728				 struct tcp_metrics_block *tm)
 729{
 730	void *hdr;
 731
 732	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
 733			  &tcp_metrics_nl_family, NLM_F_MULTI,
 734			  TCP_METRICS_CMD_GET);
 735	if (!hdr)
 736		return -EMSGSIZE;
 737
 738	if (tcp_metrics_fill_info(skb, tm) < 0)
 739		goto nla_put_failure;
 740
 741	genlmsg_end(skb, hdr);
 742	return 0;
 743
 744nla_put_failure:
 745	genlmsg_cancel(skb, hdr);
 746	return -EMSGSIZE;
 747}
 748
 749static int tcp_metrics_nl_dump(struct sk_buff *skb,
 750			       struct netlink_callback *cb)
 751{
 752	struct net *net = sock_net(skb->sk);
 753	unsigned int max_rows = 1U << tcp_metrics_hash_log;
 754	unsigned int row, s_row = cb->args[0];
 755	int s_col = cb->args[1], col = s_col;
 756
 757	for (row = s_row; row < max_rows; row++, s_col = 0) {
 758		struct tcp_metrics_block *tm;
 759		struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;
 760
 761		rcu_read_lock();
 762		for (col = 0, tm = rcu_dereference(hb->chain); tm;
 763		     tm = rcu_dereference(tm->tcpm_next), col++) {
 764			if (!net_eq(tm_net(tm), net))
 765				continue;
 766			if (col < s_col)
 767				continue;
 768			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
 769				rcu_read_unlock();
 770				goto done;
 771			}
 772		}
 773		rcu_read_unlock();
 774	}
 775
 776done:
 777	cb->args[0] = row;
 778	cb->args[1] = col;
 779	return skb->len;
 780}
 781
 782static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
 783			   unsigned int *hash, int optional, int v4, int v6)
 784{
 785	struct nlattr *a;
 786
 787	a = info->attrs[v4];
 788	if (a) {
 789		inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
 790		if (hash)
 791			*hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
 792		return 0;
 793	}
 794	a = info->attrs[v6];
 795	if (a) {
 796		struct in6_addr in6;
 797
 798		if (nla_len(a) != sizeof(struct in6_addr))
 799			return -EINVAL;
 800		in6 = nla_get_in6_addr(a);
 801		inetpeer_set_addr_v6(addr, &in6);
 802		if (hash)
 803			*hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
 804		return 0;
 805	}
 806	return optional ? 1 : -EAFNOSUPPORT;
 807}
 808
 809static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
 810			 unsigned int *hash, int optional)
 811{
 812	return __parse_nl_addr(info, addr, hash, optional,
 813			       TCP_METRICS_ATTR_ADDR_IPV4,
 814			       TCP_METRICS_ATTR_ADDR_IPV6);
 815}
 816
 817static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
 818{
 819	return __parse_nl_addr(info, addr, NULL, 0,
 820			       TCP_METRICS_ATTR_SADDR_IPV4,
 821			       TCP_METRICS_ATTR_SADDR_IPV6);
 822}
 823
 824static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
 825{
 826	struct tcp_metrics_block *tm;
 827	struct inetpeer_addr saddr, daddr;
 828	unsigned int hash;
 829	struct sk_buff *msg;
 830	struct net *net = genl_info_net(info);
 831	void *reply;
 832	int ret;
 833	bool src = true;
 834
 835	ret = parse_nl_addr(info, &daddr, &hash, 0);
 836	if (ret < 0)
 837		return ret;
 838
 839	ret = parse_nl_saddr(info, &saddr);
 840	if (ret < 0)
 841		src = false;
 842
 843	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 844	if (!msg)
 845		return -ENOMEM;
 846
 847	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
 848				  info->genlhdr->cmd);
 849	if (!reply)
 850		goto nla_put_failure;
 851
 852	hash ^= net_hash_mix(net);
 853	hash = hash_32(hash, tcp_metrics_hash_log);
 854	ret = -ESRCH;
 855	rcu_read_lock();
 856	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
 857	     tm = rcu_dereference(tm->tcpm_next)) {
 858		if (addr_same(&tm->tcpm_daddr, &daddr) &&
 859		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
 860		    net_eq(tm_net(tm), net)) {
 861			ret = tcp_metrics_fill_info(msg, tm);
 862			break;
 863		}
 864	}
 865	rcu_read_unlock();
 866	if (ret < 0)
 867		goto out_free;
 868
 869	genlmsg_end(msg, reply);
 870	return genlmsg_reply(msg, info);
 871
 872nla_put_failure:
 873	ret = -EMSGSIZE;
 874
 875out_free:
 876	nlmsg_free(msg);
 877	return ret;
 878}
 879
 880static void tcp_metrics_flush_all(struct net *net)
 881{
 882	unsigned int max_rows = 1U << tcp_metrics_hash_log;
 883	struct tcpm_hash_bucket *hb = tcp_metrics_hash;
 884	struct tcp_metrics_block *tm;
 885	unsigned int row;
 886
 887	for (row = 0; row < max_rows; row++, hb++) {
 888		struct tcp_metrics_block __rcu **pp;
 889		bool match;
 890
 891		spin_lock_bh(&tcp_metrics_lock);
 892		pp = &hb->chain;
 893		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
 894			match = net ? net_eq(tm_net(tm), net) :
 895				!refcount_read(&tm_net(tm)->count);
 896			if (match) {
 897				*pp = tm->tcpm_next;
 898				kfree_rcu(tm, rcu_head);
 899			} else {
 900				pp = &tm->tcpm_next;
 901			}
 902		}
 903		spin_unlock_bh(&tcp_metrics_lock);
 904	}
 905}
 906
 907static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
 908{
 909	struct tcpm_hash_bucket *hb;
 910	struct tcp_metrics_block *tm;
 911	struct tcp_metrics_block __rcu **pp;
 912	struct inetpeer_addr saddr, daddr;
 913	unsigned int hash;
 914	struct net *net = genl_info_net(info);
 915	int ret;
 916	bool src = true, found = false;
 917
 918	ret = parse_nl_addr(info, &daddr, &hash, 1);
 919	if (ret < 0)
 920		return ret;
 921	if (ret > 0) {
 922		tcp_metrics_flush_all(net);
 923		return 0;
 924	}
 925	ret = parse_nl_saddr(info, &saddr);
 926	if (ret < 0)
 927		src = false;
 928
 929	hash ^= net_hash_mix(net);
 930	hash = hash_32(hash, tcp_metrics_hash_log);
 931	hb = tcp_metrics_hash + hash;
 932	pp = &hb->chain;
 933	spin_lock_bh(&tcp_metrics_lock);
 934	for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
 935		if (addr_same(&tm->tcpm_daddr, &daddr) &&
 936		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
 937		    net_eq(tm_net(tm), net)) {
 938			*pp = tm->tcpm_next;
 939			kfree_rcu(tm, rcu_head);
 940			found = true;
 941		} else {
 942			pp = &tm->tcpm_next;
 943		}
 944	}
 945	spin_unlock_bh(&tcp_metrics_lock);
 946	if (!found)
 947		return -ESRCH;
 948	return 0;
 949}
 950
 951static const struct genl_ops tcp_metrics_nl_ops[] = {
 952	{
 953		.cmd = TCP_METRICS_CMD_GET,
 954		.doit = tcp_metrics_nl_cmd_get,
 955		.dumpit = tcp_metrics_nl_dump,
 956		.policy = tcp_metrics_nl_policy,
 957	},
 958	{
 959		.cmd = TCP_METRICS_CMD_DEL,
 960		.doit = tcp_metrics_nl_cmd_del,
 961		.policy = tcp_metrics_nl_policy,
 962		.flags = GENL_ADMIN_PERM,
 963	},
 964};
 965
 966static struct genl_family tcp_metrics_nl_family __ro_after_init = {
 967	.hdrsize	= 0,
 968	.name		= TCP_METRICS_GENL_NAME,
 969	.version	= TCP_METRICS_GENL_VERSION,
 970	.maxattr	= TCP_METRICS_ATTR_MAX,
 971	.netnsok	= true,
 972	.module		= THIS_MODULE,
 973	.ops		= tcp_metrics_nl_ops,
 974	.n_ops		= ARRAY_SIZE(tcp_metrics_nl_ops),
 975};
 976
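
This generic netlink family (TCP_METRICS_GENL_NAME, i.e. "tcp_metrics") is
the interface iproute2 talks to; as of recent iproute2 releases,
TCP_METRICS_CMD_GET backs the show subcommand and TCP_METRICS_CMD_DEL the
delete/flush subcommands, roughly:

	ip tcp_metrics show                 # TCP_METRICS_CMD_GET (dump)
	ip tcp_metrics delete 192.0.2.1     # TCP_METRICS_CMD_DEL
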
 977static unsigned int tcpmhash_entries;
 978static int __init set_tcpmhash_entries(char *str)
 979{
 980	ssize_t ret;
 981
 982	if (!str)
 983		return 0;
 984
 985	ret = kstrtouint(str, 0, &tcpmhash_entries);
 986	if (ret)
 987		return 0;
 988
 989	return 1;
 990}
 991__setup("tcpmhash_entries=", set_tcpmhash_entries);
 992
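
The hash size can therefore be pinned from the kernel command line rather
than auto-sized from available RAM, e.g.:

	tcpmhash_entries=16384

The requested slot count is rounded up to a power of two via order_base_2()
in tcp_net_metrics_init() below.
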
 993static int __net_init tcp_net_metrics_init(struct net *net)
 994{
 995	size_t size;
 996	unsigned int slots;
 997
 998	if (!net_eq(net, &init_net))
 999		return 0;
1000
1001	slots = tcpmhash_entries;
1002	if (!slots) {
1003		if (totalram_pages >= 128 * 1024)
1004			slots = 16 * 1024;
1005		else
1006			slots = 8 * 1024;
1007	}
1008
1009	tcp_metrics_hash_log = order_base_2(slots);
1010	size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;
1011
 1012	tcp_metrics_hash = kvzalloc(size, GFP_KERNEL);
1013	if (!tcp_metrics_hash)
1014		return -ENOMEM;
1015
1016	return 0;
1017}
1018
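
With the defaults above, a machine with at least 128K pages (512 MB at 4 KiB
pages) gets 16K buckets of one pointer each, i.e. a 128 KiB table on 64-bit.
A sketch of the sizing arithmetic, with order_base_2() approximated as
ceil(log2(n)):

	#include <stdio.h>

	static unsigned int order_base_2(unsigned int n)
	{
		unsigned int log = 0;

		while ((1u << log) < n)
			log++;
		return log;
	}

	int main(void)
	{
		unsigned int slots = 16 * 1024;
		unsigned int log = order_base_2(slots);	/* 14 */
		size_t size = sizeof(void *) << log;	/* 128 KiB on 64-bit */

		printf("log=%u size=%zu bytes\n", log, size);
		return 0;
	}
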
1019static void __net_exit tcp_net_metrics_exit_batch(struct list_head *net_exit_list)
1020{
1021	tcp_metrics_flush_all(NULL);
1022}
1023
1024static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
1025	.init		=	tcp_net_metrics_init,
1026	.exit_batch	=	tcp_net_metrics_exit_batch,
1027};
1028
1029void __init tcp_metrics_init(void)
1030{
1031	int ret;
1032
1033	ret = register_pernet_subsys(&tcp_net_metrics_ops);
1034	if (ret < 0)
1035		panic("Could not allocate the tcp_metrics hash table\n");
1036
1037	ret = genl_register_family(&tcp_metrics_nl_family);
1038	if (ret < 0)
1039		panic("Could not register tcp_metrics generic netlink\n");
1040}
v4.10.11
 
   1#include <linux/rcupdate.h>
   2#include <linux/spinlock.h>
   3#include <linux/jiffies.h>
   4#include <linux/module.h>
   5#include <linux/cache.h>
   6#include <linux/slab.h>
   7#include <linux/init.h>
   8#include <linux/tcp.h>
   9#include <linux/hash.h>
  10#include <linux/tcp_metrics.h>
  11#include <linux/vmalloc.h>
  12
  13#include <net/inet_connection_sock.h>
  14#include <net/net_namespace.h>
  15#include <net/request_sock.h>
  16#include <net/inetpeer.h>
  17#include <net/sock.h>
  18#include <net/ipv6.h>
  19#include <net/dst.h>
  20#include <net/tcp.h>
  21#include <net/genetlink.h>
  22
  23int sysctl_tcp_nometrics_save __read_mostly;
  24
  25static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
  26						   const struct inetpeer_addr *daddr,
  27						   struct net *net, unsigned int hash);
  28
  29struct tcp_fastopen_metrics {
  30	u16	mss;
  31	u16	syn_loss:10,		/* Recurring Fast Open SYN losses */
  32		try_exp:2;		/* Request w/ exp. option (once) */
  33	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
  34	struct	tcp_fastopen_cookie	cookie;
  35};
  36
  37/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility
  38 * Kernel only stores RTT and RTTVAR in usec resolution
  39 */
  40#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)
  41
  42struct tcp_metrics_block {
  43	struct tcp_metrics_block __rcu	*tcpm_next;
  44	possible_net_t			tcpm_net;
  45	struct inetpeer_addr		tcpm_saddr;
  46	struct inetpeer_addr		tcpm_daddr;
  47	unsigned long			tcpm_stamp;
  48	u32				tcpm_ts;
  49	u32				tcpm_ts_stamp;
  50	u32				tcpm_lock;
  51	u32				tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
  52	struct tcp_fastopen_metrics	tcpm_fastopen;
  53
  54	struct rcu_head			rcu_head;
  55};
  56
  57static inline struct net *tm_net(struct tcp_metrics_block *tm)
  58{
  59	return read_pnet(&tm->tcpm_net);
  60}
  61
  62static bool tcp_metric_locked(struct tcp_metrics_block *tm,
  63			      enum tcp_metric_index idx)
  64{
  65	return tm->tcpm_lock & (1 << idx);
  66}
  67
  68static u32 tcp_metric_get(struct tcp_metrics_block *tm,
  69			  enum tcp_metric_index idx)
  70{
  71	return tm->tcpm_vals[idx];
  72}
  73
  74static void tcp_metric_set(struct tcp_metrics_block *tm,
  75			   enum tcp_metric_index idx,
  76			   u32 val)
  77{
  78	tm->tcpm_vals[idx] = val;
  79}
  80
  81static bool addr_same(const struct inetpeer_addr *a,
  82		      const struct inetpeer_addr *b)
  83{
  84	return inetpeer_addr_cmp(a, b) == 0;
  85}
  86
  87struct tcpm_hash_bucket {
  88	struct tcp_metrics_block __rcu	*chain;
  89};
  90
  91static struct tcpm_hash_bucket	*tcp_metrics_hash __read_mostly;
  92static unsigned int		tcp_metrics_hash_log __read_mostly;
  93
  94static DEFINE_SPINLOCK(tcp_metrics_lock);
  95
  96static void tcpm_suck_dst(struct tcp_metrics_block *tm,
  97			  const struct dst_entry *dst,
  98			  bool fastopen_clear)
  99{
 100	u32 msval;
 101	u32 val;
 102
 103	tm->tcpm_stamp = jiffies;
 104
 105	val = 0;
 106	if (dst_metric_locked(dst, RTAX_RTT))
 107		val |= 1 << TCP_METRIC_RTT;
 108	if (dst_metric_locked(dst, RTAX_RTTVAR))
 109		val |= 1 << TCP_METRIC_RTTVAR;
 110	if (dst_metric_locked(dst, RTAX_SSTHRESH))
 111		val |= 1 << TCP_METRIC_SSTHRESH;
 112	if (dst_metric_locked(dst, RTAX_CWND))
 113		val |= 1 << TCP_METRIC_CWND;
 114	if (dst_metric_locked(dst, RTAX_REORDERING))
 115		val |= 1 << TCP_METRIC_REORDERING;
 116	tm->tcpm_lock = val;
 117
 118	msval = dst_metric_raw(dst, RTAX_RTT);
 119	tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;
 120
 121	msval = dst_metric_raw(dst, RTAX_RTTVAR);
 122	tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
 123	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
 124	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
 125	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
 126	tm->tcpm_ts = 0;
 127	tm->tcpm_ts_stamp = 0;
 128	if (fastopen_clear) {
 129		tm->tcpm_fastopen.mss = 0;
 130		tm->tcpm_fastopen.syn_loss = 0;
 131		tm->tcpm_fastopen.try_exp = 0;
 132		tm->tcpm_fastopen.cookie.exp = false;
 133		tm->tcpm_fastopen.cookie.len = 0;
 134	}
 135}
 136
 137#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)
 138
 139static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
 140{
 141	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
 142		tcpm_suck_dst(tm, dst, false);
 143}
 144
 145#define TCP_METRICS_RECLAIM_DEPTH	5
 146#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL
 147
 148#define deref_locked(p)	\
 149	rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))
 150
 151static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
 152					  struct inetpeer_addr *saddr,
 153					  struct inetpeer_addr *daddr,
 154					  unsigned int hash)
 155{
 156	struct tcp_metrics_block *tm;
 157	struct net *net;
 158	bool reclaim = false;
 159
 160	spin_lock_bh(&tcp_metrics_lock);
 161	net = dev_net(dst->dev);
 162
 163	/* While waiting for the spin-lock the cache might have been populated
 164	 * with this entry and so we have to check again.
 165	 */
 166	tm = __tcp_get_metrics(saddr, daddr, net, hash);
 167	if (tm == TCP_METRICS_RECLAIM_PTR) {
 168		reclaim = true;
 169		tm = NULL;
 170	}
 171	if (tm) {
 172		tcpm_check_stamp(tm, dst);
 173		goto out_unlock;
 174	}
 175
 176	if (unlikely(reclaim)) {
 177		struct tcp_metrics_block *oldest;
 178
 179		oldest = deref_locked(tcp_metrics_hash[hash].chain);
 180		for (tm = deref_locked(oldest->tcpm_next); tm;
 181		     tm = deref_locked(tm->tcpm_next)) {
 182			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
 183				oldest = tm;
 184		}
 185		tm = oldest;
 186	} else {
 187		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
 188		if (!tm)
 189			goto out_unlock;
 190	}
 191	write_pnet(&tm->tcpm_net, net);
 192	tm->tcpm_saddr = *saddr;
 193	tm->tcpm_daddr = *daddr;
 194
 195	tcpm_suck_dst(tm, dst, true);
 196
 197	if (likely(!reclaim)) {
 198		tm->tcpm_next = tcp_metrics_hash[hash].chain;
 199		rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
 200	}
 201
 202out_unlock:
 203	spin_unlock_bh(&tcp_metrics_lock);
 204	return tm;
 205}
 206
 207static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
 208{
 209	if (tm)
 210		return tm;
 211	if (depth > TCP_METRICS_RECLAIM_DEPTH)
 212		return TCP_METRICS_RECLAIM_PTR;
 213	return NULL;
 214}
 215
 216static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
 217						   const struct inetpeer_addr *daddr,
 218						   struct net *net, unsigned int hash)
 219{
 220	struct tcp_metrics_block *tm;
 221	int depth = 0;
 222
 223	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
 224	     tm = rcu_dereference(tm->tcpm_next)) {
 225		if (addr_same(&tm->tcpm_saddr, saddr) &&
 226		    addr_same(&tm->tcpm_daddr, daddr) &&
 227		    net_eq(tm_net(tm), net))
 228			break;
 229		depth++;
 230	}
 231	return tcp_get_encode(tm, depth);
 232}
 233
 234static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
 235						       struct dst_entry *dst)
 236{
 237	struct tcp_metrics_block *tm;
 238	struct inetpeer_addr saddr, daddr;
 239	unsigned int hash;
 240	struct net *net;
 241
 242	saddr.family = req->rsk_ops->family;
 243	daddr.family = req->rsk_ops->family;
 244	switch (daddr.family) {
 245	case AF_INET:
 246		inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
 247		inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
 248		hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
 249		break;
 250#if IS_ENABLED(CONFIG_IPV6)
 251	case AF_INET6:
 252		inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
 253		inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
 254		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
 255		break;
 256#endif
 257	default:
 258		return NULL;
 259	}
 260
 261	net = dev_net(dst->dev);
 262	hash ^= net_hash_mix(net);
 263	hash = hash_32(hash, tcp_metrics_hash_log);
 264
 265	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
 266	     tm = rcu_dereference(tm->tcpm_next)) {
 267		if (addr_same(&tm->tcpm_saddr, &saddr) &&
 268		    addr_same(&tm->tcpm_daddr, &daddr) &&
 269		    net_eq(tm_net(tm), net))
 270			break;
 271	}
 272	tcpm_check_stamp(tm, dst);
 273	return tm;
 274}
 275
 276static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
 277{
 278	struct tcp_metrics_block *tm;
 279	struct inetpeer_addr saddr, daddr;
 280	unsigned int hash;
 281	struct net *net;
 282
 283	if (tw->tw_family == AF_INET) {
 284		inetpeer_set_addr_v4(&saddr, tw->tw_rcv_saddr);
 285		inetpeer_set_addr_v4(&daddr, tw->tw_daddr);
 286		hash = ipv4_addr_hash(tw->tw_daddr);
 287	}
 288#if IS_ENABLED(CONFIG_IPV6)
 289	else if (tw->tw_family == AF_INET6) {
 290		if (ipv6_addr_v4mapped(&tw->tw_v6_daddr)) {
 291			inetpeer_set_addr_v4(&saddr, tw->tw_rcv_saddr);
 292			inetpeer_set_addr_v4(&daddr, tw->tw_daddr);
 293			hash = ipv4_addr_hash(tw->tw_daddr);
 294		} else {
 295			inetpeer_set_addr_v6(&saddr, &tw->tw_v6_rcv_saddr);
 296			inetpeer_set_addr_v6(&daddr, &tw->tw_v6_daddr);
 297			hash = ipv6_addr_hash(&tw->tw_v6_daddr);
 298		}
 299	}
 300#endif
 301	else
 302		return NULL;
 303
 304	net = twsk_net(tw);
 305	hash ^= net_hash_mix(net);
 306	hash = hash_32(hash, tcp_metrics_hash_log);
 307
 308	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
 309	     tm = rcu_dereference(tm->tcpm_next)) {
 310		if (addr_same(&tm->tcpm_saddr, &saddr) &&
 311		    addr_same(&tm->tcpm_daddr, &daddr) &&
 312		    net_eq(tm_net(tm), net))
 313			break;
 314	}
 315	return tm;
 316}
 317
 318static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
 319						 struct dst_entry *dst,
 320						 bool create)
 321{
 322	struct tcp_metrics_block *tm;
 323	struct inetpeer_addr saddr, daddr;
 324	unsigned int hash;
 325	struct net *net;
 326
 327	if (sk->sk_family == AF_INET) {
 328		inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
 329		inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
 330		hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
 331	}
 332#if IS_ENABLED(CONFIG_IPV6)
 333	else if (sk->sk_family == AF_INET6) {
 334		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
 335			inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
 336			inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
 337			hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
 338		} else {
 339			inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
 340			inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
 341			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
 342		}
 343	}
 344#endif
 345	else
 346		return NULL;
 347
 348	net = dev_net(dst->dev);
 349	hash ^= net_hash_mix(net);
 350	hash = hash_32(hash, tcp_metrics_hash_log);
 351
 352	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
 353	if (tm == TCP_METRICS_RECLAIM_PTR)
 354		tm = NULL;
 355	if (!tm && create)
 356		tm = tcpm_new(dst, &saddr, &daddr, hash);
 357	else
 358		tcpm_check_stamp(tm, dst);
 359
 360	return tm;
 361}
 362
 363/* Save metrics learned by this TCP session.  This function is called
 364 * only, when TCP finishes successfully i.e. when it enters TIME-WAIT
 365 * or goes from LAST-ACK to CLOSE.
 366 */
 367void tcp_update_metrics(struct sock *sk)
 368{
 369	const struct inet_connection_sock *icsk = inet_csk(sk);
 370	struct dst_entry *dst = __sk_dst_get(sk);
 371	struct tcp_sock *tp = tcp_sk(sk);
 372	struct net *net = sock_net(sk);
 373	struct tcp_metrics_block *tm;
 374	unsigned long rtt;
 375	u32 val;
 376	int m;
 377
 378	if (sysctl_tcp_nometrics_save || !dst)
 
 379		return;
 380
 381	if (dst->flags & DST_HOST)
 382		dst_confirm(dst);
 383
 384	rcu_read_lock();
 385	if (icsk->icsk_backoff || !tp->srtt_us) {
 386		/* This session failed to estimate rtt. Why?
 387		 * Probably, no packets returned in time.  Reset our
 388		 * results.
 389		 */
 390		tm = tcp_get_metrics(sk, dst, false);
 391		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
 392			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
 393		goto out_unlock;
 394	} else
 395		tm = tcp_get_metrics(sk, dst, true);
 396
 397	if (!tm)
 398		goto out_unlock;
 399
 400	rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
 401	m = rtt - tp->srtt_us;
 402
 403	/* If newly calculated rtt larger than stored one, store new
 404	 * one. Otherwise, use EWMA. Remember, rtt overestimation is
 405	 * always better than underestimation.
 406	 */
 407	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
 408		if (m <= 0)
 409			rtt = tp->srtt_us;
 410		else
 411			rtt -= (m >> 3);
 412		tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
 413	}
 414
 415	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
 416		unsigned long var;
 417
 418		if (m < 0)
 419			m = -m;
 420
 421		/* Scale deviation to rttvar fixed point */
 422		m >>= 1;
 423		if (m < tp->mdev_us)
 424			m = tp->mdev_us;
 425
 426		var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
 427		if (m >= var)
 428			var = m;
 429		else
 430			var -= (var - m) >> 2;
 431
 432		tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
 433	}
 434
 435	if (tcp_in_initial_slowstart(tp)) {
 436		/* Slow start still did not finish. */
 437		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
 438			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
 439			if (val && (tp->snd_cwnd >> 1) > val)
 440				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
 441					       tp->snd_cwnd >> 1);
 442		}
 443		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
 444			val = tcp_metric_get(tm, TCP_METRIC_CWND);
 445			if (tp->snd_cwnd > val)
 446				tcp_metric_set(tm, TCP_METRIC_CWND,
 447					       tp->snd_cwnd);
 448		}
 449	} else if (!tcp_in_slow_start(tp) &&
 450		   icsk->icsk_ca_state == TCP_CA_Open) {
 451		/* Cong. avoidance phase, cwnd is reliable. */
 452		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
 453			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
 454				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
 455		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
 456			val = tcp_metric_get(tm, TCP_METRIC_CWND);
 457			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
 458		}
 459	} else {
 460		/* Else slow start did not finish, cwnd is non-sense,
 461		 * ssthresh may be also invalid.
 462		 */
 463		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
 464			val = tcp_metric_get(tm, TCP_METRIC_CWND);
 465			tcp_metric_set(tm, TCP_METRIC_CWND,
 466				       (val + tp->snd_ssthresh) >> 1);
 467		}
 468		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
 469			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
 470			if (val && tp->snd_ssthresh > val)
 471				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
 472					       tp->snd_ssthresh);
 473		}
 474		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
 475			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
 476			if (val < tp->reordering &&
 477			    tp->reordering != net->ipv4.sysctl_tcp_reordering)
 478				tcp_metric_set(tm, TCP_METRIC_REORDERING,
 479					       tp->reordering);
 480		}
 481	}
 482	tm->tcpm_stamp = jiffies;
 483out_unlock:
 484	rcu_read_unlock();
 485}
 486
 487/* Initialize metrics on socket. */
 488
 489void tcp_init_metrics(struct sock *sk)
 490{
 491	struct dst_entry *dst = __sk_dst_get(sk);
 492	struct tcp_sock *tp = tcp_sk(sk);
 493	struct tcp_metrics_block *tm;
 494	u32 val, crtt = 0; /* cached RTT scaled by 8 */
 495
 
 496	if (!dst)
 497		goto reset;
 498
 499	dst_confirm(dst);
 500
 501	rcu_read_lock();
 502	tm = tcp_get_metrics(sk, dst, true);
 503	if (!tm) {
 504		rcu_read_unlock();
 505		goto reset;
 506	}
 507
 508	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
 509		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);
 510
 511	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
 512	if (val) {
 513		tp->snd_ssthresh = val;
 514		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
 515			tp->snd_ssthresh = tp->snd_cwnd_clamp;
 516	} else {
 517		/* ssthresh may have been reduced unnecessarily during.
 518		 * 3WHS. Restore it back to its initial default.
 519		 */
 520		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 521	}
 522	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
 523	if (val && tp->reordering != val) {
 524		tcp_disable_fack(tp);
 525		tcp_disable_early_retrans(tp);
 526		tp->reordering = val;
 527	}
 528
 529	crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
 530	rcu_read_unlock();
 531reset:
 532	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
 533	 * to seed the RTO for later data packets because SYN packets are
 534	 * small. Use the per-dst cached values to seed the RTO but keep
 535	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
 536	 * Later the RTO will be updated immediately upon obtaining the first
 537	 * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
 538	 * influences the first RTO but not later RTT estimation.
 539	 *
 540	 * But if RTT is not available from the SYN (due to retransmits or
 541	 * syn cookies) or the cache, force a conservative 3secs timeout.
 542	 *
 543	 * A bit of theory. RTT is time passed after "normal" sized packet
 544	 * is sent until it is ACKed. In normal circumstances sending small
 545	 * packets force peer to delay ACKs and calculation is correct too.
 546	 * The algorithm is adaptive and, provided we follow specs, it
 547	 * NEVER underestimate RTT. BUT! If peer tries to make some clever
 548	 * tricks sort of "quick acks" for time long enough to decrease RTT
 549	 * to low value, and then abruptly stops to do it and starts to delay
 550	 * ACKs, wait for troubles.
 551	 */
 552	if (crtt > tp->srtt_us) {
 553		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
 554		crtt /= 8 * USEC_PER_SEC / HZ;
 555		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
 556	} else if (tp->srtt_us == 0) {
 557		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
 558		 * 3WHS. This is most likely due to retransmission,
 559		 * including spurious one. Reset the RTO back to 3secs
 560		 * from the more aggressive 1sec to avoid more spurious
 561		 * retransmission.
 562		 */
 563		tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
 564		tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;
 565
 566		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
 567	}
 568	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
 569	 * retransmitted. In light of RFC6298 more aggressive 1sec
 570	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
 571	 * retransmission has occurred.
 572	 */
 573	if (tp->total_retrans > 1)
 574		tp->snd_cwnd = 1;
 575	else
 576		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
 577	tp->snd_cwnd_stamp = tcp_time_stamp;
 578}
 579
 580bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
 581			bool paws_check, bool timestamps)
 582{
 583	struct tcp_metrics_block *tm;
 584	bool ret;
 585
 586	if (!dst)
 587		return false;
 588
 589	rcu_read_lock();
 590	tm = __tcp_get_metrics_req(req, dst);
 591	if (paws_check) {
 592		if (tm &&
 593		    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
 594		    ((s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW ||
 595		     !timestamps))
 596			ret = false;
 597		else
 598			ret = true;
 599	} else {
 600		if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
 601			ret = true;
 602		else
 603			ret = false;
 604	}
 605	rcu_read_unlock();
 606
 607	return ret;
 608}
 609
 610void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
 611{
 612	struct tcp_metrics_block *tm;
 613
 614	rcu_read_lock();
 615	tm = tcp_get_metrics(sk, dst, true);
 616	if (tm) {
 617		struct tcp_sock *tp = tcp_sk(sk);
 618
 619		if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
 620			tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
 621			tp->rx_opt.ts_recent = tm->tcpm_ts;
 622		}
 623	}
 624	rcu_read_unlock();
 625}
 626EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);
 627
 628/* VJ's idea. Save last timestamp seen from this destination and hold
 629 * it at least for normal timewait interval to use for duplicate
 630 * segment detection in subsequent connections, before they enter
 631 * synchronized state.
 632 */
 633bool tcp_remember_stamp(struct sock *sk)
 634{
 635	struct dst_entry *dst = __sk_dst_get(sk);
 636	bool ret = false;
 637
 638	if (dst) {
 639		struct tcp_metrics_block *tm;
 640
 641		rcu_read_lock();
 642		tm = tcp_get_metrics(sk, dst, true);
 643		if (tm) {
 644			struct tcp_sock *tp = tcp_sk(sk);
 645
 646			if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
 647			    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
 648			     tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
 649				tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
 650				tm->tcpm_ts = tp->rx_opt.ts_recent;
 651			}
 652			ret = true;
 653		}
 654		rcu_read_unlock();
 655	}
 656	return ret;
 657}
 658
 659bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
 660{
 661	struct tcp_metrics_block *tm;
 662	bool ret = false;
 663
 664	rcu_read_lock();
 665	tm = __tcp_get_metrics_tw(tw);
 666	if (tm) {
 667		const struct tcp_timewait_sock *tcptw;
 668		struct sock *sk = (struct sock *) tw;
 669
 670		tcptw = tcp_twsk(sk);
 671		if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
 672		    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
 673		     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
 674			tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
 675			tm->tcpm_ts	   = tcptw->tw_ts_recent;
 676		}
 677		ret = true;
 678	}
 
 679	rcu_read_unlock();
 680
 681	return ret;
 682}
 683
 684static DEFINE_SEQLOCK(fastopen_seqlock);
 685
 686void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
 687			    struct tcp_fastopen_cookie *cookie,
 688			    int *syn_loss, unsigned long *last_syn_loss)
 689{
 690	struct tcp_metrics_block *tm;
 691
 692	rcu_read_lock();
 693	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
 694	if (tm) {
 695		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
 696		unsigned int seq;
 697
 698		do {
 699			seq = read_seqbegin(&fastopen_seqlock);
 700			if (tfom->mss)
 701				*mss = tfom->mss;
 702			*cookie = tfom->cookie;
 703			if (cookie->len <= 0 && tfom->try_exp == 1)
 704				cookie->exp = true;
 705			*syn_loss = tfom->syn_loss;
 706			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
 707		} while (read_seqretry(&fastopen_seqlock, seq));
 708	}
 709	rcu_read_unlock();
 710}
 711
 712void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
 713			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
 714			    u16 try_exp)
 715{
 716	struct dst_entry *dst = __sk_dst_get(sk);
 717	struct tcp_metrics_block *tm;
 718
 719	if (!dst)
 720		return;
 721	rcu_read_lock();
 722	tm = tcp_get_metrics(sk, dst, true);
 723	if (tm) {
 724		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
 725
 726		write_seqlock_bh(&fastopen_seqlock);
 727		if (mss)
 728			tfom->mss = mss;
 729		if (cookie && cookie->len > 0)
 730			tfom->cookie = *cookie;
 731		else if (try_exp > tfom->try_exp &&
 732			 tfom->cookie.len <= 0 && !tfom->cookie.exp)
 733			tfom->try_exp = try_exp;
 734		if (syn_lost) {
 735			++tfom->syn_loss;
 736			tfom->last_syn_loss = jiffies;
 737		} else
 738			tfom->syn_loss = 0;
 739		write_sequnlock_bh(&fastopen_seqlock);
 740	}
 741	rcu_read_unlock();
 742}
 743
 744static struct genl_family tcp_metrics_nl_family;
 745
 746static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
 747	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
 748	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
 749					    .len = sizeof(struct in6_addr), },
 750	/* Following attributes are not received for GET/DEL,
 751	 * we keep them for reference
 752	 */
 753#if 0
 754	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
 755	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
 756	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
 757	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
 758	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
 759	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
 760	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
 761	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
 762					    .len = TCP_FASTOPEN_COOKIE_MAX, },
 763#endif
 764};
 765
 766/* Add attributes, caller cancels its header on failure */
 767static int tcp_metrics_fill_info(struct sk_buff *msg,
 768				 struct tcp_metrics_block *tm)
 769{
 770	struct nlattr *nest;
 771	int i;
 772
 773	switch (tm->tcpm_daddr.family) {
 774	case AF_INET:
 775		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
 776				    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
 777			goto nla_put_failure;
 778		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
 779				    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
 780			goto nla_put_failure;
 781		break;
 782	case AF_INET6:
 783		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
 784				     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
 785			goto nla_put_failure;
 786		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
 787				     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
 788			goto nla_put_failure;
 789		break;
 790	default:
 791		return -EAFNOSUPPORT;
 792	}
 793
 794	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
 795			  jiffies - tm->tcpm_stamp,
 796			  TCP_METRICS_ATTR_PAD) < 0)
 797		goto nla_put_failure;
 798	if (tm->tcpm_ts_stamp) {
 799		if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
 800				(s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
 801			goto nla_put_failure;
 802		if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
 803				tm->tcpm_ts) < 0)
 804			goto nla_put_failure;
 805	}
 806
 807	{
 808		int n = 0;
 809
 810		nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
 811		if (!nest)
 812			goto nla_put_failure;
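		/* RTT and RTTVAR are kept in usec.  Export the raw usec
		 * value under the *_US attribute first, then fall through
		 * with the legacy msec value (clamped to at least 1) under
		 * the original attribute.
		 */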
 813		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
 814			u32 val = tm->tcpm_vals[i];
 815
 816			if (!val)
 817				continue;
 818			if (i == TCP_METRIC_RTT) {
 819				if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
 820						val) < 0)
 821					goto nla_put_failure;
 822				n++;
 823				val = max(val / 1000, 1U);
 824			}
 825			if (i == TCP_METRIC_RTTVAR) {
 826				if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
 827						val) < 0)
 828					goto nla_put_failure;
 829				n++;
 830				val = max(val / 1000, 1U);
 831			}
 832			if (nla_put_u32(msg, i + 1, val) < 0)
 833				goto nla_put_failure;
 834			n++;
 835		}
 836		if (n)
 837			nla_nest_end(msg, nest);
 838		else
 839			nla_nest_cancel(msg, nest);
 840	}
 841
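	/* Copy the fastopen metrics under the seqlock so that the
	 * attributes emitted below form a consistent snapshot.
	 */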
 842	{
 843		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
 844		unsigned int seq;
 845
 846		do {
 847			seq = read_seqbegin(&fastopen_seqlock);
 848			tfom_copy[0] = tm->tcpm_fastopen;
 849		} while (read_seqretry(&fastopen_seqlock, seq));
 850
 851		tfom = tfom_copy;
 852		if (tfom->mss &&
 853		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
 854				tfom->mss) < 0)
 855			goto nla_put_failure;
 856		if (tfom->syn_loss &&
 857		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
 858				tfom->syn_loss) < 0 ||
 859		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
 860				jiffies - tfom->last_syn_loss,
 861				TCP_METRICS_ATTR_PAD) < 0))
 862			goto nla_put_failure;
 863		if (tfom->cookie.len > 0 &&
 864		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
 865			    tfom->cookie.len, tfom->cookie.val) < 0)
 866			goto nla_put_failure;
 867	}
 868
 869	return 0;
 870
 871nla_put_failure:
 872	return -EMSGSIZE;
 873}
 874
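/* Emit one table entry as its own genetlink message; NLM_F_MULTI marks
 * it as one part of a multi-part dump.
 */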
 875static int tcp_metrics_dump_info(struct sk_buff *skb,
 876				 struct netlink_callback *cb,
 877				 struct tcp_metrics_block *tm)
 878{
 879	void *hdr;
 880
 881	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
 882			  &tcp_metrics_nl_family, NLM_F_MULTI,
 883			  TCP_METRICS_CMD_GET);
 884	if (!hdr)
 885		return -EMSGSIZE;
 886
 887	if (tcp_metrics_fill_info(skb, tm) < 0)
 888		goto nla_put_failure;
 889
 890	genlmsg_end(skb, hdr);
 891	return 0;
 892
 893nla_put_failure:
 894	genlmsg_cancel(skb, hdr);
 895	return -EMSGSIZE;
 896}
 897
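/* Walk the whole table, possibly across several dump calls: cb->args[0]
 * and cb->args[1] record the hash row and the position within the chain
 * that were reached, so the next call resumes where this one filled the
 * skb.
 */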
 898static int tcp_metrics_nl_dump(struct sk_buff *skb,
 899			       struct netlink_callback *cb)
 900{
 901	struct net *net = sock_net(skb->sk);
 902	unsigned int max_rows = 1U << tcp_metrics_hash_log;
 903	unsigned int row, s_row = cb->args[0];
 904	int s_col = cb->args[1], col = s_col;
 905
 906	for (row = s_row; row < max_rows; row++, s_col = 0) {
 907		struct tcp_metrics_block *tm;
 908		struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;
 909
 910		rcu_read_lock();
 911		for (col = 0, tm = rcu_dereference(hb->chain); tm;
 912		     tm = rcu_dereference(tm->tcpm_next), col++) {
 913			if (!net_eq(tm_net(tm), net))
 914				continue;
 915			if (col < s_col)
 916				continue;
 917			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
 918				rcu_read_unlock();
 919				goto done;
 920			}
 921		}
 922		rcu_read_unlock();
 923	}
 924
 925done:
 926	cb->args[0] = row;
 927	cb->args[1] = col;
 928	return skb->len;
 929}
 930
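/* Extract an IPv4 or IPv6 address attribute into an inetpeer_addr and
 * optionally compute its hash.  Returns 0 on success, 1 when no address
 * attribute is present and @optional is set, or a negative errno.
 */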
 931static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
 932			   unsigned int *hash, int optional, int v4, int v6)
 933{
 934	struct nlattr *a;
 935
 936	a = info->attrs[v4];
 937	if (a) {
 938		inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
 939		if (hash)
 940			*hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
 941		return 0;
 942	}
 943	a = info->attrs[v6];
 944	if (a) {
 945		struct in6_addr in6;
 946
 947		if (nla_len(a) != sizeof(struct in6_addr))
 948			return -EINVAL;
 949		in6 = nla_get_in6_addr(a);
 950		inetpeer_set_addr_v6(addr, &in6);
 951		if (hash)
 952			*hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
 953		return 0;
 954	}
 955	return optional ? 1 : -EAFNOSUPPORT;
 956}
 957
 958static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
 959			 unsigned int *hash, int optional)
 960{
 961	return __parse_nl_addr(info, addr, hash, optional,
 962			       TCP_METRICS_ATTR_ADDR_IPV4,
 963			       TCP_METRICS_ATTR_ADDR_IPV6);
 964}
 965
 966static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
 967{
 968	return __parse_nl_addr(info, addr, NULL, 0,
 969			       TCP_METRICS_ATTR_SADDR_IPV4,
 970			       TCP_METRICS_ATTR_SADDR_IPV6);
 971}
 972
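/* GET doit handler: a destination address is mandatory, a source address
 * narrows the match.  This is the request that e.g. iproute2's
 * "ip tcp_metrics show <addr>" sends; listing the whole table goes
 * through the dump path above instead.
 */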
 973static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
 974{
 975	struct tcp_metrics_block *tm;
 976	struct inetpeer_addr saddr, daddr;
 977	unsigned int hash;
 978	struct sk_buff *msg;
 979	struct net *net = genl_info_net(info);
 980	void *reply;
 981	int ret;
 982	bool src = true;
 983
 984	ret = parse_nl_addr(info, &daddr, &hash, 0);
 985	if (ret < 0)
 986		return ret;
 987
 988	ret = parse_nl_saddr(info, &saddr);
 989	if (ret < 0)
 990		src = false;
 991
 992	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 993	if (!msg)
 994		return -ENOMEM;
 995
 996	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
 997				  info->genlhdr->cmd);
 998	if (!reply)
 999		goto nla_put_failure;
1000
1001	hash ^= net_hash_mix(net);
1002	hash = hash_32(hash, tcp_metrics_hash_log);
1003	ret = -ESRCH;
1004	rcu_read_lock();
1005	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
1006	     tm = rcu_dereference(tm->tcpm_next)) {
1007		if (addr_same(&tm->tcpm_daddr, &daddr) &&
1008		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
1009		    net_eq(tm_net(tm), net)) {
1010			ret = tcp_metrics_fill_info(msg, tm);
1011			break;
1012		}
1013	}
1014	rcu_read_unlock();
1015	if (ret < 0)
1016		goto out_free;
1017
1018	genlmsg_end(msg, reply);
1019	return genlmsg_reply(msg, info);
1020
1021nla_put_failure:
1022	ret = -EMSGSIZE;
1023
1024out_free:
1025	nlmsg_free(msg);
1026	return ret;
1027}
1028
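/* Unlink and free every entry belonging to @net, one bucket at a time.
 * Freeing is deferred via kfree_rcu() so lockless readers still walking
 * a chain under rcu_read_lock() never touch freed memory.
 */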
1029static void tcp_metrics_flush_all(struct net *net)
1030{
1031	unsigned int max_rows = 1U << tcp_metrics_hash_log;
1032	struct tcpm_hash_bucket *hb = tcp_metrics_hash;
1033	struct tcp_metrics_block *tm;
1034	unsigned int row;
1035
1036	for (row = 0; row < max_rows; row++, hb++) {
1037		struct tcp_metrics_block __rcu **pp;
1038		spin_lock_bh(&tcp_metrics_lock);
1039		pp = &hb->chain;
1040		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
1041			if (net_eq(tm_net(tm), net)) {
1042				*pp = tm->tcpm_next;
1043				kfree_rcu(tm, rcu_head);
1044			} else {
1045				pp = &tm->tcpm_next;
1046			}
1047		}
1048		spin_unlock_bh(&tcp_metrics_lock);
1049	}
1050}
1051
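/* DEL doit handler (GENL_ADMIN_PERM, i.e. CAP_NET_ADMIN only): with no
 * destination address the whole per-netns table is flushed, otherwise
 * every entry matching daddr (and saddr, when supplied) is removed.
 */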
1052static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
1053{
1054	struct tcpm_hash_bucket *hb;
1055	struct tcp_metrics_block *tm;
1056	struct tcp_metrics_block __rcu **pp;
1057	struct inetpeer_addr saddr, daddr;
1058	unsigned int hash;
1059	struct net *net = genl_info_net(info);
1060	int ret;
1061	bool src = true, found = false;
1062
1063	ret = parse_nl_addr(info, &daddr, &hash, 1);
1064	if (ret < 0)
1065		return ret;
1066	if (ret > 0) {
1067		tcp_metrics_flush_all(net);
1068		return 0;
1069	}
1070	ret = parse_nl_saddr(info, &saddr);
1071	if (ret < 0)
1072		src = false;
1073
1074	hash ^= net_hash_mix(net);
1075	hash = hash_32(hash, tcp_metrics_hash_log);
1076	hb = tcp_metrics_hash + hash;
1077	pp = &hb->chain;
1078	spin_lock_bh(&tcp_metrics_lock);
1079	for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
1080		if (addr_same(&tm->tcpm_daddr, &daddr) &&
1081		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
1082		    net_eq(tm_net(tm), net)) {
1083			*pp = tm->tcpm_next;
1084			kfree_rcu(tm, rcu_head);
1085			found = true;
1086		} else {
1087			pp = &tm->tcpm_next;
1088		}
1089	}
1090	spin_unlock_bh(&tcp_metrics_lock);
1091	if (!found)
1092		return -ESRCH;
1093	return 0;
1094}
1095
1096static const struct genl_ops tcp_metrics_nl_ops[] = {
1097	{
1098		.cmd = TCP_METRICS_CMD_GET,
1099		.doit = tcp_metrics_nl_cmd_get,
1100		.dumpit = tcp_metrics_nl_dump,
1101		.policy = tcp_metrics_nl_policy,
1102	},
1103	{
1104		.cmd = TCP_METRICS_CMD_DEL,
1105		.doit = tcp_metrics_nl_cmd_del,
1106		.policy = tcp_metrics_nl_policy,
1107		.flags = GENL_ADMIN_PERM,
1108	},
1109};
1110
1111static struct genl_family tcp_metrics_nl_family __ro_after_init = {
1112	.hdrsize	= 0,
1113	.name		= TCP_METRICS_GENL_NAME,
1114	.version	= TCP_METRICS_GENL_VERSION,
1115	.maxattr	= TCP_METRICS_ATTR_MAX,
1116	.netnsok	= true,
1117	.module		= THIS_MODULE,
1118	.ops		= tcp_metrics_nl_ops,
1119	.n_ops		= ARRAY_SIZE(tcp_metrics_nl_ops),
1120};
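
/* For reference only, not kernel code: a minimal sketch of how userspace
 * might query this family with libnl-3.  The helper name tcpm_get() is
 * invented for illustration and all error handling is omitted.
 */
#if 0
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/tcp_metrics.h>
#include <arpa/inet.h>

static int tcpm_get(const char *dip)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	struct in_addr a;
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, TCP_METRICS_GENL_NAME);
	inet_pton(AF_INET, dip, &a);
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    TCP_METRICS_CMD_GET, TCP_METRICS_GENL_VERSION);
	nla_put_u32(msg, TCP_METRICS_ATTR_ADDR_IPV4, a.s_addr);
	nl_send_auto(sk, msg);
	return nl_recvmsgs_default(sk);	/* reply parsed by installed callbacks */
}
#endif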
1121
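/* "tcpmhash_entries=<n>" on the kernel command line overrides the
 * memory-based default below; the value is rounded up to a power of
 * two when the table is allocated.
 */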
1122static unsigned int tcpmhash_entries;
1123static int __init set_tcpmhash_entries(char *str)
1124{
1125	int ret;
1126
1127	if (!str)
1128		return 0;
1129
1130	ret = kstrtouint(str, 0, &tcpmhash_entries);
1131	if (ret)
1132		return 0;
1133
1134	return 1;
1135}
1136__setup("tcpmhash_entries=", set_tcpmhash_entries);
1137
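/* The table is global, allocated once for init_net and sized from total
 * RAM unless tcpmhash_entries was given: 16K slots on machines with at
 * least 128K pages (512 MB with 4 KiB pages), 8K otherwise.  Fall back
 * to vzalloc() if a physically contiguous kzalloc() fails.
 */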
1138static int __net_init tcp_net_metrics_init(struct net *net)
1139{
1140	size_t size;
1141	unsigned int slots;
1142
1143	if (!net_eq(net, &init_net))
1144		return 0;
1145
1146	slots = tcpmhash_entries;
1147	if (!slots) {
1148		if (totalram_pages >= 128 * 1024)
1149			slots = 16 * 1024;
1150		else
1151			slots = 8 * 1024;
1152	}
1153
1154	tcp_metrics_hash_log = order_base_2(slots);
1155	size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;
1156
1157	tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1158	if (!tcp_metrics_hash)
1159		tcp_metrics_hash = vzalloc(size);
1160
1161	if (!tcp_metrics_hash)
1162		return -ENOMEM;
1163
1164	return 0;
1165}
1166
1167static void __net_exit tcp_net_metrics_exit(struct net *net)
1168{
1169	tcp_metrics_flush_all(net);
1170}
1171
1172static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
1173	.init	=	tcp_net_metrics_init,
1174	.exit	=	tcp_net_metrics_exit,
1175};
1176
1177void __init tcp_metrics_init(void)
1178{
1179	int ret;
1180
1181	ret = register_pernet_subsys(&tcp_net_metrics_ops);
1182	if (ret < 0)
1183		panic("Could not allocate the tcp_metrics hash table\n");
1184
1185	ret = genl_register_family(&tcp_metrics_nl_family);
1186	if (ret < 0)
1187		panic("Could not register tcp_metrics generic netlink\n");
1188}