v5.14.15 (net/ipv4/tcp_metrics.c)
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/rcupdate.h>
   3#include <linux/spinlock.h>
   4#include <linux/jiffies.h>
   5#include <linux/module.h>
   6#include <linux/cache.h>
   7#include <linux/slab.h>
   8#include <linux/init.h>
   9#include <linux/tcp.h>
  10#include <linux/hash.h>
  11#include <linux/tcp_metrics.h>
  12#include <linux/vmalloc.h>
  13
  14#include <net/inet_connection_sock.h>
  15#include <net/net_namespace.h>
  16#include <net/request_sock.h>
  17#include <net/inetpeer.h>
  18#include <net/sock.h>
  19#include <net/ipv6.h>
  20#include <net/dst.h>
  21#include <net/tcp.h>
  22#include <net/genetlink.h>
  23
  24static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
  25						   const struct inetpeer_addr *daddr,
  26						   struct net *net, unsigned int hash);
  27
  28struct tcp_fastopen_metrics {
  29	u16	mss;
  30	u16	syn_loss:10,		/* Recurring Fast Open SYN losses */
  31		try_exp:2;		/* Request w/ exp. option (once) */
  32	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
  33	struct	tcp_fastopen_cookie	cookie;
  34};
  35
  36/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility
  37 * Kernel only stores RTT and RTTVAR in usec resolution
  38 */
  39#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)
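/* For reference, the metric indices (from include/uapi/linux/tcp_metrics.h)
 * are, in order: TCP_METRIC_RTT and TCP_METRIC_RTTVAR (exported to
 * userspace in msec), TCP_METRIC_SSTHRESH, TCP_METRIC_CWND,
 * TCP_METRIC_REORDERING, then TCP_METRIC_RTT_US and TCP_METRIC_RTTVAR_US,
 * the two dump-only usec fields subtracted above.
 */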
  40
  41struct tcp_metrics_block {
  42	struct tcp_metrics_block __rcu	*tcpm_next;
  43	possible_net_t			tcpm_net;
  44	struct inetpeer_addr		tcpm_saddr;
  45	struct inetpeer_addr		tcpm_daddr;
  46	unsigned long			tcpm_stamp;
  47	u32				tcpm_lock;
  48	u32				tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
  49	struct tcp_fastopen_metrics	tcpm_fastopen;
  50
  51	struct rcu_head			rcu_head;
  52};
  53
  54static inline struct net *tm_net(struct tcp_metrics_block *tm)
  55{
  56	return read_pnet(&tm->tcpm_net);
  57}
  58
  59static bool tcp_metric_locked(struct tcp_metrics_block *tm,
  60			      enum tcp_metric_index idx)
  61{
  62	return tm->tcpm_lock & (1 << idx);
  63}
  64
  65static u32 tcp_metric_get(struct tcp_metrics_block *tm,
  66			  enum tcp_metric_index idx)
  67{
  68	return tm->tcpm_vals[idx];
  69}
  70
  71static void tcp_metric_set(struct tcp_metrics_block *tm,
  72			   enum tcp_metric_index idx,
  73			   u32 val)
  74{
  75	tm->tcpm_vals[idx] = val;
  76}
  77
  78static bool addr_same(const struct inetpeer_addr *a,
  79		      const struct inetpeer_addr *b)
  80{
  81	return inetpeer_addr_cmp(a, b) == 0;
  82}
  83
  84struct tcpm_hash_bucket {
  85	struct tcp_metrics_block __rcu	*chain;
  86};
  87
  88static struct tcpm_hash_bucket	*tcp_metrics_hash __read_mostly;
  89static unsigned int		tcp_metrics_hash_log __read_mostly;
  90
  91static DEFINE_SPINLOCK(tcp_metrics_lock);
  92
  93static void tcpm_suck_dst(struct tcp_metrics_block *tm,
  94			  const struct dst_entry *dst,
  95			  bool fastopen_clear)
  96{
  97	u32 msval;
  98	u32 val;
  99
 100	tm->tcpm_stamp = jiffies;
 101
 102	val = 0;
 103	if (dst_metric_locked(dst, RTAX_RTT))
 104		val |= 1 << TCP_METRIC_RTT;
 105	if (dst_metric_locked(dst, RTAX_RTTVAR))
 106		val |= 1 << TCP_METRIC_RTTVAR;
 107	if (dst_metric_locked(dst, RTAX_SSTHRESH))
 108		val |= 1 << TCP_METRIC_SSTHRESH;
 109	if (dst_metric_locked(dst, RTAX_CWND))
 110		val |= 1 << TCP_METRIC_CWND;
 111	if (dst_metric_locked(dst, RTAX_REORDERING))
 112		val |= 1 << TCP_METRIC_REORDERING;
 113	tm->tcpm_lock = val;
 114
 115	msval = dst_metric_raw(dst, RTAX_RTT);
 116	tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;
 117
 118	msval = dst_metric_raw(dst, RTAX_RTTVAR);
 119	tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
 120	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
 121	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
 122	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
 123	if (fastopen_clear) {
 124		tm->tcpm_fastopen.mss = 0;
 125		tm->tcpm_fastopen.syn_loss = 0;
 126		tm->tcpm_fastopen.try_exp = 0;
 127		tm->tcpm_fastopen.cookie.exp = false;
 128		tm->tcpm_fastopen.cookie.len = 0;
 129	}
 130}
 131
 132#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)
 133
 134static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
 135{
 136	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
 137		tcpm_suck_dst(tm, dst, false);
 138}
 139
 140#define TCP_METRICS_RECLAIM_DEPTH	5
 141#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL
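/* Chain-length encoding: when __tcp_get_metrics() walks more than
 * TCP_METRICS_RECLAIM_DEPTH entries in a bucket without finding a match,
 * it returns TCP_METRICS_RECLAIM_PTR instead of NULL. tcpm_new() treats
 * that sentinel as a request to recycle the oldest block in the bucket
 * rather than allocate, bounding both chain length and memory use.
 */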
 142
 143#define deref_locked(p)	\
 144	rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))
 145
 146static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
 147					  struct inetpeer_addr *saddr,
 148					  struct inetpeer_addr *daddr,
 149					  unsigned int hash)
 150{
 151	struct tcp_metrics_block *tm;
 152	struct net *net;
 153	bool reclaim = false;
 154
 155	spin_lock_bh(&tcp_metrics_lock);
 156	net = dev_net(dst->dev);
 157
 158	/* While waiting for the spin-lock the cache might have been populated
 159	 * with this entry and so we have to check again.
 160	 */
 161	tm = __tcp_get_metrics(saddr, daddr, net, hash);
 162	if (tm == TCP_METRICS_RECLAIM_PTR) {
 163		reclaim = true;
 164		tm = NULL;
 165	}
 166	if (tm) {
 167		tcpm_check_stamp(tm, dst);
 168		goto out_unlock;
 169	}
 170
 171	if (unlikely(reclaim)) {
 172		struct tcp_metrics_block *oldest;
 173
 174		oldest = deref_locked(tcp_metrics_hash[hash].chain);
 175		for (tm = deref_locked(oldest->tcpm_next); tm;
 176		     tm = deref_locked(tm->tcpm_next)) {
 177			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
 178				oldest = tm;
 179		}
 180		tm = oldest;
 181	} else {
 182		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
 183		if (!tm)
 184			goto out_unlock;
 185	}
 186	write_pnet(&tm->tcpm_net, net);
 187	tm->tcpm_saddr = *saddr;
 188	tm->tcpm_daddr = *daddr;
 189
 190	tcpm_suck_dst(tm, dst, true);
 191
 192	if (likely(!reclaim)) {
 193		tm->tcpm_next = tcp_metrics_hash[hash].chain;
 194		rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
 195	}
 196
 197out_unlock:
 198	spin_unlock_bh(&tcp_metrics_lock);
 199	return tm;
 200}
 201
 202static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
 203{
 204	if (tm)
 205		return tm;
 206	if (depth > TCP_METRICS_RECLAIM_DEPTH)
 207		return TCP_METRICS_RECLAIM_PTR;
 208	return NULL;
 209}
 210
 211static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
 212						   const struct inetpeer_addr *daddr,
 213						   struct net *net, unsigned int hash)
 214{
 215	struct tcp_metrics_block *tm;
 216	int depth = 0;
 217
 218	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
 219	     tm = rcu_dereference(tm->tcpm_next)) {
 220		if (addr_same(&tm->tcpm_saddr, saddr) &&
 221		    addr_same(&tm->tcpm_daddr, daddr) &&
 222		    net_eq(tm_net(tm), net))
 223			break;
 224		depth++;
 225	}
 226	return tcp_get_encode(tm, depth);
 227}
 228
 229static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
 230						       struct dst_entry *dst)
 231{
 232	struct tcp_metrics_block *tm;
 233	struct inetpeer_addr saddr, daddr;
 234	unsigned int hash;
 235	struct net *net;
 236
 237	saddr.family = req->rsk_ops->family;
 238	daddr.family = req->rsk_ops->family;
 239	switch (daddr.family) {
 240	case AF_INET:
 241		inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
 242		inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
 243		hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
 244		break;
 245#if IS_ENABLED(CONFIG_IPV6)
 246	case AF_INET6:
 247		inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
 248		inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
 249		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
 250		break;
 251#endif
 252	default:
 253		return NULL;
 254	}
 255
 256	net = dev_net(dst->dev);
 257	hash ^= net_hash_mix(net);
 258	hash = hash_32(hash, tcp_metrics_hash_log);
 259
 260	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
 261	     tm = rcu_dereference(tm->tcpm_next)) {
 262		if (addr_same(&tm->tcpm_saddr, &saddr) &&
 263		    addr_same(&tm->tcpm_daddr, &daddr) &&
 264		    net_eq(tm_net(tm), net))
 265			break;
 266	}
 267	tcpm_check_stamp(tm, dst);
 268	return tm;
 269}
 270
 271static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
 272						 struct dst_entry *dst,
 273						 bool create)
 274{
 275	struct tcp_metrics_block *tm;
 276	struct inetpeer_addr saddr, daddr;
 277	unsigned int hash;
 278	struct net *net;
 279
 280	if (sk->sk_family == AF_INET) {
 281		inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
 282		inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
 283		hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
 284	}
 285#if IS_ENABLED(CONFIG_IPV6)
 286	else if (sk->sk_family == AF_INET6) {
 287		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
 288			inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
 289			inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
 290			hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
 291		} else {
 292			inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
 293			inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
 294			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
 295		}
 296	}
 297#endif
 298	else
 299		return NULL;
 300
 301	net = dev_net(dst->dev);
 302	hash ^= net_hash_mix(net);
 303	hash = hash_32(hash, tcp_metrics_hash_log);
 304
 305	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
 306	if (tm == TCP_METRICS_RECLAIM_PTR)
 307		tm = NULL;
 308	if (!tm && create)
 309		tm = tcpm_new(dst, &saddr, &daddr, hash);
 310	else
 311		tcpm_check_stamp(tm, dst);
 312
 313	return tm;
 314}
 315
 316/* Save metrics learned by this TCP session.  This function is called
  317 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 318 * or goes from LAST-ACK to CLOSE.
 319 */
 320void tcp_update_metrics(struct sock *sk)
 321{
 322	const struct inet_connection_sock *icsk = inet_csk(sk);
 323	struct dst_entry *dst = __sk_dst_get(sk);
 324	struct tcp_sock *tp = tcp_sk(sk);
 325	struct net *net = sock_net(sk);
 326	struct tcp_metrics_block *tm;
 327	unsigned long rtt;
 328	u32 val;
 329	int m;
 330
 331	sk_dst_confirm(sk);
 332	if (net->ipv4.sysctl_tcp_nometrics_save || !dst)
 333		return;
 334
 335	rcu_read_lock();
 336	if (icsk->icsk_backoff || !tp->srtt_us) {
 337		/* This session failed to estimate rtt. Why?
 338		 * Probably, no packets returned in time.  Reset our
 339		 * results.
 340		 */
 341		tm = tcp_get_metrics(sk, dst, false);
 342		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
 343			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
 344		goto out_unlock;
 345	} else
 346		tm = tcp_get_metrics(sk, dst, true);
 347
 348	if (!tm)
 349		goto out_unlock;
 350
 351	rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
 352	m = rtt - tp->srtt_us;
 353
  354	/* If the newly calculated rtt is larger than the stored one, store
  355	 * the new one. Otherwise, use EWMA. Remember, rtt overestimation is
 356	 * always better than underestimation.
 357	 */
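	/* Worked example with illustrative numbers: stored rtt = 10000 usec
	 * and tp->srtt_us = 8000 give m = 2000, so the stored value becomes
	 * 10000 - (2000 >> 3) = 9750 usec: a 1/8-gain EWMA that decays
	 * slowly downwards, while a larger srtt (m <= 0) replaces the
	 * stored value outright.
	 */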
 358	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
 359		if (m <= 0)
 360			rtt = tp->srtt_us;
 361		else
 362			rtt -= (m >> 3);
 363		tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
 364	}
 365
 366	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
 367		unsigned long var;
 368
 369		if (m < 0)
 370			m = -m;
 371
 372		/* Scale deviation to rttvar fixed point */
 373		m >>= 1;
 374		if (m < tp->mdev_us)
 375			m = tp->mdev_us;
 376
 377		var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
 378		if (m >= var)
 379			var = m;
 380		else
 381			var -= (var - m) >> 2;
 382
 383		tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
 384	}
 385
 386	if (tcp_in_initial_slowstart(tp)) {
  387		/* Slow start has not finished yet. */
 388		if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
 389		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
 390			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
 391			if (val && (tp->snd_cwnd >> 1) > val)
 392				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
 393					       tp->snd_cwnd >> 1);
 394		}
 395		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
 396			val = tcp_metric_get(tm, TCP_METRIC_CWND);
 397			if (tp->snd_cwnd > val)
 398				tcp_metric_set(tm, TCP_METRIC_CWND,
 399					       tp->snd_cwnd);
 400		}
 401	} else if (!tcp_in_slow_start(tp) &&
 402		   icsk->icsk_ca_state == TCP_CA_Open) {
 403		/* Cong. avoidance phase, cwnd is reliable. */
 404		if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
 405		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
 406			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
 407				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
 408		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
 409			val = tcp_metric_get(tm, TCP_METRIC_CWND);
 410			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
 411		}
 412	} else {
  413		/* Else slow start did not finish, cwnd is not reliable,
  414		 * and ssthresh may be invalid as well.
  415		 */
 416		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
 417			val = tcp_metric_get(tm, TCP_METRIC_CWND);
 418			tcp_metric_set(tm, TCP_METRIC_CWND,
 419				       (val + tp->snd_ssthresh) >> 1);
 420		}
 421		if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
 422		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
 423			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
 424			if (val && tp->snd_ssthresh > val)
 425				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
 426					       tp->snd_ssthresh);
 427		}
 428		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
 429			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
 430			if (val < tp->reordering &&
 431			    tp->reordering != net->ipv4.sysctl_tcp_reordering)
 432				tcp_metric_set(tm, TCP_METRIC_REORDERING,
 433					       tp->reordering);
 434		}
 435	}
 436	tm->tcpm_stamp = jiffies;
 437out_unlock:
 438	rcu_read_unlock();
 439}
 440
 441/* Initialize metrics on socket. */
 442
 443void tcp_init_metrics(struct sock *sk)
 444{
 445	struct dst_entry *dst = __sk_dst_get(sk);
 446	struct tcp_sock *tp = tcp_sk(sk);
 447	struct net *net = sock_net(sk);
 448	struct tcp_metrics_block *tm;
 449	u32 val, crtt = 0; /* cached RTT scaled by 8 */
 450
 451	sk_dst_confirm(sk);
 452	if (!dst)
 453		goto reset;
 454
 455	rcu_read_lock();
 456	tm = tcp_get_metrics(sk, dst, true);
 457	if (!tm) {
 458		rcu_read_unlock();
 459		goto reset;
 460	}
 461
 462	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
 463		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);
 464
 465	val = net->ipv4.sysctl_tcp_no_ssthresh_metrics_save ?
 466	      0 : tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
 467	if (val) {
 468		tp->snd_ssthresh = val;
 469		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
 470			tp->snd_ssthresh = tp->snd_cwnd_clamp;
 471	} else {
  472		/* ssthresh may have been reduced unnecessarily during
  473		 * 3WHS. Restore it to its initial default.
 474		 */
 475		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 476	}
 477	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
 478	if (val && tp->reordering != val)
 479		tp->reordering = val;
 480
 481	crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
 482	rcu_read_unlock();
 483reset:
 484	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
 485	 * to seed the RTO for later data packets because SYN packets are
 486	 * small. Use the per-dst cached values to seed the RTO but keep
 487	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
 488	 * Later the RTO will be updated immediately upon obtaining the first
 489	 * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
 490	 * influences the first RTO but not later RTT estimation.
 491	 *
 492	 * But if RTT is not available from the SYN (due to retransmits or
 493	 * syn cookies) or the cache, force a conservative 3secs timeout.
 494	 *
  495	 * A bit of theory. RTT is the time that passes after a "normal" sized
  496	 * packet is sent until it is ACKed. In normal circumstances, sending
  497	 * small packets forces the peer to delay ACKs, so the calculation is
  498	 * still correct. The algorithm is adaptive and, provided we follow
  499	 * specs, it NEVER underestimates RTT. BUT! If the peer plays clever
  500	 * tricks such as "quick acks" for long enough to drive RTT down to a
  501	 * low value, and then abruptly stops doing so and starts delaying
  502	 * ACKs, expect trouble.
 503	 */
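	/* Worked example (assuming HZ == 1000): srtt values are kept in usec
	 * scaled by 8, so a cached 200 ms RTT is crtt = 1600000. Dividing by
	 * 8 * USEC_PER_SEC / HZ = 8000 gives 200 jiffies, and the seeded RTO
	 * below becomes crtt + max(2 * crtt, tcp_rto_min(sk)).
	 */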
 504	if (crtt > tp->srtt_us) {
 505		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
 506		crtt /= 8 * USEC_PER_SEC / HZ;
 507		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
 508	} else if (tp->srtt_us == 0) {
 509		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
 510		 * 3WHS. This is most likely due to retransmission,
 511		 * including spurious one. Reset the RTO back to 3secs
 512		 * from the more aggressive 1sec to avoid more spurious
 513		 * retransmission.
 514		 */
 515		tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
 516		tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;
 517
 518		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
 519	}
 520}
 521
 522bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
 523{
 524	struct tcp_metrics_block *tm;
 525	bool ret;
 526
 527	if (!dst)
 528		return false;
 529
 530	rcu_read_lock();
 531	tm = __tcp_get_metrics_req(req, dst);
 532	if (tm && tcp_metric_get(tm, TCP_METRIC_RTT))
 533		ret = true;
 534	else
 535		ret = false;
 536	rcu_read_unlock();
 537
 538	return ret;
 539}
 540
 541static DEFINE_SEQLOCK(fastopen_seqlock);
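/* Writers serialize updates of tcp_fastopen_metrics with this seqlock;
 * readers sample it with read_seqbegin() and retry via read_seqretry()
 * if a writer ran concurrently, so the bitfields and the cookie are
 * always observed as a consistent snapshot.
 */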
 542
 543void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
 544			    struct tcp_fastopen_cookie *cookie)
 545{
 546	struct tcp_metrics_block *tm;
 547
 548	rcu_read_lock();
 549	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
 550	if (tm) {
 551		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
 552		unsigned int seq;
 553
 554		do {
 555			seq = read_seqbegin(&fastopen_seqlock);
 556			if (tfom->mss)
 557				*mss = tfom->mss;
 558			*cookie = tfom->cookie;
 559			if (cookie->len <= 0 && tfom->try_exp == 1)
 560				cookie->exp = true;
 561		} while (read_seqretry(&fastopen_seqlock, seq));
 562	}
 563	rcu_read_unlock();
 564}
 565
 566void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
 567			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
 568			    u16 try_exp)
 569{
 570	struct dst_entry *dst = __sk_dst_get(sk);
 571	struct tcp_metrics_block *tm;
 572
 573	if (!dst)
 574		return;
 575	rcu_read_lock();
 576	tm = tcp_get_metrics(sk, dst, true);
 577	if (tm) {
 578		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
 579
 580		write_seqlock_bh(&fastopen_seqlock);
 581		if (mss)
 582			tfom->mss = mss;
 583		if (cookie && cookie->len > 0)
 584			tfom->cookie = *cookie;
 585		else if (try_exp > tfom->try_exp &&
 586			 tfom->cookie.len <= 0 && !tfom->cookie.exp)
 587			tfom->try_exp = try_exp;
 588		if (syn_lost) {
 589			++tfom->syn_loss;
 590			tfom->last_syn_loss = jiffies;
 591		} else
 592			tfom->syn_loss = 0;
 593		write_sequnlock_bh(&fastopen_seqlock);
 594	}
 595	rcu_read_unlock();
 596}
 597
 598static struct genl_family tcp_metrics_nl_family;
 599
 600static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
 601	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
 602	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
 603					    .len = sizeof(struct in6_addr), },
  604	/* The following attributes are not received for GET/DEL;
  605	 * we keep them for reference.
  606	 */
 607#if 0
 608	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
 609	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
 610	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
 611	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
 612	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
 613	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
 614	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
 615	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
 616					    .len = TCP_FASTOPEN_COOKIE_MAX, },
 617#endif
 618};
 619
 620/* Add attributes, caller cancels its header on failure */
 621static int tcp_metrics_fill_info(struct sk_buff *msg,
 622				 struct tcp_metrics_block *tm)
 623{
 624	struct nlattr *nest;
 625	int i;
 626
 627	switch (tm->tcpm_daddr.family) {
 628	case AF_INET:
 629		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
 630				    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
 631			goto nla_put_failure;
 632		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
 633				    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
 634			goto nla_put_failure;
 635		break;
 636	case AF_INET6:
 637		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
 638				     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
 639			goto nla_put_failure;
 640		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
 641				     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
 642			goto nla_put_failure;
 643		break;
 644	default:
 645		return -EAFNOSUPPORT;
 646	}
 647
 648	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
 649			  jiffies - tm->tcpm_stamp,
 650			  TCP_METRICS_ATTR_PAD) < 0)
 651		goto nla_put_failure;
 652
 653	{
 654		int n = 0;
 655
 656		nest = nla_nest_start_noflag(msg, TCP_METRICS_ATTR_VALS);
 657		if (!nest)
 658			goto nla_put_failure;
 659		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
 660			u32 val = tm->tcpm_vals[i];
 661
 662			if (!val)
 663				continue;
 664			if (i == TCP_METRIC_RTT) {
 665				if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
 666						val) < 0)
 667					goto nla_put_failure;
 668				n++;
 669				val = max(val / 1000, 1U);
 670			}
 671			if (i == TCP_METRIC_RTTVAR) {
 672				if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
 673						val) < 0)
 674					goto nla_put_failure;
 675				n++;
 676				val = max(val / 1000, 1U);
 677			}
 678			if (nla_put_u32(msg, i + 1, val) < 0)
 679				goto nla_put_failure;
 680			n++;
 681		}
 682		if (n)
 683			nla_nest_end(msg, nest);
 684		else
 685			nla_nest_cancel(msg, nest);
 686	}
 687
 688	{
 689		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
 690		unsigned int seq;
 691
 692		do {
 693			seq = read_seqbegin(&fastopen_seqlock);
 694			tfom_copy[0] = tm->tcpm_fastopen;
 695		} while (read_seqretry(&fastopen_seqlock, seq));
 696
 697		tfom = tfom_copy;
 698		if (tfom->mss &&
 699		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
 700				tfom->mss) < 0)
 701			goto nla_put_failure;
 702		if (tfom->syn_loss &&
 703		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
 704				tfom->syn_loss) < 0 ||
 705		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
 706				jiffies - tfom->last_syn_loss,
 707				TCP_METRICS_ATTR_PAD) < 0))
 708			goto nla_put_failure;
 709		if (tfom->cookie.len > 0 &&
 710		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
 711			    tfom->cookie.len, tfom->cookie.val) < 0)
 712			goto nla_put_failure;
 713	}
 714
 715	return 0;
 716
 717nla_put_failure:
 718	return -EMSGSIZE;
 719}
 720
 721static int tcp_metrics_dump_info(struct sk_buff *skb,
 722				 struct netlink_callback *cb,
 723				 struct tcp_metrics_block *tm)
 724{
 725	void *hdr;
 726
 727	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
 728			  &tcp_metrics_nl_family, NLM_F_MULTI,
 729			  TCP_METRICS_CMD_GET);
 730	if (!hdr)
 731		return -EMSGSIZE;
 732
 733	if (tcp_metrics_fill_info(skb, tm) < 0)
 734		goto nla_put_failure;
 735
 736	genlmsg_end(skb, hdr);
 737	return 0;
 738
 739nla_put_failure:
 740	genlmsg_cancel(skb, hdr);
 741	return -EMSGSIZE;
 742}
 743
 744static int tcp_metrics_nl_dump(struct sk_buff *skb,
 745			       struct netlink_callback *cb)
 746{
 747	struct net *net = sock_net(skb->sk);
 748	unsigned int max_rows = 1U << tcp_metrics_hash_log;
 749	unsigned int row, s_row = cb->args[0];
 750	int s_col = cb->args[1], col = s_col;
 751
 752	for (row = s_row; row < max_rows; row++, s_col = 0) {
 753		struct tcp_metrics_block *tm;
 754		struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;
 755
 756		rcu_read_lock();
 757		for (col = 0, tm = rcu_dereference(hb->chain); tm;
 758		     tm = rcu_dereference(tm->tcpm_next), col++) {
 759			if (!net_eq(tm_net(tm), net))
 760				continue;
 761			if (col < s_col)
 762				continue;
 763			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
 764				rcu_read_unlock();
 765				goto done;
 766			}
 767		}
 768		rcu_read_unlock();
 769	}
 770
 771done:
 772	cb->args[0] = row;
 773	cb->args[1] = col;
 774	return skb->len;
 775}
 776
 777static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
 778			   unsigned int *hash, int optional, int v4, int v6)
 779{
 780	struct nlattr *a;
 781
 782	a = info->attrs[v4];
 783	if (a) {
 784		inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
 785		if (hash)
 786			*hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
 787		return 0;
 788	}
 789	a = info->attrs[v6];
 790	if (a) {
 791		struct in6_addr in6;
 792
 793		if (nla_len(a) != sizeof(struct in6_addr))
 794			return -EINVAL;
 795		in6 = nla_get_in6_addr(a);
 796		inetpeer_set_addr_v6(addr, &in6);
 797		if (hash)
 798			*hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
 799		return 0;
 800	}
 801	return optional ? 1 : -EAFNOSUPPORT;
 802}
 803
 804static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
 805			 unsigned int *hash, int optional)
 806{
 807	return __parse_nl_addr(info, addr, hash, optional,
 808			       TCP_METRICS_ATTR_ADDR_IPV4,
 809			       TCP_METRICS_ATTR_ADDR_IPV6);
 810}
 811
 812static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
 813{
 814	return __parse_nl_addr(info, addr, NULL, 0,
 815			       TCP_METRICS_ATTR_SADDR_IPV4,
 816			       TCP_METRICS_ATTR_SADDR_IPV6);
 817}
 818
 819static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
 820{
 821	struct tcp_metrics_block *tm;
 822	struct inetpeer_addr saddr, daddr;
 823	unsigned int hash;
 824	struct sk_buff *msg;
 825	struct net *net = genl_info_net(info);
 826	void *reply;
 827	int ret;
 828	bool src = true;
 829
 830	ret = parse_nl_addr(info, &daddr, &hash, 0);
 831	if (ret < 0)
 832		return ret;
 833
 834	ret = parse_nl_saddr(info, &saddr);
 835	if (ret < 0)
 836		src = false;
 837
 838	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 839	if (!msg)
 840		return -ENOMEM;
 841
 842	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
 843				  info->genlhdr->cmd);
 844	if (!reply)
 845		goto nla_put_failure;
 846
 847	hash ^= net_hash_mix(net);
 848	hash = hash_32(hash, tcp_metrics_hash_log);
 849	ret = -ESRCH;
 850	rcu_read_lock();
 851	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
 852	     tm = rcu_dereference(tm->tcpm_next)) {
 853		if (addr_same(&tm->tcpm_daddr, &daddr) &&
 854		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
 855		    net_eq(tm_net(tm), net)) {
 856			ret = tcp_metrics_fill_info(msg, tm);
 857			break;
 858		}
 859	}
 860	rcu_read_unlock();
 861	if (ret < 0)
 862		goto out_free;
 863
 864	genlmsg_end(msg, reply);
 865	return genlmsg_reply(msg, info);
 866
 867nla_put_failure:
 868	ret = -EMSGSIZE;
 869
 870out_free:
 871	nlmsg_free(msg);
 872	return ret;
 873}
 874
 875static void tcp_metrics_flush_all(struct net *net)
 876{
 877	unsigned int max_rows = 1U << tcp_metrics_hash_log;
 878	struct tcpm_hash_bucket *hb = tcp_metrics_hash;
 879	struct tcp_metrics_block *tm;
 880	unsigned int row;
 881
 882	for (row = 0; row < max_rows; row++, hb++) {
 883		struct tcp_metrics_block __rcu **pp;
 884		bool match;
 885
 886		spin_lock_bh(&tcp_metrics_lock);
 887		pp = &hb->chain;
 888		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
 889			match = net ? net_eq(tm_net(tm), net) :
 890				!refcount_read(&tm_net(tm)->ns.count);
 891			if (match) {
 892				*pp = tm->tcpm_next;
 893				kfree_rcu(tm, rcu_head);
 894			} else {
 895				pp = &tm->tcpm_next;
 896			}
 897		}
 898		spin_unlock_bh(&tcp_metrics_lock);
 899	}
 900}
 901
 902static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
 903{
 904	struct tcpm_hash_bucket *hb;
 905	struct tcp_metrics_block *tm;
 906	struct tcp_metrics_block __rcu **pp;
 907	struct inetpeer_addr saddr, daddr;
 908	unsigned int hash;
 909	struct net *net = genl_info_net(info);
 910	int ret;
 911	bool src = true, found = false;
 912
 913	ret = parse_nl_addr(info, &daddr, &hash, 1);
 914	if (ret < 0)
 915		return ret;
 916	if (ret > 0) {
 917		tcp_metrics_flush_all(net);
 918		return 0;
 919	}
 920	ret = parse_nl_saddr(info, &saddr);
 921	if (ret < 0)
 922		src = false;
 923
 924	hash ^= net_hash_mix(net);
 925	hash = hash_32(hash, tcp_metrics_hash_log);
 926	hb = tcp_metrics_hash + hash;
 927	pp = &hb->chain;
 928	spin_lock_bh(&tcp_metrics_lock);
 929	for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
 930		if (addr_same(&tm->tcpm_daddr, &daddr) &&
 931		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
 932		    net_eq(tm_net(tm), net)) {
 933			*pp = tm->tcpm_next;
 934			kfree_rcu(tm, rcu_head);
 935			found = true;
 936		} else {
 937			pp = &tm->tcpm_next;
 938		}
 939	}
 940	spin_unlock_bh(&tcp_metrics_lock);
 941	if (!found)
 942		return -ESRCH;
 943	return 0;
 944}
 945
 946static const struct genl_small_ops tcp_metrics_nl_ops[] = {
 947	{
 948		.cmd = TCP_METRICS_CMD_GET,
 949		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
 950		.doit = tcp_metrics_nl_cmd_get,
 951		.dumpit = tcp_metrics_nl_dump,
 952	},
 953	{
 954		.cmd = TCP_METRICS_CMD_DEL,
 955		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
 956		.doit = tcp_metrics_nl_cmd_del,
 957		.flags = GENL_ADMIN_PERM,
 958	},
 959};
 960
 961static struct genl_family tcp_metrics_nl_family __ro_after_init = {
 962	.hdrsize	= 0,
 963	.name		= TCP_METRICS_GENL_NAME,
 964	.version	= TCP_METRICS_GENL_VERSION,
 965	.maxattr	= TCP_METRICS_ATTR_MAX,
 966	.policy = tcp_metrics_nl_policy,
 967	.netnsok	= true,
 968	.module		= THIS_MODULE,
 969	.small_ops	= tcp_metrics_nl_ops,
 970	.n_small_ops	= ARRAY_SIZE(tcp_metrics_nl_ops),
 971};
 972
 973static unsigned int tcpmhash_entries;
 974static int __init set_tcpmhash_entries(char *str)
 975{
 976	ssize_t ret;
 977
 978	if (!str)
 979		return 0;
 980
 981	ret = kstrtouint(str, 0, &tcpmhash_entries);
 982	if (ret)
 983		return 0;
 984
 985	return 1;
 986}
 987__setup("tcpmhash_entries=", set_tcpmhash_entries);
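/* For example, booting with "tcpmhash_entries=4096" on the kernel command
 * line overrides the RAM-based default number of hash buckets below.
 */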
 988
 989static int __net_init tcp_net_metrics_init(struct net *net)
 990{
 991	size_t size;
 992	unsigned int slots;
 993
 994	if (!net_eq(net, &init_net))
 995		return 0;
 996
 997	slots = tcpmhash_entries;
 998	if (!slots) {
 999		if (totalram_pages() >= 128 * 1024)
1000			slots = 16 * 1024;
1001		else
1002			slots = 8 * 1024;
1003	}
1004
1005	tcp_metrics_hash_log = order_base_2(slots);
1006	size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;
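	/* Sizing example: with the 16K default, tcp_metrics_hash_log = 14,
	 * so the table is 16384 single-pointer buckets: 128 KiB on 64-bit.
	 */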
1007
1008	tcp_metrics_hash = kvzalloc(size, GFP_KERNEL);
1009	if (!tcp_metrics_hash)
1010		return -ENOMEM;
1011
1012	return 0;
1013}
1014
1015static void __net_exit tcp_net_metrics_exit_batch(struct list_head *net_exit_list)
1016{
1017	tcp_metrics_flush_all(NULL);
1018}
1019
1020static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
1021	.init		=	tcp_net_metrics_init,
1022	.exit_batch	=	tcp_net_metrics_exit_batch,
1023};
1024
1025void __init tcp_metrics_init(void)
1026{
1027	int ret;
1028
1029	ret = register_pernet_subsys(&tcp_net_metrics_ops);
1030	if (ret < 0)
1031		panic("Could not allocate the tcp_metrics hash table\n");
1032
1033	ret = genl_register_family(&tcp_metrics_nl_family);
1034	if (ret < 0)
1035		panic("Could not register tcp_metrics generic netlink\n");
1036}
v6.8 (net/ipv4/tcp_metrics.c)
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/rcupdate.h>
   3#include <linux/spinlock.h>
   4#include <linux/jiffies.h>
   5#include <linux/module.h>
   6#include <linux/cache.h>
   7#include <linux/slab.h>
   8#include <linux/init.h>
   9#include <linux/tcp.h>
  10#include <linux/hash.h>
  11#include <linux/tcp_metrics.h>
  12#include <linux/vmalloc.h>
  13
  14#include <net/inet_connection_sock.h>
  15#include <net/net_namespace.h>
  16#include <net/request_sock.h>
  17#include <net/inetpeer.h>
  18#include <net/sock.h>
  19#include <net/ipv6.h>
  20#include <net/dst.h>
  21#include <net/tcp.h>
  22#include <net/genetlink.h>
  23
  24static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
  25						   const struct inetpeer_addr *daddr,
  26						   struct net *net, unsigned int hash);
  27
  28struct tcp_fastopen_metrics {
  29	u16	mss;
  30	u16	syn_loss:10,		/* Recurring Fast Open SYN losses */
  31		try_exp:2;		/* Request w/ exp. option (once) */
  32	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
  33	struct	tcp_fastopen_cookie	cookie;
  34};
  35
  36/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility
  37 * Kernel only stores RTT and RTTVAR in usec resolution
  38 */
  39#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)
  40
  41struct tcp_metrics_block {
  42	struct tcp_metrics_block __rcu	*tcpm_next;
  43	struct net			*tcpm_net;
  44	struct inetpeer_addr		tcpm_saddr;
  45	struct inetpeer_addr		tcpm_daddr;
  46	unsigned long			tcpm_stamp;
  47	u32				tcpm_lock;
  48	u32				tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
  49	struct tcp_fastopen_metrics	tcpm_fastopen;
  50
  51	struct rcu_head			rcu_head;
  52};
  53
  54static inline struct net *tm_net(const struct tcp_metrics_block *tm)
  55{
  56	/* Paired with the WRITE_ONCE() in tcpm_new() */
  57	return READ_ONCE(tm->tcpm_net);
  58}
  59
  60static bool tcp_metric_locked(struct tcp_metrics_block *tm,
  61			      enum tcp_metric_index idx)
  62{
  63	/* Paired with WRITE_ONCE() in tcpm_suck_dst() */
  64	return READ_ONCE(tm->tcpm_lock) & (1 << idx);
  65}
  66
  67static u32 tcp_metric_get(const struct tcp_metrics_block *tm,
  68			  enum tcp_metric_index idx)
  69{
  70	/* Paired with WRITE_ONCE() in tcp_metric_set() */
  71	return READ_ONCE(tm->tcpm_vals[idx]);
  72}
  73
  74static void tcp_metric_set(struct tcp_metrics_block *tm,
  75			   enum tcp_metric_index idx,
  76			   u32 val)
  77{
  78	/* Paired with READ_ONCE() in tcp_metric_get() */
  79	WRITE_ONCE(tm->tcpm_vals[idx], val);
  80}
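/* The READ_ONCE()/WRITE_ONCE() pairs in the helpers above annotate fields
 * that lockless RCU readers inspect while writers update them under
 * tcp_metrics_lock; the races are benign by design and the annotations
 * document them for the compiler and for KCSAN.
 */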
  81
  82static bool addr_same(const struct inetpeer_addr *a,
  83		      const struct inetpeer_addr *b)
  84{
  85	return (a->family == b->family) && !inetpeer_addr_cmp(a, b);
  86}
  87
  88struct tcpm_hash_bucket {
  89	struct tcp_metrics_block __rcu	*chain;
  90};
  91
  92static struct tcpm_hash_bucket	*tcp_metrics_hash __read_mostly;
  93static unsigned int		tcp_metrics_hash_log __read_mostly;
  94
  95static DEFINE_SPINLOCK(tcp_metrics_lock);
  96static DEFINE_SEQLOCK(fastopen_seqlock);
  97
  98static void tcpm_suck_dst(struct tcp_metrics_block *tm,
  99			  const struct dst_entry *dst,
 100			  bool fastopen_clear)
 101{
 102	u32 msval;
 103	u32 val;
 104
 105	WRITE_ONCE(tm->tcpm_stamp, jiffies);
 106
 107	val = 0;
 108	if (dst_metric_locked(dst, RTAX_RTT))
 109		val |= 1 << TCP_METRIC_RTT;
 110	if (dst_metric_locked(dst, RTAX_RTTVAR))
 111		val |= 1 << TCP_METRIC_RTTVAR;
 112	if (dst_metric_locked(dst, RTAX_SSTHRESH))
 113		val |= 1 << TCP_METRIC_SSTHRESH;
 114	if (dst_metric_locked(dst, RTAX_CWND))
 115		val |= 1 << TCP_METRIC_CWND;
 116	if (dst_metric_locked(dst, RTAX_REORDERING))
 117		val |= 1 << TCP_METRIC_REORDERING;
 118	/* Paired with READ_ONCE() in tcp_metric_locked() */
 119	WRITE_ONCE(tm->tcpm_lock, val);
 120
 121	msval = dst_metric_raw(dst, RTAX_RTT);
 122	tcp_metric_set(tm, TCP_METRIC_RTT, msval * USEC_PER_MSEC);
 123
 124	msval = dst_metric_raw(dst, RTAX_RTTVAR);
 125	tcp_metric_set(tm, TCP_METRIC_RTTVAR, msval * USEC_PER_MSEC);
 126	tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
 127		       dst_metric_raw(dst, RTAX_SSTHRESH));
 128	tcp_metric_set(tm, TCP_METRIC_CWND,
 129		       dst_metric_raw(dst, RTAX_CWND));
 130	tcp_metric_set(tm, TCP_METRIC_REORDERING,
 131		       dst_metric_raw(dst, RTAX_REORDERING));
 132	if (fastopen_clear) {
 133		write_seqlock(&fastopen_seqlock);
 134		tm->tcpm_fastopen.mss = 0;
 135		tm->tcpm_fastopen.syn_loss = 0;
 136		tm->tcpm_fastopen.try_exp = 0;
 137		tm->tcpm_fastopen.cookie.exp = false;
 138		tm->tcpm_fastopen.cookie.len = 0;
 139		write_sequnlock(&fastopen_seqlock);
 140	}
 141}
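/* tcpm_suck_dst() clears Fast Open state under fastopen_seqlock (defined
 * above, before its first user), so a reader in tcp_fastopen_cache_get()
 * either sees the old cookie or the fully reset one, never a torn mix.
 */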
 142
 143#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)
 144
 145static void tcpm_check_stamp(struct tcp_metrics_block *tm,
 146			     const struct dst_entry *dst)
 147{
 148	unsigned long limit;
 149
 150	if (!tm)
 151		return;
 152	limit = READ_ONCE(tm->tcpm_stamp) + TCP_METRICS_TIMEOUT;
 153	if (unlikely(time_after(jiffies, limit)))
 154		tcpm_suck_dst(tm, dst, false);
 155}
 156
 157#define TCP_METRICS_RECLAIM_DEPTH	5
 158#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL
 159
 160#define deref_locked(p)	\
 161	rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))
 162
 163static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
 164					  struct inetpeer_addr *saddr,
 165					  struct inetpeer_addr *daddr,
 166					  unsigned int hash)
 167{
 168	struct tcp_metrics_block *tm;
 169	struct net *net;
 170	bool reclaim = false;
 171
 172	spin_lock_bh(&tcp_metrics_lock);
 173	net = dev_net(dst->dev);
 174
 175	/* While waiting for the spin-lock the cache might have been populated
 176	 * with this entry and so we have to check again.
 177	 */
 178	tm = __tcp_get_metrics(saddr, daddr, net, hash);
 179	if (tm == TCP_METRICS_RECLAIM_PTR) {
 180		reclaim = true;
 181		tm = NULL;
 182	}
 183	if (tm) {
 184		tcpm_check_stamp(tm, dst);
 185		goto out_unlock;
 186	}
 187
 188	if (unlikely(reclaim)) {
 189		struct tcp_metrics_block *oldest;
 190
 191		oldest = deref_locked(tcp_metrics_hash[hash].chain);
 192		for (tm = deref_locked(oldest->tcpm_next); tm;
 193		     tm = deref_locked(tm->tcpm_next)) {
 194			if (time_before(READ_ONCE(tm->tcpm_stamp),
 195					READ_ONCE(oldest->tcpm_stamp)))
 196				oldest = tm;
 197		}
 198		tm = oldest;
 199	} else {
 200		tm = kzalloc(sizeof(*tm), GFP_ATOMIC);
 201		if (!tm)
 202			goto out_unlock;
 203	}
 204	/* Paired with the READ_ONCE() in tm_net() */
 205	WRITE_ONCE(tm->tcpm_net, net);
 206
 207	tm->tcpm_saddr = *saddr;
 208	tm->tcpm_daddr = *daddr;
 209
 210	tcpm_suck_dst(tm, dst, reclaim);
 211
 212	if (likely(!reclaim)) {
 213		tm->tcpm_next = tcp_metrics_hash[hash].chain;
 214		rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
 215	}
 216
 217out_unlock:
 218	spin_unlock_bh(&tcp_metrics_lock);
 219	return tm;
 220}
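/* Two details above are worth noting: a fresh block comes from kzalloc(),
 * so its Fast Open state is already zeroed and tcpm_suck_dst() only needs
 * fastopen_clear when recycling an old block (reclaim == true); and
 * tcpm_net is stored with WRITE_ONCE() because tm_net() reads it without
 * holding tcp_metrics_lock.
 */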
 221
 222static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
 223{
 224	if (tm)
 225		return tm;
 226	if (depth > TCP_METRICS_RECLAIM_DEPTH)
 227		return TCP_METRICS_RECLAIM_PTR;
 228	return NULL;
 229}
 230
 231static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
 232						   const struct inetpeer_addr *daddr,
 233						   struct net *net, unsigned int hash)
 234{
 235	struct tcp_metrics_block *tm;
 236	int depth = 0;
 237
 238	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
 239	     tm = rcu_dereference(tm->tcpm_next)) {
 240		if (addr_same(&tm->tcpm_saddr, saddr) &&
 241		    addr_same(&tm->tcpm_daddr, daddr) &&
 242		    net_eq(tm_net(tm), net))
 243			break;
 244		depth++;
 245	}
 246	return tcp_get_encode(tm, depth);
 247}
 248
 249static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
 250						       struct dst_entry *dst)
 251{
 252	struct tcp_metrics_block *tm;
 253	struct inetpeer_addr saddr, daddr;
 254	unsigned int hash;
 255	struct net *net;
 256
 257	saddr.family = req->rsk_ops->family;
 258	daddr.family = req->rsk_ops->family;
 259	switch (daddr.family) {
 260	case AF_INET:
 261		inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
 262		inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
 263		hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
 264		break;
 265#if IS_ENABLED(CONFIG_IPV6)
 266	case AF_INET6:
 267		inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
 268		inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
 269		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
 270		break;
 271#endif
 272	default:
 273		return NULL;
 274	}
 275
 276	net = dev_net(dst->dev);
 277	hash ^= net_hash_mix(net);
 278	hash = hash_32(hash, tcp_metrics_hash_log);
 279
 280	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
 281	     tm = rcu_dereference(tm->tcpm_next)) {
 282		if (addr_same(&tm->tcpm_saddr, &saddr) &&
 283		    addr_same(&tm->tcpm_daddr, &daddr) &&
 284		    net_eq(tm_net(tm), net))
 285			break;
 286	}
 287	tcpm_check_stamp(tm, dst);
 288	return tm;
 289}
 290
 291static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
 292						 struct dst_entry *dst,
 293						 bool create)
 294{
 295	struct tcp_metrics_block *tm;
 296	struct inetpeer_addr saddr, daddr;
 297	unsigned int hash;
 298	struct net *net;
 299
 300	if (sk->sk_family == AF_INET) {
 301		inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
 302		inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
 303		hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
 304	}
 305#if IS_ENABLED(CONFIG_IPV6)
 306	else if (sk->sk_family == AF_INET6) {
 307		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
 308			inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
 309			inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
 310			hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
 311		} else {
 312			inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
 313			inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
 314			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
 315		}
 316	}
 317#endif
 318	else
 319		return NULL;
 320
 321	net = dev_net(dst->dev);
 322	hash ^= net_hash_mix(net);
 323	hash = hash_32(hash, tcp_metrics_hash_log);
 324
 325	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
 326	if (tm == TCP_METRICS_RECLAIM_PTR)
 327		tm = NULL;
 328	if (!tm && create)
 329		tm = tcpm_new(dst, &saddr, &daddr, hash);
 330	else
 331		tcpm_check_stamp(tm, dst);
 332
 333	return tm;
 334}
 335
 336/* Save metrics learned by this TCP session.  This function is called
  337 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 338 * or goes from LAST-ACK to CLOSE.
 339 */
 340void tcp_update_metrics(struct sock *sk)
 341{
 342	const struct inet_connection_sock *icsk = inet_csk(sk);
 343	struct dst_entry *dst = __sk_dst_get(sk);
 344	struct tcp_sock *tp = tcp_sk(sk);
 345	struct net *net = sock_net(sk);
 346	struct tcp_metrics_block *tm;
 347	unsigned long rtt;
 348	u32 val;
 349	int m;
 350
 351	sk_dst_confirm(sk);
 352	if (READ_ONCE(net->ipv4.sysctl_tcp_nometrics_save) || !dst)
 353		return;
 354
 355	rcu_read_lock();
 356	if (icsk->icsk_backoff || !tp->srtt_us) {
 357		/* This session failed to estimate rtt. Why?
 358		 * Probably, no packets returned in time.  Reset our
 359		 * results.
 360		 */
 361		tm = tcp_get_metrics(sk, dst, false);
 362		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
 363			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
 364		goto out_unlock;
 365	} else
 366		tm = tcp_get_metrics(sk, dst, true);
 367
 368	if (!tm)
 369		goto out_unlock;
 370
 371	rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
 372	m = rtt - tp->srtt_us;
 373
  374	/* If the newly calculated rtt is larger than the stored one, store
  375	 * the new one. Otherwise, use EWMA. Remember, rtt overestimation is
 376	 * always better than underestimation.
 377	 */
 378	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
 379		if (m <= 0)
 380			rtt = tp->srtt_us;
 381		else
 382			rtt -= (m >> 3);
 383		tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
 384	}
 385
 386	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
 387		unsigned long var;
 388
 389		if (m < 0)
 390			m = -m;
 391
 392		/* Scale deviation to rttvar fixed point */
 393		m >>= 1;
 394		if (m < tp->mdev_us)
 395			m = tp->mdev_us;
 396
 397		var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
 398		if (m >= var)
 399			var = m;
 400		else
 401			var -= (var - m) >> 2;
 402
 403		tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
 404	}
 405
 406	if (tcp_in_initial_slowstart(tp)) {
  407		/* Slow start has not finished yet. */
 408		if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
 409		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
 410			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
 411			if (val && (tcp_snd_cwnd(tp) >> 1) > val)
 412				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
 413					       tcp_snd_cwnd(tp) >> 1);
 414		}
 415		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
 416			val = tcp_metric_get(tm, TCP_METRIC_CWND);
 417			if (tcp_snd_cwnd(tp) > val)
 418				tcp_metric_set(tm, TCP_METRIC_CWND,
 419					       tcp_snd_cwnd(tp));
 420		}
 421	} else if (!tcp_in_slow_start(tp) &&
 422		   icsk->icsk_ca_state == TCP_CA_Open) {
 423		/* Cong. avoidance phase, cwnd is reliable. */
 424		if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
 425		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
 426			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
 427				       max(tcp_snd_cwnd(tp) >> 1, tp->snd_ssthresh));
 428		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
 429			val = tcp_metric_get(tm, TCP_METRIC_CWND);
 430			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tcp_snd_cwnd(tp)) >> 1);
 431		}
 432	} else {
  433		/* Else slow start did not finish, cwnd is not reliable,
  434		 * and ssthresh may be invalid as well.
  435		 */
 436		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
 437			val = tcp_metric_get(tm, TCP_METRIC_CWND);
 438			tcp_metric_set(tm, TCP_METRIC_CWND,
 439				       (val + tp->snd_ssthresh) >> 1);
 440		}
 441		if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
 442		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
 443			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
 444			if (val && tp->snd_ssthresh > val)
 445				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
 446					       tp->snd_ssthresh);
 447		}
 448		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
 449			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
 450			if (val < tp->reordering &&
 451			    tp->reordering !=
 452			    READ_ONCE(net->ipv4.sysctl_tcp_reordering))
 453				tcp_metric_set(tm, TCP_METRIC_REORDERING,
 454					       tp->reordering);
 455		}
 456	}
 457	WRITE_ONCE(tm->tcpm_stamp, jiffies);
 458out_unlock:
 459	rcu_read_unlock();
 460}
 461
 462/* Initialize metrics on socket. */
 463
 464void tcp_init_metrics(struct sock *sk)
 465{
 466	struct dst_entry *dst = __sk_dst_get(sk);
 467	struct tcp_sock *tp = tcp_sk(sk);
 468	struct net *net = sock_net(sk);
 469	struct tcp_metrics_block *tm;
 470	u32 val, crtt = 0; /* cached RTT scaled by 8 */
 471
 472	sk_dst_confirm(sk);
  473	/* ssthresh may have been reduced unnecessarily during
  474	 * 3WHS. Restore it to its initial default.
 475	 */
 476	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 477	if (!dst)
 478		goto reset;
 479
 480	rcu_read_lock();
 481	tm = tcp_get_metrics(sk, dst, false);
 482	if (!tm) {
 483		rcu_read_unlock();
 484		goto reset;
 485	}
 486
 487	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
 488		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);
 489
 490	val = READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) ?
 491	      0 : tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
 492	if (val) {
 493		tp->snd_ssthresh = val;
 494		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
 495			tp->snd_ssthresh = tp->snd_cwnd_clamp;
 496	}
 497	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
 498	if (val && tp->reordering != val)
 499		tp->reordering = val;
 500
 501	crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
 502	rcu_read_unlock();
 503reset:
 504	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
 505	 * to seed the RTO for later data packets because SYN packets are
 506	 * small. Use the per-dst cached values to seed the RTO but keep
 507	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
 508	 * Later the RTO will be updated immediately upon obtaining the first
 509	 * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
 510	 * influences the first RTO but not later RTT estimation.
 511	 *
 512	 * But if RTT is not available from the SYN (due to retransmits or
 513	 * syn cookies) or the cache, force a conservative 3secs timeout.
 514	 *
  515	 * A bit of theory. RTT is the time that passes after a "normal" sized
  516	 * packet is sent until it is ACKed. In normal circumstances, sending
  517	 * small packets forces the peer to delay ACKs, so the calculation is
  518	 * still correct. The algorithm is adaptive and, provided we follow
  519	 * specs, it NEVER underestimates RTT. BUT! If the peer plays clever
  520	 * tricks such as "quick acks" for long enough to drive RTT down to a
  521	 * low value, and then abruptly stops doing so and starts delaying
  522	 * ACKs, expect trouble.
 523	 */
 524	if (crtt > tp->srtt_us) {
 525		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
 526		crtt /= 8 * USEC_PER_SEC / HZ;
 527		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
 528	} else if (tp->srtt_us == 0) {
 529		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
 530		 * 3WHS. This is most likely due to retransmission,
 531		 * including spurious one. Reset the RTO back to 3secs
 532		 * from the more aggressive 1sec to avoid more spurious
 533		 * retransmission.
 534		 */
 535		tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
 536		tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;
 537
 538		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
 539	}
 540}
 541
 542bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
 543{
 544	struct tcp_metrics_block *tm;
 545	bool ret;
 546
 547	if (!dst)
 548		return false;
 549
 550	rcu_read_lock();
 551	tm = __tcp_get_metrics_req(req, dst);
 552	if (tm && tcp_metric_get(tm, TCP_METRIC_RTT))
 553		ret = true;
 554	else
 555		ret = false;
 556	rcu_read_unlock();
 557
 558	return ret;
 559}
 560
 561void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
 562			    struct tcp_fastopen_cookie *cookie)
 563{
 564	struct tcp_metrics_block *tm;
 565
 566	rcu_read_lock();
 567	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
 568	if (tm) {
 569		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
 570		unsigned int seq;
 571
 572		do {
 573			seq = read_seqbegin(&fastopen_seqlock);
 574			if (tfom->mss)
 575				*mss = tfom->mss;
 576			*cookie = tfom->cookie;
 577			if (cookie->len <= 0 && tfom->try_exp == 1)
 578				cookie->exp = true;
 579		} while (read_seqretry(&fastopen_seqlock, seq));
 580	}
 581	rcu_read_unlock();
 582}
 583
 584void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
 585			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
 586			    u16 try_exp)
 587{
 588	struct dst_entry *dst = __sk_dst_get(sk);
 589	struct tcp_metrics_block *tm;
 590
 591	if (!dst)
 592		return;
 593	rcu_read_lock();
 594	tm = tcp_get_metrics(sk, dst, true);
 595	if (tm) {
 596		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
 597
 598		write_seqlock_bh(&fastopen_seqlock);
 599		if (mss)
 600			tfom->mss = mss;
 601		if (cookie && cookie->len > 0)
 602			tfom->cookie = *cookie;
 603		else if (try_exp > tfom->try_exp &&
 604			 tfom->cookie.len <= 0 && !tfom->cookie.exp)
 605			tfom->try_exp = try_exp;
 606		if (syn_lost) {
 607			++tfom->syn_loss;
 608			tfom->last_syn_loss = jiffies;
 609		} else
 610			tfom->syn_loss = 0;
 611		write_sequnlock_bh(&fastopen_seqlock);
 612	}
 613	rcu_read_unlock();
 614}
 615
 616static struct genl_family tcp_metrics_nl_family;
 617
 618static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
 619	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
 620	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
 621					    .len = sizeof(struct in6_addr), },
  622	/* The following attributes are not received for GET/DEL;
  623	 * we keep them for reference.
  624	 */
 625#if 0
 626	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
 627	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
 628	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
 629	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
 630	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
 631	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
 632	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
 633	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
 634					    .len = TCP_FASTOPEN_COOKIE_MAX, },
 635#endif
 636};
 637
 638/* Add attributes, caller cancels its header on failure */
 639static int tcp_metrics_fill_info(struct sk_buff *msg,
 640				 struct tcp_metrics_block *tm)
 641{
 642	struct nlattr *nest;
 643	int i;
 644
 645	switch (tm->tcpm_daddr.family) {
 646	case AF_INET:
 647		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
 648				    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
 649			goto nla_put_failure;
 650		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
 651				    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
 652			goto nla_put_failure;
 653		break;
 654	case AF_INET6:
 655		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
 656				     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
 657			goto nla_put_failure;
 658		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
 659				     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
 660			goto nla_put_failure;
 661		break;
 662	default:
 663		return -EAFNOSUPPORT;
 664	}
 665
 666	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
 667			  jiffies - READ_ONCE(tm->tcpm_stamp),
 668			  TCP_METRICS_ATTR_PAD) < 0)
 669		goto nla_put_failure;
 670
 671	{
 672		int n = 0;
 673
 674		nest = nla_nest_start_noflag(msg, TCP_METRICS_ATTR_VALS);
 675		if (!nest)
 676			goto nla_put_failure;
 677		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
 678			u32 val = tcp_metric_get(tm, i);
 679
 680			if (!val)
 681				continue;
 682			if (i == TCP_METRIC_RTT) {
 683				if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
 684						val) < 0)
 685					goto nla_put_failure;
 686				n++;
 687				val = max(val / 1000, 1U);
 688			}
 689			if (i == TCP_METRIC_RTTVAR) {
 690				if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
 691						val) < 0)
 692					goto nla_put_failure;
 693				n++;
 694				val = max(val / 1000, 1U);
 695			}
 696			if (nla_put_u32(msg, i + 1, val) < 0)
 697				goto nla_put_failure;
 698			n++;
 699		}
 700		if (n)
 701			nla_nest_end(msg, nest);
 702		else
 703			nla_nest_cancel(msg, nest);
 704	}
 705
 706	{
 707		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
 708		unsigned int seq;
 709
 710		do {
 711			seq = read_seqbegin(&fastopen_seqlock);
 712			tfom_copy[0] = tm->tcpm_fastopen;
 713		} while (read_seqretry(&fastopen_seqlock, seq));
 714
 715		tfom = tfom_copy;
 716		if (tfom->mss &&
 717		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
 718				tfom->mss) < 0)
 719			goto nla_put_failure;
 720		if (tfom->syn_loss &&
 721		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
 722				tfom->syn_loss) < 0 ||
 723		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
 724				jiffies - tfom->last_syn_loss,
 725				TCP_METRICS_ATTR_PAD) < 0))
 726			goto nla_put_failure;
 727		if (tfom->cookie.len > 0 &&
 728		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
 729			    tfom->cookie.len, tfom->cookie.val) < 0)
 730			goto nla_put_failure;
 731	}
 732
 733	return 0;
 734
 735nla_put_failure:
 736	return -EMSGSIZE;
 737}
 738
 739static int tcp_metrics_dump_info(struct sk_buff *skb,
 740				 struct netlink_callback *cb,
 741				 struct tcp_metrics_block *tm)
 742{
 743	void *hdr;
 744
 745	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
 746			  &tcp_metrics_nl_family, NLM_F_MULTI,
 747			  TCP_METRICS_CMD_GET);
 748	if (!hdr)
 749		return -EMSGSIZE;
 750
 751	if (tcp_metrics_fill_info(skb, tm) < 0)
 752		goto nla_put_failure;
 753
 754	genlmsg_end(skb, hdr);
 755	return 0;
 756
 757nla_put_failure:
 758	genlmsg_cancel(skb, hdr);
 759	return -EMSGSIZE;
 760}
 761
 762static int tcp_metrics_nl_dump(struct sk_buff *skb,
 763			       struct netlink_callback *cb)
 764{
 765	struct net *net = sock_net(skb->sk);
 766	unsigned int max_rows = 1U << tcp_metrics_hash_log;
 767	unsigned int row, s_row = cb->args[0];
 768	int s_col = cb->args[1], col = s_col;
 769
 770	for (row = s_row; row < max_rows; row++, s_col = 0) {
 771		struct tcp_metrics_block *tm;
 772		struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;
 773
 774		rcu_read_lock();
 775		for (col = 0, tm = rcu_dereference(hb->chain); tm;
 776		     tm = rcu_dereference(tm->tcpm_next), col++) {
 777			if (!net_eq(tm_net(tm), net))
 778				continue;
 779			if (col < s_col)
 780				continue;
 781			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
 782				rcu_read_unlock();
 783				goto done;
 784			}
 785		}
 786		rcu_read_unlock();
 787	}
 788
 789done:
 790	cb->args[0] = row;
 791	cb->args[1] = col;
 792	return skb->len;
 793}
 794
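/* Fetch an IPv4 or IPv6 address attribute into @addr and, when @hash
 * is non-NULL, derive the lookup hash from it.  Returns 0 on success,
 * 1 if the attribute is absent and @optional, negative errno
 * otherwise.
 */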
 795static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
 796			   unsigned int *hash, int optional, int v4, int v6)
 797{
 798	struct nlattr *a;
 799
 800	a = info->attrs[v4];
 801	if (a) {
 802		inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
 803		if (hash)
 804			*hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
 805		return 0;
 806	}
 807	a = info->attrs[v6];
 808	if (a) {
 809		struct in6_addr in6;
 810
 811		if (nla_len(a) != sizeof(struct in6_addr))
 812			return -EINVAL;
 813		in6 = nla_get_in6_addr(a);
 814		inetpeer_set_addr_v6(addr, &in6);
 815		if (hash)
 816			*hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
 817		return 0;
 818	}
 819	return optional ? 1 : -EAFNOSUPPORT;
 820}
 821
 822static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
 823			 unsigned int *hash, int optional)
 824{
 825	return __parse_nl_addr(info, addr, hash, optional,
 826			       TCP_METRICS_ATTR_ADDR_IPV4,
 827			       TCP_METRICS_ATTR_ADDR_IPV6);
 828}
 829
 830static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
 831{
 832	return __parse_nl_addr(info, addr, NULL, 0,
 833			       TCP_METRICS_ATTR_SADDR_IPV4,
 834			       TCP_METRICS_ATTR_SADDR_IPV6);
 835}
 836
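/* GET doit handler: look up one entry by destination address,
 * optionally narrowed by source address, and return it in a unicast
 * reply; -ESRCH if nothing matches.  Userspace typically gets here
 * via iproute2, e.g. "ip tcp_metrics show 192.0.2.1" (illustrative
 * address; the tool may use the dump path instead).
 */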
 837static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
 838{
 839	struct tcp_metrics_block *tm;
 840	struct inetpeer_addr saddr, daddr;
 841	unsigned int hash;
 842	struct sk_buff *msg;
 843	struct net *net = genl_info_net(info);
 844	void *reply;
 845	int ret;
 846	bool src = true;
 847
 848	ret = parse_nl_addr(info, &daddr, &hash, 0);
 849	if (ret < 0)
 850		return ret;
 851
 852	ret = parse_nl_saddr(info, &saddr);
 853	if (ret < 0)
 854		src = false;
 855
 856	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 857	if (!msg)
 858		return -ENOMEM;
 859
 860	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
 861				  info->genlhdr->cmd);
 862	if (!reply)
 863		goto nla_put_failure;
 864
 865	hash ^= net_hash_mix(net);
 866	hash = hash_32(hash, tcp_metrics_hash_log);
 867	ret = -ESRCH;
 868	rcu_read_lock();
 869	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
 870	     tm = rcu_dereference(tm->tcpm_next)) {
 871		if (addr_same(&tm->tcpm_daddr, &daddr) &&
 872		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
 873		    net_eq(tm_net(tm), net)) {
 874			ret = tcp_metrics_fill_info(msg, tm);
 875			break;
 876		}
 877	}
 878	rcu_read_unlock();
 879	if (ret < 0)
 880		goto out_free;
 881
 882	genlmsg_end(msg, reply);
 883	return genlmsg_reply(msg, info);
 884
 885nla_put_failure:
 886	ret = -EMSGSIZE;
 887
 888out_free:
 889	nlmsg_free(msg);
 890	return ret;
 891}
 892
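/* Unlink and free entries table-wide: those belonging to @net, or,
 * when @net is NULL, those whose netns refcount has already dropped
 * to zero (the pernet exit path below).
 */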
 893static void tcp_metrics_flush_all(struct net *net)
 894{
 895	unsigned int max_rows = 1U << tcp_metrics_hash_log;
 896	struct tcpm_hash_bucket *hb = tcp_metrics_hash;
 897	struct tcp_metrics_block *tm;
 898	unsigned int row;
 899
 900	for (row = 0; row < max_rows; row++, hb++) {
 901		struct tcp_metrics_block __rcu **pp = &hb->chain;
 902		bool match;
 903
 904		if (!rcu_access_pointer(*pp))
 905			continue;
 906
 907		spin_lock_bh(&tcp_metrics_lock);
 908		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
 909			match = net ? net_eq(tm_net(tm), net) :
 910				!refcount_read(&tm_net(tm)->ns.count);
 911			if (match) {
 912				rcu_assign_pointer(*pp, tm->tcpm_next);
 913				kfree_rcu(tm, rcu_head);
 914			} else {
 915				pp = &tm->tcpm_next;
 916			}
 917		}
 918		spin_unlock_bh(&tcp_metrics_lock);
 919		cond_resched();
 920	}
 921}
 922
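/* DEL doit handler, gated by GENL_ADMIN_PERM: without a destination
 * address the whole netns is flushed; otherwise all entries matching
 * the destination (and, if given, source) address are removed.
 * Roughly what "ip tcp_metrics delete <addr>" and "ip tcp_metrics
 * flush" send, assuming iproute2's usual mapping onto
 * TCP_METRICS_CMD_DEL.
 */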
 923static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
 924{
 925	struct tcpm_hash_bucket *hb;
 926	struct tcp_metrics_block *tm;
 927	struct tcp_metrics_block __rcu **pp;
 928	struct inetpeer_addr saddr, daddr;
 929	unsigned int hash;
 930	struct net *net = genl_info_net(info);
 931	int ret;
 932	bool src = true, found = false;
 933
 934	ret = parse_nl_addr(info, &daddr, &hash, 1);
 935	if (ret < 0)
 936		return ret;
 937	if (ret > 0) {
 938		tcp_metrics_flush_all(net);
 939		return 0;
 940	}
 941	ret = parse_nl_saddr(info, &saddr);
 942	if (ret < 0)
 943		src = false;
 944
 945	hash ^= net_hash_mix(net);
 946	hash = hash_32(hash, tcp_metrics_hash_log);
 947	hb = tcp_metrics_hash + hash;
 948	pp = &hb->chain;
 949	spin_lock_bh(&tcp_metrics_lock);
 950	for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
 951		if (addr_same(&tm->tcpm_daddr, &daddr) &&
 952		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
 953		    net_eq(tm_net(tm), net)) {
 954			rcu_assign_pointer(*pp, tm->tcpm_next);
 955			kfree_rcu(tm, rcu_head);
 956			found = true;
 957		} else {
 958			pp = &tm->tcpm_next;
 959		}
 960	}
 961	spin_unlock_bh(&tcp_metrics_lock);
 962	if (!found)
 963		return -ESRCH;
 964	return 0;
 965}
 966
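/* GET serves both doit (single lookup) and dumpit (full walk) and is
 * unprivileged; DEL requires GENL_ADMIN_PERM.
 */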
 967static const struct genl_small_ops tcp_metrics_nl_ops[] = {
 968	{
 969		.cmd = TCP_METRICS_CMD_GET,
 970		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
 971		.doit = tcp_metrics_nl_cmd_get,
 972		.dumpit = tcp_metrics_nl_dump,
 973	},
 974	{
 975		.cmd = TCP_METRICS_CMD_DEL,
 976		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
 977		.doit = tcp_metrics_nl_cmd_del,
 978		.flags = GENL_ADMIN_PERM,
 979	},
 980};
 981
 982static struct genl_family tcp_metrics_nl_family __ro_after_init = {
 983	.hdrsize	= 0,
 984	.name		= TCP_METRICS_GENL_NAME,
 985	.version	= TCP_METRICS_GENL_VERSION,
 986	.maxattr	= TCP_METRICS_ATTR_MAX,
 987	.policy = tcp_metrics_nl_policy,
 988	.netnsok	= true,
 989	.module		= THIS_MODULE,
 990	.small_ops	= tcp_metrics_nl_ops,
 991	.n_small_ops	= ARRAY_SIZE(tcp_metrics_nl_ops),
 992	.resv_start_op	= TCP_METRICS_CMD_DEL + 1,
 993};
 994
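/* "tcpmhash_entries=N" on the kernel command line overrides the
 * RAM-based default chosen in tcp_metrics_hash_alloc() below.
 */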
 995static unsigned int tcpmhash_entries __initdata;
 996static int __init set_tcpmhash_entries(char *str)
 997{
 998	ssize_t ret;
 999
1000	if (!str)
1001		return 0;
1002
1003	ret = kstrtouint(str, 0, &tcpmhash_entries);
1004	if (ret)
1005		return 0;
1006
1007	return 1;
1008}
1009__setup("tcpmhash_entries=", set_tcpmhash_entries);
1010
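/* Size the hash table: 16k slots on machines with at least 128k pages
 * of RAM (512 MiB with 4 KiB pages), 8k otherwise, unless overridden
 * by tcpmhash_entries=.  order_base_2() rounds the slot count up to a
 * power of two so hash_32() can index the buckets directly.
 */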
1011static void __init tcp_metrics_hash_alloc(void)
1012{
1013	unsigned int slots = tcpmhash_entries;
1014	size_t size;
1015
1016	if (!slots) {
1017		if (totalram_pages() >= 128 * 1024)
1018			slots = 16 * 1024;
1019		else
1020			slots = 8 * 1024;
1021	}
1022
1023	tcp_metrics_hash_log = order_base_2(slots);
1024	size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;
1025
1026	tcp_metrics_hash = kvzalloc(size, GFP_KERNEL);
1027	if (!tcp_metrics_hash)
1028		panic("Could not allocate the tcp_metrics hash table\n");
1029}
1030
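/* Pernet exit: passing a NULL net makes tcp_metrics_flush_all() reap
 * the entries of every netns whose refcount has reached zero, which
 * covers the whole exit batch in one pass.
 */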
1031static void __net_exit tcp_net_metrics_exit_batch(struct list_head *net_exit_list)
1032{
1033	tcp_metrics_flush_all(NULL);
1034}
1035
1036static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
1037	.exit_batch	=	tcp_net_metrics_exit_batch,
1038};
1039
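/* Called once at early boot; allocation or registration failure is
 * treated as fatal.
 */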
1040void __init tcp_metrics_init(void)
1041{
1042	int ret;
1043
1044	tcp_metrics_hash_alloc();
1045
1046	ret = register_pernet_subsys(&tcp_net_metrics_ops);
1047	if (ret < 0)
1048		panic("Could not register tcp_net_metrics_ops\n");
1049
1050	ret = genl_register_family(&tcp_metrics_nl_family);
1051	if (ret < 0)
1052		panic("Could not register tcp_metrics generic netlink\n");
1053}