// SPDX-License-Identifier: GPL-2.0
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10,		/* Recurring Fast Open SYN losses */
		try_exp:2;		/* Request w/ exp. option (once) */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};

/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility
 * Kernel only stores RTT and RTTVAR in usec resolution
 */
#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)

struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	possible_net_t			tcpm_net;
	struct inetpeer_addr		tcpm_saddr;
	struct inetpeer_addr		tcpm_daddr;
	unsigned long			tcpm_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};

static inline struct net *tm_net(struct tcp_metrics_block *tm)
{
	return read_pnet(&tm->tcpm_net);
}

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}
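
/* Illustrative sketch, excluded from the build: tcpm_lock dedicates one
 * bit per metric index, mirroring dst_metric_locked(). A set bit means the
 * value was pinned by a locked route attribute (see tcpm_suck_dst() below)
 * and measured samples must not overwrite it. The enum values here stand
 * in for the uapi tcp_metric_index ones.
 */
#if 0
#include <stdio.h>

enum { METRIC_RTT, METRIC_RTTVAR, METRIC_SSTHRESH, METRIC_CWND };

int main(void)
{
	unsigned int lock = (1u << METRIC_RTT) | (1u << METRIC_CWND);

	printf("rtt locked:      %d\n", !!(lock & (1u << METRIC_RTT)));      /* 1 */
	printf("ssthresh locked: %d\n", !!(lock & (1u << METRIC_SSTHRESH))); /* 0 */
	return 0;
}
#endif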

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	return inetpeer_addr_cmp(a, b) == 0;
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static struct tcpm_hash_bucket	*tcp_metrics_hash __read_mostly;
static unsigned int		tcp_metrics_hash_log __read_mostly;

static DEFINE_SPINLOCK(tcp_metrics_lock);

static void tcpm_suck_dst(struct tcp_metrics_block *tm,
			  const struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 msval;
	u32 val;

	tm->tcpm_stamp = jiffies;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	msval = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;

	msval = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	if (fastopen_clear) {
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.try_exp = 0;
		tm->tcpm_fastopen.cookie.exp = false;
		tm->tcpm_fastopen.cookie.len = 0;
	}
}
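
/* Unit note for the conversions above: route (dst) metrics carry RTT and
 * RTTVAR in milliseconds, while this cache stores them in microseconds,
 * hence the USEC_PER_MSEC (1000x) scaling. Worked example: a dst RTAX_RTT
 * of 250 ms is cached as tcpm_vals[TCP_METRIC_RTT] = 250 * 1000 = 250000.
 */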

#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst, false);
}

#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

#define deref_locked(p)	\
	rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))

static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *saddr,
					  struct inetpeer_addr *daddr,
					  unsigned int hash)
{
	struct tcp_metrics_block *tm;
	struct net *net;
	bool reclaim = false;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);

	/* While waiting for the spin-lock the cache might have been populated
	 * with this entry and so we have to check again.
	 */
	tm = __tcp_get_metrics(saddr, daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (tm) {
		tcpm_check_stamp(tm, dst);
		goto out_unlock;
	}

	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = deref_locked(tcp_metrics_hash[hash].chain);
		for (tm = deref_locked(oldest->tcpm_next); tm;
		     tm = deref_locked(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	write_pnet(&tm->tcpm_net, net);
	tm->tcpm_saddr = *saddr;
	tm->tcpm_daddr = *daddr;

	tcpm_suck_dst(tm, dst, true);

	if (likely(!reclaim)) {
		tm->tcpm_next = tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}
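
/* The 0x1 sentinel (TCP_METRICS_RECLAIM_PTR) can never collide with a real
 * kmalloc() pointer, so one return value encodes three states: hit (valid
 * pointer), miss on a short chain (NULL, allocate) and miss on a chain
 * deeper than TCP_METRICS_RECLAIM_DEPTH (sentinel, recycle the oldest
 * entry). Self-contained sketch of the pattern, excluded from the build:
 */
#if 0
#include <stdio.h>

#define RECLAIM_PTR ((void *)0x1UL)

static void *lookup_encode(void *hit, int depth)
{
	if (hit)
		return hit;
	return depth > 5 ? RECLAIM_PTR : NULL;
}

int main(void)
{
	void *r = lookup_encode(NULL, 7);

	if (r == RECLAIM_PTR)
		printf("chain too deep: reclaim the oldest entry\n");
	else if (!r)
		printf("plain miss: allocate a new entry\n");
	return 0;
}
#endif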

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, saddr) &&
		    addr_same(&tm->tcpm_daddr, daddr) &&
		    net_eq(tm_net(tm), net))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	saddr.family = req->rsk_ops->family;
	daddr.family = req->rsk_ops->family;
	switch (daddr.family) {
	case AF_INET:
		inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
		inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
		hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
		inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}

static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (sk->sk_family == AF_INET) {
		inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
		inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
		hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (sk->sk_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
			inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
			inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
			hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
		} else {
			inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
			inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR)
		tm = NULL;
	if (!tm && create)
		tm = tcpm_new(dst, &saddr, &daddr, hash);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}
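
/* Bucket selection above: a per-address hash is XOR-mixed with a per-netns
 * value and folded down to tcp_metrics_hash_log bits. Rough userspace
 * sketch, excluded from the build; the multiplier is GOLDEN_RATIO_32 from
 * linux/hash.h and the net mix value is a stand-in for net_hash_mix():
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint32_t hash_32(uint32_t val, unsigned int bits)
{
	return (val * 0x61C88647u) >> (32 - bits);
}

int main(void)
{
	uint32_t addr_hash = 0xc0a80001;	/* e.g. 192.168.0.1 */
	uint32_t net_mix   = 0x12345678;	/* stand-in for net_hash_mix() */

	printf("bucket %u\n", hash_32(addr_hash ^ net_mix, 10));
	return 0;
}
#endif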

/* Save metrics learned by this TCP session. This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	sk_dst_confirm(sk);
	if (net->ipv4.sysctl_tcp_nometrics_save || !dst)
		return;

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt_us) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time. Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt_us;

	/* If the newly calculated RTT is larger than the stored one, store
	 * the new one. Otherwise, use EWMA. Remember, RTT overestimation is
	 * always better than underestimation.
	 */
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt_us;
		else
			rtt -= (m >> 3);
		tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev_us)
			m = tp->mdev_us;

		var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (!tcp_in_slow_start(tp) &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is meaningless and
		 * ssthresh may be invalid as well.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != net->ipv4.sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}
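
/* The RTT merge above is the usual 1/8-gain EWMA, applied asymmetrically:
 * a measured srtt above the cached value replaces it outright, a smaller
 * one only drags it down by 1/8 of the difference, since overestimating
 * RTT is safer than underestimating it. Self-contained sketch, excluded
 * from the build:
 */
#if 0
#include <stdio.h>

static unsigned long merge_rtt(unsigned long cached, unsigned long srtt_us)
{
	long m = (long)(cached - srtt_us);

	return m <= 0 ? srtt_us : cached - (m >> 3);
}

int main(void)
{
	printf("%lu\n", merge_rtt(100000, 120000));	/* jumps up to 120000 */
	printf("%lu\n", merge_rtt(100000, 60000));	/* decays to 95000 */
	return 0;
}
#endif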

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	sk_dst_confirm(sk);
	if (!dst)
		goto reset;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS. Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val)
		tp->reordering = val;

	crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
	 * to seed the RTO for later data packets because SYN packets are
	 * small. Use the per-dst cached values to seed the RTO but keep
	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
	 * Later the RTO will be updated immediately upon obtaining the first
	 * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
	 * influences the first RTO but not later RTT estimation.
	 *
	 * But if RTT is not available from the SYN (due to retransmits or
	 * syn cookies) or the cache, force a conservative 3secs timeout.
	 *
	 * A bit of theory. RTT is the time that passes after a "normal"-sized
	 * packet is sent until it is ACKed. In normal circumstances, sending
	 * small packets forces the peer to delay ACKs and the calculation is
	 * correct too. The algorithm is adaptive and, provided we follow
	 * specs, it NEVER underestimates RTT. BUT! If the peer plays clever
	 * tricks, such as sending "quick acks" long enough to drive the RTT
	 * down to a low value and then abruptly switching to delayed ACKs,
	 * expect trouble.
	 */
	if (crtt > tp->srtt_us) {
		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
		crtt /= 8 * USEC_PER_SEC / HZ;
		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
	} else if (tp->srtt_us == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS. This is most likely due to retransmission,
		 * including spurious one. Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
		tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;

		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted. In light of RFC6298 more aggressive 1sec
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_jiffies32;
}
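
/* Worked example for the conversion above: crtt is srtt-scaled (usec << 3)
 * while icsk_rto is in jiffies, so dividing by 8 * USEC_PER_SEC / HZ undoes
 * the <<3 scaling and converts microseconds to ticks in one step. With
 * HZ == 1000 and a cached RTT of 200 ms:
 *
 *	crtt = 200000 << 3 = 1600000
 *	crtt / (8 * 1000000 / 1000) = 1600000 / 8000 = 200 jiffies (200 ms)
 *
 * and the seeded RTO becomes crtt + max(2 * crtt, tcp_rto_min(sk)).
 */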

bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (tm && tcp_metric_get(tm, TCP_METRIC_RTT))
		ret = true;
	else
		ret = false;
	rcu_read_unlock();

	return ret;
}

static DEFINE_SEQLOCK(fastopen_seqlock);

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			if (cookie->len <= 0 && tfom->try_exp == 1)
				cookie->exp = true;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;
	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		if (mss)
			tfom->mss = mss;
		if (cookie && cookie->len > 0)
			tfom->cookie = *cookie;
		else if (try_exp > tfom->try_exp &&
			 tfom->cookie.len <= 0 && !tfom->cookie.exp)
			tfom->try_exp = try_exp;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}
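
/* The two helpers above form a seqlock pair: tcp_fastopen_cache_set()
 * publishes under write_seqlock_bh(), while readers in
 * tcp_fastopen_cache_get() loop on read_seqbegin()/read_seqretry() until
 * they observe an untorn snapshot, so the Fast Open path never takes the
 * metrics spinlock.
 */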

static struct genl_family tcp_metrics_nl_family;

static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* Following attributes are not received for GET/DEL,
	 * we keep them for reference
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_daddr.family) {
	case AF_INET:
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - tm->tcpm_stamp,
			  TCP_METRICS_ATTR_PAD) < 0)
		goto nla_put_failure;

	{
		int n = 0;

		nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
			u32 val = tm->tcpm_vals[i];

			if (!val)
				continue;
			if (i == TCP_METRIC_RTT) {
				if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (i == TCP_METRIC_RTTVAR) {
				if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (nla_put_u32(msg, i + 1, val) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				jiffies - tfom->last_syn_loss,
				TCP_METRICS_ATTR_PAD) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (!net_eq(tm_net(tm), net))
				continue;
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}
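
/* Netlink dumps are restartable: when the skb fills up, the walk parks its
 * (row, col) position in cb->args[] and the next ->dumpit() call resumes
 * from there. Self-contained model of that cursor protocol, excluded from
 * the build ('budget' plays the role of remaining skb space):
 */
#if 0
#include <stdio.h>

static int dump_chunk(unsigned int *row, unsigned int *col,
		      unsigned int rows, unsigned int cols, int budget)
{
	for (; *row < rows; (*row)++, *col = 0)
		for (; *col < cols; (*col)++) {
			if (!budget--)
				return 1;	/* "skb full": resume here */
			printf("item %u.%u\n", *row, *col);
		}
	return 0;				/* dump complete */
}

int main(void)
{
	unsigned int row = 0, col = 0;

	while (dump_chunk(&row, &col, 2, 3, 4))
		printf("-- next dumpit() call --\n");
	return 0;
}
#endif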

static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			   unsigned int *hash, int optional, int v4, int v6)
{
	struct nlattr *a;

	a = info->attrs[v4];
	if (a) {
		inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
		if (hash)
			*hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
		return 0;
	}
	a = info->attrs[v6];
	if (a) {
		struct in6_addr in6;

		if (nla_len(a) != sizeof(struct in6_addr))
			return -EINVAL;
		in6 = nla_get_in6_addr(a);
		inetpeer_set_addr_v6(addr, &in6);
		if (hash)
			*hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}

static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	return __parse_nl_addr(info, addr, hash, optional,
			       TCP_METRICS_ATTR_ADDR_IPV4,
			       TCP_METRICS_ATTR_ADDR_IPV6);
}

static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
	return __parse_nl_addr(info, addr, NULL, 0,
			       TCP_METRICS_ATTR_SADDR_IPV4,
			       TCP_METRICS_ATTR_SADDR_IPV6);
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;
	bool src = true;

	ret = parse_nl_addr(info, &daddr, &hash, 0);
	if (ret < 0)
		return ret;

	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}

static void tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		struct tcp_metrics_block __rcu **pp;
		bool match;

		spin_lock_bh(&tcp_metrics_lock);
		pp = &hb->chain;
		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
			match = net ? net_eq(tm_net(tm), net) :
				!refcount_read(&tm_net(tm)->count);
			if (match) {
				*pp = tm->tcpm_next;
				kfree_rcu(tm, rcu_head);
			} else {
				pp = &tm->tcpm_next;
			}
		}
		spin_unlock_bh(&tcp_metrics_lock);
	}
}
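
/* Deletion above uses the classic "pointer to the previous link" walk:
 * *pp always names the pointer that leads to the current node, so an
 * unlink is a single store with no prev-node bookkeeping. Self-contained
 * sketch, excluded from the build (error handling and final frees elided):
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct node { int key; struct node *next; };

static void remove_key(struct node **pp, int key)
{
	struct node *n;

	while ((n = *pp) != NULL) {
		if (n->key == key) {
			*pp = n->next;		/* unlink */
			free(n);
		} else {
			pp = &n->next;		/* advance the link, not a node */
		}
	}
}

int main(void)
{
	int keys[] = { 1, 2, 2, 3 };
	struct node *head = NULL, **pp = &head;
	unsigned int i;

	for (i = 0; i < 4; i++) {
		*pp = malloc(sizeof(**pp));
		(*pp)->key = keys[i];
		(*pp)->next = NULL;
		pp = &(*pp)->next;
	}
	remove_key(&head, 2);
	for (; head; head = head->next)
		printf("%d\n", head->key);	/* prints 1 then 3 */
	return 0;
}
#endif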

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;
	bool src = true, found = false;

	ret = parse_nl_addr(info, &daddr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0) {
		tcp_metrics_flush_all(net);
		return 0;
	}
	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	hb = tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			*pp = tm->tcpm_next;
			kfree_rcu(tm, rcu_head);
			found = true;
		} else {
			pp = &tm->tcpm_next;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!found)
		return -ESRCH;
	return 0;
}

static const struct genl_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
		.policy = tcp_metrics_nl_policy,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.doit = tcp_metrics_nl_cmd_del,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family tcp_metrics_nl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.ops		= tcp_metrics_nl_ops,
	.n_ops		= ARRAY_SIZE(tcp_metrics_nl_ops),
};

static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);
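
/* Usage note: the table size can be forced from the kernel command line,
 * e.g.
 *
 *	tcpmhash_entries=4096
 *
 * The value is rounded up to a power of two by order_base_2() below; zero
 * or an unparseable value falls back to the RAM-based default.
 */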

static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	if (!net_eq(net, &init_net))
		return 0;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;

	tcp_metrics_hash = kvzalloc(size, GFP_KERNEL);
	if (!tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}
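
/* Sizing example for the above, assuming 4 KiB pages and 64-bit pointers:
 * with at least 128 * 1024 pages (512 MiB) of RAM, slots = 16384, so
 * tcp_metrics_hash_log = order_base_2(16384) = 14 and the table takes
 * sizeof(struct tcpm_hash_bucket) << 14 = 8 << 14 = 128 KiB. kvzalloc()
 * transparently falls back to vmalloc() if a chunk that large cannot be
 * allocated contiguously.
 */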

static void __net_exit tcp_net_metrics_exit_batch(struct list_head *net_exit_list)
{
	tcp_metrics_flush_all(NULL);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init		=	tcp_net_metrics_init,
	.exit_batch	=	tcp_net_metrics_exit_batch,
};

void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		panic("Could not allocate the tcp_metrics hash table\n");

	ret = genl_register_family(&tcp_metrics_nl_family);
	if (ret < 0)
		panic("Could not register tcp_metrics generic netlink\n");
}