/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_GEN_STATS_H
#define __NET_GEN_STATS_H

#include <linux/gen_stats.h>
#include <linux/socket.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>

/* Throughput stats.
 * Must be initialized beforehand with gnet_stats_basic_sync_init().
 *
 * If no reads can ever occur parallel to writes (e.g. stack-allocated
 * bstats), then the internal stat values can be written to and read
 * from directly. Otherwise, use _bstats_set/update() for writes and
 * gnet_stats_add_basic() for reads.
 */
struct gnet_stats_basic_sync {
	u64_stats_t bytes;
	u64_stats_t packets;
	struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));
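
/*
 * Usage sketch (illustrative, not part of this header; variable names
 * are placeholders). The writer side accounts a packet with
 * _bstats_update(), which is declared in net/sch_generic.h; the reader
 * side folds the counters into a private copy via gnet_stats_add_basic():
 *
 *	struct gnet_stats_basic_sync bstats, snap;
 *
 *	gnet_stats_basic_sync_init(&bstats);
 *	gnet_stats_basic_sync_init(&snap);
 *
 *	_bstats_update(&bstats, skb->len, 1);
 *
 *	gnet_stats_add_basic(&snap, NULL, &bstats, true);
 */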

struct net_rate_estimator;

struct gnet_dump {
	spinlock_t *      lock;
	struct sk_buff *  skb;
	struct nlattr *   tail;

	/* Backward compatibility */
	int               compat_tc_stats;
	int               compat_xstats;
	int               padattr;
	void *            xstats;
	int               xstats_len;
	struct tc_stats   tc_stats;
};

void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b);
int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
			  struct gnet_dump *d, int padattr);

int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
				 int tc_stats_type, int xstats_type,
				 spinlock_t *lock, struct gnet_dump *d,
				 int padattr);

int gnet_stats_copy_basic(struct gnet_dump *d,
			  struct gnet_stats_basic_sync __percpu *cpu,
			  struct gnet_stats_basic_sync *b, bool running);
void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
			  struct gnet_stats_basic_sync __percpu *cpu,
			  struct gnet_stats_basic_sync *b, bool running);
int gnet_stats_copy_basic_hw(struct gnet_dump *d,
			     struct gnet_stats_basic_sync __percpu *cpu,
			     struct gnet_stats_basic_sync *b, bool running);
int gnet_stats_copy_rate_est(struct gnet_dump *d,
			     struct net_rate_estimator __rcu **ptr);
int gnet_stats_copy_queue(struct gnet_dump *d,
			  struct gnet_stats_queue __percpu *cpu_q,
			  struct gnet_stats_queue *q, __u32 qlen);
void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
			  const struct gnet_stats_queue __percpu *cpu_q,
			  const struct gnet_stats_queue *q);
int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);

int gnet_stats_finish_copy(struct gnet_dump *d);

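/*
 * Dump sequence sketch (illustrative; loosely mirrors the qdisc dump
 * path in net/sched/sch_api.c, with placeholder names). A dumper opens
 * a TCA_STATS2 nest, copies each stats block, then closes the nest:
 *
 *	struct gnet_dump d;
 *
 *	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
 *					 TCA_XSTATS, NULL, &d, TCA_PAD) < 0)
 *		goto nla_put_failure;
 *
 *	if (gnet_stats_copy_basic(&d, cpu_bstats, &bstats, true) < 0 ||
 *	    gnet_stats_copy_rate_est(&d, &rate_est) < 0 ||
 *	    gnet_stats_copy_queue(&d, cpu_qstats, &qstats, qlen) < 0)
 *		goto nla_put_failure;
 *
 *	if (gnet_stats_finish_copy(&d) < 0)
 *		goto nla_put_failure;
 */
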
int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
		      struct gnet_stats_basic_sync __percpu *cpu_bstats,
		      struct net_rate_estimator __rcu **rate_est,
		      spinlock_t *lock,
		      bool running, struct nlattr *opt);
void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
			  struct gnet_stats_basic_sync __percpu *cpu_bstats,
			  struct net_rate_estimator __rcu **ptr,
			  spinlock_t *lock,
			  bool running, struct nlattr *opt);
bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
			struct gnet_stats_rate_est64 *sample);
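
/*
 * Estimator lifecycle sketch (illustrative; names are placeholders, and
 * "opt" is assumed to be a TCA_RATE attribute carrying a
 * struct tc_estimator):
 *
 *	struct net_rate_estimator __rcu *rate_est = NULL;
 *	int err;
 *
 *	err = gen_new_estimator(&bstats, NULL, &rate_est, &stats_lock,
 *				true, opt);
 *
 *	if (gen_estimator_active(&rate_est)) {
 *		struct gnet_stats_rate_est64 sample;
 *
 *		gen_estimator_read(&rate_est, &sample);
 *	}
 *
 *	gen_kill_estimator(&rate_est);
 */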
#endif