#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/percpu_counter.h>

struct netns_frags {
	int			nqueues;
	struct list_head	lru_list;
	spinlock_t		lru_lock;

	/* The percpu_counter "mem" needs to be cacheline aligned.
	 * mem.count must not share a cacheline with other writers.
	 */
	struct percpu_counter	mem ____cacheline_aligned_in_smp;

	/* sysctls */
	int			timeout;
	int			high_thresh;
	int			low_thresh;
};

struct inet_frag_queue {
	spinlock_t		lock;
	struct timer_list	timer;      /* when will this queue expire? */
	struct list_head	lru_list;   /* lru list member */
	struct hlist_node	list;
	atomic_t		refcnt;
	struct sk_buff		*fragments; /* list of received fragments */
	struct sk_buff		*fragments_tail;
	ktime_t			stamp;
	int			len;        /* total length of orig datagram */
	int			meat;
	__u8			last_in;    /* first/last segment arrived? */

#define INET_FRAG_COMPLETE	4
#define INET_FRAG_FIRST_IN	2
#define INET_FRAG_LAST_IN	1

	u16			max_size;

	struct netns_frags	*net;
};

#define INETFRAGS_HASHSZ	1024

/* averaged:
 * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
 *	       rounded up (SKB_TRUESIZE(0) + sizeof(struct ipq or
 *	       struct frag_queue))
 */
#define INETFRAGS_MAXDEPTH	128

struct inet_frag_bucket {
	struct hlist_head	chain;
	spinlock_t		chain_lock;
};

struct inet_frags {
	struct inet_frag_bucket	hash[INETFRAGS_HASHSZ];
	/* This rwlock is a global lock (separate per IPv4, IPv6 and
	 * netfilter). Important to keep this on a separate cacheline.
	 * It's primarily a rebuild-protection rwlock.
	 */
	rwlock_t		lock ____cacheline_aligned_in_smp;
	int			secret_interval;
	struct timer_list	secret_timer;

	/* The first call to hashfn is responsible for initializing
	 * rnd. This is best done with net_get_random_once.
	 */
	u32			rnd;
	int			qsize;

	unsigned int		(*hashfn)(struct inet_frag_queue *);
	bool			(*match)(struct inet_frag_queue *q, void *arg);
	void			(*constructor)(struct inet_frag_queue *q,
					       void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*skb_free)(struct sk_buff *);
	void			(*frag_expire)(unsigned long data);
};

void inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q,
		       struct inet_frags *f, int *work);
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock);
void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix);

static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
	if (atomic_dec_and_test(&q->refcnt))
		inet_frag_destroy(q, f, NULL);
}

/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
 * fragmentation mem acct sizes.
 * The mem size of a 64K fragment is approx:
 *  (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 */
static unsigned int frag_percpu_counter_batch = 130000;

static inline int frag_mem_limit(struct netns_frags *nf)
{
	return percpu_counter_read(&nf->mem);
}

static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
}

static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
}

static inline void init_frag_mem_limit(struct netns_frags *nf)
{
	percpu_counter_init(&nf->mem, 0);
}

static inline int sum_frag_mem_limit(struct netns_frags *nf)
{
	int res;

	local_bh_disable();
	res = percpu_counter_sum_positive(&nf->mem);
	local_bh_enable();

	return res;
}
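
/* Illustrative sketch (not part of the original header): callers are
 * expected to charge each fragment's truesize against the namespace
 * accounting when queueing it, and to release the charge when the fragment
 * is freed. The helper below is hypothetical and only shows the intended
 * pairing with add_frag_mem_limit()/sub_frag_mem_limit(); because of the
 * large frag_percpu_counter_batch, frag_mem_limit() may lag the true total
 * by up to roughly one batch per CPU.
 */
static inline void example_charge_fragment(struct inet_frag_queue *q,
					   unsigned int truesize)
{
	/* per-CPU add; folded into the shared counter only once the
	 * local delta exceeds frag_percpu_counter_batch
	 */
	add_frag_mem_limit(q, truesize);
}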

static inline void inet_frag_lru_move(struct inet_frag_queue *q)
{
	spin_lock(&q->net->lru_lock);
	if (!list_empty(&q->lru_list))
		list_move_tail(&q->lru_list, &q->net->lru_list);
	spin_unlock(&q->net->lru_lock);
}

static inline void inet_frag_lru_del(struct inet_frag_queue *q)
{
	spin_lock(&q->net->lru_lock);
	list_del_init(&q->lru_list);
	q->net->nqueues--;
	spin_unlock(&q->net->lru_lock);
}

static inline void inet_frag_lru_add(struct netns_frags *nf,
				     struct inet_frag_queue *q)
{
	spin_lock(&nf->lru_lock);
	list_add_tail(&q->lru_list, &nf->lru_list);
	q->net->nqueues++;
	spin_unlock(&nf->lru_lock);
}

/* RFC 3168 support :
 * We want to check ECN values of all fragments, to detect invalid combinations.
 * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
 */
#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

extern const u8 ip_frag_ecn_table[16];
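
/* Illustrative sketch (not part of the original header): reassembly code
 * ORs the IPFRAG_ECN_* value of every fragment into an accumulator and,
 * once the datagram is complete, consults ip_frag_ecn_table; a table value
 * of 0xff marks a combination that must cause the datagram to be dropped.
 * The helper name below is hypothetical.
 */
static inline bool example_ecn_combination_valid(u8 ecn_bits)
{
	return ip_frag_ecn_table[ecn_bits & 0x0f] != 0xff;
}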

#endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/rhashtable.h>

struct netns_frags {
	/* sysctls */
	long			high_thresh;
	long			low_thresh;
	int			timeout;
	int			max_dist;
	struct inet_frags	*f;

	struct rhashtable	rhashtable ____cacheline_aligned_in_smp;

	/* Keep atomic mem on separate cachelines in structs that include it */
	atomic_long_t		mem ____cacheline_aligned_in_smp;
};

/**
 * fragment queue flags
 *
 * @INET_FRAG_FIRST_IN: first fragment has arrived
 * @INET_FRAG_LAST_IN: final fragment has arrived
 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
 */
enum {
	INET_FRAG_FIRST_IN	= BIT(0),
	INET_FRAG_LAST_IN	= BIT(1),
	INET_FRAG_COMPLETE	= BIT(2),
};
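
/* Illustrative sketch (not part of the original header): protocol code
 * typically considers reassembly complete only once both INET_FRAG_FIRST_IN
 * and INET_FRAG_LAST_IN are set (and the received length matches the
 * expected total). The helper name below is hypothetical.
 */
static inline bool example_frag_seen_both_ends(u8 flags)
{
	return (flags & (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN)) ==
	       (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN);
}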

struct frag_v4_compare_key {
	__be32		saddr;
	__be32		daddr;
	u32		user;
	u32		vif;
	__be16		id;
	u16		protocol;
};

struct frag_v6_compare_key {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	u32		user;
	__be32		id;
	u32		iif;
};

/**
 * struct inet_frag_queue - fragment queue
 *
 * @node: rhash node
 * @key: keys identifying this frag.
 * @timer: queue expiration timer
 * @lock: spinlock protecting this frag
 * @refcnt: reference count of the queue
 * @fragments: received fragments head
 * @fragments_tail: received fragments tail
 * @stamp: timestamp of the last received fragment
 * @len: total length of the original datagram
 * @meat: length of received fragments so far
 * @flags: fragment queue flags
 * @max_size: maximum received fragment size
 * @net: namespace that this frag belongs to
 * @rcu: rcu head for deferred freeing
 */
struct inet_frag_queue {
	struct rhash_head	node;
	union {
		struct frag_v4_compare_key v4;
		struct frag_v6_compare_key v6;
	} key;
	struct timer_list	timer;
	spinlock_t		lock;
	refcount_t		refcnt;
	struct sk_buff		*fragments;
	struct sk_buff		*fragments_tail;
	ktime_t			stamp;
	int			len;
	int			meat;
	__u8			flags;
	u16			max_size;
	struct netns_frags	*net;
	struct rcu_head		rcu;
};

struct inet_frags {
	unsigned int		qsize;

	void			(*constructor)(struct inet_frag_queue *q,
					       const void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*frag_expire)(struct timer_list *t);
	struct kmem_cache	*frags_cachep;
	const char		*frags_cache_name;
	struct rhashtable_params rhash_params;
};

int inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

static inline int inet_frags_init_net(struct netns_frags *nf)
{
	atomic_long_set(&nf->mem, 0);
	return rhashtable_init(&nf->rhashtable, &nf->f->rhash_params);
}
void inet_frags_exit_net(struct netns_frags *nf);
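
/* Illustrative sketch (not part of the original header): per-netns setup in
 * protocol code is expected to fill in the sysctl limits and point nf->f at
 * its inet_frags descriptor before calling inet_frags_init_net(), since the
 * rhashtable parameters are taken from nf->f. The helper name and the
 * default values below are hypothetical/illustrative.
 */
static inline int example_proto_frags_init_net(struct netns_frags *nf,
					       struct inet_frags *proto_frags)
{
	nf->high_thresh	= 4 * 1024 * 1024;	/* illustrative defaults */
	nf->low_thresh	= 3 * 1024 * 1024;
	nf->timeout	= 30 * HZ;
	nf->f		= proto_frags;

	return inet_frags_init_net(nf);
}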

void inet_frag_kill(struct inet_frag_queue *q);
void inet_frag_destroy(struct inet_frag_queue *q);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key);
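
/* Illustrative sketch (not part of the original header): an IPv4 caller
 * would fill a frag_v4_compare_key from the packet and look up (or create)
 * the matching queue, then drop its reference with inet_frag_put() when
 * done. Assumes <linux/ip.h> for struct iphdr; the helper name is
 * hypothetical.
 */
static inline struct inet_frag_queue *
example_find_v4_queue(struct netns_frags *nf, const struct iphdr *iph,
		      u32 user, int vif)
{
	struct frag_v4_compare_key key = {
		.saddr		= iph->saddr,
		.daddr		= iph->daddr,
		.user		= user,
		.vif		= vif,
		.id		= iph->id,
		.protocol	= iph->protocol,
	};

	return inet_frag_find(nf, &key);
}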

static inline void inet_frag_put(struct inet_frag_queue *q)
{
	if (refcount_dec_and_test(&q->refcnt))
		inet_frag_destroy(q);
}

/* Memory Tracking Functions. */

static inline long frag_mem_limit(const struct netns_frags *nf)
{
	return atomic_long_read(&nf->mem);
}

static inline void sub_frag_mem_limit(struct netns_frags *nf, long val)
{
	atomic_long_sub(val, &nf->mem);
}

static inline void add_frag_mem_limit(struct netns_frags *nf, long val)
{
	atomic_long_add(val, &nf->mem);
}
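
/* Illustrative sketch (not part of the original header): before queueing a
 * new fragment, protocol code typically compares the current accounting
 * against the per-netns high_thresh sysctl and refuses (or evicts) when the
 * limit is exceeded. The helper name below is hypothetical.
 */
static inline bool example_frag_mem_over_limit(const struct netns_frags *nf)
{
	return frag_mem_limit(nf) > nf->high_thresh;
}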

/* RFC 3168 support :
 * We want to check ECN values of all fragments, to detect invalid combinations.
 * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
 */
#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

extern const u8 ip_frag_ecn_table[16];

#endif