#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/percpu_counter.h>

struct netns_frags {
	/* The percpu_counter "mem" needs to be cacheline aligned.
	 * mem.count must not share a cacheline with other writers.
	 */
	struct percpu_counter	mem ____cacheline_aligned_in_smp;

	/* sysctls */
	int			timeout;
	int			high_thresh;
	int			low_thresh;
	int			max_dist;
};

/**
 * fragment queue flags
 *
 * @INET_FRAG_FIRST_IN: first fragment has arrived
 * @INET_FRAG_LAST_IN: final fragment has arrived
 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
 */
enum {
	INET_FRAG_FIRST_IN	= BIT(0),
	INET_FRAG_LAST_IN	= BIT(1),
	INET_FRAG_COMPLETE	= BIT(2),
};
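
/* Example: a datagram is complete once both boundary fragments have
 * arrived and the received bytes cover the whole original length. A
 * sketch of the usual completeness test (qp is an illustrative queue
 * pointer, not defined in this header):
 *
 *	if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
 *	    qp->q.meat == qp->q.len)
 *		... reassemble the datagram ...
 */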

/**
 * struct inet_frag_queue - fragment queue
 *
 * @lock: spinlock protecting the queue
 * @timer: queue expiration timer
 * @list: hash bucket list
 * @refcnt: reference count of the queue
 * @fragments: received fragments head
 * @fragments_tail: received fragments tail
 * @stamp: timestamp of the last received fragment
 * @len: total length of the original datagram
 * @meat: length of received fragments so far
 * @flags: fragment queue flags
 * @max_size: maximum received fragment size
 * @net: namespace that this frag belongs to
 * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
 */
struct inet_frag_queue {
	spinlock_t		lock;
	struct timer_list	timer;
	struct hlist_node	list;
	atomic_t		refcnt;
	struct sk_buff		*fragments;
	struct sk_buff		*fragments_tail;
	ktime_t			stamp;
	int			len;
	int			meat;
	__u8			flags;
	u16			max_size;
	struct netns_frags	*net;
	struct hlist_node	list_evictor;
};

#define INETFRAGS_HASHSZ	1024

/* averaged:
 * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
 *	       (SKB_TRUESIZE(0) + sizeof(struct ipq or
 *	       struct frag_queue)), rounded up
 */
#define INETFRAGS_MAXDEPTH	128

struct inet_frag_bucket {
	struct hlist_head	chain;
	spinlock_t		chain_lock;
};

struct inet_frags {
	struct inet_frag_bucket	hash[INETFRAGS_HASHSZ];

	struct work_struct	frags_work;
	unsigned int		next_bucket;
	unsigned long		last_rebuild_jiffies;
	bool			rebuild;

	/* The first call to hashfn is responsible for initializing
	 * rnd. This is best done with net_get_random_once.
	 *
	 * rnd_seqlock is used to let hash insertion detect when it
	 * needs to re-look up the hash chain to use.
	 */
	u32			rnd;
	seqlock_t		rnd_seqlock;
	int			qsize;

	unsigned int		(*hashfn)(const struct inet_frag_queue *);
	bool			(*match)(const struct inet_frag_queue *q,
					 const void *arg);
	void			(*constructor)(struct inet_frag_queue *q,
					       const void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*frag_expire)(unsigned long data);
	struct kmem_cache	*frags_cachep;
	const char		*frags_cache_name;
};
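
/* Example: a protocol fills in its ops once at boot and registers them.
 * A minimal sketch, loosely modeled on the IPv4 side in
 * net/ipv4/ip_fragment.c; the ip4_* helpers and struct ipq are that
 * code's names, shown here only for illustration:
 *
 *	static struct inet_frags ip4_frags;
 *
 *	ip4_frags.hashfn = ip4_hashfn;
 *	ip4_frags.constructor = ip4_frag_init;
 *	ip4_frags.destructor = ip4_frag_free;
 *	ip4_frags.qsize = sizeof(struct ipq);
 *	ip4_frags.match = ip4_frag_match;
 *	ip4_frags.frag_expire = ip_expire;
 *	ip4_frags.frags_cache_name = "ip4-frags";
 *	if (inet_frags_init(&ip4_frags))
 *		panic("IP: failed to allocate ip4_frags cache\n");
 */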

int inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

static inline int inet_frags_init_net(struct netns_frags *nf)
{
	return percpu_counter_init(&nf->mem, 0, GFP_KERNEL);
}

static inline void inet_frags_uninit_net(struct netns_frags *nf)
{
	percpu_counter_destroy(&nf->mem);
}
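
/* Example: each namespace seeds its sysctls before initializing the
 * accounting counter. A hedged sketch of a pernet init; the threshold
 * values and the net->ipv4.frags placement are illustrative and vary
 * by kernel version:
 *
 *	static int __net_init ipv4_frags_init_net(struct net *net)
 *	{
 *		struct netns_frags *nf = &net->ipv4.frags;
 *
 *		nf->high_thresh = 4 * 1024 * 1024;
 *		nf->low_thresh  = 3 * 1024 * 1024;
 *		nf->timeout = IP_FRAG_TIME;
 *		return inet_frags_init_net(nf);
 *	}
 */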

void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
				       struct inet_frags *f,
				       void *key, unsigned int hash);

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix);

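/* Example: lookup or creation goes through inet_frag_find() with a
 * protocol-computed hash; an IS_ERR_OR_NULL() result means failure or
 * an overfull chain, which is when the overflow warning helper is
 * called. A sketch of the usual call site (arg, hash and struct ipq
 * are illustrative):
 *
 *	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
 *	if (IS_ERR_OR_NULL(q)) {
 *		inet_frag_maybe_warn_overflow(q, pr_fmt());
 *		return NULL;
 *	}
 *	return container_of(q, struct ipq, q);
 */
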
static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
	if (atomic_dec_and_test(&q->refcnt))
		inet_frag_destroy(q, f);
}

static inline bool inet_frag_evicting(struct inet_frag_queue *q)
{
	return !hlist_unhashed(&q->list_evictor);
}

/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
 * fragmentation mem acct sizes.
 * The mem size of a 64K fragment is approx:
 * (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 */
static unsigned int frag_percpu_counter_batch = 130000;

static inline int frag_mem_limit(struct netns_frags *nf)
{
	return percpu_counter_read(&nf->mem);
}

static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
{
	__percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
}

static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
{
	__percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
}

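/* Example: receive paths charge each fragment's truesize against the
 * per-netns counter and release it on reassembly or eviction. A sketch
 * of the usual pairing (q and skb are illustrative):
 *
 *	add_frag_mem_limit(q->net, skb->truesize);
 *	...
 *	sub_frag_mem_limit(q->net, skb->truesize);
 *
 * frag_mem_limit() reads the cached (possibly slightly stale) count;
 * it is what the high_thresh/low_thresh comparisons use.
 */
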
static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
{
	unsigned int res;

	local_bh_disable();
	res = percpu_counter_sum_positive(&nf->mem);
	local_bh_enable();

	return res;
}

/* RFC 3168 support:
 * We want to check the ECN values of all fragments to detect invalid
 * combinations. In ipq->ecn, we store the OR of the ip4_frag_ecn()
 * values of all received fragments.
 */
#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

extern const u8 ip_frag_ecn_table[16];
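
/* Example: at reassembly time the accumulated flag set indexes the
 * table; 0xff marks an invalid combination and the datagram must be
 * dropped. A sketch (qp->ecn is the OR-ed IPFRAG_ECN_* flag set):
 *
 *	u8 ecn = ip_frag_ecn_table[qp->ecn];
 *
 *	if (unlikely(ecn == 0xff))
 *		goto out_fail;	// inconsistent ECN marks, drop
 */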

#endif