/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic TIME_WAIT sockets functions
 *
 *		From code originally in TCP
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>

/**
 * inet_twsk_bind_unhash - unhash a timewait socket from bind hash
 * @tw: timewait socket
 * @hashinfo: hashinfo pointer
 *
 * unhash a timewait socket from bind hash, if hashed.
 * bind hash lock must be held by caller.
 * The bind-hash reference is dropped here via __sock_put(), so the
 * caller does not need to call inet_twsk_put() afterwards.
 */
void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
			   struct inet_hashinfo *hashinfo)
{
	struct inet_bind_bucket *tb = tw->tw_tb;

	if (!tb)
		return;

	__hlist_del(&tw->tw_bind_node);
	tw->tw_tb = NULL;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	/* Drop the reference the bind hash chain held on this socket. */
	__sock_put((struct sock *)tw);
}

/* Must be called with locally disabled BHs. */
static void inet_twsk_kill(struct inet_timewait_sock *tw)
{
	struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
	spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
	struct inet_bind_hashbucket *bhead;

	/* Unlink from established hash; on success this also drops
	 * the ehash chain reference.
	 */
	spin_lock(lock);
	sk_nulls_del_node_init_rcu((struct sock *)tw);
	spin_unlock(lock);

	/* Disassociate with bind bucket. */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
			hashinfo->bhash_size)];

	spin_lock(&bhead->lock);
	inet_twsk_bind_unhash(tw, hashinfo);
	spin_unlock(&bhead->lock);

	atomic_dec(&tw->tw_dr->tw_count);
	/* Drop the timer's reference; this may free tw. */
	inet_twsk_put(tw);
}

void inet_twsk_free(struct inet_timewait_sock *tw)
{
	struct module *owner = tw->tw_prot->owner;

	twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
	pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
#endif
	kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
	module_put(owner);
}

void inet_twsk_put(struct inet_timewait_sock *tw)
{
	if (refcount_dec_and_test(&tw->tw_refcnt))
		inet_twsk_free(tw);
}
EXPORT_SYMBOL_GPL(inet_twsk_put);
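/* Typical usage (illustrative, not from this file): lockless lookup
 * paths such as the TIME_WAIT branch of tcp_v4_rcv() hold a reference
 * on the timewait socket while using it and release it like this:
 *
 *	inet_twsk_put(inet_twsk(sk));
 *
 * The last put, wherever it happens, frees the bucket via
 * inet_twsk_free().
 */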

static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
				   struct hlist_nulls_head *list)
{
	hlist_nulls_add_head_rcu(&tw->tw_node, list);
}

static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
				    struct hlist_head *list)
{
	hlist_add_head(&tw->tw_bind_node, list);
}

/*
 * Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
			 struct inet_hashinfo *hashinfo)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
	spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
	struct inet_bind_hashbucket *bhead;

	/* Step 1: Put TW into bind hash. Original socket stays there too.
	 * Note that any socket with inet->num != 0 MUST be bound in
	 * binding cache, even if it is closed.
	 */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
			hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tw->tw_tb = icsk->icsk_bind_hash;
	WARN_ON(!icsk->icsk_bind_hash);
	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
	spin_unlock(&bhead->lock);

	spin_lock(lock);

	/* Step 2: Hash TW into ehash chain, before removing sk, so that
	 * lockless readers always find either the old or the new socket.
	 */
	inet_twsk_add_node_rcu(tw, &ehead->chain);

	/* Step 3: Remove SK from hash chain */
	if (__sk_nulls_del_node_init_rcu(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

	spin_unlock(lock);

	/* tw_refcnt is set to 3 because we have:
	 * - one reference for bhash chain.
	 * - one reference for ehash chain.
	 * - one reference for timer.
	 * We can use atomic_set() because prior spin_lock()/spin_unlock()
	 * committed into memory all tw fields.
	 * Also note that after this point, we lost our implicit reference
	 * so we are not allowed to use tw anymore.
	 */
	refcount_set(&tw->tw_refcnt, 3);
}
EXPORT_SYMBOL_GPL(inet_twsk_hashdance);
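/* A rough sketch of the caller side (modeled on tcp_time_wait() in
 * net/ipv4/tcp_minisocks.c; details differ between kernel versions):
 *
 *	tw = inet_twsk_alloc(sk, &sock_net(sk)->ipv4.tcp_death_row, state);
 *	if (tw) {
 *		... copy TCP-specific state into tw ...
 *		local_bh_disable();
 *		inet_twsk_schedule(tw, timeo);
 *		inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
 *		local_bh_enable();
 *	}
 *
 * BHs stay disabled across schedule + hashdance because tw_timer is
 * pinned and could otherwise fire before tw_refcnt is set to 3.
 */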

static void tw_timer_handler(struct timer_list *t)
{
	struct inet_timewait_sock *tw = from_timer(tw, t, tw_timer);

	if (tw->tw_kill)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
	else
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_TIMEWAITED);
	inet_twsk_kill(tw);
}

struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
					   struct inet_timewait_death_row *dr,
					   const int state)
{
	struct inet_timewait_sock *tw;

	/* Refuse when the death row already holds the sysctl limit. */
	if (atomic_read(&dr->tw_count) >= dr->sysctl_max_tw_buckets)
		return NULL;

	tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
			      GFP_ATOMIC);
	if (tw) {
		const struct inet_sock *inet = inet_sk(sk);

		tw->tw_dr = dr;
		/* Give us an identity. */
		tw->tw_daddr = inet->inet_daddr;
		tw->tw_rcv_saddr = inet->inet_rcv_saddr;
		tw->tw_bound_dev_if = sk->sk_bound_dev_if;
		tw->tw_tos = inet->tos;
		tw->tw_num = inet->inet_num;
		tw->tw_state = TCP_TIME_WAIT;
		tw->tw_substate = state;
		tw->tw_sport = inet->inet_sport;
		tw->tw_dport = inet->inet_dport;
		tw->tw_family = sk->sk_family;
		tw->tw_reuse = sk->sk_reuse;
		tw->tw_reuseport = sk->sk_reuseport;
		tw->tw_hash = sk->sk_hash;
		tw->tw_ipv6only = 0;
		tw->tw_transparent = inet->transparent;
		tw->tw_prot = sk->sk_prot_creator;
		atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
		twsk_net_set(tw, sock_net(sk));
		timer_setup(&tw->tw_timer, tw_timer_handler, TIMER_PINNED);
		/*
		 * Because we use RCU lookups, we should not set tw_refcnt
		 * to a non-null value before everything is set up for this
		 * timewait socket.
		 */
		refcount_set(&tw->tw_refcnt, 0);

		__module_get(tw->tw_prot->owner);
	}

	return tw;
}
EXPORT_SYMBOL_GPL(inet_twsk_alloc);
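/* A NULL return is not fatal: tcp_time_wait(), for instance, simply
 * skips the TIME_WAIT state and proceeds to tcp_done(sk) when no
 * bucket can be allocated (behavior summarized from
 * net/ipv4/tcp_minisocks.c, not from this file).
 */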

/* These are always called from BH context. See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets.
 * Warning: this consumes a reference.
 * Caller should not access tw anymore.
 */
void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
{
	/* del_timer_sync() returning true means the timer was still
	 * pending, so we must do the kill (unhash and drop the timer's
	 * reference) ourselves.
	 */
	if (del_timer_sync(&tw->tw_timer))
		inet_twsk_kill(tw);
	inet_twsk_put(tw);
}
EXPORT_SYMBOL(inet_twsk_deschedule_put);

void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
{
	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent retransmitted
	 * FINs (or previous segments) are lost, the probability of such an
	 * event is p^(N+1), where p is the probability of losing a single
	 * packet, and the time to detect the loss is about RTO*(2^N - 1)
	 * with exponential backoff. The normal timewait length is chosen
	 * so that we wait at least for one retransmitted FIN (maximal RTO
	 * is 120 sec).
	 * [ BTW Linux, following BSD, violates this requirement by waiting
	 *   only for 60 sec; we should wait at least 240 secs.
	 *   Well, 240 consumes too much of resources 8)
	 * ]
	 * This interval is not reduced, so that we catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to bounds required
	 * by RTO, rather than MSL. So, if the peer understands PAWS, we
	 * kill the tw bucket after 3.5*RTO (it is important that this number
	 * is greater than the TS tick!) and detect old duplicates with the
	 * help of PAWS.
	 */

	/* Short, PAWS-recycled timeouts are accounted as TIMEWAITKILLED by
	 * tw_timer_handler(); regular TIME-WAITs are accounted as TIMEWAITED.
	 */
	tw->tw_kill = timeo <= 4*HZ;
	if (!rearm) {
		/* The timer must not already be pending here. */
		BUG_ON(mod_timer(&tw->tw_timer, jiffies + timeo));
		atomic_inc(&tw->tw_dr->tw_count);
	} else {
		mod_timer_pending(&tw->tw_timer, jiffies + timeo);
	}
}
EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
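/* Worked example (illustrative): with HZ=1000 and RTO = 200 ms, a
 * PAWS-recycled bucket gets timeo = 3.5 * 200 = 700 jiffies; since
 * 700 <= 4*HZ, tw_kill is set and the expiry is counted as
 * LINUX_MIB_TIMEWAITKILLED. A regular TIME-WAIT passes
 * timeo = TCP_TIMEWAIT_LEN = 60*HZ, so tw_kill stays false and the
 * expiry is counted as LINUX_MIB_TIMEWAITED.
 */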

void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
{
	struct inet_timewait_sock *tw;
	struct sock *sk;
	struct hlist_nulls_node *node;
	unsigned int slot;

	for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
restart_rcu:
		cond_resched();
		rcu_read_lock();
restart:
		sk_nulls_for_each_rcu(sk, node, &head->chain) {
			if (sk->sk_state != TCP_TIME_WAIT)
				continue;
			tw = inet_twsk(sk);
			if ((tw->tw_family != family) ||
			    refcount_read(&twsk_net(tw)->count))
				continue;

			if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
				continue;

			/* Recheck under the reference we just took. */
			if (unlikely((tw->tw_family != family) ||
				     refcount_read(&twsk_net(tw)->count))) {
				inet_twsk_put(tw);
				goto restart;
			}

			rcu_read_unlock();
			local_bh_disable();
			inet_twsk_deschedule_put(tw);
			local_bh_enable();
			goto restart_rcu;
		}
		/* If the nulls value we got at the end of this lookup is
		 * not the expected one, we must restart lookup.
		 * We probably met an item that was moved to another chain.
		 */
		if (get_nulls_value(node) != slot)
			goto restart;
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(inet_twsk_purge);
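/* Illustrative caller (summarized from net/ipv4/tcp_ipv4.c, not part
 * of this file): the per-netns TCP exit path flushes the remaining
 * TIME_WAIT sockets roughly like
 *
 *	static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
 *	{
 *		inet_twsk_purge(&tcp_hashinfo, AF_INET);
 *		...
 *	}
 */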
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic TIME_WAIT sockets functions
 *
 *		From code originally in TCP
 *
 *		(Earlier revision of the file above: timewait sockets still
 *		live on the death-row timer wheels below rather than on a
 *		per-socket timer.)
 */

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>

/**
 * inet_twsk_unhash - unhash a timewait socket from established hash
 * @tw: timewait socket
 *
 * unhash a timewait socket from established hash, if hashed.
 * ehash lock must be held by caller.
 * Returns 1 if caller should call inet_twsk_put() after lock release.
 */
int inet_twsk_unhash(struct inet_timewait_sock *tw)
{
	if (hlist_nulls_unhashed(&tw->tw_node))
		return 0;

	hlist_nulls_del_rcu(&tw->tw_node);
	sk_nulls_node_init(&tw->tw_node);
	/*
	 * We cannot call inet_twsk_put() ourselves under the lock,
	 * so the caller must do it for us.
	 */
	return 1;
}

/**
 * inet_twsk_bind_unhash - unhash a timewait socket from bind hash
 * @tw: timewait socket
 * @hashinfo: hashinfo pointer
 *
 * unhash a timewait socket from bind hash, if hashed.
 * bind hash lock must be held by caller.
 * Returns 1 if caller should call inet_twsk_put() after lock release.
 */
int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
			  struct inet_hashinfo *hashinfo)
{
	struct inet_bind_bucket *tb = tw->tw_tb;

	if (!tb)
		return 0;

	__hlist_del(&tw->tw_bind_node);
	tw->tw_tb = NULL;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	/*
	 * We cannot call inet_twsk_put() ourselves under the lock,
	 * so the caller must do it for us.
	 */
	return 1;
}

/* Must be called with locally disabled BHs. */
static void __inet_twsk_kill(struct inet_timewait_sock *tw,
			     struct inet_hashinfo *hashinfo)
{
	struct inet_bind_hashbucket *bhead;
	int refcnt;
	/* Unlink from established hashes. */
	spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);

	spin_lock(lock);
	refcnt = inet_twsk_unhash(tw);
	spin_unlock(lock);

	/* Disassociate with bind bucket. */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
			hashinfo->bhash_size)];

	spin_lock(&bhead->lock);
	refcnt += inet_twsk_bind_unhash(tw, hashinfo);
	spin_unlock(&bhead->lock);

#ifdef SOCK_REFCNT_DEBUG
	if (atomic_read(&tw->tw_refcnt) != 1) {
		pr_debug("%s timewait_sock %p refcnt=%d\n",
			 tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
	}
#endif
	while (refcnt) {
		inet_twsk_put(tw);
		refcnt--;
	}
}

static noinline void inet_twsk_free(struct inet_timewait_sock *tw)
{
	struct module *owner = tw->tw_prot->owner;

	twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
	pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
#endif
	release_net(twsk_net(tw));
	kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
	module_put(owner);
}

void inet_twsk_put(struct inet_timewait_sock *tw)
{
	if (atomic_dec_and_test(&tw->tw_refcnt))
		inet_twsk_free(tw);
}
EXPORT_SYMBOL_GPL(inet_twsk_put);

/*
 * Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
			   struct inet_hashinfo *hashinfo)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
	spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
	struct inet_bind_hashbucket *bhead;

	/* Step 1: Put TW into bind hash. Original socket stays there too.
	 * Note that any socket with inet->num != 0 MUST be bound in
	 * binding cache, even if it is closed.
	 */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
			hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tw->tw_tb = icsk->icsk_bind_hash;
	WARN_ON(!icsk->icsk_bind_hash);
	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
	spin_unlock(&bhead->lock);

	spin_lock(lock);

	/*
	 * Step 2: Hash TW into TIMEWAIT chain.
	 * Should be done before removing sk from established chain
	 * because readers are lockless and search established first.
	 */
	inet_twsk_add_node_rcu(tw, &ehead->twchain);

	/* Step 3: Remove SK from established hash. */
	if (__sk_nulls_del_node_init_rcu(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

	/*
	 * Notes:
	 * - We initially set tw_refcnt to 0 in inet_twsk_alloc()
	 * - We add one reference for the bhash link
	 * - We add one reference for the ehash link
	 * - We want this refcnt update done before allowing other
	 *   threads to find this tw in ehash chain.
	 */
	atomic_add(1 + 1 + 1, &tw->tw_refcnt);

	spin_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
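/* A rough sketch of the caller side in this revision (modeled on
 * tcp_time_wait() in net/ipv4/tcp_minisocks.c of the same era;
 * details may differ):
 *
 *	tw = inet_twsk_alloc(sk, TCP_TIME_WAIT);
 *	if (tw) {
 *		... copy TCP-specific state into tw ...
 *		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
 *		inet_twsk_schedule(tw, &tcp_death_row, timeo,
 *				   TCP_TIMEWAIT_LEN);
 *		inet_twsk_put(tw);
 *	}
 *
 * The trailing inet_twsk_put() releases the reference the caller
 * implicitly holds (the third of the 1 + 1 + 1 added above).
 */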

struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
{
	struct inet_timewait_sock *tw =
		kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
				 GFP_ATOMIC);
	if (tw != NULL) {
		const struct inet_sock *inet = inet_sk(sk);

		kmemcheck_annotate_bitfield(tw, flags);

		/* Give us an identity. */
		tw->tw_daddr = inet->inet_daddr;
		tw->tw_rcv_saddr = inet->inet_rcv_saddr;
		tw->tw_bound_dev_if = sk->sk_bound_dev_if;
		tw->tw_tos = inet->tos;
		tw->tw_num = inet->inet_num;
		tw->tw_state = TCP_TIME_WAIT;
		tw->tw_substate = state;
		tw->tw_sport = inet->inet_sport;
		tw->tw_dport = inet->inet_dport;
		tw->tw_family = sk->sk_family;
		tw->tw_reuse = sk->sk_reuse;
		tw->tw_hash = sk->sk_hash;
		tw->tw_ipv6only = 0;
		tw->tw_transparent = inet->transparent;
		tw->tw_prot = sk->sk_prot_creator;
		twsk_net_set(tw, hold_net(sock_net(sk)));
		/*
		 * Because we use RCU lookups, we should not set tw_refcnt
		 * to a non-null value before everything is set up for this
		 * timewait socket.
		 */
		atomic_set(&tw->tw_refcnt, 0);
		inet_twsk_dead_node_init(tw);
		__module_get(tw->tw_prot->owner);
	}

	return tw;
}
EXPORT_SYMBOL_GPL(inet_twsk_alloc);

/* Returns non-zero if quota exceeded. */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
				    const int slot)
{
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	unsigned int killed;
	int ret;

	/* NOTE: compare this to the previous version where the lock
	 * was released after detaching the chain. That was racy,
	 * because tw buckets were scheduled in a non-serialized context
	 * in 2.3 (with netfilter), and with softnet this is common,
	 * because soft irqs are not sequenced.
	 */
	killed = 0;
	ret = 0;
rescan:
	inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
		__inet_twsk_del_dead_node(tw);
		spin_unlock(&twdr->death_lock);
		__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
#endif
		inet_twsk_put(tw);
		killed++;
		spin_lock(&twdr->death_lock);
		if (killed > INET_TWDR_TWKILL_QUOTA) {
			ret = 1;
			break;
		}

		/* While we dropped twdr->death_lock, another cpu may have
		 * killed off the next TW bucket in the list, therefore
		 * do a fresh re-read of the hlist head node with the
		 * lock reacquired.  We still use the hlist traversal
		 * macro in order to get the prefetches.
		 */
		goto rescan;
	}

	twdr->tw_count -= killed;
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITED, killed);
#endif
	return ret;
}

void inet_twdr_hangman(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	unsigned int need_timer;

	twdr = (struct inet_timewait_death_row *)data;
	spin_lock(&twdr->death_lock);

	if (twdr->tw_count == 0)
		goto out;

	need_timer = 0;
	if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
		twdr->thread_slots |= (1 << twdr->slot);
		schedule_work(&twdr->twkill_work);
		need_timer = 1;
	} else {
		/* We purged the entire slot, anything left?  */
		if (twdr->tw_count)
			need_timer = 1;
		twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
	}
	if (need_timer)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_hangman);
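/* Design note (summary, not from the original file): the slow death
 * row is a wheel of INET_TWDR_TWKILL_SLOTS cells; this hangman timer
 * advances one cell every twdr->period jiffies (TCP uses
 * period = TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS). When a cell
 * holds more than INET_TWDR_TWKILL_QUOTA sockets, the rest is handed
 * to the twkill_work workqueue below so that BHs are not held off
 * for too long.
 */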

void inet_twdr_twkill_work(struct work_struct *work)
{
	struct inet_timewait_death_row *twdr =
		container_of(work, struct inet_timewait_death_row, twkill_work);
	int i;

	BUILD_BUG_ON((INET_TWDR_TWKILL_SLOTS - 1) >
			(sizeof(twdr->thread_slots) * 8));

	while (twdr->thread_slots) {
		spin_lock_bh(&twdr->death_lock);
		for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
			if (!(twdr->thread_slots & (1 << i)))
				continue;

			while (inet_twdr_do_twkill_work(twdr, i) != 0) {
				if (need_resched()) {
					spin_unlock_bh(&twdr->death_lock);
					schedule();
					spin_lock_bh(&twdr->death_lock);
				}
			}

			twdr->thread_slots &= ~(1 << i);
		}
		spin_unlock_bh(&twdr->death_lock);
	}
}
EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);

/* These are always called from BH context. See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
			  struct inet_timewait_death_row *twdr)
{
	spin_lock(&twdr->death_lock);
	if (inet_twsk_del_dead_node(tw)) {
		inet_twsk_put(tw);
		if (--twdr->tw_count == 0)
			del_timer(&twdr->tw_timer);
	}
	spin_unlock(&twdr->death_lock);
	__inet_twsk_kill(tw, twdr->hashinfo);
}
EXPORT_SYMBOL(inet_twsk_deschedule);

void inet_twsk_schedule(struct inet_timewait_sock *tw,
			struct inet_timewait_death_row *twdr,
			const int timeo, const int timewait_len)
{
	struct hlist_head *list;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent retransmitted
	 * FINs (or previous segments) are lost, the probability of such an
	 * event is p^(N+1), where p is the probability of losing a single
	 * packet, and the time to detect the loss is about RTO*(2^N - 1)
	 * with exponential backoff. The normal timewait length is chosen
	 * so that we wait at least for one retransmitted FIN (maximal RTO
	 * is 120 sec).
	 * [ BTW Linux, following BSD, violates this requirement by waiting
	 *   only for 60 sec; we should wait at least 240 secs.
	 *   Well, 240 consumes too much of resources 8)
	 * ]
	 * This interval is not reduced, so that we catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to bounds required
	 * by RTO, rather than MSL. So, if the peer understands PAWS, we
	 * kill the tw bucket after 3.5*RTO (it is important that this number
	 * is greater than the TS tick!) and detect old duplicates with the
	 * help of PAWS.
	 */
	slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;

	spin_lock(&twdr->death_lock);

	/* Unlink it, if it was scheduled */
	if (inet_twsk_del_dead_node(tw))
		twdr->tw_count--;
	else
		atomic_inc(&tw->tw_refcnt);

	if (slot >= INET_TWDR_RECYCLE_SLOTS) {
		/* Schedule to slow timer */
		if (timeo >= timewait_len) {
			slot = INET_TWDR_TWKILL_SLOTS - 1;
		} else {
			slot = DIV_ROUND_UP(timeo, twdr->period);
			if (slot >= INET_TWDR_TWKILL_SLOTS)
				slot = INET_TWDR_TWKILL_SLOTS - 1;
		}
		tw->tw_ttd = jiffies + timeo;
		slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
		list = &twdr->cells[slot];
	} else {
		tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);

		if (twdr->twcal_hand < 0) {
			twdr->twcal_hand = 0;
			twdr->twcal_jiffie = jiffies;
			twdr->twcal_timer.expires = twdr->twcal_jiffie +
					(slot << INET_TWDR_RECYCLE_TICK);
			add_timer(&twdr->twcal_timer);
		} else {
			if (time_after(twdr->twcal_timer.expires,
				       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
				mod_timer(&twdr->twcal_timer,
					  jiffies + (slot << INET_TWDR_RECYCLE_TICK));
			slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
		}
		list = &twdr->twcal_row[slot];
	}

	hlist_add_head(&tw->tw_death_node, list);

	if (twdr->tw_count++ == 0)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twsk_schedule);
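/* Worked example (illustrative; the constants depend on HZ): with
 * HZ=1000, one recycle tick is 1 << INET_TWDR_RECYCLE_TICK =
 * 128 jiffies (~128 ms), so the 32-slot twcal wheel spans roughly
 * 4 seconds. A PAWS-recycled timeout of 700 jiffies maps to
 * slot = ceil(700 / 128) = 6 on the fine-grained wheel, while the
 * default timeo = TCP_TIMEWAIT_LEN = 60*HZ overflows it and is
 * parked in the last cell of the slow wheel instead.
 */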

void inet_twdr_twcal_tick(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	twdr = (struct inet_timewait_death_row *)data;

	spin_lock(&twdr->death_lock);
	if (twdr->twcal_hand < 0)
		goto out;

	slot = twdr->twcal_hand;
	j = twdr->twcal_jiffie;

	for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
		if (time_before_eq(j, now)) {
			struct hlist_node *node, *safe;
			struct inet_timewait_sock *tw;

			inet_twsk_for_each_inmate_safe(tw, node, safe,
					&twdr->twcal_row[slot]) {
				__inet_twsk_del_dead_node(tw);
				__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
				NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
#endif
				inet_twsk_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				twdr->twcal_jiffie = j;
				twdr->twcal_hand = slot;
			}

			if (!hlist_empty(&twdr->twcal_row[slot])) {
				mod_timer(&twdr->twcal_timer, j);
				goto out;
			}
		}
		j += 1 << INET_TWDR_RECYCLE_TICK;
		slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
	}
	twdr->twcal_hand = -1;

out:
	if ((twdr->tw_count -= killed) == 0)
		del_timer(&twdr->tw_timer);
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITKILLED, killed);
#endif
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);

void inet_twsk_purge(struct inet_hashinfo *hashinfo,
		     struct inet_timewait_death_row *twdr, int family)
{
	struct inet_timewait_sock *tw;
	struct sock *sk;
	struct hlist_nulls_node *node;
	unsigned int slot;

	for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
restart_rcu:
		rcu_read_lock();
restart:
		sk_nulls_for_each_rcu(sk, node, &head->twchain) {
			tw = inet_twsk(sk);
			if ((tw->tw_family != family) ||
			    atomic_read(&twsk_net(tw)->count))
				continue;

			if (unlikely(!atomic_inc_not_zero(&tw->tw_refcnt)))
				continue;

			if (unlikely((tw->tw_family != family) ||
				     atomic_read(&twsk_net(tw)->count))) {
				inet_twsk_put(tw);
				goto restart;
			}

			rcu_read_unlock();
			local_bh_disable();
			inet_twsk_deschedule(tw, twdr);
			local_bh_enable();
			inet_twsk_put(tw);
			goto restart_rcu;
		}
		/* If the nulls value we got at the end of this lookup is
		 * not the expected one, we must restart lookup.
		 * We probably met an item that was moved to another chain.
		 */
		if (get_nulls_value(node) != slot)
			goto restart;
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(inet_twsk_purge);