/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/secure_seq.h>
#include <net/ip.h>

/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct net *net,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb != NULL) {
		write_pnet(&tb->ib_net, hold_net(net));
		tb->port = snum;
		tb->fastreuse = 0;
		tb->num_owners = 0;
		INIT_HLIST_HEAD(&tb->owners);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}
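
/*
 * Illustrative sketch, not part of the original file: a caller is
 * expected to take the bind hash chain's spinlock before creating a
 * bucket, roughly as inet_csk_get_port() does:
 *
 *	head = &hashinfo->bhash[inet_bhashfn(net, snum,
 *					     hashinfo->bhash_size)];
 *	spin_lock(&head->lock);
 *	tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
 *				     net, head, snum);
 *	...
 *	spin_unlock(&head->lock);
 */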

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		release_net(ib_net(tb));
		kmem_cache_free(cachep, tb);
	}
}

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    const unsigned short snum)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;

	atomic_inc(&hashinfo->bsockets);

	inet_sk(sk)->inet_num = snum;
	sk_add_bind_node(sk, &tb->owners);
	tb->num_owners++;
	inet_csk(sk)->icsk_bind_hash = tb;
}

/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num,
			hashinfo->bhash_size);
	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
	struct inet_bind_bucket *tb;

	atomic_dec(&hashinfo->bsockets);

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);
	tb->num_owners--;
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->inet_num = 0;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&head->lock);
}

void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_put_port);

int __inet_inherit_port(struct sock *sk, struct sock *child)
{
	struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
	unsigned short port = inet_sk(child)->inet_num;
	const int bhash = inet_bhashfn(sock_net(sk), port,
			table->bhash_size);
	struct inet_bind_hashbucket *head = &table->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	if (tb->port != port) {
		/* NOTE: using tproxy and redirecting skbs to a proxy
		 * on a different listener port breaks the assumption
		 * that the listener socket's icsk_bind_hash is the same
		 * as that of the child socket. We have to look up or
		 * create a new bind bucket for the child here. */
		struct hlist_node *node;
		inet_bind_bucket_for_each(tb, node, &head->chain) {
			if (net_eq(ib_net(tb), sock_net(sk)) &&
			    tb->port == port)
				break;
		}
		if (!node) {
			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
						     sock_net(sk), head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				return -ENOMEM;
			}
		}
	}
	inet_bind_hash(child, tb, port);
	spin_unlock(&head->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);

static inline int compute_score(struct sock *sk, struct net *net,
				const unsigned short hnum, const __be32 daddr,
				const int dif)
{
	int score = -1;
	struct inet_sock *inet = inet_sk(sk);

	if (net_eq(sock_net(sk), net) && inet->inet_num == hnum &&
			!ipv6_only_sock(sk)) {
		__be32 rcv_saddr = inet->inet_rcv_saddr;
		score = sk->sk_family == PF_INET ? 1 : 0;
		if (rcv_saddr) {
			if (rcv_saddr != daddr)
				return -1;
			score += 2;
		}
		if (sk->sk_bound_dev_if) {
			if (sk->sk_bound_dev_if != dif)
				return -1;
			score += 2;
		}
	}
	return score;
}
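
/*
 * Worked example, illustrative only: for a packet to 10.0.0.1:80, a
 * listener bound to 0.0.0.0:80 scores 1 (AF_INET, all wildcards), one
 * bound to 10.0.0.1:80 scores 3 (+2 for the exact address), and one
 * additionally bound to the receiving device scores 5 (+2 more).  Any
 * address or device mismatch returns -1 and disqualifies the socket.
 */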

/*
 * Don't inline this cruft. There are some nice properties to exploit here. The
 * BSD API does not allow a listening sock to specify the remote port nor the
 * remote address for the connection. So always assume those are both
 * wildcarded during the search since they can never be otherwise.
 */


struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif)
{
	struct sock *sk, *result;
	struct hlist_nulls_node *node;
	unsigned int hash = inet_lhashfn(net, hnum);
	struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
	int score, hiscore;

	rcu_read_lock();
begin:
	result = NULL;
	hiscore = -1;
	sk_nulls_for_each_rcu(sk, node, &ilb->head) {
		score = compute_score(sk, net, hnum, daddr, dif);
		if (score > hiscore) {
			result = sk;
			hiscore = score;
		}
	}
	/*
	 * If the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart the lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE)
		goto begin;
	if (result) {
		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
			result = NULL;
		else if (unlikely(compute_score(result, net, hnum, daddr,
				  dif) < hiscore)) {
			sock_put(result);
			goto begin;
		}
	}
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);

struct sock *__inet_lookup_established(struct net *net,
				  struct inet_hashinfo *hashinfo,
				  const __be32 saddr, const __be16 sport,
				  const __be32 daddr, const u16 hnum,
				  const int dif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_nulls_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyway.
	 */
	unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
	unsigned int slot = hash & hashinfo->ehash_mask;
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];

	rcu_read_lock();
begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (INET_MATCH(sk, net, hash, acookie,
			       saddr, daddr, ports, dif)) {
			if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
				goto begintw;
			if (unlikely(!INET_MATCH(sk, net, hash, acookie,
						 saddr, daddr, ports, dif))) {
				sock_put(sk);
				goto begin;
			}
			goto out;
		}
	}
	/*
	 * If the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart the lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;

begintw:
	/* Must check for a TIME_WAIT'er before going to listener hash. */
	sk_nulls_for_each_rcu(sk, node, &head->twchain) {
		if (INET_TW_MATCH(sk, net, hash, acookie,
				  saddr, daddr, ports, dif)) {
			if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) {
				sk = NULL;
				goto out;
			}
			if (unlikely(!INET_TW_MATCH(sk, net, hash, acookie,
						    saddr, daddr, ports, dif))) {
				sock_put(sk);
				goto begintw;
			}
			goto out;
		}
	}
	/*
	 * If the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart the lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begintw;
	sk = NULL;
out:
	rcu_read_unlock();
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);
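
/*
 * Illustrative sketch of the lookup pattern above, not part of the
 * original file.  On an RCU/SLAB_DESTROY_BY_RCU list a reader must (a)
 * re-check the key after taking a reference, since the object may have
 * been freed and reused for another connection, and (b) restart when
 * the terminating nulls value names a different chain; match() below is
 * a stand-in for INET_MATCH():
 *
 *	sk_nulls_for_each_rcu(sk, node, &head->chain) {
 *		if (!match(sk))
 *			continue;
 *		if (!atomic_inc_not_zero(&sk->sk_refcnt))
 *			continue;		// sk is being freed
 *		if (!match(sk)) {		// sk was reused, retry
 *			sock_put(sk);
 *			goto begin;
 *		}
 *		return sk;
 *	}
 *	if (get_nulls_value(node) != slot)	// walked onto another chain
 *		goto begin;
 */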

/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	__be32 daddr = inet->inet_rcv_saddr;
	__be32 saddr = inet->inet_daddr;
	int dif = sk->sk_bound_dev_if;
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
	struct net *net = sock_net(sk);
	unsigned int hash = inet_ehashfn(net, daddr, lport,
					 saddr, inet->inet_dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_nulls_node *node;
	struct inet_timewait_sock *tw;
	int twrefcnt = 0;

	spin_lock(lock);

	/* Check TIME-WAIT sockets first. */
	sk_nulls_for_each(sk2, node, &head->twchain) {
		tw = inet_twsk(sk2);

		if (INET_TW_MATCH(sk2, net, hash, acookie,
				  saddr, daddr, ports, dif)) {
			if (twsk_unique(sk, sk2, twp))
				goto unique;
			else
				goto not_unique;
		}
	}
	tw = NULL;

	/* And established part... */
	sk_nulls_for_each(sk2, node, &head->chain) {
		if (INET_MATCH(sk2, net, hash, acookie,
			       saddr, daddr, ports, dif))
			goto not_unique;
	}

unique:
	/* Must record num and sport now. Otherwise we will see the socket
	 * in the hash table with a funny identity. */
	inet->inet_num = lport;
	inet->inet_sport = htons(lport);
	sk->sk_hash = hash;
	WARN_ON(!sk_unhashed(sk));
	__sk_nulls_add_node_rcu(sk, &head->chain);
	if (tw) {
		twrefcnt = inet_twsk_unhash(tw);
		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
	}
	spin_unlock(lock);
	if (twrefcnt)
		inet_twsk_put(tw);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	if (twp) {
		*twp = tw;
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule(tw, death_row);

		inet_twsk_put(tw);
	}
	return 0;

not_unique:
	spin_unlock(lock);
	return -EADDRNOTAVAIL;
}

static inline u32 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
					  inet->inet_daddr,
					  inet->inet_dport);
}

int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct hlist_nulls_head *list;
	spinlock_t *lock;
	struct inet_ehash_bucket *head;
	int twrefcnt = 0;

	WARN_ON(!sk_unhashed(sk));

	sk->sk_hash = inet_sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock(lock);
	__sk_nulls_add_node_rcu(sk, list);
	if (tw) {
		WARN_ON(sk->sk_hash != tw->tw_hash);
		twrefcnt = inet_twsk_unhash(tw);
	}
	spin_unlock(lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	return twrefcnt;
}
EXPORT_SYMBOL_GPL(__inet_hash_nolisten);

static void __inet_hash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_listen_hashbucket *ilb;

	if (sk->sk_state != TCP_LISTEN) {
		__inet_hash_nolisten(sk, NULL);
		return;
	}

	WARN_ON(!sk_unhashed(sk));
	ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];

	spin_lock(&ilb->lock);
	__sk_nulls_add_node_rcu(sk, &ilb->head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	spin_unlock(&ilb->lock);
}

void inet_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		__inet_hash(sk);
		local_bh_enable();
	}
}
EXPORT_SYMBOL_GPL(inet_hash);

void inet_unhash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	spinlock_t *lock;
	int done;

	if (sk_unhashed(sk))
		return;

	if (sk->sk_state == TCP_LISTEN)
		lock = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)].lock;
	else
		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock_bh(lock);
	done = __sk_nulls_del_node_init_rcu(sk);
	if (done)
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	spin_unlock_bh(lock);
}
EXPORT_SYMBOL_GPL(inet_unhash);

int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u32 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **),
		int (*hash)(struct sock *sk, struct inet_timewait_sock *twp))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	const unsigned short snum = inet_sk(sk)->inet_num;
	struct inet_bind_hashbucket *head;
	struct inet_bind_bucket *tb;
	int ret;
	struct net *net = sock_net(sk);
	int twrefcnt = 1;

	if (!snum) {
		int i, remaining, low, high, port;
		static u32 hint;
		u32 offset = hint + port_offset;
		struct hlist_node *node;
		struct inet_timewait_sock *tw = NULL;

		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;

		local_bh_disable();
		for (i = 1; i <= remaining; i++) {
			port = low + (i + offset) % remaining;
			if (inet_is_reserved_local_port(port))
				continue;
			head = &hinfo->bhash[inet_bhashfn(net, port,
					hinfo->bhash_size)];
			spin_lock(&head->lock);

			/* Does not bother with rcv_saddr checks,
			 * because the established check is already
			 * unique enough.
			 */
			inet_bind_bucket_for_each(tb, node, &head->chain) {
				if (net_eq(ib_net(tb), net) &&
				    tb->port == port) {
					if (tb->fastreuse >= 0)
						goto next_port;
					WARN_ON(hlist_empty(&tb->owners));
					if (!check_established(death_row, sk,
							       port, &tw))
						goto ok;
					goto next_port;
				}
			}

			tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
					net, head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				break;
			}
			tb->fastreuse = -1;
			goto ok;

		next_port:
			spin_unlock(&head->lock);
		}
		local_bh_enable();

		return -EADDRNOTAVAIL;

ok:
		hint += i;

		/* Head lock still held and bh's disabled */
		inet_bind_hash(sk, tb, port);
		if (sk_unhashed(sk)) {
			inet_sk(sk)->inet_sport = htons(port);
			twrefcnt += hash(sk, tw);
		}
		if (tw)
			twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
		spin_unlock(&head->lock);

		if (tw) {
			inet_twsk_deschedule(tw, death_row);
			while (twrefcnt) {
				twrefcnt--;
				inet_twsk_put(tw);
			}
		}

		ret = 0;
		goto out;
	}

	head = &hinfo->bhash[inet_bhashfn(net, snum, hinfo->bhash_size)];
	tb = inet_csk(sk)->icsk_bind_hash;
	spin_lock_bh(&head->lock);
	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
		hash(sk, NULL);
		spin_unlock_bh(&head->lock);
		return 0;
	} else {
		spin_unlock(&head->lock);
		/* No definite answer... Walk to established hash table */
		ret = check_established(death_row, sk, snum, NULL);
out:
		local_bh_enable();
		return ret;
	}
}

/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk),
			__inet_check_established, __inet_hash_nolisten);
}
EXPORT_SYMBOL_GPL(inet_hash_connect);

void inet_hashinfo_init(struct inet_hashinfo *h)
{
	int i;

	atomic_set(&h->bsockets, 0);
	for (i = 0; i < INET_LHTABLE_SIZE; i++) {
		spin_lock_init(&h->listening_hash[i].lock);
		INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head,
				      i + LISTENING_NULLS_BASE);
	}
}
EXPORT_SYMBOL_GPL(inet_hashinfo_init);
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/inet6_hashtables.h>
#endif
#include <net/secure_seq.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>

static u32 inet_ehashfn(const struct net *net, const __be32 laddr,
			const __u16 lport, const __be32 faddr,
			const __be16 fport)
{
	static u32 inet_ehash_secret __read_mostly;

	net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      inet_ehash_secret + net_hash_mix(net));
}

/* This function handles inet_sock, but also timewait and request sockets
 * for IPv4/IPv6.
 */
static u32 sk_ehashfn(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6 &&
	    !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		return inet6_ehashfn(sock_net(sk),
				     &sk->sk_v6_rcv_saddr, sk->sk_num,
				     &sk->sk_v6_daddr, sk->sk_dport);
#endif
	return inet_ehashfn(sock_net(sk),
			    sk->sk_rcv_saddr, sk->sk_num,
			    sk->sk_daddr, sk->sk_dport);
}

/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct net *net,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum,
						 int l3mdev)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb) {
		write_pnet(&tb->ib_net, net);
		tb->l3mdev = l3mdev;
		tb->port = snum;
		tb->fastreuse = 0;
		tb->fastreuseport = 0;
		INIT_HLIST_HEAD(&tb->owners);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		kmem_cache_free(cachep, tb);
	}
}

bool inet_bind_bucket_match(const struct inet_bind_bucket *tb, const struct net *net,
			    unsigned short port, int l3mdev)
{
	return net_eq(ib_net(tb), net) && tb->port == port &&
		tb->l3mdev == l3mdev;
}

static void inet_bind2_bucket_init(struct inet_bind2_bucket *tb,
				   struct net *net,
				   struct inet_bind_hashbucket *head,
				   unsigned short port, int l3mdev,
				   const struct sock *sk)
{
	write_pnet(&tb->ib_net, net);
	tb->l3mdev = l3mdev;
	tb->port = port;
#if IS_ENABLED(CONFIG_IPV6)
	tb->family = sk->sk_family;
	if (sk->sk_family == AF_INET6)
		tb->v6_rcv_saddr = sk->sk_v6_rcv_saddr;
	else
#endif
		tb->rcv_saddr = sk->sk_rcv_saddr;
	INIT_HLIST_HEAD(&tb->owners);
	INIT_HLIST_HEAD(&tb->deathrow);
	hlist_add_head(&tb->node, &head->chain);
}

struct inet_bind2_bucket *inet_bind2_bucket_create(struct kmem_cache *cachep,
						   struct net *net,
						   struct inet_bind_hashbucket *head,
						   unsigned short port,
						   int l3mdev,
						   const struct sock *sk)
{
	struct inet_bind2_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb)
		inet_bind2_bucket_init(tb, net, head, port, l3mdev, sk);

	return tb;
}

/* Caller must hold hashbucket lock for this tb with local BH disabled */
void inet_bind2_bucket_destroy(struct kmem_cache *cachep, struct inet_bind2_bucket *tb)
{
	if (hlist_empty(&tb->owners) && hlist_empty(&tb->deathrow)) {
		__hlist_del(&tb->node);
		kmem_cache_free(cachep, tb);
	}
}

static bool inet_bind2_bucket_addr_match(const struct inet_bind2_bucket *tb2,
					 const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family != tb2->family)
		return false;

	if (sk->sk_family == AF_INET6)
		return ipv6_addr_equal(&tb2->v6_rcv_saddr,
				       &sk->sk_v6_rcv_saddr);
#endif
	return tb2->rcv_saddr == sk->sk_rcv_saddr;
}

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    struct inet_bind2_bucket *tb2, unsigned short port)
{
	inet_sk(sk)->inet_num = port;
	sk_add_bind_node(sk, &tb->owners);
	inet_csk(sk)->icsk_bind_hash = tb;
	sk_add_bind2_node(sk, &tb2->owners);
	inet_csk(sk)->icsk_bind2_hash = tb2;
}
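
/*
 * Illustrative note, not from the original file: after inet_bind_hash()
 * the socket is reachable through both bind tables -- bhash, keyed by
 * (net, port), and bhash2, keyed by (net, port, rcv_saddr).  A typical
 * caller resolves both buckets and holds both locks, in this order:
 *
 *	head  = &hinfo->bhash[inet_bhashfn(net, port, hinfo->bhash_size)];
 *	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
 *	spin_lock_bh(&head->lock);
 *	spin_lock(&head2->lock);
 *	inet_bind_hash(sk, tb, tb2, port);
 *	spin_unlock(&head2->lock);
 *	spin_unlock_bh(&head->lock);
 */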

/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
	struct inet_bind_hashbucket *head, *head2;
	struct net *net = sock_net(sk);
	struct inet_bind_bucket *tb;
	int bhash;

	bhash = inet_bhashfn(net, inet_sk(sk)->inet_num, hashinfo->bhash_size);
	head = &hashinfo->bhash[bhash];
	head2 = inet_bhashfn_portaddr(hashinfo, sk, net, inet_sk(sk)->inet_num);

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->inet_num = 0;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);

	spin_lock(&head2->lock);
	if (inet_csk(sk)->icsk_bind2_hash) {
		struct inet_bind2_bucket *tb2 = inet_csk(sk)->icsk_bind2_hash;

		__sk_del_bind2_node(sk);
		inet_csk(sk)->icsk_bind2_hash = NULL;
		inet_bind2_bucket_destroy(hashinfo->bind2_bucket_cachep, tb2);
	}
	spin_unlock(&head2->lock);

	spin_unlock(&head->lock);
}

void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_put_port);

int __inet_inherit_port(const struct sock *sk, struct sock *child)
{
	struct inet_hashinfo *table = tcp_or_dccp_get_hashinfo(sk);
	unsigned short port = inet_sk(child)->inet_num;
	struct inet_bind_hashbucket *head, *head2;
	bool created_inet_bind_bucket = false;
	struct net *net = sock_net(sk);
	bool update_fastreuse = false;
	struct inet_bind2_bucket *tb2;
	struct inet_bind_bucket *tb;
	int bhash, l3mdev;

	bhash = inet_bhashfn(net, port, table->bhash_size);
	head = &table->bhash[bhash];
	head2 = inet_bhashfn_portaddr(table, child, net, port);

	spin_lock(&head->lock);
	spin_lock(&head2->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	tb2 = inet_csk(sk)->icsk_bind2_hash;
	if (unlikely(!tb || !tb2)) {
		spin_unlock(&head2->lock);
		spin_unlock(&head->lock);
		return -ENOENT;
	}
	if (tb->port != port) {
		l3mdev = inet_sk_bound_l3mdev(sk);

		/* NOTE: using tproxy and redirecting skbs to a proxy
		 * on a different listener port breaks the assumption
		 * that the listener socket's icsk_bind_hash is the same
		 * as that of the child socket. We have to look up or
		 * create a new bind bucket for the child here. */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (inet_bind_bucket_match(tb, net, port, l3mdev))
				break;
		}
		if (!tb) {
			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
						     net, head, port, l3mdev);
			if (!tb) {
				spin_unlock(&head2->lock);
				spin_unlock(&head->lock);
				return -ENOMEM;
			}
			created_inet_bind_bucket = true;
		}
		update_fastreuse = true;

		goto bhash2_find;
	} else if (!inet_bind2_bucket_addr_match(tb2, child)) {
		l3mdev = inet_sk_bound_l3mdev(sk);

bhash2_find:
		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, child);
		if (!tb2) {
			tb2 = inet_bind2_bucket_create(table->bind2_bucket_cachep,
						       net, head2, port,
						       l3mdev, child);
			if (!tb2)
				goto error;
		}
	}
	if (update_fastreuse)
		inet_csk_update_fastreuse(tb, child);
	inet_bind_hash(child, tb, tb2, port);
	spin_unlock(&head2->lock);
	spin_unlock(&head->lock);

	return 0;

error:
	if (created_inet_bind_bucket)
		inet_bind_bucket_destroy(table->bind_bucket_cachep, tb);
	spin_unlock(&head2->lock);
	spin_unlock(&head->lock);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);

static struct inet_listen_hashbucket *
inet_lhash2_bucket_sk(struct inet_hashinfo *h, struct sock *sk)
{
	u32 hash;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);
	else
#endif
		hash = ipv4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	return inet_lhash2_bucket(h, hash);
}

static inline int compute_score(struct sock *sk, struct net *net,
				const unsigned short hnum, const __be32 daddr,
				const int dif, const int sdif)
{
	int score = -1;

	if (net_eq(sock_net(sk), net) && sk->sk_num == hnum &&
			!ipv6_only_sock(sk)) {
		if (sk->sk_rcv_saddr != daddr)
			return -1;

		if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
			return -1;
		score = sk->sk_bound_dev_if ? 2 : 1;

		if (sk->sk_family == PF_INET)
			score++;
		if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
			score++;
	}
	return score;
}
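
/*
 * Worked example, illustrative only: a qualifying AF_INET listener with
 * no bound device scores 2 (1 + 1 for the family), one bound to the
 * receiving device scores 3 (2 + 1), and either gains one more point
 * when sk_incoming_cpu matches the CPU handling the packet, for a
 * maximum of 4.  Wildcard listeners are found via the separate
 * INADDR_ANY bucket, where daddr is passed as 0.
 */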

static inline struct sock *lookup_reuseport(struct net *net, struct sock *sk,
					    struct sk_buff *skb, int doff,
					    __be32 saddr, __be16 sport,
					    __be32 daddr, unsigned short hnum)
{
	struct sock *reuse_sk = NULL;
	u32 phash;

	if (sk->sk_reuseport) {
		phash = inet_ehashfn(net, daddr, hnum, saddr, sport);
		reuse_sk = reuseport_select_sock(sk, phash, skb, doff);
	}
	return reuse_sk;
}

/*
 * There are some nice properties to exploit here. The BSD API
 * does not allow a listening sock to specify the remote port nor the
 * remote address for the connection. So always assume those are both
 * wildcarded during the search since they can never be otherwise.
 */

/* called with rcu_read_lock(): no refcount taken on the socket */
static struct sock *inet_lhash2_lookup(struct net *net,
				struct inet_listen_hashbucket *ilb2,
				struct sk_buff *skb, int doff,
				const __be32 saddr, __be16 sport,
				const __be32 daddr, const unsigned short hnum,
				const int dif, const int sdif)
{
	struct sock *sk, *result = NULL;
	struct hlist_nulls_node *node;
	int score, hiscore = 0;

	sk_nulls_for_each_rcu(sk, node, &ilb2->nulls_head) {
		score = compute_score(sk, net, hnum, daddr, dif, sdif);
		if (score > hiscore) {
			result = lookup_reuseport(net, sk, skb, doff,
						  saddr, sport, daddr, hnum);
			if (result)
				return result;

			result = sk;
			hiscore = score;
		}
	}

	return result;
}

static inline struct sock *inet_lookup_run_bpf(struct net *net,
					       struct inet_hashinfo *hashinfo,
					       struct sk_buff *skb, int doff,
					       __be32 saddr, __be16 sport,
					       __be32 daddr, u16 hnum, const int dif)
{
	struct sock *sk, *reuse_sk;
	bool no_reuseport;

	if (hashinfo != net->ipv4.tcp_death_row.hashinfo)
		return NULL; /* only TCP is supported */

	no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP, saddr, sport,
					    daddr, hnum, dif, &sk);
	if (no_reuseport || IS_ERR_OR_NULL(sk))
		return sk;

	reuse_sk = lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum);
	if (reuse_sk)
		sk = reuse_sk;
	return sk;
}

struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    struct sk_buff *skb, int doff,
				    const __be32 saddr, __be16 sport,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif, const int sdif)
{
	struct inet_listen_hashbucket *ilb2;
	struct sock *result = NULL;
	unsigned int hash2;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
		result = inet_lookup_run_bpf(net, hashinfo, skb, doff,
					     saddr, sport, daddr, hnum, dif);
		if (result)
			goto done;
	}

	hash2 = ipv4_portaddr_hash(net, daddr, hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);

	result = inet_lhash2_lookup(net, ilb2, skb, doff,
				    saddr, sport, daddr, hnum,
				    dif, sdif);
	if (result)
		goto done;

	/* Lookup lhash2 with INADDR_ANY */
	hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);

	result = inet_lhash2_lookup(net, ilb2, skb, doff,
				    saddr, sport, htonl(INADDR_ANY), hnum,
				    dif, sdif);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);
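
/*
 * Illustrative note, not from the original file: the lookup order above
 * for a packet to daddr:hnum is
 *
 *	1. BPF sk_lookup programs, when any are attached to the netns;
 *	2. the lhash2 bucket for (daddr, hnum);
 *	3. the lhash2 bucket for (INADDR_ANY, hnum);
 *
 * so a listener bound to a specific address always beats a wildcard
 * listener on the same port.
 */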

/* All sockets share a common refcount, but have different destructors */
void sock_gen_put(struct sock *sk)
{
	if (!refcount_dec_and_test(&sk->sk_refcnt))
		return;

	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_free(inet_twsk(sk));
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		reqsk_free(inet_reqsk(sk));
	else
		sk_free(sk);
}
EXPORT_SYMBOL_GPL(sock_gen_put);

void sock_edemux(struct sk_buff *skb)
{
	sock_gen_put(skb->sk);
}
EXPORT_SYMBOL(sock_edemux);

struct sock *__inet_lookup_established(struct net *net,
				  struct inet_hashinfo *hashinfo,
				  const __be32 saddr, const __be16 sport,
				  const __be32 daddr, const u16 hnum,
				  const int dif, const int sdif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_nulls_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyway.
	 */
	unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
	unsigned int slot = hash & hashinfo->ehash_mask;
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];

begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		if (likely(inet_match(net, sk, acookie, ports, dif, sdif))) {
			if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
				goto out;
			if (unlikely(!inet_match(net, sk, acookie,
						 ports, dif, sdif))) {
				sock_gen_put(sk);
				goto begin;
			}
			goto found;
		}
	}
	/*
	 * If the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart the lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;
out:
	sk = NULL;
found:
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);

/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	__be32 daddr = inet->inet_rcv_saddr;
	__be32 saddr = inet->inet_daddr;
	int dif = sk->sk_bound_dev_if;
	struct net *net = sock_net(sk);
	int sdif = l3mdev_master_ifindex_by_index(net, dif);
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
	unsigned int hash = inet_ehashfn(net, daddr, lport,
					 saddr, inet->inet_dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_nulls_node *node;
	struct inet_timewait_sock *tw = NULL;

	spin_lock(lock);

	sk_nulls_for_each(sk2, node, &head->chain) {
		if (sk2->sk_hash != hash)
			continue;

		if (likely(inet_match(net, sk2, acookie, ports, dif, sdif))) {
			if (sk2->sk_state == TCP_TIME_WAIT) {
				tw = inet_twsk(sk2);
				if (twsk_unique(sk, sk2, twp))
					break;
			}
			goto not_unique;
		}
	}

	/* Must record num and sport now. Otherwise we will see the socket
	 * in the hash table with a funny identity.
	 */
	inet->inet_num = lport;
	inet->inet_sport = htons(lport);
	sk->sk_hash = hash;
	WARN_ON(!sk_unhashed(sk));
	__sk_nulls_add_node_rcu(sk, &head->chain);
	if (tw) {
		sk_nulls_del_node_init_rcu((struct sock *)tw);
		__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
	}
	spin_unlock(lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	if (twp) {
		*twp = tw;
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule_put(tw);
	}
	return 0;

not_unique:
	spin_unlock(lock);
	return -EADDRNOTAVAIL;
}

static u64 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);

	return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
					  inet->inet_daddr,
					  inet->inet_dport);
}

/* Searches for an existing socket in the ehash bucket list.
 * Returns true if found, false otherwise.
 */
static bool inet_ehash_lookup_by_sk(struct sock *sk,
				    struct hlist_nulls_head *list)
{
	const __portpair ports = INET_COMBINED_PORTS(sk->sk_dport, sk->sk_num);
	const int sdif = sk->sk_bound_dev_if;
	const int dif = sk->sk_bound_dev_if;
	const struct hlist_nulls_node *node;
	struct net *net = sock_net(sk);
	struct sock *esk;

	INET_ADDR_COOKIE(acookie, sk->sk_daddr, sk->sk_rcv_saddr);

	sk_nulls_for_each_rcu(esk, node, list) {
		if (esk->sk_hash != sk->sk_hash)
			continue;
		if (sk->sk_family == AF_INET) {
			if (unlikely(inet_match(net, esk, acookie,
						ports, dif, sdif))) {
				return true;
			}
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			if (unlikely(inet6_match(net, esk,
						 &sk->sk_v6_daddr,
						 &sk->sk_v6_rcv_saddr,
						 ports, dif, sdif))) {
				return true;
			}
		}
#endif
	}
	return false;
}

/* Insert a socket into ehash, and possibly remove another one
 * (the other one can be a SYN_RECV or TIMEWAIT socket).
 * If a duplicate socket already exists, sk is not inserted
 * and *found_dup_sk is set to true.
 */
bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
{
	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
	struct inet_ehash_bucket *head;
	struct hlist_nulls_head *list;
	spinlock_t *lock;
	bool ret = true;

	WARN_ON_ONCE(!sk_unhashed(sk));

	sk->sk_hash = sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock(lock);
	if (osk) {
		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
		ret = sk_hashed(osk);
		if (ret) {
			/* Before deleting the node, we insert a new one to
			 * make sure that the lookup process will not miss
			 * either of them and that at least one node exists
			 * in the ehash table at all times. Otherwise there's
			 * a tiny chance that a lookup could find nothing in
			 * the ehash table.
			 */
			__sk_nulls_add_node_tail_rcu(sk, list);
			sk_nulls_del_node_init_rcu(osk);
		}
		goto unlock;
	}
	if (found_dup_sk) {
		*found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
		if (*found_dup_sk)
			ret = false;
	}

	if (ret)
		__sk_nulls_add_node_rcu(sk, list);

unlock:
	spin_unlock(lock);

	return ret;
}
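
/*
 * Illustrative timeline, not from the original file, for the swap above
 * when replacing osk with sk on the same chain:
 *
 *	__sk_nulls_add_node_tail_rcu(sk, list);	// ... -> osk -> ... -> sk
 *	sk_nulls_del_node_init_rcu(osk);	// osk gone, sk remains
 *
 * A concurrent RCU lookup therefore sees at least one of the two
 * sockets at every instant; the 4-tuple never transiently vanishes
 * from the ehash table.
 */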

bool inet_ehash_nolisten(struct sock *sk, struct sock *osk, bool *found_dup_sk)
{
	bool ok = inet_ehash_insert(sk, osk, found_dup_sk);

	if (ok) {
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	} else {
		this_cpu_inc(*sk->sk_prot->orphan_count);
		inet_sk_set_state(sk, TCP_CLOSE);
		sock_set_flag(sk, SOCK_DEAD);
		inet_csk_destroy_sock(sk);
	}
	return ok;
}
EXPORT_SYMBOL_GPL(inet_ehash_nolisten);

static int inet_reuseport_add_sock(struct sock *sk,
				   struct inet_listen_hashbucket *ilb)
{
	struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
	const struct hlist_nulls_node *node;
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
		if (sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
		    inet_csk(sk2)->icsk_bind_hash == tb &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false))
			return reuseport_add_sock(sk, sk2,
						  inet_rcv_saddr_any(sk));
	}

	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
}

int __inet_hash(struct sock *sk, struct sock *osk)
{
	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
	struct inet_listen_hashbucket *ilb2;
	int err = 0;

	if (sk->sk_state != TCP_LISTEN) {
		local_bh_disable();
		inet_ehash_nolisten(sk, osk, NULL);
		local_bh_enable();
		return 0;
	}
	WARN_ON(!sk_unhashed(sk));
	ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);

	spin_lock(&ilb2->lock);
	if (sk->sk_reuseport) {
		err = inet_reuseport_add_sock(sk, ilb2);
		if (err)
			goto unlock;
	}
	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
	    sk->sk_family == AF_INET6)
		__sk_nulls_add_node_tail_rcu(sk, &ilb2->nulls_head);
	else
		__sk_nulls_add_node_rcu(sk, &ilb2->nulls_head);
	sock_set_flag(sk, SOCK_RCU_FREE);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
unlock:
	spin_unlock(&ilb2->lock);

	return err;
}
EXPORT_SYMBOL(__inet_hash);

int inet_hash(struct sock *sk)
{
	int err = 0;

	if (sk->sk_state != TCP_CLOSE)
		err = __inet_hash(sk, NULL);

	return err;
}
EXPORT_SYMBOL_GPL(inet_hash);

void inet_unhash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);

	if (sk_unhashed(sk))
		return;

	if (sk->sk_state == TCP_LISTEN) {
		struct inet_listen_hashbucket *ilb2;

		ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);
		/* Don't disable bottom halves while acquiring the lock to
		 * avoid circular locking dependency on PREEMPT_RT.
		 */
		spin_lock(&ilb2->lock);
		if (sk_unhashed(sk)) {
			spin_unlock(&ilb2->lock);
			return;
		}

		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_stop_listen_sock(sk);

		__sk_nulls_del_node_init_rcu(sk);
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
		spin_unlock(&ilb2->lock);
	} else {
		spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

		spin_lock_bh(lock);
		if (sk_unhashed(sk)) {
			spin_unlock_bh(lock);
			return;
		}
		__sk_nulls_del_node_init_rcu(sk);
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
		spin_unlock_bh(lock);
	}
}
EXPORT_SYMBOL_GPL(inet_unhash);

static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb,
				    const struct net *net, unsigned short port,
				    int l3mdev, const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family != tb->family)
		return false;

	if (sk->sk_family == AF_INET6)
		return net_eq(ib2_net(tb), net) && tb->port == port &&
			tb->l3mdev == l3mdev &&
			ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
	else
#endif
	return net_eq(ib2_net(tb), net) && tb->port == port &&
		tb->l3mdev == l3mdev && tb->rcv_saddr == sk->sk_rcv_saddr;
}

bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
				      unsigned short port, int l3mdev, const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr addr_any = {};

	if (sk->sk_family != tb->family)
		return false;

	if (sk->sk_family == AF_INET6)
		return net_eq(ib2_net(tb), net) && tb->port == port &&
			tb->l3mdev == l3mdev &&
			ipv6_addr_equal(&tb->v6_rcv_saddr, &addr_any);
	else
#endif
	return net_eq(ib2_net(tb), net) && tb->port == port &&
		tb->l3mdev == l3mdev && tb->rcv_saddr == 0;
}

/* The socket's bhash2 hashbucket spinlock must be held when this is called */
struct inet_bind2_bucket *
inet_bind2_bucket_find(const struct inet_bind_hashbucket *head, const struct net *net,
		       unsigned short port, int l3mdev, const struct sock *sk)
{
	struct inet_bind2_bucket *bhash2 = NULL;

	inet_bind_bucket_for_each(bhash2, &head->chain)
		if (inet_bind2_bucket_match(bhash2, net, port, l3mdev, sk))
			break;

	return bhash2;
}
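
/*
 * Illustrative sketch, not part of the original file: callers resolve
 * the bhash2 bucket head and hold its lock around the lookup, as
 * __inet_hash_connect() below does:
 *
 *	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
 *	spin_lock(&head2->lock);
 *	tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
 *	...
 *	spin_unlock(&head2->lock);
 */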

struct inet_bind_hashbucket *
inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, int port)
{
	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
	u32 hash;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr addr_any = {};

	if (sk->sk_family == AF_INET6)
		hash = ipv6_portaddr_hash(net, &addr_any, port);
	else
#endif
		hash = ipv4_portaddr_hash(net, 0, port);

	return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
}

static void inet_update_saddr(struct sock *sk, void *saddr, int family)
{
	if (family == AF_INET) {
		inet_sk(sk)->inet_saddr = *(__be32 *)saddr;
		sk_rcv_saddr_set(sk, inet_sk(sk)->inet_saddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else {
		sk->sk_v6_rcv_saddr = *(struct in6_addr *)saddr;
	}
#endif
}

static int __inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family, bool reset)
{
	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
	struct inet_bind_hashbucket *head, *head2;
	struct inet_bind2_bucket *tb2, *new_tb2;
	int l3mdev = inet_sk_bound_l3mdev(sk);
	int port = inet_sk(sk)->inet_num;
	struct net *net = sock_net(sk);
	int bhash;

	if (!inet_csk(sk)->icsk_bind2_hash) {
		/* Not bind()ed before. */
		if (reset)
			inet_reset_saddr(sk);
		else
			inet_update_saddr(sk, saddr, family);

		return 0;
	}

	/* Allocate a bind2 bucket ahead of time to avoid permanently putting
	 * the bhash2 table in an inconsistent state if a new tb2 bucket
	 * allocation fails.
	 */
	new_tb2 = kmem_cache_alloc(hinfo->bind2_bucket_cachep, GFP_ATOMIC);
	if (!new_tb2) {
		if (reset) {
			/* The (INADDR_ANY, port) bucket might have already
			 * been freed, in which case we cannot fix up
			 * icsk_bind2_hash, so we give up and unlink sk from
			 * bhash/bhash2 rather than leave bhash2 inconsistent.
			 */
			inet_put_port(sk);
			inet_reset_saddr(sk);
		}

		return -ENOMEM;
	}

	bhash = inet_bhashfn(net, port, hinfo->bhash_size);
	head = &hinfo->bhash[bhash];
	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);

	/* If we changed saddr locklessly, another thread
	 * iterating over bhash might see a corrupted address.
	 */
	spin_lock_bh(&head->lock);

	spin_lock(&head2->lock);
	__sk_del_bind2_node(sk);
	inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, inet_csk(sk)->icsk_bind2_hash);
	spin_unlock(&head2->lock);

	if (reset)
		inet_reset_saddr(sk);
	else
		inet_update_saddr(sk, saddr, family);

	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);

	spin_lock(&head2->lock);
	tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
	if (!tb2) {
		tb2 = new_tb2;
		inet_bind2_bucket_init(tb2, net, head2, port, l3mdev, sk);
	}
	sk_add_bind2_node(sk, &tb2->owners);
	inet_csk(sk)->icsk_bind2_hash = tb2;
	spin_unlock(&head2->lock);

	spin_unlock_bh(&head->lock);

	if (tb2 != new_tb2)
		kmem_cache_free(hinfo->bind2_bucket_cachep, new_tb2);

	return 0;
}

int inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family)
{
	return __inet_bhash2_update_saddr(sk, saddr, family, false);
}
EXPORT_SYMBOL_GPL(inet_bhash2_update_saddr);

void inet_bhash2_reset_saddr(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		__inet_bhash2_update_saddr(sk, NULL, 0, true);
}
EXPORT_SYMBOL_GPL(inet_bhash2_reset_saddr);

/* RFC 6056 3.3.4.  Algorithm 4: Double-Hash Port Selection Algorithm
 * Note that we use 32bit integers (vs RFC 'short integers')
 * because 2^16 is not a multiple of num_ephemeral and this
 * property might be used by a clever attacker.
 *
 * The RFC claims using TABLE_LENGTH=10 buckets gives an improvement, but
 * attacks have since been demonstrated, so we use 65536 by default instead
 * to really give more isolation and privacy, at the expense of 256kB
 * of kernel memory.
 */
#define INET_TABLE_PERTURB_SIZE (1 << CONFIG_INET_TABLE_PERTURB_ORDER)
static u32 *table_perturb;
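
/*
 * Worked example, illustrative only, of how the double hash is consumed
 * below: the low bits of the 64bit port_offset pick a perturbation
 * counter, and the high bits give a connection-specific starting point:
 *
 *	index  = port_offset & (INET_TABLE_PERTURB_SIZE - 1);
 *	offset = (table_perturb[index] + (port_offset >> 32)) % remaining;
 *
 * The counter is bumped after every successful connect, so two
 * consecutive connections to the same destination do not scan from the
 * same starting port.
 */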

int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u64 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_bind_hashbucket *head, *head2;
	struct inet_timewait_sock *tw = NULL;
	int port = inet_sk(sk)->inet_num;
	struct net *net = sock_net(sk);
	struct inet_bind2_bucket *tb2;
	struct inet_bind_bucket *tb;
	bool tb_created = false;
	u32 remaining, offset;
	int ret, i, low, high;
	int l3mdev;
	u32 index;

	if (port) {
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		tb = inet_csk(sk)->icsk_bind_hash;
		spin_lock_bh(&head->lock);
		if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
			inet_ehash_nolisten(sk, NULL, NULL);
			spin_unlock_bh(&head->lock);
			return 0;
		}
		spin_unlock(&head->lock);
		/* No definite answer... Walk to established hash table */
		ret = check_established(death_row, sk, port, NULL);
		local_bh_enable();
		return ret;
	}

	l3mdev = inet_sk_bound_l3mdev(sk);

	inet_get_local_port_range(net, &low, &high);
	high++; /* [32768, 60999] -> [32768, 61000) */
	remaining = high - low;
	if (likely(remaining > 1))
		remaining &= ~1U;

	get_random_sleepable_once(table_perturb,
				  INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
	index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);

	offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
	offset %= remaining;

	/* In the first pass we try ports of @low parity.
	 * inet_csk_get_port() does the opposite choice.
	 */
	offset &= ~1U;
other_parity_scan:
	port = low + offset;
	for (i = 0; i < remaining; i += 2, port += 2) {
		if (unlikely(port >= high))
			port -= remaining;
		if (inet_is_local_reserved_port(net, port))
			continue;
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);

		/* Does not bother with rcv_saddr checks, because
		 * the established check is already unique enough.
		 */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (inet_bind_bucket_match(tb, net, port, l3mdev)) {
				if (tb->fastreuse >= 0 ||
				    tb->fastreuseport >= 0)
					goto next_port;
				WARN_ON(hlist_empty(&tb->owners));
				if (!check_established(death_row, sk,
						       port, &tw))
					goto ok;
				goto next_port;
			}
		}

		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
					     net, head, port, l3mdev);
		if (!tb) {
			spin_unlock_bh(&head->lock);
			return -ENOMEM;
		}
		tb_created = true;
		tb->fastreuse = -1;
		tb->fastreuseport = -1;
		goto ok;
next_port:
		spin_unlock_bh(&head->lock);
		cond_resched();
	}

	offset++;
	if ((offset & 1) && remaining > 1)
		goto other_parity_scan;

	return -EADDRNOTAVAIL;

ok:
	/* Find the corresponding tb2 bucket since we need to
	 * add the socket to the bhash2 table as well
	 */
	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
	spin_lock(&head2->lock);

	tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
	if (!tb2) {
		tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep, net,
					       head2, port, l3mdev, sk);
		if (!tb2)
			goto error;
	}

	/* Here we want to add a little bit of randomness to the next source
	 * port that will be chosen. We use a max() with a random value here
	 * so that on low contention the randomness is maximal and on high
	 * contention it may be nonexistent.
	 */
	i = max_t(int, i, get_random_u32_below(8) * 2);
	WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);

	/* Head lock still held and bh's disabled */
	inet_bind_hash(sk, tb, tb2, port);

	if (sk_unhashed(sk)) {
		inet_sk(sk)->inet_sport = htons(port);
		inet_ehash_nolisten(sk, (struct sock *)tw, NULL);
	}
	if (tw)
		inet_twsk_bind_unhash(tw, hinfo);

	spin_unlock(&head2->lock);
	spin_unlock(&head->lock);

	if (tw)
		inet_twsk_deschedule_put(tw);
	local_bh_enable();
	return 0;

error:
	spin_unlock(&head2->lock);
	if (tb_created)
		inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
	spin_unlock_bh(&head->lock);
	return -ENOMEM;
}

/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	u64 port_offset = 0;

	if (!inet_sk(sk)->inet_num)
		port_offset = inet_sk_port_offset(sk);
	return __inet_hash_connect(death_row, sk, port_offset,
				   __inet_check_established);
}
EXPORT_SYMBOL_GPL(inet_hash_connect);

static void init_hashinfo_lhash2(struct inet_hashinfo *h)
{
	int i;

	for (i = 0; i <= h->lhash2_mask; i++) {
		spin_lock_init(&h->lhash2[i].lock);
		INIT_HLIST_NULLS_HEAD(&h->lhash2[i].nulls_head,
				      i + LISTENING_NULLS_BASE);
	}
}

void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
				unsigned long numentries, int scale,
				unsigned long low_limit,
				unsigned long high_limit)
{
	h->lhash2 = alloc_large_system_hash(name,
					    sizeof(*h->lhash2),
					    numentries,
					    scale,
					    0,
					    NULL,
					    &h->lhash2_mask,
					    low_limit,
					    high_limit);
	init_hashinfo_lhash2(h);

	/* this one is used for source ports of outgoing connections */
	table_perturb = alloc_large_system_hash("Table-perturb",
						sizeof(*table_perturb),
						INET_TABLE_PERTURB_SIZE,
						0, 0, NULL, NULL,
						INET_TABLE_PERTURB_SIZE,
						INET_TABLE_PERTURB_SIZE);
}

int inet_hashinfo2_init_mod(struct inet_hashinfo *h)
{
	h->lhash2 = kmalloc_array(INET_LHTABLE_SIZE, sizeof(*h->lhash2), GFP_KERNEL);
	if (!h->lhash2)
		return -ENOMEM;

	h->lhash2_mask = INET_LHTABLE_SIZE - 1;
	/* INET_LHTABLE_SIZE must be a power of 2 */
	BUG_ON(INET_LHTABLE_SIZE & h->lhash2_mask);

	init_hashinfo_lhash2(h);
	return 0;
}
EXPORT_SYMBOL_GPL(inet_hashinfo2_init_mod);

int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
	unsigned int locksz = sizeof(spinlock_t);
	unsigned int i, nblocks = 1;

	if (locksz != 0) {
		/* allocate 2 cache lines or at least one spinlock per cpu */
		nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
		nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());

		/* no more locks than number of hash buckets */
		nblocks = min(nblocks, hashinfo->ehash_mask + 1);

		hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL);
		if (!hashinfo->ehash_locks)
			return -ENOMEM;

		for (i = 0; i < nblocks; i++)
			spin_lock_init(&hashinfo->ehash_locks[i]);
	}
	hashinfo->ehash_locks_mask = nblocks - 1;
	return 0;
}
EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc);
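
/*
 * Worked example, illustrative only: with L1_CACHE_BYTES == 64,
 * sizeof(spinlock_t) == 4 (no lock debugging) and 8 possible cpus,
 * the sizing above computes
 *
 *	nblocks = max(2 * 64 / 4, 1)         =  32 locks
 *	nblocks = roundup_pow_of_two(32 * 8) = 256 locks
 *	nblocks = min(256, ehash_mask + 1)
 *
 * i.e. two cache lines worth of spinlocks per cpu, capped at one lock
 * per ehash bucket.
 */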

struct inet_hashinfo *inet_pernet_hashinfo_alloc(struct inet_hashinfo *hashinfo,
						 unsigned int ehash_entries)
{
	struct inet_hashinfo *new_hashinfo;
	int i;
	new_hashinfo = kmemdup(hashinfo, sizeof(*hashinfo), GFP_KERNEL);
	if (!new_hashinfo)
		goto err;

	new_hashinfo->ehash = vmalloc_huge(ehash_entries * sizeof(struct inet_ehash_bucket),
					   GFP_KERNEL_ACCOUNT);
	if (!new_hashinfo->ehash)
		goto free_hashinfo;

	new_hashinfo->ehash_mask = ehash_entries - 1;

	if (inet_ehash_locks_alloc(new_hashinfo))
		goto free_ehash;

	for (i = 0; i < ehash_entries; i++)
		INIT_HLIST_NULLS_HEAD(&new_hashinfo->ehash[i].chain, i);

	new_hashinfo->pernet = true;

	return new_hashinfo;

free_ehash:
	vfree(new_hashinfo->ehash);
free_hashinfo:
	kfree(new_hashinfo);
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_pernet_hashinfo_alloc);

void inet_pernet_hashinfo_free(struct inet_hashinfo *hashinfo)
{
	if (!hashinfo->pernet)
		return;

	inet_ehash_locks_free(hashinfo);
	vfree(hashinfo->ehash);
	kfree(hashinfo);
}
EXPORT_SYMBOL_GPL(inet_pernet_hashinfo_free);