/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 * Authors:     Lotsa people, from code originally in tcp
 */

#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H


#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/netns/hash.h>

#include <linux/refcount.h>
#include <asm/byteorder.h>

/* This is for all connections with a full identity, no wildcards.
 * The 'e' prefix stands for Established, but we really put all sockets
 * except LISTEN ones here.
 */
struct inet_ehash_bucket {
        struct hlist_nulls_head chain;
};

/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *      1) Sockets bound to different interfaces may share a local port.
 *         Failing that, goto test 2.
 *      2) If all sockets have sk->sk_reuse set, and none of them are in
 *         TCP_LISTEN state, the port may be shared.
 *         Failing that, goto test 3.
 *      3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *         address, and none of them are the same, the port may be
 *         shared.
 *         Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit; if it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all,
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (i.e. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an ftp server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-)  -DaveM
 */
#define FASTREUSEPORT_ANY       1
#define FASTREUSEPORT_STRICT    2
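
/* Illustrative sketch (editor's note, not part of the kernel API): the
 * bind-time fast path described above boils down to a check roughly like
 * the one below, assuming "tb" is the inet_bind_bucket found for the
 * requested port.  The helper name "can_fast_bind" is hypothetical.
 *
 *      static bool can_fast_bind(const struct inet_bind_bucket *tb,
 *                                const struct sock *sk)
 *      {
 *              // If every current owner passed the sk_reuse/!LISTEN test,
 *              // tb->fastreuse is set and a reusing, non-listening socket
 *              // may bind without walking tb->owners at all.
 *              return tb->fastreuse > 0 && sk->sk_reuse &&
 *                     sk->sk_state != TCP_LISTEN;
 *      }
 */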

struct inet_bind_bucket {
        possible_net_t          ib_net;
        int                     l3mdev;
        unsigned short          port;
        signed char             fastreuse;
        signed char             fastreuseport;
        kuid_t                  fastuid;
#if IS_ENABLED(CONFIG_IPV6)
        struct in6_addr         fast_v6_rcv_saddr;
#endif
        __be32                  fast_rcv_saddr;
        unsigned short          fast_sk_family;
        bool                    fast_ipv6_only;
        struct hlist_node       node;
        struct hlist_head       owners;
};

static inline struct net *ib_net(struct inet_bind_bucket *ib)
{
        return read_pnet(&ib->ib_net);
}

#define inet_bind_bucket_for_each(tb, head) \
        hlist_for_each_entry(tb, head, node)

struct inet_bind_hashbucket {
        spinlock_t              lock;
        struct hlist_head       chain;
};

/* Sockets can be hashed in the established or the listening table.
 * We must use different 'nulls' end-of-chain values for all hash buckets:
 * a socket might transition from ESTABLISHED to LISTEN state without
 * an RCU grace period, and a lookup in the ehash table needs to handle
 * this case.
 */
#define LISTENING_NULLS_BASE (1U << 29)
struct inet_listen_hashbucket {
        spinlock_t              lock;
        unsigned int            count;
        union {
                struct hlist_head       head;
                struct hlist_nulls_head nulls_head;
        };
};

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE       32      /* Yes, really, this is all you need. */

struct inet_hashinfo {
        /* This is for sockets with full identity only.  Sockets here will
         * always be without wildcards and will have the following invariant:
         *
         *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
         *
         */
        struct inet_ehash_bucket        *ehash;
        spinlock_t                      *ehash_locks;
        unsigned int                    ehash_mask;
        unsigned int                    ehash_locks_mask;

        /* Ok, let's try this, I give up, we do need a local binding
         * TCP hash as well as the others for fast bind/connect.
         */
        struct kmem_cache               *bind_bucket_cachep;
        struct inet_bind_hashbucket     *bhash;
        unsigned int                    bhash_size;

        /* The 2nd listener table hashed by local port and address */
        unsigned int                    lhash2_mask;
        struct inet_listen_hashbucket   *lhash2;

        /* All the above members are written once at bootup and
         * never written again _or_ are predominantly read-access.
         *
         * Now align to a new cache line as all the following members
         * might be often dirty.
         */
        /* All sockets in TCP_LISTEN state will be in listening_hash.
         * This is the only table where wildcard'd TCP sockets can
         * exist.  listening_hash is only hashed by local port number.
         * If lhash2 is initialized, the same socket will also be hashed
         * to lhash2 by port and address.
         */
        struct inet_listen_hashbucket   listening_hash[INET_LHTABLE_SIZE]
                                        ____cacheline_aligned_in_smp;
};
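
/* Sizing note (editor's addition): both hash arrays have power-of-two sizes,
 * so a bucket is picked by masking rather than by modulo.  For example, with
 * ehash_mask == 65535 (65536 buckets) a hash of 0x12345678 selects bucket
 * 0x5678.  ehash_locks is typically much smaller than ehash, so several
 * buckets share one lock through ehash_locks_mask.
 */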

#define inet_lhash2_for_each_icsk_rcu(__icsk, list) \
        hlist_for_each_entry_rcu(__icsk, list, icsk_listen_portaddr_node)

static inline struct inet_listen_hashbucket *
inet_lhash2_bucket(struct inet_hashinfo *h, u32 hash)
{
        return &h->lhash2[hash & h->lhash2_mask];
}

static inline struct inet_ehash_bucket *inet_ehash_bucket(
        struct inet_hashinfo *hashinfo,
        unsigned int hash)
{
        return &hashinfo->ehash[hash & hashinfo->ehash_mask];
}

static inline spinlock_t *inet_ehash_lockp(
        struct inet_hashinfo *hashinfo,
        unsigned int hash)
{
        return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
}
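
/* Usage sketch (editor's addition, modelled loosely on inet_ehash_insert()):
 * a writer resolves the bucket and its lock from the same precomputed
 * sk->sk_hash, roughly as follows:
 *
 *      struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, sk->sk_hash);
 *      spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
 *
 *      spin_lock(lock);
 *      __sk_nulls_add_node_rcu(sk, &head->chain);
 *      spin_unlock(lock);
 */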

int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);

static inline void inet_hashinfo2_free_mod(struct inet_hashinfo *h)
{
        kfree(h->lhash2);
        h->lhash2 = NULL;
}

static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
        kvfree(hashinfo->ehash_locks);
        hashinfo->ehash_locks = NULL;
}

static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
                                        int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
        return inet_bound_dev_eq(!!net->ipv4.sysctl_tcp_l3mdev_accept,
                                 bound_dev_if, dif, sdif);
#else
        return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
#endif
}

struct inet_bind_bucket *
inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
                        struct inet_bind_hashbucket *head,
                        const unsigned short snum, int l3mdev);
void inet_bind_bucket_destroy(struct kmem_cache *cachep,
                              struct inet_bind_bucket *tb);

static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
                               const u32 bhash_size)
{
        return (lport + net_hash_mix(net)) & (bhash_size - 1);
}

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
                    const unsigned short snum);

/* These can have wildcards, don't try too hard. */
static inline u32 inet_lhashfn(const struct net *net, const unsigned short num)
{
        return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1);
}

static inline int inet_sk_listen_hashfn(const struct sock *sk)
{
        return inet_lhashfn(sock_net(sk), inet_sk(sk)->inet_num);
}

/* Caller must disable local BH processing. */
int __inet_inherit_port(const struct sock *sk, struct sock *child);

void inet_put_port(struct sock *sk);

void inet_hashinfo_init(struct inet_hashinfo *h);
void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
                         unsigned long numentries, int scale,
                         unsigned long low_limit,
                         unsigned long high_limit);
int inet_hashinfo2_init_mod(struct inet_hashinfo *h);

bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk);
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk,
                         bool *found_dup_sk);
int __inet_hash(struct sock *sk, struct sock *osk);
int inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);

struct sock *__inet_lookup_listener(struct net *net,
                                    struct inet_hashinfo *hashinfo,
                                    struct sk_buff *skb, int doff,
                                    const __be32 saddr, const __be16 sport,
                                    const __be32 daddr,
                                    const unsigned short hnum,
                                    const int dif, const int sdif);

static inline struct sock *inet_lookup_listener(struct net *net,
                struct inet_hashinfo *hashinfo,
                struct sk_buff *skb, int doff,
                __be32 saddr, __be16 sport,
                __be32 daddr, __be16 dport, int dif, int sdif)
{
        return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
                                      daddr, ntohs(dport), dif, sdif);
}

/* Socket demux engine toys. */
/* What happens here is ugly; there's a pair of adjacent fields in
   struct inet_sock; __be16 dport followed by __u16 num.  We want to
   search by pair, so we combine the keys into a single 32bit value
   and compare with a 32bit value read from &...->dport.  Let's at least
   make sure that it's not mixed with anything else...
   On 64bit targets we combine comparisons with a pair of adjacent __be32
   fields in the same way.
*/
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
        ((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
        ((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif

#if (BITS_PER_LONG == 64)
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
        const __addrpair __name = (__force __addrpair) ( \
                                   (((__force __u64)(__be32)(__saddr)) << 32) | \
                                   ((__force __u64)(__be32)(__daddr)))
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
        const __addrpair __name = (__force __addrpair) ( \
                                   (((__force __u64)(__be32)(__daddr)) << 32) | \
                                   ((__force __u64)(__be32)(__saddr)))
#endif /* __BIG_ENDIAN */
#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
        (((__sk)->sk_portpair == (__ports)) &&          \
         ((__sk)->sk_addrpair == (__cookie)) &&         \
         (((__sk)->sk_bound_dev_if == (__dif)) ||       \
          ((__sk)->sk_bound_dev_if == (__sdif))) &&     \
         net_eq(sock_net(__sk), (__net)))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
        const int __name __deprecated __attribute__((unused))

#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
        (((__sk)->sk_portpair == (__ports)) &&          \
         ((__sk)->sk_daddr == (__saddr)) &&             \
         ((__sk)->sk_rcv_saddr == (__daddr)) &&         \
         (((__sk)->sk_bound_dev_if == (__dif)) ||       \
          ((__sk)->sk_bound_dev_if == (__sdif))) &&     \
         net_eq(sock_net(__sk), (__net)))
#endif /* 64-bit arch */
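
/* Usage sketch (editor's addition): a rough picture of how the cookies above
 * are used by the established-table lookup; build the keys once, then compare
 * each candidate with a couple of word-sized loads.  "hnum" is the local port
 * in host byte order:
 *
 *      const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
 *      INET_ADDR_COOKIE(acookie, saddr, daddr);
 *
 *      sk_nulls_for_each_rcu(sk, node, &head->chain) {
 *              if (INET_MATCH(sk, net, acookie, saddr, daddr, ports, dif, sdif))
 *                      goto found;
 *      }
 */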

/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
 * not check it for lookups anymore, thanks Alexey. -DaveM
 */
struct sock *__inet_lookup_established(struct net *net,
                                       struct inet_hashinfo *hashinfo,
                                       const __be32 saddr, const __be16 sport,
                                       const __be32 daddr, const u16 hnum,
                                       const int dif, const int sdif);

static inline struct sock *
        inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
                                const __be32 saddr, const __be16 sport,
                                const __be32 daddr, const __be16 dport,
                                const int dif)
{
        return __inet_lookup_established(net, hashinfo, saddr, sport, daddr,
                                         ntohs(dport), dif, 0);
}

static inline struct sock *__inet_lookup(struct net *net,
                                         struct inet_hashinfo *hashinfo,
                                         struct sk_buff *skb, int doff,
                                         const __be32 saddr, const __be16 sport,
                                         const __be32 daddr, const __be16 dport,
                                         const int dif, const int sdif,
                                         bool *refcounted)
{
        u16 hnum = ntohs(dport);
        struct sock *sk;

        sk = __inet_lookup_established(net, hashinfo, saddr, sport,
                                       daddr, hnum, dif, sdif);
        *refcounted = true;
        if (sk)
                return sk;
        *refcounted = false;
        return __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
                                      sport, daddr, hnum, dif, sdif);
}
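
/* Note (editor's addition): callers of __inet_lookup() must honour
 * *refcounted.  An established socket is returned with a reference already
 * taken, while a listener is only RCU-protected, so a caller that keeps the
 * socket beyond the RCU read-side section follows the pattern used by
 * inet_lookup() just below:
 *
 *      if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
 *              sk = NULL;      // listener went away under us
 */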

static inline struct sock *inet_lookup(struct net *net,
                                       struct inet_hashinfo *hashinfo,
                                       struct sk_buff *skb, int doff,
                                       const __be32 saddr, const __be16 sport,
                                       const __be32 daddr, const __be16 dport,
                                       const int dif)
{
        struct sock *sk;
        bool refcounted;

        sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
                           dport, dif, 0, &refcounted);

        if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
                sk = NULL;
        return sk;
}

static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
                                             struct sk_buff *skb,
                                             int doff,
                                             const __be16 sport,
                                             const __be16 dport,
                                             const int sdif,
                                             bool *refcounted)
{
        struct sock *sk = skb_steal_sock(skb, refcounted);
        const struct iphdr *iph = ip_hdr(skb);

        if (sk)
                return sk;

        return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
                             doff, iph->saddr, sport,
                             iph->daddr, dport, inet_iif(skb), sdif,
                             refcounted);
}

u32 inet6_ehashfn(const struct net *net,
                  const struct in6_addr *laddr, const u16 lport,
                  const struct in6_addr *faddr, const __be16 fport);

static inline void sk_daddr_set(struct sock *sk, __be32 addr)
{
        sk->sk_daddr = addr; /* alias of inet_daddr */
#if IS_ENABLED(CONFIG_IPV6)
        ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr);
#endif
}

static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
{
        sk->sk_rcv_saddr = addr; /* alias of inet_rcv_saddr */
#if IS_ENABLED(CONFIG_IPV6)
        ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr);
#endif
}

int __inet_hash_connect(struct inet_timewait_death_row *death_row,
                        struct sock *sk, u32 port_offset,
                        int (*check_established)(struct inet_timewait_death_row *,
                                                 struct sock *, __u16,
                                                 struct inet_timewait_sock **));

int inet_hash_connect(struct inet_timewait_death_row *death_row,
                      struct sock *sk);
#endif /* _INET_HASHTABLES_H */