v4.17
 
  1/*
  2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
  3 *		operating system.  INET is implemented using the BSD Socket
  4 *		interface as the means of communication with the user level.
  5 *
  6 * Authors:	Lotsa people, from code originally in tcp
  7 *
  8 *	This program is free software; you can redistribute it and/or
  9 *      modify it under the terms of the GNU General Public License
 10 *      as published by the Free Software Foundation; either version
 11 *      2 of the License, or (at your option) any later version.
 12 */
 13
 14#ifndef _INET_HASHTABLES_H
 15#define _INET_HASHTABLES_H
 16
 17
 18#include <linux/interrupt.h>
 19#include <linux/ip.h>
 20#include <linux/ipv6.h>
 21#include <linux/list.h>
 22#include <linux/slab.h>
 23#include <linux/socket.h>
 24#include <linux/spinlock.h>
 25#include <linux/types.h>
 26#include <linux/wait.h>
 27
 28#include <net/inet_connection_sock.h>
 29#include <net/inet_sock.h>
 30#include <net/sock.h>
 31#include <net/route.h>
 32#include <net/tcp_states.h>
 33#include <net/netns/hash.h>
 34
 35#include <linux/refcount.h>
 36#include <asm/byteorder.h>
 37
 38/* This is for all connections with a full identity, no wildcards.
  39 * The 'e' prefix stands for Established, but we really keep all sockets
  40 * here except LISTEN ones.
 41 */
 42struct inet_ehash_bucket {
 43	struct hlist_nulls_head chain;
 44};
 45
 46/* There are a few simple rules, which allow for local port reuse by
 47 * an application.  In essence:
 48 *
 49 *	1) Sockets bound to different interfaces may share a local port.
 50 *	   Failing that, goto test 2.
 51 *	2) If all sockets have sk->sk_reuse set, and none of them are in
 52 *	   TCP_LISTEN state, the port may be shared.
 53 *	   Failing that, goto test 3.
 54 *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 55 *	   address, and none of them are the same, the port may be
 56 *	   shared.
 57 *	   Failing this, the port cannot be shared.
 58 *
  59 * The interesting point is test #2.  This is what an FTP server does
 60 * all day.  To optimize this case we use a specific flag bit defined
 61 * below.  As we add sockets to a bind bucket list, we perform a
 62 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 63 * As long as all sockets added to a bind bucket pass this test,
 64 * the flag bit will be set.
 65 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 66 * for this flag bit, if it is set and the socket trying to bind has
 67 * sk->sk_reuse set, we don't even have to walk the owners list at all,
 68 * we return that it is ok to bind this socket to the requested local port.
 69 *
 70 * Sounds like a lot of work, but it is worth it.  In a more naive
  71 * implementation (i.e. current FreeBSD etc.) the entire list of ports
 72 * must be walked for each data port opened by an ftp server.  Needless
 73 * to say, this does not scale at all.  With a couple thousand FTP
 74 * users logged onto your box, isn't it nice to know that new data
 75 * ports are created in O(1) time?  I thought so. ;-)	-DaveM
 76 */
 77#define FASTREUSEPORT_ANY	1
 78#define FASTREUSEPORT_STRICT	2
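As a hedged illustration of the rule described in the comment above (this is not the kernel's code; the real update happens inside inet_csk_get_port() when a socket joins tb->owners), the fastreuse flag could be maintained roughly like this:

/* Illustrative sketch only: set the flag with the first owner, clear it as
 * soon as one socket fails the (sk_reuse && !LISTEN) test. */
static inline void tb_update_fastreuse_sketch(struct inet_bind_bucket *tb,
					      const struct sock *sk)
{
	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;

	if (hlist_empty(&tb->owners))
		tb->fastreuse = reuse;
	else if (tb->fastreuse && !reuse)
		tb->fastreuse = 0;
}

The bind() fast path can then trust the flag without walking tb->owners, exactly as the comment describes.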
 79
 80struct inet_bind_bucket {
 81	possible_net_t		ib_net;
 82	unsigned short		port;
 83	signed char		fastreuse;
 84	signed char		fastreuseport;
 85	kuid_t			fastuid;
 86#if IS_ENABLED(CONFIG_IPV6)
 87	struct in6_addr		fast_v6_rcv_saddr;
 88#endif
 89	__be32			fast_rcv_saddr;
 90	unsigned short		fast_sk_family;
 91	bool			fast_ipv6_only;
 92	struct hlist_node	node;
 93	struct hlist_head	owners;
 94};
 95
 96static inline struct net *ib_net(struct inet_bind_bucket *ib)
 97{
 98	return read_pnet(&ib->ib_net);
 99}
100
101#define inet_bind_bucket_for_each(tb, head) \
102	hlist_for_each_entry(tb, head, node)
103
104struct inet_bind_hashbucket {
105	spinlock_t		lock;
106	struct hlist_head	chain;
107};
108
109/*
110 * Sockets can be hashed in established or listening table
111 */
112struct inet_listen_hashbucket {
113	spinlock_t		lock;
114	unsigned int		count;
115	struct hlist_head	head;
116};
117
118/* This is for listening sockets, thus all sockets which possess wildcards. */
119#define INET_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */
120
121struct inet_hashinfo {
122	/* This is for sockets with full identity only.  Sockets here will
123	 * always be without wildcards and will have the following invariant:
124	 *
125	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
126	 *
127	 */
128	struct inet_ehash_bucket	*ehash;
129	spinlock_t			*ehash_locks;
130	unsigned int			ehash_mask;
131	unsigned int			ehash_locks_mask;
132
133	/* Ok, let's try this, I give up, we do need a local binding
134	 * TCP hash as well as the others for fast bind/connect.
135	 */
136	struct kmem_cache		*bind_bucket_cachep;
137	struct inet_bind_hashbucket	*bhash;
138	unsigned int			bhash_size;
139
140	/* The 2nd listener table hashed by local port and address */
141	unsigned int			lhash2_mask;
142	struct inet_listen_hashbucket	*lhash2;
143
144	/* All the above members are written once at bootup and
145	 * never written again _or_ are predominantly read-access.
146	 *
147	 * Now align to a new cache line as all the following members
148	 * might be often dirty.
149	 */
150	/* All sockets in TCP_LISTEN state will be in listening_hash.
151	 * This is the only table where wildcard'd TCP sockets can
152	 * exist.  listening_hash is only hashed by local port number.
153	 * If lhash2 is initialized, the same socket will also be hashed
154	 * to lhash2 by port and address.
155	 */
156	struct inet_listen_hashbucket	listening_hash[INET_LHTABLE_SIZE]
157					____cacheline_aligned_in_smp;
158};
159
160#define inet_lhash2_for_each_icsk_rcu(__icsk, list) \
161	hlist_for_each_entry_rcu(__icsk, list, icsk_listen_portaddr_node)
162
163static inline struct inet_listen_hashbucket *
164inet_lhash2_bucket(struct inet_hashinfo *h, u32 hash)
165{
166	return &h->lhash2[hash & h->lhash2_mask];
167}
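As a hedged illustration, the lhash2 bucket for a listener bound to a specific address and port can be selected like this; ipv4_portaddr_hash() from include/net/ip.h is assumed here, since the in-tree lookup code uses a port+address hash of this kind:

/* Illustration only: pick the secondary listener bucket for daddr:hnum. */
static inline struct inet_listen_hashbucket *
lhash2_bucket_sketch(struct inet_hashinfo *h, struct net *net,
		     __be32 daddr, unsigned short hnum)
{
	u32 hash = ipv4_portaddr_hash(net, daddr, hnum);

	return inet_lhash2_bucket(h, hash);
}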
168
169static inline struct inet_ehash_bucket *inet_ehash_bucket(
170	struct inet_hashinfo *hashinfo,
171	unsigned int hash)
172{
173	return &hashinfo->ehash[hash & hashinfo->ehash_mask];
174}
175
176static inline spinlock_t *inet_ehash_lockp(
177	struct inet_hashinfo *hashinfo,
178	unsigned int hash)
179{
180	return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
181}
182
183int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);
184
185static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
186{
187	kvfree(hashinfo->ehash_locks);
188	hashinfo->ehash_locks = NULL;
189}
190
191struct inet_bind_bucket *
192inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
193			struct inet_bind_hashbucket *head,
194			const unsigned short snum);
195void inet_bind_bucket_destroy(struct kmem_cache *cachep,
196			      struct inet_bind_bucket *tb);
197
198static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
199			       const u32 bhash_size)
200{
201	return (lport + net_hash_mix(net)) & (bhash_size - 1);
202}
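A hedged usage example (tcp_hashinfo is assumed here to be the global TCP instance declared in include/net/tcp.h): locating the bind-hash chain that a given local port falls into.

/* Illustration only: find the bhash chain (and its lock) for a local port. */
static inline struct inet_bind_hashbucket *
bhash_head_sketch(struct net *net, __u16 lport)
{
	u32 slot = inet_bhashfn(net, lport, tcp_hashinfo.bhash_size);

	return &tcp_hashinfo.bhash[slot];
}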
203
204void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
205		    const unsigned short snum);
206
207/* These can have wildcards, don't try too hard. */
208static inline u32 inet_lhashfn(const struct net *net, const unsigned short num)
209{
210	return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1);
211}
212
213static inline int inet_sk_listen_hashfn(const struct sock *sk)
214{
215	return inet_lhashfn(sock_net(sk), inet_sk(sk)->inet_num);
216}
217
218/* Caller must disable local BH processing. */
219int __inet_inherit_port(const struct sock *sk, struct sock *child);
220
221void inet_put_port(struct sock *sk);
222
223void inet_hashinfo_init(struct inet_hashinfo *h);
224void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
225			 unsigned long numentries, int scale,
226			 unsigned long low_limit,
227			 unsigned long high_limit);
228
229bool inet_ehash_insert(struct sock *sk, struct sock *osk);
230bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
231int __inet_hash(struct sock *sk, struct sock *osk);
232int inet_hash(struct sock *sk);
233void inet_unhash(struct sock *sk);
234
235struct sock *__inet_lookup_listener(struct net *net,
236				    struct inet_hashinfo *hashinfo,
237				    struct sk_buff *skb, int doff,
238				    const __be32 saddr, const __be16 sport,
239				    const __be32 daddr,
240				    const unsigned short hnum,
241				    const int dif, const int sdif);
242
243static inline struct sock *inet_lookup_listener(struct net *net,
244		struct inet_hashinfo *hashinfo,
245		struct sk_buff *skb, int doff,
246		__be32 saddr, __be16 sport,
247		__be32 daddr, __be16 dport, int dif, int sdif)
248{
249	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
250				      daddr, ntohs(dport), dif, sdif);
251}
252
253/* Socket demux engine toys. */
254/* What happens here is ugly; there's a pair of adjacent fields in
255   struct inet_sock; __be16 dport followed by __u16 num.  We want to
256   search by pair, so we combine the keys into a single 32bit value
257   and compare with 32bit value read from &...->dport.  Let's at least
258   make sure that it's not mixed with anything else...
259   On 64bit targets we combine comparisons with pair of adjacent __be32
260   fields in the same way.
261*/
262#ifdef __BIG_ENDIAN
263#define INET_COMBINED_PORTS(__sport, __dport) \
264	((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
265#else /* __LITTLE_ENDIAN */
266#define INET_COMBINED_PORTS(__sport, __dport) \
267	((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
268#endif
269
270#if (BITS_PER_LONG == 64)
271#ifdef __BIG_ENDIAN
272#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
273	const __addrpair __name = (__force __addrpair) ( \
274				   (((__force __u64)(__be32)(__saddr)) << 32) | \
275				   ((__force __u64)(__be32)(__daddr)))
276#else /* __LITTLE_ENDIAN */
277#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
278	const __addrpair __name = (__force __addrpair) ( \
279				   (((__force __u64)(__be32)(__daddr)) << 32) | \
280				   ((__force __u64)(__be32)(__saddr)))
281#endif /* __BIG_ENDIAN */
282#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
283	(((__sk)->sk_portpair == (__ports))			&&	\
284	 ((__sk)->sk_addrpair == (__cookie))			&&	\
285	 (!(__sk)->sk_bound_dev_if	||				\
286	   ((__sk)->sk_bound_dev_if == (__dif))			||	\
287	   ((__sk)->sk_bound_dev_if == (__sdif)))		&&	\
288	 net_eq(sock_net(__sk), (__net)))
289#else /* 32-bit arch */
290#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
291	const int __name __deprecated __attribute__((unused))
292
293#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
294	(((__sk)->sk_portpair == (__ports))		&&		\
295	 ((__sk)->sk_daddr	== (__saddr))		&&		\
296	 ((__sk)->sk_rcv_saddr	== (__daddr))		&&		\
297	 (!(__sk)->sk_bound_dev_if	||				\
298	   ((__sk)->sk_bound_dev_if == (__dif))		||		\
299	   ((__sk)->sk_bound_dev_if == (__sdif)))	&&		\
300	 net_eq(sock_net(__sk), (__net)))
301#endif /* 64-bit arch */
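These demux macros are consumed together when walking an ehash chain. Below is a hedged sketch of that pattern, modeled on __inet_lookup_established() in net/ipv4/inet_hashtables.c (nulls-restart and refcount validation omitted for brevity):

/* Simplified sketch of an established-table scan using the demux macros. */
static struct sock *ehash_scan_sketch(struct net *net,
				      struct inet_ehash_bucket *head,
				      __be32 saddr, __be16 sport,
				      __be32 daddr, u16 hnum,
				      int dif, int sdif)
{
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	const struct hlist_nulls_node *node;
	struct sock *sk;
	INET_ADDR_COOKIE(acookie, saddr, daddr);

	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (INET_MATCH(sk, net, acookie, saddr, daddr,
			       ports, dif, sdif))
			return sk; /* caller still takes a reference */
	}
	return NULL;
}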
302
303/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
304 * not check it for lookups anymore, thanks Alexey. -DaveM
305 */
306struct sock *__inet_lookup_established(struct net *net,
307				       struct inet_hashinfo *hashinfo,
308				       const __be32 saddr, const __be16 sport,
309				       const __be32 daddr, const u16 hnum,
310				       const int dif, const int sdif);
311
312static inline struct sock *
313	inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
314				const __be32 saddr, const __be16 sport,
315				const __be32 daddr, const __be16 dport,
316				const int dif)
317{
318	return __inet_lookup_established(net, hashinfo, saddr, sport, daddr,
319					 ntohs(dport), dif, 0);
320}
321
322static inline struct sock *__inet_lookup(struct net *net,
323					 struct inet_hashinfo *hashinfo,
324					 struct sk_buff *skb, int doff,
325					 const __be32 saddr, const __be16 sport,
326					 const __be32 daddr, const __be16 dport,
327					 const int dif, const int sdif,
328					 bool *refcounted)
329{
330	u16 hnum = ntohs(dport);
331	struct sock *sk;
332
333	sk = __inet_lookup_established(net, hashinfo, saddr, sport,
334				       daddr, hnum, dif, sdif);
335	*refcounted = true;
336	if (sk)
337		return sk;
338	*refcounted = false;
339	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
340				      sport, daddr, hnum, dif, sdif);
341}
342
343static inline struct sock *inet_lookup(struct net *net,
344				       struct inet_hashinfo *hashinfo,
345				       struct sk_buff *skb, int doff,
346				       const __be32 saddr, const __be16 sport,
347				       const __be32 daddr, const __be16 dport,
348				       const int dif)
349{
350	struct sock *sk;
351	bool refcounted;
352
353	sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
354			   dport, dif, 0, &refcounted);
355
356	if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
357		sk = NULL;
358	return sk;
359}
360
361static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
362					     struct sk_buff *skb,
363					     int doff,
364					     const __be16 sport,
365					     const __be16 dport,
366					     const int sdif,
367					     bool *refcounted)
368{
369	struct sock *sk = skb_steal_sock(skb);
370	const struct iphdr *iph = ip_hdr(skb);
371
372	*refcounted = true;
373	if (sk)
374		return sk;
375
376	return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
377			     doff, iph->saddr, sport,
378			     iph->daddr, dport, inet_iif(skb), sdif,
379			     refcounted);
380}
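The *refcounted out-parameter records whether a reference was taken: established-table hits return with sk_refcnt already incremented, while listener hits are only RCU-protected. A hedged receive-path sketch, loosely modeled on tcp_v4_rcv():

/* Illustration only: drop the reference at the end iff one was taken. */
static void rx_demux_sketch(struct inet_hashinfo *hashinfo,
			    struct sk_buff *skb, int doff,
			    __be16 sport, __be16 dport, int sdif)
{
	bool refcounted;
	struct sock *sk;

	sk = __inet_lookup_skb(hashinfo, skb, doff, sport, dport,
			       sdif, &refcounted);
	if (!sk)
		return;

	/* ... deliver skb to sk under the appropriate locking ... */

	if (refcounted)
		sock_put(sk);
}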
381
382u32 inet6_ehashfn(const struct net *net,
383		  const struct in6_addr *laddr, const u16 lport,
384		  const struct in6_addr *faddr, const __be16 fport);
385
386static inline void sk_daddr_set(struct sock *sk, __be32 addr)
387{
388	sk->sk_daddr = addr; /* alias of inet_daddr */
389#if IS_ENABLED(CONFIG_IPV6)
390	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr);
391#endif
392}
393
394static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
395{
396	sk->sk_rcv_saddr = addr; /* alias of inet_rcv_saddr */
397#if IS_ENABLED(CONFIG_IPV6)
398	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr);
399#endif
400}
401
402int __inet_hash_connect(struct inet_timewait_death_row *death_row,
403			struct sock *sk, u32 port_offset,
404			int (*check_established)(struct inet_timewait_death_row *,
405						 struct sock *, __u16,
406						 struct inet_timewait_sock **));
407
408int inet_hash_connect(struct inet_timewait_death_row *death_row,
409		      struct sock *sk);
410#endif /* _INET_HASHTABLES_H */
v5.4
  1/* SPDX-License-Identifier: GPL-2.0-or-later */
  2/*
  3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
  4 *		operating system.  INET is implemented using the BSD Socket
  5 *		interface as the means of communication with the user level.
  6 *
  7 * Authors:	Lotsa people, from code originally in tcp
  8 */
  9
 10#ifndef _INET_HASHTABLES_H
 11#define _INET_HASHTABLES_H
 12
 13
 14#include <linux/interrupt.h>
 15#include <linux/ip.h>
 16#include <linux/ipv6.h>
 17#include <linux/list.h>
 18#include <linux/slab.h>
 19#include <linux/socket.h>
 20#include <linux/spinlock.h>
 21#include <linux/types.h>
 22#include <linux/wait.h>
 23
 24#include <net/inet_connection_sock.h>
 25#include <net/inet_sock.h>
 26#include <net/sock.h>
 27#include <net/route.h>
 28#include <net/tcp_states.h>
 29#include <net/netns/hash.h>
 30
 31#include <linux/refcount.h>
 32#include <asm/byteorder.h>
 33
 34/* This is for all connections with a full identity, no wildcards.
  35 * The 'e' prefix stands for Established, but we really keep all sockets
  36 * here except LISTEN ones.
 37 */
 38struct inet_ehash_bucket {
 39	struct hlist_nulls_head chain;
 40};
 41
 42/* There are a few simple rules, which allow for local port reuse by
 43 * an application.  In essence:
 44 *
 45 *	1) Sockets bound to different interfaces may share a local port.
 46 *	   Failing that, goto test 2.
 47 *	2) If all sockets have sk->sk_reuse set, and none of them are in
 48 *	   TCP_LISTEN state, the port may be shared.
 49 *	   Failing that, goto test 3.
 50 *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 51 *	   address, and none of them are the same, the port may be
 52 *	   shared.
 53 *	   Failing this, the port cannot be shared.
 54 *
  55 * The interesting point is test #2.  This is what an FTP server does
 56 * all day.  To optimize this case we use a specific flag bit defined
 57 * below.  As we add sockets to a bind bucket list, we perform a
 58 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 59 * As long as all sockets added to a bind bucket pass this test,
 60 * the flag bit will be set.
 61 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 62 * for this flag bit, if it is set and the socket trying to bind has
 63 * sk->sk_reuse set, we don't even have to walk the owners list at all,
 64 * we return that it is ok to bind this socket to the requested local port.
 65 *
 66 * Sounds like a lot of work, but it is worth it.  In a more naive
  67 * implementation (i.e. current FreeBSD etc.) the entire list of ports
 68 * must be walked for each data port opened by an ftp server.  Needless
 69 * to say, this does not scale at all.  With a couple thousand FTP
 70 * users logged onto your box, isn't it nice to know that new data
 71 * ports are created in O(1) time?  I thought so. ;-)	-DaveM
 72 */
 73#define FASTREUSEPORT_ANY	1
 74#define FASTREUSEPORT_STRICT	2
 75
 76struct inet_bind_bucket {
 77	possible_net_t		ib_net;
 78	int			l3mdev;
 79	unsigned short		port;
 80	signed char		fastreuse;
 81	signed char		fastreuseport;
 82	kuid_t			fastuid;
 83#if IS_ENABLED(CONFIG_IPV6)
 84	struct in6_addr		fast_v6_rcv_saddr;
 85#endif
 86	__be32			fast_rcv_saddr;
 87	unsigned short		fast_sk_family;
 88	bool			fast_ipv6_only;
 89	struct hlist_node	node;
 90	struct hlist_head	owners;
 91};
 92
 93static inline struct net *ib_net(struct inet_bind_bucket *ib)
 94{
 95	return read_pnet(&ib->ib_net);
 96}
 97
 98#define inet_bind_bucket_for_each(tb, head) \
 99	hlist_for_each_entry(tb, head, node)
100
101struct inet_bind_hashbucket {
102	spinlock_t		lock;
103	struct hlist_head	chain;
104};
105
106/*
107 * Sockets can be hashed in established or listening table
108 */
109struct inet_listen_hashbucket {
110	spinlock_t		lock;
111	unsigned int		count;
112	struct hlist_head	head;
113};
114
115/* This is for listening sockets, thus all sockets which possess wildcards. */
116#define INET_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */
117
118struct inet_hashinfo {
119	/* This is for sockets with full identity only.  Sockets here will
120	 * always be without wildcards and will have the following invariant:
121	 *
122	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
123	 *
124	 */
125	struct inet_ehash_bucket	*ehash;
126	spinlock_t			*ehash_locks;
127	unsigned int			ehash_mask;
128	unsigned int			ehash_locks_mask;
129
130	/* Ok, let's try this, I give up, we do need a local binding
131	 * TCP hash as well as the others for fast bind/connect.
132	 */
133	struct kmem_cache		*bind_bucket_cachep;
134	struct inet_bind_hashbucket	*bhash;
135	unsigned int			bhash_size;
136
137	/* The 2nd listener table hashed by local port and address */
138	unsigned int			lhash2_mask;
139	struct inet_listen_hashbucket	*lhash2;
140
141	/* All the above members are written once at bootup and
142	 * never written again _or_ are predominantly read-access.
143	 *
144	 * Now align to a new cache line as all the following members
145	 * might be often dirty.
146	 */
147	/* All sockets in TCP_LISTEN state will be in listening_hash.
148	 * This is the only table where wildcard'd TCP sockets can
149	 * exist.  listening_hash is only hashed by local port number.
150	 * If lhash2 is initialized, the same socket will also be hashed
151	 * to lhash2 by port and address.
152	 */
153	struct inet_listen_hashbucket	listening_hash[INET_LHTABLE_SIZE]
154					____cacheline_aligned_in_smp;
155};
156
157#define inet_lhash2_for_each_icsk_rcu(__icsk, list) \
158	hlist_for_each_entry_rcu(__icsk, list, icsk_listen_portaddr_node)
159
160static inline struct inet_listen_hashbucket *
161inet_lhash2_bucket(struct inet_hashinfo *h, u32 hash)
162{
163	return &h->lhash2[hash & h->lhash2_mask];
164}
165
166static inline struct inet_ehash_bucket *inet_ehash_bucket(
167	struct inet_hashinfo *hashinfo,
168	unsigned int hash)
169{
170	return &hashinfo->ehash[hash & hashinfo->ehash_mask];
171}
172
173static inline spinlock_t *inet_ehash_lockp(
174	struct inet_hashinfo *hashinfo,
175	unsigned int hash)
176{
177	return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
178}
179
180int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);
181
182static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
183{
184	kvfree(hashinfo->ehash_locks);
185	hashinfo->ehash_locks = NULL;
186}
187
188static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
189					int dif, int sdif)
190{
191#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
192	return inet_bound_dev_eq(!!net->ipv4.sysctl_tcp_l3mdev_accept,
193				 bound_dev_if, dif, sdif);
194#else
195	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
196#endif
197}
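Relative to v4.17, this version makes the hash tables L3-domain aware: struct inet_bind_bucket gains an l3mdev field (so the same port can be bound independently in different VRFs), inet_bind_bucket_create() takes the l3mdev as an argument, and inet_sk_bound_dev_eq() above decides whether an unbound socket may match traffic arriving through an L3 master device, subject to the net.ipv4.tcp_l3mdev_accept sysctl. A minimal hedged sketch of the device check performed during lookup (compare compute_score() in net/ipv4/inet_hashtables.c):

/* Illustration only: reject a candidate listener whose device binding does
 * not cover the ingress device (dif) or its L3 master device (sdif). */
static inline bool listener_dev_match_sketch(struct net *net,
					     const struct sock *sk,
					     int dif, int sdif)
{
	return inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
}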
198
199struct inet_bind_bucket *
200inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
201			struct inet_bind_hashbucket *head,
202			const unsigned short snum, int l3mdev);
203void inet_bind_bucket_destroy(struct kmem_cache *cachep,
204			      struct inet_bind_bucket *tb);
205
206static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
207			       const u32 bhash_size)
208{
209	return (lport + net_hash_mix(net)) & (bhash_size - 1);
210}
211
212void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
213		    const unsigned short snum);
214
215/* These can have wildcards, don't try too hard. */
216static inline u32 inet_lhashfn(const struct net *net, const unsigned short num)
217{
218	return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1);
219}
220
221static inline int inet_sk_listen_hashfn(const struct sock *sk)
222{
223	return inet_lhashfn(sock_net(sk), inet_sk(sk)->inet_num);
224}
225
226/* Caller must disable local BH processing. */
227int __inet_inherit_port(const struct sock *sk, struct sock *child);
228
229void inet_put_port(struct sock *sk);
230
231void inet_hashinfo_init(struct inet_hashinfo *h);
232void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
233			 unsigned long numentries, int scale,
234			 unsigned long low_limit,
235			 unsigned long high_limit);
236int inet_hashinfo2_init_mod(struct inet_hashinfo *h);
237
238bool inet_ehash_insert(struct sock *sk, struct sock *osk);
239bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
240int __inet_hash(struct sock *sk, struct sock *osk);
241int inet_hash(struct sock *sk);
242void inet_unhash(struct sock *sk);
243
244struct sock *__inet_lookup_listener(struct net *net,
245				    struct inet_hashinfo *hashinfo,
246				    struct sk_buff *skb, int doff,
247				    const __be32 saddr, const __be16 sport,
248				    const __be32 daddr,
249				    const unsigned short hnum,
250				    const int dif, const int sdif);
251
252static inline struct sock *inet_lookup_listener(struct net *net,
253		struct inet_hashinfo *hashinfo,
254		struct sk_buff *skb, int doff,
255		__be32 saddr, __be16 sport,
256		__be32 daddr, __be16 dport, int dif, int sdif)
257{
258	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
259				      daddr, ntohs(dport), dif, sdif);
260}
261
262/* Socket demux engine toys. */
263/* What happens here is ugly; there's a pair of adjacent fields in
264   struct inet_sock; __be16 dport followed by __u16 num.  We want to
265   search by pair, so we combine the keys into a single 32bit value
266   and compare with 32bit value read from &...->dport.  Let's at least
267   make sure that it's not mixed with anything else...
268   On 64bit targets we combine comparisons with pair of adjacent __be32
269   fields in the same way.
270*/
271#ifdef __BIG_ENDIAN
272#define INET_COMBINED_PORTS(__sport, __dport) \
273	((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
274#else /* __LITTLE_ENDIAN */
275#define INET_COMBINED_PORTS(__sport, __dport) \
276	((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
277#endif
278
279#if (BITS_PER_LONG == 64)
280#ifdef __BIG_ENDIAN
281#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
282	const __addrpair __name = (__force __addrpair) ( \
283				   (((__force __u64)(__be32)(__saddr)) << 32) | \
284				   ((__force __u64)(__be32)(__daddr)))
285#else /* __LITTLE_ENDIAN */
286#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
287	const __addrpair __name = (__force __addrpair) ( \
288				   (((__force __u64)(__be32)(__daddr)) << 32) | \
289				   ((__force __u64)(__be32)(__saddr)))
290#endif /* __BIG_ENDIAN */
291#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
292	(((__sk)->sk_portpair == (__ports))			&&	\
293	 ((__sk)->sk_addrpair == (__cookie))			&&	\
294	 (((__sk)->sk_bound_dev_if == (__dif))			||	\
295	  ((__sk)->sk_bound_dev_if == (__sdif)))		&&	\
296	 net_eq(sock_net(__sk), (__net)))
297#else /* 32-bit arch */
298#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
299	const int __name __deprecated __attribute__((unused))
300
301#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
302	(((__sk)->sk_portpair == (__ports))		&&		\
303	 ((__sk)->sk_daddr	== (__saddr))		&&		\
304	 ((__sk)->sk_rcv_saddr	== (__daddr))		&&		\
305	 (((__sk)->sk_bound_dev_if == (__dif))		||		\
306	  ((__sk)->sk_bound_dev_if == (__sdif)))	&&		\
307	 net_eq(sock_net(__sk), (__net)))
308#endif /* 64-bit arch */
309
310/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
311 * not check it for lookups anymore, thanks Alexey. -DaveM
312 */
313struct sock *__inet_lookup_established(struct net *net,
314				       struct inet_hashinfo *hashinfo,
315				       const __be32 saddr, const __be16 sport,
316				       const __be32 daddr, const u16 hnum,
317				       const int dif, const int sdif);
318
319static inline struct sock *
320	inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
321				const __be32 saddr, const __be16 sport,
322				const __be32 daddr, const __be16 dport,
323				const int dif)
324{
325	return __inet_lookup_established(net, hashinfo, saddr, sport, daddr,
326					 ntohs(dport), dif, 0);
327}
328
329static inline struct sock *__inet_lookup(struct net *net,
330					 struct inet_hashinfo *hashinfo,
331					 struct sk_buff *skb, int doff,
332					 const __be32 saddr, const __be16 sport,
333					 const __be32 daddr, const __be16 dport,
334					 const int dif, const int sdif,
335					 bool *refcounted)
336{
337	u16 hnum = ntohs(dport);
338	struct sock *sk;
339
340	sk = __inet_lookup_established(net, hashinfo, saddr, sport,
341				       daddr, hnum, dif, sdif);
342	*refcounted = true;
343	if (sk)
344		return sk;
345	*refcounted = false;
346	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
347				      sport, daddr, hnum, dif, sdif);
348}
349
350static inline struct sock *inet_lookup(struct net *net,
351				       struct inet_hashinfo *hashinfo,
352				       struct sk_buff *skb, int doff,
353				       const __be32 saddr, const __be16 sport,
354				       const __be32 daddr, const __be16 dport,
355				       const int dif)
356{
357	struct sock *sk;
358	bool refcounted;
359
360	sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
361			   dport, dif, 0, &refcounted);
362
363	if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
364		sk = NULL;
365	return sk;
366}
367
368static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
369					     struct sk_buff *skb,
370					     int doff,
371					     const __be16 sport,
372					     const __be16 dport,
373					     const int sdif,
374					     bool *refcounted)
375{
376	struct sock *sk = skb_steal_sock(skb);
377	const struct iphdr *iph = ip_hdr(skb);
378
379	*refcounted = true;
380	if (sk)
381		return sk;
382
383	return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
384			     doff, iph->saddr, sport,
385			     iph->daddr, dport, inet_iif(skb), sdif,
386			     refcounted);
387}
388
389u32 inet6_ehashfn(const struct net *net,
390		  const struct in6_addr *laddr, const u16 lport,
391		  const struct in6_addr *faddr, const __be16 fport);
392
393static inline void sk_daddr_set(struct sock *sk, __be32 addr)
394{
395	sk->sk_daddr = addr; /* alias of inet_daddr */
396#if IS_ENABLED(CONFIG_IPV6)
397	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr);
398#endif
399}
400
401static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
402{
403	sk->sk_rcv_saddr = addr; /* alias of inet_rcv_saddr */
404#if IS_ENABLED(CONFIG_IPV6)
405	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr);
406#endif
407}
408
409int __inet_hash_connect(struct inet_timewait_death_row *death_row,
410			struct sock *sk, u32 port_offset,
411			int (*check_established)(struct inet_timewait_death_row *,
412						 struct sock *, __u16,
413						 struct inet_timewait_sock **));
414
415int inet_hash_connect(struct inet_timewait_death_row *death_row,
416		      struct sock *sk);
417#endif /* _INET_HASHTABLES_H */