v4.6 (net/ipv4/inet_hashtables.c)
 
  1/*
  2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
  3 *		operating system.  INET is implemented using the BSD Socket
  4 *		interface as the means of communication with the user level.
  5 *
  6 *		Generic INET transport hashtables
  7 *
  8 * Authors:	Lotsa people, from code originally in tcp
  9 *
 10 *	This program is free software; you can redistribute it and/or
 11 *      modify it under the terms of the GNU General Public License
 12 *      as published by the Free Software Foundation; either version
 13 *      2 of the License, or (at your option) any later version.
 14 */
 15
 16#include <linux/module.h>
 17#include <linux/random.h>
 18#include <linux/sched.h>
 19#include <linux/slab.h>
 20#include <linux/wait.h>
 21#include <linux/vmalloc.h>
 22
 23#include <net/addrconf.h>
 24#include <net/inet_connection_sock.h>
 25#include <net/inet_hashtables.h>
 26#include <net/secure_seq.h>
 27#include <net/ip.h>
 28#include <net/sock_reuseport.h>
 29
 30static u32 inet_ehashfn(const struct net *net, const __be32 laddr,
 31			const __u16 lport, const __be32 faddr,
 32			const __be16 fport)
 33{
 34	static u32 inet_ehash_secret __read_mostly;
 35
 36	net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));
 37
 38	return __inet_ehashfn(laddr, lport, faddr, fport,
 39			      inet_ehash_secret + net_hash_mix(net));
 40}
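/*
 * A minimal userspace model of the pattern above: a process-lifetime
 * random secret is initialized on first use, then mixed into a keyed
 * hash of the connection 4-tuple.  The mixing below is illustrative
 * only; it is not the kernel's __inet_ehashfn(), and pthread_once()
 * stands in for net_get_random_once().
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static uint32_t ehash_secret;
static pthread_once_t secret_once = PTHREAD_ONCE_INIT;

static void init_secret(void)
{
	srandom((unsigned)time(NULL));	/* demo only; the kernel uses a CSPRNG */
	ehash_secret = (uint32_t)random();
}

static uint32_t toy_ehashfn(uint32_t laddr, uint16_t lport,
			    uint32_t faddr, uint16_t fport)
{
	uint32_t h;

	pthread_once(&secret_once, init_secret);
	h = laddr ^ faddr ^ ((uint32_t)lport << 16 | fport);
	h ^= ehash_secret;
	h *= 0x9e3779b1u;		/* golden-ratio multiply to spread bits */
	return h ^ (h >> 16);
}

int main(void)
{
	printf("%08x\n", toy_ehashfn(0x7f000001, 12345, 0x7f000001, 80));
	return 0;
}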
 41
 42/* This function handles inet_sock, but also timewait and request sockets
 43 * for IPv4/IPv6.
 44 */
 45u32 sk_ehashfn(const struct sock *sk)
 46{
 47#if IS_ENABLED(CONFIG_IPV6)
 48	if (sk->sk_family == AF_INET6 &&
 49	    !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
 50		return inet6_ehashfn(sock_net(sk),
 51				     &sk->sk_v6_rcv_saddr, sk->sk_num,
 52				     &sk->sk_v6_daddr, sk->sk_dport);
 53#endif
 54	return inet_ehashfn(sock_net(sk),
 55			    sk->sk_rcv_saddr, sk->sk_num,
 56			    sk->sk_daddr, sk->sk_dport);
 57}
 58
 59/*
 60 * Allocate and initialize a new local port bind bucket.
 61 * The bindhash mutex for snum's hash chain must be held here.
 62 */
 63struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
 64						 struct net *net,
 65						 struct inet_bind_hashbucket *head,
 66						 const unsigned short snum)
 67{
 68	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
 69
 70	if (tb) {
 71		write_pnet(&tb->ib_net, net);
 72		tb->port      = snum;
 73		tb->fastreuse = 0;
 74		tb->fastreuseport = 0;
 75		tb->num_owners = 0;
 76		INIT_HLIST_HEAD(&tb->owners);
 77		hlist_add_head(&tb->node, &head->chain);
 78	}
 79	return tb;
 80}
 81
 82/*
 83 * Caller must hold hashbucket lock for this tb with local BH disabled
 84 */
 85void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
 86{
 87	if (hlist_empty(&tb->owners)) {
 88		__hlist_del(&tb->node);
 89		kmem_cache_free(cachep, tb);
 90	}
 91}
 92
 93void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
 94		    const unsigned short snum)
 95{
 96	inet_sk(sk)->inet_num = snum;
 97	sk_add_bind_node(sk, &tb->owners);
 98	tb->num_owners++;
 99	inet_csk(sk)->icsk_bind_hash = tb;
100}
101
102/*
103 * Get rid of any references to a local port held by the given sock.
104 */
105static void __inet_put_port(struct sock *sk)
106{
107	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
108	const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num,
109			hashinfo->bhash_size);
110	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
111	struct inet_bind_bucket *tb;
112
113	spin_lock(&head->lock);
114	tb = inet_csk(sk)->icsk_bind_hash;
115	__sk_del_bind_node(sk);
116	tb->num_owners--;
117	inet_csk(sk)->icsk_bind_hash = NULL;
118	inet_sk(sk)->inet_num = 0;
119	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
120	spin_unlock(&head->lock);
121}
122
123void inet_put_port(struct sock *sk)
124{
125	local_bh_disable();
126	__inet_put_port(sk);
127	local_bh_enable();
128}
129EXPORT_SYMBOL(inet_put_port);
130
131int __inet_inherit_port(const struct sock *sk, struct sock *child)
132{
133	struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
134	unsigned short port = inet_sk(child)->inet_num;
135	const int bhash = inet_bhashfn(sock_net(sk), port,
136			table->bhash_size);
137	struct inet_bind_hashbucket *head = &table->bhash[bhash];
138	struct inet_bind_bucket *tb;
139
140	spin_lock(&head->lock);
141	tb = inet_csk(sk)->icsk_bind_hash;
142	if (unlikely(!tb)) {
143		spin_unlock(&head->lock);
144		return -ENOENT;
145	}
146	if (tb->port != port) {
147		/* NOTE: using tproxy and redirecting skbs to a proxy
148		 * on a different listener port breaks the assumption
149		 * that the listener socket's icsk_bind_hash is the same
150		 * as that of the child socket. We have to look up or
151		 * create a new bind bucket for the child here. */
152		inet_bind_bucket_for_each(tb, &head->chain) {
153			if (net_eq(ib_net(tb), sock_net(sk)) &&
154			    tb->port == port)
155				break;
156		}
157		if (!tb) {
158			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
159						     sock_net(sk), head, port);
160			if (!tb) {
161				spin_unlock(&head->lock);
162				return -ENOMEM;
163			}
164		}
165	}
166	inet_bind_hash(child, tb, port);
167	spin_unlock(&head->lock);
168
169	return 0;
170}
171EXPORT_SYMBOL_GPL(__inet_inherit_port);
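/*
 * The lookup-or-create shape used by __inet_inherit_port() above (walk
 * the hash chain under the bucket lock; allocate a new bucket only when
 * nothing matches) is worth seeing in isolation.  A hedged userspace
 * sketch: a pthread mutex stands in for the bucket spinlock and the
 * struct names are stand-ins, not kernel types.
 */
#include <pthread.h>
#include <stdlib.h>

struct toy_bucket {
	unsigned short port;
	struct toy_bucket *next;
};

struct toy_chain {
	pthread_mutex_t lock;		/* per-bucket lock, as head->lock above */
	struct toy_bucket *head;
};

static struct toy_bucket *lookup_or_create(struct toy_chain *c,
					   unsigned short port)
{
	struct toy_bucket *b;

	pthread_mutex_lock(&c->lock);
	for (b = c->head; b; b = b->next)
		if (b->port == port)
			goto out;	/* existing bucket: share it */
	b = malloc(sizeof(*b));		/* kernel: kmem_cache_alloc(GFP_ATOMIC) */
	if (b) {
		b->port = port;
		b->next = c->head;	/* add at head, like hlist_add_head() */
		c->head = b;
	}
out:
	pthread_mutex_unlock(&c->lock);
	return b;
}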
172
173static inline int compute_score(struct sock *sk, struct net *net,
174				const unsigned short hnum, const __be32 daddr,
175				const int dif)
176{
177	int score = -1;
178	struct inet_sock *inet = inet_sk(sk);
179
180	if (net_eq(sock_net(sk), net) && inet->inet_num == hnum &&
181			!ipv6_only_sock(sk)) {
182		__be32 rcv_saddr = inet->inet_rcv_saddr;
183		score = sk->sk_family == PF_INET ? 2 : 1;
184		if (rcv_saddr) {
185			if (rcv_saddr != daddr)
186				return -1;
187			score += 4;
188		}
189		if (sk->sk_bound_dev_if) {
190			if (sk->sk_bound_dev_if != dif)
191				return -1;
192			score += 4;
193		}
194		if (sk->sk_incoming_cpu == raw_smp_processor_id())
195			score++;
196	}
197	return score;
198}
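/*
 * A worked model of the scoring above: base 2 for an AF_INET socket
 * (1 for an AF_INET6 one), +4 for an exact rcv_saddr match, +4 for a
 * matching bound device; the CPU-affinity +1 is omitted here.  The
 * struct below is a stand-in for struct sock, not kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_sk {
	bool ipv4;		/* sk_family == PF_INET */
	unsigned int rcv_saddr;	/* 0 means bound to INADDR_ANY */
	int bound_dev_if;	/* 0 means not device-bound */
};

static int toy_score(const struct toy_sk *sk, unsigned int daddr, int dif)
{
	int score = sk->ipv4 ? 2 : 1;

	if (sk->rcv_saddr) {
		if (sk->rcv_saddr != daddr)
			return -1;	/* bound to a different address */
		score += 4;
	}
	if (sk->bound_dev_if) {
		if (sk->bound_dev_if != dif)
			return -1;	/* bound to a different device */
		score += 4;
	}
	return score;
}

int main(void)
{
	struct toy_sk any   = { true, 0,          0 };
	struct toy_sk exact = { true, 0x7f000001, 0 };

	/* an exact bind (score 6) beats an INADDR_ANY bind (score 2) */
	printf("%d vs %d\n", toy_score(&exact, 0x7f000001, 3),
	       toy_score(&any, 0x7f000001, 3));
	return 0;
}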
199
200/*
201 * Don't inline this cruft. There are some nice properties to exploit here. The
202 * BSD API does not allow a listening sock to specify the remote port nor the
203 * remote address for the connection. So always assume those are both
204 * wildcarded during the search since they can never be otherwise.
205 */
206
207
208struct sock *__inet_lookup_listener(struct net *net,
209				    struct inet_hashinfo *hashinfo,
210				    struct sk_buff *skb, int doff,
211				    const __be32 saddr, __be16 sport,
212				    const __be32 daddr, const unsigned short hnum,
213				    const int dif)
214{
215	struct sock *sk, *result;
216	struct hlist_nulls_node *node;
217	unsigned int hash = inet_lhashfn(net, hnum);
218	struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
219	int score, hiscore, matches = 0, reuseport = 0;
220	bool select_ok = true;
221	u32 phash = 0;
222
223	rcu_read_lock();
224begin:
225	result = NULL;
226	hiscore = 0;
227	sk_nulls_for_each_rcu(sk, node, &ilb->head) {
228		score = compute_score(sk, net, hnum, daddr, dif);
229		if (score > hiscore) {
230			result = sk;
231			hiscore = score;
232			reuseport = sk->sk_reuseport;
233			if (reuseport) {
234				phash = inet_ehashfn(net, daddr, hnum,
235						     saddr, sport);
236				if (select_ok) {
237					struct sock *sk2;
238					sk2 = reuseport_select_sock(sk, phash,
239								    skb, doff);
240					if (sk2) {
241						result = sk2;
242						goto found;
243					}
244				}
245				matches = 1;
246			}
247		} else if (score == hiscore && reuseport) {
248			matches++;
249			if (reciprocal_scale(phash, matches) == 0)
250				result = sk;
251			phash = next_pseudo_random32(phash);
252		}
253	}
254	/*
255	 * if the nulls value we got at the end of this lookup is
256	 * not the expected one, we must restart lookup.
257	 * We probably met an item that was moved to another chain.
258	 */
259	if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE)
260		goto begin;
261	if (result) {
262found:
263		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
264			result = NULL;
265		else if (unlikely(compute_score(result, net, hnum, daddr,
266				  dif) < hiscore)) {
267			sock_put(result);
268			select_ok = false;
269			goto begin;
270		}
271	}
272	rcu_read_unlock();
273	return result;
274}
275EXPORT_SYMBOL_GPL(__inet_lookup_listener);
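/*
 * The lockless walk above depends on "nulls" lists: every chain ends in
 * a marker that encodes which chain it belongs to, so a reader that got
 * migrated onto another chain (sockets can be rehashed under it) sees
 * an unexpected end marker and restarts.  A userspace model that tags
 * the low pointer bit as the marker; illustrative only, not the
 * kernel's hlist_nulls implementation.
 */
#include <stddef.h>
#include <stdint.h>

struct tnode {
	int key;
	struct tnode *next;
};

#define MAKE_NULLS(id)	((struct tnode *)((((uintptr_t)(id)) << 1) | 1))
#define IS_NULLS(p)	(((uintptr_t)(p)) & 1)
#define NULLS_VALUE(p)	((unsigned long)(((uintptr_t)(p)) >> 1))

static struct tnode *toy_lookup(struct tnode *const *headp,
				unsigned long chain_id, int key)
{
	struct tnode *p;

begin:
	for (p = *headp; !IS_NULLS(p); p = p->next)
		if (p->key == key)
			return p;
	/*
	 * Hit an end marker: if it is not ours, items moved while we
	 * walked and we drifted onto another chain, so restart from our
	 * own (re-read) head -- exactly the get_nulls_value() check above.
	 */
	if (NULLS_VALUE(p) != chain_id)
		goto begin;
	return NULL;
}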
276
277/* All sockets share common refcount, but have different destructors */
278void sock_gen_put(struct sock *sk)
279{
280	if (!atomic_dec_and_test(&sk->sk_refcnt))
281		return;
282
283	if (sk->sk_state == TCP_TIME_WAIT)
284		inet_twsk_free(inet_twsk(sk));
285	else if (sk->sk_state == TCP_NEW_SYN_RECV)
286		reqsk_free(inet_reqsk(sk));
287	else
288		sk_free(sk);
289}
290EXPORT_SYMBOL_GPL(sock_gen_put);
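/*
 * sock_gen_put() works because full, timewait and request sockets share
 * the same leading refcount/state layout, so one helper can drop the
 * reference and dispatch to the right destructor.  A compact model with
 * C11 atomics; the types and states below are stand-ins.
 */
#include <stdatomic.h>
#include <stdlib.h>

enum toy_state { TOY_ESTABLISHED, TOY_TIME_WAIT, TOY_NEW_SYN_RECV };

struct toy_common {			/* shared leading layout */
	atomic_int refcnt;
	enum toy_state state;
};

static void toy_gen_put(struct toy_common *c)
{
	if (atomic_fetch_sub(&c->refcnt, 1) != 1)
		return;			/* other references remain */

	switch (c->state) {		/* last ref: pick the destructor */
	case TOY_TIME_WAIT:
		free(c);		/* kernel: inet_twsk_free() */
		break;
	case TOY_NEW_SYN_RECV:
		free(c);		/* kernel: reqsk_free() */
		break;
	default:
		free(c);		/* kernel: sk_free() */
		break;
	}
}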
291
292void sock_edemux(struct sk_buff *skb)
293{
294	sock_gen_put(skb->sk);
295}
296EXPORT_SYMBOL(sock_edemux);
297
298struct sock *__inet_lookup_established(struct net *net,
299				  struct inet_hashinfo *hashinfo,
300				  const __be32 saddr, const __be16 sport,
301				  const __be32 daddr, const u16 hnum,
302				  const int dif)
303{
304	INET_ADDR_COOKIE(acookie, saddr, daddr);
305	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
306	struct sock *sk;
307	const struct hlist_nulls_node *node;
308	/* Optimize here for direct hit, only listening connections can
309	 * have wildcards anyways.
310	 */
311	unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
312	unsigned int slot = hash & hashinfo->ehash_mask;
313	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
314
315	rcu_read_lock();
316begin:
317	sk_nulls_for_each_rcu(sk, node, &head->chain) {
318		if (sk->sk_hash != hash)
319			continue;
320		if (likely(INET_MATCH(sk, net, acookie,
321				      saddr, daddr, ports, dif))) {
322			if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
323				goto out;
324			if (unlikely(!INET_MATCH(sk, net, acookie,
325						 saddr, daddr, ports, dif))) {
326				sock_gen_put(sk);
327				goto begin;
328			}
329			goto found;
330		}
331	}
332	/*
333	 * if the nulls value we got at the end of this lookup is
334	 * not the expected one, we must restart lookup.
335	 * We probably met an item that was moved to another chain.
336	 */
337	if (get_nulls_value(node) != slot)
338		goto begin;
339out:
340	sk = NULL;
341found:
342	rcu_read_unlock();
343	return sk;
344}
345EXPORT_SYMBOL_GPL(__inet_lookup_established);
346
347/* called with local bh disabled */
348static int __inet_check_established(struct inet_timewait_death_row *death_row,
349				    struct sock *sk, __u16 lport,
350				    struct inet_timewait_sock **twp)
351{
352	struct inet_hashinfo *hinfo = death_row->hashinfo;
353	struct inet_sock *inet = inet_sk(sk);
354	__be32 daddr = inet->inet_rcv_saddr;
355	__be32 saddr = inet->inet_daddr;
356	int dif = sk->sk_bound_dev_if;
357	INET_ADDR_COOKIE(acookie, saddr, daddr);
358	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
359	struct net *net = sock_net(sk);
360	unsigned int hash = inet_ehashfn(net, daddr, lport,
361					 saddr, inet->inet_dport);
362	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
363	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
364	struct sock *sk2;
365	const struct hlist_nulls_node *node;
366	struct inet_timewait_sock *tw = NULL;
367
368	spin_lock(lock);
369
370	sk_nulls_for_each(sk2, node, &head->chain) {
371		if (sk2->sk_hash != hash)
372			continue;
373
374		if (likely(INET_MATCH(sk2, net, acookie,
375					 saddr, daddr, ports, dif))) {
376			if (sk2->sk_state == TCP_TIME_WAIT) {
377				tw = inet_twsk(sk2);
378				if (twsk_unique(sk, sk2, twp))
379					break;
380			}
381			goto not_unique;
382		}
383	}
384
385	/* Must record num and sport now. Otherwise we will see
386	 * in the hash table a socket with a funny identity.
387	 */
388	inet->inet_num = lport;
389	inet->inet_sport = htons(lport);
390	sk->sk_hash = hash;
391	WARN_ON(!sk_unhashed(sk));
392	__sk_nulls_add_node_rcu(sk, &head->chain);
393	if (tw) {
394		sk_nulls_del_node_init_rcu((struct sock *)tw);
395		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
396	}
397	spin_unlock(lock);
398	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
399
400	if (twp) {
401		*twp = tw;
402	} else if (tw) {
403		/* Silly. Should hash-dance instead... */
404		inet_twsk_deschedule_put(tw);
405	}
406	return 0;
407
408not_unique:
409	spin_unlock(lock);
410	return -EADDRNOTAVAIL;
411}
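/*
 * The -EADDRNOTAVAIL above is what userspace sees when a second socket
 * would duplicate an existing 4-tuple.  A runnable demo, assuming a
 * listener is already running on 127.0.0.1:8080 (address, ports and
 * the lack of error handling are arbitrary choices for the example):
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	struct sockaddr_in local = {
		.sin_family = AF_INET,
		.sin_port = htons(40000),
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	struct sockaddr_in dst = local;
	int one = 1, i;

	dst.sin_port = htons(8080);

	for (i = 0; i < 2; i++) {
		int fd = socket(AF_INET, SOCK_STREAM, 0);

		/* let both sockets bind the same local port */
		setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
		if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0)
			perror("bind");
		if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
			perror("connect");	/* 2nd fails: EADDRNOTAVAIL */
	}
	return 0;
}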
412
413static u32 inet_sk_port_offset(const struct sock *sk)
414{
415	const struct inet_sock *inet = inet_sk(sk);
416
417	return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
418					  inet->inet_daddr,
419					  inet->inet_dport);
420}
421
422/* Insert a socket into ehash, and eventually remove another one
423 * (the other one can be a SYN_RECV or TIMEWAIT socket)
424 */
425bool inet_ehash_insert(struct sock *sk, struct sock *osk)
426{
427	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
428	struct hlist_nulls_head *list;
429	struct inet_ehash_bucket *head;
430	spinlock_t *lock;
431	bool ret = true;
432
433	WARN_ON_ONCE(!sk_unhashed(sk));
434
435	sk->sk_hash = sk_ehashfn(sk);
436	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
437	list = &head->chain;
438	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
439
440	spin_lock(lock);
441	if (osk) {
442		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
443		ret = sk_nulls_del_node_init_rcu(osk);
444	}
445	if (ret)
446		__sk_nulls_add_node_rcu(sk, list);
447	spin_unlock(lock);
448	return ret;
449}
450
451bool inet_ehash_nolisten(struct sock *sk, struct sock *osk)
452{
453	bool ok = inet_ehash_insert(sk, osk);
454
455	if (ok) {
456		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
457	} else {
458		percpu_counter_inc(sk->sk_prot->orphan_count);
459		sk->sk_state = TCP_CLOSE;
460		sock_set_flag(sk, SOCK_DEAD);
461		inet_csk_destroy_sock(sk);
462	}
463	return ok;
464}
465EXPORT_SYMBOL_GPL(inet_ehash_nolisten);
466
467static int inet_reuseport_add_sock(struct sock *sk,
468				   struct inet_listen_hashbucket *ilb,
469				   int (*saddr_same)(const struct sock *sk1,
470						     const struct sock *sk2,
471						     bool match_wildcard))
472{
473	struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
474	struct sock *sk2;
475	struct hlist_nulls_node *node;
476	kuid_t uid = sock_i_uid(sk);
477
478	sk_nulls_for_each_rcu(sk2, node, &ilb->head) {
479		if (sk2 != sk &&
480		    sk2->sk_family == sk->sk_family &&
481		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
482		    sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
483		    inet_csk(sk2)->icsk_bind_hash == tb &&
484		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
485		    saddr_same(sk, sk2, false))
486			return reuseport_add_sock(sk, sk2);
487	}
488
489	/* Initial allocation may have already happened via setsockopt */
490	if (!rcu_access_pointer(sk->sk_reuseport_cb))
491		return reuseport_alloc(sk);
492	return 0;
493}
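/*
 * From userspace, the group assembled above is what SO_REUSEPORT
 * builds: several sockets owned by the same user can listen on one
 * port and share incoming connections.  A runnable sketch (the port
 * number is an arbitrary choice):
 */
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

static int reuseport_listener(unsigned short port)
{
	struct sockaddr_in a = {
		.sin_family = AF_INET,
		.sin_port = htons(port),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
	if (bind(fd, (struct sockaddr *)&a, sizeof(a)) || listen(fd, 128))
		return -1;
	return fd;
}

int main(void)
{
	/* both listeners join one reuseport group on port 7000 */
	int a = reuseport_listener(7000);
	int b = reuseport_listener(7000);

	printf("%d %d\n", a, b);	/* two valid fds; -1 means failure */
	return 0;
}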
494
495int __inet_hash(struct sock *sk, struct sock *osk,
496		 int (*saddr_same)(const struct sock *sk1,
497				   const struct sock *sk2,
498				   bool match_wildcard))
499{
500	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
501	struct inet_listen_hashbucket *ilb;
502	int err = 0;
503
504	if (sk->sk_state != TCP_LISTEN) {
505		inet_ehash_nolisten(sk, osk);
506		return 0;
507	}
508	WARN_ON(!sk_unhashed(sk));
509	ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
510
511	spin_lock(&ilb->lock);
512	if (sk->sk_reuseport) {
513		err = inet_reuseport_add_sock(sk, ilb, saddr_same);
514		if (err)
515			goto unlock;
516	}
517	__sk_nulls_add_node_rcu(sk, &ilb->head);
518	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
519unlock:
520	spin_unlock(&ilb->lock);
521
522	return err;
523}
524EXPORT_SYMBOL(__inet_hash);
525
526int inet_hash(struct sock *sk)
527{
528	int err = 0;
529
530	if (sk->sk_state != TCP_CLOSE) {
531		local_bh_disable();
532		err = __inet_hash(sk, NULL, ipv4_rcv_saddr_equal);
533		local_bh_enable();
534	}
535
536	return err;
537}
538EXPORT_SYMBOL_GPL(inet_hash);
539
540void inet_unhash(struct sock *sk)
541{
542	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
543	spinlock_t *lock;
544	int done;
545
546	if (sk_unhashed(sk))
547		return;
548
549	if (sk->sk_state == TCP_LISTEN)
550		lock = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)].lock;
551	else
552		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
553
554	spin_lock_bh(lock);
555	if (rcu_access_pointer(sk->sk_reuseport_cb))
556		reuseport_detach_sock(sk);
557	done = __sk_nulls_del_node_init_rcu(sk);
558	if (done)
559		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
560	spin_unlock_bh(lock);
561}
562EXPORT_SYMBOL_GPL(inet_unhash);
563
564int __inet_hash_connect(struct inet_timewait_death_row *death_row,
565		struct sock *sk, u32 port_offset,
566		int (*check_established)(struct inet_timewait_death_row *,
567			struct sock *, __u16, struct inet_timewait_sock **))
568{
569	struct inet_hashinfo *hinfo = death_row->hashinfo;
570	struct inet_timewait_sock *tw = NULL;
571	struct inet_bind_hashbucket *head;
572	int port = inet_sk(sk)->inet_num;
573	struct net *net = sock_net(sk);
574	struct inet_bind_bucket *tb;
575	u32 remaining, offset;
576	int ret, i, low, high;
577	static u32 hint;
578
579	if (port) {
580		head = &hinfo->bhash[inet_bhashfn(net, port,
581						  hinfo->bhash_size)];
582		tb = inet_csk(sk)->icsk_bind_hash;
583		spin_lock_bh(&head->lock);
584		if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
585			inet_ehash_nolisten(sk, NULL);
586			spin_unlock_bh(&head->lock);
587			return 0;
588		}
589		spin_unlock(&head->lock);
590		/* No definite answer... Walk to established hash table */
591		ret = check_established(death_row, sk, port, NULL);
592		local_bh_enable();
593		return ret;
594	}
595
596	inet_get_local_port_range(net, &low, &high);
597	high++; /* [32768, 60999] -> [32768, 61000[ */
598	remaining = high - low;
599	if (likely(remaining > 1))
600		remaining &= ~1U;
601
602	offset = (hint + port_offset) % remaining;
603	/* In first pass we try ports of @low parity.
604	 * inet_csk_get_port() does the opposite choice.
605	 */
606	offset &= ~1U;
607other_parity_scan:
608	port = low + offset;
609	for (i = 0; i < remaining; i += 2, port += 2) {
610		if (unlikely(port >= high))
611			port -= remaining;
612		if (inet_is_local_reserved_port(net, port))
613			continue;
614		head = &hinfo->bhash[inet_bhashfn(net, port,
615						  hinfo->bhash_size)];
616		spin_lock_bh(&head->lock);
617
618		/* Does not bother with rcv_saddr checks, because
619		 * the established check is already unique enough.
620		 */
621		inet_bind_bucket_for_each(tb, &head->chain) {
622			if (net_eq(ib_net(tb), net) && tb->port == port) {
623				if (tb->fastreuse >= 0 ||
624				    tb->fastreuseport >= 0)
625					goto next_port;
626				WARN_ON(hlist_empty(&tb->owners));
627				if (!check_established(death_row, sk,
628						       port, &tw))
629					goto ok;
630				goto next_port;
631			}
632		}
633
634		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
635					     net, head, port);
636		if (!tb) {
637			spin_unlock_bh(&head->lock);
638			return -ENOMEM;
639		}
640		tb->fastreuse = -1;
641		tb->fastreuseport = -1;
642		goto ok;
643next_port:
644		spin_unlock_bh(&head->lock);
645		cond_resched();
646	}
647
648	offset++;
649	if ((offset & 1) && remaining > 1)
650		goto other_parity_scan;
651
652	return -EADDRNOTAVAIL;
653
654ok:
655	hint += i + 2;
656
657	/* Head lock still held and bh's disabled */
658	inet_bind_hash(sk, tb, port);
659	if (sk_unhashed(sk)) {
660		inet_sk(sk)->inet_sport = htons(port);
661		inet_ehash_nolisten(sk, (struct sock *)tw);
662	}
663	if (tw)
664		inet_twsk_bind_unhash(tw, hinfo);
665	spin_unlock(&head->lock);
666	if (tw)
667		inet_twsk_deschedule_put(tw);
668	local_bh_enable();
669	return 0;
670}
671
672/*
673 * Bind a port for a connect operation and hash it.
674 */
675int inet_hash_connect(struct inet_timewait_death_row *death_row,
676		      struct sock *sk)
677{
678	u32 port_offset = 0;
679
680	if (!inet_sk(sk)->inet_num)
681		port_offset = inet_sk_port_offset(sk);
682	return __inet_hash_connect(death_row, sk, port_offset,
683				   __inet_check_established);
684}
685EXPORT_SYMBOL_GPL(inet_hash_connect);
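/*
 * The parity trick above halves the search on each side: connect()
 * probes ports of one parity while inet_csk_get_port() (bind) prefers
 * the other, so autobound listeners and outgoing connections mostly
 * stay out of each other's way.  A sketch of the probe sequence; the
 * range mirrors the [32768, 61000[ comment above and the offset seed
 * is arbitrary:
 */
#include <stdio.h>

int main(void)
{
	unsigned int low = 32768, high = 61000;		/* [low, high[ */
	unsigned int remaining = (high - low) & ~1U;
	unsigned int offset = 12345 % remaining;	/* hint + port_offset */
	unsigned int i, port;

	offset &= ~1U;				/* first pass: @low parity */
	for (i = 0, port = low + offset; i < 8; i += 2, port += 2) {
		if (port >= high)
			port -= remaining;
		printf("probe %u\n", port);	/* even candidates only */
	}
	return 0;
}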
686
687void inet_hashinfo_init(struct inet_hashinfo *h)
688{
689	int i;
690
691	for (i = 0; i < INET_LHTABLE_SIZE; i++) {
692		spin_lock_init(&h->listening_hash[i].lock);
693		INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head,
694				      i + LISTENING_NULLS_BASE);
695		}
696}
697EXPORT_SYMBOL_GPL(inet_hashinfo_init);
698
699int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
700{
701	unsigned int locksz = sizeof(spinlock_t);
702	unsigned int i, nblocks = 1;
703
704	if (locksz != 0) {
705		/* allocate 2 cache lines or at least one spinlock per cpu */
706		nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
707		nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());
708
709		/* no more locks than number of hash buckets */
710		nblocks = min(nblocks, hashinfo->ehash_mask + 1);
711
712		hashinfo->ehash_locks =	kmalloc_array(nblocks, locksz,
713						      GFP_KERNEL | __GFP_NOWARN);
714		if (!hashinfo->ehash_locks)
715			hashinfo->ehash_locks = vmalloc(nblocks * locksz);
716
717		if (!hashinfo->ehash_locks)
718			return -ENOMEM;
719
720		for (i = 0; i < nblocks; i++)
721			spin_lock_init(&hashinfo->ehash_locks[i]);
722	}
723	hashinfo->ehash_locks_mask = nblocks - 1;
724	return 0;
725}
726EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc);
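/*
 * A worked example of the lock sizing above, assuming 64-byte cache
 * lines, 4-byte spinlocks, 8 possible CPUs and 65536 ehash buckets:
 * 2 * 64 / 4 = 32 locks, times 8 CPUs = 256 (already a power of two),
 * clamped to the bucket count.  The constants are assumptions for the
 * arithmetic, not probed values.
 */
#include <stdio.h>

static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int cacheline = 64, locksz = 4, ncpus = 8, buckets = 65536;
	unsigned int nblocks = 2 * cacheline / locksz;	/* 32 */

	nblocks = roundup_pow2(nblocks * ncpus);	/* 256 */
	if (nblocks > buckets)
		nblocks = buckets;
	printf("%u locks, mask 0x%x\n", nblocks, nblocks - 1);
	return 0;
}
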
v6.9.4 (net/ipv4/inet_hashtables.c)
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		Generic INET transport hashtables
   8 *
   9 * Authors:	Lotsa people, from code originally in tcp
  10 */
  11
  12#include <linux/module.h>
  13#include <linux/random.h>
  14#include <linux/sched.h>
  15#include <linux/slab.h>
  16#include <linux/wait.h>
  17#include <linux/vmalloc.h>
  18#include <linux/memblock.h>
  19
  20#include <net/addrconf.h>
  21#include <net/inet_connection_sock.h>
  22#include <net/inet_hashtables.h>
  23#if IS_ENABLED(CONFIG_IPV6)
  24#include <net/inet6_hashtables.h>
  25#endif
  26#include <net/secure_seq.h>
  27#include <net/hotdata.h>
  28#include <net/ip.h>
  29#include <net/tcp.h>
  30#include <net/sock_reuseport.h>
  31
  32u32 inet_ehashfn(const struct net *net, const __be32 laddr,
  33		 const __u16 lport, const __be32 faddr,
  34		 const __be16 fport)
  35{
  36	net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));
  37
  38	return __inet_ehashfn(laddr, lport, faddr, fport,
  39			      inet_ehash_secret + net_hash_mix(net));
  40}
  41EXPORT_SYMBOL_GPL(inet_ehashfn);
  42
  43/* This function handles inet_sock, but also timewait and request sockets
  44 * for IPv4/IPv6.
  45 */
  46static u32 sk_ehashfn(const struct sock *sk)
  47{
  48#if IS_ENABLED(CONFIG_IPV6)
  49	if (sk->sk_family == AF_INET6 &&
  50	    !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
  51		return inet6_ehashfn(sock_net(sk),
  52				     &sk->sk_v6_rcv_saddr, sk->sk_num,
  53				     &sk->sk_v6_daddr, sk->sk_dport);
  54#endif
  55	return inet_ehashfn(sock_net(sk),
  56			    sk->sk_rcv_saddr, sk->sk_num,
  57			    sk->sk_daddr, sk->sk_dport);
  58}
  59
  60/*
  61 * Allocate and initialize a new local port bind bucket.
  62 * The bindhash mutex for snum's hash chain must be held here.
  63 */
  64struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
  65						 struct net *net,
  66						 struct inet_bind_hashbucket *head,
  67						 const unsigned short snum,
  68						 int l3mdev)
  69{
  70	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
  71
  72	if (tb) {
  73		write_pnet(&tb->ib_net, net);
  74		tb->l3mdev    = l3mdev;
  75		tb->port      = snum;
  76		tb->fastreuse = 0;
  77		tb->fastreuseport = 0;
  78		INIT_HLIST_HEAD(&tb->bhash2);
  79		hlist_add_head(&tb->node, &head->chain);
  80	}
  81	return tb;
  82}
  83
  84/*
  85 * Caller must hold hashbucket lock for this tb with local BH disabled
  86 */
  87void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
  88{
  89	if (hlist_empty(&tb->bhash2)) {
  90		__hlist_del(&tb->node);
  91		kmem_cache_free(cachep, tb);
  92	}
  93}
  94
  95bool inet_bind_bucket_match(const struct inet_bind_bucket *tb, const struct net *net,
  96			    unsigned short port, int l3mdev)
  97{
  98	return net_eq(ib_net(tb), net) && tb->port == port &&
  99		tb->l3mdev == l3mdev;
 100}
 101
 102static void inet_bind2_bucket_init(struct inet_bind2_bucket *tb2,
 103				   struct net *net,
 104				   struct inet_bind_hashbucket *head,
 105				   struct inet_bind_bucket *tb,
 106				   const struct sock *sk)
 107{
 108	write_pnet(&tb2->ib_net, net);
 109	tb2->l3mdev = tb->l3mdev;
 110	tb2->port = tb->port;
 111#if IS_ENABLED(CONFIG_IPV6)
 112	BUILD_BUG_ON(USHRT_MAX < (IPV6_ADDR_ANY | IPV6_ADDR_MAPPED));
 113	if (sk->sk_family == AF_INET6) {
 114		tb2->addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
 115		tb2->v6_rcv_saddr = sk->sk_v6_rcv_saddr;
 116	} else {
 117		tb2->addr_type = IPV6_ADDR_MAPPED;
 118		ipv6_addr_set_v4mapped(sk->sk_rcv_saddr, &tb2->v6_rcv_saddr);
 119	}
 120#else
 121	tb2->rcv_saddr = sk->sk_rcv_saddr;
 122#endif
 123	INIT_HLIST_HEAD(&tb2->owners);
 124	hlist_add_head(&tb2->node, &head->chain);
 125	hlist_add_head(&tb2->bhash_node, &tb->bhash2);
 126}
 127
 128struct inet_bind2_bucket *inet_bind2_bucket_create(struct kmem_cache *cachep,
 129						   struct net *net,
 130						   struct inet_bind_hashbucket *head,
 131						   struct inet_bind_bucket *tb,
 132						   const struct sock *sk)
 133{
 134	struct inet_bind2_bucket *tb2 = kmem_cache_alloc(cachep, GFP_ATOMIC);
 135
 136	if (tb2)
 137		inet_bind2_bucket_init(tb2, net, head, tb, sk);
 138
 139	return tb2;
 140}
 141
 142/* Caller must hold hashbucket lock for this tb with local BH disabled */
 143void inet_bind2_bucket_destroy(struct kmem_cache *cachep, struct inet_bind2_bucket *tb)
 144{
 145	if (hlist_empty(&tb->owners)) {
 146		__hlist_del(&tb->node);
 147		__hlist_del(&tb->bhash_node);
 148		kmem_cache_free(cachep, tb);
 149	}
 150}
 151
 152static bool inet_bind2_bucket_addr_match(const struct inet_bind2_bucket *tb2,
 153					 const struct sock *sk)
 154{
 155#if IS_ENABLED(CONFIG_IPV6)
 156	if (sk->sk_family == AF_INET6)
 157		return ipv6_addr_equal(&tb2->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
 158
 159	if (tb2->addr_type != IPV6_ADDR_MAPPED)
 160		return false;
 161#endif
 162	return tb2->rcv_saddr == sk->sk_rcv_saddr;
 163}
 164
 165void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
 166		    struct inet_bind2_bucket *tb2, unsigned short port)
 167{
 168	inet_sk(sk)->inet_num = port;
 169	inet_csk(sk)->icsk_bind_hash = tb;
 170	inet_csk(sk)->icsk_bind2_hash = tb2;
 171	sk_add_bind_node(sk, &tb2->owners);
 172}
 173
 174/*
 175 * Get rid of any references to a local port held by the given sock.
 176 */
 177static void __inet_put_port(struct sock *sk)
 178{
 179	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
 180	struct inet_bind_hashbucket *head, *head2;
 181	struct net *net = sock_net(sk);
 182	struct inet_bind_bucket *tb;
 183	int bhash;
 184
 185	bhash = inet_bhashfn(net, inet_sk(sk)->inet_num, hashinfo->bhash_size);
 186	head = &hashinfo->bhash[bhash];
 187	head2 = inet_bhashfn_portaddr(hashinfo, sk, net, inet_sk(sk)->inet_num);
 188
 189	spin_lock(&head->lock);
 190	tb = inet_csk(sk)->icsk_bind_hash;
 191	inet_csk(sk)->icsk_bind_hash = NULL;
 192	inet_sk(sk)->inet_num = 0;
 193
 194	spin_lock(&head2->lock);
 195	if (inet_csk(sk)->icsk_bind2_hash) {
 196		struct inet_bind2_bucket *tb2 = inet_csk(sk)->icsk_bind2_hash;
 197
 198		__sk_del_bind_node(sk);
 199		inet_csk(sk)->icsk_bind2_hash = NULL;
 200		inet_bind2_bucket_destroy(hashinfo->bind2_bucket_cachep, tb2);
 201	}
 202	spin_unlock(&head2->lock);
 203
 204	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
 205	spin_unlock(&head->lock);
 206}
 207
 208void inet_put_port(struct sock *sk)
 209{
 210	local_bh_disable();
 211	__inet_put_port(sk);
 212	local_bh_enable();
 213}
 214EXPORT_SYMBOL(inet_put_port);
 215
 216int __inet_inherit_port(const struct sock *sk, struct sock *child)
 217{
 218	struct inet_hashinfo *table = tcp_or_dccp_get_hashinfo(sk);
 219	unsigned short port = inet_sk(child)->inet_num;
 220	struct inet_bind_hashbucket *head, *head2;
 221	bool created_inet_bind_bucket = false;
 222	struct net *net = sock_net(sk);
 223	bool update_fastreuse = false;
 224	struct inet_bind2_bucket *tb2;
 225	struct inet_bind_bucket *tb;
 226	int bhash, l3mdev;
 227
 228	bhash = inet_bhashfn(net, port, table->bhash_size);
 229	head = &table->bhash[bhash];
 230	head2 = inet_bhashfn_portaddr(table, child, net, port);
 231
 232	spin_lock(&head->lock);
 233	spin_lock(&head2->lock);
 234	tb = inet_csk(sk)->icsk_bind_hash;
 235	tb2 = inet_csk(sk)->icsk_bind2_hash;
 236	if (unlikely(!tb || !tb2)) {
 237		spin_unlock(&head2->lock);
 238		spin_unlock(&head->lock);
 239		return -ENOENT;
 240	}
 241	if (tb->port != port) {
 242		l3mdev = inet_sk_bound_l3mdev(sk);
 243
 244		/* NOTE: using tproxy and redirecting skbs to a proxy
 245		 * on a different listener port breaks the assumption
 246		 * that the listener socket's icsk_bind_hash is the same
 247		 * as that of the child socket. We have to look up or
 248		 * create a new bind bucket for the child here. */
 249		inet_bind_bucket_for_each(tb, &head->chain) {
 250			if (inet_bind_bucket_match(tb, net, port, l3mdev))
 251				break;
 252		}
 253		if (!tb) {
 254			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
 255						     net, head, port, l3mdev);
 256			if (!tb) {
 257				spin_unlock(&head2->lock);
 258				spin_unlock(&head->lock);
 259				return -ENOMEM;
 260			}
 261			created_inet_bind_bucket = true;
 262		}
 263		update_fastreuse = true;
 264
 265		goto bhash2_find;
 266	} else if (!inet_bind2_bucket_addr_match(tb2, child)) {
 267		l3mdev = inet_sk_bound_l3mdev(sk);
 268
 269bhash2_find:
 270		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, child);
 271		if (!tb2) {
 272			tb2 = inet_bind2_bucket_create(table->bind2_bucket_cachep,
 273						       net, head2, tb, child);
 274			if (!tb2)
 275				goto error;
 276		}
 277	}
 278	if (update_fastreuse)
 279		inet_csk_update_fastreuse(tb, child);
 280	inet_bind_hash(child, tb, tb2, port);
 281	spin_unlock(&head2->lock);
 282	spin_unlock(&head->lock);
 283
 284	return 0;
 285
 286error:
 287	if (created_inet_bind_bucket)
 288		inet_bind_bucket_destroy(table->bind_bucket_cachep, tb);
 289	spin_unlock(&head2->lock);
 290	spin_unlock(&head->lock);
 291	return -ENOMEM;
 292}
 293EXPORT_SYMBOL_GPL(__inet_inherit_port);
 294
 295static struct inet_listen_hashbucket *
 296inet_lhash2_bucket_sk(struct inet_hashinfo *h, struct sock *sk)
 297{
 298	u32 hash;
 299
 300#if IS_ENABLED(CONFIG_IPV6)
 301	if (sk->sk_family == AF_INET6)
 302		hash = ipv6_portaddr_hash(sock_net(sk),
 303					  &sk->sk_v6_rcv_saddr,
 304					  inet_sk(sk)->inet_num);
 305	else
 306#endif
 307		hash = ipv4_portaddr_hash(sock_net(sk),
 308					  inet_sk(sk)->inet_rcv_saddr,
 309					  inet_sk(sk)->inet_num);
 310	return inet_lhash2_bucket(h, hash);
 311}
 312
 313static inline int compute_score(struct sock *sk, struct net *net,
 314				const unsigned short hnum, const __be32 daddr,
 315				const int dif, const int sdif)
 316{
 317	int score = -1;
 318
 319	if (net_eq(sock_net(sk), net) && sk->sk_num == hnum &&
 320			!ipv6_only_sock(sk)) {
 321		if (sk->sk_rcv_saddr != daddr)
 322			return -1;
 323
 324		if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
 325			return -1;
 326		score =  sk->sk_bound_dev_if ? 2 : 1;
 327
 328		if (sk->sk_family == PF_INET)
 329			score++;
 330		if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
 331			score++;
 332	}
 333	return score;
 334}
 335
 336/**
 337 * inet_lookup_reuseport() - execute reuseport logic on AF_INET socket if necessary.
 338 * @net: network namespace.
 339 * @sk: AF_INET socket, must be in TCP_LISTEN state for TCP or TCP_CLOSE for UDP.
 340 * @skb: context for a potential SK_REUSEPORT program.
 341 * @doff: header offset.
 342 * @saddr: source address.
 343 * @sport: source port.
 344 * @daddr: destination address.
 345 * @hnum: destination port in host byte order.
 346 * @ehashfn: hash function used to generate the fallback hash.
 347 *
 348 * Return: NULL if sk doesn't have SO_REUSEPORT set, otherwise a pointer to
 349 *         the selected sock or an error.
 350 */
 351struct sock *inet_lookup_reuseport(struct net *net, struct sock *sk,
 352				   struct sk_buff *skb, int doff,
 353				   __be32 saddr, __be16 sport,
 354				   __be32 daddr, unsigned short hnum,
 355				   inet_ehashfn_t *ehashfn)
 356{
 357	struct sock *reuse_sk = NULL;
 358	u32 phash;
 359
 360	if (sk->sk_reuseport) {
 361		phash = INDIRECT_CALL_2(ehashfn, udp_ehashfn, inet_ehashfn,
 362					net, daddr, hnum, saddr, sport);
 363		reuse_sk = reuseport_select_sock(sk, phash, skb, doff);
 364	}
 365	return reuse_sk;
 366}
 367EXPORT_SYMBOL_GPL(inet_lookup_reuseport);
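/*
 * With no BPF program attached, reuseport_select_sock() boils down to
 * picking index reciprocal_scale(phash, n) into the group's socket
 * array: ((u64)hash * n) >> 32, a multiply-shift that maps a 32-bit
 * hash uniformly onto [0, n).  A minimal model; the inputs are made up.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	uint32_t phash = 0xdeadbeef;	/* stand-in for inet_ehashfn() output */
	uint32_t nsocks = 4;		/* sockets in the reuseport group */

	printf("selected socket %u of %u\n",
	       reciprocal_scale(phash, nsocks), nsocks);
	return 0;
}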
 368
 369/*
 370 * There are some nice properties to exploit here. The BSD API
 371 * does not allow a listening sock to specify the remote port nor the
 372 * remote address for the connection. So always assume those are both
 373 * wildcarded during the search since they can never be otherwise.
 374 */
 375
 376/* called with rcu_read_lock() : No refcount taken on the socket */
 377static struct sock *inet_lhash2_lookup(struct net *net,
 378				struct inet_listen_hashbucket *ilb2,
 379				struct sk_buff *skb, int doff,
 380				const __be32 saddr, __be16 sport,
 381				const __be32 daddr, const unsigned short hnum,
 382				const int dif, const int sdif)
 383{
 384	struct sock *sk, *result = NULL;
 385	struct hlist_nulls_node *node;
 386	int score, hiscore = 0;
 387
 388	sk_nulls_for_each_rcu(sk, node, &ilb2->nulls_head) {
 389		score = compute_score(sk, net, hnum, daddr, dif, sdif);
 390		if (score > hiscore) {
 391			result = inet_lookup_reuseport(net, sk, skb, doff,
 392						       saddr, sport, daddr, hnum, inet_ehashfn);
 393			if (result)
 394				return result;
 395
 396			result = sk;
 397			hiscore = score;
 398		}
 399	}
 400
 401	return result;
 402}
 403
 404struct sock *inet_lookup_run_sk_lookup(struct net *net,
 405				       int protocol,
 406				       struct sk_buff *skb, int doff,
 407				       __be32 saddr, __be16 sport,
 408				       __be32 daddr, u16 hnum, const int dif,
 409				       inet_ehashfn_t *ehashfn)
 410{
 411	struct sock *sk, *reuse_sk;
 412	bool no_reuseport;
 413
 414	no_reuseport = bpf_sk_lookup_run_v4(net, protocol, saddr, sport,
 415					    daddr, hnum, dif, &sk);
 416	if (no_reuseport || IS_ERR_OR_NULL(sk))
 417		return sk;
 418
 419	reuse_sk = inet_lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum,
 420					 ehashfn);
 421	if (reuse_sk)
 422		sk = reuse_sk;
 423	return sk;
 424}
 425
 426struct sock *__inet_lookup_listener(struct net *net,
 427				    struct inet_hashinfo *hashinfo,
 428				    struct sk_buff *skb, int doff,
 429				    const __be32 saddr, __be16 sport,
 430				    const __be32 daddr, const unsigned short hnum,
 431				    const int dif, const int sdif)
 432{
 433	struct inet_listen_hashbucket *ilb2;
 434	struct sock *result = NULL;
 435	unsigned int hash2;
 436
 437	/* Lookup redirect from BPF */
 438	if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
 439	    hashinfo == net->ipv4.tcp_death_row.hashinfo) {
 440		result = inet_lookup_run_sk_lookup(net, IPPROTO_TCP, skb, doff,
 441						   saddr, sport, daddr, hnum, dif,
 442						   inet_ehashfn);
 443		if (result)
 444			goto done;
 445	}
 446
 447	hash2 = ipv4_portaddr_hash(net, daddr, hnum);
 448	ilb2 = inet_lhash2_bucket(hashinfo, hash2);
 449
 450	result = inet_lhash2_lookup(net, ilb2, skb, doff,
 451				    saddr, sport, daddr, hnum,
 452				    dif, sdif);
 453	if (result)
 454		goto done;
 455
 456	/* Lookup lhash2 with INADDR_ANY */
 457	hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
 458	ilb2 = inet_lhash2_bucket(hashinfo, hash2);
 459
 460	result = inet_lhash2_lookup(net, ilb2, skb, doff,
 461				    saddr, sport, htonl(INADDR_ANY), hnum,
 462				    dif, sdif);
 463done:
 464	if (IS_ERR(result))
 465		return NULL;
 466	return result;
 467}
 468EXPORT_SYMBOL_GPL(__inet_lookup_listener);
 469
 470/* All sockets share common refcount, but have different destructors */
 471void sock_gen_put(struct sock *sk)
 472{
 473	if (!refcount_dec_and_test(&sk->sk_refcnt))
 474		return;
 475
 476	if (sk->sk_state == TCP_TIME_WAIT)
 477		inet_twsk_free(inet_twsk(sk));
 478	else if (sk->sk_state == TCP_NEW_SYN_RECV)
 479		reqsk_free(inet_reqsk(sk));
 480	else
 481		sk_free(sk);
 482}
 483EXPORT_SYMBOL_GPL(sock_gen_put);
 484
 485void sock_edemux(struct sk_buff *skb)
 486{
 487	sock_gen_put(skb->sk);
 488}
 489EXPORT_SYMBOL(sock_edemux);
 490
 491struct sock *__inet_lookup_established(struct net *net,
 492				  struct inet_hashinfo *hashinfo,
 493				  const __be32 saddr, const __be16 sport,
 494				  const __be32 daddr, const u16 hnum,
 495				  const int dif, const int sdif)
 496{
 497	INET_ADDR_COOKIE(acookie, saddr, daddr);
 498	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
 499	struct sock *sk;
 500	const struct hlist_nulls_node *node;
 501	/* Optimize here for direct hit, only listening connections can
 502	 * have wildcards anyways.
 503	 */
 504	unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
 505	unsigned int slot = hash & hashinfo->ehash_mask;
 506	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
 507
 508begin:
 509	sk_nulls_for_each_rcu(sk, node, &head->chain) {
 510		if (sk->sk_hash != hash)
 511			continue;
 512		if (likely(inet_match(net, sk, acookie, ports, dif, sdif))) {
 513			if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
 514				goto out;
 515			if (unlikely(!inet_match(net, sk, acookie,
 516						 ports, dif, sdif))) {
 517				sock_gen_put(sk);
 518				goto begin;
 519			}
 520			goto found;
 521		}
 522	}
 523	/*
 524	 * if the nulls value we got at the end of this lookup is
 525	 * not the expected one, we must restart lookup.
 526	 * We probably met an item that was moved to another chain.
 527	 */
 528	if (get_nulls_value(node) != slot)
 529		goto begin;
 530out:
 531	sk = NULL;
 532found:
 533	return sk;
 534}
 535EXPORT_SYMBOL_GPL(__inet_lookup_established);
 536
 537/* called with local bh disabled */
 538static int __inet_check_established(struct inet_timewait_death_row *death_row,
 539				    struct sock *sk, __u16 lport,
 540				    struct inet_timewait_sock **twp)
 541{
 542	struct inet_hashinfo *hinfo = death_row->hashinfo;
 543	struct inet_sock *inet = inet_sk(sk);
 544	__be32 daddr = inet->inet_rcv_saddr;
 545	__be32 saddr = inet->inet_daddr;
 546	int dif = sk->sk_bound_dev_if;
 547	struct net *net = sock_net(sk);
 548	int sdif = l3mdev_master_ifindex_by_index(net, dif);
 549	INET_ADDR_COOKIE(acookie, saddr, daddr);
 550	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
 551	unsigned int hash = inet_ehashfn(net, daddr, lport,
 552					 saddr, inet->inet_dport);
 553	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
 554	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
 555	struct sock *sk2;
 556	const struct hlist_nulls_node *node;
 557	struct inet_timewait_sock *tw = NULL;
 558
 559	spin_lock(lock);
 560
 561	sk_nulls_for_each(sk2, node, &head->chain) {
 562		if (sk2->sk_hash != hash)
 563			continue;
 564
 565		if (likely(inet_match(net, sk2, acookie, ports, dif, sdif))) {
 566			if (sk2->sk_state == TCP_TIME_WAIT) {
 567				tw = inet_twsk(sk2);
 568				if (twsk_unique(sk, sk2, twp))
 569					break;
 570			}
 571			goto not_unique;
 572		}
 573	}
 574
 575	/* Must record num and sport now. Otherwise we will see
 576	 * in the hash table a socket with a funny identity.
 577	 */
 578	inet->inet_num = lport;
 579	inet->inet_sport = htons(lport);
 580	sk->sk_hash = hash;
 581	WARN_ON(!sk_unhashed(sk));
 582	__sk_nulls_add_node_rcu(sk, &head->chain);
 583	if (tw) {
 584		sk_nulls_del_node_init_rcu((struct sock *)tw);
 585		__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
 586	}
 587	spin_unlock(lock);
 588	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 589
 590	if (twp) {
 591		*twp = tw;
 592	} else if (tw) {
 593		/* Silly. Should hash-dance instead... */
 594		inet_twsk_deschedule_put(tw);
 595	}
 596	return 0;
 597
 598not_unique:
 599	spin_unlock(lock);
 600	return -EADDRNOTAVAIL;
 601}
 602
 603static u64 inet_sk_port_offset(const struct sock *sk)
 604{
 605	const struct inet_sock *inet = inet_sk(sk);
 606
 607	return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
 608					  inet->inet_daddr,
 609					  inet->inet_dport);
 610}
 611
 612/* Searches for an existing socket in the ehash bucket list.
 613 * Returns true if found, false otherwise.
 614 */
 615static bool inet_ehash_lookup_by_sk(struct sock *sk,
 616				    struct hlist_nulls_head *list)
 617{
 618	const __portpair ports = INET_COMBINED_PORTS(sk->sk_dport, sk->sk_num);
 619	const int sdif = sk->sk_bound_dev_if;
 620	const int dif = sk->sk_bound_dev_if;
 621	const struct hlist_nulls_node *node;
 622	struct net *net = sock_net(sk);
 623	struct sock *esk;
 624
 625	INET_ADDR_COOKIE(acookie, sk->sk_daddr, sk->sk_rcv_saddr);
 626
 627	sk_nulls_for_each_rcu(esk, node, list) {
 628		if (esk->sk_hash != sk->sk_hash)
 629			continue;
 630		if (sk->sk_family == AF_INET) {
 631			if (unlikely(inet_match(net, esk, acookie,
 632						ports, dif, sdif))) {
 633				return true;
 634			}
 635		}
 636#if IS_ENABLED(CONFIG_IPV6)
 637		else if (sk->sk_family == AF_INET6) {
 638			if (unlikely(inet6_match(net, esk,
 639						 &sk->sk_v6_daddr,
 640						 &sk->sk_v6_rcv_saddr,
 641						 ports, dif, sdif))) {
 642				return true;
 643			}
 644		}
 645#endif
 646	}
 647	return false;
 648}
 649
 650/* Insert a socket into ehash, and eventually remove another one
 651 * (the other one can be a SYN_RECV or TIMEWAIT socket).
 652 * If a matching socket already exists, sk is not inserted and the
 653 * found_dup_sk parameter is set to true.
 654 */
 655bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
 656{
 657	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
 658	struct inet_ehash_bucket *head;
 659	struct hlist_nulls_head *list;
 660	spinlock_t *lock;
 661	bool ret = true;
 662
 663	WARN_ON_ONCE(!sk_unhashed(sk));
 664
 665	sk->sk_hash = sk_ehashfn(sk);
 666	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
 667	list = &head->chain;
 668	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
 669
 670	spin_lock(lock);
 671	if (osk) {
 672		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
 673		ret = sk_nulls_del_node_init_rcu(osk);
 674	} else if (found_dup_sk) {
 675		*found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
 676		if (*found_dup_sk)
 677			ret = false;
 678	}
 679
 680	if (ret)
 681		__sk_nulls_add_node_rcu(sk, list);
 682
 683	spin_unlock(lock);
 684
 685	return ret;
 686}
 687
 688bool inet_ehash_nolisten(struct sock *sk, struct sock *osk, bool *found_dup_sk)
 689{
 690	bool ok = inet_ehash_insert(sk, osk, found_dup_sk);
 691
 692	if (ok) {
 693		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 694	} else {
 695		this_cpu_inc(*sk->sk_prot->orphan_count);
 696		inet_sk_set_state(sk, TCP_CLOSE);
 697		sock_set_flag(sk, SOCK_DEAD);
 698		inet_csk_destroy_sock(sk);
 699	}
 700	return ok;
 701}
 702EXPORT_SYMBOL_GPL(inet_ehash_nolisten);
 703
 704static int inet_reuseport_add_sock(struct sock *sk,
 705				   struct inet_listen_hashbucket *ilb)
 706{
 707	struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
 708	const struct hlist_nulls_node *node;
 709	struct sock *sk2;
 710	kuid_t uid = sock_i_uid(sk);
 711
 712	sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
 713		if (sk2 != sk &&
 714		    sk2->sk_family == sk->sk_family &&
 715		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
 716		    sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
 717		    inet_csk(sk2)->icsk_bind_hash == tb &&
 718		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
 719		    inet_rcv_saddr_equal(sk, sk2, false))
 720			return reuseport_add_sock(sk, sk2,
 721						  inet_rcv_saddr_any(sk));
 722	}
 723
 724	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
 725}
 726
 727int __inet_hash(struct sock *sk, struct sock *osk)
 728{
 729	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
 730	struct inet_listen_hashbucket *ilb2;
 731	int err = 0;
 732
 733	if (sk->sk_state != TCP_LISTEN) {
 734		local_bh_disable();
 735		inet_ehash_nolisten(sk, osk, NULL);
 736		local_bh_enable();
 737		return 0;
 738	}
 739	WARN_ON(!sk_unhashed(sk));
 740	ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);
 741
 742	spin_lock(&ilb2->lock);
 743	if (sk->sk_reuseport) {
 744		err = inet_reuseport_add_sock(sk, ilb2);
 745		if (err)
 746			goto unlock;
 747	}
 748	sock_set_flag(sk, SOCK_RCU_FREE);
 749	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
 750		sk->sk_family == AF_INET6)
 751		__sk_nulls_add_node_tail_rcu(sk, &ilb2->nulls_head);
 752	else
 753		__sk_nulls_add_node_rcu(sk, &ilb2->nulls_head);
 754	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 755unlock:
 756	spin_unlock(&ilb2->lock);
 757
 758	return err;
 759}
 760EXPORT_SYMBOL(__inet_hash);
 761
 762int inet_hash(struct sock *sk)
 763{
 764	int err = 0;
 765
 766	if (sk->sk_state != TCP_CLOSE)
 767		err = __inet_hash(sk, NULL);
 768
 769	return err;
 770}
 771EXPORT_SYMBOL_GPL(inet_hash);
 772
 773void inet_unhash(struct sock *sk)
 774{
 775	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
 776
 777	if (sk_unhashed(sk))
 778		return;
 779
 780	if (sk->sk_state == TCP_LISTEN) {
 781		struct inet_listen_hashbucket *ilb2;
 782
 783		ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);
 784		/* Don't disable bottom halves while acquiring the lock to
 785		 * avoid circular locking dependency on PREEMPT_RT.
 786		 */
 787		spin_lock(&ilb2->lock);
 788		if (sk_unhashed(sk)) {
 789			spin_unlock(&ilb2->lock);
 790			return;
 791		}
 792
 793		if (rcu_access_pointer(sk->sk_reuseport_cb))
 794			reuseport_stop_listen_sock(sk);
 795
 796		__sk_nulls_del_node_init_rcu(sk);
 797		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 798		spin_unlock(&ilb2->lock);
 799	} else {
 800		spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
 801
 802		spin_lock_bh(lock);
 803		if (sk_unhashed(sk)) {
 804			spin_unlock_bh(lock);
 805			return;
 806		}
 807		__sk_nulls_del_node_init_rcu(sk);
 808		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 809		spin_unlock_bh(lock);
 810	}
 811}
 812EXPORT_SYMBOL_GPL(inet_unhash);
 813
 814static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb,
 815				    const struct net *net, unsigned short port,
 816				    int l3mdev, const struct sock *sk)
 817{
 818	if (!net_eq(ib2_net(tb), net) || tb->port != port ||
 819	    tb->l3mdev != l3mdev)
 820		return false;
 821
 822	return inet_bind2_bucket_addr_match(tb, sk);
 823}
 824
 825bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
 826				      unsigned short port, int l3mdev, const struct sock *sk)
 827{
 828	if (!net_eq(ib2_net(tb), net) || tb->port != port ||
 829	    tb->l3mdev != l3mdev)
 830		return false;
 831
 832#if IS_ENABLED(CONFIG_IPV6)
 833	if (tb->addr_type == IPV6_ADDR_ANY)
 834		return true;
 835
 836	if (tb->addr_type != IPV6_ADDR_MAPPED)
 837		return false;
 838
 839	if (sk->sk_family == AF_INET6 &&
 840	    !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
 841		return false;
 842#endif
 843	return tb->rcv_saddr == 0;
 844}
 845
 846/* The socket's bhash2 hashbucket spinlock must be held when this is called */
 847struct inet_bind2_bucket *
 848inet_bind2_bucket_find(const struct inet_bind_hashbucket *head, const struct net *net,
 849		       unsigned short port, int l3mdev, const struct sock *sk)
 850{
 851	struct inet_bind2_bucket *bhash2 = NULL;
 852
 853	inet_bind_bucket_for_each(bhash2, &head->chain)
 854		if (inet_bind2_bucket_match(bhash2, net, port, l3mdev, sk))
 855			break;
 856
 857	return bhash2;
 858}
 859
 860struct inet_bind_hashbucket *
 861inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, int port)
 862{
 863	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
 864	u32 hash;
 865
 866#if IS_ENABLED(CONFIG_IPV6)
 867	if (sk->sk_family == AF_INET6)
 868		hash = ipv6_portaddr_hash(net, &in6addr_any, port);
 869	else
 870#endif
 871		hash = ipv4_portaddr_hash(net, 0, port);
 872
 873	return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
 874}
 875
 876static void inet_update_saddr(struct sock *sk, void *saddr, int family)
 877{
 878	if (family == AF_INET) {
 879		inet_sk(sk)->inet_saddr = *(__be32 *)saddr;
 880		sk_rcv_saddr_set(sk, inet_sk(sk)->inet_saddr);
 881	}
 882#if IS_ENABLED(CONFIG_IPV6)
 883	else {
 884		sk->sk_v6_rcv_saddr = *(struct in6_addr *)saddr;
 885	}
 886#endif
 887}
 888
 889static int __inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family, bool reset)
 890{
 891	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
 892	struct inet_bind_hashbucket *head, *head2;
 893	struct inet_bind2_bucket *tb2, *new_tb2;
 894	int l3mdev = inet_sk_bound_l3mdev(sk);
 895	int port = inet_sk(sk)->inet_num;
 896	struct net *net = sock_net(sk);
 897	int bhash;
 898
 899	if (!inet_csk(sk)->icsk_bind2_hash) {
 900		/* Not bind()ed before. */
 901		if (reset)
 902			inet_reset_saddr(sk);
 903		else
 904			inet_update_saddr(sk, saddr, family);
 905
 906		return 0;
 907	}
 908
 909	/* Allocate a bind2 bucket ahead of time to avoid permanently putting
 910	 * the bhash2 table in an inconsistent state if a new tb2 bucket
 911	 * allocation fails.
 912	 */
 913	new_tb2 = kmem_cache_alloc(hinfo->bind2_bucket_cachep, GFP_ATOMIC);
 914	if (!new_tb2) {
 915		if (reset) {
 916			/* The (INADDR_ANY, port) bucket might have already
 917			 * been freed, then we cannot fixup icsk_bind2_hash,
 918			 * so we give up and unlink sk from bhash/bhash2 not
 919			 * to leave inconsistency in bhash2.
 920			 */
 921			inet_put_port(sk);
 922			inet_reset_saddr(sk);
 923		}
 924
 925		return -ENOMEM;
 926	}
 927
 928	bhash = inet_bhashfn(net, port, hinfo->bhash_size);
 929	head = &hinfo->bhash[bhash];
 930	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
 931
 932	/* If we change saddr locklessly, another thread
 933	 * iterating over bhash might see corrupted address.
 934	 */
 935	spin_lock_bh(&head->lock);
 936
 937	spin_lock(&head2->lock);
 938	__sk_del_bind_node(sk);
 939	inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, inet_csk(sk)->icsk_bind2_hash);
 940	spin_unlock(&head2->lock);
 941
 942	if (reset)
 943		inet_reset_saddr(sk);
 944	else
 945		inet_update_saddr(sk, saddr, family);
 946
 947	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
 948
 949	spin_lock(&head2->lock);
 950	tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
 951	if (!tb2) {
 952		tb2 = new_tb2;
 953		inet_bind2_bucket_init(tb2, net, head2, inet_csk(sk)->icsk_bind_hash, sk);
 954	}
 955	inet_csk(sk)->icsk_bind2_hash = tb2;
 956	sk_add_bind_node(sk, &tb2->owners);
 957	spin_unlock(&head2->lock);
 958
 959	spin_unlock_bh(&head->lock);
 960
 961	if (tb2 != new_tb2)
 962		kmem_cache_free(hinfo->bind2_bucket_cachep, new_tb2);
 963
 964	return 0;
 965}
 966
 967int inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family)
 968{
 969	return __inet_bhash2_update_saddr(sk, saddr, family, false);
 970}
 971EXPORT_SYMBOL_GPL(inet_bhash2_update_saddr);
 972
 973void inet_bhash2_reset_saddr(struct sock *sk)
 974{
 975	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
 976		__inet_bhash2_update_saddr(sk, NULL, 0, true);
 977}
 978EXPORT_SYMBOL_GPL(inet_bhash2_reset_saddr);
 979
 980/* RFC 6056 3.3.4.  Algorithm 4: Double-Hash Port Selection Algorithm
 981 * Note that we use 32bit integers (vs RFC 'short integers')
 982 * because 2^16 is not a multiple of num_ephemeral and this
 983 * property might be used by a clever attacker.
 984 *
 985 * RFC claims using TABLE_LENGTH=10 buckets gives an improvement, though
 986 * attacks have since been demonstrated, thus we use 65536 by default
 987 * instead to really give more isolation and privacy, at the expense
 988 * of 256kB of kernel memory.
 989 */
 990#define INET_TABLE_PERTURB_SIZE (1 << CONFIG_INET_TABLE_PERTURB_ORDER)
 991static u32 *table_perturb;
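/*
 * A compact model of RFC 6056 Algorithm 4 as wired up below: one keyed
 * hash of the destination picks the starting offset, a second selects
 * a perturbation-table slot whose counter advances after every
 * successful bind, so repeated connects to one destination spread out
 * while different destinations stay decorrelated.  The hash input and
 * table size here are stand-ins (the kernel default is 1 << 16), and
 * the parity handling of the real scan is omitted.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_TABLE_SIZE 256
static uint32_t toy_perturb[TOY_TABLE_SIZE];

static unsigned int pick_port(uint64_t f, unsigned int low,
			      unsigned int remaining)
{
	unsigned int index = f & (TOY_TABLE_SIZE - 1);		/* G(tuple) */
	unsigned int offset = (toy_perturb[index] +
			       (uint32_t)(f >> 32)) % remaining;

	toy_perturb[index] += 2;	/* advance for the next connect */
	return low + offset;
}

int main(void)
{
	uint64_t f = 0x1234567890abcdefULL;	/* secure port_offset stand-in */
	int i;

	for (i = 0; i < 4; i++)
		printf("candidate %u\n", pick_port(f, 32768, 28232));
	return 0;
}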
 992
 993int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 994		struct sock *sk, u64 port_offset,
 995		int (*check_established)(struct inet_timewait_death_row *,
 996			struct sock *, __u16, struct inet_timewait_sock **))
 997{
 998	struct inet_hashinfo *hinfo = death_row->hashinfo;
 999	struct inet_bind_hashbucket *head, *head2;
1000	struct inet_timewait_sock *tw = NULL;
1001	int port = inet_sk(sk)->inet_num;
1002	struct net *net = sock_net(sk);
1003	struct inet_bind2_bucket *tb2;
1004	struct inet_bind_bucket *tb;
1005	bool tb_created = false;
1006	u32 remaining, offset;
1007	int ret, i, low, high;
1008	bool local_ports;
1009	int step, l3mdev;
1010	u32 index;
1011
1012	if (port) {
1013		local_bh_disable();
1014		ret = check_established(death_row, sk, port, NULL);
1015		local_bh_enable();
1016		return ret;
1017	}
1018
1019	l3mdev = inet_sk_bound_l3mdev(sk);
1020
1021	local_ports = inet_sk_get_local_port_range(sk, &low, &high);
1022	step = local_ports ? 1 : 2;
1023
1024	high++; /* [32768, 60999] -> [32768, 61000[ */
1025	remaining = high - low;
1026	if (!local_ports && remaining > 1)
1027		remaining &= ~1U;
1028
1029	get_random_sleepable_once(table_perturb,
1030				  INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
1031	index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);
1032
1033	offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
1034	offset %= remaining;
1035
1036	/* In first pass we try ports of @low parity.
1037	 * inet_csk_get_port() does the opposite choice.
1038	 */
1039	if (!local_ports)
1040		offset &= ~1U;
1041other_parity_scan:
1042	port = low + offset;
1043	for (i = 0; i < remaining; i += step, port += step) {
1044		if (unlikely(port >= high))
1045			port -= remaining;
1046		if (inet_is_local_reserved_port(net, port))
1047			continue;
1048		head = &hinfo->bhash[inet_bhashfn(net, port,
1049						  hinfo->bhash_size)];
1050		spin_lock_bh(&head->lock);
1051
1052		/* Does not bother with rcv_saddr checks, because
1053		 * the established check is already unique enough.
1054		 */
1055		inet_bind_bucket_for_each(tb, &head->chain) {
1056			if (inet_bind_bucket_match(tb, net, port, l3mdev)) {
1057				if (tb->fastreuse >= 0 ||
1058				    tb->fastreuseport >= 0)
1059					goto next_port;
1060				WARN_ON(hlist_empty(&tb->bhash2));
1061				if (!check_established(death_row, sk,
1062						       port, &tw))
1063					goto ok;
1064				goto next_port;
1065			}
1066		}
1067
1068		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
1069					     net, head, port, l3mdev);
1070		if (!tb) {
1071			spin_unlock_bh(&head->lock);
1072			return -ENOMEM;
1073		}
1074		tb_created = true;
1075		tb->fastreuse = -1;
1076		tb->fastreuseport = -1;
1077		goto ok;
1078next_port:
1079		spin_unlock_bh(&head->lock);
1080		cond_resched();
1081	}
1082
1083	if (!local_ports) {
1084		offset++;
1085		if ((offset & 1) && remaining > 1)
1086			goto other_parity_scan;
1087	}
1088	return -EADDRNOTAVAIL;
1089
1090ok:
1091	/* Find the corresponding tb2 bucket since we need to
1092	 * add the socket to the bhash2 table as well
1093	 */
1094	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
1095	spin_lock(&head2->lock);
1096
1097	tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
1098	if (!tb2) {
1099		tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep, net,
1100					       head2, tb, sk);
1101		if (!tb2)
1102			goto error;
1103	}
1104
1105	/* Here we want to add a little bit of randomness to the next source
1106	 * port that will be chosen. We use a max() with a random here so that
1107	 * on low contention the randomness is maximal and on high contention
1108	 * it may be nonexistent.
1109	 */
1110	i = max_t(int, i, get_random_u32_below(8) * step);
1111	WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + step);
1112
1113	/* Head lock still held and bh's disabled */
1114	inet_bind_hash(sk, tb, tb2, port);
1115
1116	if (sk_unhashed(sk)) {
1117		inet_sk(sk)->inet_sport = htons(port);
1118		inet_ehash_nolisten(sk, (struct sock *)tw, NULL);
1119	}
1120	if (tw)
1121		inet_twsk_bind_unhash(tw, hinfo);
1122
1123	spin_unlock(&head2->lock);
1124	spin_unlock(&head->lock);
1125
1126	if (tw)
1127		inet_twsk_deschedule_put(tw);
1128	local_bh_enable();
1129	return 0;
1130
1131error:
1132	if (sk_hashed(sk)) {
1133		spinlock_t *lock = inet_ehash_lockp(hinfo, sk->sk_hash);
1134
1135		sock_prot_inuse_add(net, sk->sk_prot, -1);
1136
1137		spin_lock(lock);
1138		__sk_nulls_del_node_init_rcu(sk);
1139		spin_unlock(lock);
1140
1141		sk->sk_hash = 0;
1142		inet_sk(sk)->inet_sport = 0;
1143		inet_sk(sk)->inet_num = 0;
1144
1145		if (tw)
1146			inet_twsk_bind_unhash(tw, hinfo);
1147	}
1148
1149	spin_unlock(&head2->lock);
1150	if (tb_created)
1151		inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
1152	spin_unlock(&head->lock);
1153
1154	if (tw)
1155		inet_twsk_deschedule_put(tw);
1156
1157	local_bh_enable();
1158
1159	return -ENOMEM;
1160}
1161
1162/*
1163 * Bind a port for a connect operation and hash it.
1164 */
1165int inet_hash_connect(struct inet_timewait_death_row *death_row,
1166		      struct sock *sk)
1167{
1168	u64 port_offset = 0;
1169
1170	if (!inet_sk(sk)->inet_num)
1171		port_offset = inet_sk_port_offset(sk);
1172	return __inet_hash_connect(death_row, sk, port_offset,
1173				   __inet_check_established);
1174}
1175EXPORT_SYMBOL_GPL(inet_hash_connect);
1176
1177static void init_hashinfo_lhash2(struct inet_hashinfo *h)
1178{
1179	int i;
1180
1181	for (i = 0; i <= h->lhash2_mask; i++) {
1182		spin_lock_init(&h->lhash2[i].lock);
1183		INIT_HLIST_NULLS_HEAD(&h->lhash2[i].nulls_head,
1184				      i + LISTENING_NULLS_BASE);
1185	}
1186}
1187
1188void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
1189				unsigned long numentries, int scale,
1190				unsigned long low_limit,
1191				unsigned long high_limit)
1192{
1193	h->lhash2 = alloc_large_system_hash(name,
1194					    sizeof(*h->lhash2),
1195					    numentries,
1196					    scale,
1197					    0,
1198					    NULL,
1199					    &h->lhash2_mask,
1200					    low_limit,
1201					    high_limit);
1202	init_hashinfo_lhash2(h);
1203
1204	/* this one is used for source ports of outgoing connections */
1205	table_perturb = alloc_large_system_hash("Table-perturb",
1206						sizeof(*table_perturb),
1207						INET_TABLE_PERTURB_SIZE,
1208						0, 0, NULL, NULL,
1209						INET_TABLE_PERTURB_SIZE,
1210						INET_TABLE_PERTURB_SIZE);
1211}
1212
1213int inet_hashinfo2_init_mod(struct inet_hashinfo *h)
1214{
1215	h->lhash2 = kmalloc_array(INET_LHTABLE_SIZE, sizeof(*h->lhash2), GFP_KERNEL);
1216	if (!h->lhash2)
1217		return -ENOMEM;
1218
1219	h->lhash2_mask = INET_LHTABLE_SIZE - 1;
1220	/* INET_LHTABLE_SIZE must be a power of 2 */
1221	BUG_ON(INET_LHTABLE_SIZE & h->lhash2_mask);
1222
1223	init_hashinfo_lhash2(h);
1224	return 0;
1225}
1226EXPORT_SYMBOL_GPL(inet_hashinfo2_init_mod);
1227
1228int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
1229{
1230	unsigned int locksz = sizeof(spinlock_t);
1231	unsigned int i, nblocks = 1;
1232
1233	if (locksz != 0) {
1234		/* allocate 2 cache lines or at least one spinlock per cpu */
1235		nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
1236		nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());
1237
1238		/* no more locks than number of hash buckets */
1239		nblocks = min(nblocks, hashinfo->ehash_mask + 1);
1240
1241		hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL);
1242		if (!hashinfo->ehash_locks)
1243			return -ENOMEM;
1244
1245		for (i = 0; i < nblocks; i++)
1246			spin_lock_init(&hashinfo->ehash_locks[i]);
1247	}
1248	hashinfo->ehash_locks_mask = nblocks - 1;
1249	return 0;
1250}
1251EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc);
1252
1253struct inet_hashinfo *inet_pernet_hashinfo_alloc(struct inet_hashinfo *hashinfo,
1254						 unsigned int ehash_entries)
1255{
1256	struct inet_hashinfo *new_hashinfo;
1257	int i;
1258
1259	new_hashinfo = kmemdup(hashinfo, sizeof(*hashinfo), GFP_KERNEL);
1260	if (!new_hashinfo)
1261		goto err;
1262
1263	new_hashinfo->ehash = vmalloc_huge(ehash_entries * sizeof(struct inet_ehash_bucket),
1264					   GFP_KERNEL_ACCOUNT);
1265	if (!new_hashinfo->ehash)
1266		goto free_hashinfo;
1267
1268	new_hashinfo->ehash_mask = ehash_entries - 1;
1269
1270	if (inet_ehash_locks_alloc(new_hashinfo))
1271		goto free_ehash;
1272
1273	for (i = 0; i < ehash_entries; i++)
1274		INIT_HLIST_NULLS_HEAD(&new_hashinfo->ehash[i].chain, i);
1275
1276	new_hashinfo->pernet = true;
1277
1278	return new_hashinfo;
1279
1280free_ehash:
1281	vfree(new_hashinfo->ehash);
1282free_hashinfo:
1283	kfree(new_hashinfo);
1284err:
1285	return NULL;
1286}
1287EXPORT_SYMBOL_GPL(inet_pernet_hashinfo_alloc);
1288
1289void inet_pernet_hashinfo_free(struct inet_hashinfo *hashinfo)
1290{
1291	if (!hashinfo->pernet)
1292		return;
1293
1294	inet_ehash_locks_free(hashinfo);
1295	vfree(hashinfo->ehash);
1296	kfree(hashinfo);
1297}
1298EXPORT_SYMBOL_GPL(inet_pernet_hashinfo_free);