v3.1
 
  1/*
  2 * NET		Generic infrastructure for Network protocols.
  3 *
  4 *		Definitions for request_sock 
  5 *
  6 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
  7 *
  8 * 		From code originally in include/net/tcp.h
  9 *
 10 *		This program is free software; you can redistribute it and/or
 11 *		modify it under the terms of the GNU General Public License
 12 *		as published by the Free Software Foundation; either version
 13 *		2 of the License, or (at your option) any later version.
 14 */
 15#ifndef _REQUEST_SOCK_H
 16#define _REQUEST_SOCK_H
 17
 18#include <linux/slab.h>
 19#include <linux/spinlock.h>
 20#include <linux/types.h>
 21#include <linux/bug.h>
 22
 23#include <net/sock.h>
 24
 25struct request_sock;
 26struct sk_buff;
 27struct dst_entry;
 28struct proto;
 29
 30/* empty to "strongly type" an otherwise void parameter.
 31 */
 32struct request_values {
 33};
 34
 35struct request_sock_ops {
 36	int		family;
 37	int		obj_size;
 38	struct kmem_cache	*slab;
 39	char		*slab_name;
 40	int		(*rtx_syn_ack)(struct sock *sk,
 41				       struct request_sock *req,
 42				       struct request_values *rvp);
 43	void		(*send_ack)(struct sock *sk, struct sk_buff *skb,
 44				    struct request_sock *req);
 45	void		(*send_reset)(struct sock *sk,
 46				      struct sk_buff *skb);
 47	void		(*destructor)(struct request_sock *req);
 48	void		(*syn_ack_timeout)(struct sock *sk,
 49					   struct request_sock *req);
 50};
 51
 52/* struct request_sock - mini sock to represent a connection request
 53 */
 54struct request_sock {
 55	struct request_sock		*dl_next; /* Must be first member! */
 56	u16				mss;
 57	u8				retrans;
 58	u8				cookie_ts; /* syncookie: encode tcpopts in timestamp */
 59	/* The following two fields can be easily recomputed I think -AK */
 60	u32				window_clamp; /* window clamp at creation time */
 61	u32				rcv_wnd;	  /* rcv_wnd offered first time */
 62	u32				ts_recent;
 63	unsigned long			expires;
 64	const struct request_sock_ops	*rsk_ops;
 65	struct sock			*sk;
 66	u32				secid;
 67	u32				peer_secid;
 68};
 69
 70static inline struct request_sock *reqsk_alloc(const struct request_sock_ops *ops)
 71{
 72	struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);
 73
 74	if (req != NULL)
 75		req->rsk_ops = ops;
 76
 77	return req;
 78}
 79
 80static inline void __reqsk_free(struct request_sock *req)
 81{
 82	kmem_cache_free(req->rsk_ops->slab, req);
 83}
 84
 85static inline void reqsk_free(struct request_sock *req)
 86{
 87	req->rsk_ops->destructor(req);
 88	__reqsk_free(req);
 89}
 90
 91extern int sysctl_max_syn_backlog;
 92
 93/** struct listen_sock - listen state
 94 *
 95 * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
 96 */
 97struct listen_sock {
 98	u8			max_qlen_log;
 99	u8			synflood_warned;
100	/* 2 bytes hole, try to use */
101	int			qlen;
102	int			qlen_young;
103	int			clock_hand;
104	u32			hash_rnd;
105	u32			nr_table_entries;
106	struct request_sock	*syn_table[0];
107};
108
109/** struct request_sock_queue - queue of request_socks
110 *
111 * @rskq_accept_head - FIFO head of established children
112 * @rskq_accept_tail - FIFO tail of established children
113 * @rskq_defer_accept - User waits for some data after accept()
114 * @syn_wait_lock - serializer
115 *
116 *		%syn_wait_lock is necessary only to avoid the proc interface having to grab the main
117 *		sock lock while browsing the listening hash (otherwise it's deadlock prone).
118 *
119 *		This lock is acquired in read mode only from the listening_get_next() seq_file
120 *		op and it's acquired in write mode _only_ from code that is actively
121 *		changing rskq_accept_head. Readers that already hold the master sock lock
122 *		don't need to take it in read mode as well, since rskq_accept_head writes
123 *		are always protected by the main sock lock.
124 */
125struct request_sock_queue {
126	struct request_sock	*rskq_accept_head;
127	struct request_sock	*rskq_accept_tail;
128	rwlock_t		syn_wait_lock;
129	u8			rskq_defer_accept;
130	/* 3 bytes hole, try to pack */
131	struct listen_sock	*listen_opt;
132};
133
134extern int reqsk_queue_alloc(struct request_sock_queue *queue,
135			     unsigned int nr_table_entries);
136
137extern void __reqsk_queue_destroy(struct request_sock_queue *queue);
138extern void reqsk_queue_destroy(struct request_sock_queue *queue);
139
140static inline struct request_sock *
141	reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
142{
143	struct request_sock *req = queue->rskq_accept_head;
144
145	queue->rskq_accept_head = NULL;
146	return req;
147}
148
149static inline int reqsk_queue_empty(struct request_sock_queue *queue)
150{
151	return queue->rskq_accept_head == NULL;
152}
153
154static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
155				      struct request_sock *req,
156				      struct request_sock **prev_req)
157{
158	write_lock(&queue->syn_wait_lock);
159	*prev_req = req->dl_next;
160	write_unlock(&queue->syn_wait_lock);
161}
162
163static inline void reqsk_queue_add(struct request_sock_queue *queue,
164				   struct request_sock *req,
165				   struct sock *parent,
166				   struct sock *child)
167{
168	req->sk = child;
169	sk_acceptq_added(parent);
170
171	if (queue->rskq_accept_head == NULL)
172		queue->rskq_accept_head = req;
173	else
174		queue->rskq_accept_tail->dl_next = req;
175
176	queue->rskq_accept_tail = req;
177	req->dl_next = NULL;
178}
179
180static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue)
181{
182	struct request_sock *req = queue->rskq_accept_head;
183
184	WARN_ON(req == NULL);
185
186	queue->rskq_accept_head = req->dl_next;
187	if (queue->rskq_accept_head == NULL)
188		queue->rskq_accept_tail = NULL;
189
190	return req;
191}
192
193static inline struct sock *reqsk_queue_get_child(struct request_sock_queue *queue,
194						 struct sock *parent)
195{
196	struct request_sock *req = reqsk_queue_remove(queue);
197	struct sock *child = req->sk;
198
199	WARN_ON(child == NULL);
200
201	sk_acceptq_removed(parent);
202	__reqsk_free(req);
203	return child;
204}
205
206static inline int reqsk_queue_removed(struct request_sock_queue *queue,
207				      struct request_sock *req)
208{
209	struct listen_sock *lopt = queue->listen_opt;
210
211	if (req->retrans == 0)
212		--lopt->qlen_young;
213
214	return --lopt->qlen;
215}
216
217static inline int reqsk_queue_added(struct request_sock_queue *queue)
218{
219	struct listen_sock *lopt = queue->listen_opt;
220	const int prev_qlen = lopt->qlen;
221
222	lopt->qlen_young++;
223	lopt->qlen++;
224	return prev_qlen;
225}
226
227static inline int reqsk_queue_len(const struct request_sock_queue *queue)
228{
229	return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
230}
231
232static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
233{
234	return queue->listen_opt->qlen_young;
235}
236
237static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
238{
239	return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
240}
241
242static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
243					u32 hash, struct request_sock *req,
244					unsigned long timeout)
245{
246	struct listen_sock *lopt = queue->listen_opt;
247
248	req->expires = jiffies + timeout;
249	req->retrans = 0;
250	req->sk = NULL;
251	req->dl_next = lopt->syn_table[hash];
252
253	write_lock(&queue->syn_wait_lock);
254	lopt->syn_table[hash] = req;
255	write_unlock(&queue->syn_wait_lock);
256}
257
258#endif /* _REQUEST_SOCK_H */
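
The v3.1 header above keeps completed connection requests in a singly linked FIFO: reqsk_queue_add() appends at rskq_accept_tail and reqsk_queue_remove() pops from rskq_accept_head. The stand-alone user-space sketch below mirrors only that list discipline; it is not kernel code, and every name in it (toy_req, toy_queue, and friends) is hypothetical.

/* Minimal user-space sketch of the singly linked FIFO accept queue
 * used above: new requests are appended at the tail, accept() takes
 * them from the head.  Not kernel code; all names are hypothetical.
 */
#include <assert.h>
#include <stdio.h>

struct toy_req {
	struct toy_req *dl_next;	/* next request in the FIFO */
	int id;				/* stand-in for the child socket */
};

struct toy_queue {
	struct toy_req *head;		/* oldest request, removed first */
	struct toy_req *tail;		/* newest request, appended last */
};

static void toy_queue_add(struct toy_queue *q, struct toy_req *req)
{
	req->dl_next = NULL;
	if (q->head == NULL)
		q->head = req;		/* queue was empty */
	else
		q->tail->dl_next = req;	/* link after the current tail */
	q->tail = req;
}

static struct toy_req *toy_queue_remove(struct toy_queue *q)
{
	struct toy_req *req = q->head;

	if (req) {
		q->head = req->dl_next;
		if (q->head == NULL)	/* removed the last element */
			q->tail = NULL;
	}
	return req;
}

int main(void)
{
	struct toy_queue q = { NULL, NULL };
	struct toy_req a = { .id = 1 }, b = { .id = 2 };
	struct toy_req *r;

	toy_queue_add(&q, &a);
	toy_queue_add(&q, &b);
	r = toy_queue_remove(&q);	/* FIFO order: 'a' comes out first */
	assert(r && r->id == 1);
	r = toy_queue_remove(&q);
	assert(r && r->id == 2);
	assert(toy_queue_remove(&q) == NULL);
	printf("FIFO order preserved\n");
	return 0;
}

Keeping an explicit tail pointer makes the append O(1), which matters because every completed handshake lands on this list before accept() drains it.
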
v5.14.15
  1/* SPDX-License-Identifier: GPL-2.0-or-later */
  2/*
  3 * NET		Generic infrastructure for Network protocols.
  4 *
  5 *		Definitions for request_sock
  6 *
  7 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
  8 *
  9 * 		From code originally in include/net/tcp.h
 10 */
 11#ifndef _REQUEST_SOCK_H
 12#define _REQUEST_SOCK_H
 13
 14#include <linux/slab.h>
 15#include <linux/spinlock.h>
 16#include <linux/types.h>
 17#include <linux/bug.h>
 18#include <linux/refcount.h>
 19
 20#include <net/sock.h>
 21
 22struct request_sock;
 23struct sk_buff;
 24struct dst_entry;
 25struct proto;
 26
 27struct request_sock_ops {
 28	int		family;
 29	unsigned int	obj_size;
 30	struct kmem_cache	*slab;
 31	char		*slab_name;
 32	int		(*rtx_syn_ack)(const struct sock *sk,
 33				       struct request_sock *req);
 34	void		(*send_ack)(const struct sock *sk, struct sk_buff *skb,
 35				    struct request_sock *req);
 36	void		(*send_reset)(const struct sock *sk,
 37				      struct sk_buff *skb);
 38	void		(*destructor)(struct request_sock *req);
 39	void		(*syn_ack_timeout)(const struct request_sock *req);
 40};
 41
 42int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);
 43
 44struct saved_syn {
 45	u32 mac_hdrlen;
 46	u32 network_hdrlen;
 47	u32 tcp_hdrlen;
 48	u8 data[];
 49};
 50
 51/* struct request_sock - mini sock to represent a connection request
 52 */
 53struct request_sock {
 54	struct sock_common		__req_common;
 55#define rsk_refcnt			__req_common.skc_refcnt
 56#define rsk_hash			__req_common.skc_hash
 57#define rsk_listener			__req_common.skc_listener
 58#define rsk_window_clamp		__req_common.skc_window_clamp
 59#define rsk_rcv_wnd			__req_common.skc_rcv_wnd
 60
 61	struct request_sock		*dl_next;
 62	u16				mss;
 63	u8				num_retrans; /* number of retransmits */
 64	u8				syncookie:1; /* syncookie: encode tcpopts in timestamp */
 65	u8				num_timeout:7; /* number of timeouts */
 66	u32				ts_recent;
 67	struct timer_list		rsk_timer;
 68	const struct request_sock_ops	*rsk_ops;
 69	struct sock			*sk;
 70	struct saved_syn		*saved_syn;
 71	u32				secid;
 72	u32				peer_secid;
 73};
 74
 75static inline struct request_sock *inet_reqsk(const struct sock *sk)
 76{
 77	return (struct request_sock *)sk;
 78}
 79
 80static inline struct sock *req_to_sk(struct request_sock *req)
 81{
 82	return (struct sock *)req;
 83}
 84
 85static inline struct request_sock *
 86reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
 87	    bool attach_listener)
 88{
 89	struct request_sock *req;
 90
 91	req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
 92	if (!req)
 93		return NULL;
 94	req->rsk_listener = NULL;
 95	if (attach_listener) {
 96		if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
 97			kmem_cache_free(ops->slab, req);
 98			return NULL;
 99		}
100		req->rsk_listener = sk_listener;
101	}
102	req->rsk_ops = ops;
103	req_to_sk(req)->sk_prot = sk_listener->sk_prot;
104	sk_node_init(&req_to_sk(req)->sk_node);
105	sk_tx_queue_clear(req_to_sk(req));
106	req->saved_syn = NULL;
107	req->num_timeout = 0;
108	req->num_retrans = 0;
109	req->sk = NULL;
110	refcount_set(&req->rsk_refcnt, 0);
111
112	return req;
113}
114
115static inline void __reqsk_free(struct request_sock *req)
116{
117	req->rsk_ops->destructor(req);
118	if (req->rsk_listener)
119		sock_put(req->rsk_listener);
120	kfree(req->saved_syn);
121	kmem_cache_free(req->rsk_ops->slab, req);
122}
123
124static inline void reqsk_free(struct request_sock *req)
125{
126	WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
127	__reqsk_free(req);
128}
129
130static inline void reqsk_put(struct request_sock *req)
131{
132	if (refcount_dec_and_test(&req->rsk_refcnt))
133		reqsk_free(req);
134}
135
136/*
137 * For a TCP Fast Open listener -
138 *	lock - protects the access to all the reqsk, which is co-owned by
139 *		the listener and the child socket.
140 *	qlen - pending TFO requests (still in TCP_SYN_RECV).
141 *	max_qlen - max TFO reqs allowed before TFO is disabled.
142 *
143 *	XXX (TFO) - ideally these fields can be made as part of "listen_sock"
144 *	structure above. But there is some implementation difficulty due to
145 *	listen_sock being part of request_sock_queue hence will be freed when
146 *	a listener is stopped. But TFO related fields may continue to be
147 *	accessed even after a listener is closed, until its sk_refcnt drops
148 *	to 0 implying no more outstanding TFO reqs. One solution is to keep
149 *	listen_opt around until	sk_refcnt drops to 0. But there is some other
150 *	complexity that needs to be resolved. E.g., a listener can be disabled
151 *	temporarily through shutdown()->tcp_disconnect(), and re-enabled later.
152 */
153struct fastopen_queue {
154	struct request_sock	*rskq_rst_head; /* Keep track of past TFO */
155	struct request_sock	*rskq_rst_tail; /* requests that caused RST.
156						 * This is part of the defense
157						 * against spoofing attack.
158						 */
159	spinlock_t	lock;
160	int		qlen;		/* # of pending (TCP_SYN_RECV) reqs */
161	int		max_qlen;	/* != 0 iff TFO is currently enabled */
162
163	struct tcp_fastopen_context __rcu *ctx; /* cipher context for cookie */
164};
165
166/** struct request_sock_queue - queue of request_socks
167 *
168 * @rskq_accept_head - FIFO head of established children
169 * @rskq_accept_tail - FIFO tail of established children
170 * @rskq_defer_accept - User waits for some data after accept()
171 *
172 */
173struct request_sock_queue {
174	spinlock_t		rskq_lock;
175	u8			rskq_defer_accept;
176
177	u32			synflood_warned;
178	atomic_t		qlen;
179	atomic_t		young;
180
181	struct request_sock	*rskq_accept_head;
182	struct request_sock	*rskq_accept_tail;
183	struct fastopen_queue	fastopenq;  /* Check max_qlen != 0 to determine
184					     * if TFO is enabled.
185					     */
186};
187
188void reqsk_queue_alloc(struct request_sock_queue *queue);
189
190void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
191			   bool reset);
192
193static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
194{
195	return READ_ONCE(queue->rskq_accept_head) == NULL;
196}
197
198static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
199						      struct sock *parent)
200{
201	struct request_sock *req;
202
203	spin_lock_bh(&queue->rskq_lock);
204	req = queue->rskq_accept_head;
205	if (req) {
206		sk_acceptq_removed(parent);
207		WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
208		if (queue->rskq_accept_head == NULL)
209			queue->rskq_accept_tail = NULL;
210	}
211	spin_unlock_bh(&queue->rskq_lock);
212	return req;
213}
214
215static inline void reqsk_queue_removed(struct request_sock_queue *queue,
216				       const struct request_sock *req)
217{
218	if (req->num_timeout == 0)
219		atomic_dec(&queue->young);
220	atomic_dec(&queue->qlen);
221}
222
223static inline void reqsk_queue_added(struct request_sock_queue *queue)
224{
225	atomic_inc(&queue->young);
226	atomic_inc(&queue->qlen);
227}
228
229static inline int reqsk_queue_len(const struct request_sock_queue *queue)
230{
231	return atomic_read(&queue->qlen);
232}
233
234static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
235{
236	return atomic_read(&queue->young);
237}
238
239#endif /* _REQUEST_SOCK_H */
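
Compared with v3.1, the v5.14.15 header ties the lifetime of a request_sock to rsk_refcnt: reqsk_put() drops one reference and only the final put reaches reqsk_free(). The sketch below illustrates that pattern in user space, using C11 atomics as a stand-in for the kernel's refcount_t; it is not kernel code, and the names (toy_reqsk, toy_reqsk_put, and so on) are hypothetical.

/* Minimal user-space sketch of the rsk_refcnt pattern used by
 * reqsk_put() above: the object is freed only when the last
 * reference is dropped.  Not kernel code; names are hypothetical.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_reqsk {
	atomic_int refcnt;	/* analogue of rsk_refcnt */
	int id;
};

static struct toy_reqsk *toy_reqsk_alloc(int id)
{
	struct toy_reqsk *req = malloc(sizeof(*req));

	if (req) {
		atomic_init(&req->refcnt, 1);	/* caller holds one reference */
		req->id = id;
	}
	return req;
}

static void toy_reqsk_hold(struct toy_reqsk *req)
{
	atomic_fetch_add(&req->refcnt, 1);
}

static void toy_reqsk_put(struct toy_reqsk *req)
{
	/* Free only when the last reference goes away, like reqsk_put(). */
	if (atomic_fetch_sub(&req->refcnt, 1) == 1) {
		printf("freeing request %d\n", req->id);
		free(req);
	}
}

int main(void)
{
	struct toy_reqsk *req = toy_reqsk_alloc(42);

	if (!req)
		return 1;
	toy_reqsk_hold(req);	/* e.g. a timer takes a second reference */
	toy_reqsk_put(req);	/* first put: object stays alive */
	toy_reqsk_put(req);	/* last put: object is freed */
	return 0;
}

This is why reqsk_free() in the real header can WARN_ON_ONCE() a non-zero refcount: by the time it runs, every remaining holder of the request (for example the SYN-ACK timer or the listener's hash table) is expected to have dropped its reference.
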