/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * NET		Generic infrastructure for Network protocols.
 *
 *		Definitions for request_sock
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *		From code originally in include/net/tcp.h
 */
#ifndef _REQUEST_SOCK_H
#define _REQUEST_SOCK_H

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/refcount.h>

#include <net/sock.h>

struct request_sock;
struct sk_buff;
struct dst_entry;
struct proto;

struct request_sock_ops {
	int		family;
	unsigned int	obj_size;
	struct kmem_cache	*slab;
	char		*slab_name;
	int		(*rtx_syn_ack)(const struct sock *sk,
				       struct request_sock *req);
	void		(*send_ack)(const struct sock *sk, struct sk_buff *skb,
				    struct request_sock *req);
	void		(*send_reset)(const struct sock *sk,
				      struct sk_buff *skb);
	void		(*destructor)(struct request_sock *req);
	void		(*syn_ack_timeout)(const struct request_sock *req);
};
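
/*
 * Illustrative sketch (not part of the original header): how a protocol
 * might populate its request_sock_ops table.  All example_* names below
 * are hypothetical placeholders; a real table, such as TCP's, lives in
 * the protocol implementation (e.g. net/ipv4/tcp_ipv4.c), not here.
 */
static inline int example_rtx_syn_ack(const struct sock *sk,
				      struct request_sock *req)
{
	/* A real handler would retransmit the SYN-ACK for @req. */
	return 0;
}

static inline void example_reqsk_destructor(struct request_sock *req)
{
	/* A real handler would release protocol-private state in @req. */
}

static const struct request_sock_ops example_request_sock_ops = {
	.family		= PF_INET,
	.obj_size	= sizeof(struct request_sock),
	.rtx_syn_ack	= example_rtx_syn_ack,
	.destructor	= example_reqsk_destructor,
	/* .slab would be created at protocol registration time. */
};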

int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);

struct saved_syn {
	u32 mac_hdrlen;
	u32 network_hdrlen;
	u32 tcp_hdrlen;
	u8 data[];
};
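
/*
 * Sketch (an assumption about usage, not a kernel API): if the MAC,
 * network and TCP headers of the original SYN are stored back to back
 * in @data, the size of a saved SYN allocation follows directly from
 * the three header lengths.
 */
static inline size_t example_saved_syn_size(const struct saved_syn *syn)
{
	return sizeof(*syn) + syn->mac_hdrlen + syn->network_hdrlen +
	       syn->tcp_hdrlen;
}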

/* struct request_sock - mini sock to represent a connection request
 */
struct request_sock {
	struct sock_common		__req_common;
#define rsk_refcnt			__req_common.skc_refcnt
#define rsk_hash			__req_common.skc_hash
#define rsk_listener			__req_common.skc_listener
#define rsk_window_clamp		__req_common.skc_window_clamp
#define rsk_rcv_wnd			__req_common.skc_rcv_wnd

	struct request_sock		*dl_next;
	u16				mss;
	u8				num_retrans; /* number of retransmits */
	u8				syncookie:1; /* syncookie: encode tcpopts in timestamp */
	u8				num_timeout:7; /* number of timeouts */
	u32				ts_recent;
	struct timer_list		rsk_timer;
	const struct request_sock_ops	*rsk_ops;
	struct sock			*sk;
	struct saved_syn		*saved_syn;
	u32				secid;
	u32				peer_secid;
};
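
/*
 * The two casts below are valid because struct request_sock and
 * struct sock both begin with struct sock_common, so a pointer to one
 * may be reinterpreted as a pointer to the other.
 */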

static inline struct request_sock *inet_reqsk(const struct sock *sk)
{
	return (struct request_sock *)sk;
}

static inline struct sock *req_to_sk(struct request_sock *req)
{
	return (struct sock *)req;
}

static inline struct request_sock *
reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
	    bool attach_listener)
{
	struct request_sock *req;

	req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
	if (!req)
		return NULL;
	req->rsk_listener = NULL;
	if (attach_listener) {
		/* Take a reference on the listener; bail out if it is
		 * already being torn down.
		 */
		if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
			kmem_cache_free(ops->slab, req);
			return NULL;
		}
		req->rsk_listener = sk_listener;
	}
	req->rsk_ops = ops;
	req_to_sk(req)->sk_prot = sk_listener->sk_prot;
	sk_node_init(&req_to_sk(req)->sk_node);
	sk_tx_queue_clear(req_to_sk(req));
	req->saved_syn = NULL;
	req->num_timeout = 0;
	req->num_retrans = 0;
	req->sk = NULL;
	/* The request starts with no references; the owner must set
	 * rsk_refcnt before publishing it.
	 */
	refcount_set(&req->rsk_refcnt, 0);

	return req;
}

static inline void __reqsk_free(struct request_sock *req)
{
	req->rsk_ops->destructor(req);
	if (req->rsk_listener)
		sock_put(req->rsk_listener);
	kfree(req->saved_syn);
	kmem_cache_free(req->rsk_ops->slab, req);
}

static inline void reqsk_free(struct request_sock *req)
{
	WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
	__reqsk_free(req);
}

static inline void reqsk_put(struct request_sock *req)
{
	if (refcount_dec_and_test(&req->rsk_refcnt))
		reqsk_free(req);
}
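
/*
 * Sketch of the request lifecycle (illustrative only; it reuses the
 * hypothetical example_request_sock_ops above and assumes its slab
 * cache has already been created).  reqsk_alloc() hands back a request
 * with rsk_refcnt at zero, so the owner must publish a reference
 * before anyone may call reqsk_put().
 */
static inline void example_reqsk_lifecycle(struct sock *listener)
{
	struct request_sock *req;

	req = reqsk_alloc(&example_request_sock_ops, listener, true);
	if (!req)
		return;

	refcount_set(&req->rsk_refcnt, 1);	/* we hold the only ref */
	/* ... hash the request, arm rsk_timer, send the SYN-ACK ... */
	reqsk_put(req);				/* drops to zero: freed */
}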

/*
 * For a TCP Fast Open listener -
 *	lock - protects access to the reqsks, which are co-owned by
 *		the listener and the child socket.
 *	qlen - number of pending TFO requests (still in TCP_SYN_RECV).
 *	max_qlen - max number of TFO reqs allowed before TFO is disabled.
 *
 *	XXX (TFO) - ideally these fields would be part of the "listen_sock"
 *	structure. That is difficult in practice because listen_sock is part
 *	of request_sock_queue and is therefore freed when a listener is
 *	stopped, while TFO-related fields may still be accessed after the
 *	listener is closed, until its sk_refcnt drops to 0, implying no more
 *	outstanding TFO reqs. One solution is to keep listen_opt around until
 *	sk_refcnt drops to 0, but other complexity remains to be resolved;
 *	e.g., a listener can be disabled temporarily through
 *	shutdown()->tcp_disconnect() and re-enabled later.
 */
struct fastopen_queue {
	struct request_sock	*rskq_rst_head; /* Keep track of past TFO */
	struct request_sock	*rskq_rst_tail; /* requests that caused RST.
						 * This is part of the defense
						 * against spoofing attacks.
						 */
	spinlock_t	lock;
	int		qlen;		/* # of pending (TCP_SYN_RECV) reqs */
	int		max_qlen;	/* != 0 iff TFO is currently enabled */

	struct tcp_fastopen_context __rcu *ctx; /* cipher context for cookie */
};
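
/*
 * Convenience sketch mirroring the comment on max_qlen above: TFO is
 * considered enabled on a listener iff max_qlen is non-zero.
 */
static inline bool example_tfo_enabled(const struct fastopen_queue *fq)
{
	return fq->max_qlen != 0;
}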

/**
 * struct request_sock_queue - queue of request_socks
 * @rskq_accept_head: FIFO head of established children
 * @rskq_accept_tail: FIFO tail of established children
 * @rskq_defer_accept: user waits for some data after accept()
 */
struct request_sock_queue {
	spinlock_t		rskq_lock;
	u8			rskq_defer_accept;

	u32			synflood_warned;
	atomic_t		qlen;
	atomic_t		young;

	struct request_sock	*rskq_accept_head;
	struct request_sock	*rskq_accept_tail;
	struct fastopen_queue	fastopenq;  /* Check max_qlen != 0 to determine
					     * if TFO is enabled.
					     */
};

void reqsk_queue_alloc(struct request_sock_queue *queue);

void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
			   bool reset);

static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
{
	return READ_ONCE(queue->rskq_accept_head) == NULL;
}

static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
						      struct sock *parent)
{
	struct request_sock *req;

	spin_lock_bh(&queue->rskq_lock);
	req = queue->rskq_accept_head;
	if (req) {
		sk_acceptq_removed(parent);
		WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
		if (queue->rskq_accept_head == NULL)
			queue->rskq_accept_tail = NULL;
	}
	spin_unlock_bh(&queue->rskq_lock);
	return req;
}
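
/*
 * Sketch of how an accept() implementation consumes this queue
 * (illustrative; the real TCP path is inet_csk_accept()).  The child
 * socket holds its own reference, so it remains valid after the
 * request itself is released.
 */
static inline struct sock *
example_accept_dequeue(struct sock *parent, struct request_sock_queue *queue)
{
	struct request_sock *req;
	struct sock *child = NULL;

	req = reqsk_queue_remove(queue, parent);
	if (req) {
		child = req->sk;	/* established child socket */
		reqsk_put(req);
	}
	return child;
}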

/*
 * @qlen counts all pending requests; @young counts those that have not
 * yet suffered a SYN-ACK timeout (num_timeout == 0), which is why only
 * such requests decrement @young when removed.
 */
static inline void reqsk_queue_removed(struct request_sock_queue *queue,
				       const struct request_sock *req)
{
	if (req->num_timeout == 0)
		atomic_dec(&queue->young);
	atomic_dec(&queue->qlen);
}

static inline void reqsk_queue_added(struct request_sock_queue *queue)
{
	atomic_inc(&queue->young);
	atomic_inc(&queue->qlen);
}

static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->qlen);
}

static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->young);
}

#endif /* _REQUEST_SOCK_H */