/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * NET		Generic infrastructure for Network protocols.
 *
 *		Definitions for request_sock
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *		From code originally in include/net/tcp.h
 */
#ifndef _REQUEST_SOCK_H
#define _REQUEST_SOCK_H

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/refcount.h>

#include <net/sock.h>

struct request_sock;
struct sk_buff;
struct dst_entry;
struct proto;
26
27struct request_sock_ops {
28 int family;
29 unsigned int obj_size;
30 struct kmem_cache *slab;
31 char *slab_name;
32 int (*rtx_syn_ack)(const struct sock *sk,
33 struct request_sock *req);
34 void (*send_ack)(const struct sock *sk, struct sk_buff *skb,
35 struct request_sock *req);
36 void (*send_reset)(const struct sock *sk,
37 struct sk_buff *skb);
38 void (*destructor)(struct request_sock *req);
39 void (*syn_ack_timeout)(const struct request_sock *req);
40};
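
/*
 * Illustrative sketch only (not part of this header): a protocol hooks
 * into this layer by filling in a request_sock_ops table, much as TCP
 * does with tcp_request_sock_ops in net/ipv4/tcp_ipv4.c. All myproto_*
 * names below are hypothetical.
 */
#if 0
static struct request_sock_ops myproto_request_sock_ops = {
	.family		 = PF_INET,
	.obj_size	 = sizeof(struct myproto_request_sock),
	.slab_name	 = "myproto_request_sock",
	.rtx_syn_ack	 = myproto_rtx_synack,
	.send_ack	 = myproto_reqsk_send_ack,
	.send_reset	 = myproto_send_reset,
	.destructor	 = myproto_reqsk_destructor,
	.syn_ack_timeout = myproto_syn_ack_timeout,
};
#endif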

int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);

/* Headers of the SYN this request was created from, preserved for the
 * application (e.g. when the TCP_SAVE_SYN socket option is set).
 */
struct saved_syn {
	u32 mac_hdrlen;
	u32 network_hdrlen;
	u32 tcp_hdrlen;
	u8 data[];
};

/* struct request_sock - mini sock to represent a connection request
 */
struct request_sock {
	struct sock_common		__req_common;
#define rsk_refcnt			__req_common.skc_refcnt
#define rsk_hash			__req_common.skc_hash
#define rsk_listener			__req_common.skc_listener
#define rsk_window_clamp		__req_common.skc_window_clamp
#define rsk_rcv_wnd			__req_common.skc_rcv_wnd

	struct request_sock		*dl_next;
	u16				mss;
	u8				num_retrans; /* number of retransmits */
	u8				syncookie:1; /* syncookie: encode tcpopts in timestamp */
	u8				num_timeout:7; /* number of timeouts */
	u32				ts_recent;
	struct timer_list		rsk_timer;
	const struct request_sock_ops	*rsk_ops;
	struct sock			*sk;
	struct saved_syn		*saved_syn;
	u32				secid;
	u32				peer_secid;
	u32				timeout;
};

/* A request_sock starts with the same struct sock_common as a full
 * socket, so the two representations can be cast back and forth.
 */
static inline struct request_sock *inet_reqsk(const struct sock *sk)
{
	return (struct request_sock *)sk;
}

static inline struct sock *req_to_sk(struct request_sock *req)
{
	return (struct sock *)req;
}

static inline struct request_sock *
reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
	    bool attach_listener)
{
	struct request_sock *req;

	req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
	if (!req)
		return NULL;
	req->rsk_listener = NULL;
	if (attach_listener) {
		/* The listener may be going away; do not resurrect a
		 * refcount that already dropped to zero.
		 */
		if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
			kmem_cache_free(ops->slab, req);
			return NULL;
		}
		req->rsk_listener = sk_listener;
	}
	req->rsk_ops = ops;
	req_to_sk(req)->sk_prot = sk_listener->sk_prot;
	sk_node_init(&req_to_sk(req)->sk_node);
	sk_tx_queue_clear(req_to_sk(req));
	req->saved_syn = NULL;
	req->timeout = 0;
	req->num_timeout = 0;
	req->num_retrans = 0;
	req->sk = NULL;
	/* The caller sets the first real reference count. */
	refcount_set(&req->rsk_refcnt, 0);

	return req;
}

static inline void __reqsk_free(struct request_sock *req)
{
	req->rsk_ops->destructor(req);
	if (req->rsk_listener)
		sock_put(req->rsk_listener);
	kfree(req->saved_syn);
	kmem_cache_free(req->rsk_ops->slab, req);
}

static inline void reqsk_free(struct request_sock *req)
{
	/* No reference must be left at this point. */
	WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
	__reqsk_free(req);
}

static inline void reqsk_put(struct request_sock *req)
{
	if (refcount_dec_and_test(&req->rsk_refcnt))
		reqsk_free(req);
}
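
/*
 * Illustrative sketch only: the usual request_sock lifetime. A listener
 * allocates a request on SYN reception, publishes it with an initial
 * reference count, and each holder later drops its reference with
 * reqsk_put(); the final put frees the request. The myproto_* names are
 * hypothetical; TCP's real flow lives in tcp_conn_request() and
 * inet_csk_reqsk_queue_hash_add().
 */
#if 0
static void myproto_handle_syn(struct sock *listener)
{
	struct request_sock *req;

	req = reqsk_alloc(&myproto_request_sock_ops, listener, true);
	if (!req)
		return;

	/* e.g. one reference for the request hash/timer, one for us */
	refcount_set(&req->rsk_refcnt, 2);

	/* ... hash the request, arm rsk_timer, send the SYN+ACK ... */

	reqsk_put(req);		/* drop our reference */
}
#endif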

/*
 * For a TCP Fast Open listener -
 *	lock - protects access to the reqsks, which are co-owned by
 *		the listener and the child socket.
 *	qlen - pending TFO requests (still in TCP_SYN_RECV).
 *	max_qlen - max TFO reqs allowed before TFO is disabled.
 *
 * XXX (TFO) - ideally these fields would be part of the "listen_sock"
 * structure. The difficulty is that listen_sock is part of
 * request_sock_queue and is therefore freed when a listener is stopped,
 * while TFO-related fields may still be accessed after a listener is
 * closed, until its sk_refcnt drops to 0, i.e. until no TFO reqs are
 * outstanding. One solution is to keep listen_opt around until
 * sk_refcnt drops to 0, but other complexity remains; e.g., a listener
 * can be disabled temporarily through shutdown()->tcp_disconnect() and
 * re-enabled later.
 */
struct fastopen_queue {
	struct request_sock	*rskq_rst_head; /* Keep track of past TFO */
	struct request_sock	*rskq_rst_tail; /* requests that caused RST.
						 * This is part of the defense
						 * against spoofing attacks.
						 */
	spinlock_t	lock;
	int		qlen;		/* # of pending (TCP_SYN_RECV) reqs */
	int		max_qlen;	/* != 0 iff TFO is currently enabled */

	struct tcp_fastopen_context __rcu *ctx; /* cipher context for cookie */
};

/**
 * struct request_sock_queue - queue of request_socks
 * @rskq_lock: protects the accept FIFO below
 * @rskq_defer_accept: user waits for some data after accept()
 * @synflood_warned: set once a SYN flood warning has been logged
 * @qlen: number of pending requests
 * @young: pending requests that have not yet had a SYN+ACK timeout
 * @rskq_accept_head: FIFO head of established children
 * @rskq_accept_tail: FIFO tail of established children
 * @fastopenq: TFO state; max_qlen != 0 means TFO is enabled
 */
struct request_sock_queue {
	spinlock_t		rskq_lock;
	u8			rskq_defer_accept;

	u32			synflood_warned;
	atomic_t		qlen;
	atomic_t		young;

	struct request_sock	*rskq_accept_head;
	struct request_sock	*rskq_accept_tail;
	struct fastopen_queue	fastopenq; /* Check max_qlen != 0 to determine
					    * if TFO is enabled.
					    */
};
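
/*
 * Illustrative sketch only, restating the comment above in code: TFO is
 * enabled on a listener queue iff fastopenq.max_qlen is non-zero.
 * myproto_tfo_enabled() is a hypothetical helper.
 */
#if 0
static bool myproto_tfo_enabled(const struct request_sock_queue *queue)
{
	return queue->fastopenq.max_qlen != 0;
}
#endif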

void reqsk_queue_alloc(struct request_sock_queue *queue);

void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
			   bool reset);

static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
{
	/* Paired with the WRITE_ONCE() in reqsk_queue_remove(); allows
	 * lockless checks for pending connections.
	 */
	return READ_ONCE(queue->rskq_accept_head) == NULL;
}

static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
						      struct sock *parent)
{
	struct request_sock *req;

	spin_lock_bh(&queue->rskq_lock);
	req = queue->rskq_accept_head;
	if (req) {
		sk_acceptq_removed(parent);
		WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
		if (queue->rskq_accept_head == NULL)
			queue->rskq_accept_tail = NULL;
	}
	spin_unlock_bh(&queue->rskq_lock);
	return req;
}
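
/*
 * Illustrative sketch only: how an accept() implementation might pop an
 * established child, loosely modeled on inet_csk_accept() in
 * net/ipv4/inet_connection_sock.c. Blocking, error handling and TFO are
 * omitted; myproto_accept() is a hypothetical function.
 */
#if 0
static struct sock *myproto_accept(struct sock *parent)
{
	struct request_sock_queue *queue = &inet_csk(parent)->icsk_accept_queue;
	struct request_sock *req;
	struct sock *child;

	if (reqsk_queue_empty(queue))
		return NULL;	/* a real implementation would wait here */

	req = reqsk_queue_remove(queue, parent);
	if (!req)
		return NULL;

	child = req->sk;	/* the established child socket */
	reqsk_put(req);		/* release the accept queue's reference */
	return child;
}
#endif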

static inline void reqsk_queue_removed(struct request_sock_queue *queue,
				       const struct request_sock *req)
{
	/* A request is "young" until its first SYN+ACK timeout. */
	if (req->num_timeout == 0)
		atomic_dec(&queue->young);
	atomic_dec(&queue->qlen);
}

static inline void reqsk_queue_added(struct request_sock_queue *queue)
{
	atomic_inc(&queue->young);
	atomic_inc(&queue->qlen);
}

static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->qlen);
}

static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->young);
}

#endif /* _REQUEST_SOCK_H */