Linux v6.13.7: include/net/inet_connection_sock.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * NET		Generic infrastructure for INET connection oriented protocols.
 *
 *		Definitions for inet_connection_sock
 *
 * Authors:	Many people, see the TCP sources
 *
 * 		From code originally in TCP
 */
#ifndef _INET_CONNECTION_SOCK_H
#define _INET_CONNECTION_SOCK_H

#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/poll.h>
#include <linux/kernel.h>
#include <linux/sockptr.h>

#include <net/inet_sock.h>
#include <net/request_sock.h>

/* Cancel timers, when they are not required. */
#undef INET_CSK_CLEAR_TIMERS

struct inet_bind_bucket;
struct inet_bind2_bucket;
struct tcp_congestion_ops;

/*
 * Pointers to address related TCP functions
 * (i.e. things that depend on the address family)
 */
struct inet_connection_sock_af_ops {
	int	    (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
	void	    (*send_check)(struct sock *sk, struct sk_buff *skb);
	int	    (*rebuild_header)(struct sock *sk);
	void	    (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
	int	    (*conn_request)(struct sock *sk, struct sk_buff *skb);
	struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req,
				      struct dst_entry *dst,
				      struct request_sock *req_unhash,
				      bool *own_req);
	u16	    net_header_len;
	u16	    sockaddr_len;
	int	    (*setsockopt)(struct sock *sk, int level, int optname,
				  sockptr_t optval, unsigned int optlen);
	int	    (*getsockopt)(struct sock *sk, int level, int optname,
				  char __user *optval, int __user *optlen);
	void	    (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
	void	    (*mtu_reduced)(struct sock *sk);
};
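
Each address family plugs its own handlers into this table. As a hedged illustration of what a filled-in table looks like, the sketch below is loosely modeled on TCP's IPv4 table (ipv4_specific in net/ipv4/tcp_ipv4.c); the name example_ipv4_af_ops is hypothetical, and the in-tree definition is authoritative, not this excerpt:

static const struct inet_connection_sock_af_ops example_ipv4_af_ops = {
	.queue_xmit	= ip_queue_xmit,	/* build the IP header and transmit */
	.send_check	= tcp_v4_send_check,	/* compute the transport checksum */
	.rebuild_header	= inet_sk_rebuild_header,
	.sk_rx_dst_set	= inet_sk_rx_dst_set,
	.conn_request	= tcp_v4_conn_request,	/* handle an incoming SYN */
	.syn_recv_sock	= tcp_v4_syn_recv_sock,	/* create the child socket */
	.net_header_len	= sizeof(struct iphdr),
	.sockaddr_len	= sizeof(struct sockaddr_in),
	.setsockopt	= ip_setsockopt,
	.getsockopt	= ip_getsockopt,
	.addr2sockaddr	= inet_csk_addr2sockaddr,
	.mtu_reduced	= tcp_v4_mtu_reduced,
};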

/** struct inet_connection_sock - INET connection oriented sock
 *
 * @icsk_accept_queue:	   FIFO of established children
 * @icsk_bind_hash:	   Bind node
 * @icsk_bind2_hash:	   Bind node in the bhash2 table
 * @icsk_timeout:	   Timeout
 * @icsk_retransmit_timer: Resend (no ack)
 * @icsk_rto:		   Retransmit timeout
 * @icsk_pmtu_cookie:	   Last pmtu seen by socket
 * @icsk_ca_ops:	   Pluggable congestion control hook
 * @icsk_af_ops:	   Operations which are AF_INET{4,6} specific
 * @icsk_ulp_ops:	   Pluggable ULP control hook
 * @icsk_ulp_data:	   ULP private data
 * @icsk_clean_acked:	   Clean acked data hook
 * @icsk_ca_state:	   Congestion control state
 * @icsk_retransmits:	   Number of unrecovered [RTO] timeouts
 * @icsk_pending:	   Scheduled timer event
 * @icsk_backoff:	   Backoff
 * @icsk_syn_retries:	   Number of allowed SYN (or equivalent) retries
 * @icsk_probes_out:	   Unanswered 0 window probes
 * @icsk_ext_hdr_len:	   Network protocol overhead (IP/IPv6 options)
 * @icsk_ack:		   Delayed ACK control data
 * @icsk_mtup:		   MTU probing control data
 * @icsk_probes_tstamp:    Probe timestamp (cleared by non-zero window ack)
 * @icsk_user_timeout:	   TCP_USER_TIMEOUT value
 */
struct inet_connection_sock {
	/* inet_sock has to be the first member! */
	struct inet_sock	  icsk_inet;
	struct request_sock_queue icsk_accept_queue;
	struct inet_bind_bucket	  *icsk_bind_hash;
	struct inet_bind2_bucket  *icsk_bind2_hash;
	unsigned long		  icsk_timeout;
	struct timer_list	  icsk_retransmit_timer;
	struct timer_list	  icsk_delack_timer;
	__u32			  icsk_rto;
	__u32			  icsk_rto_min;
	__u32			  icsk_delack_max;
	__u32			  icsk_pmtu_cookie;
	const struct tcp_congestion_ops *icsk_ca_ops;
	const struct inet_connection_sock_af_ops *icsk_af_ops;
	const struct tcp_ulp_ops  *icsk_ulp_ops;
	void __rcu		  *icsk_ulp_data;
	void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq);
	unsigned int		  (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
	__u8			  icsk_ca_state:5,
				  icsk_ca_initialized:1,
				  icsk_ca_setsockopt:1,
				  icsk_ca_dst_locked:1;
	__u8			  icsk_retransmits;
	__u8			  icsk_pending;
	__u8			  icsk_backoff;
	__u8			  icsk_syn_retries;
	__u8			  icsk_probes_out;
	__u16			  icsk_ext_hdr_len;
	struct {
		__u8		  pending;	 /* ACK is pending			   */
		__u8		  quick;	 /* Scheduled number of quick acks	   */
		__u8		  pingpong;	 /* The session is interactive		   */
		__u8		  retry;	 /* Number of attempts			   */
		#define ATO_BITS 8
		__u32		  ato:ATO_BITS,	 /* Predicted tick of soft clock	   */
				  lrcv_flowlabel:20, /* last received ipv6 flowlabel	   */
				  unused:4;
		unsigned long	  timeout;	 /* Currently scheduled timeout		   */
		__u32		  lrcvtime;	 /* timestamp of last received data packet */
		__u16		  last_seg_size; /* Size of last incoming segment	   */
		__u16		  rcv_mss;	 /* MSS used for delayed ACK decisions	   */
	} icsk_ack;
	struct {
		/* Range of MTUs to search */
		int		  search_high;
		int		  search_low;

		/* Information on the current probe. */
		u32		  probe_size:31,
		/* Is the MTUP feature enabled for this connection? */
				  enabled:1;

		u32		  probe_timestamp;
	} icsk_mtup;
	u32			  icsk_probes_tstamp;
	u32			  icsk_user_timeout;

	u64			  icsk_ca_priv[104 / sizeof(u64)];
#define ICSK_CA_PRIV_SIZE	  sizeof_field(struct inet_connection_sock, icsk_ca_priv)
};

#define ICSK_TIME_RETRANS	1	/* Retransmit timer */
#define ICSK_TIME_DACK		2	/* Delayed ack timer */
#define ICSK_TIME_PROBE0	3	/* Zero window probe timer */
#define ICSK_TIME_LOSS_PROBE	5	/* Tail loss probe timer */
#define ICSK_TIME_REO_TIMEOUT	6	/* Reordering timer */

#define inet_csk(ptr) container_of_const(ptr, struct inet_connection_sock, icsk_inet.sk)

static inline void *inet_csk_ca(const struct sock *sk)
{
	return (void *)inet_csk(sk)->icsk_ca_priv;
}
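
Congestion control modules keep their per-connection state in icsk_ca_priv and reach it through inet_csk_ca(). The following is a minimal sketch of that pattern; the struct name and fields are hypothetical, but the size check against ICSK_CA_PRIV_SIZE mirrors what in-tree modules such as CUBIC perform in their init hook:

struct example_ca_state {		/* hypothetical private CA state */
	u32	cnt;
	u32	last_max_cwnd;
};

static void example_ca_init(struct sock *sk)
{
	struct example_ca_state *ca = inet_csk_ca(sk);

	/* Private state must fit inside icsk_ca_priv. */
	BUILD_BUG_ON(sizeof(struct example_ca_state) > ICSK_CA_PRIV_SIZE);

	ca->cnt = 0;
	ca->last_max_cwnd = 0;
}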

struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority);

enum inet_csk_ack_state_t {
	ICSK_ACK_SCHED	= 1,
	ICSK_ACK_TIMER  = 2,
	ICSK_ACK_PUSHED = 4,
	ICSK_ACK_PUSHED2 = 8,
	ICSK_ACK_NOW = 16,	/* Send the next ACK immediately (once) */
	ICSK_ACK_NOMEM = 32,
};

void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(struct timer_list *),
			       void (*delack_handler)(struct timer_list *),
			       void (*keepalive_handler)(struct timer_list *));
void inet_csk_clear_xmit_timers(struct sock *sk);
void inet_csk_clear_xmit_timers_sync(struct sock *sk);

static inline void inet_csk_schedule_ack(struct sock *sk)
{
	inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED;
}

static inline int inet_csk_ack_scheduled(const struct sock *sk)
{
	return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED;
}

static inline void inet_csk_delack_init(struct sock *sk)
{
	memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
}

void inet_csk_delete_keepalive_timer(struct sock *sk);
void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);

static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
		smp_store_release(&icsk->icsk_pending, 0);
#ifdef INET_CSK_CLEAR_TIMERS
		sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
#endif
	} else if (what == ICSK_TIME_DACK) {
		smp_store_release(&icsk->icsk_ack.pending, 0);
		icsk->icsk_ack.retry = 0;
#ifdef INET_CSK_CLEAR_TIMERS
		sk_stop_timer(sk, &icsk->icsk_delack_timer);
#endif
	} else {
		pr_debug("inet_csk BUG: unknown timer value\n");
	}
}

/*
 *	Reset the retransmission timer
 */
static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
					     unsigned long when,
					     const unsigned long max_when)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (when > max_when) {
		pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n",
			 sk, what, when, (void *)_THIS_IP_);
		when = max_when;
	}

	if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 ||
	    what == ICSK_TIME_LOSS_PROBE || what == ICSK_TIME_REO_TIMEOUT) {
		smp_store_release(&icsk->icsk_pending, what);
		icsk->icsk_timeout = jiffies + when;
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
	} else if (what == ICSK_TIME_DACK) {
		smp_store_release(&icsk->icsk_ack.pending,
				  icsk->icsk_ack.pending | ICSK_ACK_TIMER);
		icsk->icsk_ack.timeout = jiffies + when;
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
	} else {
		pr_debug("inet_csk BUG: unknown timer value\n");
	}
}

static inline unsigned long
inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
		     unsigned long max_when)
{
	u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff;

	return (unsigned long)min_t(u64, when, max_when);
}
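
inet_csk_rto_backoff() scales the base RTO by the binary exponential backoff counter and clamps the result: with icsk_rto at 200 ms and icsk_backoff at 3 it yields 1.6 s, unless max_when is smaller. A hedged sketch of how a timeout path might pair it with inet_csk_reset_xmit_timer() follows; the function name is hypothetical, and TCP_RTO_MAX comes from net/tcp.h, which this header does not include:

static void example_rearm_probe0(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	/* icsk_backoff is bumped elsewhere on each unanswered probe; the
	 * resulting delay is rto << backoff, capped here at TCP_RTO_MAX.
	 */
	unsigned long when = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

	inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when, TCP_RTO_MAX);
}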

struct sock *inet_csk_accept(struct sock *sk, struct proto_accept_arg *arg);

int inet_csk_get_port(struct sock *sk, unsigned short snum);

struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
				     const struct request_sock *req);
struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req);

struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
				      struct request_sock *req,
				      struct sock *child);
bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout);
struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
					 struct request_sock *req,
					 bool own_req);

static inline void inet_csk_reqsk_queue_added(struct sock *sk)
{
	reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue);
}

static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
{
	return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
}

static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
{
	return inet_csk_reqsk_queue_len(sk) > READ_ONCE(sk->sk_max_ack_backlog);
}
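
The request (SYN) queue is bounded by sk_max_ack_backlog, so a protocol's SYN handler typically stops taking new requests once inet_csk_reqsk_queue_is_full() reports the listener saturated (real TCP may fall back to SYN cookies instead). A hedged, much-simplified sketch of that shape, not the actual tcp_conn_request() logic:

static int example_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Listener already holds as many embryonic connections as allowed:
	 * drop the new SYN (conn_request handlers return 0 on drop).
	 */
	if (inet_csk_reqsk_queue_is_full(sk))
		return 0;

	/* ... otherwise allocate a request_sock, send the SYN-ACK, and hash
	 * the request with inet_csk_reqsk_queue_hash_add(), which also
	 * accounts for it via inet_csk_reqsk_queue_added() ...
	 */
	return 0;
}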

bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);

static inline unsigned long
reqsk_timeout(struct request_sock *req, unsigned long max_timeout)
{
	u64 timeout = (u64)req->timeout << req->num_timeout;

	return (unsigned long)min_t(u64, timeout, max_timeout);
}

static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
{
	/* The below has to be done to allow calling inet_csk_destroy_sock */
	sock_set_flag(sk, SOCK_DEAD);
	this_cpu_inc(*sk->sk_prot->orphan_count);
}

void inet_csk_destroy_sock(struct sock *sk);
void inet_csk_prepare_forced_close(struct sock *sk);

/*
 * LISTEN is a special case for poll..
 */
static inline __poll_t inet_csk_listen_poll(const struct sock *sk)
{
	return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ?
			(EPOLLIN | EPOLLRDNORM) : 0;
}
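
For a listening socket the only readiness event is a fully established child sitting in the accept queue, which is why poll gets this special case. A hedged sketch of how a protocol's poll handler might use the helper, loosely modeled on DCCP's use of inet_csk_listen_poll(); the handler name and the non-listening branch are placeholders:

static __poll_t example_poll(struct file *file, struct socket *sock,
			     poll_table *wait)
{
	struct sock *sk = sock->sk;

	sock_poll_wait(file, sock, wait);

	/* A listener is readable iff accept() would not block. */
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* ... compute the readable/writable mask for non-listening states ... */
	return 0;
}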

int inet_csk_listen_start(struct sock *sk);
void inet_csk_listen_stop(struct sock *sk);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);

/* update the fast reuse flag when adding a socket */
void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
			       struct sock *sk);

struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);

static inline void inet_csk_enter_pingpong_mode(struct sock *sk)
{
	inet_csk(sk)->icsk_ack.pingpong =
		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pingpong_thresh);
}

static inline void inet_csk_exit_pingpong_mode(struct sock *sk)
{
	inet_csk(sk)->icsk_ack.pingpong = 0;
}

static inline bool inet_csk_in_pingpong_mode(struct sock *sk)
{
	return inet_csk(sk)->icsk_ack.pingpong >=
	       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pingpong_thresh);
}

static inline void inet_csk_inc_pingpong_cnt(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.pingpong < U8_MAX)
		icsk->icsk_ack.pingpong++;
}
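
Pingpong mode marks a connection as interactive: once the counter reaches the sysctl_tcp_pingpong_thresh threshold, delayed ACKs are preferred so the ACK can ride on the application's reply. A hedged sketch of how a sender-side event might bump the counter, loosely modeled on TCP's data-sent accounting; the function name is hypothetical and tcp_jiffies32 is assumed from net/tcp.h:

static void example_event_data_sent(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now = tcp_jiffies32;	/* assumed coarse jiffies clock */

	/* Sending data shortly after receiving data looks like an
	 * interactive exchange; count it toward pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		inet_csk_inc_pingpong_cnt(sk);
}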

static inline bool inet_csk_has_ulp(const struct sock *sk)
{
	return inet_test_bit(IS_ICSK, sk) && !!inet_csk(sk)->icsk_ulp_ops;
}

static inline void inet_init_csk_locks(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	spin_lock_init(&icsk->icsk_accept_queue.rskq_lock);
	spin_lock_init(&icsk->icsk_accept_queue.fastopenq.lock);
}

#endif /* _INET_CONNECTION_SOCK_H */