v4.17
  1/*
  2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
  3 *		operating system.  INET is implemented using the  BSD Socket
  4 *		interface as the means of communication with the user level.
  5 *
  6 *		Definitions for the IP module.
  7 *
  8 * Version:	@(#)ip.h	1.0.2	05/07/93
  9 *
 10 * Authors:	Ross Biro
 11 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 12 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 13 *
 14 * Changes:
 15 *		Mike McLagan    :       Routing by source
 16 *
 17 *		This program is free software; you can redistribute it and/or
 18 *		modify it under the terms of the GNU General Public License
 19 *		as published by the Free Software Foundation; either version
 20 *		2 of the License, or (at your option) any later version.
 21 */
 22#ifndef _IP_H
 23#define _IP_H
 24
 25#include <linux/types.h>
 26#include <linux/ip.h>
 27#include <linux/in.h>
 28#include <linux/skbuff.h>
 29#include <linux/jhash.h>
 30
 31#include <net/inet_sock.h>
 32#include <net/route.h>
 33#include <net/snmp.h>
 34#include <net/flow.h>
 35#include <net/flow_dissector.h>
 36#include <net/netns/hash.h>
 37
 38#define IPV4_MAX_PMTU		65535U		/* RFC 2675, Section 5.1 */
 39#define IPV4_MIN_MTU		68			/* RFC 791 */
 40
 41struct sock;
 42
 43struct inet_skb_parm {
 44	int			iif;
 45	struct ip_options	opt;		/* Compiled IP options		*/
 46	u16			flags;
 47
 48#define IPSKB_FORWARDED		BIT(0)
 49#define IPSKB_XFRM_TUNNEL_SIZE	BIT(1)
 50#define IPSKB_XFRM_TRANSFORMED	BIT(2)
 51#define IPSKB_FRAG_COMPLETE	BIT(3)
 52#define IPSKB_REROUTED		BIT(4)
 53#define IPSKB_DOREDIRECT	BIT(5)
 54#define IPSKB_FRAG_PMTU		BIT(6)
 55#define IPSKB_L3SLAVE		BIT(7)
 56
 57	u16			frag_max_size;
 58};
 59
 60static inline bool ipv4_l3mdev_skb(u16 flags)
 61{
 62	return !!(flags & IPSKB_L3SLAVE);
 63}
 64
 65static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
 66{
 67	return ip_hdr(skb)->ihl * 4;
 68}
 69
 70struct ipcm_cookie {
 71	struct sockcm_cookie	sockc;
 72	__be32			addr;
 73	int			oif;
 74	struct ip_options_rcu	*opt;
 75	__u8			tx_flags;
 76	__u8			ttl;
 77	__s16			tos;
 78	char			priority;
 79};
 80
 81#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
 82#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))
 83
 84/* return enslaved device index if relevant */
 85static inline int inet_sdif(struct sk_buff *skb)
 86{
 87#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
 88	if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
 89		return IPCB(skb)->iif;
 90#endif
 91	return 0;
 92}
 93
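
inet_sdif() exists for VRF-aware socket lookups: when the skb arrived through an L3 master device, IPCB(skb)->iif holds the enslaved interface's ifindex, and the demux has to consider both indices. A minimal sketch of the intended use, loosely modelled on the UDP receive path; lookup_socket() is a hypothetical stand-in for the real demux call:

	int dif = skb->skb_iif;		/* device the skb arrived on */
	int sdif = inet_sdif(skb);	/* enslaved ifindex under a VRF, else 0 */

	/* Pass both, so a socket bound to the VRF device or to the
	 * slave interface can still be matched. */
	sk = lookup_socket(net, iph->saddr, uh->source,
			   iph->daddr, uh->dest, dif, sdif);
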
 94/* Special input handler for packets caught by the router alert option.
 95   They are selected only by the protocol field and are then processed
 96   like local ones, but only if someone wants them!  Otherwise a router
 97   not running rsvpd would kill RSVP.
 98
 99   What user space does with them is its problem.  I have no idea how it
100   would masquerade or NAT them (it is a joke, joke :-)), but the receiver
101   should be clever enough e.g. to forward mtrace requests sent to a
102   multicast group so that they reach the destination's designated router.
103 */
104
105struct ip_ra_chain {
106	struct ip_ra_chain __rcu *next;
107	struct sock		*sk;
108	union {
109		void			(*destructor)(struct sock *);
110		struct sock		*saved_sk;
111	};
112	struct rcu_head		rcu;
113};
114
115/* IP flags. */
116#define IP_CE		0x8000		/* Flag: "Congestion"		*/
117#define IP_DF		0x4000		/* Flag: "Don't Fragment"	*/
118#define IP_MF		0x2000		/* Flag: "More Fragments"	*/
119#define IP_OFFSET	0x1FFF		/* "Fragment Offset" part	*/
120
121#define IP_FRAG_TIME	(30 * HZ)		/* fragment lifetime	*/
122
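
These flag masks and IP_OFFSET overlay the 16-bit frag_off field, which stays in network byte order inside the header (hence the htons() in ip_is_fragment() further down); the 13-bit offset counts 8-byte units, not bytes. A small self-contained sketch of the decoding, with the mask values written out; the two helpers are hypothetical and not part of this header:

	#include <stdint.h>
	#include <arpa/inet.h>

	static unsigned int frag_offset_bytes(uint16_t frag_off_net)
	{
		return (ntohs(frag_off_net) & 0x1FFF /* IP_OFFSET */) * 8;
	}

	static int more_fragments(uint16_t frag_off_net)
	{
		return (ntohs(frag_off_net) & 0x2000 /* IP_MF */) != 0;
	}
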
123struct msghdr;
124struct net_device;
125struct packet_type;
126struct rtable;
127struct sockaddr;
128
129int igmp_mc_init(void);
130
131/*
132 *	Functions provided by ip.c
133 */
134
135int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
136			  __be32 saddr, __be32 daddr,
137			  struct ip_options_rcu *opt);
138int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
139	   struct net_device *orig_dev);
140int ip_local_deliver(struct sk_buff *skb);
141int ip_mr_input(struct sk_buff *skb);
142int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
143int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
144int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
145		   int (*output)(struct net *, struct sock *, struct sk_buff *));
146void ip_send_check(struct iphdr *ip);
147int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
148int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
149
150int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
151void ip_init(void);
152int ip_append_data(struct sock *sk, struct flowi4 *fl4,
153		   int getfrag(void *from, char *to, int offset, int len,
154			       int odd, struct sk_buff *skb),
155		   void *from, int len, int protolen,
156		   struct ipcm_cookie *ipc,
157		   struct rtable **rt,
158		   unsigned int flags);
159int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
160		       struct sk_buff *skb);
161ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
162		       int offset, size_t size, int flags);
163struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
164			      struct sk_buff_head *queue,
165			      struct inet_cork *cork);
166int ip_send_skb(struct net *net, struct sk_buff *skb);
167int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
168void ip_flush_pending_frames(struct sock *sk);
169struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
170			    int getfrag(void *from, char *to, int offset,
171					int len, int odd, struct sk_buff *skb),
172			    void *from, int length, int transhdrlen,
173			    struct ipcm_cookie *ipc, struct rtable **rtp,
174			    unsigned int flags);
175
176static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
177{
178	return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
179}
180
181static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
182{
183	return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
184}
185
186static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
187{
188	return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
189}
190
191/* datagram.c */
192int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
193int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
194
195void ip4_datagram_release_cb(struct sock *sk);
196
197struct ip_reply_arg {
198	struct kvec iov[1];
199	int	    flags;
200	__wsum 	    csum;
201	int	    csumoffset; /* u16 offset of csum in iov[0].iov_base */
202				/* -1 if not needed */
203	int	    bound_dev_if;
204	u8  	    tos;
205	kuid_t	    uid;
206};
207
208#define IP_REPLY_ARG_NOSRCCHECK 1
209
210static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
211{
212	return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
213}
214
215void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
216			   const struct ip_options *sopt,
217			   __be32 daddr, __be32 saddr,
218			   const struct ip_reply_arg *arg,
219			   unsigned int len);
220
221#define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
222#define __IP_INC_STATS(net, field)	__SNMP_INC_STATS64((net)->mib.ip_statistics, field)
223#define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
224#define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
225#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
226#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
227#define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
228#define __NET_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.net_statistics, field)
229#define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
230#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
231
232u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
233unsigned long snmp_fold_field(void __percpu *mib, int offt);
234#if BITS_PER_LONG==32
235u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
236			 size_t syncp_offset);
237u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
238#else
239static inline u64  snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
240					size_t syncp_offset)
241{
242	return snmp_get_cpu_field(mib, cpu, offct);
243
244}
245
246static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
247{
248	return snmp_fold_field(mib, offt);
249}
250#endif
251
252#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
253{ \
254	int i, c; \
255	for_each_possible_cpu(c) { \
256		for (i = 0; stats_list[i].name; i++) \
257			buff64[i] += snmp_get_cpu_field64( \
258					mib_statistic, \
259					c, stats_list[i].entry, \
260					offset); \
261	} \
262}
263
264#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
265{ \
266	int i, c; \
267	for_each_possible_cpu(c) { \
268		for (i = 0; stats_list[i].name; i++) \
269			buff[i] += snmp_get_cpu_field( \
270						mib_statistic, \
271						c, stats_list[i].entry); \
272	} \
273}
274
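
A sketch of how these batch helpers are typically driven when dumping a MIB to /proc (modelled on net/ipv4/proc.c; the list name, buffer bound and seq file come from there and are incidental here). The syncp offset lets snmp_get_cpu_field64() read 64-bit counters consistently on 32-bit hosts:

	u64 buff64[IPSTATS_MIB_MAX];
	int i;

	memset(buff64, 0, sizeof(buff64));
	snmp_get_cpu_field64_batch(buff64, snmp4_ipstats_list,
				   net->mib.ip_statistics,
				   offsetof(struct ipstats_mib, syncp));
	for (i = 0; snmp4_ipstats_list[i].name; i++)
		seq_printf(seq, " %llu", buff64[i]);
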
275void inet_get_local_port_range(struct net *net, int *low, int *high);
276
277#ifdef CONFIG_SYSCTL
278static inline int inet_is_local_reserved_port(struct net *net, int port)
279{
280	if (!net->ipv4.sysctl_local_reserved_ports)
281		return 0;
282	return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
283}
284
285static inline bool sysctl_dev_name_is_allowed(const char *name)
286{
287	return strcmp(name, "default") != 0  && strcmp(name, "all") != 0;
288}
289
290static inline int inet_prot_sock(struct net *net)
291{
292	return net->ipv4.sysctl_ip_prot_sock;
293}
294
295#else
296static inline int inet_is_local_reserved_port(struct net *net, int port)
297{
298	return 0;
299}
300
301static inline int inet_prot_sock(struct net *net)
302{
303	return PROT_SOCK;
304}
305#endif
306
307__be32 inet_current_timestamp(void);
308
309/* From inetpeer.c */
310extern int inet_peer_threshold;
311extern int inet_peer_minttl;
312extern int inet_peer_maxttl;
313
314void ipfrag_init(void);
315
316void ip_static_sysctl_init(void);
317
318#define IP4_REPLY_MARK(net, mark) \
319	((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)
320
321static inline bool ip_is_fragment(const struct iphdr *iph)
322{
323	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
324}
325
326#ifdef CONFIG_INET
327#include <net/dst.h>
328
329/* The function in 2.2 was invalid, producing wrong result for
330 * check=0xFEFF. It was noticed by Arthur Skawina _year_ ago. --ANK(000625) */
331static inline
332int ip_decrease_ttl(struct iphdr *iph)
333{
334	u32 check = (__force u32)iph->check;
335	check += (__force u32)htons(0x0100);
336	iph->check = (__force __sum16)(check + (check>=0xFFFF));
337	return --iph->ttl;
338}
339
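
ip_decrease_ttl() applies the incremental checksum update of RFC 1141/1624: decrementing the TTL (the high byte of the TTL/protocol word) by one raises the one's-complement checksum by 0x0100, and the `check >= 0xFFFF` term folds the end-around carry, which is exactly what the 2.2 code got wrong for check == 0xFEFF. A self-contained userspace check of that logic (the sample header values are arbitrary):

	#include <stdio.h>
	#include <stdint.h>

	/* RFC 1071 reference checksum, 16-bit words read in network order. */
	static uint16_t csum(const uint8_t *b, int len)
	{
		uint32_t sum = 0;
		int i;

		for (i = 0; i < len; i += 2)
			sum += (uint32_t)(b[i] << 8 | b[i + 1]);
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}

	int main(void)
	{
		uint8_t h[20] = {
			0x45, 0x00, 0x00, 0x54, 0x12, 0x34, 0x40, 0x00,
			0x40, 0x01, 0x00, 0x00,	/* TTL 64, ICMP, csum unset */
			0xc0, 0xa8, 0x00, 0x01, 0xc0, 0xa8, 0x00, 0x02,
		};
		uint16_t c = csum(h, 20);
		uint32_t check;

		h[10] = c >> 8;
		h[11] = c & 0xff;

		/* Incremental update, mirroring ip_decrease_ttl() above. */
		check = (uint32_t)(h[10] << 8 | h[11]);
		check += 0x0100;
		check += (check >= 0xFFFF);	/* fold the end-around carry */
		h[8]--;				/* the actual TTL decrement */
		h[10] = (check >> 8) & 0xff;
		h[11] = check & 0xff;

		/* A header with a correct checksum sums to zero. */
		printf("%s\n", csum(h, 20) == 0 ? "ok" : "broken");
		return 0;
	}
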
340static inline bool ip_mtu_locked(const struct dst_entry *dst)
341{
342	const struct rtable *rt = (const struct rtable *)dst;
343
344	return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
345}
346
347static inline
348int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
349{
350	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);
351
352	return  pmtudisc == IP_PMTUDISC_DO ||
353		(pmtudisc == IP_PMTUDISC_WANT &&
354		 !ip_mtu_locked(dst));
355}
356
357static inline bool ip_sk_accept_pmtu(const struct sock *sk)
358{
359	return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
360	       inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
361}
362
363static inline bool ip_sk_use_pmtu(const struct sock *sk)
364{
365	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
366}
367
368static inline bool ip_sk_ignore_df(const struct sock *sk)
369{
370	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
371	       inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
372}
373
374static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
375						    bool forwarding)
376{
377	struct net *net = dev_net(dst->dev);
378
379	if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
380	    ip_mtu_locked(dst) ||
381	    !forwarding)
382		return dst_mtu(dst);
383
384	return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
385}
386
387static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
388					  const struct sk_buff *skb)
389{
390	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
391		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
392
393		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
394	}
395
396	return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
397}
398
399u32 ip_idents_reserve(u32 hash, int segs);
400void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);
401
402static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
403					struct sock *sk, int segs)
404{
405	struct iphdr *iph = ip_hdr(skb);
406
407	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
408		/* This is only to work around buggy Windows95/2000
409		 * VJ compression implementations.  If the ID field
410		 * does not change, they drop every other packet in
411		 * a TCP stream using header compression.
412		 */
413		if (sk && inet_sk(sk)->inet_daddr) {
414			iph->id = htons(inet_sk(sk)->inet_id);
415			inet_sk(sk)->inet_id += segs;
416		} else {
417			iph->id = 0;
418		}
419	} else {
420		__ip_select_ident(net, iph, segs);
421	}
422}
423
424static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
425				   struct sock *sk)
426{
427	ip_select_ident_segs(net, skb, sk, 1);
428}
429
430static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
431{
432	return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
433				  skb->len, proto, 0);
434}
435
436/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
437 * Equivalent to :	flow->v4addrs.src = iph->saddr;
438 *			flow->v4addrs.dst = iph->daddr;
439 */
440static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
441					    const struct iphdr *iph)
442{
443	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
444		     offsetof(typeof(flow->addrs), v4addrs.src) +
445			      sizeof(flow->addrs.v4addrs.src));
446	memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
447	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
448}
449
450static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
451{
452	const struct iphdr *iph = skb_gro_network_header(skb);
453
454	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
455				  skb_gro_len(skb), proto, 0);
456}
457
458/*
459 *	Map a multicast IP onto multicast MAC for type ethernet.
460 */
461
462static inline void ip_eth_mc_map(__be32 naddr, char *buf)
463{
464	__u32 addr=ntohl(naddr);
465	buf[0]=0x01;
466	buf[1]=0x00;
467	buf[2]=0x5e;
468	buf[5]=addr&0xFF;
469	addr>>=8;
470	buf[4]=addr&0xFF;
471	addr>>=8;
472	buf[3]=addr&0x7F;
473}
474
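
Per RFC 1112, only the low 23 bits of the group address survive this mapping (buf[3] is masked with 0x7F), so 32 IPv4 groups share each MAC address: 224.1.2.3 and 225.129.2.3 both map to 01:00:5e:01:02:03. A self-contained userspace sketch of the same mapping:

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	int main(void)
	{
		unsigned char buf[6];
		uint32_t addr = ntohl(inet_addr("224.1.2.3"));

		buf[0] = 0x01;
		buf[1] = 0x00;
		buf[2] = 0x5e;
		buf[5] = addr & 0xFF;
		addr >>= 8;
		buf[4] = addr & 0xFF;
		addr >>= 8;
		buf[3] = addr & 0x7F;	/* top 9 bits of the group are dropped */

		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
		return 0;
	}
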
475/*
476 *	Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
477 *	Leave P_Key as 0 to be filled in by driver.
478 */
479
480static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
481{
482	__u32 addr;
483	unsigned char scope = broadcast[5] & 0xF;
484
485	buf[0]  = 0;		/* Reserved */
486	buf[1]  = 0xff;		/* Multicast QPN */
487	buf[2]  = 0xff;
488	buf[3]  = 0xff;
489	addr    = ntohl(naddr);
490	buf[4]  = 0xff;
491	buf[5]  = 0x10 | scope;	/* scope from broadcast address */
492	buf[6]  = 0x40;		/* IPv4 signature */
493	buf[7]  = 0x1b;
494	buf[8]  = broadcast[8];		/* P_Key */
495	buf[9]  = broadcast[9];
496	buf[10] = 0;
497	buf[11] = 0;
498	buf[12] = 0;
499	buf[13] = 0;
500	buf[14] = 0;
501	buf[15] = 0;
502	buf[19] = addr & 0xff;
503	addr  >>= 8;
504	buf[18] = addr & 0xff;
505	addr  >>= 8;
506	buf[17] = addr & 0xff;
507	addr  >>= 8;
508	buf[16] = addr & 0x0f;
509}
510
511static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
512{
513	if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
514		memcpy(buf, broadcast, 4);
515	else
516		memcpy(buf, &naddr, sizeof(naddr));
517}
518
519#if IS_ENABLED(CONFIG_IPV6)
520#include <linux/ipv6.h>
521#endif
522
523static __inline__ void inet_reset_saddr(struct sock *sk)
524{
525	inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
526#if IS_ENABLED(CONFIG_IPV6)
527	if (sk->sk_family == PF_INET6) {
528		struct ipv6_pinfo *np = inet6_sk(sk);
529
530		memset(&np->saddr, 0, sizeof(np->saddr));
531		memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
532	}
533#endif
534}
535
536#endif
537
538static inline unsigned int ipv4_addr_hash(__be32 ip)
539{
540	return (__force unsigned int) ip;
541}
542
543static inline u32 ipv4_portaddr_hash(const struct net *net,
544				     __be32 saddr,
545				     unsigned int port)
546{
547	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
548}
549
550bool ip_call_ra_chain(struct sk_buff *skb);
551
552/*
553 *	Functions provided by ip_fragment.c
554 */
555
556enum ip_defrag_users {
557	IP_DEFRAG_LOCAL_DELIVER,
558	IP_DEFRAG_CALL_RA_CHAIN,
559	IP_DEFRAG_CONNTRACK_IN,
560	__IP_DEFRAG_CONNTRACK_IN_END	= IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
561	IP_DEFRAG_CONNTRACK_OUT,
562	__IP_DEFRAG_CONNTRACK_OUT_END	= IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
563	IP_DEFRAG_CONNTRACK_BRIDGE_IN,
564	__IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
565	IP_DEFRAG_VS_IN,
566	IP_DEFRAG_VS_OUT,
567	IP_DEFRAG_VS_FWD,
568	IP_DEFRAG_AF_PACKET,
569	IP_DEFRAG_MACVLAN,
570};
571
572/* Return true if the value of 'user' is between 'lower_bond'
573 * and 'upper_bond' inclusively.
574 */
575static inline bool ip_defrag_user_in_between(u32 user,
576					     enum ip_defrag_users lower_bond,
577					     enum ip_defrag_users upper_bond)
578{
579	return user >= lower_bond && user <= upper_bond;
580}
581
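
The __*_END sentinels reserve USHRT_MAX values after each conntrack entry so that a conntrack zone id can be folded into the defrag user, and ip_defrag_user_in_between() then recognises the whole range. A sketch of that encoding, modelled on nf_defrag_ipv4 (variable names illustrative):

	u32 user = IP_DEFRAG_CONNTRACK_IN + zone_id;	/* zone_id <= USHRT_MAX */

	if (ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
				      __IP_DEFRAG_CONNTRACK_IN_END))
		;	/* this fragment queue belongs to conntrack input defrag */
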
582int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
583#ifdef CONFIG_INET
584struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
585#else
586static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
587{
588	return skb;
589}
590#endif
591
592/*
593 *	Functions provided by ip_forward.c
594 */
595
596int ip_forward(struct sk_buff *skb);
597
598/*
599 *	Functions provided by ip_options.c
600 */
601
602void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
603		      __be32 daddr, struct rtable *rt, int is_frag);
604
605int __ip_options_echo(struct net *net, struct ip_options *dopt,
606		      struct sk_buff *skb, const struct ip_options *sopt);
607static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
608				  struct sk_buff *skb)
609{
610	return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt);
611}
612
613void ip_options_fragment(struct sk_buff *skb);
614int ip_options_compile(struct net *net, struct ip_options *opt,
615		       struct sk_buff *skb);
616int ip_options_get(struct net *net, struct ip_options_rcu **optp,
617		   unsigned char *data, int optlen);
618int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
619			     unsigned char __user *data, int optlen);
620void ip_options_undo(struct ip_options *opt);
621void ip_forward_options(struct sk_buff *skb);
622int ip_options_rcv_srr(struct sk_buff *skb);
623
624/*
625 *	Functions provided by ip_sockglue.c
626 */
627
628void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
629void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
630			 struct sk_buff *skb, int tlen, int offset);
631int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
632		 struct ipcm_cookie *ipc, bool allow_ipv6);
633int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
634		  unsigned int optlen);
635int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
636		  int __user *optlen);
637int compat_ip_setsockopt(struct sock *sk, int level, int optname,
638			 char __user *optval, unsigned int optlen);
639int compat_ip_getsockopt(struct sock *sk, int level, int optname,
640			 char __user *optval, int __user *optlen);
641int ip_ra_control(struct sock *sk, unsigned char on,
642		  void (*destructor)(struct sock *));
643
644int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
645void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
646		   u32 info, u8 *payload);
647void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
648		    u32 info);
649
650static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
651{
652	ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
653}
654
655bool icmp_global_allow(void);
656extern int sysctl_icmp_msgs_per_sec;
657extern int sysctl_icmp_msgs_burst;
658
659#ifdef CONFIG_PROC_FS
660int ip_misc_proc_init(void);
661#endif
662
663#endif	/* _IP_H */
v6.9.4
  1/* SPDX-License-Identifier: GPL-2.0-or-later */
  2/*
  3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
  4 *		operating system.  INET is implemented using the  BSD Socket
  5 *		interface as the means of communication with the user level.
  6 *
  7 *		Definitions for the IP module.
  8 *
  9 * Version:	@(#)ip.h	1.0.2	05/07/93
 10 *
 11 * Authors:	Ross Biro
 12 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 13 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 14 *
 15 * Changes:
 16 *		Mike McLagan    :       Routing by source
 17 */
 18#ifndef _IP_H
 19#define _IP_H
 20
 21#include <linux/types.h>
 22#include <linux/ip.h>
 23#include <linux/in.h>
 24#include <linux/skbuff.h>
 25#include <linux/jhash.h>
 26#include <linux/sockptr.h>
 27#include <linux/static_key.h>
 28
 29#include <net/inet_sock.h>
 30#include <net/route.h>
 31#include <net/snmp.h>
 32#include <net/flow.h>
 33#include <net/flow_dissector.h>
 34#include <net/netns/hash.h>
 35#include <net/lwtunnel.h>
 36
 37#define IPV4_MAX_PMTU		65535U		/* RFC 2675, Section 5.1 */
 38#define IPV4_MIN_MTU		68			/* RFC 791 */
 39
 40extern unsigned int sysctl_fib_sync_mem;
 41extern unsigned int sysctl_fib_sync_mem_min;
 42extern unsigned int sysctl_fib_sync_mem_max;
 43
 44struct sock;
 45
 46struct inet_skb_parm {
 47	int			iif;
 48	struct ip_options	opt;		/* Compiled IP options		*/
 49	u16			flags;
 50
 51#define IPSKB_FORWARDED		BIT(0)
 52#define IPSKB_XFRM_TUNNEL_SIZE	BIT(1)
 53#define IPSKB_XFRM_TRANSFORMED	BIT(2)
 54#define IPSKB_FRAG_COMPLETE	BIT(3)
 55#define IPSKB_REROUTED		BIT(4)
 56#define IPSKB_DOREDIRECT	BIT(5)
 57#define IPSKB_FRAG_PMTU		BIT(6)
 58#define IPSKB_L3SLAVE		BIT(7)
 59#define IPSKB_NOPOLICY		BIT(8)
 60#define IPSKB_MULTIPATH		BIT(9)
 61
 62	u16			frag_max_size;
 63};
 64
 65static inline bool ipv4_l3mdev_skb(u16 flags)
 66{
 67	return !!(flags & IPSKB_L3SLAVE);
 68}
 69
 70static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
 71{
 72	return ip_hdr(skb)->ihl * 4;
 73}
 74
 75struct ipcm_cookie {
 76	struct sockcm_cookie	sockc;
 77	__be32			addr;
 78	int			oif;
 79	struct ip_options_rcu	*opt;
 80	__u8			protocol;
 81	__u8			ttl;
 82	__s16			tos;
 83	char			priority;
 84	__u16			gso_size;
 85};
 86
 87static inline void ipcm_init(struct ipcm_cookie *ipcm)
 88{
 89	*ipcm = (struct ipcm_cookie) { .tos = -1 };
 90}
 91
 92static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
 93				const struct inet_sock *inet)
 94{
 95	ipcm_init(ipcm);
 96
 97	ipcm->sockc.mark = READ_ONCE(inet->sk.sk_mark);
 98	ipcm->sockc.tsflags = READ_ONCE(inet->sk.sk_tsflags);
 99	ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
100	ipcm->addr = inet->inet_saddr;
101	ipcm->protocol = inet->inet_num;
102}
103
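
A sketch of the usual sendmsg()-path sequence, loosely modelled on udp_sendmsg() (error handling abbreviated): initialise the cookie from the socket, then let control messages override individual fields.

	struct ipcm_cookie ipc;

	ipcm_init_sk(&ipc, inet_sk(sk));	/* mark, tsflags, oif, saddr, proto */
	if (msg->msg_controllen) {
		err = ip_cmsg_send(sk, msg, &ipc,
				   sk->sk_family == AF_INET6);
		if (err < 0)
			return err;		/* cmsg parsing failed */
	}
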
104#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
105#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))
106
107/* return enslaved device index if relevant */
108static inline int inet_sdif(const struct sk_buff *skb)
109{
110#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
111	if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
112		return IPCB(skb)->iif;
113#endif
114	return 0;
115}
116
117/* Special input handler for packets caught by the router alert option.
118   They are selected only by the protocol field and are then processed
119   like local ones, but only if someone wants them!  Otherwise a router
120   not running rsvpd would kill RSVP.
121
122   What user space does with them is its problem.  I have no idea how it
123   would masquerade or NAT them (it is a joke, joke :-)), but the receiver
124   should be clever enough e.g. to forward mtrace requests sent to a
125   multicast group so that they reach the destination's designated router.
126 */
127
128struct ip_ra_chain {
129	struct ip_ra_chain __rcu *next;
130	struct sock		*sk;
131	union {
132		void			(*destructor)(struct sock *);
133		struct sock		*saved_sk;
134	};
135	struct rcu_head		rcu;
136};
137
138/* IP flags. */
139#define IP_CE		0x8000		/* Flag: "Congestion"		*/
140#define IP_DF		0x4000		/* Flag: "Don't Fragment"	*/
141#define IP_MF		0x2000		/* Flag: "More Fragments"	*/
142#define IP_OFFSET	0x1FFF		/* "Fragment Offset" part	*/
143
144#define IP_FRAG_TIME	(30 * HZ)		/* fragment lifetime	*/
145
146struct msghdr;
147struct net_device;
148struct packet_type;
149struct rtable;
150struct sockaddr;
151
152int igmp_mc_init(void);
153
154/*
155 *	Functions provided by ip.c
156 */
157
158int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
159			  __be32 saddr, __be32 daddr,
160			  struct ip_options_rcu *opt, u8 tos);
161int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
162	   struct net_device *orig_dev);
163void ip_list_rcv(struct list_head *head, struct packet_type *pt,
164		 struct net_device *orig_dev);
165int ip_local_deliver(struct sk_buff *skb);
166void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
167int ip_mr_input(struct sk_buff *skb);
168int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
169int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
170int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
171		   int (*output)(struct net *, struct sock *, struct sk_buff *));
172
173struct ip_fraglist_iter {
174	struct sk_buff	*frag;
175	struct iphdr	*iph;
176	int		offset;
177	unsigned int	hlen;
178};
179
180void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
181		      unsigned int hlen, struct ip_fraglist_iter *iter);
182void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter);
183
184static inline struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter)
185{
186	struct sk_buff *skb = iter->frag;
187
188	iter->frag = skb->next;
189	skb_mark_not_on_list(skb);
190
191	return skb;
192}
193
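
The three helpers form a simple iterator over an already-built frag list. A sketch of the intended loop, modelled on ip_do_fragment() (setup and error paths omitted):

	struct ip_fraglist_iter iter;

	ip_fraglist_init(skb, iph, hlen, &iter);
	for (;;) {
		if (iter.frag)
			ip_fraglist_prepare(skb, &iter);  /* header for next frag */
		err = output(net, sk, skb);		  /* send this fragment */
		if (err || !iter.frag)
			break;
		skb = ip_fraglist_next(&iter);		  /* unlink and advance */
	}
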
194struct ip_frag_state {
195	bool		DF;
196	unsigned int	hlen;
197	unsigned int	ll_rs;
198	unsigned int	mtu;
199	unsigned int	left;
200	int		offset;
201	int		ptr;
202	__be16		not_last_frag;
203};
204
205void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
206		  unsigned int mtu, bool DF, struct ip_frag_state *state);
207struct sk_buff *ip_frag_next(struct sk_buff *skb,
208			     struct ip_frag_state *state);
209
210void ip_send_check(struct iphdr *ip);
211int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
212int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
213
214int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
215		    __u8 tos);
216void ip_init(void);
217int ip_append_data(struct sock *sk, struct flowi4 *fl4,
218		   int getfrag(void *from, char *to, int offset, int len,
219			       int odd, struct sk_buff *skb),
220		   void *from, int len, int protolen,
221		   struct ipcm_cookie *ipc,
222		   struct rtable **rt,
223		   unsigned int flags);
224int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
225		       struct sk_buff *skb);
226struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
227			      struct sk_buff_head *queue,
228			      struct inet_cork *cork);
229int ip_send_skb(struct net *net, struct sk_buff *skb);
230int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
231void ip_flush_pending_frames(struct sock *sk);
232struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
233			    int getfrag(void *from, char *to, int offset,
234					int len, int odd, struct sk_buff *skb),
235			    void *from, int length, int transhdrlen,
236			    struct ipcm_cookie *ipc, struct rtable **rtp,
237			    struct inet_cork *cork, unsigned int flags);
238
239int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
240
241static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
242{
243	return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
244}
245
246/* Get the route scope that should be used when sending a packet. */
247static inline u8 ip_sendmsg_scope(const struct inet_sock *inet,
248				  const struct ipcm_cookie *ipc,
249				  const struct msghdr *msg)
250{
251	if (sock_flag(&inet->sk, SOCK_LOCALROUTE) ||
252	    msg->msg_flags & MSG_DONTROUTE ||
253	    (ipc->opt && ipc->opt->opt.is_strictroute))
254		return RT_SCOPE_LINK;
255
256	return RT_SCOPE_UNIVERSE;
257}
258
259static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
260{
261	return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(READ_ONCE(inet->tos));
262}
263
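
Both helpers feed the flow key for the route lookup in a sendmsg() path. A sketch loosely modelled on udp_sendmsg(); the surrounding setup is omitted and faddr, dport and flow_flags are illustrative:

	u8 tos = get_rttos(&ipc, inet);
	u8 scope = ip_sendmsg_scope(inet, &ipc, msg);

	flowi4_init_output(fl4, ipc.oif, ipc.sockc.mark, tos, scope,
			   sk->sk_protocol, flow_flags, faddr, saddr,
			   dport, inet->inet_sport, sk->sk_uid);
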
264/* datagram.c */
265int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
266int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
267
268void ip4_datagram_release_cb(struct sock *sk);
269
270struct ip_reply_arg {
271	struct kvec iov[1];
272	int	    flags;
273	__wsum 	    csum;
274	int	    csumoffset; /* u16 offset of csum in iov[0].iov_base */
275				/* -1 if not needed */
276	int	    bound_dev_if;
277	u8  	    tos;
278	kuid_t	    uid;
279};
280
281#define IP_REPLY_ARG_NOSRCCHECK 1
282
283static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
284{
285	return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
286}
287
288void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
289			   const struct ip_options *sopt,
290			   __be32 daddr, __be32 saddr,
291			   const struct ip_reply_arg *arg,
292			   unsigned int len, u64 transmit_time, u32 txhash);
293
294#define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
295#define __IP_INC_STATS(net, field)	__SNMP_INC_STATS64((net)->mib.ip_statistics, field)
296#define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
297#define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
298#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
299#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
300#define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
301#define __NET_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.net_statistics, field)
302#define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
303#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
304
305static inline u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
306{
307	return  *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
308}
309
310unsigned long snmp_fold_field(void __percpu *mib, int offt);
311#if BITS_PER_LONG==32
312u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
313			 size_t syncp_offset);
314u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
315#else
316static inline u64  snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
317					size_t syncp_offset)
318{
319	return snmp_get_cpu_field(mib, cpu, offct);
320
321}
322
323static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
324{
325	return snmp_fold_field(mib, offt);
326}
327#endif
328
329#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
330{ \
331	int i, c; \
332	for_each_possible_cpu(c) { \
333		for (i = 0; stats_list[i].name; i++) \
334			buff64[i] += snmp_get_cpu_field64( \
335					mib_statistic, \
336					c, stats_list[i].entry, \
337					offset); \
338	} \
339}
340
341#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
342{ \
343	int i, c; \
344	for_each_possible_cpu(c) { \
345		for (i = 0; stats_list[i].name; i++) \
346			buff[i] += snmp_get_cpu_field( \
347						mib_statistic, \
348						c, stats_list[i].entry); \
349	} \
350}
351
352static inline void inet_get_local_port_range(const struct net *net, int *low, int *high)
353{
354	u32 range = READ_ONCE(net->ipv4.ip_local_ports.range);
355
356	*low = range & 0xffff;
357	*high = range >> 16;
358}
359bool inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high);
360
361#ifdef CONFIG_SYSCTL
362static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
363{
364	if (!net->ipv4.sysctl_local_reserved_ports)
365		return false;
366	return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
367}
368
369static inline bool sysctl_dev_name_is_allowed(const char *name)
370{
371	return strcmp(name, "default") != 0  && strcmp(name, "all") != 0;
372}
373
374static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
375{
376	return port < READ_ONCE(net->ipv4.sysctl_ip_prot_sock);
377}
378
379#else
380static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
381{
382	return false;
383}
384
385static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
386{
387	return port < PROT_SOCK;
388}
389#endif
390
391__be32 inet_current_timestamp(void);
392
393/* From inetpeer.c */
394extern int inet_peer_threshold;
395extern int inet_peer_minttl;
396extern int inet_peer_maxttl;
397
398void ipfrag_init(void);
399
400void ip_static_sysctl_init(void);
401
402#define IP4_REPLY_MARK(net, mark) \
403	(READ_ONCE((net)->ipv4.sysctl_fwmark_reflect) ? (mark) : 0)
404
405static inline bool ip_is_fragment(const struct iphdr *iph)
406{
407	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
408}
409
410#ifdef CONFIG_INET
411#include <net/dst.h>
412
413/* The function in 2.2 was invalid, producing wrong result for
414 * check=0xFEFF. It was noticed by Arthur Skawina _year_ ago. --ANK(000625) */
415static inline
416int ip_decrease_ttl(struct iphdr *iph)
417{
418	u32 check = (__force u32)iph->check;
419	check += (__force u32)htons(0x0100);
420	iph->check = (__force __sum16)(check + (check>=0xFFFF));
421	return --iph->ttl;
422}
423
424static inline bool ip_mtu_locked(const struct dst_entry *dst)
425{
426	const struct rtable *rt = dst_rtable(dst);
427
428	return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
429}
430
431static inline
432int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
433{
434	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);
435
436	return  pmtudisc == IP_PMTUDISC_DO ||
437		(pmtudisc == IP_PMTUDISC_WANT &&
438		 !ip_mtu_locked(dst));
439}
440
441static inline bool ip_sk_accept_pmtu(const struct sock *sk)
442{
443	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);
444
445	return pmtudisc != IP_PMTUDISC_INTERFACE &&
446	       pmtudisc != IP_PMTUDISC_OMIT;
447}
448
449static inline bool ip_sk_use_pmtu(const struct sock *sk)
450{
451	return READ_ONCE(inet_sk(sk)->pmtudisc) < IP_PMTUDISC_PROBE;
452}
453
454static inline bool ip_sk_ignore_df(const struct sock *sk)
455{
456	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);
457
458	return pmtudisc < IP_PMTUDISC_DO || pmtudisc == IP_PMTUDISC_OMIT;
459}
460
461static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
462						    bool forwarding)
463{
464	const struct rtable *rt = dst_rtable(dst);
465	struct net *net = dev_net(dst->dev);
466	unsigned int mtu;
467
468	if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||
469	    ip_mtu_locked(dst) ||
470	    !forwarding) {
471		mtu = rt->rt_pmtu;
472		if (mtu && time_before(jiffies, rt->dst.expires))
473			goto out;
474	}
475
476	/* 'forwarding = true' case should always honour route mtu */
477	mtu = dst_metric_raw(dst, RTAX_MTU);
478	if (mtu)
479		goto out;
480
481	mtu = READ_ONCE(dst->dev->mtu);
482
483	if (unlikely(ip_mtu_locked(dst))) {
484		if (rt->rt_uses_gateway && mtu > 576)
485			mtu = 576;
486	}
487
488out:
489	mtu = min_t(unsigned int, mtu, IP_MAX_MTU);
490
491	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
492}
493
494static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
495					  const struct sk_buff *skb)
496{
497	unsigned int mtu;
498
499	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
500		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
501
502		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
503	}
504
505	mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
506	return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
507}
508
509struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
510					int fc_mx_len,
511					struct netlink_ext_ack *extack);
512static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
513{
514	if (fib_metrics != &dst_default_metrics &&
515	    refcount_dec_and_test(&fib_metrics->refcnt))
516		kfree(fib_metrics);
517}
518
519/* ipv4 and ipv6 both use refcounted metrics if it is not the default */
520static inline
521void ip_dst_init_metrics(struct dst_entry *dst, struct dst_metrics *fib_metrics)
522{
523	dst_init_metrics(dst, fib_metrics->metrics, true);
524
525	if (fib_metrics != &dst_default_metrics) {
526		dst->_metrics |= DST_METRICS_REFCOUNTED;
527		refcount_inc(&fib_metrics->refcnt);
528	}
529}
530
531static inline
532void ip_dst_metrics_put(struct dst_entry *dst)
533{
534	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
535
536	if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
537		kfree(p);
538}
539
540void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);
541
542static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
543					struct sock *sk, int segs)
544{
545	struct iphdr *iph = ip_hdr(skb);
546
547	/* We had many attacks based on IPID, use the private
548	 * generator as much as we can.
549	 */
550	if (sk && inet_sk(sk)->inet_daddr) {
551		int val;
552
553		/* avoid atomic operations for TCP,
554		 * as we hold socket lock at this point.
555		 */
556		if (sk_is_tcp(sk)) {
557			sock_owned_by_me(sk);
558			val = atomic_read(&inet_sk(sk)->inet_id);
559			atomic_set(&inet_sk(sk)->inet_id, val + segs);
560		} else {
561			val = atomic_add_return(segs, &inet_sk(sk)->inet_id);
562		}
563		iph->id = htons(val);
564		return;
565	}
566	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
567		iph->id = 0;
568	} else {
569		/* Unfortunately we need the big hammer to get a suitable IPID */
570		__ip_select_ident(net, iph, segs);
571	}
572}
573
574static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
575				   struct sock *sk)
576{
577	ip_select_ident_segs(net, skb, sk, 1);
578}
579
580static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
581{
582	return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
583				  skb->len, proto, 0);
584}
585
586/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
587 * Equivalent to :	flow->v4addrs.src = iph->saddr;
588 *			flow->v4addrs.dst = iph->daddr;
589 */
590static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
591					    const struct iphdr *iph)
592{
593	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
594		     offsetof(typeof(flow->addrs), v4addrs.src) +
595			      sizeof(flow->addrs.v4addrs.src));
596	memcpy(&flow->addrs.v4addrs, &iph->addrs, sizeof(flow->addrs.v4addrs));
597	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
598}
599
600/*
601 *	Map a multicast IP onto multicast MAC for type ethernet.
602 */
603
604static inline void ip_eth_mc_map(__be32 naddr, char *buf)
605{
606	__u32 addr=ntohl(naddr);
607	buf[0]=0x01;
608	buf[1]=0x00;
609	buf[2]=0x5e;
610	buf[5]=addr&0xFF;
611	addr>>=8;
612	buf[4]=addr&0xFF;
613	addr>>=8;
614	buf[3]=addr&0x7F;
615}
616
617/*
618 *	Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
619 *	Leave P_Key as 0 to be filled in by driver.
620 */
621
622static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
623{
624	__u32 addr;
625	unsigned char scope = broadcast[5] & 0xF;
626
627	buf[0]  = 0;		/* Reserved */
628	buf[1]  = 0xff;		/* Multicast QPN */
629	buf[2]  = 0xff;
630	buf[3]  = 0xff;
631	addr    = ntohl(naddr);
632	buf[4]  = 0xff;
633	buf[5]  = 0x10 | scope;	/* scope from broadcast address */
634	buf[6]  = 0x40;		/* IPv4 signature */
635	buf[7]  = 0x1b;
636	buf[8]  = broadcast[8];		/* P_Key */
637	buf[9]  = broadcast[9];
638	buf[10] = 0;
639	buf[11] = 0;
640	buf[12] = 0;
641	buf[13] = 0;
642	buf[14] = 0;
643	buf[15] = 0;
644	buf[19] = addr & 0xff;
645	addr  >>= 8;
646	buf[18] = addr & 0xff;
647	addr  >>= 8;
648	buf[17] = addr & 0xff;
649	addr  >>= 8;
650	buf[16] = addr & 0x0f;
651}
652
653static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
654{
655	if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
656		memcpy(buf, broadcast, 4);
657	else
658		memcpy(buf, &naddr, sizeof(naddr));
659}
660
661#if IS_ENABLED(CONFIG_IPV6)
662#include <linux/ipv6.h>
663#endif
664
665static __inline__ void inet_reset_saddr(struct sock *sk)
666{
667	inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
668#if IS_ENABLED(CONFIG_IPV6)
669	if (sk->sk_family == PF_INET6) {
670		struct ipv6_pinfo *np = inet6_sk(sk);
671
672		memset(&np->saddr, 0, sizeof(np->saddr));
673		memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
674	}
675#endif
676}
677
678#endif
679
680static inline unsigned int ipv4_addr_hash(__be32 ip)
681{
682	return (__force unsigned int) ip;
683}
684
685static inline u32 ipv4_portaddr_hash(const struct net *net,
686				     __be32 saddr,
687				     unsigned int port)
688{
689	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
690}
691
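
The mix with net_hash_mix() keeps hash values distinct across network namespaces, so identical (address, port) pairs in different netns do not pile into the same buckets. Typical use is as a bucket selector for a power-of-two socket hash table (a sketch; the table sizing is illustrative):

	u32 hash = ipv4_portaddr_hash(net, inet->inet_rcv_saddr,
				      inet->inet_num);
	unsigned int slot = hash & (table_size - 1);
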
692bool ip_call_ra_chain(struct sk_buff *skb);
693
694/*
695 *	Functions provided by ip_fragment.c
696 */
697
698enum ip_defrag_users {
699	IP_DEFRAG_LOCAL_DELIVER,
700	IP_DEFRAG_CALL_RA_CHAIN,
701	IP_DEFRAG_CONNTRACK_IN,
702	__IP_DEFRAG_CONNTRACK_IN_END	= IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
703	IP_DEFRAG_CONNTRACK_OUT,
704	__IP_DEFRAG_CONNTRACK_OUT_END	= IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
705	IP_DEFRAG_CONNTRACK_BRIDGE_IN,
706	__IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
707	IP_DEFRAG_VS_IN,
708	IP_DEFRAG_VS_OUT,
709	IP_DEFRAG_VS_FWD,
710	IP_DEFRAG_AF_PACKET,
711	IP_DEFRAG_MACVLAN,
712};
713
714/* Return true if the value of 'user' is between 'lower_bond'
715 * and 'upper_bond' inclusively.
716 */
717static inline bool ip_defrag_user_in_between(u32 user,
718					     enum ip_defrag_users lower_bond,
719					     enum ip_defrag_users upper_bond)
720{
721	return user >= lower_bond && user <= upper_bond;
722}
723
724int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
725#ifdef CONFIG_INET
726struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
727#else
728static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
729{
730	return skb;
731}
732#endif
733
734/*
735 *	Functions provided by ip_forward.c
736 */
737
738int ip_forward(struct sk_buff *skb);
739
740/*
741 *	Functions provided by ip_options.c
742 */
743
744void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
745		      __be32 daddr, struct rtable *rt);
746
747int __ip_options_echo(struct net *net, struct ip_options *dopt,
748		      struct sk_buff *skb, const struct ip_options *sopt);
749static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
750				  struct sk_buff *skb)
751{
752	return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt);
753}
754
755void ip_options_fragment(struct sk_buff *skb);
756int __ip_options_compile(struct net *net, struct ip_options *opt,
757			 struct sk_buff *skb, __be32 *info);
758int ip_options_compile(struct net *net, struct ip_options *opt,
759		       struct sk_buff *skb);
760int ip_options_get(struct net *net, struct ip_options_rcu **optp,
761		   sockptr_t data, int optlen);
762void ip_options_undo(struct ip_options *opt);
763void ip_forward_options(struct sk_buff *skb);
764int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
765
766/*
767 *	Functions provided by ip_sockglue.c
768 */
769
770void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb, bool drop_dst);
771void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
772			 struct sk_buff *skb, int tlen, int offset);
773int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
774		 struct ipcm_cookie *ipc, bool allow_ipv6);
775DECLARE_STATIC_KEY_FALSE(ip4_min_ttl);
776int do_ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
777		     unsigned int optlen);
778int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
779		  unsigned int optlen);
780int do_ip_getsockopt(struct sock *sk, int level, int optname,
781		     sockptr_t optval, sockptr_t optlen);
782int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
783		  int __user *optlen);
784int ip_ra_control(struct sock *sk, unsigned char on,
785		  void (*destructor)(struct sock *));
786
787int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
788void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
789		   u32 info, u8 *payload);
790void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
791		    u32 info);
792
793static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
794{
795	ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
796}
797
798bool icmp_global_allow(void);
799extern int sysctl_icmp_msgs_per_sec;
800extern int sysctl_icmp_msgs_burst;
801
802#ifdef CONFIG_PROC_FS
803int ip_misc_proc_init(void);
804#endif
805
806int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
807				struct netlink_ext_ack *extack);
808
809static inline bool inetdev_valid_mtu(unsigned int mtu)
810{
811	return likely(mtu >= IPV4_MIN_MTU);
812}
813
814void ip_sock_set_freebind(struct sock *sk);
815int ip_sock_set_mtu_discover(struct sock *sk, int val);
816void ip_sock_set_pktinfo(struct sock *sk);
817void ip_sock_set_recverr(struct sock *sk);
818void ip_sock_set_tos(struct sock *sk, int val);
819void __ip_sock_set_tos(struct sock *sk, int val);
820
821#endif	/* _IP_H */