v5.4 (include/net/ip.h)
  1/* SPDX-License-Identifier: GPL-2.0-or-later */
  2/*
  3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
  4 *		operating system.  INET is implemented using the  BSD Socket
  5 *		interface as the means of communication with the user level.
  6 *
  7 *		Definitions for the IP module.
  8 *
  9 * Version:	@(#)ip.h	1.0.2	05/07/93
 10 *
 11 * Authors:	Ross Biro
 12 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 13 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 14 *
 15 * Changes:
 16 *		Mike McLagan    :       Routing by source
 17 */
 18#ifndef _IP_H
 19#define _IP_H
 20
 21#include <linux/types.h>
 22#include <linux/ip.h>
 23#include <linux/in.h>
 24#include <linux/skbuff.h>
 25#include <linux/jhash.h>
 26
 27#include <net/inet_sock.h>
 28#include <net/route.h>
 29#include <net/snmp.h>
 30#include <net/flow.h>
 31#include <net/flow_dissector.h>
 32#include <net/netns/hash.h>
 33
 34#define IPV4_MAX_PMTU		65535U		/* RFC 2675, Section 5.1 */
 35#define IPV4_MIN_MTU		68			/* RFC 791 */
 36
 37extern unsigned int sysctl_fib_sync_mem;
 38extern unsigned int sysctl_fib_sync_mem_min;
 39extern unsigned int sysctl_fib_sync_mem_max;
 40
 41struct sock;
 42
 43struct inet_skb_parm {
 44	int			iif;
 45	struct ip_options	opt;		/* Compiled IP options		*/
 46	u16			flags;
 47
 48#define IPSKB_FORWARDED		BIT(0)
 49#define IPSKB_XFRM_TUNNEL_SIZE	BIT(1)
 50#define IPSKB_XFRM_TRANSFORMED	BIT(2)
 51#define IPSKB_FRAG_COMPLETE	BIT(3)
 52#define IPSKB_REROUTED		BIT(4)
 53#define IPSKB_DOREDIRECT	BIT(5)
 54#define IPSKB_FRAG_PMTU		BIT(6)
 55#define IPSKB_L3SLAVE		BIT(7)
 56
 57	u16			frag_max_size;
 58};
 59
 60static inline bool ipv4_l3mdev_skb(u16 flags)
 61{
 62	return !!(flags & IPSKB_L3SLAVE);
 63}
 64
 65static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
 66{
 67	return ip_hdr(skb)->ihl * 4;
 68}
 69
 70struct ipcm_cookie {
 71	struct sockcm_cookie	sockc;
 72	__be32			addr;
 73	int			oif;
 74	struct ip_options_rcu	*opt;
 75	__u8			ttl;
 76	__s16			tos;
 77	char			priority;
 78	__u16			gso_size;
 79};
 80
 81static inline void ipcm_init(struct ipcm_cookie *ipcm)
 82{
 83	*ipcm = (struct ipcm_cookie) { .tos = -1 };
 84}
 85
 86static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
 87				const struct inet_sock *inet)
 88{
 89	ipcm_init(ipcm);
 90
 91	ipcm->sockc.mark = inet->sk.sk_mark;
 92	ipcm->sockc.tsflags = inet->sk.sk_tsflags;
 93	ipcm->oif = inet->sk.sk_bound_dev_if;
 94	ipcm->addr = inet->inet_saddr;
 95}
 96
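A minimal sketch of how a datagram send path might combine these helpers with ip_cmsg_send() (declared further down in this header); the function name and the reduced error handling are illustrative only:

/* Sketch only: per-socket defaults first, then let IP_* ancillary data
 * in the msghdr override them.
 */
static int example_prepare_cookie(struct sock *sk, struct msghdr *msg,
				  struct ipcm_cookie *ipc)
{
	ipcm_init_sk(ipc, inet_sk(sk));	/* mark, tsflags, oif, source address */

	if (msg->msg_controllen)
		return ip_cmsg_send(sk, msg, ipc, false);

	return 0;
}
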
 97#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
 98#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))
 99
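IPCB() overlays struct inet_skb_parm on the skb->cb scratch area, so IPv4 input code can carry per-packet state alongside the skb; a tiny sketch (the helper name is illustrative):

/* Sketch only: read the control block on the receive path. */
static inline bool example_skb_was_forwarded(struct sk_buff *skb)
{
	return IPCB(skb)->flags & IPSKB_FORWARDED;
}
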
100/* return enslaved device index if relevant */
101static inline int inet_sdif(struct sk_buff *skb)
102{
103#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
104	if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
105		return IPCB(skb)->iif;
106#endif
107	return 0;
108}
109
110/* Special input handler for packets caught by the router alert option.
111   They are selected only by the protocol field, and then processed like
112   local ones; but only if someone wants them! Otherwise, a router
113   not running rsvpd will kill RSVP.
114
115   What user level does with them is its own problem.
116   I have no idea how it will masquerade or NAT them (it is a joke, joke :-)),
117   but the receiver should be clever enough e.g. to forward mtrace requests
118   sent to a multicast group so they reach the destination's designated router.
119 */
120
121struct ip_ra_chain {
122	struct ip_ra_chain __rcu *next;
123	struct sock		*sk;
124	union {
125		void			(*destructor)(struct sock *);
126		struct sock		*saved_sk;
127	};
128	struct rcu_head		rcu;
129};
130
131/* IP flags. */
132#define IP_CE		0x8000		/* Flag: "Congestion"		*/
133#define IP_DF		0x4000		/* Flag: "Don't Fragment"	*/
134#define IP_MF		0x2000		/* Flag: "More Fragments"	*/
135#define IP_OFFSET	0x1FFF		/* "Fragment Offset" part	*/
136
137#define IP_FRAG_TIME	(30 * HZ)		/* fragment lifetime	*/
138
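frag_off packs the flag bits above together with a 13-bit offset counted in 8-octet units; a hypothetical helper showing the conversion to a byte offset:

/* Sketch only: the fragment offset is stored in units of 8 octets. */
static inline unsigned int example_frag_byte_offset(const struct iphdr *iph)
{
	return (ntohs(iph->frag_off) & IP_OFFSET) * 8;
}
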
139struct msghdr;
140struct net_device;
141struct packet_type;
142struct rtable;
143struct sockaddr;
144
145int igmp_mc_init(void);
146
147/*
148 *	Functions provided by ip.c
149 */
150
151int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
152			  __be32 saddr, __be32 daddr,
153			  struct ip_options_rcu *opt);
154int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
155	   struct net_device *orig_dev);
156void ip_list_rcv(struct list_head *head, struct packet_type *pt,
157		 struct net_device *orig_dev);
158int ip_local_deliver(struct sk_buff *skb);
159void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
160int ip_mr_input(struct sk_buff *skb);
161int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
162int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
163int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
164		   int (*output)(struct net *, struct sock *, struct sk_buff *));
165
166struct ip_fraglist_iter {
167	struct sk_buff	*frag;
168	struct iphdr	*iph;
169	int		offset;
170	unsigned int	hlen;
171};
172
173void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
174		      unsigned int hlen, struct ip_fraglist_iter *iter);
175void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter);
176
177static inline struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter)
178{
179	struct sk_buff *skb = iter->frag;
180
181	iter->frag = skb->next;
182	skb_mark_not_on_list(skb);
183
184	return skb;
185}
186
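A sketch of the iterator pattern these helpers support, loosely modeled on the fast path of ip_do_fragment(); it assumes skb->frag_list already holds properly sized fragments, and the wrapper name is illustrative:

/* Sketch only: walk an existing frag_list, fixing up each IP header
 * before handing the fragment to the output callback.
 */
static int example_fraglist_output(struct net *net, struct sock *sk,
				   struct sk_buff *skb, unsigned int hlen,
				   int (*output)(struct net *, struct sock *,
						 struct sk_buff *))
{
	struct ip_fraglist_iter iter;
	int err;

	ip_fraglist_init(skb, ip_hdr(skb), hlen, &iter);

	for (;;) {
		if (iter.frag)
			ip_fraglist_prepare(skb, &iter);

		err = output(net, sk, skb);
		if (err || !iter.frag)
			return err;

		skb = ip_fraglist_next(&iter);
	}
}
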
187struct ip_frag_state {
188	bool		DF;
189	unsigned int	hlen;
190	unsigned int	ll_rs;
191	unsigned int	mtu;
192	unsigned int	left;
193	int		offset;
194	int		ptr;
195	__be16		not_last_frag;
196};
197
198void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
199		  unsigned int mtu, bool DF, struct ip_frag_state *state);
200struct sk_buff *ip_frag_next(struct sk_buff *skb,
201			     struct ip_frag_state *state);
202
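The slow-path counterpart, again loosely modeled on ip_do_fragment(): ip_frag_init() seeds the state and ip_frag_next() carves off one new fragment per call until state.left reaches zero. The wrapper below is illustrative (real callers also free the original skb on failure); IS_ERR()/PTR_ERR() come from <linux/err.h>:

/* Sketch only: copy-based fragmentation of one large skb. */
static int example_slow_fragment(struct net *net, struct sock *sk,
				 struct sk_buff *skb, unsigned int hlen,
				 unsigned int ll_rs, unsigned int mtu,
				 int (*output)(struct net *, struct sock *,
					       struct sk_buff *))
{
	struct ip_frag_state state;
	struct sk_buff *skb2;
	int err;

	ip_frag_init(skb, hlen, ll_rs, mtu,
		     IPCB(skb)->flags & IPSKB_FRAG_PMTU, &state);

	while (state.left > 0) {
		skb2 = ip_frag_next(skb, &state);
		if (IS_ERR(skb2))
			return PTR_ERR(skb2);

		err = output(net, sk, skb2);
		if (err)
			return err;
	}

	consume_skb(skb);
	return 0;
}
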
203void ip_send_check(struct iphdr *ip);
204int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
205int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
206
207int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
208		    __u8 tos);
209void ip_init(void);
210int ip_append_data(struct sock *sk, struct flowi4 *fl4,
211		   int getfrag(void *from, char *to, int offset, int len,
212			       int odd, struct sk_buff *skb),
213		   void *from, int len, int protolen,
214		   struct ipcm_cookie *ipc,
215		   struct rtable **rt,
216		   unsigned int flags);
217int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
218		       struct sk_buff *skb);
219ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
220		       int offset, size_t size, int flags);
221struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
222			      struct sk_buff_head *queue,
223			      struct inet_cork *cork);
224int ip_send_skb(struct net *net, struct sk_buff *skb);
225int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
226void ip_flush_pending_frames(struct sock *sk);
227struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
228			    int getfrag(void *from, char *to, int offset,
229					int len, int odd, struct sk_buff *skb),
230			    void *from, int length, int transhdrlen,
231			    struct ipcm_cookie *ipc, struct rtable **rtp,
232			    struct inet_cork *cork, unsigned int flags);
233
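A sketch of the corked-send sequence built from the calls above, roughly how UDP and raw sockets use it: append to the socket's write queue, then either push the pending frames or flush them on error. The wrapper name, simplified locking and flag handling are illustrative; lock_sock()/release_sock() come from <net/sock.h>:

/* Sketch only: cork, append, then transmit or discard. */
static int example_corked_send(struct sock *sk, struct flowi4 *fl4,
			       struct rtable **rtp, struct ipcm_cookie *ipc,
			       struct msghdr *msg, int len)
{
	int err;

	lock_sock(sk);

	err = ip_append_data(sk, fl4, ip_generic_getfrag, msg, len, 0,
			     ipc, rtp, msg->msg_flags);
	if (err)
		ip_flush_pending_frames(sk);
	else
		err = ip_push_pending_frames(sk, fl4);

	release_sock(sk);
	return err;
}
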
234static inline int ip_queue_xmit(struct sock *sk, struct sk_buff *skb,
235				struct flowi *fl)
236{
237	return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
238}
239
240static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
241{
242	return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
243}
244
245static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
246{
247	return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
248}
249
250static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
251{
252	return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
253}
254
255/* datagram.c */
256int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
257int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
258
259void ip4_datagram_release_cb(struct sock *sk);
260
261struct ip_reply_arg {
262	struct kvec iov[1];
263	int	    flags;
264	__wsum 	    csum;
265	int	    csumoffset; /* u16 offset of csum in iov[0].iov_base */
266				/* -1 if not needed */
267	int	    bound_dev_if;
268	u8  	    tos;
269	kuid_t	    uid;
270};
271
272#define IP_REPLY_ARG_NOSRCCHECK 1
273
274static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
275{
276	return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
277}
278
279void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
280			   const struct ip_options *sopt,
281			   __be32 daddr, __be32 saddr,
282			   const struct ip_reply_arg *arg,
283			   unsigned int len, u64 transmit_time);
284
285#define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
286#define __IP_INC_STATS(net, field)	__SNMP_INC_STATS64((net)->mib.ip_statistics, field)
287#define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
288#define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
289#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
290#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
291#define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
292#define __NET_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.net_statistics, field)
293#define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
294#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
295
296u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
297unsigned long snmp_fold_field(void __percpu *mib, int offt);
298#if BITS_PER_LONG==32
299u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
300			 size_t syncp_offset);
301u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
302#else
303static inline u64  snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
304					size_t syncp_offset)
305{
306	return snmp_get_cpu_field(mib, cpu, offct);
307
308}
309
310static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
311{
312	return snmp_fold_field(mib, offt);
313}
314#endif
315
316#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
317{ \
318	int i, c; \
319	for_each_possible_cpu(c) { \
320		for (i = 0; stats_list[i].name; i++) \
321			buff64[i] += snmp_get_cpu_field64( \
322					mib_statistic, \
323					c, stats_list[i].entry, \
324					offset); \
325	} \
326}
327
328#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
329{ \
330	int i, c; \
331	for_each_possible_cpu(c) { \
332		for (i = 0; stats_list[i].name; i++) \
333			buff[i] += snmp_get_cpu_field( \
334						mib_statistic, \
335						c, stats_list[i].entry); \
336	} \
337}
338
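A sketch of how the batch macros are meant to be fed, in the style of net/ipv4/proc.c: a NULL-name-terminated struct snmp_mib table, the per-cpu MIB, and (for the 64-bit variant) the offset of the u64_stats_sync member. The table and helper names below are illustrative:

/* Sketch only: fold per-cpu 64-bit IP counters into one array. */
static const struct snmp_mib example_ipstats_list[] = {
	SNMP_MIB_ITEM("InReceives", IPSTATS_MIB_INPKTS),
	SNMP_MIB_ITEM("InHdrErrors", IPSTATS_MIB_INHDRERRORS),
	SNMP_MIB_SENTINEL
};

static void example_fold_ip_mib(struct net *net, u64 *buff64)
{
	memset(buff64, 0, IPSTATS_MIB_MAX * sizeof(u64));
	snmp_get_cpu_field64_batch(buff64, example_ipstats_list,
				   net->mib.ip_statistics,
				   offsetof(struct ipstats_mib, syncp));
}
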
339void inet_get_local_port_range(struct net *net, int *low, int *high);
340
341#ifdef CONFIG_SYSCTL
342static inline int inet_is_local_reserved_port(struct net *net, int port)
343{
344	if (!net->ipv4.sysctl_local_reserved_ports)
345		return 0;
346	return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
347}
348
349static inline bool sysctl_dev_name_is_allowed(const char *name)
350{
351	return strcmp(name, "default") != 0  && strcmp(name, "all") != 0;
352}
353
354static inline int inet_prot_sock(struct net *net)
355{
356	return net->ipv4.sysctl_ip_prot_sock;
357}
358
359#else
360static inline int inet_is_local_reserved_port(struct net *net, int port)
361{
362	return 0;
363}
364
365static inline int inet_prot_sock(struct net *net)
366{
367	return PROT_SOCK;
368}
369#endif
370
371__be32 inet_current_timestamp(void);
372
373/* From inetpeer.c */
374extern int inet_peer_threshold;
375extern int inet_peer_minttl;
376extern int inet_peer_maxttl;
377
378void ipfrag_init(void);
379
380void ip_static_sysctl_init(void);
381
382#define IP4_REPLY_MARK(net, mark) \
383	((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)
384
385static inline bool ip_is_fragment(const struct iphdr *iph)
386{
387	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
388}
389
390#ifdef CONFIG_INET
391#include <net/dst.h>
392
393/* The function in 2.2 was invalid, producing wrong result for
394 * check=0xFEFF. It was noticed by Arthur Skawina _year_ ago. --ANK(000625) */
395static inline
396int ip_decrease_ttl(struct iphdr *iph)
397{
398	u32 check = (__force u32)iph->check;
399	check += (__force u32)htons(0x0100);
400	iph->check = (__force __sum16)(check + (check>=0xFFFF));
401	return --iph->ttl;
402}
403
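The open-coded update above is the RFC 1624 incremental-checksum rule: TTL sits in the high byte of the TTL/protocol word, so decrementing it lowers that word by 0x0100 and the folded one's-complement sum has to absorb the matching delta, with the check >= 0xFFFF term folding the carry back in (the detail the 2.2 code got wrong). A functionally equivalent sketch using the generic helper from <net/checksum.h>; the function name is illustrative:

/* Sketch only: same effect via csum_replace2(). */
static inline int example_ip_decrease_ttl(struct iphdr *iph)
{
	__be16 old_word = htons((iph->ttl << 8) | iph->protocol);
	__be16 new_word = htons(((iph->ttl - 1) << 8) | iph->protocol);

	csum_replace2(&iph->check, old_word, new_word);
	return --iph->ttl;
}
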
404static inline int ip_mtu_locked(const struct dst_entry *dst)
405{
406	const struct rtable *rt = (const struct rtable *)dst;
407
408	return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
409}
410
411static inline
412int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
413{
414	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);
415
416	return  pmtudisc == IP_PMTUDISC_DO ||
417		(pmtudisc == IP_PMTUDISC_WANT &&
418		 !ip_mtu_locked(dst));
419}
420
421static inline bool ip_sk_accept_pmtu(const struct sock *sk)
422{
423	return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
424	       inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
425}
426
427static inline bool ip_sk_use_pmtu(const struct sock *sk)
428{
429	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
430}
431
432static inline bool ip_sk_ignore_df(const struct sock *sk)
433{
434	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
435	       inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
436}
437
438static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
439						    bool forwarding)
440{
441	struct net *net = dev_net(dst->dev);
442
443	if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
444	    ip_mtu_locked(dst) ||
445	    !forwarding)
446		return dst_mtu(dst);
447
448	return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
449}
450
451static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
452					  const struct sk_buff *skb)
453{
454	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
455		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
456
457		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
458	}
459
460	return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
461}
462
463struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
464					int fc_mx_len,
465					struct netlink_ext_ack *extack);
466static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
467{
468	if (fib_metrics != &dst_default_metrics &&
469	    refcount_dec_and_test(&fib_metrics->refcnt))
470		kfree(fib_metrics);
471}
472
473/* ipv4 and ipv6 both use refcounted metrics if it is not the default */
474static inline
475void ip_dst_init_metrics(struct dst_entry *dst, struct dst_metrics *fib_metrics)
476{
477	dst_init_metrics(dst, fib_metrics->metrics, true);
478
479	if (fib_metrics != &dst_default_metrics) {
480		dst->_metrics |= DST_METRICS_REFCOUNTED;
481		refcount_inc(&fib_metrics->refcnt);
482	}
483}
484
485static inline
486void ip_dst_metrics_put(struct dst_entry *dst)
487{
488	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
489
490	if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
491		kfree(p);
492}
493
494u32 ip_idents_reserve(u32 hash, int segs);
495void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);
496
497static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
498					struct sock *sk, int segs)
499{
500	struct iphdr *iph = ip_hdr(skb);
501
502	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
503		/* This is only to work around buggy Windows95/2000
504		 * VJ compression implementations.  If the ID field
505		 * does not change, they drop every other packet in
506		 * a TCP stream using header compression.
507		 */
508		if (sk && inet_sk(sk)->inet_daddr) {
509			iph->id = htons(inet_sk(sk)->inet_id);
510			inet_sk(sk)->inet_id += segs;
511		} else {
512			iph->id = 0;
513		}
514	} else {
515		__ip_select_ident(net, iph, segs);
516	}
517}
518
519static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
520				   struct sock *sk)
521{
522	ip_select_ident_segs(net, skb, sk, 1);
523}
524
525static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
526{
527	return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
528				  skb->len, proto, 0);
529}
530
531/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
532 * Equivalent to :	flow->v4addrs.src = iph->saddr;
533 *			flow->v4addrs.dst = iph->daddr;
534 */
535static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
536					    const struct iphdr *iph)
537{
538	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
539		     offsetof(typeof(flow->addrs), v4addrs.src) +
540			      sizeof(flow->addrs.v4addrs.src));
541	memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
542	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
543}
544
545static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
546{
547	const struct iphdr *iph = skb_gro_network_header(skb);
548
549	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
550				  skb_gro_len(skb), proto, 0);
551}
552
553/*
554 *	Map a multicast IP onto multicast MAC for type ethernet.
555 */
556
557static inline void ip_eth_mc_map(__be32 naddr, char *buf)
558{
559	__u32 addr=ntohl(naddr);
560	buf[0]=0x01;
561	buf[1]=0x00;
562	buf[2]=0x5e;
563	buf[5]=addr&0xFF;
564	addr>>=8;
565	buf[4]=addr&0xFF;
566	addr>>=8;
567	buf[3]=addr&0x7F;
568}
569
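Only the low 23 bits of the group address survive the mapping into the 01:00:5e:00:00:00 block, so 32 IPv4 groups share each Ethernet multicast address; a small sketch with illustrative addresses:

/* Sketch only: 224.1.2.3 and 239.129.2.3 both map to 01:00:5e:01:02:03. */
static inline void example_eth_mc_map(void)
{
	char mac[6];

	ip_eth_mc_map(htonl(0xe0010203), mac);	/* 224.1.2.3   */
	ip_eth_mc_map(htonl(0xef810203), mac);	/* 239.129.2.3 */
}
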
570/*
571 *	Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
572 *	Leave P_Key as 0 to be filled in by driver.
573 */
574
575static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
576{
577	__u32 addr;
578	unsigned char scope = broadcast[5] & 0xF;
579
580	buf[0]  = 0;		/* Reserved */
581	buf[1]  = 0xff;		/* Multicast QPN */
582	buf[2]  = 0xff;
583	buf[3]  = 0xff;
584	addr    = ntohl(naddr);
585	buf[4]  = 0xff;
586	buf[5]  = 0x10 | scope;	/* scope from broadcast address */
587	buf[6]  = 0x40;		/* IPv4 signature */
588	buf[7]  = 0x1b;
589	buf[8]  = broadcast[8];		/* P_Key */
590	buf[9]  = broadcast[9];
591	buf[10] = 0;
592	buf[11] = 0;
593	buf[12] = 0;
594	buf[13] = 0;
595	buf[14] = 0;
596	buf[15] = 0;
597	buf[19] = addr & 0xff;
598	addr  >>= 8;
599	buf[18] = addr & 0xff;
600	addr  >>= 8;
601	buf[17] = addr & 0xff;
602	addr  >>= 8;
603	buf[16] = addr & 0x0f;
604}
605
606static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
607{
608	if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
609		memcpy(buf, broadcast, 4);
610	else
611		memcpy(buf, &naddr, sizeof(naddr));
612}
613
614#if IS_ENABLED(CONFIG_IPV6)
615#include <linux/ipv6.h>
616#endif
617
618static __inline__ void inet_reset_saddr(struct sock *sk)
619{
620	inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
621#if IS_ENABLED(CONFIG_IPV6)
622	if (sk->sk_family == PF_INET6) {
623		struct ipv6_pinfo *np = inet6_sk(sk);
624
625		memset(&np->saddr, 0, sizeof(np->saddr));
626		memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
627	}
628#endif
629}
630
631#endif
632
633static inline unsigned int ipv4_addr_hash(__be32 ip)
634{
635	return (__force unsigned int) ip;
636}
637
638static inline u32 ipv4_portaddr_hash(const struct net *net,
639				     __be32 saddr,
640				     unsigned int port)
641{
642	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
643}
644
645bool ip_call_ra_chain(struct sk_buff *skb);
646
647/*
648 *	Functions provided by ip_fragment.c
649 */
650
651enum ip_defrag_users {
652	IP_DEFRAG_LOCAL_DELIVER,
653	IP_DEFRAG_CALL_RA_CHAIN,
654	IP_DEFRAG_CONNTRACK_IN,
655	__IP_DEFRAG_CONNTRACK_IN_END	= IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
656	IP_DEFRAG_CONNTRACK_OUT,
657	__IP_DEFRAG_CONNTRACK_OUT_END	= IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
658	IP_DEFRAG_CONNTRACK_BRIDGE_IN,
659	__IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
660	IP_DEFRAG_VS_IN,
661	IP_DEFRAG_VS_OUT,
662	IP_DEFRAG_VS_FWD,
663	IP_DEFRAG_AF_PACKET,
664	IP_DEFRAG_MACVLAN,
665};
666
667/* Return true if the value of 'user' is between 'lower_bond'
668 * and 'upper_bond' inclusively.
669 */
670static inline bool ip_defrag_user_in_between(u32 user,
671					     enum ip_defrag_users lower_bond,
672					     enum ip_defrag_users upper_bond)
673{
674	return user >= lower_bond && user <= upper_bond;
675}
676
677int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
678#ifdef CONFIG_INET
679struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
680#else
681static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
682{
683	return skb;
684}
685#endif
686
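Packet taps such as af_packet and macvlan use ip_check_defrag() rather than ip_defrag() directly: a NULL return means the fragment was queued and reassembly is still incomplete, otherwise the caller gets back either the untouched skb or a fully reassembled datagram. A hedged sketch, with consume_skb() standing in for real delivery:

/* Sketch only: defragment on behalf of a packet tap. */
static void example_tap_rcv(struct net *net, struct sk_buff *skb)
{
	skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
	if (!skb)
		return;	/* fragment queued, datagram not complete yet */

	consume_skb(skb);	/* placeholder for actual packet delivery */
}
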
687/*
688 *	Functions provided by ip_forward.c
689 */
690
691int ip_forward(struct sk_buff *skb);
692
693/*
694 *	Functions provided by ip_options.c
695 */
696
697void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
698		      __be32 daddr, struct rtable *rt, int is_frag);
699
700int __ip_options_echo(struct net *net, struct ip_options *dopt,
701		      struct sk_buff *skb, const struct ip_options *sopt);
702static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
703				  struct sk_buff *skb)
704{
705	return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt);
706}
707
708void ip_options_fragment(struct sk_buff *skb);
709int __ip_options_compile(struct net *net, struct ip_options *opt,
710			 struct sk_buff *skb, __be32 *info);
711int ip_options_compile(struct net *net, struct ip_options *opt,
712		       struct sk_buff *skb);
713int ip_options_get(struct net *net, struct ip_options_rcu **optp,
714		   unsigned char *data, int optlen);
715int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
716			     unsigned char __user *data, int optlen);
717void ip_options_undo(struct ip_options *opt);
718void ip_forward_options(struct sk_buff *skb);
719int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
720
721/*
722 *	Functions provided by ip_sockglue.c
723 */
724
725void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
726void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
727			 struct sk_buff *skb, int tlen, int offset);
728int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
729		 struct ipcm_cookie *ipc, bool allow_ipv6);
730int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
731		  unsigned int optlen);
732int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
733		  int __user *optlen);
734int compat_ip_setsockopt(struct sock *sk, int level, int optname,
735			 char __user *optval, unsigned int optlen);
736int compat_ip_getsockopt(struct sock *sk, int level, int optname,
737			 char __user *optval, int __user *optlen);
738int ip_ra_control(struct sock *sk, unsigned char on,
739		  void (*destructor)(struct sock *));
740
741int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
742void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
743		   u32 info, u8 *payload);
744void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
745		    u32 info);
746
747static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
748{
749	ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
750}
751
752bool icmp_global_allow(void);
753extern int sysctl_icmp_msgs_per_sec;
754extern int sysctl_icmp_msgs_burst;
755
756#ifdef CONFIG_PROC_FS
757int ip_misc_proc_init(void);
758#endif
759
760int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
761				struct netlink_ext_ack *extack);
762
763#endif	/* _IP_H */
v4.10.11 (include/net/ip.h)
 
  1/*
  2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
  3 *		operating system.  INET is implemented using the  BSD Socket
  4 *		interface as the means of communication with the user level.
  5 *
  6 *		Definitions for the IP module.
  7 *
  8 * Version:	@(#)ip.h	1.0.2	05/07/93
  9 *
 10 * Authors:	Ross Biro
 11 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 12 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 13 *
 14 * Changes:
 15 *		Mike McLagan    :       Routing by source
 16 *
 17 *		This program is free software; you can redistribute it and/or
 18 *		modify it under the terms of the GNU General Public License
 19 *		as published by the Free Software Foundation; either version
 20 *		2 of the License, or (at your option) any later version.
 21 */
 22#ifndef _IP_H
 23#define _IP_H
 24
 25#include <linux/types.h>
 26#include <linux/ip.h>
 27#include <linux/in.h>
 28#include <linux/skbuff.h>
 29
 30#include <net/inet_sock.h>
 31#include <net/route.h>
 32#include <net/snmp.h>
 33#include <net/flow.h>
 34#include <net/flow_dissector.h>
 35
 36struct sock;
 37
 38struct inet_skb_parm {
 39	int			iif;
 40	struct ip_options	opt;		/* Compiled IP options		*/
 41	u16			flags;
 42
 43#define IPSKB_FORWARDED		BIT(0)
 44#define IPSKB_XFRM_TUNNEL_SIZE	BIT(1)
 45#define IPSKB_XFRM_TRANSFORMED	BIT(2)
 46#define IPSKB_FRAG_COMPLETE	BIT(3)
 47#define IPSKB_REROUTED		BIT(4)
 48#define IPSKB_DOREDIRECT	BIT(5)
 49#define IPSKB_FRAG_PMTU		BIT(6)
 50#define IPSKB_L3SLAVE		BIT(7)
 51
 52	u16			frag_max_size;
 53};
 54
 55static inline bool ipv4_l3mdev_skb(u16 flags)
 56{
 57	return !!(flags & IPSKB_L3SLAVE);
 58}
 59
 60static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
 61{
 62	return ip_hdr(skb)->ihl * 4;
 63}
 64
 65struct ipcm_cookie {
 66	struct sockcm_cookie	sockc;
 67	__be32			addr;
 68	int			oif;
 69	struct ip_options_rcu	*opt;
 70	__u8			tx_flags;
 71	__u8			ttl;
 72	__s16			tos;
 73	char			priority;
 74};
 75
 76#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
 77#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))
 78
 79struct ip_ra_chain {
 80	struct ip_ra_chain __rcu *next;
 81	struct sock		*sk;
 82	union {
 83		void			(*destructor)(struct sock *);
 84		struct sock		*saved_sk;
 85	};
 86	struct rcu_head		rcu;
 87};
 88
 89extern struct ip_ra_chain __rcu *ip_ra_chain;
 90
 91/* IP flags. */
 92#define IP_CE		0x8000		/* Flag: "Congestion"		*/
 93#define IP_DF		0x4000		/* Flag: "Don't Fragment"	*/
 94#define IP_MF		0x2000		/* Flag: "More Fragments"	*/
 95#define IP_OFFSET	0x1FFF		/* "Fragment Offset" part	*/
 96
 97#define IP_FRAG_TIME	(30 * HZ)		/* fragment lifetime	*/
 98
 99struct msghdr;
100struct net_device;
101struct packet_type;
102struct rtable;
103struct sockaddr;
104
105int igmp_mc_init(void);
106
107/*
108 *	Functions provided by ip.c
109 */
110
111int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
112			  __be32 saddr, __be32 daddr,
113			  struct ip_options_rcu *opt);
114int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
115	   struct net_device *orig_dev);
116int ip_local_deliver(struct sk_buff *skb);
117int ip_mr_input(struct sk_buff *skb);
118int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
119int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
120int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
121		   int (*output)(struct net *, struct sock *, struct sk_buff *));
122void ip_send_check(struct iphdr *ip);
123int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
124int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
125
126int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
127void ip_init(void);
128int ip_append_data(struct sock *sk, struct flowi4 *fl4,
129		   int getfrag(void *from, char *to, int offset, int len,
130			       int odd, struct sk_buff *skb),
131		   void *from, int len, int protolen,
132		   struct ipcm_cookie *ipc,
133		   struct rtable **rt,
134		   unsigned int flags);
135int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
136		       struct sk_buff *skb);
137ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
138		       int offset, size_t size, int flags);
139struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
140			      struct sk_buff_head *queue,
141			      struct inet_cork *cork);
142int ip_send_skb(struct net *net, struct sk_buff *skb);
143int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
144void ip_flush_pending_frames(struct sock *sk);
145struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
146			    int getfrag(void *from, char *to, int offset,
147					int len, int odd, struct sk_buff *skb),
148			    void *from, int length, int transhdrlen,
149			    struct ipcm_cookie *ipc, struct rtable **rtp,
150			    unsigned int flags);
151
152static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
153{
154	return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
155}
156
157static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
158{
159	return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
160}
161
162static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
163{
164	return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
165}
166
167/* datagram.c */
168int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
169int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
170
171void ip4_datagram_release_cb(struct sock *sk);
172
173struct ip_reply_arg {
174	struct kvec iov[1];   
175	int	    flags;
176	__wsum 	    csum;
177	int	    csumoffset; /* u16 offset of csum in iov[0].iov_base */
178				/* -1 if not needed */ 
179	int	    bound_dev_if;
180	u8  	    tos;
181	kuid_t	    uid;
182}; 
183
184#define IP_REPLY_ARG_NOSRCCHECK 1
185
186static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
187{
188	return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
189}
190
191void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
192			   const struct ip_options *sopt,
193			   __be32 daddr, __be32 saddr,
194			   const struct ip_reply_arg *arg,
195			   unsigned int len);
196
197#define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
198#define __IP_INC_STATS(net, field)	__SNMP_INC_STATS64((net)->mib.ip_statistics, field)
199#define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
200#define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
201#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
202#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
203#define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
204#define __NET_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.net_statistics, field)
205#define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
206#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
207
208u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
209unsigned long snmp_fold_field(void __percpu *mib, int offt);
210#if BITS_PER_LONG==32
211u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
212			 size_t syncp_offset);
213u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
214#else
215static inline u64  snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
216					size_t syncp_offset)
217{
218	return snmp_get_cpu_field(mib, cpu, offct);
219
220}
221
222static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
223{
224	return snmp_fold_field(mib, offt);
225}
226#endif
227
228#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
229{ \
230	int i, c; \
231	for_each_possible_cpu(c) { \
232		for (i = 0; stats_list[i].name; i++) \
233			buff64[i] += snmp_get_cpu_field64( \
234					mib_statistic, \
235					c, stats_list[i].entry, \
236					offset); \
237	} \
238}
239
240#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
241{ \
242	int i, c; \
243	for_each_possible_cpu(c) { \
244		for (i = 0; stats_list[i].name; i++) \
245			buff[i] += snmp_get_cpu_field( \
246						mib_statistic, \
247						c, stats_list[i].entry); \
248	} \
249}
250
251void inet_get_local_port_range(struct net *net, int *low, int *high);
252
253#ifdef CONFIG_SYSCTL
254static inline int inet_is_local_reserved_port(struct net *net, int port)
255{
256	if (!net->ipv4.sysctl_local_reserved_ports)
257		return 0;
258	return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
259}
260
261static inline bool sysctl_dev_name_is_allowed(const char *name)
262{
263	return strcmp(name, "default") != 0  && strcmp(name, "all") != 0;
264}
265
266#else
267static inline int inet_is_local_reserved_port(struct net *net, int port)
268{
269	return 0;
270}
271#endif
272
273__be32 inet_current_timestamp(void);
274
275/* From inetpeer.c */
276extern int inet_peer_threshold;
277extern int inet_peer_minttl;
278extern int inet_peer_maxttl;
279
280void ipfrag_init(void);
281
282void ip_static_sysctl_init(void);
283
284#define IP4_REPLY_MARK(net, mark) \
285	((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)
286
287static inline bool ip_is_fragment(const struct iphdr *iph)
288{
289	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
290}
291
292#ifdef CONFIG_INET
293#include <net/dst.h>
294
295/* The function in 2.2 was invalid, producing wrong result for
296 * check=0xFEFF. It was noticed by Arthur Skawina _year_ ago. --ANK(000625) */
297static inline
298int ip_decrease_ttl(struct iphdr *iph)
299{
300	u32 check = (__force u32)iph->check;
301	check += (__force u32)htons(0x0100);
302	iph->check = (__force __sum16)(check + (check>=0xFFFF));
303	return --iph->ttl;
304}
305
306static inline
307int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
308{
309	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);
310
311	return  pmtudisc == IP_PMTUDISC_DO ||
312		(pmtudisc == IP_PMTUDISC_WANT &&
313		 !(dst_metric_locked(dst, RTAX_MTU)));
314}
315
316static inline bool ip_sk_accept_pmtu(const struct sock *sk)
317{
318	return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
319	       inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
320}
321
322static inline bool ip_sk_use_pmtu(const struct sock *sk)
323{
324	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
325}
326
327static inline bool ip_sk_ignore_df(const struct sock *sk)
328{
329	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
330	       inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
331}
332
333static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
334						    bool forwarding)
335{
336	struct net *net = dev_net(dst->dev);
337
338	if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
339	    dst_metric_locked(dst, RTAX_MTU) ||
340	    !forwarding)
341		return dst_mtu(dst);
342
343	return min(dst->dev->mtu, IP_MAX_MTU);
344}
345
346static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
347					  const struct sk_buff *skb)
348{
349	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
350		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
351
352		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
353	}
354
355	return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU);
356}
357
358u32 ip_idents_reserve(u32 hash, int segs);
359void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);
360
361static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
362					struct sock *sk, int segs)
363{
364	struct iphdr *iph = ip_hdr(skb);
365
366	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
367		/* This is only to work around buggy Windows95/2000
368		 * VJ compression implementations.  If the ID field
369		 * does not change, they drop every other packet in
370		 * a TCP stream using header compression.
371		 */
372		if (sk && inet_sk(sk)->inet_daddr) {
373			iph->id = htons(inet_sk(sk)->inet_id);
374			inet_sk(sk)->inet_id += segs;
375		} else {
376			iph->id = 0;
377		}
378	} else {
379		__ip_select_ident(net, iph, segs);
380	}
381}
382
383static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
384				   struct sock *sk)
385{
386	ip_select_ident_segs(net, skb, sk, 1);
387}
388
389static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
390{
391	return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
392				  skb->len, proto, 0);
393}
394
395/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
396 * Equivalent to :	flow->v4addrs.src = iph->saddr;
397 *			flow->v4addrs.dst = iph->daddr;
398 */
399static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
400					    const struct iphdr *iph)
401{
402	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
403		     offsetof(typeof(flow->addrs), v4addrs.src) +
404			      sizeof(flow->addrs.v4addrs.src));
405	memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
406	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
407}
408
409static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
410{
411	const struct iphdr *iph = skb_gro_network_header(skb);
412
413	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
414				  skb_gro_len(skb), proto, 0);
415}
416
417/*
418 *	Map a multicast IP onto multicast MAC for type ethernet.
419 */
420
421static inline void ip_eth_mc_map(__be32 naddr, char *buf)
422{
423	__u32 addr=ntohl(naddr);
424	buf[0]=0x01;
425	buf[1]=0x00;
426	buf[2]=0x5e;
427	buf[5]=addr&0xFF;
428	addr>>=8;
429	buf[4]=addr&0xFF;
430	addr>>=8;
431	buf[3]=addr&0x7F;
432}
433
434/*
435 *	Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
436 *	Leave P_Key as 0 to be filled in by driver.
437 */
438
439static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
440{
441	__u32 addr;
442	unsigned char scope = broadcast[5] & 0xF;
443
444	buf[0]  = 0;		/* Reserved */
445	buf[1]  = 0xff;		/* Multicast QPN */
446	buf[2]  = 0xff;
447	buf[3]  = 0xff;
448	addr    = ntohl(naddr);
449	buf[4]  = 0xff;
450	buf[5]  = 0x10 | scope;	/* scope from broadcast address */
451	buf[6]  = 0x40;		/* IPv4 signature */
452	buf[7]  = 0x1b;
453	buf[8]  = broadcast[8];		/* P_Key */
454	buf[9]  = broadcast[9];
455	buf[10] = 0;
456	buf[11] = 0;
457	buf[12] = 0;
458	buf[13] = 0;
459	buf[14] = 0;
460	buf[15] = 0;
461	buf[19] = addr & 0xff;
462	addr  >>= 8;
463	buf[18] = addr & 0xff;
464	addr  >>= 8;
465	buf[17] = addr & 0xff;
466	addr  >>= 8;
467	buf[16] = addr & 0x0f;
468}
469
470static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
471{
472	if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
473		memcpy(buf, broadcast, 4);
474	else
475		memcpy(buf, &naddr, sizeof(naddr));
476}
477
478#if IS_ENABLED(CONFIG_IPV6)
479#include <linux/ipv6.h>
480#endif
481
482static __inline__ void inet_reset_saddr(struct sock *sk)
483{
484	inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
485#if IS_ENABLED(CONFIG_IPV6)
486	if (sk->sk_family == PF_INET6) {
487		struct ipv6_pinfo *np = inet6_sk(sk);
488
489		memset(&np->saddr, 0, sizeof(np->saddr));
490		memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
491	}
492#endif
493}
494
495#endif
496
497static inline unsigned int ipv4_addr_hash(__be32 ip)
498{
499	return (__force unsigned int) ip;
500}
501
502bool ip_call_ra_chain(struct sk_buff *skb);
503
504/*
505 *	Functions provided by ip_fragment.c
506 */
507
508enum ip_defrag_users {
509	IP_DEFRAG_LOCAL_DELIVER,
510	IP_DEFRAG_CALL_RA_CHAIN,
511	IP_DEFRAG_CONNTRACK_IN,
512	__IP_DEFRAG_CONNTRACK_IN_END	= IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
513	IP_DEFRAG_CONNTRACK_OUT,
514	__IP_DEFRAG_CONNTRACK_OUT_END	= IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
515	IP_DEFRAG_CONNTRACK_BRIDGE_IN,
516	__IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
517	IP_DEFRAG_VS_IN,
518	IP_DEFRAG_VS_OUT,
519	IP_DEFRAG_VS_FWD,
520	IP_DEFRAG_AF_PACKET,
521	IP_DEFRAG_MACVLAN,
522};
523
524/* Return true if the value of 'user' is between 'lower_bond'
525 * and 'upper_bond' inclusively.
526 */
527static inline bool ip_defrag_user_in_between(u32 user,
528					     enum ip_defrag_users lower_bond,
529					     enum ip_defrag_users upper_bond)
530{
531	return user >= lower_bond && user <= upper_bond;
532}
533
534int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
535#ifdef CONFIG_INET
536struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
537#else
538static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
539{
540	return skb;
541}
542#endif
543int ip_frag_mem(struct net *net);
544
545/*
546 *	Functions provided by ip_forward.c
547 */
548 
549int ip_forward(struct sk_buff *skb);
550 
551/*
552 *	Functions provided by ip_options.c
553 */
554 
555void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
556		      __be32 daddr, struct rtable *rt, int is_frag);
557
558int __ip_options_echo(struct ip_options *dopt, struct sk_buff *skb,
559		      const struct ip_options *sopt);
560static inline int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
561{
562	return __ip_options_echo(dopt, skb, &IPCB(skb)->opt);
563}
564
565void ip_options_fragment(struct sk_buff *skb);
566int ip_options_compile(struct net *net, struct ip_options *opt,
567		       struct sk_buff *skb);
568int ip_options_get(struct net *net, struct ip_options_rcu **optp,
569		   unsigned char *data, int optlen);
570int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
571			     unsigned char __user *data, int optlen);
572void ip_options_undo(struct ip_options *opt);
573void ip_forward_options(struct sk_buff *skb);
574int ip_options_rcv_srr(struct sk_buff *skb);
575
576/*
577 *	Functions provided by ip_sockglue.c
578 */
579
580void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
581void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
582			 struct sk_buff *skb, int tlen, int offset);
583int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
584		 struct ipcm_cookie *ipc, bool allow_ipv6);
585int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
586		  unsigned int optlen);
587int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
588		  int __user *optlen);
589int compat_ip_setsockopt(struct sock *sk, int level, int optname,
590			 char __user *optval, unsigned int optlen);
591int compat_ip_getsockopt(struct sock *sk, int level, int optname,
592			 char __user *optval, int __user *optlen);
593int ip_ra_control(struct sock *sk, unsigned char on,
594		  void (*destructor)(struct sock *));
595
596int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
597void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
598		   u32 info, u8 *payload);
599void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
600		    u32 info);
601
602static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
603{
604	ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
605}
606
607bool icmp_global_allow(void);
608extern int sysctl_icmp_msgs_per_sec;
609extern int sysctl_icmp_msgs_burst;
610
611#ifdef CONFIG_PROC_FS
612int ip_misc_proc_init(void);
613#endif
614
615#endif	/* _IP_H */