net/ipv4/ip_input.c (Linux v5.9)
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
  4 *		operating system.  INET is implemented using the  BSD Socket
  5 *		interface as the means of communication with the user level.
  6 *
  7 *		The Internet Protocol (IP) module.
  8 *
  9 * Authors:	Ross Biro
 10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 11 *		Donald Becker, <becker@super.org>
 12 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 13 *		Richard Underwood
 14 *		Stefan Becker, <stefanb@yello.ping.de>
 15 *		Jorge Cwik, <jorge@laser.satlink.net>
 16 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 17 *
 18 * Fixes:
 19 *		Alan Cox	:	Commented a couple of minor bits of surplus code
 20 *		Alan Cox	:	Undefining IP_FORWARD doesn't include the code
 21 *					(just stops a compiler warning).
 22 *		Alan Cox	:	Frames with >=MAX_ROUTE record routes, strict routes or loose routes
 23 *					are junked rather than corrupting things.
 24 *		Alan Cox	:	Frames to bad broadcast subnets are dumped
 25 *					We used to process them non broadcast and
 26 *					boy could that cause havoc.
 27 *		Alan Cox	:	ip_forward sets the free flag on the
 28 *					new frame it queues. Still crap because
 29 *					it copies the frame but at least it
 30 *					doesn't eat memory too.
 31 *		Alan Cox	:	Generic queue code and memory fixes.
 32 *		Fred Van Kempen :	IP fragment support (borrowed from NET2E)
 33 *		Gerhard Koerting:	Forward fragmented frames correctly.
 34 *		Gerhard Koerting: 	Fixes to my fix of the above 8-).
 35 *		Gerhard Koerting:	IP interface addressing fix.
 36 *		Linus Torvalds	:	More robustness checks
 37 *		Alan Cox	:	Even more checks: Still not as robust as it ought to be
 38 *		Alan Cox	:	Save IP header pointer for later
 39 *		Alan Cox	:	ip option setting
 40 *		Alan Cox	:	Use ip_tos/ip_ttl settings
 41 *		Alan Cox	:	Fragmentation bogosity removed
 42 *					(Thanks to Mark.Bush@prg.ox.ac.uk)
 43 *		Dmitry Gorodchanin :	Send of a raw packet crash fix.
 44 *		Alan Cox	:	Silly ip bug when an overlength
 45 *					fragment turns up. Now frees the
 46 *					queue.
 47 *		Linus Torvalds/ :	Memory leakage on fragmentation
 48 *		Alan Cox	:	handling.
 49 *		Gerhard Koerting:	Forwarding uses IP priority hints
 50 *		Teemu Rantanen	:	Fragment problems.
 51 *		Alan Cox	:	General cleanup, comments and reformat
 52 *		Alan Cox	:	SNMP statistics
 53 *		Alan Cox	:	BSD address rule semantics. Also see
 54 *					UDP as there is a nasty checksum issue
 55 *					if you do things the wrong way.
 56 *		Alan Cox	:	Always defrag, moved IP_FORWARD to the config.in file
 57 *		Alan Cox	: 	IP options adjust sk->priority.
 58 *		Pedro Roque	:	Fix mtu/length error in ip_forward.
 59 *		Alan Cox	:	Avoid ip_chk_addr when possible.
 60 *	Richard Underwood	:	IP multicasting.
 61 *		Alan Cox	:	Cleaned up multicast handlers.
 62 *		Alan Cox	:	RAW sockets demultiplex in the BSD style.
 63 *		Gunther Mayer	:	Fix the SNMP reporting typo
 64 *		Alan Cox	:	Always in group 224.0.0.1
 65 *	Pauline Middelink	:	Fast ip_checksum update when forwarding
 66 *					Masquerading support.
 67 *		Alan Cox	:	Multicast loopback error for 224.0.0.1
 68 *		Alan Cox	:	IP_MULTICAST_LOOP option.
 69 *		Alan Cox	:	Use notifiers.
 70 *		Bjorn Ekwall	:	Removed ip_csum (from slhc.c too)
 71 *		Bjorn Ekwall	:	Moved ip_fast_csum to ip.h (inline!)
 72 *		Stefan Becker   :       Send out ICMP HOST REDIRECT
 73 *	Arnt Gulbrandsen	:	ip_build_xmit
 74 *		Alan Cox	:	Per socket routing cache
 75 *		Alan Cox	:	Fixed routing cache, added header cache.
 76 *		Alan Cox	:	Loopback didn't work right in original ip_build_xmit - fixed it.
 77 *		Alan Cox	:	Only send ICMP_REDIRECT if src/dest are the same net.
 78 *		Alan Cox	:	Incoming IP option handling.
 79 *		Alan Cox	:	Set saddr on raw output frames as per BSD.
 80 *		Alan Cox	:	Stopped broadcast source route explosions.
 81 *		Alan Cox	:	Can disable source routing
 82 *		Takeshi Sone    :	Masquerading didn't work.
 83 *	Dave Bonn,Alan Cox	:	Faster IP forwarding whenever possible.
 84 *		Alan Cox	:	Memory leaks, tramples, misc debugging.
 85 *		Alan Cox	:	Fixed multicast (by popular demand 8))
 86 *		Alan Cox	:	Fixed forwarding (by even more popular demand 8))
 87 *		Alan Cox	:	Fixed SNMP statistics [I think]
 88 *	Gerhard Koerting	:	IP fragmentation forwarding fix
 89 *		Alan Cox	:	Device lock against page fault.
 90 *		Alan Cox	:	IP_HDRINCL facility.
 91 *	Werner Almesberger	:	Zero fragment bug
 92 *		Alan Cox	:	RAW IP frame length bug
 93 *		Alan Cox	:	Outgoing firewall on build_xmit
 94 *		A.N.Kuznetsov	:	IP_OPTIONS support throughout the kernel
 95 *		Alan Cox	:	Multicast routing hooks
 96 *		Jos Vos		:	Do accounting *before* call_in_firewall
 97 *	Willy Konynenberg	:	Transparent proxying support
 98 *
 99 * To Fix:
100 *		IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient
101 *		and could be made very efficient with the addition of some virtual memory hacks to permit
102 *		the allocation of a buffer that can then be 'grown' by twiddling page tables.
103 *		Output fragmentation wants updating along with the buffer management to use a single
104 *		interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet
105 *		output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause
106 *		fragmentation anyway.
107 */
108
109#define pr_fmt(fmt) "IPv4: " fmt
110
111#include <linux/module.h>
112#include <linux/types.h>
113#include <linux/kernel.h>
114#include <linux/string.h>
115#include <linux/errno.h>
116#include <linux/slab.h>
117
118#include <linux/net.h>
119#include <linux/socket.h>
120#include <linux/sockios.h>
121#include <linux/in.h>
122#include <linux/inet.h>
123#include <linux/inetdevice.h>
124#include <linux/netdevice.h>
125#include <linux/etherdevice.h>
126#include <linux/indirect_call_wrapper.h>
127
128#include <net/snmp.h>
129#include <net/ip.h>
130#include <net/protocol.h>
131#include <net/route.h>
132#include <linux/skbuff.h>
133#include <net/sock.h>
134#include <net/arp.h>
135#include <net/icmp.h>
136#include <net/raw.h>
137#include <net/checksum.h>
138#include <net/inet_ecn.h>
139#include <linux/netfilter_ipv4.h>
140#include <net/xfrm.h>
141#include <linux/mroute.h>
142#include <linux/netlink.h>
143#include <net/dst_metadata.h>
144
145/*
146 *	Process Router Attention IP option (RFC 2113)
147 */
148bool ip_call_ra_chain(struct sk_buff *skb)
149{
150	struct ip_ra_chain *ra;
151	u8 protocol = ip_hdr(skb)->protocol;
152	struct sock *last = NULL;
153	struct net_device *dev = skb->dev;
154	struct net *net = dev_net(dev);
155
156	for (ra = rcu_dereference(net->ipv4.ra_chain); ra; ra = rcu_dereference(ra->next)) {
157		struct sock *sk = ra->sk;
158
159		/* If socket is bound to an interface, only report
160		 * the packet if it came  from that interface.
161		 */
162		if (sk && inet_sk(sk)->inet_num == protocol &&
163		    (!sk->sk_bound_dev_if ||
164		     sk->sk_bound_dev_if == dev->ifindex)) {
165			if (ip_is_fragment(ip_hdr(skb))) {
166				if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN))
167					return true;
168			}
169			if (last) {
170				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
171				if (skb2)
172					raw_rcv(last, skb2);
173			}
174			last = sk;
175		}
176	}
177
178	if (last) {
179		raw_rcv(last, skb);
180		return true;
181	}
182	return false;
183}
184
185INDIRECT_CALLABLE_DECLARE(int udp_rcv(struct sk_buff *));
186INDIRECT_CALLABLE_DECLARE(int tcp_v4_rcv(struct sk_buff *));
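/*
 *	Deliver the packet to any matching raw sockets, then to the L4
 *	protocol handler registered in inet_protos[].  Handlers are added
 *	with inet_add_protocol(); a minimal sketch for a hypothetical
 *	protocol (only the fields this function reads):
 *
 *		static const struct net_protocol foo_protocol = {
 *			.handler   = foo_rcv,
 *			.no_policy = 1,
 *		};
 *		inet_add_protocol(&foo_protocol, IPPROTO_FOO);
 *
 *	A negative handler return value asks for the packet to be
 *	resubmitted with the new protocol number (-ret).  Runs under
 *	rcu_read_lock().
 */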
187void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int protocol)
188{
189	const struct net_protocol *ipprot;
190	int raw, ret;
191
192resubmit:
193	raw = raw_local_deliver(skb, protocol);
194
195	ipprot = rcu_dereference(inet_protos[protocol]);
196	if (ipprot) {
197		if (!ipprot->no_policy) {
198			if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
199				kfree_skb(skb);
200				return;
201			}
202			nf_reset_ct(skb);
203		}
204		ret = INDIRECT_CALL_2(ipprot->handler, tcp_v4_rcv, udp_rcv,
205				      skb);
206		if (ret < 0) {
207			protocol = -ret;
208			goto resubmit;
209		}
210		__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
211	} else {
212		if (!raw) {
213			if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
214				__IP_INC_STATS(net, IPSTATS_MIB_INUNKNOWNPROTOS);
215				icmp_send(skb, ICMP_DEST_UNREACH,
216					  ICMP_PROT_UNREACH, 0);
217			}
218			kfree_skb(skb);
219		} else {
220			__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
221			consume_skb(skb);
222		}
223	}
224}
225
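/*
 *	okfn for the NF_INET_LOCAL_IN hook: strip the IP header (including
 *	options) and hand the payload to the transport layer under
 *	rcu_read_lock().
 */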
226static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
227{
228	__skb_pull(skb, skb_network_header_len(skb));
229
230	rcu_read_lock();
231	ip_protocol_deliver_rcu(net, skb, ip_hdr(skb)->protocol);
232	rcu_read_unlock();
233
234	return 0;
235}
236
237/*
238 * 	Deliver IP Packets to the higher protocol layers.
239 */
240int ip_local_deliver(struct sk_buff *skb)
241{
242	/*
243	 *	Reassemble IP fragments.
244	 */
245	struct net *net = dev_net(skb->dev);
246
247	if (ip_is_fragment(ip_hdr(skb))) {
248		if (ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER))
249			return 0;
250	}
251
252	return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN,
253		       net, NULL, skb, skb->dev, NULL,
254		       ip_local_deliver_finish);
255}
256
257static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
258{
259	struct ip_options *opt;
260	const struct iphdr *iph;
261
262	/* It looks as overkill, because not all
263	   IP options require packet mangling.
264	   But it is the easiest for now, especially taking
265	   into account that combination of IP options
266	   and running sniffer is extremely rare condition.
267					      --ANK (980813)
268	*/
269	if (skb_cow(skb, skb_headroom(skb))) {
270		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INDISCARDS);
271		goto drop;
272	}
273
274	iph = ip_hdr(skb);
275	opt = &(IPCB(skb)->opt);
276	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
277
278	if (ip_options_compile(dev_net(dev), opt, skb)) {
279		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
280		goto drop;
281	}
282
283	if (unlikely(opt->srr)) {
284		struct in_device *in_dev = __in_dev_get_rcu(dev);
285
286		if (in_dev) {
287			if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
288				if (IN_DEV_LOG_MARTIANS(in_dev))
289					net_info_ratelimited("source route option %pI4 -> %pI4\n",
290							     &iph->saddr,
291							     &iph->daddr);
292				goto drop;
293			}
294		}
295
296		if (ip_options_rcv_srr(skb, dev))
297			goto drop;
298	}
299
300	return false;
301drop:
302	return true;
303}
304
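/*
 *	The previous packet in a received list may serve as a routing hint:
 *	reuse its route only if this packet has no dst yet and matches the
 *	hint's daddr and TOS.
 */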
305static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph,
306			    const struct sk_buff *hint)
307{
308	return hint && !skb_dst(skb) && ip_hdr(hint)->daddr == iph->daddr &&
309	       ip_hdr(hint)->tos == iph->tos;
310}
311
312INDIRECT_CALLABLE_DECLARE(int udp_v4_early_demux(struct sk_buff *));
313INDIRECT_CALLABLE_DECLARE(int tcp_v4_early_demux(struct sk_buff *));
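/*
 *	Common receive-finish work for the single-packet and list paths:
 *	early demux, the routing decision (optionally via a list hint),
 *	IP option processing and input statistics.
 */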
314static int ip_rcv_finish_core(struct net *net, struct sock *sk,
315			      struct sk_buff *skb, struct net_device *dev,
316			      const struct sk_buff *hint)
317{
318	const struct iphdr *iph = ip_hdr(skb);
319	int (*edemux)(struct sk_buff *skb);
320	struct rtable *rt;
321	int err;
322
323	if (ip_can_use_hint(skb, iph, hint)) {
324		err = ip_route_use_hint(skb, iph->daddr, iph->saddr, iph->tos,
325					dev, hint);
326		if (unlikely(err))
327			goto drop_error;
328	}
329
330	if (net->ipv4.sysctl_ip_early_demux &&
331	    !skb_dst(skb) &&
332	    !skb->sk &&
333	    !ip_is_fragment(iph)) {
334		const struct net_protocol *ipprot;
335		int protocol = iph->protocol;
336
337		ipprot = rcu_dereference(inet_protos[protocol]);
338		if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) {
339			err = INDIRECT_CALL_2(edemux, tcp_v4_early_demux,
340					      udp_v4_early_demux, skb);
341			if (unlikely(err))
342				goto drop_error;
343			/* must reload iph, skb->head might have changed */
344			iph = ip_hdr(skb);
345		}
346	}
347
348	/*
349	 *	Initialise the virtual path cache for the packet. It describes
350	 *	how the packet travels inside Linux networking.
351	 */
352	if (!skb_valid_dst(skb)) {
353		err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
354					   iph->tos, dev);
355		if (unlikely(err))
356			goto drop_error;
357	}
358
359#ifdef CONFIG_IP_ROUTE_CLASSID
360	if (unlikely(skb_dst(skb)->tclassid)) {
361		struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
362		u32 idx = skb_dst(skb)->tclassid;
363		st[idx&0xFF].o_packets++;
364		st[idx&0xFF].o_bytes += skb->len;
365		st[(idx>>16)&0xFF].i_packets++;
366		st[(idx>>16)&0xFF].i_bytes += skb->len;
367	}
368#endif
369
370	if (iph->ihl > 5 && ip_rcv_options(skb, dev))
371		goto drop;
372
373	rt = skb_rtable(skb);
374	if (rt->rt_type == RTN_MULTICAST) {
375		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INMCAST, skb->len);
376	} else if (rt->rt_type == RTN_BROADCAST) {
377		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len);
378	} else if (skb->pkt_type == PACKET_BROADCAST ||
379		   skb->pkt_type == PACKET_MULTICAST) {
380		struct in_device *in_dev = __in_dev_get_rcu(dev);
381
382		/* RFC 1122 3.3.6:
383		 *
384		 *   When a host sends a datagram to a link-layer broadcast
385		 *   address, the IP destination address MUST be a legal IP
386		 *   broadcast or IP multicast address.
387		 *
388		 *   A host SHOULD silently discard a datagram that is received
389		 *   via a link-layer broadcast (see Section 2.4) but does not
390		 *   specify an IP multicast or broadcast destination address.
391		 *
392		 * This doesn't explicitly say L2 *broadcast*, but broadcast is
393		 * in a way a form of multicast and the most common use case for
394		 * this is 802.11 protecting against cross-station spoofing (the
395		 * so-called "hole-196" attack) so do it for both.
396		 */
397		if (in_dev &&
398		    IN_DEV_ORCONF(in_dev, DROP_UNICAST_IN_L2_MULTICAST))
399			goto drop;
400	}
401
402	return NET_RX_SUCCESS;
403
404drop:
405	kfree_skb(skb);
406	return NET_RX_DROP;
407
408drop_error:
409	if (err == -EXDEV)
410		__NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
411	goto drop;
412}
413
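/*
 *	okfn for the NF_INET_PRE_ROUTING hook on the single-packet path:
 *	make the routing decision and pass the packet to dst_input().
 */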
414static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
415{
416	struct net_device *dev = skb->dev;
417	int ret;
418
419	/* if ingress device is enslaved to an L3 master device pass the
420	 * skb to its handler for processing
421	 */
422	skb = l3mdev_ip_rcv(skb);
423	if (!skb)
424		return NET_RX_SUCCESS;
425
426	ret = ip_rcv_finish_core(net, sk, skb, dev, NULL);
427	if (ret != NET_RX_DROP)
428		ret = dst_input(skb);
429	return ret;
430}
431
432/*
433 * 	Main IP Receive routine.
434 */
435static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
436{
437	const struct iphdr *iph;
438	u32 len;
439
440	/* When the interface is in promisc. mode, drop all the crap
441	 * that it receives, do not try to analyse it.
442	 */
443	if (skb->pkt_type == PACKET_OTHERHOST)
444		goto drop;
445
446	__IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);
447
448	skb = skb_share_check(skb, GFP_ATOMIC);
449	if (!skb) {
450		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
451		goto out;
452	}
453
454	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
455		goto inhdr_error;
456
457	iph = ip_hdr(skb);
458
459	/*
460	 *	RFC1122: 3.2.1.2 MUST silently discard any IP frame that fails the checksum.
461	 *
462	 *	Is the datagram acceptable?
463	 *
464	 *	1.	Length at least the size of an ip header
465	 *	2.	Version of 4
466	 *	3.	Checksums correctly. [Speed optimisation for later, skip loopback checksums]
467	 *	4.	Doesn't have a bogus length
468	 */
469
470	if (iph->ihl < 5 || iph->version != 4)
471		goto inhdr_error;
472
473	BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1);
474	BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0);
475	BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE);
476	__IP_ADD_STATS(net,
477		       IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
478		       max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
479
480	if (!pskb_may_pull(skb, iph->ihl*4))
481		goto inhdr_error;
482
483	iph = ip_hdr(skb);
484
485	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
486		goto csum_error;
487
488	len = ntohs(iph->tot_len);
489	if (skb->len < len) {
490		__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
491		goto drop;
492	} else if (len < (iph->ihl*4))
493		goto inhdr_error;
494
495	/* Our transport medium may have padded the buffer out. Now we know it
496	 * is IP we can trim to the true length of the frame.
497	 * Note this now means skb->len holds ntohs(iph->tot_len).
498	 */
499	if (pskb_trim_rcsum(skb, len)) {
500		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
501		goto drop;
502	}
503
504	iph = ip_hdr(skb);
505	skb->transport_header = skb->network_header + iph->ihl*4;
506
507	/* Remove any debris in the socket control block */
508	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
509	IPCB(skb)->iif = skb->skb_iif;
510
511	/* Must drop socket now because of tproxy. */
512	if (!skb_sk_is_prefetched(skb))
513		skb_orphan(skb);
514
515	return skb;
516
517csum_error:
518	__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
519inhdr_error:
520	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
521drop:
522	kfree_skb(skb);
523out:
524	return NULL;
525}
526
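/*
 *	ip_rcv() below (and ip_list_rcv() further down) are attached to the
 *	ETH_P_IP packet_type as its .func/.list_func handlers, roughly
 *	(registration lives in net/ipv4/af_inet.c):
 *
 *		static struct packet_type ip_packet_type __read_mostly = {
 *			.type	   = cpu_to_be16(ETH_P_IP),
 *			.func	   = ip_rcv,
 *			.list_func = ip_list_rcv,
 *		};
 *		dev_add_pack(&ip_packet_type);
 */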
527/*
528 * IP receive entry point
529 */
530int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
531	   struct net_device *orig_dev)
532{
533	struct net *net = dev_net(dev);
534
535	skb = ip_rcv_core(skb, net);
536	if (skb == NULL)
537		return NET_RX_DROP;
538
539	return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
540		       net, NULL, skb, dev, NULL,
541		       ip_rcv_finish);
542}
543
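/* Hand each packet of an already-routed sublist to dst_input(). */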
544static void ip_sublist_rcv_finish(struct list_head *head)
545{
546	struct sk_buff *skb, *next;
547
548	list_for_each_entry_safe(skb, next, head, list) {
549		skb_list_del_init(skb);
550		dst_input(skb);
551	}
552}
553
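/*
 *	Let this skb act as the routing hint for the packets that follow it
 *	in the list, unless custom FIB rules are in use or the route is a
 *	broadcast one.
 */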
554static struct sk_buff *ip_extract_route_hint(const struct net *net,
555					     struct sk_buff *skb, int rt_type)
556{
557	if (fib4_has_custom_rules(net) || rt_type == RTN_BROADCAST)
558		return NULL;
559
560	return skb;
561}
562
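/*
 *	Finish receive for a list of packets: route each one, then dispatch
 *	them to dst_input() in sublists that share the same dst entry.
 */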
563static void ip_list_rcv_finish(struct net *net, struct sock *sk,
564			       struct list_head *head)
565{
566	struct sk_buff *skb, *next, *hint = NULL;
567	struct dst_entry *curr_dst = NULL;
568	struct list_head sublist;
569
570	INIT_LIST_HEAD(&sublist);
571	list_for_each_entry_safe(skb, next, head, list) {
572		struct net_device *dev = skb->dev;
573		struct dst_entry *dst;
574
575		skb_list_del_init(skb);
576		/* if ingress device is enslaved to an L3 master device pass the
577		 * skb to its handler for processing
578		 */
579		skb = l3mdev_ip_rcv(skb);
580		if (!skb)
581			continue;
582		if (ip_rcv_finish_core(net, sk, skb, dev, hint) == NET_RX_DROP)
583			continue;
584
585		dst = skb_dst(skb);
586		if (curr_dst != dst) {
587			hint = ip_extract_route_hint(net, skb,
588					       ((struct rtable *)dst)->rt_type);
589
590			/* dispatch old sublist */
591			if (!list_empty(&sublist))
592				ip_sublist_rcv_finish(&sublist);
593			/* start new sublist */
594			INIT_LIST_HEAD(&sublist);
595			curr_dst = dst;
596		}
597		list_add_tail(&skb->list, &sublist);
598	}
599	/* dispatch final sublist */
600	ip_sublist_rcv_finish(&sublist);
601}
602
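/*
 *	Run the NF_INET_PRE_ROUTING hook over a sublist sharing the same
 *	device and netns, then finish receive for whatever the hook let
 *	through.
 */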
603static void ip_sublist_rcv(struct list_head *head, struct net_device *dev,
604			   struct net *net)
605{
606	NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL,
607		     head, dev, NULL, ip_rcv_finish);
608	ip_list_rcv_finish(net, NULL, head);
609}
610
611/* Receive a list of IP packets */
612void ip_list_rcv(struct list_head *head, struct packet_type *pt,
613		 struct net_device *orig_dev)
614{
615	struct net_device *curr_dev = NULL;
616	struct net *curr_net = NULL;
617	struct sk_buff *skb, *next;
618	struct list_head sublist;
619
620	INIT_LIST_HEAD(&sublist);
621	list_for_each_entry_safe(skb, next, head, list) {
622		struct net_device *dev = skb->dev;
623		struct net *net = dev_net(dev);
624
625		skb_list_del_init(skb);
626		skb = ip_rcv_core(skb, net);
627		if (skb == NULL)
628			continue;
629
630		if (curr_dev != dev || curr_net != net) {
631			/* dispatch old sublist */
632			if (!list_empty(&sublist))
633				ip_sublist_rcv(&sublist, curr_dev, curr_net);
634			/* start new sublist */
635			INIT_LIST_HEAD(&sublist);
636			curr_dev = dev;
637			curr_net = net;
638		}
639		list_add_tail(&skb->list, &sublist);
640	}
641	/* dispatch final sublist */
642	if (!list_empty(&sublist))
643		ip_sublist_rcv(&sublist, curr_dev, curr_net);
644}
v5.14.15: the same file in v5.14.15 matches the listing above, except that ip_local_deliver() is additionally exported with EXPORT_SYMBOL(ip_local_deliver); (its line 256).