/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *
 *
 * Fixes:
 *		Alan Cox	:	Commented a couple of minor bits of surplus code
 *		Alan Cox	:	Undefining IP_FORWARD doesn't include the code
 *					(just stops a compiler warning).
 *		Alan Cox	:	Frames with >=MAX_ROUTE record routes, strict routes or loose routes
 *					are junked rather than corrupting things.
 *		Alan Cox	:	Frames to bad broadcast subnets are dumped
 *					We used to process them non broadcast and
 *					boy could that cause havoc.
 *		Alan Cox	:	ip_forward sets the free flag on the
 *					new frame it queues. Still crap because
 *					it copies the frame but at least it
 *					doesn't eat memory too.
 *		Alan Cox	:	Generic queue code and memory fixes.
 *		Fred Van Kempen	:	IP fragment support (borrowed from NET2E)
 *		Gerhard Koerting:	Forward fragmented frames correctly.
 *		Gerhard Koerting:	Fixes to my fix of the above 8-).
 *		Gerhard Koerting:	IP interface addressing fix.
 *		Linus Torvalds	:	More robustness checks
 *		Alan Cox	:	Even more checks: Still not as robust as it ought to be
 *		Alan Cox	:	Save IP header pointer for later
 *		Alan Cox	:	ip option setting
 *		Alan Cox	:	Use ip_tos/ip_ttl settings
 *		Alan Cox	:	Fragmentation bogosity removed
 *					(Thanks to Mark.Bush@prg.ox.ac.uk)
 *		Dmitry Gorodchanin :	Send of a raw packet crash fix.
 *		Alan Cox	:	Silly ip bug when an overlength
 *					fragment turns up. Now frees the
 *					queue.
 *		Linus Torvalds/ :	Memory leakage on fragmentation
 *		Alan Cox	:	handling.
 *		Gerhard Koerting:	Forwarding uses IP priority hints
 *		Teemu Rantanen	:	Fragment problems.
 *		Alan Cox	:	General cleanup, comments and reformat
 *		Alan Cox	:	SNMP statistics
 *		Alan Cox	:	BSD address rule semantics. Also see
 *					UDP as there is a nasty checksum issue
 *					if you do things the wrong way.
 *		Alan Cox	:	Always defrag, moved IP_FORWARD to the config.in file
 *		Alan Cox	:	IP options adjust sk->priority.
 *		Pedro Roque	:	Fix mtu/length error in ip_forward.
 *		Alan Cox	:	Avoid ip_chk_addr when possible.
 *		Richard Underwood :	IP multicasting.
 *		Alan Cox	:	Cleaned up multicast handlers.
 *		Alan Cox	:	RAW sockets demultiplex in the BSD style.
 *		Gunther Mayer	:	Fix the SNMP reporting typo
 *		Alan Cox	:	Always in group 224.0.0.1
 *		Pauline Middelink :	Fast ip_checksum update when forwarding
 *					Masquerading support.
 *		Alan Cox	:	Multicast loopback error for 224.0.0.1
 *		Alan Cox	:	IP_MULTICAST_LOOP option.
 *		Alan Cox	:	Use notifiers.
 *		Bjorn Ekwall	:	Removed ip_csum (from slhc.c too)
 *		Bjorn Ekwall	:	Moved ip_fast_csum to ip.h (inline!)
 *		Stefan Becker	:	Send out ICMP HOST REDIRECT
 *		Arnt Gulbrandsen :	ip_build_xmit
 *		Alan Cox	:	Per socket routing cache
 *		Alan Cox	:	Fixed routing cache, added header cache.
 *		Alan Cox	:	Loopback didn't work right in original ip_build_xmit - fixed it.
 *		Alan Cox	:	Only send ICMP_REDIRECT if src/dest are the same net.
 *		Alan Cox	:	Incoming IP option handling.
 *		Alan Cox	:	Set saddr on raw output frames as per BSD.
 *		Alan Cox	:	Stopped broadcast source route explosions.
 *		Alan Cox	:	Can disable source routing
 *		Takeshi Sone	:	Masquerading didn't work.
 *		Dave Bonn,Alan Cox :	Faster IP forwarding whenever possible.
 *		Alan Cox	:	Memory leaks, tramples, misc debugging.
 *		Alan Cox	:	Fixed multicast (by popular demand 8))
 *		Alan Cox	:	Fixed forwarding (by even more popular demand 8))
 *		Alan Cox	:	Fixed SNMP statistics [I think]
 *		Gerhard Koerting :	IP fragmentation forwarding fix
 *		Alan Cox	:	Device lock against page fault.
 *		Alan Cox	:	IP_HDRINCL facility.
 *		Werner Almesberger :	Zero fragment bug
 *		Alan Cox	:	RAW IP frame length bug
 *		Alan Cox	:	Outgoing firewall on build_xmit
 *		A.N.Kuznetsov	:	IP_OPTIONS support throughout the kernel
 *		Alan Cox	:	Multicast routing hooks
 *		Jos Vos		:	Do accounting *before* call_in_firewall
 *		Willy Konynenberg :	Transparent proxying support
 *
 *
 *
 * To Fix:
 *		IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient
 *		and could be made very efficient with the addition of some virtual memory hacks to permit
 *		the allocation of a buffer that can then be 'grown' by twiddling page tables.
 *		Output fragmentation wants updating along with the buffer management to use a single
 *		interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet
 *		output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause
 *		fragmentation anyway.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/raw.h>
#include <net/checksum.h>
#include <net/inet_ecn.h>
#include <linux/netfilter_ipv4.h>
#include <net/xfrm.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <net/dst_metadata.h>

/*
 *	Process Router Attention IP option (RFC 2113)
 */
bool ip_call_ra_chain(struct sk_buff *skb)
{
	struct ip_ra_chain *ra;
	u8 protocol = ip_hdr(skb)->protocol;
	struct sock *last = NULL;
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);

	for (ra = rcu_dereference(net->ipv4.ra_chain); ra; ra = rcu_dereference(ra->next)) {
		struct sock *sk = ra->sk;

		/* If socket is bound to an interface, only report
		 * the packet if it came from that interface.
		 */
		if (sk && inet_sk(sk)->inet_num == protocol &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == dev->ifindex)) {
			if (ip_is_fragment(ip_hdr(skb))) {
				if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN))
					return true;
			}
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					raw_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		raw_rcv(last, skb);
		return true;
	}
	return false;
}
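
/*
 * Illustrative sketch (guarded out with #if 0, not part of this file): a
 * userspace daemon lands on the RA chain walked above by enabling
 * IP_ROUTER_ALERT on a raw socket; ip_call_ra_chain() then hands matching
 * Router Alert datagrams to that socket through raw_rcv(). The function
 * name below is made up for the example.
 */
#if 0
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

static int open_router_alert_socket(void)
{
	int on = 1;
	/* RSVP is the classic consumer of Router Alert (RFC 2113). */
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);

	if (fd < 0)
		return -1;

	/* Asks the kernel to add this socket to the RA chain. */
	if (setsockopt(fd, IPPROTO_IP, IP_ROUTER_ALERT, &on, sizeof(on)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif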

static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	__skb_pull(skb, skb_network_header_len(skb));

	rcu_read_lock();
	{
		int protocol = ip_hdr(skb)->protocol;
		const struct net_protocol *ipprot;
		int raw;

	resubmit:
		raw = raw_local_deliver(skb, protocol);

		ipprot = rcu_dereference(inet_protos[protocol]);
		if (ipprot) {
			int ret;

			if (!ipprot->no_policy) {
				if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
					kfree_skb(skb);
					goto out;
				}
				nf_reset(skb);
			}
			ret = ipprot->handler(skb);
			if (ret < 0) {
				protocol = -ret;
				goto resubmit;
			}
			__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
		} else {
			if (!raw) {
				if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
					__IP_INC_STATS(net, IPSTATS_MIB_INUNKNOWNPROTOS);
					icmp_send(skb, ICMP_DEST_UNREACH,
						  ICMP_PROT_UNREACH, 0);
				}
				kfree_skb(skb);
			} else {
				__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
				consume_skb(skb);
			}
		}
	}
 out:
	rcu_read_unlock();

	return 0;
}
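
/*
 * Illustrative sketch (guarded out with #if 0, not part of this file): the
 * dispatch above indexes inet_protos[] by IP protocol number and calls
 * ->handler(); a transport protocol typically registers itself like this.
 * The handler and the protocol number (253, reserved for experiments by
 * RFC 3692) are made up for the example.
 */
#if 0
static int example_proto_rcv(struct sk_buff *skb)
{
	/* A real handler would deliver to a socket; returning a negative
	 * value makes ip_local_deliver_finish() resubmit the packet as
	 * protocol -ret.
	 */
	kfree_skb(skb);
	return 0;
}

static const struct net_protocol example_protocol = {
	.handler	= example_proto_rcv,
	.no_policy	= 1,	/* skip the xfrm policy check seen above */
};

static int __init example_proto_init(void)
{
	return inet_add_protocol(&example_protocol, 253);
}
#endif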

/*
 *	Deliver IP Packets to the higher protocol layers.
 */
int ip_local_deliver(struct sk_buff *skb)
{
	/*
	 *	Reassemble IP fragments.
	 */
	struct net *net = dev_net(skb->dev);

	if (ip_is_fragment(ip_hdr(skb))) {
		if (ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER))
			return 0;
	}

	return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN,
		       net, NULL, skb, skb->dev, NULL,
		       ip_local_deliver_finish);
}

static inline bool ip_rcv_options(struct sk_buff *skb)
{
	struct ip_options *opt;
	const struct iphdr *iph;
	struct net_device *dev = skb->dev;

	/* It looks as overkill, because not all
	   IP options require packet mangling.
	   But it is the easiest for now, especially taking
	   into account that combination of IP options
	   and running sniffer is extremely rare condition.
					      --ANK (980813)
	*/
	if (skb_cow(skb, skb_headroom(skb))) {
		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	iph = ip_hdr(skb);
	opt = &(IPCB(skb)->opt);
	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);

	if (ip_options_compile(dev_net(dev), opt, skb)) {
		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
		goto drop;
	}

	if (unlikely(opt->srr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		if (in_dev) {
			if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
				if (IN_DEV_LOG_MARTIANS(in_dev))
					net_info_ratelimited("source route option %pI4 -> %pI4\n",
							     &iph->saddr,
							     &iph->daddr);
				goto drop;
			}
		}

		if (ip_options_rcv_srr(skb))
			goto drop;
	}

	return false;
drop:
	return true;
}

static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	int (*edemux)(struct sk_buff *skb);
	struct net_device *dev = skb->dev;
	struct rtable *rt;
	int err;

	/* if ingress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip_rcv(skb);
	if (!skb)
		return NET_RX_SUCCESS;

	if (net->ipv4.sysctl_ip_early_demux &&
	    !skb_dst(skb) &&
	    !skb->sk &&
	    !ip_is_fragment(iph)) {
		const struct net_protocol *ipprot;
		int protocol = iph->protocol;

		ipprot = rcu_dereference(inet_protos[protocol]);
		if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) {
			err = edemux(skb);
			if (unlikely(err))
				goto drop_error;
			/* must reload iph, skb->head might have changed */
			iph = ip_hdr(skb);
		}
	}

	/*
	 *	Initialise the virtual path cache for the packet. It describes
	 *	how the packet travels inside Linux networking.
	 */
	if (!skb_valid_dst(skb)) {
		err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
					   iph->tos, dev);
		if (unlikely(err))
			goto drop_error;
	}

#ifdef CONFIG_IP_ROUTE_CLASSID
	if (unlikely(skb_dst(skb)->tclassid)) {
		struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
		u32 idx = skb_dst(skb)->tclassid;
		st[idx&0xFF].o_packets++;
		st[idx&0xFF].o_bytes += skb->len;
		st[(idx>>16)&0xFF].i_packets++;
		st[(idx>>16)&0xFF].i_bytes += skb->len;
	}
#endif

	if (iph->ihl > 5 && ip_rcv_options(skb))
		goto drop;

	rt = skb_rtable(skb);
	if (rt->rt_type == RTN_MULTICAST) {
		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST) {
		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len);
	} else if (skb->pkt_type == PACKET_BROADCAST ||
		   skb->pkt_type == PACKET_MULTICAST) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		/* RFC 1122 3.3.6:
		 *
		 *   When a host sends a datagram to a link-layer broadcast
		 *   address, the IP destination address MUST be a legal IP
		 *   broadcast or IP multicast address.
		 *
		 *   A host SHOULD silently discard a datagram that is received
		 *   via a link-layer broadcast (see Section 2.4) but does not
		 *   specify an IP multicast or broadcast destination address.
		 *
		 * This doesn't explicitly say L2 *broadcast*, but broadcast is
		 * in a way a form of multicast and the most common use case for
		 * this is 802.11 protecting against cross-station spoofing (the
		 * so-called "hole-196" attack) so do it for both.
		 */
		if (in_dev &&
		    IN_DEV_ORCONF(in_dev, DROP_UNICAST_IN_L2_MULTICAST))
			goto drop;
	}

	return dst_input(skb);

drop:
	kfree_skb(skb);
	return NET_RX_DROP;

drop_error:
	if (err == -EXDEV)
		__NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
	goto drop;
}

/*
 *	Main IP Receive routine.
 */
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	const struct iphdr *iph;
	struct net *net;
	u32 len;

	/* When the interface is in promisc. mode, drop all the crap
	 * that it receives, do not try to analyse it.
	 */
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto drop;


	net = dev_net(dev);
	__IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		goto out;
	}

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto inhdr_error;

	iph = ip_hdr(skb);

	/*
	 *	RFC1122: 3.2.1.2 MUST silently discard any IP frame that fails the checksum.
	 *
	 *	Is the datagram acceptable?
	 *
	 *	1.	Length at least the size of an ip header
	 *	2.	Version of 4
	 *	3.	Checksums correctly. [Speed optimisation for later, skip loopback checksums]
	 *	4.	Doesn't have a bogus length
	 */

	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error;

	BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1);
	BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0);
	BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE);
	__IP_ADD_STATS(net,
		       IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
		       max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));

	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	iph = ip_hdr(skb);

	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		goto csum_error;

	len = ntohs(iph->tot_len);
	if (skb->len < len) {
		__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	} else if (len < (iph->ihl*4))
		goto inhdr_error;

	/* Our transport medium may have padded the buffer out. Now we know it
	 * is IP we can trim to the true length of the frame.
	 * Note this now means skb->len holds ntohs(iph->tot_len).
	 */
	if (pskb_trim_rcsum(skb, len)) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb->transport_header = skb->network_header + iph->ihl*4;

	/* Remove any debris in the socket control block */
	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	IPCB(skb)->iif = skb->skb_iif;

	/* Must drop socket now because of tproxy. */
	skb_orphan(skb);

	return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
		       net, NULL, skb, dev, NULL,
		       ip_rcv_finish);

csum_error:
	__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
inhdr_error:
	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
drop:
	kfree_skb(skb);
out:
	return NET_RX_DROP;
}
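
/*
 * For context, a condensed sketch (guarded out with #if 0) of how ip_rcv()
 * ends up being called at all: net/ipv4/af_inet.c registers it as the
 * handler for ETH_P_IP frames via dev_add_pack(), so every IPv4 frame the
 * device layer receives is funnelled into the routine above. The init
 * function name here is made up for the sketch.
 */
#if 0
static struct packet_type ip_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),
	.func = ip_rcv,
};

static int __init ipv4_rx_init(void)
{
	dev_add_pack(&ip_packet_type);
	return 0;
}
#endif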