// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case if packet not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen:		Fix broken PMTU recovery and remove
 *					some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_ecn.h>
#include <net/lwtunnel.h>
#include <linux/bpf-cgroup.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

static int
ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
	    unsigned int mtu,
	    int (*output)(struct net *, struct sock *, struct sk_buff *));

/* Generate a checksum for an outgoing IP datagram. */
void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);

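/* __ip_local_out() finalizes the IPv4 header (total length and checksum)
 * and runs the NF_INET_LOCAL_OUT netfilter hook.  nf_hook() returns 1 when
 * the packet was accepted without being stolen or queued, in which case
 * the caller is expected to hand the skb to dst_output(), as
 * ip_local_out() below does.
 */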
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);

	/* if egress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip_out(sk, skb);
	if (unlikely(!skb))
		return 0;

	skb->protocol = htons(ETH_P_IP);

	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
		       net, sk, skb, NULL, skb_dst(skb)->dev,
		       dst_output);
}

int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(net, sk, skb);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);

static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = ip4_dst_hoplimit(dst);
	return ttl;
}

/*
 *		Add an ip header to a skbuff and send it out.  Used e.g. by
 *		TCP to send SYN-ACK packets.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = inet->tos;
	iph->ttl = ip_select_ttl(inet, &rt->dst);
	iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
	iph->saddr = saddr;
	iph->protocol = sk->sk_protocol;
	if (ip_dont_fragment(sk, &rt->dst)) {
		iph->frag_off = htons(IP_DF);
		iph->id = 0;
	} else {
		iph->frag_off = 0;
		__ip_select_ident(net, iph, 1);
	}

	if (opt && opt->opt.optlen) {
		iph->ihl += opt->opt.optlen>>2;
		ip_options_build(skb, &opt->opt, daddr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	if (!skb->mark)
		skb->mark = sk->sk_mark;

	/* Send it out. */
	return ip_local_out(net, skb->sk, skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

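/* Finish the transmit path: make sure there is enough headroom for the
 * link-layer header, honour lightweight-tunnel redirects, resolve the
 * next hop via the neighbour subsystem and hand the skb to it.
 */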
static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	bool is_v6gw = false;

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		consume_skb(skb);
		skb = skb2;
	}

	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
		int res = lwtunnel_xmit(skb);

		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
			return res;
	}

	rcu_read_lock_bh();
	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
	if (!IS_ERR(neigh)) {
		int res;

		sock_confirm_neigh(skb, neigh);
		/* if crossing protocols, we cannot use the cached header */
		res = neigh_output(neigh, skb, is_v6gw);
		rcu_read_unlock_bh();
		return res;
	}
	rcu_read_unlock_bh();

	net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
			    __func__);
	kfree_skb(skb);
	return -EINVAL;
}

static int ip_finish_output_gso(struct net *net, struct sock *sk,
				struct sk_buff *skb, unsigned int mtu)
{
	struct sk_buff *segs, *nskb;
	netdev_features_t features;
	int ret = 0;

	/* common case: seglen is <= mtu
	 */
	if (skb_gso_validate_network_len(skb, mtu))
		return ip_finish_output2(net, sk, skb);

	/* Slowpath - GSO segment length exceeds the egress MTU.
	 *
	 * This can happen in several cases:
	 *  - Forwarding of a TCP GRO skb, when DF flag is not set.
	 *  - Forwarding of an skb that arrived on a virtualization interface
	 *    (virtio-net/vhost/tap) with TSO/GSO size set by other network
	 *    stack.
	 *  - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an
	 *    interface with a smaller MTU.
	 *  - Arriving GRO skb (or GSO skb in a virtualized environment) that is
	 *    bridged to a NETIF_F_TSO tunnel stacked over an interface with an
	 *    insufficient MTU.
	 */
	features = netif_skb_features(skb);
	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs)) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	consume_skb(skb);

	skb_list_walk_safe(segs, segs, nskb) {
		int err;

		skb_mark_not_on_list(segs);
		err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);

		if (err && ret == 0)
			ret = err;
	}

	return ret;
}

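/* Decide how to hand the packet to the device: resend through dst_output()
 * when a new xfrm policy was applied after SNAT, segment GSO packets, and
 * fragment anything that exceeds the path MTU.
 */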
static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned int mtu;

#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(net, sk, skb);
	}
#endif
	mtu = ip_skb_dst_mtu(sk, skb);
	if (skb_is_gso(skb))
		return ip_finish_output_gso(net, sk, skb, mtu);

	if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);

	return ip_finish_output2(net, sk, skb);
}

static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	switch (ret) {
	case NET_XMIT_SUCCESS:
		return __ip_finish_output(net, sk, skb);
	case NET_XMIT_CN:
		return __ip_finish_output(net, sk, skb) ? : ret;
	default:
		kfree_skb(skb);
		return ret;
	}
}

static int ip_mc_finish_output(struct net *net, struct sock *sk,
			       struct sk_buff *skb)
{
	struct rtable *new_rt;
	bool do_cn = false;
	int ret, err;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	switch (ret) {
	case NET_XMIT_CN:
		do_cn = true;
		fallthrough;
	case NET_XMIT_SUCCESS:
		break;
	default:
		kfree_skb(skb);
		return ret;
	}

	/* Reset rt_iif so that inet_iif() will return skb->skb_iif. Setting
	 * this to non-zero causes ipi_ifindex in in_pktinfo to be overwritten,
	 * see ipv4_pktinfo_prepare().
	 */
	new_rt = rt_dst_clone(net->loopback_dev, skb_rtable(skb));
	if (new_rt) {
		new_rt->rt_iif = 0;
		skb_dst_drop(skb);
		skb_dst_set(skb, &new_rt->dst);
	}

	err = dev_loopback_xmit(net, sk, skb);
	return (do_cn && err) ? ret : err;
}

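/* Output path for multicast (and local broadcast) packets: clones are
 * looped back through dev_loopback_xmit() for local receivers where
 * required, then the original skb continues through POST_ROUTING to
 * the wire.
 */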
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that returned after forwarding; ip_mr_input will drop
		   them in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					net, sk, newskb, NULL, newskb->dev,
					ip_mc_finish_output);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
				net, sk, newskb, NULL, newskb->dev,
				ip_mc_finish_output);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

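/* Standard unicast output hook installed in dst->output: account the
 * packet, set the outgoing device and run the NF_INET_POST_ROUTING hook
 * unless the packet was already rerouted by netfilter.
 */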
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, indev, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

/*
 * copy saddr and daddr, possibly using 64bit load/stores
 * Equivalent to :
 *   iph->saddr = fl4->saddr;
 *   iph->daddr = fl4->daddr;
 */
static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
{
	BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
		     offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
	memcpy(&iph->saddr, &fl4->saddr,
	       sizeof(fl4->saddr) + sizeof(fl4->daddr));
}

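/* Main transmit routine for connected sockets such as TCP: look up or
 * revalidate the cached route, build the IP header and push the packet
 * through ip_local_out().  Callers that route the packet themselves
 * (e.g. SCTP) enter via the packet_routed fast path below.
 */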
/* Note: skb->sk can be different from sk, in case of tunnels */
int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos)
{
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * e.g. by something like SCTP.
	 */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	fl4 = &fl->u.ip4;
	rt = skb_rtable(skb);
	if (rt)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (!rt) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, the retransmit mechanism of the transport
		 * layer will keep trying until the route appears or the
		 * connection times out.
		 */
		rt = ip_route_output_ports(net, fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport,
					   inet->inet_sport,
					   sk->sk_protocol,
					   RT_CONN_FLAGS_TOS(sk, tos),
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		sk_setup_caps(sk, &rt->dst);
	}
	skb_dst_set_noref(skb, &rt->dst);

packet_routed:
	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (tos & 0xff));
	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl = ip_select_ttl(inet, &rt->dst);
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);

	/* The transport layer sets skb->h.foo itself. */

	if (inet_opt && inet_opt->opt.optlen) {
		iph->ihl += inet_opt->opt.optlen >> 2;
		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
	}

	ip_select_ident_segs(net, skb, sk,
			     skb_shinfo(skb)->gso_segs ?: 1);

	/* TODO : should we use skb->sk here instead of sk ? */
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	res = ip_local_out(net, sk, skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(__ip_queue_xmit);

int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
{
	return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
}
EXPORT_SYMBOL(ip_queue_xmit);

static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	to->skb_iif = from->skb_iif;
	skb_dst_drop(to);
	skb_dst_copy(to, from);
	to->dev = from->dev;
	to->mark = from->mark;

	skb_copy_hash(to, from);

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_ext_copy(to, from);
#if IS_ENABLED(CONFIG_IP_VS)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}

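/* Fragmentation gatekeeper: packets without DF are handed straight to
 * ip_do_fragment().  Packets with DF set are only fragmented when the
 * socket asked to ignore DF (skb->ignore_df) and the known fragment size
 * fits the MTU; otherwise an ICMP_FRAG_NEEDED error carrying the MTU is
 * returned to the sender and the packet is dropped with -EMSGSIZE.
 */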
static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		       unsigned int mtu,
		       int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) == 0)
		return ip_do_fragment(net, sk, skb, output);

	if (unlikely(!skb->ignore_df ||
		     (IPCB(skb)->frag_max_size &&
		      IPCB(skb)->frag_max_size > mtu))) {
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(mtu));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return ip_do_fragment(net, sk, skb, output);
}

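/* Fast-path fragmentation helpers: when a well-formed frag_list is already
 * attached to the skb, ip_fraglist_init() turns the head skb into the
 * first fragment and ip_fraglist_prepare() stamps each queued fragment
 * with its own IP header, offset and checksum.
 */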
void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
		      unsigned int hlen, struct ip_fraglist_iter *iter)
{
	unsigned int first_len = skb_pagelen(skb);

	iter->frag = skb_shinfo(skb)->frag_list;
	skb_frag_list_init(skb);

	iter->offset = 0;
	iter->iph = iph;
	iter->hlen = hlen;

	skb->data_len = first_len - skb_headlen(skb);
	skb->len = first_len;
	iph->tot_len = htons(first_len);
	iph->frag_off = htons(IP_MF);
	ip_send_check(iph);
}
EXPORT_SYMBOL(ip_fraglist_init);

static void ip_fraglist_ipcb_prepare(struct sk_buff *skb,
				     struct ip_fraglist_iter *iter)
{
	struct sk_buff *to = iter->frag;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(skb)->flags;

	if (iter->offset == 0)
		ip_options_fragment(to);
}

void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
{
	unsigned int hlen = iter->hlen;
	struct iphdr *iph = iter->iph;
	struct sk_buff *frag;

	frag = iter->frag;
	frag->ip_summed = CHECKSUM_NONE;
	skb_reset_transport_header(frag);
	__skb_push(frag, hlen);
	skb_reset_network_header(frag);
	memcpy(skb_network_header(frag), iph, hlen);
	iter->iph = ip_hdr(frag);
	iph = iter->iph;
	iph->tot_len = htons(frag->len);
	ip_copy_metadata(frag, skb);
	iter->offset += skb->len - hlen;
	iph->frag_off = htons(iter->offset >> 3);
	if (frag->next)
		iph->frag_off |= htons(IP_MF);
	/* Ready, complete checksum */
	ip_send_check(iph);
}
EXPORT_SYMBOL(ip_fraglist_prepare);

void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
		  unsigned int ll_rs, unsigned int mtu, bool DF,
		  struct ip_frag_state *state)
{
	struct iphdr *iph = ip_hdr(skb);

	state->DF = DF;
	state->hlen = hlen;
	state->ll_rs = ll_rs;
	state->mtu = mtu;

	state->left = skb->len - hlen;	/* Space per frame */
	state->ptr = hlen;		/* Where to start from */

	state->offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	state->not_last_frag = iph->frag_off & htons(IP_MF);
}
EXPORT_SYMBOL(ip_frag_init);

static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
			 bool first_frag, struct ip_frag_state *state)
{
	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

	/* ANK: dirty, but effective trick. Upgrade options only if
	 * the segment to be fragmented was THE FIRST (otherwise,
	 * options are already fixed) and do it ONCE
	 * on the initial skb, so that all the following fragments
	 * will inherit the fixed options.
	 */
	if (first_frag)
		ip_options_fragment(from);
}

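/* Slow-path fragmentation: allocate a fresh skb for the next fragment,
 * copy the IP header and the next chunk of payload into it and fill in
 * the fragment offset and MF flag.  Returns the new fragment or an
 * ERR_PTR on allocation failure.
 */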
struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
{
	unsigned int len = state->left;
	struct sk_buff *skb2;
	struct iphdr *iph;

	/* IF: it doesn't fit, use 'mtu' - the data space left */
	if (len > state->mtu)
		len = state->mtu;
	/* IF: we are not sending up to and including the packet end
	   then align the next start on an eight byte boundary */
	if (len < state->left)
		len &= ~7;

	/* Allocate buffer */
	skb2 = alloc_skb(len + state->hlen + state->ll_rs, GFP_ATOMIC);
	if (!skb2)
		return ERR_PTR(-ENOMEM);

	/*
	 *	Set up data on packet
	 */

	ip_copy_metadata(skb2, skb);
	skb_reserve(skb2, state->ll_rs);
	skb_put(skb2, len + state->hlen);
	skb_reset_network_header(skb2);
	skb2->transport_header = skb2->network_header + state->hlen;

	/*
	 *	Charge the memory for the fragment to any owner
	 *	it might possess
	 */

	if (skb->sk)
		skb_set_owner_w(skb2, skb->sk);

	/*
	 *	Copy the packet header into the new buffer.
	 */

	skb_copy_from_linear_data(skb, skb_network_header(skb2), state->hlen);

	/*
	 *	Copy a block of the IP datagram.
	 */
	if (skb_copy_bits(skb, state->ptr, skb_transport_header(skb2), len))
		BUG();
	state->left -= len;

	/*
	 *	Fill in the new header fields.
	 */
	iph = ip_hdr(skb2);
	iph->frag_off = htons((state->offset >> 3));
	if (state->DF)
		iph->frag_off |= htons(IP_DF);

	/*
	 *	Added AC : If we are fragmenting a fragment that's not the
	 *		   last fragment then keep MF set on each fragment.
	 */
	if (state->left > 0 || state->not_last_frag)
		iph->frag_off |= htons(IP_MF);
	state->ptr += len;
	state->offset += len;

	iph->tot_len = htons(len + state->hlen);

	ip_send_check(iph);

	return skb2;
}
EXPORT_SYMBOL(ip_frag_next);

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up
 *	into smaller pieces (each of size equal to the IP header plus a block
 *	of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending.
 */

int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph;
	struct sk_buff *skb2;
	struct rtable *rt = skb_rtable(skb);
	unsigned int mtu, hlen, ll_rs;
	struct ip_fraglist_iter iter;
	ktime_t tstamp = skb->tstamp;
	struct ip_frag_state state;
	int err = 0;

	/* for offloaded checksums, clean up the checksum before fragmentation */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto fail;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	mtu = ip_skb_dst_mtu(sk, skb);
	if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
		mtu = IPCB(skb)->frag_max_size;

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = mtu - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);

	/* When frag_list is given, use it.  First, check its validity:
	 * some transformers could create a wrong frag_list or break an
	 * existing one; this is not prohibited.  In that case fall back
	 * to copying.
	 *
	 * LATER: this step can be merged into the real generation of
	 * fragments; we can switch to copying when we see the first
	 * bad fragment.
	 */
	if (skb_has_frag_list(skb)) {
		struct sk_buff *frag, *frag2;
		unsigned int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    ip_is_fragment(iph) ||
		    skb_cloned(skb) ||
		    skb_headroom(skb) < ll_rs)
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen + ll_rs)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */
		ip_fraglist_init(skb, iph, hlen, &iter);

		for (;;) {
			/* Prepare the header of the next frame,
			 * before the previous one goes down. */
			if (iter.frag) {
				ip_fraglist_ipcb_prepare(skb, &iter);
				ip_fraglist_prepare(skb, &iter);
			}

			skb->tstamp = tstamp;
			err = output(net, sk, skb);

			if (!err)
				IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
			if (err || !iter.frag)
				break;

			skb = ip_fraglist_next(&iter);
		}

		if (err == 0) {
			IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		kfree_skb_list(iter.frag);

		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	/*
	 *	Fragment the datagram.
	 */

	ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
		     &state);

	/*
	 *	Keep copying data until we run out.
	 */

	while (state.left > 0) {
		bool first_frag = (state.offset == 0);

		skb2 = ip_frag_next(skb, &state);
		if (IS_ERR(skb2)) {
			err = PTR_ERR(skb2);
			goto fail;
		}
		ip_frag_ipcb(skb, skb2, first_frag, &state);

		/*
		 *	Put this fragment into the sending queue.
		 */
		skb2->tstamp = tstamp;
		err = output(net, sk, skb2);
		if (err)
			goto fail;

		IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
	}
	consume_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
	return err;
}
EXPORT_SYMBOL(ip_do_fragment);

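/* Generic getfrag() callback for ip_append_data(): copy len bytes of user
 * data from the msghdr iterator into the skb buffer at 'to', folding a
 * software checksum into skb->csum unless checksumming is offloaded
 * (CHECKSUM_PARTIAL).
 */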
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct msghdr *msg = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (!copy_from_iter_full(to, len, &msg->msg_iter))
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);

static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

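/* Workhorse behind ip_append_data() and ip_make_skb(): append 'length'
 * bytes from 'from' to the pending queue, growing the tail skb where
 * possible and otherwise allocating new fragment-sized buffers.  Each
 * queued skb is one IP fragment in the making; __ip_make_skb() later
 * welds them into a single datagram via the head skb's frag_list.
 */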
static int __ip_append_data(struct sock *sk,
			    struct flowi4 *fl4,
			    struct sk_buff_head *queue,
			    struct inet_cork *cork,
			    struct page_frag *pfrag,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ubuf_info *uarg = NULL;
	struct sk_buff *skb;

	struct ip_options *opt = cork->opt;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt = (struct rtable *)cork->dst;
	unsigned int wmem_alloc_delta = 0;
	bool paged, extra_uref = false;
	u32 tskey = 0;

	skb = skb_peek_tail(queue);

	exthdrlen = !skb ? rt->dst.header_len : 0;
	mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
	paged = !!cork->gso_size;

	if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
		tskey = sk->sk_tskey++;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;

	if (cork->length + length > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we want
	 * it not to be fragmented in the future.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
	    (!(flags & MSG_MORE) || cork->gso_size) &&
	    (!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM)))
		csummode = CHECKSUM_PARTIAL;

	if (flags & MSG_ZEROCOPY && length && sock_flag(sk, SOCK_ZEROCOPY)) {
		uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
		if (!uarg)
			return -ENOBUFS;
		extra_uref = !skb_zcopy(skb);	/* only ref on new uarg */
		if (rt->dst.dev->features & NETIF_F_SG &&
		    csummode == CHECKSUM_PARTIAL) {
			paged = true;
		} else {
			uarg->zerocopy = 0;
			skb_zcopy_set(skb, uarg, &extra_uref);
		}
	}

	cork->length += length;

	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate a chained skb;
	 * each of its segments is an IP fragment ready for sending to the
	 * network after adding the appropriate IP header.
	 */

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			unsigned int pagedlen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;
			pagedlen = 0;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else if (!paged)
				alloclen = fraglen;
			else {
				alloclen = min_t(int, fraglen, MAX_HEADER);
				pagedlen = fraglen - alloclen;
			}

			alloclen += exthdrlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
				    2 * sk->sk_sndbuf)
					skb = alloc_skb(alloclen + hh_len + 15,
							sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;

			/*
			 * Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 * Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen + exthdrlen - pagedlen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap - pagedlen;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= copy + transhdrlen;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/* only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = cork->tx_flags;
			cork->tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;
			skb_zcopy_set(skb, uarg, &extra_uref);

			if ((flags & MSG_CONFIRM) && !skb_prev)
				skb_set_dst_pending_confirm(skb, 1);

			/*
			 * Put the packet on the pending queue.
			 */
			if (!skb->destructor) {
				skb->destructor = sock_wfree;
				skb->sk = sk;
				wmem_alloc_delta += skb->truesize;
			}
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG) &&
		    skb_tailroom(skb) >= copy) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else if (!uarg || !uarg->zerocopy) {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			wmem_alloc_delta += copy;
		} else {
			err = skb_zerocopy_iter_dgram(skb, from, copy);
			if (err < 0)
				goto error;
		}
		offset += copy;
		length -= copy;
	}

	if (wmem_alloc_delta)
		refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
	return 0;

error_efault:
	err = -EFAULT;
error:
	if (uarg)
		sock_zerocopy_put_abort(uarg, extra_uref);
	cork->length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
	return err;
}

static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
			 struct ipcm_cookie *ipc, struct rtable **rtp)
{
	struct ip_options_rcu *opt;
	struct rtable *rt;

	rt = *rtp;
	if (unlikely(!rt))
		return -EFAULT;

	/*
	 * setup for corking.
	 */
	opt = ipc->opt;
	if (opt) {
		if (!cork->opt) {
			/* 40 bytes is the maximum size of IPv4 options */
			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
					    sk->sk_allocation);
			if (unlikely(!cork->opt))
				return -ENOBUFS;
		}
		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
		cork->flags |= IPCORK_OPT;
		cork->addr = ipc->addr;
	}

	cork->fragsize = ip_sk_use_pmtu(sk) ?
			 dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);

	if (!inetdev_valid_mtu(cork->fragsize))
		return -ENETUNREACH;

	cork->gso_size = ipc->gso_size;

	cork->dst = &rt->dst;
	/* We stole this route, caller should not release it. */
	*rtp = NULL;

	cork->length = 0;
	cork->ttl = ipc->ttl;
	cork->tos = ipc->tos;
	cork->mark = ipc->sockc.mark;
	cork->priority = ipc->priority;
	cork->transmit_time = ipc->sockc.transmit_time;
	cork->tx_flags = 0;
	sock_tx_timestamp(sk, ipc->sockc.tsflags, &cork->tx_flags);

	return 0;
}

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data.  Each piece will be held on the socket
 *	until ip_push_pending_frames() is called.  Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP; other transport protocols - e.g. raw sockets - can
 *	potentially use this interface.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable **rtp,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	int err;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
		if (err)
			return err;
	} else {
		transhdrlen = 0;
	}

	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
				sk_page_frag(sk), getfrag,
				from, length, transhdrlen, flags);
}

ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	struct inet_cork *cork;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap, maxnonfragsize;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	cork = &inet->cork.base;
	rt = (struct rtable *)cork->dst;
	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (!(rt->dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
	mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;

	if (cork->length + size > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}

	skb = skb_peek_tail(&sk->sk_write_queue);
	if (!skb)
		return -EINVAL;

	cork->length += size;

	while (size > 0) {
		/* Check if the remaining data fits into current packet. */
		len = mtu - skb->len;
		if (len < size)
			len = maxfraglen - skb->len;

		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 * Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 * Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (len > size)
			len = size;

		if (skb_append_pagefrags(skb, page, offset, len)) {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		refcount_add(len, &sk->sk_wmem_alloc);
		offset += len;
		size -= len;
	}
	return 0;

error:
	cork->length -= size;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

static void ip_cork_release(struct inet_cork *cork)
{
	cork->flags &= ~IPCORK_OPT;
	kfree(cork->opt);
	cork->opt = NULL;
	dst_release(cork->dst);
	cork->dst = NULL;
}

/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push them out.
 */
struct sk_buff *__ip_make_skb(struct sock *sk,
			      struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = (struct rtable *)cork->dst;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;

	skb = __skb_dequeue(queue);
	if (!skb)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * fragmenting the frame generated here.  No matter how transforms
	 * change the size of the packet, it will come out.
	 */
	skb->ignore_df = ip_sk_ignore_df(sk);

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If ignore_df is set too, we still allow fragmenting this frame
	 * locally. */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    inet->pmtudisc == IP_PMTUDISC_PROBE ||
	    (skb->len <= dst_mtu(&rt->dst) &&
	     ip_dont_fragment(sk, &rt->dst)))
		df = htons(IP_DF);

	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (cork->ttl != 0)
		ttl = cork->ttl;
	else if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->dst);

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
	iph->frag_off = df;
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);
	ip_select_ident(net, skb, sk);

	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, cork->addr, rt, 0);
	}

	skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority;
	skb->mark = cork->mark;
	skb->tstamp = cork->transmit_time;
	/*
	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
	 * on dst refcount
	 */
	cork->dst = NULL;
	skb_dst_set(skb, &rt->dst);

	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	ip_cork_release(cork);
out:
	return skb;
}

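/* Hand a datagram built by __ip_make_skb() to the IP output path,
 * converting positive NET_XMIT codes into errno values and counting
 * discards.
 */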
int ip_send_skb(struct net *net, struct sk_buff *skb)
{
	int err;

	err = ip_local_out(net, skb->sk, skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}

int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
{
	struct sk_buff *skb;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		return 0;

	/* Netfilter gets the whole, not yet fragmented skb. */
	return ip_send_skb(sock_net(sk), skb);
}

/*
 *	Throw away all pending data on the socket.
 */
static void __ip_flush_pending_frames(struct sock *sk,
				      struct sk_buff_head *queue,
				      struct inet_cork *cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL)
		kfree_skb(skb);

	ip_cork_release(cork);
}

void ip_flush_pending_frames(struct sock *sk)
{
	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

struct sk_buff *ip_make_skb(struct sock *sk,
			    struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    struct inet_cork *cork, unsigned int flags)
{
	struct sk_buff_head queue;
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork->flags = 0;
	cork->addr = 0;
	cork->opt = NULL;
	err = ip_setup_cork(sk, cork, ipc, rtp);
	if (err)
		return ERR_PTR(err);

	err = __ip_append_data(sk, fl4, &queue, cork,
			       &current->task_frag, getfrag,
			       from, length, transhdrlen, flags);
	if (err) {
		__ip_flush_pending_frames(sk, &queue, cork);
		return ERR_PTR(err);
	}

	return __ip_make_skb(sk, fl4, &queue, cork);
}

/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send some TCP resets/acks so far.
 */
void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len, u64 transmit_time)
{
	struct ip_options_data replyopts;
	struct ipcm_cookie ipc;
	struct flowi4 fl4;
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct sk_buff *nskb;
	int err;
	int oif;

	if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt))
		return;

	ipcm_init(&ipc);
	ipc.addr = daddr;
	ipc.sockc.transmit_time = transmit_time;

	if (replyopts.opt.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (replyopts.opt.opt.srr)
			daddr = replyopts.opt.opt.faddr;
	}

	oif = arg->bound_dev_if;
	if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
		oif = skb->skb_iif;

	flowi4_init_output(&fl4, oif,
			   IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark,
			   RT_TOS(arg->tos),
			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
			   ip_reply_arg_flowi_flags(arg),
			   daddr, saddr,
			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
			   arg->uid);
	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		return;

	inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK;

	sk->sk_protocol = ip_hdr(skb)->protocol;
	sk->sk_bound_dev_if = arg->bound_dev_if;
	sk->sk_sndbuf = sysctl_wmem_default;
	ipc.sockc.mark = fl4.flowi4_mark;
	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
			     len, 0, &ipc, &rt, MSG_DONTWAIT);
	if (unlikely(err)) {
		ip_flush_pending_frames(sk);
		goto out;
	}

	nskb = skb_peek(&sk->sk_write_queue);
	if (nskb) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(nskb) +
			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
								arg->csum));
		nskb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk, &fl4);
	}
out:
	ip_rt_put(rt);
}

void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST)
	igmp_mc_init();
#endif
}
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * The Internet Protocol (IP) output module.
8 *
9 * Authors: Ross Biro
10 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
11 * Donald Becker, <becker@super.org>
12 * Alan Cox, <Alan.Cox@linux.org>
13 * Richard Underwood
14 * Stefan Becker, <stefanb@yello.ping.de>
15 * Jorge Cwik, <jorge@laser.satlink.net>
16 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
17 * Hirokazu Takahashi, <taka@valinux.co.jp>
18 *
19 * See ip_input.c for original log
20 *
21 * Fixes:
22 * Alan Cox : Missing nonblock feature in ip_build_xmit.
23 * Mike Kilburn : htons() missing in ip_build_xmit.
24 * Bradford Johnson: Fix faulty handling of some frames when
25 * no route is found.
26 * Alexander Demenshin: Missing sk/skb free in ip_queue_xmit
27 * (in case if packet not accepted by
28 * output firewall rules)
29 * Mike McLagan : Routing by source
30 * Alexey Kuznetsov: use new route cache
31 * Andi Kleen: Fix broken PMTU recovery and remove
32 * some redundant tests.
33 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
34 * Andi Kleen : Replace ip_reply with ip_send_reply.
35 * Andi Kleen : Split fast and slow ip_build_xmit path
36 * for decreased register pressure on x86
37 * and more readability.
38 * Marc Boucher : When call_out_firewall returns FW_QUEUE,
39 * silently drop skb instead of failing with -EPERM.
40 * Detlev Wengorz : Copy protocol for fragments.
41 * Hirokazu Takahashi: HW checksumming for outgoing UDP
42 * datagrams.
43 * Hirokazu Takahashi: sendfile() on UDP works now.
44 */
45
46#include <linux/uaccess.h>
47#include <linux/module.h>
48#include <linux/types.h>
49#include <linux/kernel.h>
50#include <linux/mm.h>
51#include <linux/string.h>
52#include <linux/errno.h>
53#include <linux/highmem.h>
54#include <linux/slab.h>
55
56#include <linux/socket.h>
57#include <linux/sockios.h>
58#include <linux/in.h>
59#include <linux/inet.h>
60#include <linux/netdevice.h>
61#include <linux/etherdevice.h>
62#include <linux/proc_fs.h>
63#include <linux/stat.h>
64#include <linux/init.h>
65
66#include <net/snmp.h>
67#include <net/ip.h>
68#include <net/protocol.h>
69#include <net/route.h>
70#include <net/xfrm.h>
71#include <linux/skbuff.h>
72#include <net/sock.h>
73#include <net/arp.h>
74#include <net/icmp.h>
75#include <net/checksum.h>
76#include <net/gso.h>
77#include <net/inetpeer.h>
78#include <net/inet_ecn.h>
79#include <net/lwtunnel.h>
80#include <net/inet_dscp.h>
81#include <linux/bpf-cgroup.h>
82#include <linux/igmp.h>
83#include <linux/netfilter_ipv4.h>
84#include <linux/netfilter_bridge.h>
85#include <linux/netlink.h>
86#include <linux/tcp.h>
87
88static int
89ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
90 unsigned int mtu,
91 int (*output)(struct net *, struct sock *, struct sk_buff *));
92
93/* Generate a checksum for an outgoing IP datagram. */
94void ip_send_check(struct iphdr *iph)
95{
96 iph->check = 0;
97 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
98}
99EXPORT_SYMBOL(ip_send_check);
100
101int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
102{
103 struct iphdr *iph = ip_hdr(skb);
104
105 IP_INC_STATS(net, IPSTATS_MIB_OUTREQUESTS);
106
107 iph_set_totlen(iph, skb->len);
108 ip_send_check(iph);
109
110 /* if egress device is enslaved to an L3 master device pass the
111 * skb to its handler for processing
112 */
113 skb = l3mdev_ip_out(sk, skb);
114 if (unlikely(!skb))
115 return 0;
116
117 skb->protocol = htons(ETH_P_IP);
118
119 return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
120 net, sk, skb, NULL, skb_dst(skb)->dev,
121 dst_output);
122}
123
124int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
125{
126 int err;
127
128 err = __ip_local_out(net, sk, skb);
129 if (likely(err == 1))
130 err = dst_output(net, sk, skb);
131
132 return err;
133}
134EXPORT_SYMBOL_GPL(ip_local_out);
135
136static inline int ip_select_ttl(const struct inet_sock *inet,
137 const struct dst_entry *dst)
138{
139 int ttl = READ_ONCE(inet->uc_ttl);
140
141 if (ttl < 0)
142 ttl = ip4_dst_hoplimit(dst);
143 return ttl;
144}
145
146/*
147 * Add an ip header to a skbuff and send it out.
148 *
149 */
150int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
151 __be32 saddr, __be32 daddr, struct ip_options_rcu *opt,
152 u8 tos)
153{
154 const struct inet_sock *inet = inet_sk(sk);
155 struct rtable *rt = skb_rtable(skb);
156 struct net *net = sock_net(sk);
157 struct iphdr *iph;
158
159 /* Build the IP header. */
160 skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
161 skb_reset_network_header(skb);
162 iph = ip_hdr(skb);
163 iph->version = 4;
164 iph->ihl = 5;
165 iph->tos = tos;
166 iph->ttl = ip_select_ttl(inet, &rt->dst);
167 iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
168 iph->saddr = saddr;
169 iph->protocol = sk->sk_protocol;
170 /* Do not bother generating IPID for small packets (eg SYNACK) */
171 if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) {
172 iph->frag_off = htons(IP_DF);
173 iph->id = 0;
174 } else {
175 iph->frag_off = 0;
176 /* TCP packets here are SYNACK with fat IPv4/TCP options.
177 * Avoid using the hashed IP ident generator.
178 */
179 if (sk->sk_protocol == IPPROTO_TCP)
180 iph->id = (__force __be16)get_random_u16();
181 else
182 __ip_select_ident(net, iph, 1);
183 }
184
185 if (opt && opt->opt.optlen) {
186 iph->ihl += opt->opt.optlen>>2;
187 ip_options_build(skb, &opt->opt, daddr, rt);
188 }
189
190 skb->priority = READ_ONCE(sk->sk_priority);
191 if (!skb->mark)
192 skb->mark = READ_ONCE(sk->sk_mark);
193
194 /* Send it out. */
195 return ip_local_out(net, skb->sk, skb);
196}
197EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
198
199static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
200{
201 struct dst_entry *dst = skb_dst(skb);
202 struct rtable *rt = dst_rtable(dst);
203 struct net_device *dev = dst->dev;
204 unsigned int hh_len = LL_RESERVED_SPACE(dev);
205 struct neighbour *neigh;
206 bool is_v6gw = false;
207
208 if (rt->rt_type == RTN_MULTICAST) {
209 IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
210 } else if (rt->rt_type == RTN_BROADCAST)
211 IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);
212
213 /* OUTOCTETS should be counted after fragment */
214 IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
215
216 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
217 skb = skb_expand_head(skb, hh_len);
218 if (!skb)
219 return -ENOMEM;
220 }
221
222 if (lwtunnel_xmit_redirect(dst->lwtstate)) {
223 int res = lwtunnel_xmit(skb);
224
225 if (res != LWTUNNEL_XMIT_CONTINUE)
226 return res;
227 }
228
229 rcu_read_lock();
230 neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
231 if (!IS_ERR(neigh)) {
232 int res;
233
234 sock_confirm_neigh(skb, neigh);
235 /* if crossing protocols, can not use the cached header */
236 res = neigh_output(neigh, skb, is_v6gw);
237 rcu_read_unlock();
238 return res;
239 }
240 rcu_read_unlock();
241
242 net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
243 __func__);
244 kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL);
245 return PTR_ERR(neigh);
246}
247
static int ip_finish_output_gso(struct net *net, struct sock *sk,
				struct sk_buff *skb, unsigned int mtu)
{
	struct sk_buff *segs, *nskb;
	netdev_features_t features;
	int ret = 0;

	/* common case: seglen is <= mtu */
	if (skb_gso_validate_network_len(skb, mtu))
		return ip_finish_output2(net, sk, skb);

	/* Slowpath - GSO segment length exceeds the egress MTU.
	 *
	 * This can happen in several cases:
	 * - Forwarding of a TCP GRO skb, when the DF flag is not set.
	 * - Forwarding of an skb that arrived on a virtualization interface
	 *   (virtio-net/vhost/tap) with TSO/GSO size set by another network
	 *   stack.
	 * - Local GSO skb transmitted on a NETIF_F_TSO tunnel stacked over an
	 *   interface with a smaller MTU.
	 * - Arriving GRO skb (or GSO skb in a virtualized environment) that is
	 *   bridged to a NETIF_F_TSO tunnel stacked over an interface with an
	 *   insufficient MTU.
	 */
	features = netif_skb_features(skb);
	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs)) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	consume_skb(skb);

	skb_list_walk_safe(segs, segs, nskb) {
		int err;

		skb_mark_not_on_list(segs);
		err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);

		if (err && ret == 0)
			ret = err;
	}

	return ret;
}

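/* __ip_finish_output() dispatches a locally finished packet:
 *
 *	dst gained an xfrm policy after SNAT	-> re-run dst_output()
 *	skb is GSO				-> ip_finish_output_gso()
 *	len exceeds MTU (or conntrack recorded
 *	a smaller frag_max_size)		-> ip_fragment()
 *	otherwise				-> ip_finish_output2()
 */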
static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned int mtu;

#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(net, sk, skb);
	}
#endif
	mtu = ip_skb_dst_mtu(sk, skb);
	if (skb_is_gso(skb))
		return ip_finish_output_gso(net, sk, skb, mtu);

	if (skb->len > mtu || IPCB(skb)->frag_max_size)
		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);

	return ip_finish_output2(net, sk, skb);
}

static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	switch (ret) {
	case NET_XMIT_SUCCESS:
		return __ip_finish_output(net, sk, skb);
	case NET_XMIT_CN:
		return __ip_finish_output(net, sk, skb) ? : ret;
	default:
		kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS);
		return ret;
	}
}

static int ip_mc_finish_output(struct net *net, struct sock *sk,
			       struct sk_buff *skb)
{
	struct rtable *new_rt;
	bool do_cn = false;
	int ret, err;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	switch (ret) {
	case NET_XMIT_CN:
		do_cn = true;
		fallthrough;
	case NET_XMIT_SUCCESS:
		break;
	default:
		kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS);
		return ret;
	}

	/* Reset rt_iif so that inet_iif() will return skb->skb_iif. Setting
	 * this to non-zero causes ipi_ifindex in in_pktinfo to be overwritten,
	 * see ipv4_pktinfo_prepare().
	 */
	new_rt = rt_dst_clone(net->loopback_dev, skb_rtable(skb));
	if (new_rt) {
		new_rt->rt_iif = 0;
		skb_dst_drop(skb);
		skb_dst_set(skb, &new_rt->dst);
	}

	err = dev_loopback_xmit(net, sk, skb);
	return (do_cn && err) ? ret : err;
}

int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users.
	 */

	if (rt->rt_flags & RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back frames that are
		   not local and came back after forwarding; they will
		   be dropped by ip_mr_input() in any case.
		   Note that local frames are looped back to be
		   delivered to local recipients.

		   This check is duplicated in ip_mr_input() at the
		   moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					net, sk, newskb, NULL, newskb->dev,
					ip_mc_finish_output);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags & RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
				net, sk, newskb, NULL, newskb->dev,
				ip_mc_finish_output);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

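/**
 * ip_output - standard dst_output() handler for unicast IPv4 packets
 * @net: network namespace the packet is transmitted in
 * @sk: socket that owns the packet, may be %NULL
 * @skb: packet to transmit, with a valid dst attached
 *
 * Runs the NF_INET_POST_ROUTING hook (unless the packet was already
 * rerouted, see IPSKB_REROUTED) and then continues in ip_finish_output().
 */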
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, indev, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
EXPORT_SYMBOL(ip_output);

/*
 * Copy saddr and daddr, possibly using 64-bit load/stores.
 * Equivalent to:
 *	iph->saddr = fl4->saddr;
 *	iph->daddr = fl4->daddr;
 */
static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
{
	BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
		     offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));

	iph->saddr = fl4->saddr;
	iph->daddr = fl4->daddr;
}

/* Note: skb->sk can be different from sk, in case of tunnels */
int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos)
{
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * e.g. by something like SCTP.
	 */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	fl4 = &fl->u.ip4;
	rt = skb_rtable(skb);
	if (rt)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = dst_rtable(__sk_dst_check(sk, 0));
	if (!rt) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, the transport layer's retransmit mechanism
		 * will keep trying until a route appears or the connection
		 * times itself out.
		 */
		rt = ip_route_output_ports(net, fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport,
					   inet->inet_sport,
					   sk->sk_protocol,
					   tos & INET_DSCP_MASK,
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		sk_setup_caps(sk, &rt->dst);
	}
	skb_dst_set_noref(skb, &rt->dst);

packet_routed:
	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (tos & 0xff));
	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl = ip_select_ttl(inet, &rt->dst);
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);

	/* The transport layer sets skb->h.foo itself. */

	if (inet_opt && inet_opt->opt.optlen) {
		iph->ihl += inet_opt->opt.optlen >> 2;
		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt);
	}

	ip_select_ident_segs(net, skb, sk,
			     skb_shinfo(skb)->gso_segs ?: 1);

	/* TODO : should we use skb->sk here instead of sk ? */
	skb->priority = READ_ONCE(sk->sk_priority);
	skb->mark = READ_ONCE(sk->sk_mark);

	res = ip_local_out(net, sk, skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	kfree_skb_reason(skb, SKB_DROP_REASON_IP_OUTNOROUTES);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(__ip_queue_xmit);

int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
{
	return __ip_queue_xmit(sk, skb, fl, READ_ONCE(inet_sk(sk)->tos));
}
EXPORT_SYMBOL(ip_queue_xmit);
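
/* A connected transport typically calls ip_queue_xmit() with its cached
 * flow; e.g. TCP wires it up as its af-specific queue_xmit callback.
 * A caller looks roughly like this (sketch, not a verbatim caller):
 *
 *	err = ip_queue_xmit(sk, skb, &inet->cork.fl);
 *
 * relying on the socket's cached route and IP options.
 */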

static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	to->skb_iif = from->skb_iif;
	skb_dst_drop(to);
	skb_dst_copy(to, from);
	to->dev = from->dev;
	to->mark = from->mark;

	skb_copy_hash(to, from);

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_ext_copy(to, from);
#if IS_ENABLED(CONFIG_IP_VS)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}

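/* ip_fragment() enforces the DF bit: a DF packet is only fragmented if
 * skb->ignore_df is set and any conntrack-recorded frag_max_size still
 * fits the MTU; otherwise we answer with an ICMP FRAG_NEEDED error
 * (feeding path MTU discovery) and drop the packet.
 */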
static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		       unsigned int mtu,
		       int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) == 0)
		return ip_do_fragment(net, sk, skb, output);

	if (unlikely(!skb->ignore_df ||
		     (IPCB(skb)->frag_max_size &&
		      IPCB(skb)->frag_max_size > mtu))) {
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(mtu));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return ip_do_fragment(net, sk, skb, output);
}

void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
		      unsigned int hlen, struct ip_fraglist_iter *iter)
{
	unsigned int first_len = skb_pagelen(skb);

	iter->frag = skb_shinfo(skb)->frag_list;
	skb_frag_list_init(skb);

	iter->offset = 0;
	iter->iph = iph;
	iter->hlen = hlen;

	skb->data_len = first_len - skb_headlen(skb);
	skb->len = first_len;
	iph->tot_len = htons(first_len);
	iph->frag_off = htons(IP_MF);
	ip_send_check(iph);
}
EXPORT_SYMBOL(ip_fraglist_init);

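/* Fast-path fragmentation iterator: when an skb already carries a valid
 * frag_list, ip_do_fragment() below reuses its members as the fragments
 * instead of copying.  The pattern is roughly:
 *
 *	ip_fraglist_init(skb, iph, hlen, &iter);
 *	for (;;) {
 *		if (iter.frag)
 *			ip_fraglist_prepare(skb, &iter);
 *		err = output(net, sk, skb);
 *		if (err || !iter.frag)
 *			break;
 *		skb = ip_fraglist_next(&iter);
 *	}
 */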
void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
{
	unsigned int hlen = iter->hlen;
	struct iphdr *iph = iter->iph;
	struct sk_buff *frag;

	frag = iter->frag;
	frag->ip_summed = CHECKSUM_NONE;
	skb_reset_transport_header(frag);
	__skb_push(frag, hlen);
	skb_reset_network_header(frag);
	memcpy(skb_network_header(frag), iph, hlen);
	iter->iph = ip_hdr(frag);
	iph = iter->iph;
	iph->tot_len = htons(frag->len);
	ip_copy_metadata(frag, skb);
	iter->offset += skb->len - hlen;
	iph->frag_off = htons(iter->offset >> 3);
	if (frag->next)
		iph->frag_off |= htons(IP_MF);
	/* Ready, complete checksum */
	ip_send_check(iph);
}
EXPORT_SYMBOL(ip_fraglist_prepare);

void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
		  unsigned int ll_rs, unsigned int mtu, bool DF,
		  struct ip_frag_state *state)
{
	struct iphdr *iph = ip_hdr(skb);

	state->DF = DF;
	state->hlen = hlen;
	state->ll_rs = ll_rs;
	state->mtu = mtu;

	state->left = skb->len - hlen;	/* Space per frame */
	state->ptr = hlen;		/* Where to start from */

	state->offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	state->not_last_frag = iph->frag_off & htons(IP_MF);
}
EXPORT_SYMBOL(ip_frag_init);

static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
			 bool first_frag)
{
	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

	/* ANK: dirty, but effective trick. Upgrade options only if
	 * the segment to be fragmented was THE FIRST (otherwise,
	 * options are already fixed) and do it ONCE on the initial
	 * skb, so that all the following fragments will inherit the
	 * fixed options.
	 */
	if (first_frag)
		ip_options_fragment(from);
}

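/* Slow-path fragmentation iterator: each call to ip_frag_next() carves
 * the next fragment out of the original skb into a freshly allocated
 * one.  The canonical loop (see the slow path of ip_do_fragment()):
 *
 *	ip_frag_init(skb, hlen, ll_rs, mtu, df, &state);
 *	while (state.left > 0) {
 *		skb2 = ip_frag_next(skb, &state);
 *		if (IS_ERR(skb2))
 *			break;
 *		err = output(net, sk, skb2);
 *	}
 */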
struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
{
	unsigned int len = state->left;
	struct sk_buff *skb2;
	struct iphdr *iph;

	/* If it doesn't fit, use 'mtu' - the data space left */
	if (len > state->mtu)
		len = state->mtu;
	/* If we are not sending up to and including the packet end
	 * then align the next start on an eight byte boundary.
	 */
	if (len < state->left)
		len &= ~7;

	/* Allocate buffer */
	skb2 = alloc_skb(len + state->hlen + state->ll_rs, GFP_ATOMIC);
	if (!skb2)
		return ERR_PTR(-ENOMEM);

	/*
	 *	Set up data on packet.
	 */

	ip_copy_metadata(skb2, skb);
	skb_reserve(skb2, state->ll_rs);
	skb_put(skb2, len + state->hlen);
	skb_reset_network_header(skb2);
	skb2->transport_header = skb2->network_header + state->hlen;

	/*
	 *	Charge the memory for the fragment to any owner
	 *	it might possess.
	 */

	if (skb->sk)
		skb_set_owner_w(skb2, skb->sk);

	/*
	 *	Copy the packet header into the new buffer.
	 */

	skb_copy_from_linear_data(skb, skb_network_header(skb2), state->hlen);

	/*
	 *	Copy a block of the IP datagram.
	 */
	if (skb_copy_bits(skb, state->ptr, skb_transport_header(skb2), len))
		BUG();
	state->left -= len;

	/*
	 *	Fill in the new header fields.
	 */
	iph = ip_hdr(skb2);
	iph->frag_off = htons(state->offset >> 3);
	if (state->DF)
		iph->frag_off |= htons(IP_DF);

	/*
	 *	Added AC: if we are fragmenting a fragment that's not the
	 *	last fragment then keep the MF bit set on each fragment.
	 */
	if (state->left > 0 || state->not_last_frag)
		iph->frag_off |= htons(IP_MF);
	state->ptr += len;
	state->offset += len;

	iph->tot_len = htons(len + state->hlen);

	ip_send_check(iph);

	return skb2;
}
EXPORT_SYMBOL(ip_frag_next);

/*
 *	This IP datagram is too large to be sent in one piece. Break it up
 *	into smaller pieces (each piece being an IP header plus a block of
 *	the original datagram's data) that will still fit in a single device
 *	frame, and queue such frames for sending.
 */

int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph;
	struct sk_buff *skb2;
	u8 tstamp_type = skb->tstamp_type;
	struct rtable *rt = skb_rtable(skb);
	unsigned int mtu, hlen, ll_rs;
	struct ip_fraglist_iter iter;
	ktime_t tstamp = skb->tstamp;
	struct ip_frag_state state;
	int err = 0;

	/* For offloaded checksums, complete the checksum before
	 * fragmentation.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto fail;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	mtu = ip_skb_dst_mtu(sk, skb);
	if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
		mtu = IPCB(skb)->frag_max_size;

	/*
	 *	Set up starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = mtu - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers may create an invalid frag_list or break an
	 * existing one; this is not prohibited. In that case, fall back
	 * to copying.
	 *
	 * LATER: this step can be merged into the actual generation of
	 * fragments; we could switch to copying when we see the first
	 * bad fragment.
	 */
	if (skb_has_frag_list(skb)) {
		struct sk_buff *frag, *frag2;
		unsigned int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    ip_is_fragment(iph) ||
		    skb_cloned(skb) ||
		    skb_headroom(skb) < ll_rs)
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen + ll_rs)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */
		ip_fraglist_init(skb, iph, hlen, &iter);

		for (;;) {
			/* Prepare the header of the next frame
			 * before the previous one goes down.
			 */
			if (iter.frag) {
				bool first_frag = (iter.offset == 0);

				IPCB(iter.frag)->flags = IPCB(skb)->flags;
				ip_fraglist_prepare(skb, &iter);
				if (first_frag && IPCB(skb)->opt.optlen) {
					/* ipcb->opt is not populated for frags
					 * coming from __ip_make_skb(),
					 * ip_options_fragment() needs optlen
					 */
					IPCB(iter.frag)->opt.optlen =
						IPCB(skb)->opt.optlen;
					ip_options_fragment(iter.frag);
					ip_send_check(iter.iph);
				}
			}

			skb_set_delivery_time(skb, tstamp, tstamp_type);
			err = output(net, sk, skb);

			if (!err)
				IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
			if (err || !iter.frag)
				break;

			skb = ip_fraglist_next(&iter);
		}

		if (err == 0) {
			IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		kfree_skb_list(iter.frag);

		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	/*
	 *	Fragment the datagram.
	 */

	ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
		     &state);

	/*
	 *	Keep copying data until we run out.
	 */

	while (state.left > 0) {
		bool first_frag = (state.offset == 0);

		skb2 = ip_frag_next(skb, &state);
		if (IS_ERR(skb2)) {
			err = PTR_ERR(skb2);
			goto fail;
		}
		ip_frag_ipcb(skb, skb2, first_frag);

		/*
		 *	Put this fragment into the sending queue.
		 */
		skb_set_delivery_time(skb2, tstamp, tstamp_type);
		err = output(net, sk, skb2);
		if (err)
			goto fail;

		IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
	}
	consume_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
	return err;
}
EXPORT_SYMBOL(ip_do_fragment);

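/* getfrag() callback contract: copy @len bytes of payload into @to,
 * returning 0 on success or a negative errno; @odd is the (possibly
 * odd) byte offset of the block within the packet, used to fold the
 * block's checksum into skb->csum.  ip_generic_getfrag() is the
 * variant for user data held in a struct msghdr: when the device
 * cannot checksum (no CHECKSUM_PARTIAL), it checksums while copying.
 */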
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct msghdr *msg = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (!copy_from_iter_full(to, len, &msg->msg_iter))
			return -EFAULT;
	} else {
		__wsum csum = 0;

		if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);

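/* __ip_append_data() is the workhorse behind ip_append_data() and
 * ip_make_skb(): it appends @length bytes obtained via @getfrag to the
 * given @queue, growing the tail skb while the data fits and starting
 * a new MTU-sized skb whenever maxfraglen is reached, so the queue can
 * later be stitched into fragments (or one datagram) by __ip_make_skb().
 */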
static int __ip_append_data(struct sock *sk,
			    struct flowi4 *fl4,
			    struct sk_buff_head *queue,
			    struct inet_cork *cork,
			    struct page_frag *pfrag,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ubuf_info *uarg = NULL;
	struct sk_buff *skb;
	struct ip_options *opt = cork->opt;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	bool zc = false;
	unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt = dst_rtable(cork->dst);
	bool paged, hold_tskey = false, extra_uref = false;
	unsigned int wmem_alloc_delta = 0;
	u32 tskey = 0;

	skb = skb_peek_tail(queue);

	exthdrlen = !skb ? rt->dst.header_len : 0;
	mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
	paged = !!cork->gso_size;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? IP_MAX_MTU : mtu;

	if (cork->length + length > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment, and we
	 * want it not to be fragmented later.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
	    (!(flags & MSG_MORE) || cork->gso_size) &&
	    (!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM)))
		csummode = CHECKSUM_PARTIAL;
	if ((flags & MSG_ZEROCOPY) && length) {
		struct msghdr *msg = from;

		if (getfrag == ip_generic_getfrag && msg->msg_ubuf) {
			if (skb_zcopy(skb) && msg->msg_ubuf != skb_zcopy(skb))
				return -EINVAL;

			/* Leave uarg NULL if we can't do zerocopy; callers
			 * should be able to handle it.
			 */
			if ((rt->dst.dev->features & NETIF_F_SG) &&
			    csummode == CHECKSUM_PARTIAL) {
				paged = true;
				zc = true;
				uarg = msg->msg_ubuf;
			}
		} else if (sock_flag(sk, SOCK_ZEROCOPY)) {
			uarg = msg_zerocopy_realloc(sk, length, skb_zcopy(skb));
			if (!uarg)
				return -ENOBUFS;
			extra_uref = !skb_zcopy(skb);	/* only ref on new uarg */
			if (rt->dst.dev->features & NETIF_F_SG &&
			    csummode == CHECKSUM_PARTIAL) {
				paged = true;
				zc = true;
			} else {
				uarg_to_msgzc(uarg)->zerocopy = 0;
				skb_zcopy_set(skb, uarg, &extra_uref);
			}
		}
	} else if ((flags & MSG_SPLICE_PAGES) && length) {
		if (inet_test_bit(HDRINCL, sk))
			return -EPERM;
		if (rt->dst.dev->features & NETIF_F_SG &&
		    getfrag == ip_generic_getfrag)
			/* We need an empty buffer to attach stuff to */
			paged = true;
		else
			flags &= ~MSG_SPLICE_PAGES;
	}

	cork->length += length;

	if (cork->tx_flags & SKBTX_ANY_TSTAMP &&
	    READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) {
		if (cork->flags & IPCORK_TS_OPT_ID) {
			tskey = cork->ts_opt_id;
		} else {
			tskey = atomic_inc_return(&sk->sk_tskey) - 1;
			hold_tskey = true;
		}
	}

	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate a chain of
	 * skbs; each segment is an IP fragment ready for sending to the
	 * network once the appropriate IP header has been added.
	 */

	if (!skb)
		goto alloc_new_skb;
	while (length > 0) {
		/* Check if the remaining data fits into the current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen, alloc_extra;
			unsigned int pagedlen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If the remaining data exceeds the MTU,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;
			pagedlen = 0;

			alloc_extra = hh_len + 15;
			alloc_extra += exthdrlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea which fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloc_extra += rt->dst.trailer_len;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features & NETIF_F_SG))
				alloclen = mtu;
			else if (!paged &&
				 (fraglen + alloc_extra < SKB_MAX_ALLOC ||
				  !(rt->dst.dev->features & NETIF_F_SG)))
				alloclen = fraglen;
			else {
				alloclen = fragheaderlen + transhdrlen;
				pagedlen = datalen - transhdrlen;
			}

			alloclen += alloc_extra;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk, alloclen,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
				    2 * sk->sk_sndbuf)
					skb = alloc_skb(alloclen,
							sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;

			/*
			 * Fill in the control structures.
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 * Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen + exthdrlen - pagedlen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap - pagedlen;
			/* [!] NOTE: copy will be negative if pagedlen>0
			 * because then the equation reduces to -fraggap.
			 */
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			} else if (flags & MSG_SPLICE_PAGES) {
				copy = 0;
			}

			offset += copy;
			length -= copy + transhdrlen;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/* only the initial fragment is timestamped */
			skb_shinfo(skb)->tx_flags = cork->tx_flags;
			cork->tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;
			skb_zcopy_set(skb, uarg, &extra_uref);

			if ((flags & MSG_CONFIRM) && !skb_prev)
				skb_set_dst_pending_confirm(skb, 1);

			/*
			 * Put the packet on the pending queue.
			 */
			if (!skb->destructor) {
				skb->destructor = sock_wfree;
				skb->sk = sk;
				wmem_alloc_delta += skb->truesize;
			}
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features & NETIF_F_SG) &&
		    skb_tailroom(skb) >= copy) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else if (flags & MSG_SPLICE_PAGES) {
			struct msghdr *msg = from;

			err = -EIO;
			if (WARN_ON_ONCE(copy > msg->msg_iter.count))
				goto error;

			err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
						   sk->sk_allocation);
			if (err < 0)
				goto error;
			copy = err;
			wmem_alloc_delta += copy;
		} else if (!zc) {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			skb_zcopy_downgrade_managed(skb);
			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb_len_add(skb, copy);
			wmem_alloc_delta += copy;
		} else {
			err = skb_zerocopy_iter_dgram(skb, from, copy);
			if (err < 0)
				goto error;
		}
		offset += copy;
		length -= copy;
	}

	if (wmem_alloc_delta)
		refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
	return 0;

error_efault:
	err = -EFAULT;
error:
	net_zcopy_put_abort(uarg, extra_uref);
	cork->length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
	if (hold_tskey)
		atomic_dec(&sk->sk_tskey);
	return err;
}

static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
			 struct ipcm_cookie *ipc, struct rtable **rtp)
{
	struct ip_options_rcu *opt;
	struct rtable *rt;

	rt = *rtp;
	if (unlikely(!rt))
		return -EFAULT;

	cork->fragsize = ip_sk_use_pmtu(sk) ?
			 dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);

	if (!inetdev_valid_mtu(cork->fragsize))
		return -ENETUNREACH;

	/*
	 * Set up for corking.
	 */
	opt = ipc->opt;
	if (opt) {
		if (!cork->opt) {
			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
					    sk->sk_allocation);
			if (unlikely(!cork->opt))
				return -ENOBUFS;
		}
		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
		cork->flags |= IPCORK_OPT;
		cork->addr = ipc->addr;
	}

	cork->gso_size = ipc->gso_size;

	cork->dst = &rt->dst;
	/* We stole this route, caller should not release it. */
	*rtp = NULL;

	cork->length = 0;
	cork->ttl = ipc->ttl;
	cork->tos = ipc->tos;
	cork->mark = ipc->sockc.mark;
	cork->priority = ipc->priority;
	cork->transmit_time = ipc->sockc.transmit_time;
	cork->tx_flags = 0;
	sock_tx_timestamp(sk, &ipc->sockc, &cork->tx_flags);
	if (ipc->sockc.tsflags & SOCKCM_FLAG_TS_OPT_ID) {
		cork->flags |= IPCORK_TS_OPT_ID;
		cork->ts_opt_id = ipc->sockc.ts_opt_id;
	}

	return 0;
}

/*
 *	ip_append_data() can make one large IP datagram from many pieces of
 *	data.  Each piece will be held on the socket until
 *	ip_push_pending_frames() is called. Each piece can be a page or
 *	non-page data.
 *
 *	Not only UDP: other transport protocols, e.g. raw sockets, can
 *	potentially use this interface.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable **rtp,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	int err;

	if (flags & MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
		if (err)
			return err;
	} else {
		transhdrlen = 0;
	}

	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
				sk_page_frag(sk), getfrag,
				from, length, transhdrlen, flags);
}
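
/* A corking transport (UDP-style) typically drives the API above as
 * follows (sketch; 'corked', 'getfrag' and 'thlen' stand in for the
 * caller's own state, and locking/flow setup are elided):
 *
 *	err = ip_append_data(sk, fl4, getfrag, msg, len, thlen,
 *			     &ipc, &rt, msg->msg_flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else if (!corked)
 *		err = ip_push_pending_frames(sk, fl4);
 */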

static void ip_cork_release(struct inet_cork *cork)
{
	cork->flags &= ~IPCORK_OPT;
	kfree(cork->opt);
	cork->opt = NULL;
	dst_release(cork->dst);
	cork->dst = NULL;
}

/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push it out.
 */
struct sk_buff *__ip_make_skb(struct sock *sk,
			      struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = dst_rtable(cork->dst);
	struct iphdr *iph;
	u8 pmtudisc, ttl;
	__be16 df = 0;

	skb = __skb_dequeue(queue);
	if (!skb)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless the user demanded real PMTU discovery (IP_PMTUDISC_DO),
	 * we allow fragmenting the frame generated here. No matter how
	 * transforms change the size of the packet, it will come out.
	 */
	skb->ignore_df = ip_sk_ignore_df(sk);

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If ignore_df is set too, we still allow this frame to be
	 * fragmented locally.
	 */
	pmtudisc = READ_ONCE(inet->pmtudisc);
	if (pmtudisc == IP_PMTUDISC_DO ||
	    pmtudisc == IP_PMTUDISC_PROBE ||
	    (skb->len <= dst_mtu(&rt->dst) &&
	     ip_dont_fragment(sk, &rt->dst)))
		df = htons(IP_DF);

	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (cork->ttl != 0)
		ttl = cork->ttl;
	else if (rt->rt_type == RTN_MULTICAST)
		ttl = READ_ONCE(inet->mc_ttl);
	else
		ttl = ip_select_ttl(inet, &rt->dst);

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = (cork->tos != -1) ? cork->tos : READ_ONCE(inet->tos);
	iph->frag_off = df;
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);
	ip_select_ident(net, skb, sk);

	if (opt) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, cork->addr, rt);
	}

	skb->priority = (cork->tos != -1) ? cork->priority : READ_ONCE(sk->sk_priority);
	skb->mark = cork->mark;
	if (sk_is_tcp(sk))
		skb_set_delivery_time(skb, cork->transmit_time, SKB_CLOCK_MONOTONIC);
	else
		skb_set_delivery_type_by_clockid(skb, cork->transmit_time, sk->sk_clockid);
	/*
	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
	 * on dst refcount.
	 */
	cork->dst = NULL;
	skb_dst_set(skb, &rt->dst);

	if (iph->protocol == IPPROTO_ICMP) {
		u8 icmp_type;

		/* For such sockets, transhdrlen is zero when ip_append_data()
		 * is called, so the icmphdr is not in the skb linear region
		 * and we cannot get icmp_type via icmp_hdr(skb)->type.
		 */
		if (sk->sk_type == SOCK_RAW &&
		    !(fl4->flowi4_flags & FLOWI_FLAG_KNOWN_NH))
			icmp_type = fl4->fl4_icmp_type;
		else
			icmp_type = icmp_hdr(skb)->type;
		icmp_out_count(net, icmp_type);
	}

	ip_cork_release(cork);
out:
	return skb;
}

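/* ip_send_skb() transmits an skb built by __ip_make_skb(), translating
 * positive NET_XMIT_* congestion codes into errnos and accounting
 * failed transmissions as OUTDISCARDS.
 */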
int ip_send_skb(struct net *net, struct sk_buff *skb)
{
	int err;

	err = ip_local_out(net, skb->sk, skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}

int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
{
	struct sk_buff *skb;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		return 0;

	/* Netfilter gets the whole, not yet fragmented skb. */
	return ip_send_skb(sock_net(sk), skb);
}

/*
 *	Throw away all pending data on the socket.
 */
static void __ip_flush_pending_frames(struct sock *sk,
				      struct sk_buff_head *queue,
				      struct inet_cork *cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL)
		kfree_skb(skb);

	ip_cork_release(cork);
}

void ip_flush_pending_frames(struct sock *sk)
{
	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

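/* ip_make_skb() is the uncorked single-shot variant: it runs the
 * append/make steps over a private queue with a caller-provided cork,
 * so the socket's pending-frames state is never touched.  Callers
 * transmit the result with ip_send_skb().
 */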
struct sk_buff *ip_make_skb(struct sock *sk,
			    struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    struct inet_cork *cork, unsigned int flags)
{
	struct sk_buff_head queue;
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork->flags = 0;
	cork->addr = 0;
	cork->opt = NULL;
	err = ip_setup_cork(sk, cork, ipc, rtp);
	if (err)
		return ERR_PTR(err);

	err = __ip_append_data(sk, fl4, &queue, cork,
			       &current->task_frag, getfrag,
			       from, length, transhdrlen, flags);
	if (err) {
		__ip_flush_pending_frames(sk, &queue, cork);
		return ERR_PTR(err);
	}

	return __ip_make_skb(sk, fl4, &queue, cork);
}

/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr + offset, to, len);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as a reply to another packet.
 *	Used so far to send some TCP resets and ACKs.
 */
void ip_send_unicast_reply(struct sock *sk, const struct sock *orig_sk,
			   struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len, u64 transmit_time, u32 txhash)
{
	struct ip_options_data replyopts;
	struct ipcm_cookie ipc;
	struct flowi4 fl4;
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct sk_buff *nskb;
	int err;
	int oif;

	if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt))
		return;

	ipcm_init(&ipc);
	ipc.addr = daddr;
	ipc.sockc.transmit_time = transmit_time;

	if (replyopts.opt.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (replyopts.opt.opt.srr)
			daddr = replyopts.opt.opt.faddr;
	}

	oif = arg->bound_dev_if;
	if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
		oif = skb->skb_iif;

	flowi4_init_output(&fl4, oif,
			   IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark,
			   arg->tos & INET_DSCP_MASK,
			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
			   ip_reply_arg_flowi_flags(arg),
			   daddr, saddr,
			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
			   arg->uid);
	security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
	rt = ip_route_output_flow(net, &fl4, sk);
	if (IS_ERR(rt))
		return;

	inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK;

	sk->sk_protocol = ip_hdr(skb)->protocol;
	sk->sk_bound_dev_if = arg->bound_dev_if;
	sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default);
	ipc.sockc.mark = fl4.flowi4_mark;
	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
			     len, 0, &ipc, &rt, MSG_DONTWAIT);
	if (unlikely(err)) {
		ip_flush_pending_frames(sk);
		goto out;
	}

	nskb = skb_peek(&sk->sk_write_queue);
	if (nskb) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(nskb) +
			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
								arg->csum));
		nskb->ip_summed = CHECKSUM_NONE;
		if (orig_sk)
			skb_set_owner_edemux(nskb, (struct sock *)orig_sk);
		if (transmit_time)
			nskb->tstamp_type = SKB_CLOCK_MONOTONIC;
		if (txhash)
			skb_set_hash(nskb, txhash, PKT_HASH_TYPE_L4);
		ip_push_pending_frames(sk, &fl4);
	}
out:
	ip_rt_put(rt);
}

void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST)
	igmp_mc_init();
#endif
}