Loading...
1/*
2 * IPV6 GSO/GRO offload support
3 * Linux INET6 implementation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 *
10 * TCPv6 GSO/GRO support
11 */
12#include <linux/skbuff.h>
13#include <net/protocol.h>
14#include <net/tcp.h>
15#include <net/ip6_checksum.h>
16#include "ip6_offload.h"
17
18static int tcp_v6_gso_send_check(struct sk_buff *skb)
19{
20 const struct ipv6hdr *ipv6h;
21 struct tcphdr *th;
22
23 if (!pskb_may_pull(skb, sizeof(*th)))
24 return -EINVAL;
25
26 ipv6h = ipv6_hdr(skb);
27 th = tcp_hdr(skb);
28
29 th->check = 0;
30 skb->ip_summed = CHECKSUM_PARTIAL;
31 __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
32 return 0;
33}
34
/* GRO receive hook for TCP over IPv6 (legacy sk_buff-list API).
 *
 * Validates the TCP checksum against the IPv6 pseudo-header before
 * handing the packet to the generic TCP coalescing code.  Returns the
 * result of tcp_gro_receive(), or NULL after flagging the packet for
 * flush when checksum validation fails.
 */
static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);
	__wsum wsum;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip_csum;

	/* Start from the partial sum GRO has accumulated so far. */
	wsum = NAPI_GRO_CB(skb)->csum;

	switch (skb->ip_summed) {
	case CHECKSUM_NONE:
		/* No usable hardware sum: checksum the remaining bytes
		 * ourselves, then validate via the COMPLETE path below.
		 */
		wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb),
				    wsum);

		/* fall through */

	case CHECKSUM_COMPLETE:
		/* Fold in the pseudo-header; a zero result means the
		 * checksum verifies.
		 */
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  wsum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* Bad checksum: flush this packet, do not coalesce. */
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}
	/* Other ip_summed values (e.g. CHECKSUM_UNNECESSARY) skip
	 * software validation entirely.
	 */

skip_csum:
	return tcp_gro_receive(head, skb);
}
68
69static int tcp6_gro_complete(struct sk_buff *skb, int thoff)
70{
71 const struct ipv6hdr *iph = ipv6_hdr(skb);
72 struct tcphdr *th = tcp_hdr(skb);
73
74 th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
75 &iph->daddr, 0);
76 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
77
78 return tcp_gro_complete(skb);
79}
80
/* GSO/GRO callbacks registered for IPPROTO_TCP in the IPv6 offload
 * layer; segmentation itself is shared with IPv4 (tcp_gso_segment).
 */
static const struct net_offload tcpv6_offload = {
	.callbacks = {
		.gso_send_check = tcp_v6_gso_send_check,
		.gso_segment = tcp_gso_segment,
		.gro_receive = tcp6_gro_receive,
		.gro_complete = tcp6_gro_complete,
	},
};
89
/* Register the TCPv6 GSO/GRO callbacks with the IPv6 offload layer.
 * Returns 0 on success or a negative errno from inet6_add_offload().
 */
int __init tcpv6_offload_init(void)
{
	return inet6_add_offload(&tcpv6_offload, IPPROTO_TCP);
}
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * IPV6 GSO/GRO offload support
4 * Linux INET6 implementation
5 *
6 * TCPv6 GSO/GRO support
7 */
8#include <linux/indirect_call_wrapper.h>
9#include <linux/skbuff.h>
10#include <net/inet6_hashtables.h>
11#include <net/gro.h>
12#include <net/protocol.h>
13#include <net/tcp.h>
14#include <net/ip6_checksum.h>
15#include "ip6_offload.h"
16
/* Decide whether this flow should use fraglist GRO and record the
 * verdict in NAPI_GRO_CB(skb)->is_flist.
 *
 * Only relevant when the device advertises NETIF_F_GRO_FRAGLIST.  If a
 * packet of the same flow is already held on @head, inherit its choice;
 * otherwise look the flow up in the established-socket hash and enable
 * fraglist GRO only when no matching local socket exists.
 */
static void tcp6_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
				    struct tcphdr *th)
{
#if IS_ENABLED(CONFIG_IPV6)
	const struct ipv6hdr *hdr;
	struct sk_buff *p;
	struct sock *sk;
	struct net *net;
	int iif, sdif;

	if (likely(!(skb->dev->features & NETIF_F_GRO_FRAGLIST)))
		return;

	/* A held packet of the same flow already made the decision. */
	p = tcp_gro_lookup(head, th);
	if (p) {
		NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
		return;
	}

	inet6_get_iif_sdif(skb, &iif, &sdif);
	hdr = skb_gro_network_header(skb);
	net = dev_net(skb->dev);
	/* Note: source port stays network order (__be16), the destination
	 * port is converted because the lookup takes it in host order.
	 */
	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					iif, sdif);
	/* No established local socket -> use fraglist GRO for this flow. */
	NAPI_GRO_CB(skb)->is_flist = !sk;
	if (sk)
		sock_put(sk);	/* drop the ref taken by the lookup */
#endif /* IS_ENABLED(CONFIG_IPV6) */
}
48
49INDIRECT_CALLABLE_SCOPE
50struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
51{
52 struct tcphdr *th;
53
54 /* Don't bother verifying checksum if we're going to flush anyway. */
55 if (!NAPI_GRO_CB(skb)->flush &&
56 skb_gro_checksum_validate(skb, IPPROTO_TCP,
57 ip6_gro_compute_pseudo))
58 goto flush;
59
60 th = tcp_gro_pull_header(skb);
61 if (!th)
62 goto flush;
63
64 tcp6_check_fraglist_gro(head, skb, th);
65
66 return tcp_gro_receive(head, skb, th);
67
68flush:
69 NAPI_GRO_CB(skb)->flush = 1;
70 return NULL;
71}
72
/* GRO complete hook for TCP over IPv6: finalize a coalesced packet
 * before it is passed up the stack.  @thoff is the offset of the TCP
 * header within the packet.  Always returns 0.
 */
INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)
{
	/* Network header offset recorded during receive; indexed by
	 * skb->encapsulation to pick outer vs inner header.
	 */
	const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
	const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
	struct tcphdr *th = tcp_hdr(skb);

	if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
		/* Fraglist path: mark for fraglist segmentation and record
		 * the number of packets held in the list.
		 */
		skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV6;
		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

		__skb_incr_checksum_unnecessary(skb);

		return 0;
	}

	/* Re-seed the checksum field with the inverted pseudo-header sum
	 * over the merged payload length.
	 */
	th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
				  &iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;

	tcp_gro_complete(skb);
	return 0;
}
95
96static void __tcpv6_gso_segment_csum(struct sk_buff *seg,
97 __be16 *oldport, __be16 newport)
98{
99 struct tcphdr *th;
100
101 if (*oldport == newport)
102 return;
103
104 th = tcp_hdr(seg);
105 inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
106 *oldport = newport;
107}
108
/* After skb_segment_list(), every segment still carries the headers it
 * arrived with.  If the first two segments already agree on addresses
 * and ports, the list is left untouched.  Otherwise the first segment's
 * IPv6 addresses and TCP ports are copied onto every following segment,
 * with each TCP checksum patched incrementally for the port rewrite.
 * Returns @segs unchanged.
 */
static struct sk_buff *__tcpv6_gso_segment_list_csum(struct sk_buff *segs)
{
	const struct tcphdr *th;
	const struct ipv6hdr *iph;
	struct sk_buff *seg;
	struct tcphdr *th2;
	struct ipv6hdr *iph2;

	seg = segs;
	th = tcp_hdr(seg);
	iph = ipv6_hdr(seg);
	th2 = tcp_hdr(seg->next);
	iph2 = ipv6_hdr(seg->next);

	/* The u32 load compares source and dest ports in one go: they are
	 * adjacent 16-bit fields at the start of the TCP header.
	 */
	if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
	    ipv6_addr_equal(&iph->saddr, &iph2->saddr) &&
	    ipv6_addr_equal(&iph->daddr, &iph2->daddr))
		return segs;

	while ((seg = seg->next)) {
		th2 = tcp_hdr(seg);
		iph2 = ipv6_hdr(seg);

		iph2->saddr = iph->saddr;
		iph2->daddr = iph->daddr;
		/* Only the port rewrites need checksum fixups here. */
		__tcpv6_gso_segment_csum(seg, &th2->source, th->source);
		__tcpv6_gso_segment_csum(seg, &th2->dest, th->dest);
	}

	return segs;
}
140
141static struct sk_buff *__tcp6_gso_segment_list(struct sk_buff *skb,
142 netdev_features_t features)
143{
144 skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
145 if (IS_ERR(skb))
146 return skb;
147
148 return __tcpv6_gso_segment_list_csum(skb);
149}
150
151static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
152 netdev_features_t features)
153{
154 struct tcphdr *th;
155
156 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
157 return ERR_PTR(-EINVAL);
158
159 if (!pskb_may_pull(skb, sizeof(*th)))
160 return ERR_PTR(-EINVAL);
161
162 if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
163 struct tcphdr *th = tcp_hdr(skb);
164
165 if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size)
166 return __tcp6_gso_segment_list(skb, features);
167
168 skb->ip_summed = CHECKSUM_NONE;
169 }
170
171 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
172 const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
173 struct tcphdr *th = tcp_hdr(skb);
174
175 /* Set up pseudo header, usually expect stack to have done
176 * this.
177 */
178
179 th->check = 0;
180 skb->ip_summed = CHECKSUM_PARTIAL;
181 __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
182 }
183
184 return tcp_gso_segment(skb, features);
185}
186
/* Install the TCPv6 GSO/GRO callbacks into net_hotdata and register
 * them with the IPv6 offload layer for IPPROTO_TCP.  Returns 0 on
 * success or a negative errno from inet6_add_offload().
 */
int __init tcpv6_offload_init(void)
{
	net_hotdata.tcpv6_offload = (struct net_offload) {
		.callbacks = {
			.gso_segment = tcp6_gso_segment,
			.gro_receive = tcp6_gro_receive,
			.gro_complete = tcp6_gro_complete,
		},
	};
	return inet6_add_offload(&net_hotdata.tcpv6_offload, IPPROTO_TCP);
}