v3.15 (net/ipv6/ip6_offload.c)
/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/printk.h>

#include <net/protocol.h>
#include <net/ipv6.h>

#include "ip6_offload.h"

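/* Walk the chain of extension headers at the head of the skb, pulling
 * each one, and return the protocol number of the first header that is
 * not handled as a GSO extension header.
 */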
static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
{
	const struct net_offload *ops = NULL;

	for (;;) {
		struct ipv6_opt_hdr *opth;
		int len;

		if (proto != NEXTHDR_HOP) {
			ops = rcu_dereference(inet6_offloads[proto]);

			if (unlikely(!ops))
				break;

			if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}

		if (unlikely(!pskb_may_pull(skb, 8)))
			break;

		opth = (void *)skb->data;
		len = ipv6_optlen(opth);

		if (unlikely(!pskb_may_pull(skb, len)))
			break;

		proto = opth->nexthdr;
		__skb_pull(skb, len);
	}

	return proto;
}

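/* Validate a GSO skb before segmentation: pull the IPv6 header and any
 * extension headers, then hand the check off to the upper protocol's
 * gso_send_check callback.
 */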
static int ipv6_gso_send_check(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	const struct net_offload *ops;
	int err = -EINVAL;

	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
		goto out;

	ipv6h = ipv6_hdr(skb);
	__skb_pull(skb, sizeof(*ipv6h));
	err = -EPROTONOSUPPORT;

	ops = rcu_dereference(inet6_offloads[
		ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);

	if (likely(ops && ops->callbacks.gso_send_check)) {
		skb_reset_transport_header(skb);
		err = ops->callbacks.gso_send_check(skb);
	}

out:
	return err;
}

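/* Split an IPv6 GSO skb into segments via the upper protocol's
 * gso_segment callback, then fix up the payload length (and, for UDP
 * fragmentation, the fragment header) of each resulting segment.
 */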
static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
	netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct ipv6hdr *ipv6h;
	const struct net_offload *ops;
	int proto;
	struct frag_hdr *fptr;
	unsigned int unfrag_ip6hlen;
	u8 *prevhdr;
	int offset = 0;
	bool encap, udpfrag;
	int nhoff;

	if (unlikely(skb_shinfo(skb)->gso_type &
		     ~(SKB_GSO_UDP |
		       SKB_GSO_DODGY |
		       SKB_GSO_TCP_ECN |
		       SKB_GSO_GRE |
		       SKB_GSO_IPIP |
		       SKB_GSO_SIT |
		       SKB_GSO_UDP_TUNNEL |
		       SKB_GSO_MPLS |
		       SKB_GSO_TCPV6 |
		       0)))
		goto out;

	skb_reset_network_header(skb);
	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
		goto out;

	encap = SKB_GSO_CB(skb)->encap_level > 0;
	if (encap)
		features = skb->dev->hw_enc_features & netif_skb_features(skb);
	SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);

	ipv6h = ipv6_hdr(skb);
	__skb_pull(skb, sizeof(*ipv6h));
	segs = ERR_PTR(-EPROTONOSUPPORT);

	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);

	if (skb->encapsulation &&
	    skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP))
		udpfrag = proto == IPPROTO_UDP && encap;
	else
		udpfrag = proto == IPPROTO_UDP && !skb->encapsulation;

	ops = rcu_dereference(inet6_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment)) {
		skb_reset_transport_header(skb);
		segs = ops->callbacks.gso_segment(skb, features);
	}

	if (IS_ERR(segs))
		goto out;

	for (skb = segs; skb; skb = skb->next) {
		ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
		ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h));
		skb->network_header = (u8 *)ipv6h - skb->head;

		if (udpfrag) {
			unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
			fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
			fptr->frag_off = htons(offset);
			if (skb->next != NULL)
				fptr->frag_off |= htons(IP6_MF);
			offset += (ntohs(ipv6h->payload_len) -
				   sizeof(struct frag_hdr));
		}
		if (encap)
			skb_reset_inner_headers(skb);
	}

out:
	return segs;
}

/* Return the total length of all the extension hdrs, following the same
 * logic in ipv6_gso_pull_exthdrs() when parsing ext-hdrs.
 */
static int ipv6_exthdrs_len(struct ipv6hdr *iph,
			    const struct net_offload **opps)
{
	struct ipv6_opt_hdr *opth = (void *)iph;
	int len = 0, proto, optlen = sizeof(*iph);

	proto = iph->nexthdr;
	for (;;) {
		if (proto != NEXTHDR_HOP) {
			*opps = rcu_dereference(inet6_offloads[proto]);
			if (unlikely(!(*opps)))
				break;
			if (!((*opps)->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}
		opth = (void *)opth + optlen;
		optlen = ipv6_optlen(opth);
		len += optlen;
		proto = opth->nexthdr;
	}
	return len;
}

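/* GRO receive handler for IPv6: locate the transport header, then compare
 * the IPv6 header against packets already held on the GRO list so that
 * packets of the same flow can be merged by the upper protocol.
 */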
static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct ipv6hdr *iph;
	unsigned int nlen;
	unsigned int hlen;
	unsigned int off;
	u16 flush = 1;
	int proto;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*iph);
	iph = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		iph = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!iph))
			goto out;
	}

	skb_set_network_header(skb, off);
	skb_gro_pull(skb, sizeof(*iph));
	skb_set_transport_header(skb, skb_gro_offset(skb));

	flush += ntohs(iph->payload_len) != skb_gro_len(skb);

	rcu_read_lock();
	proto = iph->nexthdr;
	ops = rcu_dereference(inet6_offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive) {
		__pskb_pull(skb, skb_gro_offset(skb));
		proto = ipv6_gso_pull_exthdrs(skb, proto);
		skb_gro_pull(skb, -skb_transport_offset(skb));
		skb_reset_transport_header(skb);
		__skb_push(skb, skb_gro_offset(skb));

		ops = rcu_dereference(inet6_offloads[proto]);
		if (!ops || !ops->callbacks.gro_receive)
			goto out_unlock;

		iph = ipv6_hdr(skb);
	}

	NAPI_GRO_CB(skb)->proto = proto;

	flush--;
	nlen = skb_network_header_len(skb);

	for (p = *head; p; p = p->next) {
		const struct ipv6hdr *iph2;
		__be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		iph2 = (struct ipv6hdr *)(p->data + off);
		first_word = *(__be32 *)iph ^ *(__be32 *)iph2;

		/* All fields must match except length and Traffic Class.
		 * XXX skbs on the gro_list have all been parsed and pulled
		 * already so we don't need to compare nlen
		 * (nlen != (sizeof(*iph2) + ipv6_exthdrs_len(iph2, &ops)))
		 * memcmp() alone below is sufficient, right?
		 */
		if ((first_word & htonl(0xF00FFFFF)) ||
		    memcmp(&iph->nexthdr, &iph2->nexthdr,
			   nlen - offsetof(struct ipv6hdr, nexthdr))) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		/* flush if Traffic Class fields are different */
		NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
		NAPI_GRO_CB(p)->flush |= flush;
	}

	NAPI_GRO_CB(skb)->flush |= flush;

	skb_gro_postpull_rcsum(skb, iph, nlen);

	pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();

out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}

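/* Finish a merged GRO packet: restore the payload length and let the
 * upper protocol's gro_complete callback fix up its own header.
 */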
static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct net_offload *ops;
	struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
	int err = -ENOSYS;

	iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));

	rcu_read_lock();

	nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff);

out_unlock:
	rcu_read_unlock();

	return err;
}

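/* Registration glue: ipv6_packet_offload hooks the handlers above into the
 * generic offload path for ETH_P_IPV6 frames, and sit_offload reuses the
 * GSO handlers for IPv6-in-IPv4 (SIT) traffic.
 */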
static struct packet_offload ipv6_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IPV6),
	.callbacks = {
		.gso_send_check = ipv6_gso_send_check,
		.gso_segment = ipv6_gso_segment,
		.gro_receive = ipv6_gro_receive,
		.gro_complete = ipv6_gro_complete,
	},
};

static const struct net_offload sit_offload = {
	.callbacks = {
		.gso_send_check = ipv6_gso_send_check,
		.gso_segment	= ipv6_gso_segment,
	},
};

static int __init ipv6_offload_init(void)
{

	if (tcpv6_offload_init() < 0)
		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
	if (udp_offload_init() < 0)
		pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
	if (ipv6_exthdrs_offload_init() < 0)
		pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);

	dev_add_offload(&ipv6_packet_offload);

	inet_add_offload(&sit_offload, IPPROTO_IPV6);

	return 0;
}

fs_initcall(ipv6_offload_init);
v4.17 (net/ipv6/ip6_offload.c)
/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/printk.h>

#include <net/protocol.h>
#include <net/ipv6.h>
#include <net/inet_common.h>

#include "ip6_offload.h"

static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
{
	const struct net_offload *ops = NULL;

	for (;;) {
		struct ipv6_opt_hdr *opth;
		int len;

		if (proto != NEXTHDR_HOP) {
			ops = rcu_dereference(inet6_offloads[proto]);

			if (unlikely(!ops))
				break;

			if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}

		if (unlikely(!pskb_may_pull(skb, 8)))
			break;

		opth = (void *)skb->data;
		len = ipv6_optlen(opth);

		if (unlikely(!pskb_may_pull(skb, len)))
			break;

		opth = (void *)skb->data;
		proto = opth->nexthdr;
		__skb_pull(skb, len);
	}

	return proto;
}

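/* Split an IPv6 GSO skb into segments via the upper protocol's
 * gso_segment callback. For SKB_GSO_PARTIAL output the per-segment
 * payload length is derived from gso_size rather than from skb->len.
 */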
static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
	netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct ipv6hdr *ipv6h;
	const struct net_offload *ops;
	int proto;
	struct frag_hdr *fptr;
	unsigned int payload_len;
	u8 *prevhdr;
	int offset = 0;
	bool encap, udpfrag;
	int nhoff;
	bool gso_partial;

	skb_reset_network_header(skb);
	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
		goto out;

	encap = SKB_GSO_CB(skb)->encap_level > 0;
	if (encap)
		features &= skb->dev->hw_enc_features;
	SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);

	ipv6h = ipv6_hdr(skb);
	__skb_pull(skb, sizeof(*ipv6h));
	segs = ERR_PTR(-EPROTONOSUPPORT);

	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);

	if (skb->encapsulation &&
	    skb_shinfo(skb)->gso_type & (SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6))
		udpfrag = proto == IPPROTO_UDP && encap;
	else
		udpfrag = proto == IPPROTO_UDP && !skb->encapsulation;

	ops = rcu_dereference(inet6_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment)) {
		skb_reset_transport_header(skb);
		segs = ops->callbacks.gso_segment(skb, features);
	}

	if (IS_ERR_OR_NULL(segs))
		goto out;

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	for (skb = segs; skb; skb = skb->next) {
		ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
		if (gso_partial && skb_is_gso(skb))
			payload_len = skb_shinfo(skb)->gso_size +
				      SKB_GSO_CB(skb)->data_offset +
				      skb->head - (unsigned char *)(ipv6h + 1);
		else
			payload_len = skb->len - nhoff - sizeof(*ipv6h);
		ipv6h->payload_len = htons(payload_len);
		skb->network_header = (u8 *)ipv6h - skb->head;

		if (udpfrag) {
			int err = ip6_find_1stfragopt(skb, &prevhdr);
			if (err < 0) {
				kfree_skb_list(segs);
				return ERR_PTR(err);
			}
			fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
			fptr->frag_off = htons(offset);
			if (skb->next)
				fptr->frag_off |= htons(IP6_MF);
			offset += (ntohs(ipv6h->payload_len) -
				   sizeof(struct frag_hdr));
		}
		if (encap)
			skb_reset_inner_headers(skb);
	}

out:
	return segs;
}

/* Return the total length of all the extension hdrs, following the same
 * logic in ipv6_gso_pull_exthdrs() when parsing ext-hdrs.
 */
static int ipv6_exthdrs_len(struct ipv6hdr *iph,
			    const struct net_offload **opps)
{
	struct ipv6_opt_hdr *opth = (void *)iph;
	int len = 0, proto, optlen = sizeof(*iph);

	proto = iph->nexthdr;
	for (;;) {
		if (proto != NEXTHDR_HOP) {
			*opps = rcu_dereference(inet6_offloads[proto]);
			if (unlikely(!(*opps)))
				break;
			if (!((*opps)->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}
		opth = (void *)opth + optlen;
		optlen = ipv6_optlen(opth);
		len += optlen;
		proto = opth->nexthdr;
	}
	return len;
}

static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct ipv6hdr *iph;
	unsigned int nlen;
	unsigned int hlen;
	unsigned int off;
	u16 flush = 1;
	int proto;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*iph);
	iph = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		iph = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!iph))
			goto out;
	}

	skb_set_network_header(skb, off);
	skb_gro_pull(skb, sizeof(*iph));
	skb_set_transport_header(skb, skb_gro_offset(skb));

	flush += ntohs(iph->payload_len) != skb_gro_len(skb);

	rcu_read_lock();
	proto = iph->nexthdr;
	ops = rcu_dereference(inet6_offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive) {
		__pskb_pull(skb, skb_gro_offset(skb));
		skb_gro_frag0_invalidate(skb);
		proto = ipv6_gso_pull_exthdrs(skb, proto);
		skb_gro_pull(skb, -skb_transport_offset(skb));
		skb_reset_transport_header(skb);
		__skb_push(skb, skb_gro_offset(skb));

		ops = rcu_dereference(inet6_offloads[proto]);
		if (!ops || !ops->callbacks.gro_receive)
			goto out_unlock;

		iph = ipv6_hdr(skb);
	}

	NAPI_GRO_CB(skb)->proto = proto;

	flush--;
	nlen = skb_network_header_len(skb);

	for (p = *head; p; p = p->next) {
		const struct ipv6hdr *iph2;
		__be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		iph2 = (struct ipv6hdr *)(p->data + off);
		first_word = *(__be32 *)iph ^ *(__be32 *)iph2;

		/* All fields must match except length and Traffic Class.
		 * XXX skbs on the gro_list have all been parsed and pulled
		 * already so we don't need to compare nlen
		 * (nlen != (sizeof(*iph2) + ipv6_exthdrs_len(iph2, &ops)))
		 * memcmp() alone below is sufficient, right?
		 */
		if ((first_word & htonl(0xF00FFFFF)) ||
		    memcmp(&iph->nexthdr, &iph2->nexthdr,
			   nlen - offsetof(struct ipv6hdr, nexthdr))) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		/* flush if Traffic Class fields are different */
		NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
		NAPI_GRO_CB(p)->flush |= flush;

		/* If the previous IP ID value was based on an atomic
		 * datagram we can overwrite the value and ignore it.
		 */
		if (NAPI_GRO_CB(skb)->is_atomic)
			NAPI_GRO_CB(p)->flush_id = 0;
	}

	NAPI_GRO_CB(skb)->is_atomic = true;
	NAPI_GRO_CB(skb)->flush |= flush;

	skb_gro_postpull_rcsum(skb, iph, nlen);

	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();

out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}

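/* GRO receive wrappers for tunneled traffic: encap_mark guards against
 * aggregating more than one level of encapsulation, after which the
 * packet is passed to the GRO handler of the inner address family.
 */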
static struct sk_buff **sit_ip6ip6_gro_receive(struct sk_buff **head,
					       struct sk_buff *skb)
{
	/* Common GRO receive for SIT and IP6IP6 */

	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return ipv6_gro_receive(head, skb);
}

static struct sk_buff **ip4ip6_gro_receive(struct sk_buff **head,
					   struct sk_buff *skb)
{
	/* Common GRO receive for SIT and IP6IP6 */

	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return inet_gro_receive(head, skb);
}

static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct net_offload *ops;
	struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
	int err = -ENOSYS;

	if (skb->encapsulation) {
		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
		skb_set_inner_network_header(skb, nhoff);
	}

	iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));

	rcu_read_lock();

	nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff);

out_unlock:
	rcu_read_unlock();

	return err;
}

static int sit_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
	return ipv6_gro_complete(skb, nhoff);
}

static int ip6ip6_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
	return ipv6_gro_complete(skb, nhoff);
}

static int ip4ip6_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
	return inet_gro_complete(skb, nhoff);
}

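/* Registration glue: ipv6_packet_offload handles plain ETH_P_IPV6 frames,
 * while sit_offload, ip4ip6_offload and ip6ip6_offload cover IPv6-in-IPv4,
 * IPv4-in-IPv6 and IPv6-in-IPv6 tunnels respectively.
 */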
static struct packet_offload ipv6_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IPV6),
	.callbacks = {
		.gso_segment = ipv6_gso_segment,
		.gro_receive = ipv6_gro_receive,
		.gro_complete = ipv6_gro_complete,
	},
};

static const struct net_offload sit_offload = {
	.callbacks = {
		.gso_segment	= ipv6_gso_segment,
		.gro_receive    = sit_ip6ip6_gro_receive,
		.gro_complete   = sit_gro_complete,
	},
};

static const struct net_offload ip4ip6_offload = {
	.callbacks = {
		.gso_segment	= inet_gso_segment,
		.gro_receive    = ip4ip6_gro_receive,
		.gro_complete   = ip4ip6_gro_complete,
	},
};

static const struct net_offload ip6ip6_offload = {
	.callbacks = {
		.gso_segment	= ipv6_gso_segment,
		.gro_receive    = sit_ip6ip6_gro_receive,
		.gro_complete   = ip6ip6_gro_complete,
	},
};
static int __init ipv6_offload_init(void)
{

	if (tcpv6_offload_init() < 0)
		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
	if (ipv6_exthdrs_offload_init() < 0)
		pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);

	dev_add_offload(&ipv6_packet_offload);

	inet_add_offload(&sit_offload, IPPROTO_IPV6);
	inet6_add_offload(&ip6ip6_offload, IPPROTO_IPV6);
	inet6_add_offload(&ip4ip6_offload, IPPROTO_IPIP);

	return 0;
}

fs_initcall(ipv6_offload_init);
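
The flow-matching masks used in ipv6_gro_receive() in both listings are easier to see in isolation. The following standalone userspace sketch (not part of the kernel file; the header words are hypothetical values chosen for illustration) shows that htonl(0xF00FFFFF) keeps only the Version and Flow Label bits of the first 32-bit word, while htonl(0x0FF00000) isolates the Traffic Class byte.

/* Illustrative userspace sketch, not kernel code: splits the first 32 bits
 * of an IPv6 header the same way ipv6_gro_receive() does.
 * 0xF00FFFFF keeps Version + Flow Label (these must match for aggregation);
 * 0x0FF00000 isolates Traffic Class (a mismatch there only forces a flush).
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	/* Two hypothetical first words: same Version and Flow Label,
	 * different Traffic Class (0x00 vs 0x2e).
	 */
	uint32_t w1 = htonl(0x60012345);	/* Version 6, TC 0x00, Flow 0x12345 */
	uint32_t w2 = htonl(0x62E12345);	/* Version 6, TC 0x2e, Flow 0x12345 */
	uint32_t diff = w1 ^ w2;		/* mirrors first_word in the kernel code */

	printf("same flow:  %s\n", (diff & htonl(0xF00FFFFF)) ? "no" : "yes");
	printf("TC differs: %s\n", (diff & htonl(0x0FF00000)) ? "yes" : "no");
	return 0;
}

With these inputs the first test passes (the packets may be aggregated) while the second reports a Traffic Class difference, which in the kernel code only sets the flush bit on the matching GRO entry.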