net/xfrm/xfrm_device.c (Linux v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * xfrm_device.c - IPsec device offloading code.
  4 *
  5 * Copyright (c) 2015 secunet Security Networks AG
  6 *
  7 * Author:
  8 * Steffen Klassert <steffen.klassert@secunet.com>
  9 */
 10
 11#include <linux/errno.h>
 12#include <linux/module.h>
 13#include <linux/netdevice.h>
 14#include <linux/skbuff.h>
 15#include <linux/slab.h>
 16#include <linux/spinlock.h>
 17#include <net/dst.h>
 18#include <net/gso.h>
 19#include <net/xfrm.h>
 20#include <linux/notifier.h>
 21
 22#ifdef CONFIG_XFRM_OFFLOAD
 23static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
 24				  unsigned int hsize)
 25{
 26	struct xfrm_offload *xo = xfrm_offload(skb);
 27
 28	skb_reset_mac_len(skb);
 29	if (xo->flags & XFRM_GSO_SEGMENT)
 30		skb->transport_header -= x->props.header_len;
 31
 32	pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
 33}
 34
 35static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
 36				    unsigned int hsize)
 37
 38{
 39	struct xfrm_offload *xo = xfrm_offload(skb);
 40
 41	if (xo->flags & XFRM_GSO_SEGMENT)
 42		skb->transport_header = skb->network_header + hsize;
 43
 44	skb_reset_mac_len(skb);
 45	pskb_pull(skb, skb->mac_len + x->props.header_len);
 46}
 47
 48static void __xfrm_mode_beet_prep(struct xfrm_state *x, struct sk_buff *skb,
 49				  unsigned int hsize)
 50{
 51	struct xfrm_offload *xo = xfrm_offload(skb);
 52	int phlen = 0;
 53
 54	if (xo->flags & XFRM_GSO_SEGMENT)
 55		skb->transport_header = skb->network_header + hsize;
 56
 57	skb_reset_mac_len(skb);
 58	if (x->sel.family != AF_INET6) {
 59		phlen = IPV4_BEET_PHMAXLEN;
 60		if (x->outer_mode.family == AF_INET6)
 61			phlen += sizeof(struct ipv6hdr) - sizeof(struct iphdr);
 62	}
 63
 64	pskb_pull(skb, skb->mac_len + hsize + (x->props.header_len - phlen));
 65}
 66
 67/* Adjust pointers into the packet when IPsec is done at layer2 */
 68static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
 69{
 70	switch (x->outer_mode.encap) {
 71	case XFRM_MODE_TUNNEL:
 72		if (x->outer_mode.family == AF_INET)
 73			return __xfrm_mode_tunnel_prep(x, skb,
 74						       sizeof(struct iphdr));
 75		if (x->outer_mode.family == AF_INET6)
 76			return __xfrm_mode_tunnel_prep(x, skb,
 77						       sizeof(struct ipv6hdr));
 78		break;
 79	case XFRM_MODE_TRANSPORT:
 80		if (x->outer_mode.family == AF_INET)
 81			return __xfrm_transport_prep(x, skb,
 82						     sizeof(struct iphdr));
 83		if (x->outer_mode.family == AF_INET6)
 84			return __xfrm_transport_prep(x, skb,
 85						     sizeof(struct ipv6hdr));
 86		break;
 87	case XFRM_MODE_BEET:
 88		if (x->outer_mode.family == AF_INET)
 89			return __xfrm_mode_beet_prep(x, skb,
 90						     sizeof(struct iphdr));
 91		if (x->outer_mode.family == AF_INET6)
 92			return __xfrm_mode_beet_prep(x, skb,
 93						     sizeof(struct ipv6hdr));
 94		break;
 95	case XFRM_MODE_ROUTEOPTIMIZATION:
 96	case XFRM_MODE_IN_TRIGGER:
 97		break;
 98	}
 99}
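/* Illustration (a sketch, not part of this file): with an assumed
 * Ethernet frame (mac_len = 14) carrying an IPv4 ESP tunnel whose
 * x->props.header_len happens to be 28 (20-byte outer IPv4 header plus
 * an 8-byte ESP header, IV ignored for simplicity),
 * __xfrm_mode_tunnel_prep() above pulls 14 + 28 = 42 bytes, leaving
 * skb->data at the payload boundary the offloading driver expects.
 */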
100
101static inline bool xmit_xfrm_check_overflow(struct sk_buff *skb)
102{
103	struct xfrm_offload *xo = xfrm_offload(skb);
104	__u32 seq = xo->seq.low;
105
106	seq += skb_shinfo(skb)->gso_segs;
107	if (unlikely(seq < xo->seq.low))
108		return true;
109
110	return false;
111}
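/* The check above relies on unsigned wraparound: if adding gso_segs to
 * the low 32 bits of the ESP sequence number yields a smaller value,
 * the counter would overflow mid-burst, so the skb has to be segmented
 * in software instead. A self-contained sketch of the same test with
 * made-up numbers (illustration only, not part of this file):
 */
#if 0
static bool would_overflow(u32 seq_low, u32 gso_segs)
{
	/* e.g. seq_low = 0xfffffffe, gso_segs = 4: the sum wraps to
	 * 0x00000002, which is < seq_low, so report overflow.
	 */
	return seq_low + gso_segs < seq_low;
}
#endif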
112
113struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
114{
115	int err;
116	unsigned long flags;
117	struct xfrm_state *x;
118	struct softnet_data *sd;
119	struct sk_buff *skb2, *nskb, *pskb = NULL;
120	netdev_features_t esp_features = features;
121	struct xfrm_offload *xo = xfrm_offload(skb);
122	struct net_device *dev = skb->dev;
123	struct sec_path *sp;
124
125	if (!xo || (xo->flags & XFRM_XMIT))
126		return skb;
127
128	if (!(features & NETIF_F_HW_ESP))
129		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
130
131	sp = skb_sec_path(skb);
132	x = sp->xvec[sp->len - 1];
133	if (xo->flags & XFRM_GRO || x->xso.dir == XFRM_DEV_OFFLOAD_IN)
134		return skb;
135
136	/* The packet was sent to the HW IPsec packet offload engine,
137	 * but to the wrong device. Drop the packet so it won't skip
138	 * the XFRM stack.
139	 */
140	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET && x->xso.dev != dev) {
141		kfree_skb(skb);
142		dev_core_stats_tx_dropped_inc(dev);
143		return NULL;
144	}
145
146	/* This skb was already validated on the upper/virtual dev */
147	if ((x->xso.dev != dev) && (x->xso.real_dev == dev))
148		return skb;
149
150	local_irq_save(flags);
151	sd = this_cpu_ptr(&softnet_data);
152	err = !skb_queue_empty(&sd->xfrm_backlog);
153	local_irq_restore(flags);
154
155	if (err) {
156		*again = true;
157		return skb;
158	}
159
160	if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) ||
161				unlikely(xmit_xfrm_check_overflow(skb)))) {
162		struct sk_buff *segs;
163
164		/* Packet got rerouted, fixup features and segment it. */
165		esp_features = esp_features & ~(NETIF_F_HW_ESP | NETIF_F_GSO_ESP);
166
167		segs = skb_gso_segment(skb, esp_features);
168		if (IS_ERR(segs)) {
169			kfree_skb(skb);
170			dev_core_stats_tx_dropped_inc(dev);
171			return NULL;
172		} else {
173			consume_skb(skb);
174			skb = segs;
175		}
176	}
177
178	if (!skb->next) {
179		esp_features |= skb->dev->gso_partial_features;
180		xfrm_outer_mode_prep(x, skb);
181
182		xo->flags |= XFRM_DEV_RESUME;
183
184		err = x->type_offload->xmit(x, skb, esp_features);
185		if (err) {
186			if (err == -EINPROGRESS)
187				return NULL;
188
189			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
190			kfree_skb(skb);
191			return NULL;
192		}
193
194		skb_push(skb, skb->data - skb_mac_header(skb));
195
196		return skb;
197	}
198
199	skb_list_walk_safe(skb, skb2, nskb) {
200		esp_features |= skb->dev->gso_partial_features;
201		skb_mark_not_on_list(skb2);
202
203		xo = xfrm_offload(skb2);
204		xo->flags |= XFRM_DEV_RESUME;
205
206		xfrm_outer_mode_prep(x, skb2);
207
208		err = x->type_offload->xmit(x, skb2, esp_features);
209		if (!err) {
210			skb2->next = nskb;
211		} else if (err != -EINPROGRESS) {
212			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
213			skb2->next = nskb;
214			kfree_skb_list(skb2);
215			return NULL;
216		} else {
217			if (skb == skb2)
218				skb = nskb;
219			else
220				pskb->next = nskb;
221
222			continue;
223		}
224
225		skb_push(skb2, skb2->data - skb_mac_header(skb2));
226		pskb = skb2;
227	}
228
229	return skb;
230}
231EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
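/* The caller in the core transmit path sees three outcomes from
 * validate_xmit_xfrm(): the (possibly segmented) skb ready for the
 * driver, NULL when the packet was consumed (offloaded asynchronously
 * via -EINPROGRESS or dropped on error), or the original skb with
 * *again set when the per-CPU xfrm_backlog must be drained before
 * anything new may be sent.
 */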
232
233int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
234		       struct xfrm_user_offload *xuo,
235		       struct netlink_ext_ack *extack)
236{
237	int err;
238	struct dst_entry *dst;
239	struct net_device *dev;
240	struct xfrm_dev_offload *xso = &x->xso;
241	xfrm_address_t *saddr;
242	xfrm_address_t *daddr;
243	bool is_packet_offload;
244
245	if (!x->type_offload) {
246		NL_SET_ERR_MSG(extack, "Type doesn't support offload");
247		return -EINVAL;
248	}
249
250	if (xuo->flags &
251	    ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND | XFRM_OFFLOAD_PACKET)) {
252		NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request");
253		return -EINVAL;
254	}
255
256	if ((xuo->flags & XFRM_OFFLOAD_INBOUND && x->dir == XFRM_SA_DIR_OUT) ||
257	    (!(xuo->flags & XFRM_OFFLOAD_INBOUND) && x->dir == XFRM_SA_DIR_IN)) {
258		NL_SET_ERR_MSG(extack, "Mismatched SA and offload direction");
259		return -EINVAL;
260	}
261
262	is_packet_offload = xuo->flags & XFRM_OFFLOAD_PACKET;
263
264	/* We don't yet support TFC padding. */
265	if (x->tfcpad) {
266		NL_SET_ERR_MSG(extack, "TFC padding can't be offloaded");
267		return -EINVAL;
268	}
269
270	dev = dev_get_by_index(net, xuo->ifindex);
271	if (!dev) {
272		struct xfrm_dst_lookup_params params;
273
274		if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
275			saddr = &x->props.saddr;
276			daddr = &x->id.daddr;
277		} else {
278			saddr = &x->id.daddr;
279			daddr = &x->props.saddr;
280		}
281
282		memset(&params, 0, sizeof(params));
283		params.net = net;
284		params.saddr = saddr;
285		params.daddr = daddr;
286		params.mark = xfrm_smark_get(0, x);
287		dst = __xfrm_dst_lookup(x->props.family, &params);
288		if (IS_ERR(dst))
289			return (is_packet_offload) ? -EINVAL : 0;
290
291		dev = dst->dev;
292
293		dev_hold(dev);
294		dst_release(dst);
295	}
296
297	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
298		xso->dev = NULL;
299		dev_put(dev);
300		return (is_packet_offload) ? -EINVAL : 0;
301	}
302
303	if (!is_packet_offload && x->props.flags & XFRM_STATE_ESN &&
304	    !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
305		NL_SET_ERR_MSG(extack, "Device doesn't support offload with ESN");
306		xso->dev = NULL;
307		dev_put(dev);
308		return -EINVAL;
309	}
310
311	xso->dev = dev;
312	netdev_tracker_alloc(dev, &xso->dev_tracker, GFP_ATOMIC);
313	xso->real_dev = dev;
314
315	if (xuo->flags & XFRM_OFFLOAD_INBOUND)
316		xso->dir = XFRM_DEV_OFFLOAD_IN;
317	else
318		xso->dir = XFRM_DEV_OFFLOAD_OUT;
319
320	if (is_packet_offload)
321		xso->type = XFRM_DEV_OFFLOAD_PACKET;
322	else
323		xso->type = XFRM_DEV_OFFLOAD_CRYPTO;
324
325	err = dev->xfrmdev_ops->xdo_dev_state_add(x, extack);
326	if (err) {
327		xso->dev = NULL;
328		xso->dir = 0;
329		xso->real_dev = NULL;
330		netdev_put(dev, &xso->dev_tracker);
331		xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
332
333		/* User explicitly requested packet offload mode and configured
334		 * policy in addition to the XFRM state. So be civil to users,
335		 * and return an error instead of taking the fallback path.
336		 */
337		if ((err != -EOPNOTSUPP && !is_packet_offload) || is_packet_offload) {
338			NL_SET_ERR_MSG_WEAK(extack, "Device failed to offload this state");
339			return err;
340		}
341	}
342
343	return 0;
344}
345EXPORT_SYMBOL_GPL(xfrm_dev_state_add);
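/* User space requests offload through the XFRMA_OFFLOAD_DEV netlink
 * attribute, which carries the struct xfrm_user_offload (ifindex +
 * flags) consumed above. With iproute2 the request looks roughly like
 * this (a sketch; device name, addresses and key are assumptions):
 *
 *   ip xfrm state add src 192.0.2.1 dst 192.0.2.2 \
 *      proto esp spi 0x1 mode tunnel \
 *      aead 'rfc4106(gcm(aes))' 0x... 128 \
 *      offload packet dev eth0 dir out
 *
 * Omitting the "packet" keyword selects crypto offload
 * (XFRM_DEV_OFFLOAD_CRYPTO) rather than packet offload.
 */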
346
347int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
348			struct xfrm_user_offload *xuo, u8 dir,
349			struct netlink_ext_ack *extack)
350{
351	struct xfrm_dev_offload *xdo = &xp->xdo;
352	struct net_device *dev;
353	int err;
354
355	if (!xuo->flags || xuo->flags & ~XFRM_OFFLOAD_PACKET) {
356		/* We support only packet offload mode, which means the
357		 * user must set the XFRM_OFFLOAD_PACKET bit.
358		 */
359		NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request");
360		return -EINVAL;
361	}
362
363	dev = dev_get_by_index(net, xuo->ifindex);
364	if (!dev)
365		return -EINVAL;
366
367	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_policy_add) {
368		xdo->dev = NULL;
369		dev_put(dev);
370		NL_SET_ERR_MSG(extack, "Policy offload is not supported");
371		return -EINVAL;
372	}
373
374	xdo->dev = dev;
375	netdev_tracker_alloc(dev, &xdo->dev_tracker, GFP_ATOMIC);
376	xdo->real_dev = dev;
377	xdo->type = XFRM_DEV_OFFLOAD_PACKET;
378	switch (dir) {
379	case XFRM_POLICY_IN:
380		xdo->dir = XFRM_DEV_OFFLOAD_IN;
381		break;
382	case XFRM_POLICY_OUT:
383		xdo->dir = XFRM_DEV_OFFLOAD_OUT;
384		break;
385	case XFRM_POLICY_FWD:
386		xdo->dir = XFRM_DEV_OFFLOAD_FWD;
387		break;
388	default:
389		xdo->dev = NULL;
390		netdev_put(dev, &xdo->dev_tracker);
391		NL_SET_ERR_MSG(extack, "Unrecognized offload direction");
392		return -EINVAL;
393	}
394
395	err = dev->xfrmdev_ops->xdo_dev_policy_add(xp, extack);
396	if (err) {
397		xdo->dev = NULL;
398		xdo->real_dev = NULL;
399		xdo->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
400		xdo->dir = 0;
401		netdev_put(dev, &xdo->dev_tracker);
402		NL_SET_ERR_MSG_WEAK(extack, "Device failed to offload this policy");
403		return err;
404	}
405
406	return 0;
407}
408EXPORT_SYMBOL_GPL(xfrm_dev_policy_add);
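/* A driver opts into policy (packet mode) offload by implementing
 * xdo_dev_policy_add(), called above. A minimal stub for a
 * hypothetical "foo" driver (a sketch, not from any real driver):
 */
#if 0
static int foo_xdo_dev_policy_add(struct xfrm_policy *xp,
				  struct netlink_ext_ack *extack)
{
	/* xp->xdo.dir was set by xfrm_dev_policy_add() above */
	if (xp->xdo.dir != XFRM_DEV_OFFLOAD_OUT) {
		NL_SET_ERR_MSG(extack, "Only egress policies are offloaded");
		return -EOPNOTSUPP;
	}
	/* program the policy match and action into hardware here */
	return 0;
}
#endif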
409
410bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
411{
412	int mtu;
413	struct dst_entry *dst = skb_dst(skb);
414	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
415	struct net_device *dev = x->xso.dev;
416
417	if (!x->type_offload ||
418	    (x->xso.type == XFRM_DEV_OFFLOAD_UNSPECIFIED && x->encap))
419		return false;
420
421	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET ||
422	    ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
423	     !xdst->child->xfrm)) {
424		mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
425		if (skb->len <= mtu)
426			goto ok;
427
428		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
429			goto ok;
430	}
431
432	return false;
433
434ok:
435	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
436		return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);
437
438	return true;
439}
440EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
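/* Summary of the eligibility rules above: a state qualifies either
 * because it is packet offload, or because the packet leaves through
 * the offloading device itself with no nested xfrm transform wrapping
 * it. In both cases the packet must also fit the cached child MTU (or
 * be GSO-segmentable under that limit), and the driver gets a final
 * veto via its xdo_dev_offload_ok() callback.
 */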
441
442void xfrm_dev_resume(struct sk_buff *skb)
443{
444	struct net_device *dev = skb->dev;
445	int ret = NETDEV_TX_BUSY;
446	struct netdev_queue *txq;
447	struct softnet_data *sd;
448	unsigned long flags;
449
450	rcu_read_lock();
451	txq = netdev_core_pick_tx(dev, skb, NULL);
452
453	HARD_TX_LOCK(dev, txq, smp_processor_id());
454	if (!netif_xmit_frozen_or_stopped(txq))
455		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
456	HARD_TX_UNLOCK(dev, txq);
457
458	if (!dev_xmit_complete(ret)) {
459		local_irq_save(flags);
460		sd = this_cpu_ptr(&softnet_data);
461		skb_queue_tail(&sd->xfrm_backlog, skb);
462		raise_softirq_irqoff(NET_TX_SOFTIRQ);
463		local_irq_restore(flags);
464	}
465	rcu_read_unlock();
466}
467EXPORT_SYMBOL_GPL(xfrm_dev_resume);
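/* If the txq was frozen or stopped, or the driver reported anything
 * other than a completed transmit, the skb is parked on the per-CPU
 * xfrm_backlog and NET_TX_SOFTIRQ is raised; xfrm_dev_backlog() below
 * then replays the queued skbs through this function from softirq
 * context.
 */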
468
469void xfrm_dev_backlog(struct softnet_data *sd)
470{
471	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
472	struct sk_buff_head list;
473	struct sk_buff *skb;
474
475	if (skb_queue_empty(xfrm_backlog))
476		return;
477
478	__skb_queue_head_init(&list);
479
480	spin_lock(&xfrm_backlog->lock);
481	skb_queue_splice_init(xfrm_backlog, &list);
482	spin_unlock(&xfrm_backlog->lock);
483
484	while (!skb_queue_empty(&list)) {
485		skb = __skb_dequeue(&list);
486		xfrm_dev_resume(skb);
487	}
488
489}
490#endif
491
492static int xfrm_api_check(struct net_device *dev)
493{
494#ifdef CONFIG_XFRM_OFFLOAD
495	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
496	    !(dev->features & NETIF_F_HW_ESP))
497		return NOTIFY_BAD;
498
499	if ((dev->features & NETIF_F_HW_ESP) &&
500	    (!(dev->xfrmdev_ops &&
501	       dev->xfrmdev_ops->xdo_dev_state_add &&
502	       dev->xfrmdev_ops->xdo_dev_state_delete)))
503		return NOTIFY_BAD;
504#else
505	if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
506		return NOTIFY_BAD;
507#endif
508
509	return NOTIFY_DONE;
510}
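/* xfrm_api_check() enforces the driver-side contract: advertising
 * NETIF_F_HW_ESP requires at least the state add/delete callbacks.
 * Minimal wiring for a hypothetical "foo" driver (a sketch, not from
 * any real driver):
 */
#if 0
static const struct xfrmdev_ops foo_xfrmdev_ops = {
	.xdo_dev_state_add	= foo_xdo_dev_state_add,
	.xdo_dev_state_delete	= foo_xdo_dev_state_delete,
};

static void foo_probe_setup(struct net_device *netdev)
{
	netdev->xfrmdev_ops = &foo_xfrmdev_ops;
	netdev->features |= NETIF_F_HW_ESP;
}
#endif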
511
512static int xfrm_dev_down(struct net_device *dev)
513{
514	if (dev->features & NETIF_F_HW_ESP) {
515		xfrm_dev_state_flush(dev_net(dev), dev, true);
516		xfrm_dev_policy_flush(dev_net(dev), dev, true);
517	}
518
519	return NOTIFY_DONE;
520}
521
522static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
523{
524	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
525
526	switch (event) {
527	case NETDEV_REGISTER:
528		return xfrm_api_check(dev);
529
530	case NETDEV_FEAT_CHANGE:
531		return xfrm_api_check(dev);
532
533	case NETDEV_DOWN:
534	case NETDEV_UNREGISTER:
535		return xfrm_dev_down(dev);
536	}
537	return NOTIFY_DONE;
538}
539
540static struct notifier_block xfrm_dev_notifier = {
541	.notifier_call	= xfrm_dev_event,
542};
543
544void __init xfrm_dev_init(void)
545{
546	register_netdevice_notifier(&xfrm_dev_notifier);
547}