v3.5.6
/*
 * xfrm_input.c
 *
 * Changes:
 * 	YOSHIFUJI Hideaki @USAGI
 * 		Split up af-specific portion
 *
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>

static struct kmem_cache *secpath_cachep __read_mostly;

void __secpath_destroy(struct sec_path *sp)
{
	int i;
	for (i = 0; i < sp->len; i++)
		xfrm_state_put(sp->xvec[i]);
	kmem_cache_free(secpath_cachep, sp);
}
EXPORT_SYMBOL(__secpath_destroy);

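/* Duplicate a secpath, taking a reference on each xfrm_state it carries;
 * allocates a fresh, empty secpath when @src is NULL.
 */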
struct sec_path *secpath_dup(struct sec_path *src)
{
	struct sec_path *sp;

	sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC);
	if (!sp)
		return NULL;

	sp->len = 0;
	if (src) {
		int i;

		memcpy(sp, src, sizeof(*sp));
		for (i = 0; i < sp->len; i++)
			xfrm_state_hold(sp->xvec[i]);
	}
	atomic_set(&sp->refcnt, 1);
	return sp;
}
EXPORT_SYMBOL(secpath_dup);

/* Fetch spi and seq from ipsec header */

int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
{
	int offset, offset_seq;
	int hlen;

	switch (nexthdr) {
	case IPPROTO_AH:
		hlen = sizeof(struct ip_auth_hdr);
		offset = offsetof(struct ip_auth_hdr, spi);
		offset_seq = offsetof(struct ip_auth_hdr, seq_no);
		break;
	case IPPROTO_ESP:
		hlen = sizeof(struct ip_esp_hdr);
		offset = offsetof(struct ip_esp_hdr, spi);
		offset_seq = offsetof(struct ip_esp_hdr, seq_no);
		break;
	case IPPROTO_COMP:
		if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
			return -EINVAL;
		*spi = htonl(ntohs(*(__be16*)(skb_transport_header(skb) + 2)));
		*seq = 0;
		return 0;
	default:
		return 1;
	}

	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;

	*spi = *(__be32*)(skb_transport_header(skb) + offset);
	*seq = *(__be32*)(skb_transport_header(skb) + offset_seq);
	return 0;
}

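/* Resolve the inner mode for this state (needed when the selector family is
 * AF_UNSPEC, e.g. for inter-family tunnels), set skb->protocol accordingly
 * and hand the packet to that mode's input2 handler.
 */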
int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct xfrm_mode *inner_mode = x->inner_mode;
	int err;

	err = x->outer_mode->afinfo->extract_input(x, skb);
	if (err)
		return err;

	if (x->sel.family == AF_UNSPEC) {
		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
		if (inner_mode == NULL)
			return -EAFNOSUPPORT;
	}

	skb->protocol = inner_mode->afinfo->eth_proto;
	return inner_mode->input2(x, skb);
}
EXPORT_SYMBOL(xfrm_prepare_input);

int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
	struct net *net = dev_net(skb->dev);
	int err;
	__be32 seq;
	__be32 seq_hi;
	struct xfrm_state *x;
	xfrm_address_t *daddr;
	struct xfrm_mode *inner_mode;
	unsigned int family;
	int decaps = 0;
	int async = 0;

	/* A negative encap_type indicates async resumption. */
	if (encap_type < 0) {
		async = 1;
		x = xfrm_input_state(skb);
		seq = XFRM_SKB_CB(skb)->seq.input.low;
		goto resume;
	}

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
			goto drop;
		}
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}

	daddr = (xfrm_address_t *)(skb_network_header(skb) +
				   XFRM_SPI_SKB_CB(skb)->daddroff);
	family = XFRM_SPI_SKB_CB(skb)->family;

	seq = 0;
	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		goto drop;
	}

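	/* Walk the chain of transforms covering this packet: look up each
	 * state by (daddr, spi, proto), verify and decapsulate it, then
	 * continue with the next header until a tunnel is reached or no
	 * further IPsec header follows.
	 */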
	do {
		if (skb->sp->len == XFRM_MAX_DEPTH) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
			goto drop;
		}

		x = xfrm_state_lookup(net, skb->mark, daddr, spi, nexthdr, family);
		if (x == NULL) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
			xfrm_audit_state_notfound(skb, family, spi, seq);
			goto drop;
		}

		skb->sp->xvec[skb->sp->len++] = x;

		spin_lock(&x->lock);
		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEINVALID);
			goto drop_unlock;
		}

		if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
			goto drop_unlock;
		}

		if (x->repl->check(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		if (xfrm_state_check_expire(x)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
			goto drop_unlock;
		}

		spin_unlock(&x->lock);

		seq_hi = htonl(xfrm_replay_seqhi(x, seq));

		XFRM_SKB_CB(skb)->seq.input.low = seq;
		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;

		skb_dst_force(skb);

		nexthdr = x->type->input(x, skb);

		if (nexthdr == -EINPROGRESS)
			return 0;

resume:
		spin_lock(&x->lock);
		if (nexthdr <= 0) {
			if (nexthdr == -EBADMSG) {
				xfrm_audit_state_icvfail(x, skb,
							 x->type->proto);
				x->stats.integrity_failed++;
			}
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
			goto drop_unlock;
		}

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (async && x->repl->check(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		x->repl->advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

		inner_mode = x->inner_mode;

		if (x->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
			if (inner_mode == NULL)
				goto drop;
		}

		if (inner_mode->input(x, skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		if (x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) {
			decaps = 1;
			break;
		}

		/*
		 * We need the inner address.  However, we only get here for
		 * transport mode so the outer address is identical.
		 */
		daddr = &x->id.daddr;
		family = x->outer_mode->afinfo->family;

		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
		if (err < 0) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
			goto drop;
		}
	} while (!err);

	nf_reset(skb);

	if (decaps) {
		skb_dst_drop(skb);
		netif_rx(skb);
		return 0;
	} else {
		return x->inner_mode->afinfo->transport_finish(skb, async);
	}

drop_unlock:
	spin_unlock(&x->lock);
drop:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(xfrm_input);

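/* Re-enter xfrm_input() after asynchronous crypto has completed; the
 * negative encap_type (-1) selects the resume path above.
 */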
int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
{
	return xfrm_input(skb, nexthdr, 0, -1);
}
EXPORT_SYMBOL(xfrm_input_resume);

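/* Set up the slab cache from which sec_path structures are allocated. */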
void __init xfrm_input_init(void)
{
	secpath_cachep = kmem_cache_create("secpath_cache",
					   sizeof(struct sec_path),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);
}

v4.6
/*
 * xfrm_input.c
 *
 * Changes:
 * 	YOSHIFUJI Hideaki @USAGI
 * 		Split up af-specific portion
 *
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>

static struct kmem_cache *secpath_cachep __read_mostly;

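/* Per-family table of input callbacks (struct xfrm_input_afinfo); updates
 * are serialized by xfrm_input_afinfo_lock, lookups are done under RCU.
 */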
static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
static struct xfrm_input_afinfo __rcu *xfrm_input_afinfo[NPROTO];

int xfrm_input_register_afinfo(struct xfrm_input_afinfo *afinfo)
{
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	spin_lock_bh(&xfrm_input_afinfo_lock);
	if (unlikely(xfrm_input_afinfo[afinfo->family] != NULL))
		err = -EEXIST;
	else
		rcu_assign_pointer(xfrm_input_afinfo[afinfo->family], afinfo);
	spin_unlock_bh(&xfrm_input_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_input_register_afinfo);

int xfrm_input_unregister_afinfo(struct xfrm_input_afinfo *afinfo)
{
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	spin_lock_bh(&xfrm_input_afinfo_lock);
	if (likely(xfrm_input_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_input_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else
			RCU_INIT_POINTER(xfrm_input_afinfo[afinfo->family], NULL);
	}
	spin_unlock_bh(&xfrm_input_afinfo_lock);
	synchronize_rcu();
	return err;
}
EXPORT_SYMBOL(xfrm_input_unregister_afinfo);

static struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family)
{
	struct xfrm_input_afinfo *afinfo;

	if (unlikely(family >= NPROTO))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_input_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

static void xfrm_input_put_afinfo(struct xfrm_input_afinfo *afinfo)
{
	rcu_read_unlock();
}

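/* Notify the address-family specific layer, via its registered callback,
 * that a packet has finished IPsec input processing (err == 0) or is about
 * to be dropped (err < 0).
 */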
static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
		       int err)
{
	int ret;
	struct xfrm_input_afinfo *afinfo = xfrm_input_get_afinfo(family);

	if (!afinfo)
		return -EAFNOSUPPORT;

	ret = afinfo->callback(skb, protocol, err);
	xfrm_input_put_afinfo(afinfo);

	return ret;
}

void __secpath_destroy(struct sec_path *sp)
{
	int i;
	for (i = 0; i < sp->len; i++)
		xfrm_state_put(sp->xvec[i]);
	kmem_cache_free(secpath_cachep, sp);
}
EXPORT_SYMBOL(__secpath_destroy);

struct sec_path *secpath_dup(struct sec_path *src)
{
	struct sec_path *sp;

	sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC);
	if (!sp)
		return NULL;

	sp->len = 0;
	if (src) {
		int i;

		memcpy(sp, src, sizeof(*sp));
		for (i = 0; i < sp->len; i++)
			xfrm_state_hold(sp->xvec[i]);
	}
	atomic_set(&sp->refcnt, 1);
	return sp;
}
EXPORT_SYMBOL(secpath_dup);

/* Fetch spi and seq from ipsec header */

int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
{
	int offset, offset_seq;
	int hlen;

	switch (nexthdr) {
	case IPPROTO_AH:
		hlen = sizeof(struct ip_auth_hdr);
		offset = offsetof(struct ip_auth_hdr, spi);
		offset_seq = offsetof(struct ip_auth_hdr, seq_no);
		break;
	case IPPROTO_ESP:
		hlen = sizeof(struct ip_esp_hdr);
		offset = offsetof(struct ip_esp_hdr, spi);
		offset_seq = offsetof(struct ip_esp_hdr, seq_no);
		break;
	case IPPROTO_COMP:
		if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
			return -EINVAL;
		*spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
		*seq = 0;
		return 0;
	default:
		return 1;
	}

	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;

	*spi = *(__be32 *)(skb_transport_header(skb) + offset);
	*seq = *(__be32 *)(skb_transport_header(skb) + offset_seq);
	return 0;
}

int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct xfrm_mode *inner_mode = x->inner_mode;
	int err;

	err = x->outer_mode->afinfo->extract_input(x, skb);
	if (err)
		return err;

	if (x->sel.family == AF_UNSPEC) {
		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
		if (inner_mode == NULL)
			return -EAFNOSUPPORT;
	}

	skb->protocol = inner_mode->afinfo->eth_proto;
	return inner_mode->input2(x, skb);
}
EXPORT_SYMBOL(xfrm_prepare_input);

int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
	struct net *net = dev_net(skb->dev);
	int err;
	__be32 seq;
	__be32 seq_hi;
	struct xfrm_state *x = NULL;
	xfrm_address_t *daddr;
	struct xfrm_mode *inner_mode;
	u32 mark = skb->mark;
	unsigned int family;
	int decaps = 0;
	int async = 0;

	/* A negative encap_type indicates async resumption. */
	if (encap_type < 0) {
		async = 1;
		x = xfrm_input_state(skb);
		seq = XFRM_SKB_CB(skb)->seq.input.low;
		family = x->outer_mode->afinfo->family;
		goto resume;
	}

	daddr = (xfrm_address_t *)(skb_network_header(skb) +
				   XFRM_SPI_SKB_CB(skb)->daddroff);
	family = XFRM_SPI_SKB_CB(skb)->family;

	/* if tunnel is present override skb->mark value with tunnel i_key */
	if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) {
		switch (family) {
		case AF_INET:
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
			break;
		case AF_INET6:
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
			break;
		}
	}

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
			goto drop;
		}
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}

	seq = 0;
	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		goto drop;
	}

	do {
		if (skb->sp->len == XFRM_MAX_DEPTH) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
			goto drop;
		}

		x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
		if (x == NULL) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
			xfrm_audit_state_notfound(skb, family, spi, seq);
			goto drop;
		}

		skb->sp->xvec[skb->sp->len++] = x;

		spin_lock(&x->lock);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			if (x->km.state == XFRM_STATE_ACQ)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
			else
				XFRM_INC_STATS(net,
					       LINUX_MIB_XFRMINSTATEINVALID);
			goto drop_unlock;
		}

		if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
			goto drop_unlock;
		}

		if (x->repl->check(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		if (xfrm_state_check_expire(x)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
			goto drop_unlock;
		}

		spin_unlock(&x->lock);

		if (xfrm_tunnel_check(skb, x, family)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		seq_hi = htonl(xfrm_replay_seqhi(x, seq));

		XFRM_SKB_CB(skb)->seq.input.low = seq;
		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;

		skb_dst_force(skb);
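		/* Hold the input device across the transform: x->type->input()
		 * may complete asynchronously, and the reference is dropped
		 * again at the resume label.
		 */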
		dev_hold(skb->dev);

		nexthdr = x->type->input(x, skb);

		if (nexthdr == -EINPROGRESS)
			return 0;
resume:
		dev_put(skb->dev);

		spin_lock(&x->lock);
		if (nexthdr <= 0) {
			if (nexthdr == -EBADMSG) {
				xfrm_audit_state_icvfail(x, skb,
							 x->type->proto);
				x->stats.integrity_failed++;
			}
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
			goto drop_unlock;
		}

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (async && x->repl->recheck(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		x->repl->advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

		inner_mode = x->inner_mode;

		if (x->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
			if (inner_mode == NULL) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
				goto drop;
			}
		}

		if (inner_mode->input(x, skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		if (x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) {
			decaps = 1;
			break;
		}

		/*
		 * We need the inner address.  However, we only get here for
		 * transport mode so the outer address is identical.
		 */
		daddr = &x->id.daddr;
		family = x->outer_mode->afinfo->family;

		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
		if (err < 0) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
			goto drop;
		}
	} while (!err);

	err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
	if (err)
		goto drop;

	nf_reset(skb);

	if (decaps) {
		skb_dst_drop(skb);
		netif_rx(skb);
		return 0;
	} else {
		return x->inner_mode->afinfo->transport_finish(skb, async);
	}

drop_unlock:
	spin_unlock(&x->lock);
drop:
	xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(xfrm_input);

int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
{
	return xfrm_input(skb, nexthdr, 0, -1);
}
EXPORT_SYMBOL(xfrm_input_resume);

void __init xfrm_input_init(void)
{
	secpath_cachep = kmem_cache_create("secpath_cache",
					   sizeof(struct sec_path),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);
}