net/xfrm/xfrm_input.c, kernel v4.6
  1/*
  2 * xfrm_input.c
  3 *
  4 * Changes:
  5 * 	YOSHIFUJI Hideaki @USAGI
  6 * 		Split up af-specific portion
  7 *
  8 */
  9
 10#include <linux/slab.h>
 11#include <linux/module.h>
 12#include <linux/netdevice.h>
 13#include <net/dst.h>
 14#include <net/ip.h>
 15#include <net/xfrm.h>
 16#include <net/ip_tunnels.h>
 17#include <net/ip6_tunnel.h>
 18
 19static struct kmem_cache *secpath_cachep __read_mostly;
 20
 21static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
 22static struct xfrm_input_afinfo __rcu *xfrm_input_afinfo[NPROTO];
 23
 24int xfrm_input_register_afinfo(struct xfrm_input_afinfo *afinfo)
 25{
 26	int err = 0;
 27
 28	if (unlikely(afinfo == NULL))
 29		return -EINVAL;
 30	if (unlikely(afinfo->family >= NPROTO))
 31		return -EAFNOSUPPORT;
 32	spin_lock_bh(&xfrm_input_afinfo_lock);
 33	if (unlikely(xfrm_input_afinfo[afinfo->family] != NULL))
 34		err = -EEXIST;
 35	else
 36		rcu_assign_pointer(xfrm_input_afinfo[afinfo->family], afinfo);
 37	spin_unlock_bh(&xfrm_input_afinfo_lock);
 38	return err;
 39}
 40EXPORT_SYMBOL(xfrm_input_register_afinfo);
 41
 42int xfrm_input_unregister_afinfo(struct xfrm_input_afinfo *afinfo)
 43{
 44	int err = 0;
 45
 46	if (unlikely(afinfo == NULL))
 47		return -EINVAL;
 48	if (unlikely(afinfo->family >= NPROTO))
 49		return -EAFNOSUPPORT;
 50	spin_lock_bh(&xfrm_input_afinfo_lock);
 51	if (likely(xfrm_input_afinfo[afinfo->family] != NULL)) {
 52		if (unlikely(xfrm_input_afinfo[afinfo->family] != afinfo))
 53			err = -EINVAL;
 54		else
 55			RCU_INIT_POINTER(xfrm_input_afinfo[afinfo->family], NULL);
 56	}
 57	spin_unlock_bh(&xfrm_input_afinfo_lock);
 58	synchronize_rcu();
 59	return err;
 60}
 61EXPORT_SYMBOL(xfrm_input_unregister_afinfo);
 62
 63static struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family)
 64{
 65	struct xfrm_input_afinfo *afinfo;
 66
 67	if (unlikely(family >= NPROTO))
 68		return NULL;
 69	rcu_read_lock();
 70	afinfo = rcu_dereference(xfrm_input_afinfo[family]);
 71	if (unlikely(!afinfo))
 72		rcu_read_unlock();
 73	return afinfo;
 74}
 75
 76static void xfrm_input_put_afinfo(struct xfrm_input_afinfo *afinfo)
 77{
 78	rcu_read_unlock();
 79}
 80
 81static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
 82		       int err)
 83{
 84	int ret;
 85	struct xfrm_input_afinfo *afinfo = xfrm_input_get_afinfo(family);
 86
 87	if (!afinfo)
 88		return -EAFNOSUPPORT;
 89
 90	ret = afinfo->callback(skb, protocol, err);
 91	xfrm_input_put_afinfo(afinfo);
 92
 93	return ret;
 94}
 95
 96void __secpath_destroy(struct sec_path *sp)
 97{
 98	int i;
 99	for (i = 0; i < sp->len; i++)
100		xfrm_state_put(sp->xvec[i]);
101	kmem_cache_free(secpath_cachep, sp);
102}
103EXPORT_SYMBOL(__secpath_destroy);
104
105struct sec_path *secpath_dup(struct sec_path *src)
106{
107	struct sec_path *sp;
108
109	sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC);
110	if (!sp)
111		return NULL;
112
113	sp->len = 0;
114	if (src) {
115		int i;
116
117		memcpy(sp, src, sizeof(*sp));
118		for (i = 0; i < sp->len; i++)
119			xfrm_state_hold(sp->xvec[i]);
120	}
121	atomic_set(&sp->refcnt, 1);
122	return sp;
123}
124EXPORT_SYMBOL(secpath_dup);
125
126/* Fetch spi and seq from ipsec header */
127
128int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
129{
130	int offset, offset_seq;
131	int hlen;
132
133	switch (nexthdr) {
134	case IPPROTO_AH:
135		hlen = sizeof(struct ip_auth_hdr);
136		offset = offsetof(struct ip_auth_hdr, spi);
137		offset_seq = offsetof(struct ip_auth_hdr, seq_no);
138		break;
139	case IPPROTO_ESP:
140		hlen = sizeof(struct ip_esp_hdr);
141		offset = offsetof(struct ip_esp_hdr, spi);
142		offset_seq = offsetof(struct ip_esp_hdr, seq_no);
143		break;
144	case IPPROTO_COMP:
145		if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
146			return -EINVAL;
147		*spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
148		*seq = 0;
149		return 0;
150	default:
151		return 1;
152	}
153
154	if (!pskb_may_pull(skb, hlen))
155		return -EINVAL;
156
157	*spi = *(__be32 *)(skb_transport_header(skb) + offset);
158	*seq = *(__be32 *)(skb_transport_header(skb) + offset_seq);
159	return 0;
160}
161
162int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
163{
164	struct xfrm_mode *inner_mode = x->inner_mode;
165	int err;
166
167	err = x->outer_mode->afinfo->extract_input(x, skb);
168	if (err)
169		return err;
170
171	if (x->sel.family == AF_UNSPEC) {
172		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
173		if (inner_mode == NULL)
174			return -EAFNOSUPPORT;
175	}
176
177	skb->protocol = inner_mode->afinfo->eth_proto;
178	return inner_mode->input2(x, skb);
179}
180EXPORT_SYMBOL(xfrm_prepare_input);
181
182int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
183{
184	struct net *net = dev_net(skb->dev);
185	int err;
186	__be32 seq;
187	__be32 seq_hi;
188	struct xfrm_state *x = NULL;
189	xfrm_address_t *daddr;
190	struct xfrm_mode *inner_mode;
191	u32 mark = skb->mark;
192	unsigned int family;
193	int decaps = 0;
194	int async = 0;
195
196	/* A negative encap_type indicates async resumption. */
197	if (encap_type < 0) {
198		async = 1;
199		x = xfrm_input_state(skb);
200		seq = XFRM_SKB_CB(skb)->seq.input.low;
201		family = x->outer_mode->afinfo->family;
202		goto resume;
203	}
204
205	daddr = (xfrm_address_t *)(skb_network_header(skb) +
206				   XFRM_SPI_SKB_CB(skb)->daddroff);
207	family = XFRM_SPI_SKB_CB(skb)->family;
208
209	/* if tunnel is present override skb->mark value with tunnel i_key */
210	if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) {
211		switch (family) {
212		case AF_INET:
213			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
214			break;
215		case AF_INET6:
216			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
217			break;
218		}
219	}
220
221	/* Allocate new secpath or COW existing one. */
222	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
223		struct sec_path *sp;
224
225		sp = secpath_dup(skb->sp);
226		if (!sp) {
227			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
228			goto drop;
229		}
230		if (skb->sp)
231			secpath_put(skb->sp);
232		skb->sp = sp;
233	}
234
235	seq = 0;
236	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
237		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
238		goto drop;
239	}
240
241	do {
242		if (skb->sp->len == XFRM_MAX_DEPTH) {
243			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
244			goto drop;
245		}
246
247		x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
248		if (x == NULL) {
249			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
250			xfrm_audit_state_notfound(skb, family, spi, seq);
251			goto drop;
252		}
253
254		skb->sp->xvec[skb->sp->len++] = x;
255
256		spin_lock(&x->lock);
257
258		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
259			if (x->km.state == XFRM_STATE_ACQ)
260				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
261			else
262				XFRM_INC_STATS(net,
263					       LINUX_MIB_XFRMINSTATEINVALID);
264			goto drop_unlock;
265		}
266
267		if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
268			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
269			goto drop_unlock;
270		}
271
272		if (x->repl->check(x, skb, seq)) {
273			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
274			goto drop_unlock;
275		}
276
277		if (xfrm_state_check_expire(x)) {
278			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
279			goto drop_unlock;
280		}
281
282		spin_unlock(&x->lock);
283
284		if (xfrm_tunnel_check(skb, x, family)) {
285			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
286			goto drop;
287		}
288
289		seq_hi = htonl(xfrm_replay_seqhi(x, seq));
290
291		XFRM_SKB_CB(skb)->seq.input.low = seq;
292		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
293
294		skb_dst_force(skb);
295		dev_hold(skb->dev);
296
297		nexthdr = x->type->input(x, skb);
298
299		if (nexthdr == -EINPROGRESS)
300			return 0;
301resume:
302		dev_put(skb->dev);
303
304		spin_lock(&x->lock);
305		if (nexthdr <= 0) {
306			if (nexthdr == -EBADMSG) {
307				xfrm_audit_state_icvfail(x, skb,
308							 x->type->proto);
309				x->stats.integrity_failed++;
310			}
311			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
312			goto drop_unlock;
313		}
314
315		/* only the first xfrm gets the encap type */
316		encap_type = 0;
317
318		if (async && x->repl->recheck(x, skb, seq)) {
319			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
320			goto drop_unlock;
321		}
322
323		x->repl->advance(x, seq);
324
325		x->curlft.bytes += skb->len;
326		x->curlft.packets++;
327
328		spin_unlock(&x->lock);
329
330		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;
331
332		inner_mode = x->inner_mode;
333
334		if (x->sel.family == AF_UNSPEC) {
335			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
336			if (inner_mode == NULL) {
337				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
338				goto drop;
339			}
340		}
341
342		if (inner_mode->input(x, skb)) {
343			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
344			goto drop;
345		}
346
347		if (x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) {
348			decaps = 1;
349			break;
350		}
351
352		/*
353		 * We need the inner address.  However, we only get here for
354		 * transport mode so the outer address is identical.
355		 */
356		daddr = &x->id.daddr;
357		family = x->outer_mode->afinfo->family;
358
359		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
360		if (err < 0) {
361			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
362			goto drop;
363		}
364	} while (!err);
365
366	err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
367	if (err)
368		goto drop;
369
370	nf_reset(skb);
371
372	if (decaps) {
373		skb_dst_drop(skb);
374		netif_rx(skb);
375		return 0;
376	} else {
377		return x->inner_mode->afinfo->transport_finish(skb, async);
378	}
379
380drop_unlock:
381	spin_unlock(&x->lock);
382drop:
383	xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
384	kfree_skb(skb);
385	return 0;
386}
387EXPORT_SYMBOL(xfrm_input);
388
389int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
390{
391	return xfrm_input(skb, nexthdr, 0, -1);
392}
393EXPORT_SYMBOL(xfrm_input_resume);
394
395void __init xfrm_input_init(void)
396{
397	secpath_cachep = kmem_cache_create("secpath_cache",
398					   sizeof(struct sec_path),
399					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
400					   NULL);
401}
net/xfrm/xfrm_input.c, kernel v3.15
  1/*
  2 * xfrm_input.c
  3 *
  4 * Changes:
  5 * 	YOSHIFUJI Hideaki @USAGI
  6 * 		Split up af-specific portion
  7 *
  8 */
  9
 10#include <linux/slab.h>
 11#include <linux/module.h>
 12#include <linux/netdevice.h>
 13#include <net/dst.h>
 14#include <net/ip.h>
 15#include <net/xfrm.h>
 16
 17static struct kmem_cache *secpath_cachep __read_mostly;
 18
 19static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
 20static struct xfrm_input_afinfo __rcu *xfrm_input_afinfo[NPROTO];
 21
 22int xfrm_input_register_afinfo(struct xfrm_input_afinfo *afinfo)
 23{
 24	int err = 0;
 25
 26	if (unlikely(afinfo == NULL))
 27		return -EINVAL;
 28	if (unlikely(afinfo->family >= NPROTO))
 29		return -EAFNOSUPPORT;
 30	spin_lock_bh(&xfrm_input_afinfo_lock);
 31	if (unlikely(xfrm_input_afinfo[afinfo->family] != NULL))
 32		err = -ENOBUFS;
 33	else
 34		rcu_assign_pointer(xfrm_input_afinfo[afinfo->family], afinfo);
 35	spin_unlock_bh(&xfrm_input_afinfo_lock);
 36	return err;
 37}
 38EXPORT_SYMBOL(xfrm_input_register_afinfo);
 39
 40int xfrm_input_unregister_afinfo(struct xfrm_input_afinfo *afinfo)
 41{
 42	int err = 0;
 43
 44	if (unlikely(afinfo == NULL))
 45		return -EINVAL;
 46	if (unlikely(afinfo->family >= NPROTO))
 47		return -EAFNOSUPPORT;
 48	spin_lock_bh(&xfrm_input_afinfo_lock);
 49	if (likely(xfrm_input_afinfo[afinfo->family] != NULL)) {
 50		if (unlikely(xfrm_input_afinfo[afinfo->family] != afinfo))
 51			err = -EINVAL;
 52		else
 53			RCU_INIT_POINTER(xfrm_input_afinfo[afinfo->family], NULL);
 54	}
 55	spin_unlock_bh(&xfrm_input_afinfo_lock);
 56	synchronize_rcu();
 57	return err;
 58}
 59EXPORT_SYMBOL(xfrm_input_unregister_afinfo);
 60
 61static struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family)
 62{
 63	struct xfrm_input_afinfo *afinfo;
 64
 65	if (unlikely(family >= NPROTO))
 66		return NULL;
 67	rcu_read_lock();
 68	afinfo = rcu_dereference(xfrm_input_afinfo[family]);
 69	if (unlikely(!afinfo))
 70		rcu_read_unlock();
 71	return afinfo;
 72}
 73
 74static void xfrm_input_put_afinfo(struct xfrm_input_afinfo *afinfo)
 75{
 76	rcu_read_unlock();
 77}
 78
 79static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
 80		       int err)
 81{
 82	int ret;
 83	struct xfrm_input_afinfo *afinfo = xfrm_input_get_afinfo(family);
 84
 85	if (!afinfo)
 86		return -EAFNOSUPPORT;
 87
 88	ret = afinfo->callback(skb, protocol, err);
 89	xfrm_input_put_afinfo(afinfo);
 90
 91	return ret;
 92}
 93
 94void __secpath_destroy(struct sec_path *sp)
 95{
 96	int i;
 97	for (i = 0; i < sp->len; i++)
 98		xfrm_state_put(sp->xvec[i]);
 99	kmem_cache_free(secpath_cachep, sp);
100}
101EXPORT_SYMBOL(__secpath_destroy);
102
103struct sec_path *secpath_dup(struct sec_path *src)
104{
105	struct sec_path *sp;
106
107	sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC);
108	if (!sp)
109		return NULL;
110
111	sp->len = 0;
112	if (src) {
113		int i;
114
115		memcpy(sp, src, sizeof(*sp));
116		for (i = 0; i < sp->len; i++)
117			xfrm_state_hold(sp->xvec[i]);
118	}
119	atomic_set(&sp->refcnt, 1);
120	return sp;
121}
122EXPORT_SYMBOL(secpath_dup);
123
124/* Fetch spi and seq from ipsec header */
125
126int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
127{
128	int offset, offset_seq;
129	int hlen;
130
131	switch (nexthdr) {
132	case IPPROTO_AH:
133		hlen = sizeof(struct ip_auth_hdr);
134		offset = offsetof(struct ip_auth_hdr, spi);
135		offset_seq = offsetof(struct ip_auth_hdr, seq_no);
136		break;
137	case IPPROTO_ESP:
138		hlen = sizeof(struct ip_esp_hdr);
139		offset = offsetof(struct ip_esp_hdr, spi);
140		offset_seq = offsetof(struct ip_esp_hdr, seq_no);
141		break;
142	case IPPROTO_COMP:
143		if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
144			return -EINVAL;
145		*spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
146		*seq = 0;
147		return 0;
148	default:
149		return 1;
150	}
151
152	if (!pskb_may_pull(skb, hlen))
153		return -EINVAL;
154
155	*spi = *(__be32 *)(skb_transport_header(skb) + offset);
156	*seq = *(__be32 *)(skb_transport_header(skb) + offset_seq);
157	return 0;
158}
159
160int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
161{
162	struct xfrm_mode *inner_mode = x->inner_mode;
163	int err;
164
165	err = x->outer_mode->afinfo->extract_input(x, skb);
166	if (err)
167		return err;
168
169	if (x->sel.family == AF_UNSPEC) {
170		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
171		if (inner_mode == NULL)
172			return -EAFNOSUPPORT;
173	}
174
175	skb->protocol = inner_mode->afinfo->eth_proto;
176	return inner_mode->input2(x, skb);
177}
178EXPORT_SYMBOL(xfrm_prepare_input);
179
180int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
181{
182	struct net *net = dev_net(skb->dev);
183	int err;
184	__be32 seq;
185	__be32 seq_hi;
186	struct xfrm_state *x = NULL;
187	xfrm_address_t *daddr;
188	struct xfrm_mode *inner_mode;
189	unsigned int family;
190	int decaps = 0;
191	int async = 0;
192
193	/* A negative encap_type indicates async resumption. */
194	if (encap_type < 0) {
195		async = 1;
196		x = xfrm_input_state(skb);
197		seq = XFRM_SKB_CB(skb)->seq.input.low;
198		family = x->outer_mode->afinfo->family;
199		goto resume;
200	}
201
202	daddr = (xfrm_address_t *)(skb_network_header(skb) +
203				   XFRM_SPI_SKB_CB(skb)->daddroff);
204	family = XFRM_SPI_SKB_CB(skb)->family;
205
206	/* Allocate new secpath or COW existing one. */
207	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
208		struct sec_path *sp;
209
210		sp = secpath_dup(skb->sp);
211		if (!sp) {
212			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
213			goto drop;
214		}
215		if (skb->sp)
216			secpath_put(skb->sp);
217		skb->sp = sp;
218	}
219
220	seq = 0;
221	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
222		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
223		goto drop;
224	}
225
226	do {
227		if (skb->sp->len == XFRM_MAX_DEPTH) {
228			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
229			goto drop;
230		}
231
232		x = xfrm_state_lookup(net, skb->mark, daddr, spi, nexthdr, family);
233		if (x == NULL) {
234			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
235			xfrm_audit_state_notfound(skb, family, spi, seq);
236			goto drop;
237		}
238
239		skb->sp->xvec[skb->sp->len++] = x;
240
241		if (xfrm_tunnel_check(skb, x, family)) {
242			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
243			goto drop;
244		}
245
246		spin_lock(&x->lock);
247		if (unlikely(x->km.state == XFRM_STATE_ACQ)) {
248			XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
249			goto drop_unlock;
250		}
251
252		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
253			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEINVALID);
254			goto drop_unlock;
255		}
256
257		if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
258			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
259			goto drop_unlock;
260		}
261
262		if (x->repl->check(x, skb, seq)) {
263			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
264			goto drop_unlock;
265		}
266
267		if (xfrm_state_check_expire(x)) {
268			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
269			goto drop_unlock;
270		}
271
272		spin_unlock(&x->lock);
273
274		seq_hi = htonl(xfrm_replay_seqhi(x, seq));
275
276		XFRM_SKB_CB(skb)->seq.input.low = seq;
277		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
278
279		skb_dst_force(skb);
280
281		nexthdr = x->type->input(x, skb);
282
283		if (nexthdr == -EINPROGRESS)
284			return 0;
285resume:
286		spin_lock(&x->lock);
287		if (nexthdr <= 0) {
288			if (nexthdr == -EBADMSG) {
289				xfrm_audit_state_icvfail(x, skb,
290							 x->type->proto);
291				x->stats.integrity_failed++;
292			}
293			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
294			goto drop_unlock;
295		}
296
297		/* only the first xfrm gets the encap type */
298		encap_type = 0;
299
300		if (async && x->repl->recheck(x, skb, seq)) {
301			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
302			goto drop_unlock;
303		}
304
305		x->repl->advance(x, seq);
306
307		x->curlft.bytes += skb->len;
308		x->curlft.packets++;
309
310		spin_unlock(&x->lock);
311
312		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;
313
314		inner_mode = x->inner_mode;
315
316		if (x->sel.family == AF_UNSPEC) {
317			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
318			if (inner_mode == NULL)
319				goto drop;
320		}
321
322		if (inner_mode->input(x, skb)) {
323			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
324			goto drop;
325		}
326
327		if (x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) {
328			decaps = 1;
329			break;
330		}
331
332		/*
333		 * We need the inner address.  However, we only get here for
334		 * transport mode so the outer address is identical.
335		 */
336		daddr = &x->id.daddr;
337		family = x->outer_mode->afinfo->family;
338
339		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
340		if (err < 0) {
341			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
342			goto drop;
343		}
344	} while (!err);
345
346	err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
347	if (err)
348		goto drop;
349
350	nf_reset(skb);
351
352	if (decaps) {
353		skb_dst_drop(skb);
354		netif_rx(skb);
355		return 0;
356	} else {
357		return x->inner_mode->afinfo->transport_finish(skb, async);
358	}
359
360drop_unlock:
361	spin_unlock(&x->lock);
362drop:
363	xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
364	kfree_skb(skb);
365	return 0;
366}
367EXPORT_SYMBOL(xfrm_input);
368
369int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
370{
371	return xfrm_input(skb, nexthdr, 0, -1);
372}
373EXPORT_SYMBOL(xfrm_input_resume);
374
375void __init xfrm_input_init(void)
376{
377	secpath_cachep = kmem_cache_create("secpath_cache",
378					   sizeof(struct sec_path),
379					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
380					   NULL);
381}
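
For context, the xfrm_input_register_afinfo()/xfrm_input_unregister_afinfo() pair at the top of both versions is how per-address-family code hands xfrm_rcv_cb() its post-input callback. The sketch below is a minimal, hypothetical caller, not actual kernel code: it relies only on the struct fields this file itself dereferences (family, callback), treats the conventional module owner field as an assumption, and uses a placeholder callback body.

#include <linux/module.h>
#include <net/xfrm.h>

/*
 * Hypothetical per-family module hooking into the registration API above.
 * Only .family and .callback are dereferenced by xfrm_input.c; .owner is
 * assumed from the usual kernel convention.
 */
static int example_xfrm_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
{
	/* Invoked via xfrm_rcv_cb(): err is 0 on the normal path and
	 * negative when xfrm_input() is dropping the packet. */
	return 0;
}

static struct xfrm_input_afinfo example_afinfo = {
	.family   = AF_INET,
	.owner    = THIS_MODULE,	/* assumed field */
	.callback = example_xfrm_rcv_cb,
};

static int __init example_init(void)
{
	/* Fails with -EEXIST (v4.6) or -ENOBUFS (v3.15) if AF_INET is
	 * already registered. */
	return xfrm_input_register_afinfo(&example_afinfo);
}

static void __exit example_exit(void)
{
	xfrm_input_unregister_afinfo(&example_afinfo);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");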