/*
 * Copyright (C)2003,2004 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors	Mitsuru KANDA <mk@linux-ipv6.org>
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 * Based on net/ipv4/xfrm4_tunnel.c
 *
 */
#include <linux/module.h>
#include <linux/xfrm.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ipv6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/mutex.h>
#include <net/netns/generic.h>

#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256

#define XFRM6_TUNNEL_SPI_MIN	1
#define XFRM6_TUNNEL_SPI_MAX	0xffffffff

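/*
 * Per-network-namespace state: the SPIs handed out so far, hashed both
 * by outer source address and by SPI value, plus the last SPI that was
 * allocated (used as the starting point for the next search).
 */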
struct xfrm6_tunnel_net {
	struct hlist_head spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
	struct hlist_head spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
	u32 spi;
};

static unsigned int xfrm6_tunnel_net_id __read_mostly;
static inline struct xfrm6_tunnel_net *xfrm6_tunnel_pernet(struct net *net)
{
	return net_generic(net, xfrm6_tunnel_net_id);
}

/*
 * xfrm6_tunnel_spi entries allocate a unique id ("spi") per
 * xfrm_address_t.
 */
struct xfrm6_tunnel_spi {
	struct hlist_node	list_byaddr;
	struct hlist_node	list_byspi;
	xfrm_address_t		addr;
	u32			spi;
	refcount_t		refcnt;
	struct rcu_head		rcu_head;
};

static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock);

static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;

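/*
 * Hash helpers: fold a 32-bit hash of the IPv6 address down to an index
 * into the 256-bucket by-address table, and map an SPI value onto the
 * by-SPI table by simple modulo.
 */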
static inline unsigned int xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr)
{
	unsigned int h;

	h = ipv6_addr_hash((const struct in6_addr *)addr);
	h ^= h >> 16;
	h ^= h >> 8;
	h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1;

	return h;
}

static inline unsigned int xfrm6_tunnel_spi_hash_byspi(u32 spi)
{
	return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
}

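/*
 * Walk the by-address hash chain for @saddr.  Runs under RCU; callers
 * hold either rcu_read_lock_bh() or xfrm6_tunnel_spi_lock.
 */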
static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
	struct xfrm6_tunnel_spi *x6spi;

	hlist_for_each_entry_rcu(x6spi,
			&xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
			list_byaddr) {
		if (xfrm6_addr_equal(&x6spi->addr, saddr))
			return x6spi;
	}

	return NULL;
}

__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	u32 spi;

	rcu_read_lock_bh();
	x6spi = __xfrm6_tunnel_spi_lookup(net, saddr);
	spi = x6spi ? x6spi->spi : 0;
	rcu_read_unlock_bh();
	return htonl(spi);
}
EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup);

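/*
 * Return the by-SPI hash index @spi would go into if it is still free,
 * or -1 if some entry already uses that SPI.
 */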
static int __xfrm6_tunnel_spi_check(struct net *net, u32 spi)
{
	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
	struct xfrm6_tunnel_spi *x6spi;
	int index = xfrm6_tunnel_spi_hash_byspi(spi);

	hlist_for_each_entry(x6spi,
			&xfrm6_tn->spi_byspi[index],
			list_byspi) {
		if (x6spi->spi == spi)
			return -1;
	}
	return index;
}

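/*
 * Pick the next free SPI, starting just after the last one handed out
 * and wrapping around to XFRM6_TUNNEL_SPI_MIN if needed, then insert a
 * new entry into both hash tables.  Called with xfrm6_tunnel_spi_lock
 * held, hence the GFP_ATOMIC allocation.  Returns 0 (an invalid SPI)
 * if the SPI space is exhausted or the allocation fails.
 */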
static u32 __xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
	u32 spi;
	struct xfrm6_tunnel_spi *x6spi;
	int index;

	if (xfrm6_tn->spi < XFRM6_TUNNEL_SPI_MIN ||
	    xfrm6_tn->spi >= XFRM6_TUNNEL_SPI_MAX)
		xfrm6_tn->spi = XFRM6_TUNNEL_SPI_MIN;
	else
		xfrm6_tn->spi++;

	for (spi = xfrm6_tn->spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) {
		index = __xfrm6_tunnel_spi_check(net, spi);
		if (index >= 0)
			goto alloc_spi;
	}
	for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tn->spi; spi++) {
		index = __xfrm6_tunnel_spi_check(net, spi);
		if (index >= 0)
			goto alloc_spi;
	}
	spi = 0;
	goto out;
alloc_spi:
	xfrm6_tn->spi = spi;
	x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC);
	if (!x6spi)
		goto out;

	memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr));
	x6spi->spi = spi;
	refcount_set(&x6spi->refcnt, 1);

	hlist_add_head_rcu(&x6spi->list_byspi, &xfrm6_tn->spi_byspi[index]);

	index = xfrm6_tunnel_spi_hash_byaddr(saddr);
	hlist_add_head_rcu(&x6spi->list_byaddr, &xfrm6_tn->spi_byaddr[index]);
out:
	return spi;
}

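/*
 * Look up or allocate the SPI for @saddr and take a reference on it.
 * Returns the SPI in network byte order, or 0 if none could be
 * allocated.  Callers (for instance the IPv6 IPComp code) pass the
 * tunnel's outer source address when setting up an IPv6-in-IPv6 state;
 * the reference is dropped again via xfrm6_tunnel_free_spi() when that
 * state is destroyed.
 */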
__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	u32 spi;

	spin_lock_bh(&xfrm6_tunnel_spi_lock);
	x6spi = __xfrm6_tunnel_spi_lookup(net, saddr);
	if (x6spi) {
		refcount_inc(&x6spi->refcnt);
		spi = x6spi->spi;
	} else
		spi = __xfrm6_tunnel_alloc_spi(net, saddr);
	spin_unlock_bh(&xfrm6_tunnel_spi_lock);

	return htonl(spi);
}
EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi);

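/*
 * Freeing side: once the last reference goes away the entry is unlinked
 * from both hash tables under the spinlock, and the actual free is
 * deferred by call_rcu() so that concurrent RCU readers in
 * __xfrm6_tunnel_spi_lookup() stay safe.
 */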
static void x6spi_destroy_rcu(struct rcu_head *head)
{
	kmem_cache_free(xfrm6_tunnel_spi_kmem,
			container_of(head, struct xfrm6_tunnel_spi, rcu_head));
}

static void xfrm6_tunnel_free_spi(struct net *net, xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
	struct xfrm6_tunnel_spi *x6spi;
	struct hlist_node *n;

	spin_lock_bh(&xfrm6_tunnel_spi_lock);

	hlist_for_each_entry_safe(x6spi, n,
			&xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
			list_byaddr)
	{
		if (xfrm6_addr_equal(&x6spi->addr, saddr)) {
			if (refcount_dec_and_test(&x6spi->refcnt)) {
				hlist_del_rcu(&x6spi->list_byaddr);
				hlist_del_rcu(&x6spi->list_byspi);
				call_rcu(&x6spi->rcu_head, x6spi_destroy_rcu);
				break;
			}
		}
	}
	spin_unlock_bh(&xfrm6_tunnel_spi_lock);
}

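/*
 * Output/input hooks for the IPPROTO_IPV6 xfrm type.  The outer IPv6
 * header is built by the common tunnel-mode code: output only pulls
 * skb->data back to the network header, and input reports the protocol
 * of the encapsulated packet by reading the nexthdr byte at
 * IP6CB(skb)->nhoff.
 */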
static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
{
	skb_push(skb, -skb_network_offset(skb));
	return 0;
}

static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
	return skb_network_header(skb)[IP6CB(skb)->nhoff];
}

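/*
 * Receive handler: IPv6-in-IPv6 packets carry no SPI on the wire, so
 * the SPI is recovered from the outer source address and handed to the
 * generic xfrm receive path.
 */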
static int xfrm6_tunnel_rcv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	__be32 spi;

	spi = xfrm6_tunnel_spi_lookup(net, (const xfrm_address_t *)&iph->saddr);
	return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi, NULL);
}

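/*
 * ICMPv6 error handler.  The relevant error types are enumerated, but
 * every case simply falls through and the error is ignored; the
 * function always returns 0.
 */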
static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			    u8 type, u8 code, int offset, __be32 info)
{
	/* xfrm6_tunnel native err handling */
	switch (type) {
	case ICMPV6_DEST_UNREACH:
		switch (code) {
		case ICMPV6_NOROUTE:
		case ICMPV6_ADM_PROHIBITED:
		case ICMPV6_NOT_NEIGHBOUR:
		case ICMPV6_ADDR_UNREACH:
		case ICMPV6_PORT_UNREACH:
		default:
			break;
		}
		break;
	case ICMPV6_PKT_TOOBIG:
		break;
	case ICMPV6_TIME_EXCEED:
		switch (code) {
		case ICMPV6_EXC_HOPLIMIT:
			break;
		case ICMPV6_EXC_FRAGTIME:
		default:
			break;
		}
		break;
	case ICMPV6_PARAMPROB:
		switch (code) {
		case ICMPV6_HDR_FIELD: break;
		case ICMPV6_UNK_NEXTHDR: break;
		case ICMPV6_UNK_OPTION: break;
		}
		break;
	default:
		break;
	}

	return 0;
}

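/*
 * State setup/teardown: an IPPROTO_IPV6 state must be a tunnel-mode
 * state without NAT-T encapsulation, and it only needs room for one
 * outer IPv6 header.  Destroying the state drops the SPI reference
 * taken at allocation time.
 */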
static int xfrm6_tunnel_init_state(struct xfrm_state *x)
{
	if (x->props.mode != XFRM_MODE_TUNNEL)
		return -EINVAL;

	if (x->encap)
		return -EINVAL;

	x->props.header_len = sizeof(struct ipv6hdr);

	return 0;
}

static void xfrm6_tunnel_destroy(struct xfrm_state *x)
{
	struct net *net = xs_net(x);

	xfrm6_tunnel_free_spi(net, (xfrm_address_t *)&x->props.saddr);
}

static const struct xfrm_type xfrm6_tunnel_type = {
	.description	= "IP6IP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_IPV6,
	.init_state	= xfrm6_tunnel_init_state,
	.destructor	= xfrm6_tunnel_destroy,
	.input		= xfrm6_tunnel_input,
	.output		= xfrm6_tunnel_output,
};

static struct xfrm6_tunnel xfrm6_tunnel_handler __read_mostly = {
	.handler	= xfrm6_tunnel_rcv,
	.err_handler	= xfrm6_tunnel_err,
	.priority	= 2,
};

static struct xfrm6_tunnel xfrm46_tunnel_handler __read_mostly = {
	.handler	= xfrm6_tunnel_rcv,
	.err_handler	= xfrm6_tunnel_err,
	.priority	= 2,
};

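/*
 * Per-namespace init/exit.  Init clears both hash tables and the SPI
 * counter; exit flushes any remaining xfrm states and then warns if an
 * SPI entry survived, since by that point both tables must be empty.
 */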
static int __net_init xfrm6_tunnel_net_init(struct net *net)
{
	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
	unsigned int i;

	for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
		INIT_HLIST_HEAD(&xfrm6_tn->spi_byaddr[i]);
	for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
		INIT_HLIST_HEAD(&xfrm6_tn->spi_byspi[i]);
	xfrm6_tn->spi = 0;

	return 0;
}

static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
{
	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
	unsigned int i;

	xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
	xfrm_flush_gc();

	for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
		WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));

	for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
		WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byspi[i]));
}

static struct pernet_operations xfrm6_tunnel_net_ops = {
	.init	= xfrm6_tunnel_net_init,
	.exit	= xfrm6_tunnel_net_exit,
	.id	= &xfrm6_tunnel_net_id,
	.size	= sizeof(struct xfrm6_tunnel_net),
};

static int __init xfrm6_tunnel_init(void)
{
	int rv;

	xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
						  sizeof(struct xfrm6_tunnel_spi),
						  0, SLAB_HWCACHE_ALIGN,
						  NULL);
	if (!xfrm6_tunnel_spi_kmem)
		return -ENOMEM;
	rv = register_pernet_subsys(&xfrm6_tunnel_net_ops);
	if (rv < 0)
		goto out_pernet;
	rv = xfrm_register_type(&xfrm6_tunnel_type, AF_INET6);
	if (rv < 0)
		goto out_type;
	rv = xfrm6_tunnel_register(&xfrm6_tunnel_handler, AF_INET6);
	if (rv < 0)
		goto out_xfrm6;
	rv = xfrm6_tunnel_register(&xfrm46_tunnel_handler, AF_INET);
	if (rv < 0)
		goto out_xfrm46;
	return 0;

out_xfrm46:
	xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
out_xfrm6:
	xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
out_type:
	unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
out_pernet:
	kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
	return rv;
}

static void __exit xfrm6_tunnel_fini(void)
{
	xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
	xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
	xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
	unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
	kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
}

module_init(xfrm6_tunnel_init);
module_exit(xfrm6_tunnel_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_IPV6);