// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C)2003,2004 USAGI/WIDE Project
 *
 * Authors	Mitsuru KANDA <mk@linux-ipv6.org>
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 * Based on net/ipv4/xfrm4_tunnel.c
 */
#include <linux/module.h>
#include <linux/xfrm.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ipv6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/mutex.h>
#include <net/netns/generic.h>

#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256

#define XFRM6_TUNNEL_SPI_MIN	1
#define XFRM6_TUNNEL_SPI_MAX	0xffffffff

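/*
 * Per-network-namespace state: two hash tables over the allocated SPI
 * entries (keyed by source address and by SPI) plus the last SPI value
 * handed out, which is the starting point for the next allocation scan.
 */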
struct xfrm6_tunnel_net {
	struct hlist_head spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
	struct hlist_head spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
	u32 spi;
};

static unsigned int xfrm6_tunnel_net_id __read_mostly;
static inline struct xfrm6_tunnel_net *xfrm6_tunnel_pernet(struct net *net)
{
	return net_generic(net, xfrm6_tunnel_net_id);
}

/*
 * An xfrm6_tunnel_spi entry maps one xfrm_address_t to a unique
 * id ("spi").
 */
struct xfrm6_tunnel_spi {
	struct hlist_node	list_byaddr;
	struct hlist_node	list_byspi;
	xfrm_address_t		addr;
	u32			spi;
	refcount_t		refcnt;
	struct rcu_head		rcu_head;
};

static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock);

static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;

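/*
 * Fold the IPv6 source address down to an index into the spi_byaddr
 * hash table.
 */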
static inline unsigned int xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr)
{
	unsigned int h;

	h = ipv6_addr_hash((const struct in6_addr *)addr);
	h ^= h >> 16;
	h ^= h >> 8;
	h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1;

	return h;
}

static inline unsigned int xfrm6_tunnel_spi_hash_byspi(u32 spi)
{
	return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
}

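/*
 * Look up the SPI entry for a source address.  Callers must hold either
 * the RCU read lock or xfrm6_tunnel_spi_lock.
 */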
static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
	struct xfrm6_tunnel_spi *x6spi;

	hlist_for_each_entry_rcu(x6spi,
			&xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
			list_byaddr, lockdep_is_held(&xfrm6_tunnel_spi_lock)) {
		if (xfrm6_addr_equal(&x6spi->addr, saddr))
			return x6spi;
	}

	return NULL;
}

__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	u32 spi;

	rcu_read_lock_bh();
	x6spi = __xfrm6_tunnel_spi_lookup(net, saddr);
	spi = x6spi ? x6spi->spi : 0;
	rcu_read_unlock_bh();
	return htonl(spi);
}
EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup);

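/*
 * Return the spi_byspi bucket index if @spi is still unused, or -1 if
 * it is already allocated.  Called with xfrm6_tunnel_spi_lock held.
 */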
static int __xfrm6_tunnel_spi_check(struct net *net, u32 spi)
{
	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
	struct xfrm6_tunnel_spi *x6spi;
	int index = xfrm6_tunnel_spi_hash_byspi(spi);

	hlist_for_each_entry(x6spi,
			     &xfrm6_tn->spi_byspi[index],
			     list_byspi) {
		if (x6spi->spi == spi)
			return -1;
	}
	return index;
}

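/*
 * Find a free SPI, starting just after the last one handed out and
 * wrapping around to XFRM6_TUNNEL_SPI_MIN if necessary (spi is a u32,
 * so the first loop breaks explicitly at XFRM6_TUNNEL_SPI_MAX instead
 * of wrapping past it), then insert a new entry into both hash tables.
 * Returns 0 if no SPI is available or the entry cannot be allocated.
 * Called with xfrm6_tunnel_spi_lock held.
 */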
static u32 __xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
	u32 spi;
	struct xfrm6_tunnel_spi *x6spi;
	int index;

	if (xfrm6_tn->spi < XFRM6_TUNNEL_SPI_MIN ||
	    xfrm6_tn->spi >= XFRM6_TUNNEL_SPI_MAX)
		xfrm6_tn->spi = XFRM6_TUNNEL_SPI_MIN;
	else
		xfrm6_tn->spi++;

	for (spi = xfrm6_tn->spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) {
		index = __xfrm6_tunnel_spi_check(net, spi);
		if (index >= 0)
			goto alloc_spi;

		if (spi == XFRM6_TUNNEL_SPI_MAX)
			break;
	}
	for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tn->spi; spi++) {
		index = __xfrm6_tunnel_spi_check(net, spi);
		if (index >= 0)
			goto alloc_spi;
	}
	spi = 0;
	goto out;
alloc_spi:
	xfrm6_tn->spi = spi;
	x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC);
	if (!x6spi)
		goto out;

	memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr));
	x6spi->spi = spi;
	refcount_set(&x6spi->refcnt, 1);

	hlist_add_head_rcu(&x6spi->list_byspi, &xfrm6_tn->spi_byspi[index]);

	index = xfrm6_tunnel_spi_hash_byaddr(saddr);
	hlist_add_head_rcu(&x6spi->list_byaddr, &xfrm6_tn->spi_byaddr[index]);
out:
	return spi;
}

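/*
 * Return the SPI, in network byte order, that maps to @saddr.  If an
 * entry already exists its reference count is bumped; otherwise a new
 * SPI is allocated.  Returns 0 on allocation failure.
 */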
__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	u32 spi;

	spin_lock_bh(&xfrm6_tunnel_spi_lock);
	x6spi = __xfrm6_tunnel_spi_lookup(net, saddr);
	if (x6spi) {
		refcount_inc(&x6spi->refcnt);
		spi = x6spi->spi;
	} else
		spi = __xfrm6_tunnel_alloc_spi(net, saddr);
	spin_unlock_bh(&xfrm6_tunnel_spi_lock);

	return htonl(spi);
}
EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi);

static void x6spi_destroy_rcu(struct rcu_head *head)
{
	kmem_cache_free(xfrm6_tunnel_spi_kmem,
			container_of(head, struct xfrm6_tunnel_spi, rcu_head));
}

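/*
 * Drop one reference on the SPI entry for @saddr.  When the last
 * reference goes away the entry is unlinked from both hash tables and
 * freed after an RCU grace period.
 */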
static void xfrm6_tunnel_free_spi(struct net *net, xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
	struct xfrm6_tunnel_spi *x6spi;
	struct hlist_node *n;

	spin_lock_bh(&xfrm6_tunnel_spi_lock);

	hlist_for_each_entry_safe(x6spi, n,
			&xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
			list_byaddr)
	{
		if (xfrm6_addr_equal(&x6spi->addr, saddr)) {
			if (refcount_dec_and_test(&x6spi->refcnt)) {
				hlist_del_rcu(&x6spi->list_byaddr);
				hlist_del_rcu(&x6spi->list_byspi);
				call_rcu(&x6spi->rcu_head, x6spi_destroy_rcu);
				break;
			}
		}
	}
	spin_unlock_bh(&xfrm6_tunnel_spi_lock);
}

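/*
 * Tunnel mode output has already built the outer IPv6 header; move
 * skb->data back so that it points at that network header again.
 */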
static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
{
	skb_push(skb, -skb_network_offset(skb));
	return 0;
}

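/*
 * Return the next-header value recorded at IP6CB(skb)->nhoff, i.e. the
 * protocol of the tunneled payload, for the xfrm input path.
 */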
static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
	return skb_network_header(skb)[IP6CB(skb)->nhoff];
}

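/*
 * Receive handler for tunneled packets: the SPI is not carried in the
 * packet, so derive it from the outer source address and hand the skb
 * to the generic xfrm receive path.
 */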
static int xfrm6_tunnel_rcv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	__be32 spi;

	spi = xfrm6_tunnel_spi_lookup(net, (const xfrm_address_t *)&iph->saddr);
	return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi, NULL);
}

static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			    u8 type, u8 code, int offset, __be32 info)
{
	/* xfrm6_tunnel native err handling */
	switch (type) {
	case ICMPV6_DEST_UNREACH:
		switch (code) {
		case ICMPV6_NOROUTE:
		case ICMPV6_ADM_PROHIBITED:
		case ICMPV6_NOT_NEIGHBOUR:
		case ICMPV6_ADDR_UNREACH:
		case ICMPV6_PORT_UNREACH:
		default:
			break;
		}
		break;
	case ICMPV6_PKT_TOOBIG:
		break;
	case ICMPV6_TIME_EXCEED:
		switch (code) {
		case ICMPV6_EXC_HOPLIMIT:
			break;
		case ICMPV6_EXC_FRAGTIME:
		default:
			break;
		}
		break;
	case ICMPV6_PARAMPROB:
		switch (code) {
		case ICMPV6_HDR_FIELD: break;
		case ICMPV6_UNK_NEXTHDR: break;
		case ICMPV6_UNK_OPTION: break;
		}
		break;
	default:
		break;
	}

	return 0;
}

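/*
 * This xfrm type only prepends a plain IPv6 header, so it requires
 * tunnel mode and does not support NAT-T encapsulation.
 */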
static int xfrm6_tunnel_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	if (x->props.mode != XFRM_MODE_TUNNEL) {
		NL_SET_ERR_MSG(extack, "IPv6 tunnel can only be used with tunnel mode");
		return -EINVAL;
	}

	if (x->encap) {
		NL_SET_ERR_MSG(extack, "IPv6 tunnel is not compatible with encapsulation");
		return -EINVAL;
	}

	x->props.header_len = sizeof(struct ipv6hdr);

	return 0;
}

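/* Release the SPI that was allocated for this state's source address. */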
static void xfrm6_tunnel_destroy(struct xfrm_state *x)
{
	struct net *net = xs_net(x);

	xfrm6_tunnel_free_spi(net, (xfrm_address_t *)&x->props.saddr);
}

static const struct xfrm_type xfrm6_tunnel_type = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_IPV6,
	.init_state	= xfrm6_tunnel_init_state,
	.destructor	= xfrm6_tunnel_destroy,
	.input		= xfrm6_tunnel_input,
	.output		= xfrm6_tunnel_output,
};

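/*
 * The same receive and error callbacks handle both IPv6-in-IPv6 and
 * IPv4-in-IPv6 packets; the two structures are registered below for
 * AF_INET6 and AF_INET respectively.
 */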
static struct xfrm6_tunnel xfrm6_tunnel_handler __read_mostly = {
	.handler	= xfrm6_tunnel_rcv,
	.err_handler	= xfrm6_tunnel_err,
	.priority	= 3,
};

static struct xfrm6_tunnel xfrm46_tunnel_handler __read_mostly = {
	.handler	= xfrm6_tunnel_rcv,
	.err_handler	= xfrm6_tunnel_err,
	.priority	= 3,
};

static int __net_init xfrm6_tunnel_net_init(struct net *net)
{
	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
	unsigned int i;

	for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
		INIT_HLIST_HEAD(&xfrm6_tn->spi_byaddr[i]);
	for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
		INIT_HLIST_HEAD(&xfrm6_tn->spi_byspi[i]);
	xfrm6_tn->spi = 0;

	return 0;
}

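/*
 * On namespace teardown, flush any remaining xfrm state (which drops
 * the SPI references through the destructor) and then verify that both
 * hash tables really are empty.
 */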
static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
{
	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
	unsigned int i;

	xfrm_flush_gc();
	xfrm_state_flush(net, 0, false, true);

	for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
		WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));

	for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
		WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byspi[i]));
}

static struct pernet_operations xfrm6_tunnel_net_ops = {
	.init	= xfrm6_tunnel_net_init,
	.exit	= xfrm6_tunnel_net_exit,
	.id	= &xfrm6_tunnel_net_id,
	.size	= sizeof(struct xfrm6_tunnel_net),
};

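/*
 * Module init: create the SPI slab cache, then register the per-netns
 * operations, the xfrm type and both tunnel handlers, unwinding in
 * reverse order if any step fails.
 */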
static int __init xfrm6_tunnel_init(void)
{
	int rv;

	xfrm6_tunnel_spi_kmem = KMEM_CACHE(xfrm6_tunnel_spi, SLAB_HWCACHE_ALIGN);
	if (!xfrm6_tunnel_spi_kmem)
		return -ENOMEM;
	rv = register_pernet_subsys(&xfrm6_tunnel_net_ops);
	if (rv < 0)
		goto out_pernet;
	rv = xfrm_register_type(&xfrm6_tunnel_type, AF_INET6);
	if (rv < 0)
		goto out_type;
	rv = xfrm6_tunnel_register(&xfrm6_tunnel_handler, AF_INET6);
	if (rv < 0)
		goto out_xfrm6;
	rv = xfrm6_tunnel_register(&xfrm46_tunnel_handler, AF_INET);
	if (rv < 0)
		goto out_xfrm46;
	return 0;

out_xfrm46:
	xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
out_xfrm6:
	xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
out_type:
	unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
out_pernet:
	kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
	return rv;
}

static void __exit xfrm6_tunnel_fini(void)
{
	xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
	xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
	xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
	unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
	/* Readers may still hold references to xfrm6_tunnel_spi entries,
	 * so wait for outstanding RCU callbacks before destroying the cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
}

module_init(xfrm6_tunnel_init);
module_exit(xfrm6_tunnel_fini);
MODULE_DESCRIPTION("IPv6 XFRM tunnel driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_IPV6);