// SPDX-License-Identifier: GPL-2.0
#include <linux/jhash.h>
#include <linux/netfilter.h>
#include <linux/rcupdate.h>
#include <linux/rhashtable.h>
#include <linux/vmalloc.h>
#include <net/genetlink.h>
#include <net/netns/generic.h>
#include <uapi/linux/genetlink.h>
#include "ila.h"

struct ila_xlat_params {
	struct ila_params ip;
	int ifindex;
};

struct ila_map {
	struct ila_xlat_params xp;
	struct rhash_head node;
	struct ila_map __rcu *next;
	struct rcu_head rcu;
};
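
/* Mappings live in a per-namespace rhashtable keyed on locator_match.
 * Entries that share the same locator_match are not separate table objects:
 * they hang off the head entry through the RCU-protected ->next pointer,
 * sorted by ila_order() so that more specific entries (those bound to an
 * ifindex) are matched before wildcard ones.
 */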

#define MAX_LOCKS 1024
#define LOCKS_PER_CPU 10

static int alloc_ila_locks(struct ila_net *ilan)
{
	return alloc_bucket_spinlocks(&ilan->xlat.locks, &ilan->xlat.locks_mask,
				      MAX_LOCKS, LOCKS_PER_CPU,
				      GFP_KERNEL);
}

static u32 hashrnd __read_mostly;
static __always_inline void __ila_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static inline u32 ila_locator_hash(struct ila_locator loc)
{
	u32 *v = (u32 *)loc.v32;

	__ila_hash_secret_init();
	return jhash_2words(v[0], v[1], hashrnd);
}

static inline spinlock_t *ila_get_lock(struct ila_net *ilan,
				       struct ila_locator loc)
{
	return &ilan->xlat.locks[ila_locator_hash(loc) & ilan->xlat.locks_mask];
}
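
/* Readers (the translation fast path and netlink lookups) rely on RCU only.
 * Writers serialize per locator through a small array of bucket spinlocks:
 * ila_get_lock() hashes the locator into xlat.locks, so adds and deletes for
 * different locators can proceed concurrently.
 */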

static inline int ila_cmp_wildcards(struct ila_map *ila,
				    struct ila_addr *iaddr, int ifindex)
{
	return (ila->xp.ifindex && ila->xp.ifindex != ifindex);
}

static inline int ila_cmp_params(struct ila_map *ila,
				 struct ila_xlat_params *xp)
{
	return (ila->xp.ifindex != xp->ifindex);
}

static int ila_cmpfn(struct rhashtable_compare_arg *arg,
		     const void *obj)
{
	const struct ila_map *ila = obj;

	return (ila->xp.ip.locator_match.v64 != *(__be64 *)arg->key);
}

static inline int ila_order(struct ila_map *ila)
{
	int score = 0;

	if (ila->xp.ifindex)
		score += 1 << 1;

	return score;
}

static const struct rhashtable_params rht_params = {
	.nelem_hint = 1024,
	.head_offset = offsetof(struct ila_map, node),
	.key_offset = offsetof(struct ila_map, xp.ip.locator_match),
	.key_len = sizeof(u64), /* identifier */
	.max_size = 1048576,
	.min_size = 256,
	.automatic_shrinking = true,
	.obj_cmpfn = ila_cmpfn,
};
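
/* The hash key is only the 8-byte locator_match, so ila_cmpfn compares just
 * that field. Entries that differ only in ifindex are not distinct rhashtable
 * objects; they live on the head entry's sublist as described above.
 */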

static int parse_nl_config(struct genl_info *info,
			   struct ila_xlat_params *xp)
{
	memset(xp, 0, sizeof(*xp));

	if (info->attrs[ILA_ATTR_LOCATOR])
		xp->ip.locator.v64 = (__force __be64)nla_get_u64(
			info->attrs[ILA_ATTR_LOCATOR]);

	if (info->attrs[ILA_ATTR_LOCATOR_MATCH])
		xp->ip.locator_match.v64 = (__force __be64)nla_get_u64(
			info->attrs[ILA_ATTR_LOCATOR_MATCH]);

	xp->ip.csum_mode = nla_get_u8_default(info->attrs[ILA_ATTR_CSUM_MODE],
					      ILA_CSUM_NO_ACTION);

	xp->ip.ident_type = nla_get_u8_default(info->attrs[ILA_ATTR_IDENT_TYPE],
					       ILA_ATYPE_USE_FORMAT);

	if (info->attrs[ILA_ATTR_IFINDEX])
		xp->ifindex = nla_get_s32(info->attrs[ILA_ATTR_IFINDEX]);

	return 0;
}
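
/* The attributes parsed above are what a userspace tool supplies when it
 * programs a mapping; for example, iproute2's "ip ila add" roughly maps its
 * loc_match/loc/dev/csum-mode/ident-type options onto ILA_ATTR_LOCATOR_MATCH,
 * ILA_ATTR_LOCATOR, ILA_ATTR_IFINDEX, ILA_ATTR_CSUM_MODE and
 * ILA_ATTR_IDENT_TYPE (option names are approximate and depend on the
 * iproute2 version).
 */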

/* Must be called with rcu readlock */
static inline struct ila_map *ila_lookup_wildcards(struct ila_addr *iaddr,
						   int ifindex,
						   struct ila_net *ilan)
{
	struct ila_map *ila;

	ila = rhashtable_lookup_fast(&ilan->xlat.rhash_table, &iaddr->loc,
				     rht_params);
	while (ila) {
		if (!ila_cmp_wildcards(ila, iaddr, ifindex))
			return ila;
		ila = rcu_access_pointer(ila->next);
	}

	return NULL;
}

/* Must be called with rcu readlock */
static inline struct ila_map *ila_lookup_by_params(struct ila_xlat_params *xp,
						   struct ila_net *ilan)
{
	struct ila_map *ila;

	ila = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
				     &xp->ip.locator_match,
				     rht_params);
	while (ila) {
		if (!ila_cmp_params(ila, xp))
			return ila;
		ila = rcu_access_pointer(ila->next);
	}

	return NULL;
}

static inline void ila_release(struct ila_map *ila)
{
	kfree_rcu(ila, rcu);
}

static void ila_free_node(struct ila_map *ila)
{
	struct ila_map *next;

	/* Assume rcu_readlock held */
	while (ila) {
		next = rcu_access_pointer(ila->next);
		ila_release(ila);
		ila = next;
	}
}

static void ila_free_cb(void *ptr, void *arg)
{
	ila_free_node((struct ila_map *)ptr);
}

static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila);

static unsigned int
ila_nf_input(void *priv,
	     struct sk_buff *skb,
	     const struct nf_hook_state *state)
{
	ila_xlat_addr(skb, false);
	return NF_ACCEPT;
}

static const struct nf_hook_ops ila_nf_hook_ops[] = {
	{
		.hook = ila_nf_input,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = -1,
	},
};
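
/* Translation happens at PRE_ROUTING: if the destination's locator matches a
 * mapping, it is rewritten before the routing decision, so forwarding (or
 * local delivery) acts on the translated address.
 */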

static DEFINE_MUTEX(ila_mutex);

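/* Add a mapping under the per-locator bucket lock. If no entry exists for the
 * locator_match, the new ila_map becomes the rhashtable entry; otherwise it is
 * linked into the existing head's sublist at the position given by its
 * ila_order() score, or swapped in as the new head via
 * rhashtable_replace_fast() when it is the most specific entry.
 */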
static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct ila_map *ila, *head;
	spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
	int err = 0, order;

	if (!READ_ONCE(ilan->xlat.hooks_registered)) {
		/* We defer registering net hooks in the namespace until the
		 * first mapping is added.
		 */
		mutex_lock(&ila_mutex);
		if (!ilan->xlat.hooks_registered) {
			err = nf_register_net_hooks(net, ila_nf_hook_ops,
						    ARRAY_SIZE(ila_nf_hook_ops));
			if (!err)
				WRITE_ONCE(ilan->xlat.hooks_registered, true);
		}
		mutex_unlock(&ila_mutex);
		if (err)
			return err;
	}

	ila = kzalloc(sizeof(*ila), GFP_KERNEL);
	if (!ila)
		return -ENOMEM;

	ila_init_saved_csum(&xp->ip);

	ila->xp = *xp;

	order = ila_order(ila);

	spin_lock(lock);

	head = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
				      &xp->ip.locator_match,
				      rht_params);
	if (!head) {
		/* New entry for the rhash_table */
		err = rhashtable_lookup_insert_fast(&ilan->xlat.rhash_table,
						    &ila->node, rht_params);
	} else {
		struct ila_map *tila = head, *prev = NULL;

		do {
			if (!ila_cmp_params(tila, xp)) {
				err = -EEXIST;
				goto out;
			}

			if (order > ila_order(tila))
				break;

			prev = tila;
			tila = rcu_dereference_protected(tila->next,
							 lockdep_is_held(lock));
		} while (tila);

		if (prev) {
			/* Insert in sub list of head */
			RCU_INIT_POINTER(ila->next, tila);
			rcu_assign_pointer(prev->next, ila);
		} else {
			/* Make this ila new head */
			RCU_INIT_POINTER(ila->next, head);
			err = rhashtable_replace_fast(&ilan->xlat.rhash_table,
						      &head->node,
						      &ila->node, rht_params);
			if (err)
				goto out;
		}
	}

out:
	spin_unlock(lock);

	if (err)
		kfree(ila);

	return err;
}

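/* Deleting a mapping mirrors the add path: if the matching entry is not the
 * head, it is simply unlinked from the sublist; if it is the head, the first
 * sublist entry (if any) is promoted into the rhashtable with
 * rhashtable_replace_fast(), otherwise the object is removed outright.
 */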
static int ila_del_mapping(struct net *net, struct ila_xlat_params *xp)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct ila_map *ila, *head, *prev;
	spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
	int err = -ENOENT;

	spin_lock(lock);

	head = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
				      &xp->ip.locator_match, rht_params);
	ila = head;

	prev = NULL;

	while (ila) {
		if (ila_cmp_params(ila, xp)) {
			prev = ila;
			ila = rcu_dereference_protected(ila->next,
							lockdep_is_held(lock));
			continue;
		}

		err = 0;

		if (prev) {
			/* Not head, just delete from list */
			rcu_assign_pointer(prev->next, ila->next);
		} else {
			/* It is the head. If there is something in the
			 * sublist we need to make a new head.
			 */
			head = rcu_dereference_protected(ila->next,
							 lockdep_is_held(lock));
			if (head) {
				/* Put first entry in the sublist into the
				 * table
				 */
				err = rhashtable_replace_fast(
					&ilan->xlat.rhash_table, &ila->node,
					&head->node, rht_params);
				if (err)
					goto out;
			} else {
				/* Entry no longer used */
				err = rhashtable_remove_fast(
					&ilan->xlat.rhash_table,
					&ila->node, rht_params);
			}
		}

		ila_release(ila);

		break;
	}

out:
	spin_unlock(lock);

	return err;
}

int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct ila_xlat_params p;
	int err;

	err = parse_nl_config(info, &p);
	if (err)
		return err;

	return ila_add_mapping(net, &p);
}

int ila_xlat_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct ila_xlat_params xp;
	int err;

	err = parse_nl_config(info, &xp);
	if (err)
		return err;

	ila_del_mapping(net, &xp);

	return 0;
}

static inline spinlock_t *lock_from_ila_map(struct ila_net *ilan,
					    struct ila_map *ila)
{
	return ila_get_lock(ilan, ila->xp.ip.locator_match);
}

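/* Flush walks the whole table with an rhashtable iterator, taking the proper
 * bucket lock for each head entry before removing it and freeing its sublist.
 * -EAGAIN from the walker just means the table was resized mid-walk and the
 * iteration continues.
 */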
int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct rhashtable_iter iter;
	struct ila_map *ila;
	spinlock_t *lock;
	int ret = 0;

	rhashtable_walk_enter(&ilan->xlat.rhash_table, &iter);
	rhashtable_walk_start(&iter);

	for (;;) {
		ila = rhashtable_walk_next(&iter);

		if (IS_ERR(ila)) {
			if (PTR_ERR(ila) == -EAGAIN)
				continue;
			ret = PTR_ERR(ila);
			goto done;
		} else if (!ila) {
			break;
		}

		lock = lock_from_ila_map(ilan, ila);

		spin_lock(lock);

		ret = rhashtable_remove_fast(&ilan->xlat.rhash_table,
					     &ila->node, rht_params);
		if (!ret)
			ila_free_node(ila);

		spin_unlock(lock);

		if (ret)
			break;
	}

done:
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
	return ret;
}

static int ila_fill_info(struct ila_map *ila, struct sk_buff *msg)
{
	if (nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR,
			      (__force u64)ila->xp.ip.locator.v64,
			      ILA_ATTR_PAD) ||
	    nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR_MATCH,
			      (__force u64)ila->xp.ip.locator_match.v64,
			      ILA_ATTR_PAD) ||
	    nla_put_s32(msg, ILA_ATTR_IFINDEX, ila->xp.ifindex) ||
	    nla_put_u8(msg, ILA_ATTR_CSUM_MODE, ila->xp.ip.csum_mode) ||
	    nla_put_u8(msg, ILA_ATTR_IDENT_TYPE, ila->xp.ip.ident_type))
		return -1;

	return 0;
}

static int ila_dump_info(struct ila_map *ila,
			 u32 portid, u32 seq, u32 flags,
			 struct sk_buff *skb, u8 cmd)
{
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &ila_nl_family, flags, cmd);
	if (!hdr)
		return -ENOMEM;

	if (ila_fill_info(ila, skb) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct sk_buff *msg;
	struct ila_xlat_params xp;
	struct ila_map *ila;
	int ret;

	ret = parse_nl_config(info, &xp);
	if (ret)
		return ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	rcu_read_lock();

	ret = -ESRCH;
	ila = ila_lookup_by_params(&xp, ilan);
	if (ila) {
		ret = ila_dump_info(ila,
				    info->snd_portid,
				    info->snd_seq, 0, msg,
				    info->genlhdr->cmd);
	}

	rcu_read_unlock();

	if (ret < 0)
		goto out_free;

	return genlmsg_reply(msg, info);

out_free:
	nlmsg_free(msg);
	return ret;
}

struct ila_dump_iter {
	struct rhashtable_iter rhiter;
	int skip;
};
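
/* Dump state is carried across netlink callbacks in cb->args[0]: rhiter saves
 * the rhashtable walker position between invocations, and skip records how
 * many entries of the current head's sublist were already emitted, so a
 * resumed dump can fast-forward past them.
 */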

int ila_xlat_nl_dump_start(struct netlink_callback *cb)
{
	struct net *net = sock_net(cb->skb->sk);
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct ila_dump_iter *iter;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	rhashtable_walk_enter(&ilan->xlat.rhash_table, &iter->rhiter);

	iter->skip = 0;
	cb->args[0] = (long)iter;

	return 0;
}

int ila_xlat_nl_dump_done(struct netlink_callback *cb)
{
	struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];

	rhashtable_walk_exit(&iter->rhiter);

	kfree(iter);

	return 0;
}

int ila_xlat_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];
	struct rhashtable_iter *rhiter = &iter->rhiter;
	int skip = iter->skip;
	struct ila_map *ila;
	int ret;

	rhashtable_walk_start(rhiter);

	/* Get first entry */
	ila = rhashtable_walk_peek(rhiter);

	if (ila && !IS_ERR(ila) && skip) {
		/* Skip over visited entries */

		while (ila && skip) {
			/* Skip over any ila entries in this list that we
			 * have already dumped.
			 */
			ila = rcu_access_pointer(ila->next);
			skip--;
		}
	}

	skip = 0;

	for (;;) {
		if (IS_ERR(ila)) {
			ret = PTR_ERR(ila);
			if (ret == -EAGAIN) {
				/* Table has changed and iter has reset. Return
				 * -EAGAIN to the application even if we have
				 * written data to the skb. The application
				 * needs to deal with this.
				 */

				goto out_ret;
			} else {
				break;
			}
		} else if (!ila) {
			ret = 0;
			break;
		}

		while (ila) {
			ret = ila_dump_info(ila, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq, NLM_F_MULTI,
					    skb, ILA_CMD_GET);
			if (ret)
				goto out;

			skip++;
			ila = rcu_access_pointer(ila->next);
		}

		skip = 0;
		ila = rhashtable_walk_next(rhiter);
	}

out:
	iter->skip = skip;
	ret = (skb->len ? : ret);

out_ret:
	rhashtable_walk_stop(rhiter);
	return ret;
}
int ila_xlat_init_net(struct net *net)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);
	int err;

	err = alloc_ila_locks(ilan);
	if (err)
		return err;

	err = rhashtable_init(&ilan->xlat.rhash_table, &rht_params);
	if (err) {
		free_bucket_spinlocks(ilan->xlat.locks);
		return err;
	}

	return 0;
}

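/* Netfilter hooks are torn down in the pernet pre_exit phase, which runs
 * before the exit handlers, so no packets can still be traversing
 * ila_nf_input() when ila_xlat_exit_net() frees the mapping table below.
 */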
void ila_xlat_pre_exit_net(struct net *net)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);

	if (ilan->xlat.hooks_registered)
		nf_unregister_net_hooks(net, ila_nf_hook_ops,
					ARRAY_SIZE(ila_nf_hook_ops));
}

void ila_xlat_exit_net(struct net *net)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);

	rhashtable_free_and_destroy(&ilan->xlat.rhash_table, ila_free_cb, NULL);

	free_bucket_spinlocks(ilan->xlat.locks);
}

static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila)
{
	struct ila_map *ila;
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct ila_addr *iaddr = ila_a2i(&ip6h->daddr);

	/* Assumes skb contains a valid IPv6 header that is pulled */

	/* No check here that the ILA type in the mapping matches what is in
	 * the address. We assume that whatever the sender gave us can be
	 * translated. The checksum mode however is relevant.
	 */

	rcu_read_lock();

	ila = ila_lookup_wildcards(iaddr, skb->dev->ifindex, ilan);
	if (ila)
		ila_update_ipv6_locator(skb, &ila->xp.ip, sir2ila);

	rcu_read_unlock();

	return 0;
}