1/*
2 * xfrm_policy.c
3 *
4 * Changes:
5 * Mitsuru KANDA @USAGI
6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8 * IPv6 support
9 * Kazunori MIYAZAWA @USAGI
10 * YOSHIFUJI Hideaki
11 * Split up af-specific portion
12 * Derek Atkins <derek@ihtfp.com> Add the post_input processor
13 *
14 */
15
16#include <linux/err.h>
17#include <linux/slab.h>
18#include <linux/kmod.h>
19#include <linux/list.h>
20#include <linux/spinlock.h>
21#include <linux/workqueue.h>
22#include <linux/notifier.h>
23#include <linux/netdevice.h>
24#include <linux/netfilter.h>
25#include <linux/module.h>
26#include <linux/cache.h>
27#include <linux/audit.h>
28#include <net/dst.h>
29#include <net/flow.h>
30#include <net/xfrm.h>
31#include <net/ip.h>
32#ifdef CONFIG_XFRM_STATISTICS
33#include <net/snmp.h>
34#endif
35
36#include "xfrm_hash.h"
37
38DEFINE_MUTEX(xfrm_cfg_mutex);
39EXPORT_SYMBOL(xfrm_cfg_mutex);
40
41static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock);
42static struct dst_entry *xfrm_policy_sk_bundles;
43static DEFINE_RWLOCK(xfrm_policy_lock);
44
45static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
46static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
47
48static struct kmem_cache *xfrm_dst_cache __read_mostly;
49
50static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
51static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
52static void xfrm_init_pmtu(struct dst_entry *dst);
53static int stale_bundle(struct dst_entry *dst);
54static int xfrm_bundle_ok(struct xfrm_dst *xdst);
55
56
57static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
58 int dir);
59
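/* Check whether an IPv4 flow matches a policy selector: destination and
 * source prefixes, (masked) ports, protocol and output interface.
 */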
60static inline bool
61__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
62{
63 const struct flowi4 *fl4 = &fl->u.ip4;
64
65 return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
66 addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
67 !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
68 !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
69 (fl4->flowi4_proto == sel->proto || !sel->proto) &&
70 (fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
71}
72
73static inline bool
74__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
75{
76 const struct flowi6 *fl6 = &fl->u.ip6;
77
78 return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
79 addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
80 !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
81 !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
82 (fl6->flowi6_proto == sel->proto || !sel->proto) &&
83 (fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
84}
85
86bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
87 unsigned short family)
88{
89 switch (family) {
90 case AF_INET:
91 return __xfrm4_selector_match(sel, fl);
92 case AF_INET6:
93 return __xfrm6_selector_match(sel, fl);
94 }
95 return false;
96}
97
98static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
99 const xfrm_address_t *saddr,
100 const xfrm_address_t *daddr,
101 int family)
102{
103 struct xfrm_policy_afinfo *afinfo;
104 struct dst_entry *dst;
105
106 afinfo = xfrm_policy_get_afinfo(family);
107 if (unlikely(afinfo == NULL))
108 return ERR_PTR(-EAFNOSUPPORT);
109
110 dst = afinfo->dst_lookup(net, tos, saddr, daddr);
111
112 xfrm_policy_put_afinfo(afinfo);
113
114 return dst;
115}
116
117static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
118 xfrm_address_t *prev_saddr,
119 xfrm_address_t *prev_daddr,
120 int family)
121{
122 struct net *net = xs_net(x);
123 xfrm_address_t *saddr = &x->props.saddr;
124 xfrm_address_t *daddr = &x->id.daddr;
125 struct dst_entry *dst;
126
127 if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
128 saddr = x->coaddr;
129 daddr = prev_daddr;
130 }
131 if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
132 saddr = prev_saddr;
133 daddr = x->coaddr;
134 }
135
136 dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);
137
138 if (!IS_ERR(dst)) {
139 if (prev_saddr != saddr)
140 memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
141 if (prev_daddr != daddr)
142 memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
143 }
144
145 return dst;
146}
147
148static inline unsigned long make_jiffies(long secs)
149{
150 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
151 return MAX_SCHEDULE_TIMEOUT-1;
152 else
153 return secs*HZ;
154}
155
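/* Per-policy lifetime timer.  Soft expiry only notifies the key manager
 * via km_policy_expired(); hard expiry deletes the policy and notifies
 * with hard == 1.  The timer is re-armed for the nearest remaining limit.
 */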
156static void xfrm_policy_timer(unsigned long data)
157{
158 struct xfrm_policy *xp = (struct xfrm_policy*)data;
159 unsigned long now = get_seconds();
160 long next = LONG_MAX;
161 int warn = 0;
162 int dir;
163
164 read_lock(&xp->lock);
165
166 if (unlikely(xp->walk.dead))
167 goto out;
168
169 dir = xfrm_policy_id2dir(xp->index);
170
171 if (xp->lft.hard_add_expires_seconds) {
172 long tmo = xp->lft.hard_add_expires_seconds +
173 xp->curlft.add_time - now;
174 if (tmo <= 0)
175 goto expired;
176 if (tmo < next)
177 next = tmo;
178 }
179 if (xp->lft.hard_use_expires_seconds) {
180 long tmo = xp->lft.hard_use_expires_seconds +
181 (xp->curlft.use_time ? : xp->curlft.add_time) - now;
182 if (tmo <= 0)
183 goto expired;
184 if (tmo < next)
185 next = tmo;
186 }
187 if (xp->lft.soft_add_expires_seconds) {
188 long tmo = xp->lft.soft_add_expires_seconds +
189 xp->curlft.add_time - now;
190 if (tmo <= 0) {
191 warn = 1;
192 tmo = XFRM_KM_TIMEOUT;
193 }
194 if (tmo < next)
195 next = tmo;
196 }
197 if (xp->lft.soft_use_expires_seconds) {
198 long tmo = xp->lft.soft_use_expires_seconds +
199 (xp->curlft.use_time ? : xp->curlft.add_time) - now;
200 if (tmo <= 0) {
201 warn = 1;
202 tmo = XFRM_KM_TIMEOUT;
203 }
204 if (tmo < next)
205 next = tmo;
206 }
207
208 if (warn)
209 km_policy_expired(xp, dir, 0, 0);
210 if (next != LONG_MAX &&
211 !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
212 xfrm_pol_hold(xp);
213
214out:
215 read_unlock(&xp->lock);
216 xfrm_pol_put(xp);
217 return;
218
219expired:
220 read_unlock(&xp->lock);
221 if (!xfrm_policy_delete(xp, dir))
222 km_policy_expired(xp, dir, 1, 0);
223 xfrm_pol_put(xp);
224}
225
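/* Flow cache callbacks for policy entries: 'get' takes a reference unless
 * the policy is dead, 'check' reports whether the cached entry is still
 * valid, and 'delete' drops the cache's reference.
 */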
226static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
227{
228 struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
229
230 if (unlikely(pol->walk.dead))
231 flo = NULL;
232 else
233 xfrm_pol_hold(pol);
234
235 return flo;
236}
237
238static int xfrm_policy_flo_check(struct flow_cache_object *flo)
239{
240 struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
241
242 return !pol->walk.dead;
243}
244
245static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
246{
247 xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
248}
249
250static const struct flow_cache_ops xfrm_policy_fc_ops = {
251 .get = xfrm_policy_flo_get,
252 .check = xfrm_policy_flo_check,
253 .delete = xfrm_policy_flo_delete,
254};
255
256/* Allocate xfrm_policy. Not used here; it is intended for use by pfkeyv2
257 * SPD calls.
258 */
259
260struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
261{
262 struct xfrm_policy *policy;
263
264 policy = kzalloc(sizeof(struct xfrm_policy), gfp);
265
266 if (policy) {
267 write_pnet(&policy->xp_net, net);
268 INIT_LIST_HEAD(&policy->walk.all);
269 INIT_HLIST_NODE(&policy->bydst);
270 INIT_HLIST_NODE(&policy->byidx);
271 rwlock_init(&policy->lock);
272 atomic_set(&policy->refcnt, 1);
273 setup_timer(&policy->timer, xfrm_policy_timer,
274 (unsigned long)policy);
275 policy->flo.ops = &xfrm_policy_fc_ops;
276 }
277 return policy;
278}
279EXPORT_SYMBOL(xfrm_policy_alloc);
280
281/* Destroy xfrm_policy: descendant resources must have been released by this point. */
282
283void xfrm_policy_destroy(struct xfrm_policy *policy)
284{
285 BUG_ON(!policy->walk.dead);
286
287 if (del_timer(&policy->timer))
288 BUG();
289
290 security_xfrm_policy_free(policy->security);
291 kfree(policy);
292}
293EXPORT_SYMBOL(xfrm_policy_destroy);
294
295/* Rule must be locked. Release descendant resources, announce the
296 * entry dead. The rule must already be unlinked from the lists by this point.
297 */
298
299static void xfrm_policy_kill(struct xfrm_policy *policy)
300{
301 policy->walk.dead = 1;
302
303 atomic_inc(&policy->genid);
304
305 if (del_timer(&policy->timer))
306 xfrm_pol_put(policy);
307
308 xfrm_pol_put(policy);
309}
310
311static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
312
313static inline unsigned int idx_hash(struct net *net, u32 index)
314{
315 return __idx_hash(index, net->xfrm.policy_idx_hmask);
316}
317
318static struct hlist_head *policy_hash_bysel(struct net *net,
319 const struct xfrm_selector *sel,
320 unsigned short family, int dir)
321{
322 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
323 unsigned int hash = __sel_hash(sel, family, hmask);
324
325 return (hash == hmask + 1 ?
326 &net->xfrm.policy_inexact[dir] :
327 net->xfrm.policy_bydst[dir].table + hash);
328}
329
330static struct hlist_head *policy_hash_direct(struct net *net,
331 const xfrm_address_t *daddr,
332 const xfrm_address_t *saddr,
333 unsigned short family, int dir)
334{
335 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
336 unsigned int hash = __addr_hash(daddr, saddr, family, hmask);
337
338 return net->xfrm.policy_bydst[dir].table + hash;
339}
340
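/* Rehash one bydst chain into the resized table.  Entries that fall into
 * the same new bucket are kept in their original relative order by
 * inserting each one after the previously moved entry.
 */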
341static void xfrm_dst_hash_transfer(struct hlist_head *list,
342 struct hlist_head *ndsttable,
343 unsigned int nhashmask)
344{
345 struct hlist_node *entry, *tmp, *entry0 = NULL;
346 struct xfrm_policy *pol;
347 unsigned int h0 = 0;
348
349redo:
350 hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
351 unsigned int h;
352
353 h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
354 pol->family, nhashmask);
355 if (!entry0) {
356 hlist_del(entry);
357 hlist_add_head(&pol->bydst, ndsttable+h);
358 h0 = h;
359 } else {
360 if (h != h0)
361 continue;
362 hlist_del(entry);
363 hlist_add_after(entry0, &pol->bydst);
364 }
365 entry0 = entry;
366 }
367 if (!hlist_empty(list)) {
368 entry0 = NULL;
369 goto redo;
370 }
371}
372
373static void xfrm_idx_hash_transfer(struct hlist_head *list,
374 struct hlist_head *nidxtable,
375 unsigned int nhashmask)
376{
377 struct hlist_node *entry, *tmp;
378 struct xfrm_policy *pol;
379
380 hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
381 unsigned int h;
382
383 h = __idx_hash(pol->index, nhashmask);
384 hlist_add_head(&pol->byidx, nidxtable+h);
385 }
386}
387
388static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
389{
390 return ((old_hmask + 1) << 1) - 1;
391}
392
393static void xfrm_bydst_resize(struct net *net, int dir)
394{
395 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
396 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
397 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
398 struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
399 struct hlist_head *ndst = xfrm_hash_alloc(nsize);
400 int i;
401
402 if (!ndst)
403 return;
404
405 write_lock_bh(&xfrm_policy_lock);
406
407 for (i = hmask; i >= 0; i--)
408 xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);
409
410 net->xfrm.policy_bydst[dir].table = ndst;
411 net->xfrm.policy_bydst[dir].hmask = nhashmask;
412
413 write_unlock_bh(&xfrm_policy_lock);
414
415 xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
416}
417
418static void xfrm_byidx_resize(struct net *net, int total)
419{
420 unsigned int hmask = net->xfrm.policy_idx_hmask;
421 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
422 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
423 struct hlist_head *oidx = net->xfrm.policy_byidx;
424 struct hlist_head *nidx = xfrm_hash_alloc(nsize);
425 int i;
426
427 if (!nidx)
428 return;
429
430 write_lock_bh(&xfrm_policy_lock);
431
432 for (i = hmask; i >= 0; i--)
433 xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
434
435 net->xfrm.policy_byidx = nidx;
436 net->xfrm.policy_idx_hmask = nhashmask;
437
438 write_unlock_bh(&xfrm_policy_lock);
439
440 xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
441}
442
443static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
444{
445 unsigned int cnt = net->xfrm.policy_count[dir];
446 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
447
448 if (total)
449 *total += cnt;
450
451 if ((hmask + 1) < xfrm_policy_hashmax &&
452 cnt > hmask)
453 return 1;
454
455 return 0;
456}
457
458static inline int xfrm_byidx_should_resize(struct net *net, int total)
459{
460 unsigned int hmask = net->xfrm.policy_idx_hmask;
461
462 if ((hmask + 1) < xfrm_policy_hashmax &&
463 total > hmask)
464 return 1;
465
466 return 0;
467}
468
469void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
470{
471 read_lock_bh(&xfrm_policy_lock);
472 si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
473 si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
474 si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
475 si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
476 si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
477 si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
478 si->spdhcnt = net->xfrm.policy_idx_hmask;
479 si->spdhmcnt = xfrm_policy_hashmax;
480 read_unlock_bh(&xfrm_policy_lock);
481}
482EXPORT_SYMBOL(xfrm_spd_getinfo);
483
484static DEFINE_MUTEX(hash_resize_mutex);
485static void xfrm_hash_resize(struct work_struct *work)
486{
487 struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
488 int dir, total;
489
490 mutex_lock(&hash_resize_mutex);
491
492 total = 0;
493 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
494 if (xfrm_bydst_should_resize(net, dir, &total))
495 xfrm_bydst_resize(net, dir);
496 }
497 if (xfrm_byidx_should_resize(net, total))
498 xfrm_byidx_resize(net, total);
499
500 mutex_unlock(&hash_resize_mutex);
501}
502
503/* Generate a new index... KAME seems to generate them ordered by cost,
504 * at the price of completely unpredictable ordering of rules. That will not do here. */
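/* The index encodes the direction in its low bits: idx_generator advances
 * in steps of 8, so (idx_generator | dir) stays unique per direction.
 */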
505static u32 xfrm_gen_index(struct net *net, int dir)
506{
507 static u32 idx_generator;
508
509 for (;;) {
510 struct hlist_node *entry;
511 struct hlist_head *list;
512 struct xfrm_policy *p;
513 u32 idx;
514 int found;
515
516 idx = (idx_generator | dir);
517 idx_generator += 8;
518 if (idx == 0)
519 idx = 8;
520 list = net->xfrm.policy_byidx + idx_hash(net, idx);
521 found = 0;
522 hlist_for_each_entry(p, entry, list, byidx) {
523 if (p->index == idx) {
524 found = 1;
525 break;
526 }
527 }
528 if (!found)
529 return idx;
530 }
531}
532
533static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
534{
535 u32 *p1 = (u32 *) s1;
536 u32 *p2 = (u32 *) s2;
537 int len = sizeof(struct xfrm_selector) / sizeof(u32);
538 int i;
539
540 for (i = 0; i < len; i++) {
541 if (p1[i] != p2[i])
542 return 1;
543 }
544
545 return 0;
546}
547
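/* Insert a policy into the SPD.  The bydst chain is kept sorted by
 * priority; a policy with the same selector, mark and security context
 * replaces the old one (or fails with -EEXIST when 'excl' is set).
 */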
548int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
549{
550 struct net *net = xp_net(policy);
551 struct xfrm_policy *pol;
552 struct xfrm_policy *delpol;
553 struct hlist_head *chain;
554 struct hlist_node *entry, *newpos;
555 u32 mark = policy->mark.v & policy->mark.m;
556
557 write_lock_bh(&xfrm_policy_lock);
558 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
559 delpol = NULL;
560 newpos = NULL;
561 hlist_for_each_entry(pol, entry, chain, bydst) {
562 if (pol->type == policy->type &&
563 !selector_cmp(&pol->selector, &policy->selector) &&
564 (mark & pol->mark.m) == pol->mark.v &&
565 xfrm_sec_ctx_match(pol->security, policy->security) &&
566 !WARN_ON(delpol)) {
567 if (excl) {
568 write_unlock_bh(&xfrm_policy_lock);
569 return -EEXIST;
570 }
571 delpol = pol;
572 if (policy->priority > pol->priority)
573 continue;
574 } else if (policy->priority >= pol->priority) {
575 newpos = &pol->bydst;
576 continue;
577 }
578 if (delpol)
579 break;
580 }
581 if (newpos)
582 hlist_add_after(newpos, &policy->bydst);
583 else
584 hlist_add_head(&policy->bydst, chain);
585 xfrm_pol_hold(policy);
586 net->xfrm.policy_count[dir]++;
587 atomic_inc(&flow_cache_genid);
588 if (delpol)
589 __xfrm_policy_unlink(delpol, dir);
590 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
591 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
592 policy->curlft.add_time = get_seconds();
593 policy->curlft.use_time = 0;
594 if (!mod_timer(&policy->timer, jiffies + HZ))
595 xfrm_pol_hold(policy);
596 list_add(&policy->walk.all, &net->xfrm.policy_all);
597 write_unlock_bh(&xfrm_policy_lock);
598
599 if (delpol)
600 xfrm_policy_kill(delpol);
601 else if (xfrm_bydst_should_resize(net, dir, NULL))
602 schedule_work(&net->xfrm.policy_hash_work);
603
604 return 0;
605}
606EXPORT_SYMBOL(xfrm_policy_insert);
607
608struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
609 int dir, struct xfrm_selector *sel,
610 struct xfrm_sec_ctx *ctx, int delete,
611 int *err)
612{
613 struct xfrm_policy *pol, *ret;
614 struct hlist_head *chain;
615 struct hlist_node *entry;
616
617 *err = 0;
618 write_lock_bh(&xfrm_policy_lock);
619 chain = policy_hash_bysel(net, sel, sel->family, dir);
620 ret = NULL;
621 hlist_for_each_entry(pol, entry, chain, bydst) {
622 if (pol->type == type &&
623 (mark & pol->mark.m) == pol->mark.v &&
624 !selector_cmp(sel, &pol->selector) &&
625 xfrm_sec_ctx_match(ctx, pol->security)) {
626 xfrm_pol_hold(pol);
627 if (delete) {
628 *err = security_xfrm_policy_delete(
629 pol->security);
630 if (*err) {
631 write_unlock_bh(&xfrm_policy_lock);
632 return pol;
633 }
634 __xfrm_policy_unlink(pol, dir);
635 }
636 ret = pol;
637 break;
638 }
639 }
640 write_unlock_bh(&xfrm_policy_lock);
641
642 if (ret && delete)
643 xfrm_policy_kill(ret);
644 return ret;
645}
646EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
647
648struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
649 int dir, u32 id, int delete, int *err)
650{
651 struct xfrm_policy *pol, *ret;
652 struct hlist_head *chain;
653 struct hlist_node *entry;
654
655 *err = -ENOENT;
656 if (xfrm_policy_id2dir(id) != dir)
657 return NULL;
658
659 *err = 0;
660 write_lock_bh(&xfrm_policy_lock);
661 chain = net->xfrm.policy_byidx + idx_hash(net, id);
662 ret = NULL;
663 hlist_for_each_entry(pol, entry, chain, byidx) {
664 if (pol->type == type && pol->index == id &&
665 (mark & pol->mark.m) == pol->mark.v) {
666 xfrm_pol_hold(pol);
667 if (delete) {
668 *err = security_xfrm_policy_delete(
669 pol->security);
670 if (*err) {
671 write_unlock_bh(&xfrm_policy_lock);
672 return pol;
673 }
674 __xfrm_policy_unlink(pol, dir);
675 }
676 ret = pol;
677 break;
678 }
679 }
680 write_unlock_bh(&xfrm_policy_lock);
681
682 if (ret && delete)
683 xfrm_policy_kill(ret);
684 return ret;
685}
686EXPORT_SYMBOL(xfrm_policy_byid);
687
688#ifdef CONFIG_SECURITY_NETWORK_XFRM
689static inline int
690xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
691{
692 int dir, err = 0;
693
694 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
695 struct xfrm_policy *pol;
696 struct hlist_node *entry;
697 int i;
698
699 hlist_for_each_entry(pol, entry,
700 &net->xfrm.policy_inexact[dir], bydst) {
701 if (pol->type != type)
702 continue;
703 err = security_xfrm_policy_delete(pol->security);
704 if (err) {
705 xfrm_audit_policy_delete(pol, 0,
706 audit_info->loginuid,
707 audit_info->sessionid,
708 audit_info->secid);
709 return err;
710 }
711 }
712 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
713 hlist_for_each_entry(pol, entry,
714 net->xfrm.policy_bydst[dir].table + i,
715 bydst) {
716 if (pol->type != type)
717 continue;
718 err = security_xfrm_policy_delete(
719 pol->security);
720 if (err) {
721 xfrm_audit_policy_delete(pol, 0,
722 audit_info->loginuid,
723 audit_info->sessionid,
724 audit_info->secid);
725 return err;
726 }
727 }
728 }
729 }
730 return err;
731}
732#else
733static inline int
734xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
735{
736 return 0;
737}
738#endif
739
740int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
741{
742 int dir, err = 0, cnt = 0;
743
744 write_lock_bh(&xfrm_policy_lock);
745
746 err = xfrm_policy_flush_secctx_check(net, type, audit_info);
747 if (err)
748 goto out;
749
750 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
751 struct xfrm_policy *pol;
752 struct hlist_node *entry;
753 int i;
754
755 again1:
756 hlist_for_each_entry(pol, entry,
757 &net->xfrm.policy_inexact[dir], bydst) {
758 if (pol->type != type)
759 continue;
760 __xfrm_policy_unlink(pol, dir);
761 write_unlock_bh(&xfrm_policy_lock);
762 cnt++;
763
764 xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
765 audit_info->sessionid,
766 audit_info->secid);
767
768 xfrm_policy_kill(pol);
769
770 write_lock_bh(&xfrm_policy_lock);
771 goto again1;
772 }
773
774 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
775 again2:
776 hlist_for_each_entry(pol, entry,
777 net->xfrm.policy_bydst[dir].table + i,
778 bydst) {
779 if (pol->type != type)
780 continue;
781 __xfrm_policy_unlink(pol, dir);
782 write_unlock_bh(&xfrm_policy_lock);
783 cnt++;
784
785 xfrm_audit_policy_delete(pol, 1,
786 audit_info->loginuid,
787 audit_info->sessionid,
788 audit_info->secid);
789 xfrm_policy_kill(pol);
790
791 write_lock_bh(&xfrm_policy_lock);
792 goto again2;
793 }
794 }
795
796 }
797 if (!cnt)
798 err = -ESRCH;
799out:
800 write_unlock_bh(&xfrm_policy_lock);
801 return err;
802}
803EXPORT_SYMBOL(xfrm_policy_flush);
804
805int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
806 int (*func)(struct xfrm_policy *, int, int, void*),
807 void *data)
808{
809 struct xfrm_policy *pol;
810 struct xfrm_policy_walk_entry *x;
811 int error = 0;
812
813 if (walk->type >= XFRM_POLICY_TYPE_MAX &&
814 walk->type != XFRM_POLICY_TYPE_ANY)
815 return -EINVAL;
816
817 if (list_empty(&walk->walk.all) && walk->seq != 0)
818 return 0;
819
820 write_lock_bh(&xfrm_policy_lock);
821 if (list_empty(&walk->walk.all))
822 x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
823 else
824 x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
825 list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
826 if (x->dead)
827 continue;
828 pol = container_of(x, struct xfrm_policy, walk);
829 if (walk->type != XFRM_POLICY_TYPE_ANY &&
830 walk->type != pol->type)
831 continue;
832 error = func(pol, xfrm_policy_id2dir(pol->index),
833 walk->seq, data);
834 if (error) {
835 list_move_tail(&walk->walk.all, &x->all);
836 goto out;
837 }
838 walk->seq++;
839 }
840 if (walk->seq == 0) {
841 error = -ENOENT;
842 goto out;
843 }
844 list_del_init(&walk->walk.all);
845out:
846 write_unlock_bh(&xfrm_policy_lock);
847 return error;
848}
849EXPORT_SYMBOL(xfrm_policy_walk);
850
851void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
852{
853 INIT_LIST_HEAD(&walk->walk.all);
854 walk->walk.dead = 1;
855 walk->type = type;
856 walk->seq = 0;
857}
858EXPORT_SYMBOL(xfrm_policy_walk_init);
859
860void xfrm_policy_walk_done(struct xfrm_policy_walk *walk)
861{
862 if (list_empty(&walk->walk.all))
863 return;
864
865 write_lock_bh(&xfrm_policy_lock);
866 list_del(&walk->walk.all);
867 write_unlock_bh(&xfrm_policy_lock);
868}
869EXPORT_SYMBOL(xfrm_policy_walk_done);
870
871/*
872 * Find policy to apply to this flow.
873 *
874 * Returns 0 if policy found, else an -errno.
875 */
876static int xfrm_policy_match(const struct xfrm_policy *pol,
877 const struct flowi *fl,
878 u8 type, u16 family, int dir)
879{
880 const struct xfrm_selector *sel = &pol->selector;
881 int ret = -ESRCH;
882 bool match;
883
884 if (pol->family != family ||
885 (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
886 pol->type != type)
887 return ret;
888
889 match = xfrm_selector_match(sel, fl, family);
890 if (match)
891 ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
892 dir);
893
894 return ret;
895}
896
897static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
898 const struct flowi *fl,
899 u16 family, u8 dir)
900{
901 int err;
902 struct xfrm_policy *pol, *ret;
903 const xfrm_address_t *daddr, *saddr;
904 struct hlist_node *entry;
905 struct hlist_head *chain;
906 u32 priority = ~0U;
907
908 daddr = xfrm_flowi_daddr(fl, family);
909 saddr = xfrm_flowi_saddr(fl, family);
910 if (unlikely(!daddr || !saddr))
911 return NULL;
912
913 read_lock_bh(&xfrm_policy_lock);
914 chain = policy_hash_direct(net, daddr, saddr, family, dir);
915 ret = NULL;
916 hlist_for_each_entry(pol, entry, chain, bydst) {
917 err = xfrm_policy_match(pol, fl, type, family, dir);
918 if (err) {
919 if (err == -ESRCH)
920 continue;
921 else {
922 ret = ERR_PTR(err);
923 goto fail;
924 }
925 } else {
926 ret = pol;
927 priority = ret->priority;
928 break;
929 }
930 }
931 chain = &net->xfrm.policy_inexact[dir];
932 hlist_for_each_entry(pol, entry, chain, bydst) {
933 err = xfrm_policy_match(pol, fl, type, family, dir);
934 if (err) {
935 if (err == -ESRCH)
936 continue;
937 else {
938 ret = ERR_PTR(err);
939 goto fail;
940 }
941 } else if (pol->priority < priority) {
942 ret = pol;
943 break;
944 }
945 }
946 if (ret)
947 xfrm_pol_hold(ret);
948fail:
949 read_unlock_bh(&xfrm_policy_lock);
950
951 return ret;
952}
953
954static struct xfrm_policy *
955__xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
956{
957#ifdef CONFIG_XFRM_SUB_POLICY
958 struct xfrm_policy *pol;
959
960 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
961 if (pol != NULL)
962 return pol;
963#endif
964 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
965}
966
967static struct flow_cache_object *
968xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
969 u8 dir, struct flow_cache_object *old_obj, void *ctx)
970{
971 struct xfrm_policy *pol;
972
973 if (old_obj)
974 xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));
975
976 pol = __xfrm_policy_lookup(net, fl, family, dir);
977 if (IS_ERR_OR_NULL(pol))
978 return ERR_CAST(pol);
979
980 /* Resolver returns two references:
981 * one for cache and one for caller of flow_cache_lookup() */
982 xfrm_pol_hold(pol);
983
984 return &pol->flo;
985}
986
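/* Map an XFRM policy direction to the corresponding flow cache direction;
 * normally the two use the same numeric values.
 */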
987static inline int policy_to_flow_dir(int dir)
988{
989 if (XFRM_POLICY_IN == FLOW_DIR_IN &&
990 XFRM_POLICY_OUT == FLOW_DIR_OUT &&
991 XFRM_POLICY_FWD == FLOW_DIR_FWD)
992 return dir;
993 switch (dir) {
994 default:
995 case XFRM_POLICY_IN:
996 return FLOW_DIR_IN;
997 case XFRM_POLICY_OUT:
998 return FLOW_DIR_OUT;
999 case XFRM_POLICY_FWD:
1000 return FLOW_DIR_FWD;
1001 }
1002}
1003
1004static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
1005 const struct flowi *fl)
1006{
1007 struct xfrm_policy *pol;
1008
1009 read_lock_bh(&xfrm_policy_lock);
1010 if ((pol = sk->sk_policy[dir]) != NULL) {
1011 bool match = xfrm_selector_match(&pol->selector, fl,
1012 sk->sk_family);
1013 int err = 0;
1014
1015 if (match) {
1016 if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
1017 pol = NULL;
1018 goto out;
1019 }
1020 err = security_xfrm_policy_lookup(pol->security,
1021 fl->flowi_secid,
1022 policy_to_flow_dir(dir));
1023 if (!err)
1024 xfrm_pol_hold(pol);
1025 else if (err == -ESRCH)
1026 pol = NULL;
1027 else
1028 pol = ERR_PTR(err);
1029 } else
1030 pol = NULL;
1031 }
1032out:
1033 read_unlock_bh(&xfrm_policy_lock);
1034 return pol;
1035}
1036
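/* Link a policy into the walk list and the bydst/byidx hash tables.
 * The caller must hold xfrm_policy_lock for writing.
 */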
1037static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
1038{
1039 struct net *net = xp_net(pol);
1040 struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
1041 pol->family, dir);
1042
1043 list_add(&pol->walk.all, &net->xfrm.policy_all);
1044 hlist_add_head(&pol->bydst, chain);
1045 hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
1046 net->xfrm.policy_count[dir]++;
1047 xfrm_pol_hold(pol);
1048
1049 if (xfrm_bydst_should_resize(net, dir, NULL))
1050 schedule_work(&net->xfrm.policy_hash_work);
1051}
1052
1053static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
1054 int dir)
1055{
1056 struct net *net = xp_net(pol);
1057
1058 if (hlist_unhashed(&pol->bydst))
1059 return NULL;
1060
1061 hlist_del(&pol->bydst);
1062 hlist_del(&pol->byidx);
1063 list_del(&pol->walk.all);
1064 net->xfrm.policy_count[dir]--;
1065
1066 return pol;
1067}
1068
1069int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
1070{
1071 write_lock_bh(&xfrm_policy_lock);
1072 pol = __xfrm_policy_unlink(pol, dir);
1073 write_unlock_bh(&xfrm_policy_lock);
1074 if (pol) {
1075 xfrm_policy_kill(pol);
1076 return 0;
1077 }
1078 return -ENOENT;
1079}
1080EXPORT_SYMBOL(xfrm_policy_delete);
1081
1082int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1083{
1084 struct net *net = xp_net(pol);
1085 struct xfrm_policy *old_pol;
1086
1087#ifdef CONFIG_XFRM_SUB_POLICY
1088 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
1089 return -EINVAL;
1090#endif
1091
1092 write_lock_bh(&xfrm_policy_lock);
1093 old_pol = sk->sk_policy[dir];
1094 sk->sk_policy[dir] = pol;
1095 if (pol) {
1096 pol->curlft.add_time = get_seconds();
1097 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir);
1098 __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
1099 }
1100 if (old_pol)
1101 /* Unlinking always succeeds. This is the only function
1102 * allowed to delete or replace a socket policy.
1103 */
1104 __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
1105 write_unlock_bh(&xfrm_policy_lock);
1106
1107 if (old_pol) {
1108 xfrm_policy_kill(old_pol);
1109 }
1110 return 0;
1111}
1112
1113static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
1114{
1115 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
1116
1117 if (newp) {
1118 newp->selector = old->selector;
1119 if (security_xfrm_policy_clone(old->security,
1120 &newp->security)) {
1121 kfree(newp);
1122 return NULL; /* ENOMEM */
1123 }
1124 newp->lft = old->lft;
1125 newp->curlft = old->curlft;
1126 newp->mark = old->mark;
1127 newp->action = old->action;
1128 newp->flags = old->flags;
1129 newp->xfrm_nr = old->xfrm_nr;
1130 newp->index = old->index;
1131 newp->type = old->type;
1132 memcpy(newp->xfrm_vec, old->xfrm_vec,
1133 newp->xfrm_nr*sizeof(struct xfrm_tmpl));
1134 write_lock_bh(&xfrm_policy_lock);
1135 __xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
1136 write_unlock_bh(&xfrm_policy_lock);
1137 xfrm_pol_put(newp);
1138 }
1139 return newp;
1140}
1141
1142int __xfrm_sk_clone_policy(struct sock *sk)
1143{
1144 struct xfrm_policy *p0 = sk->sk_policy[0],
1145 *p1 = sk->sk_policy[1];
1146
1147 sk->sk_policy[0] = sk->sk_policy[1] = NULL;
1148 if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
1149 return -ENOMEM;
1150 if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
1151 return -ENOMEM;
1152 return 0;
1153}
1154
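/* Ask the address family for a local source address that can reach
 * 'remote'; used when a tunnel template leaves the source unspecified.
 */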
1155static int
1156xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
1157 unsigned short family)
1158{
1159 int err;
1160 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1161
1162 if (unlikely(afinfo == NULL))
1163 return -EINVAL;
1164 err = afinfo->get_saddr(net, local, remote);
1165 xfrm_policy_put_afinfo(afinfo);
1166 return err;
1167}
1168
1169/* Resolve list of templates for the flow, given policy. */
1170
1171static int
1172xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
1173 struct xfrm_state **xfrm, unsigned short family)
1174{
1175 struct net *net = xp_net(policy);
1176 int nx;
1177 int i, error;
1178 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
1179 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
1180 xfrm_address_t tmp;
1181
1182 for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
1183 struct xfrm_state *x;
1184 xfrm_address_t *remote = daddr;
1185 xfrm_address_t *local = saddr;
1186 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
1187
1188 if (tmpl->mode == XFRM_MODE_TUNNEL ||
1189 tmpl->mode == XFRM_MODE_BEET) {
1190 remote = &tmpl->id.daddr;
1191 local = &tmpl->saddr;
1192 if (xfrm_addr_any(local, tmpl->encap_family)) {
1193 error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
1194 if (error)
1195 goto fail;
1196 local = &tmp;
1197 }
1198 }
1199
1200 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
1201
1202 if (x && x->km.state == XFRM_STATE_VALID) {
1203 xfrm[nx++] = x;
1204 daddr = remote;
1205 saddr = local;
1206 continue;
1207 }
1208 if (x) {
1209 error = (x->km.state == XFRM_STATE_ERROR ?
1210 -EINVAL : -EAGAIN);
1211 xfrm_state_put(x);
1212 }
1213 else if (error == -ESRCH)
1214 error = -EAGAIN;
1215
1216 if (!tmpl->optional)
1217 goto fail;
1218 }
1219 return nx;
1220
1221fail:
1222 for (nx--; nx>=0; nx--)
1223 xfrm_state_put(xfrm[nx]);
1224 return error;
1225}
1226
1227static int
1228xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
1229 struct xfrm_state **xfrm, unsigned short family)
1230{
1231 struct xfrm_state *tp[XFRM_MAX_DEPTH];
1232 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
1233 int cnx = 0;
1234 int error;
1235 int ret;
1236 int i;
1237
1238 for (i = 0; i < npols; i++) {
1239 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
1240 error = -ENOBUFS;
1241 goto fail;
1242 }
1243
1244 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
1245 if (ret < 0) {
1246 error = ret;
1247 goto fail;
1248 } else
1249 cnx += ret;
1250 }
1251
1252 /* found states are sorted for outbound processing */
1253 if (npols > 1)
1254 xfrm_state_sort(xfrm, tpp, cnx, family);
1255
1256 return cnx;
1257
1258 fail:
1259 for (cnx--; cnx>=0; cnx--)
1260 xfrm_state_put(tpp[cnx]);
1261 return error;
1262
1263}
1264
1265/* Check that the bundle accepts the flow and its components are
1266 * still valid.
1267 */
1268
1269static inline int xfrm_get_tos(const struct flowi *fl, int family)
1270{
1271 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1272 int tos;
1273
1274 if (!afinfo)
1275 return -EINVAL;
1276
1277 tos = afinfo->get_tos(fl);
1278
1279 xfrm_policy_put_afinfo(afinfo);
1280
1281 return tos;
1282}
1283
1284static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
1285{
1286 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1287 struct dst_entry *dst = &xdst->u.dst;
1288
1289 if (xdst->route == NULL) {
1290 /* Dummy bundle - if it has xfrms, we were not
1291 * able to build the bundle because template resolution failed.
1292 * It means we need to try resolving again. */
1293 if (xdst->num_xfrms > 0)
1294 return NULL;
1295 } else {
1296 /* Real bundle */
1297 if (stale_bundle(dst))
1298 return NULL;
1299 }
1300
1301 dst_hold(dst);
1302 return flo;
1303}
1304
1305static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
1306{
1307 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1308 struct dst_entry *dst = &xdst->u.dst;
1309
1310 if (!xdst->route)
1311 return 0;
1312 if (stale_bundle(dst))
1313 return 0;
1314
1315 return 1;
1316}
1317
1318static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
1319{
1320 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1321 struct dst_entry *dst = &xdst->u.dst;
1322
1323 dst_free(dst);
1324}
1325
1326static const struct flow_cache_ops xfrm_bundle_fc_ops = {
1327 .get = xfrm_bundle_flo_get,
1328 .check = xfrm_bundle_flo_check,
1329 .delete = xfrm_bundle_flo_delete,
1330};
1331
1332static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1333{
1334 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1335 struct dst_ops *dst_ops;
1336 struct xfrm_dst *xdst;
1337
1338 if (!afinfo)
1339 return ERR_PTR(-EINVAL);
1340
1341 switch (family) {
1342 case AF_INET:
1343 dst_ops = &net->xfrm.xfrm4_dst_ops;
1344 break;
1345#if IS_ENABLED(CONFIG_IPV6)
1346 case AF_INET6:
1347 dst_ops = &net->xfrm.xfrm6_dst_ops;
1348 break;
1349#endif
1350 default:
1351 BUG();
1352 }
1353 xdst = dst_alloc(dst_ops, NULL, 0, 0, 0);
1354
1355 if (likely(xdst)) {
1356 memset(&xdst->u.rt6.rt6i_table, 0,
1357 sizeof(*xdst) - sizeof(struct dst_entry));
1358 xdst->flo.ops = &xfrm_bundle_fc_ops;
1359 } else
1360 xdst = ERR_PTR(-ENOBUFS);
1361
1362 xfrm_policy_put_afinfo(afinfo);
1363
1364 return xdst;
1365}
1366
1367static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
1368 int nfheader_len)
1369{
1370 struct xfrm_policy_afinfo *afinfo =
1371 xfrm_policy_get_afinfo(dst->ops->family);
1372 int err;
1373
1374 if (!afinfo)
1375 return -EINVAL;
1376
1377 err = afinfo->init_path(path, dst, nfheader_len);
1378
1379 xfrm_policy_put_afinfo(afinfo);
1380
1381 return err;
1382}
1383
1384static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
1385 const struct flowi *fl)
1386{
1387 struct xfrm_policy_afinfo *afinfo =
1388 xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
1389 int err;
1390
1391 if (!afinfo)
1392 return -EINVAL;
1393
1394 err = afinfo->fill_dst(xdst, dev, fl);
1395
1396 xfrm_policy_put_afinfo(afinfo);
1397
1398 return err;
1399}
1400
1401
1402/* Allocate chain of dst_entry's, attach known xfrm's, calculate
1403 * all the metrics... In short, bundle a bundle.
1404 */
1405
1406static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1407 struct xfrm_state **xfrm, int nx,
1408 const struct flowi *fl,
1409 struct dst_entry *dst)
1410{
1411 struct net *net = xp_net(policy);
1412 unsigned long now = jiffies;
1413 struct net_device *dev;
1414 struct xfrm_mode *inner_mode;
1415 struct dst_entry *dst_prev = NULL;
1416 struct dst_entry *dst0 = NULL;
1417 int i = 0;
1418 int err;
1419 int header_len = 0;
1420 int nfheader_len = 0;
1421 int trailer_len = 0;
1422 int tos;
1423 int family = policy->selector.family;
1424 xfrm_address_t saddr, daddr;
1425
1426 xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
1427
1428 tos = xfrm_get_tos(fl, family);
1429 err = tos;
1430 if (tos < 0)
1431 goto put_states;
1432
1433 dst_hold(dst);
1434
1435 for (; i < nx; i++) {
1436 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
1437 struct dst_entry *dst1 = &xdst->u.dst;
1438
1439 err = PTR_ERR(xdst);
1440 if (IS_ERR(xdst)) {
1441 dst_release(dst);
1442 goto put_states;
1443 }
1444
1445 if (xfrm[i]->sel.family == AF_UNSPEC) {
1446 inner_mode = xfrm_ip2inner_mode(xfrm[i],
1447 xfrm_af2proto(family));
1448 if (!inner_mode) {
1449 err = -EAFNOSUPPORT;
1450 dst_release(dst);
1451 goto put_states;
1452 }
1453 } else
1454 inner_mode = xfrm[i]->inner_mode;
1455
1456 if (!dst_prev)
1457 dst0 = dst1;
1458 else {
1459 dst_prev->child = dst_clone(dst1);
1460 dst1->flags |= DST_NOHASH;
1461 }
1462
1463 xdst->route = dst;
1464 dst_copy_metrics(dst1, dst);
1465
1466 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
1467 family = xfrm[i]->props.family;
1468 dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
1469 family);
1470 err = PTR_ERR(dst);
1471 if (IS_ERR(dst))
1472 goto put_states;
1473 } else
1474 dst_hold(dst);
1475
1476 dst1->xfrm = xfrm[i];
1477 xdst->xfrm_genid = xfrm[i]->genid;
1478
1479 dst1->obsolete = -1;
1480 dst1->flags |= DST_HOST;
1481 dst1->lastuse = now;
1482
1483 dst1->input = dst_discard;
1484 dst1->output = inner_mode->afinfo->output;
1485
1486 dst1->next = dst_prev;
1487 dst_prev = dst1;
1488
1489 header_len += xfrm[i]->props.header_len;
1490 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
1491 nfheader_len += xfrm[i]->props.header_len;
1492 trailer_len += xfrm[i]->props.trailer_len;
1493 }
1494
1495 dst_prev->child = dst;
1496 dst0->path = dst;
1497
1498 err = -ENODEV;
1499 dev = dst->dev;
1500 if (!dev)
1501 goto free_dst;
1502
1503 /* Copy neighbour for reachability confirmation */
1504 dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour_noref(dst)));
1505
1506 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
1507 xfrm_init_pmtu(dst_prev);
1508
1509 for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
1510 struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
1511
1512 err = xfrm_fill_dst(xdst, dev, fl);
1513 if (err)
1514 goto free_dst;
1515
1516 dst_prev->header_len = header_len;
1517 dst_prev->trailer_len = trailer_len;
1518 header_len -= xdst->u.dst.xfrm->props.header_len;
1519 trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
1520 }
1521
1522out:
1523 return dst0;
1524
1525put_states:
1526 for (; i < nx; i++)
1527 xfrm_state_put(xfrm[i]);
1528free_dst:
1529 if (dst0)
1530 dst_free(dst0);
1531 dst0 = ERR_PTR(err);
1532 goto out;
1533}
1534
1535static inline int
1536xfrm_dst_alloc_copy(void **target, const void *src, int size)
1537{
1538 if (!*target) {
1539 *target = kmalloc(size, GFP_ATOMIC);
1540 if (!*target)
1541 return -ENOMEM;
1542 }
1543 memcpy(*target, src, size);
1544 return 0;
1545}
1546
1547static inline int
1548xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
1549{
1550#ifdef CONFIG_XFRM_SUB_POLICY
1551 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1552 return xfrm_dst_alloc_copy((void **)&(xdst->partner),
1553 sel, sizeof(*sel));
1554#else
1555 return 0;
1556#endif
1557}
1558
1559static inline int
1560xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
1561{
1562#ifdef CONFIG_XFRM_SUB_POLICY
1563 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1564 return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
1565#else
1566 return 0;
1567#endif
1568}
1569
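/* Expand the policy array for a flow: when the first policy found is a
 * sub policy, also look up the corresponding main policy.  On return
 * *num_xfrms holds the total number of templates, or -1 if any policy
 * does not allow the flow.
 */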
1570static int xfrm_expand_policies(const struct flowi *fl, u16 family,
1571 struct xfrm_policy **pols,
1572 int *num_pols, int *num_xfrms)
1573{
1574 int i;
1575
1576 if (*num_pols == 0 || !pols[0]) {
1577 *num_pols = 0;
1578 *num_xfrms = 0;
1579 return 0;
1580 }
1581 if (IS_ERR(pols[0]))
1582 return PTR_ERR(pols[0]);
1583
1584 *num_xfrms = pols[0]->xfrm_nr;
1585
1586#ifdef CONFIG_XFRM_SUB_POLICY
1587 if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
1588 pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
1589 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
1590 XFRM_POLICY_TYPE_MAIN,
1591 fl, family,
1592 XFRM_POLICY_OUT);
1593 if (pols[1]) {
1594 if (IS_ERR(pols[1])) {
1595 xfrm_pols_put(pols, *num_pols);
1596 return PTR_ERR(pols[1]);
1597 }
1598 (*num_pols) ++;
1599 (*num_xfrms) += pols[1]->xfrm_nr;
1600 }
1601 }
1602#endif
1603 for (i = 0; i < *num_pols; i++) {
1604 if (pols[i]->action != XFRM_POLICY_ALLOW) {
1605 *num_xfrms = -1;
1606 break;
1607 }
1608 }
1609
1610 return 0;
1611
1612}
1613
1614static struct xfrm_dst *
1615xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1616 const struct flowi *fl, u16 family,
1617 struct dst_entry *dst_orig)
1618{
1619 struct net *net = xp_net(pols[0]);
1620 struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
1621 struct dst_entry *dst;
1622 struct xfrm_dst *xdst;
1623 int err;
1624
1625 /* Try to instantiate a bundle */
1626 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
1627 if (err <= 0) {
1628 if (err != 0 && err != -EAGAIN)
1629 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1630 return ERR_PTR(err);
1631 }
1632
1633 dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
1634 if (IS_ERR(dst)) {
1635 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
1636 return ERR_CAST(dst);
1637 }
1638
1639 xdst = (struct xfrm_dst *)dst;
1640 xdst->num_xfrms = err;
1641 if (num_pols > 1)
1642 err = xfrm_dst_update_parent(dst, &pols[1]->selector);
1643 else
1644 err = xfrm_dst_update_origin(dst, fl);
1645 if (unlikely(err)) {
1646 dst_free(dst);
1647 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1648 return ERR_PTR(err);
1649 }
1650
1651 xdst->num_pols = num_pols;
1652 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
1653 xdst->policy_genid = atomic_read(&pols[0]->genid);
1654
1655 return xdst;
1656}
1657
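/* Flow cache resolver for output bundles: reuse the cached bundle while
 * its policies are still alive, otherwise look up the policies again and
 * build a new bundle.  When templates cannot be resolved yet, a dummy
 * bundle without a route is cached so that later lookups can retry.
 */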
1658static struct flow_cache_object *
1659xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
1660 struct flow_cache_object *oldflo, void *ctx)
1661{
1662 struct dst_entry *dst_orig = (struct dst_entry *)ctx;
1663 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1664 struct xfrm_dst *xdst, *new_xdst;
1665 int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
1666
1667 /* Check if the policies from old bundle are usable */
1668 xdst = NULL;
1669 if (oldflo) {
1670 xdst = container_of(oldflo, struct xfrm_dst, flo);
1671 num_pols = xdst->num_pols;
1672 num_xfrms = xdst->num_xfrms;
1673 pol_dead = 0;
1674 for (i = 0; i < num_pols; i++) {
1675 pols[i] = xdst->pols[i];
1676 pol_dead |= pols[i]->walk.dead;
1677 }
1678 if (pol_dead) {
1679 dst_free(&xdst->u.dst);
1680 xdst = NULL;
1681 num_pols = 0;
1682 num_xfrms = 0;
1683 oldflo = NULL;
1684 }
1685 }
1686
1687 /* Resolve policies to use if we couldn't get them from
1688 * previous cache entry */
1689 if (xdst == NULL) {
1690 num_pols = 1;
1691 pols[0] = __xfrm_policy_lookup(net, fl, family, dir);
1692 err = xfrm_expand_policies(fl, family, pols,
1693 &num_pols, &num_xfrms);
1694 if (err < 0)
1695 goto inc_error;
1696 if (num_pols == 0)
1697 return NULL;
1698 if (num_xfrms <= 0)
1699 goto make_dummy_bundle;
1700 }
1701
1702 new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
1703 if (IS_ERR(new_xdst)) {
1704 err = PTR_ERR(new_xdst);
1705 if (err != -EAGAIN)
1706 goto error;
1707 if (oldflo == NULL)
1708 goto make_dummy_bundle;
1709 dst_hold(&xdst->u.dst);
1710 return oldflo;
1711 } else if (new_xdst == NULL) {
1712 num_xfrms = 0;
1713 if (oldflo == NULL)
1714 goto make_dummy_bundle;
1715 xdst->num_xfrms = 0;
1716 dst_hold(&xdst->u.dst);
1717 return oldflo;
1718 }
1719
1720 /* Kill the previous bundle */
1721 if (xdst) {
1722 /* The policies were stolen for newly generated bundle */
1723 xdst->num_pols = 0;
1724 dst_free(&xdst->u.dst);
1725 }
1726
1727 /* The flow cache does not hold a reference, it dst_free()'s,
1728 * but we do need to return one reference for the original caller */
1729 dst_hold(&new_xdst->u.dst);
1730 return &new_xdst->flo;
1731
1732make_dummy_bundle:
1733 /* We found policies, but there are no bundles to instantiate:
1734 * either because the policy blocks, has no transformations, or
1735 * we could not build the template (no xfrm_states). */
1736 xdst = xfrm_alloc_dst(net, family);
1737 if (IS_ERR(xdst)) {
1738 xfrm_pols_put(pols, num_pols);
1739 return ERR_CAST(xdst);
1740 }
1741 xdst->num_pols = num_pols;
1742 xdst->num_xfrms = num_xfrms;
1743 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
1744
1745 dst_hold(&xdst->u.dst);
1746 return &xdst->flo;
1747
1748inc_error:
1749 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1750error:
1751 if (xdst != NULL)
1752 dst_free(&xdst->u.dst);
1753 else
1754 xfrm_pols_put(pols, num_pols);
1755 return ERR_PTR(err);
1756}
1757
1758static struct dst_entry *make_blackhole(struct net *net, u16 family,
1759 struct dst_entry *dst_orig)
1760{
1761 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1762 struct dst_entry *ret;
1763
1764 if (!afinfo) {
1765 dst_release(dst_orig);
1766 ret = ERR_PTR(-EINVAL);
1767 } else {
1768 ret = afinfo->blackhole_route(net, dst_orig);
1769 }
1770 xfrm_policy_put_afinfo(afinfo);
1771
1772 return ret;
1773}
1774
1775/* Main function: finds/creates a bundle for given flow.
1776 *
1777 * At the moment we eat a raw IP route. Mostly to speed up lookups
1778 * on interfaces with disabled IPsec.
1779 */
1780struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
1781 const struct flowi *fl,
1782 struct sock *sk, int flags)
1783{
1784 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1785 struct flow_cache_object *flo;
1786 struct xfrm_dst *xdst;
1787 struct dst_entry *dst, *route;
1788 u16 family = dst_orig->ops->family;
1789 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
1790 int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
1791
1792restart:
1793 dst = NULL;
1794 xdst = NULL;
1795 route = NULL;
1796
1797 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
1798 num_pols = 1;
1799 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
1800 err = xfrm_expand_policies(fl, family, pols,
1801 &num_pols, &num_xfrms);
1802 if (err < 0)
1803 goto dropdst;
1804
1805 if (num_pols) {
1806 if (num_xfrms <= 0) {
1807 drop_pols = num_pols;
1808 goto no_transform;
1809 }
1810
1811 xdst = xfrm_resolve_and_create_bundle(
1812 pols, num_pols, fl,
1813 family, dst_orig);
1814 if (IS_ERR(xdst)) {
1815 xfrm_pols_put(pols, num_pols);
1816 err = PTR_ERR(xdst);
1817 goto dropdst;
1818 } else if (xdst == NULL) {
1819 num_xfrms = 0;
1820 drop_pols = num_pols;
1821 goto no_transform;
1822 }
1823
1824 dst_hold(&xdst->u.dst);
1825
1826 spin_lock_bh(&xfrm_policy_sk_bundle_lock);
1827 xdst->u.dst.next = xfrm_policy_sk_bundles;
1828 xfrm_policy_sk_bundles = &xdst->u.dst;
1829 spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
1830
1831 route = xdst->route;
1832 }
1833 }
1834
1835 if (xdst == NULL) {
1836 /* To accelerate a bit... */
1837 if ((dst_orig->flags & DST_NOXFRM) ||
1838 !net->xfrm.policy_count[XFRM_POLICY_OUT])
1839 goto nopol;
1840
1841 flo = flow_cache_lookup(net, fl, family, dir,
1842 xfrm_bundle_lookup, dst_orig);
1843 if (flo == NULL)
1844 goto nopol;
1845 if (IS_ERR(flo)) {
1846 err = PTR_ERR(flo);
1847 goto dropdst;
1848 }
1849 xdst = container_of(flo, struct xfrm_dst, flo);
1850
1851 num_pols = xdst->num_pols;
1852 num_xfrms = xdst->num_xfrms;
1853 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy*) * num_pols);
1854 route = xdst->route;
1855 }
1856
1857 dst = &xdst->u.dst;
1858 if (route == NULL && num_xfrms > 0) {
1859 /* The only case when xfrm_bundle_lookup() returns a
1860 * bundle with a null route is when the template could
1861 * not be resolved. It means the policies are there, but the
1862 * bundle could not be created, since we don't yet
1863 * have the xfrm_state's. We need to wait for the KM to
1864 * negotiate new SAs or bail out with an error. */
1865 if (net->xfrm.sysctl_larval_drop) {
1866 /* Larval drop is enabled: hand the caller a
1867 * one-shot blackhole route instead of waiting. */
1868 dst_release(dst);
1869 xfrm_pols_put(pols, drop_pols);
1870 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
1871
1872 return make_blackhole(net, family, dst_orig);
1873 }
1874 if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) {
1875 DECLARE_WAITQUEUE(wait, current);
1876
1877 add_wait_queue(&net->xfrm.km_waitq, &wait);
1878 set_current_state(TASK_INTERRUPTIBLE);
1879 schedule();
1880 set_current_state(TASK_RUNNING);
1881 remove_wait_queue(&net->xfrm.km_waitq, &wait);
1882
1883 if (!signal_pending(current)) {
1884 dst_release(dst);
1885 goto restart;
1886 }
1887
1888 err = -ERESTART;
1889 } else
1890 err = -EAGAIN;
1891
1892 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
1893 goto error;
1894 }
1895
1896no_transform:
1897 if (num_pols == 0)
1898 goto nopol;
1899
1900 if ((flags & XFRM_LOOKUP_ICMP) &&
1901 !(pols[0]->flags & XFRM_POLICY_ICMP)) {
1902 err = -ENOENT;
1903 goto error;
1904 }
1905
1906 for (i = 0; i < num_pols; i++)
1907 pols[i]->curlft.use_time = get_seconds();
1908
1909 if (num_xfrms < 0) {
1910 /* Prohibit the flow */
1911 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
1912 err = -EPERM;
1913 goto error;
1914 } else if (num_xfrms > 0) {
1915 /* Flow transformed */
1916 dst_release(dst_orig);
1917 } else {
1918 /* Flow passes untransformed */
1919 dst_release(dst);
1920 dst = dst_orig;
1921 }
1922ok:
1923 xfrm_pols_put(pols, drop_pols);
1924 if (dst && dst->xfrm &&
1925 dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
1926 dst->flags |= DST_XFRM_TUNNEL;
1927 return dst;
1928
1929nopol:
1930 if (!(flags & XFRM_LOOKUP_ICMP)) {
1931 dst = dst_orig;
1932 goto ok;
1933 }
1934 err = -ENOENT;
1935error:
1936 dst_release(dst);
1937dropdst:
1938 dst_release(dst_orig);
1939 xfrm_pols_put(pols, drop_pols);
1940 return ERR_PTR(err);
1941}
1942EXPORT_SYMBOL(xfrm_lookup);
1943
1944static inline int
1945xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
1946{
1947 struct xfrm_state *x;
1948
1949 if (!skb->sp || idx < 0 || idx >= skb->sp->len)
1950 return 0;
1951 x = skb->sp->xvec[idx];
1952 if (!x->type->reject)
1953 return 0;
1954 return x->type->reject(x, skb, fl);
1955}
1956
1957/* When skb is transformed back to its "native" form, we have to
1958 * check policy restrictions. At the moment we do this in a maximally
1959 * stupid way. Shame on me. :-) Of course, connected sockets must
1960 * have the policy cached on them.
1961 */
1962
1963static inline int
1964xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
1965 unsigned short family)
1966{
1967 if (xfrm_state_kern(x))
1968 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
1969 return x->id.proto == tmpl->id.proto &&
1970 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
1971 (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
1972 x->props.mode == tmpl->mode &&
1973 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
1974 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
1975 !(x->props.mode != XFRM_MODE_TRANSPORT &&
1976 xfrm_state_addr_cmp(tmpl, x, family));
1977}
1978
1979/*
1980 * 0 or a positive value is returned when validation succeeds (either a bypass
1981 * because of an optional transport mode, or the next index of the secpath
1982 * state matched against the template).
1983 * -1 is returned when no matching template is found.
1984 * Otherwise "-2 - errored_index" is returned.
1985 */
1986static inline int
1987xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
1988 unsigned short family)
1989{
1990 int idx = start;
1991
1992 if (tmpl->optional) {
1993 if (tmpl->mode == XFRM_MODE_TRANSPORT)
1994 return start;
1995 } else
1996 start = -1;
1997 for (; idx < sp->len; idx++) {
1998 if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
1999 return ++idx;
2000 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
2001 if (start == -1)
2002 start = -2-idx;
2003 break;
2004 }
2005 }
2006 return start;
2007}
2008
2009int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
2010 unsigned int family, int reverse)
2011{
2012 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2013 int err;
2014
2015 if (unlikely(afinfo == NULL))
2016 return -EAFNOSUPPORT;
2017
2018 afinfo->decode_session(skb, fl, reverse);
2019 err = security_xfrm_decode_session(skb, &fl->flowi_secid);
2020 xfrm_policy_put_afinfo(afinfo);
2021 return err;
2022}
2023EXPORT_SYMBOL(__xfrm_decode_session);
2024
2025static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
2026{
2027 for (; k < sp->len; k++) {
2028 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
2029 *idxp = k;
2030 return 1;
2031 }
2032 }
2033
2034 return 0;
2035}
2036
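/* Inbound policy check: decode the flow from the skb, look up the
 * applicable policies and verify that the states on the secpath satisfy
 * their templates.  Returns 1 if the packet may pass, 0 otherwise.
 */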
2037int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2038 unsigned short family)
2039{
2040 struct net *net = dev_net(skb->dev);
2041 struct xfrm_policy *pol;
2042 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2043 int npols = 0;
2044 int xfrm_nr;
2045 int pi;
2046 int reverse;
2047 struct flowi fl;
2048 u8 fl_dir;
2049 int xerr_idx = -1;
2050
2051 reverse = dir & ~XFRM_POLICY_MASK;
2052 dir &= XFRM_POLICY_MASK;
2053 fl_dir = policy_to_flow_dir(dir);
2054
2055 if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
2056 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
2057 return 0;
2058 }
2059
2060 nf_nat_decode_session(skb, &fl, family);
2061
2062 /* First, check used SA against their selectors. */
2063 if (skb->sp) {
2064 int i;
2065
2066 for (i=skb->sp->len-1; i>=0; i--) {
2067 struct xfrm_state *x = skb->sp->xvec[i];
2068 if (!xfrm_selector_match(&x->sel, &fl, family)) {
2069 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
2070 return 0;
2071 }
2072 }
2073 }
2074
2075 pol = NULL;
2076 if (sk && sk->sk_policy[dir]) {
2077 pol = xfrm_sk_policy_lookup(sk, dir, &fl);
2078 if (IS_ERR(pol)) {
2079 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2080 return 0;
2081 }
2082 }
2083
2084 if (!pol) {
2085 struct flow_cache_object *flo;
2086
2087 flo = flow_cache_lookup(net, &fl, family, fl_dir,
2088 xfrm_policy_lookup, NULL);
2089 if (IS_ERR_OR_NULL(flo))
2090 pol = ERR_CAST(flo);
2091 else
2092 pol = container_of(flo, struct xfrm_policy, flo);
2093 }
2094
2095 if (IS_ERR(pol)) {
2096 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2097 return 0;
2098 }
2099
2100 if (!pol) {
2101 if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
2102 xfrm_secpath_reject(xerr_idx, skb, &fl);
2103 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
2104 return 0;
2105 }
2106 return 1;
2107 }
2108
2109 pol->curlft.use_time = get_seconds();
2110
2111 pols[0] = pol;
2112 npols ++;
2113#ifdef CONFIG_XFRM_SUB_POLICY
2114 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2115 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
2116 &fl, family,
2117 XFRM_POLICY_IN);
2118 if (pols[1]) {
2119 if (IS_ERR(pols[1])) {
2120 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2121 return 0;
2122 }
2123 pols[1]->curlft.use_time = get_seconds();
2124 			npols++;
2125 }
2126 }
2127#endif
2128
2129 if (pol->action == XFRM_POLICY_ALLOW) {
2130 struct sec_path *sp;
2131 static struct sec_path dummy;
2132 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
2133 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
2134 struct xfrm_tmpl **tpp = tp;
2135 int ti = 0;
2136 int i, k;
2137
2138 if ((sp = skb->sp) == NULL)
2139 sp = &dummy;
2140
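		/* Collect the templates of every matched policy into one
		 * array; with more than one policy they must be sorted so
		 * the comparison against the secpath below is well defined.
		 */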
2141 for (pi = 0; pi < npols; pi++) {
2142 if (pols[pi] != pol &&
2143 pols[pi]->action != XFRM_POLICY_ALLOW) {
2144 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2145 goto reject;
2146 }
2147 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
2148 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
2149 goto reject_error;
2150 }
2151 for (i = 0; i < pols[pi]->xfrm_nr; i++)
2152 tpp[ti++] = &pols[pi]->xfrm_vec[i];
2153 }
2154 xfrm_nr = ti;
2155 if (npols > 1) {
2156 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
2157 tpp = stp;
2158 }
2159
2160 /* For each tunnel xfrm, find the first matching tmpl.
2161 * For each tmpl before that, find corresponding xfrm.
2162 		 * Order is _important_.  Explicit barriers may be added later;
2163 		 * for now a barrier is implied between every two
2164 		 * transformations.
2165 */
2166 for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
2167 k = xfrm_policy_ok(tpp[i], sp, k, family);
2168 if (k < 0) {
2169 if (k < -1)
2170 /* "-2 - errored_index" returned */
2171 xerr_idx = -(2+k);
2172 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2173 goto reject;
2174 }
2175 }
2176
2177 if (secpath_has_nontransport(sp, k, &xerr_idx)) {
2178 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2179 goto reject;
2180 }
2181
2182 xfrm_pols_put(pols, npols);
2183 return 1;
2184 }
2185 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2186
2187reject:
2188 xfrm_secpath_reject(xerr_idx, skb, &fl);
2189reject_error:
2190 xfrm_pols_put(pols, npols);
2191 return 0;
2192}
2193EXPORT_SYMBOL(__xfrm_policy_check);
2194
2195int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2196{
2197 struct net *net = dev_net(skb->dev);
2198 struct flowi fl;
2199 struct dst_entry *dst;
2200 int res = 1;
2201
2202 if (xfrm_decode_session(skb, &fl, family) < 0) {
2203 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2204 return 0;
2205 }
2206
2207 skb_dst_force(skb);
2208
2209 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
2210 if (IS_ERR(dst)) {
2211 res = 0;
2212 dst = NULL;
2213 }
2214 skb_dst_set(skb, dst);
2215 return res;
2216}
2217EXPORT_SYMBOL(__xfrm_route_forward);
2218
2219/* Optimize later using cookies and generation ids. */
2220
2221static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
2222{
2223 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
2224 * to "-1" to force all XFRM destinations to get validated by
2225 * dst_ops->check on every use. We do this because when a
2226 * normal route referenced by an XFRM dst is obsoleted we do
2227 * not go looking around for all parent referencing XFRM dsts
2228 * so that we can invalidate them. It is just too much work.
2229 * Instead we make the checks here on every use. For example:
2230 *
2231 * XFRM dst A --> IPv4 dst X
2232 *
2233 * X is the "xdst->route" of A (X is also the "dst->path" of A
2234 * in this example). If X is marked obsolete, "A" will not
2235 * notice. That's what we are validating here via the
2236 * stale_bundle() check.
2237 *
2238 * When a policy's bundle is pruned, we dst_free() the XFRM
2239 	 * dst which causes its ->obsolete field to be set to a
2240 * positive non-zero integer. If an XFRM dst has been pruned
2241 * like this, we want to force a new route lookup.
2242 */
2243 if (dst->obsolete < 0 && !stale_bundle(dst))
2244 return dst;
2245
2246 return NULL;
2247}
2248
2249static int stale_bundle(struct dst_entry *dst)
2250{
2251 return !xfrm_bundle_ok((struct xfrm_dst *)dst);
2252}
2253
2254void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
2255{
2256 while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
2257 dst->dev = dev_net(dev)->loopback_dev;
2258 dev_hold(dst->dev);
2259 dev_put(dev);
2260 }
2261}
2262EXPORT_SYMBOL(xfrm_dst_ifdown);
2263
2264static void xfrm_link_failure(struct sk_buff *skb)
2265{
2266 	/* Impossible. Such dst must be popped before it reaches the point of failure. */
2267}
2268
2269static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
2270{
2271 if (dst) {
2272 if (dst->obsolete) {
2273 dst_release(dst);
2274 dst = NULL;
2275 }
2276 }
2277 return dst;
2278}
2279
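/* Reap the per-socket bundles queued on xfrm_policy_sk_bundles.  Callers
 * flush the flow cache first so that no new references to them can appear.
 */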
2280static void __xfrm_garbage_collect(struct net *net)
2281{
2282 struct dst_entry *head, *next;
2283
2284 spin_lock_bh(&xfrm_policy_sk_bundle_lock);
2285 head = xfrm_policy_sk_bundles;
2286 xfrm_policy_sk_bundles = NULL;
2287 spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
2288
2289 while (head) {
2290 next = head->next;
2291 dst_free(head);
2292 head = next;
2293 }
2294}
2295
2296static void xfrm_garbage_collect(struct net *net)
2297{
2298 flow_cache_flush();
2299 __xfrm_garbage_collect(net);
2300}
2301
2302static void xfrm_garbage_collect_deferred(struct net *net)
2303{
2304 flow_cache_flush_deferred();
2305 __xfrm_garbage_collect(net);
2306}
2307
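/* For each xfrm_dst in a freshly built bundle, cache the child and route
 * MTUs and set RTAX_MTU to the smaller of the (state-adjusted) child MTU
 * and the route MTU.
 */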
2308static void xfrm_init_pmtu(struct dst_entry *dst)
2309{
2310 do {
2311 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2312 u32 pmtu, route_mtu_cached;
2313
2314 pmtu = dst_mtu(dst->child);
2315 xdst->child_mtu_cached = pmtu;
2316
2317 pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
2318
2319 route_mtu_cached = dst_mtu(xdst->route);
2320 xdst->route_mtu_cached = route_mtu_cached;
2321
2322 if (pmtu > route_mtu_cached)
2323 pmtu = route_mtu_cached;
2324
2325 dst_metric_set(dst, RTAX_MTU, pmtu);
2326 } while ((dst = dst->next));
2327}
2328
2329/* Check that the bundle accepts the flow and its components are
2330 * still valid.
2331 */
2332
2333static int xfrm_bundle_ok(struct xfrm_dst *first)
2334{
2335 struct dst_entry *dst = &first->u.dst;
2336 struct xfrm_dst *last;
2337 u32 mtu;
2338
2339 if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
2340 (dst->dev && !netif_running(dst->dev)))
2341 return 0;
2342
2343 last = NULL;
2344
2345 do {
2346 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2347
2348 if (dst->xfrm->km.state != XFRM_STATE_VALID)
2349 return 0;
2350 if (xdst->xfrm_genid != dst->xfrm->genid)
2351 return 0;
2352 if (xdst->num_pols > 0 &&
2353 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
2354 return 0;
2355
2356 mtu = dst_mtu(dst->child);
2357 if (xdst->child_mtu_cached != mtu) {
2358 last = xdst;
2359 xdst->child_mtu_cached = mtu;
2360 }
2361
2362 if (!dst_check(xdst->route, xdst->route_cookie))
2363 return 0;
2364 mtu = dst_mtu(xdst->route);
2365 if (xdst->route_mtu_cached != mtu) {
2366 last = xdst;
2367 xdst->route_mtu_cached = mtu;
2368 }
2369
2370 dst = dst->child;
2371 } while (dst->xfrm);
2372
2373 if (likely(!last))
2374 return 1;
2375
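	/* One of the cached MTUs changed: recompute the MTU for the affected
	 * part of the bundle, clamping each dst's RTAX_MTU to its state and
	 * route limits.
	 */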
2376 mtu = last->child_mtu_cached;
2377 for (;;) {
2378 dst = &last->u.dst;
2379
2380 mtu = xfrm_state_mtu(dst->xfrm, mtu);
2381 if (mtu > last->route_mtu_cached)
2382 mtu = last->route_mtu_cached;
2383 dst_metric_set(dst, RTAX_MTU, mtu);
2384
2385 if (last == first)
2386 break;
2387
2388 last = (struct xfrm_dst *)last->u.dst.next;
2389 last->child_mtu_cached = mtu;
2390 }
2391
2392 return 1;
2393}
2394
2395static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
2396{
2397 return dst_metric_advmss(dst->path);
2398}
2399
2400static unsigned int xfrm_mtu(const struct dst_entry *dst)
2401{
2402 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2403
2404 return mtu ? : dst_mtu(dst->path);
2405}
2406
2407static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, const void *daddr)
2408{
2409 return dst_neigh_lookup(dst->path, daddr);
2410}
2411
2412int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2413{
2414 struct net *net;
2415 int err = 0;
2416 if (unlikely(afinfo == NULL))
2417 return -EINVAL;
2418 if (unlikely(afinfo->family >= NPROTO))
2419 return -EAFNOSUPPORT;
2420 write_lock_bh(&xfrm_policy_afinfo_lock);
2421 if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
2422 err = -ENOBUFS;
2423 else {
2424 struct dst_ops *dst_ops = afinfo->dst_ops;
2425 if (likely(dst_ops->kmem_cachep == NULL))
2426 dst_ops->kmem_cachep = xfrm_dst_cache;
2427 if (likely(dst_ops->check == NULL))
2428 dst_ops->check = xfrm_dst_check;
2429 if (likely(dst_ops->default_advmss == NULL))
2430 dst_ops->default_advmss = xfrm_default_advmss;
2431 if (likely(dst_ops->mtu == NULL))
2432 dst_ops->mtu = xfrm_mtu;
2433 if (likely(dst_ops->negative_advice == NULL))
2434 dst_ops->negative_advice = xfrm_negative_advice;
2435 if (likely(dst_ops->link_failure == NULL))
2436 dst_ops->link_failure = xfrm_link_failure;
2437 if (likely(dst_ops->neigh_lookup == NULL))
2438 dst_ops->neigh_lookup = xfrm_neigh_lookup;
2439 if (likely(afinfo->garbage_collect == NULL))
2440 afinfo->garbage_collect = xfrm_garbage_collect_deferred;
2441 xfrm_policy_afinfo[afinfo->family] = afinfo;
2442 }
2443 write_unlock_bh(&xfrm_policy_afinfo_lock);
2444
2445 rtnl_lock();
2446 for_each_net(net) {
2447 struct dst_ops *xfrm_dst_ops;
2448
2449 switch (afinfo->family) {
2450 case AF_INET:
2451 xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
2452 break;
2453#if IS_ENABLED(CONFIG_IPV6)
2454 case AF_INET6:
2455 xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
2456 break;
2457#endif
2458 default:
2459 BUG();
2460 }
2461 *xfrm_dst_ops = *afinfo->dst_ops;
2462 }
2463 rtnl_unlock();
2464
2465 return err;
2466}
2467EXPORT_SYMBOL(xfrm_policy_register_afinfo);
2468
2469int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
2470{
2471 int err = 0;
2472 if (unlikely(afinfo == NULL))
2473 return -EINVAL;
2474 if (unlikely(afinfo->family >= NPROTO))
2475 return -EAFNOSUPPORT;
2476 write_lock_bh(&xfrm_policy_afinfo_lock);
2477 if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
2478 if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
2479 err = -EINVAL;
2480 else {
2481 struct dst_ops *dst_ops = afinfo->dst_ops;
2482 xfrm_policy_afinfo[afinfo->family] = NULL;
2483 dst_ops->kmem_cachep = NULL;
2484 dst_ops->check = NULL;
2485 dst_ops->negative_advice = NULL;
2486 dst_ops->link_failure = NULL;
2487 afinfo->garbage_collect = NULL;
2488 }
2489 }
2490 write_unlock_bh(&xfrm_policy_afinfo_lock);
2491 return err;
2492}
2493EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
2494
2495static void __net_init xfrm_dst_ops_init(struct net *net)
2496{
2497 struct xfrm_policy_afinfo *afinfo;
2498
2499 read_lock_bh(&xfrm_policy_afinfo_lock);
2500 afinfo = xfrm_policy_afinfo[AF_INET];
2501 if (afinfo)
2502 net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
2503#if IS_ENABLED(CONFIG_IPV6)
2504 afinfo = xfrm_policy_afinfo[AF_INET6];
2505 if (afinfo)
2506 net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
2507#endif
2508 read_unlock_bh(&xfrm_policy_afinfo_lock);
2509}
2510
2511static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
2512{
2513 struct xfrm_policy_afinfo *afinfo;
2514 if (unlikely(family >= NPROTO))
2515 return NULL;
2516 read_lock(&xfrm_policy_afinfo_lock);
2517 afinfo = xfrm_policy_afinfo[family];
2518 if (unlikely(!afinfo))
2519 read_unlock(&xfrm_policy_afinfo_lock);
2520 return afinfo;
2521}
2522
2523static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
2524{
2525 read_unlock(&xfrm_policy_afinfo_lock);
2526}
2527
2528static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
2529{
2530 struct net_device *dev = ptr;
2531
2532 switch (event) {
2533 case NETDEV_DOWN:
2534 xfrm_garbage_collect(dev_net(dev));
2535 }
2536 return NOTIFY_DONE;
2537}
2538
2539static struct notifier_block xfrm_dev_notifier = {
2540 .notifier_call = xfrm_dev_event,
2541};
2542
2543#ifdef CONFIG_XFRM_STATISTICS
2544static int __net_init xfrm_statistics_init(struct net *net)
2545{
2546 int rv;
2547
2548 if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
2549 sizeof(struct linux_xfrm_mib),
2550 __alignof__(struct linux_xfrm_mib)) < 0)
2551 return -ENOMEM;
2552 rv = xfrm_proc_init(net);
2553 if (rv < 0)
2554 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
2555 return rv;
2556}
2557
2558static void xfrm_statistics_fini(struct net *net)
2559{
2560 xfrm_proc_fini(net);
2561 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
2562}
2563#else
2564static int __net_init xfrm_statistics_init(struct net *net)
2565{
2566 return 0;
2567}
2568
2569static void xfrm_statistics_fini(struct net *net)
2570{
2571}
2572#endif
2573
2574static int __net_init xfrm_policy_init(struct net *net)
2575{
2576 unsigned int hmask, sz;
2577 int dir;
2578
2579 if (net_eq(net, &init_net))
2580 xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
2581 sizeof(struct xfrm_dst),
2582 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2583 NULL);
2584
2585 hmask = 8 - 1;
2586 sz = (hmask+1) * sizeof(struct hlist_head);
2587
2588 net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
2589 if (!net->xfrm.policy_byidx)
2590 goto out_byidx;
2591 net->xfrm.policy_idx_hmask = hmask;
2592
2593 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2594 struct xfrm_policy_hash *htab;
2595
2596 net->xfrm.policy_count[dir] = 0;
2597 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
2598
2599 htab = &net->xfrm.policy_bydst[dir];
2600 htab->table = xfrm_hash_alloc(sz);
2601 if (!htab->table)
2602 goto out_bydst;
2603 htab->hmask = hmask;
2604 }
2605
2606 INIT_LIST_HEAD(&net->xfrm.policy_all);
2607 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
2608 if (net_eq(net, &init_net))
2609 register_netdevice_notifier(&xfrm_dev_notifier);
2610 return 0;
2611
2612out_bydst:
2613 for (dir--; dir >= 0; dir--) {
2614 struct xfrm_policy_hash *htab;
2615
2616 htab = &net->xfrm.policy_bydst[dir];
2617 xfrm_hash_free(htab->table, sz);
2618 }
2619 xfrm_hash_free(net->xfrm.policy_byidx, sz);
2620out_byidx:
2621 return -ENOMEM;
2622}
2623
2624static void xfrm_policy_fini(struct net *net)
2625{
2626 struct xfrm_audit audit_info;
2627 unsigned int sz;
2628 int dir;
2629
2630 flush_work(&net->xfrm.policy_hash_work);
2631#ifdef CONFIG_XFRM_SUB_POLICY
2632 audit_info.loginuid = -1;
2633 audit_info.sessionid = -1;
2634 audit_info.secid = 0;
2635 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
2636#endif
2637 audit_info.loginuid = -1;
2638 audit_info.sessionid = -1;
2639 audit_info.secid = 0;
2640 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
2641
2642 WARN_ON(!list_empty(&net->xfrm.policy_all));
2643
2644 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2645 struct xfrm_policy_hash *htab;
2646
2647 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
2648
2649 htab = &net->xfrm.policy_bydst[dir];
2650 		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
2651 WARN_ON(!hlist_empty(htab->table));
2652 xfrm_hash_free(htab->table, sz);
2653 }
2654
2655 sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
2656 WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
2657 xfrm_hash_free(net->xfrm.policy_byidx, sz);
2658}
2659
2660static int __net_init xfrm_net_init(struct net *net)
2661{
2662 int rv;
2663
2664 rv = xfrm_statistics_init(net);
2665 if (rv < 0)
2666 goto out_statistics;
2667 rv = xfrm_state_init(net);
2668 if (rv < 0)
2669 goto out_state;
2670 rv = xfrm_policy_init(net);
2671 if (rv < 0)
2672 goto out_policy;
2673 xfrm_dst_ops_init(net);
2674 rv = xfrm_sysctl_init(net);
2675 if (rv < 0)
2676 goto out_sysctl;
2677 return 0;
2678
2679out_sysctl:
2680 xfrm_policy_fini(net);
2681out_policy:
2682 xfrm_state_fini(net);
2683out_state:
2684 xfrm_statistics_fini(net);
2685out_statistics:
2686 return rv;
2687}
2688
2689static void __net_exit xfrm_net_exit(struct net *net)
2690{
2691 xfrm_sysctl_fini(net);
2692 xfrm_policy_fini(net);
2693 xfrm_state_fini(net);
2694 xfrm_statistics_fini(net);
2695}
2696
2697static struct pernet_operations __net_initdata xfrm_net_ops = {
2698 .init = xfrm_net_init,
2699 .exit = xfrm_net_exit,
2700};
2701
2702void __init xfrm_init(void)
2703{
2704 register_pernet_subsys(&xfrm_net_ops);
2705 xfrm_input_init();
2706}
2707
2708#ifdef CONFIG_AUDITSYSCALL
2709static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
2710 struct audit_buffer *audit_buf)
2711{
2712 struct xfrm_sec_ctx *ctx = xp->security;
2713 struct xfrm_selector *sel = &xp->selector;
2714
2715 if (ctx)
2716 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2717 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2718
2719 	switch (sel->family) {
2720 case AF_INET:
2721 audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
2722 if (sel->prefixlen_s != 32)
2723 audit_log_format(audit_buf, " src_prefixlen=%d",
2724 sel->prefixlen_s);
2725 audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
2726 if (sel->prefixlen_d != 32)
2727 audit_log_format(audit_buf, " dst_prefixlen=%d",
2728 sel->prefixlen_d);
2729 break;
2730 case AF_INET6:
2731 audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
2732 if (sel->prefixlen_s != 128)
2733 audit_log_format(audit_buf, " src_prefixlen=%d",
2734 sel->prefixlen_s);
2735 audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
2736 if (sel->prefixlen_d != 128)
2737 audit_log_format(audit_buf, " dst_prefixlen=%d",
2738 sel->prefixlen_d);
2739 break;
2740 }
2741}
2742
2743void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
2744 uid_t auid, u32 sessionid, u32 secid)
2745{
2746 struct audit_buffer *audit_buf;
2747
2748 audit_buf = xfrm_audit_start("SPD-add");
2749 if (audit_buf == NULL)
2750 return;
2751 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2752 audit_log_format(audit_buf, " res=%u", result);
2753 xfrm_audit_common_policyinfo(xp, audit_buf);
2754 audit_log_end(audit_buf);
2755}
2756EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
2757
2758void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
2759 uid_t auid, u32 sessionid, u32 secid)
2760{
2761 struct audit_buffer *audit_buf;
2762
2763 audit_buf = xfrm_audit_start("SPD-delete");
2764 if (audit_buf == NULL)
2765 return;
2766 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2767 audit_log_format(audit_buf, " res=%u", result);
2768 xfrm_audit_common_policyinfo(xp, audit_buf);
2769 audit_log_end(audit_buf);
2770}
2771EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
2772#endif
2773
2774#ifdef CONFIG_XFRM_MIGRATE
2775static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
2776 const struct xfrm_selector *sel_tgt)
2777{
2778 if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
2779 if (sel_tgt->family == sel_cmp->family &&
2780 xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
2781 sel_cmp->family) == 0 &&
2782 xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
2783 sel_cmp->family) == 0 &&
2784 sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
2785 sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
2786 return true;
2787 }
2788 } else {
2789 if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
2790 return true;
2791 }
2792 }
2793 return false;
2794}
2795
2796static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector *sel,
2797 u8 dir, u8 type)
2798{
2799 struct xfrm_policy *pol, *ret = NULL;
2800 struct hlist_node *entry;
2801 struct hlist_head *chain;
2802 u32 priority = ~0U;
2803
2804 read_lock_bh(&xfrm_policy_lock);
2805 chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
2806 hlist_for_each_entry(pol, entry, chain, bydst) {
2807 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
2808 pol->type == type) {
2809 ret = pol;
2810 priority = ret->priority;
2811 break;
2812 }
2813 }
2814 chain = &init_net.xfrm.policy_inexact[dir];
2815 hlist_for_each_entry(pol, entry, chain, bydst) {
2816 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
2817 pol->type == type &&
2818 pol->priority < priority) {
2819 ret = pol;
2820 break;
2821 }
2822 }
2823
2824 if (ret)
2825 xfrm_pol_hold(ret);
2826
2827 read_unlock_bh(&xfrm_policy_lock);
2828
2829 return ret;
2830}
2831
2832static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
2833{
2834 int match = 0;
2835
2836 if (t->mode == m->mode && t->id.proto == m->proto &&
2837 (m->reqid == 0 || t->reqid == m->reqid)) {
2838 switch (t->mode) {
2839 case XFRM_MODE_TUNNEL:
2840 case XFRM_MODE_BEET:
2841 if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
2842 m->old_family) == 0 &&
2843 xfrm_addr_cmp(&t->saddr, &m->old_saddr,
2844 m->old_family) == 0) {
2845 match = 1;
2846 }
2847 break;
2848 case XFRM_MODE_TRANSPORT:
2849 		/* In transport mode the template carries no
2850 		   IP addresses, so comparing mode and protocol
2851 		   is sufficient. */
2852 match = 1;
2853 break;
2854 default:
2855 break;
2856 }
2857 }
2858 return match;
2859}
2860
2861/* update endpoint address(es) of template(s) */
2862static int xfrm_policy_migrate(struct xfrm_policy *pol,
2863 struct xfrm_migrate *m, int num_migrate)
2864{
2865 struct xfrm_migrate *mp;
2866 int i, j, n = 0;
2867
2868 write_lock_bh(&pol->lock);
2869 if (unlikely(pol->walk.dead)) {
2870 /* target policy has been deleted */
2871 write_unlock_bh(&pol->lock);
2872 return -ENOENT;
2873 }
2874
2875 for (i = 0; i < pol->xfrm_nr; i++) {
2876 for (j = 0, mp = m; j < num_migrate; j++, mp++) {
2877 if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
2878 continue;
2879 n++;
2880 if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
2881 pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
2882 continue;
2883 /* update endpoints */
2884 memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
2885 sizeof(pol->xfrm_vec[i].id.daddr));
2886 memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
2887 sizeof(pol->xfrm_vec[i].saddr));
2888 pol->xfrm_vec[i].encap_family = mp->new_family;
2889 /* flush bundles */
2890 atomic_inc(&pol->genid);
2891 }
2892 }
2893
2894 write_unlock_bh(&pol->lock);
2895
2896 if (!n)
2897 return -ENODATA;
2898
2899 return 0;
2900}
2901
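/* Sanity-check a migration request: at least one endpoint address of each
 * entry must actually change, the new addresses may not be unspecified, and
 * no two entries may describe the same old SA.
 */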
2902static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
2903{
2904 int i, j;
2905
2906 if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
2907 return -EINVAL;
2908
2909 for (i = 0; i < num_migrate; i++) {
2910 if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
2911 m[i].old_family) == 0) &&
2912 (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
2913 m[i].old_family) == 0))
2914 return -EINVAL;
2915 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
2916 xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
2917 return -EINVAL;
2918
2919 /* check if there is any duplicated entry */
2920 for (j = i + 1; j < num_migrate; j++) {
2921 if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
2922 sizeof(m[i].old_daddr)) &&
2923 !memcmp(&m[i].old_saddr, &m[j].old_saddr,
2924 sizeof(m[i].old_saddr)) &&
2925 m[i].proto == m[j].proto &&
2926 m[i].mode == m[j].mode &&
2927 m[i].reqid == m[j].reqid &&
2928 m[i].old_family == m[j].old_family)
2929 return -EINVAL;
2930 }
2931 }
2932
2933 return 0;
2934}
2935
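/* Migrate a policy and its states to new endpoints: find the policy, clone
 * each matching state onto the new addresses, update the policy templates,
 * delete the old states and finally announce the change via km_migrate().
 */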
2936int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2937 struct xfrm_migrate *m, int num_migrate,
2938 struct xfrm_kmaddress *k)
2939{
2940 int i, err, nx_cur = 0, nx_new = 0;
2941 struct xfrm_policy *pol = NULL;
2942 struct xfrm_state *x, *xc;
2943 struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
2944 struct xfrm_state *x_new[XFRM_MAX_DEPTH];
2945 struct xfrm_migrate *mp;
2946
2947 if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
2948 goto out;
2949
2950 /* Stage 1 - find policy */
2951 if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
2952 err = -ENOENT;
2953 goto out;
2954 }
2955
2956 /* Stage 2 - find and update state(s) */
2957 for (i = 0, mp = m; i < num_migrate; i++, mp++) {
2958 if ((x = xfrm_migrate_state_find(mp))) {
2959 x_cur[nx_cur] = x;
2960 nx_cur++;
2961 if ((xc = xfrm_state_migrate(x, mp))) {
2962 x_new[nx_new] = xc;
2963 nx_new++;
2964 } else {
2965 err = -ENODATA;
2966 goto restore_state;
2967 }
2968 }
2969 }
2970
2971 /* Stage 3 - update policy */
2972 if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
2973 goto restore_state;
2974
2975 /* Stage 4 - delete old state(s) */
2976 if (nx_cur) {
2977 xfrm_states_put(x_cur, nx_cur);
2978 xfrm_states_delete(x_cur, nx_cur);
2979 }
2980
2981 /* Stage 5 - announce */
2982 km_migrate(sel, dir, type, m, num_migrate, k);
2983
2984 xfrm_pol_put(pol);
2985
2986 return 0;
2987out:
2988 return err;
2989
2990restore_state:
2991 if (pol)
2992 xfrm_pol_put(pol);
2993 if (nx_cur)
2994 xfrm_states_put(x_cur, nx_cur);
2995 if (nx_new)
2996 xfrm_states_delete(x_new, nx_new);
2997
2998 return err;
2999}
3000EXPORT_SYMBOL(xfrm_migrate);
3001#endif
1/*
2 * xfrm_policy.c
3 *
4 * Changes:
5 * Mitsuru KANDA @USAGI
6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8 * IPv6 support
9 * Kazunori MIYAZAWA @USAGI
10 * YOSHIFUJI Hideaki
11 * Split up af-specific portion
12 * Derek Atkins <derek@ihtfp.com> Add the post_input processor
13 *
14 */
15
16#include <linux/err.h>
17#include <linux/slab.h>
18#include <linux/kmod.h>
19#include <linux/list.h>
20#include <linux/spinlock.h>
21#include <linux/workqueue.h>
22#include <linux/notifier.h>
23#include <linux/netdevice.h>
24#include <linux/netfilter.h>
25#include <linux/module.h>
26#include <linux/cache.h>
27#include <linux/audit.h>
28#include <net/dst.h>
29#include <net/flow.h>
30#include <net/xfrm.h>
31#include <net/ip.h>
32#ifdef CONFIG_XFRM_STATISTICS
33#include <net/snmp.h>
34#endif
35
36#include "xfrm_hash.h"
37
38#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
39#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
40#define XFRM_MAX_QUEUE_LEN 100
41
42static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
43static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
44 __read_mostly;
45
46static struct kmem_cache *xfrm_dst_cache __read_mostly;
47
48static void xfrm_init_pmtu(struct dst_entry *dst);
49static int stale_bundle(struct dst_entry *dst);
50static int xfrm_bundle_ok(struct xfrm_dst *xdst);
51static void xfrm_policy_queue_process(unsigned long arg);
52
53static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
54 int dir);
55
56static inline bool
57__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
58{
59 const struct flowi4 *fl4 = &fl->u.ip4;
60
61 return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
62 addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
63 !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
64 !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
65 (fl4->flowi4_proto == sel->proto || !sel->proto) &&
66 (fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
67}
68
69static inline bool
70__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
71{
72 const struct flowi6 *fl6 = &fl->u.ip6;
73
74 return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
75 addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
76 !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
77 !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
78 (fl6->flowi6_proto == sel->proto || !sel->proto) &&
79 (fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
80}
81
82bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
83 unsigned short family)
84{
85 switch (family) {
86 case AF_INET:
87 return __xfrm4_selector_match(sel, fl);
88 case AF_INET6:
89 return __xfrm6_selector_match(sel, fl);
90 }
91 return false;
92}
93
94static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
95{
96 struct xfrm_policy_afinfo *afinfo;
97
98 if (unlikely(family >= NPROTO))
99 return NULL;
100 rcu_read_lock();
101 afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
102 if (unlikely(!afinfo))
103 rcu_read_unlock();
104 return afinfo;
105}
106
107static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
108{
109 rcu_read_unlock();
110}
111
112static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
113 const xfrm_address_t *saddr,
114 const xfrm_address_t *daddr,
115 int family)
116{
117 struct xfrm_policy_afinfo *afinfo;
118 struct dst_entry *dst;
119
120 afinfo = xfrm_policy_get_afinfo(family);
121 if (unlikely(afinfo == NULL))
122 return ERR_PTR(-EAFNOSUPPORT);
123
124 dst = afinfo->dst_lookup(net, tos, saddr, daddr);
125
126 xfrm_policy_put_afinfo(afinfo);
127
128 return dst;
129}
130
131static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
132 xfrm_address_t *prev_saddr,
133 xfrm_address_t *prev_daddr,
134 int family)
135{
136 struct net *net = xs_net(x);
137 xfrm_address_t *saddr = &x->props.saddr;
138 xfrm_address_t *daddr = &x->id.daddr;
139 struct dst_entry *dst;
140
141 if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
142 saddr = x->coaddr;
143 daddr = prev_daddr;
144 }
145 if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
146 saddr = prev_saddr;
147 daddr = x->coaddr;
148 }
149
150 dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);
151
152 if (!IS_ERR(dst)) {
153 if (prev_saddr != saddr)
154 memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
155 if (prev_daddr != daddr)
156 memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
157 }
158
159 return dst;
160}
161
162static inline unsigned long make_jiffies(long secs)
163{
164 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
165 return MAX_SCHEDULE_TIMEOUT-1;
166 else
167 return secs*HZ;
168}
169
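/* Per-policy lifetime timer.  A soft limit only triggers a
 * km_policy_expired() warning and re-arms the timer; a hard limit deletes
 * the policy.
 */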
170static void xfrm_policy_timer(unsigned long data)
171{
172 struct xfrm_policy *xp = (struct xfrm_policy *)data;
173 unsigned long now = get_seconds();
174 long next = LONG_MAX;
175 int warn = 0;
176 int dir;
177
178 read_lock(&xp->lock);
179
180 if (unlikely(xp->walk.dead))
181 goto out;
182
183 dir = xfrm_policy_id2dir(xp->index);
184
185 if (xp->lft.hard_add_expires_seconds) {
186 long tmo = xp->lft.hard_add_expires_seconds +
187 xp->curlft.add_time - now;
188 if (tmo <= 0)
189 goto expired;
190 if (tmo < next)
191 next = tmo;
192 }
193 if (xp->lft.hard_use_expires_seconds) {
194 long tmo = xp->lft.hard_use_expires_seconds +
195 (xp->curlft.use_time ? : xp->curlft.add_time) - now;
196 if (tmo <= 0)
197 goto expired;
198 if (tmo < next)
199 next = tmo;
200 }
201 if (xp->lft.soft_add_expires_seconds) {
202 long tmo = xp->lft.soft_add_expires_seconds +
203 xp->curlft.add_time - now;
204 if (tmo <= 0) {
205 warn = 1;
206 tmo = XFRM_KM_TIMEOUT;
207 }
208 if (tmo < next)
209 next = tmo;
210 }
211 if (xp->lft.soft_use_expires_seconds) {
212 long tmo = xp->lft.soft_use_expires_seconds +
213 (xp->curlft.use_time ? : xp->curlft.add_time) - now;
214 if (tmo <= 0) {
215 warn = 1;
216 tmo = XFRM_KM_TIMEOUT;
217 }
218 if (tmo < next)
219 next = tmo;
220 }
221
222 if (warn)
223 km_policy_expired(xp, dir, 0, 0);
224 if (next != LONG_MAX &&
225 !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
226 xfrm_pol_hold(xp);
227
228out:
229 read_unlock(&xp->lock);
230 xfrm_pol_put(xp);
231 return;
232
233expired:
234 read_unlock(&xp->lock);
235 if (!xfrm_policy_delete(xp, dir))
236 km_policy_expired(xp, dir, 1, 0);
237 xfrm_pol_put(xp);
238}
239
240static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
241{
242 struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
243
244 if (unlikely(pol->walk.dead))
245 flo = NULL;
246 else
247 xfrm_pol_hold(pol);
248
249 return flo;
250}
251
252static int xfrm_policy_flo_check(struct flow_cache_object *flo)
253{
254 struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
255
256 return !pol->walk.dead;
257}
258
259static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
260{
261 xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
262}
263
264static const struct flow_cache_ops xfrm_policy_fc_ops = {
265 .get = xfrm_policy_flo_get,
266 .check = xfrm_policy_flo_check,
267 .delete = xfrm_policy_flo_delete,
268};
269
270/* Allocate an xfrm_policy.  Intended for SPD users such as the pfkeyv2
271 * code.
272 */
273
274struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
275{
276 struct xfrm_policy *policy;
277
278 policy = kzalloc(sizeof(struct xfrm_policy), gfp);
279
280 if (policy) {
281 write_pnet(&policy->xp_net, net);
282 INIT_LIST_HEAD(&policy->walk.all);
283 INIT_HLIST_NODE(&policy->bydst);
284 INIT_HLIST_NODE(&policy->byidx);
285 rwlock_init(&policy->lock);
286 atomic_set(&policy->refcnt, 1);
287 skb_queue_head_init(&policy->polq.hold_queue);
288 setup_timer(&policy->timer, xfrm_policy_timer,
289 (unsigned long)policy);
290 setup_timer(&policy->polq.hold_timer, xfrm_policy_queue_process,
291 (unsigned long)policy);
292 policy->flo.ops = &xfrm_policy_fc_ops;
293 }
294 return policy;
295}
296EXPORT_SYMBOL(xfrm_policy_alloc);
297
298/* Destroy an xfrm_policy: all descendant resources must already have been released. */
299
300void xfrm_policy_destroy(struct xfrm_policy *policy)
301{
302 BUG_ON(!policy->walk.dead);
303
304 if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
305 BUG();
306
307 security_xfrm_policy_free(policy->security);
308 kfree(policy);
309}
310EXPORT_SYMBOL(xfrm_policy_destroy);
311
312static void xfrm_queue_purge(struct sk_buff_head *list)
313{
314 struct sk_buff *skb;
315
316 while ((skb = skb_dequeue(list)) != NULL)
317 kfree_skb(skb);
318}
319
320/* Rule must be locked.  Release descendant resources and announce the
321 * entry dead.  The rule must already have been unlinked from all lists.
322 */
323
324static void xfrm_policy_kill(struct xfrm_policy *policy)
325{
326 policy->walk.dead = 1;
327
328 atomic_inc(&policy->genid);
329
330 if (del_timer(&policy->polq.hold_timer))
331 xfrm_pol_put(policy);
332 xfrm_queue_purge(&policy->polq.hold_queue);
333
334 if (del_timer(&policy->timer))
335 xfrm_pol_put(policy);
336
337 xfrm_pol_put(policy);
338}
339
340static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
341
342static inline unsigned int idx_hash(struct net *net, u32 index)
343{
344 return __idx_hash(index, net->xfrm.policy_idx_hmask);
345}
346
347static struct hlist_head *policy_hash_bysel(struct net *net,
348 const struct xfrm_selector *sel,
349 unsigned short family, int dir)
350{
351 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
352 unsigned int hash = __sel_hash(sel, family, hmask);
353
354 return (hash == hmask + 1 ?
355 &net->xfrm.policy_inexact[dir] :
356 net->xfrm.policy_bydst[dir].table + hash);
357}
358
359static struct hlist_head *policy_hash_direct(struct net *net,
360 const xfrm_address_t *daddr,
361 const xfrm_address_t *saddr,
362 unsigned short family, int dir)
363{
364 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
365 unsigned int hash = __addr_hash(daddr, saddr, family, hmask);
366
367 return net->xfrm.policy_bydst[dir].table + hash;
368}
369
370static void xfrm_dst_hash_transfer(struct hlist_head *list,
371 struct hlist_head *ndsttable,
372 unsigned int nhashmask)
373{
374 struct hlist_node *tmp, *entry0 = NULL;
375 struct xfrm_policy *pol;
376 unsigned int h0 = 0;
377
378redo:
379 hlist_for_each_entry_safe(pol, tmp, list, bydst) {
380 unsigned int h;
381
382 h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
383 pol->family, nhashmask);
384 if (!entry0) {
385 hlist_del(&pol->bydst);
386 hlist_add_head(&pol->bydst, ndsttable+h);
387 h0 = h;
388 } else {
389 if (h != h0)
390 continue;
391 hlist_del(&pol->bydst);
392 hlist_add_after(entry0, &pol->bydst);
393 }
394 entry0 = &pol->bydst;
395 }
396 if (!hlist_empty(list)) {
397 entry0 = NULL;
398 goto redo;
399 }
400}
401
402static void xfrm_idx_hash_transfer(struct hlist_head *list,
403 struct hlist_head *nidxtable,
404 unsigned int nhashmask)
405{
406 struct hlist_node *tmp;
407 struct xfrm_policy *pol;
408
409 hlist_for_each_entry_safe(pol, tmp, list, byidx) {
410 unsigned int h;
411
412 h = __idx_hash(pol->index, nhashmask);
413 hlist_add_head(&pol->byidx, nidxtable+h);
414 }
415}
416
417static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
418{
419 return ((old_hmask + 1) << 1) - 1;
420}
421
422static void xfrm_bydst_resize(struct net *net, int dir)
423{
424 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
425 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
426 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
427 struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
428 struct hlist_head *ndst = xfrm_hash_alloc(nsize);
429 int i;
430
431 if (!ndst)
432 return;
433
434 write_lock_bh(&net->xfrm.xfrm_policy_lock);
435
436 for (i = hmask; i >= 0; i--)
437 xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);
438
439 net->xfrm.policy_bydst[dir].table = ndst;
440 net->xfrm.policy_bydst[dir].hmask = nhashmask;
441
442 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
443
444 xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
445}
446
447static void xfrm_byidx_resize(struct net *net, int total)
448{
449 unsigned int hmask = net->xfrm.policy_idx_hmask;
450 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
451 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
452 struct hlist_head *oidx = net->xfrm.policy_byidx;
453 struct hlist_head *nidx = xfrm_hash_alloc(nsize);
454 int i;
455
456 if (!nidx)
457 return;
458
459 write_lock_bh(&net->xfrm.xfrm_policy_lock);
460
461 for (i = hmask; i >= 0; i--)
462 xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
463
464 net->xfrm.policy_byidx = nidx;
465 net->xfrm.policy_idx_hmask = nhashmask;
466
467 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
468
469 xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
470}
471
472static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
473{
474 unsigned int cnt = net->xfrm.policy_count[dir];
475 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
476
477 if (total)
478 *total += cnt;
479
480 if ((hmask + 1) < xfrm_policy_hashmax &&
481 cnt > hmask)
482 return 1;
483
484 return 0;
485}
486
487static inline int xfrm_byidx_should_resize(struct net *net, int total)
488{
489 unsigned int hmask = net->xfrm.policy_idx_hmask;
490
491 if ((hmask + 1) < xfrm_policy_hashmax &&
492 total > hmask)
493 return 1;
494
495 return 0;
496}
497
498void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
499{
500 read_lock_bh(&net->xfrm.xfrm_policy_lock);
501 si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
502 si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
503 si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
504 si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
505 si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
506 si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
507 si->spdhcnt = net->xfrm.policy_idx_hmask;
508 si->spdhmcnt = xfrm_policy_hashmax;
509 read_unlock_bh(&net->xfrm.xfrm_policy_lock);
510}
511EXPORT_SYMBOL(xfrm_spd_getinfo);
512
513static DEFINE_MUTEX(hash_resize_mutex);
514static void xfrm_hash_resize(struct work_struct *work)
515{
516 struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
517 int dir, total;
518
519 mutex_lock(&hash_resize_mutex);
520
521 total = 0;
522 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
523 if (xfrm_bydst_should_resize(net, dir, &total))
524 xfrm_bydst_resize(net, dir);
525 }
526 if (xfrm_byidx_should_resize(net, total))
527 xfrm_byidx_resize(net, total);
528
529 mutex_unlock(&hash_resize_mutex);
530}
531
532/* Generate a new index.  KAME seems to generate indices ordered by cost,
533 * at the price of completely unpredictable rule ordering; that will not do here. */
534static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
535{
536 static u32 idx_generator;
537
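	/* The low three bits of an index encode the direction, which is why
	 * idx_generator advances in steps of 8; xfrm_policy_id2dir() can
	 * then recover "dir" from the index.
	 */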
538 for (;;) {
539 struct hlist_head *list;
540 struct xfrm_policy *p;
541 u32 idx;
542 int found;
543
544 if (!index) {
545 idx = (idx_generator | dir);
546 idx_generator += 8;
547 } else {
548 idx = index;
549 index = 0;
550 }
551
552 if (idx == 0)
553 idx = 8;
554 list = net->xfrm.policy_byidx + idx_hash(net, idx);
555 found = 0;
556 hlist_for_each_entry(p, list, byidx) {
557 if (p->index == idx) {
558 found = 1;
559 break;
560 }
561 }
562 if (!found)
563 return idx;
564 }
565}
566
567static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
568{
569 u32 *p1 = (u32 *) s1;
570 u32 *p2 = (u32 *) s2;
571 int len = sizeof(struct xfrm_selector) / sizeof(u32);
572 int i;
573
574 for (i = 0; i < len; i++) {
575 if (p1[i] != p2[i])
576 return 1;
577 }
578
579 return 0;
580}
581
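/* When a policy is replaced, move any packets waiting on the old policy's
 * hold queue over to the new one and kick its hold timer so that they are
 * re-evaluated promptly.
 */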
582static void xfrm_policy_requeue(struct xfrm_policy *old,
583 struct xfrm_policy *new)
584{
585 struct xfrm_policy_queue *pq = &old->polq;
586 struct sk_buff_head list;
587
588 __skb_queue_head_init(&list);
589
590 spin_lock_bh(&pq->hold_queue.lock);
591 skb_queue_splice_init(&pq->hold_queue, &list);
592 if (del_timer(&pq->hold_timer))
593 xfrm_pol_put(old);
594 spin_unlock_bh(&pq->hold_queue.lock);
595
596 if (skb_queue_empty(&list))
597 return;
598
599 pq = &new->polq;
600
601 spin_lock_bh(&pq->hold_queue.lock);
602 skb_queue_splice(&list, &pq->hold_queue);
603 pq->timeout = XFRM_QUEUE_TMO_MIN;
604 if (!mod_timer(&pq->hold_timer, jiffies))
605 xfrm_pol_hold(new);
606 spin_unlock_bh(&pq->hold_queue.lock);
607}
608
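/* Two policies are treated as the same entry if their mark value/mask pairs
 * are identical, or if the new policy's effective mark satisfies the old
 * policy's mask and both have the same priority.
 */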
609static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
610 struct xfrm_policy *pol)
611{
612 u32 mark = policy->mark.v & policy->mark.m;
613
614 if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
615 return true;
616
617 if ((mark & pol->mark.m) == pol->mark.v &&
618 policy->priority == pol->priority)
619 return true;
620
621 return false;
622}
623
624int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
625{
626 struct net *net = xp_net(policy);
627 struct xfrm_policy *pol;
628 struct xfrm_policy *delpol;
629 struct hlist_head *chain;
630 struct hlist_node *newpos;
631
632 write_lock_bh(&net->xfrm.xfrm_policy_lock);
633 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
634 delpol = NULL;
635 newpos = NULL;
636 hlist_for_each_entry(pol, chain, bydst) {
637 if (pol->type == policy->type &&
638 !selector_cmp(&pol->selector, &policy->selector) &&
639 xfrm_policy_mark_match(policy, pol) &&
640 xfrm_sec_ctx_match(pol->security, policy->security) &&
641 !WARN_ON(delpol)) {
642 if (excl) {
643 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
644 return -EEXIST;
645 }
646 delpol = pol;
647 if (policy->priority > pol->priority)
648 continue;
649 } else if (policy->priority >= pol->priority) {
650 newpos = &pol->bydst;
651 continue;
652 }
653 if (delpol)
654 break;
655 }
656 if (newpos)
657 hlist_add_after(newpos, &policy->bydst);
658 else
659 hlist_add_head(&policy->bydst, chain);
660 xfrm_pol_hold(policy);
661 net->xfrm.policy_count[dir]++;
662 atomic_inc(&net->xfrm.flow_cache_genid);
663
664 /* After previous checking, family can either be AF_INET or AF_INET6 */
665 if (policy->family == AF_INET)
666 rt_genid_bump_ipv4(net);
667 else
668 rt_genid_bump_ipv6(net);
669
670 if (delpol) {
671 xfrm_policy_requeue(delpol, policy);
672 __xfrm_policy_unlink(delpol, dir);
673 }
674 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
675 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
676 policy->curlft.add_time = get_seconds();
677 policy->curlft.use_time = 0;
678 if (!mod_timer(&policy->timer, jiffies + HZ))
679 xfrm_pol_hold(policy);
680 list_add(&policy->walk.all, &net->xfrm.policy_all);
681 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
682
683 if (delpol)
684 xfrm_policy_kill(delpol);
685 else if (xfrm_bydst_should_resize(net, dir, NULL))
686 schedule_work(&net->xfrm.policy_hash_work);
687
688 return 0;
689}
690EXPORT_SYMBOL(xfrm_policy_insert);
691
692struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
693 int dir, struct xfrm_selector *sel,
694 struct xfrm_sec_ctx *ctx, int delete,
695 int *err)
696{
697 struct xfrm_policy *pol, *ret;
698 struct hlist_head *chain;
699
700 *err = 0;
701 write_lock_bh(&net->xfrm.xfrm_policy_lock);
702 chain = policy_hash_bysel(net, sel, sel->family, dir);
703 ret = NULL;
704 hlist_for_each_entry(pol, chain, bydst) {
705 if (pol->type == type &&
706 (mark & pol->mark.m) == pol->mark.v &&
707 !selector_cmp(sel, &pol->selector) &&
708 xfrm_sec_ctx_match(ctx, pol->security)) {
709 xfrm_pol_hold(pol);
710 if (delete) {
711 *err = security_xfrm_policy_delete(
712 pol->security);
713 if (*err) {
714 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
715 return pol;
716 }
717 __xfrm_policy_unlink(pol, dir);
718 }
719 ret = pol;
720 break;
721 }
722 }
723 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
724
725 if (ret && delete)
726 xfrm_policy_kill(ret);
727 return ret;
728}
729EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
730
731struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
732 int dir, u32 id, int delete, int *err)
733{
734 struct xfrm_policy *pol, *ret;
735 struct hlist_head *chain;
736
737 *err = -ENOENT;
738 if (xfrm_policy_id2dir(id) != dir)
739 return NULL;
740
741 *err = 0;
742 write_lock_bh(&net->xfrm.xfrm_policy_lock);
743 chain = net->xfrm.policy_byidx + idx_hash(net, id);
744 ret = NULL;
745 hlist_for_each_entry(pol, chain, byidx) {
746 if (pol->type == type && pol->index == id &&
747 (mark & pol->mark.m) == pol->mark.v) {
748 xfrm_pol_hold(pol);
749 if (delete) {
750 *err = security_xfrm_policy_delete(
751 pol->security);
752 if (*err) {
753 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
754 return pol;
755 }
756 __xfrm_policy_unlink(pol, dir);
757 }
758 ret = pol;
759 break;
760 }
761 }
762 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
763
764 if (ret && delete)
765 xfrm_policy_kill(ret);
766 return ret;
767}
768EXPORT_SYMBOL(xfrm_policy_byid);
769
770#ifdef CONFIG_SECURITY_NETWORK_XFRM
771static inline int
772xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
773{
774 int dir, err = 0;
775
776 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
777 struct xfrm_policy *pol;
778 int i;
779
780 hlist_for_each_entry(pol,
781 &net->xfrm.policy_inexact[dir], bydst) {
782 if (pol->type != type)
783 continue;
784 err = security_xfrm_policy_delete(pol->security);
785 if (err) {
786 xfrm_audit_policy_delete(pol, 0,
787 audit_info->loginuid,
788 audit_info->sessionid,
789 audit_info->secid);
790 return err;
791 }
792 }
793 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
794 hlist_for_each_entry(pol,
795 net->xfrm.policy_bydst[dir].table + i,
796 bydst) {
797 if (pol->type != type)
798 continue;
799 err = security_xfrm_policy_delete(
800 pol->security);
801 if (err) {
802 xfrm_audit_policy_delete(pol, 0,
803 audit_info->loginuid,
804 audit_info->sessionid,
805 audit_info->secid);
806 return err;
807 }
808 }
809 }
810 }
811 return err;
812}
813#else
814static inline int
815xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
816{
817 return 0;
818}
819#endif
820
821int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
822{
823 int dir, err = 0, cnt = 0;
824
825 write_lock_bh(&net->xfrm.xfrm_policy_lock);
826
827 err = xfrm_policy_flush_secctx_check(net, type, audit_info);
828 if (err)
829 goto out;
830
831 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
832 struct xfrm_policy *pol;
833 int i;
834
835 again1:
836 hlist_for_each_entry(pol,
837 &net->xfrm.policy_inexact[dir], bydst) {
838 if (pol->type != type)
839 continue;
840 __xfrm_policy_unlink(pol, dir);
841 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
842 cnt++;
843
844 xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
845 audit_info->sessionid,
846 audit_info->secid);
847
848 xfrm_policy_kill(pol);
849
850 write_lock_bh(&net->xfrm.xfrm_policy_lock);
851 goto again1;
852 }
853
854 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
855 again2:
856 hlist_for_each_entry(pol,
857 net->xfrm.policy_bydst[dir].table + i,
858 bydst) {
859 if (pol->type != type)
860 continue;
861 __xfrm_policy_unlink(pol, dir);
862 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
863 cnt++;
864
865 xfrm_audit_policy_delete(pol, 1,
866 audit_info->loginuid,
867 audit_info->sessionid,
868 audit_info->secid);
869 xfrm_policy_kill(pol);
870
871 write_lock_bh(&net->xfrm.xfrm_policy_lock);
872 goto again2;
873 }
874 }
875
876 }
877 if (!cnt)
878 err = -ESRCH;
879out:
880 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
881 return err;
882}
883EXPORT_SYMBOL(xfrm_policy_flush);
884
885int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
886 int (*func)(struct xfrm_policy *, int, int, void*),
887 void *data)
888{
889 struct xfrm_policy *pol;
890 struct xfrm_policy_walk_entry *x;
891 int error = 0;
892
893 if (walk->type >= XFRM_POLICY_TYPE_MAX &&
894 walk->type != XFRM_POLICY_TYPE_ANY)
895 return -EINVAL;
896
897 if (list_empty(&walk->walk.all) && walk->seq != 0)
898 return 0;
899
900 write_lock_bh(&net->xfrm.xfrm_policy_lock);
901 if (list_empty(&walk->walk.all))
902 x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
903 else
904 x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
905 list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
906 if (x->dead)
907 continue;
908 pol = container_of(x, struct xfrm_policy, walk);
909 if (walk->type != XFRM_POLICY_TYPE_ANY &&
910 walk->type != pol->type)
911 continue;
912 error = func(pol, xfrm_policy_id2dir(pol->index),
913 walk->seq, data);
914 if (error) {
915 list_move_tail(&walk->walk.all, &x->all);
916 goto out;
917 }
918 walk->seq++;
919 }
920 if (walk->seq == 0) {
921 error = -ENOENT;
922 goto out;
923 }
924 list_del_init(&walk->walk.all);
925out:
926 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
927 return error;
928}
929EXPORT_SYMBOL(xfrm_policy_walk);
930
931void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
932{
933 INIT_LIST_HEAD(&walk->walk.all);
934 walk->walk.dead = 1;
935 walk->type = type;
936 walk->seq = 0;
937}
938EXPORT_SYMBOL(xfrm_policy_walk_init);
939
940void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
941{
942 if (list_empty(&walk->walk.all))
943 return;
944
945 	write_lock_bh(&net->xfrm.xfrm_policy_lock);
946 list_del(&walk->walk.all);
947 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
948}
949EXPORT_SYMBOL(xfrm_policy_walk_done);
950
951/*
952 * Find policy to apply to this flow.
953 *
954 * Returns 0 when the policy matches the flow, otherwise a negative errno.
955 */
956static int xfrm_policy_match(const struct xfrm_policy *pol,
957 const struct flowi *fl,
958 u8 type, u16 family, int dir)
959{
960 const struct xfrm_selector *sel = &pol->selector;
961 int ret = -ESRCH;
962 bool match;
963
964 if (pol->family != family ||
965 (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
966 pol->type != type)
967 return ret;
968
969 match = xfrm_selector_match(sel, fl, family);
970 if (match)
971 ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
972 dir);
973
974 return ret;
975}
976
977static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
978 const struct flowi *fl,
979 u16 family, u8 dir)
980{
981 int err;
982 struct xfrm_policy *pol, *ret;
983 const xfrm_address_t *daddr, *saddr;
984 struct hlist_head *chain;
985 u32 priority = ~0U;
986
987 daddr = xfrm_flowi_daddr(fl, family);
988 saddr = xfrm_flowi_saddr(fl, family);
989 if (unlikely(!daddr || !saddr))
990 return NULL;
991
992 read_lock_bh(&net->xfrm.xfrm_policy_lock);
993 chain = policy_hash_direct(net, daddr, saddr, family, dir);
994 ret = NULL;
995 hlist_for_each_entry(pol, chain, bydst) {
996 err = xfrm_policy_match(pol, fl, type, family, dir);
997 if (err) {
998 if (err == -ESRCH)
999 continue;
1000 else {
1001 ret = ERR_PTR(err);
1002 goto fail;
1003 }
1004 } else {
1005 ret = pol;
1006 priority = ret->priority;
1007 break;
1008 }
1009 }
1010 chain = &net->xfrm.policy_inexact[dir];
1011 hlist_for_each_entry(pol, chain, bydst) {
1012 err = xfrm_policy_match(pol, fl, type, family, dir);
1013 if (err) {
1014 if (err == -ESRCH)
1015 continue;
1016 else {
1017 ret = ERR_PTR(err);
1018 goto fail;
1019 }
1020 } else if (pol->priority < priority) {
1021 ret = pol;
1022 break;
1023 }
1024 }
1025 if (ret)
1026 xfrm_pol_hold(ret);
1027fail:
1028 read_unlock_bh(&net->xfrm.xfrm_policy_lock);
1029
1030 return ret;
1031}
1032
1033static struct xfrm_policy *
1034__xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
1035{
1036#ifdef CONFIG_XFRM_SUB_POLICY
1037 struct xfrm_policy *pol;
1038
1039 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
1040 if (pol != NULL)
1041 return pol;
1042#endif
1043 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
1044}
1045
1046static int flow_to_policy_dir(int dir)
1047{
1048 if (XFRM_POLICY_IN == FLOW_DIR_IN &&
1049 XFRM_POLICY_OUT == FLOW_DIR_OUT &&
1050 XFRM_POLICY_FWD == FLOW_DIR_FWD)
1051 return dir;
1052
1053 switch (dir) {
1054 default:
1055 case FLOW_DIR_IN:
1056 return XFRM_POLICY_IN;
1057 case FLOW_DIR_OUT:
1058 return XFRM_POLICY_OUT;
1059 case FLOW_DIR_FWD:
1060 return XFRM_POLICY_FWD;
1061 }
1062}
1063
1064static struct flow_cache_object *
1065xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
1066 u8 dir, struct flow_cache_object *old_obj, void *ctx)
1067{
1068 struct xfrm_policy *pol;
1069
1070 if (old_obj)
1071 xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));
1072
1073 pol = __xfrm_policy_lookup(net, fl, family, flow_to_policy_dir(dir));
1074 if (IS_ERR_OR_NULL(pol))
1075 return ERR_CAST(pol);
1076
1077 	/* The resolver returns two references:
1078 	 * one for the cache and one for the caller of flow_cache_lookup(). */
1079 xfrm_pol_hold(pol);
1080
1081 return &pol->flo;
1082}
1083
1084static inline int policy_to_flow_dir(int dir)
1085{
1086 if (XFRM_POLICY_IN == FLOW_DIR_IN &&
1087 XFRM_POLICY_OUT == FLOW_DIR_OUT &&
1088 XFRM_POLICY_FWD == FLOW_DIR_FWD)
1089 return dir;
1090 switch (dir) {
1091 default:
1092 case XFRM_POLICY_IN:
1093 return FLOW_DIR_IN;
1094 case XFRM_POLICY_OUT:
1095 return FLOW_DIR_OUT;
1096 case XFRM_POLICY_FWD:
1097 return FLOW_DIR_FWD;
1098 }
1099}
1100
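/* Return the per-socket policy for this direction if it matches the flow,
 * the socket mark and the LSM's verdict; NULL when no socket policy applies,
 * or an ERR_PTR() on a security error.
 */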
1101static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
1102 const struct flowi *fl)
1103{
1104 struct xfrm_policy *pol;
1105 struct net *net = sock_net(sk);
1106
1107 read_lock_bh(&net->xfrm.xfrm_policy_lock);
1108 if ((pol = sk->sk_policy[dir]) != NULL) {
1109 bool match = xfrm_selector_match(&pol->selector, fl,
1110 sk->sk_family);
1111 int err = 0;
1112
1113 if (match) {
1114 if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
1115 pol = NULL;
1116 goto out;
1117 }
1118 err = security_xfrm_policy_lookup(pol->security,
1119 fl->flowi_secid,
1120 policy_to_flow_dir(dir));
1121 if (!err)
1122 xfrm_pol_hold(pol);
1123 else if (err == -ESRCH)
1124 pol = NULL;
1125 else
1126 pol = ERR_PTR(err);
1127 } else
1128 pol = NULL;
1129 }
1130out:
1131 read_unlock_bh(&net->xfrm.xfrm_policy_lock);
1132 return pol;
1133}
1134
1135static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
1136{
1137 struct net *net = xp_net(pol);
1138 struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
1139 pol->family, dir);
1140
1141 list_add(&pol->walk.all, &net->xfrm.policy_all);
1142 hlist_add_head(&pol->bydst, chain);
1143 hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
1144 net->xfrm.policy_count[dir]++;
1145 xfrm_pol_hold(pol);
1146
1147 if (xfrm_bydst_should_resize(net, dir, NULL))
1148 schedule_work(&net->xfrm.policy_hash_work);
1149}
1150
1151static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
1152 int dir)
1153{
1154 struct net *net = xp_net(pol);
1155
1156 if (hlist_unhashed(&pol->bydst))
1157 return NULL;
1158
1159 hlist_del_init(&pol->bydst);
1160 hlist_del(&pol->byidx);
1161 list_del(&pol->walk.all);
1162 net->xfrm.policy_count[dir]--;
1163
1164 return pol;
1165}
1166
1167int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
1168{
1169 struct net *net = xp_net(pol);
1170
1171 write_lock_bh(&net->xfrm.xfrm_policy_lock);
1172 pol = __xfrm_policy_unlink(pol, dir);
1173 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
1174 if (pol) {
1175 xfrm_policy_kill(pol);
1176 return 0;
1177 }
1178 return -ENOENT;
1179}
1180EXPORT_SYMBOL(xfrm_policy_delete);
1181
1182int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1183{
1184 struct net *net = xp_net(pol);
1185 struct xfrm_policy *old_pol;
1186
1187#ifdef CONFIG_XFRM_SUB_POLICY
1188 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
1189 return -EINVAL;
1190#endif
1191
1192 write_lock_bh(&net->xfrm.xfrm_policy_lock);
1193 old_pol = sk->sk_policy[dir];
1194 sk->sk_policy[dir] = pol;
1195 if (pol) {
1196 pol->curlft.add_time = get_seconds();
1197 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
1198 __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
1199 }
1200 if (old_pol) {
1201 if (pol)
1202 xfrm_policy_requeue(old_pol, pol);
1203
1204 		/* Unlinking always succeeds.  This is the only function
1205 		 * allowed to delete or replace a socket policy.
1206 		 */
1207 __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
1208 }
1209 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
1210
1211 if (old_pol) {
1212 xfrm_policy_kill(old_pol);
1213 }
1214 return 0;
1215}
1216
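/* Duplicate a socket policy for a newly cloned socket: copy the selector,
 * lifetimes, mark, templates and type, clone the security context, and
 * link the copy into the per-socket direction of the policy tables.
 */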
1217static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
1218{
1219 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
1220 struct net *net = xp_net(old);
1221
1222 if (newp) {
1223 newp->selector = old->selector;
1224 if (security_xfrm_policy_clone(old->security,
1225 &newp->security)) {
1226 kfree(newp);
1227 return NULL; /* ENOMEM */
1228 }
1229 newp->lft = old->lft;
1230 newp->curlft = old->curlft;
1231 newp->mark = old->mark;
1232 newp->action = old->action;
1233 newp->flags = old->flags;
1234 newp->xfrm_nr = old->xfrm_nr;
1235 newp->index = old->index;
1236 newp->type = old->type;
1237 memcpy(newp->xfrm_vec, old->xfrm_vec,
1238 newp->xfrm_nr*sizeof(struct xfrm_tmpl));
1239 write_lock_bh(&net->xfrm.xfrm_policy_lock);
1240 __xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
1241 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
1242 xfrm_pol_put(newp);
1243 }
1244 return newp;
1245}
1246
1247int __xfrm_sk_clone_policy(struct sock *sk)
1248{
1249 struct xfrm_policy *p0 = sk->sk_policy[0],
1250 *p1 = sk->sk_policy[1];
1251
1252 sk->sk_policy[0] = sk->sk_policy[1] = NULL;
1253 if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
1254 return -ENOMEM;
1255 if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
1256 return -ENOMEM;
1257 return 0;
1258}
1259
1260static int
1261xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
1262 unsigned short family)
1263{
1264 int err;
1265 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1266
1267 if (unlikely(afinfo == NULL))
1268 return -EINVAL;
1269 err = afinfo->get_saddr(net, local, remote);
1270 xfrm_policy_put_afinfo(afinfo);
1271 return err;
1272}
1273
1274/* Resolve list of templates for the flow, given policy. */
1275
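/* xfrm_tmpl_resolve_one() walks the policy's template vector in order.  For
 * tunnel and BEET templates the endpoint addresses come from the template
 * itself (looking up a source address when the template leaves it
 * wildcarded); for transport mode the flow addresses are used unchanged.
 * Each resolved state becomes the addressing context for the next template.
 * Optional templates that cannot be resolved are skipped; required ones fail
 * the whole resolution.
 */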
1276static int
1277xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
1278 struct xfrm_state **xfrm, unsigned short family)
1279{
1280 struct net *net = xp_net(policy);
1281 int nx;
1282 int i, error;
1283 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
1284 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
1285 xfrm_address_t tmp;
1286
1287 for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
1288 struct xfrm_state *x;
1289 xfrm_address_t *remote = daddr;
1290 xfrm_address_t *local = saddr;
1291 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
1292
1293 if (tmpl->mode == XFRM_MODE_TUNNEL ||
1294 tmpl->mode == XFRM_MODE_BEET) {
1295 remote = &tmpl->id.daddr;
1296 local = &tmpl->saddr;
1297 if (xfrm_addr_any(local, tmpl->encap_family)) {
1298 error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
1299 if (error)
1300 goto fail;
1301 local = &tmp;
1302 }
1303 }
1304
1305 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
1306
1307 if (x && x->km.state == XFRM_STATE_VALID) {
1308 xfrm[nx++] = x;
1309 daddr = remote;
1310 saddr = local;
1311 continue;
1312 }
1313 if (x) {
1314 error = (x->km.state == XFRM_STATE_ERROR ?
1315 -EINVAL : -EAGAIN);
1316 xfrm_state_put(x);
1317 } else if (error == -ESRCH) {
1318 error = -EAGAIN;
1319 }
1320
1321 if (!tmpl->optional)
1322 goto fail;
1323 }
1324 return nx;
1325
1326fail:
1327 for (nx--; nx >= 0; nx--)
1328 xfrm_state_put(xfrm[nx]);
1329 return error;
1330}
1331
1332static int
1333xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
1334 struct xfrm_state **xfrm, unsigned short family)
1335{
1336 struct xfrm_state *tp[XFRM_MAX_DEPTH];
1337 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
1338 int cnx = 0;
1339 int error;
1340 int ret;
1341 int i;
1342
1343 for (i = 0; i < npols; i++) {
1344 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
1345 error = -ENOBUFS;
1346 goto fail;
1347 }
1348
1349 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
1350 if (ret < 0) {
1351 error = ret;
1352 goto fail;
1353 } else
1354 cnx += ret;
1355 }
1356
1357 /* found states are sorted for outbound processing */
1358 if (npols > 1)
1359 xfrm_state_sort(xfrm, tpp, cnx, family);
1360
1361 return cnx;
1362
1363 fail:
1364 for (cnx--; cnx >= 0; cnx--)
1365 xfrm_state_put(tpp[cnx]);
1366 return error;
1367
1368}
1369
1370/* Check that the bundle accepts the flow and its components are
1371 * still valid.
1372 */
1373
1374static inline int xfrm_get_tos(const struct flowi *fl, int family)
1375{
1376 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1377 int tos;
1378
1379 if (!afinfo)
1380 return -EINVAL;
1381
1382 tos = afinfo->get_tos(fl);
1383
1384 xfrm_policy_put_afinfo(afinfo);
1385
1386 return tos;
1387}
1388
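/* Flow cache callbacks for xfrm bundles.  ->get returns the cached object
 * only while the bundle is still usable: a dummy bundle whose templates are
 * still unresolved, a queueing bundle, or a stale bundle forces a fresh
 * lookup.  ->check revalidates a cached bundle and ->delete frees the dst
 * when the cache entry is dropped.
 */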
1389static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
1390{
1391 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1392 struct dst_entry *dst = &xdst->u.dst;
1393
1394 if (xdst->route == NULL) {
1395 /* Dummy bundle - if it has xfrms, we were not
1396 * able to build a bundle because template resolution failed.
1397 * It means we need to retry resolving. */
1398 if (xdst->num_xfrms > 0)
1399 return NULL;
1400 } else if (dst->flags & DST_XFRM_QUEUE) {
1401 return NULL;
1402 } else {
1403 /* Real bundle */
1404 if (stale_bundle(dst))
1405 return NULL;
1406 }
1407
1408 dst_hold(dst);
1409 return flo;
1410}
1411
1412static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
1413{
1414 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1415 struct dst_entry *dst = &xdst->u.dst;
1416
1417 if (!xdst->route)
1418 return 0;
1419 if (stale_bundle(dst))
1420 return 0;
1421
1422 return 1;
1423}
1424
1425static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
1426{
1427 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1428 struct dst_entry *dst = &xdst->u.dst;
1429
1430 dst_free(dst);
1431}
1432
1433static const struct flow_cache_ops xfrm_bundle_fc_ops = {
1434 .get = xfrm_bundle_flo_get,
1435 .check = xfrm_bundle_flo_check,
1436 .delete = xfrm_bundle_flo_delete,
1437};
1438
1439static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1440{
1441 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1442 struct dst_ops *dst_ops;
1443 struct xfrm_dst *xdst;
1444
1445 if (!afinfo)
1446 return ERR_PTR(-EINVAL);
1447
1448 switch (family) {
1449 case AF_INET:
1450 dst_ops = &net->xfrm.xfrm4_dst_ops;
1451 break;
1452#if IS_ENABLED(CONFIG_IPV6)
1453 case AF_INET6:
1454 dst_ops = &net->xfrm.xfrm6_dst_ops;
1455 break;
1456#endif
1457 default:
1458 BUG();
1459 }
1460 xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0);
1461
1462 if (likely(xdst)) {
1463 struct dst_entry *dst = &xdst->u.dst;
1464
1465 memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
1466 xdst->flo.ops = &xfrm_bundle_fc_ops;
1467 if (afinfo->init_dst)
1468 afinfo->init_dst(net, xdst);
1469 } else
1470 xdst = ERR_PTR(-ENOBUFS);
1471
1472 xfrm_policy_put_afinfo(afinfo);
1473
1474 return xdst;
1475}
1476
1477static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
1478 int nfheader_len)
1479{
1480 struct xfrm_policy_afinfo *afinfo =
1481 xfrm_policy_get_afinfo(dst->ops->family);
1482 int err;
1483
1484 if (!afinfo)
1485 return -EINVAL;
1486
1487 err = afinfo->init_path(path, dst, nfheader_len);
1488
1489 xfrm_policy_put_afinfo(afinfo);
1490
1491 return err;
1492}
1493
1494static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
1495 const struct flowi *fl)
1496{
1497 struct xfrm_policy_afinfo *afinfo =
1498 xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
1499 int err;
1500
1501 if (!afinfo)
1502 return -EINVAL;
1503
1504 err = afinfo->fill_dst(xdst, dev, fl);
1505
1506 xfrm_policy_put_afinfo(afinfo);
1507
1508 return err;
1509}
1510
1511
1512/* Allocate a chain of dst_entry's, attach the known xfrm's, calculate
1513 * all the metrics... In short, bundle a bundle.
1514 */
1515
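/* The resulting bundle is a chain of xfrm_dsts, outermost first, ending in
 * the original route:
 *
 *   dst0 (xfrm[0]) --child--> (xfrm[1]) --child--> ... --child--> route dst
 *
 * Each level records the route it was built over in ->route, and the
 * accumulated header/trailer lengths are largest at the outermost entry.
 */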
1516static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1517 struct xfrm_state **xfrm, int nx,
1518 const struct flowi *fl,
1519 struct dst_entry *dst)
1520{
1521 struct net *net = xp_net(policy);
1522 unsigned long now = jiffies;
1523 struct net_device *dev;
1524 struct xfrm_mode *inner_mode;
1525 struct dst_entry *dst_prev = NULL;
1526 struct dst_entry *dst0 = NULL;
1527 int i = 0;
1528 int err;
1529 int header_len = 0;
1530 int nfheader_len = 0;
1531 int trailer_len = 0;
1532 int tos;
1533 int family = policy->selector.family;
1534 xfrm_address_t saddr, daddr;
1535
1536 xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
1537
1538 tos = xfrm_get_tos(fl, family);
1539 err = tos;
1540 if (tos < 0)
1541 goto put_states;
1542
1543 dst_hold(dst);
1544
1545 for (; i < nx; i++) {
1546 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
1547 struct dst_entry *dst1 = &xdst->u.dst;
1548
1549 err = PTR_ERR(xdst);
1550 if (IS_ERR(xdst)) {
1551 dst_release(dst);
1552 goto put_states;
1553 }
1554
1555 if (xfrm[i]->sel.family == AF_UNSPEC) {
1556 inner_mode = xfrm_ip2inner_mode(xfrm[i],
1557 xfrm_af2proto(family));
1558 if (!inner_mode) {
1559 err = -EAFNOSUPPORT;
1560 dst_release(dst);
1561 goto put_states;
1562 }
1563 } else
1564 inner_mode = xfrm[i]->inner_mode;
1565
1566 if (!dst_prev)
1567 dst0 = dst1;
1568 else {
1569 dst_prev->child = dst_clone(dst1);
1570 dst1->flags |= DST_NOHASH;
1571 }
1572
1573 xdst->route = dst;
1574 dst_copy_metrics(dst1, dst);
1575
1576 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
1577 family = xfrm[i]->props.family;
1578 dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
1579 family);
1580 err = PTR_ERR(dst);
1581 if (IS_ERR(dst))
1582 goto put_states;
1583 } else
1584 dst_hold(dst);
1585
1586 dst1->xfrm = xfrm[i];
1587 xdst->xfrm_genid = xfrm[i]->genid;
1588
1589 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
1590 dst1->flags |= DST_HOST;
1591 dst1->lastuse = now;
1592
1593 dst1->input = dst_discard;
1594 dst1->output = inner_mode->afinfo->output;
1595
1596 dst1->next = dst_prev;
1597 dst_prev = dst1;
1598
1599 header_len += xfrm[i]->props.header_len;
1600 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
1601 nfheader_len += xfrm[i]->props.header_len;
1602 trailer_len += xfrm[i]->props.trailer_len;
1603 }
1604
1605 dst_prev->child = dst;
1606 dst0->path = dst;
1607
1608 err = -ENODEV;
1609 dev = dst->dev;
1610 if (!dev)
1611 goto free_dst;
1612
1613 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
1614 xfrm_init_pmtu(dst_prev);
1615
1616 for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
1617 struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
1618
1619 err = xfrm_fill_dst(xdst, dev, fl);
1620 if (err)
1621 goto free_dst;
1622
1623 dst_prev->header_len = header_len;
1624 dst_prev->trailer_len = trailer_len;
1625 header_len -= xdst->u.dst.xfrm->props.header_len;
1626 trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
1627 }
1628
1629out:
1630 return dst0;
1631
1632put_states:
1633 for (; i < nx; i++)
1634 xfrm_state_put(xfrm[i]);
1635free_dst:
1636 if (dst0)
1637 dst_free(dst0);
1638 dst0 = ERR_PTR(err);
1639 goto out;
1640}
1641
1642#ifdef CONFIG_XFRM_SUB_POLICY
1643static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
1644{
1645 if (!*target) {
1646 *target = kmalloc(size, GFP_ATOMIC);
1647 if (!*target)
1648 return -ENOMEM;
1649 }
1650
1651 memcpy(*target, src, size);
1652 return 0;
1653}
1654#endif
1655
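/* With CONFIG_XFRM_SUB_POLICY the bundle additionally records the second
 * policy's selector in ->partner and the originating flow in ->origin;
 * without it these updates are no-ops.
 */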
1656static int xfrm_dst_update_parent(struct dst_entry *dst,
1657 const struct xfrm_selector *sel)
1658{
1659#ifdef CONFIG_XFRM_SUB_POLICY
1660 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1661 return xfrm_dst_alloc_copy((void **)&(xdst->partner),
1662 sel, sizeof(*sel));
1663#else
1664 return 0;
1665#endif
1666}
1667
1668static int xfrm_dst_update_origin(struct dst_entry *dst,
1669 const struct flowi *fl)
1670{
1671#ifdef CONFIG_XFRM_SUB_POLICY
1672 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1673 return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
1674#else
1675 return 0;
1676#endif
1677}
1678
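/* Expand the policy array for a lookup result: with sub-policies enabled, a
 * matching non-main policy pulls in the corresponding main policy as well.
 * On return *num_pols and *num_xfrms describe the combined set; *num_xfrms
 * is set to -1 when any of the policies is not an ALLOW policy.
 */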
1679static int xfrm_expand_policies(const struct flowi *fl, u16 family,
1680 struct xfrm_policy **pols,
1681 int *num_pols, int *num_xfrms)
1682{
1683 int i;
1684
1685 if (*num_pols == 0 || !pols[0]) {
1686 *num_pols = 0;
1687 *num_xfrms = 0;
1688 return 0;
1689 }
1690 if (IS_ERR(pols[0]))
1691 return PTR_ERR(pols[0]);
1692
1693 *num_xfrms = pols[0]->xfrm_nr;
1694
1695#ifdef CONFIG_XFRM_SUB_POLICY
1696 if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
1697 pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
1698 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
1699 XFRM_POLICY_TYPE_MAIN,
1700 fl, family,
1701 XFRM_POLICY_OUT);
1702 if (pols[1]) {
1703 if (IS_ERR(pols[1])) {
1704 xfrm_pols_put(pols, *num_pols);
1705 return PTR_ERR(pols[1]);
1706 }
1707 (*num_pols)++;
1708 (*num_xfrms) += pols[1]->xfrm_nr;
1709 }
1710 }
1711#endif
1712 for (i = 0; i < *num_pols; i++) {
1713 if (pols[i]->action != XFRM_POLICY_ALLOW) {
1714 *num_xfrms = -1;
1715 break;
1716 }
1717 }
1718
1719 return 0;
1720
1721}
1722
1723static struct xfrm_dst *
1724xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1725 const struct flowi *fl, u16 family,
1726 struct dst_entry *dst_orig)
1727{
1728 struct net *net = xp_net(pols[0]);
1729 struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
1730 struct dst_entry *dst;
1731 struct xfrm_dst *xdst;
1732 int err;
1733
1734 /* Try to instantiate a bundle */
1735 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
1736 if (err <= 0) {
1737 if (err != 0 && err != -EAGAIN)
1738 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1739 return ERR_PTR(err);
1740 }
1741
1742 dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
1743 if (IS_ERR(dst)) {
1744 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
1745 return ERR_CAST(dst);
1746 }
1747
1748 xdst = (struct xfrm_dst *)dst;
1749 xdst->num_xfrms = err;
1750 if (num_pols > 1)
1751 err = xfrm_dst_update_parent(dst, &pols[1]->selector);
1752 else
1753 err = xfrm_dst_update_origin(dst, fl);
1754 if (unlikely(err)) {
1755 dst_free(dst);
1756 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1757 return ERR_PTR(err);
1758 }
1759
1760 xdst->num_pols = num_pols;
1761 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
1762 xdst->policy_genid = atomic_read(&pols[0]->genid);
1763
1764 return xdst;
1765}
1766
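/* Timer handler for a policy's packet hold queue.  Packets are parked here
 * while the needed states are still being negotiated; each run retries the
 * bundle lookup and either transmits the queued skbs or doubles the timeout,
 * giving up (and purging the queue) once the timeout reaches
 * XFRM_QUEUE_TMO_MAX.
 */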
1767static void xfrm_policy_queue_process(unsigned long arg)
1768{
1769 int err = 0;
1770 struct sk_buff *skb;
1771 struct sock *sk;
1772 struct dst_entry *dst;
1773 struct xfrm_policy *pol = (struct xfrm_policy *)arg;
1774 struct xfrm_policy_queue *pq = &pol->polq;
1775 struct flowi fl;
1776 struct sk_buff_head list;
1777
1778 spin_lock(&pq->hold_queue.lock);
1779 skb = skb_peek(&pq->hold_queue);
1780 if (!skb) {
1781 spin_unlock(&pq->hold_queue.lock);
1782 goto out;
1783 }
1784 dst = skb_dst(skb);
1785 sk = skb->sk;
1786 xfrm_decode_session(skb, &fl, dst->ops->family);
1787 spin_unlock(&pq->hold_queue.lock);
1788
1789 dst_hold(dst->path);
1790 dst = xfrm_lookup(xp_net(pol), dst->path, &fl,
1791 sk, 0);
1792 if (IS_ERR(dst))
1793 goto purge_queue;
1794
1795 if (dst->flags & DST_XFRM_QUEUE) {
1796 dst_release(dst);
1797
1798 if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
1799 goto purge_queue;
1800
1801 pq->timeout = pq->timeout << 1;
1802 if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
1803 xfrm_pol_hold(pol);
1804 goto out;
1805 }
1806
1807 dst_release(dst);
1808
1809 __skb_queue_head_init(&list);
1810
1811 spin_lock(&pq->hold_queue.lock);
1812 pq->timeout = 0;
1813 skb_queue_splice_init(&pq->hold_queue, &list);
1814 spin_unlock(&pq->hold_queue.lock);
1815
1816 while (!skb_queue_empty(&list)) {
1817 skb = __skb_dequeue(&list);
1818
1819 xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
1820 dst_hold(skb_dst(skb)->path);
1821 dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
1822 &fl, skb->sk, 0);
1823 if (IS_ERR(dst)) {
1824 kfree_skb(skb);
1825 continue;
1826 }
1827
1828 nf_reset(skb);
1829 skb_dst_drop(skb);
1830 skb_dst_set(skb, dst);
1831
1832 err = dst_output(skb);
1833 }
1834
1835out:
1836 xfrm_pol_put(pol);
1837 return;
1838
1839purge_queue:
1840 pq->timeout = 0;
1841 xfrm_queue_purge(&pq->hold_queue);
1842 xfrm_pol_put(pol);
1843}
1844
1845static int xdst_queue_output(struct sock *sk, struct sk_buff *skb)
1846{
1847 unsigned long sched_next;
1848 struct dst_entry *dst = skb_dst(skb);
1849 struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
1850 struct xfrm_policy *pol = xdst->pols[0];
1851 struct xfrm_policy_queue *pq = &pol->polq;
1852 const struct sk_buff *fclone = skb + 1;
1853
1854 if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
1855 fclone->fclone == SKB_FCLONE_CLONE)) {
1856 kfree_skb(skb);
1857 return 0;
1858 }
1859
1860 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
1861 kfree_skb(skb);
1862 return -EAGAIN;
1863 }
1864
1865 skb_dst_force(skb);
1866
1867 spin_lock_bh(&pq->hold_queue.lock);
1868
1869 if (!pq->timeout)
1870 pq->timeout = XFRM_QUEUE_TMO_MIN;
1871
1872 sched_next = jiffies + pq->timeout;
1873
1874 if (del_timer(&pq->hold_timer)) {
1875 if (time_before(pq->hold_timer.expires, sched_next))
1876 sched_next = pq->hold_timer.expires;
1877 xfrm_pol_put(pol);
1878 }
1879
1880 __skb_queue_tail(&pq->hold_queue, skb);
1881 if (!mod_timer(&pq->hold_timer, sched_next))
1882 xfrm_pol_hold(pol);
1883
1884 spin_unlock_bh(&pq->hold_queue.lock);
1885
1886 return 0;
1887}
1888
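/* Build a placeholder bundle for the case where policies matched but the
 * needed states do not exist yet.  Unless larval drop is enabled, the dummy
 * bundle is wired to xdst_queue_output so outgoing packets are queued on the
 * policy until the SAs have been negotiated.
 */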
1889static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
1890 struct dst_entry *dst,
1891 const struct flowi *fl,
1892 int num_xfrms,
1893 u16 family)
1894{
1895 int err;
1896 struct net_device *dev;
1897 struct dst_entry *dst1;
1898 struct xfrm_dst *xdst;
1899
1900 xdst = xfrm_alloc_dst(net, family);
1901 if (IS_ERR(xdst))
1902 return xdst;
1903
1904 if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0)
1905 return xdst;
1906
1907 dst1 = &xdst->u.dst;
1908 dst_hold(dst);
1909 xdst->route = dst;
1910
1911 dst_copy_metrics(dst1, dst);
1912
1913 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
1914 dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
1915 dst1->lastuse = jiffies;
1916
1917 dst1->input = dst_discard;
1918 dst1->output = xdst_queue_output;
1919
1920 dst_hold(dst);
1921 dst1->child = dst;
1922 dst1->path = dst;
1923
1924 xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
1925
1926 err = -ENODEV;
1927 dev = dst->dev;
1928 if (!dev)
1929 goto free_dst;
1930
1931 err = xfrm_fill_dst(xdst, dev, fl);
1932 if (err)
1933 goto free_dst;
1934
1935out:
1936 return xdst;
1937
1938free_dst:
1939 dst_release(dst1);
1940 xdst = ERR_PTR(err);
1941 goto out;
1942}
1943
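/* Flow cache resolver for output bundles.  An existing cache entry is reused
 * as long as all of its policies are still alive; otherwise the policies are
 * looked up again and a new bundle (or a dummy one, when states are missing
 * or no transformation is needed) is created.
 */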
1944static struct flow_cache_object *
1945xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
1946 struct flow_cache_object *oldflo, void *ctx)
1947{
1948 struct dst_entry *dst_orig = (struct dst_entry *)ctx;
1949 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1950 struct xfrm_dst *xdst, *new_xdst;
1951 int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
1952
1953 /* Check if the policies from old bundle are usable */
1954 xdst = NULL;
1955 if (oldflo) {
1956 xdst = container_of(oldflo, struct xfrm_dst, flo);
1957 num_pols = xdst->num_pols;
1958 num_xfrms = xdst->num_xfrms;
1959 pol_dead = 0;
1960 for (i = 0; i < num_pols; i++) {
1961 pols[i] = xdst->pols[i];
1962 pol_dead |= pols[i]->walk.dead;
1963 }
1964 if (pol_dead) {
1965 dst_free(&xdst->u.dst);
1966 xdst = NULL;
1967 num_pols = 0;
1968 num_xfrms = 0;
1969 oldflo = NULL;
1970 }
1971 }
1972
1973 /* Resolve policies to use if we couldn't get them from
1974 * the previous cache entry */
1975 if (xdst == NULL) {
1976 num_pols = 1;
1977 pols[0] = __xfrm_policy_lookup(net, fl, family,
1978 flow_to_policy_dir(dir));
1979 err = xfrm_expand_policies(fl, family, pols,
1980 &num_pols, &num_xfrms);
1981 if (err < 0)
1982 goto inc_error;
1983 if (num_pols == 0)
1984 return NULL;
1985 if (num_xfrms <= 0)
1986 goto make_dummy_bundle;
1987 }
1988
1989 new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
1990 if (IS_ERR(new_xdst)) {
1991 err = PTR_ERR(new_xdst);
1992 if (err != -EAGAIN)
1993 goto error;
1994 if (oldflo == NULL)
1995 goto make_dummy_bundle;
1996 dst_hold(&xdst->u.dst);
1997 return oldflo;
1998 } else if (new_xdst == NULL) {
1999 num_xfrms = 0;
2000 if (oldflo == NULL)
2001 goto make_dummy_bundle;
2002 xdst->num_xfrms = 0;
2003 dst_hold(&xdst->u.dst);
2004 return oldflo;
2005 }
2006
2007 /* Kill the previous bundle */
2008 if (xdst) {
2009 /* The policies were stolen for the newly generated bundle */
2010 xdst->num_pols = 0;
2011 dst_free(&xdst->u.dst);
2012 }
2013
2014 /* The flow cache does not hold a reference; it dst_free()'s the bundle instead,
2015 * but we do need to return one reference for the original caller */
2016 dst_hold(&new_xdst->u.dst);
2017 return &new_xdst->flo;
2018
2019make_dummy_bundle:
2020 /* We found policies, but there are no bundles to instantiate:
2021 * either because the policy blocks, has no transformations, or
2022 * we could not build a template (no xfrm_states). */
2023 xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family);
2024 if (IS_ERR(xdst)) {
2025 xfrm_pols_put(pols, num_pols);
2026 return ERR_CAST(xdst);
2027 }
2028 xdst->num_pols = num_pols;
2029 xdst->num_xfrms = num_xfrms;
2030 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2031
2032 dst_hold(&xdst->u.dst);
2033 return &xdst->flo;
2034
2035inc_error:
2036 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2037error:
2038 if (xdst != NULL)
2039 dst_free(&xdst->u.dst);
2040 else
2041 xfrm_pols_put(pols, num_pols);
2042 return ERR_PTR(err);
2043}
2044
2045static struct dst_entry *make_blackhole(struct net *net, u16 family,
2046 struct dst_entry *dst_orig)
2047{
2048 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2049 struct dst_entry *ret;
2050
2051 if (!afinfo) {
2052 dst_release(dst_orig);
2053 return ERR_PTR(-EINVAL);
2054 } else {
2055 ret = afinfo->blackhole_route(net, dst_orig);
2056 }
2057 xfrm_policy_put_afinfo(afinfo);
2058
2059 return ret;
2060}
2061
2062/* Main function: finds/creates a bundle for the given flow.
2063 *
2064 * At the moment we eat a raw IP route. Mostly to speed up lookups
2065 * on interfaces with IPsec disabled.
2066 */
2067struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2068 const struct flowi *fl,
2069 struct sock *sk, int flags)
2070{
2071 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2072 struct flow_cache_object *flo;
2073 struct xfrm_dst *xdst;
2074 struct dst_entry *dst, *route;
2075 u16 family = dst_orig->ops->family;
2076 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
2077 int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
2078
2079 dst = NULL;
2080 xdst = NULL;
2081 route = NULL;
2082
2083 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
2084 num_pols = 1;
2085 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
2086 err = xfrm_expand_policies(fl, family, pols,
2087 &num_pols, &num_xfrms);
2088 if (err < 0)
2089 goto dropdst;
2090
2091 if (num_pols) {
2092 if (num_xfrms <= 0) {
2093 drop_pols = num_pols;
2094 goto no_transform;
2095 }
2096
2097 xdst = xfrm_resolve_and_create_bundle(
2098 pols, num_pols, fl,
2099 family, dst_orig);
2100 if (IS_ERR(xdst)) {
2101 xfrm_pols_put(pols, num_pols);
2102 err = PTR_ERR(xdst);
2103 goto dropdst;
2104 } else if (xdst == NULL) {
2105 num_xfrms = 0;
2106 drop_pols = num_pols;
2107 goto no_transform;
2108 }
2109
2110 route = xdst->route;
2111 }
2112 }
2113
2114 if (xdst == NULL) {
2115 /* To accelerate a bit... */
2116 if ((dst_orig->flags & DST_NOXFRM) ||
2117 !net->xfrm.policy_count[XFRM_POLICY_OUT])
2118 goto nopol;
2119
2120 flo = flow_cache_lookup(net, fl, family, dir,
2121 xfrm_bundle_lookup, dst_orig);
2122 if (flo == NULL)
2123 goto nopol;
2124 if (IS_ERR(flo)) {
2125 err = PTR_ERR(flo);
2126 goto dropdst;
2127 }
2128 xdst = container_of(flo, struct xfrm_dst, flo);
2129
2130 num_pols = xdst->num_pols;
2131 num_xfrms = xdst->num_xfrms;
2132 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
2133 route = xdst->route;
2134 }
2135
2136 dst = &xdst->u.dst;
2137 if (route == NULL && num_xfrms > 0) {
2138 /* The only case when xfrm_bundle_lookup() returns a
2139 * bundle with a null route is when the template could
2140 * not be resolved. It means the policies are there, but
2141 * the bundle could not be created, since we don't yet
2142 * have the xfrm_states. We need to wait for the KM to
2143 * negotiate new SAs or bail out with an error. */
2144 if (net->xfrm.sysctl_larval_drop) {
2145 dst_release(dst);
2146 xfrm_pols_put(pols, drop_pols);
2147 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2148
2149 return make_blackhole(net, family, dst_orig);
2150 }
2151
2152 err = -EAGAIN;
2153
2154 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2155 goto error;
2156 }
2157
2158no_transform:
2159 if (num_pols == 0)
2160 goto nopol;
2161
2162 if ((flags & XFRM_LOOKUP_ICMP) &&
2163 !(pols[0]->flags & XFRM_POLICY_ICMP)) {
2164 err = -ENOENT;
2165 goto error;
2166 }
2167
2168 for (i = 0; i < num_pols; i++)
2169 pols[i]->curlft.use_time = get_seconds();
2170
2171 if (num_xfrms < 0) {
2172 /* Prohibit the flow */
2173 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
2174 err = -EPERM;
2175 goto error;
2176 } else if (num_xfrms > 0) {
2177 /* Flow transformed */
2178 dst_release(dst_orig);
2179 } else {
2180 /* Flow passes untransformed */
2181 dst_release(dst);
2182 dst = dst_orig;
2183 }
2184ok:
2185 xfrm_pols_put(pols, drop_pols);
2186 if (dst && dst->xfrm &&
2187 dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
2188 dst->flags |= DST_XFRM_TUNNEL;
2189 return dst;
2190
2191nopol:
2192 if (!(flags & XFRM_LOOKUP_ICMP)) {
2193 dst = dst_orig;
2194 goto ok;
2195 }
2196 err = -ENOENT;
2197error:
2198 dst_release(dst);
2199dropdst:
2200 dst_release(dst_orig);
2201 xfrm_pols_put(pols, drop_pols);
2202 return ERR_PTR(err);
2203}
2204EXPORT_SYMBOL(xfrm_lookup);
2205
2206static inline int
2207xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
2208{
2209 struct xfrm_state *x;
2210
2211 if (!skb->sp || idx < 0 || idx >= skb->sp->len)
2212 return 0;
2213 x = skb->sp->xvec[idx];
2214 if (!x->type->reject)
2215 return 0;
2216 return x->type->reject(x, skb, fl);
2217}
2218
2219/* When the skb is transformed back to its "native" form, we have to
2220 * check policy restrictions. At the moment we do this in a maximally
2221 * stupid way. Shame on me. :-) Of course, connected sockets must
2222 * have the policy cached at them.
2223 */
2224
2225static inline int
2226xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
2227 unsigned short family)
2228{
2229 if (xfrm_state_kern(x))
2230 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
2231 return x->id.proto == tmpl->id.proto &&
2232 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
2233 (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
2234 x->props.mode == tmpl->mode &&
2235 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
2236 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
2237 !(x->props.mode != XFRM_MODE_TRANSPORT &&
2238 xfrm_state_addr_cmp(tmpl, x, family));
2239}
2240
2241/*
2242 * 0 or more than 0 is returned when validation succeeds (either a bypass
2243 * because of an optional transport mode, or the next index of the matched
2244 * secpath state with the template).
2245 * -1 is returned when no matching template is found.
2246 * Otherwise "-2 - errored_index" is returned.
2247 */
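/* For example, assuming each state satisfies its template's proto, reqid and
 * address checks, with a secpath [tunnel ESP, transport ESP] (sp->len == 2):
 * a required tunnel template called with start == 0 matches xvec[0] and
 * returns 1, and a following required transport template called with
 * start == 1 matches xvec[1] and returns 2, leaving no unmatched
 * non-transport states behind.
 */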
2248static inline int
2249xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
2250 unsigned short family)
2251{
2252 int idx = start;
2253
2254 if (tmpl->optional) {
2255 if (tmpl->mode == XFRM_MODE_TRANSPORT)
2256 return start;
2257 } else
2258 start = -1;
2259 for (; idx < sp->len; idx++) {
2260 if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
2261 return ++idx;
2262 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
2263 if (start == -1)
2264 start = -2-idx;
2265 break;
2266 }
2267 }
2268 return start;
2269}
2270
2271int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
2272 unsigned int family, int reverse)
2273{
2274 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2275 int err;
2276
2277 if (unlikely(afinfo == NULL))
2278 return -EAFNOSUPPORT;
2279
2280 afinfo->decode_session(skb, fl, reverse);
2281 err = security_xfrm_decode_session(skb, &fl->flowi_secid);
2282 xfrm_policy_put_afinfo(afinfo);
2283 return err;
2284}
2285EXPORT_SYMBOL(__xfrm_decode_session);
2286
2287static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
2288{
2289 for (; k < sp->len; k++) {
2290 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
2291 *idxp = k;
2292 return 1;
2293 }
2294 }
2295
2296 return 0;
2297}
2298
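/* Inbound policy check: decode the flow from the skb, verify every SA that
 * was actually used against its own selector, find the applicable socket
 * and/or main (and optional sub) policies, and match their combined template
 * list against the secpath.  Returns 1 if the packet is acceptable, 0 if it
 * must be dropped.
 */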
2299int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2300 unsigned short family)
2301{
2302 struct net *net = dev_net(skb->dev);
2303 struct xfrm_policy *pol;
2304 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2305 int npols = 0;
2306 int xfrm_nr;
2307 int pi;
2308 int reverse;
2309 struct flowi fl;
2310 u8 fl_dir;
2311 int xerr_idx = -1;
2312
2313 reverse = dir & ~XFRM_POLICY_MASK;
2314 dir &= XFRM_POLICY_MASK;
2315 fl_dir = policy_to_flow_dir(dir);
2316
2317 if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
2318 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
2319 return 0;
2320 }
2321
2322 nf_nat_decode_session(skb, &fl, family);
2323
2324 /* First, check the used SAs against their selectors. */
2325 if (skb->sp) {
2326 int i;
2327
2328 for (i = skb->sp->len-1; i >= 0; i--) {
2329 struct xfrm_state *x = skb->sp->xvec[i];
2330 if (!xfrm_selector_match(&x->sel, &fl, family)) {
2331 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
2332 return 0;
2333 }
2334 }
2335 }
2336
2337 pol = NULL;
2338 if (sk && sk->sk_policy[dir]) {
2339 pol = xfrm_sk_policy_lookup(sk, dir, &fl);
2340 if (IS_ERR(pol)) {
2341 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2342 return 0;
2343 }
2344 }
2345
2346 if (!pol) {
2347 struct flow_cache_object *flo;
2348
2349 flo = flow_cache_lookup(net, &fl, family, fl_dir,
2350 xfrm_policy_lookup, NULL);
2351 if (IS_ERR_OR_NULL(flo))
2352 pol = ERR_CAST(flo);
2353 else
2354 pol = container_of(flo, struct xfrm_policy, flo);
2355 }
2356
2357 if (IS_ERR(pol)) {
2358 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2359 return 0;
2360 }
2361
2362 if (!pol) {
2363 if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
2364 xfrm_secpath_reject(xerr_idx, skb, &fl);
2365 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
2366 return 0;
2367 }
2368 return 1;
2369 }
2370
2371 pol->curlft.use_time = get_seconds();
2372
2373 pols[0] = pol;
2374 npols++;
2375#ifdef CONFIG_XFRM_SUB_POLICY
2376 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2377 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
2378 &fl, family,
2379 XFRM_POLICY_IN);
2380 if (pols[1]) {
2381 if (IS_ERR(pols[1])) {
2382 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2383 return 0;
2384 }
2385 pols[1]->curlft.use_time = get_seconds();
2386 npols++;
2387 }
2388 }
2389#endif
2390
2391 if (pol->action == XFRM_POLICY_ALLOW) {
2392 struct sec_path *sp;
2393 static struct sec_path dummy;
2394 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
2395 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
2396 struct xfrm_tmpl **tpp = tp;
2397 int ti = 0;
2398 int i, k;
2399
2400 if ((sp = skb->sp) == NULL)
2401 sp = &dummy;
2402
2403 for (pi = 0; pi < npols; pi++) {
2404 if (pols[pi] != pol &&
2405 pols[pi]->action != XFRM_POLICY_ALLOW) {
2406 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2407 goto reject;
2408 }
2409 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
2410 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
2411 goto reject_error;
2412 }
2413 for (i = 0; i < pols[pi]->xfrm_nr; i++)
2414 tpp[ti++] = &pols[pi]->xfrm_vec[i];
2415 }
2416 xfrm_nr = ti;
2417 if (npols > 1) {
2418 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net);
2419 tpp = stp;
2420 }
2421
2422 /* For each tunnel xfrm, find the first matching tmpl.
2423 * For each tmpl before that, find the corresponding xfrm.
2424 * Order is _important_. Later we will implement
2425 * some barriers, but at the moment barriers
2426 * are implied between every two transformations.
2427 */
2428 for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
2429 k = xfrm_policy_ok(tpp[i], sp, k, family);
2430 if (k < 0) {
2431 if (k < -1)
2432 /* "-2 - errored_index" returned */
2433 xerr_idx = -(2+k);
2434 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2435 goto reject;
2436 }
2437 }
2438
2439 if (secpath_has_nontransport(sp, k, &xerr_idx)) {
2440 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2441 goto reject;
2442 }
2443
2444 xfrm_pols_put(pols, npols);
2445 return 1;
2446 }
2447 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2448
2449reject:
2450 xfrm_secpath_reject(xerr_idx, skb, &fl);
2451reject_error:
2452 xfrm_pols_put(pols, npols);
2453 return 0;
2454}
2455EXPORT_SYMBOL(__xfrm_policy_check);
2456
2457int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2458{
2459 struct net *net = dev_net(skb->dev);
2460 struct flowi fl;
2461 struct dst_entry *dst;
2462 int res = 1;
2463
2464 if (xfrm_decode_session(skb, &fl, family) < 0) {
2465 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2466 return 0;
2467 }
2468
2469 skb_dst_force(skb);
2470
2471 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
2472 if (IS_ERR(dst)) {
2473 res = 0;
2474 dst = NULL;
2475 }
2476 skb_dst_set(skb, dst);
2477 return res;
2478}
2479EXPORT_SYMBOL(__xfrm_route_forward);
2480
2481/* Optimize later using cookies and generation ids. */
2482
2483static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
2484{
2485 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
2486 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
2487 * get validated by dst_ops->check on every use. We do this
2488 * because when a normal route referenced by an XFRM dst is
2489 * obsoleted we do not go looking around for all parent
2490 * referencing XFRM dsts so that we can invalidate them. It
2491 * is just too much work. Instead we make the checks here on
2492 * every use. For example:
2493 *
2494 * XFRM dst A --> IPv4 dst X
2495 *
2496 * X is the "xdst->route" of A (X is also the "dst->path" of A
2497 * in this example). If X is marked obsolete, "A" will not
2498 * notice. That's what we are validating here via the
2499 * stale_bundle() check.
2500 *
2501 * When a policy's bundle is pruned, we dst_free() the XFRM
2502 * dst which causes its ->obsolete field to be set to
2503 * DST_OBSOLETE_DEAD. If an XFRM dst has been pruned like
2504 * this, we want to force a new route lookup.
2505 */
2506 if (dst->obsolete < 0 && !stale_bundle(dst))
2507 return dst;
2508
2509 return NULL;
2510}
2511
2512static int stale_bundle(struct dst_entry *dst)
2513{
2514 return !xfrm_bundle_ok((struct xfrm_dst *)dst);
2515}
2516
2517void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
2518{
2519 while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
2520 dst->dev = dev_net(dev)->loopback_dev;
2521 dev_hold(dst->dev);
2522 dev_put(dev);
2523 }
2524}
2525EXPORT_SYMBOL(xfrm_dst_ifdown);
2526
2527static void xfrm_link_failure(struct sk_buff *skb)
2528{
2529 /* Impossible. Such a dst must be popped before it reaches the point of failure. */
2530}
2531
2532static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
2533{
2534 if (dst) {
2535 if (dst->obsolete) {
2536 dst_release(dst);
2537 dst = NULL;
2538 }
2539 }
2540 return dst;
2541}
2542
2543void xfrm_garbage_collect(struct net *net)
2544{
2545 flow_cache_flush(net);
2546}
2547EXPORT_SYMBOL(xfrm_garbage_collect);
2548
2549static void xfrm_garbage_collect_deferred(struct net *net)
2550{
2551 flow_cache_flush_deferred(net);
2552}
2553
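/* Walk a freshly built bundle and seed the per-level MTU: each level caches
 * its child and route MTUs and sets RTAX_MTU to the smaller of the
 * state-adjusted child MTU and the route MTU.
 */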
2554static void xfrm_init_pmtu(struct dst_entry *dst)
2555{
2556 do {
2557 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2558 u32 pmtu, route_mtu_cached;
2559
2560 pmtu = dst_mtu(dst->child);
2561 xdst->child_mtu_cached = pmtu;
2562
2563 pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
2564
2565 route_mtu_cached = dst_mtu(xdst->route);
2566 xdst->route_mtu_cached = route_mtu_cached;
2567
2568 if (pmtu > route_mtu_cached)
2569 pmtu = route_mtu_cached;
2570
2571 dst_metric_set(dst, RTAX_MTU, pmtu);
2572 } while ((dst = dst->next));
2573}
2574
2575/* Check that the bundle accepts the flow and its components are
2576 * still valid.
2577 */
2578
2579static int xfrm_bundle_ok(struct xfrm_dst *first)
2580{
2581 struct dst_entry *dst = &first->u.dst;
2582 struct xfrm_dst *last;
2583 u32 mtu;
2584
2585 if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
2586 (dst->dev && !netif_running(dst->dev)))
2587 return 0;
2588
2589 if (dst->flags & DST_XFRM_QUEUE)
2590 return 1;
2591
2592 last = NULL;
2593
2594 do {
2595 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2596
2597 if (dst->xfrm->km.state != XFRM_STATE_VALID)
2598 return 0;
2599 if (xdst->xfrm_genid != dst->xfrm->genid)
2600 return 0;
2601 if (xdst->num_pols > 0 &&
2602 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
2603 return 0;
2604
2605 mtu = dst_mtu(dst->child);
2606 if (xdst->child_mtu_cached != mtu) {
2607 last = xdst;
2608 xdst->child_mtu_cached = mtu;
2609 }
2610
2611 if (!dst_check(xdst->route, xdst->route_cookie))
2612 return 0;
2613 mtu = dst_mtu(xdst->route);
2614 if (xdst->route_mtu_cached != mtu) {
2615 last = xdst;
2616 xdst->route_mtu_cached = mtu;
2617 }
2618
2619 dst = dst->child;
2620 } while (dst->xfrm);
2621
2622 if (likely(!last))
2623 return 1;
2624
2625 mtu = last->child_mtu_cached;
2626 for (;;) {
2627 dst = &last->u.dst;
2628
2629 mtu = xfrm_state_mtu(dst->xfrm, mtu);
2630 if (mtu > last->route_mtu_cached)
2631 mtu = last->route_mtu_cached;
2632 dst_metric_set(dst, RTAX_MTU, mtu);
2633
2634 if (last == first)
2635 break;
2636
2637 last = (struct xfrm_dst *)last->u.dst.next;
2638 last->child_mtu_cached = mtu;
2639 }
2640
2641 return 1;
2642}
2643
2644static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
2645{
2646 return dst_metric_advmss(dst->path);
2647}
2648
2649static unsigned int xfrm_mtu(const struct dst_entry *dst)
2650{
2651 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2652
2653 return mtu ? : dst_mtu(dst->path);
2654}
2655
2656static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
2657 struct sk_buff *skb,
2658 const void *daddr)
2659{
2660 return dst->path->ops->neigh_lookup(dst, skb, daddr);
2661}
2662
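/* Register the per-family policy ops.  Any dst_ops callbacks the address
 * family did not provide are filled in with the generic xfrm versions, and
 * the resulting dst_ops template is copied into every existing netns.
 */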
2663int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2664{
2665 struct net *net;
2666 int err = 0;
2667 if (unlikely(afinfo == NULL))
2668 return -EINVAL;
2669 if (unlikely(afinfo->family >= NPROTO))
2670 return -EAFNOSUPPORT;
2671 spin_lock(&xfrm_policy_afinfo_lock);
2672 if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
2673 err = -ENOBUFS;
2674 else {
2675 struct dst_ops *dst_ops = afinfo->dst_ops;
2676 if (likely(dst_ops->kmem_cachep == NULL))
2677 dst_ops->kmem_cachep = xfrm_dst_cache;
2678 if (likely(dst_ops->check == NULL))
2679 dst_ops->check = xfrm_dst_check;
2680 if (likely(dst_ops->default_advmss == NULL))
2681 dst_ops->default_advmss = xfrm_default_advmss;
2682 if (likely(dst_ops->mtu == NULL))
2683 dst_ops->mtu = xfrm_mtu;
2684 if (likely(dst_ops->negative_advice == NULL))
2685 dst_ops->negative_advice = xfrm_negative_advice;
2686 if (likely(dst_ops->link_failure == NULL))
2687 dst_ops->link_failure = xfrm_link_failure;
2688 if (likely(dst_ops->neigh_lookup == NULL))
2689 dst_ops->neigh_lookup = xfrm_neigh_lookup;
2690 if (likely(afinfo->garbage_collect == NULL))
2691 afinfo->garbage_collect = xfrm_garbage_collect_deferred;
2692 rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
2693 }
2694 spin_unlock(&xfrm_policy_afinfo_lock);
2695
2696 rtnl_lock();
2697 for_each_net(net) {
2698 struct dst_ops *xfrm_dst_ops;
2699
2700 switch (afinfo->family) {
2701 case AF_INET:
2702 xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
2703 break;
2704#if IS_ENABLED(CONFIG_IPV6)
2705 case AF_INET6:
2706 xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
2707 break;
2708#endif
2709 default:
2710 BUG();
2711 }
2712 *xfrm_dst_ops = *afinfo->dst_ops;
2713 }
2714 rtnl_unlock();
2715
2716 return err;
2717}
2718EXPORT_SYMBOL(xfrm_policy_register_afinfo);
2719
2720int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
2721{
2722 int err = 0;
2723 if (unlikely(afinfo == NULL))
2724 return -EINVAL;
2725 if (unlikely(afinfo->family >= NPROTO))
2726 return -EAFNOSUPPORT;
2727 spin_lock(&xfrm_policy_afinfo_lock);
2728 if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
2729 if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
2730 err = -EINVAL;
2731 else
2732 RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family],
2733 NULL);
2734 }
2735 spin_unlock(&xfrm_policy_afinfo_lock);
2736 if (!err) {
2737 struct dst_ops *dst_ops = afinfo->dst_ops;
2738
2739 synchronize_rcu();
2740
2741 dst_ops->kmem_cachep = NULL;
2742 dst_ops->check = NULL;
2743 dst_ops->negative_advice = NULL;
2744 dst_ops->link_failure = NULL;
2745 afinfo->garbage_collect = NULL;
2746 }
2747 return err;
2748}
2749EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
2750
2751static void __net_init xfrm_dst_ops_init(struct net *net)
2752{
2753 struct xfrm_policy_afinfo *afinfo;
2754
2755 rcu_read_lock();
2756 afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]);
2757 if (afinfo)
2758 net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
2759#if IS_ENABLED(CONFIG_IPV6)
2760 afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]);
2761 if (afinfo)
2762 net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
2763#endif
2764 rcu_read_unlock();
2765}
2766
2767static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
2768{
2769 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2770
2771 switch (event) {
2772 case NETDEV_DOWN:
2773 xfrm_garbage_collect(dev_net(dev));
2774 }
2775 return NOTIFY_DONE;
2776}
2777
2778static struct notifier_block xfrm_dev_notifier = {
2779 .notifier_call = xfrm_dev_event,
2780};
2781
2782#ifdef CONFIG_XFRM_STATISTICS
2783static int __net_init xfrm_statistics_init(struct net *net)
2784{
2785 int rv;
2786
2787 if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
2788 sizeof(struct linux_xfrm_mib),
2789 __alignof__(struct linux_xfrm_mib)) < 0)
2790 return -ENOMEM;
2791 rv = xfrm_proc_init(net);
2792 if (rv < 0)
2793 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
2794 return rv;
2795}
2796
2797static void xfrm_statistics_fini(struct net *net)
2798{
2799 xfrm_proc_fini(net);
2800 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
2801}
2802#else
2803static int __net_init xfrm_statistics_init(struct net *net)
2804{
2805 return 0;
2806}
2807
2808static void xfrm_statistics_fini(struct net *net)
2809{
2810}
2811#endif
2812
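/* Per-netns policy setup: allocate the by-index hash and one by-destination
 * hash per direction (eight buckets initially), initialize the inexact
 * lists, and hook up the hash-resize work.  The dst cache and the netdev
 * notifier are set up only once, for the initial namespace.
 */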
2813static int __net_init xfrm_policy_init(struct net *net)
2814{
2815 unsigned int hmask, sz;
2816 int dir;
2817
2818 if (net_eq(net, &init_net))
2819 xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
2820 sizeof(struct xfrm_dst),
2821 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2822 NULL);
2823
2824 hmask = 8 - 1;
2825 sz = (hmask+1) * sizeof(struct hlist_head);
2826
2827 net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
2828 if (!net->xfrm.policy_byidx)
2829 goto out_byidx;
2830 net->xfrm.policy_idx_hmask = hmask;
2831
2832 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2833 struct xfrm_policy_hash *htab;
2834
2835 net->xfrm.policy_count[dir] = 0;
2836 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
2837
2838 htab = &net->xfrm.policy_bydst[dir];
2839 htab->table = xfrm_hash_alloc(sz);
2840 if (!htab->table)
2841 goto out_bydst;
2842 htab->hmask = hmask;
2843 }
2844
2845 INIT_LIST_HEAD(&net->xfrm.policy_all);
2846 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
2847 if (net_eq(net, &init_net))
2848 register_netdevice_notifier(&xfrm_dev_notifier);
2849 return 0;
2850
2851out_bydst:
2852 for (dir--; dir >= 0; dir--) {
2853 struct xfrm_policy_hash *htab;
2854
2855 htab = &net->xfrm.policy_bydst[dir];
2856 xfrm_hash_free(htab->table, sz);
2857 }
2858 xfrm_hash_free(net->xfrm.policy_byidx, sz);
2859out_byidx:
2860 return -ENOMEM;
2861}
2862
2863static void xfrm_policy_fini(struct net *net)
2864{
2865 struct xfrm_audit audit_info;
2866 unsigned int sz;
2867 int dir;
2868
2869 flush_work(&net->xfrm.policy_hash_work);
2870#ifdef CONFIG_XFRM_SUB_POLICY
2871 audit_info.loginuid = INVALID_UID;
2872 audit_info.sessionid = (unsigned int)-1;
2873 audit_info.secid = 0;
2874 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
2875#endif
2876 audit_info.loginuid = INVALID_UID;
2877 audit_info.sessionid = (unsigned int)-1;
2878 audit_info.secid = 0;
2879 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
2880
2881 WARN_ON(!list_empty(&net->xfrm.policy_all));
2882
2883 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2884 struct xfrm_policy_hash *htab;
2885
2886 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
2887
2888 htab = &net->xfrm.policy_bydst[dir];
2889 sz = (htab->hmask + 1) * sizeof(struct hlist_head);
2890 WARN_ON(!hlist_empty(htab->table));
2891 xfrm_hash_free(htab->table, sz);
2892 }
2893
2894 sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
2895 WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
2896 xfrm_hash_free(net->xfrm.policy_byidx, sz);
2897}
2898
2899static int __net_init xfrm_net_init(struct net *net)
2900{
2901 int rv;
2902
2903 rv = xfrm_statistics_init(net);
2904 if (rv < 0)
2905 goto out_statistics;
2906 rv = xfrm_state_init(net);
2907 if (rv < 0)
2908 goto out_state;
2909 rv = xfrm_policy_init(net);
2910 if (rv < 0)
2911 goto out_policy;
2912 xfrm_dst_ops_init(net);
2913 rv = xfrm_sysctl_init(net);
2914 if (rv < 0)
2915 goto out_sysctl;
2916 rv = flow_cache_init(net);
2917 if (rv < 0)
2918 goto out;
2919
2920 /* Initialize the per-net locks here */
2921 spin_lock_init(&net->xfrm.xfrm_state_lock);
2922 rwlock_init(&net->xfrm.xfrm_policy_lock);
2923 mutex_init(&net->xfrm.xfrm_cfg_mutex);
2924
2925 return 0;
2926
2927out:
2928 xfrm_sysctl_fini(net);
2929out_sysctl:
2930 xfrm_policy_fini(net);
2931out_policy:
2932 xfrm_state_fini(net);
2933out_state:
2934 xfrm_statistics_fini(net);
2935out_statistics:
2936 return rv;
2937}
2938
2939static void __net_exit xfrm_net_exit(struct net *net)
2940{
2941 flow_cache_fini(net);
2942 xfrm_sysctl_fini(net);
2943 xfrm_policy_fini(net);
2944 xfrm_state_fini(net);
2945 xfrm_statistics_fini(net);
2946}
2947
2948static struct pernet_operations __net_initdata xfrm_net_ops = {
2949 .init = xfrm_net_init,
2950 .exit = xfrm_net_exit,
2951};
2952
2953void __init xfrm_init(void)
2954{
2955 register_pernet_subsys(&xfrm_net_ops);
2956 xfrm_input_init();
2957}
2958
2959#ifdef CONFIG_AUDITSYSCALL
2960static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
2961 struct audit_buffer *audit_buf)
2962{
2963 struct xfrm_sec_ctx *ctx = xp->security;
2964 struct xfrm_selector *sel = &xp->selector;
2965
2966 if (ctx)
2967 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2968 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2969
2970 switch (sel->family) {
2971 case AF_INET:
2972 audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
2973 if (sel->prefixlen_s != 32)
2974 audit_log_format(audit_buf, " src_prefixlen=%d",
2975 sel->prefixlen_s);
2976 audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
2977 if (sel->prefixlen_d != 32)
2978 audit_log_format(audit_buf, " dst_prefixlen=%d",
2979 sel->prefixlen_d);
2980 break;
2981 case AF_INET6:
2982 audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
2983 if (sel->prefixlen_s != 128)
2984 audit_log_format(audit_buf, " src_prefixlen=%d",
2985 sel->prefixlen_s);
2986 audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
2987 if (sel->prefixlen_d != 128)
2988 audit_log_format(audit_buf, " dst_prefixlen=%d",
2989 sel->prefixlen_d);
2990 break;
2991 }
2992}
2993
2994void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
2995 kuid_t auid, unsigned int sessionid, u32 secid)
2996{
2997 struct audit_buffer *audit_buf;
2998
2999 audit_buf = xfrm_audit_start("SPD-add");
3000 if (audit_buf == NULL)
3001 return;
3002 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
3003 audit_log_format(audit_buf, " res=%u", result);
3004 xfrm_audit_common_policyinfo(xp, audit_buf);
3005 audit_log_end(audit_buf);
3006}
3007EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
3008
3009void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
3010 kuid_t auid, unsigned int sessionid, u32 secid)
3011{
3012 struct audit_buffer *audit_buf;
3013
3014 audit_buf = xfrm_audit_start("SPD-delete");
3015 if (audit_buf == NULL)
3016 return;
3017 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
3018 audit_log_format(audit_buf, " res=%u", result);
3019 xfrm_audit_common_policyinfo(xp, audit_buf);
3020 audit_log_end(audit_buf);
3021}
3022EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
3023#endif
3024
3025#ifdef CONFIG_XFRM_MIGRATE
3026static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
3027 const struct xfrm_selector *sel_tgt)
3028{
3029 if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
3030 if (sel_tgt->family == sel_cmp->family &&
3031 xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
3032 sel_cmp->family) &&
3033 xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
3034 sel_cmp->family) &&
3035 sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
3036 sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
3037 return true;
3038 }
3039 } else {
3040 if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
3041 return true;
3042 }
3043 }
3044 return false;
3045}
3046
3047static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
3048 u8 dir, u8 type, struct net *net)
3049{
3050 struct xfrm_policy *pol, *ret = NULL;
3051 struct hlist_head *chain;
3052 u32 priority = ~0U;
3053
3054 read_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME*/
3055 chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
3056 hlist_for_each_entry(pol, chain, bydst) {
3057 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3058 pol->type == type) {
3059 ret = pol;
3060 priority = ret->priority;
3061 break;
3062 }
3063 }
3064 chain = &net->xfrm.policy_inexact[dir];
3065 hlist_for_each_entry(pol, chain, bydst) {
3066 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3067 pol->type == type &&
3068 pol->priority < priority) {
3069 ret = pol;
3070 break;
3071 }
3072 }
3073
3074 if (ret)
3075 xfrm_pol_hold(ret);
3076
3077 read_unlock_bh(&net->xfrm.xfrm_policy_lock);
3078
3079 return ret;
3080}
3081
3082static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
3083{
3084 int match = 0;
3085
3086 if (t->mode == m->mode && t->id.proto == m->proto &&
3087 (m->reqid == 0 || t->reqid == m->reqid)) {
3088 switch (t->mode) {
3089 case XFRM_MODE_TUNNEL:
3090 case XFRM_MODE_BEET:
3091 if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
3092 m->old_family) &&
3093 xfrm_addr_equal(&t->saddr, &m->old_saddr,
3094 m->old_family)) {
3095 match = 1;
3096 }
3097 break;
3098 case XFRM_MODE_TRANSPORT:
3099 /* in case of transport mode, the template does not store
3100 any IP addresses, hence we just compare mode and
3101 protocol */
3102 match = 1;
3103 break;
3104 default:
3105 break;
3106 }
3107 }
3108 return match;
3109}
3110
3111/* update endpoint address(es) of template(s) */
3112static int xfrm_policy_migrate(struct xfrm_policy *pol,
3113 struct xfrm_migrate *m, int num_migrate)
3114{
3115 struct xfrm_migrate *mp;
3116 int i, j, n = 0;
3117
3118 write_lock_bh(&pol->lock);
3119 if (unlikely(pol->walk.dead)) {
3120 /* target policy has been deleted */
3121 write_unlock_bh(&pol->lock);
3122 return -ENOENT;
3123 }
3124
3125 for (i = 0; i < pol->xfrm_nr; i++) {
3126 for (j = 0, mp = m; j < num_migrate; j++, mp++) {
3127 if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
3128 continue;
3129 n++;
3130 if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
3131 pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
3132 continue;
3133 /* update endpoints */
3134 memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
3135 sizeof(pol->xfrm_vec[i].id.daddr));
3136 memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
3137 sizeof(pol->xfrm_vec[i].saddr));
3138 pol->xfrm_vec[i].encap_family = mp->new_family;
3139 /* flush bundles */
3140 atomic_inc(&pol->genid);
3141 }
3142 }
3143
3144 write_unlock_bh(&pol->lock);
3145
3146 if (!n)
3147 return -ENODATA;
3148
3149 return 0;
3150}
3151
3152static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
3153{
3154 int i, j;
3155
3156 if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
3157 return -EINVAL;
3158
3159 for (i = 0; i < num_migrate; i++) {
3160 if (xfrm_addr_equal(&m[i].old_daddr, &m[i].new_daddr,
3161 m[i].old_family) &&
3162 xfrm_addr_equal(&m[i].old_saddr, &m[i].new_saddr,
3163 m[i].old_family))
3164 return -EINVAL;
3165 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
3166 xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
3167 return -EINVAL;
3168
3169 /* check if there is any duplicated entry */
3170 for (j = i + 1; j < num_migrate; j++) {
3171 if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
3172 sizeof(m[i].old_daddr)) &&
3173 !memcmp(&m[i].old_saddr, &m[j].old_saddr,
3174 sizeof(m[i].old_saddr)) &&
3175 m[i].proto == m[j].proto &&
3176 m[i].mode == m[j].mode &&
3177 m[i].reqid == m[j].reqid &&
3178 m[i].old_family == m[j].old_family)
3179 return -EINVAL;
3180 }
3181 }
3182
3183 return 0;
3184}
3185
3186int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
3187 struct xfrm_migrate *m, int num_migrate,
3188 struct xfrm_kmaddress *k, struct net *net)
3189{
3190 int i, err, nx_cur = 0, nx_new = 0;
3191 struct xfrm_policy *pol = NULL;
3192 struct xfrm_state *x, *xc;
3193 struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
3194 struct xfrm_state *x_new[XFRM_MAX_DEPTH];
3195 struct xfrm_migrate *mp;
3196
3197 if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
3198 goto out;
3199
3200 /* Stage 1 - find policy */
3201 if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
3202 err = -ENOENT;
3203 goto out;
3204 }
3205
3206 /* Stage 2 - find and update state(s) */
3207 for (i = 0, mp = m; i < num_migrate; i++, mp++) {
3208 if ((x = xfrm_migrate_state_find(mp, net))) {
3209 x_cur[nx_cur] = x;
3210 nx_cur++;
3211 if ((xc = xfrm_state_migrate(x, mp))) {
3212 x_new[nx_new] = xc;
3213 nx_new++;
3214 } else {
3215 err = -ENODATA;
3216 goto restore_state;
3217 }
3218 }
3219 }
3220
3221 /* Stage 3 - update policy */
3222 if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
3223 goto restore_state;
3224
3225 /* Stage 4 - delete old state(s) */
3226 if (nx_cur) {
3227 xfrm_states_put(x_cur, nx_cur);
3228 xfrm_states_delete(x_cur, nx_cur);
3229 }
3230
3231 /* Stage 5 - announce */
3232 km_migrate(sel, dir, type, m, num_migrate, k);
3233
3234 xfrm_pol_put(pol);
3235
3236 return 0;
3237out:
3238 return err;
3239
3240restore_state:
3241 if (pol)
3242 xfrm_pol_put(pol);
3243 if (nx_cur)
3244 xfrm_states_put(x_cur, nx_cur);
3245 if (nx_new)
3246 xfrm_states_delete(x_new, nx_new);
3247
3248 return err;
3249}
3250EXPORT_SYMBOL(xfrm_migrate);
3251#endif