v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * xfrm_policy.c
   4 *
   5 * Changes:
   6 *	Mitsuru KANDA @USAGI
   7 * 	Kazunori MIYAZAWA @USAGI
   8 * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
   9 * 		IPv6 support
  10 * 	Kazunori MIYAZAWA @USAGI
  11 * 	YOSHIFUJI Hideaki
  12 * 		Split up af-specific portion
  13 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
  14 *
  15 */
  16
  17#include <linux/err.h>
  18#include <linux/slab.h>
  19#include <linux/kmod.h>
  20#include <linux/list.h>
  21#include <linux/spinlock.h>
  22#include <linux/workqueue.h>
  23#include <linux/notifier.h>
  24#include <linux/netdevice.h>
  25#include <linux/netfilter.h>
  26#include <linux/module.h>
  27#include <linux/cache.h>
  28#include <linux/cpu.h>
  29#include <linux/audit.h>
  30#include <linux/rhashtable.h>
  31#include <linux/if_tunnel.h>
  32#include <net/dst.h>
  33#include <net/flow.h>
  34#include <net/xfrm.h>
  35#include <net/ip.h>
  36#if IS_ENABLED(CONFIG_IPV6_MIP6)
  37#include <net/mip6.h>
  38#endif
  39#ifdef CONFIG_XFRM_STATISTICS
  40#include <net/snmp.h>
  41#endif
  42#ifdef CONFIG_XFRM_ESPINTCP
  43#include <net/espintcp.h>
  44#endif
  45
  46#include "xfrm_hash.h"
  47
  48#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
  49#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
  50#define XFRM_MAX_QUEUE_LEN	100
  51
  52struct xfrm_flo {
  53	struct dst_entry *dst_orig;
  54	u8 flags;
  55};
  56
  57/* prefixes smaller than this are stored in lists, not trees. */
  58#define INEXACT_PREFIXLEN_IPV4	16
  59#define INEXACT_PREFIXLEN_IPV6	48
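/* For example: an IPv4 selector such as 10.0.0.0/8 (prefixlen 8, below
 * INEXACT_PREFIXLEN_IPV4) stays on a plain hlist, while 10.1.2.0/24
 * (prefixlen 24) gets indexed in the rbtree; see
 * xfrm_pol_inexact_addr_use_any_list() below.
 */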
  60
  61struct xfrm_pol_inexact_node {
  62	struct rb_node node;
  63	union {
  64		xfrm_address_t addr;
  65		struct rcu_head rcu;
  66	};
  67	u8 prefixlen;
  68
  69	struct rb_root root;
  70
  71	/* the policies matching this node; can be an empty list */
  72	struct hlist_head hhead;
  73};
  74
  75/* xfrm inexact policy search tree:
  76 * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
  77 *  |
  78 * +---- root_d: sorted by daddr:prefix
  79 * |                 |
  80 * |        xfrm_pol_inexact_node
  81 * |                 |
  82 * |                 +- root: sorted by saddr/prefix
  83 * |                 |              |
  84 * |                 |         xfrm_pol_inexact_node
  85 * |                 |              |
  86 * |                 |              + root: unused
  87 * |                 |              |
  88 * |                 |              + hhead: saddr:daddr policies
  89 * |                 |
  90 * |                 +- coarse policies and all any:daddr policies
  91 * |
  92 * +---- root_s: sorted by saddr:prefix
  93 * |                 |
  94 * |        xfrm_pol_inexact_node
  95 * |                 |
  96 * |                 + root: unused
  97 * |                 |
  98 * |                 + hhead: saddr:any policies
  99 * |
 100 * +---- coarse policies and all any:any policies
 101 *
 102 * Lookups return four candidate lists:
 103 * 1. any:any list from top-level xfrm_pol_inexact_bin
 104 * 2. any:daddr list from daddr tree
 105 * 3. saddr:daddr list from 2nd level daddr tree
 106 * 4. saddr:any list from saddr tree
 107 *
 108 * This result set then needs to be searched for the policy with
 109 * the lowest priority.  If two results have the same prio, the youngest one wins.
 110 */
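/* Worked example: a lookup for saddr 10.1.1.1 -> daddr 192.168.0.1 can
 * therefore collect up to four hlists: the bin's own any:any list, the
 * any:daddr list of the matching node in root_d, the saddr:daddr list of
 * that node's saddr subtree, and the saddr:any list of the matching node
 * in root_s.  All four are scanned and the rules above pick the winner.
 */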
 111
 112struct xfrm_pol_inexact_key {
 113	possible_net_t net;
 114	u32 if_id;
 115	u16 family;
 116	u8 dir, type;
 117};
 118
 119struct xfrm_pol_inexact_bin {
 120	struct xfrm_pol_inexact_key k;
 121	struct rhash_head head;
 122	/* list containing '*:*' policies */
 123	struct hlist_head hhead;
 124
 125	seqcount_spinlock_t count;
 126	/* tree sorted by daddr/prefix */
 127	struct rb_root root_d;
 128
 129	/* tree sorted by saddr/prefix */
 130	struct rb_root root_s;
 131
 132	/* slow path below */
 133	struct list_head inexact_bins;
 134	struct rcu_head rcu;
 135};
 136
 137enum xfrm_pol_inexact_candidate_type {
 138	XFRM_POL_CAND_BOTH,
 139	XFRM_POL_CAND_SADDR,
 140	XFRM_POL_CAND_DADDR,
 141	XFRM_POL_CAND_ANY,
 142
 143	XFRM_POL_CAND_MAX,
 144};
 145
 146struct xfrm_pol_inexact_candidates {
 147	struct hlist_head *res[XFRM_POL_CAND_MAX];
 148};
 149
 150static DEFINE_SPINLOCK(xfrm_if_cb_lock);
 151static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;
 152
 153static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
 154static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
 155						__read_mostly;
 156
 157static struct kmem_cache *xfrm_dst_cache __ro_after_init;
 158
 159static struct rhashtable xfrm_policy_inexact_table;
 160static const struct rhashtable_params xfrm_pol_inexact_params;
 161
 162static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
 163static int stale_bundle(struct dst_entry *dst);
 164static int xfrm_bundle_ok(struct xfrm_dst *xdst);
 165static void xfrm_policy_queue_process(struct timer_list *t);
 166
 167static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
 168static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
 169						int dir);
 170
 171static struct xfrm_pol_inexact_bin *
 172xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
 173			   u32 if_id);
 174
 175static struct xfrm_pol_inexact_bin *
 176xfrm_policy_inexact_lookup_rcu(struct net *net,
 177			       u8 type, u16 family, u8 dir, u32 if_id);
 178static struct xfrm_policy *
 179xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
 180			bool excl);
 181static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
 182					    struct xfrm_policy *policy);
 183
 184static bool
 185xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
 186				    struct xfrm_pol_inexact_bin *b,
 187				    const xfrm_address_t *saddr,
 188				    const xfrm_address_t *daddr);
 189
 190static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
 191{
 192	return refcount_inc_not_zero(&policy->refcnt);
 193}
 194
 195static inline bool
 196__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
 197{
 198	const struct flowi4 *fl4 = &fl->u.ip4;
 199
 200	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
 201		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
 202		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
 203		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
 204		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
 205		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
 206}
 207
 208static inline bool
 209__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
 210{
 211	const struct flowi6 *fl6 = &fl->u.ip6;
 212
 213	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
 214		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
 215		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
 216		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
 217		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
 218		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
 219}
 220
 221bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
 222			 unsigned short family)
 223{
 224	switch (family) {
 225	case AF_INET:
 226		return __xfrm4_selector_match(sel, fl);
 227	case AF_INET6:
 228		return __xfrm6_selector_match(sel, fl);
 229	}
 230	return false;
 231}
 232
 233static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
 234{
 235	const struct xfrm_policy_afinfo *afinfo;
 236
 237	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
 238		return NULL;
 239	rcu_read_lock();
 240	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
 241	if (unlikely(!afinfo))
 242		rcu_read_unlock();
 243	return afinfo;
 244}
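/* Note that on success xfrm_policy_get_afinfo() deliberately returns with
 * rcu_read_lock() still held; callers such as __xfrm_dst_lookup() below
 * drop it with rcu_read_unlock() once they are done with the afinfo.
 */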
 245
 246/* Called with rcu_read_lock(). */
 247static const struct xfrm_if_cb *xfrm_if_get_cb(void)
 248{
 249	return rcu_dereference(xfrm_if_cb);
 250}
 251
 252struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
 253				    const xfrm_address_t *saddr,
 254				    const xfrm_address_t *daddr,
 255				    int family, u32 mark)
 256{
 257	const struct xfrm_policy_afinfo *afinfo;
 258	struct dst_entry *dst;
 259
 260	afinfo = xfrm_policy_get_afinfo(family);
 261	if (unlikely(afinfo == NULL))
 262		return ERR_PTR(-EAFNOSUPPORT);
 263
 264	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
 265
 266	rcu_read_unlock();
 267
 268	return dst;
 269}
 270EXPORT_SYMBOL(__xfrm_dst_lookup);
 271
 272static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
 273						int tos, int oif,
 274						xfrm_address_t *prev_saddr,
 275						xfrm_address_t *prev_daddr,
 276						int family, u32 mark)
 277{
 278	struct net *net = xs_net(x);
 279	xfrm_address_t *saddr = &x->props.saddr;
 280	xfrm_address_t *daddr = &x->id.daddr;
 281	struct dst_entry *dst;
 282
 283	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
 284		saddr = x->coaddr;
 285		daddr = prev_daddr;
 286	}
 287	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
 288		saddr = prev_saddr;
 289		daddr = x->coaddr;
 290	}
 291
 292	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
 293
 294	if (!IS_ERR(dst)) {
 295		if (prev_saddr != saddr)
 296			memcpy(prev_saddr, saddr,  sizeof(*prev_saddr));
 297		if (prev_daddr != daddr)
 298			memcpy(prev_daddr, daddr,  sizeof(*prev_daddr));
 299	}
 300
 301	return dst;
 302}
 303
 304static inline unsigned long make_jiffies(long secs)
 305{
 306	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
 307		return MAX_SCHEDULE_TIMEOUT-1;
 308	else
 309		return secs*HZ;
 310}
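/* E.g. make_jiffies(1) == HZ; very large lifetimes are clamped to
 * MAX_SCHEDULE_TIMEOUT - 1 so the seconds-to-jiffies conversion above
 * cannot overflow.
 */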
 311
 312static void xfrm_policy_timer(struct timer_list *t)
 313{
 314	struct xfrm_policy *xp = from_timer(xp, t, timer);
 315	time64_t now = ktime_get_real_seconds();
 316	time64_t next = TIME64_MAX;
 317	int warn = 0;
 318	int dir;
 319
 320	read_lock(&xp->lock);
 321
 322	if (unlikely(xp->walk.dead))
 323		goto out;
 324
 325	dir = xfrm_policy_id2dir(xp->index);
 326
 327	if (xp->lft.hard_add_expires_seconds) {
 328		time64_t tmo = xp->lft.hard_add_expires_seconds +
 329			xp->curlft.add_time - now;
 330		if (tmo <= 0)
 331			goto expired;
 332		if (tmo < next)
 333			next = tmo;
 334	}
 335	if (xp->lft.hard_use_expires_seconds) {
 336		time64_t tmo = xp->lft.hard_use_expires_seconds +
 337			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
 338		if (tmo <= 0)
 339			goto expired;
 340		if (tmo < next)
 341			next = tmo;
 342	}
 343	if (xp->lft.soft_add_expires_seconds) {
 344		time64_t tmo = xp->lft.soft_add_expires_seconds +
 345			xp->curlft.add_time - now;
 346		if (tmo <= 0) {
 347			warn = 1;
 348			tmo = XFRM_KM_TIMEOUT;
 349		}
 350		if (tmo < next)
 351			next = tmo;
 352	}
 353	if (xp->lft.soft_use_expires_seconds) {
 354		time64_t tmo = xp->lft.soft_use_expires_seconds +
 355			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
 356		if (tmo <= 0) {
 357			warn = 1;
 358			tmo = XFRM_KM_TIMEOUT;
 359		}
 360		if (tmo < next)
 361			next = tmo;
 362	}
 363
 364	if (warn)
 365		km_policy_expired(xp, dir, 0, 0);
 366	if (next != TIME64_MAX &&
 367	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
 368		xfrm_pol_hold(xp);
 369
 370out:
 371	read_unlock(&xp->lock);
 372	xfrm_pol_put(xp);
 373	return;
 374
 375expired:
 376	read_unlock(&xp->lock);
 377	if (!xfrm_policy_delete(xp, dir))
 378		km_policy_expired(xp, dir, 1, 0);
 379	xfrm_pol_put(xp);
 380}
 381
 382/* Allocate xfrm_policy. Not used here; it is supposed to be used by
 383 * pfkeyv2 SPD calls.
 384 */
 385
 386struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
 387{
 388	struct xfrm_policy *policy;
 389
 390	policy = kzalloc(sizeof(struct xfrm_policy), gfp);
 391
 392	if (policy) {
 393		write_pnet(&policy->xp_net, net);
 394		INIT_LIST_HEAD(&policy->walk.all);
 395		INIT_HLIST_NODE(&policy->bydst_inexact_list);
 396		INIT_HLIST_NODE(&policy->bydst);
 397		INIT_HLIST_NODE(&policy->byidx);
 398		rwlock_init(&policy->lock);
 399		refcount_set(&policy->refcnt, 1);
 400		skb_queue_head_init(&policy->polq.hold_queue);
 401		timer_setup(&policy->timer, xfrm_policy_timer, 0);
 402		timer_setup(&policy->polq.hold_timer,
 403			    xfrm_policy_queue_process, 0);
 404	}
 405	return policy;
 406}
 407EXPORT_SYMBOL(xfrm_policy_alloc);
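/* Sketch of the intended caller flow (cf. the comment above; key managers
 * such as pfkeyv2 do roughly this):
 *
 *	policy = xfrm_policy_alloc(net, GFP_KERNEL);
 *	if (!policy)
 *		return -ENOMEM;
 *	... fill in policy->selector, ->lft, ->priority, ->family ...
 *	err = xfrm_policy_insert(dir, policy, excl);
 */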
 408
 409static void xfrm_policy_destroy_rcu(struct rcu_head *head)
 410{
 411	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
 412
 413	security_xfrm_policy_free(policy->security);
 414	kfree(policy);
 415}
 416
 417/* Destroy xfrm_policy: descendant resources must be released by this point. */
 418
 419void xfrm_policy_destroy(struct xfrm_policy *policy)
 420{
 421	BUG_ON(!policy->walk.dead);
 422
 423	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
 424		BUG();
 425
 426	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
 427}
 428EXPORT_SYMBOL(xfrm_policy_destroy);
 429
 430/* Rule must be locked. Release descendant resources, announce
 431 * entry dead. The rule must already be unlinked from all lists.
 432 */
 433
 434static void xfrm_policy_kill(struct xfrm_policy *policy)
 435{
 436	write_lock_bh(&policy->lock);
 437	policy->walk.dead = 1;
 438	write_unlock_bh(&policy->lock);
 439
 440	atomic_inc(&policy->genid);
 441
 442	if (del_timer(&policy->polq.hold_timer))
 443		xfrm_pol_put(policy);
 444	skb_queue_purge(&policy->polq.hold_queue);
 445
 446	if (del_timer(&policy->timer))
 447		xfrm_pol_put(policy);
 448
 449	xfrm_pol_put(policy);
 450}
 451
 452static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
 453
 454static inline unsigned int idx_hash(struct net *net, u32 index)
 455{
 456	return __idx_hash(index, net->xfrm.policy_idx_hmask);
 457}
 458
 459/* calculate policy hash thresholds */
 460static void __get_hash_thresh(struct net *net,
 461			      unsigned short family, int dir,
 462			      u8 *dbits, u8 *sbits)
 463{
 464	switch (family) {
 465	case AF_INET:
 466		*dbits = net->xfrm.policy_bydst[dir].dbits4;
 467		*sbits = net->xfrm.policy_bydst[dir].sbits4;
 468		break;
 469
 470	case AF_INET6:
 471		*dbits = net->xfrm.policy_bydst[dir].dbits6;
 472		*sbits = net->xfrm.policy_bydst[dir].sbits6;
 473		break;
 474
 475	default:
 476		*dbits = 0;
 477		*sbits = 0;
 478	}
 479}
 480
 481static struct hlist_head *policy_hash_bysel(struct net *net,
 482					    const struct xfrm_selector *sel,
 483					    unsigned short family, int dir)
 484{
 485	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 486	unsigned int hash;
 487	u8 dbits;
 488	u8 sbits;
 489
 490	__get_hash_thresh(net, family, dir, &dbits, &sbits);
 491	hash = __sel_hash(sel, family, hmask, dbits, sbits);
 492
 493	if (hash == hmask + 1)
 494		return NULL;
 495
 496	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
 497		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
 498}
 499
 500static struct hlist_head *policy_hash_direct(struct net *net,
 501					     const xfrm_address_t *daddr,
 502					     const xfrm_address_t *saddr,
 503					     unsigned short family, int dir)
 504{
 505	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 506	unsigned int hash;
 507	u8 dbits;
 508	u8 sbits;
 509
 510	__get_hash_thresh(net, family, dir, &dbits, &sbits);
 511	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
 512
 513	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
 514		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
 515}
 516
 517static void xfrm_dst_hash_transfer(struct net *net,
 518				   struct hlist_head *list,
 519				   struct hlist_head *ndsttable,
 520				   unsigned int nhashmask,
 521				   int dir)
 522{
 523	struct hlist_node *tmp, *entry0 = NULL;
 524	struct xfrm_policy *pol;
 525	unsigned int h0 = 0;
 526	u8 dbits;
 527	u8 sbits;
 528
 529redo:
 530	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
 531		unsigned int h;
 532
 533		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
 534		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
 535				pol->family, nhashmask, dbits, sbits);
 536		if (!entry0) {
 537			hlist_del_rcu(&pol->bydst);
 538			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
 539			h0 = h;
 540		} else {
 541			if (h != h0)
 542				continue;
 543			hlist_del_rcu(&pol->bydst);
 544			hlist_add_behind_rcu(&pol->bydst, entry0);
 545		}
 546		entry0 = &pol->bydst;
 547	}
 548	if (!hlist_empty(list)) {
 549		entry0 = NULL;
 550		goto redo;
 551	}
 552}
 553
 554static void xfrm_idx_hash_transfer(struct hlist_head *list,
 555				   struct hlist_head *nidxtable,
 556				   unsigned int nhashmask)
 557{
 558	struct hlist_node *tmp;
 559	struct xfrm_policy *pol;
 560
 561	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
 562		unsigned int h;
 563
 564		h = __idx_hash(pol->index, nhashmask);
 565		hlist_add_head(&pol->byidx, nidxtable+h);
 566	}
 567}
 568
 569static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
 570{
 571	return ((old_hmask + 1) << 1) - 1;
 572}
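/* E.g. an hmask of 15 (16 buckets) becomes 31 (32 buckets): every resize
 * doubles the hash table while keeping the mask in 2^n - 1 form.
 */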
 573
 574static void xfrm_bydst_resize(struct net *net, int dir)
 575{
 576	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 577	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
 578	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
 579	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
 580	struct hlist_head *odst;
 581	int i;
 582
 583	if (!ndst)
 584		return;
 585
 586	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
 587	write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
 588
 589	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
 590				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
 591
 592	for (i = hmask; i >= 0; i--)
 593		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
 594
 595	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
 596	net->xfrm.policy_bydst[dir].hmask = nhashmask;
 597
 598	write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
 599	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 600
 601	synchronize_rcu();
 602
 603	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
 604}
 605
 606static void xfrm_byidx_resize(struct net *net, int total)
 607{
 608	unsigned int hmask = net->xfrm.policy_idx_hmask;
 609	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
 610	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
 611	struct hlist_head *oidx = net->xfrm.policy_byidx;
 612	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
 613	int i;
 614
 615	if (!nidx)
 616		return;
 617
 618	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
 619
 620	for (i = hmask; i >= 0; i--)
 621		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
 622
 623	net->xfrm.policy_byidx = nidx;
 624	net->xfrm.policy_idx_hmask = nhashmask;
 625
 626	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 627
 628	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
 629}
 630
 631static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
 632{
 633	unsigned int cnt = net->xfrm.policy_count[dir];
 634	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 635
 636	if (total)
 637		*total += cnt;
 638
 639	if ((hmask + 1) < xfrm_policy_hashmax &&
 640	    cnt > hmask)
 641		return 1;
 642
 643	return 0;
 644}
 645
 646static inline int xfrm_byidx_should_resize(struct net *net, int total)
 647{
 648	unsigned int hmask = net->xfrm.policy_idx_hmask;
 649
 650	if ((hmask + 1) < xfrm_policy_hashmax &&
 651	    total > hmask)
 652		return 1;
 653
 654	return 0;
 655}
 656
 657void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
 
 659	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
 660	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
 661	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
 662	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
 663	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
 664	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
 665	si->spdhcnt = net->xfrm.policy_idx_hmask;
 666	si->spdhmcnt = xfrm_policy_hashmax;
 667}
 668EXPORT_SYMBOL(xfrm_spd_getinfo);
 669
 670static DEFINE_MUTEX(hash_resize_mutex);
 671static void xfrm_hash_resize(struct work_struct *work)
 672{
 673	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
 674	int dir, total;
 675
 676	mutex_lock(&hash_resize_mutex);
 677
 678	total = 0;
 679	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
 680		if (xfrm_bydst_should_resize(net, dir, &total))
 681			xfrm_bydst_resize(net, dir);
 682	}
 683	if (xfrm_byidx_should_resize(net, total))
 684		xfrm_byidx_resize(net, total);
 685
 686	mutex_unlock(&hash_resize_mutex);
 687}
 688
 689/* Make sure *pol can be inserted into fastbin.
 690 * Useful to check that later insert requests will be successful
 691 * (provided xfrm_policy_lock is held throughout).
 692 */
 693static struct xfrm_pol_inexact_bin *
 694xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
 695{
 696	struct xfrm_pol_inexact_bin *bin, *prev;
 697	struct xfrm_pol_inexact_key k = {
 698		.family = pol->family,
 699		.type = pol->type,
 700		.dir = dir,
 701		.if_id = pol->if_id,
 702	};
 703	struct net *net = xp_net(pol);
 704
 705	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
 706
 707	write_pnet(&k.net, net);
 708	bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
 709				     xfrm_pol_inexact_params);
 710	if (bin)
 711		return bin;
 712
 713	bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
 714	if (!bin)
 715		return NULL;
 716
 717	bin->k = k;
 718	INIT_HLIST_HEAD(&bin->hhead);
 719	bin->root_d = RB_ROOT;
 720	bin->root_s = RB_ROOT;
 721	seqcount_spinlock_init(&bin->count, &net->xfrm.xfrm_policy_lock);
 722
 723	prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
 724						&bin->k, &bin->head,
 725						xfrm_pol_inexact_params);
 726	if (!prev) {
 727		list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
 728		return bin;
 729	}
 730
 731	kfree(bin);
 732
 733	return IS_ERR(prev) ? NULL : prev;
 734}
 735
 736static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
 737					       int family, u8 prefixlen)
 738{
 739	if (xfrm_addr_any(addr, family))
 740		return true;
 741
 742	if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
 743		return true;
 744
 745	if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
 746		return true;
 747
 748	return false;
 749}
 750
 751static bool
 752xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
 753{
 754	const xfrm_address_t *addr;
 755	bool saddr_any, daddr_any;
 756	u8 prefixlen;
 757
 758	addr = &policy->selector.saddr;
 759	prefixlen = policy->selector.prefixlen_s;
 760
 761	saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
 762						       policy->family,
 763						       prefixlen);
 764	addr = &policy->selector.daddr;
 765	prefixlen = policy->selector.prefixlen_d;
 766	daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
 767						       policy->family,
 768						       prefixlen);
 769	return saddr_any && daddr_any;
 770}
 771
 772static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
 773				       const xfrm_address_t *addr, u8 prefixlen)
 774{
 775	node->addr = *addr;
 776	node->prefixlen = prefixlen;
 777}
 778
 779static struct xfrm_pol_inexact_node *
 780xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
 781{
 782	struct xfrm_pol_inexact_node *node;
 783
 784	node = kzalloc(sizeof(*node), GFP_ATOMIC);
 785	if (node)
 786		xfrm_pol_inexact_node_init(node, addr, prefixlen);
 787
 788	return node;
 789}
 790
 791static int xfrm_policy_addr_delta(const xfrm_address_t *a,
 792				  const xfrm_address_t *b,
 793				  u8 prefixlen, u16 family)
 794{
 795	u32 ma, mb, mask;
 796	unsigned int pdw, pbi;
 797	int delta = 0;
 798
 799	switch (family) {
 800	case AF_INET:
 801		if (prefixlen == 0)
 802			return 0;
 803		mask = ~0U << (32 - prefixlen);
 804		ma = ntohl(a->a4) & mask;
 805		mb = ntohl(b->a4) & mask;
 806		if (ma < mb)
 807			delta = -1;
 808		else if (ma > mb)
 809			delta = 1;
 810		break;
 811	case AF_INET6:
 812		pdw = prefixlen >> 5;
 813		pbi = prefixlen & 0x1f;
 814
 815		if (pdw) {
 816			delta = memcmp(a->a6, b->a6, pdw << 2);
 817			if (delta)
 818				return delta;
 819		}
 820		if (pbi) {
 821			mask = ~0U << (32 - pbi);
 822			ma = ntohl(a->a6[pdw]) & mask;
 823			mb = ntohl(b->a6[pdw]) & mask;
 824			if (ma < mb)
 825				delta = -1;
 826			else if (ma > mb)
 827				delta = 1;
 828		}
 829		break;
 830	default:
 831		break;
 832	}
 833
 834	return delta;
 835}
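/* Example (AF_INET): with prefixlen 16, 10.1.2.3 and 10.1.200.7 both mask
 * to 10.1.0.0 and the delta is 0 (same subtree node); with prefixlen 24
 * the masked values differ (10.1.2.0 < 10.1.200.0) and the delta is -1.
 */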
 836
 837static void xfrm_policy_inexact_list_reinsert(struct net *net,
 838					      struct xfrm_pol_inexact_node *n,
 839					      u16 family)
 840{
 841	unsigned int matched_s, matched_d;
 842	struct xfrm_policy *policy, *p;
 843
 844	matched_s = 0;
 845	matched_d = 0;
 846
 847	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
 848		struct hlist_node *newpos = NULL;
 849		bool matches_s, matches_d;
 850
 851		if (!policy->bydst_reinsert)
 852			continue;
 853
 854		WARN_ON_ONCE(policy->family != family);
 855
 856		policy->bydst_reinsert = false;
 857		hlist_for_each_entry(p, &n->hhead, bydst) {
 858			if (policy->priority > p->priority)
 859				newpos = &p->bydst;
 860			else if (policy->priority == p->priority &&
 861				 policy->pos > p->pos)
 862				newpos = &p->bydst;
 863			else
 864				break;
 865		}
 866
 867		if (newpos)
 868			hlist_add_behind_rcu(&policy->bydst, newpos);
 869		else
 870			hlist_add_head_rcu(&policy->bydst, &n->hhead);
 871
 872		/* paranoia checks follow.
 873		 * Check that the reinserted policy matches at least
 874		 * saddr or daddr for current node prefix.
 875		 *
 876		 * Matching both is fine, matching saddr in one policy
 877		 * (but not daddr) and then matching only daddr in another
 878		 * is a bug.
 879		 */
 880		matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
 881						   &n->addr,
 882						   n->prefixlen,
 883						   family) == 0;
 884		matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
 885						   &n->addr,
 886						   n->prefixlen,
 887						   family) == 0;
 888		if (matches_s && matches_d)
 889			continue;
 890
 891		WARN_ON_ONCE(!matches_s && !matches_d);
 892		if (matches_s)
 893			matched_s++;
 894		if (matches_d)
 895			matched_d++;
 896		WARN_ON_ONCE(matched_s && matched_d);
 897	}
 898}
 899
 900static void xfrm_policy_inexact_node_reinsert(struct net *net,
 901					      struct xfrm_pol_inexact_node *n,
 902					      struct rb_root *new,
 903					      u16 family)
 904{
 905	struct xfrm_pol_inexact_node *node;
 906	struct rb_node **p, *parent;
 907
 908	/* we should not have another subtree here */
 909	WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
 910restart:
 911	parent = NULL;
 912	p = &new->rb_node;
 913	while (*p) {
 914		u8 prefixlen;
 915		int delta;
 916
 917		parent = *p;
 918		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
 919
 920		prefixlen = min(node->prefixlen, n->prefixlen);
 921
 922		delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
 923					       prefixlen, family);
 924		if (delta < 0) {
 925			p = &parent->rb_left;
 926		} else if (delta > 0) {
 927			p = &parent->rb_right;
 928		} else {
 929			bool same_prefixlen = node->prefixlen == n->prefixlen;
 930			struct xfrm_policy *tmp;
 931
 932			hlist_for_each_entry(tmp, &n->hhead, bydst) {
 933				tmp->bydst_reinsert = true;
 934				hlist_del_rcu(&tmp->bydst);
 935			}
 936
 937			node->prefixlen = prefixlen;
 938
 939			xfrm_policy_inexact_list_reinsert(net, node, family);
 940
 941			if (same_prefixlen) {
 942				kfree_rcu(n, rcu);
 943				return;
 944			}
 945
 946			rb_erase(*p, new);
 947			kfree_rcu(n, rcu);
 948			n = node;
 949			goto restart;
 950		}
 951	}
 952
 953	rb_link_node_rcu(&n->node, parent, p);
 954	rb_insert_color(&n->node, new);
 955}
 956
 957/* merge nodes v and n */
 958static void xfrm_policy_inexact_node_merge(struct net *net,
 959					   struct xfrm_pol_inexact_node *v,
 960					   struct xfrm_pol_inexact_node *n,
 961					   u16 family)
 962{
 963	struct xfrm_pol_inexact_node *node;
 964	struct xfrm_policy *tmp;
 965	struct rb_node *rnode;
 966
 967	/* To-be-merged node v has a subtree.
 968	 *
 969	 * Dismantle it and insert its nodes to n->root.
 970	 */
 971	while ((rnode = rb_first(&v->root)) != NULL) {
 972		node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
 973		rb_erase(&node->node, &v->root);
 974		xfrm_policy_inexact_node_reinsert(net, node, &n->root,
 975						  family);
 976	}
 977
 978	hlist_for_each_entry(tmp, &v->hhead, bydst) {
 979		tmp->bydst_reinsert = true;
 980		hlist_del_rcu(&tmp->bydst);
 981	}
 982
 983	xfrm_policy_inexact_list_reinsert(net, n, family);
 984}
 985
 986static struct xfrm_pol_inexact_node *
 987xfrm_policy_inexact_insert_node(struct net *net,
 988				struct rb_root *root,
 989				xfrm_address_t *addr,
 990				u16 family, u8 prefixlen, u8 dir)
 991{
 992	struct xfrm_pol_inexact_node *cached = NULL;
 993	struct rb_node **p, *parent = NULL;
 994	struct xfrm_pol_inexact_node *node;
 995
 996	p = &root->rb_node;
 997	while (*p) {
 998		int delta;
 999
1000		parent = *p;
1001		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
1002
1003		delta = xfrm_policy_addr_delta(addr, &node->addr,
1004					       node->prefixlen,
1005					       family);
1006		if (delta == 0 && prefixlen >= node->prefixlen) {
1007			WARN_ON_ONCE(cached); /* ipsec policies got lost */
1008			return node;
1009		}
1010
1011		if (delta < 0)
1012			p = &parent->rb_left;
1013		else
1014			p = &parent->rb_right;
1015
1016		if (prefixlen < node->prefixlen) {
1017			delta = xfrm_policy_addr_delta(addr, &node->addr,
1018						       prefixlen,
1019						       family);
1020			if (delta)
1021				continue;
1022
1023			/* This node is a subnet of the new prefix. It needs
1024			 * to be removed and re-inserted with the smaller
1025			 * prefix and all nodes that are now also covered
1026			 * by the reduced prefixlen.
1027			 */
1028			rb_erase(&node->node, root);
1029
1030			if (!cached) {
1031				xfrm_pol_inexact_node_init(node, addr,
1032							   prefixlen);
1033				cached = node;
1034			} else {
1035				/* This node also falls within the new
1036				 * prefixlen. Merge the to-be-reinserted
1037				 * node and this one.
1038				 */
1039				xfrm_policy_inexact_node_merge(net, node,
1040							       cached, family);
1041				kfree_rcu(node, rcu);
1042			}
1043
1044			/* restart */
1045			p = &root->rb_node;
1046			parent = NULL;
1047		}
1048	}
1049
1050	node = cached;
1051	if (!node) {
1052		node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
1053		if (!node)
1054			return NULL;
1055	}
1056
1057	rb_link_node_rcu(&node->node, parent, p);
1058	rb_insert_color(&node->node, root);
1059
1060	return node;
1061}
1062
1063static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
1064{
1065	struct xfrm_pol_inexact_node *node;
1066	struct rb_node *rn = rb_first(r);
1067
1068	while (rn) {
1069		node = rb_entry(rn, struct xfrm_pol_inexact_node, node);
1070
1071		xfrm_policy_inexact_gc_tree(&node->root, rm);
1072		rn = rb_next(rn);
1073
1074		if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
1075			WARN_ON_ONCE(rm);
1076			continue;
1077		}
1078
1079		rb_erase(&node->node, r);
1080		kfree_rcu(node, rcu);
1081	}
1082}
1083
1084static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
1085{
1086	write_seqcount_begin(&b->count);
1087	xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
1088	xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
1089	write_seqcount_end(&b->count);
1090
1091	if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
1092	    !hlist_empty(&b->hhead)) {
1093		WARN_ON_ONCE(net_exit);
1094		return;
1095	}
1096
1097	if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
1098				   xfrm_pol_inexact_params) == 0) {
1099		list_del(&b->inexact_bins);
1100		kfree_rcu(b, rcu);
1101	}
1102}
1103
1104static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
1105{
1106	struct net *net = read_pnet(&b->k.net);
1107
1108	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1109	__xfrm_policy_inexact_prune_bin(b, false);
1110	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1111}
1112
1113static void __xfrm_policy_inexact_flush(struct net *net)
1114{
1115	struct xfrm_pol_inexact_bin *bin, *t;
1116
1117	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1118
1119	list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
1120		__xfrm_policy_inexact_prune_bin(bin, false);
1121}
1122
1123static struct hlist_head *
1124xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
1125				struct xfrm_policy *policy, u8 dir)
1126{
1127	struct xfrm_pol_inexact_node *n;
1128	struct net *net;
1129
1130	net = xp_net(policy);
1131	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1132
1133	if (xfrm_policy_inexact_insert_use_any_list(policy))
1134		return &bin->hhead;
1135
1136	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
1137					       policy->family,
1138					       policy->selector.prefixlen_d)) {
1139		write_seqcount_begin(&bin->count);
1140		n = xfrm_policy_inexact_insert_node(net,
1141						    &bin->root_s,
1142						    &policy->selector.saddr,
1143						    policy->family,
1144						    policy->selector.prefixlen_s,
1145						    dir);
1146		write_seqcount_end(&bin->count);
1147		if (!n)
1148			return NULL;
1149
1150		return &n->hhead;
1151	}
1152
1153	/* daddr is fixed */
1154	write_seqcount_begin(&bin->count);
1155	n = xfrm_policy_inexact_insert_node(net,
1156					    &bin->root_d,
1157					    &policy->selector.daddr,
1158					    policy->family,
1159					    policy->selector.prefixlen_d, dir);
1160	write_seqcount_end(&bin->count);
1161	if (!n)
1162		return NULL;
1163
1164	/* saddr is wildcard */
1165	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
1166					       policy->family,
1167					       policy->selector.prefixlen_s))
1168		return &n->hhead;
1169
1170	write_seqcount_begin(&bin->count);
1171	n = xfrm_policy_inexact_insert_node(net,
1172					    &n->root,
1173					    &policy->selector.saddr,
1174					    policy->family,
1175					    policy->selector.prefixlen_s, dir);
1176	write_seqcount_end(&bin->count);
1177	if (!n)
1178		return NULL;
1179
1180	return &n->hhead;
1181}
1182
1183static struct xfrm_policy *
1184xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
1185{
1186	struct xfrm_pol_inexact_bin *bin;
1187	struct xfrm_policy *delpol;
1188	struct hlist_head *chain;
1189	struct net *net;
1190
1191	bin = xfrm_policy_inexact_alloc_bin(policy, dir);
1192	if (!bin)
1193		return ERR_PTR(-ENOMEM);
1194
1195	net = xp_net(policy);
1196	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1197
1198	chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
1199	if (!chain) {
1200		__xfrm_policy_inexact_prune_bin(bin, false);
1201		return ERR_PTR(-ENOMEM);
1202	}
1203
1204	delpol = xfrm_policy_insert_list(chain, policy, excl);
1205	if (delpol && excl) {
1206		__xfrm_policy_inexact_prune_bin(bin, false);
1207		return ERR_PTR(-EEXIST);
1208	}
1209
1210	chain = &net->xfrm.policy_inexact[dir];
1211	xfrm_policy_insert_inexact_list(chain, policy);
1212
1213	if (delpol)
1214		__xfrm_policy_inexact_prune_bin(bin, false);
1215
1216	return delpol;
1217}
1218
1219static void xfrm_hash_rebuild(struct work_struct *work)
1220{
1221	struct net *net = container_of(work, struct net,
1222				       xfrm.policy_hthresh.work);
1223	unsigned int hmask;
1224	struct xfrm_policy *pol;
1225	struct xfrm_policy *policy;
1226	struct hlist_head *chain;
1227	struct hlist_head *odst;
1228	struct hlist_node *newpos;
1229	int i;
1230	int dir;
1231	unsigned seq;
1232	u8 lbits4, rbits4, lbits6, rbits6;
1233
1234	mutex_lock(&hash_resize_mutex);
1235
1236	/* read selector prefixlen thresholds */
1237	do {
1238		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
1239
1240		lbits4 = net->xfrm.policy_hthresh.lbits4;
1241		rbits4 = net->xfrm.policy_hthresh.rbits4;
1242		lbits6 = net->xfrm.policy_hthresh.lbits6;
1243		rbits6 = net->xfrm.policy_hthresh.rbits6;
1244	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
1245
1246	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1247	write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
1248
1249	/* make sure that we can insert the indirect policies again before
1250	 * we start with destructive action.
1251	 */
1252	list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
1253		struct xfrm_pol_inexact_bin *bin;
1254		u8 dbits, sbits;
1255
1256		dir = xfrm_policy_id2dir(policy->index);
1257		if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
1258			continue;
1259
1260		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
1261			if (policy->family == AF_INET) {
1262				dbits = rbits4;
1263				sbits = lbits4;
1264			} else {
1265				dbits = rbits6;
1266				sbits = lbits6;
1267			}
1268		} else {
1269			if (policy->family == AF_INET) {
1270				dbits = lbits4;
1271				sbits = rbits4;
1272			} else {
1273				dbits = lbits6;
1274				sbits = rbits6;
1275			}
1276		}
1277
1278		if (policy->selector.prefixlen_d < dbits ||
1279		    policy->selector.prefixlen_s < sbits)
1280			continue;
1281
1282		bin = xfrm_policy_inexact_alloc_bin(policy, dir);
1283		if (!bin)
1284			goto out_unlock;
1285
1286		if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
1287			goto out_unlock;
1288	}
1289
1290	/* reset the bydst and inexact table in all directions */
1291	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
1292		struct hlist_node *n;
1293
1294		hlist_for_each_entry_safe(policy, n,
1295					  &net->xfrm.policy_inexact[dir],
1296					  bydst_inexact_list) {
1297			hlist_del_rcu(&policy->bydst);
1298			hlist_del_init(&policy->bydst_inexact_list);
1299		}
1300
1301		hmask = net->xfrm.policy_bydst[dir].hmask;
1302		odst = net->xfrm.policy_bydst[dir].table;
1303		for (i = hmask; i >= 0; i--) {
1304			hlist_for_each_entry_safe(policy, n, odst + i, bydst)
1305				hlist_del_rcu(&policy->bydst);
1306		}
1307		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
1308			/* dir out => dst = remote, src = local */
1309			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
1310			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
1311			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
1312			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
1313		} else {
1314			/* dir in/fwd => dst = local, src = remote */
1315			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
1316			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
1317			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
1318			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
1319		}
1320	}
1321
1322	/* re-insert all policies by order of creation */
1323	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
1324		if (policy->walk.dead)
1325			continue;
1326		dir = xfrm_policy_id2dir(policy->index);
1327		if (dir >= XFRM_POLICY_MAX) {
1328			/* skip socket policies */
1329			continue;
1330		}
1331		newpos = NULL;
1332		chain = policy_hash_bysel(net, &policy->selector,
1333					  policy->family, dir);
1334
1335		if (!chain) {
1336			void *p = xfrm_policy_inexact_insert(policy, dir, 0);
1337
1338			WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
1339			continue;
1340		}
1341
1342		hlist_for_each_entry(pol, chain, bydst) {
1343			if (policy->priority >= pol->priority)
1344				newpos = &pol->bydst;
1345			else
1346				break;
1347		}
1348		if (newpos)
1349			hlist_add_behind_rcu(&policy->bydst, newpos);
1350		else
1351			hlist_add_head_rcu(&policy->bydst, chain);
1352	}
1353
1354out_unlock:
1355	__xfrm_policy_inexact_flush(net);
1356	write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
1357	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1358
1359	mutex_unlock(&hash_resize_mutex);
1360}
1361
1362void xfrm_policy_hash_rebuild(struct net *net)
1363{
1364	schedule_work(&net->xfrm.policy_hthresh.work);
1365}
1366EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
1367
1368/* Generate new index... KAME seems to generate them ordered by cost
1369 * of absolute unpredictability of rule ordering. This will not pass. */
1370static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
1371{
1372	static u32 idx_generator;
1373
1374	for (;;) {
1375		struct hlist_head *list;
1376		struct xfrm_policy *p;
1377		u32 idx;
1378		int found;
1379
1380		if (!index) {
1381			idx = (idx_generator | dir);
1382			idx_generator += 8;
1383		} else {
1384			idx = index;
1385			index = 0;
1386		}
1387
1388		if (idx == 0)
1389			idx = 8;
1390		list = net->xfrm.policy_byidx + idx_hash(net, idx);
1391		found = 0;
1392		hlist_for_each_entry(p, list, byidx) {
1393			if (p->index == idx) {
1394				found = 1;
1395				break;
1396			}
1397		}
1398		if (!found)
1399			return idx;
1400	}
1401}
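/* The low three bits of a generated index encode the direction, which is
 * how xfrm_policy_id2dir() recovers it later; idx_generator therefore
 * advances in steps of 8.  An explicitly requested non-zero 'index' is
 * tried once before falling back to generated values.
 */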
1402
1403static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
1404{
1405	u32 *p1 = (u32 *) s1;
1406	u32 *p2 = (u32 *) s2;
1407	int len = sizeof(struct xfrm_selector) / sizeof(u32);
1408	int i;
1409
1410	for (i = 0; i < len; i++) {
1411		if (p1[i] != p2[i])
1412			return 1;
1413	}
1414
1415	return 0;
1416}
1417
1418static void xfrm_policy_requeue(struct xfrm_policy *old,
1419				struct xfrm_policy *new)
1420{
1421	struct xfrm_policy_queue *pq = &old->polq;
1422	struct sk_buff_head list;
1423
1424	if (skb_queue_empty(&pq->hold_queue))
1425		return;
1426
1427	__skb_queue_head_init(&list);
1428
1429	spin_lock_bh(&pq->hold_queue.lock);
1430	skb_queue_splice_init(&pq->hold_queue, &list);
1431	if (del_timer(&pq->hold_timer))
1432		xfrm_pol_put(old);
1433	spin_unlock_bh(&pq->hold_queue.lock);
1434
1435	pq = &new->polq;
1436
1437	spin_lock_bh(&pq->hold_queue.lock);
1438	skb_queue_splice(&list, &pq->hold_queue);
1439	pq->timeout = XFRM_QUEUE_TMO_MIN;
1440	if (!mod_timer(&pq->hold_timer, jiffies))
1441		xfrm_pol_hold(new);
1442	spin_unlock_bh(&pq->hold_queue.lock);
1443}
1444
1445static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
1446					  struct xfrm_policy *pol)
1447{
1448	return mark->v == pol->mark.v && mark->m == pol->mark.m;
1449}
1450
1451static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
1452{
1453	const struct xfrm_pol_inexact_key *k = data;
1454	u32 a = k->type << 24 | k->dir << 16 | k->family;
1455
1456	return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
1457			    seed);
1458}
1459
1460static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
1461{
1462	const struct xfrm_pol_inexact_bin *b = data;
1463
1464	return xfrm_pol_bin_key(&b->k, 0, seed);
1465}
1466
1467static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
1468			    const void *ptr)
1469{
1470	const struct xfrm_pol_inexact_key *key = arg->key;
1471	const struct xfrm_pol_inexact_bin *b = ptr;
1472	int ret;
1473
1474	if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
1475		return -1;
1476
1477	ret = b->k.dir ^ key->dir;
1478	if (ret)
1479		return ret;
1480
1481	ret = b->k.type ^ key->type;
1482	if (ret)
1483		return ret;
1484
1485	ret = b->k.family ^ key->family;
1486	if (ret)
1487		return ret;
1488
1489	return b->k.if_id ^ key->if_id;
1490}
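/* rhashtable only needs obj_cmpfn() to distinguish match (0) from
 * mismatch (non-zero), so the cheap XOR comparisons above are sufficient.
 */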
1491
1492static const struct rhashtable_params xfrm_pol_inexact_params = {
1493	.head_offset		= offsetof(struct xfrm_pol_inexact_bin, head),
1494	.hashfn			= xfrm_pol_bin_key,
1495	.obj_hashfn		= xfrm_pol_bin_obj,
1496	.obj_cmpfn		= xfrm_pol_bin_cmp,
1497	.automatic_shrinking	= true,
1498};
1499
1500static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
1501					    struct xfrm_policy *policy)
1502{
1503	struct xfrm_policy *pol, *delpol = NULL;
1504	struct hlist_node *newpos = NULL;
1505	int i = 0;
1506
1507	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
1508		if (pol->type == policy->type &&
1509		    pol->if_id == policy->if_id &&
1510		    !selector_cmp(&pol->selector, &policy->selector) &&
1511		    xfrm_policy_mark_match(&policy->mark, pol) &&
1512		    xfrm_sec_ctx_match(pol->security, policy->security) &&
1513		    !WARN_ON(delpol)) {
1514			delpol = pol;
1515			if (policy->priority > pol->priority)
1516				continue;
1517		} else if (policy->priority >= pol->priority) {
1518			newpos = &pol->bydst_inexact_list;
1519			continue;
1520		}
1521		if (delpol)
1522			break;
1523	}
1524
1525	if (newpos)
1526		hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
1527	else
1528		hlist_add_head_rcu(&policy->bydst_inexact_list, chain);
1529
1530	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
1531		pol->pos = i;
1532		i++;
1533	}
1534}
1535
1536static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
1537						   struct xfrm_policy *policy,
1538						   bool excl)
1539{
1540	struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;
1541
1542	hlist_for_each_entry(pol, chain, bydst) {
1543		if (pol->type == policy->type &&
1544		    pol->if_id == policy->if_id &&
1545		    !selector_cmp(&pol->selector, &policy->selector) &&
1546		    xfrm_policy_mark_match(&policy->mark, pol) &&
1547		    xfrm_sec_ctx_match(pol->security, policy->security) &&
1548		    !WARN_ON(delpol)) {
1549			if (excl)
1550				return ERR_PTR(-EEXIST);
1551			delpol = pol;
1552			if (policy->priority > pol->priority)
1553				continue;
1554		} else if (policy->priority >= pol->priority) {
1555			newpos = pol;
1556			continue;
1557		}
1558		if (delpol)
1559			break;
1560	}
1561
1562	if (newpos)
1563		hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
1564	else
1565		hlist_add_head_rcu(&policy->bydst, chain);
1566
1567	return delpol;
1568}
1569
1570int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
1571{
1572	struct net *net = xp_net(policy);
1573	struct xfrm_policy *delpol;
1574	struct hlist_head *chain;
1575
1576	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1577	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
1578	if (chain)
1579		delpol = xfrm_policy_insert_list(chain, policy, excl);
1580	else
1581		delpol = xfrm_policy_inexact_insert(policy, dir, excl);
1582
1583	if (IS_ERR(delpol)) {
1584		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1585		return PTR_ERR(delpol);
1586	}
1587
1588	__xfrm_policy_link(policy, dir);
1589
1590	/* After previous checking, family can either be AF_INET or AF_INET6 */
1591	if (policy->family == AF_INET)
1592		rt_genid_bump_ipv4(net);
1593	else
1594		rt_genid_bump_ipv6(net);
1595
1596	if (delpol) {
1597		xfrm_policy_requeue(delpol, policy);
1598		__xfrm_policy_unlink(delpol, dir);
1599	}
1600	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
1601	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
1602	policy->curlft.add_time = ktime_get_real_seconds();
1603	policy->curlft.use_time = 0;
1604	if (!mod_timer(&policy->timer, jiffies + HZ))
1605		xfrm_pol_hold(policy);
1606	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1607
1608	if (delpol)
1609		xfrm_policy_kill(delpol);
1610	else if (xfrm_bydst_should_resize(net, dir, NULL))
1611		schedule_work(&net->xfrm.policy_hash_work);
1612
1613	return 0;
1614}
1615EXPORT_SYMBOL(xfrm_policy_insert);
1616
1617static struct xfrm_policy *
1618__xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
1619			u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
1620			struct xfrm_sec_ctx *ctx)
1621{
1622	struct xfrm_policy *pol;
1623
1624	if (!chain)
1625		return NULL;
1626
1627	hlist_for_each_entry(pol, chain, bydst) {
1628		if (pol->type == type &&
1629		    pol->if_id == if_id &&
1630		    xfrm_policy_mark_match(mark, pol) &&
1631		    !selector_cmp(sel, &pol->selector) &&
1632		    xfrm_sec_ctx_match(ctx, pol->security))
1633			return pol;
1634	}
1635
1636	return NULL;
1637}
1638
1639struct xfrm_policy *
1640xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
1641		      u8 type, int dir, struct xfrm_selector *sel,
1642		      struct xfrm_sec_ctx *ctx, int delete, int *err)
1643{
1644	struct xfrm_pol_inexact_bin *bin = NULL;
1645	struct xfrm_policy *pol, *ret = NULL;
1646	struct hlist_head *chain;
1647
1648	*err = 0;
1649	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1650	chain = policy_hash_bysel(net, sel, sel->family, dir);
1651	if (!chain) {
1652		struct xfrm_pol_inexact_candidates cand;
1653		int i;
1654
1655		bin = xfrm_policy_inexact_lookup(net, type,
1656						 sel->family, dir, if_id);
1657		if (!bin) {
1658			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1659			return NULL;
1660		}
1661
1662		if (!xfrm_policy_find_inexact_candidates(&cand, bin,
1663							 &sel->saddr,
1664							 &sel->daddr)) {
1665			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1666			return NULL;
1667		}
1668
1669		pol = NULL;
1670		for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
1671			struct xfrm_policy *tmp;
1672
1673			tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
1674						      if_id, type, dir,
1675						      sel, ctx);
1676			if (!tmp)
1677				continue;
1678
1679			if (!pol || tmp->pos < pol->pos)
1680				pol = tmp;
1681		}
1682	} else {
1683		pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
1684					      sel, ctx);
1685	}
1686
1687	if (pol) {
1688		xfrm_pol_hold(pol);
1689		if (delete) {
1690			*err = security_xfrm_policy_delete(pol->security);
1691			if (*err) {
1692				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1693				return pol;
1694			}
1695			__xfrm_policy_unlink(pol, dir);
1696		}
1697		ret = pol;
1698	}
1699	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1700
1701	if (ret && delete)
1702		xfrm_policy_kill(ret);
1703	if (bin && delete)
1704		xfrm_policy_inexact_prune_bin(bin);
1705	return ret;
1706}
1707EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
1708
1709struct xfrm_policy *
1710xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
1711		 u8 type, int dir, u32 id, int delete, int *err)
1712{
1713	struct xfrm_policy *pol, *ret;
1714	struct hlist_head *chain;
1715
1716	*err = -ENOENT;
1717	if (xfrm_policy_id2dir(id) != dir)
1718		return NULL;
1719
1720	*err = 0;
1721	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1722	chain = net->xfrm.policy_byidx + idx_hash(net, id);
1723	ret = NULL;
1724	hlist_for_each_entry(pol, chain, byidx) {
1725		if (pol->type == type && pol->index == id &&
1726		    pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
1727			xfrm_pol_hold(pol);
1728			if (delete) {
1729				*err = security_xfrm_policy_delete(
1730								pol->security);
1731				if (*err) {
1732					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1733					return pol;
1734				}
1735				__xfrm_policy_unlink(pol, dir);
1736			}
1737			ret = pol;
1738			break;
1739		}
1740	}
1741	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1742
1743	if (ret && delete)
1744		xfrm_policy_kill(ret);
1745	return ret;
1746}
1747EXPORT_SYMBOL(xfrm_policy_byid);
1748
1749#ifdef CONFIG_SECURITY_NETWORK_XFRM
1750static inline int
1751xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1752{
1753	struct xfrm_policy *pol;
1754	int err = 0;
1755
1756	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1757		if (pol->walk.dead ||
1758		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
1759		    pol->type != type)
1760			continue;
1761
1762		err = security_xfrm_policy_delete(pol->security);
1763		if (err) {
1764			xfrm_audit_policy_delete(pol, 0, task_valid);
1765			return err;
1766		}
1767	}
1768	return err;
1769}
1770#else
1771static inline int
1772xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1773{
1774	return 0;
1775}
1776#endif
1777
1778int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
1779{
1780	int dir, err = 0, cnt = 0;
1781	struct xfrm_policy *pol;
1782
1783	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1784
1785	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
1786	if (err)
1787		goto out;
1788
1789again:
1790	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1791		dir = xfrm_policy_id2dir(pol->index);
1792		if (pol->walk.dead ||
1793		    dir >= XFRM_POLICY_MAX ||
1794		    pol->type != type)
1795			continue;
1796
1797		__xfrm_policy_unlink(pol, dir);
1798		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1799		cnt++;
1800		xfrm_audit_policy_delete(pol, 1, task_valid);
1801		xfrm_policy_kill(pol);
1802		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1803		goto again;
1804	}
1805	if (cnt)
1806		__xfrm_policy_inexact_flush(net);
1807	else
1808		err = -ESRCH;
1809out:
1810	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1811	return err;
1812}
1813EXPORT_SYMBOL(xfrm_policy_flush);
1814
1815int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1816		     int (*func)(struct xfrm_policy *, int, int, void*),
1817		     void *data)
1818{
1819	struct xfrm_policy *pol;
1820	struct xfrm_policy_walk_entry *x;
1821	int error = 0;
1822
1823	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
1824	    walk->type != XFRM_POLICY_TYPE_ANY)
1825		return -EINVAL;
1826
1827	if (list_empty(&walk->walk.all) && walk->seq != 0)
1828		return 0;
1829
1830	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1831	if (list_empty(&walk->walk.all))
1832		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
1833	else
1834		x = list_first_entry(&walk->walk.all,
1835				     struct xfrm_policy_walk_entry, all);
1836
1837	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1838		if (x->dead)
1839			continue;
1840		pol = container_of(x, struct xfrm_policy, walk);
1841		if (walk->type != XFRM_POLICY_TYPE_ANY &&
1842		    walk->type != pol->type)
1843			continue;
1844		error = func(pol, xfrm_policy_id2dir(pol->index),
1845			     walk->seq, data);
1846		if (error) {
1847			list_move_tail(&walk->walk.all, &x->all);
1848			goto out;
1849		}
1850		walk->seq++;
1851	}
1852	if (walk->seq == 0) {
1853		error = -ENOENT;
1854		goto out;
1855	}
1856	list_del_init(&walk->walk.all);
1857out:
1858	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1859	return error;
1860}
1861EXPORT_SYMBOL(xfrm_policy_walk);
1862
1863void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1864{
1865	INIT_LIST_HEAD(&walk->walk.all);
1866	walk->walk.dead = 1;
1867	walk->type = type;
1868	walk->seq = 0;
1869}
1870EXPORT_SYMBOL(xfrm_policy_walk_init);
1871
1872void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1873{
1874	if (list_empty(&walk->walk.all))
1875		return;
1876
1877	spin_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
1878	list_del(&walk->walk.all);
1879	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1880}
1881EXPORT_SYMBOL(xfrm_policy_walk_done);
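/* Typical walker usage (sketch; the callback name is illustrative):
 *
 *	struct xfrm_policy_walk walk;
 *
 *	xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_MAIN);
 *	err = xfrm_policy_walk(net, &walk, dump_one_policy, data);
 *	xfrm_policy_walk_done(&walk, net);
 *
 * Between calls the walk entry stays linked into net->xfrm.policy_all,
 * which is what lets an interrupted dump resume where it stopped.
 */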
1882
1883/*
1884 * Find policy to apply to this flow.
1885 *
1886 * Returns 0 if policy found, else an -errno.
1887 */
1888static int xfrm_policy_match(const struct xfrm_policy *pol,
1889			     const struct flowi *fl,
1890			     u8 type, u16 family, int dir, u32 if_id)
1891{
1892	const struct xfrm_selector *sel = &pol->selector;
1893	int ret = -ESRCH;
1894	bool match;
1895
1896	if (pol->family != family ||
1897	    pol->if_id != if_id ||
1898	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1899	    pol->type != type)
1900		return ret;
1901
1902	match = xfrm_selector_match(sel, fl, family);
1903	if (match)
1904		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid);
1905	return ret;
1906}
1907
1908static struct xfrm_pol_inexact_node *
1909xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
1910				seqcount_spinlock_t *count,
1911				const xfrm_address_t *addr, u16 family)
1912{
1913	const struct rb_node *parent;
1914	int seq;
1915
1916again:
1917	seq = read_seqcount_begin(count);
1918
1919	parent = rcu_dereference_raw(r->rb_node);
1920	while (parent) {
1921		struct xfrm_pol_inexact_node *node;
1922		int delta;
1923
1924		node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
1925
1926		delta = xfrm_policy_addr_delta(addr, &node->addr,
1927					       node->prefixlen, family);
1928		if (delta < 0) {
1929			parent = rcu_dereference_raw(parent->rb_left);
1930			continue;
1931		} else if (delta > 0) {
1932			parent = rcu_dereference_raw(parent->rb_right);
1933			continue;
1934		}
1935
1936		return node;
1937	}
1938
1939	if (read_seqcount_retry(count, seq))
1940		goto again;
1941
1942	return NULL;
1943}
1944
1945static bool
1946xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
1947				    struct xfrm_pol_inexact_bin *b,
1948				    const xfrm_address_t *saddr,
1949				    const xfrm_address_t *daddr)
1950{
1951	struct xfrm_pol_inexact_node *n;
1952	u16 family;
1953
1954	if (!b)
1955		return false;
1956
1957	family = b->k.family;
1958	memset(cand, 0, sizeof(*cand));
1959	cand->res[XFRM_POL_CAND_ANY] = &b->hhead;
1960
1961	n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
1962					    family);
1963	if (n) {
1964		cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
1965		n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
1966						    family);
1967		if (n)
1968			cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
1969	}
1970
1971	n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
1972					    family);
1973	if (n)
1974		cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;
1975
1976	return true;
1977}
1978
1979static struct xfrm_pol_inexact_bin *
1980xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
1981			       u8 dir, u32 if_id)
1982{
1983	struct xfrm_pol_inexact_key k = {
1984		.family = family,
1985		.type = type,
1986		.dir = dir,
1987		.if_id = if_id,
1988	};
1989
1990	write_pnet(&k.net, net);
1991
1992	return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
1993				 xfrm_pol_inexact_params);
1994}
1995
1996static struct xfrm_pol_inexact_bin *
1997xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
1998			   u8 dir, u32 if_id)
1999{
2000	struct xfrm_pol_inexact_bin *bin;
2001
2002	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
2003
2004	rcu_read_lock();
2005	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2006	rcu_read_unlock();
2007
2008	return bin;
2009}
2010
2011static struct xfrm_policy *
2012__xfrm_policy_eval_candidates(struct hlist_head *chain,
2013			      struct xfrm_policy *prefer,
2014			      const struct flowi *fl,
2015			      u8 type, u16 family, int dir, u32 if_id)
2016{
2017	u32 priority = prefer ? prefer->priority : ~0u;
2018	struct xfrm_policy *pol;
2019
2020	if (!chain)
2021		return NULL;
2022
2023	hlist_for_each_entry_rcu(pol, chain, bydst) {
2024		int err;
2025
2026		if (pol->priority > priority)
2027			break;
2028
2029		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
2030		if (err) {
2031			if (err != -ESRCH)
2032				return ERR_PTR(err);
2033
2034			continue;
2035		}
2036
2037		if (prefer) {
2038			/* matches.  Is it older than *prefer? */
2039			if (pol->priority == priority &&
2040			    prefer->pos < pol->pos)
2041				return prefer;
2042		}
2043
2044		return pol;
2045	}
2046
2047	return NULL;
2048}
2049
2050static struct xfrm_policy *
2051xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
2052			    struct xfrm_policy *prefer,
2053			    const struct flowi *fl,
2054			    u8 type, u16 family, int dir, u32 if_id)
2055{
2056	struct xfrm_policy *tmp;
2057	int i;
2058
2059	for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
2060		tmp = __xfrm_policy_eval_candidates(cand->res[i],
2061						    prefer,
2062						    fl, type, family, dir,
2063						    if_id);
2064		if (!tmp)
2065			continue;
2066
2067		if (IS_ERR(tmp))
2068			return tmp;
2069		prefer = tmp;
2070	}
2071
2072	return prefer;
2073}
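
/* Candidate evaluation above is purely priority based: the lowest
 * ->priority wins across all four inexact candidate lists, and on a
 * priority tie the policy with the smaller ->pos is kept. A
 * priority-10 saddr:any policy therefore beats a priority-20
 * saddr:daddr policy, even though the latter is more specific.
 */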
2074
2075static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
2076						     const struct flowi *fl,
2077						     u16 family, u8 dir,
2078						     u32 if_id)
2079{
2080	struct xfrm_pol_inexact_candidates cand;
2081	const xfrm_address_t *daddr, *saddr;
2082	struct xfrm_pol_inexact_bin *bin;
2083	struct xfrm_policy *pol, *ret;
2084	struct hlist_head *chain;
2085	unsigned int sequence;
2086	int err;
2087
2088	daddr = xfrm_flowi_daddr(fl, family);
2089	saddr = xfrm_flowi_saddr(fl, family);
2090	if (unlikely(!daddr || !saddr))
2091		return NULL;
2092
2093	rcu_read_lock();
2094 retry:
2095	do {
2096		sequence = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
2097		chain = policy_hash_direct(net, daddr, saddr, family, dir);
2098	} while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence));
2099
2100	ret = NULL;
2101	hlist_for_each_entry_rcu(pol, chain, bydst) {
2102		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
2103		if (err) {
2104			if (err == -ESRCH)
2105				continue;
2106			else {
2107				ret = ERR_PTR(err);
2108				goto fail;
2109			}
2110		} else {
2111			ret = pol;
2112			break;
2113		}
2114	}
2115	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2116	if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
2117							 daddr))
2118		goto skip_inexact;
2119
2120	pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
2121					  family, dir, if_id);
2122	if (pol) {
2123		ret = pol;
2124		if (IS_ERR(pol))
2125			goto fail;
2126	}
2127
2128skip_inexact:
2129	if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence))
2130		goto retry;
2131
2132	if (ret && !xfrm_pol_hold_rcu(ret))
2133		goto retry;
2134fail:
2135	rcu_read_unlock();
2136
2137	return ret;
2138}
2139
2140static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
2141					      const struct flowi *fl,
2142					      u16 family, u8 dir, u32 if_id)
2143{
2144#ifdef CONFIG_XFRM_SUB_POLICY
2145	struct xfrm_policy *pol;
2146
2147	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
2148					dir, if_id);
2149	if (pol != NULL)
2150		return pol;
2151#endif
2152	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
2153					 dir, if_id);
2154}
2155
2156static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
2157						 const struct flowi *fl,
2158						 u16 family, u32 if_id)
2159{
2160	struct xfrm_policy *pol;
2161
2162	rcu_read_lock();
2163 again:
2164	pol = rcu_dereference(sk->sk_policy[dir]);
2165	if (pol != NULL) {
2166		bool match;
2167		int err = 0;
2168
2169		if (pol->family != family) {
2170			pol = NULL;
2171			goto out;
2172		}
2173
2174		match = xfrm_selector_match(&pol->selector, fl, family);
2175		if (match) {
2176			if ((sk->sk_mark & pol->mark.m) != pol->mark.v ||
2177			    pol->if_id != if_id) {
2178				pol = NULL;
2179				goto out;
2180			}
2181			err = security_xfrm_policy_lookup(pol->security,
2182						      fl->flowi_secid);
2183			if (!err) {
2184				if (!xfrm_pol_hold_rcu(pol))
2185					goto again;
2186			} else if (err == -ESRCH) {
2187				pol = NULL;
2188			} else {
2189				pol = ERR_PTR(err);
2190			}
2191		} else
2192			pol = NULL;
2193	}
2194out:
2195	rcu_read_unlock();
2196	return pol;
2197}
2198
2199static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
2200{
2201	struct net *net = xp_net(pol);
2202
2203	list_add(&pol->walk.all, &net->xfrm.policy_all);
2204	net->xfrm.policy_count[dir]++;
2205	xfrm_pol_hold(pol);
2206}
2207
2208static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
2209						int dir)
2210{
2211	struct net *net = xp_net(pol);
2212
2213	if (list_empty(&pol->walk.all))
2214		return NULL;
2215
2216	/* Socket policies are not hashed. */
2217	if (!hlist_unhashed(&pol->bydst)) {
2218		hlist_del_rcu(&pol->bydst);
2219		hlist_del_init(&pol->bydst_inexact_list);
2220		hlist_del(&pol->byidx);
2221	}
2222
2223	list_del_init(&pol->walk.all);
2224	net->xfrm.policy_count[dir]--;
2225
2226	return pol;
2227}
2228
2229static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
2230{
2231	__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
2232}
2233
2234static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
2235{
2236	__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
2237}
2238
2239int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
2240{
2241	struct net *net = xp_net(pol);
2242
2243	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2244	pol = __xfrm_policy_unlink(pol, dir);
2245	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2246	if (pol) {
2247		xfrm_policy_kill(pol);
2248		return 0;
2249	}
2250	return -ENOENT;
2251}
2252EXPORT_SYMBOL(xfrm_policy_delete);
2253
2254int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
2255{
2256	struct net *net = sock_net(sk);
2257	struct xfrm_policy *old_pol;
2258
2259#ifdef CONFIG_XFRM_SUB_POLICY
2260	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
2261		return -EINVAL;
2262#endif
2263
2264	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2265	old_pol = rcu_dereference_protected(sk->sk_policy[dir],
2266				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
2267	if (pol) {
2268		pol->curlft.add_time = ktime_get_real_seconds();
2269		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
2270		xfrm_sk_policy_link(pol, dir);
2271	}
2272	rcu_assign_pointer(sk->sk_policy[dir], pol);
2273	if (old_pol) {
2274		if (pol)
2275			xfrm_policy_requeue(old_pol, pol);
2276
2277		/* Unlinking always succeeds. This is the only function
2278		 * allowed to delete or replace a socket policy.
2279		 */
2280		xfrm_sk_policy_unlink(old_pol, dir);
2281	}
2282	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2283
2284	if (old_pol) {
2285		xfrm_policy_kill(old_pol);
2286	}
2287	return 0;
2288}
2289
2290static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
2291{
2292	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
2293	struct net *net = xp_net(old);
2294
2295	if (newp) {
2296		newp->selector = old->selector;
2297		if (security_xfrm_policy_clone(old->security,
2298					       &newp->security)) {
2299			kfree(newp);
2300			return NULL;  /* ENOMEM */
2301		}
2302		newp->lft = old->lft;
2303		newp->curlft = old->curlft;
2304		newp->mark = old->mark;
2305		newp->if_id = old->if_id;
2306		newp->action = old->action;
2307		newp->flags = old->flags;
2308		newp->xfrm_nr = old->xfrm_nr;
2309		newp->index = old->index;
2310		newp->type = old->type;
2311		newp->family = old->family;
2312		memcpy(newp->xfrm_vec, old->xfrm_vec,
2313		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
2314		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2315		xfrm_sk_policy_link(newp, dir);
2316		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2317		xfrm_pol_put(newp);
2318	}
2319	return newp;
2320}
2321
2322int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
2323{
2324	const struct xfrm_policy *p;
2325	struct xfrm_policy *np;
2326	int i, ret = 0;
2327
2328	rcu_read_lock();
2329	for (i = 0; i < 2; i++) {
2330		p = rcu_dereference(osk->sk_policy[i]);
2331		if (p) {
2332			np = clone_policy(p, i);
2333			if (unlikely(!np)) {
2334				ret = -ENOMEM;
2335				break;
2336			}
2337			rcu_assign_pointer(sk->sk_policy[i], np);
2338		}
2339	}
2340	rcu_read_unlock();
2341	return ret;
2342}
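
/* The loop above copies both per-socket policy slots: index 0 is
 * XFRM_POLICY_IN and index 1 is XFRM_POLICY_OUT. Forward policies are
 * never attached to sockets, so two slots suffice.
 */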
2343
2344static int
2345xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
2346	       xfrm_address_t *remote, unsigned short family, u32 mark)
2347{
2348	int err;
2349	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2350
2351	if (unlikely(afinfo == NULL))
2352		return -EINVAL;
2353	err = afinfo->get_saddr(net, oif, local, remote, mark);
2354	rcu_read_unlock();
2355	return err;
2356}
2357
2358/* Resolve the list of templates for the flow, given the policy. */
2359
2360static int
2361xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
2362		      struct xfrm_state **xfrm, unsigned short family)
2363{
2364	struct net *net = xp_net(policy);
2365	int nx;
2366	int i, error;
2367	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
2368	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
2369	xfrm_address_t tmp;
2370
2371	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
2372		struct xfrm_state *x;
2373		xfrm_address_t *remote = daddr;
2374		xfrm_address_t *local  = saddr;
2375		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
2376
2377		if (tmpl->mode == XFRM_MODE_TUNNEL ||
2378		    tmpl->mode == XFRM_MODE_BEET) {
2379			remote = &tmpl->id.daddr;
2380			local = &tmpl->saddr;
2381			if (xfrm_addr_any(local, tmpl->encap_family)) {
2382				error = xfrm_get_saddr(net, fl->flowi_oif,
2383						       &tmp, remote,
2384						       tmpl->encap_family, 0);
2385				if (error)
2386					goto fail;
2387				local = &tmp;
2388			}
2389		}
2390
2391		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
2392				    family, policy->if_id);
2393
2394		if (x && x->km.state == XFRM_STATE_VALID) {
2395			xfrm[nx++] = x;
2396			daddr = remote;
2397			saddr = local;
2398			continue;
2399		}
2400		if (x) {
2401			error = (x->km.state == XFRM_STATE_ERROR ?
2402				 -EINVAL : -EAGAIN);
2403			xfrm_state_put(x);
2404		} else if (error == -ESRCH) {
2405			error = -EAGAIN;
2406		}
2407
2408		if (!tmpl->optional)
2409			goto fail;
2410	}
2411	return nx;
2412
2413fail:
2414	for (nx--; nx >= 0; nx--)
2415		xfrm_state_put(xfrm[nx]);
2416	return error;
2417}
2418
2419static int
2420xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
2421		  struct xfrm_state **xfrm, unsigned short family)
2422{
2423	struct xfrm_state *tp[XFRM_MAX_DEPTH];
2424	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
2425	int cnx = 0;
2426	int error;
2427	int ret;
2428	int i;
2429
2430	for (i = 0; i < npols; i++) {
2431		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
2432			error = -ENOBUFS;
2433			goto fail;
2434		}
2435
2436		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
2437		if (ret < 0) {
2438			error = ret;
2439			goto fail;
2440		} else
2441			cnx += ret;
2442	}
2443
2444	/* Found states are sorted for outbound processing. */
2445	if (npols > 1)
2446		xfrm_state_sort(xfrm, tpp, cnx, family);
2447
2448	return cnx;
2449
2450 fail:
2451	for (cnx--; cnx >= 0; cnx--)
2452		xfrm_state_put(tpp[cnx]);
2453	return error;
2454
2455}
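
/* Depth example: two policies carrying 2 and 3 templates resolve up to
 * five states into tpp[]; any combination whose running template count
 * reaches XFRM_MAX_DEPTH fails with -ENOBUFS before resolving further.
 */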
2456
2457static int xfrm_get_tos(const struct flowi *fl, int family)
2458{
2459	if (family == AF_INET)
2460		return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;
2461
2462	return 0;
2463}
2464
2465static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
2466{
2467	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2468	struct dst_ops *dst_ops;
2469	struct xfrm_dst *xdst;
2470
2471	if (!afinfo)
2472		return ERR_PTR(-EINVAL);
2473
2474	switch (family) {
2475	case AF_INET:
2476		dst_ops = &net->xfrm.xfrm4_dst_ops;
2477		break;
2478#if IS_ENABLED(CONFIG_IPV6)
2479	case AF_INET6:
2480		dst_ops = &net->xfrm.xfrm6_dst_ops;
2481		break;
2482#endif
2483	default:
2484		BUG();
2485	}
2486	xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
2487
2488	if (likely(xdst)) {
2489		struct dst_entry *dst = &xdst->u.dst;
2490
2491		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
2492	} else
2493		xdst = ERR_PTR(-ENOBUFS);
2494
2495	rcu_read_unlock();
2496
2497	return xdst;
2498}
2499
2500static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
2501			   int nfheader_len)
2502{
2503	if (dst->ops->family == AF_INET6) {
2504		struct rt6_info *rt = (struct rt6_info *)dst;
2505		path->path_cookie = rt6_get_cookie(rt);
2506		path->u.rt6.rt6i_nfheader_len = nfheader_len;
2507	}
2508}
2509
2510static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
2511				const struct flowi *fl)
2512{
2513	const struct xfrm_policy_afinfo *afinfo =
2514		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
2515	int err;
2516
2517	if (!afinfo)
2518		return -EINVAL;
2519
2520	err = afinfo->fill_dst(xdst, dev, fl);
2521
2522	rcu_read_unlock();
2523
2524	return err;
2525}
2526
2527
2528/* Allocate a chain of dst_entry's, attach the known xfrm's, and
2529 * calculate all the metrics... In short, bundle a bundle.
2530 */
2531
2532static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
2533					    struct xfrm_state **xfrm,
2534					    struct xfrm_dst **bundle,
2535					    int nx,
2536					    const struct flowi *fl,
2537					    struct dst_entry *dst)
2538{
2539	const struct xfrm_state_afinfo *afinfo;
2540	const struct xfrm_mode *inner_mode;
2541	struct net *net = xp_net(policy);
2542	unsigned long now = jiffies;
2543	struct net_device *dev;
2544	struct xfrm_dst *xdst_prev = NULL;
2545	struct xfrm_dst *xdst0 = NULL;
2546	int i = 0;
2547	int err;
2548	int header_len = 0;
2549	int nfheader_len = 0;
2550	int trailer_len = 0;
2551	int tos;
2552	int family = policy->selector.family;
2553	xfrm_address_t saddr, daddr;
2554
2555	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
2556
2557	tos = xfrm_get_tos(fl, family);
2558
2559	dst_hold(dst);
2560
2561	for (; i < nx; i++) {
2562		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
2563		struct dst_entry *dst1 = &xdst->u.dst;
2564
2565		err = PTR_ERR(xdst);
2566		if (IS_ERR(xdst)) {
2567			dst_release(dst);
2568			goto put_states;
2569		}
2570
2571		bundle[i] = xdst;
2572		if (!xdst_prev)
2573			xdst0 = xdst;
2574		else
2575			/* The ref count is taken during xfrm_alloc_dst(),
2576			 * so there is no need to do dst_clone() on dst1.
2577			 */
2578			xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
2579
2580		if (xfrm[i]->sel.family == AF_UNSPEC) {
2581			inner_mode = xfrm_ip2inner_mode(xfrm[i],
2582							xfrm_af2proto(family));
2583			if (!inner_mode) {
2584				err = -EAFNOSUPPORT;
2585				dst_release(dst);
2586				goto put_states;
2587			}
2588		} else
2589			inner_mode = &xfrm[i]->inner_mode;
2590
2591		xdst->route = dst;
2592		dst_copy_metrics(dst1, dst);
2593
2594		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
2595			__u32 mark = 0;
2596
2597			if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
2598				mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
2599
2600			family = xfrm[i]->props.family;
2601			dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
2602					      &saddr, &daddr, family, mark);
2603			err = PTR_ERR(dst);
2604			if (IS_ERR(dst))
2605				goto put_states;
2606		} else
2607			dst_hold(dst);
2608
2609		dst1->xfrm = xfrm[i];
2610		xdst->xfrm_genid = xfrm[i]->genid;
2611
2612		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2613		dst1->lastuse = now;
2614
2615		dst1->input = dst_discard;
2616
2617		rcu_read_lock();
2618		afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
2619		if (likely(afinfo))
2620			dst1->output = afinfo->output;
2621		else
2622			dst1->output = dst_discard_out;
2623		rcu_read_unlock();
2624
2625		xdst_prev = xdst;
2626
2627		header_len += xfrm[i]->props.header_len;
2628		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
2629			nfheader_len += xfrm[i]->props.header_len;
2630		trailer_len += xfrm[i]->props.trailer_len;
2631	}
2632
2633	xfrm_dst_set_child(xdst_prev, dst);
2634	xdst0->path = dst;
2635
2636	err = -ENODEV;
2637	dev = dst->dev;
2638	if (!dev)
2639		goto free_dst;
2640
2641	xfrm_init_path(xdst0, dst, nfheader_len);
2642	xfrm_init_pmtu(bundle, nx);
2643
2644	for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
2645	     xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
2646		err = xfrm_fill_dst(xdst_prev, dev, fl);
2647		if (err)
2648			goto free_dst;
2649
2650		xdst_prev->u.dst.header_len = header_len;
2651		xdst_prev->u.dst.trailer_len = trailer_len;
2652		header_len -= xdst_prev->u.dst.xfrm->props.header_len;
2653		trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
2654	}
2655
2656	return &xdst0->u.dst;
2657
2658put_states:
2659	for (; i < nx; i++)
2660		xfrm_state_put(xfrm[i]);
2661free_dst:
2662	if (xdst0)
2663		dst_release_immediate(&xdst0->u.dst);
2664
2665	return ERR_PTR(err);
2666}
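
/* The result is a linked chain xdst0 -> xdst1 -> ... -> path, one xdst
 * per xfrm_state, with xdst0->path pointing at the final non-xfrm
 * route. In the last loop each dst's header_len/trailer_len covers its
 * own transform plus everything below it, shrinking level by level.
 */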
2667
2668static int xfrm_expand_policies(const struct flowi *fl, u16 family,
2669				struct xfrm_policy **pols,
2670				int *num_pols, int *num_xfrms)
2671{
2672	int i;
2673
2674	if (*num_pols == 0 || !pols[0]) {
2675		*num_pols = 0;
2676		*num_xfrms = 0;
2677		return 0;
2678	}
2679	if (IS_ERR(pols[0]))
2680		return PTR_ERR(pols[0]);
2681
2682	*num_xfrms = pols[0]->xfrm_nr;
2683
2684#ifdef CONFIG_XFRM_SUB_POLICY
2685	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
2686	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2687		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
2688						    XFRM_POLICY_TYPE_MAIN,
2689						    fl, family,
2690						    XFRM_POLICY_OUT,
2691						    pols[0]->if_id);
2692		if (pols[1]) {
2693			if (IS_ERR(pols[1])) {
2694				xfrm_pols_put(pols, *num_pols);
2695				return PTR_ERR(pols[1]);
2696			}
2697			(*num_pols)++;
2698			(*num_xfrms) += pols[1]->xfrm_nr;
2699		}
2700	}
2701#endif
2702	for (i = 0; i < *num_pols; i++) {
2703		if (pols[i]->action != XFRM_POLICY_ALLOW) {
2704			*num_xfrms = -1;
2705			break;
2706		}
2707	}
2708
2709	return 0;
2710
2711}
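
/* Output convention: *num_xfrms is the total template count across the
 * expanded policies, or -1 if any policy's action is not
 * XFRM_POLICY_ALLOW (the flow must be blocked). *num_pols == 0 with a
 * 0 return means no policy applies at all.
 */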
2712
2713static struct xfrm_dst *
2714xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
2715			       const struct flowi *fl, u16 family,
2716			       struct dst_entry *dst_orig)
2717{
2718	struct net *net = xp_net(pols[0]);
2719	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
2720	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
2721	struct xfrm_dst *xdst;
2722	struct dst_entry *dst;
2723	int err;
2724
2725	/* Try to instantiate a bundle */
2726	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
2727	if (err <= 0) {
2728		if (err == 0)
2729			return NULL;
2730
2731		if (err != -EAGAIN)
2732			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2733		return ERR_PTR(err);
2734	}
2735
2736	dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
2737	if (IS_ERR(dst)) {
2738		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
2739		return ERR_CAST(dst);
2740	}
2741
2742	xdst = (struct xfrm_dst *)dst;
2743	xdst->num_xfrms = err;
2744	xdst->num_pols = num_pols;
2745	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2746	xdst->policy_genid = atomic_read(&pols[0]->genid);
2747
2748	return xdst;
2749}
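
/* A NULL return here means xfrm_tmpl_resolve() found zero states, i.e.
 * every template was optional and nothing resolved. -EAGAIN (states
 * still being negotiated) is deliberately not counted as OUTPOLERROR;
 * bundle construction failures count as OUTBUNDLEGENERROR instead.
 */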
2750
2751static void xfrm_policy_queue_process(struct timer_list *t)
2752{
2753	struct sk_buff *skb;
2754	struct sock *sk;
2755	struct dst_entry *dst;
2756	struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
2757	struct net *net = xp_net(pol);
2758	struct xfrm_policy_queue *pq = &pol->polq;
2759	struct flowi fl;
2760	struct sk_buff_head list;
2761	__u32 skb_mark;
2762
2763	spin_lock(&pq->hold_queue.lock);
2764	skb = skb_peek(&pq->hold_queue);
2765	if (!skb) {
2766		spin_unlock(&pq->hold_queue.lock);
2767		goto out;
2768	}
2769	dst = skb_dst(skb);
2770	sk = skb->sk;
2771
2772	/* Fixup the mark to support VTI. */
2773	skb_mark = skb->mark;
2774	skb->mark = pol->mark.v;
2775	xfrm_decode_session(skb, &fl, dst->ops->family);
2776	skb->mark = skb_mark;
2777	spin_unlock(&pq->hold_queue.lock);
2778
2779	dst_hold(xfrm_dst_path(dst));
2780	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
2781	if (IS_ERR(dst))
2782		goto purge_queue;
2783
2784	if (dst->flags & DST_XFRM_QUEUE) {
2785		dst_release(dst);
2786
2787		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
2788			goto purge_queue;
2789
2790		pq->timeout = pq->timeout << 1;
2791		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
2792			xfrm_pol_hold(pol);
2793		goto out;
2794	}
2795
2796	dst_release(dst);
2797
2798	__skb_queue_head_init(&list);
2799
2800	spin_lock(&pq->hold_queue.lock);
2801	pq->timeout = 0;
2802	skb_queue_splice_init(&pq->hold_queue, &list);
2803	spin_unlock(&pq->hold_queue.lock);
2804
2805	while (!skb_queue_empty(&list)) {
2806		skb = __skb_dequeue(&list);
2807
2808		/* Fixup the mark to support VTI. */
2809		skb_mark = skb->mark;
2810		skb->mark = pol->mark.v;
2811		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
2812		skb->mark = skb_mark;
2813
2814		dst_hold(xfrm_dst_path(skb_dst(skb)));
2815		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
2816		if (IS_ERR(dst)) {
2817			kfree_skb(skb);
2818			continue;
2819		}
2820
2821		nf_reset_ct(skb);
2822		skb_dst_drop(skb);
2823		skb_dst_set(skb, dst);
2824
2825		dst_output(net, skb->sk, skb);
2826	}
2827
2828out:
2829	xfrm_pol_put(pol);
2830	return;
2831
2832purge_queue:
2833	pq->timeout = 0;
2834	skb_queue_purge(&pq->hold_queue);
2835	xfrm_pol_put(pol);
2836}
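
/* Hold-queue backoff: pq->timeout starts at XFRM_QUEUE_TMO_MIN and
 * doubles on every run that still resolves to a DST_XFRM_QUEUE route;
 * once it reaches XFRM_QUEUE_TMO_MAX the queue is purged and the
 * pending packets are dropped.
 */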
2837
2838static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2839{
2840	unsigned long sched_next;
2841	struct dst_entry *dst = skb_dst(skb);
2842	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
2843	struct xfrm_policy *pol = xdst->pols[0];
2844	struct xfrm_policy_queue *pq = &pol->polq;
2845
2846	if (unlikely(skb_fclone_busy(sk, skb))) {
2847		kfree_skb(skb);
2848		return 0;
2849	}
2850
2851	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
2852		kfree_skb(skb);
2853		return -EAGAIN;
2854	}
2855
2856	skb_dst_force(skb);
2857
2858	spin_lock_bh(&pq->hold_queue.lock);
2859
2860	if (!pq->timeout)
2861		pq->timeout = XFRM_QUEUE_TMO_MIN;
2862
2863	sched_next = jiffies + pq->timeout;
2864
2865	if (del_timer(&pq->hold_timer)) {
2866		if (time_before(pq->hold_timer.expires, sched_next))
2867			sched_next = pq->hold_timer.expires;
2868		xfrm_pol_put(pol);
2869	}
2870
2871	__skb_queue_tail(&pq->hold_queue, skb);
2872	if (!mod_timer(&pq->hold_timer, sched_next))
2873		xfrm_pol_hold(pol);
2874
2875	spin_unlock_bh(&pq->hold_queue.lock);
2876
2877	return 0;
2878}
2879
2880static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
2881						 struct xfrm_flo *xflo,
2882						 const struct flowi *fl,
2883						 int num_xfrms,
2884						 u16 family)
2885{
2886	int err;
2887	struct net_device *dev;
2888	struct dst_entry *dst;
2889	struct dst_entry *dst1;
2890	struct xfrm_dst *xdst;
2891
2892	xdst = xfrm_alloc_dst(net, family);
2893	if (IS_ERR(xdst))
2894		return xdst;
2895
2896	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
2897	    net->xfrm.sysctl_larval_drop ||
2898	    num_xfrms <= 0)
2899		return xdst;
2900
2901	dst = xflo->dst_orig;
2902	dst1 = &xdst->u.dst;
2903	dst_hold(dst);
2904	xdst->route = dst;
2905
2906	dst_copy_metrics(dst1, dst);
2907
2908	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2909	dst1->flags |= DST_XFRM_QUEUE;
2910	dst1->lastuse = jiffies;
2911
2912	dst1->input = dst_discard;
2913	dst1->output = xdst_queue_output;
2914
2915	dst_hold(dst);
2916	xfrm_dst_set_child(xdst, dst);
2917	xdst->path = dst;
2918
2919	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
2920
2921	err = -ENODEV;
2922	dev = dst->dev;
2923	if (!dev)
2924		goto free_dst;
2925
2926	err = xfrm_fill_dst(xdst, dev, fl);
2927	if (err)
2928		goto free_dst;
2929
2930out:
2931	return xdst;
2932
2933free_dst:
2934	dst_release(dst1);
2935	xdst = ERR_PTR(err);
2936	goto out;
2937}
2938
2939static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
2940					   const struct flowi *fl,
2941					   u16 family, u8 dir,
2942					   struct xfrm_flo *xflo, u32 if_id)
2943{
2944	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2945	int num_pols = 0, num_xfrms = 0, err;
2946	struct xfrm_dst *xdst;
2947
2948	/* Resolve the policies to use if we couldn't get them from
2949	 * the previous cache entry. */
2950	num_pols = 1;
2951	pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
2952	err = xfrm_expand_policies(fl, family, pols,
2953					   &num_pols, &num_xfrms);
2954	if (err < 0)
2955		goto inc_error;
2956	if (num_pols == 0)
2957		return NULL;
2958	if (num_xfrms <= 0)
2959		goto make_dummy_bundle;
2960
2961	xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
2962					      xflo->dst_orig);
2963	if (IS_ERR(xdst)) {
2964		err = PTR_ERR(xdst);
2965		if (err == -EREMOTE) {
2966			xfrm_pols_put(pols, num_pols);
2967			return NULL;
2968		}
2969
2970		if (err != -EAGAIN)
2971			goto error;
2972		goto make_dummy_bundle;
2973	} else if (xdst == NULL) {
2974		num_xfrms = 0;
2975		goto make_dummy_bundle;
2976	}
2977
2978	return xdst;
2979
2980make_dummy_bundle:
2981	/* We found policies, but there are no bundles to instantiate:
2982	 * either the policy blocks, has no transformations, or
2983	 * we could not build a template (no xfrm_states). */
2984	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
2985	if (IS_ERR(xdst)) {
2986		xfrm_pols_put(pols, num_pols);
2987		return ERR_CAST(xdst);
2988	}
2989	xdst->num_pols = num_pols;
2990	xdst->num_xfrms = num_xfrms;
2991	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2992
2993	return xdst;
2994
2995inc_error:
2996	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2997error:
2998	xfrm_pols_put(pols, num_pols);
2999	return ERR_PTR(err);
3000}
3001
3002static struct dst_entry *make_blackhole(struct net *net, u16 family,
3003					struct dst_entry *dst_orig)
3004{
3005	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
3006	struct dst_entry *ret;
3007
3008	if (!afinfo) {
3009		dst_release(dst_orig);
3010		return ERR_PTR(-EINVAL);
3011	} else {
3012		ret = afinfo->blackhole_route(net, dst_orig);
3013	}
3014	rcu_read_unlock();
3015
3016	return ret;
3017}
3018
3019/* Finds/creates a bundle for the given flow and if_id.
3020 *
3021 * At the moment we eat a raw IP route, mostly to speed up lookups
3022 * on interfaces with disabled IPsec.
3023 *
3024 * xfrm_lookup() uses an if_id of 0 by default and is provided for
3025 * compatibility.
3026 */
3027struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
3028					struct dst_entry *dst_orig,
3029					const struct flowi *fl,
3030					const struct sock *sk,
3031					int flags, u32 if_id)
3032{
3033	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3034	struct xfrm_dst *xdst;
3035	struct dst_entry *dst, *route;
3036	u16 family = dst_orig->ops->family;
3037	u8 dir = XFRM_POLICY_OUT;
3038	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
3039
3040	dst = NULL;
3041	xdst = NULL;
3042	route = NULL;
3043
3044	sk = sk_const_to_full_sk(sk);
3045	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
3046		num_pols = 1;
3047		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
3048						if_id);
3049		err = xfrm_expand_policies(fl, family, pols,
3050					   &num_pols, &num_xfrms);
3051		if (err < 0)
3052			goto dropdst;
3053
3054		if (num_pols) {
3055			if (num_xfrms <= 0) {
3056				drop_pols = num_pols;
3057				goto no_transform;
3058			}
3059
3060			xdst = xfrm_resolve_and_create_bundle(
3061					pols, num_pols, fl,
3062					family, dst_orig);
3063
3064			if (IS_ERR(xdst)) {
3065				xfrm_pols_put(pols, num_pols);
3066				err = PTR_ERR(xdst);
3067				if (err == -EREMOTE)
3068					goto nopol;
3069
3070				goto dropdst;
3071			} else if (xdst == NULL) {
3072				num_xfrms = 0;
3073				drop_pols = num_pols;
3074				goto no_transform;
3075			}
3076
3077			route = xdst->route;
3078		}
3079	}
3080
3081	if (xdst == NULL) {
3082		struct xfrm_flo xflo;
3083
3084		xflo.dst_orig = dst_orig;
3085		xflo.flags = flags;
3086
3087		/* To accelerate a bit...  */
3088		if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
3089			       !net->xfrm.policy_count[XFRM_POLICY_OUT]))
3090			goto nopol;
3091
3092		xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
3093		if (xdst == NULL)
3094			goto nopol;
3095		if (IS_ERR(xdst)) {
3096			err = PTR_ERR(xdst);
3097			goto dropdst;
3098		}
3099
3100		num_pols = xdst->num_pols;
3101		num_xfrms = xdst->num_xfrms;
3102		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
3103		route = xdst->route;
3104	}
3105
3106	dst = &xdst->u.dst;
3107	if (route == NULL && num_xfrms > 0) {
3108		/* The only case in which xfrm_bundle_lookup() returns a
3109		 * bundle with a null route is when the template could
3110		 * not be resolved: the policies are there, but the
3111		 * bundle could not be created, since we don't yet
3112		 * have the xfrm_states. We need to wait for the KM to
3113		 * negotiate new SAs or bail out with an error. */
3114		if (net->xfrm.sysctl_larval_drop) {
3115			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3116			err = -EREMOTE;
3117			goto error;
3118		}
3119
3120		err = -EAGAIN;
3121
3122		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3123		goto error;
3124	}
3125
3126no_transform:
3127	if (num_pols == 0)
3128		goto nopol;
3129
3130	if ((flags & XFRM_LOOKUP_ICMP) &&
3131	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
3132		err = -ENOENT;
3133		goto error;
3134	}
3135
3136	for (i = 0; i < num_pols; i++)
3137		pols[i]->curlft.use_time = ktime_get_real_seconds();
3138
3139	if (num_xfrms < 0) {
3140		/* Prohibit the flow */
3141		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
3142		err = -EPERM;
3143		goto error;
3144	} else if (num_xfrms > 0) {
3145		/* Flow transformed */
3146		dst_release(dst_orig);
3147	} else {
3148		/* Flow passes untransformed */
3149		dst_release(dst);
3150		dst = dst_orig;
3151	}
3152ok:
3153	xfrm_pols_put(pols, drop_pols);
3154	if (dst && dst->xfrm &&
3155	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
3156		dst->flags |= DST_XFRM_TUNNEL;
3157	return dst;
3158
3159nopol:
3160	if (!(flags & XFRM_LOOKUP_ICMP)) {
3161		dst = dst_orig;
3162		goto ok;
3163	}
3164	err = -ENOENT;
3165error:
3166	dst_release(dst);
3167dropdst:
3168	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
3169		dst_release(dst_orig);
3170	xfrm_pols_put(pols, drop_pols);
3171	return ERR_PTR(err);
3172}
3173EXPORT_SYMBOL(xfrm_lookup_with_ifid);
3174
3175/* Main function: finds/creates a bundle for the given flow.
3176 *
3177 * At the moment we eat a raw IP route, mostly to speed up lookups
3178 * on interfaces with disabled IPsec.
3179 */
3180struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
3181			      const struct flowi *fl, const struct sock *sk,
3182			      int flags)
3183{
3184	return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
3185}
3186EXPORT_SYMBOL(xfrm_lookup);
3187
3188/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
3189 * Otherwise we may send out blackholed packets.
3190 */
3191struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
3192				    const struct flowi *fl,
3193				    const struct sock *sk, int flags)
3194{
3195	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
3196					    flags | XFRM_LOOKUP_QUEUE |
3197					    XFRM_LOOKUP_KEEP_DST_REF);
3198
3199	if (PTR_ERR(dst) == -EREMOTE)
3200		return make_blackhole(net, dst_orig->ops->family, dst_orig);
3201
3202	if (IS_ERR(dst))
3203		dst_release(dst_orig);
3204
3205	return dst;
3206}
3207EXPORT_SYMBOL(xfrm_lookup_route);
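
/* When larval drop turns the lookup into -EREMOTE, the caller gets a
 * blackhole route instead of an error: packets are silently discarded
 * on output until the needed SAs are negotiated, rather than being
 * queued or sent out in the clear.
 */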
3208
3209static inline int
3210xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
3211{
3212	struct sec_path *sp = skb_sec_path(skb);
3213	struct xfrm_state *x;
3214
3215	if (!sp || idx < 0 || idx >= sp->len)
3216		return 0;
3217	x = sp->xvec[idx];
3218	if (!x->type->reject)
3219		return 0;
3220	return x->type->reject(x, skb, fl);
3221}
3222
3223/* When the skb is transformed back to its "native" form, we have to
3224 * check policy restrictions. At the moment we do this in a maximally
3225 * stupid way. Shame on me. :-) Of course, connected sockets must
3226 * have the policy cached at them.
3227 */
3228
3229static inline int
3230xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
3231	      unsigned short family)
3232{
3233	if (xfrm_state_kern(x))
3234		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
3235	return	x->id.proto == tmpl->id.proto &&
3236		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
3237		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
3238		x->props.mode == tmpl->mode &&
3239		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
3240		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
3241		!(x->props.mode != XFRM_MODE_TRANSPORT &&
3242		  xfrm_state_addr_cmp(tmpl, x, family));
3243}
3244
3245/*
3246 * Zero or a positive value is returned when validation succeeds (either a
3247 * bypass because of an optional transport mode, or the next index of the
3248 * secpath state matched against the template).
3249 * -1 is returned when no matching template is found.
3250 * Otherwise "-2 - errored_index" is returned.
3251 */
3252static inline int
3253xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
3254	       unsigned short family)
3255{
3256	int idx = start;
3257
3258	if (tmpl->optional) {
3259		if (tmpl->mode == XFRM_MODE_TRANSPORT)
3260			return start;
3261	} else
3262		start = -1;
3263	for (; idx < sp->len; idx++) {
3264		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
3265			return ++idx;
3266		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
3267			if (start == -1)
3268				start = -2-idx;
3269			break;
3270		}
3271	}
3272	return start;
3273}
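
/* Encoding example: a mismatch against a non-transport state at
 * secpath index 1 makes a non-optional template return -2 - 1 == -3;
 * the caller recovers the errored index as -(2 + (-3)) == 1.
 */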
3274
3275static void
3276decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
3277{
3278	const struct iphdr *iph = ip_hdr(skb);
3279	int ihl = iph->ihl;
3280	u8 *xprth = skb_network_header(skb) + ihl * 4;
3281	struct flowi4 *fl4 = &fl->u.ip4;
3282	int oif = 0;
3283
3284	if (skb_dst(skb) && skb_dst(skb)->dev)
3285		oif = skb_dst(skb)->dev->ifindex;
3286
3287	memset(fl4, 0, sizeof(struct flowi4));
3288	fl4->flowi4_mark = skb->mark;
3289	fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
3290
3291	fl4->flowi4_proto = iph->protocol;
3292	fl4->daddr = reverse ? iph->saddr : iph->daddr;
3293	fl4->saddr = reverse ? iph->daddr : iph->saddr;
3294	fl4->flowi4_tos = iph->tos;
3295
3296	if (!ip_is_fragment(iph)) {
3297		switch (iph->protocol) {
3298		case IPPROTO_UDP:
3299		case IPPROTO_UDPLITE:
3300		case IPPROTO_TCP:
3301		case IPPROTO_SCTP:
3302		case IPPROTO_DCCP:
3303			if (xprth + 4 < skb->data ||
3304			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
3305				__be16 *ports;
3306
3307				xprth = skb_network_header(skb) + ihl * 4;
3308				ports = (__be16 *)xprth;
3309
3310				fl4->fl4_sport = ports[!!reverse];
3311				fl4->fl4_dport = ports[!reverse];
3312			}
3313			break;
3314		case IPPROTO_ICMP:
3315			if (xprth + 2 < skb->data ||
3316			    pskb_may_pull(skb, xprth + 2 - skb->data)) {
3317				u8 *icmp;
3318
3319				xprth = skb_network_header(skb) + ihl * 4;
3320				icmp = xprth;
3321
3322				fl4->fl4_icmp_type = icmp[0];
3323				fl4->fl4_icmp_code = icmp[1];
3324			}
3325			break;
3326		case IPPROTO_GRE:
3327			if (xprth + 12 < skb->data ||
3328			    pskb_may_pull(skb, xprth + 12 - skb->data)) {
3329				__be16 *greflags;
3330				__be32 *gre_hdr;
3331
3332				xprth = skb_network_header(skb) + ihl * 4;
3333				greflags = (__be16 *)xprth;
3334				gre_hdr = (__be32 *)xprth;
3335
3336				if (greflags[0] & GRE_KEY) {
3337					if (greflags[0] & GRE_CSUM)
3338						gre_hdr++;
3339					fl4->fl4_gre_key = gre_hdr[1];
3340				}
3341			}
3342			break;
3343		default:
3344			break;
3345		}
3346	}
3347}
3348
3349#if IS_ENABLED(CONFIG_IPV6)
3350static void
3351decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
3352{
3353	struct flowi6 *fl6 = &fl->u.ip6;
3354	int onlyproto = 0;
3355	const struct ipv6hdr *hdr = ipv6_hdr(skb);
3356	u32 offset = sizeof(*hdr);
3357	struct ipv6_opt_hdr *exthdr;
3358	const unsigned char *nh = skb_network_header(skb);
3359	u16 nhoff = IP6CB(skb)->nhoff;
3360	int oif = 0;
3361	u8 nexthdr;
3362
3363	if (!nhoff)
3364		nhoff = offsetof(struct ipv6hdr, nexthdr);
3365
3366	nexthdr = nh[nhoff];
3367
3368	if (skb_dst(skb) && skb_dst(skb)->dev)
3369		oif = skb_dst(skb)->dev->ifindex;
3370
3371	memset(fl6, 0, sizeof(struct flowi6));
3372	fl6->flowi6_mark = skb->mark;
3373	fl6->flowi6_oif = reverse ? skb->skb_iif : oif;
3374
3375	fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
3376	fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
3377
3378	while (nh + offset + sizeof(*exthdr) < skb->data ||
3379	       pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
3380		nh = skb_network_header(skb);
3381		exthdr = (struct ipv6_opt_hdr *)(nh + offset);
3382
3383		switch (nexthdr) {
3384		case NEXTHDR_FRAGMENT:
3385			onlyproto = 1;
3386			fallthrough;
3387		case NEXTHDR_ROUTING:
3388		case NEXTHDR_HOP:
3389		case NEXTHDR_DEST:
3390			offset += ipv6_optlen(exthdr);
3391			nexthdr = exthdr->nexthdr;
3392			exthdr = (struct ipv6_opt_hdr *)(nh + offset);
3393			break;
3394		case IPPROTO_UDP:
3395		case IPPROTO_UDPLITE:
3396		case IPPROTO_TCP:
3397		case IPPROTO_SCTP:
3398		case IPPROTO_DCCP:
3399			if (!onlyproto && (nh + offset + 4 < skb->data ||
3400			     pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
3401				__be16 *ports;
3402
3403				nh = skb_network_header(skb);
3404				ports = (__be16 *)(nh + offset);
3405				fl6->fl6_sport = ports[!!reverse];
3406				fl6->fl6_dport = ports[!reverse];
3407			}
3408			fl6->flowi6_proto = nexthdr;
3409			return;
3410		case IPPROTO_ICMPV6:
3411			if (!onlyproto && (nh + offset + 2 < skb->data ||
3412			    pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
3413				u8 *icmp;
3414
3415				nh = skb_network_header(skb);
3416				icmp = (u8 *)(nh + offset);
3417				fl6->fl6_icmp_type = icmp[0];
3418				fl6->fl6_icmp_code = icmp[1];
3419			}
3420			fl6->flowi6_proto = nexthdr;
3421			return;
3422#if IS_ENABLED(CONFIG_IPV6_MIP6)
3423		case IPPROTO_MH:
3424			offset += ipv6_optlen(exthdr);
3425			if (!onlyproto && (nh + offset + 3 < skb->data ||
3426			    pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
3427				struct ip6_mh *mh;
3428
3429				nh = skb_network_header(skb);
3430				mh = (struct ip6_mh *)(nh + offset);
3431				fl6->fl6_mh_type = mh->ip6mh_type;
3432			}
3433			fl6->flowi6_proto = nexthdr;
3434			return;
3435#endif
3436		default:
3437			fl6->flowi6_proto = nexthdr;
3438			return;
3439		}
3440	}
3441}
3442#endif
3443
3444int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
3445			  unsigned int family, int reverse)
3446{
3447	switch (family) {
3448	case AF_INET:
3449		decode_session4(skb, fl, reverse);
3450		break;
3451#if IS_ENABLED(CONFIG_IPV6)
3452	case AF_INET6:
3453		decode_session6(skb, fl, reverse);
3454		break;
3455#endif
3456	default:
3457		return -EAFNOSUPPORT;
3458	}
3459
3460	return security_xfrm_decode_session(skb, &fl->flowi_secid);
3461}
3462EXPORT_SYMBOL(__xfrm_decode_session);
3463
3464static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
3465{
3466	for (; k < sp->len; k++) {
3467		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
3468			*idxp = k;
3469			return 1;
3470		}
3471	}
3472
3473	return 0;
3474}
3475
3476int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3477			unsigned short family)
3478{
3479	struct net *net = dev_net(skb->dev);
3480	struct xfrm_policy *pol;
3481	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3482	int npols = 0;
3483	int xfrm_nr;
3484	int pi;
3485	int reverse;
3486	struct flowi fl;
3487	int xerr_idx = -1;
3488	const struct xfrm_if_cb *ifcb;
3489	struct sec_path *sp;
3490	struct xfrm_if *xi;
3491	u32 if_id = 0;
3492
3493	rcu_read_lock();
3494	ifcb = xfrm_if_get_cb();
3495
3496	if (ifcb) {
3497		xi = ifcb->decode_session(skb, family);
3498		if (xi) {
3499			if_id = xi->p.if_id;
3500			net = xi->net;
3501		}
3502	}
3503	rcu_read_unlock();
3504
3505	reverse = dir & ~XFRM_POLICY_MASK;
3506	dir &= XFRM_POLICY_MASK;
3507
3508	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
3509		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
3510		return 0;
3511	}
3512
3513	nf_nat_decode_session(skb, &fl, family);
3514
3515	/* First, check the used SAs against their selectors. */
3516	sp = skb_sec_path(skb);
3517	if (sp) {
3518		int i;
3519
3520		for (i = sp->len - 1; i >= 0; i--) {
3521			struct xfrm_state *x = sp->xvec[i];
3522			if (!xfrm_selector_match(&x->sel, &fl, family)) {
3523				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
3524				return 0;
3525			}
3526		}
3527	}
3528
3529	pol = NULL;
3530	sk = sk_to_full_sk(sk);
3531	if (sk && sk->sk_policy[dir]) {
3532		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
3533		if (IS_ERR(pol)) {
3534			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3535			return 0;
3536		}
3537	}
3538
3539	if (!pol)
3540		pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
3541
3542	if (IS_ERR(pol)) {
3543		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3544		return 0;
3545	}
3546
3547	if (!pol) {
3548		if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
3549			xfrm_secpath_reject(xerr_idx, skb, &fl);
3550			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3551			return 0;
3552		}
3553		return 1;
3554	}
3555
3556	pol->curlft.use_time = ktime_get_real_seconds();
3557
3558	pols[0] = pol;
3559	npols++;
3560#ifdef CONFIG_XFRM_SUB_POLICY
3561	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
3562		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
3563						    &fl, family,
3564						    XFRM_POLICY_IN, if_id);
3565		if (pols[1]) {
3566			if (IS_ERR(pols[1])) {
3567				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3568				return 0;
3569			}
3570			pols[1]->curlft.use_time = ktime_get_real_seconds();
3571			npols++;
3572		}
3573	}
3574#endif
3575
3576	if (pol->action == XFRM_POLICY_ALLOW) {
3577		static struct sec_path dummy;
3578		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
3579		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
3580		struct xfrm_tmpl **tpp = tp;
3581		int ti = 0;
3582		int i, k;
3583
3584		sp = skb_sec_path(skb);
3585		if (!sp)
3586			sp = &dummy;
3587
3588		for (pi = 0; pi < npols; pi++) {
3589			if (pols[pi] != pol &&
3590			    pols[pi]->action != XFRM_POLICY_ALLOW) {
3591				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3592				goto reject;
3593			}
3594			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
3595				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
3596				goto reject_error;
3597			}
3598			for (i = 0; i < pols[pi]->xfrm_nr; i++)
3599				tpp[ti++] = &pols[pi]->xfrm_vec[i];
3600		}
3601		xfrm_nr = ti;
3602		if (npols > 1) {
3603			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
3604			tpp = stp;
3605		}
3606
3607		/* For each tunnel xfrm, find the first matching tmpl.
3608		 * For each tmpl before that, find the corresponding xfrm.
3609		 * Order is _important_. Later we will implement
3610		 * some barriers, but at the moment barriers
3611		 * are implied between every two transformations.
3612		 */
3613		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
3614			k = xfrm_policy_ok(tpp[i], sp, k, family);
3615			if (k < 0) {
3616				if (k < -1)
3617					/* "-2 - errored_index" returned */
3618					xerr_idx = -(2+k);
3619				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3620				goto reject;
3621			}
3622		}
3623
3624		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
3625			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3626			goto reject;
3627		}
3628
3629		xfrm_pols_put(pols, npols);
3630		return 1;
3631	}
3632	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3633
3634reject:
3635	xfrm_secpath_reject(xerr_idx, skb, &fl);
3636reject_error:
3637	xfrm_pols_put(pols, npols);
3638	return 0;
3639}
3640EXPORT_SYMBOL(__xfrm_policy_check);
3641
3642int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
3643{
3644	struct net *net = dev_net(skb->dev);
3645	struct flowi fl;
3646	struct dst_entry *dst;
3647	int res = 1;
3648
3649	if (xfrm_decode_session(skb, &fl, family) < 0) {
3650		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3651		return 0;
3652	}
3653
3654	skb_dst_force(skb);
3655	if (!skb_dst(skb)) {
3656		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3657		return 0;
3658	}
3659
3660	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
3661	if (IS_ERR(dst)) {
3662		res = 0;
3663		dst = NULL;
3664	}
3665	skb_dst_set(skb, dst);
3666	return res;
3667}
3668EXPORT_SYMBOL(__xfrm_route_forward);
3669
3670/* Optimize later using cookies and generation ids. */
3671
3672static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
3673{
3674	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
3675	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
3676	 * get validated by dst_ops->check on every use.  We do this
3677	 * because when a normal route referenced by an XFRM dst is
3678	 * obsoleted we do not go looking around for all the parent
3679	 * XFRM dsts that reference it so that we can invalidate them.
3680	 * It is just too much work.  Instead we make the checks here on
3681	 * every use.  For example:
3682	 *
3683	 *	XFRM dst A --> IPv4 dst X
3684	 *
3685	 * X is the "xdst->route" of A (X is also the "dst->path" of A
3686	 * in this example).  If X is marked obsolete, "A" will not
3687	 * notice.  That's what we are validating here via the
3688	 * stale_bundle() check.
3689	 *
3690	 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
3691	 * be marked on it.
3692	 * This will force stale_bundle() to fail on any xdst bundle with
3693	 * this dst linked in it.
3694	 */
3695	if (dst->obsolete < 0 && !stale_bundle(dst))
3696		return dst;
3697
3698	return NULL;
3699}
3700
3701static int stale_bundle(struct dst_entry *dst)
3702{
3703	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
3704}
3705
3706void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
3707{
3708	while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
3709		dst->dev = dev_net(dev)->loopback_dev;
3710		dev_hold(dst->dev);
3711		dev_put(dev);
3712	}
3713}
3714EXPORT_SYMBOL(xfrm_dst_ifdown);
3715
3716static void xfrm_link_failure(struct sk_buff *skb)
3717{
3718	/* Impossible. Such a dst must be popped before it reaches the point of failure. */
3719}
3720
3721static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
3722{
3723	if (dst) {
3724		if (dst->obsolete) {
3725			dst_release(dst);
3726			dst = NULL;
3727		}
3728	}
3729	return dst;
3730}
3731
3732static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
3733{
3734	while (nr--) {
3735		struct xfrm_dst *xdst = bundle[nr];
3736		u32 pmtu, route_mtu_cached;
3737		struct dst_entry *dst;
3738
3739		dst = &xdst->u.dst;
3740		pmtu = dst_mtu(xfrm_dst_child(dst));
3741		xdst->child_mtu_cached = pmtu;
3742
3743		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
3744
3745		route_mtu_cached = dst_mtu(xdst->route);
3746		xdst->route_mtu_cached = route_mtu_cached;
3747
3748		if (pmtu > route_mtu_cached)
3749			pmtu = route_mtu_cached;
3750
3751		dst_metric_set(dst, RTAX_MTU, pmtu);
3752	}
3753}
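
/* PMTU example: with a child route MTU of 1500, state overhead
 * reducing xfrm_state_mtu() to, say, 1438, and an outer route MTU of
 * 1400, the bundle dst advertises min(1438, 1400) == 1400 via RTAX_MTU.
 */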
3754
3755/* Check that the bundle accepts the flow and its components are
3756 * still valid.
3757 */
3758
3759static int xfrm_bundle_ok(struct xfrm_dst *first)
3760{
3761	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
3762	struct dst_entry *dst = &first->u.dst;
3763	struct xfrm_dst *xdst;
3764	int start_from, nr;
3765	u32 mtu;
3766
3767	if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
3768	    (dst->dev && !netif_running(dst->dev)))
3769		return 0;
3770
3771	if (dst->flags & DST_XFRM_QUEUE)
3772		return 1;
3773
3774	start_from = nr = 0;
3775	do {
3776		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
3777
3778		if (dst->xfrm->km.state != XFRM_STATE_VALID)
3779			return 0;
3780		if (xdst->xfrm_genid != dst->xfrm->genid)
3781			return 0;
3782		if (xdst->num_pols > 0 &&
3783		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
3784			return 0;
3785
3786		bundle[nr++] = xdst;
3787
3788		mtu = dst_mtu(xfrm_dst_child(dst));
3789		if (xdst->child_mtu_cached != mtu) {
3790			start_from = nr;
3791			xdst->child_mtu_cached = mtu;
3792		}
3793
3794		if (!dst_check(xdst->route, xdst->route_cookie))
3795			return 0;
3796		mtu = dst_mtu(xdst->route);
3797		if (xdst->route_mtu_cached != mtu) {
3798			start_from = nr;
3799			xdst->route_mtu_cached = mtu;
3800		}
3801
3802		dst = xfrm_dst_child(dst);
3803	} while (dst->xfrm);
3804
3805	if (likely(!start_from))
3806		return 1;
3807
3808	xdst = bundle[start_from - 1];
3809	mtu = xdst->child_mtu_cached;
3810	while (start_from--) {
3811		dst = &xdst->u.dst;
3812
3813		mtu = xfrm_state_mtu(dst->xfrm, mtu);
3814		if (mtu > xdst->route_mtu_cached)
3815			mtu = xdst->route_mtu_cached;
3816		dst_metric_set(dst, RTAX_MTU, mtu);
3817		if (!start_from)
3818			break;
3819
3820		xdst = bundle[start_from - 1];
3821		xdst->child_mtu_cached = mtu;
3822	}
3823
3824	return 1;
3825}
3826
3827static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
3828{
3829	return dst_metric_advmss(xfrm_dst_path(dst));
3830}
3831
3832static unsigned int xfrm_mtu(const struct dst_entry *dst)
3833{
3834	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
3835
3836	return mtu ? : dst_mtu(xfrm_dst_path(dst));
3837}
3838
3839static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
3840					const void *daddr)
3841{
3842	while (dst->xfrm) {
3843		const struct xfrm_state *xfrm = dst->xfrm;
3844
3845		dst = xfrm_dst_child(dst);
3846
3847		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
3848			continue;
3849		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
3850			daddr = xfrm->coaddr;
3851		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
3852			daddr = &xfrm->id.daddr;
3853	}
3854	return daddr;
3855}
3856
3857static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
3858					   struct sk_buff *skb,
3859					   const void *daddr)
3860{
3861	const struct dst_entry *path = xfrm_dst_path(dst);
3862
3863	if (!skb)
3864		daddr = xfrm_get_dst_nexthop(dst, daddr);
3865	return path->ops->neigh_lookup(path, skb, daddr);
3866}
3867
3868static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
3869{
3870	const struct dst_entry *path = xfrm_dst_path(dst);
3871
3872	daddr = xfrm_get_dst_nexthop(dst, daddr);
3873	path->ops->confirm_neigh(path, daddr);
3874}
3875
3876int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
3877{
3878	int err = 0;
3879
3880	if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
3881		return -EAFNOSUPPORT;
3882
3883	spin_lock(&xfrm_policy_afinfo_lock);
3884	if (unlikely(xfrm_policy_afinfo[family] != NULL))
3885		err = -EEXIST;
3886	else {
3887		struct dst_ops *dst_ops = afinfo->dst_ops;
3888		if (likely(dst_ops->kmem_cachep == NULL))
3889			dst_ops->kmem_cachep = xfrm_dst_cache;
3890		if (likely(dst_ops->check == NULL))
3891			dst_ops->check = xfrm_dst_check;
3892		if (likely(dst_ops->default_advmss == NULL))
3893			dst_ops->default_advmss = xfrm_default_advmss;
3894		if (likely(dst_ops->mtu == NULL))
3895			dst_ops->mtu = xfrm_mtu;
3896		if (likely(dst_ops->negative_advice == NULL))
3897			dst_ops->negative_advice = xfrm_negative_advice;
3898		if (likely(dst_ops->link_failure == NULL))
3899			dst_ops->link_failure = xfrm_link_failure;
3900		if (likely(dst_ops->neigh_lookup == NULL))
3901			dst_ops->neigh_lookup = xfrm_neigh_lookup;
3902		if (likely(!dst_ops->confirm_neigh))
3903			dst_ops->confirm_neigh = xfrm_confirm_neigh;
3904		rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
3905	}
3906	spin_unlock(&xfrm_policy_afinfo_lock);
3907
3908	return err;
3909}
3910EXPORT_SYMBOL(xfrm_policy_register_afinfo);
3911
3912void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
3913{
3914	struct dst_ops *dst_ops = afinfo->dst_ops;
3915	int i;
3916
3917	for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
3918		if (xfrm_policy_afinfo[i] != afinfo)
3919			continue;
3920		RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
3921		break;
3922	}
3923
3924	synchronize_rcu();
3925
3926	dst_ops->kmem_cachep = NULL;
3927	dst_ops->check = NULL;
3928	dst_ops->negative_advice = NULL;
3929	dst_ops->link_failure = NULL;
3930}
3931EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
3932
3933void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
3934{
3935	spin_lock(&xfrm_if_cb_lock);
3936	rcu_assign_pointer(xfrm_if_cb, ifcb);
3937	spin_unlock(&xfrm_if_cb_lock);
3938}
3939EXPORT_SYMBOL(xfrm_if_register_cb);
3940
3941void xfrm_if_unregister_cb(void)
3942{
3943	RCU_INIT_POINTER(xfrm_if_cb, NULL);
3944	synchronize_rcu();
3945}
3946EXPORT_SYMBOL(xfrm_if_unregister_cb);
3947
3948#ifdef CONFIG_XFRM_STATISTICS
3949static int __net_init xfrm_statistics_init(struct net *net)
3950{
3951	int rv;
3952	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
3953	if (!net->mib.xfrm_statistics)
3954		return -ENOMEM;
3955	rv = xfrm_proc_init(net);
3956	if (rv < 0)
3957		free_percpu(net->mib.xfrm_statistics);
3958	return rv;
3959}
3960
3961static void xfrm_statistics_fini(struct net *net)
3962{
3963	xfrm_proc_fini(net);
3964	free_percpu(net->mib.xfrm_statistics);
3965}
3966#else
3967static int __net_init xfrm_statistics_init(struct net *net)
3968{
3969	return 0;
3970}
3971
3972static void xfrm_statistics_fini(struct net *net)
3973{
3974}
3975#endif
3976
3977static int __net_init xfrm_policy_init(struct net *net)
3978{
3979	unsigned int hmask, sz;
3980	int dir, err;
3981
3982	if (net_eq(net, &init_net)) {
3983		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
3984					   sizeof(struct xfrm_dst),
3985					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3986					   NULL);
3987		err = rhashtable_init(&xfrm_policy_inexact_table,
3988				      &xfrm_pol_inexact_params);
3989		BUG_ON(err);
3990	}
3991
3992	hmask = 8 - 1;
3993	sz = (hmask+1) * sizeof(struct hlist_head);
3994
3995	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
3996	if (!net->xfrm.policy_byidx)
3997		goto out_byidx;
3998	net->xfrm.policy_idx_hmask = hmask;
3999
4000	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4001		struct xfrm_policy_hash *htab;
4002
4003		net->xfrm.policy_count[dir] = 0;
4004		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
4005		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
4006
4007		htab = &net->xfrm.policy_bydst[dir];
4008		htab->table = xfrm_hash_alloc(sz);
4009		if (!htab->table)
4010			goto out_bydst;
4011		htab->hmask = hmask;
4012		htab->dbits4 = 32;
4013		htab->sbits4 = 32;
4014		htab->dbits6 = 128;
4015		htab->sbits6 = 128;
4016	}
4017	net->xfrm.policy_hthresh.lbits4 = 32;
4018	net->xfrm.policy_hthresh.rbits4 = 32;
4019	net->xfrm.policy_hthresh.lbits6 = 128;
4020	net->xfrm.policy_hthresh.rbits6 = 128;
4021
4022	seqlock_init(&net->xfrm.policy_hthresh.lock);
4023
4024	INIT_LIST_HEAD(&net->xfrm.policy_all);
4025	INIT_LIST_HEAD(&net->xfrm.inexact_bins);
4026	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
4027	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
4028	return 0;
4029
4030out_bydst:
4031	for (dir--; dir >= 0; dir--) {
4032		struct xfrm_policy_hash *htab;
4033
4034		htab = &net->xfrm.policy_bydst[dir];
4035		xfrm_hash_free(htab->table, sz);
4036	}
4037	xfrm_hash_free(net->xfrm.policy_byidx, sz);
4038out_byidx:
4039	return -ENOMEM;
4040}
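/* Note on sizing: each per-direction "bydst" table and the "byidx" table
 * start with hmask = 7, i.e. 8 hash buckets (sz = 8 * sizeof(struct
 * hlist_head)).  xfrm_bydst_should_resize() schedules policy_hash_work
 * once the number of policies in a direction exceeds the mask, so the
 * tables grow on demand rather than being sized up front.
 */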
4041
4042static void xfrm_policy_fini(struct net *net)
4043{
4044	struct xfrm_pol_inexact_bin *b, *t;
4045	unsigned int sz;
4046	int dir;
4047
4048	flush_work(&net->xfrm.policy_hash_work);
4049#ifdef CONFIG_XFRM_SUB_POLICY
4050	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
4051#endif
4052	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
4053
4054	WARN_ON(!list_empty(&net->xfrm.policy_all));
4055
4056	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4057		struct xfrm_policy_hash *htab;
4058
4059		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
4060
4061		htab = &net->xfrm.policy_bydst[dir];
4062		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
4063		WARN_ON(!hlist_empty(htab->table));
4064		xfrm_hash_free(htab->table, sz);
4065	}
4066
4067	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
4068	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
4069	xfrm_hash_free(net->xfrm.policy_byidx, sz);
4070
4071	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4072	list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
4073		__xfrm_policy_inexact_prune_bin(b, true);
4074	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4075}
4076
4077static int __net_init xfrm_net_init(struct net *net)
4078{
4079	int rv;
4080
4081	/* Initialize the per-net locks here */
4082	spin_lock_init(&net->xfrm.xfrm_state_lock);
4083	spin_lock_init(&net->xfrm.xfrm_policy_lock);
4084	seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
4085	mutex_init(&net->xfrm.xfrm_cfg_mutex);
4086
4087	rv = xfrm_statistics_init(net);
4088	if (rv < 0)
4089		goto out_statistics;
4090	rv = xfrm_state_init(net);
4091	if (rv < 0)
4092		goto out_state;
4093	rv = xfrm_policy_init(net);
4094	if (rv < 0)
4095		goto out_policy;
4096	rv = xfrm_sysctl_init(net);
4097	if (rv < 0)
4098		goto out_sysctl;
4099
4100	return 0;
4101
4102out_sysctl:
4103	xfrm_policy_fini(net);
4104out_policy:
4105	xfrm_state_fini(net);
4106out_state:
4107	xfrm_statistics_fini(net);
4108out_statistics:
4109	return rv;
4110}
4111
4112static void __net_exit xfrm_net_exit(struct net *net)
4113{
4114	xfrm_sysctl_fini(net);
4115	xfrm_policy_fini(net);
4116	xfrm_state_fini(net);
4117	xfrm_statistics_fini(net);
4118}
4119
4120static struct pernet_operations __net_initdata xfrm_net_ops = {
4121	.init = xfrm_net_init,
4122	.exit = xfrm_net_exit,
4123};
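/* register_pernet_subsys(&xfrm_net_ops) in xfrm_init() below arranges for
 * xfrm_net_init()/xfrm_net_exit() to run for every network namespace,
 * including namespaces created later, so all of the state initialized
 * above is namespace-local.
 */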
4124
4125void __init xfrm_init(void)
4126{
4127	register_pernet_subsys(&xfrm_net_ops);
4128	xfrm_dev_init();
4129	xfrm_input_init();
4130
4131#ifdef CONFIG_XFRM_ESPINTCP
4132	espintcp_init();
4133#endif
4134}
4135
4136#ifdef CONFIG_AUDITSYSCALL
4137static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
4138					 struct audit_buffer *audit_buf)
4139{
4140	struct xfrm_sec_ctx *ctx = xp->security;
4141	struct xfrm_selector *sel = &xp->selector;
4142
4143	if (ctx)
4144		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
4145				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
4146
4147	switch (sel->family) {
4148	case AF_INET:
4149		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
4150		if (sel->prefixlen_s != 32)
4151			audit_log_format(audit_buf, " src_prefixlen=%d",
4152					 sel->prefixlen_s);
4153		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
4154		if (sel->prefixlen_d != 32)
4155			audit_log_format(audit_buf, " dst_prefixlen=%d",
4156					 sel->prefixlen_d);
4157		break;
4158	case AF_INET6:
4159		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
4160		if (sel->prefixlen_s != 128)
4161			audit_log_format(audit_buf, " src_prefixlen=%d",
4162					 sel->prefixlen_s);
4163		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
4164		if (sel->prefixlen_d != 128)
4165			audit_log_format(audit_buf, " dst_prefixlen=%d",
4166					 sel->prefixlen_d);
4167		break;
4168	}
4169}
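/* The emitted audit record carries the selector in readable form, roughly
 * (illustrative only, field values invented):
 *
 *   op=SPD-add ... res=1 src=10.0.0.0 src_prefixlen=8 dst=192.0.2.1
 *
 * Prefix lengths are emitted only when they differ from the full host
 * length (32 for IPv4, 128 for IPv6).
 */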
4170
4171void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
4172{
4173	struct audit_buffer *audit_buf;
4174
4175	audit_buf = xfrm_audit_start("SPD-add");
4176	if (audit_buf == NULL)
4177		return;
4178	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4179	audit_log_format(audit_buf, " res=%u", result);
4180	xfrm_audit_common_policyinfo(xp, audit_buf);
4181	audit_log_end(audit_buf);
4182}
4183EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
4184
4185void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
4186			      bool task_valid)
4187{
4188	struct audit_buffer *audit_buf;
4189
4190	audit_buf = xfrm_audit_start("SPD-delete");
4191	if (audit_buf == NULL)
4192		return;
4193	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4194	audit_log_format(audit_buf, " res=%u", result);
4195	xfrm_audit_common_policyinfo(xp, audit_buf);
4196	audit_log_end(audit_buf);
4197}
4198EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
4199#endif
4200
4201#ifdef CONFIG_XFRM_MIGRATE
4202static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
4203					const struct xfrm_selector *sel_tgt)
4204{
4205	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
4206		if (sel_tgt->family == sel_cmp->family &&
4207		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
4208				    sel_cmp->family) &&
4209		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
4210				    sel_cmp->family) &&
4211		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
4212		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
4213			return true;
4214		}
4215	} else {
4216		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
4217			return true;
4218		}
4219	}
4220	return false;
4221}
4222
4223static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
4224						    u8 dir, u8 type, struct net *net)
4225{
4226	struct xfrm_policy *pol, *ret = NULL;
4227	struct hlist_head *chain;
4228	u32 priority = ~0U;
4229
4230	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4231	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
4232	hlist_for_each_entry(pol, chain, bydst) {
4233		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
4234		    pol->type == type) {
4235			ret = pol;
4236			priority = ret->priority;
4237			break;
4238		}
4239	}
4240	chain = &net->xfrm.policy_inexact[dir];
4241	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
4242		if ((pol->priority >= priority) && ret)
4243			break;
4244
4245		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
4246		    pol->type == type) {
4247			ret = pol;
4248			break;
4249		}
4250	}
4251
4252	xfrm_pol_hold(ret);
4253
4254	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4255
4256	return ret;
4257}
4258
4259static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
4260{
4261	int match = 0;
4262
4263	if (t->mode == m->mode && t->id.proto == m->proto &&
4264	    (m->reqid == 0 || t->reqid == m->reqid)) {
4265		switch (t->mode) {
4266		case XFRM_MODE_TUNNEL:
4267		case XFRM_MODE_BEET:
4268			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
4269					    m->old_family) &&
4270			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
4271					    m->old_family)) {
4272				match = 1;
4273			}
4274			break;
4275		case XFRM_MODE_TRANSPORT:
4276			/* In transport mode the template does not store any
4277			 * IP addresses, so we only compare the mode and the
4278			 * protocol. */
4279			match = 1;
4280			break;
4281		default:
4282			break;
4283		}
4284	}
4285	return match;
4286}
4287
4288/* update endpoint address(es) of template(s) */
4289static int xfrm_policy_migrate(struct xfrm_policy *pol,
4290			       struct xfrm_migrate *m, int num_migrate)
4291{
4292	struct xfrm_migrate *mp;
4293	int i, j, n = 0;
4294
4295	write_lock_bh(&pol->lock);
4296	if (unlikely(pol->walk.dead)) {
4297		/* target policy has been deleted */
4298		write_unlock_bh(&pol->lock);
4299		return -ENOENT;
4300	}
4301
4302	for (i = 0; i < pol->xfrm_nr; i++) {
4303		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
4304			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
4305				continue;
4306			n++;
4307			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
4308			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
4309				continue;
4310			/* update endpoints */
4311			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
4312			       sizeof(pol->xfrm_vec[i].id.daddr));
4313			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
4314			       sizeof(pol->xfrm_vec[i].saddr));
4315			pol->xfrm_vec[i].encap_family = mp->new_family;
4316			/* flush bundles */
4317			atomic_inc(&pol->genid);
4318		}
4319	}
4320
4321	write_unlock_bh(&pol->lock);
4322
4323	if (!n)
4324		return -ENODATA;
4325
4326	return 0;
4327}
4328
4329static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
4330{
4331	int i, j;
4332
4333	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
4334		return -EINVAL;
4335
4336	for (i = 0; i < num_migrate; i++) {
4337		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
4338		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
4339			return -EINVAL;
4340
4341		/* check for duplicate entries */
4342		for (j = i + 1; j < num_migrate; j++) {
4343			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
4344				    sizeof(m[i].old_daddr)) &&
4345			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
4346				    sizeof(m[i].old_saddr)) &&
4347			    m[i].proto == m[j].proto &&
4348			    m[i].mode == m[j].mode &&
4349			    m[i].reqid == m[j].reqid &&
4350			    m[i].old_family == m[j].old_family)
4351				return -EINVAL;
4352		}
4353	}
4354
4355	return 0;
4356}
4357
4358int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
4359		 struct xfrm_migrate *m, int num_migrate,
4360		 struct xfrm_kmaddress *k, struct net *net,
4361		 struct xfrm_encap_tmpl *encap)
4362{
4363	int i, err, nx_cur = 0, nx_new = 0;
4364	struct xfrm_policy *pol = NULL;
4365	struct xfrm_state *x, *xc;
4366	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
4367	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
4368	struct xfrm_migrate *mp;
4369
4370	/* Stage 0 - sanity checks */
4371	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
4372		goto out;
4373
4374	if (dir >= XFRM_POLICY_MAX) {
4375		err = -EINVAL;
4376		goto out;
4377	}
4378
4379	/* Stage 1 - find policy */
4380	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
4381		err = -ENOENT;
4382		goto out;
4383	}
4384
4385	/* Stage 2 - find and update state(s) */
4386	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
4387		if ((x = xfrm_migrate_state_find(mp, net))) {
4388			x_cur[nx_cur] = x;
4389			nx_cur++;
4390			xc = xfrm_state_migrate(x, mp, encap);
4391			if (xc) {
4392				x_new[nx_new] = xc;
4393				nx_new++;
4394			} else {
4395				err = -ENODATA;
4396				goto restore_state;
4397			}
4398		}
4399	}
4400
4401	/* Stage 3 - update policy */
4402	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
4403		goto restore_state;
4404
4405	/* Stage 4 - delete old state(s) */
4406	if (nx_cur) {
4407		xfrm_states_put(x_cur, nx_cur);
4408		xfrm_states_delete(x_cur, nx_cur);
4409	}
4410
4411	/* Stage 5 - announce */
4412	km_migrate(sel, dir, type, m, num_migrate, k, encap);
4413
4414	xfrm_pol_put(pol);
4415
4416	return 0;
4417out:
4418	return err;
4419
4420restore_state:
4421	if (pol)
4422		xfrm_pol_put(pol);
4423	if (nx_cur)
4424		xfrm_states_put(x_cur, nx_cur);
4425	if (nx_new)
4426		xfrm_states_delete(x_new, nx_new);
4427
4428	return err;
4429}
4430EXPORT_SYMBOL(xfrm_migrate);
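/* Summary of the stages above: xfrm_migrate() validates the request,
 * finds the policy by selector, clones every matching state with the new
 * endpoints (xfrm_state_migrate()), rewrites the policy templates
 * (xfrm_policy_migrate()), deletes the old states, and finally notifies
 * the key managers via km_migrate().  If anything fails after states were
 * cloned, the clones are deleted and the original states stay in place.
 */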
4431#endif
v3.15
 
   1/*
   2 * xfrm_policy.c
   3 *
   4 * Changes:
   5 *	Mitsuru KANDA @USAGI
   6 * 	Kazunori MIYAZAWA @USAGI
   7 * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
   8 * 		IPv6 support
   9 * 	Kazunori MIYAZAWA @USAGI
  10 * 	YOSHIFUJI Hideaki
  11 * 		Split up af-specific portion
  12 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
  13 *
  14 */
  15
  16#include <linux/err.h>
  17#include <linux/slab.h>
  18#include <linux/kmod.h>
  19#include <linux/list.h>
  20#include <linux/spinlock.h>
  21#include <linux/workqueue.h>
  22#include <linux/notifier.h>
  23#include <linux/netdevice.h>
  24#include <linux/netfilter.h>
  25#include <linux/module.h>
  26#include <linux/cache.h>
  27#include <linux/audit.h>
  28#include <net/dst.h>
  29#include <net/flow.h>
  30#include <net/xfrm.h>
  31#include <net/ip.h>
  32#ifdef CONFIG_XFRM_STATISTICS
  33#include <net/snmp.h>
  34#endif
  35
  36#include "xfrm_hash.h"
  37
  38#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
  39#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
  40#define XFRM_MAX_QUEUE_LEN	100
  41
  42static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
  43static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
  44						__read_mostly;
  45
  46static struct kmem_cache *xfrm_dst_cache __read_mostly;
  47
  48static void xfrm_init_pmtu(struct dst_entry *dst);
  49static int stale_bundle(struct dst_entry *dst);
  50static int xfrm_bundle_ok(struct xfrm_dst *xdst);
  51static void xfrm_policy_queue_process(unsigned long arg);
  52
  53static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
  54						int dir);
  55
  56static inline bool
  57__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
  58{
  59	const struct flowi4 *fl4 = &fl->u.ip4;
  60
  61	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
  62		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
  63		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
  64		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
  65		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
  66		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
  67}
  68
  69static inline bool
  70__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
  71{
  72	const struct flowi6 *fl6 = &fl->u.ip6;
  73
  74	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
  75		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
  76		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
  77		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
  78		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
  79		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
  80}
  81
  82bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
  83			 unsigned short family)
  84{
  85	switch (family) {
  86	case AF_INET:
  87		return __xfrm4_selector_match(sel, fl);
  88	case AF_INET6:
  89		return __xfrm6_selector_match(sel, fl);
  90	}
  91	return false;
  92}
  93
  94static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
  95{
  96	struct xfrm_policy_afinfo *afinfo;
  97
  98	if (unlikely(family >= NPROTO))
  99		return NULL;
 100	rcu_read_lock();
 101	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
 102	if (unlikely(!afinfo))
 103		rcu_read_unlock();
 104	return afinfo;
 105}
 106
 107static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
 108{
 109	rcu_read_unlock();
 110}
 111
 112static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
 113						  const xfrm_address_t *saddr,
 114						  const xfrm_address_t *daddr,
 115						  int family)
 116{
 117	struct xfrm_policy_afinfo *afinfo;
 118	struct dst_entry *dst;
 119
 120	afinfo = xfrm_policy_get_afinfo(family);
 121	if (unlikely(afinfo == NULL))
 122		return ERR_PTR(-EAFNOSUPPORT);
 123
 124	dst = afinfo->dst_lookup(net, tos, saddr, daddr);
 125
 126	xfrm_policy_put_afinfo(afinfo);
 127
 128	return dst;
 129}
 130
 131static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
 132						xfrm_address_t *prev_saddr,
 133						xfrm_address_t *prev_daddr,
 134						int family)
 135{
 136	struct net *net = xs_net(x);
 137	xfrm_address_t *saddr = &x->props.saddr;
 138	xfrm_address_t *daddr = &x->id.daddr;
 139	struct dst_entry *dst;
 140
 141	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
 142		saddr = x->coaddr;
 143		daddr = prev_daddr;
 144	}
 145	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
 146		saddr = prev_saddr;
 147		daddr = x->coaddr;
 148	}
 149
 150	dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);
 151
 152	if (!IS_ERR(dst)) {
 153		if (prev_saddr != saddr)
 154			memcpy(prev_saddr, saddr,  sizeof(*prev_saddr));
 155		if (prev_daddr != daddr)
 156			memcpy(prev_daddr, daddr,  sizeof(*prev_daddr));
 157	}
 158
 159	return dst;
 160}
 161
 162static inline unsigned long make_jiffies(long secs)
 163{
 164	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
 165		return MAX_SCHEDULE_TIMEOUT-1;
 166	else
 167		return secs*HZ;
 168}
 169
 170static void xfrm_policy_timer(unsigned long data)
 171{
 172	struct xfrm_policy *xp = (struct xfrm_policy *)data;
 173	unsigned long now = get_seconds();
 174	long next = LONG_MAX;
 175	int warn = 0;
 176	int dir;
 177
 178	read_lock(&xp->lock);
 179
 180	if (unlikely(xp->walk.dead))
 181		goto out;
 182
 183	dir = xfrm_policy_id2dir(xp->index);
 184
 185	if (xp->lft.hard_add_expires_seconds) {
 186		long tmo = xp->lft.hard_add_expires_seconds +
 187			xp->curlft.add_time - now;
 188		if (tmo <= 0)
 189			goto expired;
 190		if (tmo < next)
 191			next = tmo;
 192	}
 193	if (xp->lft.hard_use_expires_seconds) {
 194		long tmo = xp->lft.hard_use_expires_seconds +
 195			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
 196		if (tmo <= 0)
 197			goto expired;
 198		if (tmo < next)
 199			next = tmo;
 200	}
 201	if (xp->lft.soft_add_expires_seconds) {
 202		long tmo = xp->lft.soft_add_expires_seconds +
 203			xp->curlft.add_time - now;
 204		if (tmo <= 0) {
 205			warn = 1;
 206			tmo = XFRM_KM_TIMEOUT;
 207		}
 208		if (tmo < next)
 209			next = tmo;
 210	}
 211	if (xp->lft.soft_use_expires_seconds) {
 212		long tmo = xp->lft.soft_use_expires_seconds +
 213			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
 214		if (tmo <= 0) {
 215			warn = 1;
 216			tmo = XFRM_KM_TIMEOUT;
 217		}
 218		if (tmo < next)
 219			next = tmo;
 220	}
 221
 222	if (warn)
 223		km_policy_expired(xp, dir, 0, 0);
 224	if (next != LONG_MAX &&
 225	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
 226		xfrm_pol_hold(xp);
 227
 228out:
 229	read_unlock(&xp->lock);
 230	xfrm_pol_put(xp);
 231	return;
 232
 233expired:
 234	read_unlock(&xp->lock);
 235	if (!xfrm_policy_delete(xp, dir))
 236		km_policy_expired(xp, dir, 1, 0);
 237	xfrm_pol_put(xp);
 238}
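/* Lifetime handling: a hard expiry deletes the policy and signals
 * km_policy_expired(..., hard=1); a soft expiry only warns the key
 * manager (hard=0) and re-arms the timer XFRM_KM_TIMEOUT seconds later,
 * giving userspace time to rekey before the hard limit is reached.
 */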
 239
 240static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
 241{
 242	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
 243
 244	if (unlikely(pol->walk.dead))
 245		flo = NULL;
 246	else
 247		xfrm_pol_hold(pol);
 248
 249	return flo;
 250}
 251
 252static int xfrm_policy_flo_check(struct flow_cache_object *flo)
 253{
 254	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
 255
 256	return !pol->walk.dead;
 257}
 258
 259static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
 260{
 261	xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
 262}
 263
 264static const struct flow_cache_ops xfrm_policy_fc_ops = {
 265	.get = xfrm_policy_flo_get,
 266	.check = xfrm_policy_flo_check,
 267	.delete = xfrm_policy_flo_delete,
 268};
 269
  270/* Allocate an xfrm_policy. Not used internally; it is meant to be used
  271 * by pfkeyv2 SPD calls.
  272 */
 273
 274struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
 275{
 276	struct xfrm_policy *policy;
 277
 278	policy = kzalloc(sizeof(struct xfrm_policy), gfp);
 279
 280	if (policy) {
 281		write_pnet(&policy->xp_net, net);
 282		INIT_LIST_HEAD(&policy->walk.all);
 283		INIT_HLIST_NODE(&policy->bydst);
 284		INIT_HLIST_NODE(&policy->byidx);
 285		rwlock_init(&policy->lock);
 286		atomic_set(&policy->refcnt, 1);
 287		skb_queue_head_init(&policy->polq.hold_queue);
 288		setup_timer(&policy->timer, xfrm_policy_timer,
 289				(unsigned long)policy);
 290		setup_timer(&policy->polq.hold_timer, xfrm_policy_queue_process,
 291			    (unsigned long)policy);
 292		policy->flo.ops = &xfrm_policy_fc_ops;
 293	}
 294	return policy;
 295}
 296EXPORT_SYMBOL(xfrm_policy_alloc);
 297
  298/* Destroy xfrm_policy: descendant resources must have been released by this point. */
 299
 300void xfrm_policy_destroy(struct xfrm_policy *policy)
 301{
 302	BUG_ON(!policy->walk.dead);
 303
 304	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
 305		BUG();
 306
 307	security_xfrm_policy_free(policy->security);
 308	kfree(policy);
 309}
 310EXPORT_SYMBOL(xfrm_policy_destroy);
 311
 312static void xfrm_queue_purge(struct sk_buff_head *list)
 313{
 314	struct sk_buff *skb;
 315
 316	while ((skb = skb_dequeue(list)) != NULL)
 317		kfree_skb(skb);
 318}
 319
  320/* Rule must be locked. Release descendant resources, announce
  321 * the entry dead. The rule must already be unlinked from all lists.
  322 */
 323
 324static void xfrm_policy_kill(struct xfrm_policy *policy)
 325{
 326	policy->walk.dead = 1;
 327
 328	atomic_inc(&policy->genid);
 329
 330	if (del_timer(&policy->polq.hold_timer))
 331		xfrm_pol_put(policy);
 332	xfrm_queue_purge(&policy->polq.hold_queue);
 333
 334	if (del_timer(&policy->timer))
 335		xfrm_pol_put(policy);
 336
 337	xfrm_pol_put(policy);
 338}
 339
 340static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
 341
 342static inline unsigned int idx_hash(struct net *net, u32 index)
 343{
 344	return __idx_hash(index, net->xfrm.policy_idx_hmask);
 345}
 346
 347static struct hlist_head *policy_hash_bysel(struct net *net,
 348					    const struct xfrm_selector *sel,
 349					    unsigned short family, int dir)
 350{
 351	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 352	unsigned int hash = __sel_hash(sel, family, hmask);
 353
 354	return (hash == hmask + 1 ?
 355		&net->xfrm.policy_inexact[dir] :
 356		net->xfrm.policy_bydst[dir].table + hash);
 357}
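/* __sel_hash() returns hmask + 1 (one past the last valid bucket) whenever
 * the selector carries anything but full host prefixes; such policies live
 * on the per-direction "inexact" list and are searched linearly instead of
 * being hashed.
 */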
 358
 359static struct hlist_head *policy_hash_direct(struct net *net,
 360					     const xfrm_address_t *daddr,
 361					     const xfrm_address_t *saddr,
 362					     unsigned short family, int dir)
 363{
 364	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 365	unsigned int hash = __addr_hash(daddr, saddr, family, hmask);
 366
 367	return net->xfrm.policy_bydst[dir].table + hash;
 368}
 369
 370static void xfrm_dst_hash_transfer(struct hlist_head *list,
 371				   struct hlist_head *ndsttable,
 372				   unsigned int nhashmask)
 373{
 374	struct hlist_node *tmp, *entry0 = NULL;
 375	struct xfrm_policy *pol;
 376	unsigned int h0 = 0;
 377
 378redo:
 379	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
 380		unsigned int h;
 381
 382		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
 383				pol->family, nhashmask);
 384		if (!entry0) {
 385			hlist_del(&pol->bydst);
 386			hlist_add_head(&pol->bydst, ndsttable+h);
 387			h0 = h;
 388		} else {
 389			if (h != h0)
 390				continue;
 391			hlist_del(&pol->bydst);
 392			hlist_add_after(entry0, &pol->bydst);
 393		}
 394		entry0 = &pol->bydst;
 395	}
 396	if (!hlist_empty(list)) {
 397		entry0 = NULL;
 398		goto redo;
 399	}
 400}
 401
 402static void xfrm_idx_hash_transfer(struct hlist_head *list,
 403				   struct hlist_head *nidxtable,
 404				   unsigned int nhashmask)
 405{
 406	struct hlist_node *tmp;
 407	struct xfrm_policy *pol;
 408
 409	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
 410		unsigned int h;
 411
 412		h = __idx_hash(pol->index, nhashmask);
 413		hlist_add_head(&pol->byidx, nidxtable+h);
 414	}
 415}
 416
 417static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
 418{
 419	return ((old_hmask + 1) << 1) - 1;
 420}
 421
 422static void xfrm_bydst_resize(struct net *net, int dir)
 423{
 424	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 425	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
 426	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
 427	struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
 428	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
 429	int i;
 430
 431	if (!ndst)
 432		return;
 433
 434	write_lock_bh(&net->xfrm.xfrm_policy_lock);
 435
 436	for (i = hmask; i >= 0; i--)
 437		xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);
 438
 439	net->xfrm.policy_bydst[dir].table = ndst;
 440	net->xfrm.policy_bydst[dir].hmask = nhashmask;
 441
 442	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 443
 444	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
 445}
 446
 447static void xfrm_byidx_resize(struct net *net, int total)
 448{
 449	unsigned int hmask = net->xfrm.policy_idx_hmask;
 450	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
 451	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
 452	struct hlist_head *oidx = net->xfrm.policy_byidx;
 453	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
 454	int i;
 455
 456	if (!nidx)
 457		return;
 458
 459	write_lock_bh(&net->xfrm.xfrm_policy_lock);
 460
 461	for (i = hmask; i >= 0; i--)
 462		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
 463
 464	net->xfrm.policy_byidx = nidx;
 465	net->xfrm.policy_idx_hmask = nhashmask;
 466
 467	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 468
 469	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
 470}
 471
 472static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
 473{
 474	unsigned int cnt = net->xfrm.policy_count[dir];
 475	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 476
 477	if (total)
 478		*total += cnt;
 479
 480	if ((hmask + 1) < xfrm_policy_hashmax &&
 481	    cnt > hmask)
 482		return 1;
 483
 484	return 0;
 485}
 486
 487static inline int xfrm_byidx_should_resize(struct net *net, int total)
 488{
 489	unsigned int hmask = net->xfrm.policy_idx_hmask;
 490
 491	if ((hmask + 1) < xfrm_policy_hashmax &&
 492	    total > hmask)
 493		return 1;
 494
 495	return 0;
 496}
 497
 498void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
 499{
 500	read_lock_bh(&net->xfrm.xfrm_policy_lock);
 501	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
 502	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
 503	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
 504	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
 505	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
 506	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
 507	si->spdhcnt = net->xfrm.policy_idx_hmask;
 508	si->spdhmcnt = xfrm_policy_hashmax;
 509	read_unlock_bh(&net->xfrm.xfrm_policy_lock);
 510}
 511EXPORT_SYMBOL(xfrm_spd_getinfo);
 512
 513static DEFINE_MUTEX(hash_resize_mutex);
 514static void xfrm_hash_resize(struct work_struct *work)
 515{
 516	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
 517	int dir, total;
 518
 519	mutex_lock(&hash_resize_mutex);
 520
 521	total = 0;
 522	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
 523		if (xfrm_bydst_should_resize(net, dir, &total))
 524			xfrm_bydst_resize(net, dir);
 525	}
 526	if (xfrm_byidx_should_resize(net, total))
 527		xfrm_byidx_resize(net, total);
 528
 529	mutex_unlock(&hash_resize_mutex);
 530}
 531
  532/* Generate a new index... KAME seems to generate them ordered by cost,
  533 * at the price of completely unpredictable rule ordering. That will not do here. */
 534static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
 535{
 536	static u32 idx_generator;
 537
 538	for (;;) {
 539		struct hlist_head *list;
 540		struct xfrm_policy *p;
 541		u32 idx;
 542		int found;
 543
 544		if (!index) {
 545			idx = (idx_generator | dir);
 546			idx_generator += 8;
 547		} else {
 548			idx = index;
 549			index = 0;
 550		}
 551
 552		if (idx == 0)
 553			idx = 8;
 554		list = net->xfrm.policy_byidx + idx_hash(net, idx);
 555		found = 0;
 556		hlist_for_each_entry(p, list, byidx) {
 557			if (p->index == idx) {
 558				found = 1;
 559				break;
 560			}
 561		}
 562		if (!found)
 563			return idx;
 564	}
 565}
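/* The generated index encodes the direction in its low bits: idx_generator
 * advances in steps of 8, leaving the low three bits free for 'dir', which
 * is how xfrm_policy_id2dir() recovers the direction from an index.  The
 * byidx hash chain is probed to guarantee uniqueness.
 */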
 566
 567static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
 568{
 569	u32 *p1 = (u32 *) s1;
 570	u32 *p2 = (u32 *) s2;
 571	int len = sizeof(struct xfrm_selector) / sizeof(u32);
 572	int i;
 573
 574	for (i = 0; i < len; i++) {
 575		if (p1[i] != p2[i])
 576			return 1;
 577	}
 578
 579	return 0;
 580}
 581
 582static void xfrm_policy_requeue(struct xfrm_policy *old,
 583				struct xfrm_policy *new)
 584{
 585	struct xfrm_policy_queue *pq = &old->polq;
 586	struct sk_buff_head list;
 587
 588	__skb_queue_head_init(&list);
 589
 590	spin_lock_bh(&pq->hold_queue.lock);
 591	skb_queue_splice_init(&pq->hold_queue, &list);
 592	if (del_timer(&pq->hold_timer))
 593		xfrm_pol_put(old);
 594	spin_unlock_bh(&pq->hold_queue.lock);
 595
 596	if (skb_queue_empty(&list))
 597		return;
 598
 599	pq = &new->polq;
 600
 601	spin_lock_bh(&pq->hold_queue.lock);
 602	skb_queue_splice(&list, &pq->hold_queue);
 603	pq->timeout = XFRM_QUEUE_TMO_MIN;
 604	if (!mod_timer(&pq->hold_timer, jiffies))
 605		xfrm_pol_hold(new);
 606	spin_unlock_bh(&pq->hold_queue.lock);
 607}
 608
 609static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
 610				   struct xfrm_policy *pol)
 611{
 612	u32 mark = policy->mark.v & policy->mark.m;
 613
 614	if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
 615		return true;
 616
 617	if ((mark & pol->mark.m) == pol->mark.v &&
 618	    policy->priority == pol->priority)
 619		return true;
 620
 621	return false;
 622}
 623
 624int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
 625{
 626	struct net *net = xp_net(policy);
 627	struct xfrm_policy *pol;
 628	struct xfrm_policy *delpol;
 629	struct hlist_head *chain;
 630	struct hlist_node *newpos;
 631
 632	write_lock_bh(&net->xfrm.xfrm_policy_lock);
 633	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
 634	delpol = NULL;
 635	newpos = NULL;
 636	hlist_for_each_entry(pol, chain, bydst) {
 637		if (pol->type == policy->type &&
 638		    !selector_cmp(&pol->selector, &policy->selector) &&
 639		    xfrm_policy_mark_match(policy, pol) &&
 640		    xfrm_sec_ctx_match(pol->security, policy->security) &&
 641		    !WARN_ON(delpol)) {
 642			if (excl) {
 643				write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 644				return -EEXIST;
 645			}
 646			delpol = pol;
 647			if (policy->priority > pol->priority)
 648				continue;
 649		} else if (policy->priority >= pol->priority) {
 650			newpos = &pol->bydst;
 651			continue;
 652		}
 653		if (delpol)
 654			break;
 655	}
 656	if (newpos)
 657		hlist_add_after(newpos, &policy->bydst);
 658	else
 659		hlist_add_head(&policy->bydst, chain);
 660	xfrm_pol_hold(policy);
 661	net->xfrm.policy_count[dir]++;
 662	atomic_inc(&net->xfrm.flow_cache_genid);
 663
  664	/* After the previous checks, family can only be AF_INET or AF_INET6 */
 665	if (policy->family == AF_INET)
 666		rt_genid_bump_ipv4(net);
 667	else
 668		rt_genid_bump_ipv6(net);
 669
 670	if (delpol) {
 671		xfrm_policy_requeue(delpol, policy);
 672		__xfrm_policy_unlink(delpol, dir);
 673	}
 674	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
 675	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
 676	policy->curlft.add_time = get_seconds();
 677	policy->curlft.use_time = 0;
 678	if (!mod_timer(&policy->timer, jiffies + HZ))
 679		xfrm_pol_hold(policy);
 680	list_add(&policy->walk.all, &net->xfrm.policy_all);
 681	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 682
 683	if (delpol)
 684		xfrm_policy_kill(delpol);
 685	else if (xfrm_bydst_should_resize(net, dir, NULL))
 686		schedule_work(&net->xfrm.policy_hash_work);
 687
 688	return 0;
 689}
 690EXPORT_SYMBOL(xfrm_policy_insert);
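/* The chain walk above keeps each bucket sorted by ascending priority
 * value (lower value = more important).  An exact duplicate (same type,
 * selector, mark and security context) is either rejected with -EEXIST
 * when 'excl' is set, or replaced, with any queued packets handed over to
 * the new policy by xfrm_policy_requeue().
 */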
 691
 692struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
 693					  int dir, struct xfrm_selector *sel,
 694					  struct xfrm_sec_ctx *ctx, int delete,
 695					  int *err)
 696{
 697	struct xfrm_policy *pol, *ret;
 698	struct hlist_head *chain;
 699
 700	*err = 0;
 701	write_lock_bh(&net->xfrm.xfrm_policy_lock);
 702	chain = policy_hash_bysel(net, sel, sel->family, dir);
 703	ret = NULL;
 704	hlist_for_each_entry(pol, chain, bydst) {
 705		if (pol->type == type &&
 706		    (mark & pol->mark.m) == pol->mark.v &&
 707		    !selector_cmp(sel, &pol->selector) &&
 708		    xfrm_sec_ctx_match(ctx, pol->security)) {
 709			xfrm_pol_hold(pol);
 710			if (delete) {
 711				*err = security_xfrm_policy_delete(
 712								pol->security);
 713				if (*err) {
 714					write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 715					return pol;
 716				}
 717				__xfrm_policy_unlink(pol, dir);
 718			}
 719			ret = pol;
 720			break;
 721		}
 722	}
 723	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 724
 725	if (ret && delete)
 726		xfrm_policy_kill(ret);
 727	return ret;
 728}
 729EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
 730
 731struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
 732				     int dir, u32 id, int delete, int *err)
 733{
 734	struct xfrm_policy *pol, *ret;
 735	struct hlist_head *chain;
 736
 737	*err = -ENOENT;
 738	if (xfrm_policy_id2dir(id) != dir)
 739		return NULL;
 740
 741	*err = 0;
 742	write_lock_bh(&net->xfrm.xfrm_policy_lock);
 743	chain = net->xfrm.policy_byidx + idx_hash(net, id);
 744	ret = NULL;
 745	hlist_for_each_entry(pol, chain, byidx) {
 746		if (pol->type == type && pol->index == id &&
 747		    (mark & pol->mark.m) == pol->mark.v) {
 748			xfrm_pol_hold(pol);
 749			if (delete) {
 750				*err = security_xfrm_policy_delete(
 751								pol->security);
 752				if (*err) {
 753					write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 754					return pol;
 755				}
 756				__xfrm_policy_unlink(pol, dir);
 757			}
 758			ret = pol;
 759			break;
 760		}
 761	}
 762	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 763
 764	if (ret && delete)
 765		xfrm_policy_kill(ret);
 766	return ret;
 767}
 768EXPORT_SYMBOL(xfrm_policy_byid);
 769
 770#ifdef CONFIG_SECURITY_NETWORK_XFRM
 771static inline int
 772xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
 773{
 774	int dir, err = 0;
 775
 776	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
 777		struct xfrm_policy *pol;
 778		int i;
 779
 780		hlist_for_each_entry(pol,
 781				     &net->xfrm.policy_inexact[dir], bydst) {
 782			if (pol->type != type)
 783				continue;
 784			err = security_xfrm_policy_delete(pol->security);
 785			if (err) {
 786				xfrm_audit_policy_delete(pol, 0,
 787							 audit_info->loginuid,
 788							 audit_info->sessionid,
 789							 audit_info->secid);
 790				return err;
 791			}
 792		}
 793		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
 794			hlist_for_each_entry(pol,
 795					     net->xfrm.policy_bydst[dir].table + i,
 796					     bydst) {
 797				if (pol->type != type)
 798					continue;
 799				err = security_xfrm_policy_delete(
 800								pol->security);
 801				if (err) {
 802					xfrm_audit_policy_delete(pol, 0,
 803							audit_info->loginuid,
 804							audit_info->sessionid,
 805							audit_info->secid);
 806					return err;
 807				}
 808			}
 809		}
 810	}
 811	return err;
 812}
 813#else
 814static inline int
 815xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
 816{
 817	return 0;
 818}
 819#endif
 820
 821int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
 822{
 823	int dir, err = 0, cnt = 0;
 824
 825	write_lock_bh(&net->xfrm.xfrm_policy_lock);
 826
 827	err = xfrm_policy_flush_secctx_check(net, type, audit_info);
 828	if (err)
 829		goto out;
 830
 831	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
 832		struct xfrm_policy *pol;
 833		int i;
 834
 835	again1:
 836		hlist_for_each_entry(pol,
 837				     &net->xfrm.policy_inexact[dir], bydst) {
 838			if (pol->type != type)
 839				continue;
 840			__xfrm_policy_unlink(pol, dir);
 841			write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 842			cnt++;
 843
 844			xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
 845						 audit_info->sessionid,
 846						 audit_info->secid);
 847
 848			xfrm_policy_kill(pol);
 849
 850			write_lock_bh(&net->xfrm.xfrm_policy_lock);
 851			goto again1;
 852		}
 853
 854		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
 855	again2:
 856			hlist_for_each_entry(pol,
 857					     net->xfrm.policy_bydst[dir].table + i,
 858					     bydst) {
 859				if (pol->type != type)
 860					continue;
 861				__xfrm_policy_unlink(pol, dir);
 862				write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 863				cnt++;
 864
 865				xfrm_audit_policy_delete(pol, 1,
 866							 audit_info->loginuid,
 867							 audit_info->sessionid,
 868							 audit_info->secid);
 869				xfrm_policy_kill(pol);
 870
 871				write_lock_bh(&net->xfrm.xfrm_policy_lock);
 872				goto again2;
 873			}
 874		}
 875
 876	}
 877	if (!cnt)
 878		err = -ESRCH;
 879out:
 880	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 881	return err;
 882}
 883EXPORT_SYMBOL(xfrm_policy_flush);
 884
 885int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
 886		     int (*func)(struct xfrm_policy *, int, int, void*),
 887		     void *data)
 888{
 889	struct xfrm_policy *pol;
 890	struct xfrm_policy_walk_entry *x;
 891	int error = 0;
 892
 893	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
 894	    walk->type != XFRM_POLICY_TYPE_ANY)
 895		return -EINVAL;
 896
 897	if (list_empty(&walk->walk.all) && walk->seq != 0)
 898		return 0;
 899
 900	write_lock_bh(&net->xfrm.xfrm_policy_lock);
 901	if (list_empty(&walk->walk.all))
 902		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
 903	else
 904		x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
 905	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
 906		if (x->dead)
 907			continue;
 908		pol = container_of(x, struct xfrm_policy, walk);
 909		if (walk->type != XFRM_POLICY_TYPE_ANY &&
 910		    walk->type != pol->type)
 911			continue;
 912		error = func(pol, xfrm_policy_id2dir(pol->index),
 913			     walk->seq, data);
 914		if (error) {
 915			list_move_tail(&walk->walk.all, &x->all);
 916			goto out;
 917		}
 918		walk->seq++;
 919	}
 920	if (walk->seq == 0) {
 921		error = -ENOENT;
 922		goto out;
 923	}
 924	list_del_init(&walk->walk.all);
 925out:
 926	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 927	return error;
 928}
 929EXPORT_SYMBOL(xfrm_policy_walk);
 930
 931void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
 932{
 933	INIT_LIST_HEAD(&walk->walk.all);
 934	walk->walk.dead = 1;
 935	walk->type = type;
 936	walk->seq = 0;
 937}
 938EXPORT_SYMBOL(xfrm_policy_walk_init);
 939
 940void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
 941{
 942	if (list_empty(&walk->walk.all))
 943		return;
 944
  945	write_lock_bh(&net->xfrm.xfrm_policy_lock);
 946	list_del(&walk->walk.all);
 947	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 948}
 949EXPORT_SYMBOL(xfrm_policy_walk_done);
 950
 951/*
 952 * Find policy to apply to this flow.
 953 *
 954 * Returns 0 if policy found, else an -errno.
 955 */
 956static int xfrm_policy_match(const struct xfrm_policy *pol,
 957			     const struct flowi *fl,
 958			     u8 type, u16 family, int dir)
 959{
 960	const struct xfrm_selector *sel = &pol->selector;
 961	int ret = -ESRCH;
 962	bool match;
 963
 964	if (pol->family != family ||
 965	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
 966	    pol->type != type)
 967		return ret;
 968
 969	match = xfrm_selector_match(sel, fl, family);
 970	if (match)
 971		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
 972						  dir);
 973
 974	return ret;
 975}
 976
 977static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
 978						     const struct flowi *fl,
 979						     u16 family, u8 dir)
 980{
 981	int err;
 982	struct xfrm_policy *pol, *ret;
 983	const xfrm_address_t *daddr, *saddr;
 984	struct hlist_head *chain;
 985	u32 priority = ~0U;
 986
 987	daddr = xfrm_flowi_daddr(fl, family);
 988	saddr = xfrm_flowi_saddr(fl, family);
 989	if (unlikely(!daddr || !saddr))
 990		return NULL;
 991
 992	read_lock_bh(&net->xfrm.xfrm_policy_lock);
 993	chain = policy_hash_direct(net, daddr, saddr, family, dir);
 994	ret = NULL;
 995	hlist_for_each_entry(pol, chain, bydst) {
 996		err = xfrm_policy_match(pol, fl, type, family, dir);
 997		if (err) {
 998			if (err == -ESRCH)
 999				continue;
1000			else {
1001				ret = ERR_PTR(err);
1002				goto fail;
1003			}
1004		} else {
1005			ret = pol;
1006			priority = ret->priority;
1007			break;
1008		}
1009	}
1010	chain = &net->xfrm.policy_inexact[dir];
1011	hlist_for_each_entry(pol, chain, bydst) {
1012		err = xfrm_policy_match(pol, fl, type, family, dir);
1013		if (err) {
1014			if (err == -ESRCH)
1015				continue;
1016			else {
1017				ret = ERR_PTR(err);
1018				goto fail;
1019			}
1020		} else if (pol->priority < priority) {
1021			ret = pol;
1022			break;
1023		}
1024	}
1025	if (ret)
1026		xfrm_pol_hold(ret);
1027fail:
1028	read_unlock_bh(&net->xfrm.xfrm_policy_lock);
1029
1030	return ret;
1031}
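/* Lookup strategy: the hashed "bydst" chain is searched first; the best
 * hit's priority then bounds the linear scan of the inexact list, which
 * can only win with a strictly lower (more important) priority value.
 */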
1032
1033static struct xfrm_policy *
1034__xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
1035{
1036#ifdef CONFIG_XFRM_SUB_POLICY
1037	struct xfrm_policy *pol;
1038
1039	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
1040	if (pol != NULL)
1041		return pol;
1042#endif
1043	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
1044}
1045
1046static int flow_to_policy_dir(int dir)
1047{
1048	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
1049	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
1050	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
1051		return dir;
1052
1053	switch (dir) {
1054	default:
1055	case FLOW_DIR_IN:
1056		return XFRM_POLICY_IN;
1057	case FLOW_DIR_OUT:
1058		return XFRM_POLICY_OUT;
1059	case FLOW_DIR_FWD:
1060		return XFRM_POLICY_FWD;
1061	}
1062}
1063
1064static struct flow_cache_object *
1065xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
1066		   u8 dir, struct flow_cache_object *old_obj, void *ctx)
1067{
1068	struct xfrm_policy *pol;
1069
1070	if (old_obj)
1071		xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));
1072
1073	pol = __xfrm_policy_lookup(net, fl, family, flow_to_policy_dir(dir));
1074	if (IS_ERR_OR_NULL(pol))
1075		return ERR_CAST(pol);
1076
 1077	/* The resolver returns two references:
 1078	 * one for the cache and one for the caller of flow_cache_lookup(). */
1079	xfrm_pol_hold(pol);
1080
1081	return &pol->flo;
1082}
1083
1084static inline int policy_to_flow_dir(int dir)
1085{
1086	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
1087	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
1088	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
1089		return dir;
1090	switch (dir) {
1091	default:
1092	case XFRM_POLICY_IN:
1093		return FLOW_DIR_IN;
1094	case XFRM_POLICY_OUT:
1095		return FLOW_DIR_OUT;
1096	case XFRM_POLICY_FWD:
1097		return FLOW_DIR_FWD;
1098	}
1099}
1100
1101static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
1102						 const struct flowi *fl)
1103{
1104	struct xfrm_policy *pol;
1105	struct net *net = sock_net(sk);
1106
1107	read_lock_bh(&net->xfrm.xfrm_policy_lock);
1108	if ((pol = sk->sk_policy[dir]) != NULL) {
1109		bool match = xfrm_selector_match(&pol->selector, fl,
1110						 sk->sk_family);
1111		int err = 0;
1112
1113		if (match) {
1114			if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
1115				pol = NULL;
1116				goto out;
1117			}
1118			err = security_xfrm_policy_lookup(pol->security,
1119						      fl->flowi_secid,
1120						      policy_to_flow_dir(dir));
1121			if (!err)
1122				xfrm_pol_hold(pol);
1123			else if (err == -ESRCH)
1124				pol = NULL;
1125			else
1126				pol = ERR_PTR(err);
1127		} else
1128			pol = NULL;
1129	}
1130out:
1131	read_unlock_bh(&net->xfrm.xfrm_policy_lock);
1132	return pol;
1133}
1134
1135static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
1136{
1137	struct net *net = xp_net(pol);
1138	struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
1139						     pol->family, dir);
1140
1141	list_add(&pol->walk.all, &net->xfrm.policy_all);
1142	hlist_add_head(&pol->bydst, chain);
1143	hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
1144	net->xfrm.policy_count[dir]++;
1145	xfrm_pol_hold(pol);
1146
1147	if (xfrm_bydst_should_resize(net, dir, NULL))
1148		schedule_work(&net->xfrm.policy_hash_work);
1149}
1150
1151static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
1152						int dir)
1153{
1154	struct net *net = xp_net(pol);
1155
1156	if (hlist_unhashed(&pol->bydst))
1157		return NULL;
1158
1159	hlist_del_init(&pol->bydst);
1160	hlist_del(&pol->byidx);
1161	list_del(&pol->walk.all);
1162	net->xfrm.policy_count[dir]--;
1163
1164	return pol;
1165}
1166
1167int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
1168{
1169	struct net *net = xp_net(pol);
1170
1171	write_lock_bh(&net->xfrm.xfrm_policy_lock);
1172	pol = __xfrm_policy_unlink(pol, dir);
1173	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
1174	if (pol) {
1175		xfrm_policy_kill(pol);
1176		return 0;
1177	}
1178	return -ENOENT;
1179}
1180EXPORT_SYMBOL(xfrm_policy_delete);
1181
1182int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1183{
1184	struct net *net = xp_net(pol);
1185	struct xfrm_policy *old_pol;
1186
1187#ifdef CONFIG_XFRM_SUB_POLICY
1188	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
1189		return -EINVAL;
1190#endif
1191
1192	write_lock_bh(&net->xfrm.xfrm_policy_lock);
1193	old_pol = sk->sk_policy[dir];
1194	sk->sk_policy[dir] = pol;
1195	if (pol) {
1196		pol->curlft.add_time = get_seconds();
1197		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
1198		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
1199	}
1200	if (old_pol) {
1201		if (pol)
1202			xfrm_policy_requeue(old_pol, pol);
1203
 1204		/* Unlinking always succeeds. This is the only function
 1205		 * allowed to delete or replace a socket policy.
 1206		 */
1207		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
1208	}
1209	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
1210
1211	if (old_pol) {
1212		xfrm_policy_kill(old_pol);
1213	}
1214	return 0;
1215}
1216
1217static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
1218{
1219	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
1220	struct net *net = xp_net(old);
1221
1222	if (newp) {
1223		newp->selector = old->selector;
1224		if (security_xfrm_policy_clone(old->security,
1225					       &newp->security)) {
1226			kfree(newp);
1227			return NULL;  /* ENOMEM */
1228		}
1229		newp->lft = old->lft;
1230		newp->curlft = old->curlft;
1231		newp->mark = old->mark;
1232		newp->action = old->action;
1233		newp->flags = old->flags;
1234		newp->xfrm_nr = old->xfrm_nr;
1235		newp->index = old->index;
1236		newp->type = old->type;
1237		memcpy(newp->xfrm_vec, old->xfrm_vec,
1238		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
1239		write_lock_bh(&net->xfrm.xfrm_policy_lock);
1240		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
1241		write_unlock_bh(&net->xfrm.xfrm_policy_lock);
1242		xfrm_pol_put(newp);
1243	}
1244	return newp;
1245}
1246
1247int __xfrm_sk_clone_policy(struct sock *sk)
1248{
1249	struct xfrm_policy *p0 = sk->sk_policy[0],
1250			   *p1 = sk->sk_policy[1];
1251
1252	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
1253	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
1254		return -ENOMEM;
1255	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
1256		return -ENOMEM;
1257	return 0;
1258}
1259
1260static int
1261xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
1262	       unsigned short family)
1263{
1264	int err;
1265	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1266
1267	if (unlikely(afinfo == NULL))
1268		return -EINVAL;
1269	err = afinfo->get_saddr(net, local, remote);
1270	xfrm_policy_put_afinfo(afinfo);
1271	return err;
1272}
1273
1274/* Resolve list of templates for the flow, given policy. */
1275
1276static int
1277xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
1278		      struct xfrm_state **xfrm, unsigned short family)
1279{
1280	struct net *net = xp_net(policy);
1281	int nx;
1282	int i, error;
1283	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
1284	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
1285	xfrm_address_t tmp;
1286
1287	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
1288		struct xfrm_state *x;
1289		xfrm_address_t *remote = daddr;
1290		xfrm_address_t *local  = saddr;
1291		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
1292
1293		if (tmpl->mode == XFRM_MODE_TUNNEL ||
1294		    tmpl->mode == XFRM_MODE_BEET) {
1295			remote = &tmpl->id.daddr;
1296			local = &tmpl->saddr;
1297			if (xfrm_addr_any(local, tmpl->encap_family)) {
1298				error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
1299				if (error)
1300					goto fail;
1301				local = &tmp;
1302			}
1303		}
1304
1305		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
1306
1307		if (x && x->km.state == XFRM_STATE_VALID) {
1308			xfrm[nx++] = x;
1309			daddr = remote;
1310			saddr = local;
1311			continue;
1312		}
1313		if (x) {
1314			error = (x->km.state == XFRM_STATE_ERROR ?
1315				 -EINVAL : -EAGAIN);
1316			xfrm_state_put(x);
1317		} else if (error == -ESRCH) {
1318			error = -EAGAIN;
1319		}
1320
1321		if (!tmpl->optional)
1322			goto fail;
1323	}
1324	return nx;
1325
1326fail:
1327	for (nx--; nx >= 0; nx--)
1328		xfrm_state_put(xfrm[nx]);
1329	return error;
1330}
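/* Each template is resolved to a state via xfrm_state_find().  For
 * tunnel/BEET templates the tunnel endpoints replace the flow addresses
 * for the next iteration, so nested tunnels chain naturally.  A missing
 * state maps to -EAGAIN (an acquire is in progress) unless the template
 * is marked optional.
 */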
1331
1332static int
1333xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
1334		  struct xfrm_state **xfrm, unsigned short family)
1335{
1336	struct xfrm_state *tp[XFRM_MAX_DEPTH];
1337	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
1338	int cnx = 0;
1339	int error;
1340	int ret;
1341	int i;
1342
1343	for (i = 0; i < npols; i++) {
1344		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
1345			error = -ENOBUFS;
1346			goto fail;
1347		}
1348
1349		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
1350		if (ret < 0) {
1351			error = ret;
1352			goto fail;
1353		} else
1354			cnx += ret;
1355	}
1356
1357	/* found states are sorted for outbound processing */
1358	if (npols > 1)
1359		xfrm_state_sort(xfrm, tpp, cnx, family);
1360
1361	return cnx;
1362
1363 fail:
1364	for (cnx--; cnx >= 0; cnx--)
1365		xfrm_state_put(tpp[cnx]);
1366	return error;
1367
1368}
1369
1370/* Check that the bundle accepts the flow and its components are
1371 * still valid.
1372 */
1373
1374static inline int xfrm_get_tos(const struct flowi *fl, int family)
1375{
1376	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1377	int tos;
1378
1379	if (!afinfo)
1380		return -EINVAL;
1381
1382	tos = afinfo->get_tos(fl);
1383
1384	xfrm_policy_put_afinfo(afinfo);
1385
1386	return tos;
1387}
1388
1389static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
1390{
1391	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1392	struct dst_entry *dst = &xdst->u.dst;
1393
1394	if (xdst->route == NULL) {
 1395		/* Dummy bundle - if it has xfrms, we could not build
 1396		 * a real bundle because template resolution failed.
 1397		 * We need to retry resolution. */
1398		if (xdst->num_xfrms > 0)
1399			return NULL;
1400	} else if (dst->flags & DST_XFRM_QUEUE) {
1401		return NULL;
1402	} else {
1403		/* Real bundle */
1404		if (stale_bundle(dst))
1405			return NULL;
1406	}
1407
1408	dst_hold(dst);
1409	return flo;
1410}
1411
1412static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
1413{
1414	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1415	struct dst_entry *dst = &xdst->u.dst;
1416
1417	if (!xdst->route)
1418		return 0;
1419	if (stale_bundle(dst))
1420		return 0;
1421
1422	return 1;
1423}
1424
1425static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
1426{
1427	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1428	struct dst_entry *dst = &xdst->u.dst;
1429
1430	dst_free(dst);
1431}
1432
1433static const struct flow_cache_ops xfrm_bundle_fc_ops = {
1434	.get = xfrm_bundle_flo_get,
1435	.check = xfrm_bundle_flo_check,
1436	.delete = xfrm_bundle_flo_delete,
1437};
1438
1439static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1440{
1441	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1442	struct dst_ops *dst_ops;
1443	struct xfrm_dst *xdst;
1444
1445	if (!afinfo)
1446		return ERR_PTR(-EINVAL);
1447
1448	switch (family) {
1449	case AF_INET:
1450		dst_ops = &net->xfrm.xfrm4_dst_ops;
1451		break;
1452#if IS_ENABLED(CONFIG_IPV6)
1453	case AF_INET6:
1454		dst_ops = &net->xfrm.xfrm6_dst_ops;
1455		break;
1456#endif
1457	default:
1458		BUG();
1459	}
1460	xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0);
1461
1462	if (likely(xdst)) {
1463		struct dst_entry *dst = &xdst->u.dst;
1464
1465		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
1466		xdst->flo.ops = &xfrm_bundle_fc_ops;
1467		if (afinfo->init_dst)
1468			afinfo->init_dst(net, xdst);
1469	} else
1470		xdst = ERR_PTR(-ENOBUFS);
1471
1472	xfrm_policy_put_afinfo(afinfo);
1473
1474	return xdst;
1475}
1476
1477static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
1478				 int nfheader_len)
1479{
1480	struct xfrm_policy_afinfo *afinfo =
1481		xfrm_policy_get_afinfo(dst->ops->family);
1482	int err;
1483
1484	if (!afinfo)
1485		return -EINVAL;
1486
1487	err = afinfo->init_path(path, dst, nfheader_len);
1488
1489	xfrm_policy_put_afinfo(afinfo);
1490
1491	return err;
1492}
1493
1494static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
1495				const struct flowi *fl)
1496{
1497	struct xfrm_policy_afinfo *afinfo =
1498		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
1499	int err;
1500
1501	if (!afinfo)
1502		return -EINVAL;
1503
1504	err = afinfo->fill_dst(xdst, dev, fl);
1505
1506	xfrm_policy_put_afinfo(afinfo);
1507
1508	return err;
1509}
1510
1511
1512/* Allocate a chain of dst_entry's, attach known xfrms, calculate
1513 * all the metrics... In short, bundle a bundle.
1514 */
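/* Editor's sketch (not part of the original source): for nx == 2 the
 * loop below produces a chain of the form
 *
 *	dst0 (xfrm[0]) --child--> dst1 (xfrm[1]) --child--> dst (route)
 *
 * Each xfrm_dst records its lower route in xdst->route, dst0->path
 * points at the final routing entry, and header_len/trailer_len
 * accumulate the per-state overhead that the tail loop redistributes
 * over the chain.
 */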
1515
1516static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1517					    struct xfrm_state **xfrm, int nx,
1518					    const struct flowi *fl,
1519					    struct dst_entry *dst)
1520{
1521	struct net *net = xp_net(policy);
1522	unsigned long now = jiffies;
1523	struct net_device *dev;
1524	struct xfrm_mode *inner_mode;
1525	struct dst_entry *dst_prev = NULL;
1526	struct dst_entry *dst0 = NULL;
1527	int i = 0;
1528	int err;
1529	int header_len = 0;
1530	int nfheader_len = 0;
1531	int trailer_len = 0;
1532	int tos;
1533	int family = policy->selector.family;
1534	xfrm_address_t saddr, daddr;
1535
1536	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
1537
1538	tos = xfrm_get_tos(fl, family);
1539	err = tos;
1540	if (tos < 0)
1541		goto put_states;
1542
1543	dst_hold(dst);
1544
1545	for (; i < nx; i++) {
1546		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
1547		struct dst_entry *dst1 = &xdst->u.dst;
1548
1549		err = PTR_ERR(xdst);
1550		if (IS_ERR(xdst)) {
1551			dst_release(dst);
1552			goto put_states;
1553		}
1554
1555		if (xfrm[i]->sel.family == AF_UNSPEC) {
1556			inner_mode = xfrm_ip2inner_mode(xfrm[i],
1557							xfrm_af2proto(family));
1558			if (!inner_mode) {
1559				err = -EAFNOSUPPORT;
1560				dst_release(dst);
1561				goto put_states;
1562			}
1563		} else
1564			inner_mode = xfrm[i]->inner_mode;
1565
1566		if (!dst_prev)
1567			dst0 = dst1;
1568		else {
1569			dst_prev->child = dst_clone(dst1);
1570			dst1->flags |= DST_NOHASH;
1571		}
1572
1573		xdst->route = dst;
1574		dst_copy_metrics(dst1, dst);
1575
1576		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
1577			family = xfrm[i]->props.family;
1578			dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
1579					      family);
1580			err = PTR_ERR(dst);
1581			if (IS_ERR(dst))
1582				goto put_states;
1583		} else
1584			dst_hold(dst);
1585
1586		dst1->xfrm = xfrm[i];
1587		xdst->xfrm_genid = xfrm[i]->genid;
1588
1589		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
1590		dst1->flags |= DST_HOST;
1591		dst1->lastuse = now;
1592
1593		dst1->input = dst_discard;
1594		dst1->output = inner_mode->afinfo->output;
1595
1596		dst1->next = dst_prev;
1597		dst_prev = dst1;
1598
1599		header_len += xfrm[i]->props.header_len;
1600		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
1601			nfheader_len += xfrm[i]->props.header_len;
1602		trailer_len += xfrm[i]->props.trailer_len;
1603	}
1604
1605	dst_prev->child = dst;
1606	dst0->path = dst;
1607
1608	err = -ENODEV;
1609	dev = dst->dev;
1610	if (!dev)
1611		goto free_dst;
1612
1613	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
1614	xfrm_init_pmtu(dst_prev);
1615
1616	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
1617		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
1618
1619		err = xfrm_fill_dst(xdst, dev, fl);
1620		if (err)
1621			goto free_dst;
1622
1623		dst_prev->header_len = header_len;
1624		dst_prev->trailer_len = trailer_len;
1625		header_len -= xdst->u.dst.xfrm->props.header_len;
1626		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
1627	}
1628
1629out:
1630	return dst0;
1631
1632put_states:
1633	for (; i < nx; i++)
1634		xfrm_state_put(xfrm[i]);
1635free_dst:
1636	if (dst0)
1637		dst_free(dst0);
1638	dst0 = ERR_PTR(err);
1639	goto out;
1640}
1641
1642#ifdef CONFIG_XFRM_SUB_POLICY
1643static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
1644{
1645	if (!*target) {
1646		*target = kmalloc(size, GFP_ATOMIC);
1647		if (!*target)
1648			return -ENOMEM;
1649	}
1650
1651	memcpy(*target, src, size);
1652	return 0;
1653}
1654#endif
1655
1656static int xfrm_dst_update_parent(struct dst_entry *dst,
1657				  const struct xfrm_selector *sel)
1658{
1659#ifdef CONFIG_XFRM_SUB_POLICY
1660	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1661	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
1662				   sel, sizeof(*sel));
1663#else
1664	return 0;
1665#endif
1666}
1667
1668static int xfrm_dst_update_origin(struct dst_entry *dst,
1669				  const struct flowi *fl)
1670{
1671#ifdef CONFIG_XFRM_SUB_POLICY
1672	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1673	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
1674#else
1675	return 0;
1676#endif
1677}
1678
1679static int xfrm_expand_policies(const struct flowi *fl, u16 family,
1680				struct xfrm_policy **pols,
1681				int *num_pols, int *num_xfrms)
1682{
1683	int i;
1684
1685	if (*num_pols == 0 || !pols[0]) {
1686		*num_pols = 0;
1687		*num_xfrms = 0;
1688		return 0;
1689	}
1690	if (IS_ERR(pols[0]))
1691		return PTR_ERR(pols[0]);
1692
1693	*num_xfrms = pols[0]->xfrm_nr;
1694
1695#ifdef CONFIG_XFRM_SUB_POLICY
1696	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
1697	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
1698		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
1699						    XFRM_POLICY_TYPE_MAIN,
1700						    fl, family,
1701						    XFRM_POLICY_OUT);
1702		if (pols[1]) {
1703			if (IS_ERR(pols[1])) {
1704				xfrm_pols_put(pols, *num_pols);
1705				return PTR_ERR(pols[1]);
1706			}
1707			(*num_pols)++;
1708			(*num_xfrms) += pols[1]->xfrm_nr;
1709		}
1710	}
1711#endif
1712	for (i = 0; i < *num_pols; i++) {
1713		if (pols[i]->action != XFRM_POLICY_ALLOW) {
1714			*num_xfrms = -1;
1715			break;
1716		}
1717	}
1718
1719	return 0;
1720
1721}
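/* Editor's worked example (assumes CONFIG_XFRM_SUB_POLICY): if pols[0]
 * is a sub policy with xfrm_nr == 1 and a matching main policy with
 * xfrm_nr == 2 exists, the expansion above leaves *num_pols == 2 and
 * *num_xfrms == 3; any non-ALLOW policy in the set forces *num_xfrms
 * to -1 to mark the flow as blocked.
 */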
1722
1723static struct xfrm_dst *
1724xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1725			       const struct flowi *fl, u16 family,
1726			       struct dst_entry *dst_orig)
1727{
1728	struct net *net = xp_net(pols[0]);
1729	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
1730	struct dst_entry *dst;
1731	struct xfrm_dst *xdst;
1732	int err;
1733
1734	/* Try to instantiate a bundle */
1735	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
1736	if (err <= 0) {
1737		if (err != 0 && err != -EAGAIN)
1738			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1739		return ERR_PTR(err);
1740	}
1741
1742	dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
1743	if (IS_ERR(dst)) {
1744		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
1745		return ERR_CAST(dst);
1746	}
1747
1748	xdst = (struct xfrm_dst *)dst;
1749	xdst->num_xfrms = err;
1750	if (num_pols > 1)
1751		err = xfrm_dst_update_parent(dst, &pols[1]->selector);
1752	else
1753		err = xfrm_dst_update_origin(dst, fl);
1754	if (unlikely(err)) {
1755		dst_free(dst);
1756		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1757		return ERR_PTR(err);
1758	}
1759
1760	xdst->num_pols = num_pols;
1761	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
1762	xdst->policy_genid = atomic_read(&pols[0]->genid);
1763
1764	return xdst;
1765}
1766
1767static void xfrm_policy_queue_process(unsigned long arg)
1768{
1769	int err = 0;
1770	struct sk_buff *skb;
1771	struct sock *sk;
1772	struct dst_entry *dst;
1773	struct xfrm_policy *pol = (struct xfrm_policy *)arg;
1774	struct xfrm_policy_queue *pq = &pol->polq;
1775	struct flowi fl;
1776	struct sk_buff_head list;
1777
1778	spin_lock(&pq->hold_queue.lock);
1779	skb = skb_peek(&pq->hold_queue);
1780	if (!skb) {
1781		spin_unlock(&pq->hold_queue.lock);
1782		goto out;
1783	}
1784	dst = skb_dst(skb);
1785	sk = skb->sk;
1786	xfrm_decode_session(skb, &fl, dst->ops->family);
1787	spin_unlock(&pq->hold_queue.lock);
1788
1789	dst_hold(dst->path);
1790	dst = xfrm_lookup(xp_net(pol), dst->path, &fl,
1791			  sk, 0);
1792	if (IS_ERR(dst))
1793		goto purge_queue;
1794
1795	if (dst->flags & DST_XFRM_QUEUE) {
1796		dst_release(dst);
1797
1798		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
1799			goto purge_queue;
1800
1801		pq->timeout = pq->timeout << 1;
1802		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
1803			xfrm_pol_hold(pol);
1804			goto out;
1805	}
1806
1807	dst_release(dst);
1808
1809	__skb_queue_head_init(&list);
1810
1811	spin_lock(&pq->hold_queue.lock);
1812	pq->timeout = 0;
1813	skb_queue_splice_init(&pq->hold_queue, &list);
1814	spin_unlock(&pq->hold_queue.lock);
1815
1816	while (!skb_queue_empty(&list)) {
1817		skb = __skb_dequeue(&list);
1818
1819		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
1820		dst_hold(skb_dst(skb)->path);
1821		dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
1822				  &fl, skb->sk, 0);
1823		if (IS_ERR(dst)) {
1824			kfree_skb(skb);
1825			continue;
1826		}
1827
1828		nf_reset(skb);
1829		skb_dst_drop(skb);
1830		skb_dst_set(skb, dst);
1831
1832		err = dst_output(skb);
1833	}
1834
1835out:
1836	xfrm_pol_put(pol);
1837	return;
1838
1839purge_queue:
1840	pq->timeout = 0;
1841	xfrm_queue_purge(&pq->hold_queue);
1842	xfrm_pol_put(pol);
1843}
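/* Editor's note on the retry backoff above: pq->timeout starts at
 * XFRM_QUEUE_TMO_MIN (HZ/10, i.e. 100ms at HZ=1000) and doubles on
 * every pass while the flow still resolves to a DST_XFRM_QUEUE dummy
 * bundle; once it reaches XFRM_QUEUE_TMO_MAX (60*HZ) the hold queue is
 * purged instead of being rescheduled.
 */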
1844
1845static int xdst_queue_output(struct sock *sk, struct sk_buff *skb)
1846{
1847	unsigned long sched_next;
1848	struct dst_entry *dst = skb_dst(skb);
1849	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
1850	struct xfrm_policy *pol = xdst->pols[0];
1851	struct xfrm_policy_queue *pq = &pol->polq;
1852	const struct sk_buff *fclone = skb + 1;
1853
1854	if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
1855		     fclone->fclone == SKB_FCLONE_CLONE)) {
1856		kfree_skb(skb);
1857		return 0;
1858	}
1859
1860	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
1861		kfree_skb(skb);
1862		return -EAGAIN;
1863	}
1864
1865	skb_dst_force(skb);
1866
1867	spin_lock_bh(&pq->hold_queue.lock);
1868
1869	if (!pq->timeout)
1870		pq->timeout = XFRM_QUEUE_TMO_MIN;
1871
1872	sched_next = jiffies + pq->timeout;
1873
1874	if (del_timer(&pq->hold_timer)) {
1875		if (time_before(pq->hold_timer.expires, sched_next))
1876			sched_next = pq->hold_timer.expires;
1877		xfrm_pol_put(pol);
1878	}
1879
1880	__skb_queue_tail(&pq->hold_queue, skb);
1881	if (!mod_timer(&pq->hold_timer, sched_next))
1882		xfrm_pol_hold(pol);
1883
1884	spin_unlock_bh(&pq->hold_queue.lock);
1885
1886	return 0;
1887}
1888
1889static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
1890						 struct dst_entry *dst,
1891						 const struct flowi *fl,
1892						 int num_xfrms,
1893						 u16 family)
1894{
1895	int err;
1896	struct net_device *dev;
1897	struct dst_entry *dst1;
1898	struct xfrm_dst *xdst;
1899
1900	xdst = xfrm_alloc_dst(net, family);
1901	if (IS_ERR(xdst))
1902		return xdst;
1903
1904	if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0)
1905		return xdst;
1906
1907	dst1 = &xdst->u.dst;
1908	dst_hold(dst);
1909	xdst->route = dst;
1910
1911	dst_copy_metrics(dst1, dst);
1912
1913	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
1914	dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
1915	dst1->lastuse = jiffies;
1916
1917	dst1->input = dst_discard;
1918	dst1->output = xdst_queue_output;
1919
1920	dst_hold(dst);
1921	dst1->child = dst;
1922	dst1->path = dst;
1923
1924	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
1925
1926	err = -ENODEV;
1927	dev = dst->dev;
1928	if (!dev)
1929		goto free_dst;
1930
1931	err = xfrm_fill_dst(xdst, dev, fl);
1932	if (err)
1933		goto free_dst;
1934
1935out:
1936	return xdst;
1937
1938free_dst:
1939	dst_release(dst1);
1940	xdst = ERR_PTR(err);
1941	goto out;
1942}
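/* Editor's note: the DST_XFRM_QUEUE flag set above is what steers
 * packets into xdst_queue_output() below - they wait on the policy's
 * hold queue until the required xfrm_states appear. With
 * sysctl_larval_drop enabled no queueing dst is built at all.
 */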
1943
1944static struct flow_cache_object *
1945xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
1946		   struct flow_cache_object *oldflo, void *ctx)
1947{
1948	struct dst_entry *dst_orig = (struct dst_entry *)ctx;
1949	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1950	struct xfrm_dst *xdst, *new_xdst;
1951	int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
1952
1953	/* Check if the policies from the old bundle are usable */
1954	xdst = NULL;
1955	if (oldflo) {
1956		xdst = container_of(oldflo, struct xfrm_dst, flo);
1957		num_pols = xdst->num_pols;
1958		num_xfrms = xdst->num_xfrms;
1959		pol_dead = 0;
1960		for (i = 0; i < num_pols; i++) {
1961			pols[i] = xdst->pols[i];
1962			pol_dead |= pols[i]->walk.dead;
1963		}
1964		if (pol_dead) {
1965			dst_free(&xdst->u.dst);
1966			xdst = NULL;
1967			num_pols = 0;
1968			num_xfrms = 0;
1969			oldflo = NULL;
1970		}
1971	}
1972
1973	/* Resolve policies to use if we couldn't get them from
1974	 * the previous cache entry */
1975	if (xdst == NULL) {
1976		num_pols = 1;
1977		pols[0] = __xfrm_policy_lookup(net, fl, family,
1978					       flow_to_policy_dir(dir));
1979		err = xfrm_expand_policies(fl, family, pols,
1980					   &num_pols, &num_xfrms);
1981		if (err < 0)
1982			goto inc_error;
1983		if (num_pols == 0)
1984			return NULL;
1985		if (num_xfrms <= 0)
1986			goto make_dummy_bundle;
1987	}
1988
1989	new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
1990	if (IS_ERR(new_xdst)) {
1991		err = PTR_ERR(new_xdst);
1992		if (err != -EAGAIN)
1993			goto error;
1994		if (oldflo == NULL)
1995			goto make_dummy_bundle;
1996		dst_hold(&xdst->u.dst);
1997		return oldflo;
1998	} else if (new_xdst == NULL) {
1999		num_xfrms = 0;
2000		if (oldflo == NULL)
2001			goto make_dummy_bundle;
2002		xdst->num_xfrms = 0;
2003		dst_hold(&xdst->u.dst);
2004		return oldflo;
2005	}
2006
2007	/* Kill the previous bundle */
2008	if (xdst) {
2009		/* The policies were stolen for the newly generated bundle */
2010		xdst->num_pols = 0;
2011		dst_free(&xdst->u.dst);
2012	}
2013
2014	/* The flow cache does not hold a reference; it dst_free()'s.
2015	 * But we do need to return one reference for the original caller */
2016	dst_hold(&new_xdst->u.dst);
2017	return &new_xdst->flo;
2018
2019make_dummy_bundle:
2020	/* We found policies, but there are no bundles to instantiate:
2021	 * either the policy blocks, it has no transformations, or
2022	 * we could not build a template (no xfrm_states). */
2023	xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family);
2024	if (IS_ERR(xdst)) {
2025		xfrm_pols_put(pols, num_pols);
2026		return ERR_CAST(xdst);
2027	}
2028	xdst->num_pols = num_pols;
2029	xdst->num_xfrms = num_xfrms;
2030	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2031
2032	dst_hold(&xdst->u.dst);
2033	return &xdst->flo;
2034
2035inc_error:
2036	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2037error:
2038	if (xdst != NULL)
2039		dst_free(&xdst->u.dst);
2040	else
2041		xfrm_pols_put(pols, num_pols);
2042	return ERR_PTR(err);
2043}
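/* Editor's summary of the resolution above: the lookup either
 * revalidates and reuses the cached bundle (oldflo), replaces it with
 * a freshly built one (stealing the old bundle's policies), or parks
 * the flow on a dummy bundle when policies exist but states don't yet.
 */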
2044
2045static struct dst_entry *make_blackhole(struct net *net, u16 family,
2046					struct dst_entry *dst_orig)
2047{
2048	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2049	struct dst_entry *ret;
2050
2051	if (!afinfo) {
2052		dst_release(dst_orig);
2053		return ERR_PTR(-EINVAL);
2054	} else {
2055		ret = afinfo->blackhole_route(net, dst_orig);
2056	}
2057	xfrm_policy_put_afinfo(afinfo);
2058
2059	return ret;
2060}
2061
2062/* Main function: finds/creates a bundle for a given flow.
2063 *
2064 * At the moment we eat a raw IP route, mostly to speed up lookups
2065 * on interfaces with IPsec disabled.
2066 */
2067struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2068			      const struct flowi *fl,
2069			      struct sock *sk, int flags)
2070{
2071	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2072	struct flow_cache_object *flo;
2073	struct xfrm_dst *xdst;
2074	struct dst_entry *dst, *route;
2075	u16 family = dst_orig->ops->family;
2076	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
2077	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
2078
2079	dst = NULL;
2080	xdst = NULL;
2081	route = NULL;
2082
2083	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
2084		num_pols = 1;
2085		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
2086		err = xfrm_expand_policies(fl, family, pols,
2087					   &num_pols, &num_xfrms);
2088		if (err < 0)
2089			goto dropdst;
2090
2091		if (num_pols) {
2092			if (num_xfrms <= 0) {
2093				drop_pols = num_pols;
2094				goto no_transform;
2095			}
2096
2097			xdst = xfrm_resolve_and_create_bundle(
2098					pols, num_pols, fl,
2099					family, dst_orig);
2100			if (IS_ERR(xdst)) {
2101				xfrm_pols_put(pols, num_pols);
2102				err = PTR_ERR(xdst);
2103				goto dropdst;
2104			} else if (xdst == NULL) {
2105				num_xfrms = 0;
2106				drop_pols = num_pols;
2107				goto no_transform;
2108			}
2109
2110			route = xdst->route;
2111		}
2112	}
2113
2114	if (xdst == NULL) {
2115		/* To accelerate a bit...  */
2116		if ((dst_orig->flags & DST_NOXFRM) ||
2117		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
2118			goto nopol;
2119
2120		flo = flow_cache_lookup(net, fl, family, dir,
2121					xfrm_bundle_lookup, dst_orig);
2122		if (flo == NULL)
2123			goto nopol;
2124		if (IS_ERR(flo)) {
2125			err = PTR_ERR(flo);
2126			goto dropdst;
2127		}
2128		xdst = container_of(flo, struct xfrm_dst, flo);
2129
2130		num_pols = xdst->num_pols;
2131		num_xfrms = xdst->num_xfrms;
2132		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
2133		route = xdst->route;
2134	}
2135
2136	dst = &xdst->u.dst;
2137	if (route == NULL && num_xfrms > 0) {
2138		/* The only case when xfrm_bundle_lookup() returns a
2139		 * bundle with a null route is when the template could
2140		 * not be resolved. Policies are there, but the bundle
2141		 * could not be created since we don't yet have the
2142		 * xfrm_states. We need to wait for the KM to negotiate
2143		 * new SAs or bail out with an error. */
2144		if (net->xfrm.sysctl_larval_drop) {
2145			dst_release(dst);
2146			xfrm_pols_put(pols, drop_pols);
2147			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2148
2149			return make_blackhole(net, family, dst_orig);
2150		}
2151
2152		err = -EAGAIN;
2153
2154		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2155		goto error;
2156	}
2157
2158no_transform:
2159	if (num_pols == 0)
2160		goto nopol;
2161
2162	if ((flags & XFRM_LOOKUP_ICMP) &&
2163	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
2164		err = -ENOENT;
2165		goto error;
2166	}
2167
2168	for (i = 0; i < num_pols; i++)
2169		pols[i]->curlft.use_time = get_seconds();
2170
2171	if (num_xfrms < 0) {
2172		/* Prohibit the flow */
2173		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
2174		err = -EPERM;
2175		goto error;
2176	} else if (num_xfrms > 0) {
2177		/* Flow transformed */
2178		dst_release(dst_orig);
2179	} else {
2180		/* Flow passes untransformed */
2181		dst_release(dst);
2182		dst = dst_orig;
2183	}
2184ok:
2185	xfrm_pols_put(pols, drop_pols);
2186	if (dst && dst->xfrm &&
2187	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
2188		dst->flags |= DST_XFRM_TUNNEL;
2189	return dst;
2190
2191nopol:
2192	if (!(flags & XFRM_LOOKUP_ICMP)) {
2193		dst = dst_orig;
2194		goto ok;
2195	}
2196	err = -ENOENT;
2197error:
2198	dst_release(dst);
2199dropdst:
2200	dst_release(dst_orig);
2201	xfrm_pols_put(pols, drop_pols);
2202	return ERR_PTR(err);
2203}
2204EXPORT_SYMBOL(xfrm_lookup);
2205
2206static inline int
2207xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
2208{
2209	struct xfrm_state *x;
2210
2211	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
2212		return 0;
2213	x = skb->sp->xvec[idx];
2214	if (!x->type->reject)
2215		return 0;
2216	return x->type->reject(x, skb, fl);
2217}
2218
2219/* When the skb is transformed back to its "native" form, we have to
2220 * check policy restrictions. At the moment we do this in a maximally
2221 * stupid way. Shame on me. :-) Of course, connected sockets must
2222 * have their policy cached at them.
2223 */
2224
2225static inline int
2226xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
2227	      unsigned short family)
2228{
2229	if (xfrm_state_kern(x))
2230		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
2231	return	x->id.proto == tmpl->id.proto &&
2232		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
2233		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
2234		x->props.mode == tmpl->mode &&
2235		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
2236		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
2237		!(x->props.mode != XFRM_MODE_TRANSPORT &&
2238		  xfrm_state_addr_cmp(tmpl, x, family));
2239}
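/* Editor's example: a template {proto = ESP, spi = 0, reqid = 5,
 * mode = tunnel} accepts any ESP tunnel state with reqid 5 and
 * matching addresses - a zero SPI or zero reqid in the template acts
 * as a wildcard in the comparisons above.
 */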
2240
2241/*
2242 * 0 or more is returned when validation succeeds: either the bypass
2243 * index (for an optional transport-mode template) or the next index
2244 * after the secpath state matched against the template.
2245 * -1 is returned when no matching template is found.
2246 * Otherwise "-2 - errored_index" is returned.
2247 */
2248static inline int
2249xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
2250	       unsigned short family)
2251{
2252	int idx = start;
2253
2254	if (tmpl->optional) {
2255		if (tmpl->mode == XFRM_MODE_TRANSPORT)
2256			return start;
2257	} else
2258		start = -1;
2259	for (; idx < sp->len; idx++) {
2260		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
2261			return ++idx;
2262		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
2263			if (start == -1)
2264				start = -2-idx;
2265			break;
2266		}
2267	}
2268	return start;
2269}
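/* Editor's worked example: for a required tunnel-mode template matched
 * by secpath entry 0, xfrm_policy_ok() returns 1 (idx + 1); for an
 * optional transport-mode template it returns start untouched (bypass);
 * a non-matching tunnel-mode secpath entry at idx yields "-2 - idx" so
 * the caller can reject that exact state.
 */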
2270
2271int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
2272			  unsigned int family, int reverse)
2273{
2274	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2275	int err;
2276
2277	if (unlikely(afinfo == NULL))
2278		return -EAFNOSUPPORT;
2279
2280	afinfo->decode_session(skb, fl, reverse);
2281	err = security_xfrm_decode_session(skb, &fl->flowi_secid);
2282	xfrm_policy_put_afinfo(afinfo);
2283	return err;
2284}
2285EXPORT_SYMBOL(__xfrm_decode_session);
2286
2287static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
2288{
2289	for (; k < sp->len; k++) {
2290		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
2291			*idxp = k;
2292			return 1;
2293		}
2294	}
2295
2296	return 0;
2297}
2298
2299int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2300			unsigned short family)
2301{
2302	struct net *net = dev_net(skb->dev);
2303	struct xfrm_policy *pol;
2304	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2305	int npols = 0;
2306	int xfrm_nr;
2307	int pi;
2308	int reverse;
2309	struct flowi fl;
2310	u8 fl_dir;
2311	int xerr_idx = -1;
2312
2313	reverse = dir & ~XFRM_POLICY_MASK;
2314	dir &= XFRM_POLICY_MASK;
2315	fl_dir = policy_to_flow_dir(dir);
2316
2317	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
2318		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
2319		return 0;
2320	}
2321
2322	nf_nat_decode_session(skb, &fl, family);
2323
2324	/* First, check used SA against their selectors. */
2325	if (skb->sp) {
2326		int i;
2327
2328		for (i = skb->sp->len-1; i >= 0; i--) {
2329			struct xfrm_state *x = skb->sp->xvec[i];
2330			if (!xfrm_selector_match(&x->sel, &fl, family)) {
2331				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
2332				return 0;
2333			}
2334		}
2335	}
2336
2337	pol = NULL;
2338	if (sk && sk->sk_policy[dir]) {
2339		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
2340		if (IS_ERR(pol)) {
2341			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2342			return 0;
2343		}
2344	}
2345
2346	if (!pol) {
2347		struct flow_cache_object *flo;
2348
2349		flo = flow_cache_lookup(net, &fl, family, fl_dir,
2350					xfrm_policy_lookup, NULL);
2351		if (IS_ERR_OR_NULL(flo))
2352			pol = ERR_CAST(flo);
2353		else
2354			pol = container_of(flo, struct xfrm_policy, flo);
2355	}
2356
2357	if (IS_ERR(pol)) {
2358		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2359		return 0;
2360	}
2361
2362	if (!pol) {
2363		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
2364			xfrm_secpath_reject(xerr_idx, skb, &fl);
2365			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
2366			return 0;
2367		}
2368		return 1;
2369	}
2370
2371	pol->curlft.use_time = get_seconds();
2372
2373	pols[0] = pol;
2374	npols++;
2375#ifdef CONFIG_XFRM_SUB_POLICY
2376	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2377		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
2378						    &fl, family,
2379						    XFRM_POLICY_IN);
2380		if (pols[1]) {
2381			if (IS_ERR(pols[1])) {
2382				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2383				return 0;
2384			}
2385			pols[1]->curlft.use_time = get_seconds();
2386			npols++;
2387		}
2388	}
2389#endif
2390
2391	if (pol->action == XFRM_POLICY_ALLOW) {
2392		struct sec_path *sp;
2393		static struct sec_path dummy;
2394		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
2395		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
2396		struct xfrm_tmpl **tpp = tp;
2397		int ti = 0;
2398		int i, k;
2399
2400		if ((sp = skb->sp) == NULL)
2401			sp = &dummy;
2402
2403		for (pi = 0; pi < npols; pi++) {
2404			if (pols[pi] != pol &&
2405			    pols[pi]->action != XFRM_POLICY_ALLOW) {
2406				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2407				goto reject;
2408			}
2409			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
2410				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
2411				goto reject_error;
2412			}
2413			for (i = 0; i < pols[pi]->xfrm_nr; i++)
2414				tpp[ti++] = &pols[pi]->xfrm_vec[i];
2415		}
2416		xfrm_nr = ti;
2417		if (npols > 1) {
2418			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net);
2419			tpp = stp;
2420		}
2421
2422		/* For each tunnel xfrm, find the first matching tmpl.
2423		 * For each tmpl before that, find the corresponding xfrm.
2424		 * Order is _important_. Later we will implement
2425		 * some barriers, but at the moment barriers
2426		 * are implied between every two transformations.
2427		 */
2428		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
2429			k = xfrm_policy_ok(tpp[i], sp, k, family);
2430			if (k < 0) {
2431				if (k < -1)
2432					/* "-2 - errored_index" returned */
2433					xerr_idx = -(2+k);
2434				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2435				goto reject;
2436			}
2437		}
2438
2439		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
2440			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2441			goto reject;
2442		}
2443
2444		xfrm_pols_put(pols, npols);
2445		return 1;
2446	}
2447	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2448
2449reject:
2450	xfrm_secpath_reject(xerr_idx, skb, &fl);
2451reject_error:
2452	xfrm_pols_put(pols, npols);
2453	return 0;
2454}
2455EXPORT_SYMBOL(__xfrm_policy_check);
2456
2457int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2458{
2459	struct net *net = dev_net(skb->dev);
2460	struct flowi fl;
2461	struct dst_entry *dst;
2462	int res = 1;
2463
2464	if (xfrm_decode_session(skb, &fl, family) < 0) {
2465		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2466		return 0;
2467	}
2468
2469	skb_dst_force(skb);
2470
2471	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
2472	if (IS_ERR(dst)) {
2473		res = 0;
2474		dst = NULL;
2475	}
2476	skb_dst_set(skb, dst);
2477	return res;
2478}
2479EXPORT_SYMBOL(__xfrm_route_forward);
2480
2481/* Optimize later using cookies and generation ids. */
2482
2483static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
2484{
2485	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
2486	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
2487	 * get validated by dst_ops->check on every use.  We do this
2488	 * because when a normal route referenced by an XFRM dst is
2489	 * obsoleted we do not go looking around for all parent
2490	 * referencing XFRM dsts so that we can invalidate them.  It
2491	 * is just too much work.  Instead we make the checks here on
2492	 * every use.  For example:
2493	 *
2494	 *	XFRM dst A --> IPv4 dst X
2495	 *
2496	 * X is the "xdst->route" of A (X is also the "dst->path" of A
2497	 * in this example).  If X is marked obsolete, "A" will not
2498	 * notice.  That's what we are validating here via the
2499	 * stale_bundle() check.
2500	 *
2501	 * When a policy's bundle is pruned, we dst_free() the XFRM
2502	 * dst which causes its ->obsolete field to be set to
2503	 * DST_OBSOLETE_DEAD.  If an XFRM dst has been pruned like
2504	 * this, we want to force a new route lookup.
2505	 */
2506	if (dst->obsolete < 0 && !stale_bundle(dst))
2507		return dst;
2508
2509	return NULL;
2510}
2511
2512static int stale_bundle(struct dst_entry *dst)
2513{
2514	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
2515}
2516
2517void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
2518{
2519	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
2520		dst->dev = dev_net(dev)->loopback_dev;
2521		dev_hold(dst->dev);
2522		dev_put(dev);
2523	}
2524}
2525EXPORT_SYMBOL(xfrm_dst_ifdown);
2526
2527static void xfrm_link_failure(struct sk_buff *skb)
2528{
2529	/* Impossible. Such a dst must be popped before it reaches the point of failure. */
2530}
2531
2532static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
2533{
2534	if (dst) {
2535		if (dst->obsolete) {
2536			dst_release(dst);
2537			dst = NULL;
2538		}
2539	}
2540	return dst;
2541}
2542
2543void xfrm_garbage_collect(struct net *net)
2544{
2545	flow_cache_flush(net);
2546}
2547EXPORT_SYMBOL(xfrm_garbage_collect);
2548
2549static void xfrm_garbage_collect_deferred(struct net *net)
2550{
2551	flow_cache_flush_deferred(net);
2552}
2553
2554static void xfrm_init_pmtu(struct dst_entry *dst)
2555{
2556	do {
2557		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2558		u32 pmtu, route_mtu_cached;
2559
2560		pmtu = dst_mtu(dst->child);
2561		xdst->child_mtu_cached = pmtu;
2562
2563		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
2564
2565		route_mtu_cached = dst_mtu(xdst->route);
2566		xdst->route_mtu_cached = route_mtu_cached;
2567
2568		if (pmtu > route_mtu_cached)
2569			pmtu = route_mtu_cached;
2570
2571		dst_metric_set(dst, RTAX_MTU, pmtu);
2572	} while ((dst = dst->next));
2573}
2574
2575/* Check that the bundle accepts the flow and its components are
2576 * still valid.
2577 */
2578
2579static int xfrm_bundle_ok(struct xfrm_dst *first)
2580{
2581	struct dst_entry *dst = &first->u.dst;
2582	struct xfrm_dst *last;
2583	u32 mtu;
2584
2585	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
2586	    (dst->dev && !netif_running(dst->dev)))
2587		return 0;
2588
2589	if (dst->flags & DST_XFRM_QUEUE)
2590		return 1;
2591
2592	last = NULL;
2593
2594	do {
2595		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2596
2597		if (dst->xfrm->km.state != XFRM_STATE_VALID)
2598			return 0;
2599		if (xdst->xfrm_genid != dst->xfrm->genid)
2600			return 0;
2601		if (xdst->num_pols > 0 &&
2602		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
2603			return 0;
2604
2605		mtu = dst_mtu(dst->child);
2606		if (xdst->child_mtu_cached != mtu) {
2607			last = xdst;
2608			xdst->child_mtu_cached = mtu;
2609		}
2610
2611		if (!dst_check(xdst->route, xdst->route_cookie))
2612			return 0;
2613		mtu = dst_mtu(xdst->route);
2614		if (xdst->route_mtu_cached != mtu) {
2615			last = xdst;
2616			xdst->route_mtu_cached = mtu;
2617		}
2618
2619		dst = dst->child;
2620	} while (dst->xfrm);
2621
2622	if (likely(!last))
2623		return 1;
2624
2625	mtu = last->child_mtu_cached;
2626	for (;;) {
2627		dst = &last->u.dst;
2628
2629		mtu = xfrm_state_mtu(dst->xfrm, mtu);
2630		if (mtu > last->route_mtu_cached)
2631			mtu = last->route_mtu_cached;
2632		dst_metric_set(dst, RTAX_MTU, mtu);
2633
2634		if (last == first)
2635			break;
2636
2637		last = (struct xfrm_dst *)last->u.dst.next;
2638		last->child_mtu_cached = mtu;
2639	}
2640
2641	return 1;
2642}
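/* Editor's sketch of the MTU fix-up above: if the innermost child MTU
 * shrinks (say 1500 -> 1400), `last` ends up pointing at the deepest
 * xfrm_dst whose cached value changed; the final loop then walks back
 * out towards `first`, clamping each level with xfrm_state_mtu() and
 * the cached route MTU so the bundle's RTAX_MTU again reflects the
 * per-state overhead.
 */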
2643
2644static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
2645{
2646	return dst_metric_advmss(dst->path);
2647}
2648
2649static unsigned int xfrm_mtu(const struct dst_entry *dst)
2650{
2651	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2652
2653	return mtu ? : dst_mtu(dst->path);
2654}
2655
2656static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
2657					   struct sk_buff *skb,
2658					   const void *daddr)
2659{
2660	return dst->path->ops->neigh_lookup(dst, skb, daddr);
2661}
2662
2663int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2664{
2665	struct net *net;
2666	int err = 0;
2667	if (unlikely(afinfo == NULL))
2668		return -EINVAL;
2669	if (unlikely(afinfo->family >= NPROTO))
2670		return -EAFNOSUPPORT;
2671	spin_lock(&xfrm_policy_afinfo_lock);
2672	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
2673		err = -ENOBUFS;
2674	else {
2675		struct dst_ops *dst_ops = afinfo->dst_ops;
2676		if (likely(dst_ops->kmem_cachep == NULL))
2677			dst_ops->kmem_cachep = xfrm_dst_cache;
2678		if (likely(dst_ops->check == NULL))
2679			dst_ops->check = xfrm_dst_check;
2680		if (likely(dst_ops->default_advmss == NULL))
2681			dst_ops->default_advmss = xfrm_default_advmss;
2682		if (likely(dst_ops->mtu == NULL))
2683			dst_ops->mtu = xfrm_mtu;
2684		if (likely(dst_ops->negative_advice == NULL))
2685			dst_ops->negative_advice = xfrm_negative_advice;
2686		if (likely(dst_ops->link_failure == NULL))
2687			dst_ops->link_failure = xfrm_link_failure;
2688		if (likely(dst_ops->neigh_lookup == NULL))
2689			dst_ops->neigh_lookup = xfrm_neigh_lookup;
2690		if (likely(afinfo->garbage_collect == NULL))
2691			afinfo->garbage_collect = xfrm_garbage_collect_deferred;
2692		rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
2693	}
2694	spin_unlock(&xfrm_policy_afinfo_lock);
2695
2696	rtnl_lock();
2697	for_each_net(net) {
2698		struct dst_ops *xfrm_dst_ops;
2699
2700		switch (afinfo->family) {
2701		case AF_INET:
2702			xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
2703			break;
2704#if IS_ENABLED(CONFIG_IPV6)
2705		case AF_INET6:
2706			xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
2707			break;
2708#endif
2709		default:
2710			BUG();
2711		}
2712		*xfrm_dst_ops = *afinfo->dst_ops;
2713	}
2714	rtnl_unlock();
2715
2716	return err;
2717}
2718EXPORT_SYMBOL(xfrm_policy_register_afinfo);
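/* Hypothetical usage sketch (editor's note, mirroring what a
 * per-family module is expected to do):
 *
 *	static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
 *		.family		= AF_INET,
 *		.dst_ops	= &xfrm4_dst_ops,
 *		...
 *	};
 *	xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
 *
 * Any dst_ops hook the caller leaves NULL is defaulted to the generic
 * xfrm_* helper above before the afinfo pointer is published.
 */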
2719
2720int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
2721{
2722	int err = 0;
2723	if (unlikely(afinfo == NULL))
2724		return -EINVAL;
2725	if (unlikely(afinfo->family >= NPROTO))
2726		return -EAFNOSUPPORT;
2727	spin_lock(&xfrm_policy_afinfo_lock);
2728	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
2729		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
2730			err = -EINVAL;
2731		else
2732			RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family],
2733					 NULL);
2734	}
2735	spin_unlock(&xfrm_policy_afinfo_lock);
2736	if (!err) {
2737		struct dst_ops *dst_ops = afinfo->dst_ops;
2738
2739		synchronize_rcu();
2740
2741		dst_ops->kmem_cachep = NULL;
2742		dst_ops->check = NULL;
2743		dst_ops->negative_advice = NULL;
2744		dst_ops->link_failure = NULL;
2745		afinfo->garbage_collect = NULL;
2746	}
2747	return err;
2748}
2749EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
2750
2751static void __net_init xfrm_dst_ops_init(struct net *net)
2752{
2753	struct xfrm_policy_afinfo *afinfo;
2754
2755	rcu_read_lock();
2756	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]);
2757	if (afinfo)
2758		net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
2759#if IS_ENABLED(CONFIG_IPV6)
2760	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]);
2761	if (afinfo)
2762		net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
2763#endif
2764	rcu_read_unlock();
2765}
2766
2767static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
2768{
2769	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2770
2771	switch (event) {
2772	case NETDEV_DOWN:
2773		xfrm_garbage_collect(dev_net(dev));
2774	}
2775	return NOTIFY_DONE;
2776}
2777
2778static struct notifier_block xfrm_dev_notifier = {
2779	.notifier_call	= xfrm_dev_event,
2780};
2781
2782#ifdef CONFIG_XFRM_STATISTICS
2783static int __net_init xfrm_statistics_init(struct net *net)
2784{
2785	int rv;
2786
2787	if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
2788			  sizeof(struct linux_xfrm_mib),
2789			  __alignof__(struct linux_xfrm_mib)) < 0)
2790		return -ENOMEM;
2791	rv = xfrm_proc_init(net);
2792	if (rv < 0)
2793		snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
2794	return rv;
2795}
2796
2797static void xfrm_statistics_fini(struct net *net)
2798{
2799	xfrm_proc_fini(net);
2800	snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
2801}
2802#else
2803static int __net_init xfrm_statistics_init(struct net *net)
2804{
2805	return 0;
2806}
2807
2808static void xfrm_statistics_fini(struct net *net)
2809{
2810}
2811#endif
2812
2813static int __net_init xfrm_policy_init(struct net *net)
2814{
2815	unsigned int hmask, sz;
2816	int dir;
2817
2818	if (net_eq(net, &init_net))
2819		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
2820					   sizeof(struct xfrm_dst),
2821					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2822					   NULL);
2823
2824	hmask = 8 - 1;
2825	sz = (hmask+1) * sizeof(struct hlist_head);
2826
2827	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
2828	if (!net->xfrm.policy_byidx)
2829		goto out_byidx;
2830	net->xfrm.policy_idx_hmask = hmask;
2831
2832	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2833		struct xfrm_policy_hash *htab;
2834
2835		net->xfrm.policy_count[dir] = 0;
2836		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
2837
2838		htab = &net->xfrm.policy_bydst[dir];
2839		htab->table = xfrm_hash_alloc(sz);
2840		if (!htab->table)
2841			goto out_bydst;
2842		htab->hmask = hmask;
2843	}
2844
2845	INIT_LIST_HEAD(&net->xfrm.policy_all);
2846	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
2847	if (net_eq(net, &init_net))
2848		register_netdevice_notifier(&xfrm_dev_notifier);
2849	return 0;
2850
2851out_bydst:
2852	for (dir--; dir >= 0; dir--) {
2853		struct xfrm_policy_hash *htab;
2854
2855		htab = &net->xfrm.policy_bydst[dir];
2856		xfrm_hash_free(htab->table, sz);
2857	}
2858	xfrm_hash_free(net->xfrm.policy_byidx, sz);
2859out_byidx:
2860	return -ENOMEM;
2861}
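/* Editor's note: the initial tables above use hmask = 7, i.e. eight
 * hash buckets (sz = 8 * sizeof(struct hlist_head)) per direction;
 * policy_hash_work later invokes xfrm_hash_resize() to grow them as
 * policies accumulate.
 */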
2862
2863static void xfrm_policy_fini(struct net *net)
2864{
2865	struct xfrm_audit audit_info;
2866	unsigned int sz;
2867	int dir;
2868
2869	flush_work(&net->xfrm.policy_hash_work);
2870#ifdef CONFIG_XFRM_SUB_POLICY
2871	audit_info.loginuid = INVALID_UID;
2872	audit_info.sessionid = (unsigned int)-1;
2873	audit_info.secid = 0;
2874	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
2875#endif
2876	audit_info.loginuid = INVALID_UID;
2877	audit_info.sessionid = (unsigned int)-1;
2878	audit_info.secid = 0;
2879	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
2880
2881	WARN_ON(!list_empty(&net->xfrm.policy_all));
2882
2883	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2884		struct xfrm_policy_hash *htab;
2885
2886		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
2887
2888		htab = &net->xfrm.policy_bydst[dir];
2889		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
2890		WARN_ON(!hlist_empty(htab->table));
2891		xfrm_hash_free(htab->table, sz);
2892	}
2893
2894	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
2895	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
2896	xfrm_hash_free(net->xfrm.policy_byidx, sz);
2897}
2898
2899static int __net_init xfrm_net_init(struct net *net)
2900{
2901	int rv;
2902
2903	rv = xfrm_statistics_init(net);
2904	if (rv < 0)
2905		goto out_statistics;
2906	rv = xfrm_state_init(net);
2907	if (rv < 0)
2908		goto out_state;
2909	rv = xfrm_policy_init(net);
2910	if (rv < 0)
2911		goto out_policy;
2912	xfrm_dst_ops_init(net);
2913	rv = xfrm_sysctl_init(net);
2914	if (rv < 0)
2915		goto out_sysctl;
2916	rv = flow_cache_init(net);
2917	if (rv < 0)
2918		goto out;
2919
2920	/* Initialize the per-net locks here */
2921	spin_lock_init(&net->xfrm.xfrm_state_lock);
2922	rwlock_init(&net->xfrm.xfrm_policy_lock);
2923	mutex_init(&net->xfrm.xfrm_cfg_mutex);
2924
2925	return 0;
2926
2927out:
2928	xfrm_sysctl_fini(net);
2929out_sysctl:
2930	xfrm_policy_fini(net);
2931out_policy:
2932	xfrm_state_fini(net);
2933out_state:
2934	xfrm_statistics_fini(net);
2935out_statistics:
2936	return rv;
2937}
2938
2939static void __net_exit xfrm_net_exit(struct net *net)
2940{
2941	flow_cache_fini(net);
2942	xfrm_sysctl_fini(net);
2943	xfrm_policy_fini(net);
2944	xfrm_state_fini(net);
2945	xfrm_statistics_fini(net);
2946}
2947
2948static struct pernet_operations __net_initdata xfrm_net_ops = {
2949	.init = xfrm_net_init,
2950	.exit = xfrm_net_exit,
2951};
2952
2953void __init xfrm_init(void)
2954{
2955	register_pernet_subsys(&xfrm_net_ops);
2956	xfrm_input_init();
2957}
2958
2959#ifdef CONFIG_AUDITSYSCALL
2960static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
2961					 struct audit_buffer *audit_buf)
2962{
2963	struct xfrm_sec_ctx *ctx = xp->security;
2964	struct xfrm_selector *sel = &xp->selector;
2965
2966	if (ctx)
2967		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2968				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2969
2970	switch (sel->family) {
2971	case AF_INET:
2972		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
2973		if (sel->prefixlen_s != 32)
2974			audit_log_format(audit_buf, " src_prefixlen=%d",
2975					 sel->prefixlen_s);
2976		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
2977		if (sel->prefixlen_d != 32)
2978			audit_log_format(audit_buf, " dst_prefixlen=%d",
2979					 sel->prefixlen_d);
2980		break;
2981	case AF_INET6:
2982		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
2983		if (sel->prefixlen_s != 128)
2984			audit_log_format(audit_buf, " src_prefixlen=%d",
2985					 sel->prefixlen_s);
2986		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
2987		if (sel->prefixlen_d != 128)
2988			audit_log_format(audit_buf, " dst_prefixlen=%d",
2989					 sel->prefixlen_d);
2990		break;
2991	}
2992}
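/* Editor's example of the record fragment built above (hypothetical
 * values): " src=10.0.0.0 src_prefixlen=8 dst=192.0.2.1" - a prefix
 * length is only logged when it is narrower than a full host mask
 * (32 for IPv4, 128 for IPv6).
 */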
2993
2994void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
2995			   kuid_t auid, unsigned int sessionid, u32 secid)
2996{
2997	struct audit_buffer *audit_buf;
2998
2999	audit_buf = xfrm_audit_start("SPD-add");
3000	if (audit_buf == NULL)
3001		return;
3002	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
3003	audit_log_format(audit_buf, " res=%u", result);
3004	xfrm_audit_common_policyinfo(xp, audit_buf);
3005	audit_log_end(audit_buf);
3006}
3007EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
3008
3009void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
3010			      kuid_t auid, unsigned int sessionid, u32 secid)
3011{
3012	struct audit_buffer *audit_buf;
3013
3014	audit_buf = xfrm_audit_start("SPD-delete");
3015	if (audit_buf == NULL)
3016		return;
3017	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
3018	audit_log_format(audit_buf, " res=%u", result);
3019	xfrm_audit_common_policyinfo(xp, audit_buf);
3020	audit_log_end(audit_buf);
3021}
3022EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
3023#endif
3024
3025#ifdef CONFIG_XFRM_MIGRATE
3026static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
3027					const struct xfrm_selector *sel_tgt)
3028{
3029	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
3030		if (sel_tgt->family == sel_cmp->family &&
3031		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
3032				    sel_cmp->family) &&
3033		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
3034				    sel_cmp->family) &&
3035		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
3036		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
3037			return true;
3038		}
3039	} else {
3040		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
3041			return true;
3042		}
3043	}
3044	return false;
3045}
3046
3047static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
3048						    u8 dir, u8 type, struct net *net)
3049{
3050	struct xfrm_policy *pol, *ret = NULL;
3051	struct hlist_head *chain;
3052	u32 priority = ~0U;
3053
3054	read_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME*/
3055	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
3056	hlist_for_each_entry(pol, chain, bydst) {
3057		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3058		    pol->type == type) {
3059			ret = pol;
3060			priority = ret->priority;
3061			break;
3062		}
3063	}
3064	chain = &net->xfrm.policy_inexact[dir];
3065	hlist_for_each_entry(pol, chain, bydst) {
3066		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3067		    pol->type == type &&
3068		    pol->priority < priority) {
3069			ret = pol;
3070			break;
3071		}
3072	}
3073
3074	if (ret)
3075		xfrm_pol_hold(ret);
3076
3077	read_unlock_bh(&net->xfrm.xfrm_policy_lock);
3078
3079	return ret;
3080}
3081
3082static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
3083{
3084	int match = 0;
3085
3086	if (t->mode == m->mode && t->id.proto == m->proto &&
3087	    (m->reqid == 0 || t->reqid == m->reqid)) {
3088		switch (t->mode) {
3089		case XFRM_MODE_TUNNEL:
3090		case XFRM_MODE_BEET:
3091			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
3092					    m->old_family) &&
3093			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
3094					    m->old_family)) {
3095				match = 1;
3096			}
3097			break;
3098		case XFRM_MODE_TRANSPORT:
3099			/* in transport mode the template does not store
3100			   any IP addresses, hence we just compare mode
3101			   and protocol */
3102			match = 1;
3103			break;
3104		default:
3105			break;
3106		}
3107	}
3108	return match;
3109}
3110
3111/* update endpoint address(es) of template(s) */
3112static int xfrm_policy_migrate(struct xfrm_policy *pol,
3113			       struct xfrm_migrate *m, int num_migrate)
3114{
3115	struct xfrm_migrate *mp;
3116	int i, j, n = 0;
3117
3118	write_lock_bh(&pol->lock);
3119	if (unlikely(pol->walk.dead)) {
3120		/* target policy has been deleted */
3121		write_unlock_bh(&pol->lock);
3122		return -ENOENT;
3123	}
3124
3125	for (i = 0; i < pol->xfrm_nr; i++) {
3126		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
3127			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
3128				continue;
3129			n++;
3130			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
3131			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
3132				continue;
3133			/* update endpoints */
3134			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
3135			       sizeof(pol->xfrm_vec[i].id.daddr));
3136			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
3137			       sizeof(pol->xfrm_vec[i].saddr));
3138			pol->xfrm_vec[i].encap_family = mp->new_family;
3139			/* flush bundles */
3140			atomic_inc(&pol->genid);
3141		}
3142	}
3143
3144	write_unlock_bh(&pol->lock);
3145
3146	if (!n)
3147		return -ENODATA;
3148
3149	return 0;
3150}
3151
3152static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
3153{
3154	int i, j;
3155
3156	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
3157		return -EINVAL;
3158
3159	for (i = 0; i < num_migrate; i++) {
3160		if (xfrm_addr_equal(&m[i].old_daddr, &m[i].new_daddr,
3161				    m[i].old_family) &&
3162		    xfrm_addr_equal(&m[i].old_saddr, &m[i].new_saddr,
3163				    m[i].old_family))
3164			return -EINVAL;
3165		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
3166		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
3167			return -EINVAL;
3168
3169		/* check if there is any duplicated entry */
3170		for (j = i + 1; j < num_migrate; j++) {
3171			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
3172				    sizeof(m[i].old_daddr)) &&
3173			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
3174				    sizeof(m[i].old_saddr)) &&
3175			    m[i].proto == m[j].proto &&
3176			    m[i].mode == m[j].mode &&
3177			    m[i].reqid == m[j].reqid &&
3178			    m[i].old_family == m[j].old_family)
3179				return -EINVAL;
3180		}
3181	}
3182
3183	return 0;
3184}
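/* Editor's example: xfrm_migrate_check() rejects with -EINVAL an entry
 * whose old and new endpoints are identical (a no-op), any wildcard
 * new address, and two entries sharing old_daddr/old_saddr, proto,
 * mode, reqid and old_family (duplicates).
 */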
3185
3186int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
3187		 struct xfrm_migrate *m, int num_migrate,
3188		 struct xfrm_kmaddress *k, struct net *net)
3189{
3190	int i, err, nx_cur = 0, nx_new = 0;
3191	struct xfrm_policy *pol = NULL;
3192	struct xfrm_state *x, *xc;
3193	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
3194	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
3195	struct xfrm_migrate *mp;
3196
3197	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
3198		goto out;
3199
3200	/* Stage 1 - find policy */
3201	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
3202		err = -ENOENT;
3203		goto out;
3204	}
3205
3206	/* Stage 2 - find and update state(s) */
3207	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
3208		if ((x = xfrm_migrate_state_find(mp, net))) {
3209			x_cur[nx_cur] = x;
3210			nx_cur++;
3211			if ((xc = xfrm_state_migrate(x, mp))) {
3212				x_new[nx_new] = xc;
3213				nx_new++;
3214			} else {
3215				err = -ENODATA;
3216				goto restore_state;
3217			}
3218		}
3219	}
3220
3221	/* Stage 3 - update policy */
3222	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
3223		goto restore_state;
3224
3225	/* Stage 4 - delete old state(s) */
3226	if (nx_cur) {
3227		xfrm_states_put(x_cur, nx_cur);
3228		xfrm_states_delete(x_cur, nx_cur);
3229	}
3230
3231	/* Stage 5 - announce */
3232	km_migrate(sel, dir, type, m, num_migrate, k);
3233
3234	xfrm_pol_put(pol);
3235
3236	return 0;
3237out:
3238	return err;
3239
3240restore_state:
3241	if (pol)
3242		xfrm_pol_put(pol);
3243	if (nx_cur)
3244		xfrm_states_put(x_cur, nx_cur);
3245	if (nx_new)
3246		xfrm_states_delete(x_new, nx_new);
3247
3248	return err;
3249}
3250EXPORT_SYMBOL(xfrm_migrate);
3251#endif