   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * xfrm_policy.c
   4 *
   5 * Changes:
   6 *	Mitsuru KANDA @USAGI
   7 * 	Kazunori MIYAZAWA @USAGI
   8 * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
   9 * 		IPv6 support
  10 * 	Kazunori MIYAZAWA @USAGI
  11 * 	YOSHIFUJI Hideaki
  12 * 		Split up af-specific portion
  13 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
  14 *
  15 */
  16
  17#include <linux/err.h>
  18#include <linux/slab.h>
  19#include <linux/kmod.h>
  20#include <linux/list.h>
  21#include <linux/spinlock.h>
  22#include <linux/workqueue.h>
  23#include <linux/notifier.h>
  24#include <linux/netdevice.h>
  25#include <linux/netfilter.h>
  26#include <linux/module.h>
  27#include <linux/cache.h>
  28#include <linux/cpu.h>
  29#include <linux/audit.h>
  30#include <linux/rhashtable.h>
  31#include <linux/if_tunnel.h>
  32#include <net/dst.h>
  33#include <net/flow.h>
  34#include <net/xfrm.h>
  35#include <net/ip.h>
  36#if IS_ENABLED(CONFIG_IPV6_MIP6)
  37#include <net/mip6.h>
  38#endif
  39#ifdef CONFIG_XFRM_STATISTICS
  40#include <net/snmp.h>
  41#endif
  42
  43#include "xfrm_hash.h"
  44
  45#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
  46#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
  47#define XFRM_MAX_QUEUE_LEN	100
  48
  49struct xfrm_flo {
  50	struct dst_entry *dst_orig;
  51	u8 flags;
  52};
  53
  54/* prefixes smaller than this are stored in lists, not trees. */
  55#define INEXACT_PREFIXLEN_IPV4	16
  56#define INEXACT_PREFIXLEN_IPV6	48
  57
  58struct xfrm_pol_inexact_node {
  59	struct rb_node node;
  60	union {
  61		xfrm_address_t addr;
  62		struct rcu_head rcu;
  63	};
  64	u8 prefixlen;
  65
  66	struct rb_root root;
  67
  68	/* the policies matching this node; the list may be empty */
  69	struct hlist_head hhead;
  70};
  71
  72/* xfrm inexact policy search tree:
  73 * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
  74 *  |
  75 * +---- root_d: sorted by daddr:prefix
  76 * |                 |
  77 * |        xfrm_pol_inexact_node
  78 * |                 |
  79 * |                 +- root: sorted by saddr/prefix
  80 * |                 |              |
  81 * |                 |         xfrm_pol_inexact_node
  82 * |                 |              |
  83 * |                 |              + root: unused
  84 * |                 |              |
  85 * |                 |              + hhead: saddr:daddr policies
  86 * |                 |
  87 * |                 +- coarse policies and all any:daddr policies
  88 * |
  89 * +---- root_s: sorted by saddr:prefix
  90 * |                 |
  91 * |        xfrm_pol_inexact_node
  92 * |                 |
  93 * |                 + root: unused
  94 * |                 |
  95 * |                 + hhead: saddr:any policies
  96 * |
  97 * +---- coarse policies and all any:any policies
  98 *
  99 * Lookups return four candidate lists:
 100 * 1. any:any list from top-level xfrm_pol_inexact_bin
 101 * 2. any:daddr list from daddr tree
 102 * 3. saddr:daddr list from 2nd level daddr tree
 103 * 4. saddr:any list from saddr tree
 104 *
 105 * This result set then needs to be searched for the policy with
 106 * the lowest priority.  If two results have the same prio, the youngest one wins.
 107 */
 108
 109struct xfrm_pol_inexact_key {
 110	possible_net_t net;
 111	u32 if_id;
 112	u16 family;
 113	u8 dir, type;
 114};
 115
 116struct xfrm_pol_inexact_bin {
 117	struct xfrm_pol_inexact_key k;
 118	struct rhash_head head;
 119	/* list containing '*:*' policies */
 120	struct hlist_head hhead;
 121
 122	seqcount_t count;
 123	/* tree sorted by daddr/prefix */
 124	struct rb_root root_d;
 125
 126	/* tree sorted by saddr/prefix */
 127	struct rb_root root_s;
 128
 129	/* slow path below */
 130	struct list_head inexact_bins;
 131	struct rcu_head rcu;
 132};
 133
 134enum xfrm_pol_inexact_candidate_type {
 135	XFRM_POL_CAND_BOTH,
 136	XFRM_POL_CAND_SADDR,
 137	XFRM_POL_CAND_DADDR,
 138	XFRM_POL_CAND_ANY,
 139
 140	XFRM_POL_CAND_MAX,
 141};
 142
 143struct xfrm_pol_inexact_candidates {
 144	struct hlist_head *res[XFRM_POL_CAND_MAX];
 145};
 146
 147static DEFINE_SPINLOCK(xfrm_if_cb_lock);
 148static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;
 149
 150static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
 151static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
 152						__read_mostly;
 153
 154static struct kmem_cache *xfrm_dst_cache __ro_after_init;
 155static __read_mostly seqcount_t xfrm_policy_hash_generation;
 156
 157static struct rhashtable xfrm_policy_inexact_table;
 158static const struct rhashtable_params xfrm_pol_inexact_params;
 159
 160static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
 161static int stale_bundle(struct dst_entry *dst);
 162static int xfrm_bundle_ok(struct xfrm_dst *xdst);
 163static void xfrm_policy_queue_process(struct timer_list *t);
 164
 165static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
 166static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
 167						int dir);
 168
 169static struct xfrm_pol_inexact_bin *
 170xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
 171			   u32 if_id);
 172
 173static struct xfrm_pol_inexact_bin *
 174xfrm_policy_inexact_lookup_rcu(struct net *net,
 175			       u8 type, u16 family, u8 dir, u32 if_id);
 176static struct xfrm_policy *
 177xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
 178			bool excl);
 179static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
 180					    struct xfrm_policy *policy);
 181
 182static bool
 183xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
 184				    struct xfrm_pol_inexact_bin *b,
 185				    const xfrm_address_t *saddr,
 186				    const xfrm_address_t *daddr);
 187
 188static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
 189{
 190	return refcount_inc_not_zero(&policy->refcnt);
 191}
 192
 193static inline bool
 194__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
 195{
 196	const struct flowi4 *fl4 = &fl->u.ip4;
 197
 198	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
 199		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
 200		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
 201		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
 202		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
 203		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
 204}
 205
 206static inline bool
 207__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
 208{
 209	const struct flowi6 *fl6 = &fl->u.ip6;
 210
 211	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
 212		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
 213		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
 214		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
 215		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
 216		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
 217}
 218
 219bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
 220			 unsigned short family)
 221{
 222	switch (family) {
 223	case AF_INET:
 224		return __xfrm4_selector_match(sel, fl);
 225	case AF_INET6:
 226		return __xfrm6_selector_match(sel, fl);
 227	}
 228	return false;
 229}
 230
 231static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
 232{
 233	const struct xfrm_policy_afinfo *afinfo;
 234
 235	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
 236		return NULL;
 237	rcu_read_lock();
 238	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
 239	if (unlikely(!afinfo))
 240		rcu_read_unlock();
 241	return afinfo;
 242}
 243
 244/* Called with rcu_read_lock(). */
 245static const struct xfrm_if_cb *xfrm_if_get_cb(void)
 246{
 247	return rcu_dereference(xfrm_if_cb);
 248}
 249
 250struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
 251				    const xfrm_address_t *saddr,
 252				    const xfrm_address_t *daddr,
 253				    int family, u32 mark)
 254{
 255	const struct xfrm_policy_afinfo *afinfo;
 256	struct dst_entry *dst;
 257
 258	afinfo = xfrm_policy_get_afinfo(family);
 259	if (unlikely(afinfo == NULL))
 260		return ERR_PTR(-EAFNOSUPPORT);
 261
 262	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
 263
 264	rcu_read_unlock();
 265
 266	return dst;
 267}
 268EXPORT_SYMBOL(__xfrm_dst_lookup);
 269
 270static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
 271						int tos, int oif,
 272						xfrm_address_t *prev_saddr,
 273						xfrm_address_t *prev_daddr,
 274						int family, u32 mark)
 275{
 276	struct net *net = xs_net(x);
 277	xfrm_address_t *saddr = &x->props.saddr;
 278	xfrm_address_t *daddr = &x->id.daddr;
 279	struct dst_entry *dst;
 280
 281	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
 282		saddr = x->coaddr;
 283		daddr = prev_daddr;
 284	}
 285	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
 286		saddr = prev_saddr;
 287		daddr = x->coaddr;
 288	}
 289
 290	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
 291
 292	if (!IS_ERR(dst)) {
 293		if (prev_saddr != saddr)
 294			memcpy(prev_saddr, saddr,  sizeof(*prev_saddr));
 295		if (prev_daddr != daddr)
 296			memcpy(prev_daddr, daddr,  sizeof(*prev_daddr));
 297	}
 298
 299	return dst;
 300}
 301
 302static inline unsigned long make_jiffies(long secs)
 303{
 304	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
 305		return MAX_SCHEDULE_TIMEOUT-1;
 306	else
 307		return secs*HZ;
 308}
 309
 310static void xfrm_policy_timer(struct timer_list *t)
 311{
 312	struct xfrm_policy *xp = from_timer(xp, t, timer);
 313	time64_t now = ktime_get_real_seconds();
 314	time64_t next = TIME64_MAX;
 315	int warn = 0;
 316	int dir;
 317
 318	read_lock(&xp->lock);
 319
 320	if (unlikely(xp->walk.dead))
 321		goto out;
 322
 323	dir = xfrm_policy_id2dir(xp->index);
 324
 325	if (xp->lft.hard_add_expires_seconds) {
 326		time64_t tmo = xp->lft.hard_add_expires_seconds +
 327			xp->curlft.add_time - now;
 328		if (tmo <= 0)
 329			goto expired;
 330		if (tmo < next)
 331			next = tmo;
 332	}
 333	if (xp->lft.hard_use_expires_seconds) {
 334		time64_t tmo = xp->lft.hard_use_expires_seconds +
 335			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
 336		if (tmo <= 0)
 337			goto expired;
 338		if (tmo < next)
 339			next = tmo;
 340	}
 341	if (xp->lft.soft_add_expires_seconds) {
 342		time64_t tmo = xp->lft.soft_add_expires_seconds +
 343			xp->curlft.add_time - now;
 344		if (tmo <= 0) {
 345			warn = 1;
 346			tmo = XFRM_KM_TIMEOUT;
 347		}
 348		if (tmo < next)
 349			next = tmo;
 350	}
 351	if (xp->lft.soft_use_expires_seconds) {
 352		time64_t tmo = xp->lft.soft_use_expires_seconds +
 353			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
 354		if (tmo <= 0) {
 355			warn = 1;
 356			tmo = XFRM_KM_TIMEOUT;
 357		}
 358		if (tmo < next)
 359			next = tmo;
 360	}
 361
 362	if (warn)
 363		km_policy_expired(xp, dir, 0, 0);
 364	if (next != TIME64_MAX &&
 365	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
 366		xfrm_pol_hold(xp);
 367
 368out:
 369	read_unlock(&xp->lock);
 370	xfrm_pol_put(xp);
 371	return;
 372
 373expired:
 374	read_unlock(&xp->lock);
 375	if (!xfrm_policy_delete(xp, dir))
 376		km_policy_expired(xp, dir, 1, 0);
 377	xfrm_pol_put(xp);
 378}
 379
 380/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 381 * SPD calls.
 382 */
 383
 384struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
 385{
 386	struct xfrm_policy *policy;
 387
 388	policy = kzalloc(sizeof(struct xfrm_policy), gfp);
 389
 390	if (policy) {
 391		write_pnet(&policy->xp_net, net);
 392		INIT_LIST_HEAD(&policy->walk.all);
 393		INIT_HLIST_NODE(&policy->bydst_inexact_list);
 394		INIT_HLIST_NODE(&policy->bydst);
 395		INIT_HLIST_NODE(&policy->byidx);
 396		rwlock_init(&policy->lock);
 397		refcount_set(&policy->refcnt, 1);
 398		skb_queue_head_init(&policy->polq.hold_queue);
 399		timer_setup(&policy->timer, xfrm_policy_timer, 0);
 400		timer_setup(&policy->polq.hold_timer,
 401			    xfrm_policy_queue_process, 0);
 402	}
 403	return policy;
 404}
 405EXPORT_SYMBOL(xfrm_policy_alloc);
 406
 407static void xfrm_policy_destroy_rcu(struct rcu_head *head)
 408{
 409	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
 410
 411	security_xfrm_policy_free(policy->security);
 412	kfree(policy);
 413}
 414
 415/* Destroy xfrm_policy: descendant resources must have been released by this moment. */
 416
 417void xfrm_policy_destroy(struct xfrm_policy *policy)
 418{
 419	BUG_ON(!policy->walk.dead);
 420
 421	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
 422		BUG();
 423
 424	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
 425}
 426EXPORT_SYMBOL(xfrm_policy_destroy);
 427
 428/* Rule must be locked. Release descendant resources, announce
 429 * entry dead. The rule must already be unlinked from all lists.
 430 */
 431
 432static void xfrm_policy_kill(struct xfrm_policy *policy)
 433{
 434	policy->walk.dead = 1;
 435
 436	atomic_inc(&policy->genid);
 437
 438	if (del_timer(&policy->polq.hold_timer))
 439		xfrm_pol_put(policy);
 440	skb_queue_purge(&policy->polq.hold_queue);
 441
 442	if (del_timer(&policy->timer))
 443		xfrm_pol_put(policy);
 444
 445	xfrm_pol_put(policy);
 446}
 447
 448static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
 449
 450static inline unsigned int idx_hash(struct net *net, u32 index)
 451{
 452	return __idx_hash(index, net->xfrm.policy_idx_hmask);
 453}
 454
 455/* calculate policy hash thresholds */
 456static void __get_hash_thresh(struct net *net,
 457			      unsigned short family, int dir,
 458			      u8 *dbits, u8 *sbits)
 459{
 460	switch (family) {
 461	case AF_INET:
 462		*dbits = net->xfrm.policy_bydst[dir].dbits4;
 463		*sbits = net->xfrm.policy_bydst[dir].sbits4;
 464		break;
 465
 466	case AF_INET6:
 467		*dbits = net->xfrm.policy_bydst[dir].dbits6;
 468		*sbits = net->xfrm.policy_bydst[dir].sbits6;
 469		break;
 470
 471	default:
 472		*dbits = 0;
 473		*sbits = 0;
 474	}
 475}
 476
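/* Return the exact-match bydst hash chain for @sel, or NULL when the
 * selector prefixes are shorter than the hash thresholds (__sel_hash()
 * signals this by returning hmask + 1); such policies are stored in the
 * inexact lists/trees instead.
 */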
 477static struct hlist_head *policy_hash_bysel(struct net *net,
 478					    const struct xfrm_selector *sel,
 479					    unsigned short family, int dir)
 480{
 481	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 482	unsigned int hash;
 483	u8 dbits;
 484	u8 sbits;
 485
 486	__get_hash_thresh(net, family, dir, &dbits, &sbits);
 487	hash = __sel_hash(sel, family, hmask, dbits, sbits);
 488
 489	if (hash == hmask + 1)
 490		return NULL;
 491
 492	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
 493		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
 494}
 495
 496static struct hlist_head *policy_hash_direct(struct net *net,
 497					     const xfrm_address_t *daddr,
 498					     const xfrm_address_t *saddr,
 499					     unsigned short family, int dir)
 500{
 501	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 502	unsigned int hash;
 503	u8 dbits;
 504	u8 sbits;
 505
 506	__get_hash_thresh(net, family, dir, &dbits, &sbits);
 507	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
 508
 509	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
 510		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
 511}
 512
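/* Move every policy from one old bydst chain into the new hash table.
 * Each "redo" pass handles the entries that hash to the same new bucket
 * as the first remaining entry, preserving their relative order.
 */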
 513static void xfrm_dst_hash_transfer(struct net *net,
 514				   struct hlist_head *list,
 515				   struct hlist_head *ndsttable,
 516				   unsigned int nhashmask,
 517				   int dir)
 518{
 519	struct hlist_node *tmp, *entry0 = NULL;
 520	struct xfrm_policy *pol;
 521	unsigned int h0 = 0;
 522	u8 dbits;
 523	u8 sbits;
 524
 525redo:
 526	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
 527		unsigned int h;
 528
 529		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
 530		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
 531				pol->family, nhashmask, dbits, sbits);
 532		if (!entry0) {
 533			hlist_del_rcu(&pol->bydst);
 534			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
 535			h0 = h;
 536		} else {
 537			if (h != h0)
 538				continue;
 539			hlist_del_rcu(&pol->bydst);
 540			hlist_add_behind_rcu(&pol->bydst, entry0);
 541		}
 542		entry0 = &pol->bydst;
 543	}
 544	if (!hlist_empty(list)) {
 545		entry0 = NULL;
 546		goto redo;
 547	}
 548}
 549
 550static void xfrm_idx_hash_transfer(struct hlist_head *list,
 551				   struct hlist_head *nidxtable,
 552				   unsigned int nhashmask)
 553{
 554	struct hlist_node *tmp;
 555	struct xfrm_policy *pol;
 556
 557	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
 558		unsigned int h;
 559
 560		h = __idx_hash(pol->index, nhashmask);
 561		hlist_add_head(&pol->byidx, nidxtable+h);
 562	}
 563}
 564
 565static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
 566{
 567	return ((old_hmask + 1) << 1) - 1;
 568}
 569
 570static void xfrm_bydst_resize(struct net *net, int dir)
 571{
 572	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 573	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
 574	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
 575	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
 576	struct hlist_head *odst;
 577	int i;
 578
 579	if (!ndst)
 580		return;
 581
 582	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
 583	write_seqcount_begin(&xfrm_policy_hash_generation);
 584
 585	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
 586				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
 587
 588	for (i = hmask; i >= 0; i--)
 589		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
 590
 591	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
 592	net->xfrm.policy_bydst[dir].hmask = nhashmask;
 593
 594	write_seqcount_end(&xfrm_policy_hash_generation);
 595	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 596
 597	synchronize_rcu();
 598
 599	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
 600}
 601
 602static void xfrm_byidx_resize(struct net *net, int total)
 603{
 604	unsigned int hmask = net->xfrm.policy_idx_hmask;
 605	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
 606	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
 607	struct hlist_head *oidx = net->xfrm.policy_byidx;
 608	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
 609	int i;
 610
 611	if (!nidx)
 612		return;
 613
 614	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
 615
 616	for (i = hmask; i >= 0; i--)
 617		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
 618
 619	net->xfrm.policy_byidx = nidx;
 620	net->xfrm.policy_idx_hmask = nhashmask;
 621
 622	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 623
 624	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
 625}
 626
 627static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
 628{
 629	unsigned int cnt = net->xfrm.policy_count[dir];
 630	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 631
 632	if (total)
 633		*total += cnt;
 634
 635	if ((hmask + 1) < xfrm_policy_hashmax &&
 636	    cnt > hmask)
 637		return 1;
 638
 639	return 0;
 640}
 641
 642static inline int xfrm_byidx_should_resize(struct net *net, int total)
 643{
 644	unsigned int hmask = net->xfrm.policy_idx_hmask;
 645
 646	if ((hmask + 1) < xfrm_policy_hashmax &&
 647	    total > hmask)
 648		return 1;
 649
 650	return 0;
 651}
 652
 653void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
 654{
 655	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
 656	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
 657	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
 658	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
 659	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
 660	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
 661	si->spdhcnt = net->xfrm.policy_idx_hmask;
 662	si->spdhmcnt = xfrm_policy_hashmax;
 663}
 664EXPORT_SYMBOL(xfrm_spd_getinfo);
 665
 666static DEFINE_MUTEX(hash_resize_mutex);
 667static void xfrm_hash_resize(struct work_struct *work)
 668{
 669	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
 670	int dir, total;
 671
 672	mutex_lock(&hash_resize_mutex);
 673
 674	total = 0;
 675	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
 676		if (xfrm_bydst_should_resize(net, dir, &total))
 677			xfrm_bydst_resize(net, dir);
 678	}
 679	if (xfrm_byidx_should_resize(net, total))
 680		xfrm_byidx_resize(net, total);
 681
 682	mutex_unlock(&hash_resize_mutex);
 683}
 684
 685/* Make sure *pol can be inserted into fastbin.
 686 * Useful to check that later insert requests will be successful
 687 * (provided xfrm_policy_lock is held throughout).
 688 */
 689static struct xfrm_pol_inexact_bin *
 690xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
 691{
 692	struct xfrm_pol_inexact_bin *bin, *prev;
 693	struct xfrm_pol_inexact_key k = {
 694		.family = pol->family,
 695		.type = pol->type,
 696		.dir = dir,
 697		.if_id = pol->if_id,
 698	};
 699	struct net *net = xp_net(pol);
 700
 701	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
 702
 703	write_pnet(&k.net, net);
 704	bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
 705				     xfrm_pol_inexact_params);
 706	if (bin)
 707		return bin;
 708
 709	bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
 710	if (!bin)
 711		return NULL;
 712
 713	bin->k = k;
 714	INIT_HLIST_HEAD(&bin->hhead);
 715	bin->root_d = RB_ROOT;
 716	bin->root_s = RB_ROOT;
 717	seqcount_init(&bin->count);
 718
 719	prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
 720						&bin->k, &bin->head,
 721						xfrm_pol_inexact_params);
 722	if (!prev) {
 723		list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
 724		return bin;
 725	}
 726
 727	kfree(bin);
 728
 729	return IS_ERR(prev) ? NULL : prev;
 730}
 731
 732static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
 733					       int family, u8 prefixlen)
 734{
 735	if (xfrm_addr_any(addr, family))
 736		return true;
 737
 738	if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
 739		return true;
 740
 741	if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
 742		return true;
 743
 744	return false;
 745}
 746
 747static bool
 748xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
 749{
 750	const xfrm_address_t *addr;
 751	bool saddr_any, daddr_any;
 752	u8 prefixlen;
 753
 754	addr = &policy->selector.saddr;
 755	prefixlen = policy->selector.prefixlen_s;
 756
 757	saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
 758						       policy->family,
 759						       prefixlen);
 760	addr = &policy->selector.daddr;
 761	prefixlen = policy->selector.prefixlen_d;
 762	daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
 763						       policy->family,
 764						       prefixlen);
 765	return saddr_any && daddr_any;
 766}
 767
 768static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
 769				       const xfrm_address_t *addr, u8 prefixlen)
 770{
 771	node->addr = *addr;
 772	node->prefixlen = prefixlen;
 773}
 774
 775static struct xfrm_pol_inexact_node *
 776xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
 777{
 778	struct xfrm_pol_inexact_node *node;
 779
 780	node = kzalloc(sizeof(*node), GFP_ATOMIC);
 781	if (node)
 782		xfrm_pol_inexact_node_init(node, addr, prefixlen);
 783
 784	return node;
 785}
 786
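/* Compare two addresses under the given prefix length.  Returns a
 * memcmp()-style result and is used as the ordering function for the
 * inexact policy rbtrees.
 */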
 787static int xfrm_policy_addr_delta(const xfrm_address_t *a,
 788				  const xfrm_address_t *b,
 789				  u8 prefixlen, u16 family)
 790{
 791	unsigned int pdw, pbi;
 792	int delta = 0;
 793
 794	switch (family) {
 795	case AF_INET:
 796		if (sizeof(long) == 4 && prefixlen == 0)
 797			return ntohl(a->a4) - ntohl(b->a4);
 798		return (ntohl(a->a4) & ((~0UL << (32 - prefixlen)))) -
 799		       (ntohl(b->a4) & ((~0UL << (32 - prefixlen))));
 800	case AF_INET6:
 801		pdw = prefixlen >> 5;
 802		pbi = prefixlen & 0x1f;
 803
 804		if (pdw) {
 805			delta = memcmp(a->a6, b->a6, pdw << 2);
 806			if (delta)
 807				return delta;
 808		}
 809		if (pbi) {
 810			u32 mask = ~0u << (32 - pbi);
 811
 812			delta = (ntohl(a->a6[pdw]) & mask) -
 813				(ntohl(b->a6[pdw]) & mask);
 814		}
 815		break;
 816	default:
 817		break;
 818	}
 819
 820	return delta;
 821}
 822
 823static void xfrm_policy_inexact_list_reinsert(struct net *net,
 824					      struct xfrm_pol_inexact_node *n,
 825					      u16 family)
 826{
 827	unsigned int matched_s, matched_d;
 828	struct xfrm_policy *policy, *p;
 829
 830	matched_s = 0;
 831	matched_d = 0;
 832
 833	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
 834		struct hlist_node *newpos = NULL;
 835		bool matches_s, matches_d;
 836
 837		if (!policy->bydst_reinsert)
 838			continue;
 839
 840		WARN_ON_ONCE(policy->family != family);
 841
 842		policy->bydst_reinsert = false;
 843		hlist_for_each_entry(p, &n->hhead, bydst) {
 844			if (policy->priority > p->priority)
 845				newpos = &p->bydst;
 846			else if (policy->priority == p->priority &&
 847				 policy->pos > p->pos)
 848				newpos = &p->bydst;
 849			else
 850				break;
 851		}
 852
 853		if (newpos)
 854			hlist_add_behind_rcu(&policy->bydst, newpos);
 855		else
 856			hlist_add_head_rcu(&policy->bydst, &n->hhead);
 857
 858		/* paranoia checks follow.
 859		 * Check that the reinserted policy matches at least
 860		 * saddr or daddr for current node prefix.
 861		 *
 862		 * Matching both is fine, matching saddr in one policy
 863		 * (but not daddr) and then matching only daddr in another
 864		 * is a bug.
 865		 */
 866		matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
 867						   &n->addr,
 868						   n->prefixlen,
 869						   family) == 0;
 870		matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
 871						   &n->addr,
 872						   n->prefixlen,
 873						   family) == 0;
 874		if (matches_s && matches_d)
 875			continue;
 876
 877		WARN_ON_ONCE(!matches_s && !matches_d);
 878		if (matches_s)
 879			matched_s++;
 880		if (matches_d)
 881			matched_d++;
 882		WARN_ON_ONCE(matched_s && matched_d);
 883	}
 884}
 885
 886static void xfrm_policy_inexact_node_reinsert(struct net *net,
 887					      struct xfrm_pol_inexact_node *n,
 888					      struct rb_root *new,
 889					      u16 family)
 890{
 891	struct xfrm_pol_inexact_node *node;
 892	struct rb_node **p, *parent;
 893
 894	/* we should not have another subtree here */
 895	WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
 896restart:
 897	parent = NULL;
 898	p = &new->rb_node;
 899	while (*p) {
 900		u8 prefixlen;
 901		int delta;
 902
 903		parent = *p;
 904		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
 905
 906		prefixlen = min(node->prefixlen, n->prefixlen);
 907
 908		delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
 909					       prefixlen, family);
 910		if (delta < 0) {
 911			p = &parent->rb_left;
 912		} else if (delta > 0) {
 913			p = &parent->rb_right;
 914		} else {
 915			bool same_prefixlen = node->prefixlen == n->prefixlen;
 916			struct xfrm_policy *tmp;
 917
 918			hlist_for_each_entry(tmp, &n->hhead, bydst) {
 919				tmp->bydst_reinsert = true;
 920				hlist_del_rcu(&tmp->bydst);
 921			}
 922
 923			node->prefixlen = prefixlen;
 924
 925			xfrm_policy_inexact_list_reinsert(net, node, family);
 926
 927			if (same_prefixlen) {
 928				kfree_rcu(n, rcu);
 929				return;
 930			}
 931
 932			rb_erase(*p, new);
 933			kfree_rcu(n, rcu);
 934			n = node;
 935			goto restart;
 936		}
 937	}
 938
 939	rb_link_node_rcu(&n->node, parent, p);
 940	rb_insert_color(&n->node, new);
 941}
 942
 943/* merge nodes v and n */
 944static void xfrm_policy_inexact_node_merge(struct net *net,
 945					   struct xfrm_pol_inexact_node *v,
 946					   struct xfrm_pol_inexact_node *n,
 947					   u16 family)
 948{
 949	struct xfrm_pol_inexact_node *node;
 950	struct xfrm_policy *tmp;
 951	struct rb_node *rnode;
 952
 953	/* To-be-merged node v has a subtree.
 954	 *
 955	 * Dismantle it and insert its nodes to n->root.
 956	 */
 957	while ((rnode = rb_first(&v->root)) != NULL) {
 958		node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
 959		rb_erase(&node->node, &v->root);
 960		xfrm_policy_inexact_node_reinsert(net, node, &n->root,
 961						  family);
 962	}
 963
 964	hlist_for_each_entry(tmp, &v->hhead, bydst) {
 965		tmp->bydst_reinsert = true;
 966		hlist_del_rcu(&tmp->bydst);
 967	}
 968
 969	xfrm_policy_inexact_list_reinsert(net, n, family);
 970}
 971
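/* Find or create the tree node for addr/prefixlen in @root.  Existing
 * nodes whose addresses fall within the new (shorter) prefix are removed
 * from the tree and merged into the new node so that none of their
 * policies are lost.
 */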
 972static struct xfrm_pol_inexact_node *
 973xfrm_policy_inexact_insert_node(struct net *net,
 974				struct rb_root *root,
 975				xfrm_address_t *addr,
 976				u16 family, u8 prefixlen, u8 dir)
 977{
 978	struct xfrm_pol_inexact_node *cached = NULL;
 979	struct rb_node **p, *parent = NULL;
 980	struct xfrm_pol_inexact_node *node;
 981
 982	p = &root->rb_node;
 983	while (*p) {
 984		int delta;
 985
 986		parent = *p;
 987		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
 988
 989		delta = xfrm_policy_addr_delta(addr, &node->addr,
 990					       node->prefixlen,
 991					       family);
 992		if (delta == 0 && prefixlen >= node->prefixlen) {
 993			WARN_ON_ONCE(cached); /* ipsec policies got lost */
 994			return node;
 995		}
 996
 997		if (delta < 0)
 998			p = &parent->rb_left;
 999		else
1000			p = &parent->rb_right;
1001
1002		if (prefixlen < node->prefixlen) {
1003			delta = xfrm_policy_addr_delta(addr, &node->addr,
1004						       prefixlen,
1005						       family);
1006			if (delta)
1007				continue;
1008
1009			/* This node is a subnet of the new prefix. It needs
1010			 * to be removed and re-inserted with the smaller
1011			 * prefix and all nodes that are now also covered
1012			 * by the reduced prefixlen.
1013			 */
1014			rb_erase(&node->node, root);
1015
1016			if (!cached) {
1017				xfrm_pol_inexact_node_init(node, addr,
1018							   prefixlen);
1019				cached = node;
1020			} else {
1021				/* This node also falls within the new
1022				 * prefixlen. Merge the to-be-reinserted
1023				 * node and this one.
1024				 */
1025				xfrm_policy_inexact_node_merge(net, node,
1026							       cached, family);
1027				kfree_rcu(node, rcu);
1028			}
1029
1030			/* restart */
1031			p = &root->rb_node;
1032			parent = NULL;
1033		}
1034	}
1035
1036	node = cached;
1037	if (!node) {
1038		node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
1039		if (!node)
1040			return NULL;
1041	}
1042
1043	rb_link_node_rcu(&node->node, parent, p);
1044	rb_insert_color(&node->node, root);
1045
1046	return node;
1047}
1048
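/* Recursively remove tree nodes that no longer carry any policies or
 * child nodes.  @rm is set when the caller expects the whole tree to be
 * empty (net exit); leftover entries then trigger a one-time warning.
 */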
1049static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
1050{
1051	struct xfrm_pol_inexact_node *node;
1052	struct rb_node *rn = rb_first(r);
1053
1054	while (rn) {
1055		node = rb_entry(rn, struct xfrm_pol_inexact_node, node);
1056
1057		xfrm_policy_inexact_gc_tree(&node->root, rm);
1058		rn = rb_next(rn);
1059
1060		if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
1061			WARN_ON_ONCE(rm);
1062			continue;
1063		}
1064
1065		rb_erase(&node->node, r);
1066		kfree_rcu(node, rcu);
1067	}
1068}
1069
1070static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
1071{
1072	write_seqcount_begin(&b->count);
1073	xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
1074	xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
1075	write_seqcount_end(&b->count);
1076
1077	if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
1078	    !hlist_empty(&b->hhead)) {
1079		WARN_ON_ONCE(net_exit);
1080		return;
1081	}
1082
1083	if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
1084				   xfrm_pol_inexact_params) == 0) {
1085		list_del(&b->inexact_bins);
1086		kfree_rcu(b, rcu);
1087	}
1088}
1089
1090static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
1091{
1092	struct net *net = read_pnet(&b->k.net);
1093
1094	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1095	__xfrm_policy_inexact_prune_bin(b, false);
1096	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1097}
1098
1099static void __xfrm_policy_inexact_flush(struct net *net)
1100{
1101	struct xfrm_pol_inexact_bin *bin, *t;
1102
1103	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1104
1105	list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
1106		__xfrm_policy_inexact_prune_bin(bin, false);
1107}
1108
1109static struct hlist_head *
1110xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
1111				struct xfrm_policy *policy, u8 dir)
1112{
1113	struct xfrm_pol_inexact_node *n;
1114	struct net *net;
1115
1116	net = xp_net(policy);
1117	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1118
1119	if (xfrm_policy_inexact_insert_use_any_list(policy))
1120		return &bin->hhead;
1121
1122	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
1123					       policy->family,
1124					       policy->selector.prefixlen_d)) {
1125		write_seqcount_begin(&bin->count);
1126		n = xfrm_policy_inexact_insert_node(net,
1127						    &bin->root_s,
1128						    &policy->selector.saddr,
1129						    policy->family,
1130						    policy->selector.prefixlen_s,
1131						    dir);
1132		write_seqcount_end(&bin->count);
1133		if (!n)
1134			return NULL;
1135
1136		return &n->hhead;
1137	}
1138
1139	/* daddr is fixed */
1140	write_seqcount_begin(&bin->count);
1141	n = xfrm_policy_inexact_insert_node(net,
1142					    &bin->root_d,
1143					    &policy->selector.daddr,
1144					    policy->family,
1145					    policy->selector.prefixlen_d, dir);
1146	write_seqcount_end(&bin->count);
1147	if (!n)
1148		return NULL;
1149
1150	/* saddr is wildcard */
1151	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
1152					       policy->family,
1153					       policy->selector.prefixlen_s))
1154		return &n->hhead;
1155
1156	write_seqcount_begin(&bin->count);
1157	n = xfrm_policy_inexact_insert_node(net,
1158					    &n->root,
1159					    &policy->selector.saddr,
1160					    policy->family,
1161					    policy->selector.prefixlen_s, dir);
1162	write_seqcount_end(&bin->count);
1163	if (!n)
1164		return NULL;
1165
1166	return &n->hhead;
1167}
1168
1169static struct xfrm_policy *
1170xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
1171{
1172	struct xfrm_pol_inexact_bin *bin;
1173	struct xfrm_policy *delpol;
1174	struct hlist_head *chain;
1175	struct net *net;
1176
1177	bin = xfrm_policy_inexact_alloc_bin(policy, dir);
1178	if (!bin)
1179		return ERR_PTR(-ENOMEM);
1180
1181	net = xp_net(policy);
1182	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1183
1184	chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
1185	if (!chain) {
1186		__xfrm_policy_inexact_prune_bin(bin, false);
1187		return ERR_PTR(-ENOMEM);
1188	}
1189
1190	delpol = xfrm_policy_insert_list(chain, policy, excl);
1191	if (delpol && excl) {
1192		__xfrm_policy_inexact_prune_bin(bin, false);
1193		return ERR_PTR(-EEXIST);
1194	}
1195
1196	chain = &net->xfrm.policy_inexact[dir];
1197	xfrm_policy_insert_inexact_list(chain, policy);
1198
1199	if (delpol)
1200		__xfrm_policy_inexact_prune_bin(bin, false);
1201
1202	return delpol;
1203}
1204
1205static void xfrm_hash_rebuild(struct work_struct *work)
1206{
1207	struct net *net = container_of(work, struct net,
1208				       xfrm.policy_hthresh.work);
1209	unsigned int hmask;
1210	struct xfrm_policy *pol;
1211	struct xfrm_policy *policy;
1212	struct hlist_head *chain;
1213	struct hlist_head *odst;
1214	struct hlist_node *newpos;
1215	int i;
1216	int dir;
1217	unsigned seq;
1218	u8 lbits4, rbits4, lbits6, rbits6;
1219
1220	mutex_lock(&hash_resize_mutex);
1221
1222	/* read selector prefixlen thresholds */
1223	do {
1224		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
1225
1226		lbits4 = net->xfrm.policy_hthresh.lbits4;
1227		rbits4 = net->xfrm.policy_hthresh.rbits4;
1228		lbits6 = net->xfrm.policy_hthresh.lbits6;
1229		rbits6 = net->xfrm.policy_hthresh.rbits6;
1230	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
1231
1232	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1233	write_seqcount_begin(&xfrm_policy_hash_generation);
1234
1235	/* make sure that we can insert the indirect policies again before
1236	 * we start with destructive action.
1237	 */
1238	list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
1239		struct xfrm_pol_inexact_bin *bin;
1240		u8 dbits, sbits;
1241
1242		dir = xfrm_policy_id2dir(policy->index);
1243		if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
1244			continue;
1245
1246		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
1247			if (policy->family == AF_INET) {
1248				dbits = rbits4;
1249				sbits = lbits4;
1250			} else {
1251				dbits = rbits6;
1252				sbits = lbits6;
1253			}
1254		} else {
1255			if (policy->family == AF_INET) {
1256				dbits = lbits4;
1257				sbits = rbits4;
1258			} else {
1259				dbits = lbits6;
1260				sbits = rbits6;
1261			}
1262		}
1263
1264		if (policy->selector.prefixlen_d < dbits ||
1265		    policy->selector.prefixlen_s < sbits)
1266			continue;
1267
1268		bin = xfrm_policy_inexact_alloc_bin(policy, dir);
1269		if (!bin)
1270			goto out_unlock;
1271
1272		if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
1273			goto out_unlock;
1274	}
1275
1276	/* reset the bydst and inexact table in all directions */
1277	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
1278		struct hlist_node *n;
1279
1280		hlist_for_each_entry_safe(policy, n,
1281					  &net->xfrm.policy_inexact[dir],
1282					  bydst_inexact_list) {
1283			hlist_del_rcu(&policy->bydst);
1284			hlist_del_init(&policy->bydst_inexact_list);
1285		}
1286
1287		hmask = net->xfrm.policy_bydst[dir].hmask;
1288		odst = net->xfrm.policy_bydst[dir].table;
1289		for (i = hmask; i >= 0; i--) {
1290			hlist_for_each_entry_safe(policy, n, odst + i, bydst)
1291				hlist_del_rcu(&policy->bydst);
1292		}
1293		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
1294			/* dir out => dst = remote, src = local */
1295			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
1296			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
1297			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
1298			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
1299		} else {
1300			/* dir in/fwd => dst = local, src = remote */
1301			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
1302			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
1303			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
1304			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
1305		}
1306	}
1307
1308	/* re-insert all policies by order of creation */
1309	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
1310		if (policy->walk.dead)
1311			continue;
1312		dir = xfrm_policy_id2dir(policy->index);
1313		if (dir >= XFRM_POLICY_MAX) {
1314			/* skip socket policies */
1315			continue;
1316		}
1317		newpos = NULL;
1318		chain = policy_hash_bysel(net, &policy->selector,
1319					  policy->family, dir);
1320
1321		if (!chain) {
1322			void *p = xfrm_policy_inexact_insert(policy, dir, 0);
1323
1324			WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
1325			continue;
1326		}
1327
1328		hlist_for_each_entry(pol, chain, bydst) {
1329			if (policy->priority >= pol->priority)
1330				newpos = &pol->bydst;
1331			else
1332				break;
1333		}
1334		if (newpos)
1335			hlist_add_behind_rcu(&policy->bydst, newpos);
1336		else
1337			hlist_add_head_rcu(&policy->bydst, chain);
1338	}
1339
1340out_unlock:
1341	__xfrm_policy_inexact_flush(net);
1342	write_seqcount_end(&xfrm_policy_hash_generation);
1343	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1344
1345	mutex_unlock(&hash_resize_mutex);
1346}
1347
1348void xfrm_policy_hash_rebuild(struct net *net)
1349{
1350	schedule_work(&net->xfrm.policy_hthresh.work);
1351}
1352EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
1353
1354/* Generate new index... KAME seems to generate them ordered by cost
1355 * of an absolute unpredictability of ordering of rules. This will not pass. */
1356static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
1357{
1358	static u32 idx_generator;
1359
1360	for (;;) {
1361		struct hlist_head *list;
1362		struct xfrm_policy *p;
1363		u32 idx;
1364		int found;
1365
1366		if (!index) {
1367			idx = (idx_generator | dir);
1368			idx_generator += 8;
1369		} else {
1370			idx = index;
1371			index = 0;
1372		}
1373
1374		if (idx == 0)
1375			idx = 8;
1376		list = net->xfrm.policy_byidx + idx_hash(net, idx);
1377		found = 0;
1378		hlist_for_each_entry(p, list, byidx) {
1379			if (p->index == idx) {
1380				found = 1;
1381				break;
1382			}
1383		}
1384		if (!found)
1385			return idx;
1386	}
1387}
1388
1389static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
1390{
1391	u32 *p1 = (u32 *) s1;
1392	u32 *p2 = (u32 *) s2;
1393	int len = sizeof(struct xfrm_selector) / sizeof(u32);
1394	int i;
1395
1396	for (i = 0; i < len; i++) {
1397		if (p1[i] != p2[i])
1398			return 1;
1399	}
1400
1401	return 0;
1402}
1403
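/* Move any packets waiting on the old policy's hold queue over to the
 * policy replacing it and re-arm the hold timer so they are processed
 * again right away.
 */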
1404static void xfrm_policy_requeue(struct xfrm_policy *old,
1405				struct xfrm_policy *new)
1406{
1407	struct xfrm_policy_queue *pq = &old->polq;
1408	struct sk_buff_head list;
1409
1410	if (skb_queue_empty(&pq->hold_queue))
1411		return;
1412
1413	__skb_queue_head_init(&list);
1414
1415	spin_lock_bh(&pq->hold_queue.lock);
1416	skb_queue_splice_init(&pq->hold_queue, &list);
1417	if (del_timer(&pq->hold_timer))
1418		xfrm_pol_put(old);
1419	spin_unlock_bh(&pq->hold_queue.lock);
1420
1421	pq = &new->polq;
1422
1423	spin_lock_bh(&pq->hold_queue.lock);
1424	skb_queue_splice(&list, &pq->hold_queue);
1425	pq->timeout = XFRM_QUEUE_TMO_MIN;
1426	if (!mod_timer(&pq->hold_timer, jiffies))
1427		xfrm_pol_hold(new);
1428	spin_unlock_bh(&pq->hold_queue.lock);
1429}
1430
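/* Policies are treated as equivalent for insert/replace purposes when
 * their mark value/mask pairs are identical, or when the masked mark of
 * @policy matches @pol and both have the same priority.
 */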
1431static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
1432				   struct xfrm_policy *pol)
1433{
1434	u32 mark = policy->mark.v & policy->mark.m;
1435
1436	if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
1437		return true;
1438
1439	if ((mark & pol->mark.m) == pol->mark.v &&
1440	    policy->priority == pol->priority)
1441		return true;
1442
1443	return false;
1444}
1445
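/* rhashtable callbacks for the inexact-bin table: bins are keyed by
 * (net, dir, type, family, if_id).
 */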
1446static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
1447{
1448	const struct xfrm_pol_inexact_key *k = data;
1449	u32 a = k->type << 24 | k->dir << 16 | k->family;
1450
1451	return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
1452			    seed);
1453}
1454
1455static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
1456{
1457	const struct xfrm_pol_inexact_bin *b = data;
1458
1459	return xfrm_pol_bin_key(&b->k, 0, seed);
1460}
1461
1462static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
1463			    const void *ptr)
1464{
1465	const struct xfrm_pol_inexact_key *key = arg->key;
1466	const struct xfrm_pol_inexact_bin *b = ptr;
1467	int ret;
1468
1469	if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
1470		return -1;
1471
1472	ret = b->k.dir ^ key->dir;
1473	if (ret)
1474		return ret;
1475
1476	ret = b->k.type ^ key->type;
1477	if (ret)
1478		return ret;
1479
1480	ret = b->k.family ^ key->family;
1481	if (ret)
1482		return ret;
1483
1484	return b->k.if_id ^ key->if_id;
1485}
1486
1487static const struct rhashtable_params xfrm_pol_inexact_params = {
1488	.head_offset		= offsetof(struct xfrm_pol_inexact_bin, head),
1489	.hashfn			= xfrm_pol_bin_key,
1490	.obj_hashfn		= xfrm_pol_bin_obj,
1491	.obj_cmpfn		= xfrm_pol_bin_cmp,
1492	.automatic_shrinking	= true,
1493};
1494
1495static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
1496					    struct xfrm_policy *policy)
1497{
1498	struct xfrm_policy *pol, *delpol = NULL;
1499	struct hlist_node *newpos = NULL;
1500	int i = 0;
1501
1502	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
1503		if (pol->type == policy->type &&
1504		    pol->if_id == policy->if_id &&
1505		    !selector_cmp(&pol->selector, &policy->selector) &&
1506		    xfrm_policy_mark_match(policy, pol) &&
1507		    xfrm_sec_ctx_match(pol->security, policy->security) &&
1508		    !WARN_ON(delpol)) {
1509			delpol = pol;
1510			if (policy->priority > pol->priority)
1511				continue;
1512		} else if (policy->priority >= pol->priority) {
1513			newpos = &pol->bydst_inexact_list;
1514			continue;
1515		}
1516		if (delpol)
1517			break;
1518	}
1519
1520	if (newpos)
1521		hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
1522	else
1523		hlist_add_head_rcu(&policy->bydst_inexact_list, chain);
1524
1525	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
1526		pol->pos = i;
1527		i++;
1528	}
1529}
1530
1531static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
1532						   struct xfrm_policy *policy,
1533						   bool excl)
1534{
1535	struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;
1536
1537	hlist_for_each_entry(pol, chain, bydst) {
1538		if (pol->type == policy->type &&
1539		    pol->if_id == policy->if_id &&
1540		    !selector_cmp(&pol->selector, &policy->selector) &&
1541		    xfrm_policy_mark_match(policy, pol) &&
1542		    xfrm_sec_ctx_match(pol->security, policy->security) &&
1543		    !WARN_ON(delpol)) {
1544			if (excl)
1545				return ERR_PTR(-EEXIST);
1546			delpol = pol;
1547			if (policy->priority > pol->priority)
1548				continue;
1549		} else if (policy->priority >= pol->priority) {
1550			newpos = pol;
1551			continue;
1552		}
1553		if (delpol)
1554			break;
1555	}
1556
1557	if (newpos)
1558		hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
1559	else
1560		hlist_add_head_rcu(&policy->bydst, chain);
1561
1562	return delpol;
1563}
1564
1565int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
1566{
1567	struct net *net = xp_net(policy);
1568	struct xfrm_policy *delpol;
1569	struct hlist_head *chain;
1570
1571	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1572	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
1573	if (chain)
1574		delpol = xfrm_policy_insert_list(chain, policy, excl);
1575	else
1576		delpol = xfrm_policy_inexact_insert(policy, dir, excl);
1577
1578	if (IS_ERR(delpol)) {
1579		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1580		return PTR_ERR(delpol);
1581	}
1582
1583	__xfrm_policy_link(policy, dir);
1584
1585	/* After the previous checks, family is either AF_INET or AF_INET6 */
1586	if (policy->family == AF_INET)
1587		rt_genid_bump_ipv4(net);
1588	else
1589		rt_genid_bump_ipv6(net);
1590
1591	if (delpol) {
1592		xfrm_policy_requeue(delpol, policy);
1593		__xfrm_policy_unlink(delpol, dir);
1594	}
1595	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
1596	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
1597	policy->curlft.add_time = ktime_get_real_seconds();
1598	policy->curlft.use_time = 0;
1599	if (!mod_timer(&policy->timer, jiffies + HZ))
1600		xfrm_pol_hold(policy);
1601	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1602
1603	if (delpol)
1604		xfrm_policy_kill(delpol);
1605	else if (xfrm_bydst_should_resize(net, dir, NULL))
1606		schedule_work(&net->xfrm.policy_hash_work);
1607
1608	return 0;
1609}
1610EXPORT_SYMBOL(xfrm_policy_insert);
1611
1612static struct xfrm_policy *
1613__xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
1614			u8 type, int dir,
1615			struct xfrm_selector *sel,
1616			struct xfrm_sec_ctx *ctx)
1617{
1618	struct xfrm_policy *pol;
1619
1620	if (!chain)
1621		return NULL;
1622
1623	hlist_for_each_entry(pol, chain, bydst) {
1624		if (pol->type == type &&
1625		    pol->if_id == if_id &&
1626		    (mark & pol->mark.m) == pol->mark.v &&
1627		    !selector_cmp(sel, &pol->selector) &&
1628		    xfrm_sec_ctx_match(ctx, pol->security))
1629			return pol;
1630	}
1631
1632	return NULL;
1633}
1634
1635struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
1636					  u8 type, int dir,
1637					  struct xfrm_selector *sel,
1638					  struct xfrm_sec_ctx *ctx, int delete,
1639					  int *err)
1640{
1641	struct xfrm_pol_inexact_bin *bin = NULL;
1642	struct xfrm_policy *pol, *ret = NULL;
1643	struct hlist_head *chain;
1644
1645	*err = 0;
1646	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1647	chain = policy_hash_bysel(net, sel, sel->family, dir);
1648	if (!chain) {
1649		struct xfrm_pol_inexact_candidates cand;
1650		int i;
1651
1652		bin = xfrm_policy_inexact_lookup(net, type,
1653						 sel->family, dir, if_id);
1654		if (!bin) {
1655			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1656			return NULL;
1657		}
1658
1659		if (!xfrm_policy_find_inexact_candidates(&cand, bin,
1660							 &sel->saddr,
1661							 &sel->daddr)) {
1662			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1663			return NULL;
1664		}
1665
1666		pol = NULL;
1667		for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
1668			struct xfrm_policy *tmp;
1669
1670			tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
1671						      if_id, type, dir,
1672						      sel, ctx);
1673			if (!tmp)
1674				continue;
1675
1676			if (!pol || tmp->pos < pol->pos)
1677				pol = tmp;
1678		}
1679	} else {
1680		pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
1681					      sel, ctx);
1682	}
1683
1684	if (pol) {
1685		xfrm_pol_hold(pol);
1686		if (delete) {
1687			*err = security_xfrm_policy_delete(pol->security);
1688			if (*err) {
1689				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1690				return pol;
1691			}
1692			__xfrm_policy_unlink(pol, dir);
1693		}
1694		ret = pol;
1695	}
1696	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1697
1698	if (ret && delete)
1699		xfrm_policy_kill(ret);
1700	if (bin && delete)
1701		xfrm_policy_inexact_prune_bin(bin);
1702	return ret;
1703}
1704EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
1705
1706struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id,
1707				     u8 type, int dir, u32 id, int delete,
1708				     int *err)
1709{
1710	struct xfrm_policy *pol, *ret;
1711	struct hlist_head *chain;
1712
1713	*err = -ENOENT;
1714	if (xfrm_policy_id2dir(id) != dir)
1715		return NULL;
1716
1717	*err = 0;
1718	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1719	chain = net->xfrm.policy_byidx + idx_hash(net, id);
1720	ret = NULL;
1721	hlist_for_each_entry(pol, chain, byidx) {
1722		if (pol->type == type && pol->index == id &&
1723		    pol->if_id == if_id &&
1724		    (mark & pol->mark.m) == pol->mark.v) {
1725			xfrm_pol_hold(pol);
1726			if (delete) {
1727				*err = security_xfrm_policy_delete(
1728								pol->security);
1729				if (*err) {
1730					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1731					return pol;
1732				}
1733				__xfrm_policy_unlink(pol, dir);
1734			}
1735			ret = pol;
1736			break;
1737		}
1738	}
1739	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1740
1741	if (ret && delete)
1742		xfrm_policy_kill(ret);
1743	return ret;
1744}
1745EXPORT_SYMBOL(xfrm_policy_byid);
1746
1747#ifdef CONFIG_SECURITY_NETWORK_XFRM
1748static inline int
1749xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1750{
1751	struct xfrm_policy *pol;
1752	int err = 0;
1753
1754	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1755		if (pol->walk.dead ||
1756		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
1757		    pol->type != type)
1758			continue;
1759
1760		err = security_xfrm_policy_delete(pol->security);
1761		if (err) {
1762			xfrm_audit_policy_delete(pol, 0, task_valid);
1763			return err;
1764		}
1765	}
1766	return err;
1767}
1768#else
1769static inline int
1770xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1771{
1772	return 0;
1773}
1774#endif
1775
1776int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
1777{
1778	int dir, err = 0, cnt = 0;
1779	struct xfrm_policy *pol;
1780
1781	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1782
1783	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
1784	if (err)
1785		goto out;
1786
1787again:
1788	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1789		dir = xfrm_policy_id2dir(pol->index);
1790		if (pol->walk.dead ||
1791		    dir >= XFRM_POLICY_MAX ||
1792		    pol->type != type)
1793			continue;
1794
1795		__xfrm_policy_unlink(pol, dir);
1796		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1797		cnt++;
1798		xfrm_audit_policy_delete(pol, 1, task_valid);
1799		xfrm_policy_kill(pol);
1800		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1801		goto again;
1802	}
1803	if (cnt)
1804		__xfrm_policy_inexact_flush(net);
1805	else
1806		err = -ESRCH;
1807out:
1808	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1809	return err;
1810}
1811EXPORT_SYMBOL(xfrm_policy_flush);
1812
1813int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1814		     int (*func)(struct xfrm_policy *, int, int, void*),
1815		     void *data)
1816{
1817	struct xfrm_policy *pol;
1818	struct xfrm_policy_walk_entry *x;
1819	int error = 0;
1820
1821	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
1822	    walk->type != XFRM_POLICY_TYPE_ANY)
1823		return -EINVAL;
1824
1825	if (list_empty(&walk->walk.all) && walk->seq != 0)
1826		return 0;
1827
1828	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1829	if (list_empty(&walk->walk.all))
1830		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
1831	else
1832		x = list_first_entry(&walk->walk.all,
1833				     struct xfrm_policy_walk_entry, all);
1834
1835	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1836		if (x->dead)
1837			continue;
1838		pol = container_of(x, struct xfrm_policy, walk);
1839		if (walk->type != XFRM_POLICY_TYPE_ANY &&
1840		    walk->type != pol->type)
1841			continue;
1842		error = func(pol, xfrm_policy_id2dir(pol->index),
1843			     walk->seq, data);
1844		if (error) {
1845			list_move_tail(&walk->walk.all, &x->all);
1846			goto out;
1847		}
1848		walk->seq++;
1849	}
1850	if (walk->seq == 0) {
1851		error = -ENOENT;
1852		goto out;
1853	}
1854	list_del_init(&walk->walk.all);
1855out:
1856	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1857	return error;
1858}
1859EXPORT_SYMBOL(xfrm_policy_walk);
1860
1861void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1862{
1863	INIT_LIST_HEAD(&walk->walk.all);
1864	walk->walk.dead = 1;
1865	walk->type = type;
1866	walk->seq = 0;
1867}
1868EXPORT_SYMBOL(xfrm_policy_walk_init);
1869
1870void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1871{
1872	if (list_empty(&walk->walk.all))
1873		return;
1874
1875	spin_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
1876	list_del(&walk->walk.all);
1877	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1878}
1879EXPORT_SYMBOL(xfrm_policy_walk_done);
1880
1881/*
1882 * Find policy to apply to this flow.
1883 *
1884 * Returns 0 if policy found, else an -errno.
1885 */
1886static int xfrm_policy_match(const struct xfrm_policy *pol,
1887			     const struct flowi *fl,
1888			     u8 type, u16 family, int dir, u32 if_id)
1889{
1890	const struct xfrm_selector *sel = &pol->selector;
1891	int ret = -ESRCH;
1892	bool match;
1893
1894	if (pol->family != family ||
1895	    pol->if_id != if_id ||
1896	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1897	    pol->type != type)
1898		return ret;
1899
1900	match = xfrm_selector_match(sel, fl, family);
1901	if (match)
1902		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
1903						  dir);
1904	return ret;
1905}
1906
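/* Lockless rbtree lookup of the node covering @addr.  The walk runs
 * under the bin's seqcount and is retried if a concurrent tree change
 * is detected.
 */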
1907static struct xfrm_pol_inexact_node *
1908xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
1909				seqcount_t *count,
1910				const xfrm_address_t *addr, u16 family)
1911{
1912	const struct rb_node *parent;
1913	int seq;
1914
1915again:
1916	seq = read_seqcount_begin(count);
1917
1918	parent = rcu_dereference_raw(r->rb_node);
1919	while (parent) {
1920		struct xfrm_pol_inexact_node *node;
1921		int delta;
1922
1923		node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
1924
1925		delta = xfrm_policy_addr_delta(addr, &node->addr,
1926					       node->prefixlen, family);
1927		if (delta < 0) {
1928			parent = rcu_dereference_raw(parent->rb_left);
1929			continue;
1930		} else if (delta > 0) {
1931			parent = rcu_dereference_raw(parent->rb_right);
1932			continue;
1933		}
1934
1935		return node;
1936	}
1937
1938	if (read_seqcount_retry(count, seq))
1939		goto again;
1940
1941	return NULL;
1942}
1943
1944static bool
1945xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
1946				    struct xfrm_pol_inexact_bin *b,
1947				    const xfrm_address_t *saddr,
1948				    const xfrm_address_t *daddr)
1949{
1950	struct xfrm_pol_inexact_node *n;
1951	u16 family;
1952
1953	if (!b)
1954		return false;
1955
1956	family = b->k.family;
1957	memset(cand, 0, sizeof(*cand));
1958	cand->res[XFRM_POL_CAND_ANY] = &b->hhead;
1959
1960	n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
1961					    family);
1962	if (n) {
1963		cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
1964		n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
1965						    family);
1966		if (n)
1967			cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
1968	}
1969
1970	n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
1971					    family);
1972	if (n)
1973		cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;
1974
1975	return true;
1976}
1977
1978static struct xfrm_pol_inexact_bin *
1979xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
1980			       u8 dir, u32 if_id)
1981{
1982	struct xfrm_pol_inexact_key k = {
1983		.family = family,
1984		.type = type,
1985		.dir = dir,
1986		.if_id = if_id,
1987	};
1988
1989	write_pnet(&k.net, net);
1990
1991	return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
1992				 xfrm_pol_inexact_params);
1993}
1994
1995static struct xfrm_pol_inexact_bin *
1996xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
1997			   u8 dir, u32 if_id)
1998{
1999	struct xfrm_pol_inexact_bin *bin;
2000
2001	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
2002
2003	rcu_read_lock();
2004	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2005	rcu_read_unlock();
2006
2007	return bin;
2008}
2009
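/* Scan one candidate hlist (kept sorted by ascending priority) and return
 * the best policy that is at least as good as @prefer: lower priority
 * always wins, and on a priority tie the entry with the smaller ->pos is
 * kept.
 */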
2010static struct xfrm_policy *
2011__xfrm_policy_eval_candidates(struct hlist_head *chain,
2012			      struct xfrm_policy *prefer,
2013			      const struct flowi *fl,
2014			      u8 type, u16 family, int dir, u32 if_id)
2015{
2016	u32 priority = prefer ? prefer->priority : ~0u;
2017	struct xfrm_policy *pol;
2018
2019	if (!chain)
2020		return NULL;
2021
2022	hlist_for_each_entry_rcu(pol, chain, bydst) {
2023		int err;
2024
2025		if (pol->priority > priority)
2026			break;
2027
2028		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
2029		if (err) {
2030			if (err != -ESRCH)
2031				return ERR_PTR(err);
2032
2033			continue;
2034		}
2035
2036		if (prefer) {
2037			/* matches.  Is it older than *prefer? */
2038			if (pol->priority == priority &&
2039			    prefer->pos < pol->pos)
2040				return prefer;
2041		}
2042
2043		return pol;
2044	}
2045
2046	return NULL;
2047}
2048
2049static struct xfrm_policy *
2050xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
2051			    struct xfrm_policy *prefer,
2052			    const struct flowi *fl,
2053			    u8 type, u16 family, int dir, u32 if_id)
2054{
2055	struct xfrm_policy *tmp;
2056	int i;
2057
2058	for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
2059		tmp = __xfrm_policy_eval_candidates(cand->res[i],
2060						    prefer,
2061						    fl, type, family, dir,
2062						    if_id);
2063		if (!tmp)
2064			continue;
2065
2066		if (IS_ERR(tmp))
2067			return tmp;
2068		prefer = tmp;
2069	}
2070
2071	return prefer;
2072}
2073
2074static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
2075						     const struct flowi *fl,
2076						     u16 family, u8 dir,
2077						     u32 if_id)
2078{
2079	struct xfrm_pol_inexact_candidates cand;
2080	const xfrm_address_t *daddr, *saddr;
2081	struct xfrm_pol_inexact_bin *bin;
2082	struct xfrm_policy *pol, *ret;
2083	struct hlist_head *chain;
2084	unsigned int sequence;
2085	int err;
2086
2087	daddr = xfrm_flowi_daddr(fl, family);
2088	saddr = xfrm_flowi_saddr(fl, family);
2089	if (unlikely(!daddr || !saddr))
2090		return NULL;
2091
2092	rcu_read_lock();
2093 retry:
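	/* The policy hash tables can be rebuilt concurrently; sample the
	 * generation seqcount here and restart the whole lookup (see the
	 * check at skip_inexact below) if it changed while we walked.
	 */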
2094	do {
2095		sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
2096		chain = policy_hash_direct(net, daddr, saddr, family, dir);
2097	} while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));
2098
2099	ret = NULL;
2100	hlist_for_each_entry_rcu(pol, chain, bydst) {
2101		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
2102		if (err) {
2103			if (err == -ESRCH)
2104				continue;
2105			else {
2106				ret = ERR_PTR(err);
2107				goto fail;
2108			}
2109		} else {
2110			ret = pol;
2111			break;
2112		}
2113	}
2114	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2115	if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
2116							 daddr))
2117		goto skip_inexact;
2118
2119	pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
2120					  family, dir, if_id);
2121	if (pol) {
2122		ret = pol;
2123		if (IS_ERR(pol))
2124			goto fail;
2125	}
2126
2127skip_inexact:
2128	if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
2129		goto retry;
2130
2131	if (ret && !xfrm_pol_hold_rcu(ret))
2132		goto retry;
2133fail:
2134	rcu_read_unlock();
2135
2136	return ret;
2137}
2138
2139static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
2140					      const struct flowi *fl,
2141					      u16 family, u8 dir, u32 if_id)
2142{
2143#ifdef CONFIG_XFRM_SUB_POLICY
2144	struct xfrm_policy *pol;
2145
2146	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
2147					dir, if_id);
2148	if (pol != NULL)
2149		return pol;
2150#endif
2151	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
2152					 dir, if_id);
2153}
2154
2155static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
2156						 const struct flowi *fl,
2157						 u16 family, u32 if_id)
2158{
2159	struct xfrm_policy *pol;
2160
2161	rcu_read_lock();
2162 again:
2163	pol = rcu_dereference(sk->sk_policy[dir]);
2164	if (pol != NULL) {
2165		bool match;
2166		int err = 0;
2167
2168		if (pol->family != family) {
2169			pol = NULL;
2170			goto out;
2171		}
2172
2173		match = xfrm_selector_match(&pol->selector, fl, family);
2174		if (match) {
2175			if ((sk->sk_mark & pol->mark.m) != pol->mark.v ||
2176			    pol->if_id != if_id) {
2177				pol = NULL;
2178				goto out;
2179			}
2180			err = security_xfrm_policy_lookup(pol->security,
2181						      fl->flowi_secid,
2182						      dir);
2183			if (!err) {
2184				if (!xfrm_pol_hold_rcu(pol))
2185					goto again;
2186			} else if (err == -ESRCH) {
2187				pol = NULL;
2188			} else {
2189				pol = ERR_PTR(err);
2190			}
2191		} else
2192			pol = NULL;
2193	}
2194out:
2195	rcu_read_unlock();
2196	return pol;
2197}
2198
2199static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
2200{
2201	struct net *net = xp_net(pol);
2202
2203	list_add(&pol->walk.all, &net->xfrm.policy_all);
2204	net->xfrm.policy_count[dir]++;
2205	xfrm_pol_hold(pol);
2206}
2207
2208static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
2209						int dir)
2210{
2211	struct net *net = xp_net(pol);
2212
2213	if (list_empty(&pol->walk.all))
2214		return NULL;
2215
2216	/* Socket policies are not hashed. */
2217	if (!hlist_unhashed(&pol->bydst)) {
2218		hlist_del_rcu(&pol->bydst);
2219		hlist_del_init(&pol->bydst_inexact_list);
2220		hlist_del(&pol->byidx);
2221	}
2222
2223	list_del_init(&pol->walk.all);
2224	net->xfrm.policy_count[dir]--;
2225
2226	return pol;
2227}
2228
2229static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
2230{
2231	__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
2232}
2233
2234static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
2235{
2236	__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
2237}
2238
2239int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
2240{
2241	struct net *net = xp_net(pol);
2242
2243	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2244	pol = __xfrm_policy_unlink(pol, dir);
2245	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2246	if (pol) {
2247		xfrm_policy_kill(pol);
2248		return 0;
2249	}
2250	return -ENOENT;
2251}
2252EXPORT_SYMBOL(xfrm_policy_delete);
2253
2254int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
2255{
2256	struct net *net = sock_net(sk);
2257	struct xfrm_policy *old_pol;
2258
2259#ifdef CONFIG_XFRM_SUB_POLICY
2260	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
2261		return -EINVAL;
2262#endif
2263
2264	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2265	old_pol = rcu_dereference_protected(sk->sk_policy[dir],
2266				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
2267	if (pol) {
2268		pol->curlft.add_time = ktime_get_real_seconds();
2269		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
2270		xfrm_sk_policy_link(pol, dir);
2271	}
2272	rcu_assign_pointer(sk->sk_policy[dir], pol);
2273	if (old_pol) {
2274		if (pol)
2275			xfrm_policy_requeue(old_pol, pol);
2276
2277		/* Unlinking always succeeds. This is the only function
2278		 * allowed to delete or replace a socket policy.
2279		 */
2280		xfrm_sk_policy_unlink(old_pol, dir);
2281	}
2282	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2283
2284	if (old_pol) {
2285		xfrm_policy_kill(old_pol);
2286	}
2287	return 0;
2288}
2289
2290static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
2291{
2292	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
2293	struct net *net = xp_net(old);
2294
2295	if (newp) {
2296		newp->selector = old->selector;
2297		if (security_xfrm_policy_clone(old->security,
2298					       &newp->security)) {
2299			kfree(newp);
2300			return NULL;  /* ENOMEM */
2301		}
2302		newp->lft = old->lft;
2303		newp->curlft = old->curlft;
2304		newp->mark = old->mark;
2305		newp->if_id = old->if_id;
2306		newp->action = old->action;
2307		newp->flags = old->flags;
2308		newp->xfrm_nr = old->xfrm_nr;
2309		newp->index = old->index;
2310		newp->type = old->type;
2311		newp->family = old->family;
2312		memcpy(newp->xfrm_vec, old->xfrm_vec,
2313		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
2314		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2315		xfrm_sk_policy_link(newp, dir);
2316		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2317		xfrm_pol_put(newp);
2318	}
2319	return newp;
2320}
2321
2322int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
2323{
2324	const struct xfrm_policy *p;
2325	struct xfrm_policy *np;
2326	int i, ret = 0;
2327
2328	rcu_read_lock();
2329	for (i = 0; i < 2; i++) {
2330		p = rcu_dereference(osk->sk_policy[i]);
2331		if (p) {
2332			np = clone_policy(p, i);
2333			if (unlikely(!np)) {
2334				ret = -ENOMEM;
2335				break;
2336			}
2337			rcu_assign_pointer(sk->sk_policy[i], np);
2338		}
2339	}
2340	rcu_read_unlock();
2341	return ret;
2342}
2343
2344static int
2345xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
2346	       xfrm_address_t *remote, unsigned short family, u32 mark)
2347{
2348	int err;
2349	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2350
2351	if (unlikely(afinfo == NULL))
2352		return -EINVAL;
2353	err = afinfo->get_saddr(net, oif, local, remote, mark);
2354	rcu_read_unlock();
2355	return err;
2356}
2357
2358/* Resolve list of templates for the flow, given policy. */
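/* Returns the number of states placed in @xfrm, or a negative error;
 * -EAGAIN means a required state is not (yet) available.  Optional
 * templates without a usable state are simply skipped.
 */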
2359
2360static int
2361xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
2362		      struct xfrm_state **xfrm, unsigned short family)
2363{
2364	struct net *net = xp_net(policy);
2365	int nx;
2366	int i, error;
2367	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
2368	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
2369	xfrm_address_t tmp;
2370
2371	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
2372		struct xfrm_state *x;
2373		xfrm_address_t *remote = daddr;
2374		xfrm_address_t *local  = saddr;
2375		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
2376
2377		if (tmpl->mode == XFRM_MODE_TUNNEL ||
2378		    tmpl->mode == XFRM_MODE_BEET) {
2379			remote = &tmpl->id.daddr;
2380			local = &tmpl->saddr;
2381			if (xfrm_addr_any(local, tmpl->encap_family)) {
2382				error = xfrm_get_saddr(net, fl->flowi_oif,
2383						       &tmp, remote,
2384						       tmpl->encap_family, 0);
2385				if (error)
2386					goto fail;
2387				local = &tmp;
2388			}
2389		}
2390
2391		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
2392				    family, policy->if_id);
2393
2394		if (x && x->km.state == XFRM_STATE_VALID) {
2395			xfrm[nx++] = x;
2396			daddr = remote;
2397			saddr = local;
2398			continue;
2399		}
2400		if (x) {
2401			error = (x->km.state == XFRM_STATE_ERROR ?
2402				 -EINVAL : -EAGAIN);
2403			xfrm_state_put(x);
2404		} else if (error == -ESRCH) {
2405			error = -EAGAIN;
2406		}
2407
2408		if (!tmpl->optional)
2409			goto fail;
2410	}
2411	return nx;
2412
2413fail:
2414	for (nx--; nx >= 0; nx--)
2415		xfrm_state_put(xfrm[nx]);
2416	return error;
2417}
2418
2419static int
2420xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
2421		  struct xfrm_state **xfrm, unsigned short family)
2422{
2423	struct xfrm_state *tp[XFRM_MAX_DEPTH];
2424	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
2425	int cnx = 0;
2426	int error;
2427	int ret;
2428	int i;
2429
2430	for (i = 0; i < npols; i++) {
2431		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
2432			error = -ENOBUFS;
2433			goto fail;
2434		}
2435
2436		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
2437		if (ret < 0) {
2438			error = ret;
2439			goto fail;
2440		} else
2441			cnx += ret;
2442	}
2443
2444	/* found states are sorted for outbound processing */
2445	if (npols > 1)
2446		xfrm_state_sort(xfrm, tpp, cnx, family);
2447
2448	return cnx;
2449
2450 fail:
2451	for (cnx--; cnx >= 0; cnx--)
2452		xfrm_state_put(tpp[cnx]);
2453	return error;
2454
2455}
2456
2457static int xfrm_get_tos(const struct flowi *fl, int family)
2458{
2459	if (family == AF_INET)
2460		return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;
2461
2462	return 0;
2463}
2464
2465static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
2466{
2467	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2468	struct dst_ops *dst_ops;
2469	struct xfrm_dst *xdst;
2470
2471	if (!afinfo)
2472		return ERR_PTR(-EINVAL);
2473
2474	switch (family) {
2475	case AF_INET:
2476		dst_ops = &net->xfrm.xfrm4_dst_ops;
2477		break;
2478#if IS_ENABLED(CONFIG_IPV6)
2479	case AF_INET6:
2480		dst_ops = &net->xfrm.xfrm6_dst_ops;
2481		break;
2482#endif
2483	default:
2484		BUG();
2485	}
2486	xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
2487
2488	if (likely(xdst)) {
2489		struct dst_entry *dst = &xdst->u.dst;
2490
2491		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
2492	} else
2493		xdst = ERR_PTR(-ENOBUFS);
2494
2495	rcu_read_unlock();
2496
2497	return xdst;
2498}
2499
2500static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
2501			   int nfheader_len)
2502{
2503	if (dst->ops->family == AF_INET6) {
2504		struct rt6_info *rt = (struct rt6_info *)dst;
2505		path->path_cookie = rt6_get_cookie(rt);
2506		path->u.rt6.rt6i_nfheader_len = nfheader_len;
2507	}
2508}
2509
2510static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
2511				const struct flowi *fl)
2512{
2513	const struct xfrm_policy_afinfo *afinfo =
2514		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
2515	int err;
2516
2517	if (!afinfo)
2518		return -EINVAL;
2519
2520	err = afinfo->fill_dst(xdst, dev, fl);
2521
2522	rcu_read_unlock();
2523
2524	return err;
2525}
2526
2527
2528/* Allocate chain of dst_entry's, attach known xfrm's, calculate
2529	 * all the metrics... In short, bundle a bundle.
2530 */
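/* The resulting bundle is a chain of xfrm_dst entries linked through
 * their child pointers, one per transformation in template order, with
 * the original route (xdst->route, xdst0->path) attached at the tail.
 */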
2531
2532static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
2533					    struct xfrm_state **xfrm,
2534					    struct xfrm_dst **bundle,
2535					    int nx,
2536					    const struct flowi *fl,
2537					    struct dst_entry *dst)
2538{
2539	const struct xfrm_state_afinfo *afinfo;
2540	const struct xfrm_mode *inner_mode;
2541	struct net *net = xp_net(policy);
2542	unsigned long now = jiffies;
2543	struct net_device *dev;
2544	struct xfrm_dst *xdst_prev = NULL;
2545	struct xfrm_dst *xdst0 = NULL;
2546	int i = 0;
2547	int err;
2548	int header_len = 0;
2549	int nfheader_len = 0;
2550	int trailer_len = 0;
2551	int tos;
2552	int family = policy->selector.family;
2553	xfrm_address_t saddr, daddr;
2554
2555	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
2556
2557	tos = xfrm_get_tos(fl, family);
2558
2559	dst_hold(dst);
2560
2561	for (; i < nx; i++) {
2562		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
2563		struct dst_entry *dst1 = &xdst->u.dst;
2564
2565		err = PTR_ERR(xdst);
2566		if (IS_ERR(xdst)) {
2567			dst_release(dst);
2568			goto put_states;
2569		}
2570
2571		bundle[i] = xdst;
2572		if (!xdst_prev)
2573			xdst0 = xdst;
2574		else
2575			/* Ref count is taken during xfrm_alloc_dst()
2576			 * No need to do dst_clone() on dst1
2577			 */
2578			xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
2579
2580		if (xfrm[i]->sel.family == AF_UNSPEC) {
2581			inner_mode = xfrm_ip2inner_mode(xfrm[i],
2582							xfrm_af2proto(family));
2583			if (!inner_mode) {
2584				err = -EAFNOSUPPORT;
2585				dst_release(dst);
2586				goto put_states;
2587			}
2588		} else
2589			inner_mode = &xfrm[i]->inner_mode;
2590
2591		xdst->route = dst;
2592		dst_copy_metrics(dst1, dst);
2593
2594		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
2595			__u32 mark = 0;
2596
2597			if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
2598				mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
2599
2600			family = xfrm[i]->props.family;
2601			dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
2602					      &saddr, &daddr, family, mark);
2603			err = PTR_ERR(dst);
2604			if (IS_ERR(dst))
2605				goto put_states;
2606		} else
2607			dst_hold(dst);
2608
2609		dst1->xfrm = xfrm[i];
2610		xdst->xfrm_genid = xfrm[i]->genid;
2611
2612		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2613		dst1->flags |= DST_HOST;
2614		dst1->lastuse = now;
2615
2616		dst1->input = dst_discard;
2617
2618		rcu_read_lock();
2619		afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
2620		if (likely(afinfo))
2621			dst1->output = afinfo->output;
2622		else
2623			dst1->output = dst_discard_out;
2624		rcu_read_unlock();
2625
2626		xdst_prev = xdst;
2627
2628		header_len += xfrm[i]->props.header_len;
2629		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
2630			nfheader_len += xfrm[i]->props.header_len;
2631		trailer_len += xfrm[i]->props.trailer_len;
2632	}
2633
2634	xfrm_dst_set_child(xdst_prev, dst);
2635	xdst0->path = dst;
2636
2637	err = -ENODEV;
2638	dev = dst->dev;
2639	if (!dev)
2640		goto free_dst;
2641
2642	xfrm_init_path(xdst0, dst, nfheader_len);
2643	xfrm_init_pmtu(bundle, nx);
2644
2645	for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
2646	     xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
2647		err = xfrm_fill_dst(xdst_prev, dev, fl);
2648		if (err)
2649			goto free_dst;
2650
2651		xdst_prev->u.dst.header_len = header_len;
2652		xdst_prev->u.dst.trailer_len = trailer_len;
2653		header_len -= xdst_prev->u.dst.xfrm->props.header_len;
2654		trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
2655	}
2656
2657	return &xdst0->u.dst;
2658
2659put_states:
2660	for (; i < nx; i++)
2661		xfrm_state_put(xfrm[i]);
2662free_dst:
2663	if (xdst0)
2664		dst_release_immediate(&xdst0->u.dst);
2665
2666	return ERR_PTR(err);
2667}
2668
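/* Expand pols[0] into the full set of applicable policies: with
 * CONFIG_XFRM_SUB_POLICY a matching main policy is looked up as well.
 * *num_xfrms is set to the total number of templates, or to -1 if any
 * policy does not allow the flow.
 */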
2669static int xfrm_expand_policies(const struct flowi *fl, u16 family,
2670				struct xfrm_policy **pols,
2671				int *num_pols, int *num_xfrms)
2672{
2673	int i;
2674
2675	if (*num_pols == 0 || !pols[0]) {
2676		*num_pols = 0;
2677		*num_xfrms = 0;
2678		return 0;
2679	}
2680	if (IS_ERR(pols[0]))
2681		return PTR_ERR(pols[0]);
2682
2683	*num_xfrms = pols[0]->xfrm_nr;
2684
2685#ifdef CONFIG_XFRM_SUB_POLICY
2686	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
2687	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2688		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
2689						    XFRM_POLICY_TYPE_MAIN,
2690						    fl, family,
2691						    XFRM_POLICY_OUT,
2692						    pols[0]->if_id);
2693		if (pols[1]) {
2694			if (IS_ERR(pols[1])) {
2695				xfrm_pols_put(pols, *num_pols);
2696				return PTR_ERR(pols[1]);
2697			}
2698			(*num_pols)++;
2699			(*num_xfrms) += pols[1]->xfrm_nr;
2700		}
2701	}
2702#endif
2703	for (i = 0; i < *num_pols; i++) {
2704		if (pols[i]->action != XFRM_POLICY_ALLOW) {
2705			*num_xfrms = -1;
2706			break;
2707		}
2708	}
2709
2710	return 0;
2711
2712}
2713
2714static struct xfrm_dst *
2715xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
2716			       const struct flowi *fl, u16 family,
2717			       struct dst_entry *dst_orig)
2718{
2719	struct net *net = xp_net(pols[0]);
2720	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
2721	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
2722	struct xfrm_dst *xdst;
2723	struct dst_entry *dst;
2724	int err;
2725
2726	/* Try to instantiate a bundle */
2727	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
2728	if (err <= 0) {
2729		if (err == 0)
2730			return NULL;
2731
2732		if (err != -EAGAIN)
2733			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2734		return ERR_PTR(err);
2735	}
2736
2737	dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
2738	if (IS_ERR(dst)) {
2739		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
2740		return ERR_CAST(dst);
2741	}
2742
2743	xdst = (struct xfrm_dst *)dst;
2744	xdst->num_xfrms = err;
2745	xdst->num_pols = num_pols;
2746	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2747	xdst->policy_genid = atomic_read(&pols[0]->genid);
2748
2749	return xdst;
2750}
2751
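/* Hold-queue timer: packets queued while states were still being
 * negotiated are looked up again and either transmitted or, if the
 * bundle is still a dummy (DST_XFRM_QUEUE), requeued with a doubled
 * timeout; once the timeout reaches XFRM_QUEUE_TMO_MAX the queue is
 * purged.
 */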
2752static void xfrm_policy_queue_process(struct timer_list *t)
2753{
2754	struct sk_buff *skb;
2755	struct sock *sk;
2756	struct dst_entry *dst;
2757	struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
2758	struct net *net = xp_net(pol);
2759	struct xfrm_policy_queue *pq = &pol->polq;
2760	struct flowi fl;
2761	struct sk_buff_head list;
2762
2763	spin_lock(&pq->hold_queue.lock);
2764	skb = skb_peek(&pq->hold_queue);
2765	if (!skb) {
2766		spin_unlock(&pq->hold_queue.lock);
2767		goto out;
2768	}
2769	dst = skb_dst(skb);
2770	sk = skb->sk;
2771	xfrm_decode_session(skb, &fl, dst->ops->family);
2772	spin_unlock(&pq->hold_queue.lock);
2773
2774	dst_hold(xfrm_dst_path(dst));
2775	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
2776	if (IS_ERR(dst))
2777		goto purge_queue;
2778
2779	if (dst->flags & DST_XFRM_QUEUE) {
2780		dst_release(dst);
2781
2782		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
2783			goto purge_queue;
2784
2785		pq->timeout = pq->timeout << 1;
2786		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
2787			xfrm_pol_hold(pol);
2788		goto out;
2789	}
2790
2791	dst_release(dst);
2792
2793	__skb_queue_head_init(&list);
2794
2795	spin_lock(&pq->hold_queue.lock);
2796	pq->timeout = 0;
2797	skb_queue_splice_init(&pq->hold_queue, &list);
2798	spin_unlock(&pq->hold_queue.lock);
2799
2800	while (!skb_queue_empty(&list)) {
2801		skb = __skb_dequeue(&list);
2802
2803		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
2804		dst_hold(xfrm_dst_path(skb_dst(skb)));
2805		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
2806		if (IS_ERR(dst)) {
2807			kfree_skb(skb);
2808			continue;
2809		}
2810
2811		nf_reset_ct(skb);
2812		skb_dst_drop(skb);
2813		skb_dst_set(skb, dst);
2814
2815		dst_output(net, skb->sk, skb);
2816	}
2817
2818out:
2819	xfrm_pol_put(pol);
2820	return;
2821
2822purge_queue:
2823	pq->timeout = 0;
2824	skb_queue_purge(&pq->hold_queue);
2825	xfrm_pol_put(pol);
2826}
2827
2828static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2829{
2830	unsigned long sched_next;
2831	struct dst_entry *dst = skb_dst(skb);
2832	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
2833	struct xfrm_policy *pol = xdst->pols[0];
2834	struct xfrm_policy_queue *pq = &pol->polq;
2835
2836	if (unlikely(skb_fclone_busy(sk, skb))) {
2837		kfree_skb(skb);
2838		return 0;
2839	}
2840
2841	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
2842		kfree_skb(skb);
2843		return -EAGAIN;
2844	}
2845
2846	skb_dst_force(skb);
2847
2848	spin_lock_bh(&pq->hold_queue.lock);
2849
2850	if (!pq->timeout)
2851		pq->timeout = XFRM_QUEUE_TMO_MIN;
2852
2853	sched_next = jiffies + pq->timeout;
2854
2855	if (del_timer(&pq->hold_timer)) {
2856		if (time_before(pq->hold_timer.expires, sched_next))
2857			sched_next = pq->hold_timer.expires;
2858		xfrm_pol_put(pol);
2859	}
2860
2861	__skb_queue_tail(&pq->hold_queue, skb);
2862	if (!mod_timer(&pq->hold_timer, sched_next))
2863		xfrm_pol_hold(pol);
2864
2865	spin_unlock_bh(&pq->hold_queue.lock);
2866
2867	return 0;
2868}
2869
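/* Build a dummy bundle whose output function queues packets via
 * xdst_queue_output() until the needed states become available.  The
 * queueing setup is only done when the caller allows queueing, larval
 * drop is disabled and there is at least one template to resolve.
 */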
2870static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
2871						 struct xfrm_flo *xflo,
2872						 const struct flowi *fl,
2873						 int num_xfrms,
2874						 u16 family)
2875{
2876	int err;
2877	struct net_device *dev;
2878	struct dst_entry *dst;
2879	struct dst_entry *dst1;
2880	struct xfrm_dst *xdst;
2881
2882	xdst = xfrm_alloc_dst(net, family);
2883	if (IS_ERR(xdst))
2884		return xdst;
2885
2886	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
2887	    net->xfrm.sysctl_larval_drop ||
2888	    num_xfrms <= 0)
2889		return xdst;
2890
2891	dst = xflo->dst_orig;
2892	dst1 = &xdst->u.dst;
2893	dst_hold(dst);
2894	xdst->route = dst;
2895
2896	dst_copy_metrics(dst1, dst);
2897
2898	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2899	dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
2900	dst1->lastuse = jiffies;
2901
2902	dst1->input = dst_discard;
2903	dst1->output = xdst_queue_output;
2904
2905	dst_hold(dst);
2906	xfrm_dst_set_child(xdst, dst);
2907	xdst->path = dst;
2908
2909	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
2910
2911	err = -ENODEV;
2912	dev = dst->dev;
2913	if (!dev)
2914		goto free_dst;
2915
2916	err = xfrm_fill_dst(xdst, dev, fl);
2917	if (err)
2918		goto free_dst;
2919
2920out:
2921	return xdst;
2922
2923free_dst:
2924	dst_release(dst1);
2925	xdst = ERR_PTR(err);
2926	goto out;
2927}
2928
2929static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
2930					   const struct flowi *fl,
2931					   u16 family, u8 dir,
2932					   struct xfrm_flo *xflo, u32 if_id)
2933{
2934	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2935	int num_pols = 0, num_xfrms = 0, err;
2936	struct xfrm_dst *xdst;
2937
2938	/* Resolve policies to use if we couldn't get them from
2939	 * previous cache entry */
2940	num_pols = 1;
2941	pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
2942	err = xfrm_expand_policies(fl, family, pols,
2943					   &num_pols, &num_xfrms);
2944	if (err < 0)
2945		goto inc_error;
2946	if (num_pols == 0)
2947		return NULL;
2948	if (num_xfrms <= 0)
2949		goto make_dummy_bundle;
2950
2951	xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
2952					      xflo->dst_orig);
2953	if (IS_ERR(xdst)) {
2954		err = PTR_ERR(xdst);
2955		if (err == -EREMOTE) {
2956			xfrm_pols_put(pols, num_pols);
2957			return NULL;
2958		}
2959
2960		if (err != -EAGAIN)
2961			goto error;
2962		goto make_dummy_bundle;
2963	} else if (xdst == NULL) {
2964		num_xfrms = 0;
2965		goto make_dummy_bundle;
2966	}
2967
2968	return xdst;
2969
2970make_dummy_bundle:
2971	/* We found policies, but there are no bundles to instantiate:
2972	 * either because the policy blocks, has no transformations, or
2973	 * we could not build a template (no xfrm_states). */
2974	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
2975	if (IS_ERR(xdst)) {
2976		xfrm_pols_put(pols, num_pols);
2977		return ERR_CAST(xdst);
2978	}
2979	xdst->num_pols = num_pols;
2980	xdst->num_xfrms = num_xfrms;
2981	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2982
2983	return xdst;
2984
2985inc_error:
2986	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2987error:
2988	xfrm_pols_put(pols, num_pols);
2989	return ERR_PTR(err);
2990}
2991
2992static struct dst_entry *make_blackhole(struct net *net, u16 family,
2993					struct dst_entry *dst_orig)
2994{
2995	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2996	struct dst_entry *ret;
2997
2998	if (!afinfo) {
2999		dst_release(dst_orig);
3000		return ERR_PTR(-EINVAL);
3001	} else {
3002		ret = afinfo->blackhole_route(net, dst_orig);
3003	}
3004	rcu_read_unlock();
3005
3006	return ret;
3007}
3008
3009/* Finds/creates a bundle for given flow and if_id
3010 *
3011 * At the moment we eat a raw IP route. Mostly to speed up lookups
3012 * on interfaces with disabled IPsec.
3013 *
3014 * xfrm_lookup uses an if_id of 0 by default, and is provided for
3015 * compatibility
3016 */
3017struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
3018					struct dst_entry *dst_orig,
3019					const struct flowi *fl,
3020					const struct sock *sk,
3021					int flags, u32 if_id)
3022{
3023	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3024	struct xfrm_dst *xdst;
3025	struct dst_entry *dst, *route;
3026	u16 family = dst_orig->ops->family;
3027	u8 dir = XFRM_POLICY_OUT;
3028	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
3029
3030	dst = NULL;
3031	xdst = NULL;
3032	route = NULL;
3033
3034	sk = sk_const_to_full_sk(sk);
3035	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
3036		num_pols = 1;
3037		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
3038						if_id);
3039		err = xfrm_expand_policies(fl, family, pols,
3040					   &num_pols, &num_xfrms);
3041		if (err < 0)
3042			goto dropdst;
3043
3044		if (num_pols) {
3045			if (num_xfrms <= 0) {
3046				drop_pols = num_pols;
3047				goto no_transform;
3048			}
3049
3050			xdst = xfrm_resolve_and_create_bundle(
3051					pols, num_pols, fl,
3052					family, dst_orig);
3053
3054			if (IS_ERR(xdst)) {
3055				xfrm_pols_put(pols, num_pols);
3056				err = PTR_ERR(xdst);
3057				if (err == -EREMOTE)
3058					goto nopol;
3059
3060				goto dropdst;
3061			} else if (xdst == NULL) {
3062				num_xfrms = 0;
3063				drop_pols = num_pols;
3064				goto no_transform;
3065			}
3066
3067			route = xdst->route;
3068		}
3069	}
3070
3071	if (xdst == NULL) {
3072		struct xfrm_flo xflo;
3073
3074		xflo.dst_orig = dst_orig;
3075		xflo.flags = flags;
3076
3077		/* To accelerate a bit...  */
3078		if ((dst_orig->flags & DST_NOXFRM) ||
3079		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
3080			goto nopol;
3081
3082		xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
3083		if (xdst == NULL)
3084			goto nopol;
3085		if (IS_ERR(xdst)) {
3086			err = PTR_ERR(xdst);
3087			goto dropdst;
3088		}
3089
3090		num_pols = xdst->num_pols;
3091		num_xfrms = xdst->num_xfrms;
3092		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
3093		route = xdst->route;
3094	}
3095
3096	dst = &xdst->u.dst;
3097	if (route == NULL && num_xfrms > 0) {
3098		/* The only case when xfrm_bundle_lookup() returns a
3099		 * bundle with null route, is when the template could
3100		 * not be resolved. It means policies are there, but
3101		 * bundle could not be created, since we don't yet
3102		 * have the xfrm_state's. We need to wait for KM to
3103		 * negotiate new SA's or bail out with error.*/
3104		if (net->xfrm.sysctl_larval_drop) {
3105			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3106			err = -EREMOTE;
3107			goto error;
3108		}
3109
3110		err = -EAGAIN;
3111
3112		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3113		goto error;
3114	}
3115
3116no_transform:
3117	if (num_pols == 0)
3118		goto nopol;
3119
3120	if ((flags & XFRM_LOOKUP_ICMP) &&
3121	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
3122		err = -ENOENT;
3123		goto error;
3124	}
3125
3126	for (i = 0; i < num_pols; i++)
3127		pols[i]->curlft.use_time = ktime_get_real_seconds();
3128
3129	if (num_xfrms < 0) {
3130		/* Prohibit the flow */
3131		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
3132		err = -EPERM;
3133		goto error;
3134	} else if (num_xfrms > 0) {
3135		/* Flow transformed */
3136		dst_release(dst_orig);
3137	} else {
3138		/* Flow passes untransformed */
3139		dst_release(dst);
3140		dst = dst_orig;
3141	}
3142ok:
3143	xfrm_pols_put(pols, drop_pols);
3144	if (dst && dst->xfrm &&
3145	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
3146		dst->flags |= DST_XFRM_TUNNEL;
3147	return dst;
3148
3149nopol:
3150	if (!(flags & XFRM_LOOKUP_ICMP)) {
3151		dst = dst_orig;
3152		goto ok;
3153	}
3154	err = -ENOENT;
3155error:
3156	dst_release(dst);
3157dropdst:
3158	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
3159		dst_release(dst_orig);
3160	xfrm_pols_put(pols, drop_pols);
3161	return ERR_PTR(err);
3162}
3163EXPORT_SYMBOL(xfrm_lookup_with_ifid);
3164
3165/* Main function: finds/creates a bundle for given flow.
3166 *
3167 * At the moment we eat a raw IP route. Mostly to speed up lookups
3168 * on interfaces with disabled IPsec.
3169 */
3170struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
3171			      const struct flowi *fl, const struct sock *sk,
3172			      int flags)
3173{
3174	return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
3175}
3176EXPORT_SYMBOL(xfrm_lookup);
3177
3178/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
3179 * Otherwise we may send out blackholed packets.
3180 */
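/* A -EREMOTE result from xfrm_lookup() (larval drop) is converted here
 * into an address-family specific blackhole route rather than being
 * returned as an error.
 */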
3181struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
3182				    const struct flowi *fl,
3183				    const struct sock *sk, int flags)
3184{
3185	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
3186					    flags | XFRM_LOOKUP_QUEUE |
3187					    XFRM_LOOKUP_KEEP_DST_REF);
3188
3189	if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
3190		return make_blackhole(net, dst_orig->ops->family, dst_orig);
3191
3192	if (IS_ERR(dst))
3193		dst_release(dst_orig);
3194
3195	return dst;
3196}
3197EXPORT_SYMBOL(xfrm_lookup_route);
3198
3199static inline int
3200xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
3201{
3202	struct sec_path *sp = skb_sec_path(skb);
3203	struct xfrm_state *x;
3204
3205	if (!sp || idx < 0 || idx >= sp->len)
3206		return 0;
3207	x = sp->xvec[idx];
3208	if (!x->type->reject)
3209		return 0;
3210	return x->type->reject(x, skb, fl);
3211}
3212
3213/* When skb is transformed back to its "native" form, we have to
3214	 * check policy restrictions. At the moment we do this in a maximally
3215	 * stupid way. Shame on me. :-) Of course, connected sockets must
3216	 * have the policy cached on them.
3217 */
3218
3219static inline int
3220xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
3221	      unsigned short family)
3222{
3223	if (xfrm_state_kern(x))
3224		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
3225	return	x->id.proto == tmpl->id.proto &&
3226		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
3227		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
3228		x->props.mode == tmpl->mode &&
3229		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
3230		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
3231		!(x->props.mode != XFRM_MODE_TRANSPORT &&
3232		  xfrm_state_addr_cmp(tmpl, x, family));
3233}
3234
3235/*
3236	 * 0 or more than 0 is returned when validation succeeded (either bypass
3237	 * because of optional transport mode, or the next index of the matched
3238	 * secpath state with the template).
3239 * -1 is returned when no matching template is found.
3240 * Otherwise "-2 - errored_index" is returned.
3241 */
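/* For example: starting at index 0 with a non-optional template that
 * matches sp->xvec[0], the function below returns 1 (idx + 1); if nothing
 * matches and sp->xvec[0] is a non-transport (tunnel) state, it returns
 * -2 - 0 = -2.
 */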
3242static inline int
3243xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
3244	       unsigned short family)
3245{
3246	int idx = start;
3247
3248	if (tmpl->optional) {
3249		if (tmpl->mode == XFRM_MODE_TRANSPORT)
3250			return start;
3251	} else
3252		start = -1;
3253	for (; idx < sp->len; idx++) {
3254		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
3255			return ++idx;
3256		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
3257			if (start == -1)
3258				start = -2-idx;
3259			break;
3260		}
3261	}
3262	return start;
3263}
3264
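/* Fill a flowi4 from the packet so it can be matched against policy
 * selectors; "reverse" swaps source/destination addresses and ports to
 * build the flow key for the opposite direction.
 */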
3265static void
3266decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
3267{
3268	const struct iphdr *iph = ip_hdr(skb);
3269	int ihl = iph->ihl;
3270	u8 *xprth = skb_network_header(skb) + ihl * 4;
3271	struct flowi4 *fl4 = &fl->u.ip4;
3272	int oif = 0;
3273
3274	if (skb_dst(skb) && skb_dst(skb)->dev)
3275		oif = skb_dst(skb)->dev->ifindex;
3276
3277	memset(fl4, 0, sizeof(struct flowi4));
3278	fl4->flowi4_mark = skb->mark;
3279	fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
3280
3281	fl4->flowi4_proto = iph->protocol;
3282	fl4->daddr = reverse ? iph->saddr : iph->daddr;
3283	fl4->saddr = reverse ? iph->daddr : iph->saddr;
3284	fl4->flowi4_tos = iph->tos;
3285
3286	if (!ip_is_fragment(iph)) {
3287		switch (iph->protocol) {
3288		case IPPROTO_UDP:
3289		case IPPROTO_UDPLITE:
3290		case IPPROTO_TCP:
3291		case IPPROTO_SCTP:
3292		case IPPROTO_DCCP:
3293			if (xprth + 4 < skb->data ||
3294			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
3295				__be16 *ports;
3296
3297				xprth = skb_network_header(skb) + ihl * 4;
3298				ports = (__be16 *)xprth;
3299
3300				fl4->fl4_sport = ports[!!reverse];
3301				fl4->fl4_dport = ports[!reverse];
3302			}
3303			break;
3304		case IPPROTO_ICMP:
3305			if (xprth + 2 < skb->data ||
3306			    pskb_may_pull(skb, xprth + 2 - skb->data)) {
3307				u8 *icmp;
3308
3309				xprth = skb_network_header(skb) + ihl * 4;
3310				icmp = xprth;
3311
3312				fl4->fl4_icmp_type = icmp[0];
3313				fl4->fl4_icmp_code = icmp[1];
3314			}
3315			break;
3316		case IPPROTO_ESP:
3317			if (xprth + 4 < skb->data ||
3318			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
3319				__be32 *ehdr;
3320
3321				xprth = skb_network_header(skb) + ihl * 4;
3322				ehdr = (__be32 *)xprth;
3323
3324				fl4->fl4_ipsec_spi = ehdr[0];
3325			}
3326			break;
3327		case IPPROTO_AH:
3328			if (xprth + 8 < skb->data ||
3329			    pskb_may_pull(skb, xprth + 8 - skb->data)) {
3330				__be32 *ah_hdr;
3331
3332				xprth = skb_network_header(skb) + ihl * 4;
3333				ah_hdr = (__be32 *)xprth;
3334
3335				fl4->fl4_ipsec_spi = ah_hdr[1];
3336			}
3337			break;
3338		case IPPROTO_COMP:
3339			if (xprth + 4 < skb->data ||
3340			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
3341				__be16 *ipcomp_hdr;
3342
3343				xprth = skb_network_header(skb) + ihl * 4;
3344				ipcomp_hdr = (__be16 *)xprth;
3345
3346				fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
3347			}
3348			break;
3349		case IPPROTO_GRE:
3350			if (xprth + 12 < skb->data ||
3351			    pskb_may_pull(skb, xprth + 12 - skb->data)) {
3352				__be16 *greflags;
3353				__be32 *gre_hdr;
3354
3355				xprth = skb_network_header(skb) + ihl * 4;
3356				greflags = (__be16 *)xprth;
3357				gre_hdr = (__be32 *)xprth;
3358
3359				if (greflags[0] & GRE_KEY) {
3360					if (greflags[0] & GRE_CSUM)
3361						gre_hdr++;
3362					fl4->fl4_gre_key = gre_hdr[1];
3363				}
3364			}
3365			break;
3366		default:
3367			fl4->fl4_ipsec_spi = 0;
3368			break;
3369		}
3370	}
3371}
3372
3373#if IS_ENABLED(CONFIG_IPV6)
3374static void
3375decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
3376{
3377	struct flowi6 *fl6 = &fl->u.ip6;
3378	int onlyproto = 0;
3379	const struct ipv6hdr *hdr = ipv6_hdr(skb);
3380	u32 offset = sizeof(*hdr);
3381	struct ipv6_opt_hdr *exthdr;
3382	const unsigned char *nh = skb_network_header(skb);
3383	u16 nhoff = IP6CB(skb)->nhoff;
3384	int oif = 0;
3385	u8 nexthdr;
3386
3387	if (!nhoff)
3388		nhoff = offsetof(struct ipv6hdr, nexthdr);
3389
3390	nexthdr = nh[nhoff];
3391
3392	if (skb_dst(skb) && skb_dst(skb)->dev)
3393		oif = skb_dst(skb)->dev->ifindex;
3394
3395	memset(fl6, 0, sizeof(struct flowi6));
3396	fl6->flowi6_mark = skb->mark;
3397	fl6->flowi6_oif = reverse ? skb->skb_iif : oif;
3398
3399	fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
3400	fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
3401
3402	while (nh + offset + sizeof(*exthdr) < skb->data ||
3403	       pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
3404		nh = skb_network_header(skb);
3405		exthdr = (struct ipv6_opt_hdr *)(nh + offset);
3406
3407		switch (nexthdr) {
3408		case NEXTHDR_FRAGMENT:
3409			onlyproto = 1;
3410			/* fall through */
3411		case NEXTHDR_ROUTING:
3412		case NEXTHDR_HOP:
3413		case NEXTHDR_DEST:
3414			offset += ipv6_optlen(exthdr);
3415			nexthdr = exthdr->nexthdr;
3416			exthdr = (struct ipv6_opt_hdr *)(nh + offset);
3417			break;
3418		case IPPROTO_UDP:
3419		case IPPROTO_UDPLITE:
3420		case IPPROTO_TCP:
3421		case IPPROTO_SCTP:
3422		case IPPROTO_DCCP:
3423			if (!onlyproto && (nh + offset + 4 < skb->data ||
3424			     pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
3425				__be16 *ports;
3426
3427				nh = skb_network_header(skb);
3428				ports = (__be16 *)(nh + offset);
3429				fl6->fl6_sport = ports[!!reverse];
3430				fl6->fl6_dport = ports[!reverse];
3431			}
3432			fl6->flowi6_proto = nexthdr;
3433			return;
3434		case IPPROTO_ICMPV6:
3435			if (!onlyproto && (nh + offset + 2 < skb->data ||
3436			    pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
3437				u8 *icmp;
3438
3439				nh = skb_network_header(skb);
3440				icmp = (u8 *)(nh + offset);
3441				fl6->fl6_icmp_type = icmp[0];
3442				fl6->fl6_icmp_code = icmp[1];
3443			}
3444			fl6->flowi6_proto = nexthdr;
3445			return;
3446#if IS_ENABLED(CONFIG_IPV6_MIP6)
3447		case IPPROTO_MH:
3448			offset += ipv6_optlen(exthdr);
3449			if (!onlyproto && (nh + offset + 3 < skb->data ||
3450			    pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
3451				struct ip6_mh *mh;
3452
3453				nh = skb_network_header(skb);
3454				mh = (struct ip6_mh *)(nh + offset);
3455				fl6->fl6_mh_type = mh->ip6mh_type;
3456			}
3457			fl6->flowi6_proto = nexthdr;
3458			return;
3459#endif
3460		/* XXX Why are these headers here? */
3461		case IPPROTO_AH:
3462		case IPPROTO_ESP:
3463		case IPPROTO_COMP:
3464		default:
3465			fl6->fl6_ipsec_spi = 0;
3466			fl6->flowi6_proto = nexthdr;
3467			return;
3468		}
3469	}
3470}
3471#endif
3472
3473int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
3474			  unsigned int family, int reverse)
3475{
3476	switch (family) {
3477	case AF_INET:
3478		decode_session4(skb, fl, reverse);
3479		break;
3480#if IS_ENABLED(CONFIG_IPV6)
3481	case AF_INET6:
3482		decode_session6(skb, fl, reverse);
3483		break;
3484#endif
3485	default:
3486		return -EAFNOSUPPORT;
3487	}
3488
3489	return security_xfrm_decode_session(skb, &fl->flowi_secid);
3490}
3491EXPORT_SYMBOL(__xfrm_decode_session);
3492
3493static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
3494{
3495	for (; k < sp->len; k++) {
3496		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
3497			*idxp = k;
3498			return 1;
3499		}
3500	}
3501
3502	return 0;
3503}
3504
3505int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3506			unsigned short family)
3507{
3508	struct net *net = dev_net(skb->dev);
3509	struct xfrm_policy *pol;
3510	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3511	int npols = 0;
3512	int xfrm_nr;
3513	int pi;
3514	int reverse;
3515	struct flowi fl;
3516	int xerr_idx = -1;
3517	const struct xfrm_if_cb *ifcb;
3518	struct sec_path *sp;
3519	struct xfrm_if *xi;
3520	u32 if_id = 0;
3521
3522	rcu_read_lock();
3523	ifcb = xfrm_if_get_cb();
3524
3525	if (ifcb) {
3526		xi = ifcb->decode_session(skb, family);
3527		if (xi) {
3528			if_id = xi->p.if_id;
3529			net = xi->net;
3530		}
3531	}
3532	rcu_read_unlock();
3533
3534	reverse = dir & ~XFRM_POLICY_MASK;
3535	dir &= XFRM_POLICY_MASK;
3536
3537	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
3538		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
3539		return 0;
3540	}
3541
3542	nf_nat_decode_session(skb, &fl, family);
3543
3544	/* First, check used SA against their selectors. */
3545	sp = skb_sec_path(skb);
3546	if (sp) {
3547		int i;
3548
3549		for (i = sp->len - 1; i >= 0; i--) {
3550			struct xfrm_state *x = sp->xvec[i];
3551			if (!xfrm_selector_match(&x->sel, &fl, family)) {
3552				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
3553				return 0;
3554			}
3555		}
3556	}
3557
3558	pol = NULL;
3559	sk = sk_to_full_sk(sk);
3560	if (sk && sk->sk_policy[dir]) {
3561		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
3562		if (IS_ERR(pol)) {
3563			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3564			return 0;
3565		}
3566	}
3567
3568	if (!pol)
3569		pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
3570
3571	if (IS_ERR(pol)) {
3572		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3573		return 0;
3574	}
3575
3576	if (!pol) {
3577		if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
3578			xfrm_secpath_reject(xerr_idx, skb, &fl);
3579			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3580			return 0;
3581		}
3582		return 1;
3583	}
3584
3585	pol->curlft.use_time = ktime_get_real_seconds();
3586
3587	pols[0] = pol;
3588	npols++;
3589#ifdef CONFIG_XFRM_SUB_POLICY
3590	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
3591		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
3592						    &fl, family,
3593						    XFRM_POLICY_IN, if_id);
3594		if (pols[1]) {
3595			if (IS_ERR(pols[1])) {
3596				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3597				return 0;
3598			}
3599			pols[1]->curlft.use_time = ktime_get_real_seconds();
3600			npols++;
3601		}
3602	}
3603#endif
3604
3605	if (pol->action == XFRM_POLICY_ALLOW) {
3606		static struct sec_path dummy;
3607		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
3608		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
3609		struct xfrm_tmpl **tpp = tp;
3610		int ti = 0;
3611		int i, k;
3612
3613		sp = skb_sec_path(skb);
3614		if (!sp)
3615			sp = &dummy;
3616
3617		for (pi = 0; pi < npols; pi++) {
3618			if (pols[pi] != pol &&
3619			    pols[pi]->action != XFRM_POLICY_ALLOW) {
3620				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3621				goto reject;
3622			}
3623			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
3624				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
3625				goto reject_error;
3626			}
3627			for (i = 0; i < pols[pi]->xfrm_nr; i++)
3628				tpp[ti++] = &pols[pi]->xfrm_vec[i];
3629		}
3630		xfrm_nr = ti;
3631		if (npols > 1) {
3632			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
3633			tpp = stp;
3634		}
3635
3636		/* For each tunnel xfrm, find the first matching tmpl.
3637		 * For each tmpl before that, find the corresponding xfrm.
3638		 * Order is _important_. Later we will implement
3639		 * some barriers, but at the moment barriers
3640		 * are implied between every two transformations.
3641		 */
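		/* k tracks how far into the secpath we have matched so far;
		 * a negative return from xfrm_policy_ok() means a template
		 * could not be satisfied, with values below -1 encoding the
		 * index of the offending secpath state (extracted below).
		 */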
3642		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
3643			k = xfrm_policy_ok(tpp[i], sp, k, family);
3644			if (k < 0) {
3645				if (k < -1)
3646					/* "-2 - errored_index" returned */
3647					xerr_idx = -(2+k);
3648				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3649				goto reject;
3650			}
3651		}
3652
3653		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
3654			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3655			goto reject;
3656		}
3657
3658		xfrm_pols_put(pols, npols);
3659		return 1;
3660	}
3661	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3662
3663reject:
3664	xfrm_secpath_reject(xerr_idx, skb, &fl);
3665reject_error:
3666	xfrm_pols_put(pols, npols);
3667	return 0;
3668}
3669EXPORT_SYMBOL(__xfrm_policy_check);
3670
3671int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
3672{
3673	struct net *net = dev_net(skb->dev);
3674	struct flowi fl;
3675	struct dst_entry *dst;
3676	int res = 1;
3677
3678	if (xfrm_decode_session(skb, &fl, family) < 0) {
3679		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3680		return 0;
3681	}
3682
3683	skb_dst_force(skb);
3684	if (!skb_dst(skb)) {
3685		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3686		return 0;
3687	}
3688
3689	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
3690	if (IS_ERR(dst)) {
3691		res = 0;
3692		dst = NULL;
3693	}
3694	skb_dst_set(skb, dst);
3695	return res;
3696}
3697EXPORT_SYMBOL(__xfrm_route_forward);
3698
3699/* Optimize later using cookies and generation ids. */
3700
3701static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
3702{
3703	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
3704	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
3705	 * get validated by dst_ops->check on every use.  We do this
3706	 * because when a normal route referenced by an XFRM dst is
3707	 * obsoleted we do not go looking around for all parent
3708	 * referencing XFRM dsts so that we can invalidate them.  It
3709	 * is just too much work.  Instead we make the checks here on
3710	 * every use.  For example:
3711	 *
3712	 *	XFRM dst A --> IPv4 dst X
3713	 *
3714	 * X is the "xdst->route" of A (X is also the "dst->path" of A
3715	 * in this example).  If X is marked obsolete, "A" will not
3716	 * notice.  That's what we are validating here via the
3717	 * stale_bundle() check.
3718	 *
3719	 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
3720	 * be marked on it.
3721	 * This will force stale_bundle() to fail on any xdst bundle with
3722	 * this dst linked in it.
3723	 */
3724	if (dst->obsolete < 0 && !stale_bundle(dst))
3725		return dst;
3726
3727	return NULL;
3728}
3729
3730static int stale_bundle(struct dst_entry *dst)
3731{
3732	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
3733}
3734
3735void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
3736{
3737	while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
3738		dst->dev = dev_net(dev)->loopback_dev;
3739		dev_hold(dst->dev);
3740		dev_put(dev);
3741	}
3742}
3743EXPORT_SYMBOL(xfrm_dst_ifdown);
3744
3745static void xfrm_link_failure(struct sk_buff *skb)
3746{
3747	/* Impossible. Such a dst must be popped before it reaches the point of failure. */
3748}
3749
3750static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
3751{
3752	if (dst) {
3753		if (dst->obsolete) {
3754			dst_release(dst);
3755			dst = NULL;
3756		}
3757	}
3758	return dst;
3759}
3760
3761static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
3762{
3763	while (nr--) {
3764		struct xfrm_dst *xdst = bundle[nr];
3765		u32 pmtu, route_mtu_cached;
3766		struct dst_entry *dst;
3767
3768		dst = &xdst->u.dst;
3769		pmtu = dst_mtu(xfrm_dst_child(dst));
3770		xdst->child_mtu_cached = pmtu;
3771
3772		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
3773
3774		route_mtu_cached = dst_mtu(xdst->route);
3775		xdst->route_mtu_cached = route_mtu_cached;
3776
3777		if (pmtu > route_mtu_cached)
3778			pmtu = route_mtu_cached;
3779
3780		dst_metric_set(dst, RTAX_MTU, pmtu);
3781	}
3782}
3783
3784/* Check that the bundle accepts the flow and its components are
3785 * still valid.
3786 */
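/* Besides validating each entry, this also refreshes the cached child and
 * route MTUs and, when one of them changed, propagates the new minimum
 * back up the chain.
 */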
3787
3788static int xfrm_bundle_ok(struct xfrm_dst *first)
3789{
3790	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
3791	struct dst_entry *dst = &first->u.dst;
3792	struct xfrm_dst *xdst;
3793	int start_from, nr;
3794	u32 mtu;
3795
3796	if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
3797	    (dst->dev && !netif_running(dst->dev)))
3798		return 0;
3799
3800	if (dst->flags & DST_XFRM_QUEUE)
3801		return 1;
3802
3803	start_from = nr = 0;
3804	do {
3805		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
3806
3807		if (dst->xfrm->km.state != XFRM_STATE_VALID)
3808			return 0;
3809		if (xdst->xfrm_genid != dst->xfrm->genid)
3810			return 0;
3811		if (xdst->num_pols > 0 &&
3812		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
3813			return 0;
3814
3815		bundle[nr++] = xdst;
3816
3817		mtu = dst_mtu(xfrm_dst_child(dst));
3818		if (xdst->child_mtu_cached != mtu) {
3819			start_from = nr;
3820			xdst->child_mtu_cached = mtu;
3821		}
3822
3823		if (!dst_check(xdst->route, xdst->route_cookie))
3824			return 0;
3825		mtu = dst_mtu(xdst->route);
3826		if (xdst->route_mtu_cached != mtu) {
3827			start_from = nr;
3828			xdst->route_mtu_cached = mtu;
3829		}
3830
3831		dst = xfrm_dst_child(dst);
3832	} while (dst->xfrm);
3833
3834	if (likely(!start_from))
3835		return 1;
3836
3837	xdst = bundle[start_from - 1];
3838	mtu = xdst->child_mtu_cached;
3839	while (start_from--) {
3840		dst = &xdst->u.dst;
3841
3842		mtu = xfrm_state_mtu(dst->xfrm, mtu);
3843		if (mtu > xdst->route_mtu_cached)
3844			mtu = xdst->route_mtu_cached;
3845		dst_metric_set(dst, RTAX_MTU, mtu);
3846		if (!start_from)
3847			break;
3848
3849		xdst = bundle[start_from - 1];
3850		xdst->child_mtu_cached = mtu;
3851	}
3852
3853	return 1;
3854}
3855
3856static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
3857{
3858	return dst_metric_advmss(xfrm_dst_path(dst));
3859}
3860
3861static unsigned int xfrm_mtu(const struct dst_entry *dst)
3862{
3863	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
3864
3865	return mtu ? : dst_mtu(xfrm_dst_path(dst));
3866}
3867
3868static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
3869					const void *daddr)
3870{
3871	while (dst->xfrm) {
3872		const struct xfrm_state *xfrm = dst->xfrm;
3873
3874		dst = xfrm_dst_child(dst);
3875
3876		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
3877			continue;
3878		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
3879			daddr = xfrm->coaddr;
3880		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
3881			daddr = &xfrm->id.daddr;
3882	}
3883	return daddr;
3884}
3885
3886static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
3887					   struct sk_buff *skb,
3888					   const void *daddr)
3889{
3890	const struct dst_entry *path = xfrm_dst_path(dst);
3891
3892	if (!skb)
3893		daddr = xfrm_get_dst_nexthop(dst, daddr);
3894	return path->ops->neigh_lookup(path, skb, daddr);
3895}
3896
3897static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
3898{
3899	const struct dst_entry *path = xfrm_dst_path(dst);
3900
3901	daddr = xfrm_get_dst_nexthop(dst, daddr);
3902	path->ops->confirm_neigh(path, daddr);
3903}
3904
3905int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
3906{
3907	int err = 0;
3908
3909	if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
3910		return -EAFNOSUPPORT;
3911
3912	spin_lock(&xfrm_policy_afinfo_lock);
3913	if (unlikely(xfrm_policy_afinfo[family] != NULL))
3914		err = -EEXIST;
3915	else {
3916		struct dst_ops *dst_ops = afinfo->dst_ops;
3917		if (likely(dst_ops->kmem_cachep == NULL))
3918			dst_ops->kmem_cachep = xfrm_dst_cache;
3919		if (likely(dst_ops->check == NULL))
3920			dst_ops->check = xfrm_dst_check;
3921		if (likely(dst_ops->default_advmss == NULL))
3922			dst_ops->default_advmss = xfrm_default_advmss;
3923		if (likely(dst_ops->mtu == NULL))
3924			dst_ops->mtu = xfrm_mtu;
3925		if (likely(dst_ops->negative_advice == NULL))
3926			dst_ops->negative_advice = xfrm_negative_advice;
3927		if (likely(dst_ops->link_failure == NULL))
3928			dst_ops->link_failure = xfrm_link_failure;
3929		if (likely(dst_ops->neigh_lookup == NULL))
3930			dst_ops->neigh_lookup = xfrm_neigh_lookup;
3931		if (likely(!dst_ops->confirm_neigh))
3932			dst_ops->confirm_neigh = xfrm_confirm_neigh;
3933		rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
3934	}
3935	spin_unlock(&xfrm_policy_afinfo_lock);
3936
3937	return err;
3938}
3939EXPORT_SYMBOL(xfrm_policy_register_afinfo);
3940
3941void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
3942{
3943	struct dst_ops *dst_ops = afinfo->dst_ops;
3944	int i;
3945
3946	for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
3947		if (xfrm_policy_afinfo[i] != afinfo)
3948			continue;
3949		RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
3950		break;
3951	}
3952
3953	synchronize_rcu();
3954
3955	dst_ops->kmem_cachep = NULL;
3956	dst_ops->check = NULL;
3957	dst_ops->negative_advice = NULL;
3958	dst_ops->link_failure = NULL;
3959}
3960EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
3961
3962void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
3963{
3964	spin_lock(&xfrm_if_cb_lock);
3965	rcu_assign_pointer(xfrm_if_cb, ifcb);
3966	spin_unlock(&xfrm_if_cb_lock);
3967}
3968EXPORT_SYMBOL(xfrm_if_register_cb);
3969
3970void xfrm_if_unregister_cb(void)
3971{
3972	RCU_INIT_POINTER(xfrm_if_cb, NULL);
3973	synchronize_rcu();
3974}
3975EXPORT_SYMBOL(xfrm_if_unregister_cb);
3976
3977#ifdef CONFIG_XFRM_STATISTICS
3978static int __net_init xfrm_statistics_init(struct net *net)
3979{
3980	int rv;
3981	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
3982	if (!net->mib.xfrm_statistics)
3983		return -ENOMEM;
3984	rv = xfrm_proc_init(net);
3985	if (rv < 0)
3986		free_percpu(net->mib.xfrm_statistics);
3987	return rv;
3988}
3989
3990static void xfrm_statistics_fini(struct net *net)
3991{
3992	xfrm_proc_fini(net);
3993	free_percpu(net->mib.xfrm_statistics);
3994}
3995#else
3996static int __net_init xfrm_statistics_init(struct net *net)
3997{
3998	return 0;
3999}
4000
4001static void xfrm_statistics_fini(struct net *net)
4002{
4003}
4004#endif
4005
4006static int __net_init xfrm_policy_init(struct net *net)
4007{
4008	unsigned int hmask, sz;
4009	int dir, err;
4010
4011	if (net_eq(net, &init_net)) {
4012		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
4013					   sizeof(struct xfrm_dst),
4014					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4015					   NULL);
4016		err = rhashtable_init(&xfrm_policy_inexact_table,
4017				      &xfrm_pol_inexact_params);
4018		BUG_ON(err);
4019	}
4020
4021	hmask = 8 - 1;
4022	sz = (hmask+1) * sizeof(struct hlist_head);
4023
4024	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
4025	if (!net->xfrm.policy_byidx)
4026		goto out_byidx;
4027	net->xfrm.policy_idx_hmask = hmask;
4028
4029	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4030		struct xfrm_policy_hash *htab;
4031
4032		net->xfrm.policy_count[dir] = 0;
4033		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
4034		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
4035
4036		htab = &net->xfrm.policy_bydst[dir];
4037		htab->table = xfrm_hash_alloc(sz);
4038		if (!htab->table)
4039			goto out_bydst;
4040		htab->hmask = hmask;
4041		htab->dbits4 = 32;
4042		htab->sbits4 = 32;
4043		htab->dbits6 = 128;
4044		htab->sbits6 = 128;
4045	}
4046	net->xfrm.policy_hthresh.lbits4 = 32;
4047	net->xfrm.policy_hthresh.rbits4 = 32;
4048	net->xfrm.policy_hthresh.lbits6 = 128;
4049	net->xfrm.policy_hthresh.rbits6 = 128;
4050
4051	seqlock_init(&net->xfrm.policy_hthresh.lock);
4052
4053	INIT_LIST_HEAD(&net->xfrm.policy_all);
4054	INIT_LIST_HEAD(&net->xfrm.inexact_bins);
4055	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
4056	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
 
 
4057	return 0;
4058
4059out_bydst:
4060	for (dir--; dir >= 0; dir--) {
4061		struct xfrm_policy_hash *htab;
4062
4063		htab = &net->xfrm.policy_bydst[dir];
4064		xfrm_hash_free(htab->table, sz);
4065	}
4066	xfrm_hash_free(net->xfrm.policy_byidx, sz);
4067out_byidx:
4068	return -ENOMEM;
4069}
4070
4071static void xfrm_policy_fini(struct net *net)
4072{
4073	struct xfrm_pol_inexact_bin *b, *t;
4074	unsigned int sz;
4075	int dir;
4076
4077	flush_work(&net->xfrm.policy_hash_work);
4078#ifdef CONFIG_XFRM_SUB_POLICY
4079	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
4080#endif
4081	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
4082
4083	WARN_ON(!list_empty(&net->xfrm.policy_all));
4084
4085	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4086		struct xfrm_policy_hash *htab;
4087
4088		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
4089
4090		htab = &net->xfrm.policy_bydst[dir];
4091		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
4092		WARN_ON(!hlist_empty(htab->table));
4093		xfrm_hash_free(htab->table, sz);
4094	}
4095
4096	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
4097	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
4098	xfrm_hash_free(net->xfrm.policy_byidx, sz);
4099
4100	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4101	list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
4102		__xfrm_policy_inexact_prune_bin(b, true);
4103	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4104}
4105
4106static int __net_init xfrm_net_init(struct net *net)
4107{
4108	int rv;
4109
4110	/* Initialize the per-net locks here */
4111	spin_lock_init(&net->xfrm.xfrm_state_lock);
4112	spin_lock_init(&net->xfrm.xfrm_policy_lock);
4113	mutex_init(&net->xfrm.xfrm_cfg_mutex);
4114
4115	rv = xfrm_statistics_init(net);
4116	if (rv < 0)
4117		goto out_statistics;
4118	rv = xfrm_state_init(net);
4119	if (rv < 0)
4120		goto out_state;
4121	rv = xfrm_policy_init(net);
4122	if (rv < 0)
4123		goto out_policy;
4124	rv = xfrm_sysctl_init(net);
4125	if (rv < 0)
4126		goto out_sysctl;
4127
4128	return 0;
4129
4130out_sysctl:
4131	xfrm_policy_fini(net);
4132out_policy:
4133	xfrm_state_fini(net);
4134out_state:
4135	xfrm_statistics_fini(net);
4136out_statistics:
4137	return rv;
4138}
4139
4140static void __net_exit xfrm_net_exit(struct net *net)
4141{
 
4142	xfrm_sysctl_fini(net);
4143	xfrm_policy_fini(net);
4144	xfrm_state_fini(net);
4145	xfrm_statistics_fini(net);
4146}
4147
4148static struct pernet_operations __net_initdata xfrm_net_ops = {
4149	.init = xfrm_net_init,
4150	.exit = xfrm_net_exit,
4151};
4152
4153void __init xfrm_init(void)
4154{
4155	register_pernet_subsys(&xfrm_net_ops);
4156	xfrm_dev_init();
4157	seqcount_init(&xfrm_policy_hash_generation);
4158	xfrm_input_init();
4159
4160	RCU_INIT_POINTER(xfrm_if_cb, NULL);
4161	synchronize_rcu();
4162}
4163
4164#ifdef CONFIG_AUDITSYSCALL
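/* Audit helpers: the common part appends the policy's security context
 * (alg/doi/obj), if any, plus the selector's source and destination
 * addresses; src_prefixlen/dst_prefixlen are only emitted when the
 * selector is not a plain host match (prefix != 32 or != 128).
 */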
4165static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
4166					 struct audit_buffer *audit_buf)
4167{
4168	struct xfrm_sec_ctx *ctx = xp->security;
4169	struct xfrm_selector *sel = &xp->selector;
4170
4171	if (ctx)
4172		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
4173				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
4174
4175	switch (sel->family) {
4176	case AF_INET:
4177		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
4178		if (sel->prefixlen_s != 32)
4179			audit_log_format(audit_buf, " src_prefixlen=%d",
4180					 sel->prefixlen_s);
4181		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
4182		if (sel->prefixlen_d != 32)
4183			audit_log_format(audit_buf, " dst_prefixlen=%d",
4184					 sel->prefixlen_d);
4185		break;
4186	case AF_INET6:
4187		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
4188		if (sel->prefixlen_s != 128)
4189			audit_log_format(audit_buf, " src_prefixlen=%d",
4190					 sel->prefixlen_s);
4191		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
4192		if (sel->prefixlen_d != 128)
4193			audit_log_format(audit_buf, " dst_prefixlen=%d",
4194					 sel->prefixlen_d);
4195		break;
4196	}
4197}
4198
4199void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
4200{
4201	struct audit_buffer *audit_buf;
4202
4203	audit_buf = xfrm_audit_start("SPD-add");
4204	if (audit_buf == NULL)
4205		return;
4206	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4207	audit_log_format(audit_buf, " res=%u", result);
4208	xfrm_audit_common_policyinfo(xp, audit_buf);
4209	audit_log_end(audit_buf);
4210}
4211EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
4212
4213void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
4214			      bool task_valid)
4215{
4216	struct audit_buffer *audit_buf;
4217
4218	audit_buf = xfrm_audit_start("SPD-delete");
4219	if (audit_buf == NULL)
4220		return;
4221	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4222	audit_log_format(audit_buf, " res=%u", result);
4223	xfrm_audit_common_policyinfo(xp, audit_buf);
4224	audit_log_end(audit_buf);
4225}
4226EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
4227#endif
4228
4229#ifdef CONFIG_XFRM_MIGRATE
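/* XFRM_MIGRATE support: moves existing states and policy templates to new
 * endpoint addresses without rekeying, as needed e.g. by Mobile IPv6 and
 * MOBIKE-style IKE daemons.  xfrm_migrate() below runs the whole sequence:
 * sanity checks, policy lookup, state cloning, template update, old-state
 * deletion, and finally announcing the result through km_migrate().
 */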
4230static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
4231					const struct xfrm_selector *sel_tgt)
4232{
4233	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
4234		if (sel_tgt->family == sel_cmp->family &&
4235		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
4236				    sel_cmp->family) &&
4237		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
4238				    sel_cmp->family) &&
4239		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
4240		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
4241			return true;
4242		}
4243	} else {
4244		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
4245			return true;
4246		}
4247	}
4248	return false;
4249}
4250
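/* Find the policy whose selector the migrate request refers to.  Like the
 * normal lookup path, the exact bydst hash chain is scanned first and the
 * inexact list is only consulted for entries with a better (lower)
 * priority value.  The policy is returned with a reference held.
 */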
4251static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
4252						    u8 dir, u8 type, struct net *net)
4253{
4254	struct xfrm_policy *pol, *ret = NULL;
4255	struct hlist_head *chain;
4256	u32 priority = ~0U;
4257
4258	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4259	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
4260	hlist_for_each_entry(pol, chain, bydst) {
4261		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
4262		    pol->type == type) {
4263			ret = pol;
4264			priority = ret->priority;
4265			break;
4266		}
4267	}
4268	chain = &net->xfrm.policy_inexact[dir];
4269	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
4270		if ((pol->priority >= priority) && ret)
4271			break;
4272
4273		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
4274		    pol->type == type) {
4275			ret = pol;
4276			break;
4277		}
4278	}
4279
4280	xfrm_pol_hold(ret);
4281
4282	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4283
4284	return ret;
4285}
4286
4287static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
4288{
4289	int match = 0;
4290
4291	if (t->mode == m->mode && t->id.proto == m->proto &&
4292	    (m->reqid == 0 || t->reqid == m->reqid)) {
4293		switch (t->mode) {
4294		case XFRM_MODE_TUNNEL:
4295		case XFRM_MODE_BEET:
4296			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
4297					    m->old_family) &&
4298			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
4299					    m->old_family)) {
4300				match = 1;
4301			}
4302			break;
4303		case XFRM_MODE_TRANSPORT:
 4304			/* In transport mode the template does not store
 4305			   any IP addresses, so we only compare the mode
 4306			   and protocol. */
4307			match = 1;
4308			break;
4309		default:
4310			break;
4311		}
4312	}
4313	return match;
4314}
4315
4316/* update endpoint address(es) of template(s) */
4317static int xfrm_policy_migrate(struct xfrm_policy *pol,
4318			       struct xfrm_migrate *m, int num_migrate)
4319{
4320	struct xfrm_migrate *mp;
4321	int i, j, n = 0;
4322
4323	write_lock_bh(&pol->lock);
4324	if (unlikely(pol->walk.dead)) {
4325		/* target policy has been deleted */
4326		write_unlock_bh(&pol->lock);
4327		return -ENOENT;
4328	}
4329
4330	for (i = 0; i < pol->xfrm_nr; i++) {
4331		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
4332			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
4333				continue;
4334			n++;
4335			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
4336			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
4337				continue;
4338			/* update endpoints */
4339			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
4340			       sizeof(pol->xfrm_vec[i].id.daddr));
4341			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
4342			       sizeof(pol->xfrm_vec[i].saddr));
4343			pol->xfrm_vec[i].encap_family = mp->new_family;
4344			/* flush bundles */
4345			atomic_inc(&pol->genid);
4346		}
4347	}
4348
4349	write_unlock_bh(&pol->lock);
4350
4351	if (!n)
4352		return -ENODATA;
4353
4354	return 0;
4355}
4356
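/* Validate a migrate batch: between 1 and XFRM_MAX_DEPTH entries, no
 * wildcard (all-zero) new addresses, and no two entries describing the
 * same old (daddr, saddr, proto, mode, reqid, family) tuple.
 */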
4357static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
4358{
4359	int i, j;
4360
4361	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
4362		return -EINVAL;
4363
4364	for (i = 0; i < num_migrate; i++) {
4365		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
4366		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
4367			return -EINVAL;
4368
4369		/* check if there is any duplicated entry */
4370		for (j = i + 1; j < num_migrate; j++) {
4371			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
4372				    sizeof(m[i].old_daddr)) &&
4373			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
4374				    sizeof(m[i].old_saddr)) &&
4375			    m[i].proto == m[j].proto &&
4376			    m[i].mode == m[j].mode &&
4377			    m[i].reqid == m[j].reqid &&
4378			    m[i].old_family == m[j].old_family)
4379				return -EINVAL;
4380		}
4381	}
4382
4383	return 0;
4384}
4385
4386int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
4387		 struct xfrm_migrate *m, int num_migrate,
4388		 struct xfrm_kmaddress *k, struct net *net,
4389		 struct xfrm_encap_tmpl *encap)
4390{
4391	int i, err, nx_cur = 0, nx_new = 0;
4392	struct xfrm_policy *pol = NULL;
4393	struct xfrm_state *x, *xc;
4394	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
4395	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
4396	struct xfrm_migrate *mp;
4397
4398	/* Stage 0 - sanity checks */
4399	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
4400		goto out;
4401
4402	if (dir >= XFRM_POLICY_MAX) {
4403		err = -EINVAL;
4404		goto out;
4405	}
4406
4407	/* Stage 1 - find policy */
4408	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
4409		err = -ENOENT;
4410		goto out;
4411	}
4412
4413	/* Stage 2 - find and update state(s) */
4414	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
4415		if ((x = xfrm_migrate_state_find(mp, net))) {
4416			x_cur[nx_cur] = x;
4417			nx_cur++;
4418			xc = xfrm_state_migrate(x, mp, encap);
4419			if (xc) {
4420				x_new[nx_new] = xc;
4421				nx_new++;
4422			} else {
4423				err = -ENODATA;
4424				goto restore_state;
4425			}
4426		}
4427	}
4428
4429	/* Stage 3 - update policy */
4430	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
4431		goto restore_state;
4432
4433	/* Stage 4 - delete old state(s) */
4434	if (nx_cur) {
4435		xfrm_states_put(x_cur, nx_cur);
4436		xfrm_states_delete(x_cur, nx_cur);
4437	}
4438
4439	/* Stage 5 - announce */
4440	km_migrate(sel, dir, type, m, num_migrate, k, encap);
4441
4442	xfrm_pol_put(pol);
4443
4444	return 0;
4445out:
4446	return err;
4447
4448restore_state:
4449	if (pol)
4450		xfrm_pol_put(pol);
4451	if (nx_cur)
4452		xfrm_states_put(x_cur, nx_cur);
4453	if (nx_new)
4454		xfrm_states_delete(x_new, nx_new);
4455
4456	return err;
4457}
4458EXPORT_SYMBOL(xfrm_migrate);
4459#endif
v4.6
 
  15
  16#include <linux/err.h>
  17#include <linux/slab.h>
  18#include <linux/kmod.h>
  19#include <linux/list.h>
  20#include <linux/spinlock.h>
  21#include <linux/workqueue.h>
  22#include <linux/notifier.h>
  23#include <linux/netdevice.h>
  24#include <linux/netfilter.h>
  25#include <linux/module.h>
  26#include <linux/cache.h>
 
  27#include <linux/audit.h>
 
 
  28#include <net/dst.h>
  29#include <net/flow.h>
  30#include <net/xfrm.h>
  31#include <net/ip.h>
  32#ifdef CONFIG_XFRM_STATISTICS
  33#include <net/snmp.h>
  34#endif
  35
  36#include "xfrm_hash.h"
  37
  38#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
  39#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
  40#define XFRM_MAX_QUEUE_LEN	100
  41
  42struct xfrm_flo {
  43	struct dst_entry *dst_orig;
  44	u8 flags;
  45};
  46
  47static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
  48static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
  49						__read_mostly;
  50
  51static struct kmem_cache *xfrm_dst_cache __read_mostly;
  52
  53static void xfrm_init_pmtu(struct dst_entry *dst);
  54static int stale_bundle(struct dst_entry *dst);
  55static int xfrm_bundle_ok(struct xfrm_dst *xdst);
  56static void xfrm_policy_queue_process(unsigned long arg);
  57
  58static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
  59static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
  60						int dir);
  61
  62static inline bool
  63__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
  64{
  65	const struct flowi4 *fl4 = &fl->u.ip4;
  66
  67	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
  68		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
  69		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
  70		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
  71		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
  72		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
  73}
  74
  75static inline bool
  76__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
  77{
  78	const struct flowi6 *fl6 = &fl->u.ip6;
  79
  80	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
  81		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
  82		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
  83		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
  84		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
  85		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
  86}
  87
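/* Selector matching: addresses are compared under their prefix lengths,
 * ports under their masks, and a zero proto/ifindex acts as a wildcard.
 * For instance, a hypothetical selector with daddr 10.0.0.0,
 * prefixlen_d = 8, dport = htons(80), dport_mask = 0xffff would match any
 * flow towards 10.x.x.x port 80, whatever the source.
 */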
  88bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
  89			 unsigned short family)
  90{
  91	switch (family) {
  92	case AF_INET:
  93		return __xfrm4_selector_match(sel, fl);
  94	case AF_INET6:
  95		return __xfrm6_selector_match(sel, fl);
  96	}
  97	return false;
  98}
  99
 100static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
 101{
 102	struct xfrm_policy_afinfo *afinfo;
 103
 104	if (unlikely(family >= NPROTO))
 105		return NULL;
 106	rcu_read_lock();
 107	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
 108	if (unlikely(!afinfo))
 109		rcu_read_unlock();
 110	return afinfo;
 111}
 112
 113static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
 
 114{
 115	rcu_read_unlock();
 116}
 117
 118static inline struct dst_entry *__xfrm_dst_lookup(struct net *net,
 119						  int tos, int oif,
 120						  const xfrm_address_t *saddr,
 121						  const xfrm_address_t *daddr,
 122						  int family)
 123{
 124	struct xfrm_policy_afinfo *afinfo;
 125	struct dst_entry *dst;
 126
 127	afinfo = xfrm_policy_get_afinfo(family);
 128	if (unlikely(afinfo == NULL))
 129		return ERR_PTR(-EAFNOSUPPORT);
 130
 131	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr);
 132
 133	xfrm_policy_put_afinfo(afinfo);
 134
 135	return dst;
 136}
 
 137
 138static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
 139						int tos, int oif,
 140						xfrm_address_t *prev_saddr,
 141						xfrm_address_t *prev_daddr,
 142						int family)
 143{
 144	struct net *net = xs_net(x);
 145	xfrm_address_t *saddr = &x->props.saddr;
 146	xfrm_address_t *daddr = &x->id.daddr;
 147	struct dst_entry *dst;
 148
 149	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
 150		saddr = x->coaddr;
 151		daddr = prev_daddr;
 152	}
 153	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
 154		saddr = prev_saddr;
 155		daddr = x->coaddr;
 156	}
 157
 158	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family);
 159
 160	if (!IS_ERR(dst)) {
 161		if (prev_saddr != saddr)
 162			memcpy(prev_saddr, saddr,  sizeof(*prev_saddr));
 163		if (prev_daddr != daddr)
 164			memcpy(prev_daddr, daddr,  sizeof(*prev_daddr));
 165	}
 166
 167	return dst;
 168}
 169
 170static inline unsigned long make_jiffies(long secs)
 171{
 172	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
 173		return MAX_SCHEDULE_TIMEOUT-1;
 174	else
 175		return secs*HZ;
 176}
 177
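/* Policy lifetime timer: a hard add/use expiry deletes the policy and
 * reports it to the key managers with hard == 1; a soft expiry only warns
 * (hard == 0) and re-arms roughly XFRM_KM_TIMEOUT later; otherwise the
 * timer is re-armed for the nearest remaining deadline.
 */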
 178static void xfrm_policy_timer(unsigned long data)
 179{
 180	struct xfrm_policy *xp = (struct xfrm_policy *)data;
 181	unsigned long now = get_seconds();
 182	long next = LONG_MAX;
 183	int warn = 0;
 184	int dir;
 185
 186	read_lock(&xp->lock);
 187
 188	if (unlikely(xp->walk.dead))
 189		goto out;
 190
 191	dir = xfrm_policy_id2dir(xp->index);
 192
 193	if (xp->lft.hard_add_expires_seconds) {
 194		long tmo = xp->lft.hard_add_expires_seconds +
 195			xp->curlft.add_time - now;
 196		if (tmo <= 0)
 197			goto expired;
 198		if (tmo < next)
 199			next = tmo;
 200	}
 201	if (xp->lft.hard_use_expires_seconds) {
 202		long tmo = xp->lft.hard_use_expires_seconds +
 203			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
 204		if (tmo <= 0)
 205			goto expired;
 206		if (tmo < next)
 207			next = tmo;
 208	}
 209	if (xp->lft.soft_add_expires_seconds) {
 210		long tmo = xp->lft.soft_add_expires_seconds +
 211			xp->curlft.add_time - now;
 212		if (tmo <= 0) {
 213			warn = 1;
 214			tmo = XFRM_KM_TIMEOUT;
 215		}
 216		if (tmo < next)
 217			next = tmo;
 218	}
 219	if (xp->lft.soft_use_expires_seconds) {
 220		long tmo = xp->lft.soft_use_expires_seconds +
 221			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
 222		if (tmo <= 0) {
 223			warn = 1;
 224			tmo = XFRM_KM_TIMEOUT;
 225		}
 226		if (tmo < next)
 227			next = tmo;
 228	}
 229
 230	if (warn)
 231		km_policy_expired(xp, dir, 0, 0);
 232	if (next != LONG_MAX &&
 233	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
 234		xfrm_pol_hold(xp);
 235
 236out:
 237	read_unlock(&xp->lock);
 238	xfrm_pol_put(xp);
 239	return;
 240
 241expired:
 242	read_unlock(&xp->lock);
 243	if (!xfrm_policy_delete(xp, dir))
 244		km_policy_expired(xp, dir, 1, 0);
 245	xfrm_pol_put(xp);
 246}
 247
 248static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
 249{
 250	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
 251
 252	if (unlikely(pol->walk.dead))
 253		flo = NULL;
 254	else
 255		xfrm_pol_hold(pol);
 256
 257	return flo;
 258}
 259
 260static int xfrm_policy_flo_check(struct flow_cache_object *flo)
 261{
 262	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
 263
 264	return !pol->walk.dead;
 265}
 266
 267static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
 268{
 269	xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
 270}
 271
 272static const struct flow_cache_ops xfrm_policy_fc_ops = {
 273	.get = xfrm_policy_flo_get,
 274	.check = xfrm_policy_flo_check,
 275	.delete = xfrm_policy_flo_delete,
 276};
 277
  278/* Allocate an xfrm_policy. Not used directly here; it is meant to be
  279 * used by the pfkeyv2 SPD calls.
  280 */
 281
 282struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
 283{
 284	struct xfrm_policy *policy;
 285
 286	policy = kzalloc(sizeof(struct xfrm_policy), gfp);
 287
 288	if (policy) {
 289		write_pnet(&policy->xp_net, net);
 290		INIT_LIST_HEAD(&policy->walk.all);
 
 291		INIT_HLIST_NODE(&policy->bydst);
 292		INIT_HLIST_NODE(&policy->byidx);
 293		rwlock_init(&policy->lock);
 294		atomic_set(&policy->refcnt, 1);
 295		skb_queue_head_init(&policy->polq.hold_queue);
 296		setup_timer(&policy->timer, xfrm_policy_timer,
 297				(unsigned long)policy);
 298		setup_timer(&policy->polq.hold_timer, xfrm_policy_queue_process,
 299			    (unsigned long)policy);
 300		policy->flo.ops = &xfrm_policy_fc_ops;
 301	}
 302	return policy;
 303}
 304EXPORT_SYMBOL(xfrm_policy_alloc);
 305
 306static void xfrm_policy_destroy_rcu(struct rcu_head *head)
 307{
 308	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
 309
 310	security_xfrm_policy_free(policy->security);
 311	kfree(policy);
 312}
 313
  314/* Destroy xfrm_policy: descendant resources must have been released by this point. */
 315
 316void xfrm_policy_destroy(struct xfrm_policy *policy)
 317{
 318	BUG_ON(!policy->walk.dead);
 319
 320	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
 321		BUG();
 322
 323	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
 324}
 325EXPORT_SYMBOL(xfrm_policy_destroy);
 326
  327/* Rule must be locked. Release descendant resources, announce
  328 * the entry dead. The rule must already be unlinked from the lists.
  329 */
 330
 331static void xfrm_policy_kill(struct xfrm_policy *policy)
 332{
 333	policy->walk.dead = 1;
 334
 335	atomic_inc(&policy->genid);
 336
 337	if (del_timer(&policy->polq.hold_timer))
 338		xfrm_pol_put(policy);
 339	skb_queue_purge(&policy->polq.hold_queue);
 340
 341	if (del_timer(&policy->timer))
 342		xfrm_pol_put(policy);
 343
 344	xfrm_pol_put(policy);
 345}
 346
 347static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
 348
 349static inline unsigned int idx_hash(struct net *net, u32 index)
 350{
 351	return __idx_hash(index, net->xfrm.policy_idx_hmask);
 352}
 353
 354/* calculate policy hash thresholds */
 355static void __get_hash_thresh(struct net *net,
 356			      unsigned short family, int dir,
 357			      u8 *dbits, u8 *sbits)
 358{
 359	switch (family) {
 360	case AF_INET:
 361		*dbits = net->xfrm.policy_bydst[dir].dbits4;
 362		*sbits = net->xfrm.policy_bydst[dir].sbits4;
 363		break;
 364
 365	case AF_INET6:
 366		*dbits = net->xfrm.policy_bydst[dir].dbits6;
 367		*sbits = net->xfrm.policy_bydst[dir].sbits6;
 368		break;
 369
 370	default:
 371		*dbits = 0;
 372		*sbits = 0;
 373	}
 374}
 375
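/* Chain selection: __sel_hash() only hashes selectors whose prefix
 * lengths reach the per-direction dbits/sbits thresholds; anything
 * shorter hashes to hmask + 1 and therefore ends up on the per-direction
 * inexact list instead of the bydst table.
 */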
 376static struct hlist_head *policy_hash_bysel(struct net *net,
 377					    const struct xfrm_selector *sel,
 378					    unsigned short family, int dir)
 379{
 380	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 381	unsigned int hash;
 382	u8 dbits;
 383	u8 sbits;
 384
 385	__get_hash_thresh(net, family, dir, &dbits, &sbits);
 386	hash = __sel_hash(sel, family, hmask, dbits, sbits);
 387
 388	return (hash == hmask + 1 ?
 389		&net->xfrm.policy_inexact[dir] :
 390		net->xfrm.policy_bydst[dir].table + hash);
 
 
 391}
 392
 393static struct hlist_head *policy_hash_direct(struct net *net,
 394					     const xfrm_address_t *daddr,
 395					     const xfrm_address_t *saddr,
 396					     unsigned short family, int dir)
 397{
 398	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 399	unsigned int hash;
 400	u8 dbits;
 401	u8 sbits;
 402
 403	__get_hash_thresh(net, family, dir, &dbits, &sbits);
 404	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
 405
 406	return net->xfrm.policy_bydst[dir].table + hash;
 
 407}
 408
 409static void xfrm_dst_hash_transfer(struct net *net,
 410				   struct hlist_head *list,
 411				   struct hlist_head *ndsttable,
 412				   unsigned int nhashmask,
 413				   int dir)
 414{
 415	struct hlist_node *tmp, *entry0 = NULL;
 416	struct xfrm_policy *pol;
 417	unsigned int h0 = 0;
 418	u8 dbits;
 419	u8 sbits;
 420
 421redo:
 422	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
 423		unsigned int h;
 424
 425		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
 426		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
 427				pol->family, nhashmask, dbits, sbits);
 428		if (!entry0) {
 429			hlist_del(&pol->bydst);
 430			hlist_add_head(&pol->bydst, ndsttable+h);
 431			h0 = h;
 432		} else {
 433			if (h != h0)
 434				continue;
 435			hlist_del(&pol->bydst);
 436			hlist_add_behind(&pol->bydst, entry0);
 437		}
 438		entry0 = &pol->bydst;
 439	}
 440	if (!hlist_empty(list)) {
 441		entry0 = NULL;
 442		goto redo;
 443	}
 444}
 445
 446static void xfrm_idx_hash_transfer(struct hlist_head *list,
 447				   struct hlist_head *nidxtable,
 448				   unsigned int nhashmask)
 449{
 450	struct hlist_node *tmp;
 451	struct xfrm_policy *pol;
 452
 453	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
 454		unsigned int h;
 455
 456		h = __idx_hash(pol->index, nhashmask);
 457		hlist_add_head(&pol->byidx, nidxtable+h);
 458	}
 459}
 460
 461static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
 462{
 463	return ((old_hmask + 1) << 1) - 1;
 464}
 465
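/* Hash table growth: triggered from the policy_hash_work worker once a
 * direction holds more policies than buckets.  The new mask doubles the
 * bucket count (((hmask + 1) << 1) - 1) and every chain is rehashed under
 * the policy write lock before the old table is freed.
 */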
 466static void xfrm_bydst_resize(struct net *net, int dir)
 467{
 468	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 469	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
 470	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
 471	struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
 472	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
 
 473	int i;
 474
 475	if (!ndst)
 476		return;
 477
 478	write_lock_bh(&net->xfrm.xfrm_policy_lock);
 479
 480	for (i = hmask; i >= 0; i--)
 481		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
 482
 483	net->xfrm.policy_bydst[dir].table = ndst;
 484	net->xfrm.policy_bydst[dir].hmask = nhashmask;
 485
 486	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 487
 488	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
 489}
 490
 491static void xfrm_byidx_resize(struct net *net, int total)
 492{
 493	unsigned int hmask = net->xfrm.policy_idx_hmask;
 494	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
 495	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
 496	struct hlist_head *oidx = net->xfrm.policy_byidx;
 497	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
 498	int i;
 499
 500	if (!nidx)
 501		return;
 502
 503	write_lock_bh(&net->xfrm.xfrm_policy_lock);
 504
 505	for (i = hmask; i >= 0; i--)
 506		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
 507
 508	net->xfrm.policy_byidx = nidx;
 509	net->xfrm.policy_idx_hmask = nhashmask;
 510
 511	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 512
 513	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
 514}
 515
 516static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
 517{
 518	unsigned int cnt = net->xfrm.policy_count[dir];
 519	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 520
 521	if (total)
 522		*total += cnt;
 523
 524	if ((hmask + 1) < xfrm_policy_hashmax &&
 525	    cnt > hmask)
 526		return 1;
 527
 528	return 0;
 529}
 530
 531static inline int xfrm_byidx_should_resize(struct net *net, int total)
 532{
 533	unsigned int hmask = net->xfrm.policy_idx_hmask;
 534
 535	if ((hmask + 1) < xfrm_policy_hashmax &&
 536	    total > hmask)
 537		return 1;
 538
 539	return 0;
 540}
 541
 542void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
 543{
 544	read_lock_bh(&net->xfrm.xfrm_policy_lock);
 545	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
 546	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
 547	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
 548	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
 549	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
 550	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
 551	si->spdhcnt = net->xfrm.policy_idx_hmask;
 552	si->spdhmcnt = xfrm_policy_hashmax;
 553	read_unlock_bh(&net->xfrm.xfrm_policy_lock);
 554}
 555EXPORT_SYMBOL(xfrm_spd_getinfo);
 556
 557static DEFINE_MUTEX(hash_resize_mutex);
 558static void xfrm_hash_resize(struct work_struct *work)
 559{
 560	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
 561	int dir, total;
 562
 563	mutex_lock(&hash_resize_mutex);
 564
 565	total = 0;
 566	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
 567		if (xfrm_bydst_should_resize(net, dir, &total))
 568			xfrm_bydst_resize(net, dir);
 569	}
 570	if (xfrm_byidx_should_resize(net, total))
 571		xfrm_byidx_resize(net, total);
 572
 573	mutex_unlock(&hash_resize_mutex);
 574}
 575
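/* Hash threshold rebuild: scheduled through xfrm_policy_hash_rebuild()
 * when the SPD hash thresholds are changed.  The new lbits/rbits values
 * are read under the hthresh seqlock, the bydst and inexact chains are
 * reset with the new dbits/sbits, and every policy is re-inserted in
 * creation order so that entries of equal priority keep their ordering.
 */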
 576static void xfrm_hash_rebuild(struct work_struct *work)
 577{
 578	struct net *net = container_of(work, struct net,
 579				       xfrm.policy_hthresh.work);
 580	unsigned int hmask;
 581	struct xfrm_policy *pol;
 582	struct xfrm_policy *policy;
 583	struct hlist_head *chain;
 584	struct hlist_head *odst;
 585	struct hlist_node *newpos;
 586	int i;
 587	int dir;
 588	unsigned seq;
 589	u8 lbits4, rbits4, lbits6, rbits6;
 590
 591	mutex_lock(&hash_resize_mutex);
 592
 593	/* read selector prefixlen thresholds */
 594	do {
 595		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
 596
 597		lbits4 = net->xfrm.policy_hthresh.lbits4;
 598		rbits4 = net->xfrm.policy_hthresh.rbits4;
 599		lbits6 = net->xfrm.policy_hthresh.lbits6;
 600		rbits6 = net->xfrm.policy_hthresh.rbits6;
 601	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
 602
 603	write_lock_bh(&net->xfrm.xfrm_policy_lock);
 604
 605	/* reset the bydst and inexact table in all directions */
 606	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
 607		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
 608		hmask = net->xfrm.policy_bydst[dir].hmask;
 609		odst = net->xfrm.policy_bydst[dir].table;
 610		for (i = hmask; i >= 0; i--)
 611			INIT_HLIST_HEAD(odst + i);
 
 
 612		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
 613			/* dir out => dst = remote, src = local */
 614			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
 615			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
 616			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
 617			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
 618		} else {
 619			/* dir in/fwd => dst = local, src = remote */
 620			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
 621			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
 622			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
 623			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
 624		}
 625	}
 626
 627	/* re-insert all policies by order of creation */
 628	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
 629		newpos = NULL;
 630		chain = policy_hash_bysel(net, &policy->selector,
 631					  policy->family,
 632					  xfrm_policy_id2dir(policy->index));
 633		hlist_for_each_entry(pol, chain, bydst) {
 634			if (policy->priority >= pol->priority)
 635				newpos = &pol->bydst;
 636			else
 637				break;
 638		}
 639		if (newpos)
 640			hlist_add_behind(&policy->bydst, newpos);
 641		else
 642			hlist_add_head(&policy->bydst, chain);
 643	}
 644
 645	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 646
 647	mutex_unlock(&hash_resize_mutex);
 648}
 649
 650void xfrm_policy_hash_rebuild(struct net *net)
 651{
 652	schedule_work(&net->xfrm.policy_hthresh.work);
 653}
 654EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
 655
  656/* Generate a new index... KAME seems to generate them ordered by cost
  657 * at the price of absolute unpredictability of rule ordering. That will not do here. */
 658static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
 659{
 660	static u32 idx_generator;
 661
 662	for (;;) {
 663		struct hlist_head *list;
 664		struct xfrm_policy *p;
 665		u32 idx;
 666		int found;
 667
 668		if (!index) {
 669			idx = (idx_generator | dir);
 670			idx_generator += 8;
 671		} else {
 672			idx = index;
 673			index = 0;
 674		}
 675
 676		if (idx == 0)
 677			idx = 8;
 678		list = net->xfrm.policy_byidx + idx_hash(net, idx);
 679		found = 0;
 680		hlist_for_each_entry(p, list, byidx) {
 681			if (p->index == idx) {
 682				found = 1;
 683				break;
 684			}
 685		}
 686		if (!found)
 687			return idx;
 688	}
 689}
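/* The generated index encodes the direction in its low bits: idx is
 * idx_generator | dir and the generator advances in steps of 8, so
 * xfrm_policy_id2dir(index) can recover the direction again (effectively
 * index & 7).
 */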
 690
 691static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
 692{
 693	u32 *p1 = (u32 *) s1;
 694	u32 *p2 = (u32 *) s2;
 695	int len = sizeof(struct xfrm_selector) / sizeof(u32);
 696	int i;
 697
 698	for (i = 0; i < len; i++) {
 699		if (p1[i] != p2[i])
 700			return 1;
 701	}
 702
 703	return 0;
 704}
 705
 706static void xfrm_policy_requeue(struct xfrm_policy *old,
 707				struct xfrm_policy *new)
 708{
 709	struct xfrm_policy_queue *pq = &old->polq;
 710	struct sk_buff_head list;
 711
 712	if (skb_queue_empty(&pq->hold_queue))
 713		return;
 714
 715	__skb_queue_head_init(&list);
 716
 717	spin_lock_bh(&pq->hold_queue.lock);
 718	skb_queue_splice_init(&pq->hold_queue, &list);
 719	if (del_timer(&pq->hold_timer))
 720		xfrm_pol_put(old);
 721	spin_unlock_bh(&pq->hold_queue.lock);
 722
 723	pq = &new->polq;
 724
 725	spin_lock_bh(&pq->hold_queue.lock);
 726	skb_queue_splice(&list, &pq->hold_queue);
 727	pq->timeout = XFRM_QUEUE_TMO_MIN;
 728	if (!mod_timer(&pq->hold_timer, jiffies))
 729		xfrm_pol_hold(new);
 730	spin_unlock_bh(&pq->hold_queue.lock);
 731}
 732
 733static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
 734				   struct xfrm_policy *pol)
 735{
 736	u32 mark = policy->mark.v & policy->mark.m;
 737
 738	if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
 739		return true;
 740
 741	if ((mark & pol->mark.m) == pol->mark.v &&
 742	    policy->priority == pol->priority)
 743		return true;
 744
 745	return false;
 746}
 747
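/* Insert a policy under the policy write lock.  An existing entry with
 * the same type, selector, mark and security context is remembered as
 * delpol and replaced (its queued packets move to the new policy via
 * xfrm_policy_requeue()); otherwise the new policy is placed behind the
 * last entry whose priority is not larger.  The flow cache genid and the
 * per-family routing genid are bumped so cached bundles get revalidated.
 */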
 748int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
 749{
 750	struct net *net = xp_net(policy);
 751	struct xfrm_policy *pol;
 752	struct xfrm_policy *delpol;
 753	struct hlist_head *chain;
 754	struct hlist_node *newpos;
 755
 756	write_lock_bh(&net->xfrm.xfrm_policy_lock);
 757	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
 758	delpol = NULL;
 759	newpos = NULL;
 760	hlist_for_each_entry(pol, chain, bydst) {
 761		if (pol->type == policy->type &&
 
 762		    !selector_cmp(&pol->selector, &policy->selector) &&
 763		    xfrm_policy_mark_match(policy, pol) &&
 764		    xfrm_sec_ctx_match(pol->security, policy->security) &&
 765		    !WARN_ON(delpol)) {
 766			if (excl) {
 767				write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 768				return -EEXIST;
 769			}
 770			delpol = pol;
 771			if (policy->priority > pol->priority)
 772				continue;
 773		} else if (policy->priority >= pol->priority) {
 774			newpos = &pol->bydst;
 775			continue;
 776		}
 777		if (delpol)
 778			break;
 779	}
 
 780	if (newpos)
 781		hlist_add_behind(&policy->bydst, newpos);
 782	else
 783		hlist_add_head(&policy->bydst, chain);
 784	__xfrm_policy_link(policy, dir);
 785	atomic_inc(&net->xfrm.flow_cache_genid);
 786
  787	/* After the previous checks, family can only be AF_INET or AF_INET6 */
 788	if (policy->family == AF_INET)
 789		rt_genid_bump_ipv4(net);
 790	else
 791		rt_genid_bump_ipv6(net);
 792
 793	if (delpol) {
 794		xfrm_policy_requeue(delpol, policy);
 795		__xfrm_policy_unlink(delpol, dir);
 796	}
 797	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
 798	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
 799	policy->curlft.add_time = get_seconds();
 800	policy->curlft.use_time = 0;
 801	if (!mod_timer(&policy->timer, jiffies + HZ))
 802		xfrm_pol_hold(policy);
 803	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 804
 805	if (delpol)
 806		xfrm_policy_kill(delpol);
 807	else if (xfrm_bydst_should_resize(net, dir, NULL))
 808		schedule_work(&net->xfrm.policy_hash_work);
 809
 810	return 0;
 811}
 812EXPORT_SYMBOL(xfrm_policy_insert);
 813
 814struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
 815					  int dir, struct xfrm_selector *sel,
 816					  struct xfrm_sec_ctx *ctx, int delete,
 817					  int *err)
 818{
 819	struct xfrm_policy *pol, *ret;
 
 820	struct hlist_head *chain;
 821
 822	*err = 0;
 823	write_lock_bh(&net->xfrm.xfrm_policy_lock);
 824	chain = policy_hash_bysel(net, sel, sel->family, dir);
 825	ret = NULL;
 826	hlist_for_each_entry(pol, chain, bydst) {
 827		if (pol->type == type &&
 828		    (mark & pol->mark.m) == pol->mark.v &&
 829		    !selector_cmp(sel, &pol->selector) &&
 830		    xfrm_sec_ctx_match(ctx, pol->security)) {
 831			xfrm_pol_hold(pol);
 832			if (delete) {
 833				*err = security_xfrm_policy_delete(
 834								pol->security);
 835				if (*err) {
 836					write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 837					return pol;
 838				}
 839				__xfrm_policy_unlink(pol, dir);
 840			}
 841			ret = pol;
 842			break;
 843		}
 
 844	}
 845	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 846
 847	if (ret && delete)
 848		xfrm_policy_kill(ret);
 
 
 849	return ret;
 850}
 851EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
 852
 853struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
 854				     int dir, u32 id, int delete, int *err)
 
 855{
 856	struct xfrm_policy *pol, *ret;
 857	struct hlist_head *chain;
 858
 859	*err = -ENOENT;
 860	if (xfrm_policy_id2dir(id) != dir)
 861		return NULL;
 862
 863	*err = 0;
 864	write_lock_bh(&net->xfrm.xfrm_policy_lock);
 865	chain = net->xfrm.policy_byidx + idx_hash(net, id);
 866	ret = NULL;
 867	hlist_for_each_entry(pol, chain, byidx) {
 868		if (pol->type == type && pol->index == id &&
 
 869		    (mark & pol->mark.m) == pol->mark.v) {
 870			xfrm_pol_hold(pol);
 871			if (delete) {
 872				*err = security_xfrm_policy_delete(
 873								pol->security);
 874				if (*err) {
 875					write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 876					return pol;
 877				}
 878				__xfrm_policy_unlink(pol, dir);
 879			}
 880			ret = pol;
 881			break;
 882		}
 883	}
 884	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 885
 886	if (ret && delete)
 887		xfrm_policy_kill(ret);
 888	return ret;
 889}
 890EXPORT_SYMBOL(xfrm_policy_byid);
 891
 892#ifdef CONFIG_SECURITY_NETWORK_XFRM
 893static inline int
 894xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
 895{
 896	int dir, err = 0;
 
 897
 898	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
 899		struct xfrm_policy *pol;
 900		int i;
 
 
 901
 902		hlist_for_each_entry(pol,
 903				     &net->xfrm.policy_inexact[dir], bydst) {
 904			if (pol->type != type)
 905				continue;
 906			err = security_xfrm_policy_delete(pol->security);
 907			if (err) {
 908				xfrm_audit_policy_delete(pol, 0, task_valid);
 909				return err;
 910			}
 911		}
 912		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
 913			hlist_for_each_entry(pol,
 914					     net->xfrm.policy_bydst[dir].table + i,
 915					     bydst) {
 916				if (pol->type != type)
 917					continue;
 918				err = security_xfrm_policy_delete(
 919								pol->security);
 920				if (err) {
 921					xfrm_audit_policy_delete(pol, 0,
 922								 task_valid);
 923					return err;
 924				}
 925			}
 926		}
 927	}
 928	return err;
 929}
 930#else
 931static inline int
 932xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
 933{
 934	return 0;
 935}
 936#endif
 937
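/* Flush all policies of the given type.  Every inexact entry and every
 * bydst bucket is walked per direction; the policy lock is dropped around
 * the audit record and xfrm_policy_kill() for each entry and the scan is
 * restarted from the top.  -ESRCH is returned when nothing was flushed.
 */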
 938int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
 939{
 940	int dir, err = 0, cnt = 0;
 
 941
 942	write_lock_bh(&net->xfrm.xfrm_policy_lock);
 943
 944	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
 945	if (err)
 946		goto out;
 947
 948	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
 949		struct xfrm_policy *pol;
 950		int i;
 951
 952	again1:
 953		hlist_for_each_entry(pol,
 954				     &net->xfrm.policy_inexact[dir], bydst) {
 955			if (pol->type != type)
 956				continue;
 957			__xfrm_policy_unlink(pol, dir);
 958			write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 959			cnt++;
 960
 961			xfrm_audit_policy_delete(pol, 1, task_valid);
 962
 963			xfrm_policy_kill(pol);
 964
 965			write_lock_bh(&net->xfrm.xfrm_policy_lock);
 966			goto again1;
 967		}
 968
 969		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
 970	again2:
 971			hlist_for_each_entry(pol,
 972					     net->xfrm.policy_bydst[dir].table + i,
 973					     bydst) {
 974				if (pol->type != type)
 975					continue;
 976				__xfrm_policy_unlink(pol, dir);
 977				write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 978				cnt++;
 979
 980				xfrm_audit_policy_delete(pol, 1, task_valid);
 981				xfrm_policy_kill(pol);
 982
 983				write_lock_bh(&net->xfrm.xfrm_policy_lock);
 984				goto again2;
 985			}
 986		}
 987
 988	}
 989	if (!cnt)
 
 
 990		err = -ESRCH;
 991out:
 992	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 993	return err;
 994}
 995EXPORT_SYMBOL(xfrm_policy_flush);
 996
 997int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
 998		     int (*func)(struct xfrm_policy *, int, int, void*),
 999		     void *data)
1000{
1001	struct xfrm_policy *pol;
1002	struct xfrm_policy_walk_entry *x;
1003	int error = 0;
1004
1005	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
1006	    walk->type != XFRM_POLICY_TYPE_ANY)
1007		return -EINVAL;
1008
1009	if (list_empty(&walk->walk.all) && walk->seq != 0)
1010		return 0;
1011
1012	write_lock_bh(&net->xfrm.xfrm_policy_lock);
1013	if (list_empty(&walk->walk.all))
1014		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
1015	else
1016		x = list_first_entry(&walk->walk.all,
1017				     struct xfrm_policy_walk_entry, all);
1018
1019	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1020		if (x->dead)
1021			continue;
1022		pol = container_of(x, struct xfrm_policy, walk);
1023		if (walk->type != XFRM_POLICY_TYPE_ANY &&
1024		    walk->type != pol->type)
1025			continue;
1026		error = func(pol, xfrm_policy_id2dir(pol->index),
1027			     walk->seq, data);
1028		if (error) {
1029			list_move_tail(&walk->walk.all, &x->all);
1030			goto out;
1031		}
1032		walk->seq++;
1033	}
1034	if (walk->seq == 0) {
1035		error = -ENOENT;
1036		goto out;
1037	}
1038	list_del_init(&walk->walk.all);
1039out:
1040	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
1041	return error;
1042}
1043EXPORT_SYMBOL(xfrm_policy_walk);
1044
1045void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1046{
1047	INIT_LIST_HEAD(&walk->walk.all);
1048	walk->walk.dead = 1;
1049	walk->type = type;
1050	walk->seq = 0;
1051}
1052EXPORT_SYMBOL(xfrm_policy_walk_init);
1053
1054void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1055{
1056	if (list_empty(&walk->walk.all))
1057		return;
1058
1059	write_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
1060	list_del(&walk->walk.all);
1061	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
1062}
1063EXPORT_SYMBOL(xfrm_policy_walk_done);
1064
 1065/*
 1066 * Find the policy to apply to this flow.
 1067 *
 1068 * Returns 0 if the policy matches the flow, otherwise a negative errno.
 1069 */
1070static int xfrm_policy_match(const struct xfrm_policy *pol,
1071			     const struct flowi *fl,
1072			     u8 type, u16 family, int dir)
1073{
1074	const struct xfrm_selector *sel = &pol->selector;
1075	int ret = -ESRCH;
1076	bool match;
1077
1078	if (pol->family != family ||
 
1079	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1080	    pol->type != type)
1081		return ret;
1082
1083	match = xfrm_selector_match(sel, fl, family);
1084	if (match)
1085		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
1086						  dir);
1087
1088	return ret;
1089}
1090
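/* Main SPD lookup for one policy type: the exact bydst hash chain is
 * scanned first and the best match's priority is remembered, then the
 * per-direction inexact list is scanned until no entry can beat that
 * priority.  A security_xfrm_policy_lookup() verdict other than -ESRCH
 * aborts the lookup with an error pointer.
 */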
1091static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
1092						     const struct flowi *fl,
1093						     u16 family, u8 dir)
 
1094{
1095	int err;
 
 
1096	struct xfrm_policy *pol, *ret;
1097	const xfrm_address_t *daddr, *saddr;
1098	struct hlist_head *chain;
1099	u32 priority = ~0U;
 
1100
1101	daddr = xfrm_flowi_daddr(fl, family);
1102	saddr = xfrm_flowi_saddr(fl, family);
1103	if (unlikely(!daddr || !saddr))
1104		return NULL;
1105
1106	read_lock_bh(&net->xfrm.xfrm_policy_lock);
1107	chain = policy_hash_direct(net, daddr, saddr, family, dir);
1108	ret = NULL;
1109	hlist_for_each_entry(pol, chain, bydst) {
1110		err = xfrm_policy_match(pol, fl, type, family, dir);
1111		if (err) {
1112			if (err == -ESRCH)
1113				continue;
1114			else {
1115				ret = ERR_PTR(err);
1116				goto fail;
1117			}
1118		} else {
1119			ret = pol;
1120			priority = ret->priority;
1121			break;
1122		}
1123	}
1124	chain = &net->xfrm.policy_inexact[dir];
1125	hlist_for_each_entry(pol, chain, bydst) {
1126		if ((pol->priority >= priority) && ret)
1127			break;
1128
1129		err = xfrm_policy_match(pol, fl, type, family, dir);
1130		if (err) {
1131			if (err == -ESRCH)
1132				continue;
1133			else {
1134				ret = ERR_PTR(err);
1135				goto fail;
1136			}
1137		} else {
1138			ret = pol;
1139			break;
1140		}
1141	}
1142
1143	xfrm_pol_hold(ret);
1144fail:
1145	read_unlock_bh(&net->xfrm.xfrm_policy_lock);
1146
1147	return ret;
1148}
1149
1150static struct xfrm_policy *
1151__xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
 
1152{
1153#ifdef CONFIG_XFRM_SUB_POLICY
1154	struct xfrm_policy *pol;
1155
1156	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
 
1157	if (pol != NULL)
1158		return pol;
1159#endif
1160	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
1161}
1162
1163static int flow_to_policy_dir(int dir)
1164{
1165	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
1166	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
1167	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
1168		return dir;
1169
1170	switch (dir) {
1171	default:
1172	case FLOW_DIR_IN:
1173		return XFRM_POLICY_IN;
1174	case FLOW_DIR_OUT:
1175		return XFRM_POLICY_OUT;
1176	case FLOW_DIR_FWD:
1177		return XFRM_POLICY_FWD;
1178	}
1179}
1180
1181static struct flow_cache_object *
1182xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
1183		   u8 dir, struct flow_cache_object *old_obj, void *ctx)
1184{
1185	struct xfrm_policy *pol;
1186
1187	if (old_obj)
1188		xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));
1189
1190	pol = __xfrm_policy_lookup(net, fl, family, flow_to_policy_dir(dir));
1191	if (IS_ERR_OR_NULL(pol))
1192		return ERR_CAST(pol);
1193
1194	/* Resolver returns two references:
1195	 * one for cache and one for caller of flow_cache_lookup() */
1196	xfrm_pol_hold(pol);
1197
1198	return &pol->flo;
1199}
1200
1201static inline int policy_to_flow_dir(int dir)
1202{
1203	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
1204	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
1205	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
1206		return dir;
1207	switch (dir) {
1208	default:
1209	case XFRM_POLICY_IN:
1210		return FLOW_DIR_IN;
1211	case XFRM_POLICY_OUT:
1212		return FLOW_DIR_OUT;
1213	case XFRM_POLICY_FWD:
1214		return FLOW_DIR_FWD;
1215	}
1216}
1217
1218static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
1219						 const struct flowi *fl)
 
1220{
1221	struct xfrm_policy *pol;
1222	struct net *net = sock_net(sk);
1223
1224	rcu_read_lock();
1225	read_lock_bh(&net->xfrm.xfrm_policy_lock);
1226	pol = rcu_dereference(sk->sk_policy[dir]);
1227	if (pol != NULL) {
1228		bool match = xfrm_selector_match(&pol->selector, fl,
1229						 sk->sk_family);
1230		int err = 0;
1231
1232		if (match) {
1233			if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
 
1234				pol = NULL;
1235				goto out;
1236			}
1237			err = security_xfrm_policy_lookup(pol->security,
1238						      fl->flowi_secid,
1239						      policy_to_flow_dir(dir));
1240			if (!err)
1241				xfrm_pol_hold(pol);
1242			else if (err == -ESRCH)
 
1243				pol = NULL;
1244			else
1245				pol = ERR_PTR(err);
 
1246		} else
1247			pol = NULL;
1248	}
1249out:
1250	read_unlock_bh(&net->xfrm.xfrm_policy_lock);
1251	rcu_read_unlock();
1252	return pol;
1253}
1254
1255static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
1256{
1257	struct net *net = xp_net(pol);
1258
1259	list_add(&pol->walk.all, &net->xfrm.policy_all);
1260	net->xfrm.policy_count[dir]++;
1261	xfrm_pol_hold(pol);
1262}
1263
1264static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
1265						int dir)
1266{
1267	struct net *net = xp_net(pol);
1268
1269	if (list_empty(&pol->walk.all))
1270		return NULL;
1271
1272	/* Socket policies are not hashed. */
1273	if (!hlist_unhashed(&pol->bydst)) {
1274		hlist_del(&pol->bydst);
 
1275		hlist_del(&pol->byidx);
1276	}
1277
1278	list_del_init(&pol->walk.all);
1279	net->xfrm.policy_count[dir]--;
1280
1281	return pol;
1282}
1283
1284static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
1285{
1286	__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
1287}
1288
1289static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
1290{
1291	__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
1292}
1293
1294int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
1295{
1296	struct net *net = xp_net(pol);
1297
1298	write_lock_bh(&net->xfrm.xfrm_policy_lock);
1299	pol = __xfrm_policy_unlink(pol, dir);
1300	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
1301	if (pol) {
1302		xfrm_policy_kill(pol);
1303		return 0;
1304	}
1305	return -ENOENT;
1306}
1307EXPORT_SYMBOL(xfrm_policy_delete);
1308
1309int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1310{
1311	struct net *net = xp_net(pol);
1312	struct xfrm_policy *old_pol;
1313
1314#ifdef CONFIG_XFRM_SUB_POLICY
1315	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
1316		return -EINVAL;
1317#endif
1318
1319	write_lock_bh(&net->xfrm.xfrm_policy_lock);
1320	old_pol = rcu_dereference_protected(sk->sk_policy[dir],
1321				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
1322	if (pol) {
1323		pol->curlft.add_time = get_seconds();
1324		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
1325		xfrm_sk_policy_link(pol, dir);
1326	}
1327	rcu_assign_pointer(sk->sk_policy[dir], pol);
1328	if (old_pol) {
1329		if (pol)
1330			xfrm_policy_requeue(old_pol, pol);
1331
 1332		/* Unlinking always succeeds. This is the only function
 1333		 * allowed to delete or replace a socket policy.
 1334		 */
1335		xfrm_sk_policy_unlink(old_pol, dir);
1336	}
1337	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
1338
1339	if (old_pol) {
1340		xfrm_policy_kill(old_pol);
1341	}
1342	return 0;
1343}
1344
1345static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
1346{
1347	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
1348	struct net *net = xp_net(old);
1349
1350	if (newp) {
1351		newp->selector = old->selector;
1352		if (security_xfrm_policy_clone(old->security,
1353					       &newp->security)) {
1354			kfree(newp);
1355			return NULL;  /* ENOMEM */
1356		}
1357		newp->lft = old->lft;
1358		newp->curlft = old->curlft;
1359		newp->mark = old->mark;
 
1360		newp->action = old->action;
1361		newp->flags = old->flags;
1362		newp->xfrm_nr = old->xfrm_nr;
1363		newp->index = old->index;
1364		newp->type = old->type;
 
1365		memcpy(newp->xfrm_vec, old->xfrm_vec,
1366		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
1367		write_lock_bh(&net->xfrm.xfrm_policy_lock);
1368		xfrm_sk_policy_link(newp, dir);
1369		write_unlock_bh(&net->xfrm.xfrm_policy_lock);
1370		xfrm_pol_put(newp);
1371	}
1372	return newp;
1373}
1374
1375int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
1376{
1377	const struct xfrm_policy *p;
1378	struct xfrm_policy *np;
1379	int i, ret = 0;
1380
1381	rcu_read_lock();
1382	for (i = 0; i < 2; i++) {
1383		p = rcu_dereference(osk->sk_policy[i]);
1384		if (p) {
1385			np = clone_policy(p, i);
1386			if (unlikely(!np)) {
1387				ret = -ENOMEM;
1388				break;
1389			}
1390			rcu_assign_pointer(sk->sk_policy[i], np);
1391		}
1392	}
1393	rcu_read_unlock();
1394	return ret;
1395}
1396
1397static int
1398xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
1399	       xfrm_address_t *remote, unsigned short family)
1400{
1401	int err;
1402	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1403
1404	if (unlikely(afinfo == NULL))
1405		return -EINVAL;
1406	err = afinfo->get_saddr(net, oif, local, remote);
1407	xfrm_policy_put_afinfo(afinfo);
1408	return err;
1409}
1410
1411/* Resolve list of templates for the flow, given policy. */
1412
1413static int
1414xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
1415		      struct xfrm_state **xfrm, unsigned short family)
1416{
1417	struct net *net = xp_net(policy);
1418	int nx;
1419	int i, error;
1420	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
1421	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
1422	xfrm_address_t tmp;
1423
1424	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
1425		struct xfrm_state *x;
1426		xfrm_address_t *remote = daddr;
1427		xfrm_address_t *local  = saddr;
1428		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
1429
1430		if (tmpl->mode == XFRM_MODE_TUNNEL ||
1431		    tmpl->mode == XFRM_MODE_BEET) {
1432			remote = &tmpl->id.daddr;
1433			local = &tmpl->saddr;
1434			if (xfrm_addr_any(local, tmpl->encap_family)) {
1435				error = xfrm_get_saddr(net, fl->flowi_oif,
1436						       &tmp, remote,
1437						       tmpl->encap_family);
1438				if (error)
1439					goto fail;
1440				local = &tmp;
1441			}
1442		}
1443
1444		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
1445
1446		if (x && x->km.state == XFRM_STATE_VALID) {
1447			xfrm[nx++] = x;
1448			daddr = remote;
1449			saddr = local;
1450			continue;
1451		}
1452		if (x) {
1453			error = (x->km.state == XFRM_STATE_ERROR ?
1454				 -EINVAL : -EAGAIN);
1455			xfrm_state_put(x);
1456		} else if (error == -ESRCH) {
1457			error = -EAGAIN;
1458		}
1459
1460		if (!tmpl->optional)
1461			goto fail;
1462	}
1463	return nx;
1464
1465fail:
1466	for (nx--; nx >= 0; nx--)
1467		xfrm_state_put(xfrm[nx]);
1468	return error;
1469}
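/*
 * Notes on the resolver above: a template that cannot be resolved to a
 * VALID state yields -EAGAIN (an -ESRCH from xfrm_state_find() is mapped
 * to -EAGAIN as well), which lets callers wait for the key manager to
 * negotiate the missing SA; a state stuck in XFRM_STATE_ERROR yields
 * -EINVAL.  Optional templates that fail to resolve are skipped instead
 * of failing the whole policy.
 */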
1470
1471static int
1472xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
1473		  struct xfrm_state **xfrm, unsigned short family)
1474{
1475	struct xfrm_state *tp[XFRM_MAX_DEPTH];
1476	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
1477	int cnx = 0;
1478	int error;
1479	int ret;
1480	int i;
1481
1482	for (i = 0; i < npols; i++) {
1483		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
1484			error = -ENOBUFS;
1485			goto fail;
1486		}
1487
1488		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
1489		if (ret < 0) {
1490			error = ret;
1491			goto fail;
1492		} else
1493			cnx += ret;
1494	}
1495
1496	/* found states are sorted for outbound processing */
1497	if (npols > 1)
1498		xfrm_state_sort(xfrm, tpp, cnx, family);
1499
1500	return cnx;
1501
1502 fail:
1503	for (cnx--; cnx >= 0; cnx--)
1504		xfrm_state_put(tpp[cnx]);
1505	return error;
1506
1507}
1508
1509/* Check that the bundle accepts the flow and its components are
1510 * still valid.
1511 */
1512
1513static inline int xfrm_get_tos(const struct flowi *fl, int family)
1514{
1515	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1516	int tos;
1517
1518	if (!afinfo)
1519		return -EINVAL;
1520
1521	tos = afinfo->get_tos(fl);
1522
1523	xfrm_policy_put_afinfo(afinfo);
1524
1525	return tos;
1526}
1527
1528static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
1529{
1530	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1531	struct dst_entry *dst = &xdst->u.dst;
1532
1533	if (xdst->route == NULL) {
1534		/* Dummy bundle - if it has xfrms, we were not
1535		 * able to build a bundle because template resolution
1536		 * failed. It means we need to retry resolving. */
1537		if (xdst->num_xfrms > 0)
1538			return NULL;
1539	} else if (dst->flags & DST_XFRM_QUEUE) {
1540		return NULL;
1541	} else {
1542		/* Real bundle */
1543		if (stale_bundle(dst))
1544			return NULL;
1545	}
1546
1547	dst_hold(dst);
1548	return flo;
1549}
1550
1551static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
1552{
1553	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1554	struct dst_entry *dst = &xdst->u.dst;
1555
1556	if (!xdst->route)
1557		return 0;
1558	if (stale_bundle(dst))
1559		return 0;
1560
1561	return 1;
1562}
1563
1564static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
1565{
1566	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1567	struct dst_entry *dst = &xdst->u.dst;
1568
1569	dst_free(dst);
1570}
1571
1572static const struct flow_cache_ops xfrm_bundle_fc_ops = {
1573	.get = xfrm_bundle_flo_get,
1574	.check = xfrm_bundle_flo_check,
1575	.delete = xfrm_bundle_flo_delete,
1576};
1577
1578static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1579{
1580	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1581	struct dst_ops *dst_ops;
1582	struct xfrm_dst *xdst;
1583
1584	if (!afinfo)
1585		return ERR_PTR(-EINVAL);
1586
1587	switch (family) {
1588	case AF_INET:
1589		dst_ops = &net->xfrm.xfrm4_dst_ops;
1590		break;
1591#if IS_ENABLED(CONFIG_IPV6)
1592	case AF_INET6:
1593		dst_ops = &net->xfrm.xfrm6_dst_ops;
1594		break;
1595#endif
1596	default:
1597		BUG();
1598	}
1599	xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0);
1600
1601	if (likely(xdst)) {
1602		struct dst_entry *dst = &xdst->u.dst;
1603
1604		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
1605		xdst->flo.ops = &xfrm_bundle_fc_ops;
1606	} else
1607		xdst = ERR_PTR(-ENOBUFS);
1608
1609	xfrm_policy_put_afinfo(afinfo);
1610
1611	return xdst;
1612}
1613
1614static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
1615				 int nfheader_len)
1616{
1617	struct xfrm_policy_afinfo *afinfo =
1618		xfrm_policy_get_afinfo(dst->ops->family);
1619	int err;
1620
1621	if (!afinfo)
1622		return -EINVAL;
1623
1624	err = afinfo->init_path(path, dst, nfheader_len);
1625
1626	xfrm_policy_put_afinfo(afinfo);
1627
1628	return err;
1629}
1630
1631static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
1632				const struct flowi *fl)
1633{
1634	struct xfrm_policy_afinfo *afinfo =
1635		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
1636	int err;
1637
1638	if (!afinfo)
1639		return -EINVAL;
1640
1641	err = afinfo->fill_dst(xdst, dev, fl);
1642
1643	xfrm_policy_put_afinfo(afinfo);
1644
1645	return err;
1646}
1647
1648
1649/* Allocate chain of dst_entry's, attach known xfrm's, calculate
1650 * all the metrics... Shortly, bundle a bundle.
1651 */
1652
1653static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1654					    struct xfrm_state **xfrm, int nx,
1655					    const struct flowi *fl,
1656					    struct dst_entry *dst)
1657{
1658	struct net *net = xp_net(policy);
1659	unsigned long now = jiffies;
1660	struct net_device *dev;
1661	struct xfrm_mode *inner_mode;
1662	struct dst_entry *dst_prev = NULL;
1663	struct dst_entry *dst0 = NULL;
1664	int i = 0;
1665	int err;
1666	int header_len = 0;
1667	int nfheader_len = 0;
1668	int trailer_len = 0;
1669	int tos;
1670	int family = policy->selector.family;
1671	xfrm_address_t saddr, daddr;
1672
1673	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
1674
1675	tos = xfrm_get_tos(fl, family);
1676	err = tos;
1677	if (tos < 0)
1678		goto put_states;
1679
1680	dst_hold(dst);
1681
1682	for (; i < nx; i++) {
1683		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
1684		struct dst_entry *dst1 = &xdst->u.dst;
1685
1686		err = PTR_ERR(xdst);
1687		if (IS_ERR(xdst)) {
1688			dst_release(dst);
1689			goto put_states;
1690		}
1691
1692		if (xfrm[i]->sel.family == AF_UNSPEC) {
1693			inner_mode = xfrm_ip2inner_mode(xfrm[i],
1694							xfrm_af2proto(family));
1695			if (!inner_mode) {
1696				err = -EAFNOSUPPORT;
1697				dst_release(dst);
1698				goto put_states;
1699			}
1700		} else
1701			inner_mode = xfrm[i]->inner_mode;
1702
1703		if (!dst_prev)
1704			dst0 = dst1;
1705		else {
1706			dst_prev->child = dst_clone(dst1);
1707			dst1->flags |= DST_NOHASH;
1708		}
1709
1710		xdst->route = dst;
1711		dst_copy_metrics(dst1, dst);
1712
1713		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
1714			family = xfrm[i]->props.family;
1715			dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
1716					      &saddr, &daddr, family);
1717			err = PTR_ERR(dst);
1718			if (IS_ERR(dst))
1719				goto put_states;
1720		} else
1721			dst_hold(dst);
1722
1723		dst1->xfrm = xfrm[i];
1724		xdst->xfrm_genid = xfrm[i]->genid;
1725
1726		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
1727		dst1->flags |= DST_HOST;
1728		dst1->lastuse = now;
1729
1730		dst1->input = dst_discard;
1731		dst1->output = inner_mode->afinfo->output;
1732
1733		dst1->next = dst_prev;
1734		dst_prev = dst1;
1735
1736		header_len += xfrm[i]->props.header_len;
1737		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
1738			nfheader_len += xfrm[i]->props.header_len;
1739		trailer_len += xfrm[i]->props.trailer_len;
1740	}
1741
1742	dst_prev->child = dst;
1743	dst0->path = dst;
1744
1745	err = -ENODEV;
1746	dev = dst->dev;
1747	if (!dev)
1748		goto free_dst;
1749
1750	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
1751	xfrm_init_pmtu(dst_prev);
1752
1753	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
1754		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
1755
1756		err = xfrm_fill_dst(xdst, dev, fl);
1757		if (err)
1758			goto free_dst;
1759
1760		dst_prev->header_len = header_len;
1761		dst_prev->trailer_len = trailer_len;
1762		header_len -= xdst->u.dst.xfrm->props.header_len;
1763		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
1764	}
1765
1766out:
1767	return dst0;
1768
1769put_states:
1770	for (; i < nx; i++)
1771		xfrm_state_put(xfrm[i]);
1772free_dst:
1773	if (dst0)
1774		dst_free(dst0);
1775	dst0 = ERR_PTR(err);
1776	goto out;
1777}
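/*
 * Shape of a successfully built bundle (sketch): dst0 is what the caller
 * gets back, each xfrm_dst points to the next via ->child, and the route
 * passed in sits at the bottom:
 *
 *	dst0 (xfrm_dst, ->xfrm = xfrm[0])
 *	  ->child: xfrm_dst for xfrm[1]
 *	    ...
 *	      ->child: dst (the original route), also dst0->path
 *
 * The header_len/trailer_len totals accumulated in the loop mean each
 * level records the transform overhead of itself plus everything below it.
 */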
1778
1779#ifdef CONFIG_XFRM_SUB_POLICY
1780static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
1781{
1782	if (!*target) {
1783		*target = kmalloc(size, GFP_ATOMIC);
1784		if (!*target)
1785			return -ENOMEM;
1786	}
1787
1788	memcpy(*target, src, size);
1789	return 0;
1790}
1791#endif
1792
1793static int xfrm_dst_update_parent(struct dst_entry *dst,
1794				  const struct xfrm_selector *sel)
1795{
1796#ifdef CONFIG_XFRM_SUB_POLICY
1797	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1798	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
1799				   sel, sizeof(*sel));
1800#else
1801	return 0;
1802#endif
1803}
1804
1805static int xfrm_dst_update_origin(struct dst_entry *dst,
1806				  const struct flowi *fl)
1807{
1808#ifdef CONFIG_XFRM_SUB_POLICY
1809	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1810	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
1811#else
1812	return 0;
1813#endif
1814}
1815
1816static int xfrm_expand_policies(const struct flowi *fl, u16 family,
1817				struct xfrm_policy **pols,
1818				int *num_pols, int *num_xfrms)
1819{
1820	int i;
1821
1822	if (*num_pols == 0 || !pols[0]) {
1823		*num_pols = 0;
1824		*num_xfrms = 0;
1825		return 0;
1826	}
1827	if (IS_ERR(pols[0]))
1828		return PTR_ERR(pols[0]);
1829
1830	*num_xfrms = pols[0]->xfrm_nr;
1831
1832#ifdef CONFIG_XFRM_SUB_POLICY
1833	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
1834	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
1835		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
1836						    XFRM_POLICY_TYPE_MAIN,
1837						    fl, family,
1838						    XFRM_POLICY_OUT);
1839		if (pols[1]) {
1840			if (IS_ERR(pols[1])) {
1841				xfrm_pols_put(pols, *num_pols);
1842				return PTR_ERR(pols[1]);
1843			}
1844			(*num_pols)++;
1845			(*num_xfrms) += pols[1]->xfrm_nr;
1846		}
1847	}
1848#endif
1849	for (i = 0; i < *num_pols; i++) {
1850		if (pols[i]->action != XFRM_POLICY_ALLOW) {
1851			*num_xfrms = -1;
1852			break;
1853		}
1854	}
1855
1856	return 0;
1857
1858}
1859
1860static struct xfrm_dst *
1861xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1862			       const struct flowi *fl, u16 family,
1863			       struct dst_entry *dst_orig)
1864{
1865	struct net *net = xp_net(pols[0]);
1866	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
1867	struct dst_entry *dst;
1868	struct xfrm_dst *xdst;
1869	int err;
1870
1871	/* Try to instantiate a bundle */
1872	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
1873	if (err <= 0) {
1874		if (err != 0 && err != -EAGAIN)
1875			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1876		return ERR_PTR(err);
1877	}
1878
1879	dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
1880	if (IS_ERR(dst)) {
1881		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
1882		return ERR_CAST(dst);
1883	}
1884
1885	xdst = (struct xfrm_dst *)dst;
1886	xdst->num_xfrms = err;
1887	if (num_pols > 1)
1888		err = xfrm_dst_update_parent(dst, &pols[1]->selector);
1889	else
1890		err = xfrm_dst_update_origin(dst, fl);
1891	if (unlikely(err)) {
1892		dst_free(dst);
1893		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1894		return ERR_PTR(err);
1895	}
1896
1897	xdst->num_pols = num_pols;
1898	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
1899	xdst->policy_genid = atomic_read(&pols[0]->genid);
1900
1901	return xdst;
1902}
1903
1904static void xfrm_policy_queue_process(unsigned long arg)
1905{
1906	struct sk_buff *skb;
1907	struct sock *sk;
1908	struct dst_entry *dst;
1909	struct xfrm_policy *pol = (struct xfrm_policy *)arg;
1910	struct net *net = xp_net(pol);
1911	struct xfrm_policy_queue *pq = &pol->polq;
1912	struct flowi fl;
1913	struct sk_buff_head list;
1914
1915	spin_lock(&pq->hold_queue.lock);
1916	skb = skb_peek(&pq->hold_queue);
1917	if (!skb) {
1918		spin_unlock(&pq->hold_queue.lock);
1919		goto out;
1920	}
1921	dst = skb_dst(skb);
1922	sk = skb->sk;
1923	xfrm_decode_session(skb, &fl, dst->ops->family);
1924	spin_unlock(&pq->hold_queue.lock);
1925
1926	dst_hold(dst->path);
1927	dst = xfrm_lookup(net, dst->path, &fl, sk, 0);
1928	if (IS_ERR(dst))
1929		goto purge_queue;
1930
1931	if (dst->flags & DST_XFRM_QUEUE) {
1932		dst_release(dst);
1933
1934		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
1935			goto purge_queue;
1936
1937		pq->timeout = pq->timeout << 1;
1938		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
1939			xfrm_pol_hold(pol);
1940		goto out;
1941	}
1942
1943	dst_release(dst);
1944
1945	__skb_queue_head_init(&list);
1946
1947	spin_lock(&pq->hold_queue.lock);
1948	pq->timeout = 0;
1949	skb_queue_splice_init(&pq->hold_queue, &list);
1950	spin_unlock(&pq->hold_queue.lock);
1951
1952	while (!skb_queue_empty(&list)) {
1953		skb = __skb_dequeue(&list);
1954
1955		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
1956		dst_hold(skb_dst(skb)->path);
1957		dst = xfrm_lookup(net, skb_dst(skb)->path, &fl, skb->sk, 0);
1958		if (IS_ERR(dst)) {
1959			kfree_skb(skb);
1960			continue;
1961		}
1962
1963		nf_reset(skb);
1964		skb_dst_drop(skb);
1965		skb_dst_set(skb, dst);
1966
1967		dst_output(net, skb->sk, skb);
1968	}
1969
1970out:
1971	xfrm_pol_put(pol);
1972	return;
1973
1974purge_queue:
1975	pq->timeout = 0;
1976	skb_queue_purge(&pq->hold_queue);
1977	xfrm_pol_put(pol);
1978}
1979
1980static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
1981{
1982	unsigned long sched_next;
1983	struct dst_entry *dst = skb_dst(skb);
1984	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
1985	struct xfrm_policy *pol = xdst->pols[0];
1986	struct xfrm_policy_queue *pq = &pol->polq;
1987
1988	if (unlikely(skb_fclone_busy(sk, skb))) {
1989		kfree_skb(skb);
1990		return 0;
1991	}
1992
1993	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
1994		kfree_skb(skb);
1995		return -EAGAIN;
1996	}
1997
1998	skb_dst_force(skb);
1999
2000	spin_lock_bh(&pq->hold_queue.lock);
2001
2002	if (!pq->timeout)
2003		pq->timeout = XFRM_QUEUE_TMO_MIN;
2004
2005	sched_next = jiffies + pq->timeout;
2006
2007	if (del_timer(&pq->hold_timer)) {
2008		if (time_before(pq->hold_timer.expires, sched_next))
2009			sched_next = pq->hold_timer.expires;
2010		xfrm_pol_put(pol);
2011	}
2012
2013	__skb_queue_tail(&pq->hold_queue, skb);
2014	if (!mod_timer(&pq->hold_timer, sched_next))
2015		xfrm_pol_hold(pol);
2016
2017	spin_unlock_bh(&pq->hold_queue.lock);
2018
2019	return 0;
2020}
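/*
 * Hold-queue timing: the first queued packet arms the timer with
 * XFRM_QUEUE_TMO_MIN (HZ/10); every time xfrm_policy_queue_process() runs
 * and the bundle is still unresolved the timeout doubles (HZ/10, HZ/5,
 * 2*HZ/5, ...), and once it reaches XFRM_QUEUE_TMO_MAX (60*HZ) the queue
 * is purged.  The queue length itself is bounded by XFRM_MAX_QUEUE_LEN.
 */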
2021
2022static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
2023						 struct xfrm_flo *xflo,
2024						 const struct flowi *fl,
2025						 int num_xfrms,
2026						 u16 family)
2027{
2028	int err;
2029	struct net_device *dev;
2030	struct dst_entry *dst;
2031	struct dst_entry *dst1;
2032	struct xfrm_dst *xdst;
2033
2034	xdst = xfrm_alloc_dst(net, family);
2035	if (IS_ERR(xdst))
2036		return xdst;
2037
2038	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
2039	    net->xfrm.sysctl_larval_drop ||
2040	    num_xfrms <= 0)
2041		return xdst;
2042
2043	dst = xflo->dst_orig;
2044	dst1 = &xdst->u.dst;
2045	dst_hold(dst);
2046	xdst->route = dst;
2047
2048	dst_copy_metrics(dst1, dst);
2049
2050	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2051	dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
2052	dst1->lastuse = jiffies;
2053
2054	dst1->input = dst_discard;
2055	dst1->output = xdst_queue_output;
2056
2057	dst_hold(dst);
2058	dst1->child = dst;
2059	dst1->path = dst;
2060
2061	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
2062
2063	err = -ENODEV;
2064	dev = dst->dev;
2065	if (!dev)
2066		goto free_dst;
2067
2068	err = xfrm_fill_dst(xdst, dev, fl);
2069	if (err)
2070		goto free_dst;
2071
2072out:
2073	return xdst;
2074
2075free_dst:
2076	dst_release(dst1);
2077	xdst = ERR_PTR(err);
2078	goto out;
2079}
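/*
 * A dummy bundle built above is marked DST_XFRM_QUEUE and its output hook
 * is xdst_queue_output(): packets sent through it are parked on the
 * owning policy's hold queue until xfrm_policy_queue_process() manages to
 * resolve a real bundle for them (or gives up and purges the queue).
 */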
2080
2081static struct flow_cache_object *
2082xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
2083		   struct flow_cache_object *oldflo, void *ctx)
2084{
2085	struct xfrm_flo *xflo = (struct xfrm_flo *)ctx;
2086	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2087	struct xfrm_dst *xdst, *new_xdst;
2088	int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
2089
2090	/* Check if the policies from old bundle are usable */
2091	xdst = NULL;
2092	if (oldflo) {
2093		xdst = container_of(oldflo, struct xfrm_dst, flo);
2094		num_pols = xdst->num_pols;
2095		num_xfrms = xdst->num_xfrms;
2096		pol_dead = 0;
2097		for (i = 0; i < num_pols; i++) {
2098			pols[i] = xdst->pols[i];
2099			pol_dead |= pols[i]->walk.dead;
2100		}
2101		if (pol_dead) {
2102			dst_free(&xdst->u.dst);
2103			xdst = NULL;
2104			num_pols = 0;
2105			num_xfrms = 0;
2106			oldflo = NULL;
2107		}
2108	}
2109
2110	/* Resolve policies to use if we couldn't get them from
2111	 * previous cache entry */
2112	if (xdst == NULL) {
2113		num_pols = 1;
2114		pols[0] = __xfrm_policy_lookup(net, fl, family,
2115					       flow_to_policy_dir(dir));
2116		err = xfrm_expand_policies(fl, family, pols,
2117					   &num_pols, &num_xfrms);
2118		if (err < 0)
2119			goto inc_error;
2120		if (num_pols == 0)
2121			return NULL;
2122		if (num_xfrms <= 0)
2123			goto make_dummy_bundle;
2124	}
2125
2126	new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
2127						  xflo->dst_orig);
2128	if (IS_ERR(new_xdst)) {
2129		err = PTR_ERR(new_xdst);
2130		if (err != -EAGAIN)
2131			goto error;
2132		if (oldflo == NULL)
2133			goto make_dummy_bundle;
2134		dst_hold(&xdst->u.dst);
2135		return oldflo;
2136	} else if (new_xdst == NULL) {
2137		num_xfrms = 0;
2138		if (oldflo == NULL)
2139			goto make_dummy_bundle;
2140		xdst->num_xfrms = 0;
2141		dst_hold(&xdst->u.dst);
2142		return oldflo;
2143	}
2144
2145	/* Kill the previous bundle */
2146	if (xdst) {
2147		/* The policies were stolen for newly generated bundle */
2148		xdst->num_pols = 0;
2149		dst_free(&xdst->u.dst);
2150	}
2151
2152	/* Flow cache does not have reference, it dst_free()'s,
2153	 * but we do need to return one reference for original caller */
2154	dst_hold(&new_xdst->u.dst);
2155	return &new_xdst->flo;
2156
2157make_dummy_bundle:
2158	/* We found policies, but there are no bundles to instantiate:
2159	 * either because the policy blocks, has no transformations, or
2160	 * we could not build a template (no xfrm_states). */
2161	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
2162	if (IS_ERR(xdst)) {
2163		xfrm_pols_put(pols, num_pols);
2164		return ERR_CAST(xdst);
2165	}
2166	xdst->num_pols = num_pols;
2167	xdst->num_xfrms = num_xfrms;
2168	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2169
2170	dst_hold(&xdst->u.dst);
2171	return &xdst->flo;
2172
2173inc_error:
2174	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2175error:
2176	if (xdst != NULL)
2177		dst_free(&xdst->u.dst);
2178	else
2179		xfrm_pols_put(pols, num_pols);
2180	return ERR_PTR(err);
2181}
2182
2183static struct dst_entry *make_blackhole(struct net *net, u16 family,
2184					struct dst_entry *dst_orig)
2185{
2186	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2187	struct dst_entry *ret;
2188
2189	if (!afinfo) {
2190		dst_release(dst_orig);
2191		return ERR_PTR(-EINVAL);
2192	} else {
2193		ret = afinfo->blackhole_route(net, dst_orig);
2194	}
2195	xfrm_policy_put_afinfo(afinfo);
2196
2197	return ret;
2198}
2199
2200/* Main function: finds/creates a bundle for a given flow.
2201 *
2202 * At the moment we eat a raw IP route, mostly to speed up lookups
2203 * on interfaces with IPsec disabled.
2204 */
2205struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2206			      const struct flowi *fl,
2207			      const struct sock *sk, int flags)
2208{
2209	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2210	struct flow_cache_object *flo;
2211	struct xfrm_dst *xdst;
2212	struct dst_entry *dst, *route;
2213	u16 family = dst_orig->ops->family;
2214	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
2215	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
2216
2217	dst = NULL;
2218	xdst = NULL;
2219	route = NULL;
2220
2221	sk = sk_const_to_full_sk(sk);
2222	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
2223		num_pols = 1;
2224		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
2225		err = xfrm_expand_policies(fl, family, pols,
2226					   &num_pols, &num_xfrms);
2227		if (err < 0)
2228			goto dropdst;
2229
2230		if (num_pols) {
2231			if (num_xfrms <= 0) {
2232				drop_pols = num_pols;
2233				goto no_transform;
2234			}
2235
2236			xdst = xfrm_resolve_and_create_bundle(
2237					pols, num_pols, fl,
2238					family, dst_orig);
2239			if (IS_ERR(xdst)) {
2240				xfrm_pols_put(pols, num_pols);
2241				err = PTR_ERR(xdst);
2242				goto dropdst;
2243			} else if (xdst == NULL) {
2244				num_xfrms = 0;
2245				drop_pols = num_pols;
2246				goto no_transform;
2247			}
2248
2249			dst_hold(&xdst->u.dst);
2250			xdst->u.dst.flags |= DST_NOCACHE;
2251			route = xdst->route;
2252		}
2253	}
2254
2255	if (xdst == NULL) {
2256		struct xfrm_flo xflo;
2257
2258		xflo.dst_orig = dst_orig;
2259		xflo.flags = flags;
2260
2261		/* To accelerate a bit...  */
2262		if ((dst_orig->flags & DST_NOXFRM) ||
2263		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
2264			goto nopol;
2265
2266		flo = flow_cache_lookup(net, fl, family, dir,
2267					xfrm_bundle_lookup, &xflo);
2268		if (flo == NULL)
2269			goto nopol;
2270		if (IS_ERR(flo)) {
2271			err = PTR_ERR(flo);
2272			goto dropdst;
2273		}
2274		xdst = container_of(flo, struct xfrm_dst, flo);
2275
2276		num_pols = xdst->num_pols;
2277		num_xfrms = xdst->num_xfrms;
2278		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
2279		route = xdst->route;
2280	}
2281
2282	dst = &xdst->u.dst;
2283	if (route == NULL && num_xfrms > 0) {
2284		/* The only case when xfrm_bundle_lookup() returns a
2285		 * bundle with a null route is when the template could
2286		 * not be resolved. It means policies are there, but the
2287		 * bundle could not be created, since we don't yet
2288		 * have the xfrm_states. We need to wait for the KM to
2289		 * negotiate new SAs or bail out with an error. */
2290		if (net->xfrm.sysctl_larval_drop) {
2291			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2292			err = -EREMOTE;
2293			goto error;
2294		}
2295
2296		err = -EAGAIN;
2297
2298		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2299		goto error;
2300	}
2301
2302no_transform:
2303	if (num_pols == 0)
2304		goto nopol;
2305
2306	if ((flags & XFRM_LOOKUP_ICMP) &&
2307	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
2308		err = -ENOENT;
2309		goto error;
2310	}
2311
2312	for (i = 0; i < num_pols; i++)
2313		pols[i]->curlft.use_time = get_seconds();
2314
2315	if (num_xfrms < 0) {
2316		/* Prohibit the flow */
2317		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
2318		err = -EPERM;
2319		goto error;
2320	} else if (num_xfrms > 0) {
2321		/* Flow transformed */
2322		dst_release(dst_orig);
2323	} else {
2324		/* Flow passes untransformed */
2325		dst_release(dst);
2326		dst = dst_orig;
2327	}
2328ok:
2329	xfrm_pols_put(pols, drop_pols);
2330	if (dst && dst->xfrm &&
2331	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
2332		dst->flags |= DST_XFRM_TUNNEL;
2333	return dst;
2334
2335nopol:
2336	if (!(flags & XFRM_LOOKUP_ICMP)) {
2337		dst = dst_orig;
2338		goto ok;
2339	}
2340	err = -ENOENT;
2341error:
2342	dst_release(dst);
2343dropdst:
2344	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
2345		dst_release(dst_orig);
2346	xfrm_pols_put(pols, drop_pols);
2347	return ERR_PTR(err);
2348}
2349EXPORT_SYMBOL(xfrm_lookup);
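/*
 * A minimal usage sketch for xfrm_lookup(), assuming a hypothetical output
 * path; the function and variable names below are illustrative only.
 * With flags == 0 the reference on rt_dst is always consumed (pass
 * XFRM_LOOKUP_KEEP_DST_REF to keep it), so the caller only manages the
 * dst_entry it gets back.
 */
#if 0	/* illustrative only */
static struct dst_entry *example_output_route(struct net *net,
					      struct dst_entry *rt_dst,
					      struct flowi *fl,
					      struct sock *sk)
{
	struct dst_entry *dst;

	/* On success this is either rt_dst itself (flow passes
	 * untransformed) or an xfrm bundle wrapping it.
	 */
	dst = xfrm_lookup(net, rt_dst, fl, sk, 0);
	if (IS_ERR(dst))
		return NULL;	/* rt_dst has already been released */

	return dst;
}
#endif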
2350
2351/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
2352 * Otherwise we may send out blackholed packets.
2353 */
2354struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
2355				    const struct flowi *fl,
2356				    const struct sock *sk, int flags)
2357{
2358	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
2359					    flags | XFRM_LOOKUP_QUEUE |
2360					    XFRM_LOOKUP_KEEP_DST_REF);
2361
2362	if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
2363		return make_blackhole(net, dst_orig->ops->family, dst_orig);
2364
2365	return dst;
2366}
2367EXPORT_SYMBOL(xfrm_lookup_route);
2368
2369static inline int
2370xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
2371{
2372	struct xfrm_state *x;
2373
2374	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
2375		return 0;
2376	x = skb->sp->xvec[idx];
2377	if (!x->type->reject)
2378		return 0;
2379	return x->type->reject(x, skb, fl);
2380}
2381
2382/* When skb is transformed back to its "native" form, we have to
2383 * check policy restrictions. At the moment we do this in a maximally
2384 * stupid way. Shame on me. :-) Of course, connected sockets must
2385 * have the policy cached at them.
2386 */
2387
2388static inline int
2389xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
2390	      unsigned short family)
2391{
2392	if (xfrm_state_kern(x))
2393		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
2394	return	x->id.proto == tmpl->id.proto &&
2395		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
2396		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
2397		x->props.mode == tmpl->mode &&
2398		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
2399		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
2400		!(x->props.mode != XFRM_MODE_TRANSPORT &&
2401		  xfrm_state_addr_cmp(tmpl, x, family));
2402}
2403
2404/*
2405 * 0 or more than 0 is returned when validation succeeds (either a bypass
2406 * because of an optional transport-mode template, or the next index of the
2407 * matched secpath state with the template).
2408 * -1 is returned when no matching template is found.
2409 * Otherwise "-2 - errored_index" is returned.
2410 */
2411static inline int
2412xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
2413	       unsigned short family)
2414{
2415	int idx = start;
2416
2417	if (tmpl->optional) {
2418		if (tmpl->mode == XFRM_MODE_TRANSPORT)
2419			return start;
2420	} else
2421		start = -1;
2422	for (; idx < sp->len; idx++) {
2423		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
2424			return ++idx;
2425		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
2426			if (start == -1)
2427				start = -2-idx;
2428			break;
2429		}
2430	}
2431	return start;
2432}
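/*
 * Worked example of the convention above: with sp->xvec[] holding
 * { ESP transport, ESP tunnel } and a mandatory tunnel-mode ESP template,
 * a call with start == 0 matches xvec[1] and returns 2.  Had xvec[1] not
 * matched, the scan stops at that non-transport state and returns
 * -2 - 1 = -3; if the secpath is exhausted without a match, -1 is
 * returned.
 */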
2433
2434int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
2435			  unsigned int family, int reverse)
2436{
2437	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2438	int err;
2439
2440	if (unlikely(afinfo == NULL))
2441		return -EAFNOSUPPORT;
2442
2443	afinfo->decode_session(skb, fl, reverse);
2444	err = security_xfrm_decode_session(skb, &fl->flowi_secid);
2445	xfrm_policy_put_afinfo(afinfo);
2446	return err;
2447}
2448EXPORT_SYMBOL(__xfrm_decode_session);
2449
2450static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
2451{
2452	for (; k < sp->len; k++) {
2453		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
2454			*idxp = k;
2455			return 1;
2456		}
2457	}
2458
2459	return 0;
2460}
2461
2462int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2463			unsigned short family)
2464{
2465	struct net *net = dev_net(skb->dev);
2466	struct xfrm_policy *pol;
2467	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2468	int npols = 0;
2469	int xfrm_nr;
2470	int pi;
2471	int reverse;
2472	struct flowi fl;
2473	u8 fl_dir;
2474	int xerr_idx = -1;
2475
2476	reverse = dir & ~XFRM_POLICY_MASK;
2477	dir &= XFRM_POLICY_MASK;
2478	fl_dir = policy_to_flow_dir(dir);
2479
2480	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
2481		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
2482		return 0;
2483	}
2484
2485	nf_nat_decode_session(skb, &fl, family);
2486
2487	/* First, check used SA against their selectors. */
2488	if (skb->sp) {
2489		int i;
2490
2491		for (i = skb->sp->len-1; i >= 0; i--) {
2492			struct xfrm_state *x = skb->sp->xvec[i];
2493			if (!xfrm_selector_match(&x->sel, &fl, family)) {
2494				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
2495				return 0;
2496			}
2497		}
2498	}
2499
2500	pol = NULL;
2501	sk = sk_to_full_sk(sk);
2502	if (sk && sk->sk_policy[dir]) {
2503		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
2504		if (IS_ERR(pol)) {
2505			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2506			return 0;
2507		}
2508	}
2509
2510	if (!pol) {
2511		struct flow_cache_object *flo;
2512
2513		flo = flow_cache_lookup(net, &fl, family, fl_dir,
2514					xfrm_policy_lookup, NULL);
2515		if (IS_ERR_OR_NULL(flo))
2516			pol = ERR_CAST(flo);
2517		else
2518			pol = container_of(flo, struct xfrm_policy, flo);
2519	}
2520
2521	if (IS_ERR(pol)) {
2522		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2523		return 0;
2524	}
2525
2526	if (!pol) {
2527		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
2528			xfrm_secpath_reject(xerr_idx, skb, &fl);
2529			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
2530			return 0;
2531		}
2532		return 1;
2533	}
2534
2535	pol->curlft.use_time = get_seconds();
2536
2537	pols[0] = pol;
2538	npols++;
2539#ifdef CONFIG_XFRM_SUB_POLICY
2540	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2541		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
2542						    &fl, family,
2543						    XFRM_POLICY_IN);
2544		if (pols[1]) {
2545			if (IS_ERR(pols[1])) {
2546				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2547				return 0;
2548			}
2549			pols[1]->curlft.use_time = get_seconds();
2550			npols++;
2551		}
2552	}
2553#endif
2554
2555	if (pol->action == XFRM_POLICY_ALLOW) {
2556		struct sec_path *sp;
2557		static struct sec_path dummy;
2558		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
2559		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
2560		struct xfrm_tmpl **tpp = tp;
2561		int ti = 0;
2562		int i, k;
2563
2564		if ((sp = skb->sp) == NULL)
2565			sp = &dummy;
2566
2567		for (pi = 0; pi < npols; pi++) {
2568			if (pols[pi] != pol &&
2569			    pols[pi]->action != XFRM_POLICY_ALLOW) {
2570				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2571				goto reject;
2572			}
2573			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
2574				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
2575				goto reject_error;
2576			}
2577			for (i = 0; i < pols[pi]->xfrm_nr; i++)
2578				tpp[ti++] = &pols[pi]->xfrm_vec[i];
2579		}
2580		xfrm_nr = ti;
2581		if (npols > 1) {
2582			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net);
2583			tpp = stp;
2584		}
2585
2586		/* For each tunnel xfrm, find the first matching tmpl.
2587		 * For each tmpl before that, find the corresponding xfrm.
2588		 * Order is _important_. Later we will implement
2589		 * some barriers, but at the moment barriers
2590		 * are implied between each two transformations.
2591		 */
2592		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
2593			k = xfrm_policy_ok(tpp[i], sp, k, family);
2594			if (k < 0) {
2595				if (k < -1)
2596					/* "-2 - errored_index" returned */
2597					xerr_idx = -(2+k);
2598				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2599				goto reject;
2600			}
2601		}
2602
2603		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
2604			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2605			goto reject;
2606		}
2607
2608		xfrm_pols_put(pols, npols);
2609		return 1;
2610	}
2611	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2612
2613reject:
2614	xfrm_secpath_reject(xerr_idx, skb, &fl);
2615reject_error:
2616	xfrm_pols_put(pols, npols);
2617	return 0;
2618}
2619EXPORT_SYMBOL(__xfrm_policy_check);
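/*
 * Callers normally reach __xfrm_policy_check() through the
 * xfrm_policy_check()/xfrm{4,6}_policy_check() inline helpers in
 * include/net/xfrm.h, which short-circuit the full lookup when no inbound
 * policies are installed and the skb carries no secpath.
 */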
2620
2621int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2622{
2623	struct net *net = dev_net(skb->dev);
2624	struct flowi fl;
2625	struct dst_entry *dst;
2626	int res = 1;
2627
2628	if (xfrm_decode_session(skb, &fl, family) < 0) {
2629		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2630		return 0;
2631	}
2632
2633	skb_dst_force(skb);
2634
2635	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
2636	if (IS_ERR(dst)) {
2637		res = 0;
2638		dst = NULL;
2639	}
2640	skb_dst_set(skb, dst);
2641	return res;
2642}
2643EXPORT_SYMBOL(__xfrm_route_forward);
2644
2645/* Optimize later using cookies and generation ids. */
2646
2647static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
2648{
2649	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
2650	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
2651	 * get validated by dst_ops->check on every use.  We do this
2652	 * because when a normal route referenced by an XFRM dst is
2653	 * obsoleted we do not go looking around for all parent
2654	 * referencing XFRM dsts so that we can invalidate them.  It
2655	 * is just too much work.  Instead we make the checks here on
2656	 * every use.  For example:
2657	 *
2658	 *	XFRM dst A --> IPv4 dst X
2659	 *
2660	 * X is the "xdst->route" of A (X is also the "dst->path" of A
2661	 * in this example).  If X is marked obsolete, "A" will not
2662	 * notice.  That's what we are validating here via the
2663	 * stale_bundle() check.
2664	 *
2665	 * When a policy's bundle is pruned, we dst_free() the XFRM
2666	 * dst which causes its ->obsolete field to be set to
2667	 * DST_OBSOLETE_DEAD.  If an XFRM dst has been pruned like
2668	 * this, we want to force a new route lookup.
2669	 */
2670	if (dst->obsolete < 0 && !stale_bundle(dst))
2671		return dst;
2672
2673	return NULL;
2674}
2675
2676static int stale_bundle(struct dst_entry *dst)
2677{
2678	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
2679}
2680
2681void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
2682{
2683	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
2684		dst->dev = dev_net(dev)->loopback_dev;
2685		dev_hold(dst->dev);
2686		dev_put(dev);
2687	}
2688}
2689EXPORT_SYMBOL(xfrm_dst_ifdown);
2690
2691static void xfrm_link_failure(struct sk_buff *skb)
2692{
2693	/* Impossible. Such dst must be popped before it reaches the point of failure. */
2694}
2695
2696static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
2697{
2698	if (dst) {
2699		if (dst->obsolete) {
2700			dst_release(dst);
2701			dst = NULL;
2702		}
2703	}
2704	return dst;
2705}
2706
2707void xfrm_garbage_collect(struct net *net)
2708{
2709	flow_cache_flush(net);
2710}
2711EXPORT_SYMBOL(xfrm_garbage_collect);
2712
2713static void xfrm_garbage_collect_deferred(struct net *net)
2714{
2715	flow_cache_flush_deferred(net);
2716}
2717
2718static void xfrm_init_pmtu(struct dst_entry *dst)
2719{
2720	do {
2721		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2722		u32 pmtu, route_mtu_cached;
2723
2724		pmtu = dst_mtu(dst->child);
2725		xdst->child_mtu_cached = pmtu;
2726
2727		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
2728
2729		route_mtu_cached = dst_mtu(xdst->route);
2730		xdst->route_mtu_cached = route_mtu_cached;
2731
2732		if (pmtu > route_mtu_cached)
2733			pmtu = route_mtu_cached;
2734
2735		dst_metric_set(dst, RTAX_MTU, pmtu);
2736	} while ((dst = dst->next));
2737}
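/*
 * Example of the propagation above (numbers purely illustrative): if the
 * nested dst reports an MTU of 1500 and xfrm_state_mtu() says the state's
 * header/trailer overhead leaves 1438 usable bytes, the entry gets
 * RTAX_MTU = min(1438, route MTU); a smaller route MTU would clamp it
 * further.
 */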
2738
2739/* Check that the bundle accepts the flow and its components are
2740 * still valid.
2741 */
2742
2743static int xfrm_bundle_ok(struct xfrm_dst *first)
2744{
2745	struct dst_entry *dst = &first->u.dst;
2746	struct xfrm_dst *last;
2747	u32 mtu;
2748
2749	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
2750	    (dst->dev && !netif_running(dst->dev)))
2751		return 0;
2752
2753	if (dst->flags & DST_XFRM_QUEUE)
2754		return 1;
2755
2756	last = NULL;
2757
2758	do {
2759		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2760
2761		if (dst->xfrm->km.state != XFRM_STATE_VALID)
2762			return 0;
2763		if (xdst->xfrm_genid != dst->xfrm->genid)
2764			return 0;
2765		if (xdst->num_pols > 0 &&
2766		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
2767			return 0;
2768
2769		mtu = dst_mtu(dst->child);
2770		if (xdst->child_mtu_cached != mtu) {
2771			last = xdst;
2772			xdst->child_mtu_cached = mtu;
2773		}
2774
2775		if (!dst_check(xdst->route, xdst->route_cookie))
2776			return 0;
2777		mtu = dst_mtu(xdst->route);
2778		if (xdst->route_mtu_cached != mtu) {
2779			last = xdst;
2780			xdst->route_mtu_cached = mtu;
2781		}
2782
2783		dst = dst->child;
2784	} while (dst->xfrm);
2785
2786	if (likely(!last))
2787		return 1;
2788
2789	mtu = last->child_mtu_cached;
2790	for (;;) {
2791		dst = &last->u.dst;
2792
2793		mtu = xfrm_state_mtu(dst->xfrm, mtu);
2794		if (mtu > last->route_mtu_cached)
2795			mtu = last->route_mtu_cached;
2796		dst_metric_set(dst, RTAX_MTU, mtu);
2797
2798		if (last == first)
2799			break;
2800
2801		last = (struct xfrm_dst *)last->u.dst.next;
2802		last->child_mtu_cached = mtu;
2803	}
2804
2805	return 1;
2806}
2807
2808static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
2809{
2810	return dst_metric_advmss(dst->path);
2811}
2812
2813static unsigned int xfrm_mtu(const struct dst_entry *dst)
2814{
2815	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2816
2817	return mtu ? : dst_mtu(dst->path);
2818}
2819
2820static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
2821					   struct sk_buff *skb,
2822					   const void *daddr)
2823{
2824	return dst->path->ops->neigh_lookup(dst, skb, daddr);
2825}
2826
2827int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2828{
2829	int err = 0;
2830	if (unlikely(afinfo == NULL))
2831		return -EINVAL;
2832	if (unlikely(afinfo->family >= NPROTO))
2833		return -EAFNOSUPPORT;
2834	spin_lock(&xfrm_policy_afinfo_lock);
2835	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
2836		err = -EEXIST;
2837	else {
2838		struct dst_ops *dst_ops = afinfo->dst_ops;
2839		if (likely(dst_ops->kmem_cachep == NULL))
2840			dst_ops->kmem_cachep = xfrm_dst_cache;
2841		if (likely(dst_ops->check == NULL))
2842			dst_ops->check = xfrm_dst_check;
2843		if (likely(dst_ops->default_advmss == NULL))
2844			dst_ops->default_advmss = xfrm_default_advmss;
2845		if (likely(dst_ops->mtu == NULL))
2846			dst_ops->mtu = xfrm_mtu;
2847		if (likely(dst_ops->negative_advice == NULL))
2848			dst_ops->negative_advice = xfrm_negative_advice;
2849		if (likely(dst_ops->link_failure == NULL))
2850			dst_ops->link_failure = xfrm_link_failure;
2851		if (likely(dst_ops->neigh_lookup == NULL))
2852			dst_ops->neigh_lookup = xfrm_neigh_lookup;
2853		if (likely(afinfo->garbage_collect == NULL))
2854			afinfo->garbage_collect = xfrm_garbage_collect_deferred;
2855		rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
2856	}
2857	spin_unlock(&xfrm_policy_afinfo_lock);
2858
2859	return err;
2860}
2861EXPORT_SYMBOL(xfrm_policy_register_afinfo);
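/*
 * A minimal registration sketch, assuming a hypothetical address-family
 * module; every name below prefixed with "example_" is illustrative, and
 * only fields this file actually dereferences are shown.
 * xfrm_policy_register_afinfo() fills the family's dst_ops hooks (check,
 * mtu, neigh_lookup, ...) and ->garbage_collect with xfrm defaults when
 * they are left NULL.
 */
#if 0	/* illustrative only */
static struct xfrm_policy_afinfo example_policy_afinfo = {
	.family			= AF_INET,
	.dst_ops		= &example_dst_ops,
	.get_saddr		= example_get_saddr,
	.get_tos		= example_get_tos,
	.init_path		= example_init_path,
	.fill_dst		= example_fill_dst,
	.decode_session		= example_decode_session,
	.blackhole_route	= example_blackhole_route,
};

static int __init example_policy_init(void)
{
	return xfrm_policy_register_afinfo(&example_policy_afinfo);
}
#endif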
2862
2863int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
2864{
2865	int err = 0;
2866	if (unlikely(afinfo == NULL))
2867		return -EINVAL;
2868	if (unlikely(afinfo->family >= NPROTO))
2869		return -EAFNOSUPPORT;
2870	spin_lock(&xfrm_policy_afinfo_lock);
2871	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
2872		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
2873			err = -EINVAL;
2874		else
2875			RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family],
2876					 NULL);
2877	}
2878	spin_unlock(&xfrm_policy_afinfo_lock);
2879	if (!err) {
2880		struct dst_ops *dst_ops = afinfo->dst_ops;
2881
2882		synchronize_rcu();
2883
2884		dst_ops->kmem_cachep = NULL;
2885		dst_ops->check = NULL;
2886		dst_ops->negative_advice = NULL;
2887		dst_ops->link_failure = NULL;
2888		afinfo->garbage_collect = NULL;
2889	}
2890	return err;
2891}
2892EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
2893
2894static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
2895{
2896	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2897
2898	switch (event) {
2899	case NETDEV_DOWN:
2900		xfrm_garbage_collect(dev_net(dev));
2901	}
2902	return NOTIFY_DONE;
2903}
2904
2905static struct notifier_block xfrm_dev_notifier = {
2906	.notifier_call	= xfrm_dev_event,
2907};
2908
2909#ifdef CONFIG_XFRM_STATISTICS
2910static int __net_init xfrm_statistics_init(struct net *net)
2911{
2912	int rv;
2913	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
2914	if (!net->mib.xfrm_statistics)
2915		return -ENOMEM;
2916	rv = xfrm_proc_init(net);
2917	if (rv < 0)
2918		free_percpu(net->mib.xfrm_statistics);
2919	return rv;
2920}
2921
2922static void xfrm_statistics_fini(struct net *net)
2923{
2924	xfrm_proc_fini(net);
2925	free_percpu(net->mib.xfrm_statistics);
2926}
2927#else
2928static int __net_init xfrm_statistics_init(struct net *net)
2929{
2930	return 0;
2931}
2932
2933static void xfrm_statistics_fini(struct net *net)
2934{
2935}
2936#endif
2937
2938static int __net_init xfrm_policy_init(struct net *net)
2939{
2940	unsigned int hmask, sz;
2941	int dir;
2942
2943	if (net_eq(net, &init_net))
2944		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
2945					   sizeof(struct xfrm_dst),
2946					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2947					   NULL);
2948
2949	hmask = 8 - 1;
2950	sz = (hmask+1) * sizeof(struct hlist_head);
2951
2952	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
2953	if (!net->xfrm.policy_byidx)
2954		goto out_byidx;
2955	net->xfrm.policy_idx_hmask = hmask;
2956
2957	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
2958		struct xfrm_policy_hash *htab;
2959
2960		net->xfrm.policy_count[dir] = 0;
2961		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
2962		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
2963
2964		htab = &net->xfrm.policy_bydst[dir];
2965		htab->table = xfrm_hash_alloc(sz);
2966		if (!htab->table)
2967			goto out_bydst;
2968		htab->hmask = hmask;
2969		htab->dbits4 = 32;
2970		htab->sbits4 = 32;
2971		htab->dbits6 = 128;
2972		htab->sbits6 = 128;
2973	}
2974	net->xfrm.policy_hthresh.lbits4 = 32;
2975	net->xfrm.policy_hthresh.rbits4 = 32;
2976	net->xfrm.policy_hthresh.lbits6 = 128;
2977	net->xfrm.policy_hthresh.rbits6 = 128;
2978
2979	seqlock_init(&net->xfrm.policy_hthresh.lock);
2980
2981	INIT_LIST_HEAD(&net->xfrm.policy_all);
2982	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
2983	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
2984	if (net_eq(net, &init_net))
2985		register_netdevice_notifier(&xfrm_dev_notifier);
2986	return 0;
2987
2988out_bydst:
2989	for (dir--; dir >= 0; dir--) {
2990		struct xfrm_policy_hash *htab;
2991
2992		htab = &net->xfrm.policy_bydst[dir];
2993		xfrm_hash_free(htab->table, sz);
2994	}
2995	xfrm_hash_free(net->xfrm.policy_byidx, sz);
2996out_byidx:
2997	return -ENOMEM;
2998}
2999
3000static void xfrm_policy_fini(struct net *net)
3001{
3002	unsigned int sz;
3003	int dir;
3004
3005	flush_work(&net->xfrm.policy_hash_work);
3006#ifdef CONFIG_XFRM_SUB_POLICY
3007	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
3008#endif
3009	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
3010
3011	WARN_ON(!list_empty(&net->xfrm.policy_all));
3012
3013	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
3014		struct xfrm_policy_hash *htab;
3015
3016		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
3017
3018		htab = &net->xfrm.policy_bydst[dir];
3019		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
3020		WARN_ON(!hlist_empty(htab->table));
3021		xfrm_hash_free(htab->table, sz);
3022	}
3023
3024	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
3025	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
3026	xfrm_hash_free(net->xfrm.policy_byidx, sz);
3027}
3028
3029static int __net_init xfrm_net_init(struct net *net)
3030{
3031	int rv;
3032
3033	rv = xfrm_statistics_init(net);
3034	if (rv < 0)
3035		goto out_statistics;
3036	rv = xfrm_state_init(net);
3037	if (rv < 0)
3038		goto out_state;
3039	rv = xfrm_policy_init(net);
3040	if (rv < 0)
3041		goto out_policy;
3042	rv = xfrm_sysctl_init(net);
3043	if (rv < 0)
3044		goto out_sysctl;
3045	rv = flow_cache_init(net);
3046	if (rv < 0)
3047		goto out;
3048
3049	/* Initialize the per-net locks here */
3050	spin_lock_init(&net->xfrm.xfrm_state_lock);
3051	rwlock_init(&net->xfrm.xfrm_policy_lock);
3052	mutex_init(&net->xfrm.xfrm_cfg_mutex);
3053
3054	return 0;
3055
3056out:
3057	xfrm_sysctl_fini(net);
3058out_sysctl:
3059	xfrm_policy_fini(net);
3060out_policy:
3061	xfrm_state_fini(net);
3062out_state:
3063	xfrm_statistics_fini(net);
3064out_statistics:
3065	return rv;
3066}
3067
3068static void __net_exit xfrm_net_exit(struct net *net)
3069{
3070	flow_cache_fini(net);
3071	xfrm_sysctl_fini(net);
3072	xfrm_policy_fini(net);
3073	xfrm_state_fini(net);
3074	xfrm_statistics_fini(net);
3075}
3076
3077static struct pernet_operations __net_initdata xfrm_net_ops = {
3078	.init = xfrm_net_init,
3079	.exit = xfrm_net_exit,
3080};
3081
3082void __init xfrm_init(void)
3083{
3084	register_pernet_subsys(&xfrm_net_ops);
3085	xfrm_input_init();
3086}
3087
3088#ifdef CONFIG_AUDITSYSCALL
3089static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
3090					 struct audit_buffer *audit_buf)
3091{
3092	struct xfrm_sec_ctx *ctx = xp->security;
3093	struct xfrm_selector *sel = &xp->selector;
3094
3095	if (ctx)
3096		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
3097				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
3098
3099	switch (sel->family) {
3100	case AF_INET:
3101		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
3102		if (sel->prefixlen_s != 32)
3103			audit_log_format(audit_buf, " src_prefixlen=%d",
3104					 sel->prefixlen_s);
3105		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
3106		if (sel->prefixlen_d != 32)
3107			audit_log_format(audit_buf, " dst_prefixlen=%d",
3108					 sel->prefixlen_d);
3109		break;
3110	case AF_INET6:
3111		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
3112		if (sel->prefixlen_s != 128)
3113			audit_log_format(audit_buf, " src_prefixlen=%d",
3114					 sel->prefixlen_s);
3115		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
3116		if (sel->prefixlen_d != 128)
3117			audit_log_format(audit_buf, " dst_prefixlen=%d",
3118					 sel->prefixlen_d);
3119		break;
3120	}
3121}
3122
3123void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
3124{
3125	struct audit_buffer *audit_buf;
3126
3127	audit_buf = xfrm_audit_start("SPD-add");
3128	if (audit_buf == NULL)
3129		return;
3130	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
3131	audit_log_format(audit_buf, " res=%u", result);
3132	xfrm_audit_common_policyinfo(xp, audit_buf);
3133	audit_log_end(audit_buf);
3134}
3135EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
3136
3137void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
3138			      bool task_valid)
3139{
3140	struct audit_buffer *audit_buf;
3141
3142	audit_buf = xfrm_audit_start("SPD-delete");
3143	if (audit_buf == NULL)
3144		return;
3145	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
3146	audit_log_format(audit_buf, " res=%u", result);
3147	xfrm_audit_common_policyinfo(xp, audit_buf);
3148	audit_log_end(audit_buf);
3149}
3150EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
3151#endif
3152
3153#ifdef CONFIG_XFRM_MIGRATE
3154static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
3155					const struct xfrm_selector *sel_tgt)
3156{
3157	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
3158		if (sel_tgt->family == sel_cmp->family &&
3159		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
3160				    sel_cmp->family) &&
3161		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
3162				    sel_cmp->family) &&
3163		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
3164		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
3165			return true;
3166		}
3167	} else {
3168		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
3169			return true;
3170		}
3171	}
3172	return false;
3173}
3174
3175static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
3176						    u8 dir, u8 type, struct net *net)
3177{
3178	struct xfrm_policy *pol, *ret = NULL;
3179	struct hlist_head *chain;
3180	u32 priority = ~0U;
3181
3182	read_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME*/
3183	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
3184	hlist_for_each_entry(pol, chain, bydst) {
3185		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3186		    pol->type == type) {
3187			ret = pol;
3188			priority = ret->priority;
3189			break;
3190		}
3191	}
3192	chain = &net->xfrm.policy_inexact[dir];
3193	hlist_for_each_entry(pol, chain, bydst) {
3194		if ((pol->priority >= priority) && ret)
3195			break;
3196
3197		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3198		    pol->type == type) {
3199			ret = pol;
3200			break;
3201		}
3202	}
3203
3204	xfrm_pol_hold(ret);
3205
3206	read_unlock_bh(&net->xfrm.xfrm_policy_lock);
3207
3208	return ret;
3209}
3210
3211static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
3212{
3213	int match = 0;
3214
3215	if (t->mode == m->mode && t->id.proto == m->proto &&
3216	    (m->reqid == 0 || t->reqid == m->reqid)) {
3217		switch (t->mode) {
3218		case XFRM_MODE_TUNNEL:
3219		case XFRM_MODE_BEET:
3220			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
3221					    m->old_family) &&
3222			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
3223					    m->old_family)) {
3224				match = 1;
3225			}
3226			break;
3227		case XFRM_MODE_TRANSPORT:
3228			/* in case of transport mode, the template does not
3229			 * store any IP addresses, hence we just compare
3230			 * mode and protocol */
3231			match = 1;
3232			break;
3233		default:
3234			break;
3235		}
3236	}
3237	return match;
3238}
3239
3240/* update endpoint address(es) of template(s) */
3241static int xfrm_policy_migrate(struct xfrm_policy *pol,
3242			       struct xfrm_migrate *m, int num_migrate)
3243{
3244	struct xfrm_migrate *mp;
3245	int i, j, n = 0;
3246
3247	write_lock_bh(&pol->lock);
3248	if (unlikely(pol->walk.dead)) {
3249		/* target policy has been deleted */
3250		write_unlock_bh(&pol->lock);
3251		return -ENOENT;
3252	}
3253
3254	for (i = 0; i < pol->xfrm_nr; i++) {
3255		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
3256			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
3257				continue;
3258			n++;
3259			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
3260			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
3261				continue;
3262			/* update endpoints */
3263			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
3264			       sizeof(pol->xfrm_vec[i].id.daddr));
3265			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
3266			       sizeof(pol->xfrm_vec[i].saddr));
3267			pol->xfrm_vec[i].encap_family = mp->new_family;
3268			/* flush bundles */
3269			atomic_inc(&pol->genid);
3270		}
3271	}
3272
3273	write_unlock_bh(&pol->lock);
3274
3275	if (!n)
3276		return -ENODATA;
3277
3278	return 0;
3279}
3280
3281static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
3282{
3283	int i, j;
3284
3285	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
3286		return -EINVAL;
3287
3288	for (i = 0; i < num_migrate; i++) {
3289		if (xfrm_addr_equal(&m[i].old_daddr, &m[i].new_daddr,
3290				    m[i].old_family) &&
3291		    xfrm_addr_equal(&m[i].old_saddr, &m[i].new_saddr,
3292				    m[i].old_family))
3293			return -EINVAL;
3294		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
3295		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
3296			return -EINVAL;
3297
3298		/* check if there is any duplicated entry */
3299		for (j = i + 1; j < num_migrate; j++) {
3300			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
3301				    sizeof(m[i].old_daddr)) &&
3302			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
3303				    sizeof(m[i].old_saddr)) &&
3304			    m[i].proto == m[j].proto &&
3305			    m[i].mode == m[j].mode &&
3306			    m[i].reqid == m[j].reqid &&
3307			    m[i].old_family == m[j].old_family)
3308				return -EINVAL;
3309		}
3310	}
3311
3312	return 0;
3313}
3314
3315int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
3316		 struct xfrm_migrate *m, int num_migrate,
3317		 struct xfrm_kmaddress *k, struct net *net)
3318{
3319	int i, err, nx_cur = 0, nx_new = 0;
3320	struct xfrm_policy *pol = NULL;
3321	struct xfrm_state *x, *xc;
3322	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
3323	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
3324	struct xfrm_migrate *mp;
3325
3326	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
3327		goto out;
3328
3329	/* Stage 1 - find policy */
3330	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
3331		err = -ENOENT;
3332		goto out;
3333	}
3334
3335	/* Stage 2 - find and update state(s) */
3336	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
3337		if ((x = xfrm_migrate_state_find(mp, net))) {
3338			x_cur[nx_cur] = x;
3339			nx_cur++;
3340			if ((xc = xfrm_state_migrate(x, mp))) {
3341				x_new[nx_new] = xc;
3342				nx_new++;
3343			} else {
3344				err = -ENODATA;
3345				goto restore_state;
3346			}
3347		}
3348	}
3349
3350	/* Stage 3 - update policy */
3351	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
3352		goto restore_state;
3353
3354	/* Stage 4 - delete old state(s) */
3355	if (nx_cur) {
3356		xfrm_states_put(x_cur, nx_cur);
3357		xfrm_states_delete(x_cur, nx_cur);
3358	}
3359
3360	/* Stage 5 - announce */
3361	km_migrate(sel, dir, type, m, num_migrate, k);
3362
3363	xfrm_pol_put(pol);
3364
3365	return 0;
3366out:
3367	return err;
3368
3369restore_state:
3370	if (pol)
3371		xfrm_pol_put(pol);
3372	if (nx_cur)
3373		xfrm_states_put(x_cur, nx_cur);
3374	if (nx_new)
3375		xfrm_states_delete(x_new, nx_new);
3376
3377	return err;
3378}
3379EXPORT_SYMBOL(xfrm_migrate);
3380#endif