   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * xfrm_policy.c
   4 *
   5 * Changes:
   6 *	Mitsuru KANDA @USAGI
   7 * 	Kazunori MIYAZAWA @USAGI
   8 * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
   9 * 		IPv6 support
  10 * 	Kazunori MIYAZAWA @USAGI
  11 * 	YOSHIFUJI Hideaki
  12 * 		Split up af-specific portion
  13 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
  14 *
  15 */
  16
  17#include <linux/err.h>
  18#include <linux/slab.h>
  19#include <linux/kmod.h>
  20#include <linux/list.h>
  21#include <linux/spinlock.h>
  22#include <linux/workqueue.h>
  23#include <linux/notifier.h>
  24#include <linux/netdevice.h>
  25#include <linux/netfilter.h>
  26#include <linux/module.h>
  27#include <linux/cache.h>
  28#include <linux/cpu.h>
  29#include <linux/audit.h>
  30#include <linux/rhashtable.h>
  31#include <linux/if_tunnel.h>
  32#include <linux/icmp.h>
  33#include <net/dst.h>
  34#include <net/flow.h>
  35#include <net/inet_ecn.h>
  36#include <net/xfrm.h>
  37#include <net/ip.h>
  38#include <net/gre.h>
  39#if IS_ENABLED(CONFIG_IPV6_MIP6)
  40#include <net/mip6.h>
  41#endif
  42#ifdef CONFIG_XFRM_STATISTICS
  43#include <net/snmp.h>
  44#endif
  45#ifdef CONFIG_XFRM_ESPINTCP
  46#include <net/espintcp.h>
  47#endif
  48
  49#include "xfrm_hash.h"
  50
  51#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
  52#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
  53#define XFRM_MAX_QUEUE_LEN	100
  54
  55struct xfrm_flo {
  56	struct dst_entry *dst_orig;
  57	u8 flags;
  58};
  59
  60/* prefixes smaller than this are stored in lists, not trees. */
  61#define INEXACT_PREFIXLEN_IPV4	16
  62#define INEXACT_PREFIXLEN_IPV6	48
  63
  64struct xfrm_pol_inexact_node {
  65	struct rb_node node;
  66	union {
  67		xfrm_address_t addr;
  68		struct rcu_head rcu;
  69	};
  70	u8 prefixlen;
  71
  72	struct rb_root root;
  73
   74	/* the policies matching this node; may be an empty list */
  75	struct hlist_head hhead;
  76};
  77
  78/* xfrm inexact policy search tree:
  79 * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
  80 *  |
  81 * +---- root_d: sorted by daddr:prefix
  82 * |                 |
  83 * |        xfrm_pol_inexact_node
  84 * |                 |
  85 * |                 +- root: sorted by saddr/prefix
  86 * |                 |              |
  87 * |                 |         xfrm_pol_inexact_node
  88 * |                 |              |
  89 * |                 |              + root: unused
  90 * |                 |              |
  91 * |                 |              + hhead: saddr:daddr policies
  92 * |                 |
  93 * |                 +- coarse policies and all any:daddr policies
  94 * |
  95 * +---- root_s: sorted by saddr:prefix
  96 * |                 |
  97 * |        xfrm_pol_inexact_node
  98 * |                 |
  99 * |                 + root: unused
 100 * |                 |
 101 * |                 + hhead: saddr:any policies
 102 * |
 103 * +---- coarse policies and all any:any policies
 104 *
 105 * Lookups return four candidate lists:
 106 * 1. any:any list from top-level xfrm_pol_inexact_bin
 107 * 2. any:daddr list from daddr tree
 108 * 3. saddr:daddr list from 2nd level daddr tree
 109 * 4. saddr:any list from saddr tree
 110 *
 111 * This result set then needs to be searched for the policy with
 112 * the lowest priority.  If two results have same prio, youngest one wins.
 113 */
 114
 115struct xfrm_pol_inexact_key {
 116	possible_net_t net;
 117	u32 if_id;
 118	u16 family;
 119	u8 dir, type;
 120};
 121
 122struct xfrm_pol_inexact_bin {
 123	struct xfrm_pol_inexact_key k;
 124	struct rhash_head head;
 125	/* list containing '*:*' policies */
 126	struct hlist_head hhead;
 127
 128	seqcount_spinlock_t count;
 129	/* tree sorted by daddr/prefix */
 130	struct rb_root root_d;
 131
 132	/* tree sorted by saddr/prefix */
 133	struct rb_root root_s;
 134
 135	/* slow path below */
 136	struct list_head inexact_bins;
 137	struct rcu_head rcu;
 138};
 139
 140enum xfrm_pol_inexact_candidate_type {
 141	XFRM_POL_CAND_BOTH,
 142	XFRM_POL_CAND_SADDR,
 143	XFRM_POL_CAND_DADDR,
 144	XFRM_POL_CAND_ANY,
 145
 146	XFRM_POL_CAND_MAX,
 147};
 148
 149struct xfrm_pol_inexact_candidates {
 150	struct hlist_head *res[XFRM_POL_CAND_MAX];
 151};
 152
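/* Editor's illustrative sketch, not part of the original file: a minimal
 * model of how the four candidate lists described above could be reduced
 * to a single best match.  The real lookup paths later in this file also
 * apply full selector matching and break priority ties in favour of the
 * youngest policy (tracked via ->pos); only the lowest-priority rule is
 * shown here.  The helper name is hypothetical.
 */
#if 0	/* documentation-only example */
static struct xfrm_policy *
example_lowest_priority(const struct xfrm_pol_inexact_candidates *cand)
{
	struct xfrm_policy *best = NULL;
	struct xfrm_policy *pol;
	int i;

	for (i = 0; i < XFRM_POL_CAND_MAX; i++) {
		if (!cand->res[i])
			continue;
		hlist_for_each_entry(pol, cand->res[i], bydst) {
			if (pol->walk.dead)
				continue;
			if (!best || pol->priority < best->priority)
				best = pol;
		}
	}
	return best;
}
#endif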
 153struct xfrm_flow_keys {
 154	struct flow_dissector_key_basic basic;
 155	struct flow_dissector_key_control control;
 156	union {
 157		struct flow_dissector_key_ipv4_addrs ipv4;
 158		struct flow_dissector_key_ipv6_addrs ipv6;
 159	} addrs;
 160	struct flow_dissector_key_ip ip;
 161	struct flow_dissector_key_icmp icmp;
 162	struct flow_dissector_key_ports ports;
 163	struct flow_dissector_key_keyid gre;
 164};
 165
 166static struct flow_dissector xfrm_session_dissector __ro_after_init;
 167
 168static DEFINE_SPINLOCK(xfrm_if_cb_lock);
 169static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;
 170
 171static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
 172static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
 173						__read_mostly;
 174
 175static struct kmem_cache *xfrm_dst_cache __ro_after_init;
 176
 177static struct rhashtable xfrm_policy_inexact_table;
 178static const struct rhashtable_params xfrm_pol_inexact_params;
 179
 180static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
 181static int stale_bundle(struct dst_entry *dst);
 182static int xfrm_bundle_ok(struct xfrm_dst *xdst);
 183static void xfrm_policy_queue_process(struct timer_list *t);
 184
 185static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
 186static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
 187						int dir);
 188
 189static struct xfrm_pol_inexact_bin *
 190xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
 191			   u32 if_id);
 192
 193static struct xfrm_pol_inexact_bin *
 194xfrm_policy_inexact_lookup_rcu(struct net *net,
 195			       u8 type, u16 family, u8 dir, u32 if_id);
 196static struct xfrm_policy *
 197xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
 198			bool excl);
 199static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
 200					    struct xfrm_policy *policy);
 201
 202static bool
 203xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
 204				    struct xfrm_pol_inexact_bin *b,
 205				    const xfrm_address_t *saddr,
 206				    const xfrm_address_t *daddr);
 207
 208static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
 209{
 210	return refcount_inc_not_zero(&policy->refcnt);
 211}
 212
 213static inline bool
 214__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
 215{
 216	const struct flowi4 *fl4 = &fl->u.ip4;
 217
 218	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
 219		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
 220		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
 221		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
 222		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
 223		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
 224}
 225
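/* Editor's note, not part of the original file: the port checks above use
 * XOR plus a mask for cheap wildcarding.  With sel->dport_mask == htons(0xffff)
 * only an exact match passes, e.g. flow dport 80 vs. selector dport 80 gives
 * (80 ^ 80) & 0xffff == 0; with dport_mask == 0 the XOR result is masked away
 * entirely, so any port matches.  The same applies to the IPv6 variant below.
 */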
 226static inline bool
 227__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
 228{
 229	const struct flowi6 *fl6 = &fl->u.ip6;
 230
 231	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
 232		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
 233		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
 234		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
 235		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
 236		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
 237}
 238
 239bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
 240			 unsigned short family)
 241{
 242	switch (family) {
 243	case AF_INET:
 244		return __xfrm4_selector_match(sel, fl);
 245	case AF_INET6:
 246		return __xfrm6_selector_match(sel, fl);
 247	}
 248	return false;
 249}
 250
 251static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
 252{
 253	const struct xfrm_policy_afinfo *afinfo;
 254
 255	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
 256		return NULL;
 257	rcu_read_lock();
 258	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
 259	if (unlikely(!afinfo))
 260		rcu_read_unlock();
 261	return afinfo;
 262}
 263
 264/* Called with rcu_read_lock(). */
 265static const struct xfrm_if_cb *xfrm_if_get_cb(void)
 266{
 267	return rcu_dereference(xfrm_if_cb);
 268}
 269
 270struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
 271				    const xfrm_address_t *saddr,
 272				    const xfrm_address_t *daddr,
 273				    int family, u32 mark)
 274{
 275	const struct xfrm_policy_afinfo *afinfo;
 276	struct dst_entry *dst;
 277
 278	afinfo = xfrm_policy_get_afinfo(family);
 279	if (unlikely(afinfo == NULL))
 280		return ERR_PTR(-EAFNOSUPPORT);
 281
 282	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
 283
 284	rcu_read_unlock();
 285
 286	return dst;
 287}
 288EXPORT_SYMBOL(__xfrm_dst_lookup);
 289
 290static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
 291						int tos, int oif,
 292						xfrm_address_t *prev_saddr,
 293						xfrm_address_t *prev_daddr,
 294						int family, u32 mark)
 295{
 296	struct net *net = xs_net(x);
 297	xfrm_address_t *saddr = &x->props.saddr;
 298	xfrm_address_t *daddr = &x->id.daddr;
 299	struct dst_entry *dst;
 300
 301	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
 302		saddr = x->coaddr;
 303		daddr = prev_daddr;
 304	}
 305	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
 306		saddr = prev_saddr;
 307		daddr = x->coaddr;
 308	}
 309
 310	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
 311
 312	if (!IS_ERR(dst)) {
 313		if (prev_saddr != saddr)
 314			memcpy(prev_saddr, saddr,  sizeof(*prev_saddr));
 315		if (prev_daddr != daddr)
 316			memcpy(prev_daddr, daddr,  sizeof(*prev_daddr));
 317	}
 318
 319	return dst;
 320}
 321
 322static inline unsigned long make_jiffies(long secs)
 323{
 324	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
 325		return MAX_SCHEDULE_TIMEOUT-1;
 326	else
 327		return secs*HZ;
 328}
 329
 330static void xfrm_policy_timer(struct timer_list *t)
 331{
 332	struct xfrm_policy *xp = from_timer(xp, t, timer);
 333	time64_t now = ktime_get_real_seconds();
 334	time64_t next = TIME64_MAX;
 335	int warn = 0;
 336	int dir;
 337
 338	read_lock(&xp->lock);
 339
 340	if (unlikely(xp->walk.dead))
 341		goto out;
 342
 343	dir = xfrm_policy_id2dir(xp->index);
 344
 345	if (xp->lft.hard_add_expires_seconds) {
 346		time64_t tmo = xp->lft.hard_add_expires_seconds +
 347			xp->curlft.add_time - now;
 348		if (tmo <= 0)
 349			goto expired;
 350		if (tmo < next)
 351			next = tmo;
 352	}
 353	if (xp->lft.hard_use_expires_seconds) {
 354		time64_t tmo = xp->lft.hard_use_expires_seconds +
 355			(READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
 356		if (tmo <= 0)
 357			goto expired;
 358		if (tmo < next)
 359			next = tmo;
 360	}
 361	if (xp->lft.soft_add_expires_seconds) {
 362		time64_t tmo = xp->lft.soft_add_expires_seconds +
 363			xp->curlft.add_time - now;
 364		if (tmo <= 0) {
 365			warn = 1;
 366			tmo = XFRM_KM_TIMEOUT;
 367		}
 368		if (tmo < next)
 369			next = tmo;
 370	}
 371	if (xp->lft.soft_use_expires_seconds) {
 372		time64_t tmo = xp->lft.soft_use_expires_seconds +
 373			(READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
 374		if (tmo <= 0) {
 375			warn = 1;
 376			tmo = XFRM_KM_TIMEOUT;
 377		}
 378		if (tmo < next)
 379			next = tmo;
 380	}
 381
 382	if (warn)
 383		km_policy_expired(xp, dir, 0, 0);
 384	if (next != TIME64_MAX &&
 385	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
 386		xfrm_pol_hold(xp);
 387
 388out:
 389	read_unlock(&xp->lock);
 390	xfrm_pol_put(xp);
 391	return;
 392
 393expired:
 394	read_unlock(&xp->lock);
 395	if (!xfrm_policy_delete(xp, dir))
 396		km_policy_expired(xp, dir, 1, 0);
 397	xfrm_pol_put(xp);
 398}
 399
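/* Editor's worked example, not part of the original file: with
 * lft.hard_add_expires_seconds == 3600 and curlft.add_time 3500 seconds in
 * the past, tmo == 100 and the timer is re-armed roughly 100 seconds out.
 * Once a hard limit reaches zero the "expired" path deletes the policy and
 * reports it via km_policy_expired(xp, dir, 1, 0).  Soft limits never delete
 * the policy; they only warn (hard == 0) and re-check after XFRM_KM_TIMEOUT.
 */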
  400/* Allocate xfrm_policy. Not used here; it is meant to be used by pfkeyv2
  401 * SPD calls.
  402 */
 403
 404struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
 405{
 406	struct xfrm_policy *policy;
 407
 408	policy = kzalloc(sizeof(struct xfrm_policy), gfp);
 409
 410	if (policy) {
 411		write_pnet(&policy->xp_net, net);
 412		INIT_LIST_HEAD(&policy->walk.all);
 413		INIT_HLIST_NODE(&policy->bydst_inexact_list);
 414		INIT_HLIST_NODE(&policy->bydst);
 415		INIT_HLIST_NODE(&policy->byidx);
 416		rwlock_init(&policy->lock);
 417		refcount_set(&policy->refcnt, 1);
 418		skb_queue_head_init(&policy->polq.hold_queue);
 419		timer_setup(&policy->timer, xfrm_policy_timer, 0);
 420		timer_setup(&policy->polq.hold_timer,
 421			    xfrm_policy_queue_process, 0);
 422	}
 423	return policy;
 424}
 425EXPORT_SYMBOL(xfrm_policy_alloc);
 426
 427static void xfrm_policy_destroy_rcu(struct rcu_head *head)
 428{
 429	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
 430
 431	security_xfrm_policy_free(policy->security);
 432	kfree(policy);
 433}
 434
  435/* Destroy xfrm_policy: descendant resources must already have been released by this point. */
 436
 437void xfrm_policy_destroy(struct xfrm_policy *policy)
 438{
 439	BUG_ON(!policy->walk.dead);
 440
 441	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
 442		BUG();
 443
 444	xfrm_dev_policy_free(policy);
 445	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
 446}
 447EXPORT_SYMBOL(xfrm_policy_destroy);
 448
  449/* Rule must be locked. Release descendant resources and announce the
  450 * entry dead. The rule must already be unlinked from all lists at this point.
  451 */
 452
 453static void xfrm_policy_kill(struct xfrm_policy *policy)
 454{
 455	write_lock_bh(&policy->lock);
 456	policy->walk.dead = 1;
 457	write_unlock_bh(&policy->lock);
 458
 459	atomic_inc(&policy->genid);
 460
 461	if (del_timer(&policy->polq.hold_timer))
 462		xfrm_pol_put(policy);
 463	skb_queue_purge(&policy->polq.hold_queue);
 464
 465	if (del_timer(&policy->timer))
 466		xfrm_pol_put(policy);
 467
 468	xfrm_pol_put(policy);
 469}
 470
 471static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
 472
 473static inline unsigned int idx_hash(struct net *net, u32 index)
 474{
 475	return __idx_hash(index, net->xfrm.policy_idx_hmask);
 476}
 477
 478/* calculate policy hash thresholds */
 479static void __get_hash_thresh(struct net *net,
 480			      unsigned short family, int dir,
 481			      u8 *dbits, u8 *sbits)
 482{
 483	switch (family) {
 484	case AF_INET:
 485		*dbits = net->xfrm.policy_bydst[dir].dbits4;
 486		*sbits = net->xfrm.policy_bydst[dir].sbits4;
 487		break;
 488
 489	case AF_INET6:
 490		*dbits = net->xfrm.policy_bydst[dir].dbits6;
 491		*sbits = net->xfrm.policy_bydst[dir].sbits6;
 492		break;
 493
 494	default:
 495		*dbits = 0;
 496		*sbits = 0;
 497	}
 498}
 499
 500static struct hlist_head *policy_hash_bysel(struct net *net,
 501					    const struct xfrm_selector *sel,
 502					    unsigned short family, int dir)
 503{
 504	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 505	unsigned int hash;
 506	u8 dbits;
 507	u8 sbits;
 508
 509	__get_hash_thresh(net, family, dir, &dbits, &sbits);
 510	hash = __sel_hash(sel, family, hmask, dbits, sbits);
 511
 512	if (hash == hmask + 1)
 513		return NULL;
 514
 515	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
 516		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
 517}
 518
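/* Editor's note, not part of the original file: hmask + 1 is the sentinel
 * __sel_hash() returns when the selector cannot be hashed into the exact
 * bydst table, so policy_hash_bysel() returns NULL and callers such as
 * xfrm_policy_insert() fall back to the inexact lists/trees instead.
 */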
 519static struct hlist_head *policy_hash_direct(struct net *net,
 520					     const xfrm_address_t *daddr,
 521					     const xfrm_address_t *saddr,
 522					     unsigned short family, int dir)
 523{
 524	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 525	unsigned int hash;
 526	u8 dbits;
 527	u8 sbits;
 528
 529	__get_hash_thresh(net, family, dir, &dbits, &sbits);
 530	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
 531
 532	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
 533		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
 534}
 535
 536static void xfrm_dst_hash_transfer(struct net *net,
 537				   struct hlist_head *list,
 538				   struct hlist_head *ndsttable,
 539				   unsigned int nhashmask,
 540				   int dir)
 541{
 542	struct hlist_node *tmp, *entry0 = NULL;
 543	struct xfrm_policy *pol;
 544	unsigned int h0 = 0;
 545	u8 dbits;
 546	u8 sbits;
 547
 548redo:
 549	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
 550		unsigned int h;
 551
 552		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
 553		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
 554				pol->family, nhashmask, dbits, sbits);
 555		if (!entry0 || pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
 556			hlist_del_rcu(&pol->bydst);
 557			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
 558			h0 = h;
 559		} else {
 560			if (h != h0)
 561				continue;
 562			hlist_del_rcu(&pol->bydst);
 563			hlist_add_behind_rcu(&pol->bydst, entry0);
 564		}
 565		entry0 = &pol->bydst;
 566	}
 567	if (!hlist_empty(list)) {
 568		entry0 = NULL;
 569		goto redo;
 570	}
 571}
 572
 573static void xfrm_idx_hash_transfer(struct hlist_head *list,
 574				   struct hlist_head *nidxtable,
 575				   unsigned int nhashmask)
 576{
 577	struct hlist_node *tmp;
 578	struct xfrm_policy *pol;
 579
 580	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
 581		unsigned int h;
 582
 583		h = __idx_hash(pol->index, nhashmask);
 584		hlist_add_head(&pol->byidx, nidxtable+h);
 585	}
 586}
 587
 588static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
 589{
 590	return ((old_hmask + 1) << 1) - 1;
 591}
 592
 593static void xfrm_bydst_resize(struct net *net, int dir)
 594{
 595	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 596	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
 597	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
 598	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
 599	struct hlist_head *odst;
 600	int i;
 601
 602	if (!ndst)
 603		return;
 604
 605	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
 606	write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
 607
 608	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
 609				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
 610
 611	for (i = hmask; i >= 0; i--)
 612		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
 613
 614	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
 615	net->xfrm.policy_bydst[dir].hmask = nhashmask;
 616
 617	write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
 618	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 619
 620	synchronize_rcu();
 621
 622	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
 623}
 624
 625static void xfrm_byidx_resize(struct net *net)
 626{
 627	unsigned int hmask = net->xfrm.policy_idx_hmask;
 628	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
 629	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
 630	struct hlist_head *oidx = net->xfrm.policy_byidx;
 631	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
 632	int i;
 633
 634	if (!nidx)
 635		return;
 636
 637	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
 638
 639	for (i = hmask; i >= 0; i--)
 640		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
 641
 642	net->xfrm.policy_byidx = nidx;
 643	net->xfrm.policy_idx_hmask = nhashmask;
 644
 645	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 646
 647	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
 648}
 649
 650static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
 651{
 652	unsigned int cnt = net->xfrm.policy_count[dir];
 653	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 654
 655	if (total)
 656		*total += cnt;
 657
 658	if ((hmask + 1) < xfrm_policy_hashmax &&
 659	    cnt > hmask)
 660		return 1;
 661
 662	return 0;
 663}
 664
 665static inline int xfrm_byidx_should_resize(struct net *net, int total)
 666{
 667	unsigned int hmask = net->xfrm.policy_idx_hmask;
 668
 669	if ((hmask + 1) < xfrm_policy_hashmax &&
 670	    total > hmask)
 671		return 1;
 672
 673	return 0;
 674}
 675
 676void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
 677{
 678	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
 679	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
 680	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
 681	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
 682	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
 683	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
 684	si->spdhcnt = net->xfrm.policy_idx_hmask;
 685	si->spdhmcnt = xfrm_policy_hashmax;
 686}
 687EXPORT_SYMBOL(xfrm_spd_getinfo);
 688
 689static DEFINE_MUTEX(hash_resize_mutex);
 690static void xfrm_hash_resize(struct work_struct *work)
 691{
 692	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
 693	int dir, total;
 694
 695	mutex_lock(&hash_resize_mutex);
 696
 697	total = 0;
 698	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
 699		if (xfrm_bydst_should_resize(net, dir, &total))
 700			xfrm_bydst_resize(net, dir);
 701	}
 702	if (xfrm_byidx_should_resize(net, total))
 703		xfrm_byidx_resize(net);
 704
 705	mutex_unlock(&hash_resize_mutex);
 706}
 707
 708/* Make sure *pol can be inserted into fastbin.
 709 * Useful to check that later insert requests will be successful
 710 * (provided xfrm_policy_lock is held throughout).
 711 */
 712static struct xfrm_pol_inexact_bin *
 713xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
 714{
 715	struct xfrm_pol_inexact_bin *bin, *prev;
 716	struct xfrm_pol_inexact_key k = {
 717		.family = pol->family,
 718		.type = pol->type,
 719		.dir = dir,
 720		.if_id = pol->if_id,
 721	};
 722	struct net *net = xp_net(pol);
 723
 724	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
 725
 726	write_pnet(&k.net, net);
 727	bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
 728				     xfrm_pol_inexact_params);
 729	if (bin)
 730		return bin;
 731
 732	bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
 733	if (!bin)
 734		return NULL;
 735
 736	bin->k = k;
 737	INIT_HLIST_HEAD(&bin->hhead);
 738	bin->root_d = RB_ROOT;
 739	bin->root_s = RB_ROOT;
 740	seqcount_spinlock_init(&bin->count, &net->xfrm.xfrm_policy_lock);
 741
 742	prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
 743						&bin->k, &bin->head,
 744						xfrm_pol_inexact_params);
 745	if (!prev) {
 746		list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
 747		return bin;
 748	}
 749
 750	kfree(bin);
 751
 752	return IS_ERR(prev) ? NULL : prev;
 753}
 754
 755static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
 756					       int family, u8 prefixlen)
 757{
 758	if (xfrm_addr_any(addr, family))
 759		return true;
 760
 761	if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
 762		return true;
 763
 764	if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
 765		return true;
 766
 767	return false;
 768}
 769
 770static bool
 771xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
 772{
 773	const xfrm_address_t *addr;
 774	bool saddr_any, daddr_any;
 775	u8 prefixlen;
 776
 777	addr = &policy->selector.saddr;
 778	prefixlen = policy->selector.prefixlen_s;
 779
 780	saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
 781						       policy->family,
 782						       prefixlen);
 783	addr = &policy->selector.daddr;
 784	prefixlen = policy->selector.prefixlen_d;
 785	daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
 786						       policy->family,
 787						       prefixlen);
 788	return saddr_any && daddr_any;
 789}
 790
 791static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
 792				       const xfrm_address_t *addr, u8 prefixlen)
 793{
 794	node->addr = *addr;
 795	node->prefixlen = prefixlen;
 796}
 797
 798static struct xfrm_pol_inexact_node *
 799xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
 800{
 801	struct xfrm_pol_inexact_node *node;
 802
 803	node = kzalloc(sizeof(*node), GFP_ATOMIC);
 804	if (node)
 805		xfrm_pol_inexact_node_init(node, addr, prefixlen);
 806
 807	return node;
 808}
 809
 810static int xfrm_policy_addr_delta(const xfrm_address_t *a,
 811				  const xfrm_address_t *b,
 812				  u8 prefixlen, u16 family)
 813{
 814	u32 ma, mb, mask;
 815	unsigned int pdw, pbi;
 816	int delta = 0;
 817
 818	switch (family) {
 819	case AF_INET:
 820		if (prefixlen == 0)
 821			return 0;
 822		mask = ~0U << (32 - prefixlen);
 823		ma = ntohl(a->a4) & mask;
 824		mb = ntohl(b->a4) & mask;
 825		if (ma < mb)
 826			delta = -1;
 827		else if (ma > mb)
 828			delta = 1;
 829		break;
 830	case AF_INET6:
 831		pdw = prefixlen >> 5;
 832		pbi = prefixlen & 0x1f;
 833
 834		if (pdw) {
 835			delta = memcmp(a->a6, b->a6, pdw << 2);
 836			if (delta)
 837				return delta;
 838		}
 839		if (pbi) {
 840			mask = ~0U << (32 - pbi);
 841			ma = ntohl(a->a6[pdw]) & mask;
 842			mb = ntohl(b->a6[pdw]) & mask;
 843			if (ma < mb)
 844				delta = -1;
 845			else if (ma > mb)
 846				delta = 1;
 847		}
 848		break;
 849	default:
 850		break;
 851	}
 852
 853	return delta;
 854}
 855
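/* Editor's worked example, not part of the original file: for AF_INET and
 * prefixlen 24, mask == ~0U << 8 == 0xffffff00.  Comparing a == 10.1.2.3
 * (0x0a010203) with b == 10.1.2.200 (0x0a0102c8), both mask to 0x0a010200,
 * so the delta is 0 and the addresses fall into the same /24 node, while
 * 10.1.3.1 masks to 0x0a010300 and sorts after them.
 */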
 856static void xfrm_policy_inexact_list_reinsert(struct net *net,
 857					      struct xfrm_pol_inexact_node *n,
 858					      u16 family)
 859{
 860	unsigned int matched_s, matched_d;
 861	struct xfrm_policy *policy, *p;
 862
 863	matched_s = 0;
 864	matched_d = 0;
 865
 866	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
 867		struct hlist_node *newpos = NULL;
 868		bool matches_s, matches_d;
 869
 870		if (policy->walk.dead || !policy->bydst_reinsert)
 871			continue;
 872
 873		WARN_ON_ONCE(policy->family != family);
 874
 875		policy->bydst_reinsert = false;
 876		hlist_for_each_entry(p, &n->hhead, bydst) {
 877			if (policy->priority > p->priority)
 878				newpos = &p->bydst;
 879			else if (policy->priority == p->priority &&
 880				 policy->pos > p->pos)
 881				newpos = &p->bydst;
 882			else
 883				break;
 884		}
 885
 886		if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
 887			hlist_add_behind_rcu(&policy->bydst, newpos);
 888		else
 889			hlist_add_head_rcu(&policy->bydst, &n->hhead);
 890
 891		/* paranoia checks follow.
 892		 * Check that the reinserted policy matches at least
 893		 * saddr or daddr for current node prefix.
 894		 *
 895		 * Matching both is fine, matching saddr in one policy
 896		 * (but not daddr) and then matching only daddr in another
 897		 * is a bug.
 898		 */
 899		matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
 900						   &n->addr,
 901						   n->prefixlen,
 902						   family) == 0;
 903		matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
 904						   &n->addr,
 905						   n->prefixlen,
 906						   family) == 0;
 907		if (matches_s && matches_d)
 908			continue;
 909
 910		WARN_ON_ONCE(!matches_s && !matches_d);
 911		if (matches_s)
 912			matched_s++;
 913		if (matches_d)
 914			matched_d++;
 915		WARN_ON_ONCE(matched_s && matched_d);
 916	}
 917}
 918
 919static void xfrm_policy_inexact_node_reinsert(struct net *net,
 920					      struct xfrm_pol_inexact_node *n,
 921					      struct rb_root *new,
 922					      u16 family)
 923{
 924	struct xfrm_pol_inexact_node *node;
 925	struct rb_node **p, *parent;
 926
 927	/* we should not have another subtree here */
 928	WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
 929restart:
 930	parent = NULL;
 931	p = &new->rb_node;
 932	while (*p) {
 933		u8 prefixlen;
 934		int delta;
 935
 936		parent = *p;
 937		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
 938
 939		prefixlen = min(node->prefixlen, n->prefixlen);
 940
 941		delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
 942					       prefixlen, family);
 943		if (delta < 0) {
 944			p = &parent->rb_left;
 945		} else if (delta > 0) {
 946			p = &parent->rb_right;
 947		} else {
 948			bool same_prefixlen = node->prefixlen == n->prefixlen;
 949			struct xfrm_policy *tmp;
 950
 951			hlist_for_each_entry(tmp, &n->hhead, bydst) {
 952				tmp->bydst_reinsert = true;
 953				hlist_del_rcu(&tmp->bydst);
 954			}
 955
 956			node->prefixlen = prefixlen;
 957
 958			xfrm_policy_inexact_list_reinsert(net, node, family);
 959
 960			if (same_prefixlen) {
 961				kfree_rcu(n, rcu);
 962				return;
 963			}
 964
 965			rb_erase(*p, new);
 966			kfree_rcu(n, rcu);
 967			n = node;
 968			goto restart;
 969		}
 970	}
 971
 972	rb_link_node_rcu(&n->node, parent, p);
 973	rb_insert_color(&n->node, new);
 974}
 975
 976/* merge nodes v and n */
 977static void xfrm_policy_inexact_node_merge(struct net *net,
 978					   struct xfrm_pol_inexact_node *v,
 979					   struct xfrm_pol_inexact_node *n,
 980					   u16 family)
 981{
 982	struct xfrm_pol_inexact_node *node;
 983	struct xfrm_policy *tmp;
 984	struct rb_node *rnode;
 985
 986	/* To-be-merged node v has a subtree.
 987	 *
 988	 * Dismantle it and insert its nodes to n->root.
 989	 */
 990	while ((rnode = rb_first(&v->root)) != NULL) {
 991		node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
 992		rb_erase(&node->node, &v->root);
 993		xfrm_policy_inexact_node_reinsert(net, node, &n->root,
 994						  family);
 995	}
 996
 997	hlist_for_each_entry(tmp, &v->hhead, bydst) {
 998		tmp->bydst_reinsert = true;
 999		hlist_del_rcu(&tmp->bydst);
1000	}
1001
1002	xfrm_policy_inexact_list_reinsert(net, n, family);
1003}
1004
1005static struct xfrm_pol_inexact_node *
1006xfrm_policy_inexact_insert_node(struct net *net,
1007				struct rb_root *root,
1008				xfrm_address_t *addr,
1009				u16 family, u8 prefixlen, u8 dir)
1010{
1011	struct xfrm_pol_inexact_node *cached = NULL;
1012	struct rb_node **p, *parent = NULL;
1013	struct xfrm_pol_inexact_node *node;
1014
1015	p = &root->rb_node;
1016	while (*p) {
1017		int delta;
1018
1019		parent = *p;
1020		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
1021
1022		delta = xfrm_policy_addr_delta(addr, &node->addr,
1023					       node->prefixlen,
1024					       family);
1025		if (delta == 0 && prefixlen >= node->prefixlen) {
1026			WARN_ON_ONCE(cached); /* ipsec policies got lost */
1027			return node;
1028		}
1029
1030		if (delta < 0)
1031			p = &parent->rb_left;
1032		else
1033			p = &parent->rb_right;
1034
1035		if (prefixlen < node->prefixlen) {
1036			delta = xfrm_policy_addr_delta(addr, &node->addr,
1037						       prefixlen,
1038						       family);
1039			if (delta)
1040				continue;
1041
1042			/* This node is a subnet of the new prefix. It needs
1043			 * to be removed and re-inserted with the smaller
1044			 * prefix and all nodes that are now also covered
1045			 * by the reduced prefixlen.
1046			 */
1047			rb_erase(&node->node, root);
1048
1049			if (!cached) {
1050				xfrm_pol_inexact_node_init(node, addr,
1051							   prefixlen);
1052				cached = node;
1053			} else {
1054				/* This node also falls within the new
1055				 * prefixlen. Merge the to-be-reinserted
1056				 * node and this one.
1057				 */
1058				xfrm_policy_inexact_node_merge(net, node,
1059							       cached, family);
1060				kfree_rcu(node, rcu);
1061			}
1062
1063			/* restart */
1064			p = &root->rb_node;
1065			parent = NULL;
1066		}
1067	}
1068
1069	node = cached;
1070	if (!node) {
1071		node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
1072		if (!node)
1073			return NULL;
1074	}
1075
1076	rb_link_node_rcu(&node->node, parent, p);
1077	rb_insert_color(&node->node, root);
1078
1079	return node;
1080}
1081
1082static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
1083{
1084	struct xfrm_pol_inexact_node *node;
1085	struct rb_node *rn = rb_first(r);
1086
1087	while (rn) {
1088		node = rb_entry(rn, struct xfrm_pol_inexact_node, node);
1089
1090		xfrm_policy_inexact_gc_tree(&node->root, rm);
1091		rn = rb_next(rn);
1092
1093		if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
1094			WARN_ON_ONCE(rm);
1095			continue;
1096		}
1097
1098		rb_erase(&node->node, r);
1099		kfree_rcu(node, rcu);
1100	}
1101}
1102
1103static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
1104{
1105	write_seqcount_begin(&b->count);
1106	xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
1107	xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
1108	write_seqcount_end(&b->count);
1109
1110	if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
1111	    !hlist_empty(&b->hhead)) {
1112		WARN_ON_ONCE(net_exit);
1113		return;
1114	}
1115
1116	if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
1117				   xfrm_pol_inexact_params) == 0) {
1118		list_del(&b->inexact_bins);
1119		kfree_rcu(b, rcu);
1120	}
1121}
1122
1123static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
1124{
1125	struct net *net = read_pnet(&b->k.net);
1126
1127	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1128	__xfrm_policy_inexact_prune_bin(b, false);
1129	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1130}
1131
1132static void __xfrm_policy_inexact_flush(struct net *net)
1133{
1134	struct xfrm_pol_inexact_bin *bin, *t;
1135
1136	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1137
1138	list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
1139		__xfrm_policy_inexact_prune_bin(bin, false);
1140}
1141
1142static struct hlist_head *
1143xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
1144				struct xfrm_policy *policy, u8 dir)
1145{
1146	struct xfrm_pol_inexact_node *n;
1147	struct net *net;
1148
1149	net = xp_net(policy);
1150	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1151
1152	if (xfrm_policy_inexact_insert_use_any_list(policy))
1153		return &bin->hhead;
1154
1155	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
1156					       policy->family,
1157					       policy->selector.prefixlen_d)) {
1158		write_seqcount_begin(&bin->count);
1159		n = xfrm_policy_inexact_insert_node(net,
1160						    &bin->root_s,
1161						    &policy->selector.saddr,
1162						    policy->family,
1163						    policy->selector.prefixlen_s,
1164						    dir);
1165		write_seqcount_end(&bin->count);
1166		if (!n)
1167			return NULL;
1168
1169		return &n->hhead;
1170	}
1171
1172	/* daddr is fixed */
1173	write_seqcount_begin(&bin->count);
1174	n = xfrm_policy_inexact_insert_node(net,
1175					    &bin->root_d,
1176					    &policy->selector.daddr,
1177					    policy->family,
1178					    policy->selector.prefixlen_d, dir);
1179	write_seqcount_end(&bin->count);
1180	if (!n)
1181		return NULL;
1182
1183	/* saddr is wildcard */
1184	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
1185					       policy->family,
1186					       policy->selector.prefixlen_s))
1187		return &n->hhead;
1188
1189	write_seqcount_begin(&bin->count);
1190	n = xfrm_policy_inexact_insert_node(net,
1191					    &n->root,
1192					    &policy->selector.saddr,
1193					    policy->family,
1194					    policy->selector.prefixlen_s, dir);
1195	write_seqcount_end(&bin->count);
1196	if (!n)
1197		return NULL;
1198
1199	return &n->hhead;
1200}
1201
1202static struct xfrm_policy *
1203xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
1204{
1205	struct xfrm_pol_inexact_bin *bin;
1206	struct xfrm_policy *delpol;
1207	struct hlist_head *chain;
1208	struct net *net;
1209
1210	bin = xfrm_policy_inexact_alloc_bin(policy, dir);
1211	if (!bin)
1212		return ERR_PTR(-ENOMEM);
1213
1214	net = xp_net(policy);
1215	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1216
1217	chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
1218	if (!chain) {
1219		__xfrm_policy_inexact_prune_bin(bin, false);
1220		return ERR_PTR(-ENOMEM);
1221	}
1222
1223	delpol = xfrm_policy_insert_list(chain, policy, excl);
1224	if (delpol && excl) {
1225		__xfrm_policy_inexact_prune_bin(bin, false);
1226		return ERR_PTR(-EEXIST);
1227	}
1228
1229	chain = &net->xfrm.policy_inexact[dir];
1230	xfrm_policy_insert_inexact_list(chain, policy);
1231
1232	if (delpol)
1233		__xfrm_policy_inexact_prune_bin(bin, false);
1234
1235	return delpol;
1236}
1237
1238static void xfrm_hash_rebuild(struct work_struct *work)
1239{
1240	struct net *net = container_of(work, struct net,
1241				       xfrm.policy_hthresh.work);
1242	unsigned int hmask;
1243	struct xfrm_policy *pol;
1244	struct xfrm_policy *policy;
1245	struct hlist_head *chain;
1246	struct hlist_head *odst;
1247	struct hlist_node *newpos;
1248	int i;
1249	int dir;
1250	unsigned seq;
1251	u8 lbits4, rbits4, lbits6, rbits6;
1252
1253	mutex_lock(&hash_resize_mutex);
1254
1255	/* read selector prefixlen thresholds */
1256	do {
1257		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
1258
1259		lbits4 = net->xfrm.policy_hthresh.lbits4;
1260		rbits4 = net->xfrm.policy_hthresh.rbits4;
1261		lbits6 = net->xfrm.policy_hthresh.lbits6;
1262		rbits6 = net->xfrm.policy_hthresh.rbits6;
1263	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
1264
1265	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1266	write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
1267
1268	/* make sure that we can insert the indirect policies again before
1269	 * we start with destructive action.
1270	 */
1271	list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
1272		struct xfrm_pol_inexact_bin *bin;
1273		u8 dbits, sbits;
1274
1275		if (policy->walk.dead)
1276			continue;
1277
1278		dir = xfrm_policy_id2dir(policy->index);
1279		if (dir >= XFRM_POLICY_MAX)
1280			continue;
1281
1282		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
1283			if (policy->family == AF_INET) {
1284				dbits = rbits4;
1285				sbits = lbits4;
1286			} else {
1287				dbits = rbits6;
1288				sbits = lbits6;
1289			}
1290		} else {
1291			if (policy->family == AF_INET) {
1292				dbits = lbits4;
1293				sbits = rbits4;
1294			} else {
1295				dbits = lbits6;
1296				sbits = rbits6;
1297			}
1298		}
1299
1300		if (policy->selector.prefixlen_d < dbits ||
1301		    policy->selector.prefixlen_s < sbits)
1302			continue;
1303
1304		bin = xfrm_policy_inexact_alloc_bin(policy, dir);
1305		if (!bin)
1306			goto out_unlock;
1307
1308		if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
1309			goto out_unlock;
1310	}
1311
1312	/* reset the bydst and inexact table in all directions */
1313	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
1314		struct hlist_node *n;
1315
1316		hlist_for_each_entry_safe(policy, n,
1317					  &net->xfrm.policy_inexact[dir],
1318					  bydst_inexact_list) {
1319			hlist_del_rcu(&policy->bydst);
1320			hlist_del_init(&policy->bydst_inexact_list);
1321		}
1322
1323		hmask = net->xfrm.policy_bydst[dir].hmask;
1324		odst = net->xfrm.policy_bydst[dir].table;
1325		for (i = hmask; i >= 0; i--) {
1326			hlist_for_each_entry_safe(policy, n, odst + i, bydst)
1327				hlist_del_rcu(&policy->bydst);
1328		}
1329		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
1330			/* dir out => dst = remote, src = local */
1331			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
1332			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
1333			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
1334			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
1335		} else {
1336			/* dir in/fwd => dst = local, src = remote */
1337			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
1338			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
1339			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
1340			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
1341		}
1342	}
1343
1344	/* re-insert all policies by order of creation */
1345	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
1346		if (policy->walk.dead)
1347			continue;
1348		dir = xfrm_policy_id2dir(policy->index);
1349		if (dir >= XFRM_POLICY_MAX) {
1350			/* skip socket policies */
1351			continue;
1352		}
1353		newpos = NULL;
1354		chain = policy_hash_bysel(net, &policy->selector,
1355					  policy->family, dir);
1356
1357		if (!chain) {
1358			void *p = xfrm_policy_inexact_insert(policy, dir, 0);
1359
1360			WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
1361			continue;
1362		}
1363
1364		hlist_for_each_entry(pol, chain, bydst) {
1365			if (policy->priority >= pol->priority)
1366				newpos = &pol->bydst;
1367			else
1368				break;
1369		}
1370		if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
1371			hlist_add_behind_rcu(&policy->bydst, newpos);
1372		else
1373			hlist_add_head_rcu(&policy->bydst, chain);
1374	}
1375
1376out_unlock:
1377	__xfrm_policy_inexact_flush(net);
1378	write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
1379	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1380
1381	mutex_unlock(&hash_resize_mutex);
1382}
1383
1384void xfrm_policy_hash_rebuild(struct net *net)
1385{
1386	schedule_work(&net->xfrm.policy_hthresh.work);
1387}
1388EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
1389
 1390/* Generate new index... KAME seems to generate them ordered by cost,
 1391 * at the price of completely unpredictable rule ordering. That will not do here. */
1392static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
1393{
1394	for (;;) {
1395		struct hlist_head *list;
1396		struct xfrm_policy *p;
1397		u32 idx;
1398		int found;
1399
1400		if (!index) {
1401			idx = (net->xfrm.idx_generator | dir);
1402			net->xfrm.idx_generator += 8;
1403		} else {
1404			idx = index;
1405			index = 0;
1406		}
1407
1408		if (idx == 0)
1409			idx = 8;
1410		list = net->xfrm.policy_byidx + idx_hash(net, idx);
1411		found = 0;
1412		hlist_for_each_entry(p, list, byidx) {
1413			if (p->index == idx) {
1414				found = 1;
1415				break;
1416			}
1417		}
1418		if (!found)
1419			return idx;
1420	}
1421}
1422
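/* Editor's note, not part of the original file: the direction lives in the
 * low bits of the index, which is why idx_generator advances in steps of 8.
 * E.g. idx_generator == 16 with dir == XFRM_POLICY_OUT (1) yields index 17,
 * and xfrm_policy_id2dir(17) recovers direction 1.
 */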
1423static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
1424{
1425	u32 *p1 = (u32 *) s1;
1426	u32 *p2 = (u32 *) s2;
1427	int len = sizeof(struct xfrm_selector) / sizeof(u32);
1428	int i;
1429
1430	for (i = 0; i < len; i++) {
1431		if (p1[i] != p2[i])
1432			return 1;
1433	}
1434
1435	return 0;
1436}
1437
1438static void xfrm_policy_requeue(struct xfrm_policy *old,
1439				struct xfrm_policy *new)
1440{
1441	struct xfrm_policy_queue *pq = &old->polq;
1442	struct sk_buff_head list;
1443
1444	if (skb_queue_empty(&pq->hold_queue))
1445		return;
1446
1447	__skb_queue_head_init(&list);
1448
1449	spin_lock_bh(&pq->hold_queue.lock);
1450	skb_queue_splice_init(&pq->hold_queue, &list);
1451	if (del_timer(&pq->hold_timer))
1452		xfrm_pol_put(old);
1453	spin_unlock_bh(&pq->hold_queue.lock);
1454
1455	pq = &new->polq;
1456
1457	spin_lock_bh(&pq->hold_queue.lock);
1458	skb_queue_splice(&list, &pq->hold_queue);
1459	pq->timeout = XFRM_QUEUE_TMO_MIN;
1460	if (!mod_timer(&pq->hold_timer, jiffies))
1461		xfrm_pol_hold(new);
1462	spin_unlock_bh(&pq->hold_queue.lock);
1463}
1464
1465static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
1466					  struct xfrm_policy *pol)
1467{
1468	return mark->v == pol->mark.v && mark->m == pol->mark.m;
1469}
1470
1471static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
1472{
1473	const struct xfrm_pol_inexact_key *k = data;
1474	u32 a = k->type << 24 | k->dir << 16 | k->family;
1475
1476	return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
1477			    seed);
1478}
1479
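/* Editor's note, not part of the original file: the packed word above keeps
 * type in bits 31..24, dir in bits 23..16 and family in the low 16 bits,
 * e.g. type XFRM_POLICY_TYPE_MAIN (0), dir XFRM_POLICY_OUT (1), family
 * AF_INET (2) packs to 0x00010002 before being mixed with if_id and the
 * netns hash in jhash_3words().
 */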
1480static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
1481{
1482	const struct xfrm_pol_inexact_bin *b = data;
1483
1484	return xfrm_pol_bin_key(&b->k, 0, seed);
1485}
1486
1487static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
1488			    const void *ptr)
1489{
1490	const struct xfrm_pol_inexact_key *key = arg->key;
1491	const struct xfrm_pol_inexact_bin *b = ptr;
1492	int ret;
1493
1494	if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
1495		return -1;
1496
1497	ret = b->k.dir ^ key->dir;
1498	if (ret)
1499		return ret;
1500
1501	ret = b->k.type ^ key->type;
1502	if (ret)
1503		return ret;
1504
1505	ret = b->k.family ^ key->family;
1506	if (ret)
1507		return ret;
1508
1509	return b->k.if_id ^ key->if_id;
1510}
1511
1512static const struct rhashtable_params xfrm_pol_inexact_params = {
1513	.head_offset		= offsetof(struct xfrm_pol_inexact_bin, head),
1514	.hashfn			= xfrm_pol_bin_key,
1515	.obj_hashfn		= xfrm_pol_bin_obj,
1516	.obj_cmpfn		= xfrm_pol_bin_cmp,
1517	.automatic_shrinking	= true,
1518};
1519
1520static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
1521					    struct xfrm_policy *policy)
1522{
1523	struct xfrm_policy *pol, *delpol = NULL;
1524	struct hlist_node *newpos = NULL;
1525	int i = 0;
1526
1527	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
1528		if (pol->type == policy->type &&
1529		    pol->if_id == policy->if_id &&
1530		    !selector_cmp(&pol->selector, &policy->selector) &&
1531		    xfrm_policy_mark_match(&policy->mark, pol) &&
1532		    xfrm_sec_ctx_match(pol->security, policy->security) &&
1533		    !WARN_ON(delpol)) {
1534			delpol = pol;
1535			if (policy->priority > pol->priority)
1536				continue;
1537		} else if (policy->priority >= pol->priority) {
1538			newpos = &pol->bydst_inexact_list;
1539			continue;
1540		}
1541		if (delpol)
1542			break;
1543	}
1544
1545	if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
1546		hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
1547	else
1548		hlist_add_head_rcu(&policy->bydst_inexact_list, chain);
1549
1550	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
1551		pol->pos = i;
1552		i++;
1553	}
1554}
1555
1556static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
1557						   struct xfrm_policy *policy,
1558						   bool excl)
1559{
1560	struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;
1561
1562	hlist_for_each_entry(pol, chain, bydst) {
1563		if (pol->type == policy->type &&
1564		    pol->if_id == policy->if_id &&
1565		    !selector_cmp(&pol->selector, &policy->selector) &&
1566		    xfrm_policy_mark_match(&policy->mark, pol) &&
1567		    xfrm_sec_ctx_match(pol->security, policy->security) &&
1568		    !WARN_ON(delpol)) {
1569			if (excl)
1570				return ERR_PTR(-EEXIST);
1571			delpol = pol;
1572			if (policy->priority > pol->priority)
1573				continue;
1574		} else if (policy->priority >= pol->priority) {
1575			newpos = pol;
1576			continue;
1577		}
1578		if (delpol)
1579			break;
1580	}
1581
1582	if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
1583		hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
1584	else
 1585		/* Packet offload policies are inserted at the head
 1586		 * to speed up lookups.
 1587		 */
1588		hlist_add_head_rcu(&policy->bydst, chain);
1589
1590	return delpol;
1591}
1592
1593int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
1594{
1595	struct net *net = xp_net(policy);
1596	struct xfrm_policy *delpol;
1597	struct hlist_head *chain;
1598
1599	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1600	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
1601	if (chain)
1602		delpol = xfrm_policy_insert_list(chain, policy, excl);
1603	else
1604		delpol = xfrm_policy_inexact_insert(policy, dir, excl);
1605
1606	if (IS_ERR(delpol)) {
1607		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1608		return PTR_ERR(delpol);
1609	}
1610
1611	__xfrm_policy_link(policy, dir);
1612
 1613	/* After the previous checks, family can only be AF_INET or AF_INET6 */
1614	if (policy->family == AF_INET)
1615		rt_genid_bump_ipv4(net);
1616	else
1617		rt_genid_bump_ipv6(net);
1618
1619	if (delpol) {
1620		xfrm_policy_requeue(delpol, policy);
1621		__xfrm_policy_unlink(delpol, dir);
1622	}
1623	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
1624	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
1625	policy->curlft.add_time = ktime_get_real_seconds();
1626	policy->curlft.use_time = 0;
1627	if (!mod_timer(&policy->timer, jiffies + HZ))
1628		xfrm_pol_hold(policy);
1629	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1630
1631	if (delpol)
1632		xfrm_policy_kill(delpol);
1633	else if (xfrm_bydst_should_resize(net, dir, NULL))
1634		schedule_work(&net->xfrm.policy_hash_work);
1635
1636	return 0;
1637}
1638EXPORT_SYMBOL(xfrm_policy_insert);
1639
1640static struct xfrm_policy *
1641__xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
1642			u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
1643			struct xfrm_sec_ctx *ctx)
1644{
1645	struct xfrm_policy *pol;
1646
1647	if (!chain)
1648		return NULL;
1649
1650	hlist_for_each_entry(pol, chain, bydst) {
1651		if (pol->type == type &&
1652		    pol->if_id == if_id &&
1653		    xfrm_policy_mark_match(mark, pol) &&
1654		    !selector_cmp(sel, &pol->selector) &&
1655		    xfrm_sec_ctx_match(ctx, pol->security))
1656			return pol;
1657	}
1658
1659	return NULL;
1660}
1661
1662struct xfrm_policy *
1663xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
1664		      u8 type, int dir, struct xfrm_selector *sel,
1665		      struct xfrm_sec_ctx *ctx, int delete, int *err)
1666{
1667	struct xfrm_pol_inexact_bin *bin = NULL;
1668	struct xfrm_policy *pol, *ret = NULL;
1669	struct hlist_head *chain;
1670
1671	*err = 0;
1672	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1673	chain = policy_hash_bysel(net, sel, sel->family, dir);
1674	if (!chain) {
1675		struct xfrm_pol_inexact_candidates cand;
1676		int i;
1677
1678		bin = xfrm_policy_inexact_lookup(net, type,
1679						 sel->family, dir, if_id);
1680		if (!bin) {
1681			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1682			return NULL;
1683		}
1684
1685		if (!xfrm_policy_find_inexact_candidates(&cand, bin,
1686							 &sel->saddr,
1687							 &sel->daddr)) {
1688			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1689			return NULL;
1690		}
1691
1692		pol = NULL;
1693		for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
1694			struct xfrm_policy *tmp;
1695
1696			tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
1697						      if_id, type, dir,
1698						      sel, ctx);
1699			if (!tmp)
1700				continue;
1701
1702			if (!pol || tmp->pos < pol->pos)
1703				pol = tmp;
1704		}
1705	} else {
1706		pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
1707					      sel, ctx);
1708	}
1709
1710	if (pol) {
1711		xfrm_pol_hold(pol);
1712		if (delete) {
1713			*err = security_xfrm_policy_delete(pol->security);
1714			if (*err) {
1715				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1716				return pol;
1717			}
1718			__xfrm_policy_unlink(pol, dir);
1719		}
1720		ret = pol;
1721	}
1722	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1723
1724	if (ret && delete)
1725		xfrm_policy_kill(ret);
1726	if (bin && delete)
1727		xfrm_policy_inexact_prune_bin(bin);
1728	return ret;
1729}
1730EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
1731
1732struct xfrm_policy *
1733xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
1734		 u8 type, int dir, u32 id, int delete, int *err)
1735{
1736	struct xfrm_policy *pol, *ret;
1737	struct hlist_head *chain;
1738
1739	*err = -ENOENT;
1740	if (xfrm_policy_id2dir(id) != dir)
1741		return NULL;
1742
1743	*err = 0;
1744	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1745	chain = net->xfrm.policy_byidx + idx_hash(net, id);
1746	ret = NULL;
1747	hlist_for_each_entry(pol, chain, byidx) {
1748		if (pol->type == type && pol->index == id &&
1749		    pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
1750			xfrm_pol_hold(pol);
1751			if (delete) {
1752				*err = security_xfrm_policy_delete(
1753								pol->security);
1754				if (*err) {
1755					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1756					return pol;
1757				}
1758				__xfrm_policy_unlink(pol, dir);
1759			}
1760			ret = pol;
1761			break;
1762		}
1763	}
1764	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1765
1766	if (ret && delete)
1767		xfrm_policy_kill(ret);
1768	return ret;
1769}
1770EXPORT_SYMBOL(xfrm_policy_byid);
1771
1772#ifdef CONFIG_SECURITY_NETWORK_XFRM
1773static inline int
1774xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1775{
1776	struct xfrm_policy *pol;
1777	int err = 0;
1778
1779	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1780		if (pol->walk.dead ||
1781		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
1782		    pol->type != type)
1783			continue;
1784
1785		err = security_xfrm_policy_delete(pol->security);
1786		if (err) {
1787			xfrm_audit_policy_delete(pol, 0, task_valid);
1788			return err;
1789		}
1790	}
1791	return err;
1792}
1793
1794static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
1795						     struct net_device *dev,
1796						     bool task_valid)
1797{
1798	struct xfrm_policy *pol;
1799	int err = 0;
1800
1801	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1802		if (pol->walk.dead ||
1803		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
1804		    pol->xdo.dev != dev)
1805			continue;
1806
1807		err = security_xfrm_policy_delete(pol->security);
1808		if (err) {
1809			xfrm_audit_policy_delete(pol, 0, task_valid);
1810			return err;
1811		}
1812	}
1813	return err;
1814}
1815#else
1816static inline int
1817xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1818{
1819	return 0;
1820}
1821
1822static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
1823						     struct net_device *dev,
1824						     bool task_valid)
1825{
1826	return 0;
1827}
1828#endif
1829
1830int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
1831{
1832	int dir, err = 0, cnt = 0;
1833	struct xfrm_policy *pol;
1834
1835	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1836
1837	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
1838	if (err)
1839		goto out;
1840
1841again:
1842	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1843		if (pol->walk.dead)
1844			continue;
1845
1846		dir = xfrm_policy_id2dir(pol->index);
1847		if (dir >= XFRM_POLICY_MAX ||
1848		    pol->type != type)
1849			continue;
1850
1851		__xfrm_policy_unlink(pol, dir);
1852		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1853		xfrm_dev_policy_delete(pol);
1854		cnt++;
1855		xfrm_audit_policy_delete(pol, 1, task_valid);
1856		xfrm_policy_kill(pol);
1857		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1858		goto again;
1859	}
1860	if (cnt)
1861		__xfrm_policy_inexact_flush(net);
1862	else
1863		err = -ESRCH;
1864out:
1865	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1866	return err;
1867}
1868EXPORT_SYMBOL(xfrm_policy_flush);
1869
1870int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
1871			  bool task_valid)
1872{
1873	int dir, err = 0, cnt = 0;
1874	struct xfrm_policy *pol;
1875
1876	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1877
1878	err = xfrm_dev_policy_flush_secctx_check(net, dev, task_valid);
1879	if (err)
1880		goto out;
1881
1882again:
1883	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1884		if (pol->walk.dead)
1885			continue;
1886
1887		dir = xfrm_policy_id2dir(pol->index);
1888		if (dir >= XFRM_POLICY_MAX ||
1889		    pol->xdo.dev != dev)
1890			continue;
1891
1892		__xfrm_policy_unlink(pol, dir);
1893		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1894		xfrm_dev_policy_delete(pol);
1895		cnt++;
1896		xfrm_audit_policy_delete(pol, 1, task_valid);
1897		xfrm_policy_kill(pol);
1898		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1899		goto again;
1900	}
1901	if (cnt)
1902		__xfrm_policy_inexact_flush(net);
1903	else
1904		err = -ESRCH;
1905out:
1906	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1907	return err;
1908}
1909EXPORT_SYMBOL(xfrm_dev_policy_flush);
1910
1911int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1912		     int (*func)(struct xfrm_policy *, int, int, void*),
1913		     void *data)
1914{
1915	struct xfrm_policy *pol;
1916	struct xfrm_policy_walk_entry *x;
1917	int error = 0;
1918
1919	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
1920	    walk->type != XFRM_POLICY_TYPE_ANY)
1921		return -EINVAL;
1922
1923	if (list_empty(&walk->walk.all) && walk->seq != 0)
1924		return 0;
1925
1926	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1927	if (list_empty(&walk->walk.all))
1928		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
1929	else
1930		x = list_first_entry(&walk->walk.all,
1931				     struct xfrm_policy_walk_entry, all);
1932
1933	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1934		if (x->dead)
1935			continue;
1936		pol = container_of(x, struct xfrm_policy, walk);
1937		if (walk->type != XFRM_POLICY_TYPE_ANY &&
1938		    walk->type != pol->type)
1939			continue;
1940		error = func(pol, xfrm_policy_id2dir(pol->index),
1941			     walk->seq, data);
1942		if (error) {
1943			list_move_tail(&walk->walk.all, &x->all);
1944			goto out;
1945		}
1946		walk->seq++;
1947	}
1948	if (walk->seq == 0) {
1949		error = -ENOENT;
1950		goto out;
1951	}
1952	list_del_init(&walk->walk.all);
1953out:
1954	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1955	return error;
1956}
1957EXPORT_SYMBOL(xfrm_policy_walk);
1958
1959void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1960{
1961	INIT_LIST_HEAD(&walk->walk.all);
1962	walk->walk.dead = 1;
1963	walk->type = type;
1964	walk->seq = 0;
1965}
1966EXPORT_SYMBOL(xfrm_policy_walk_init);
1967
1968void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1969{
1970	if (list_empty(&walk->walk.all))
1971		return;
1972
1973	spin_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
1974	list_del(&walk->walk.all);
1975	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1976}
1977EXPORT_SYMBOL(xfrm_policy_walk_done);
1978
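/* Illustrative only (not part of this file): a typical caller, such as a
 * netlink dump, drives the walker roughly like this:
 *
 *	struct xfrm_policy_walk walk;
 *
 *	xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_ANY);
 *	xfrm_policy_walk(net, &walk, dump_one_policy, data);
 *	xfrm_policy_walk_done(&walk, net);
 *
 * Here dump_one_policy() stands for the caller's own callback; returning
 * a non-zero value from it suspends the walk, and a later call to
 * xfrm_policy_walk() resumes from the same position.
 */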
1979/*
1980 * Find policy to apply to this flow.
1981 *
1982 * Returns 0 if the policy matches, -ESRCH if not, else a negative errno.
1983 */
1984static int xfrm_policy_match(const struct xfrm_policy *pol,
1985			     const struct flowi *fl,
1986			     u8 type, u16 family, u32 if_id)
1987{
1988	const struct xfrm_selector *sel = &pol->selector;
1989	int ret = -ESRCH;
1990	bool match;
1991
1992	if (pol->family != family ||
1993	    pol->if_id != if_id ||
1994	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1995	    pol->type != type)
1996		return ret;
1997
1998	match = xfrm_selector_match(sel, fl, family);
1999	if (match)
2000		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid);
2001	return ret;
2002}
2003
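/* Lockless lookup of the inexact-policy rbtree node covering @addr.  The
 * tree is walked under RCU without the policy lock; the seqcount detects
 * a concurrent rebalance, in which case the walk simply restarts.
 */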
2004static struct xfrm_pol_inexact_node *
2005xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
2006				seqcount_spinlock_t *count,
2007				const xfrm_address_t *addr, u16 family)
2008{
2009	const struct rb_node *parent;
2010	int seq;
2011
2012again:
2013	seq = read_seqcount_begin(count);
2014
2015	parent = rcu_dereference_raw(r->rb_node);
2016	while (parent) {
2017		struct xfrm_pol_inexact_node *node;
2018		int delta;
2019
2020		node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
2021
2022		delta = xfrm_policy_addr_delta(addr, &node->addr,
2023					       node->prefixlen, family);
2024		if (delta < 0) {
2025			parent = rcu_dereference_raw(parent->rb_left);
2026			continue;
2027		} else if (delta > 0) {
2028			parent = rcu_dereference_raw(parent->rb_right);
2029			continue;
2030		}
2031
2032		return node;
2033	}
2034
2035	if (read_seqcount_retry(count, seq))
2036		goto again;
2037
2038	return NULL;
2039}
2040
2041static bool
2042xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
2043				    struct xfrm_pol_inexact_bin *b,
2044				    const xfrm_address_t *saddr,
2045				    const xfrm_address_t *daddr)
2046{
2047	struct xfrm_pol_inexact_node *n;
2048	u16 family;
2049
2050	if (!b)
2051		return false;
2052
2053	family = b->k.family;
2054	memset(cand, 0, sizeof(*cand));
2055	cand->res[XFRM_POL_CAND_ANY] = &b->hhead;
2056
2057	n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
2058					    family);
2059	if (n) {
2060		cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
2061		n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
2062						    family);
2063		if (n)
2064			cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
2065	}
2066
2067	n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
2068					    family);
2069	if (n)
2070		cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;
2071
2072	return true;
2073}
2074
2075static struct xfrm_pol_inexact_bin *
2076xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
2077			       u8 dir, u32 if_id)
2078{
2079	struct xfrm_pol_inexact_key k = {
2080		.family = family,
2081		.type = type,
2082		.dir = dir,
2083		.if_id = if_id,
2084	};
2085
2086	write_pnet(&k.net, net);
2087
2088	return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
2089				 xfrm_pol_inexact_params);
2090}
2091
2092static struct xfrm_pol_inexact_bin *
2093xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
2094			   u8 dir, u32 if_id)
2095{
2096	struct xfrm_pol_inexact_bin *bin;
2097
2098	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
2099
2100	rcu_read_lock();
2101	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2102	rcu_read_unlock();
2103
2104	return bin;
2105}
2106
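/* Scan one candidate hlist for the best match.  Each chain is kept sorted
 * by priority, so the scan stops as soon as an entry can no longer beat
 * the current best (@prefer); on equal priority the policy with the
 * smaller ->pos wins.
 */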
2107static struct xfrm_policy *
2108__xfrm_policy_eval_candidates(struct hlist_head *chain,
2109			      struct xfrm_policy *prefer,
2110			      const struct flowi *fl,
2111			      u8 type, u16 family, u32 if_id)
2112{
2113	u32 priority = prefer ? prefer->priority : ~0u;
2114	struct xfrm_policy *pol;
2115
2116	if (!chain)
2117		return NULL;
2118
2119	hlist_for_each_entry_rcu(pol, chain, bydst) {
2120		int err;
2121
2122		if (pol->priority > priority)
2123			break;
2124
2125		err = xfrm_policy_match(pol, fl, type, family, if_id);
2126		if (err) {
2127			if (err != -ESRCH)
2128				return ERR_PTR(err);
2129
2130			continue;
2131		}
2132
2133		if (prefer) {
2134			/* matches.  Is it older than *prefer? */
2135			if (pol->priority == priority &&
2136			    prefer->pos < pol->pos)
2137				return prefer;
2138		}
2139
2140		return pol;
2141	}
2142
2143	return NULL;
2144}
2145
2146static struct xfrm_policy *
2147xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
2148			    struct xfrm_policy *prefer,
2149			    const struct flowi *fl,
2150			    u8 type, u16 family, u32 if_id)
2151{
2152	struct xfrm_policy *tmp;
2153	int i;
2154
2155	for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
2156		tmp = __xfrm_policy_eval_candidates(cand->res[i],
2157						    prefer,
2158						    fl, type, family, if_id);
2159		if (!tmp)
2160			continue;
2161
2162		if (IS_ERR(tmp))
2163			return tmp;
2164		prefer = tmp;
2165	}
2166
2167	return prefer;
2168}
2169
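/* Core per-type policy lookup for a flow: the exact (hashed) chain for
 * this direction is searched first, then the inexact candidate lists,
 * keeping the best match by priority.  A packet-offloaded exact match
 * short-circuits the inexact search.  Everything runs under RCU and is
 * retried if the hash tables were resized underneath us; a reference is
 * taken on the returned policy.
 */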
2170static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
2171						     const struct flowi *fl,
2172						     u16 family, u8 dir,
2173						     u32 if_id)
2174{
2175	struct xfrm_pol_inexact_candidates cand;
2176	const xfrm_address_t *daddr, *saddr;
2177	struct xfrm_pol_inexact_bin *bin;
2178	struct xfrm_policy *pol, *ret;
2179	struct hlist_head *chain;
2180	unsigned int sequence;
2181	int err;
2182
2183	daddr = xfrm_flowi_daddr(fl, family);
2184	saddr = xfrm_flowi_saddr(fl, family);
2185	if (unlikely(!daddr || !saddr))
2186		return NULL;
2187
2188	rcu_read_lock();
2189 retry:
2190	do {
2191		sequence = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
2192		chain = policy_hash_direct(net, daddr, saddr, family, dir);
2193	} while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence));
2194
2195	ret = NULL;
2196	hlist_for_each_entry_rcu(pol, chain, bydst) {
2197		err = xfrm_policy_match(pol, fl, type, family, if_id);
2198		if (err) {
2199			if (err == -ESRCH)
2200				continue;
2201			else {
2202				ret = ERR_PTR(err);
2203				goto fail;
2204			}
2205		} else {
2206			ret = pol;
2207			break;
2208		}
2209	}
2210	if (ret && ret->xdo.type == XFRM_DEV_OFFLOAD_PACKET)
2211		goto skip_inexact;
2212
2213	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2214	if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
2215							 daddr))
2216		goto skip_inexact;
2217
2218	pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
2219					  family, if_id);
2220	if (pol) {
2221		ret = pol;
2222		if (IS_ERR(pol))
2223			goto fail;
2224	}
2225
2226skip_inexact:
2227	if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence))
2228		goto retry;
2229
2230	if (ret && !xfrm_pol_hold_rcu(ret))
2231		goto retry;
2232fail:
2233	rcu_read_unlock();
2234
2235	return ret;
2236}
2237
2238static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
2239					      const struct flowi *fl,
2240					      u16 family, u8 dir, u32 if_id)
2241{
2242#ifdef CONFIG_XFRM_SUB_POLICY
2243	struct xfrm_policy *pol;
2244
2245	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
2246					dir, if_id);
2247	if (pol != NULL)
2248		return pol;
2249#endif
2250	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
2251					 dir, if_id);
2252}
2253
2254static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
2255						 const struct flowi *fl,
2256						 u16 family, u32 if_id)
2257{
2258	struct xfrm_policy *pol;
2259
2260	rcu_read_lock();
2261 again:
2262	pol = rcu_dereference(sk->sk_policy[dir]);
2263	if (pol != NULL) {
2264		bool match;
2265		int err = 0;
2266
2267		if (pol->family != family) {
2268			pol = NULL;
2269			goto out;
2270		}
2271
2272		match = xfrm_selector_match(&pol->selector, fl, family);
2273		if (match) {
2274			if ((READ_ONCE(sk->sk_mark) & pol->mark.m) != pol->mark.v ||
2275			    pol->if_id != if_id) {
2276				pol = NULL;
2277				goto out;
2278			}
2279			err = security_xfrm_policy_lookup(pol->security,
2280						      fl->flowi_secid);
2281			if (!err) {
2282				if (!xfrm_pol_hold_rcu(pol))
2283					goto again;
2284			} else if (err == -ESRCH) {
2285				pol = NULL;
2286			} else {
2287				pol = ERR_PTR(err);
2288			}
2289		} else
2290			pol = NULL;
2291	}
2292out:
2293	rcu_read_unlock();
2294	return pol;
2295}
2296
2297static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
2298{
2299	struct net *net = xp_net(pol);
2300
2301	list_add(&pol->walk.all, &net->xfrm.policy_all);
2302	net->xfrm.policy_count[dir]++;
2303	xfrm_pol_hold(pol);
2304}
2305
2306static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
2307						int dir)
2308{
2309	struct net *net = xp_net(pol);
2310
2311	if (list_empty(&pol->walk.all))
2312		return NULL;
2313
2314	/* Socket policies are not hashed. */
2315	if (!hlist_unhashed(&pol->bydst)) {
2316		hlist_del_rcu(&pol->bydst);
2317		hlist_del_init(&pol->bydst_inexact_list);
2318		hlist_del(&pol->byidx);
2319	}
2320
2321	list_del_init(&pol->walk.all);
2322	net->xfrm.policy_count[dir]--;
2323
2324	return pol;
2325}
2326
2327static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
2328{
2329	__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
2330}
2331
2332static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
2333{
2334	__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
2335}
2336
2337int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
2338{
2339	struct net *net = xp_net(pol);
2340
2341	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2342	pol = __xfrm_policy_unlink(pol, dir);
2343	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2344	if (pol) {
2345		xfrm_dev_policy_delete(pol);
2346		xfrm_policy_kill(pol);
2347		return 0;
2348	}
2349	return -ENOENT;
2350}
2351EXPORT_SYMBOL(xfrm_policy_delete);
2352
2353int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
2354{
2355	struct net *net = sock_net(sk);
2356	struct xfrm_policy *old_pol;
2357
2358#ifdef CONFIG_XFRM_SUB_POLICY
2359	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
2360		return -EINVAL;
2361#endif
2362
2363	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2364	old_pol = rcu_dereference_protected(sk->sk_policy[dir],
2365				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
2366	if (pol) {
2367		pol->curlft.add_time = ktime_get_real_seconds();
2368		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
2369		xfrm_sk_policy_link(pol, dir);
2370	}
2371	rcu_assign_pointer(sk->sk_policy[dir], pol);
2372	if (old_pol) {
2373		if (pol)
2374			xfrm_policy_requeue(old_pol, pol);
2375
2376		/* Unlinking always succeeds.  This is the only function
2377		 * allowed to delete or replace a socket policy.
2378		 */
2379		xfrm_sk_policy_unlink(old_pol, dir);
2380	}
2381	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2382
2383	if (old_pol) {
2384		xfrm_policy_kill(old_pol);
2385	}
2386	return 0;
2387}
2388
2389static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
2390{
2391	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
2392	struct net *net = xp_net(old);
2393
2394	if (newp) {
2395		newp->selector = old->selector;
2396		if (security_xfrm_policy_clone(old->security,
2397					       &newp->security)) {
2398			kfree(newp);
2399			return NULL;  /* ENOMEM */
2400		}
2401		newp->lft = old->lft;
2402		newp->curlft = old->curlft;
2403		newp->mark = old->mark;
2404		newp->if_id = old->if_id;
2405		newp->action = old->action;
2406		newp->flags = old->flags;
2407		newp->xfrm_nr = old->xfrm_nr;
2408		newp->index = old->index;
2409		newp->type = old->type;
2410		newp->family = old->family;
2411		memcpy(newp->xfrm_vec, old->xfrm_vec,
2412		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
2413		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2414		xfrm_sk_policy_link(newp, dir);
2415		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2416		xfrm_pol_put(newp);
2417	}
2418	return newp;
2419}
2420
2421int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
2422{
2423	const struct xfrm_policy *p;
2424	struct xfrm_policy *np;
2425	int i, ret = 0;
2426
2427	rcu_read_lock();
2428	for (i = 0; i < 2; i++) {
2429		p = rcu_dereference(osk->sk_policy[i]);
2430		if (p) {
2431			np = clone_policy(p, i);
2432			if (unlikely(!np)) {
2433				ret = -ENOMEM;
2434				break;
2435			}
2436			rcu_assign_pointer(sk->sk_policy[i], np);
2437		}
2438	}
2439	rcu_read_unlock();
2440	return ret;
2441}
2442
2443static int
2444xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
2445	       xfrm_address_t *remote, unsigned short family, u32 mark)
2446{
2447	int err;
2448	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2449
2450	if (unlikely(afinfo == NULL))
2451		return -EINVAL;
2452	err = afinfo->get_saddr(net, oif, local, remote, mark);
2453	rcu_read_unlock();
2454	return err;
2455}
2456
2457/* Resolve the list of templates for the flow, given the policy. */
2458
2459static int
2460xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
2461		      struct xfrm_state **xfrm, unsigned short family)
2462{
2463	struct net *net = xp_net(policy);
2464	int nx;
2465	int i, error;
2466	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
2467	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
2468	xfrm_address_t tmp;
2469
2470	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
2471		struct xfrm_state *x;
2472		xfrm_address_t *remote = daddr;
2473		xfrm_address_t *local  = saddr;
2474		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
2475
2476		if (tmpl->mode == XFRM_MODE_TUNNEL ||
2477		    tmpl->mode == XFRM_MODE_BEET) {
2478			remote = &tmpl->id.daddr;
2479			local = &tmpl->saddr;
2480			if (xfrm_addr_any(local, tmpl->encap_family)) {
2481				error = xfrm_get_saddr(net, fl->flowi_oif,
2482						       &tmp, remote,
2483						       tmpl->encap_family, 0);
2484				if (error)
2485					goto fail;
2486				local = &tmp;
2487			}
2488		}
2489
2490		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
2491				    family, policy->if_id);
2492
2493		if (x && x->km.state == XFRM_STATE_VALID) {
2494			xfrm[nx++] = x;
2495			daddr = remote;
2496			saddr = local;
2497			continue;
2498		}
2499		if (x) {
2500			error = (x->km.state == XFRM_STATE_ERROR ?
2501				 -EINVAL : -EAGAIN);
2502			xfrm_state_put(x);
2503		} else if (error == -ESRCH) {
2504			error = -EAGAIN;
2505		}
2506
2507		if (!tmpl->optional)
2508			goto fail;
2509	}
2510	return nx;
2511
2512fail:
2513	for (nx--; nx >= 0; nx--)
2514		xfrm_state_put(xfrm[nx]);
2515	return error;
2516}
2517
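/* Resolve the templates of all given policies into a single array of
 * states, failing with -ENOBUFS if the combined templates would overflow
 * XFRM_MAX_DEPTH.  With more than one policy the states are collected
 * into a scratch array first and then sorted into the order required for
 * outbound processing.
 */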
2518static int
2519xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
2520		  struct xfrm_state **xfrm, unsigned short family)
2521{
2522	struct xfrm_state *tp[XFRM_MAX_DEPTH];
2523	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
2524	int cnx = 0;
2525	int error;
2526	int ret;
2527	int i;
2528
2529	for (i = 0; i < npols; i++) {
2530		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
2531			error = -ENOBUFS;
2532			goto fail;
2533		}
2534
2535		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
2536		if (ret < 0) {
2537			error = ret;
2538			goto fail;
2539		} else
2540			cnx += ret;
2541	}
2542
2543	/* found states are sorted for outbound processing */
2544	if (npols > 1)
2545		xfrm_state_sort(xfrm, tpp, cnx, family);
2546
2547	return cnx;
2548
2549 fail:
2550	for (cnx--; cnx >= 0; cnx--)
2551		xfrm_state_put(tpp[cnx]);
2552	return error;
2553
2554}
2555
2556static int xfrm_get_tos(const struct flowi *fl, int family)
2557{
2558	if (family == AF_INET)
2559		return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;
2560
2561	return 0;
2562}
2563
2564static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
2565{
2566	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2567	struct dst_ops *dst_ops;
2568	struct xfrm_dst *xdst;
2569
2570	if (!afinfo)
2571		return ERR_PTR(-EINVAL);
2572
2573	switch (family) {
2574	case AF_INET:
2575		dst_ops = &net->xfrm.xfrm4_dst_ops;
2576		break;
2577#if IS_ENABLED(CONFIG_IPV6)
2578	case AF_INET6:
2579		dst_ops = &net->xfrm.xfrm6_dst_ops;
2580		break;
2581#endif
2582	default:
2583		BUG();
2584	}
2585	xdst = dst_alloc(dst_ops, NULL, DST_OBSOLETE_NONE, 0);
2586
2587	if (likely(xdst)) {
2588		memset_after(xdst, 0, u.dst);
2589	} else
2590		xdst = ERR_PTR(-ENOBUFS);
2591
2592	rcu_read_unlock();
2593
2594	return xdst;
2595}
2596
2597static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
2598			   int nfheader_len)
2599{
2600	if (dst->ops->family == AF_INET6) {
2601		path->path_cookie = rt6_get_cookie(dst_rt6_info(dst));
2602		path->u.rt6.rt6i_nfheader_len = nfheader_len;
2603	}
2604}
2605
2606static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
2607				const struct flowi *fl)
2608{
2609	const struct xfrm_policy_afinfo *afinfo =
2610		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
2611	int err;
2612
2613	if (!afinfo)
2614		return -EINVAL;
2615
2616	err = afinfo->fill_dst(xdst, dev, fl);
2617
2618	rcu_read_unlock();
2619
2620	return err;
2621}
2622
2623
2624/* Allocate a chain of dst_entry's, attach the known xfrm's, calculate
2625 * all the metrics... In short, build a bundle.
2626 */
2627
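/* The result is a chain of xfrm_dst's linked through their child
 * pointers, one per state, terminated by a real route.  Each level's
 * ->route remembers the route it was built on, and xdst0->path points at
 * the final route used to actually transmit the packet.
 */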
2628static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
2629					    struct xfrm_state **xfrm,
2630					    struct xfrm_dst **bundle,
2631					    int nx,
2632					    const struct flowi *fl,
2633					    struct dst_entry *dst)
2634{
2635	const struct xfrm_state_afinfo *afinfo;
2636	const struct xfrm_mode *inner_mode;
2637	struct net *net = xp_net(policy);
2638	unsigned long now = jiffies;
2639	struct net_device *dev;
2640	struct xfrm_dst *xdst_prev = NULL;
2641	struct xfrm_dst *xdst0 = NULL;
2642	int i = 0;
2643	int err;
2644	int header_len = 0;
2645	int nfheader_len = 0;
2646	int trailer_len = 0;
2647	int tos;
2648	int family = policy->selector.family;
2649	xfrm_address_t saddr, daddr;
2650
2651	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
2652
2653	tos = xfrm_get_tos(fl, family);
2654
2655	dst_hold(dst);
2656
2657	for (; i < nx; i++) {
2658		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
2659		struct dst_entry *dst1 = &xdst->u.dst;
2660
2661		err = PTR_ERR(xdst);
2662		if (IS_ERR(xdst)) {
2663			dst_release(dst);
2664			goto put_states;
2665		}
2666
2667		bundle[i] = xdst;
2668		if (!xdst_prev)
2669			xdst0 = xdst;
2670		else
2671			/* A ref count is taken during xfrm_alloc_dst(),
2672			 * so there is no need to do dst_clone() on dst1.
2673			 */
2674			xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
2675
2676		if (xfrm[i]->sel.family == AF_UNSPEC) {
2677			inner_mode = xfrm_ip2inner_mode(xfrm[i],
2678							xfrm_af2proto(family));
2679			if (!inner_mode) {
2680				err = -EAFNOSUPPORT;
2681				dst_release(dst);
2682				goto put_states;
2683			}
2684		} else
2685			inner_mode = &xfrm[i]->inner_mode;
2686
2687		xdst->route = dst;
2688		dst_copy_metrics(dst1, dst);
2689
2690		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
2691			__u32 mark = 0;
2692			int oif;
2693
2694			if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
2695				mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
2696
2697			if (xfrm[i]->xso.type != XFRM_DEV_OFFLOAD_PACKET)
2698				family = xfrm[i]->props.family;
2699
2700			oif = fl->flowi_oif ? : fl->flowi_l3mdev;
2701			dst = xfrm_dst_lookup(xfrm[i], tos, oif,
2702					      &saddr, &daddr, family, mark);
2703			err = PTR_ERR(dst);
2704			if (IS_ERR(dst))
2705				goto put_states;
2706		} else
2707			dst_hold(dst);
2708
2709		dst1->xfrm = xfrm[i];
2710		xdst->xfrm_genid = xfrm[i]->genid;
2711
2712		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2713		dst1->lastuse = now;
2714
2715		dst1->input = dst_discard;
2716
2717		rcu_read_lock();
2718		afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
2719		if (likely(afinfo))
2720			dst1->output = afinfo->output;
2721		else
2722			dst1->output = dst_discard_out;
2723		rcu_read_unlock();
2724
2725		xdst_prev = xdst;
2726
2727		header_len += xfrm[i]->props.header_len;
2728		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
2729			nfheader_len += xfrm[i]->props.header_len;
2730		trailer_len += xfrm[i]->props.trailer_len;
2731	}
2732
2733	xfrm_dst_set_child(xdst_prev, dst);
2734	xdst0->path = dst;
2735
2736	err = -ENODEV;
2737	dev = dst->dev;
2738	if (!dev)
2739		goto free_dst;
2740
2741	xfrm_init_path(xdst0, dst, nfheader_len);
2742	xfrm_init_pmtu(bundle, nx);
2743
2744	for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
2745	     xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
2746		err = xfrm_fill_dst(xdst_prev, dev, fl);
2747		if (err)
2748			goto free_dst;
2749
2750		xdst_prev->u.dst.header_len = header_len;
2751		xdst_prev->u.dst.trailer_len = trailer_len;
2752		header_len -= xdst_prev->u.dst.xfrm->props.header_len;
2753		trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
2754	}
2755
2756	return &xdst0->u.dst;
2757
2758put_states:
2759	for (; i < nx; i++)
2760		xfrm_state_put(xfrm[i]);
2761free_dst:
2762	if (xdst0)
2763		dst_release_immediate(&xdst0->u.dst);
2764
2765	return ERR_PTR(err);
2766}
2767
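/* Given a first matched policy in pols[0], pull in the corresponding
 * main policy as well when pols[0] is a sub-policy (CONFIG_XFRM_SUB_POLICY)
 * and add up the number of templates.  *num_xfrms is set to -1 when any
 * policy does not allow the flow, so the caller knows to block it.
 */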
2768static int xfrm_expand_policies(const struct flowi *fl, u16 family,
2769				struct xfrm_policy **pols,
2770				int *num_pols, int *num_xfrms)
2771{
2772	int i;
2773
2774	if (*num_pols == 0 || !pols[0]) {
2775		*num_pols = 0;
2776		*num_xfrms = 0;
2777		return 0;
2778	}
2779	if (IS_ERR(pols[0])) {
2780		*num_pols = 0;
2781		return PTR_ERR(pols[0]);
2782	}
2783
2784	*num_xfrms = pols[0]->xfrm_nr;
2785
2786#ifdef CONFIG_XFRM_SUB_POLICY
2787	if (pols[0]->action == XFRM_POLICY_ALLOW &&
2788	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2789		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
2790						    XFRM_POLICY_TYPE_MAIN,
2791						    fl, family,
2792						    XFRM_POLICY_OUT,
2793						    pols[0]->if_id);
2794		if (pols[1]) {
2795			if (IS_ERR(pols[1])) {
2796				xfrm_pols_put(pols, *num_pols);
2797				*num_pols = 0;
2798				return PTR_ERR(pols[1]);
2799			}
2800			(*num_pols)++;
2801			(*num_xfrms) += pols[1]->xfrm_nr;
2802		}
2803	}
2804#endif
2805	for (i = 0; i < *num_pols; i++) {
2806		if (pols[i]->action != XFRM_POLICY_ALLOW) {
2807			*num_xfrms = -1;
2808			break;
2809		}
2810	}
2811
2812	return 0;
2813
2814}
2815
2816static struct xfrm_dst *
2817xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
2818			       const struct flowi *fl, u16 family,
2819			       struct dst_entry *dst_orig)
2820{
2821	struct net *net = xp_net(pols[0]);
2822	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
2823	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
2824	struct xfrm_dst *xdst;
2825	struct dst_entry *dst;
2826	int err;
2827
2828	/* Try to instantiate a bundle */
2829	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
2830	if (err <= 0) {
2831		if (err == 0)
2832			return NULL;
2833
2834		if (err != -EAGAIN)
2835			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2836		return ERR_PTR(err);
2837	}
2838
2839	dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
2840	if (IS_ERR(dst)) {
2841		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
2842		return ERR_CAST(dst);
2843	}
2844
2845	xdst = (struct xfrm_dst *)dst;
2846	xdst->num_xfrms = err;
2847	xdst->num_pols = num_pols;
2848	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2849	xdst->policy_genid = atomic_read(&pols[0]->genid);
2850
2851	return xdst;
2852}
2853
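/* Timer handler for a policy's hold queue: packets queued while the
 * needed states were still being negotiated are re-run through
 * xfrm_lookup().  If the result is still a queueing (DST_XFRM_QUEUE)
 * route, the timer is re-armed with a doubled timeout; once the timeout
 * has grown past XFRM_QUEUE_TMO_MAX the queue is purged instead.  As
 * soon as a real bundle exists the whole queue is transmitted.
 */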
2854static void xfrm_policy_queue_process(struct timer_list *t)
2855{
2856	struct sk_buff *skb;
2857	struct sock *sk;
2858	struct dst_entry *dst;
2859	struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
2860	struct net *net = xp_net(pol);
2861	struct xfrm_policy_queue *pq = &pol->polq;
2862	struct flowi fl;
2863	struct sk_buff_head list;
2864	__u32 skb_mark;
2865
2866	spin_lock(&pq->hold_queue.lock);
2867	skb = skb_peek(&pq->hold_queue);
2868	if (!skb) {
2869		spin_unlock(&pq->hold_queue.lock);
2870		goto out;
2871	}
2872	dst = skb_dst(skb);
2873	sk = skb->sk;
2874
2875	/* Fixup the mark to support VTI. */
2876	skb_mark = skb->mark;
2877	skb->mark = pol->mark.v;
2878	xfrm_decode_session(net, skb, &fl, dst->ops->family);
2879	skb->mark = skb_mark;
2880	spin_unlock(&pq->hold_queue.lock);
2881
2882	dst_hold(xfrm_dst_path(dst));
2883	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
2884	if (IS_ERR(dst))
2885		goto purge_queue;
2886
2887	if (dst->flags & DST_XFRM_QUEUE) {
2888		dst_release(dst);
2889
2890		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
2891			goto purge_queue;
2892
2893		pq->timeout = pq->timeout << 1;
2894		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
2895			xfrm_pol_hold(pol);
2896		goto out;
2897	}
2898
2899	dst_release(dst);
2900
2901	__skb_queue_head_init(&list);
2902
2903	spin_lock(&pq->hold_queue.lock);
2904	pq->timeout = 0;
2905	skb_queue_splice_init(&pq->hold_queue, &list);
2906	spin_unlock(&pq->hold_queue.lock);
2907
2908	while (!skb_queue_empty(&list)) {
2909		skb = __skb_dequeue(&list);
2910
2911		/* Fixup the mark to support VTI. */
2912		skb_mark = skb->mark;
2913		skb->mark = pol->mark.v;
2914		xfrm_decode_session(net, skb, &fl, skb_dst(skb)->ops->family);
2915		skb->mark = skb_mark;
2916
2917		dst_hold(xfrm_dst_path(skb_dst(skb)));
2918		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
2919		if (IS_ERR(dst)) {
2920			kfree_skb(skb);
2921			continue;
2922		}
2923
2924		nf_reset_ct(skb);
2925		skb_dst_drop(skb);
2926		skb_dst_set(skb, dst);
2927
2928		dst_output(net, skb->sk, skb);
2929	}
2930
2931out:
2932	xfrm_pol_put(pol);
2933	return;
2934
2935purge_queue:
2936	pq->timeout = 0;
2937	skb_queue_purge(&pq->hold_queue);
2938	xfrm_pol_put(pol);
2939}
2940
2941static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2942{
2943	unsigned long sched_next;
2944	struct dst_entry *dst = skb_dst(skb);
2945	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
2946	struct xfrm_policy *pol = xdst->pols[0];
2947	struct xfrm_policy_queue *pq = &pol->polq;
2948
2949	if (unlikely(skb_fclone_busy(sk, skb))) {
2950		kfree_skb(skb);
2951		return 0;
2952	}
2953
2954	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
2955		kfree_skb(skb);
2956		return -EAGAIN;
2957	}
2958
2959	skb_dst_force(skb);
2960
2961	spin_lock_bh(&pq->hold_queue.lock);
2962
2963	if (!pq->timeout)
2964		pq->timeout = XFRM_QUEUE_TMO_MIN;
2965
2966	sched_next = jiffies + pq->timeout;
2967
2968	if (del_timer(&pq->hold_timer)) {
2969		if (time_before(pq->hold_timer.expires, sched_next))
2970			sched_next = pq->hold_timer.expires;
2971		xfrm_pol_put(pol);
2972	}
2973
2974	__skb_queue_tail(&pq->hold_queue, skb);
2975	if (!mod_timer(&pq->hold_timer, sched_next))
2976		xfrm_pol_hold(pol);
2977
2978	spin_unlock_bh(&pq->hold_queue.lock);
2979
2980	return 0;
2981}
2982
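/* Build a placeholder bundle for the case where policies matched but no
 * states exist yet.  Its output function (xdst_queue_output) parks
 * packets on the policy's hold queue instead of dropping them.  If
 * queueing was not requested, larval drop is enabled, or there is
 * nothing to transform, the bare xdst is returned as-is.
 */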
2983static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
2984						 struct xfrm_flo *xflo,
2985						 const struct flowi *fl,
2986						 int num_xfrms,
2987						 u16 family)
2988{
2989	int err;
2990	struct net_device *dev;
2991	struct dst_entry *dst;
2992	struct dst_entry *dst1;
2993	struct xfrm_dst *xdst;
2994
2995	xdst = xfrm_alloc_dst(net, family);
2996	if (IS_ERR(xdst))
2997		return xdst;
2998
2999	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
3000	    net->xfrm.sysctl_larval_drop ||
3001	    num_xfrms <= 0)
3002		return xdst;
3003
3004	dst = xflo->dst_orig;
3005	dst1 = &xdst->u.dst;
3006	dst_hold(dst);
3007	xdst->route = dst;
3008
3009	dst_copy_metrics(dst1, dst);
3010
3011	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
3012	dst1->flags |= DST_XFRM_QUEUE;
3013	dst1->lastuse = jiffies;
3014
3015	dst1->input = dst_discard;
3016	dst1->output = xdst_queue_output;
3017
3018	dst_hold(dst);
3019	xfrm_dst_set_child(xdst, dst);
3020	xdst->path = dst;
3021
3022	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
3023
3024	err = -ENODEV;
3025	dev = dst->dev;
3026	if (!dev)
3027		goto free_dst;
3028
3029	err = xfrm_fill_dst(xdst, dev, fl);
3030	if (err)
3031		goto free_dst;
3032
3033out:
3034	return xdst;
3035
3036free_dst:
3037	dst_release(dst1);
3038	xdst = ERR_PTR(err);
3039	goto out;
3040}
3041
3042static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
3043					   const struct flowi *fl,
3044					   u16 family, u8 dir,
3045					   struct xfrm_flo *xflo, u32 if_id)
3046{
3047	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3048	int num_pols = 0, num_xfrms = 0, err;
3049	struct xfrm_dst *xdst;
3050
3051	/* Resolve the policies to use if we couldn't get them from
3052	 * a previous cache entry. */
3053	num_pols = 1;
3054	pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
3055	err = xfrm_expand_policies(fl, family, pols,
3056					   &num_pols, &num_xfrms);
3057	if (err < 0)
3058		goto inc_error;
3059	if (num_pols == 0)
3060		return NULL;
3061	if (num_xfrms <= 0)
3062		goto make_dummy_bundle;
3063
3064	xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
3065					      xflo->dst_orig);
3066	if (IS_ERR(xdst)) {
3067		err = PTR_ERR(xdst);
3068		if (err == -EREMOTE) {
3069			xfrm_pols_put(pols, num_pols);
3070			return NULL;
3071		}
3072
3073		if (err != -EAGAIN)
3074			goto error;
3075		goto make_dummy_bundle;
3076	} else if (xdst == NULL) {
3077		num_xfrms = 0;
3078		goto make_dummy_bundle;
3079	}
3080
3081	return xdst;
3082
3083make_dummy_bundle:
3084	/* We found policies, but there are no bundles to instantiate:
3085	 * either the policy blocks, it has no transformations, or
3086	 * we could not build a template (no xfrm_states). */
3087	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
3088	if (IS_ERR(xdst)) {
3089		xfrm_pols_put(pols, num_pols);
3090		return ERR_CAST(xdst);
3091	}
3092	xdst->num_pols = num_pols;
3093	xdst->num_xfrms = num_xfrms;
3094	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
3095
3096	return xdst;
3097
3098inc_error:
3099	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
3100error:
3101	xfrm_pols_put(pols, num_pols);
3102	return ERR_PTR(err);
3103}
3104
3105static struct dst_entry *make_blackhole(struct net *net, u16 family,
3106					struct dst_entry *dst_orig)
3107{
3108	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
3109	struct dst_entry *ret;
3110
3111	if (!afinfo) {
3112		dst_release(dst_orig);
3113		return ERR_PTR(-EINVAL);
3114	} else {
3115		ret = afinfo->blackhole_route(net, dst_orig);
3116	}
3117	rcu_read_unlock();
3118
3119	return ret;
3120}
3121
3122/* Finds/creates a bundle for the given flow and if_id.
3123 *
3124 * At the moment we eat a raw IP route, mostly to speed up lookups
3125 * on interfaces with IPsec disabled.
3126 *
3127 * xfrm_lookup() uses an if_id of 0 by default and is provided for
3128 * compatibility.
3129 */
3130struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
3131					struct dst_entry *dst_orig,
3132					const struct flowi *fl,
3133					const struct sock *sk,
3134					int flags, u32 if_id)
3135{
3136	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3137	struct xfrm_dst *xdst;
3138	struct dst_entry *dst, *route;
3139	u16 family = dst_orig->ops->family;
3140	u8 dir = XFRM_POLICY_OUT;
3141	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
3142
3143	dst = NULL;
3144	xdst = NULL;
3145	route = NULL;
3146
3147	sk = sk_const_to_full_sk(sk);
3148	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
3149		num_pols = 1;
3150		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
3151						if_id);
3152		err = xfrm_expand_policies(fl, family, pols,
3153					   &num_pols, &num_xfrms);
3154		if (err < 0)
3155			goto dropdst;
3156
3157		if (num_pols) {
3158			if (num_xfrms <= 0) {
3159				drop_pols = num_pols;
3160				goto no_transform;
3161			}
3162
3163			xdst = xfrm_resolve_and_create_bundle(
3164					pols, num_pols, fl,
3165					family, dst_orig);
3166
3167			if (IS_ERR(xdst)) {
3168				xfrm_pols_put(pols, num_pols);
3169				err = PTR_ERR(xdst);
3170				if (err == -EREMOTE)
3171					goto nopol;
3172
3173				goto dropdst;
3174			} else if (xdst == NULL) {
3175				num_xfrms = 0;
3176				drop_pols = num_pols;
3177				goto no_transform;
3178			}
3179
3180			route = xdst->route;
3181		}
3182	}
3183
3184	if (xdst == NULL) {
3185		struct xfrm_flo xflo;
3186
3187		xflo.dst_orig = dst_orig;
3188		xflo.flags = flags;
3189
3190		/* To accelerate a bit...  */
3191		if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
3192			       !net->xfrm.policy_count[XFRM_POLICY_OUT]))
3193			goto nopol;
3194
3195		xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
3196		if (xdst == NULL)
3197			goto nopol;
3198		if (IS_ERR(xdst)) {
3199			err = PTR_ERR(xdst);
3200			goto dropdst;
3201		}
3202
3203		num_pols = xdst->num_pols;
3204		num_xfrms = xdst->num_xfrms;
3205		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
3206		route = xdst->route;
3207	}
3208
3209	dst = &xdst->u.dst;
3210	if (route == NULL && num_xfrms > 0) {
3211		/* The only case when xfrm_bundle_lookup() returns a
3212		 * bundle with a null route is when the template could
3213		 * not be resolved.  It means the policies are there, but
3214		 * the bundle could not be created, since we don't yet
3215		 * have the xfrm_states.  We need to wait for the KM to
3216		 * negotiate new SAs or bail out with an error. */
3217		if (net->xfrm.sysctl_larval_drop) {
3218			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3219			err = -EREMOTE;
3220			goto error;
3221		}
3222
3223		err = -EAGAIN;
3224
3225		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3226		goto error;
3227	}
3228
3229no_transform:
3230	if (num_pols == 0)
3231		goto nopol;
3232
3233	if ((flags & XFRM_LOOKUP_ICMP) &&
3234	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
3235		err = -ENOENT;
3236		goto error;
3237	}
3238
3239	for (i = 0; i < num_pols; i++)
3240		WRITE_ONCE(pols[i]->curlft.use_time, ktime_get_real_seconds());
3241
3242	if (num_xfrms < 0) {
3243		/* Prohibit the flow */
3244		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
3245		err = -EPERM;
3246		goto error;
3247	} else if (num_xfrms > 0) {
3248		/* Flow transformed */
3249		dst_release(dst_orig);
3250	} else {
3251		/* Flow passes untransformed */
3252		dst_release(dst);
3253		dst = dst_orig;
3254	}
3255ok:
3256	xfrm_pols_put(pols, drop_pols);
3257	if (dst && dst->xfrm &&
3258	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
3259		dst->flags |= DST_XFRM_TUNNEL;
3260	return dst;
3261
3262nopol:
3263	if ((!dst_orig->dev || !(dst_orig->dev->flags & IFF_LOOPBACK)) &&
3264	    net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
3265		err = -EPERM;
3266		goto error;
3267	}
3268	if (!(flags & XFRM_LOOKUP_ICMP)) {
3269		dst = dst_orig;
3270		goto ok;
3271	}
3272	err = -ENOENT;
3273error:
3274	dst_release(dst);
3275dropdst:
3276	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
3277		dst_release(dst_orig);
3278	xfrm_pols_put(pols, drop_pols);
3279	return ERR_PTR(err);
3280}
3281EXPORT_SYMBOL(xfrm_lookup_with_ifid);
3282
3283/* Main function: finds/creates a bundle for the given flow.
3284 *
3285 * At the moment we eat a raw IP route, mostly to speed up lookups
3286 * on interfaces with IPsec disabled.
3287 */
3288struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
3289			      const struct flowi *fl, const struct sock *sk,
3290			      int flags)
3291{
3292	return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
3293}
3294EXPORT_SYMBOL(xfrm_lookup);
3295
3296/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
3297 * Otherwise we may send out blackholed packets.
3298 */
3299struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
3300				    const struct flowi *fl,
3301				    const struct sock *sk, int flags)
3302{
3303	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
3304					    flags | XFRM_LOOKUP_QUEUE |
3305					    XFRM_LOOKUP_KEEP_DST_REF);
3306
3307	if (PTR_ERR(dst) == -EREMOTE)
3308		return make_blackhole(net, dst_orig->ops->family, dst_orig);
3309
3310	if (IS_ERR(dst))
3311		dst_release(dst_orig);
3312
3313	return dst;
3314}
3315EXPORT_SYMBOL(xfrm_lookup_route);
3316
3317static inline int
3318xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
3319{
3320	struct sec_path *sp = skb_sec_path(skb);
3321	struct xfrm_state *x;
3322
3323	if (!sp || idx < 0 || idx >= sp->len)
3324		return 0;
3325	x = sp->xvec[idx];
3326	if (!x->type->reject)
3327		return 0;
3328	return x->type->reject(x, skb, fl);
3329}
3330
3331/* When the skb is transformed back to its "native" form, we have to
3332 * check policy restrictions.  At the moment we do this in a maximally
3333 * stupid way.  Shame on me. :-)  Of course, connected sockets must
3334 * have the policy cached on them.
3335 */
3336
3337static inline int
3338xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
3339	      unsigned short family, u32 if_id)
3340{
3341	if (xfrm_state_kern(x))
3342		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
3343	return	x->id.proto == tmpl->id.proto &&
3344		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
3345		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
3346		x->props.mode == tmpl->mode &&
3347		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
3348		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
3349		!(x->props.mode != XFRM_MODE_TRANSPORT &&
3350		  xfrm_state_addr_cmp(tmpl, x, family)) &&
3351		(if_id == 0 || if_id == x->if_id);
3352}
3353
3354/*
3355 * 0 or more is returned when validation succeeds: either a bypass because
3356 * of an optional transport-mode template, or the next index after the
3357 * secpath state that matched the template.
3358 * -1 is returned when no matching template is found.
3359 * Otherwise "-2 - errored_index" is returned.
3360 */
3361static inline int
3362xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
3363	       unsigned short family, u32 if_id)
3364{
3365	int idx = start;
3366
3367	if (tmpl->optional) {
3368		if (tmpl->mode == XFRM_MODE_TRANSPORT)
3369			return start;
3370	} else
3371		start = -1;
3372	for (; idx < sp->len; idx++) {
3373		if (xfrm_state_ok(tmpl, sp->xvec[idx], family, if_id))
3374			return ++idx;
3375		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
3376			if (idx < sp->verified_cnt) {
3377				/* Secpath entry previously verified, consider optional and
3378				 * continue searching
3379				 */
3380				continue;
3381			}
3382
3383			if (start == -1)
3384				start = -2-idx;
3385			break;
3386		}
3387	}
3388	return start;
3389}
3390
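/* Translate the dissected flow keys into a flowi4.  For reverse
 * (inbound) lookups the source/destination addresses and ports are
 * swapped; GRE keys and ICMP type/code are carried over so that
 * selectors can match on them.
 */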
3391static void
3392decode_session4(const struct xfrm_flow_keys *flkeys, struct flowi *fl, bool reverse)
3393{
3394	struct flowi4 *fl4 = &fl->u.ip4;
3395
3396	memset(fl4, 0, sizeof(struct flowi4));
3397
3398	if (reverse) {
3399		fl4->saddr = flkeys->addrs.ipv4.dst;
3400		fl4->daddr = flkeys->addrs.ipv4.src;
3401		fl4->fl4_sport = flkeys->ports.dst;
3402		fl4->fl4_dport = flkeys->ports.src;
3403	} else {
3404		fl4->saddr = flkeys->addrs.ipv4.src;
3405		fl4->daddr = flkeys->addrs.ipv4.dst;
3406		fl4->fl4_sport = flkeys->ports.src;
3407		fl4->fl4_dport = flkeys->ports.dst;
3408	}
3409
3410	switch (flkeys->basic.ip_proto) {
3411	case IPPROTO_GRE:
3412		fl4->fl4_gre_key = flkeys->gre.keyid;
3413		break;
3414	case IPPROTO_ICMP:
3415		fl4->fl4_icmp_type = flkeys->icmp.type;
3416		fl4->fl4_icmp_code = flkeys->icmp.code;
3417		break;
3418	}
3419
3420	fl4->flowi4_proto = flkeys->basic.ip_proto;
3421	fl4->flowi4_tos = flkeys->ip.tos & ~INET_ECN_MASK;
3422}
3423
3424#if IS_ENABLED(CONFIG_IPV6)
3425static void
3426decode_session6(const struct xfrm_flow_keys *flkeys, struct flowi *fl, bool reverse)
3427{
3428	struct flowi6 *fl6 = &fl->u.ip6;
3429
3430	memset(fl6, 0, sizeof(struct flowi6));
3431
3432	if (reverse) {
3433		fl6->saddr = flkeys->addrs.ipv6.dst;
3434		fl6->daddr = flkeys->addrs.ipv6.src;
3435		fl6->fl6_sport = flkeys->ports.dst;
3436		fl6->fl6_dport = flkeys->ports.src;
3437	} else {
3438		fl6->saddr = flkeys->addrs.ipv6.src;
3439		fl6->daddr = flkeys->addrs.ipv6.dst;
3440		fl6->fl6_sport = flkeys->ports.src;
3441		fl6->fl6_dport = flkeys->ports.dst;
3442	}
3443
3444	switch (flkeys->basic.ip_proto) {
3445	case IPPROTO_GRE:
3446		fl6->fl6_gre_key = flkeys->gre.keyid;
3447		break;
3448	case IPPROTO_ICMPV6:
3449		fl6->fl6_icmp_type = flkeys->icmp.type;
3450		fl6->fl6_icmp_code = flkeys->icmp.code;
3451		break;
3452	}
3453
3454	fl6->flowi6_proto = flkeys->basic.ip_proto;
3455}
3456#endif
3457
3458int __xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl,
3459			  unsigned int family, int reverse)
3460{
3461	struct xfrm_flow_keys flkeys;
3462
3463	memset(&flkeys, 0, sizeof(flkeys));
3464	__skb_flow_dissect(net, skb, &xfrm_session_dissector, &flkeys,
3465			   NULL, 0, 0, 0, FLOW_DISSECTOR_F_STOP_AT_ENCAP);
3466
3467	switch (family) {
3468	case AF_INET:
3469		decode_session4(&flkeys, fl, reverse);
3470		break;
3471#if IS_ENABLED(CONFIG_IPV6)
3472	case AF_INET6:
3473		decode_session6(&flkeys, fl, reverse);
3474		break;
3475#endif
3476	default:
3477		return -EAFNOSUPPORT;
3478	}
3479
3480	fl->flowi_mark = skb->mark;
3481	if (reverse) {
3482		fl->flowi_oif = skb->skb_iif;
3483	} else {
3484		int oif = 0;
3485
3486		if (skb_dst(skb) && skb_dst(skb)->dev)
3487			oif = skb_dst(skb)->dev->ifindex;
3488
3489		fl->flowi_oif = oif;
3490	}
3491
3492	return security_xfrm_decode_session(skb, &fl->flowi_secid);
3493}
3494EXPORT_SYMBOL(__xfrm_decode_session);
3495
3496static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
3497{
3498	for (; k < sp->len; k++) {
3499		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
3500			*idxp = k;
3501			return 1;
3502		}
3503	}
3504
3505	return 0;
3506}
3507
3508static bool icmp_err_packet(const struct flowi *fl, unsigned short family)
3509{
3510	const struct flowi4 *fl4 = &fl->u.ip4;
3511
3512	if (family == AF_INET &&
3513	    fl4->flowi4_proto == IPPROTO_ICMP &&
3514	    (fl4->fl4_icmp_type == ICMP_DEST_UNREACH ||
3515	     fl4->fl4_icmp_type == ICMP_TIME_EXCEEDED))
3516		return true;
3517
3518#if IS_ENABLED(CONFIG_IPV6)
3519	if (family == AF_INET6) {
3520		const struct flowi6 *fl6 = &fl->u.ip6;
3521
3522		if (fl6->flowi6_proto == IPPROTO_ICMPV6 &&
3523		    (fl6->fl6_icmp_type == ICMPV6_DEST_UNREACH ||
3524		    fl6->fl6_icmp_type == ICMPV6_PKT_TOOBIG ||
3525		    fl6->fl6_icmp_type == ICMPV6_TIME_EXCEED))
3526			return true;
3527	}
3528#endif
3529	return false;
3530}
3531
3532static bool xfrm_icmp_flow_decode(struct sk_buff *skb, unsigned short family,
3533				  const struct flowi *fl, struct flowi *fl1)
3534{
3535	bool ret = true;
3536	struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
3537	int hl = family == AF_INET ? (sizeof(struct iphdr) +  sizeof(struct icmphdr)) :
3538		 (sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr));
3539
3540	if (!newskb)
3541		return true;
3542
3543	if (!pskb_pull(newskb, hl))
3544		goto out;
3545
3546	skb_reset_network_header(newskb);
3547
3548	if (xfrm_decode_session_reverse(dev_net(skb->dev), newskb, fl1, family) < 0)
3549		goto out;
3550
3551	fl1->flowi_oif = fl->flowi_oif;
3552	fl1->flowi_mark = fl->flowi_mark;
3553	fl1->flowi_tos = fl->flowi_tos;
3554	nf_nat_decode_session(newskb, fl1, family);
3555	ret = false;
3556
3557out:
3558	consume_skb(newskb);
3559	return ret;
3560}
3561
3562static bool xfrm_selector_inner_icmp_match(struct sk_buff *skb, unsigned short family,
3563					   const struct xfrm_selector *sel,
3564					   const struct flowi *fl)
3565{
3566	bool ret = false;
3567
3568	if (icmp_err_packet(fl, family)) {
3569		struct flowi fl1;
3570
3571		if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
3572			return ret;
3573
3574		ret = xfrm_selector_match(sel, &fl1, family);
3575	}
3576
3577	return ret;
3578}
3579
3580static inline struct
3581xfrm_policy *xfrm_in_fwd_icmp(struct sk_buff *skb,
3582			      const struct flowi *fl, unsigned short family,
3583			      u32 if_id)
3584{
3585	struct xfrm_policy *pol = NULL;
3586
3587	if (icmp_err_packet(fl, family)) {
3588		struct flowi fl1;
3589		struct net *net = dev_net(skb->dev);
3590
3591		if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
3592			return pol;
3593
3594		pol = xfrm_policy_lookup(net, &fl1, family, XFRM_POLICY_FWD, if_id);
3595		if (IS_ERR(pol))
3596			pol = NULL;
3597	}
3598
3599	return pol;
3600}
3601
3602static inline struct
3603dst_entry *xfrm_out_fwd_icmp(struct sk_buff *skb, struct flowi *fl,
3604			     unsigned short family, struct dst_entry *dst)
3605{
3606	if (icmp_err_packet(fl, family)) {
3607		struct net *net = dev_net(skb->dev);
3608		struct dst_entry *dst2;
3609		struct flowi fl1;
3610
3611		if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
3612			return dst;
3613
3614		dst_hold(dst);
3615
3616		dst2 = xfrm_lookup(net, dst, &fl1, NULL, (XFRM_LOOKUP_QUEUE | XFRM_LOOKUP_ICMP));
3617
3618		if (IS_ERR(dst2))
3619			return dst;
3620
3621		if (dst2->xfrm) {
3622			dst_release(dst);
3623			dst = dst2;
3624		} else {
3625			dst_release(dst2);
3626		}
3627	}
3628
3629	return dst;
3630}
3631
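/* Inbound/forward policy check: decode the flow from the skb, verify the
 * states that were actually used against their selectors, find the
 * applicable policy (socket policy first, then main/sub policies), and
 * check that the secpath satisfies every template of every matched
 * policy.  Returns 1 if the packet may pass, 0 if it must be dropped.
 */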
3632int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3633			unsigned short family)
3634{
3635	struct net *net = dev_net(skb->dev);
3636	struct xfrm_policy *pol;
3637	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3638	int npols = 0;
3639	int xfrm_nr;
3640	int pi;
3641	int reverse;
3642	struct flowi fl;
3643	int xerr_idx = -1;
3644	const struct xfrm_if_cb *ifcb;
3645	struct sec_path *sp;
3646	u32 if_id = 0;
3647
3648	rcu_read_lock();
3649	ifcb = xfrm_if_get_cb();
3650
3651	if (ifcb) {
3652		struct xfrm_if_decode_session_result r;
3653
3654		if (ifcb->decode_session(skb, family, &r)) {
3655			if_id = r.if_id;
3656			net = r.net;
3657		}
3658	}
3659	rcu_read_unlock();
3660
3661	reverse = dir & ~XFRM_POLICY_MASK;
3662	dir &= XFRM_POLICY_MASK;
3663
3664	if (__xfrm_decode_session(net, skb, &fl, family, reverse) < 0) {
3665		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
3666		return 0;
3667	}
3668
3669	nf_nat_decode_session(skb, &fl, family);
3670
3671	/* First, check used SA against their selectors. */
3672	sp = skb_sec_path(skb);
3673	if (sp) {
3674		int i;
3675
3676		for (i = sp->len - 1; i >= 0; i--) {
3677			struct xfrm_state *x = sp->xvec[i];
3678			int ret = 0;
3679
3680			if (!xfrm_selector_match(&x->sel, &fl, family)) {
3681				ret = 1;
3682				if (x->props.flags & XFRM_STATE_ICMP &&
3683				    xfrm_selector_inner_icmp_match(skb, family, &x->sel, &fl))
3684					ret = 0;
3685				if (ret) {
3686					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
3687					return 0;
3688				}
3689			}
3690		}
3691	}
3692
3693	pol = NULL;
3694	sk = sk_to_full_sk(sk);
3695	if (sk && sk->sk_policy[dir]) {
3696		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
3697		if (IS_ERR(pol)) {
3698			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3699			return 0;
3700		}
3701	}
3702
3703	if (!pol)
3704		pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
3705
3706	if (IS_ERR(pol)) {
3707		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3708		return 0;
3709	}
3710
3711	if (!pol && dir == XFRM_POLICY_FWD)
3712		pol = xfrm_in_fwd_icmp(skb, &fl, family, if_id);
3713
3714	if (!pol) {
3715		if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
3716			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3717			return 0;
3718		}
3719
3720		if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
3721			xfrm_secpath_reject(xerr_idx, skb, &fl);
3722			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3723			return 0;
3724		}
3725		return 1;
3726	}
3727
3728	/* This lockless write can happen from different cpus. */
3729	WRITE_ONCE(pol->curlft.use_time, ktime_get_real_seconds());
3730
3731	pols[0] = pol;
3732	npols++;
3733#ifdef CONFIG_XFRM_SUB_POLICY
3734	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
3735		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
3736						    &fl, family,
3737						    XFRM_POLICY_IN, if_id);
3738		if (pols[1]) {
3739			if (IS_ERR(pols[1])) {
3740				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3741				xfrm_pol_put(pols[0]);
3742				return 0;
3743			}
3744			/* This write can happen from different cpus. */
3745			WRITE_ONCE(pols[1]->curlft.use_time,
3746				   ktime_get_real_seconds());
3747			npols++;
3748		}
3749	}
3750#endif
3751
3752	if (pol->action == XFRM_POLICY_ALLOW) {
3753		static struct sec_path dummy;
3754		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
3755		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
3756		struct xfrm_tmpl **tpp = tp;
3757		int ti = 0;
3758		int i, k;
3759
3760		sp = skb_sec_path(skb);
3761		if (!sp)
3762			sp = &dummy;
3763
3764		for (pi = 0; pi < npols; pi++) {
3765			if (pols[pi] != pol &&
3766			    pols[pi]->action != XFRM_POLICY_ALLOW) {
3767				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3768				goto reject;
3769			}
3770			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
3771				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
3772				goto reject_error;
3773			}
3774			for (i = 0; i < pols[pi]->xfrm_nr; i++)
3775				tpp[ti++] = &pols[pi]->xfrm_vec[i];
3776		}
3777		xfrm_nr = ti;
3778
3779		if (npols > 1) {
3780			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
3781			tpp = stp;
3782		}
3783
3784		/* For each tunnel xfrm, find the first matching tmpl.
3785		 * For each tmpl before that, find the corresponding xfrm.
3786		 * Order is _important_.  Later we will implement
3787		 * some barriers, but at the moment barriers
3788		 * are implied between every two transformations.
3789		 * Upon success, mark the secpath entries as having been
3790		 * verified, to allow them to be skipped in future policy
3791		 * checks (e.g. nested tunnels).
3792		 */
3793		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
3794			k = xfrm_policy_ok(tpp[i], sp, k, family, if_id);
3795			if (k < 0) {
3796				if (k < -1)
3797					/* "-2 - errored_index" returned */
3798					xerr_idx = -(2+k);
3799				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3800				goto reject;
3801			}
3802		}
3803
3804		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
3805			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3806			goto reject;
3807		}
3808
3809		xfrm_pols_put(pols, npols);
3810		sp->verified_cnt = k;
3811
3812		return 1;
3813	}
3814	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3815
3816reject:
3817	xfrm_secpath_reject(xerr_idx, skb, &fl);
3818reject_error:
3819	xfrm_pols_put(pols, npols);
3820	return 0;
3821}
3822EXPORT_SYMBOL(__xfrm_policy_check);
3823
3824int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
3825{
3826	struct net *net = dev_net(skb->dev);
3827	struct flowi fl;
3828	struct dst_entry *dst;
3829	int res = 1;
3830
3831	if (xfrm_decode_session(net, skb, &fl, family) < 0) {
3832		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3833		return 0;
3834	}
3835
3836	skb_dst_force(skb);
3837	if (!skb_dst(skb)) {
3838		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3839		return 0;
3840	}
3841
3842	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
3843	if (IS_ERR(dst)) {
3844		res = 0;
3845		dst = NULL;
3846	}
3847
3848	if (dst && !dst->xfrm)
3849		dst = xfrm_out_fwd_icmp(skb, &fl, family, dst);
3850
3851	skb_dst_set(skb, dst);
3852	return res;
3853}
3854EXPORT_SYMBOL(__xfrm_route_forward);
3855
3856/* Optimize later using cookies and generation ids. */
3857
3858static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
3859{
3860	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
3861	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
3862	 * get validated by dst_ops->check on every use.  We do this
3863	 * because when a normal route referenced by an XFRM dst is
3864	 * obsoleted we do not go looking around for all parent
3865	 * referencing XFRM dsts so that we can invalidate them.  It
3866	 * is just too much work.  Instead we make the checks here on
3867	 * every use.  For example:
3868	 *
3869	 *	XFRM dst A --> IPv4 dst X
3870	 *
3871	 * X is the "xdst->route" of A (X is also the "dst->path" of A
3872	 * in this example).  If X is marked obsolete, "A" will not
3873	 * notice.  That's what we are validating here via the
3874	 * stale_bundle() check.
3875	 *
3876	 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
3877	 * be marked on it.
3878	 * This will force stale_bundle() to fail on any xdst bundle with
3879	 * this dst linked in it.
3880	 */
3881	if (dst->obsolete < 0 && !stale_bundle(dst))
3882		return dst;
3883
3884	return NULL;
3885}
3886
3887static int stale_bundle(struct dst_entry *dst)
3888{
3889	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
3890}
3891
3892void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
3893{
3894	while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
3895		dst->dev = blackhole_netdev;
3896		dev_hold(dst->dev);
3897		dev_put(dev);
3898	}
3899}
3900EXPORT_SYMBOL(xfrm_dst_ifdown);
3901
3902static void xfrm_link_failure(struct sk_buff *skb)
3903{
3904	/* Impossible.  Such a dst must be popped before it reaches the point of failure. */
3905}
3906
3907static void xfrm_negative_advice(struct sock *sk, struct dst_entry *dst)
3908{
3909	if (dst->obsolete)
3910		sk_dst_reset(sk);
3911}
3912
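/* Initialize the cached MTUs of a freshly built bundle: walk the chain
 * from the entry next to the real route up to the top, taking the child
 * MTU, shrinking it by the state's overhead, capping it at the route
 * MTU, and recording the result as that level's RTAX_MTU metric.
 */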
3913static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
3914{
3915	while (nr--) {
3916		struct xfrm_dst *xdst = bundle[nr];
3917		u32 pmtu, route_mtu_cached;
3918		struct dst_entry *dst;
3919
3920		dst = &xdst->u.dst;
3921		pmtu = dst_mtu(xfrm_dst_child(dst));
3922		xdst->child_mtu_cached = pmtu;
3923
3924		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
3925
3926		route_mtu_cached = dst_mtu(xdst->route);
3927		xdst->route_mtu_cached = route_mtu_cached;
3928
3929		if (pmtu > route_mtu_cached)
3930			pmtu = route_mtu_cached;
3931
3932		dst_metric_set(dst, RTAX_MTU, pmtu);
3933	}
3934}
3935
3936/* Check that the bundle accepts the flow and its components are
3937 * still valid.
3938 */
3939
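/* Besides validating the chain, this also refreshes the cached child and
 * route MTUs and re-propagates the path MTU whenever one of them changed
 * underneath the bundle.
 */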
3940static int xfrm_bundle_ok(struct xfrm_dst *first)
3941{
3942	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
3943	struct dst_entry *dst = &first->u.dst;
3944	struct xfrm_dst *xdst;
3945	int start_from, nr;
3946	u32 mtu;
3947
3948	if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
3949	    (dst->dev && !netif_running(dst->dev)))
3950		return 0;
3951
3952	if (dst->flags & DST_XFRM_QUEUE)
3953		return 1;
3954
3955	start_from = nr = 0;
3956	do {
3957		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
3958
3959		if (dst->xfrm->km.state != XFRM_STATE_VALID)
3960			return 0;
3961		if (xdst->xfrm_genid != dst->xfrm->genid)
3962			return 0;
3963		if (xdst->num_pols > 0 &&
3964		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
3965			return 0;
3966
3967		bundle[nr++] = xdst;
3968
3969		mtu = dst_mtu(xfrm_dst_child(dst));
3970		if (xdst->child_mtu_cached != mtu) {
3971			start_from = nr;
3972			xdst->child_mtu_cached = mtu;
3973		}
3974
3975		if (!dst_check(xdst->route, xdst->route_cookie))
3976			return 0;
3977		mtu = dst_mtu(xdst->route);
3978		if (xdst->route_mtu_cached != mtu) {
3979			start_from = nr;
3980			xdst->route_mtu_cached = mtu;
3981		}
3982
3983		dst = xfrm_dst_child(dst);
3984	} while (dst->xfrm);
3985
3986	if (likely(!start_from))
3987		return 1;
3988
3989	xdst = bundle[start_from - 1];
3990	mtu = xdst->child_mtu_cached;
3991	while (start_from--) {
3992		dst = &xdst->u.dst;
3993
3994		mtu = xfrm_state_mtu(dst->xfrm, mtu);
3995		if (mtu > xdst->route_mtu_cached)
3996			mtu = xdst->route_mtu_cached;
3997		dst_metric_set(dst, RTAX_MTU, mtu);
3998		if (!start_from)
3999			break;
4000
4001		xdst = bundle[start_from - 1];
4002		xdst->child_mtu_cached = mtu;
4003	}
4004
4005	return 1;
4006}
4007
4008static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
4009{
4010	return dst_metric_advmss(xfrm_dst_path(dst));
4011}
4012
4013static unsigned int xfrm_mtu(const struct dst_entry *dst)
4014{
4015	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
4016
4017	return mtu ? : dst_mtu(xfrm_dst_path(dst));
4018}
4019
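/* Walk down the bundle to find the address the underlying route should
 * resolve as the next hop: transport-mode states are skipped, and the
 * remaining states substitute their remote care-of address or tunnel
 * endpoint where applicable.
 */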
4020static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
4021					const void *daddr)
4022{
4023	while (dst->xfrm) {
4024		const struct xfrm_state *xfrm = dst->xfrm;
4025
4026		dst = xfrm_dst_child(dst);
4027
4028		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
4029			continue;
4030		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
4031			daddr = xfrm->coaddr;
4032		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
4033			daddr = &xfrm->id.daddr;
4034	}
4035	return daddr;
4036}
4037
4038static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
4039					   struct sk_buff *skb,
4040					   const void *daddr)
4041{
4042	const struct dst_entry *path = xfrm_dst_path(dst);
4043
4044	if (!skb)
4045		daddr = xfrm_get_dst_nexthop(dst, daddr);
4046	return path->ops->neigh_lookup(path, skb, daddr);
4047}
4048
4049static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
4050{
4051	const struct dst_entry *path = xfrm_dst_path(dst);
4052
4053	daddr = xfrm_get_dst_nexthop(dst, daddr);
4054	path->ops->confirm_neigh(path, daddr);
4055}
4056
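/* Register the per-family policy ops.  Any dst_ops callback the caller
 * leaves NULL is filled in with the generic xfrm implementation above.
 */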
4057int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
4058{
4059	int err = 0;
4060
4061	if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
4062		return -EAFNOSUPPORT;
4063
4064	spin_lock(&xfrm_policy_afinfo_lock);
4065	if (unlikely(xfrm_policy_afinfo[family] != NULL))
4066		err = -EEXIST;
4067	else {
4068		struct dst_ops *dst_ops = afinfo->dst_ops;
4069		if (likely(dst_ops->kmem_cachep == NULL))
4070			dst_ops->kmem_cachep = xfrm_dst_cache;
4071		if (likely(dst_ops->check == NULL))
4072			dst_ops->check = xfrm_dst_check;
4073		if (likely(dst_ops->default_advmss == NULL))
4074			dst_ops->default_advmss = xfrm_default_advmss;
4075		if (likely(dst_ops->mtu == NULL))
4076			dst_ops->mtu = xfrm_mtu;
4077		if (likely(dst_ops->negative_advice == NULL))
4078			dst_ops->negative_advice = xfrm_negative_advice;
4079		if (likely(dst_ops->link_failure == NULL))
4080			dst_ops->link_failure = xfrm_link_failure;
4081		if (likely(dst_ops->neigh_lookup == NULL))
4082			dst_ops->neigh_lookup = xfrm_neigh_lookup;
4083		if (likely(!dst_ops->confirm_neigh))
4084			dst_ops->confirm_neigh = xfrm_confirm_neigh;
4085		rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
4086	}
4087	spin_unlock(&xfrm_policy_afinfo_lock);
4088
4089	return err;
4090}
4091EXPORT_SYMBOL(xfrm_policy_register_afinfo);
4092
4093void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
4094{
4095	struct dst_ops *dst_ops = afinfo->dst_ops;
4096	int i;
4097
4098	for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
4099		if (xfrm_policy_afinfo[i] != afinfo)
4100			continue;
4101		RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
4102		break;
4103	}
4104
4105	synchronize_rcu();
4106
4107	dst_ops->kmem_cachep = NULL;
4108	dst_ops->check = NULL;
4109	dst_ops->negative_advice = NULL;
4110	dst_ops->link_failure = NULL;
4111}
4112EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
4113
4114void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
4115{
4116	spin_lock(&xfrm_if_cb_lock);
4117	rcu_assign_pointer(xfrm_if_cb, ifcb);
4118	spin_unlock(&xfrm_if_cb_lock);
4119}
4120EXPORT_SYMBOL(xfrm_if_register_cb);
4121
4122void xfrm_if_unregister_cb(void)
4123{
4124	RCU_INIT_POINTER(xfrm_if_cb, NULL);
4125	synchronize_rcu();
4126}
4127EXPORT_SYMBOL(xfrm_if_unregister_cb);
4128
4129#ifdef CONFIG_XFRM_STATISTICS
4130static int __net_init xfrm_statistics_init(struct net *net)
4131{
4132	int rv;
4133	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
4134	if (!net->mib.xfrm_statistics)
4135		return -ENOMEM;
4136	rv = xfrm_proc_init(net);
4137	if (rv < 0)
4138		free_percpu(net->mib.xfrm_statistics);
4139	return rv;
4140}
4141
4142static void xfrm_statistics_fini(struct net *net)
4143{
4144	xfrm_proc_fini(net);
4145	free_percpu(net->mib.xfrm_statistics);
4146}
4147#else
4148static int __net_init xfrm_statistics_init(struct net *net)
4149{
4150	return 0;
4151}
4152
4153static void xfrm_statistics_fini(struct net *net)
4154{
4155}
4156#endif
4157
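/* Per-namespace policy setup.  The initial namespace additionally creates
 * the global xfrm_dst slab cache and the inexact-policy rhashtable; every
 * namespace starts with small (8 bucket) by-index and per-direction
 * by-destination hash tables that the resize worker grows on demand.
 */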
4158static int __net_init xfrm_policy_init(struct net *net)
4159{
4160	unsigned int hmask, sz;
4161	int dir, err;
4162
4163	if (net_eq(net, &init_net)) {
4164		xfrm_dst_cache = KMEM_CACHE(xfrm_dst, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
4165		err = rhashtable_init(&xfrm_policy_inexact_table,
4166				      &xfrm_pol_inexact_params);
4167		BUG_ON(err);
4168	}
4169
4170	hmask = 8 - 1;
4171	sz = (hmask+1) * sizeof(struct hlist_head);
4172
4173	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
4174	if (!net->xfrm.policy_byidx)
4175		goto out_byidx;
4176	net->xfrm.policy_idx_hmask = hmask;
4177
4178	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4179		struct xfrm_policy_hash *htab;
4180
4181		net->xfrm.policy_count[dir] = 0;
4182		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
4183		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
4184
4185		htab = &net->xfrm.policy_bydst[dir];
4186		htab->table = xfrm_hash_alloc(sz);
4187		if (!htab->table)
4188			goto out_bydst;
4189		htab->hmask = hmask;
4190		htab->dbits4 = 32;
4191		htab->sbits4 = 32;
4192		htab->dbits6 = 128;
4193		htab->sbits6 = 128;
4194	}
4195	net->xfrm.policy_hthresh.lbits4 = 32;
4196	net->xfrm.policy_hthresh.rbits4 = 32;
4197	net->xfrm.policy_hthresh.lbits6 = 128;
4198	net->xfrm.policy_hthresh.rbits6 = 128;
4199
4200	seqlock_init(&net->xfrm.policy_hthresh.lock);
4201
4202	INIT_LIST_HEAD(&net->xfrm.policy_all);
4203	INIT_LIST_HEAD(&net->xfrm.inexact_bins);
4204	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
4205	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
4206	return 0;
4207
4208out_bydst:
4209	for (dir--; dir >= 0; dir--) {
4210		struct xfrm_policy_hash *htab;
4211
4212		htab = &net->xfrm.policy_bydst[dir];
4213		xfrm_hash_free(htab->table, sz);
4214	}
4215	xfrm_hash_free(net->xfrm.policy_byidx, sz);
4216out_byidx:
4217	return -ENOMEM;
4218}
4219
4220static void xfrm_policy_fini(struct net *net)
4221{
4222	struct xfrm_pol_inexact_bin *b, *t;
4223	unsigned int sz;
4224	int dir;
4225
4226	flush_work(&net->xfrm.policy_hash_work);
4227#ifdef CONFIG_XFRM_SUB_POLICY
4228	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
4229#endif
4230	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
4231
4232	WARN_ON(!list_empty(&net->xfrm.policy_all));
4233
4234	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4235		struct xfrm_policy_hash *htab;
4236
4237		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
4238
4239		htab = &net->xfrm.policy_bydst[dir];
4240		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
4241		WARN_ON(!hlist_empty(htab->table));
4242		xfrm_hash_free(htab->table, sz);
4243	}
4244
4245	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
4246	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
4247	xfrm_hash_free(net->xfrm.policy_byidx, sz);
4248
4249	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4250	list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
4251		__xfrm_policy_inexact_prune_bin(b, true);
4252	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4253}
4254
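/* Bring up the xfrm infrastructure for a new network namespace in order:
 * statistics, state, policy and sysctl, unwinding in reverse on failure.
 */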
4255static int __net_init xfrm_net_init(struct net *net)
4256{
4257	int rv;
4258
4259	/* Initialize the per-net locks here */
4260	spin_lock_init(&net->xfrm.xfrm_state_lock);
4261	spin_lock_init(&net->xfrm.xfrm_policy_lock);
4262	seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
4263	mutex_init(&net->xfrm.xfrm_cfg_mutex);
4264	net->xfrm.policy_default[XFRM_POLICY_IN] = XFRM_USERPOLICY_ACCEPT;
4265	net->xfrm.policy_default[XFRM_POLICY_FWD] = XFRM_USERPOLICY_ACCEPT;
4266	net->xfrm.policy_default[XFRM_POLICY_OUT] = XFRM_USERPOLICY_ACCEPT;
4267
4268	rv = xfrm_statistics_init(net);
4269	if (rv < 0)
4270		goto out_statistics;
4271	rv = xfrm_state_init(net);
4272	if (rv < 0)
4273		goto out_state;
4274	rv = xfrm_policy_init(net);
4275	if (rv < 0)
4276		goto out_policy;
4277	rv = xfrm_sysctl_init(net);
4278	if (rv < 0)
4279		goto out_sysctl;
4280
4281	return 0;
4282
4283out_sysctl:
4284	xfrm_policy_fini(net);
4285out_policy:
4286	xfrm_state_fini(net);
4287out_state:
4288	xfrm_statistics_fini(net);
4289out_statistics:
4290	return rv;
4291}
4292
4293static void __net_exit xfrm_net_exit(struct net *net)
4294{
4295	xfrm_sysctl_fini(net);
4296	xfrm_policy_fini(net);
4297	xfrm_state_fini(net);
4298	xfrm_statistics_fini(net);
4299}
4300
4301static struct pernet_operations __net_initdata xfrm_net_ops = {
4302	.init = xfrm_net_init,
4303	.exit = xfrm_net_exit,
4304};
4305
4306static const struct flow_dissector_key xfrm_flow_dissector_keys[] = {
4307	{
4308		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
4309		.offset = offsetof(struct xfrm_flow_keys, control),
4310	},
4311	{
4312		.key_id = FLOW_DISSECTOR_KEY_BASIC,
4313		.offset = offsetof(struct xfrm_flow_keys, basic),
4314	},
4315	{
4316		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
4317		.offset = offsetof(struct xfrm_flow_keys, addrs.ipv4),
4318	},
4319	{
4320		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
4321		.offset = offsetof(struct xfrm_flow_keys, addrs.ipv6),
4322	},
4323	{
4324		.key_id = FLOW_DISSECTOR_KEY_PORTS,
4325		.offset = offsetof(struct xfrm_flow_keys, ports),
4326	},
4327	{
4328		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
4329		.offset = offsetof(struct xfrm_flow_keys, gre),
4330	},
4331	{
4332		.key_id = FLOW_DISSECTOR_KEY_IP,
4333		.offset = offsetof(struct xfrm_flow_keys, ip),
4334	},
4335	{
4336		.key_id = FLOW_DISSECTOR_KEY_ICMP,
4337		.offset = offsetof(struct xfrm_flow_keys, icmp),
4338	},
4339};
4340
4341void __init xfrm_init(void)
4342{
4343	skb_flow_dissector_init(&xfrm_session_dissector,
4344				xfrm_flow_dissector_keys,
4345				ARRAY_SIZE(xfrm_flow_dissector_keys));
4346
4347	register_pernet_subsys(&xfrm_net_ops);
4348	xfrm_dev_init();
4349	xfrm_input_init();
4350
4351#ifdef CONFIG_XFRM_ESPINTCP
4352	espintcp_init();
4353#endif
4354
4355	register_xfrm_state_bpf();
4356}
4357
4358#ifdef CONFIG_AUDITSYSCALL
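/* Append the policy's security context and selector addresses (with
 * prefix lengths when they are not full host matches) to an audit record.
 */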
4359static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
4360					 struct audit_buffer *audit_buf)
4361{
4362	struct xfrm_sec_ctx *ctx = xp->security;
4363	struct xfrm_selector *sel = &xp->selector;
4364
4365	if (ctx)
4366		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
4367				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
4368
4369	switch (sel->family) {
4370	case AF_INET:
4371		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
4372		if (sel->prefixlen_s != 32)
4373			audit_log_format(audit_buf, " src_prefixlen=%d",
4374					 sel->prefixlen_s);
4375		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
4376		if (sel->prefixlen_d != 32)
4377			audit_log_format(audit_buf, " dst_prefixlen=%d",
4378					 sel->prefixlen_d);
4379		break;
4380	case AF_INET6:
4381		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
4382		if (sel->prefixlen_s != 128)
4383			audit_log_format(audit_buf, " src_prefixlen=%d",
4384					 sel->prefixlen_s);
4385		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
4386		if (sel->prefixlen_d != 128)
4387			audit_log_format(audit_buf, " dst_prefixlen=%d",
4388					 sel->prefixlen_d);
4389		break;
4390	}
4391}
4392
4393void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
4394{
4395	struct audit_buffer *audit_buf;
4396
4397	audit_buf = xfrm_audit_start("SPD-add");
4398	if (audit_buf == NULL)
4399		return;
4400	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4401	audit_log_format(audit_buf, " res=%u", result);
4402	xfrm_audit_common_policyinfo(xp, audit_buf);
4403	audit_log_end(audit_buf);
4404}
4405EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
4406
4407void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
4408			      bool task_valid)
4409{
4410	struct audit_buffer *audit_buf;
4411
4412	audit_buf = xfrm_audit_start("SPD-delete");
4413	if (audit_buf == NULL)
4414		return;
4415	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4416	audit_log_format(audit_buf, " res=%u", result);
4417	xfrm_audit_common_policyinfo(xp, audit_buf);
4418	audit_log_end(audit_buf);
4419}
4420EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
4421#endif
4422
4423#ifdef CONFIG_XFRM_MIGRATE
4424static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
4425					const struct xfrm_selector *sel_tgt)
4426{
4427	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
4428		if (sel_tgt->family == sel_cmp->family &&
4429		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
4430				    sel_cmp->family) &&
4431		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
4432				    sel_cmp->family) &&
4433		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
4434		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
4435			return true;
4436		}
4437	} else {
4438		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
4439			return true;
4440		}
4441	}
4442	return false;
4443}
4444
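/* Find the policy a MIGRATE request refers to: probe the exact
 * by-destination hash chain first, then fall back to the inexact list,
 * keeping the best-priority match and taking a reference under the
 * policy lock.
 */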
4445static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
4446						    u8 dir, u8 type, struct net *net, u32 if_id)
4447{
4448	struct xfrm_policy *pol, *ret = NULL;
4449	struct hlist_head *chain;
4450	u32 priority = ~0U;
4451
4452	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4453	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
4454	hlist_for_each_entry(pol, chain, bydst) {
4455		if ((if_id == 0 || pol->if_id == if_id) &&
4456		    xfrm_migrate_selector_match(sel, &pol->selector) &&
4457		    pol->type == type) {
4458			ret = pol;
4459			priority = ret->priority;
4460			break;
4461		}
4462	}
4463	chain = &net->xfrm.policy_inexact[dir];
4464	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
4465		if ((pol->priority >= priority) && ret)
4466			break;
4467
4468		if ((if_id == 0 || pol->if_id == if_id) &&
4469		    xfrm_migrate_selector_match(sel, &pol->selector) &&
4470		    pol->type == type) {
4471			ret = pol;
4472			break;
4473		}
4474	}
4475
4476	xfrm_pol_hold(ret);
4477
4478	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4479
4480	return ret;
4481}
4482
4483static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
4484{
4485	int match = 0;
4486
4487	if (t->mode == m->mode && t->id.proto == m->proto &&
4488	    (m->reqid == 0 || t->reqid == m->reqid)) {
4489		switch (t->mode) {
4490		case XFRM_MODE_TUNNEL:
4491		case XFRM_MODE_BEET:
4492			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
4493					    m->old_family) &&
4494			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
4495					    m->old_family)) {
4496				match = 1;
4497			}
4498			break;
4499		case XFRM_MODE_TRANSPORT:
4500			/* In transport mode the template does not store any
4501			 * IP addresses, so only the mode and the protocol
4502			 * are compared. */
4503			match = 1;
4504			break;
4505		default:
4506			break;
4507		}
4508	}
4509	return match;
4510}
4511
4512/* update endpoint address(es) of template(s) */
4513static int xfrm_policy_migrate(struct xfrm_policy *pol,
4514			       struct xfrm_migrate *m, int num_migrate,
4515			       struct netlink_ext_ack *extack)
4516{
4517	struct xfrm_migrate *mp;
4518	int i, j, n = 0;
4519
4520	write_lock_bh(&pol->lock);
4521	if (unlikely(pol->walk.dead)) {
4522		/* target policy has been deleted */
4523		NL_SET_ERR_MSG(extack, "Target policy not found");
4524		write_unlock_bh(&pol->lock);
4525		return -ENOENT;
4526	}
4527
4528	for (i = 0; i < pol->xfrm_nr; i++) {
4529		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
4530			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
4531				continue;
4532			n++;
4533			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
4534			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
4535				continue;
4536			/* update endpoints */
4537			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
4538			       sizeof(pol->xfrm_vec[i].id.daddr));
4539			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
4540			       sizeof(pol->xfrm_vec[i].saddr));
4541			pol->xfrm_vec[i].encap_family = mp->new_family;
4542			/* flush bundles */
4543			atomic_inc(&pol->genid);
4544		}
4545	}
4546
4547	write_unlock_bh(&pol->lock);
4548
4549	if (!n)
4550		return -ENODATA;
4551
4552	return 0;
4553}
4554
4555static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate,
4556			      struct netlink_ext_ack *extack)
4557{
4558	int i, j;
4559
4560	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH) {
4561		NL_SET_ERR_MSG(extack, "Invalid number of SAs to migrate, must be 0 < num <= XFRM_MAX_DEPTH (6)");
4562		return -EINVAL;
4563	}
4564
4565	for (i = 0; i < num_migrate; i++) {
4566		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
4567		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family)) {
4568			NL_SET_ERR_MSG(extack, "Addresses in the MIGRATE attribute's list cannot be null");
4569			return -EINVAL;
4570		}
4571
4572		/* check for duplicate entries */
4573		for (j = i + 1; j < num_migrate; j++) {
4574			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
4575				    sizeof(m[i].old_daddr)) &&
4576			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
4577				    sizeof(m[i].old_saddr)) &&
4578			    m[i].proto == m[j].proto &&
4579			    m[i].mode == m[j].mode &&
4580			    m[i].reqid == m[j].reqid &&
4581			    m[i].old_family == m[j].old_family) {
4582				NL_SET_ERR_MSG(extack, "Entries in the MIGRATE attribute's list must be unique");
4583				return -EINVAL;
4584			}
4585		}
4586	}
4587
4588	return 0;
4589}
4590
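/* Migrate a policy and its matching states to new endpoints: validate the
 * request, find the target policy, clone each matching state onto the new
 * addresses, rewrite the policy templates, delete the superseded states
 * and notify the registered key managers.
 */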
4591int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
4592		 struct xfrm_migrate *m, int num_migrate,
4593		 struct xfrm_kmaddress *k, struct net *net,
4594		 struct xfrm_encap_tmpl *encap, u32 if_id,
4595		 struct netlink_ext_ack *extack)
4596{
4597	int i, err, nx_cur = 0, nx_new = 0;
4598	struct xfrm_policy *pol = NULL;
4599	struct xfrm_state *x, *xc;
4600	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
4601	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
4602	struct xfrm_migrate *mp;
4603
4604	/* Stage 0 - sanity checks */
4605	err = xfrm_migrate_check(m, num_migrate, extack);
4606	if (err < 0)
4607		goto out;
4608
4609	if (dir >= XFRM_POLICY_MAX) {
4610		NL_SET_ERR_MSG(extack, "Invalid policy direction");
4611		err = -EINVAL;
4612		goto out;
4613	}
4614
4615	/* Stage 1 - find policy */
4616	pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id);
4617	if (!pol) {
4618		NL_SET_ERR_MSG(extack, "Target policy not found");
4619		err = -ENOENT;
4620		goto out;
4621	}
4622
4623	/* Stage 2 - find and update state(s) */
4624	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
4625		if ((x = xfrm_migrate_state_find(mp, net, if_id))) {
4626			x_cur[nx_cur] = x;
4627			nx_cur++;
4628			xc = xfrm_state_migrate(x, mp, encap);
4629			if (xc) {
4630				x_new[nx_new] = xc;
4631				nx_new++;
4632			} else {
4633				err = -ENODATA;
4634				goto restore_state;
4635			}
4636		}
4637	}
4638
4639	/* Stage 3 - update policy */
4640	err = xfrm_policy_migrate(pol, m, num_migrate, extack);
4641	if (err < 0)
4642		goto restore_state;
4643
4644	/* Stage 4 - delete old state(s) */
4645	if (nx_cur) {
4646		xfrm_states_put(x_cur, nx_cur);
4647		xfrm_states_delete(x_cur, nx_cur);
4648	}
4649
4650	/* Stage 5 - announce */
4651	km_migrate(sel, dir, type, m, num_migrate, k, encap);
4652
4653	xfrm_pol_put(pol);
4654
4655	return 0;
4656out:
4657	return err;
4658
4659restore_state:
4660	if (pol)
4661		xfrm_pol_put(pol);
4662	if (nx_cur)
4663		xfrm_states_put(x_cur, nx_cur);
4664	if (nx_new)
4665		xfrm_states_delete(x_new, nx_new);
4666
4667	return err;
4668}
4669EXPORT_SYMBOL(xfrm_migrate);
4670#endif
v4.6
 
   1/*
   2 * xfrm_policy.c
   3 *
   4 * Changes:
   5 *	Mitsuru KANDA @USAGI
   6 * 	Kazunori MIYAZAWA @USAGI
   7 * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
   8 * 		IPv6 support
   9 * 	Kazunori MIYAZAWA @USAGI
  10 * 	YOSHIFUJI Hideaki
  11 * 		Split up af-specific portion
  12 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
  13 *
  14 */
  15
  16#include <linux/err.h>
  17#include <linux/slab.h>
  18#include <linux/kmod.h>
  19#include <linux/list.h>
  20#include <linux/spinlock.h>
  21#include <linux/workqueue.h>
  22#include <linux/notifier.h>
  23#include <linux/netdevice.h>
  24#include <linux/netfilter.h>
  25#include <linux/module.h>
  26#include <linux/cache.h>
 
  27#include <linux/audit.h>
 
 
 
  28#include <net/dst.h>
  29#include <net/flow.h>
 
  30#include <net/xfrm.h>
  31#include <net/ip.h>
 
 
 
 
  32#ifdef CONFIG_XFRM_STATISTICS
  33#include <net/snmp.h>
  34#endif
 
 
 
  35
  36#include "xfrm_hash.h"
  37
  38#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
  39#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
  40#define XFRM_MAX_QUEUE_LEN	100
  41
  42struct xfrm_flo {
  43	struct dst_entry *dst_orig;
  44	u8 flags;
  45};
  46
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  47static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
  48static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
  49						__read_mostly;
  50
  51static struct kmem_cache *xfrm_dst_cache __read_mostly;
 
 
 
  52
  53static void xfrm_init_pmtu(struct dst_entry *dst);
  54static int stale_bundle(struct dst_entry *dst);
  55static int xfrm_bundle_ok(struct xfrm_dst *xdst);
  56static void xfrm_policy_queue_process(unsigned long arg);
  57
  58static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
  59static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
  60						int dir);
  61
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  62static inline bool
  63__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
  64{
  65	const struct flowi4 *fl4 = &fl->u.ip4;
  66
  67	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
  68		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
  69		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
  70		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
  71		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
  72		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
  73}
  74
  75static inline bool
  76__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
  77{
  78	const struct flowi6 *fl6 = &fl->u.ip6;
  79
  80	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
  81		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
  82		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
  83		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
  84		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
  85		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
  86}
  87
  88bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
  89			 unsigned short family)
  90{
  91	switch (family) {
  92	case AF_INET:
  93		return __xfrm4_selector_match(sel, fl);
  94	case AF_INET6:
  95		return __xfrm6_selector_match(sel, fl);
  96	}
  97	return false;
  98}
  99
 100static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
 101{
 102	struct xfrm_policy_afinfo *afinfo;
 103
 104	if (unlikely(family >= NPROTO))
 105		return NULL;
 106	rcu_read_lock();
 107	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
 108	if (unlikely(!afinfo))
 109		rcu_read_unlock();
 110	return afinfo;
 111}
 112
 113static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
 
 114{
 115	rcu_read_unlock();
 116}
 117
 118static inline struct dst_entry *__xfrm_dst_lookup(struct net *net,
 119						  int tos, int oif,
 120						  const xfrm_address_t *saddr,
 121						  const xfrm_address_t *daddr,
 122						  int family)
 123{
 124	struct xfrm_policy_afinfo *afinfo;
 125	struct dst_entry *dst;
 126
 127	afinfo = xfrm_policy_get_afinfo(family);
 128	if (unlikely(afinfo == NULL))
 129		return ERR_PTR(-EAFNOSUPPORT);
 130
 131	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr);
 132
 133	xfrm_policy_put_afinfo(afinfo);
 134
 135	return dst;
 136}
 
 137
 138static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
 139						int tos, int oif,
 140						xfrm_address_t *prev_saddr,
 141						xfrm_address_t *prev_daddr,
 142						int family)
 143{
 144	struct net *net = xs_net(x);
 145	xfrm_address_t *saddr = &x->props.saddr;
 146	xfrm_address_t *daddr = &x->id.daddr;
 147	struct dst_entry *dst;
 148
 149	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
 150		saddr = x->coaddr;
 151		daddr = prev_daddr;
 152	}
 153	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
 154		saddr = prev_saddr;
 155		daddr = x->coaddr;
 156	}
 157
 158	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family);
 159
 160	if (!IS_ERR(dst)) {
 161		if (prev_saddr != saddr)
 162			memcpy(prev_saddr, saddr,  sizeof(*prev_saddr));
 163		if (prev_daddr != daddr)
 164			memcpy(prev_daddr, daddr,  sizeof(*prev_daddr));
 165	}
 166
 167	return dst;
 168}
 169
 170static inline unsigned long make_jiffies(long secs)
 171{
 172	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
 173		return MAX_SCHEDULE_TIMEOUT-1;
 174	else
 175		return secs*HZ;
 176}
 177
 178static void xfrm_policy_timer(unsigned long data)
 179{
 180	struct xfrm_policy *xp = (struct xfrm_policy *)data;
 181	unsigned long now = get_seconds();
 182	long next = LONG_MAX;
 183	int warn = 0;
 184	int dir;
 185
 186	read_lock(&xp->lock);
 187
 188	if (unlikely(xp->walk.dead))
 189		goto out;
 190
 191	dir = xfrm_policy_id2dir(xp->index);
 192
 193	if (xp->lft.hard_add_expires_seconds) {
 194		long tmo = xp->lft.hard_add_expires_seconds +
 195			xp->curlft.add_time - now;
 196		if (tmo <= 0)
 197			goto expired;
 198		if (tmo < next)
 199			next = tmo;
 200	}
 201	if (xp->lft.hard_use_expires_seconds) {
 202		long tmo = xp->lft.hard_use_expires_seconds +
 203			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
 204		if (tmo <= 0)
 205			goto expired;
 206		if (tmo < next)
 207			next = tmo;
 208	}
 209	if (xp->lft.soft_add_expires_seconds) {
 210		long tmo = xp->lft.soft_add_expires_seconds +
 211			xp->curlft.add_time - now;
 212		if (tmo <= 0) {
 213			warn = 1;
 214			tmo = XFRM_KM_TIMEOUT;
 215		}
 216		if (tmo < next)
 217			next = tmo;
 218	}
 219	if (xp->lft.soft_use_expires_seconds) {
 220		long tmo = xp->lft.soft_use_expires_seconds +
 221			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
 222		if (tmo <= 0) {
 223			warn = 1;
 224			tmo = XFRM_KM_TIMEOUT;
 225		}
 226		if (tmo < next)
 227			next = tmo;
 228	}
 229
 230	if (warn)
 231		km_policy_expired(xp, dir, 0, 0);
 232	if (next != LONG_MAX &&
 233	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
 234		xfrm_pol_hold(xp);
 235
 236out:
 237	read_unlock(&xp->lock);
 238	xfrm_pol_put(xp);
 239	return;
 240
 241expired:
 242	read_unlock(&xp->lock);
 243	if (!xfrm_policy_delete(xp, dir))
 244		km_policy_expired(xp, dir, 1, 0);
 245	xfrm_pol_put(xp);
 246}
 247
 248static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
 249{
 250	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
 251
 252	if (unlikely(pol->walk.dead))
 253		flo = NULL;
 254	else
 255		xfrm_pol_hold(pol);
 256
 257	return flo;
 258}
 259
 260static int xfrm_policy_flo_check(struct flow_cache_object *flo)
 261{
 262	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
 263
 264	return !pol->walk.dead;
 265}
 266
 267static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
 268{
 269	xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
 270}
 271
 272static const struct flow_cache_ops xfrm_policy_fc_ops = {
 273	.get = xfrm_policy_flo_get,
 274	.check = xfrm_policy_flo_check,
 275	.delete = xfrm_policy_flo_delete,
 276};
 277
 278/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 279 * SPD calls.
 280 */
 281
 282struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
 283{
 284	struct xfrm_policy *policy;
 285
 286	policy = kzalloc(sizeof(struct xfrm_policy), gfp);
 287
 288	if (policy) {
 289		write_pnet(&policy->xp_net, net);
 290		INIT_LIST_HEAD(&policy->walk.all);
 
 291		INIT_HLIST_NODE(&policy->bydst);
 292		INIT_HLIST_NODE(&policy->byidx);
 293		rwlock_init(&policy->lock);
 294		atomic_set(&policy->refcnt, 1);
 295		skb_queue_head_init(&policy->polq.hold_queue);
 296		setup_timer(&policy->timer, xfrm_policy_timer,
 297				(unsigned long)policy);
 298		setup_timer(&policy->polq.hold_timer, xfrm_policy_queue_process,
 299			    (unsigned long)policy);
 300		policy->flo.ops = &xfrm_policy_fc_ops;
 301	}
 302	return policy;
 303}
 304EXPORT_SYMBOL(xfrm_policy_alloc);
 305
 306static void xfrm_policy_destroy_rcu(struct rcu_head *head)
 307{
 308	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
 309
 310	security_xfrm_policy_free(policy->security);
 311	kfree(policy);
 312}
 313
 314/* Destroy xfrm_policy: descendant resources must be released to this moment. */
 315
 316void xfrm_policy_destroy(struct xfrm_policy *policy)
 317{
 318	BUG_ON(!policy->walk.dead);
 319
 320	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
 321		BUG();
 322
 
 323	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
 324}
 325EXPORT_SYMBOL(xfrm_policy_destroy);
 326
 327/* Rule must be locked. Release descentant resources, announce
 328 * entry dead. The rule must be unlinked from lists to the moment.
 329 */
 330
 331static void xfrm_policy_kill(struct xfrm_policy *policy)
 332{
 
 333	policy->walk.dead = 1;
 
 334
 335	atomic_inc(&policy->genid);
 336
 337	if (del_timer(&policy->polq.hold_timer))
 338		xfrm_pol_put(policy);
 339	skb_queue_purge(&policy->polq.hold_queue);
 340
 341	if (del_timer(&policy->timer))
 342		xfrm_pol_put(policy);
 343
 344	xfrm_pol_put(policy);
 345}
 346
 347static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
 348
 349static inline unsigned int idx_hash(struct net *net, u32 index)
 350{
 351	return __idx_hash(index, net->xfrm.policy_idx_hmask);
 352}
 353
 354/* calculate policy hash thresholds */
 355static void __get_hash_thresh(struct net *net,
 356			      unsigned short family, int dir,
 357			      u8 *dbits, u8 *sbits)
 358{
 359	switch (family) {
 360	case AF_INET:
 361		*dbits = net->xfrm.policy_bydst[dir].dbits4;
 362		*sbits = net->xfrm.policy_bydst[dir].sbits4;
 363		break;
 364
 365	case AF_INET6:
 366		*dbits = net->xfrm.policy_bydst[dir].dbits6;
 367		*sbits = net->xfrm.policy_bydst[dir].sbits6;
 368		break;
 369
 370	default:
 371		*dbits = 0;
 372		*sbits = 0;
 373	}
 374}
 375
 376static struct hlist_head *policy_hash_bysel(struct net *net,
 377					    const struct xfrm_selector *sel,
 378					    unsigned short family, int dir)
 379{
 380	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 381	unsigned int hash;
 382	u8 dbits;
 383	u8 sbits;
 384
 385	__get_hash_thresh(net, family, dir, &dbits, &sbits);
 386	hash = __sel_hash(sel, family, hmask, dbits, sbits);
 387
 388	return (hash == hmask + 1 ?
 389		&net->xfrm.policy_inexact[dir] :
 390		net->xfrm.policy_bydst[dir].table + hash);
 
 
 391}
 392
 393static struct hlist_head *policy_hash_direct(struct net *net,
 394					     const xfrm_address_t *daddr,
 395					     const xfrm_address_t *saddr,
 396					     unsigned short family, int dir)
 397{
 398	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 399	unsigned int hash;
 400	u8 dbits;
 401	u8 sbits;
 402
 403	__get_hash_thresh(net, family, dir, &dbits, &sbits);
 404	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
 405
 406	return net->xfrm.policy_bydst[dir].table + hash;
 
 407}
 408
 409static void xfrm_dst_hash_transfer(struct net *net,
 410				   struct hlist_head *list,
 411				   struct hlist_head *ndsttable,
 412				   unsigned int nhashmask,
 413				   int dir)
 414{
 415	struct hlist_node *tmp, *entry0 = NULL;
 416	struct xfrm_policy *pol;
 417	unsigned int h0 = 0;
 418	u8 dbits;
 419	u8 sbits;
 420
 421redo:
 422	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
 423		unsigned int h;
 424
 425		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
 426		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
 427				pol->family, nhashmask, dbits, sbits);
 428		if (!entry0) {
 429			hlist_del(&pol->bydst);
 430			hlist_add_head(&pol->bydst, ndsttable+h);
 431			h0 = h;
 432		} else {
 433			if (h != h0)
 434				continue;
 435			hlist_del(&pol->bydst);
 436			hlist_add_behind(&pol->bydst, entry0);
 437		}
 438		entry0 = &pol->bydst;
 439	}
 440	if (!hlist_empty(list)) {
 441		entry0 = NULL;
 442		goto redo;
 443	}
 444}
 445
 446static void xfrm_idx_hash_transfer(struct hlist_head *list,
 447				   struct hlist_head *nidxtable,
 448				   unsigned int nhashmask)
 449{
 450	struct hlist_node *tmp;
 451	struct xfrm_policy *pol;
 452
 453	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
 454		unsigned int h;
 455
 456		h = __idx_hash(pol->index, nhashmask);
 457		hlist_add_head(&pol->byidx, nidxtable+h);
 458	}
 459}
 460
 461static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
 462{
 463	return ((old_hmask + 1) << 1) - 1;
 464}
 465
 466static void xfrm_bydst_resize(struct net *net, int dir)
 467{
 468	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 469	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
 470	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
 471	struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
 472	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
 
 473	int i;
 474
 475	if (!ndst)
 476		return;
 477
 478	write_lock_bh(&net->xfrm.xfrm_policy_lock);
 
 
 
 
 479
 480	for (i = hmask; i >= 0; i--)
 481		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
 482
 483	net->xfrm.policy_bydst[dir].table = ndst;
 484	net->xfrm.policy_bydst[dir].hmask = nhashmask;
 485
 486	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 
 
 
 487
 488	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
 489}
 490
 491static void xfrm_byidx_resize(struct net *net, int total)
 492{
 493	unsigned int hmask = net->xfrm.policy_idx_hmask;
 494	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
 495	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
 496	struct hlist_head *oidx = net->xfrm.policy_byidx;
 497	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
 498	int i;
 499
 500	if (!nidx)
 501		return;
 502
 503	write_lock_bh(&net->xfrm.xfrm_policy_lock);
 504
 505	for (i = hmask; i >= 0; i--)
 506		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
 507
 508	net->xfrm.policy_byidx = nidx;
 509	net->xfrm.policy_idx_hmask = nhashmask;
 510
 511	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 512
 513	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
 514}
 515
 516static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
 517{
 518	unsigned int cnt = net->xfrm.policy_count[dir];
 519	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 520
 521	if (total)
 522		*total += cnt;
 523
 524	if ((hmask + 1) < xfrm_policy_hashmax &&
 525	    cnt > hmask)
 526		return 1;
 527
 528	return 0;
 529}
 530
 531static inline int xfrm_byidx_should_resize(struct net *net, int total)
 532{
 533	unsigned int hmask = net->xfrm.policy_idx_hmask;
 534
 535	if ((hmask + 1) < xfrm_policy_hashmax &&
 536	    total > hmask)
 537		return 1;
 538
 539	return 0;
 540}
 541
 542void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
 543{
 544	read_lock_bh(&net->xfrm.xfrm_policy_lock);
 545	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
 546	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
 547	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
 548	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
 549	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
 550	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
 551	si->spdhcnt = net->xfrm.policy_idx_hmask;
 552	si->spdhmcnt = xfrm_policy_hashmax;
 553	read_unlock_bh(&net->xfrm.xfrm_policy_lock);
 554}
 555EXPORT_SYMBOL(xfrm_spd_getinfo);
 556
 557static DEFINE_MUTEX(hash_resize_mutex);
 558static void xfrm_hash_resize(struct work_struct *work)
 559{
 560	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
 561	int dir, total;
 562
 563	mutex_lock(&hash_resize_mutex);
 564
 565	total = 0;
 566	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
 567		if (xfrm_bydst_should_resize(net, dir, &total))
 568			xfrm_bydst_resize(net, dir);
 569	}
 570	if (xfrm_byidx_should_resize(net, total))
 571		xfrm_byidx_resize(net, total);
 572
 573	mutex_unlock(&hash_resize_mutex);
 574}
 575
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 576static void xfrm_hash_rebuild(struct work_struct *work)
 577{
 578	struct net *net = container_of(work, struct net,
 579				       xfrm.policy_hthresh.work);
 580	unsigned int hmask;
 581	struct xfrm_policy *pol;
 582	struct xfrm_policy *policy;
 583	struct hlist_head *chain;
 584	struct hlist_head *odst;
 585	struct hlist_node *newpos;
 586	int i;
 587	int dir;
 588	unsigned seq;
 589	u8 lbits4, rbits4, lbits6, rbits6;
 590
 591	mutex_lock(&hash_resize_mutex);
 592
 593	/* read selector prefixlen thresholds */
 594	do {
 595		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
 596
 597		lbits4 = net->xfrm.policy_hthresh.lbits4;
 598		rbits4 = net->xfrm.policy_hthresh.rbits4;
 599		lbits6 = net->xfrm.policy_hthresh.lbits6;
 600		rbits6 = net->xfrm.policy_hthresh.rbits6;
 601	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
 602
 603	write_lock_bh(&net->xfrm.xfrm_policy_lock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 604
 605	/* reset the bydst and inexact table in all directions */
 606	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
 607		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
 
 
 
 
 
 
 
 
 608		hmask = net->xfrm.policy_bydst[dir].hmask;
 609		odst = net->xfrm.policy_bydst[dir].table;
 610		for (i = hmask; i >= 0; i--)
 611			INIT_HLIST_HEAD(odst + i);
 
 
 612		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
 613			/* dir out => dst = remote, src = local */
 614			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
 615			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
 616			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
 617			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
 618		} else {
 619			/* dir in/fwd => dst = local, src = remote */
 620			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
 621			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
 622			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
 623			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
 624		}
 625	}
 626
 627	/* re-insert all policies by order of creation */
 628	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
 
 
 
 
 
 
 
 629		newpos = NULL;
 630		chain = policy_hash_bysel(net, &policy->selector,
 631					  policy->family,
 632					  xfrm_policy_id2dir(policy->index));
 
 
 
 
 
 
 
 633		hlist_for_each_entry(pol, chain, bydst) {
 634			if (policy->priority >= pol->priority)
 635				newpos = &pol->bydst;
 636			else
 637				break;
 638		}
 639		if (newpos)
 640			hlist_add_behind(&policy->bydst, newpos);
 641		else
 642			hlist_add_head(&policy->bydst, chain);
 643	}
 644
 645	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 
 
 
 646
 647	mutex_unlock(&hash_resize_mutex);
 648}
 649
 650void xfrm_policy_hash_rebuild(struct net *net)
 651{
 652	schedule_work(&net->xfrm.policy_hthresh.work);
 653}
 654EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
 655
 656/* Generate new index... KAME seems to generate them ordered by cost
 657 * of an absolute inpredictability of ordering of rules. This will not pass. */
 658static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
 659{
 660	static u32 idx_generator;
 661
 662	for (;;) {
 663		struct hlist_head *list;
 664		struct xfrm_policy *p;
 665		u32 idx;
 666		int found;
 667
 668		if (!index) {
 669			idx = (idx_generator | dir);
 670			idx_generator += 8;
 671		} else {
 672			idx = index;
 673			index = 0;
 674		}
 675
 676		if (idx == 0)
 677			idx = 8;
 678		list = net->xfrm.policy_byidx + idx_hash(net, idx);
 679		found = 0;
 680		hlist_for_each_entry(p, list, byidx) {
 681			if (p->index == idx) {
 682				found = 1;
 683				break;
 684			}
 685		}
 686		if (!found)
 687			return idx;
 688	}
 689}
 690
 691static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
 692{
 693	u32 *p1 = (u32 *) s1;
 694	u32 *p2 = (u32 *) s2;
 695	int len = sizeof(struct xfrm_selector) / sizeof(u32);
 696	int i;
 697
 698	for (i = 0; i < len; i++) {
 699		if (p1[i] != p2[i])
 700			return 1;
 701	}
 702
 703	return 0;
 704}
 705
 706static void xfrm_policy_requeue(struct xfrm_policy *old,
 707				struct xfrm_policy *new)
 708{
 709	struct xfrm_policy_queue *pq = &old->polq;
 710	struct sk_buff_head list;
 711
 712	if (skb_queue_empty(&pq->hold_queue))
 713		return;
 714
 715	__skb_queue_head_init(&list);
 716
 717	spin_lock_bh(&pq->hold_queue.lock);
 718	skb_queue_splice_init(&pq->hold_queue, &list);
 719	if (del_timer(&pq->hold_timer))
 720		xfrm_pol_put(old);
 721	spin_unlock_bh(&pq->hold_queue.lock);
 722
 723	pq = &new->polq;
 724
 725	spin_lock_bh(&pq->hold_queue.lock);
 726	skb_queue_splice(&list, &pq->hold_queue);
 727	pq->timeout = XFRM_QUEUE_TMO_MIN;
 728	if (!mod_timer(&pq->hold_timer, jiffies))
 729		xfrm_pol_hold(new);
 730	spin_unlock_bh(&pq->hold_queue.lock);
 731}
 732
 733static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
 734				   struct xfrm_policy *pol)
 
 
 
 
 
 735{
 736	u32 mark = policy->mark.v & policy->mark.m;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 737
 738	if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
 739		return true;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 740
 741	if ((mark & pol->mark.m) == pol->mark.v &&
 742	    policy->priority == pol->priority)
 743		return true;
 
 744
 745	return false;
 
 
 
 746}
 747
 748int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
 
 
 749{
 750	struct net *net = xp_net(policy);
 751	struct xfrm_policy *pol;
 752	struct xfrm_policy *delpol;
 753	struct hlist_head *chain;
 754	struct hlist_node *newpos;
 755
 756	write_lock_bh(&net->xfrm.xfrm_policy_lock);
 757	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
 758	delpol = NULL;
 759	newpos = NULL;
 760	hlist_for_each_entry(pol, chain, bydst) {
 761		if (pol->type == policy->type &&
 
 762		    !selector_cmp(&pol->selector, &policy->selector) &&
 763		    xfrm_policy_mark_match(policy, pol) &&
 764		    xfrm_sec_ctx_match(pol->security, policy->security) &&
 765		    !WARN_ON(delpol)) {
 766			if (excl) {
 767				write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 768				return -EEXIST;
 769			}
 770			delpol = pol;
 771			if (policy->priority > pol->priority)
 772				continue;
 773		} else if (policy->priority >= pol->priority) {
 774			newpos = &pol->bydst;
 775			continue;
 776		}
 777		if (delpol)
 778			break;
 779	}
 780	if (newpos)
 781		hlist_add_behind(&policy->bydst, newpos);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 782	else
 783		hlist_add_head(&policy->bydst, chain);
 
 
 
 
 
 
 784	__xfrm_policy_link(policy, dir);
 785	atomic_inc(&net->xfrm.flow_cache_genid);
 786
 787	/* After previous checking, family can either be AF_INET or AF_INET6 */
 788	if (policy->family == AF_INET)
 789		rt_genid_bump_ipv4(net);
 790	else
 791		rt_genid_bump_ipv6(net);
 792
 793	if (delpol) {
 794		xfrm_policy_requeue(delpol, policy);
 795		__xfrm_policy_unlink(delpol, dir);
 796	}
 797	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
 798	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
 799	policy->curlft.add_time = get_seconds();
 800	policy->curlft.use_time = 0;
 801	if (!mod_timer(&policy->timer, jiffies + HZ))
 802		xfrm_pol_hold(policy);
 803	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 804
 805	if (delpol)
 806		xfrm_policy_kill(delpol);
 807	else if (xfrm_bydst_should_resize(net, dir, NULL))
 808		schedule_work(&net->xfrm.policy_hash_work);
 809
 810	return 0;
 811}
 812EXPORT_SYMBOL(xfrm_policy_insert);
 813
 814struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
 815					  int dir, struct xfrm_selector *sel,
 816					  struct xfrm_sec_ctx *ctx, int delete,
 817					  int *err)
 818{
 819	struct xfrm_policy *pol, *ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 820	struct hlist_head *chain;
 821
 822	*err = 0;
 823	write_lock_bh(&net->xfrm.xfrm_policy_lock);
 824	chain = policy_hash_bysel(net, sel, sel->family, dir);
 825	ret = NULL;
 826	hlist_for_each_entry(pol, chain, bydst) {
 827		if (pol->type == type &&
 828		    (mark & pol->mark.m) == pol->mark.v &&
 829		    !selector_cmp(sel, &pol->selector) &&
 830		    xfrm_sec_ctx_match(ctx, pol->security)) {
 831			xfrm_pol_hold(pol);
 832			if (delete) {
 833				*err = security_xfrm_policy_delete(
 834								pol->security);
 835				if (*err) {
 836					write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 837					return pol;
 838				}
 839				__xfrm_policy_unlink(pol, dir);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 840			}
 841			ret = pol;
 842			break;
 843		}
 
 844	}
 845	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 846
 847	if (ret && delete)
 848		xfrm_policy_kill(ret);
 
 
 849	return ret;
 850}
 851EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
 852
 853struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
 854				     int dir, u32 id, int delete, int *err)
 
 855{
 856	struct xfrm_policy *pol, *ret;
 857	struct hlist_head *chain;
 858
 859	*err = -ENOENT;
 860	if (xfrm_policy_id2dir(id) != dir)
 861		return NULL;
 862
 863	*err = 0;
 864	write_lock_bh(&net->xfrm.xfrm_policy_lock);
 865	chain = net->xfrm.policy_byidx + idx_hash(net, id);
 866	ret = NULL;
 867	hlist_for_each_entry(pol, chain, byidx) {
 868		if (pol->type == type && pol->index == id &&
 869		    (mark & pol->mark.m) == pol->mark.v) {
 870			xfrm_pol_hold(pol);
 871			if (delete) {
 872				*err = security_xfrm_policy_delete(
 873								pol->security);
 874				if (*err) {
 875					write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 876					return pol;
 877				}
 878				__xfrm_policy_unlink(pol, dir);
 879			}
 880			ret = pol;
 881			break;
 882		}
 883	}
 884	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 885
 886	if (ret && delete)
 887		xfrm_policy_kill(ret);
 888	return ret;
 889}
 890EXPORT_SYMBOL(xfrm_policy_byid);
 891
 892#ifdef CONFIG_SECURITY_NETWORK_XFRM
 893static inline int
 894xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
 895{
 896	int dir, err = 0;
 
 897
 898	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
 899		struct xfrm_policy *pol;
 900		int i;
 
 
 901
 902		hlist_for_each_entry(pol,
 903				     &net->xfrm.policy_inexact[dir], bydst) {
 904			if (pol->type != type)
 905				continue;
 906			err = security_xfrm_policy_delete(pol->security);
 907			if (err) {
 908				xfrm_audit_policy_delete(pol, 0, task_valid);
 909				return err;
 910			}
 911		}
 912		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
 913			hlist_for_each_entry(pol,
 914					     net->xfrm.policy_bydst[dir].table + i,
 915					     bydst) {
 916				if (pol->type != type)
 917					continue;
 918				err = security_xfrm_policy_delete(
 919								pol->security);
 920				if (err) {
 921					xfrm_audit_policy_delete(pol, 0,
 922								 task_valid);
 923					return err;
 924				}
 925			}
 
 
 
 
 
 
 
 926		}
 927	}
 928	return err;
 929}
 930#else
 931static inline int
 932xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
 933{
 934	return 0;
 935}
 
 
 
 
 
 
 
 936#endif
 937
 938int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
 939{
 940	int dir, err = 0, cnt = 0;
 
 941
 942	write_lock_bh(&net->xfrm.xfrm_policy_lock);
 943
 944	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
 945	if (err)
 946		goto out;
 947
 948	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
 949		struct xfrm_policy *pol;
 950		int i;
 
 951
 952	again1:
 953		hlist_for_each_entry(pol,
 954				     &net->xfrm.policy_inexact[dir], bydst) {
 955			if (pol->type != type)
 956				continue;
 957			__xfrm_policy_unlink(pol, dir);
 958			write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 959			cnt++;
 960
 961			xfrm_audit_policy_delete(pol, 1, task_valid);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 962
 963			xfrm_policy_kill(pol);
 
 
 
 
 964
 965			write_lock_bh(&net->xfrm.xfrm_policy_lock);
 966			goto again1;
 967		}
 968
 969		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
 970	again2:
 971			hlist_for_each_entry(pol,
 972					     net->xfrm.policy_bydst[dir].table + i,
 973					     bydst) {
 974				if (pol->type != type)
 975					continue;
 976				__xfrm_policy_unlink(pol, dir);
 977				write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 978				cnt++;
 979
 980				xfrm_audit_policy_delete(pol, 1, task_valid);
 981				xfrm_policy_kill(pol);
 
 
 982
 983				write_lock_bh(&net->xfrm.xfrm_policy_lock);
 984				goto again2;
 985			}
 986		}
 987
 
 
 
 
 
 
 
 
 988	}
 989	if (!cnt)
 
 
 990		err = -ESRCH;
 991out:
 992	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 993	return err;
 994}
 995EXPORT_SYMBOL(xfrm_policy_flush);
 996
 997int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
 998		     int (*func)(struct xfrm_policy *, int, int, void*),
 999		     void *data)
1000{
1001	struct xfrm_policy *pol;
1002	struct xfrm_policy_walk_entry *x;
1003	int error = 0;
1004
1005	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
1006	    walk->type != XFRM_POLICY_TYPE_ANY)
1007		return -EINVAL;
1008
1009	if (list_empty(&walk->walk.all) && walk->seq != 0)
1010		return 0;
1011
1012	write_lock_bh(&net->xfrm.xfrm_policy_lock);
1013	if (list_empty(&walk->walk.all))
1014		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
1015	else
1016		x = list_first_entry(&walk->walk.all,
1017				     struct xfrm_policy_walk_entry, all);
1018
1019	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1020		if (x->dead)
1021			continue;
1022		pol = container_of(x, struct xfrm_policy, walk);
1023		if (walk->type != XFRM_POLICY_TYPE_ANY &&
1024		    walk->type != pol->type)
1025			continue;
1026		error = func(pol, xfrm_policy_id2dir(pol->index),
1027			     walk->seq, data);
1028		if (error) {
1029			list_move_tail(&walk->walk.all, &x->all);
1030			goto out;
1031		}
1032		walk->seq++;
1033	}
1034	if (walk->seq == 0) {
1035		error = -ENOENT;
1036		goto out;
1037	}
1038	list_del_init(&walk->walk.all);
1039out:
1040	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
1041	return error;
1042}
1043EXPORT_SYMBOL(xfrm_policy_walk);
1044
1045void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1046{
1047	INIT_LIST_HEAD(&walk->walk.all);
1048	walk->walk.dead = 1;
1049	walk->type = type;
1050	walk->seq = 0;
1051}
1052EXPORT_SYMBOL(xfrm_policy_walk_init);
1053
1054void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1055{
1056	if (list_empty(&walk->walk.all))
1057		return;
1058
1059	write_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
1060	list_del(&walk->walk.all);
1061	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
1062}
1063EXPORT_SYMBOL(xfrm_policy_walk_done);
1064
1065/*
1066 * Find policy to apply to this flow.
1067 *
1068 * Returns 0 if policy found, else an -errno.
1069 */
1070static int xfrm_policy_match(const struct xfrm_policy *pol,
1071			     const struct flowi *fl,
1072			     u8 type, u16 family, int dir)
1073{
1074	const struct xfrm_selector *sel = &pol->selector;
1075	int ret = -ESRCH;
1076	bool match;
1077
1078	if (pol->family != family ||
 
1079	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1080	    pol->type != type)
1081		return ret;
1082
1083	match = xfrm_selector_match(sel, fl, family);
1084	if (match)
1085		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
1086						  dir);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1087
1088	return ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1089}
1090
1091static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
1092						     const struct flowi *fl,
1093						     u16 family, u8 dir)
 
1094{
1095	int err;
 
 
1096	struct xfrm_policy *pol, *ret;
1097	const xfrm_address_t *daddr, *saddr;
1098	struct hlist_head *chain;
1099	u32 priority = ~0U;
 
1100
1101	daddr = xfrm_flowi_daddr(fl, family);
1102	saddr = xfrm_flowi_saddr(fl, family);
1103	if (unlikely(!daddr || !saddr))
1104		return NULL;
1105
1106	read_lock_bh(&net->xfrm.xfrm_policy_lock);
1107	chain = policy_hash_direct(net, daddr, saddr, family, dir);
 
 
 
 
 
1108	ret = NULL;
1109	hlist_for_each_entry(pol, chain, bydst) {
1110		err = xfrm_policy_match(pol, fl, type, family, dir);
1111		if (err) {
1112			if (err == -ESRCH)
1113				continue;
1114			else {
1115				ret = ERR_PTR(err);
1116				goto fail;
1117			}
1118		} else {
1119			ret = pol;
1120			priority = ret->priority;
1121			break;
1122		}
1123	}
1124	chain = &net->xfrm.policy_inexact[dir];
1125	hlist_for_each_entry(pol, chain, bydst) {
1126		if ((pol->priority >= priority) && ret)
1127			break;
 
 
 
1128
1129		err = xfrm_policy_match(pol, fl, type, family, dir);
1130		if (err) {
1131			if (err == -ESRCH)
1132				continue;
1133			else {
1134				ret = ERR_PTR(err);
1135				goto fail;
1136			}
1137		} else {
1138			ret = pol;
1139			break;
1140		}
1141	}
1142
1143	xfrm_pol_hold(ret);
 
 
 
 
 
1144fail:
1145	read_unlock_bh(&net->xfrm.xfrm_policy_lock);
1146
1147	return ret;
1148}
1149
1150static struct xfrm_policy *
1151__xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
 
1152{
1153#ifdef CONFIG_XFRM_SUB_POLICY
1154	struct xfrm_policy *pol;
1155
1156	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
 
1157	if (pol != NULL)
1158		return pol;
1159#endif
1160	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
1161}
1162
1163static int flow_to_policy_dir(int dir)
1164{
1165	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
1166	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
1167	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
1168		return dir;
1169
1170	switch (dir) {
1171	default:
1172	case FLOW_DIR_IN:
1173		return XFRM_POLICY_IN;
1174	case FLOW_DIR_OUT:
1175		return XFRM_POLICY_OUT;
1176	case FLOW_DIR_FWD:
1177		return XFRM_POLICY_FWD;
1178	}
1179}
1180
1181static struct flow_cache_object *
1182xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
1183		   u8 dir, struct flow_cache_object *old_obj, void *ctx)
1184{
1185	struct xfrm_policy *pol;
1186
1187	if (old_obj)
1188		xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));
1189
1190	pol = __xfrm_policy_lookup(net, fl, family, flow_to_policy_dir(dir));
1191	if (IS_ERR_OR_NULL(pol))
1192		return ERR_CAST(pol);
1193
1194	/* Resolver returns two references:
1195	 * one for cache and one for caller of flow_cache_lookup() */
1196	xfrm_pol_hold(pol);
1197
1198	return &pol->flo;
1199}
1200
1201static inline int policy_to_flow_dir(int dir)
1202{
1203	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
1204	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
1205	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
1206		return dir;
1207	switch (dir) {
1208	default:
1209	case XFRM_POLICY_IN:
1210		return FLOW_DIR_IN;
1211	case XFRM_POLICY_OUT:
1212		return FLOW_DIR_OUT;
1213	case XFRM_POLICY_FWD:
1214		return FLOW_DIR_FWD;
1215	}
1216}
1217
1218static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
1219						 const struct flowi *fl)
1220{
1221	struct xfrm_policy *pol;
1222	struct net *net = sock_net(sk);
1223
1224	rcu_read_lock();
1225	read_lock_bh(&net->xfrm.xfrm_policy_lock);
1226	pol = rcu_dereference(sk->sk_policy[dir]);
1227	if (pol != NULL) {
1228		bool match = xfrm_selector_match(&pol->selector, fl,
1229						 sk->sk_family);
1230		int err = 0;
1231
1232		if (match) {
1233			if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
1234				pol = NULL;
1235				goto out;
1236			}
1237			err = security_xfrm_policy_lookup(pol->security,
1238						      fl->flowi_secid,
1239						      policy_to_flow_dir(dir));
1240			if (!err)
1241				xfrm_pol_hold(pol);
1242			else if (err == -ESRCH)
1243				pol = NULL;
1244			else
1245				pol = ERR_PTR(err);
1246		} else
1247			pol = NULL;
1248	}
1249out:
1250	read_unlock_bh(&net->xfrm.xfrm_policy_lock);
1251	rcu_read_unlock();
1252	return pol;
1253}
1254
1255static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
1256{
1257	struct net *net = xp_net(pol);
1258
1259	list_add(&pol->walk.all, &net->xfrm.policy_all);
1260	net->xfrm.policy_count[dir]++;
1261	xfrm_pol_hold(pol);
1262}
1263
1264static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
1265						int dir)
1266{
1267	struct net *net = xp_net(pol);
1268
1269	if (list_empty(&pol->walk.all))
1270		return NULL;
1271
1272	/* Socket policies are not hashed. */
1273	if (!hlist_unhashed(&pol->bydst)) {
1274		hlist_del(&pol->bydst);
1275		hlist_del(&pol->byidx);
1276	}
1277
1278	list_del_init(&pol->walk.all);
1279	net->xfrm.policy_count[dir]--;
1280
1281	return pol;
1282}
1283
1284static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
1285{
1286	__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
1287}
1288
1289static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
1290{
1291	__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
1292}
1293
1294int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
1295{
1296	struct net *net = xp_net(pol);
1297
1298	write_lock_bh(&net->xfrm.xfrm_policy_lock);
1299	pol = __xfrm_policy_unlink(pol, dir);
1300	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
1301	if (pol) {
1302		xfrm_policy_kill(pol);
1303		return 0;
1304	}
1305	return -ENOENT;
1306}
1307EXPORT_SYMBOL(xfrm_policy_delete);
1308
1309int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1310{
1311	struct net *net = xp_net(pol);
1312	struct xfrm_policy *old_pol;
1313
1314#ifdef CONFIG_XFRM_SUB_POLICY
1315	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
1316		return -EINVAL;
1317#endif
1318
1319	write_lock_bh(&net->xfrm.xfrm_policy_lock);
1320	old_pol = rcu_dereference_protected(sk->sk_policy[dir],
1321				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
1322	if (pol) {
1323		pol->curlft.add_time = get_seconds();
1324		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
1325		xfrm_sk_policy_link(pol, dir);
1326	}
1327	rcu_assign_pointer(sk->sk_policy[dir], pol);
1328	if (old_pol) {
1329		if (pol)
1330			xfrm_policy_requeue(old_pol, pol);
1331
1332		/* Unlinking always succeeds. This is the only function
1333		 * allowed to delete or replace a socket policy.
1334		 */
1335		xfrm_sk_policy_unlink(old_pol, dir);
1336	}
1337	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
1338
1339	if (old_pol) {
1340		xfrm_policy_kill(old_pol);
1341	}
1342	return 0;
1343}
1344
1345static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
1346{
1347	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
1348	struct net *net = xp_net(old);
1349
1350	if (newp) {
1351		newp->selector = old->selector;
1352		if (security_xfrm_policy_clone(old->security,
1353					       &newp->security)) {
1354			kfree(newp);
1355			return NULL;  /* ENOMEM */
1356		}
1357		newp->lft = old->lft;
1358		newp->curlft = old->curlft;
1359		newp->mark = old->mark;
1360		newp->action = old->action;
1361		newp->flags = old->flags;
1362		newp->xfrm_nr = old->xfrm_nr;
1363		newp->index = old->index;
1364		newp->type = old->type;
1365		memcpy(newp->xfrm_vec, old->xfrm_vec,
1366		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
1367		write_lock_bh(&net->xfrm.xfrm_policy_lock);
1368		xfrm_sk_policy_link(newp, dir);
1369		write_unlock_bh(&net->xfrm.xfrm_policy_lock);
1370		xfrm_pol_put(newp);
1371	}
1372	return newp;
1373}
1374
1375int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
1376{
1377	const struct xfrm_policy *p;
1378	struct xfrm_policy *np;
1379	int i, ret = 0;
1380
1381	rcu_read_lock();
1382	for (i = 0; i < 2; i++) {
1383		p = rcu_dereference(osk->sk_policy[i]);
1384		if (p) {
1385			np = clone_policy(p, i);
1386			if (unlikely(!np)) {
1387				ret = -ENOMEM;
1388				break;
1389			}
1390			rcu_assign_pointer(sk->sk_policy[i], np);
1391		}
1392	}
1393	rcu_read_unlock();
1394	return ret;
1395}
1396
1397static int
1398xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
1399	       xfrm_address_t *remote, unsigned short family)
1400{
1401	int err;
1402	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1403
1404	if (unlikely(afinfo == NULL))
1405		return -EINVAL;
1406	err = afinfo->get_saddr(net, oif, local, remote);
1407	xfrm_policy_put_afinfo(afinfo);
1408	return err;
1409}
1410
1411/* Resolve list of templates for the flow, given policy. */
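/*
 * A state is looked up for each template in the policy.  For tunnel and
 * BEET mode templates the endpoint addresses come from the template
 * itself; if the template source address is unspecified, the af-specific
 * get_saddr() picks one.  A missing state for a non-optional template
 * aborts resolution (usually with -EAGAIN, so the key manager can be
 * asked to negotiate it), and any states already acquired are released.
 */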
1412
1413static int
1414xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
1415		      struct xfrm_state **xfrm, unsigned short family)
1416{
1417	struct net *net = xp_net(policy);
1418	int nx;
1419	int i, error;
1420	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
1421	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
1422	xfrm_address_t tmp;
1423
1424	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
1425		struct xfrm_state *x;
1426		xfrm_address_t *remote = daddr;
1427		xfrm_address_t *local  = saddr;
1428		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
1429
1430		if (tmpl->mode == XFRM_MODE_TUNNEL ||
1431		    tmpl->mode == XFRM_MODE_BEET) {
1432			remote = &tmpl->id.daddr;
1433			local = &tmpl->saddr;
1434			if (xfrm_addr_any(local, tmpl->encap_family)) {
1435				error = xfrm_get_saddr(net, fl->flowi_oif,
1436						       &tmp, remote,
1437						       tmpl->encap_family);
1438				if (error)
1439					goto fail;
1440				local = &tmp;
1441			}
1442		}
1443
1444		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
1445
1446		if (x && x->km.state == XFRM_STATE_VALID) {
1447			xfrm[nx++] = x;
1448			daddr = remote;
1449			saddr = local;
1450			continue;
1451		}
1452		if (x) {
1453			error = (x->km.state == XFRM_STATE_ERROR ?
1454				 -EINVAL : -EAGAIN);
1455			xfrm_state_put(x);
1456		} else if (error == -ESRCH) {
1457			error = -EAGAIN;
1458		}
1459
1460		if (!tmpl->optional)
1461			goto fail;
1462	}
1463	return nx;
1464
1465fail:
1466	for (nx--; nx >= 0; nx--)
1467		xfrm_state_put(xfrm[nx]);
1468	return error;
1469}
1470
1471static int
1472xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
1473		  struct xfrm_state **xfrm, unsigned short family)
1474{
1475	struct xfrm_state *tp[XFRM_MAX_DEPTH];
1476	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
1477	int cnx = 0;
1478	int error;
1479	int ret;
1480	int i;
1481
1482	for (i = 0; i < npols; i++) {
1483		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
1484			error = -ENOBUFS;
1485			goto fail;
1486		}
1487
1488		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
1489		if (ret < 0) {
1490			error = ret;
1491			goto fail;
1492		} else
1493			cnx += ret;
1494	}
1495
1496	/* found states are sorted for outbound processing */
1497	if (npols > 1)
1498		xfrm_state_sort(xfrm, tpp, cnx, family);
1499
1500	return cnx;
1501
1502 fail:
1503	for (cnx--; cnx >= 0; cnx--)
1504		xfrm_state_put(tpp[cnx]);
1505	return error;
1506
1507}
1508
1509/* Check that the bundle accepts the flow and its components are
1510 * still valid.
1511 */
1512
1513static inline int xfrm_get_tos(const struct flowi *fl, int family)
1514{
1515	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1516	int tos;
1517
1518	if (!afinfo)
1519		return -EINVAL;
1520
1521	tos = afinfo->get_tos(fl);
1522
1523	xfrm_policy_put_afinfo(afinfo);
1524
1525	return tos;
1526}
1527
1528static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
1529{
1530	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1531	struct dst_entry *dst = &xdst->u.dst;
1532
1533	if (xdst->route == NULL) {
1534		/* Dummy bundle - if it has xfrms, we were not
1535		 * able to build the bundle because template resolution
1536		 * failed. It means we need to retry resolving. */
1537		if (xdst->num_xfrms > 0)
1538			return NULL;
1539	} else if (dst->flags & DST_XFRM_QUEUE) {
1540		return NULL;
1541	} else {
1542		/* Real bundle */
1543		if (stale_bundle(dst))
1544			return NULL;
1545	}
1546
1547	dst_hold(dst);
1548	return flo;
1549}
1550
1551static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
1552{
1553	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1554	struct dst_entry *dst = &xdst->u.dst;
1555
1556	if (!xdst->route)
1557		return 0;
1558	if (stale_bundle(dst))
1559		return 0;
1560
1561	return 1;
1562}
1563
1564static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
1565{
1566	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1567	struct dst_entry *dst = &xdst->u.dst;
1568
1569	dst_free(dst);
1570}
1571
1572static const struct flow_cache_ops xfrm_bundle_fc_ops = {
1573	.get = xfrm_bundle_flo_get,
1574	.check = xfrm_bundle_flo_check,
1575	.delete = xfrm_bundle_flo_delete,
1576};
1577
1578static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1579{
1580	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1581	struct dst_ops *dst_ops;
1582	struct xfrm_dst *xdst;
1583
1584	if (!afinfo)
1585		return ERR_PTR(-EINVAL);
1586
1587	switch (family) {
1588	case AF_INET:
1589		dst_ops = &net->xfrm.xfrm4_dst_ops;
1590		break;
1591#if IS_ENABLED(CONFIG_IPV6)
1592	case AF_INET6:
1593		dst_ops = &net->xfrm.xfrm6_dst_ops;
1594		break;
1595#endif
1596	default:
1597		BUG();
1598	}
1599	xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0);
1600
1601	if (likely(xdst)) {
1602		struct dst_entry *dst = &xdst->u.dst;
1603
1604		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
1605		xdst->flo.ops = &xfrm_bundle_fc_ops;
1606	} else
1607		xdst = ERR_PTR(-ENOBUFS);
1608
1609	xfrm_policy_put_afinfo(afinfo);
1610
1611	return xdst;
1612}
1613
1614static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
1615				 int nfheader_len)
1616{
1617	struct xfrm_policy_afinfo *afinfo =
1618		xfrm_policy_get_afinfo(dst->ops->family);
1619	int err;
1620
1621	if (!afinfo)
1622		return -EINVAL;
1623
1624	err = afinfo->init_path(path, dst, nfheader_len);
1625
1626	xfrm_policy_put_afinfo(afinfo);
1627
1628	return err;
1629}
1630
1631static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
1632				const struct flowi *fl)
1633{
1634	struct xfrm_policy_afinfo *afinfo =
1635		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
1636	int err;
1637
1638	if (!afinfo)
1639		return -EINVAL;
1640
1641	err = afinfo->fill_dst(xdst, dev, fl);
1642
1643	xfrm_policy_put_afinfo(afinfo);
1644
1645	return err;
1646}
1647
1648
1649 /* Allocate a chain of dst_entry's, attach known xfrm's, calculate
1650  * all the metrics... In short, bundle a bundle.
1651  */
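/*
 * The resulting bundle is a chain of xfrm_dst entries linked via ->child:
 * dst0 is the outermost transformation and the final ->child is the plain
 * route (also recorded as dst0->path).  Each level keeps the route it was
 * built on in xdst->route; header/trailer space and the path MTU are
 * accumulated while walking the chain.
 */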
1652
1653static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1654					    struct xfrm_state **xfrm, int nx,
1655					    const struct flowi *fl,
1656					    struct dst_entry *dst)
1657{
1658	struct net *net = xp_net(policy);
1659	unsigned long now = jiffies;
1660	struct net_device *dev;
1661	struct xfrm_mode *inner_mode;
1662	struct dst_entry *dst_prev = NULL;
1663	struct dst_entry *dst0 = NULL;
1664	int i = 0;
1665	int err;
1666	int header_len = 0;
1667	int nfheader_len = 0;
1668	int trailer_len = 0;
1669	int tos;
1670	int family = policy->selector.family;
1671	xfrm_address_t saddr, daddr;
1672
1673	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
1674
1675	tos = xfrm_get_tos(fl, family);
1676	err = tos;
1677	if (tos < 0)
1678		goto put_states;
1679
1680	dst_hold(dst);
1681
1682	for (; i < nx; i++) {
1683		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
1684		struct dst_entry *dst1 = &xdst->u.dst;
1685
1686		err = PTR_ERR(xdst);
1687		if (IS_ERR(xdst)) {
1688			dst_release(dst);
1689			goto put_states;
1690		}
1691
1692		if (xfrm[i]->sel.family == AF_UNSPEC) {
1693			inner_mode = xfrm_ip2inner_mode(xfrm[i],
1694							xfrm_af2proto(family));
1695			if (!inner_mode) {
1696				err = -EAFNOSUPPORT;
1697				dst_release(dst);
1698				goto put_states;
1699			}
1700		} else
1701			inner_mode = xfrm[i]->inner_mode;
1702
1703		if (!dst_prev)
1704			dst0 = dst1;
1705		else {
1706			dst_prev->child = dst_clone(dst1);
1707			dst1->flags |= DST_NOHASH;
1708		}
1709
1710		xdst->route = dst;
1711		dst_copy_metrics(dst1, dst);
1712
1713		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
1714			family = xfrm[i]->props.family;
1715			dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
1716					      &saddr, &daddr, family);
1717			err = PTR_ERR(dst);
1718			if (IS_ERR(dst))
1719				goto put_states;
1720		} else
1721			dst_hold(dst);
1722
1723		dst1->xfrm = xfrm[i];
1724		xdst->xfrm_genid = xfrm[i]->genid;
1725
1726		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
1727		dst1->flags |= DST_HOST;
1728		dst1->lastuse = now;
1729
1730		dst1->input = dst_discard;
1731		dst1->output = inner_mode->afinfo->output;
1732
1733		dst1->next = dst_prev;
1734		dst_prev = dst1;
1735
1736		header_len += xfrm[i]->props.header_len;
1737		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
1738			nfheader_len += xfrm[i]->props.header_len;
1739		trailer_len += xfrm[i]->props.trailer_len;
1740	}
1741
1742	dst_prev->child = dst;
1743	dst0->path = dst;
1744
1745	err = -ENODEV;
1746	dev = dst->dev;
1747	if (!dev)
1748		goto free_dst;
1749
1750	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
1751	xfrm_init_pmtu(dst_prev);
1752
1753	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
1754		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
1755
1756		err = xfrm_fill_dst(xdst, dev, fl);
1757		if (err)
1758			goto free_dst;
1759
1760		dst_prev->header_len = header_len;
1761		dst_prev->trailer_len = trailer_len;
1762		header_len -= xdst->u.dst.xfrm->props.header_len;
1763		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
1764	}
1765
1766out:
1767	return dst0;
1768
1769put_states:
1770	for (; i < nx; i++)
1771		xfrm_state_put(xfrm[i]);
1772free_dst:
1773	if (dst0)
1774		dst_free(dst0);
1775	dst0 = ERR_PTR(err);
1776	goto out;
1777}
1778
1779#ifdef CONFIG_XFRM_SUB_POLICY
1780static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
1781{
1782	if (!*target) {
1783		*target = kmalloc(size, GFP_ATOMIC);
1784		if (!*target)
1785			return -ENOMEM;
1786	}
1787
1788	memcpy(*target, src, size);
1789	return 0;
1790}
1791#endif
1792
1793static int xfrm_dst_update_parent(struct dst_entry *dst,
1794				  const struct xfrm_selector *sel)
1795{
1796#ifdef CONFIG_XFRM_SUB_POLICY
1797	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1798	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
1799				   sel, sizeof(*sel));
1800#else
1801	return 0;
1802#endif
1803}
1804
1805static int xfrm_dst_update_origin(struct dst_entry *dst,
1806				  const struct flowi *fl)
1807{
1808#ifdef CONFIG_XFRM_SUB_POLICY
1809	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1810	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
1811#else
1812	return 0;
1813#endif
1814}
1815
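/*
 * Expand pols[0] into the full set of policies that apply to the flow:
 * with sub-policies enabled, an allowing non-main policy is paired with
 * the matching main policy in pols[1].  *num_xfrms is the total number
 * of templates, or -1 if any of the policies does not allow the flow.
 */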
1816static int xfrm_expand_policies(const struct flowi *fl, u16 family,
1817				struct xfrm_policy **pols,
1818				int *num_pols, int *num_xfrms)
1819{
1820	int i;
1821
1822	if (*num_pols == 0 || !pols[0]) {
1823		*num_pols = 0;
1824		*num_xfrms = 0;
1825		return 0;
1826	}
1827	if (IS_ERR(pols[0]))
1828		return PTR_ERR(pols[0]);
1829
1830	*num_xfrms = pols[0]->xfrm_nr;
1831
1832#ifdef CONFIG_XFRM_SUB_POLICY
1833	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
1834	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
1835		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
1836						    XFRM_POLICY_TYPE_MAIN,
1837						    fl, family,
1838						    XFRM_POLICY_OUT);
1839		if (pols[1]) {
1840			if (IS_ERR(pols[1])) {
1841				xfrm_pols_put(pols, *num_pols);
1842				return PTR_ERR(pols[1]);
1843			}
1844			(*num_pols)++;
1845			(*num_xfrms) += pols[1]->xfrm_nr;
1846		}
1847	}
1848#endif
1849	for (i = 0; i < *num_pols; i++) {
1850		if (pols[i]->action != XFRM_POLICY_ALLOW) {
1851			*num_xfrms = -1;
1852			break;
1853		}
1854	}
1855
1856	return 0;
1857
1858}
1859
1860static struct xfrm_dst *
1861xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1862			       const struct flowi *fl, u16 family,
1863			       struct dst_entry *dst_orig)
1864{
1865	struct net *net = xp_net(pols[0]);
1866	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
1867	struct dst_entry *dst;
1868	struct xfrm_dst *xdst;
1869	int err;
1870
1871	/* Try to instantiate a bundle */
1872	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
1873	if (err <= 0) {
1874		if (err != 0 && err != -EAGAIN)
1875			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1876		return ERR_PTR(err);
1877	}
1878
1879	dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
1880	if (IS_ERR(dst)) {
1881		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
1882		return ERR_CAST(dst);
1883	}
1884
1885	xdst = (struct xfrm_dst *)dst;
1886	xdst->num_xfrms = err;
1887	if (num_pols > 1)
1888		err = xfrm_dst_update_parent(dst, &pols[1]->selector);
1889	else
1890		err = xfrm_dst_update_origin(dst, fl);
1891	if (unlikely(err)) {
1892		dst_free(dst);
1893		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1894		return ERR_PTR(err);
1895	}
1896
1897	xdst->num_pols = num_pols;
1898	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
1899	xdst->policy_genid = atomic_read(&pols[0]->genid);
1900
1901	return xdst;
1902}
1903
1904static void xfrm_policy_queue_process(unsigned long arg)
1905{
1906	struct sk_buff *skb;
1907	struct sock *sk;
1908	struct dst_entry *dst;
1909	struct xfrm_policy *pol = (struct xfrm_policy *)arg;
1910	struct net *net = xp_net(pol);
1911	struct xfrm_policy_queue *pq = &pol->polq;
1912	struct flowi fl;
1913	struct sk_buff_head list;
1914
1915	spin_lock(&pq->hold_queue.lock);
1916	skb = skb_peek(&pq->hold_queue);
1917	if (!skb) {
1918		spin_unlock(&pq->hold_queue.lock);
1919		goto out;
1920	}
1921	dst = skb_dst(skb);
1922	sk = skb->sk;
1923	xfrm_decode_session(skb, &fl, dst->ops->family);
1924	spin_unlock(&pq->hold_queue.lock);
1925
1926	dst_hold(dst->path);
1927	dst = xfrm_lookup(net, dst->path, &fl, sk, 0);
1928	if (IS_ERR(dst))
1929		goto purge_queue;
1930
1931	if (dst->flags & DST_XFRM_QUEUE) {
1932		dst_release(dst);
1933
1934		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
1935			goto purge_queue;
1936
1937		pq->timeout = pq->timeout << 1;
1938		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
1939			xfrm_pol_hold(pol);
1940		goto out;
1941	}
1942
1943	dst_release(dst);
1944
1945	__skb_queue_head_init(&list);
1946
1947	spin_lock(&pq->hold_queue.lock);
1948	pq->timeout = 0;
1949	skb_queue_splice_init(&pq->hold_queue, &list);
1950	spin_unlock(&pq->hold_queue.lock);
1951
1952	while (!skb_queue_empty(&list)) {
1953		skb = __skb_dequeue(&list);
1954
1955		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
1956		dst_hold(skb_dst(skb)->path);
1957		dst = xfrm_lookup(net, skb_dst(skb)->path, &fl, skb->sk, 0);
1958		if (IS_ERR(dst)) {
1959			kfree_skb(skb);
1960			continue;
1961		}
1962
1963		nf_reset(skb);
1964		skb_dst_drop(skb);
1965		skb_dst_set(skb, dst);
1966
1967		dst_output(net, skb->sk, skb);
1968	}
1969
1970out:
1971	xfrm_pol_put(pol);
1972	return;
1973
1974purge_queue:
1975	pq->timeout = 0;
1976	skb_queue_purge(&pq->hold_queue);
1977	xfrm_pol_put(pol);
1978}
1979
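/*
 * Output handler for the dummy (DST_XFRM_QUEUE) bundle: packets are held
 * on the policy's hold queue until the required xfrm_states appear.  The
 * hold timer starts at XFRM_QUEUE_TMO_MIN and is doubled by the timer
 * handler above, which re-runs the lookup and either transmits the queued
 * packets or purges the queue once XFRM_QUEUE_TMO_MAX is exceeded.
 */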
1980static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
1981{
1982	unsigned long sched_next;
1983	struct dst_entry *dst = skb_dst(skb);
1984	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
1985	struct xfrm_policy *pol = xdst->pols[0];
1986	struct xfrm_policy_queue *pq = &pol->polq;
1987
1988	if (unlikely(skb_fclone_busy(sk, skb))) {
1989		kfree_skb(skb);
1990		return 0;
1991	}
1992
1993	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
1994		kfree_skb(skb);
1995		return -EAGAIN;
1996	}
1997
1998	skb_dst_force(skb);
1999
2000	spin_lock_bh(&pq->hold_queue.lock);
2001
2002	if (!pq->timeout)
2003		pq->timeout = XFRM_QUEUE_TMO_MIN;
2004
2005	sched_next = jiffies + pq->timeout;
2006
2007	if (del_timer(&pq->hold_timer)) {
2008		if (time_before(pq->hold_timer.expires, sched_next))
2009			sched_next = pq->hold_timer.expires;
2010		xfrm_pol_put(pol);
2011	}
2012
2013	__skb_queue_tail(&pq->hold_queue, skb);
2014	if (!mod_timer(&pq->hold_timer, sched_next))
2015		xfrm_pol_hold(pol);
2016
2017	spin_unlock_bh(&pq->hold_queue.lock);
2018
2019	return 0;
2020}
2021
2022static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
2023						 struct xfrm_flo *xflo,
2024						 const struct flowi *fl,
2025						 int num_xfrms,
2026						 u16 family)
2027{
2028	int err;
2029	struct net_device *dev;
2030	struct dst_entry *dst;
2031	struct dst_entry *dst1;
2032	struct xfrm_dst *xdst;
2033
2034	xdst = xfrm_alloc_dst(net, family);
2035	if (IS_ERR(xdst))
2036		return xdst;
2037
2038	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
2039	    net->xfrm.sysctl_larval_drop ||
2040	    num_xfrms <= 0)
2041		return xdst;
2042
2043	dst = xflo->dst_orig;
2044	dst1 = &xdst->u.dst;
2045	dst_hold(dst);
2046	xdst->route = dst;
2047
2048	dst_copy_metrics(dst1, dst);
2049
2050	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2051	dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
2052	dst1->lastuse = jiffies;
2053
2054	dst1->input = dst_discard;
2055	dst1->output = xdst_queue_output;
2056
2057	dst_hold(dst);
2058	dst1->child = dst;
2059	dst1->path = dst;
2060
2061	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
2062
2063	err = -ENODEV;
2064	dev = dst->dev;
2065	if (!dev)
2066		goto free_dst;
2067
2068	err = xfrm_fill_dst(xdst, dev, fl);
2069	if (err)
2070		goto free_dst;
2071
2072out:
2073	return xdst;
2074
2075free_dst:
2076	dst_release(dst1);
2077	xdst = ERR_PTR(err);
2078	goto out;
2079}
2080
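/*
 * Flow cache resolver for bundles.  The policies from the old cache entry
 * are reused as long as none of them is dead; otherwise a fresh policy
 * lookup is done and a new bundle is built.  If the templates cannot be
 * resolved yet, a dummy bundle (see xfrm_create_dummy_bundle) is returned
 * so packets can be queued while the key manager negotiates the states.
 */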
2081static struct flow_cache_object *
2082xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
2083		   struct flow_cache_object *oldflo, void *ctx)
2084{
2085	struct xfrm_flo *xflo = (struct xfrm_flo *)ctx;
2086	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2087	struct xfrm_dst *xdst, *new_xdst;
2088	int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
2089
2090	/* Check if the policies from old bundle are usable */
2091	xdst = NULL;
2092	if (oldflo) {
2093		xdst = container_of(oldflo, struct xfrm_dst, flo);
2094		num_pols = xdst->num_pols;
2095		num_xfrms = xdst->num_xfrms;
2096		pol_dead = 0;
2097		for (i = 0; i < num_pols; i++) {
2098			pols[i] = xdst->pols[i];
2099			pol_dead |= pols[i]->walk.dead;
2100		}
2101		if (pol_dead) {
2102			dst_free(&xdst->u.dst);
2103			xdst = NULL;
2104			num_pols = 0;
2105			num_xfrms = 0;
2106			oldflo = NULL;
2107		}
2108	}
2109
2110	/* Resolve policies to use if we couldn't get them from
2111	 * previous cache entry */
2112	if (xdst == NULL) {
2113		num_pols = 1;
2114		pols[0] = __xfrm_policy_lookup(net, fl, family,
2115					       flow_to_policy_dir(dir));
2116		err = xfrm_expand_policies(fl, family, pols,
2117					   &num_pols, &num_xfrms);
2118		if (err < 0)
2119			goto inc_error;
2120		if (num_pols == 0)
2121			return NULL;
2122		if (num_xfrms <= 0)
2123			goto make_dummy_bundle;
2124	}
2125
2126	new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
2127						  xflo->dst_orig);
2128	if (IS_ERR(new_xdst)) {
2129		err = PTR_ERR(new_xdst);
2130		if (err != -EAGAIN)
2131			goto error;
2132		if (oldflo == NULL)
2133			goto make_dummy_bundle;
2134		dst_hold(&xdst->u.dst);
2135		return oldflo;
2136	} else if (new_xdst == NULL) {
2137		num_xfrms = 0;
2138		if (oldflo == NULL)
2139			goto make_dummy_bundle;
2140		xdst->num_xfrms = 0;
2141		dst_hold(&xdst->u.dst);
2142		return oldflo;
2143	}
2144
2145	/* Kill the previous bundle */
2146	if (xdst) {
2147		/* The policies were stolen for newly generated bundle */
2148		xdst->num_pols = 0;
2149		dst_free(&xdst->u.dst);
2150	}
2151
2152	/* The flow cache does not hold a reference, it dst_free()'s,
2153	 * but we do need to return one reference for the original caller */
2154	dst_hold(&new_xdst->u.dst);
2155	return &new_xdst->flo;
2156
2157make_dummy_bundle:
2158	/* We found policies, but there are no bundles to instantiate:
2159	 * either because the policy blocks, has no transformations, or
2160	 * we could not build a template (no xfrm_states). */
2161	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
2162	if (IS_ERR(xdst)) {
2163		xfrm_pols_put(pols, num_pols);
2164		return ERR_CAST(xdst);
2165	}
2166	xdst->num_pols = num_pols;
2167	xdst->num_xfrms = num_xfrms;
2168	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2169
2170	dst_hold(&xdst->u.dst);
2171	return &xdst->flo;
2172
2173inc_error:
2174	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2175error:
2176	if (xdst != NULL)
2177		dst_free(&xdst->u.dst);
2178	else
2179		xfrm_pols_put(pols, num_pols);
2180	return ERR_PTR(err);
2181}
2182
2183static struct dst_entry *make_blackhole(struct net *net, u16 family,
2184					struct dst_entry *dst_orig)
2185{
2186	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2187	struct dst_entry *ret;
2188
2189	if (!afinfo) {
2190		dst_release(dst_orig);
2191		return ERR_PTR(-EINVAL);
2192	} else {
2193		ret = afinfo->blackhole_route(net, dst_orig);
2194	}
2195	xfrm_policy_put_afinfo(afinfo);
2196
2197	return ret;
2198}
2199
2200 /* Main function: finds/creates a bundle for a given flow.
2201  *
2202  * At the moment we eat a raw IP route, mostly to speed up lookups
2203  * on interfaces with disabled IPsec.
2204 */
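/*
 * Returns dst_orig when no transformation applies, a freshly built bundle
 * when the flow must be transformed, or an ERR_PTR on error.  -EREMOTE is
 * returned when larval drop is enabled and the needed states are still
 * being negotiated; see xfrm_lookup_route() for the blackhole handling of
 * that case.
 */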
2205struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2206			      const struct flowi *fl,
2207			      const struct sock *sk, int flags)
2208{
2209	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2210	struct flow_cache_object *flo;
2211	struct xfrm_dst *xdst;
2212	struct dst_entry *dst, *route;
2213	u16 family = dst_orig->ops->family;
2214	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
2215	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
2216
2217	dst = NULL;
2218	xdst = NULL;
2219	route = NULL;
2220
2221	sk = sk_const_to_full_sk(sk);
2222	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
2223		num_pols = 1;
2224		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
2225		err = xfrm_expand_policies(fl, family, pols,
2226					   &num_pols, &num_xfrms);
2227		if (err < 0)
2228			goto dropdst;
2229
2230		if (num_pols) {
2231			if (num_xfrms <= 0) {
2232				drop_pols = num_pols;
2233				goto no_transform;
2234			}
2235
2236			xdst = xfrm_resolve_and_create_bundle(
2237					pols, num_pols, fl,
2238					family, dst_orig);
2239			if (IS_ERR(xdst)) {
2240				xfrm_pols_put(pols, num_pols);
2241				err = PTR_ERR(xdst);
2242				goto dropdst;
2243			} else if (xdst == NULL) {
2244				num_xfrms = 0;
2245				drop_pols = num_pols;
2246				goto no_transform;
2247			}
2248
2249			dst_hold(&xdst->u.dst);
2250			xdst->u.dst.flags |= DST_NOCACHE;
2251			route = xdst->route;
2252		}
2253	}
2254
2255	if (xdst == NULL) {
2256		struct xfrm_flo xflo;
2257
2258		xflo.dst_orig = dst_orig;
2259		xflo.flags = flags;
2260
2261		/* To accelerate a bit...  */
2262		if ((dst_orig->flags & DST_NOXFRM) ||
2263		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
2264			goto nopol;
2265
2266		flo = flow_cache_lookup(net, fl, family, dir,
2267					xfrm_bundle_lookup, &xflo);
2268		if (flo == NULL)
2269			goto nopol;
2270		if (IS_ERR(flo)) {
2271			err = PTR_ERR(flo);
2272			goto dropdst;
2273		}
2274		xdst = container_of(flo, struct xfrm_dst, flo);
2275
2276		num_pols = xdst->num_pols;
2277		num_xfrms = xdst->num_xfrms;
2278		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
2279		route = xdst->route;
2280	}
2281
2282	dst = &xdst->u.dst;
2283	if (route == NULL && num_xfrms > 0) {
2284		/* The only case when xfrm_bundle_lookup() returns a
2285		 * bundle with a null route is when the template could
2286		 * not be resolved. It means the policies are there, but
2287		 * the bundle could not be created, since we don't yet
2288		 * have the xfrm_states. We need to wait for the KM to
2289		 * negotiate new SAs or bail out with an error. */
2290		if (net->xfrm.sysctl_larval_drop) {
2291			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2292			err = -EREMOTE;
2293			goto error;
2294		}
2295
2296		err = -EAGAIN;
2297
2298		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2299		goto error;
2300	}
2301
2302no_transform:
2303	if (num_pols == 0)
2304		goto nopol;
2305
2306	if ((flags & XFRM_LOOKUP_ICMP) &&
2307	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
2308		err = -ENOENT;
2309		goto error;
2310	}
2311
2312	for (i = 0; i < num_pols; i++)
2313		pols[i]->curlft.use_time = get_seconds();
2314
2315	if (num_xfrms < 0) {
2316		/* Prohibit the flow */
2317		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
2318		err = -EPERM;
2319		goto error;
2320	} else if (num_xfrms > 0) {
2321		/* Flow transformed */
2322		dst_release(dst_orig);
2323	} else {
2324		/* Flow passes untransformed */
2325		dst_release(dst);
2326		dst = dst_orig;
2327	}
2328ok:
2329	xfrm_pols_put(pols, drop_pols);
2330	if (dst && dst->xfrm &&
2331	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
2332		dst->flags |= DST_XFRM_TUNNEL;
2333	return dst;
2334
2335nopol:
2336	if (!(flags & XFRM_LOOKUP_ICMP)) {
2337		dst = dst_orig;
2338		goto ok;
2339	}
2340	err = -ENOENT;
2341error:
2342	dst_release(dst);
2343dropdst:
2344	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
2345		dst_release(dst_orig);
2346	xfrm_pols_put(pols, drop_pols);
2347	return ERR_PTR(err);
2348}
2349EXPORT_SYMBOL(xfrm_lookup);
2350
2351/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
2352 * Otherwise we may send out blackholed packets.
2353 */
2354struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
2355				    const struct flowi *fl,
2356				    const struct sock *sk, int flags)
2357{
2358	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
2359					    flags | XFRM_LOOKUP_QUEUE |
2360					    XFRM_LOOKUP_KEEP_DST_REF);
2361
2362	if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
2363		return make_blackhole(net, dst_orig->ops->family, dst_orig);
2364
2365	return dst;
2366}
2367EXPORT_SYMBOL(xfrm_lookup_route);
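/*
 * Unlike plain xfrm_lookup(), a larval -EREMOTE result is converted into
 * a blackhole route here, so routing callers (e.g. ip_route_output_flow())
 * get a dst they can attach to the socket while the SAs are negotiated;
 * packets sent over it are simply discarded.
 */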
2368
2369static inline int
2370xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
2371{
2372	struct xfrm_state *x;
2373
2374	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
2375		return 0;
2376	x = skb->sp->xvec[idx];
2377	if (!x->type->reject)
2378		return 0;
2379	return x->type->reject(x, skb, fl);
2380}
2381
2382 /* When skb is transformed back to its "native" form, we have to
2383  * check policy restrictions. At the moment we do this in a maximally
2384  * stupid way. Shame on me. :-) Of course, connected sockets must
2385  * have the policy cached at them.
2386  */
2387
2388static inline int
2389xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
2390	      unsigned short family)
2391{
2392	if (xfrm_state_kern(x))
2393		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
2394	return	x->id.proto == tmpl->id.proto &&
2395		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
2396		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
2397		x->props.mode == tmpl->mode &&
2398		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
2399		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
2400		!(x->props.mode != XFRM_MODE_TRANSPORT &&
2401		  xfrm_state_addr_cmp(tmpl, x, family));
2402}
2403
2404/*
2405 * 0 or more than 0 is returned when validation is succeeded (either bypass
2406 * because of optional transport mode, or next index of the mathced secpath
2407 * state with the template.
2408 * -1 is returned when no matching template is found.
2409 * Otherwise "-2 - errored_index" is returned.
2410 */
2411static inline int
2412xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
2413	       unsigned short family)
2414{
2415	int idx = start;
2416
2417	if (tmpl->optional) {
2418		if (tmpl->mode == XFRM_MODE_TRANSPORT)
2419			return start;
2420	} else
2421		start = -1;
2422	for (; idx < sp->len; idx++) {
2423		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
2424			return ++idx;
2425		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
2426			if (start == -1)
2427				start = -2-idx;
2428			break;
2429		}
2430	}
2431	return start;
2432}
2433
2434int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
2435			  unsigned int family, int reverse)
2436{
2437	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2438	int err;
2439
2440	if (unlikely(afinfo == NULL))
2441		return -EAFNOSUPPORT;
2442
2443	afinfo->decode_session(skb, fl, reverse);
2444	err = security_xfrm_decode_session(skb, &fl->flowi_secid);
2445	xfrm_policy_put_afinfo(afinfo);
2446	return err;
2447}
2448EXPORT_SYMBOL(__xfrm_decode_session);
2449
2450static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
2451{
2452	for (; k < sp->len; k++) {
2453		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
2454			*idxp = k;
2455			return 1;
2456		}
2457	}
2458
2459	return 0;
2460}
2461
2462int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2463			unsigned short family)
2464{
2465	struct net *net = dev_net(skb->dev);
2466	struct xfrm_policy *pol;
2467	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2468	int npols = 0;
2469	int xfrm_nr;
2470	int pi;
2471	int reverse;
2472	struct flowi fl;
2473	u8 fl_dir;
2474	int xerr_idx = -1;
2475
2476	reverse = dir & ~XFRM_POLICY_MASK;
2477	dir &= XFRM_POLICY_MASK;
2478	fl_dir = policy_to_flow_dir(dir);
2479
2480	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
2481		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
2482		return 0;
2483	}
2484
2485	nf_nat_decode_session(skb, &fl, family);
2486
2487	/* First, check used SA against their selectors. */
2488	if (skb->sp) {
2489		int i;
2490
2491		for (i = skb->sp->len-1; i >= 0; i--) {
2492			struct xfrm_state *x = skb->sp->xvec[i];
2493			if (!xfrm_selector_match(&x->sel, &fl, family)) {
2494				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
2495				return 0;
2496			}
2497		}
2498	}
2499
2500	pol = NULL;
2501	sk = sk_to_full_sk(sk);
2502	if (sk && sk->sk_policy[dir]) {
2503		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
2504		if (IS_ERR(pol)) {
2505			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2506			return 0;
2507		}
2508	}
2509
2510	if (!pol) {
2511		struct flow_cache_object *flo;
2512
2513		flo = flow_cache_lookup(net, &fl, family, fl_dir,
2514					xfrm_policy_lookup, NULL);
2515		if (IS_ERR_OR_NULL(flo))
2516			pol = ERR_CAST(flo);
2517		else
2518			pol = container_of(flo, struct xfrm_policy, flo);
2519	}
2520
2521	if (IS_ERR(pol)) {
2522		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2523		return 0;
2524	}
2525
2526	if (!pol) {
2527		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
2528			xfrm_secpath_reject(xerr_idx, skb, &fl);
2529			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
2530			return 0;
2531		}
2532		return 1;
2533	}
2534
2535	pol->curlft.use_time = get_seconds();
2536
2537	pols[0] = pol;
2538	npols++;
2539#ifdef CONFIG_XFRM_SUB_POLICY
2540	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2541		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
2542						    &fl, family,
2543						    XFRM_POLICY_IN);
2544		if (pols[1]) {
2545			if (IS_ERR(pols[1])) {
2546				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2547				return 0;
2548			}
2549			pols[1]->curlft.use_time = get_seconds();
2550			npols++;
2551		}
2552	}
2553#endif
2554
2555	if (pol->action == XFRM_POLICY_ALLOW) {
2556		struct sec_path *sp;
2557		static struct sec_path dummy;
2558		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
2559		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
2560		struct xfrm_tmpl **tpp = tp;
2561		int ti = 0;
2562		int i, k;
2563
2564		if ((sp = skb->sp) == NULL)
2565			sp = &dummy;
2566
2567		for (pi = 0; pi < npols; pi++) {
2568			if (pols[pi] != pol &&
2569			    pols[pi]->action != XFRM_POLICY_ALLOW) {
2570				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2571				goto reject;
2572			}
2573			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
2574				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
2575				goto reject_error;
2576			}
2577			for (i = 0; i < pols[pi]->xfrm_nr; i++)
2578				tpp[ti++] = &pols[pi]->xfrm_vec[i];
2579		}
2580		xfrm_nr = ti;
2581		if (npols > 1) {
2582			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net);
2583			tpp = stp;
2584		}
2585
2586		/* For each tunnel xfrm, find the first matching tmpl.
2587		 * For each tmpl before that, find corresponding xfrm.
2588		 * Order is _important_. Later we will implement
2589		 * some barriers, but at the moment barriers
2590		 * are implied between every two transformations.
2591		 */
2592		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
2593			k = xfrm_policy_ok(tpp[i], sp, k, family);
2594			if (k < 0) {
2595				if (k < -1)
2596					/* "-2 - errored_index" returned */
2597					xerr_idx = -(2+k);
2598				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2599				goto reject;
2600			}
2601		}
2602
2603		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
2604			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2605			goto reject;
2606		}
2607
2608		xfrm_pols_put(pols, npols);
2609		return 1;
2610	}
2611	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2612
2613reject:
2614	xfrm_secpath_reject(xerr_idx, skb, &fl);
2615reject_error:
2616	xfrm_pols_put(pols, npols);
2617	return 0;
2618}
2619EXPORT_SYMBOL(__xfrm_policy_check);
2620
2621int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2622{
2623	struct net *net = dev_net(skb->dev);
2624	struct flowi fl;
2625	struct dst_entry *dst;
2626	int res = 1;
2627
2628	if (xfrm_decode_session(skb, &fl, family) < 0) {
2629		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2630		return 0;
2631	}
2632
2633	skb_dst_force(skb);
2634
2635	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
2636	if (IS_ERR(dst)) {
2637		res = 0;
2638		dst = NULL;
2639	}
2640	skb_dst_set(skb, dst);
2641	return res;
2642}
2643EXPORT_SYMBOL(__xfrm_route_forward);
2644
2645/* Optimize later using cookies and generation ids. */
2646
2647static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
2648{
2649	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
2650	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
2651	 * get validated by dst_ops->check on every use.  We do this
2652	 * because when a normal route referenced by an XFRM dst is
2653	 * obsoleted we do not go looking around for all parent
2654	 * referencing XFRM dsts so that we can invalidate them.  It
2655	 * is just too much work.  Instead we make the checks here on
2656	 * every use.  For example:
2657	 *
2658	 *	XFRM dst A --> IPv4 dst X
2659	 *
2660	 * X is the "xdst->route" of A (X is also the "dst->path" of A
2661	 * in this example).  If X is marked obsolete, "A" will not
2662	 * notice.  That's what we are validating here via the
2663	 * stale_bundle() check.
2664	 *
2665	 * When a policy's bundle is pruned, we dst_free() the XFRM
2666	 * dst which causes its ->obsolete field to be set to
2667	 * DST_OBSOLETE_DEAD.  If an XFRM dst has been pruned like
2668	 * this, we want to force a new route lookup.
2669	 */
2670	if (dst->obsolete < 0 && !stale_bundle(dst))
2671		return dst;
2672
2673	return NULL;
2674}
2675
2676static int stale_bundle(struct dst_entry *dst)
2677{
2678	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
2679}
2680
2681void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
2682{
2683	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
2684		dst->dev = dev_net(dev)->loopback_dev;
2685		dev_hold(dst->dev);
2686		dev_put(dev);
2687	}
2688}
2689EXPORT_SYMBOL(xfrm_dst_ifdown);
2690
2691static void xfrm_link_failure(struct sk_buff *skb)
2692{
2693	/* Impossible. Such dst must be popped before it reaches the point of failure. */
2694}
2695
2696static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
2697{
2698	if (dst) {
2699		if (dst->obsolete) {
2700			dst_release(dst);
2701			dst = NULL;
2702		}
2703	}
2704	return dst;
2705}
2706
2707void xfrm_garbage_collect(struct net *net)
2708{
2709	flow_cache_flush(net);
2710}
2711EXPORT_SYMBOL(xfrm_garbage_collect);
2712
2713static void xfrm_garbage_collect_deferred(struct net *net)
2714{
2715	flow_cache_flush_deferred(net);
2716}
2717
2718static void xfrm_init_pmtu(struct dst_entry *dst)
2719{
2720	do {
2721		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2722		u32 pmtu, route_mtu_cached;
2723
2724		pmtu = dst_mtu(dst->child);
2725		xdst->child_mtu_cached = pmtu;
2726
2727		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
2728
2729		route_mtu_cached = dst_mtu(xdst->route);
2730		xdst->route_mtu_cached = route_mtu_cached;
2731
2732		if (pmtu > route_mtu_cached)
2733			pmtu = route_mtu_cached;
2734
2735		dst_metric_set(dst, RTAX_MTU, pmtu);
2736	} while ((dst = dst->next));
2737}
2738
2739/* Check that the bundle accepts the flow and its components are
2740 * still valid.
2741 */
2742
2743static int xfrm_bundle_ok(struct xfrm_dst *first)
2744{
2745	struct dst_entry *dst = &first->u.dst;
2746	struct xfrm_dst *last;
2747	u32 mtu;
2748
2749	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
2750	    (dst->dev && !netif_running(dst->dev)))
2751		return 0;
2752
2753	if (dst->flags & DST_XFRM_QUEUE)
2754		return 1;
2755
2756	last = NULL;
2757
2758	do {
2759		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2760
2761		if (dst->xfrm->km.state != XFRM_STATE_VALID)
2762			return 0;
2763		if (xdst->xfrm_genid != dst->xfrm->genid)
2764			return 0;
2765		if (xdst->num_pols > 0 &&
2766		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
2767			return 0;
2768
2769		mtu = dst_mtu(dst->child);
2770		if (xdst->child_mtu_cached != mtu) {
2771			last = xdst;
2772			xdst->child_mtu_cached = mtu;
2773		}
2774
2775		if (!dst_check(xdst->route, xdst->route_cookie))
2776			return 0;
2777		mtu = dst_mtu(xdst->route);
2778		if (xdst->route_mtu_cached != mtu) {
2779			last = xdst;
2780			xdst->route_mtu_cached = mtu;
2781		}
2782
2783		dst = dst->child;
2784	} while (dst->xfrm);
2785
2786	if (likely(!last))
2787		return 1;
2788
2789	mtu = last->child_mtu_cached;
2790	for (;;) {
2791		dst = &last->u.dst;
2792
2793		mtu = xfrm_state_mtu(dst->xfrm, mtu);
2794		if (mtu > last->route_mtu_cached)
2795			mtu = last->route_mtu_cached;
2796		dst_metric_set(dst, RTAX_MTU, mtu);
2797
2798		if (last == first)
2799			break;
2800
2801		last = (struct xfrm_dst *)last->u.dst.next;
2802		last->child_mtu_cached = mtu;
2803	}
2804
2805	return 1;
2806}
2807
2808static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
2809{
2810	return dst_metric_advmss(dst->path);
2811}
2812
2813static unsigned int xfrm_mtu(const struct dst_entry *dst)
2814{
2815	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2816
2817	return mtu ? : dst_mtu(dst->path);
2818}
2819
2820static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
2821					   struct sk_buff *skb,
2822					   const void *daddr)
2823{
2824	return dst->path->ops->neigh_lookup(dst, skb, daddr);
2825}
2826
2827int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2828{
2829	int err = 0;
2830	if (unlikely(afinfo == NULL))
2831		return -EINVAL;
2832	if (unlikely(afinfo->family >= NPROTO))
2833		return -EAFNOSUPPORT;
2834	spin_lock(&xfrm_policy_afinfo_lock);
2835	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
2836		err = -EEXIST;
2837	else {
2838		struct dst_ops *dst_ops = afinfo->dst_ops;
2839		if (likely(dst_ops->kmem_cachep == NULL))
2840			dst_ops->kmem_cachep = xfrm_dst_cache;
2841		if (likely(dst_ops->check == NULL))
2842			dst_ops->check = xfrm_dst_check;
2843		if (likely(dst_ops->default_advmss == NULL))
2844			dst_ops->default_advmss = xfrm_default_advmss;
2845		if (likely(dst_ops->mtu == NULL))
2846			dst_ops->mtu = xfrm_mtu;
2847		if (likely(dst_ops->negative_advice == NULL))
2848			dst_ops->negative_advice = xfrm_negative_advice;
2849		if (likely(dst_ops->link_failure == NULL))
2850			dst_ops->link_failure = xfrm_link_failure;
2851		if (likely(dst_ops->neigh_lookup == NULL))
2852			dst_ops->neigh_lookup = xfrm_neigh_lookup;
2853		if (likely(afinfo->garbage_collect == NULL))
2854			afinfo->garbage_collect = xfrm_garbage_collect_deferred;
2855		rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
2856	}
2857	spin_unlock(&xfrm_policy_afinfo_lock);
2858
2859	return err;
2860}
2861EXPORT_SYMBOL(xfrm_policy_register_afinfo);
2862
2863int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
2864{
2865	int err = 0;
2866	if (unlikely(afinfo == NULL))
2867		return -EINVAL;
2868	if (unlikely(afinfo->family >= NPROTO))
2869		return -EAFNOSUPPORT;
2870	spin_lock(&xfrm_policy_afinfo_lock);
2871	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
2872		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
2873			err = -EINVAL;
2874		else
2875			RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family],
2876					 NULL);
2877	}
2878	spin_unlock(&xfrm_policy_afinfo_lock);
2879	if (!err) {
2880		struct dst_ops *dst_ops = afinfo->dst_ops;
2881
2882		synchronize_rcu();
2883
2884		dst_ops->kmem_cachep = NULL;
2885		dst_ops->check = NULL;
2886		dst_ops->negative_advice = NULL;
2887		dst_ops->link_failure = NULL;
2888		afinfo->garbage_collect = NULL;
2889	}
2890	return err;
2891}
2892EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
2893
2894static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
2895{
2896	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2897
2898	switch (event) {
2899	case NETDEV_DOWN:
2900		xfrm_garbage_collect(dev_net(dev));
2901	}
2902	return NOTIFY_DONE;
2903}
2904
2905static struct notifier_block xfrm_dev_notifier = {
2906	.notifier_call	= xfrm_dev_event,
2907};
2908
2909#ifdef CONFIG_XFRM_STATISTICS
2910static int __net_init xfrm_statistics_init(struct net *net)
2911{
2912	int rv;
2913	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
2914	if (!net->mib.xfrm_statistics)
2915		return -ENOMEM;
2916	rv = xfrm_proc_init(net);
2917	if (rv < 0)
2918		free_percpu(net->mib.xfrm_statistics);
2919	return rv;
2920}
2921
2922static void xfrm_statistics_fini(struct net *net)
2923{
2924	xfrm_proc_fini(net);
2925	free_percpu(net->mib.xfrm_statistics);
2926}
2927#else
2928static int __net_init xfrm_statistics_init(struct net *net)
2929{
2930	return 0;
2931}
2932
2933static void xfrm_statistics_fini(struct net *net)
2934{
2935}
2936#endif
2937
2938static int __net_init xfrm_policy_init(struct net *net)
2939{
2940	unsigned int hmask, sz;
2941	int dir;
2942
2943	if (net_eq(net, &init_net))
2944		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
2945					   sizeof(struct xfrm_dst),
2946					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2947					   NULL);
2948
2949	hmask = 8 - 1;
2950	sz = (hmask+1) * sizeof(struct hlist_head);
2951
2952	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
2953	if (!net->xfrm.policy_byidx)
2954		goto out_byidx;
2955	net->xfrm.policy_idx_hmask = hmask;
2956
2957	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
2958		struct xfrm_policy_hash *htab;
2959
2960		net->xfrm.policy_count[dir] = 0;
2961		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
2962		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
2963
2964		htab = &net->xfrm.policy_bydst[dir];
2965		htab->table = xfrm_hash_alloc(sz);
2966		if (!htab->table)
2967			goto out_bydst;
2968		htab->hmask = hmask;
2969		htab->dbits4 = 32;
2970		htab->sbits4 = 32;
2971		htab->dbits6 = 128;
2972		htab->sbits6 = 128;
2973	}
2974	net->xfrm.policy_hthresh.lbits4 = 32;
2975	net->xfrm.policy_hthresh.rbits4 = 32;
2976	net->xfrm.policy_hthresh.lbits6 = 128;
2977	net->xfrm.policy_hthresh.rbits6 = 128;
2978
2979	seqlock_init(&net->xfrm.policy_hthresh.lock);
2980
2981	INIT_LIST_HEAD(&net->xfrm.policy_all);
2982	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
2983	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
2984	if (net_eq(net, &init_net))
2985		register_netdevice_notifier(&xfrm_dev_notifier);
2986	return 0;
2987
2988out_bydst:
2989	for (dir--; dir >= 0; dir--) {
2990		struct xfrm_policy_hash *htab;
2991
2992		htab = &net->xfrm.policy_bydst[dir];
2993		xfrm_hash_free(htab->table, sz);
2994	}
2995	xfrm_hash_free(net->xfrm.policy_byidx, sz);
2996out_byidx:
2997	return -ENOMEM;
2998}
2999
3000static void xfrm_policy_fini(struct net *net)
3001{
3002	unsigned int sz;
3003	int dir;
3004
3005	flush_work(&net->xfrm.policy_hash_work);
3006#ifdef CONFIG_XFRM_SUB_POLICY
3007	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
3008#endif
3009	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
3010
3011	WARN_ON(!list_empty(&net->xfrm.policy_all));
3012
3013	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
3014		struct xfrm_policy_hash *htab;
3015
3016		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
3017
3018		htab = &net->xfrm.policy_bydst[dir];
3019		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
3020		WARN_ON(!hlist_empty(htab->table));
3021		xfrm_hash_free(htab->table, sz);
3022	}
3023
3024	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
3025	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
3026	xfrm_hash_free(net->xfrm.policy_byidx, sz);
3027}
3028
3029static int __net_init xfrm_net_init(struct net *net)
3030{
3031	int rv;
3032
3033	rv = xfrm_statistics_init(net);
3034	if (rv < 0)
3035		goto out_statistics;
3036	rv = xfrm_state_init(net);
3037	if (rv < 0)
3038		goto out_state;
3039	rv = xfrm_policy_init(net);
3040	if (rv < 0)
3041		goto out_policy;
3042	rv = xfrm_sysctl_init(net);
3043	if (rv < 0)
3044		goto out_sysctl;
3045	rv = flow_cache_init(net);
3046	if (rv < 0)
3047		goto out;
3048
3049	/* Initialize the per-net locks here */
3050	spin_lock_init(&net->xfrm.xfrm_state_lock);
3051	rwlock_init(&net->xfrm.xfrm_policy_lock);
3052	mutex_init(&net->xfrm.xfrm_cfg_mutex);
3053
3054	return 0;
3055
3056out:
3057	xfrm_sysctl_fini(net);
3058out_sysctl:
3059	xfrm_policy_fini(net);
3060out_policy:
3061	xfrm_state_fini(net);
3062out_state:
3063	xfrm_statistics_fini(net);
3064out_statistics:
3065	return rv;
3066}
3067
3068static void __net_exit xfrm_net_exit(struct net *net)
3069{
3070	flow_cache_fini(net);
3071	xfrm_sysctl_fini(net);
3072	xfrm_policy_fini(net);
3073	xfrm_state_fini(net);
3074	xfrm_statistics_fini(net);
3075}
3076
3077static struct pernet_operations __net_initdata xfrm_net_ops = {
3078	.init = xfrm_net_init,
3079	.exit = xfrm_net_exit,
3080};
3081
3082void __init xfrm_init(void)
3083{
3084	register_pernet_subsys(&xfrm_net_ops);
3085	xfrm_input_init();
3086}
3087
3088#ifdef CONFIG_AUDITSYSCALL
3089static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
3090					 struct audit_buffer *audit_buf)
3091{
3092	struct xfrm_sec_ctx *ctx = xp->security;
3093	struct xfrm_selector *sel = &xp->selector;
3094
3095	if (ctx)
3096		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
3097				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
3098
3099	switch (sel->family) {
3100	case AF_INET:
3101		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
3102		if (sel->prefixlen_s != 32)
3103			audit_log_format(audit_buf, " src_prefixlen=%d",
3104					 sel->prefixlen_s);
3105		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
3106		if (sel->prefixlen_d != 32)
3107			audit_log_format(audit_buf, " dst_prefixlen=%d",
3108					 sel->prefixlen_d);
3109		break;
3110	case AF_INET6:
3111		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
3112		if (sel->prefixlen_s != 128)
3113			audit_log_format(audit_buf, " src_prefixlen=%d",
3114					 sel->prefixlen_s);
3115		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
3116		if (sel->prefixlen_d != 128)
3117			audit_log_format(audit_buf, " dst_prefixlen=%d",
3118					 sel->prefixlen_d);
3119		break;
3120	}
3121}
3122
3123void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
3124{
3125	struct audit_buffer *audit_buf;
3126
3127	audit_buf = xfrm_audit_start("SPD-add");
3128	if (audit_buf == NULL)
3129		return;
3130	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
3131	audit_log_format(audit_buf, " res=%u", result);
3132	xfrm_audit_common_policyinfo(xp, audit_buf);
3133	audit_log_end(audit_buf);
3134}
3135EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
3136
3137void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
3138			      bool task_valid)
3139{
3140	struct audit_buffer *audit_buf;
3141
3142	audit_buf = xfrm_audit_start("SPD-delete");
3143	if (audit_buf == NULL)
3144		return;
3145	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
3146	audit_log_format(audit_buf, " res=%u", result);
3147	xfrm_audit_common_policyinfo(xp, audit_buf);
3148	audit_log_end(audit_buf);
3149}
3150EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
3151#endif
3152
3153#ifdef CONFIG_XFRM_MIGRATE
3154static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
3155					const struct xfrm_selector *sel_tgt)
3156{
3157	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
3158		if (sel_tgt->family == sel_cmp->family &&
3159		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
3160				    sel_cmp->family) &&
3161		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
3162				    sel_cmp->family) &&
3163		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
3164		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
3165			return true;
3166		}
3167	} else {
3168		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
3169			return true;
3170		}
3171	}
3172	return false;
3173}
3174
3175static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
3176						    u8 dir, u8 type, struct net *net)
3177{
3178	struct xfrm_policy *pol, *ret = NULL;
3179	struct hlist_head *chain;
3180	u32 priority = ~0U;
3181
3182	read_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME*/
3183	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
3184	hlist_for_each_entry(pol, chain, bydst) {
3185		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3186		    pol->type == type) {
3187			ret = pol;
3188			priority = ret->priority;
3189			break;
3190		}
3191	}
3192	chain = &net->xfrm.policy_inexact[dir];
3193	hlist_for_each_entry(pol, chain, bydst) {
3194		if ((pol->priority >= priority) && ret)
3195			break;
3196
3197		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3198		    pol->type == type) {
3199			ret = pol;
3200			break;
3201		}
3202	}
3203
3204	xfrm_pol_hold(ret);
3205
3206	read_unlock_bh(&net->xfrm.xfrm_policy_lock);
3207
3208	return ret;
3209}
3210
3211static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
3212{
3213	int match = 0;
3214
3215	if (t->mode == m->mode && t->id.proto == m->proto &&
3216	    (m->reqid == 0 || t->reqid == m->reqid)) {
3217		switch (t->mode) {
3218		case XFRM_MODE_TUNNEL:
3219		case XFRM_MODE_BEET:
3220			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
3221					    m->old_family) &&
3222			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
3223					    m->old_family)) {
3224				match = 1;
3225			}
3226			break;
3227		case XFRM_MODE_TRANSPORT:
3228			/* in case of transport mode, template does not store
3229			   any IP addresses, hence we just compare mode and
3230			   protocol */
3231			match = 1;
3232			break;
3233		default:
3234			break;
3235		}
3236	}
3237	return match;
3238}
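
/*
 * Illustrative note (not part of the original file): for a tunnel- or
 * BEET-mode template the old outer addresses must match, e.g. a migrate
 * entry with old_daddr 192.0.2.1 and old_saddr 192.0.2.2 only matches a
 * template whose id.daddr/saddr carry those addresses; transport-mode
 * templates store no addresses, so mode, protocol and (if non-zero) reqid
 * are sufficient.  The addresses above are documentation-range examples.
 */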
3239
3240/* update endpoint address(es) of template(s) */
3241static int xfrm_policy_migrate(struct xfrm_policy *pol,
3242			       struct xfrm_migrate *m, int num_migrate)
3243{
3244	struct xfrm_migrate *mp;
3245	int i, j, n = 0;
3246
3247	write_lock_bh(&pol->lock);
3248	if (unlikely(pol->walk.dead)) {
3249		/* target policy has been deleted */
3250		write_unlock_bh(&pol->lock);
3251		return -ENOENT;
3252	}
3253
3254	for (i = 0; i < pol->xfrm_nr; i++) {
3255		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
3256			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
3257				continue;
3258			n++;
3259			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
3260			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
3261				continue;
3262			/* update endpoints */
3263			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
3264			       sizeof(pol->xfrm_vec[i].id.daddr));
3265			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
3266			       sizeof(pol->xfrm_vec[i].saddr));
3267			pol->xfrm_vec[i].encap_family = mp->new_family;
3268			/* flush bundles */
3269			atomic_inc(&pol->genid);
3270		}
3271	}
3272
3273	write_unlock_bh(&pol->lock);
3274
3275	if (!n)
3276		return -ENODATA;
3277
3278	return 0;
3279}
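
/*
 * Illustrative note (not part of the original file): for each template
 * matched by a migrate entry the tunnel endpoints are rewritten in place,
 * e.g. id.daddr 192.0.2.1 -> 198.51.100.1, and pol->genid is bumped so
 * that cached bundles built from the old endpoints are treated as stale
 * and rebuilt on the next lookup.  -ENOENT is returned if the policy has
 * already been deleted, -ENODATA if no template matched any entry.
 */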
3280
3281static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
3282{
3283	int i, j;
3284
3285	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
3286		return -EINVAL;
3287
3288	for (i = 0; i < num_migrate; i++) {
3289		if (xfrm_addr_equal(&m[i].old_daddr, &m[i].new_daddr,
3290				    m[i].old_family) &&
3291		    xfrm_addr_equal(&m[i].old_saddr, &m[i].new_saddr,
3292				    m[i].old_family))
3293			return -EINVAL;
3294		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
3295		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
3296			return -EINVAL;
3297
3298		/* check if there is any duplicated entry */
3299		for (j = i + 1; j < num_migrate; j++) {
3300			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
3301				    sizeof(m[i].old_daddr)) &&
3302			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
3303				    sizeof(m[i].old_saddr)) &&
3304			    m[i].proto == m[j].proto &&
3305			    m[i].mode == m[j].mode &&
3306			    m[i].reqid == m[j].reqid &&
3307			    m[i].old_family == m[j].old_family)
3308				return -EINVAL;
3309		}
3310	}
3311
3312	return 0;
3313}
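
/*
 * Illustrative note (not part of the original file): a request is rejected
 * with -EINVAL if num_migrate is outside 1..XFRM_MAX_DEPTH, if an entry's
 * new addresses are identical to its old ones (nothing to migrate), if
 * either new address is the unspecified/any address, or if two entries
 * describe the same old (saddr, daddr, proto, mode, reqid, family) tuple.
 */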
3314
3315int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
3316		 struct xfrm_migrate *m, int num_migrate,
3317		 struct xfrm_kmaddress *k, struct net *net)
3318{
3319	int i, err, nx_cur = 0, nx_new = 0;
3320	struct xfrm_policy *pol = NULL;
3321	struct xfrm_state *x, *xc;
3322	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
3323	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
3324	struct xfrm_migrate *mp;
3325
3326	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
3327		goto out;
3328
3329	/* Stage 1 - find policy */
3330	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
3331		err = -ENOENT;
3332		goto out;
3333	}
3334
3335	/* Stage 2 - find and update state(s) */
3336	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
3337		if ((x = xfrm_migrate_state_find(mp, net))) {
3338			x_cur[nx_cur] = x;
3339			nx_cur++;
3340			if ((xc = xfrm_state_migrate(x, mp))) {
3341				x_new[nx_new] = xc;
3342				nx_new++;
3343			} else {
3344				err = -ENODATA;
3345				goto restore_state;
3346			}
3347		}
3348	}
3349
3350	/* Stage 3 - update policy */
3351	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
3352		goto restore_state;
3353
3354	/* Stage 4 - delete old state(s) */
3355	if (nx_cur) {
3356		xfrm_states_put(x_cur, nx_cur);
3357		xfrm_states_delete(x_cur, nx_cur);
3358	}
3359
3360	/* Stage 5 - announce */
3361	km_migrate(sel, dir, type, m, num_migrate, k);
3362
3363	xfrm_pol_put(pol);
3364
3365	return 0;
3366out:
3367	return err;
3368
3369restore_state:
3370	if (pol)
3371		xfrm_pol_put(pol);
3372	if (nx_cur)
3373		xfrm_states_put(x_cur, nx_cur);
3374	if (nx_new)
3375		xfrm_states_delete(x_new, nx_new);
3376
3377	return err;
3378}
3379EXPORT_SYMBOL(xfrm_migrate);
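
/*
 * Minimal sketch (not part of the original file) of how a key manager
 * front end (PF_KEY or netlink) might drive xfrm_migrate() above to move
 * one tunnel-mode SA pair from 192.0.2.x to 198.51.100.x.  The selector,
 * addresses and the helper name example_migrate_tunnel() are invented for
 * illustration; real callers fill these structures from user space input.
 */
static int example_migrate_tunnel(struct net *net,
				  const struct xfrm_selector *sel)
{
	struct xfrm_migrate m = {};

	m.old_family   = AF_INET;
	m.new_family   = AF_INET;
	m.mode	       = XFRM_MODE_TUNNEL;
	m.proto	       = IPPROTO_ESP;
	m.reqid	       = 0;			/* 0 matches any reqid */
	m.old_daddr.a4 = htonl(0xc0000201);	/* 192.0.2.1, old endpoint */
	m.old_saddr.a4 = htonl(0xc0000202);	/* 192.0.2.2 */
	m.new_daddr.a4 = htonl(0xc6336401);	/* 198.51.100.1, new endpoint */
	m.new_saddr.a4 = htonl(0xc6336402);	/* 198.51.100.2 */

	/* migrate the outbound policy; no kmaddress notification (NULL) */
	return xfrm_migrate(sel, XFRM_POLICY_OUT, XFRM_POLICY_TYPE_MAIN,
			    &m, 1, NULL, net);
}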
3380#endif