v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * xfrm_policy.c
   4 *
   5 * Changes:
   6 *	Mitsuru KANDA @USAGI
   7 * 	Kazunori MIYAZAWA @USAGI
   8 * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
   9 * 		IPv6 support
  10 * 	Kazunori MIYAZAWA @USAGI
  11 * 	YOSHIFUJI Hideaki
  12 * 		Split up af-specific portion
  13 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
  14 *
  15 */
  16
  17#include <linux/err.h>
  18#include <linux/slab.h>
  19#include <linux/kmod.h>
  20#include <linux/list.h>
  21#include <linux/spinlock.h>
  22#include <linux/workqueue.h>
  23#include <linux/notifier.h>
  24#include <linux/netdevice.h>
  25#include <linux/netfilter.h>
  26#include <linux/module.h>
  27#include <linux/cache.h>
  28#include <linux/cpu.h>
  29#include <linux/audit.h>
  30#include <linux/rhashtable.h>
  31#include <linux/if_tunnel.h>
  32#include <linux/icmp.h>
  33#include <net/dst.h>
  34#include <net/flow.h>
  35#include <net/inet_ecn.h>
  36#include <net/xfrm.h>
  37#include <net/ip.h>
  38#include <net/gre.h>
  39#if IS_ENABLED(CONFIG_IPV6_MIP6)
  40#include <net/mip6.h>
  41#endif
  42#ifdef CONFIG_XFRM_STATISTICS
  43#include <net/snmp.h>
  44#endif
  45#ifdef CONFIG_XFRM_ESPINTCP
  46#include <net/espintcp.h>
  47#endif
  48#include <net/inet_dscp.h>
  49
  50#include "xfrm_hash.h"
  51
  52#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
  53#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
  54#define XFRM_MAX_QUEUE_LEN	100
  55
  56struct xfrm_flo {
  57	struct dst_entry *dst_orig;
  58	u8 flags;
  59};
  60
  61/* prefixes smaller than this are stored in lists, not trees. */
  62#define INEXACT_PREFIXLEN_IPV4	16
  63#define INEXACT_PREFIXLEN_IPV6	48
  64
  65struct xfrm_pol_inexact_node {
  66	struct rb_node node;
  67	union {
  68		xfrm_address_t addr;
  69		struct rcu_head rcu;
  70	};
  71	u8 prefixlen;
  72
  73	struct rb_root root;
  74
   75	/* the policies matching this node; the list may be empty */
  76	struct hlist_head hhead;
  77};
  78
  79/* xfrm inexact policy search tree:
  80 * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
  81 *  |
  82 * +---- root_d: sorted by daddr:prefix
  83 * |                 |
  84 * |        xfrm_pol_inexact_node
  85 * |                 |
  86 * |                 +- root: sorted by saddr/prefix
  87 * |                 |              |
  88 * |                 |         xfrm_pol_inexact_node
  89 * |                 |              |
  90 * |                 |              + root: unused
  91 * |                 |              |
  92 * |                 |              + hhead: saddr:daddr policies
  93 * |                 |
  94 * |                 +- coarse policies and all any:daddr policies
  95 * |
  96 * +---- root_s: sorted by saddr:prefix
  97 * |                 |
  98 * |        xfrm_pol_inexact_node
  99 * |                 |
 100 * |                 + root: unused
 101 * |                 |
 102 * |                 + hhead: saddr:any policies
 103 * |
 104 * +---- coarse policies and all any:any policies
 105 *
 106 * Lookups return four candidate lists:
 107 * 1. any:any list from top-level xfrm_pol_inexact_bin
 108 * 2. any:daddr list from daddr tree
 109 * 3. saddr:daddr list from 2nd level daddr tree
 110 * 4. saddr:any list from saddr tree
 111 *
 112 * This result set then needs to be searched for the policy with
 113 * the lowest priority.  If two candidates have the same priority, the
 114 * struct xfrm_policy pos member with the lower number is used.
 115 *
  116 * This replicates the previous single-list-search algorithm, which would
  117 * return the first matching policy in the (ordered-by-priority) list.
 118 */
 119
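/* For illustration (a worked example based on the INEXACT_PREFIXLEN_IPV4
 * threshold above): an IPv4 policy whose selector is saddr 10.0.0.0/8 and
 * daddr 192.168.1.0/24 treats the source address as "any" (prefix shorter
 * than 16 bits) but the destination as specific enough for the tree.  It is
 * therefore stored on the hhead of its daddr node in root_d and returned via
 * candidate list 2 (any:daddr), whereas a policy with both prefixes of at
 * least 16 bits ends up on list 3 (saddr:daddr) in the second-level tree.
 */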
 120struct xfrm_pol_inexact_key {
 121	possible_net_t net;
 122	u32 if_id;
 123	u16 family;
 124	u8 dir, type;
 125};
 126
 127struct xfrm_pol_inexact_bin {
 128	struct xfrm_pol_inexact_key k;
 129	struct rhash_head head;
 130	/* list containing '*:*' policies */
 131	struct hlist_head hhead;
 132
 133	seqcount_spinlock_t count;
 134	/* tree sorted by daddr/prefix */
 135	struct rb_root root_d;
 136
 137	/* tree sorted by saddr/prefix */
 138	struct rb_root root_s;
 139
 140	/* slow path below */
 141	struct list_head inexact_bins;
 142	struct rcu_head rcu;
 143};
 144
 145enum xfrm_pol_inexact_candidate_type {
 146	XFRM_POL_CAND_BOTH,
 147	XFRM_POL_CAND_SADDR,
 148	XFRM_POL_CAND_DADDR,
 149	XFRM_POL_CAND_ANY,
 150
 151	XFRM_POL_CAND_MAX,
 152};
 153
 154struct xfrm_pol_inexact_candidates {
 155	struct hlist_head *res[XFRM_POL_CAND_MAX];
 156};
 157
 158struct xfrm_flow_keys {
 159	struct flow_dissector_key_basic basic;
 160	struct flow_dissector_key_control control;
 161	union {
 162		struct flow_dissector_key_ipv4_addrs ipv4;
 163		struct flow_dissector_key_ipv6_addrs ipv6;
 164	} addrs;
 165	struct flow_dissector_key_ip ip;
 166	struct flow_dissector_key_icmp icmp;
 167	struct flow_dissector_key_ports ports;
 168	struct flow_dissector_key_keyid gre;
 169};
 170
 171static struct flow_dissector xfrm_session_dissector __ro_after_init;
 172
 173static DEFINE_SPINLOCK(xfrm_if_cb_lock);
 174static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;
 175
 176static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
 177static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
 178						__read_mostly;
 179
 180static struct kmem_cache *xfrm_dst_cache __ro_after_init;
 181
 182static struct rhashtable xfrm_policy_inexact_table;
 183static const struct rhashtable_params xfrm_pol_inexact_params;
 184
 185static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
 186static int stale_bundle(struct dst_entry *dst);
 187static int xfrm_bundle_ok(struct xfrm_dst *xdst);
 188static void xfrm_policy_queue_process(struct timer_list *t);
 189
 190static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
 191static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
 192						int dir);
 193
 194static struct xfrm_pol_inexact_bin *
 195xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
 196			   u32 if_id);
 197
 198static struct xfrm_pol_inexact_bin *
 199xfrm_policy_inexact_lookup_rcu(struct net *net,
 200			       u8 type, u16 family, u8 dir, u32 if_id);
 201static struct xfrm_policy *
 202xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
 203			bool excl);
 204
 205static bool
 206xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
 207				    struct xfrm_pol_inexact_bin *b,
 208				    const xfrm_address_t *saddr,
 209				    const xfrm_address_t *daddr);
 210
 211static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
 212{
 213	return refcount_inc_not_zero(&policy->refcnt);
 214}
 215
 216static inline bool
 217__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
 218{
 219	const struct flowi4 *fl4 = &fl->u.ip4;
 220
 221	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
 222		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
 223		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
 224		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
 225		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
 226		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
 227}
 228
 229static inline bool
 230__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
 231{
 232	const struct flowi6 *fl6 = &fl->u.ip6;
 233
 234	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
 235		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
 236		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
 237		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
 238		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
 239		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
 240}
 241
 242bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
 243			 unsigned short family)
 244{
 245	switch (family) {
 246	case AF_INET:
 247		return __xfrm4_selector_match(sel, fl);
 248	case AF_INET6:
 249		return __xfrm6_selector_match(sel, fl);
 250	}
 251	return false;
 252}
 253
 254static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
 255{
 256	const struct xfrm_policy_afinfo *afinfo;
 257
 258	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
 259		return NULL;
 260	rcu_read_lock();
 261	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
 262	if (unlikely(!afinfo))
 263		rcu_read_unlock();
 264	return afinfo;
 265}
 266
 267/* Called with rcu_read_lock(). */
 268static const struct xfrm_if_cb *xfrm_if_get_cb(void)
 269{
 270	return rcu_dereference(xfrm_if_cb);
 271}
 272
 273struct dst_entry *__xfrm_dst_lookup(int family,
 274				    const struct xfrm_dst_lookup_params *params)
 275{
 276	const struct xfrm_policy_afinfo *afinfo;
 277	struct dst_entry *dst;
 278
 279	afinfo = xfrm_policy_get_afinfo(family);
 280	if (unlikely(afinfo == NULL))
 281		return ERR_PTR(-EAFNOSUPPORT);
 282
 283	dst = afinfo->dst_lookup(params);
 284
 285	rcu_read_unlock();
 286
 287	return dst;
 288}
 289EXPORT_SYMBOL(__xfrm_dst_lookup);
 290
 291static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
 292						dscp_t dscp, int oif,
 293						xfrm_address_t *prev_saddr,
 294						xfrm_address_t *prev_daddr,
 295						int family, u32 mark)
 296{
 297	struct xfrm_dst_lookup_params params;
 298	struct net *net = xs_net(x);
 299	xfrm_address_t *saddr = &x->props.saddr;
 300	xfrm_address_t *daddr = &x->id.daddr;
 301	struct dst_entry *dst;
 302
 303	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
 304		saddr = x->coaddr;
 305		daddr = prev_daddr;
 306	}
 307	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
 308		saddr = prev_saddr;
 309		daddr = x->coaddr;
 310	}
 311
 312	params.net = net;
 313	params.saddr = saddr;
 314	params.daddr = daddr;
 315	params.dscp = dscp;
 316	params.oif = oif;
 317	params.mark = mark;
 318	params.ipproto = x->id.proto;
 319	if (x->encap) {
 320		switch (x->encap->encap_type) {
 321		case UDP_ENCAP_ESPINUDP:
 322			params.ipproto = IPPROTO_UDP;
 323			params.uli.ports.sport = x->encap->encap_sport;
 324			params.uli.ports.dport = x->encap->encap_dport;
 325			break;
 326		case TCP_ENCAP_ESPINTCP:
 327			params.ipproto = IPPROTO_TCP;
 328			params.uli.ports.sport = x->encap->encap_sport;
 329			params.uli.ports.dport = x->encap->encap_dport;
 330			break;
 331		}
 332	}
 333
 334	dst = __xfrm_dst_lookup(family, &params);
 335
 336	if (!IS_ERR(dst)) {
 337		if (prev_saddr != saddr)
 338			memcpy(prev_saddr, saddr,  sizeof(*prev_saddr));
 339		if (prev_daddr != daddr)
 340			memcpy(prev_daddr, daddr,  sizeof(*prev_daddr));
 341	}
 342
 343	return dst;
 344}
 345
 346static inline unsigned long make_jiffies(long secs)
 347{
 348	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
 349		return MAX_SCHEDULE_TIMEOUT-1;
 350	else
 351		return secs*HZ;
 352}
 353
 354static void xfrm_policy_timer(struct timer_list *t)
 355{
 356	struct xfrm_policy *xp = from_timer(xp, t, timer);
 357	time64_t now = ktime_get_real_seconds();
 358	time64_t next = TIME64_MAX;
 359	int warn = 0;
 360	int dir;
 361
 362	read_lock(&xp->lock);
 363
 364	if (unlikely(xp->walk.dead))
 365		goto out;
 366
 367	dir = xfrm_policy_id2dir(xp->index);
 368
 369	if (xp->lft.hard_add_expires_seconds) {
 370		time64_t tmo = xp->lft.hard_add_expires_seconds +
 371			xp->curlft.add_time - now;
 372		if (tmo <= 0)
 373			goto expired;
 374		if (tmo < next)
 375			next = tmo;
 376	}
 377	if (xp->lft.hard_use_expires_seconds) {
 378		time64_t tmo = xp->lft.hard_use_expires_seconds +
 379			(READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
 380		if (tmo <= 0)
 381			goto expired;
 382		if (tmo < next)
 383			next = tmo;
 384	}
 385	if (xp->lft.soft_add_expires_seconds) {
 386		time64_t tmo = xp->lft.soft_add_expires_seconds +
 387			xp->curlft.add_time - now;
 388		if (tmo <= 0) {
 389			warn = 1;
 390			tmo = XFRM_KM_TIMEOUT;
 391		}
 392		if (tmo < next)
 393			next = tmo;
 394	}
 395	if (xp->lft.soft_use_expires_seconds) {
 396		time64_t tmo = xp->lft.soft_use_expires_seconds +
 397			(READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
 398		if (tmo <= 0) {
 399			warn = 1;
 400			tmo = XFRM_KM_TIMEOUT;
 401		}
 402		if (tmo < next)
 403			next = tmo;
 404	}
 405
 406	if (warn)
 407		km_policy_expired(xp, dir, 0, 0);
 408	if (next != TIME64_MAX &&
 409	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
 410		xfrm_pol_hold(xp);
 411
 412out:
 413	read_unlock(&xp->lock);
 414	xfrm_pol_put(xp);
 415	return;
 416
 417expired:
 418	read_unlock(&xp->lock);
 419	if (!xfrm_policy_delete(xp, dir))
 420		km_policy_expired(xp, dir, 1, 0);
 421	xfrm_pol_put(xp);
 422}
 423
  424/* Allocate xfrm_policy. Not used here directly; it is meant to be used by
  425 * pfkeyv2 SPD calls.
  426 */
 427
 428struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
 429{
 430	struct xfrm_policy *policy;
 431
 432	policy = kzalloc(sizeof(struct xfrm_policy), gfp);
 433
 434	if (policy) {
 435		write_pnet(&policy->xp_net, net);
 436		INIT_LIST_HEAD(&policy->walk.all);
 437		INIT_HLIST_HEAD(&policy->state_cache_list);
 438		INIT_HLIST_NODE(&policy->bydst);
 439		INIT_HLIST_NODE(&policy->byidx);
 440		rwlock_init(&policy->lock);
 441		refcount_set(&policy->refcnt, 1);
 442		skb_queue_head_init(&policy->polq.hold_queue);
 443		timer_setup(&policy->timer, xfrm_policy_timer, 0);
 444		timer_setup(&policy->polq.hold_timer,
 445			    xfrm_policy_queue_process, 0);
 446	}
 447	return policy;
 448}
 449EXPORT_SYMBOL(xfrm_policy_alloc);
 450
 451static void xfrm_policy_destroy_rcu(struct rcu_head *head)
 452{
 453	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
 454
 455	security_xfrm_policy_free(policy->security);
 456	kfree(policy);
 457}
 458
  459/* Destroy xfrm_policy: descendant resources must have been released by this point. */
 460
 461void xfrm_policy_destroy(struct xfrm_policy *policy)
 462{
 463	BUG_ON(!policy->walk.dead);
 464
 465	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
 466		BUG();
 467
 468	xfrm_dev_policy_free(policy);
 469	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
 470}
 471EXPORT_SYMBOL(xfrm_policy_destroy);
 472
  473/* The rule must be locked. Release descendant resources and announce the
  474 * entry dead. The rule must already be unlinked from all lists at this point.
  475 */
 476
 477static void xfrm_policy_kill(struct xfrm_policy *policy)
 478{
 479	struct net *net = xp_net(policy);
 480	struct xfrm_state *x;
 481
 482	xfrm_dev_policy_delete(policy);
 483
 484	write_lock_bh(&policy->lock);
 485	policy->walk.dead = 1;
 486	write_unlock_bh(&policy->lock);
 487
 488	atomic_inc(&policy->genid);
 489
 490	if (del_timer(&policy->polq.hold_timer))
 491		xfrm_pol_put(policy);
 492	skb_queue_purge(&policy->polq.hold_queue);
 493
 494	if (del_timer(&policy->timer))
 495		xfrm_pol_put(policy);
 496
 497	/* XXX: Flush state cache */
 498	spin_lock_bh(&net->xfrm.xfrm_state_lock);
 499	hlist_for_each_entry_rcu(x, &policy->state_cache_list, state_cache) {
 500		hlist_del_init_rcu(&x->state_cache);
 501	}
 502	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
 503
 504	xfrm_pol_put(policy);
 505}
 506
 507static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
 508
 509static inline unsigned int idx_hash(struct net *net, u32 index)
 510{
 511	return __idx_hash(index, net->xfrm.policy_idx_hmask);
 512}
 513
 514/* calculate policy hash thresholds */
 515static void __get_hash_thresh(struct net *net,
 516			      unsigned short family, int dir,
 517			      u8 *dbits, u8 *sbits)
 518{
 519	switch (family) {
 520	case AF_INET:
 521		*dbits = net->xfrm.policy_bydst[dir].dbits4;
 522		*sbits = net->xfrm.policy_bydst[dir].sbits4;
 523		break;
 524
 525	case AF_INET6:
 526		*dbits = net->xfrm.policy_bydst[dir].dbits6;
 527		*sbits = net->xfrm.policy_bydst[dir].sbits6;
 528		break;
 529
 530	default:
 531		*dbits = 0;
 532		*sbits = 0;
 533	}
 534}
 535
 536static struct hlist_head *policy_hash_bysel(struct net *net,
 537					    const struct xfrm_selector *sel,
 538					    unsigned short family, int dir)
 539{
 540	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 541	unsigned int hash;
 542	u8 dbits;
 543	u8 sbits;
 544
 545	__get_hash_thresh(net, family, dir, &dbits, &sbits);
 546	hash = __sel_hash(sel, family, hmask, dbits, sbits);
 547
 548	if (hash == hmask + 1)
 549		return NULL;
 550
 551	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
 552		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
 553}
 554
 555static struct hlist_head *policy_hash_direct(struct net *net,
 556					     const xfrm_address_t *daddr,
 557					     const xfrm_address_t *saddr,
 558					     unsigned short family, int dir)
 559{
 560	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 561	unsigned int hash;
 562	u8 dbits;
 563	u8 sbits;
 564
 565	__get_hash_thresh(net, family, dir, &dbits, &sbits);
 566	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
 567
 568	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
 569		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
 570}
 571
 572static void xfrm_dst_hash_transfer(struct net *net,
 573				   struct hlist_head *list,
 574				   struct hlist_head *ndsttable,
 575				   unsigned int nhashmask,
 576				   int dir)
 577{
 578	struct hlist_node *tmp, *entry0 = NULL;
 579	struct xfrm_policy *pol;
 580	unsigned int h0 = 0;
 581	u8 dbits;
 582	u8 sbits;
 583
 584redo:
 585	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
 586		unsigned int h;
 587
 588		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
 589		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
 590				pol->family, nhashmask, dbits, sbits);
 591		if (!entry0 || pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
 592			hlist_del_rcu(&pol->bydst);
 593			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
 594			h0 = h;
 595		} else {
 596			if (h != h0)
 597				continue;
 598			hlist_del_rcu(&pol->bydst);
 599			hlist_add_behind_rcu(&pol->bydst, entry0);
 600		}
 601		entry0 = &pol->bydst;
 602	}
 603	if (!hlist_empty(list)) {
 604		entry0 = NULL;
 605		goto redo;
 606	}
 607}
 608
 609static void xfrm_idx_hash_transfer(struct hlist_head *list,
 610				   struct hlist_head *nidxtable,
 611				   unsigned int nhashmask)
 612{
 613	struct hlist_node *tmp;
 614	struct xfrm_policy *pol;
 615
 616	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
 617		unsigned int h;
 618
 619		h = __idx_hash(pol->index, nhashmask);
 620		hlist_add_head(&pol->byidx, nidxtable+h);
 621	}
 622}
 623
 624static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
 625{
 626	return ((old_hmask + 1) << 1) - 1;
 627}
 628
 629static void xfrm_bydst_resize(struct net *net, int dir)
 630{
 631	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 632	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
 633	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
 634	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
 635	struct hlist_head *odst;
 636	int i;
 637
 638	if (!ndst)
 639		return;
 640
 641	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  642	write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
 643
 644	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
 645				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
 646
 647	for (i = hmask; i >= 0; i--)
 648		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
 649
 650	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
 651	net->xfrm.policy_bydst[dir].hmask = nhashmask;
 652
 653	write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
 654	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 655
 656	synchronize_rcu();
 657
 658	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
 659}
 660
 661static void xfrm_byidx_resize(struct net *net)
 662{
 663	unsigned int hmask = net->xfrm.policy_idx_hmask;
 664	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
 665	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
 666	struct hlist_head *oidx = net->xfrm.policy_byidx;
 667	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
 668	int i;
 669
 670	if (!nidx)
 671		return;
 672
 673	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
 674
 675	for (i = hmask; i >= 0; i--)
 676		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
 677
 678	net->xfrm.policy_byidx = nidx;
 679	net->xfrm.policy_idx_hmask = nhashmask;
 680
 681	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 682
 683	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
 684}
 685
 686static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
 687{
 688	unsigned int cnt = net->xfrm.policy_count[dir];
 689	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 690
 691	if (total)
 692		*total += cnt;
 693
 694	if ((hmask + 1) < xfrm_policy_hashmax &&
 695	    cnt > hmask)
 696		return 1;
 697
 698	return 0;
 699}
 700
 701static inline int xfrm_byidx_should_resize(struct net *net, int total)
 702{
 703	unsigned int hmask = net->xfrm.policy_idx_hmask;
 704
 705	if ((hmask + 1) < xfrm_policy_hashmax &&
 706	    total > hmask)
 707		return 1;
 708
 709	return 0;
 710}
 711
 712void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
 713{
 714	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
 715	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
 716	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
 717	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
 718	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
 719	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
 720	si->spdhcnt = net->xfrm.policy_idx_hmask;
 721	si->spdhmcnt = xfrm_policy_hashmax;
 722}
 723EXPORT_SYMBOL(xfrm_spd_getinfo);
 724
 725static DEFINE_MUTEX(hash_resize_mutex);
 726static void xfrm_hash_resize(struct work_struct *work)
 727{
 728	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
 729	int dir, total;
 730
 731	mutex_lock(&hash_resize_mutex);
 732
 733	total = 0;
 734	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
 735		if (xfrm_bydst_should_resize(net, dir, &total))
 736			xfrm_bydst_resize(net, dir);
 737	}
 738	if (xfrm_byidx_should_resize(net, total))
 739		xfrm_byidx_resize(net);
 740
 741	mutex_unlock(&hash_resize_mutex);
 742}
 743
 744/* Make sure *pol can be inserted into fastbin.
 745 * Useful to check that later insert requests will be successful
 746 * (provided xfrm_policy_lock is held throughout).
 747 */
 748static struct xfrm_pol_inexact_bin *
 749xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
 750{
 751	struct xfrm_pol_inexact_bin *bin, *prev;
 752	struct xfrm_pol_inexact_key k = {
 753		.family = pol->family,
 754		.type = pol->type,
 755		.dir = dir,
 756		.if_id = pol->if_id,
 757	};
 758	struct net *net = xp_net(pol);
 759
 760	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
 761
 762	write_pnet(&k.net, net);
 763	bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
 764				     xfrm_pol_inexact_params);
 765	if (bin)
 766		return bin;
 767
 768	bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
 769	if (!bin)
 770		return NULL;
 771
 772	bin->k = k;
 773	INIT_HLIST_HEAD(&bin->hhead);
 774	bin->root_d = RB_ROOT;
 775	bin->root_s = RB_ROOT;
 776	seqcount_spinlock_init(&bin->count, &net->xfrm.xfrm_policy_lock);
 777
 778	prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
 779						&bin->k, &bin->head,
 780						xfrm_pol_inexact_params);
 781	if (!prev) {
 782		list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
 783		return bin;
 784	}
 785
 786	kfree(bin);
 787
 788	return IS_ERR(prev) ? NULL : prev;
 789}
 790
 791static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
 792					       int family, u8 prefixlen)
 793{
 794	if (xfrm_addr_any(addr, family))
 795		return true;
 796
 797	if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
 798		return true;
 799
 800	if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
 801		return true;
 802
 803	return false;
 804}
 805
 806static bool
 807xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
 808{
 809	const xfrm_address_t *addr;
 810	bool saddr_any, daddr_any;
 811	u8 prefixlen;
 812
 813	addr = &policy->selector.saddr;
 814	prefixlen = policy->selector.prefixlen_s;
 815
 816	saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
 817						       policy->family,
 818						       prefixlen);
 819	addr = &policy->selector.daddr;
 820	prefixlen = policy->selector.prefixlen_d;
 821	daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
 822						       policy->family,
 823						       prefixlen);
 824	return saddr_any && daddr_any;
 825}
 826
 827static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
 828				       const xfrm_address_t *addr, u8 prefixlen)
 829{
 830	node->addr = *addr;
 831	node->prefixlen = prefixlen;
 832}
 833
 834static struct xfrm_pol_inexact_node *
 835xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
 836{
 837	struct xfrm_pol_inexact_node *node;
 838
 839	node = kzalloc(sizeof(*node), GFP_ATOMIC);
 840	if (node)
 841		xfrm_pol_inexact_node_init(node, addr, prefixlen);
 842
 843	return node;
 844}
 845
 846static int xfrm_policy_addr_delta(const xfrm_address_t *a,
 847				  const xfrm_address_t *b,
 848				  u8 prefixlen, u16 family)
 849{
 850	u32 ma, mb, mask;
 851	unsigned int pdw, pbi;
 852	int delta = 0;
 853
 854	switch (family) {
 855	case AF_INET:
 856		if (prefixlen == 0)
 857			return 0;
 858		mask = ~0U << (32 - prefixlen);
 859		ma = ntohl(a->a4) & mask;
 860		mb = ntohl(b->a4) & mask;
 861		if (ma < mb)
 862			delta = -1;
 863		else if (ma > mb)
 864			delta = 1;
 865		break;
 866	case AF_INET6:
 867		pdw = prefixlen >> 5;
 868		pbi = prefixlen & 0x1f;
 869
 870		if (pdw) {
 871			delta = memcmp(a->a6, b->a6, pdw << 2);
 872			if (delta)
 873				return delta;
 874		}
 875		if (pbi) {
 876			mask = ~0U << (32 - pbi);
 877			ma = ntohl(a->a6[pdw]) & mask;
 878			mb = ntohl(b->a6[pdw]) & mask;
 879			if (ma < mb)
 880				delta = -1;
 881			else if (ma > mb)
 882				delta = 1;
 883		}
 884		break;
 885	default:
 886		break;
 887	}
 888
 889	return delta;
 890}
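/*
 * Illustrative, standalone userspace sketch (an assumed example, not kernel
 * code): it mirrors the AF_INET branch of xfrm_policy_addr_delta() above to
 * show how two addresses compare under a given prefix length.  The helper
 * name addr4_delta() and the sample addresses are invented for the example.
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>
#include <arpa/inet.h>

static int addr4_delta(uint32_t a_be, uint32_t b_be, uint8_t prefixlen)
{
	uint32_t mask, ma, mb;

	if (prefixlen == 0)
		return 0;			/* /0 always matches */
	mask = ~0U << (32 - prefixlen);		/* e.g. /24 -> 0xffffff00 */
	ma = ntohl(a_be) & mask;
	mb = ntohl(b_be) & mask;
	if (ma < mb)
		return -1;			/* walk left in the rbtree */
	return ma > mb ? 1 : 0;			/* 1: walk right, 0: same node */
}

int main(void)
{
	uint32_t a, b;

	inet_pton(AF_INET, "10.1.2.3", &a);
	inet_pton(AF_INET, "10.1.3.1", &b);
	printf("/24: %d\n", addr4_delta(a, b, 24));	/* -1: different /24 subnets */
	printf("/16: %d\n", addr4_delta(a, b, 16));	/*  0: same 10.1.0.0/16 network */
	return 0;
}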
 891
 892static void xfrm_policy_inexact_list_reinsert(struct net *net,
 893					      struct xfrm_pol_inexact_node *n,
 894					      u16 family)
 895{
 896	unsigned int matched_s, matched_d;
 897	struct xfrm_policy *policy, *p;
 898
 899	matched_s = 0;
 900	matched_d = 0;
 901
 902	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
 903		struct hlist_node *newpos = NULL;
 904		bool matches_s, matches_d;
 905
 906		if (policy->walk.dead || !policy->bydst_reinsert)
 907			continue;
 908
 909		WARN_ON_ONCE(policy->family != family);
 910
 911		policy->bydst_reinsert = false;
 912		hlist_for_each_entry(p, &n->hhead, bydst) {
 913			if (policy->priority > p->priority)
 914				newpos = &p->bydst;
 915			else if (policy->priority == p->priority &&
 916				 policy->pos > p->pos)
 917				newpos = &p->bydst;
 918			else
 919				break;
 920		}
 921
 922		if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
 923			hlist_add_behind_rcu(&policy->bydst, newpos);
 924		else
 925			hlist_add_head_rcu(&policy->bydst, &n->hhead);
 926
 927		/* paranoia checks follow.
 928		 * Check that the reinserted policy matches at least
 929		 * saddr or daddr for current node prefix.
 930		 *
 931		 * Matching both is fine, matching saddr in one policy
 932		 * (but not daddr) and then matching only daddr in another
 933		 * is a bug.
 934		 */
 935		matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
 936						   &n->addr,
 937						   n->prefixlen,
 938						   family) == 0;
 939		matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
 940						   &n->addr,
 941						   n->prefixlen,
 942						   family) == 0;
 943		if (matches_s && matches_d)
 944			continue;
 945
 946		WARN_ON_ONCE(!matches_s && !matches_d);
 947		if (matches_s)
 948			matched_s++;
 949		if (matches_d)
 950			matched_d++;
 951		WARN_ON_ONCE(matched_s && matched_d);
 952	}
 953}
 954
 955static void xfrm_policy_inexact_node_reinsert(struct net *net,
 956					      struct xfrm_pol_inexact_node *n,
 957					      struct rb_root *new,
 958					      u16 family)
 959{
 960	struct xfrm_pol_inexact_node *node;
 961	struct rb_node **p, *parent;
 962
 963	/* we should not have another subtree here */
 964	WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
 965restart:
 966	parent = NULL;
 967	p = &new->rb_node;
 968	while (*p) {
 969		u8 prefixlen;
 970		int delta;
 971
 972		parent = *p;
 973		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
 974
 975		prefixlen = min(node->prefixlen, n->prefixlen);
 976
 977		delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
 978					       prefixlen, family);
 979		if (delta < 0) {
 980			p = &parent->rb_left;
 981		} else if (delta > 0) {
 982			p = &parent->rb_right;
 983		} else {
 984			bool same_prefixlen = node->prefixlen == n->prefixlen;
 985			struct xfrm_policy *tmp;
 986
 987			hlist_for_each_entry(tmp, &n->hhead, bydst) {
 988				tmp->bydst_reinsert = true;
 989				hlist_del_rcu(&tmp->bydst);
 990			}
 991
 992			node->prefixlen = prefixlen;
 993
 994			xfrm_policy_inexact_list_reinsert(net, node, family);
 995
 996			if (same_prefixlen) {
 997				kfree_rcu(n, rcu);
 998				return;
 999			}
1000
1001			rb_erase(*p, new);
1002			kfree_rcu(n, rcu);
1003			n = node;
1004			goto restart;
1005		}
1006	}
1007
1008	rb_link_node_rcu(&n->node, parent, p);
1009	rb_insert_color(&n->node, new);
1010}
1011
1012/* merge nodes v and n */
1013static void xfrm_policy_inexact_node_merge(struct net *net,
1014					   struct xfrm_pol_inexact_node *v,
1015					   struct xfrm_pol_inexact_node *n,
1016					   u16 family)
1017{
1018	struct xfrm_pol_inexact_node *node;
1019	struct xfrm_policy *tmp;
1020	struct rb_node *rnode;
1021
1022	/* To-be-merged node v has a subtree.
1023	 *
1024	 * Dismantle it and insert its nodes to n->root.
1025	 */
1026	while ((rnode = rb_first(&v->root)) != NULL) {
1027		node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
1028		rb_erase(&node->node, &v->root);
1029		xfrm_policy_inexact_node_reinsert(net, node, &n->root,
1030						  family);
1031	}
1032
1033	hlist_for_each_entry(tmp, &v->hhead, bydst) {
1034		tmp->bydst_reinsert = true;
1035		hlist_del_rcu(&tmp->bydst);
1036	}
1037
1038	xfrm_policy_inexact_list_reinsert(net, n, family);
1039}
1040
1041static struct xfrm_pol_inexact_node *
1042xfrm_policy_inexact_insert_node(struct net *net,
1043				struct rb_root *root,
1044				xfrm_address_t *addr,
1045				u16 family, u8 prefixlen, u8 dir)
1046{
1047	struct xfrm_pol_inexact_node *cached = NULL;
1048	struct rb_node **p, *parent = NULL;
1049	struct xfrm_pol_inexact_node *node;
1050
1051	p = &root->rb_node;
1052	while (*p) {
1053		int delta;
1054
1055		parent = *p;
1056		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
1057
1058		delta = xfrm_policy_addr_delta(addr, &node->addr,
1059					       node->prefixlen,
1060					       family);
1061		if (delta == 0 && prefixlen >= node->prefixlen) {
1062			WARN_ON_ONCE(cached); /* ipsec policies got lost */
1063			return node;
1064		}
1065
1066		if (delta < 0)
1067			p = &parent->rb_left;
1068		else
1069			p = &parent->rb_right;
1070
1071		if (prefixlen < node->prefixlen) {
1072			delta = xfrm_policy_addr_delta(addr, &node->addr,
1073						       prefixlen,
1074						       family);
1075			if (delta)
1076				continue;
1077
1078			/* This node is a subnet of the new prefix. It needs
1079			 * to be removed and re-inserted with the smaller
1080			 * prefix and all nodes that are now also covered
1081			 * by the reduced prefixlen.
1082			 */
1083			rb_erase(&node->node, root);
1084
1085			if (!cached) {
1086				xfrm_pol_inexact_node_init(node, addr,
1087							   prefixlen);
1088				cached = node;
1089			} else {
1090				/* This node also falls within the new
1091				 * prefixlen. Merge the to-be-reinserted
1092				 * node and this one.
1093				 */
1094				xfrm_policy_inexact_node_merge(net, node,
1095							       cached, family);
1096				kfree_rcu(node, rcu);
1097			}
1098
1099			/* restart */
1100			p = &root->rb_node;
1101			parent = NULL;
1102		}
1103	}
1104
1105	node = cached;
1106	if (!node) {
1107		node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
1108		if (!node)
1109			return NULL;
1110	}
1111
1112	rb_link_node_rcu(&node->node, parent, p);
1113	rb_insert_color(&node->node, root);
1114
1115	return node;
1116}
1117
1118static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
1119{
1120	struct xfrm_pol_inexact_node *node;
1121	struct rb_node *rn = rb_first(r);
1122
1123	while (rn) {
1124		node = rb_entry(rn, struct xfrm_pol_inexact_node, node);
1125
1126		xfrm_policy_inexact_gc_tree(&node->root, rm);
1127		rn = rb_next(rn);
1128
1129		if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
1130			WARN_ON_ONCE(rm);
1131			continue;
1132		}
1133
1134		rb_erase(&node->node, r);
1135		kfree_rcu(node, rcu);
1136	}
1137}
1138
1139static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
1140{
1141	write_seqcount_begin(&b->count);
1142	xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
1143	xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
1144	write_seqcount_end(&b->count);
1145
1146	if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
1147	    !hlist_empty(&b->hhead)) {
1148		WARN_ON_ONCE(net_exit);
1149		return;
1150	}
1151
1152	if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
1153				   xfrm_pol_inexact_params) == 0) {
1154		list_del(&b->inexact_bins);
1155		kfree_rcu(b, rcu);
1156	}
1157}
1158
1159static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
1160{
1161	struct net *net = read_pnet(&b->k.net);
1162
1163	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1164	__xfrm_policy_inexact_prune_bin(b, false);
1165	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1166}
1167
1168static void __xfrm_policy_inexact_flush(struct net *net)
1169{
1170	struct xfrm_pol_inexact_bin *bin, *t;
1171
1172	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1173
1174	list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
1175		__xfrm_policy_inexact_prune_bin(bin, false);
1176}
1177
1178static struct hlist_head *
1179xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
1180				struct xfrm_policy *policy, u8 dir)
1181{
1182	struct xfrm_pol_inexact_node *n;
1183	struct net *net;
1184
1185	net = xp_net(policy);
1186	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1187
1188	if (xfrm_policy_inexact_insert_use_any_list(policy))
1189		return &bin->hhead;
1190
1191	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
1192					       policy->family,
1193					       policy->selector.prefixlen_d)) {
1194		write_seqcount_begin(&bin->count);
1195		n = xfrm_policy_inexact_insert_node(net,
1196						    &bin->root_s,
1197						    &policy->selector.saddr,
1198						    policy->family,
1199						    policy->selector.prefixlen_s,
1200						    dir);
1201		write_seqcount_end(&bin->count);
1202		if (!n)
1203			return NULL;
1204
1205		return &n->hhead;
1206	}
1207
1208	/* daddr is fixed */
1209	write_seqcount_begin(&bin->count);
1210	n = xfrm_policy_inexact_insert_node(net,
1211					    &bin->root_d,
1212					    &policy->selector.daddr,
1213					    policy->family,
1214					    policy->selector.prefixlen_d, dir);
1215	write_seqcount_end(&bin->count);
1216	if (!n)
1217		return NULL;
1218
1219	/* saddr is wildcard */
1220	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
1221					       policy->family,
1222					       policy->selector.prefixlen_s))
1223		return &n->hhead;
1224
1225	write_seqcount_begin(&bin->count);
1226	n = xfrm_policy_inexact_insert_node(net,
1227					    &n->root,
1228					    &policy->selector.saddr,
1229					    policy->family,
1230					    policy->selector.prefixlen_s, dir);
1231	write_seqcount_end(&bin->count);
1232	if (!n)
1233		return NULL;
1234
1235	return &n->hhead;
1236}
1237
1238static struct xfrm_policy *
1239xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
1240{
1241	struct xfrm_pol_inexact_bin *bin;
1242	struct xfrm_policy *delpol;
1243	struct hlist_head *chain;
1244	struct net *net;
1245
1246	bin = xfrm_policy_inexact_alloc_bin(policy, dir);
1247	if (!bin)
1248		return ERR_PTR(-ENOMEM);
1249
1250	net = xp_net(policy);
1251	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1252
1253	chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
1254	if (!chain) {
1255		__xfrm_policy_inexact_prune_bin(bin, false);
1256		return ERR_PTR(-ENOMEM);
1257	}
1258
1259	delpol = xfrm_policy_insert_list(chain, policy, excl);
1260	if (delpol && excl) {
1261		__xfrm_policy_inexact_prune_bin(bin, false);
1262		return ERR_PTR(-EEXIST);
1263	}
1264
1265	if (delpol)
1266		__xfrm_policy_inexact_prune_bin(bin, false);
1267
1268	return delpol;
1269}
1270
1271static bool xfrm_policy_is_dead_or_sk(const struct xfrm_policy *policy)
1272{
1273	int dir;
1274
1275	if (policy->walk.dead)
1276		return true;
1277
1278	dir = xfrm_policy_id2dir(policy->index);
1279	return dir >= XFRM_POLICY_MAX;
1280}
1281
1282static void xfrm_hash_rebuild(struct work_struct *work)
1283{
1284	struct net *net = container_of(work, struct net,
1285				       xfrm.policy_hthresh.work);
1286	struct xfrm_policy *pol;
1287	struct xfrm_policy *policy;
1288	struct hlist_head *chain;
1289	struct hlist_node *newpos;
1290	int dir;
1291	unsigned seq;
1292	u8 lbits4, rbits4, lbits6, rbits6;
1293
1294	mutex_lock(&hash_resize_mutex);
1295
1296	/* read selector prefixlen thresholds */
1297	do {
1298		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
1299
1300		lbits4 = net->xfrm.policy_hthresh.lbits4;
1301		rbits4 = net->xfrm.policy_hthresh.rbits4;
1302		lbits6 = net->xfrm.policy_hthresh.lbits6;
1303		rbits6 = net->xfrm.policy_hthresh.rbits6;
1304	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
1305
1306	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1307	write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
1308
1309	/* make sure that we can insert the indirect policies again before
1310	 * we start with destructive action.
1311	 */
1312	list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
1313		struct xfrm_pol_inexact_bin *bin;
1314		u8 dbits, sbits;
1315
1316		if (xfrm_policy_is_dead_or_sk(policy))
1317			continue;
1318
1319		dir = xfrm_policy_id2dir(policy->index);
1320		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
1321			if (policy->family == AF_INET) {
1322				dbits = rbits4;
1323				sbits = lbits4;
1324			} else {
1325				dbits = rbits6;
1326				sbits = lbits6;
1327			}
1328		} else {
1329			if (policy->family == AF_INET) {
1330				dbits = lbits4;
1331				sbits = rbits4;
1332			} else {
1333				dbits = lbits6;
1334				sbits = rbits6;
1335			}
1336		}
1337
1338		if (policy->selector.prefixlen_d < dbits ||
1339		    policy->selector.prefixlen_s < sbits)
1340			continue;
1341
1342		bin = xfrm_policy_inexact_alloc_bin(policy, dir);
1343		if (!bin)
1344			goto out_unlock;
1345
1346		if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
1347			goto out_unlock;
1348	}
1349
1350	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
1351		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
1352			/* dir out => dst = remote, src = local */
1353			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
1354			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
1355			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
1356			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
1357		} else {
1358			/* dir in/fwd => dst = local, src = remote */
1359			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
1360			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
1361			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
1362			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
1363		}
1364	}
1365
1366	/* re-insert all policies by order of creation */
1367	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
1368		if (xfrm_policy_is_dead_or_sk(policy))
1369			continue;
1370
1371		hlist_del_rcu(&policy->bydst);
1372
1373		newpos = NULL;
1374		dir = xfrm_policy_id2dir(policy->index);
1375		chain = policy_hash_bysel(net, &policy->selector,
1376					  policy->family, dir);
1377
1378		if (!chain) {
1379			void *p = xfrm_policy_inexact_insert(policy, dir, 0);
1380
1381			WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
1382			continue;
1383		}
1384
1385		hlist_for_each_entry(pol, chain, bydst) {
1386			if (policy->priority >= pol->priority)
1387				newpos = &pol->bydst;
1388			else
1389				break;
1390		}
1391		if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
1392			hlist_add_behind_rcu(&policy->bydst, newpos);
1393		else
1394			hlist_add_head_rcu(&policy->bydst, chain);
1395	}
1396
1397out_unlock:
1398	__xfrm_policy_inexact_flush(net);
1399	write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
1400	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1401
1402	mutex_unlock(&hash_resize_mutex);
1403}
1404
1405void xfrm_policy_hash_rebuild(struct net *net)
1406{
1407	schedule_work(&net->xfrm.policy_hthresh.work);
1408}
1409EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
1410
 1411/* Generate a new index... KAME seems to generate them ordered by cost,
 1412 * at the price of completely unpredictable rule ordering. This will not pass. */
1413static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
1414{
1415	for (;;) {
1416		struct hlist_head *list;
1417		struct xfrm_policy *p;
1418		u32 idx;
1419		int found;
1420
1421		if (!index) {
1422			idx = (net->xfrm.idx_generator | dir);
1423			net->xfrm.idx_generator += 8;
1424		} else {
1425			idx = index;
1426			index = 0;
1427		}
1428
1429		if (idx == 0)
1430			idx = 8;
1431		list = net->xfrm.policy_byidx + idx_hash(net, idx);
1432		found = 0;
1433		hlist_for_each_entry(p, list, byidx) {
1434			if (p->index == idx) {
1435				found = 1;
1436				break;
1437			}
1438		}
1439		if (!found)
1440			return idx;
1441	}
1442}
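/* Note on the scheme above: idx_generator advances in steps of 8 and the
 * direction is OR-ed into the low bits (an all-zero index is bumped to 8,
 * which keeps those bits intact), so the low three bits of every generated
 * index encode dir.  That is what lets callers elsewhere in this file, such
 * as xfrm_policy_timer() and xfrm_policy_flush(), recover the direction via
 * xfrm_policy_id2dir() without storing it separately.
 */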
1443
1444static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
1445{
1446	u32 *p1 = (u32 *) s1;
1447	u32 *p2 = (u32 *) s2;
1448	int len = sizeof(struct xfrm_selector) / sizeof(u32);
1449	int i;
1450
1451	for (i = 0; i < len; i++) {
1452		if (p1[i] != p2[i])
1453			return 1;
1454	}
1455
1456	return 0;
1457}
1458
1459static void xfrm_policy_requeue(struct xfrm_policy *old,
1460				struct xfrm_policy *new)
1461{
1462	struct xfrm_policy_queue *pq = &old->polq;
1463	struct sk_buff_head list;
1464
1465	if (skb_queue_empty(&pq->hold_queue))
1466		return;
1467
1468	__skb_queue_head_init(&list);
1469
1470	spin_lock_bh(&pq->hold_queue.lock);
1471	skb_queue_splice_init(&pq->hold_queue, &list);
1472	if (del_timer(&pq->hold_timer))
1473		xfrm_pol_put(old);
1474	spin_unlock_bh(&pq->hold_queue.lock);
1475
1476	pq = &new->polq;
1477
1478	spin_lock_bh(&pq->hold_queue.lock);
1479	skb_queue_splice(&list, &pq->hold_queue);
1480	pq->timeout = XFRM_QUEUE_TMO_MIN;
1481	if (!mod_timer(&pq->hold_timer, jiffies))
1482		xfrm_pol_hold(new);
1483	spin_unlock_bh(&pq->hold_queue.lock);
1484}
1485
1486static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
1487					  struct xfrm_policy *pol)
1488{
1489	return mark->v == pol->mark.v && mark->m == pol->mark.m;
1490}
1491
1492static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
1493{
1494	const struct xfrm_pol_inexact_key *k = data;
1495	u32 a = k->type << 24 | k->dir << 16 | k->family;
1496
1497	return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
1498			    seed);
1499}
1500
1501static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
1502{
1503	const struct xfrm_pol_inexact_bin *b = data;
1504
1505	return xfrm_pol_bin_key(&b->k, 0, seed);
1506}
1507
1508static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
1509			    const void *ptr)
1510{
1511	const struct xfrm_pol_inexact_key *key = arg->key;
1512	const struct xfrm_pol_inexact_bin *b = ptr;
1513	int ret;
1514
1515	if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
1516		return -1;
1517
1518	ret = b->k.dir ^ key->dir;
1519	if (ret)
1520		return ret;
1521
1522	ret = b->k.type ^ key->type;
1523	if (ret)
1524		return ret;
1525
1526	ret = b->k.family ^ key->family;
1527	if (ret)
1528		return ret;
1529
1530	return b->k.if_id ^ key->if_id;
1531}
1532
1533static const struct rhashtable_params xfrm_pol_inexact_params = {
1534	.head_offset		= offsetof(struct xfrm_pol_inexact_bin, head),
1535	.hashfn			= xfrm_pol_bin_key,
1536	.obj_hashfn		= xfrm_pol_bin_obj,
1537	.obj_cmpfn		= xfrm_pol_bin_cmp,
1538	.automatic_shrinking	= true,
1539};
1540
1541static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
1542						   struct xfrm_policy *policy,
1543						   bool excl)
1544{
1545	struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;
1546
1547	hlist_for_each_entry(pol, chain, bydst) {
1548		if (pol->type == policy->type &&
1549		    pol->if_id == policy->if_id &&
1550		    !selector_cmp(&pol->selector, &policy->selector) &&
1551		    xfrm_policy_mark_match(&policy->mark, pol) &&
1552		    xfrm_sec_ctx_match(pol->security, policy->security) &&
1553		    !WARN_ON(delpol)) {
1554			if (excl)
1555				return ERR_PTR(-EEXIST);
1556			delpol = pol;
1557			if (policy->priority > pol->priority)
1558				continue;
1559		} else if (policy->priority >= pol->priority) {
1560			newpos = pol;
1561			continue;
1562		}
1563		if (delpol)
1564			break;
1565	}
1566
1567	if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
1568		hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
1569	else
 1570		/* Packet offload policies are inserted at the head
 1571		 * to speed up lookups.
 1572		 */
1573		hlist_add_head_rcu(&policy->bydst, chain);
1574
1575	return delpol;
1576}
1577
1578int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
1579{
1580	struct net *net = xp_net(policy);
1581	struct xfrm_policy *delpol;
1582	struct hlist_head *chain;
1583
1584	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1585	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
1586	if (chain)
1587		delpol = xfrm_policy_insert_list(chain, policy, excl);
1588	else
1589		delpol = xfrm_policy_inexact_insert(policy, dir, excl);
1590
1591	if (IS_ERR(delpol)) {
1592		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1593		return PTR_ERR(delpol);
1594	}
1595
1596	__xfrm_policy_link(policy, dir);
1597
1598	/* After previous checking, family can either be AF_INET or AF_INET6 */
1599	if (policy->family == AF_INET)
1600		rt_genid_bump_ipv4(net);
1601	else
1602		rt_genid_bump_ipv6(net);
1603
1604	if (delpol) {
1605		xfrm_policy_requeue(delpol, policy);
1606		__xfrm_policy_unlink(delpol, dir);
1607	}
1608	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
1609	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
1610	policy->curlft.add_time = ktime_get_real_seconds();
1611	policy->curlft.use_time = 0;
1612	if (!mod_timer(&policy->timer, jiffies + HZ))
1613		xfrm_pol_hold(policy);
1614	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1615
1616	if (delpol)
1617		xfrm_policy_kill(delpol);
1618	else if (xfrm_bydst_should_resize(net, dir, NULL))
1619		schedule_work(&net->xfrm.policy_hash_work);
1620
1621	return 0;
1622}
1623EXPORT_SYMBOL(xfrm_policy_insert);
1624
1625static struct xfrm_policy *
1626__xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
1627			u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
1628			struct xfrm_sec_ctx *ctx)
1629{
1630	struct xfrm_policy *pol;
1631
1632	if (!chain)
1633		return NULL;
1634
1635	hlist_for_each_entry(pol, chain, bydst) {
1636		if (pol->type == type &&
1637		    pol->if_id == if_id &&
1638		    xfrm_policy_mark_match(mark, pol) &&
1639		    !selector_cmp(sel, &pol->selector) &&
1640		    xfrm_sec_ctx_match(ctx, pol->security))
1641			return pol;
1642	}
1643
1644	return NULL;
1645}
1646
1647struct xfrm_policy *
1648xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
1649		      u8 type, int dir, struct xfrm_selector *sel,
1650		      struct xfrm_sec_ctx *ctx, int delete, int *err)
1651{
1652	struct xfrm_pol_inexact_bin *bin = NULL;
1653	struct xfrm_policy *pol, *ret = NULL;
1654	struct hlist_head *chain;
1655
1656	*err = 0;
1657	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1658	chain = policy_hash_bysel(net, sel, sel->family, dir);
1659	if (!chain) {
1660		struct xfrm_pol_inexact_candidates cand;
1661		int i;
1662
1663		bin = xfrm_policy_inexact_lookup(net, type,
1664						 sel->family, dir, if_id);
1665		if (!bin) {
1666			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1667			return NULL;
1668		}
1669
1670		if (!xfrm_policy_find_inexact_candidates(&cand, bin,
1671							 &sel->saddr,
1672							 &sel->daddr)) {
1673			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1674			return NULL;
1675		}
1676
1677		pol = NULL;
1678		for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
1679			struct xfrm_policy *tmp;
1680
1681			tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
1682						      if_id, type, dir,
1683						      sel, ctx);
1684			if (!tmp)
1685				continue;
1686
1687			if (!pol || tmp->pos < pol->pos)
1688				pol = tmp;
1689		}
1690	} else {
1691		pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
1692					      sel, ctx);
1693	}
1694
1695	if (pol) {
1696		xfrm_pol_hold(pol);
1697		if (delete) {
1698			*err = security_xfrm_policy_delete(pol->security);
1699			if (*err) {
1700				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1701				return pol;
1702			}
1703			__xfrm_policy_unlink(pol, dir);
1704		}
1705		ret = pol;
1706	}
1707	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1708
1709	if (ret && delete)
1710		xfrm_policy_kill(ret);
1711	if (bin && delete)
1712		xfrm_policy_inexact_prune_bin(bin);
1713	return ret;
1714}
1715EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
1716
1717struct xfrm_policy *
1718xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
1719		 u8 type, int dir, u32 id, int delete, int *err)
1720{
1721	struct xfrm_policy *pol, *ret;
1722	struct hlist_head *chain;
1723
1724	*err = -ENOENT;
1725	if (xfrm_policy_id2dir(id) != dir)
1726		return NULL;
1727
1728	*err = 0;
1729	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1730	chain = net->xfrm.policy_byidx + idx_hash(net, id);
1731	ret = NULL;
1732	hlist_for_each_entry(pol, chain, byidx) {
1733		if (pol->type == type && pol->index == id &&
1734		    pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
1735			xfrm_pol_hold(pol);
1736			if (delete) {
1737				*err = security_xfrm_policy_delete(
1738								pol->security);
1739				if (*err) {
1740					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1741					return pol;
1742				}
1743				__xfrm_policy_unlink(pol, dir);
1744			}
1745			ret = pol;
1746			break;
1747		}
1748	}
1749	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1750
1751	if (ret && delete)
1752		xfrm_policy_kill(ret);
1753	return ret;
1754}
1755EXPORT_SYMBOL(xfrm_policy_byid);
1756
1757#ifdef CONFIG_SECURITY_NETWORK_XFRM
1758static inline int
1759xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1760{
1761	struct xfrm_policy *pol;
1762	int err = 0;
1763
1764	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1765		if (pol->walk.dead ||
1766		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
1767		    pol->type != type)
1768			continue;
1769
1770		err = security_xfrm_policy_delete(pol->security);
1771		if (err) {
1772			xfrm_audit_policy_delete(pol, 0, task_valid);
1773			return err;
1774		}
1775	}
1776	return err;
1777}
1778
1779static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
1780						     struct net_device *dev,
1781						     bool task_valid)
1782{
1783	struct xfrm_policy *pol;
1784	int err = 0;
1785
1786	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1787		if (pol->walk.dead ||
1788		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
1789		    pol->xdo.dev != dev)
1790			continue;
1791
1792		err = security_xfrm_policy_delete(pol->security);
1793		if (err) {
1794			xfrm_audit_policy_delete(pol, 0, task_valid);
1795			return err;
1796		}
1797	}
1798	return err;
1799}
1800#else
1801static inline int
1802xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1803{
1804	return 0;
1805}
1806
1807static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
1808						     struct net_device *dev,
1809						     bool task_valid)
1810{
1811	return 0;
1812}
1813#endif
1814
1815int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
1816{
1817	int dir, err = 0, cnt = 0;
1818	struct xfrm_policy *pol;
1819
1820	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1821
1822	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
1823	if (err)
1824		goto out;
1825
1826again:
1827	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1828		if (pol->walk.dead)
1829			continue;
1830
1831		dir = xfrm_policy_id2dir(pol->index);
1832		if (dir >= XFRM_POLICY_MAX ||
1833		    pol->type != type)
1834			continue;
1835
1836		__xfrm_policy_unlink(pol, dir);
1837		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1838		cnt++;
1839		xfrm_audit_policy_delete(pol, 1, task_valid);
1840		xfrm_policy_kill(pol);
1841		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1842		goto again;
1843	}
1844	if (cnt)
1845		__xfrm_policy_inexact_flush(net);
1846	else
1847		err = -ESRCH;
1848out:
1849	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1850	return err;
1851}
1852EXPORT_SYMBOL(xfrm_policy_flush);
1853
1854int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
1855			  bool task_valid)
1856{
1857	int dir, err = 0, cnt = 0;
1858	struct xfrm_policy *pol;
1859
1860	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1861
1862	err = xfrm_dev_policy_flush_secctx_check(net, dev, task_valid);
1863	if (err)
1864		goto out;
1865
1866again:
1867	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1868		if (pol->walk.dead)
1869			continue;
1870
1871		dir = xfrm_policy_id2dir(pol->index);
1872		if (dir >= XFRM_POLICY_MAX ||
1873		    pol->xdo.dev != dev)
1874			continue;
1875
1876		__xfrm_policy_unlink(pol, dir);
1877		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1878		cnt++;
1879		xfrm_audit_policy_delete(pol, 1, task_valid);
1880		xfrm_policy_kill(pol);
1881		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1882		goto again;
1883	}
1884	if (cnt)
1885		__xfrm_policy_inexact_flush(net);
1886	else
1887		err = -ESRCH;
1888out:
1889	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1890	return err;
1891}
1892EXPORT_SYMBOL(xfrm_dev_policy_flush);
1893
1894int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1895		     int (*func)(struct xfrm_policy *, int, int, void*),
1896		     void *data)
1897{
1898	struct xfrm_policy *pol;
1899	struct xfrm_policy_walk_entry *x;
1900	int error = 0;
1901
1902	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
1903	    walk->type != XFRM_POLICY_TYPE_ANY)
1904		return -EINVAL;
1905
1906	if (list_empty(&walk->walk.all) && walk->seq != 0)
1907		return 0;
1908
1909	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1910	if (list_empty(&walk->walk.all))
1911		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
1912	else
1913		x = list_first_entry(&walk->walk.all,
1914				     struct xfrm_policy_walk_entry, all);
1915
1916	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1917		if (x->dead)
1918			continue;
1919		pol = container_of(x, struct xfrm_policy, walk);
1920		if (walk->type != XFRM_POLICY_TYPE_ANY &&
1921		    walk->type != pol->type)
1922			continue;
1923		error = func(pol, xfrm_policy_id2dir(pol->index),
1924			     walk->seq, data);
1925		if (error) {
1926			list_move_tail(&walk->walk.all, &x->all);
1927			goto out;
1928		}
1929		walk->seq++;
1930	}
1931	if (walk->seq == 0) {
1932		error = -ENOENT;
1933		goto out;
1934	}
1935	list_del_init(&walk->walk.all);
1936out:
1937	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1938	return error;
1939}
1940EXPORT_SYMBOL(xfrm_policy_walk);
1941
1942void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1943{
1944	INIT_LIST_HEAD(&walk->walk.all);
1945	walk->walk.dead = 1;
1946	walk->type = type;
1947	walk->seq = 0;
1948}
1949EXPORT_SYMBOL(xfrm_policy_walk_init);
1950
1951void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1952{
1953	if (list_empty(&walk->walk.all))
1954		return;
1955
1956	spin_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
1957	list_del(&walk->walk.all);
1958	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1959}
1960EXPORT_SYMBOL(xfrm_policy_walk_done);
1961
1962/*
1963 * Check whether a policy matches this flow.
1964 *
1965 * Returns 0 on a match, else a negative errno.
1966 */
1967static int xfrm_policy_match(const struct xfrm_policy *pol,
1968			     const struct flowi *fl,
1969			     u8 type, u16 family, u32 if_id)
1970{
1971	const struct xfrm_selector *sel = &pol->selector;
1972	int ret = -ESRCH;
1973	bool match;
1974
1975	if (pol->family != family ||
1976	    pol->if_id != if_id ||
1977	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1978	    pol->type != type)
1979		return ret;
1980
1981	match = xfrm_selector_match(sel, fl, family);
1982	if (match)
1983		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid);
1984	return ret;
1985}
1986
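/* Lockless lookup in an inexact-policy rbtree: find the node whose
 * address/prefixlen covers @addr.  The walk runs under RCU without
 * taking the bin lock, so it is retried via the bin's seqcount if the
 * tree was modified while we were traversing it.
 */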
1987static struct xfrm_pol_inexact_node *
1988xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
1989				seqcount_spinlock_t *count,
1990				const xfrm_address_t *addr, u16 family)
1991{
1992	const struct rb_node *parent;
1993	int seq;
1994
1995again:
1996	seq = read_seqcount_begin(count);
1997
1998	parent = rcu_dereference_raw(r->rb_node);
1999	while (parent) {
2000		struct xfrm_pol_inexact_node *node;
2001		int delta;
2002
2003		node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
2004
2005		delta = xfrm_policy_addr_delta(addr, &node->addr,
2006					       node->prefixlen, family);
2007		if (delta < 0) {
2008			parent = rcu_dereference_raw(parent->rb_left);
2009			continue;
2010		} else if (delta > 0) {
2011			parent = rcu_dereference_raw(parent->rb_right);
2012			continue;
2013		}
2014
2015		return node;
2016	}
2017
2018	if (read_seqcount_retry(count, seq))
2019		goto again;
2020
2021	return NULL;
2022}
2023
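/* Collect the candidate policy lists for an inexact bin: the bin's own
 * any:any list, the daddr-only list, the saddr:daddr list found in the
 * per-daddr subtree, and the saddr-only list.  Returns false if no bin
 * was found for this lookup.
 */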
2024static bool
2025xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
2026				    struct xfrm_pol_inexact_bin *b,
2027				    const xfrm_address_t *saddr,
2028				    const xfrm_address_t *daddr)
2029{
2030	struct xfrm_pol_inexact_node *n;
2031	u16 family;
2032
2033	if (!b)
2034		return false;
2035
2036	family = b->k.family;
2037	memset(cand, 0, sizeof(*cand));
2038	cand->res[XFRM_POL_CAND_ANY] = &b->hhead;
2039
2040	n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
2041					    family);
2042	if (n) {
2043		cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
2044		n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
2045						    family);
2046		if (n)
2047			cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
2048	}
2049
2050	n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
2051					    family);
2052	if (n)
2053		cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;
2054
2055	return true;
2056}
2057
2058static struct xfrm_pol_inexact_bin *
2059xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
2060			       u8 dir, u32 if_id)
2061{
2062	struct xfrm_pol_inexact_key k = {
2063		.family = family,
2064		.type = type,
2065		.dir = dir,
2066		.if_id = if_id,
2067	};
2068
2069	write_pnet(&k.net, net);
2070
2071	return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
2072				 xfrm_pol_inexact_params);
2073}
2074
2075static struct xfrm_pol_inexact_bin *
2076xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
2077			   u8 dir, u32 if_id)
2078{
2079	struct xfrm_pol_inexact_bin *bin;
2080
2081	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
2082
2083	rcu_read_lock();
2084	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2085	rcu_read_unlock();
2086
2087	return bin;
2088}
2089
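/* Scan one candidate chain, which is sorted by ascending priority.
 * Return the first policy matching the flow, unless @prefer already
 * matches at the same priority and is older (smaller ->pos), in which
 * case @prefer is kept.  The scan stops once the chain's priority
 * exceeds that of @prefer.
 */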
2090static struct xfrm_policy *
2091__xfrm_policy_eval_candidates(struct hlist_head *chain,
2092			      struct xfrm_policy *prefer,
2093			      const struct flowi *fl,
2094			      u8 type, u16 family, u32 if_id)
2095{
2096	u32 priority = prefer ? prefer->priority : ~0u;
2097	struct xfrm_policy *pol;
2098
2099	if (!chain)
2100		return NULL;
2101
2102	hlist_for_each_entry_rcu(pol, chain, bydst) {
2103		int err;
2104
2105		if (pol->priority > priority)
2106			break;
2107
2108		err = xfrm_policy_match(pol, fl, type, family, if_id);
2109		if (err) {
2110			if (err != -ESRCH)
2111				return ERR_PTR(err);
2112
2113			continue;
2114		}
2115
2116		if (prefer) {
2117			/* matches.  Is it older than *prefer? */
2118			if (pol->priority == priority &&
2119			    prefer->pos < pol->pos)
2120				return prefer;
2121		}
2122
2123		return pol;
2124	}
2125
2126	return NULL;
2127}
2128
2129static struct xfrm_policy *
2130xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
2131			    struct xfrm_policy *prefer,
2132			    const struct flowi *fl,
2133			    u8 type, u16 family, u32 if_id)
2134{
2135	struct xfrm_policy *tmp;
2136	int i;
2137
2138	for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
2139		tmp = __xfrm_policy_eval_candidates(cand->res[i],
2140						    prefer,
2141						    fl, type, family, if_id);
2142		if (!tmp)
2143			continue;
2144
2145		if (IS_ERR(tmp))
2146			return tmp;
2147		prefer = tmp;
2148	}
2149
2150	return prefer;
2151}
2152
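/* Core per-type policy lookup for a flow: search the exact (hashed)
 * chain first and then, unless the exact match is a packet-offload
 * policy, refine the result against the inexact candidate lists.  The
 * lookup is retried if the hash tables were resized concurrently, and
 * a reference is taken on the returned policy.
 */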
2153static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
2154						     const struct flowi *fl,
2155						     u16 family, u8 dir,
2156						     u32 if_id)
2157{
2158	struct xfrm_pol_inexact_candidates cand;
2159	const xfrm_address_t *daddr, *saddr;
2160	struct xfrm_pol_inexact_bin *bin;
2161	struct xfrm_policy *pol, *ret;
2162	struct hlist_head *chain;
2163	unsigned int sequence;
2164	int err;
2165
2166	daddr = xfrm_flowi_daddr(fl, family);
2167	saddr = xfrm_flowi_saddr(fl, family);
2168	if (unlikely(!daddr || !saddr))
2169		return NULL;
2170
2171	rcu_read_lock();
2172 retry:
2173	do {
2174		sequence = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
2175		chain = policy_hash_direct(net, daddr, saddr, family, dir);
2176	} while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence));
2177
2178	ret = NULL;
2179	hlist_for_each_entry_rcu(pol, chain, bydst) {
2180		err = xfrm_policy_match(pol, fl, type, family, if_id);
2181		if (err) {
2182			if (err == -ESRCH)
2183				continue;
2184			else {
2185				ret = ERR_PTR(err);
2186				goto fail;
2187			}
2188		} else {
2189			ret = pol;
2190			break;
2191		}
2192	}
2193	if (ret && ret->xdo.type == XFRM_DEV_OFFLOAD_PACKET)
2194		goto skip_inexact;
2195
2196	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2197	if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
2198							 daddr))
2199		goto skip_inexact;
2200
2201	pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
2202					  family, if_id);
2203	if (pol) {
2204		ret = pol;
2205		if (IS_ERR(pol))
2206			goto fail;
2207	}
2208
2209skip_inexact:
2210	if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence))
2211		goto retry;
2212
2213	if (ret && !xfrm_pol_hold_rcu(ret))
2214		goto retry;
2215fail:
2216	rcu_read_unlock();
2217
2218	return ret;
2219}
2220
2221static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
2222					      const struct flowi *fl,
2223					      u16 family, u8 dir, u32 if_id)
2224{
2225#ifdef CONFIG_XFRM_SUB_POLICY
2226	struct xfrm_policy *pol;
2227
2228	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
2229					dir, if_id);
2230	if (pol != NULL)
2231		return pol;
2232#endif
2233	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
2234					 dir, if_id);
2235}
2236
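/* Check the per-socket policy for @dir under RCU.  The policy applies
 * only if its family, mark and if_id match and its selector matches
 * the flow; the LSM then gets the final say via
 * security_xfrm_policy_lookup().  A reference is taken on the returned
 * policy.
 */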
2237static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
2238						 const struct flowi *fl,
2239						 u16 family, u32 if_id)
2240{
2241	struct xfrm_policy *pol;
2242
2243	rcu_read_lock();
2244 again:
2245	pol = rcu_dereference(sk->sk_policy[dir]);
2246	if (pol != NULL) {
2247		bool match;
2248		int err = 0;
2249
2250		if (pol->family != family) {
2251			pol = NULL;
2252			goto out;
2253		}
2254
2255		match = xfrm_selector_match(&pol->selector, fl, family);
2256		if (match) {
2257			if ((READ_ONCE(sk->sk_mark) & pol->mark.m) != pol->mark.v ||
2258			    pol->if_id != if_id) {
2259				pol = NULL;
2260				goto out;
2261			}
2262			err = security_xfrm_policy_lookup(pol->security,
2263						      fl->flowi_secid);
2264			if (!err) {
2265				if (!xfrm_pol_hold_rcu(pol))
2266					goto again;
2267			} else if (err == -ESRCH) {
2268				pol = NULL;
2269			} else {
2270				pol = ERR_PTR(err);
2271			}
2272		} else
2273			pol = NULL;
2274	}
2275out:
2276	rcu_read_unlock();
2277	return pol;
2278}
2279
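/* Assign the insertion-order position used to break priority ties.
 * Normally the new position is one past that of the most recently
 * added live policy; if that counter has reached UINT_MAX, all live
 * non-socket policies are renumbered starting from the oldest.
 */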
2280static u32 xfrm_gen_pos_slow(struct net *net)
2281{
2282	struct xfrm_policy *policy;
2283	u32 i = 0;
2284
2285	/* oldest entry is last in list */
2286	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
2287		if (!xfrm_policy_is_dead_or_sk(policy))
2288			policy->pos = ++i;
2289	}
2290
2291	return i;
2292}
2293
2294static u32 xfrm_gen_pos(struct net *net)
2295{
2296	const struct xfrm_policy *policy;
2297	u32 i = 0;
2298
2299	/* most recently added policy is at the head of the list */
2300	list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
2301		if (xfrm_policy_is_dead_or_sk(policy))
2302			continue;
2303
2304		if (policy->pos == UINT_MAX)
2305			return xfrm_gen_pos_slow(net);
2306
2307		i = policy->pos + 1;
2308		break;
2309	}
2310
2311	return i;
2312}
2313
2314static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
2315{
2316	struct net *net = xp_net(pol);
2317
2318	switch (dir) {
2319	case XFRM_POLICY_IN:
2320	case XFRM_POLICY_FWD:
2321	case XFRM_POLICY_OUT:
2322		pol->pos = xfrm_gen_pos(net);
2323		break;
2324	}
2325
2326	list_add(&pol->walk.all, &net->xfrm.policy_all);
2327	net->xfrm.policy_count[dir]++;
2328	xfrm_pol_hold(pol);
2329}
2330
2331static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
2332						int dir)
2333{
2334	struct net *net = xp_net(pol);
2335
2336	if (list_empty(&pol->walk.all))
2337		return NULL;
2338
2339	/* Socket policies are not hashed. */
2340	if (!hlist_unhashed(&pol->bydst)) {
2341		hlist_del_rcu(&pol->bydst);
2342		hlist_del(&pol->byidx);
2343	}
2344
2345	list_del_init(&pol->walk.all);
2346	net->xfrm.policy_count[dir]--;
2347
2348	return pol;
2349}
2350
2351static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
2352{
2353	__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
2354}
2355
2356static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
2357{
2358	__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
2359}
2360
2361int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
2362{
2363	struct net *net = xp_net(pol);
2364
2365	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2366	pol = __xfrm_policy_unlink(pol, dir);
2367	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2368	if (pol) {
2369		xfrm_policy_kill(pol);
2370		return 0;
2371	}
2372	return -ENOENT;
2373}
2374EXPORT_SYMBOL(xfrm_policy_delete);
2375
2376int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
2377{
2378	struct net *net = sock_net(sk);
2379	struct xfrm_policy *old_pol;
2380
2381#ifdef CONFIG_XFRM_SUB_POLICY
2382	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
2383		return -EINVAL;
2384#endif
2385
2386	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2387	old_pol = rcu_dereference_protected(sk->sk_policy[dir],
2388				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
2389	if (pol) {
2390		pol->curlft.add_time = ktime_get_real_seconds();
2391		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
2392		xfrm_sk_policy_link(pol, dir);
2393	}
2394	rcu_assign_pointer(sk->sk_policy[dir], pol);
2395	if (old_pol) {
2396		if (pol)
2397			xfrm_policy_requeue(old_pol, pol);
2398
2399		/* Unlinking always succeeds. This is the only function
2400		 * allowed to delete or replace a socket policy.
2401		 */
2402		xfrm_sk_policy_unlink(old_pol, dir);
2403	}
2404	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2405
2406	if (old_pol) {
2407		xfrm_policy_kill(old_pol);
2408	}
2409	return 0;
2410}
2411
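/* Duplicate a socket policy for a newly cloned socket: copy the
 * selector, lifetimes, mark, templates and security context, then link
 * the copy as a socket policy for the same direction.  Returns NULL on
 * allocation or security_xfrm_policy_clone() failure.
 */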
2412static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
2413{
2414	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
2415	struct net *net = xp_net(old);
2416
2417	if (newp) {
2418		newp->selector = old->selector;
2419		if (security_xfrm_policy_clone(old->security,
2420					       &newp->security)) {
2421			kfree(newp);
2422			return NULL;  /* ENOMEM */
2423		}
2424		newp->lft = old->lft;
2425		newp->curlft = old->curlft;
2426		newp->mark = old->mark;
2427		newp->if_id = old->if_id;
2428		newp->action = old->action;
2429		newp->flags = old->flags;
2430		newp->xfrm_nr = old->xfrm_nr;
2431		newp->index = old->index;
2432		newp->type = old->type;
2433		newp->family = old->family;
2434		memcpy(newp->xfrm_vec, old->xfrm_vec,
2435		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
2436		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2437		xfrm_sk_policy_link(newp, dir);
2438		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2439		xfrm_pol_put(newp);
2440	}
2441	return newp;
2442}
2443
2444int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
2445{
2446	const struct xfrm_policy *p;
2447	struct xfrm_policy *np;
2448	int i, ret = 0;
2449
2450	rcu_read_lock();
2451	for (i = 0; i < 2; i++) {
2452		p = rcu_dereference(osk->sk_policy[i]);
2453		if (p) {
2454			np = clone_policy(p, i);
2455			if (unlikely(!np)) {
2456				ret = -ENOMEM;
2457				break;
2458			}
2459			rcu_assign_pointer(sk->sk_policy[i], np);
2460		}
2461	}
2462	rcu_read_unlock();
2463	return ret;
2464}
2465
2466static int
2467xfrm_get_saddr(unsigned short family, xfrm_address_t *saddr,
2468	       const struct xfrm_dst_lookup_params *params)
2469{
2470	int err;
2471	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2472
2473	if (unlikely(afinfo == NULL))
2474		return -EINVAL;
2475	err = afinfo->get_saddr(saddr, params);
2476	rcu_read_unlock();
2477	return err;
2478}
2479
2480/* Resolve list of templates for the flow, given policy. */
2481
2482static int
2483xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
2484		      struct xfrm_state **xfrm, unsigned short family)
2485{
2486	struct net *net = xp_net(policy);
2487	int nx;
2488	int i, error;
2489	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
2490	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
2491	xfrm_address_t tmp;
2492
2493	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
2494		struct xfrm_state *x;
2495		xfrm_address_t *remote = daddr;
2496		xfrm_address_t *local  = saddr;
2497		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
2498
2499		if (tmpl->mode == XFRM_MODE_TUNNEL ||
2500		    tmpl->mode == XFRM_MODE_BEET) {
2501			remote = &tmpl->id.daddr;
2502			local = &tmpl->saddr;
2503			if (xfrm_addr_any(local, tmpl->encap_family)) {
2504				struct xfrm_dst_lookup_params params;
2505
2506				memset(&params, 0, sizeof(params));
2507				params.net = net;
2508				params.oif = fl->flowi_oif;
2509				params.daddr = remote;
2510				error = xfrm_get_saddr(tmpl->encap_family, &tmp,
2511						       &params);
2512				if (error)
2513					goto fail;
2514				local = &tmp;
2515			}
2516		}
2517
2518		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
2519				    family, policy->if_id);
2520		if (x && x->dir && x->dir != XFRM_SA_DIR_OUT) {
2521			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEDIRERROR);
2522			xfrm_state_put(x);
2523			error = -EINVAL;
2524			goto fail;
2525		}
2526
2527		if (x && x->km.state == XFRM_STATE_VALID) {
2528			xfrm[nx++] = x;
2529			daddr = remote;
2530			saddr = local;
2531			continue;
2532		}
2533		if (x) {
2534			error = (x->km.state == XFRM_STATE_ERROR ?
2535				 -EINVAL : -EAGAIN);
2536			xfrm_state_put(x);
2537		} else if (error == -ESRCH) {
2538			error = -EAGAIN;
2539		}
2540
2541		if (!tmpl->optional)
2542			goto fail;
2543	}
2544	return nx;
2545
2546fail:
2547	for (nx--; nx >= 0; nx--)
2548		xfrm_state_put(xfrm[nx]);
2549	return error;
2550}
2551
2552static int
2553xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
2554		  struct xfrm_state **xfrm, unsigned short family)
2555{
2556	struct xfrm_state *tp[XFRM_MAX_DEPTH];
2557	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
2558	int cnx = 0;
2559	int error;
2560	int ret;
2561	int i;
2562
2563	for (i = 0; i < npols; i++) {
2564		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
2565			error = -ENOBUFS;
2566			goto fail;
2567		}
2568
2569		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
2570		if (ret < 0) {
2571			error = ret;
2572			goto fail;
2573		} else
2574			cnx += ret;
2575	}
2576
2577	/* found states are sorted for outbound processing */
2578	if (npols > 1)
2579		xfrm_state_sort(xfrm, tpp, cnx, family);
2580
2581	return cnx;
2582
2583 fail:
2584	for (cnx--; cnx >= 0; cnx--)
2585		xfrm_state_put(tpp[cnx]);
2586	return error;
2587
2588}
2589
2590static dscp_t xfrm_get_dscp(const struct flowi *fl, int family)
2591{
2592	if (family == AF_INET)
2593		return inet_dsfield_to_dscp(fl->u.ip4.flowi4_tos);
2594
2595	return 0;
2596}
2597
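/* Allocate an xfrm_dst from the per-family dst_ops (xfrm4/xfrm6) and
 * clear everything after the embedded dst_entry.  Returns an ERR_PTR
 * if the family has no registered afinfo or the allocation fails.
 */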
2598static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
2599{
2600	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2601	struct dst_ops *dst_ops;
2602	struct xfrm_dst *xdst;
2603
2604	if (!afinfo)
2605		return ERR_PTR(-EINVAL);
2606
2607	switch (family) {
2608	case AF_INET:
2609		dst_ops = &net->xfrm.xfrm4_dst_ops;
2610		break;
2611#if IS_ENABLED(CONFIG_IPV6)
2612	case AF_INET6:
2613		dst_ops = &net->xfrm.xfrm6_dst_ops;
2614		break;
2615#endif
2616	default:
2617		BUG();
2618	}
2619	xdst = dst_alloc(dst_ops, NULL, DST_OBSOLETE_NONE, 0);
2620
2621	if (likely(xdst)) {
2622		memset_after(xdst, 0, u.dst);
2623	} else
2624		xdst = ERR_PTR(-ENOBUFS);
2625
2626	rcu_read_unlock();
2627
2628	return xdst;
2629}
2630
2631static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
2632			   int nfheader_len)
2633{
2634	if (dst->ops->family == AF_INET6) {
2635		path->path_cookie = rt6_get_cookie(dst_rt6_info(dst));
2636		path->u.rt6.rt6i_nfheader_len = nfheader_len;
2637	}
2638}
2639
2640static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
2641				const struct flowi *fl)
2642{
2643	const struct xfrm_policy_afinfo *afinfo =
2644		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
2645	int err;
2646
2647	if (!afinfo)
2648		return -EINVAL;
2649
2650	err = afinfo->fill_dst(xdst, dev, fl);
2651
2652	rcu_read_unlock();
2653
2654	return err;
2655}
2656
2657
2658	/* Allocate a chain of dst_entry's, attach the known xfrm's, and calculate
2659	 * all the metrics... In short, bundle a bundle.
2660 */
2661
2662static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
2663					    struct xfrm_state **xfrm,
2664					    struct xfrm_dst **bundle,
2665					    int nx,
2666					    const struct flowi *fl,
2667					    struct dst_entry *dst)
2668{
2669	const struct xfrm_state_afinfo *afinfo;
2670	const struct xfrm_mode *inner_mode;
2671	struct net *net = xp_net(policy);
2672	unsigned long now = jiffies;
2673	struct net_device *dev;
2674	struct xfrm_dst *xdst_prev = NULL;
2675	struct xfrm_dst *xdst0 = NULL;
2676	int i = 0;
2677	int err;
2678	int header_len = 0;
2679	int nfheader_len = 0;
2680	int trailer_len = 0;
2681	int family = policy->selector.family;
2682	xfrm_address_t saddr, daddr;
2683	dscp_t dscp;
2684
2685	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
2686
2687	dscp = xfrm_get_dscp(fl, family);
2688
2689	dst_hold(dst);
2690
2691	for (; i < nx; i++) {
2692		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
2693		struct dst_entry *dst1 = &xdst->u.dst;
2694
2695		err = PTR_ERR(xdst);
2696		if (IS_ERR(xdst)) {
2697			dst_release(dst);
2698			goto put_states;
2699		}
2700
2701		bundle[i] = xdst;
2702		if (!xdst_prev)
2703			xdst0 = xdst;
2704		else
2705			/* A ref count is taken during xfrm_alloc_dst(),
2706			 * so there is no need to do dst_clone() on dst1.
2707			 */
2708			xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
2709
2710		if (xfrm[i]->sel.family == AF_UNSPEC) {
2711			inner_mode = xfrm_ip2inner_mode(xfrm[i],
2712							xfrm_af2proto(family));
2713			if (!inner_mode) {
2714				err = -EAFNOSUPPORT;
2715				dst_release(dst);
2716				goto put_states;
2717			}
2718		} else
2719			inner_mode = &xfrm[i]->inner_mode;
2720
2721		xdst->route = dst;
2722		dst_copy_metrics(dst1, dst);
2723
2724		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
2725			__u32 mark = 0;
2726			int oif;
2727
2728			if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
2729				mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
2730
2731			if (xfrm[i]->xso.type != XFRM_DEV_OFFLOAD_PACKET)
2732				family = xfrm[i]->props.family;
2733
2734			oif = fl->flowi_oif ? : fl->flowi_l3mdev;
2735			dst = xfrm_dst_lookup(xfrm[i], dscp, oif, &saddr,
2736					      &daddr, family, mark);
2737			err = PTR_ERR(dst);
2738			if (IS_ERR(dst))
2739				goto put_states;
2740		} else
2741			dst_hold(dst);
2742
2743		dst1->xfrm = xfrm[i];
2744		xdst->xfrm_genid = xfrm[i]->genid;
2745
2746		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2747		dst1->lastuse = now;
2748
2749		dst1->input = dst_discard;
2750
2751		rcu_read_lock();
2752		afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
2753		if (likely(afinfo))
2754			dst1->output = afinfo->output;
2755		else
2756			dst1->output = dst_discard_out;
2757		rcu_read_unlock();
2758
2759		xdst_prev = xdst;
2760
2761		header_len += xfrm[i]->props.header_len;
2762		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
2763			nfheader_len += xfrm[i]->props.header_len;
2764		trailer_len += xfrm[i]->props.trailer_len;
2765	}
2766
2767	xfrm_dst_set_child(xdst_prev, dst);
2768	xdst0->path = dst;
2769
2770	err = -ENODEV;
2771	dev = dst->dev;
2772	if (!dev)
2773		goto free_dst;
2774
2775	xfrm_init_path(xdst0, dst, nfheader_len);
2776	xfrm_init_pmtu(bundle, nx);
2777
2778	for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
2779	     xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
2780		err = xfrm_fill_dst(xdst_prev, dev, fl);
2781		if (err)
2782			goto free_dst;
2783
2784		xdst_prev->u.dst.header_len = header_len;
2785		xdst_prev->u.dst.trailer_len = trailer_len;
2786		header_len -= xdst_prev->u.dst.xfrm->props.header_len;
2787		trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
2788	}
2789
2790	return &xdst0->u.dst;
2791
2792put_states:
2793	for (; i < nx; i++)
2794		xfrm_state_put(xfrm[i]);
2795free_dst:
2796	if (xdst0)
2797		dst_release_immediate(&xdst0->u.dst);
2798
2799	return ERR_PTR(err);
2800}
2801
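/* Expand the policy array used for bundle creation.  With sub-policies
 * enabled, an allowing non-main policy pulls in the matching main
 * policy as well.  *num_xfrms is set to the total number of templates,
 * or to -1 if any of the policies does not allow the flow.
 */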
2802static int xfrm_expand_policies(const struct flowi *fl, u16 family,
2803				struct xfrm_policy **pols,
2804				int *num_pols, int *num_xfrms)
2805{
2806	int i;
2807
2808	if (*num_pols == 0 || !pols[0]) {
2809		*num_pols = 0;
2810		*num_xfrms = 0;
2811		return 0;
2812	}
2813	if (IS_ERR(pols[0])) {
2814		*num_pols = 0;
2815		return PTR_ERR(pols[0]);
2816	}
2817
2818	*num_xfrms = pols[0]->xfrm_nr;
2819
2820#ifdef CONFIG_XFRM_SUB_POLICY
2821	if (pols[0]->action == XFRM_POLICY_ALLOW &&
2822	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2823		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
2824						    XFRM_POLICY_TYPE_MAIN,
2825						    fl, family,
2826						    XFRM_POLICY_OUT,
2827						    pols[0]->if_id);
2828		if (pols[1]) {
2829			if (IS_ERR(pols[1])) {
2830				xfrm_pols_put(pols, *num_pols);
2831				*num_pols = 0;
2832				return PTR_ERR(pols[1]);
2833			}
2834			(*num_pols)++;
2835			(*num_xfrms) += pols[1]->xfrm_nr;
2836		}
2837	}
2838#endif
2839	for (i = 0; i < *num_pols; i++) {
2840		if (pols[i]->action != XFRM_POLICY_ALLOW) {
2841			*num_xfrms = -1;
2842			break;
2843		}
2844	}
2845
2846	return 0;
2847
2848}
2849
2849
2850static struct xfrm_dst *
2851xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
2852			       const struct flowi *fl, u16 family,
2853			       struct dst_entry *dst_orig)
2854{
2855	struct net *net = xp_net(pols[0]);
2856	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
2857	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
2858	struct xfrm_dst *xdst;
2859	struct dst_entry *dst;
2860	int err;
2861
2862	/* Try to instantiate a bundle */
2863	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
2864	if (err <= 0) {
2865		if (err == 0)
2866			return NULL;
2867
2868		if (err != -EAGAIN)
2869			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2870		return ERR_PTR(err);
2871	}
2872
2873	dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
2874	if (IS_ERR(dst)) {
2875		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
2876		return ERR_CAST(dst);
2877	}
2878
2879	xdst = (struct xfrm_dst *)dst;
2880	xdst->num_xfrms = err;
2881	xdst->num_pols = num_pols;
2882	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2883	xdst->policy_genid = atomic_read(&pols[0]->genid);
2884
2885	return xdst;
2886}
2887
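/* Hold-queue timer handler.  Re-resolve the flow of the first queued
 * packet; if the result is still a queueing route (DST_XFRM_QUEUE),
 * double the timeout and re-arm, purging the queue once
 * XFRM_QUEUE_TMO_MAX is reached.  Otherwise splice the queue off and
 * push each packet out through its freshly resolved dst.
 */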
2888static void xfrm_policy_queue_process(struct timer_list *t)
2889{
2890	struct sk_buff *skb;
2891	struct sock *sk;
2892	struct dst_entry *dst;
2893	struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
2894	struct net *net = xp_net(pol);
2895	struct xfrm_policy_queue *pq = &pol->polq;
2896	struct flowi fl;
2897	struct sk_buff_head list;
2898	__u32 skb_mark;
2899
2900	spin_lock(&pq->hold_queue.lock);
2901	skb = skb_peek(&pq->hold_queue);
2902	if (!skb) {
2903		spin_unlock(&pq->hold_queue.lock);
2904		goto out;
2905	}
2906	dst = skb_dst(skb);
2907	sk = skb->sk;
2908
2909	/* Fixup the mark to support VTI. */
2910	skb_mark = skb->mark;
2911	skb->mark = pol->mark.v;
2912	xfrm_decode_session(net, skb, &fl, dst->ops->family);
2913	skb->mark = skb_mark;
2914	spin_unlock(&pq->hold_queue.lock);
2915
2916	dst_hold(xfrm_dst_path(dst));
2917	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
2918	if (IS_ERR(dst))
2919		goto purge_queue;
2920
2921	if (dst->flags & DST_XFRM_QUEUE) {
2922		dst_release(dst);
2923
2924		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
2925			goto purge_queue;
2926
2927		pq->timeout = pq->timeout << 1;
2928		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
2929			xfrm_pol_hold(pol);
2930		goto out;
2931	}
2932
2933	dst_release(dst);
2934
2935	__skb_queue_head_init(&list);
2936
2937	spin_lock(&pq->hold_queue.lock);
2938	pq->timeout = 0;
2939	skb_queue_splice_init(&pq->hold_queue, &list);
2940	spin_unlock(&pq->hold_queue.lock);
2941
2942	while (!skb_queue_empty(&list)) {
2943		skb = __skb_dequeue(&list);
2944
2945		/* Fixup the mark to support VTI. */
2946		skb_mark = skb->mark;
2947		skb->mark = pol->mark.v;
2948		xfrm_decode_session(net, skb, &fl, skb_dst(skb)->ops->family);
2949		skb->mark = skb_mark;
2950
2951		dst_hold(xfrm_dst_path(skb_dst(skb)));
2952		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
2953		if (IS_ERR(dst)) {
2954			kfree_skb(skb);
2955			continue;
2956		}
2957
2958		nf_reset_ct(skb);
2959		skb_dst_drop(skb);
2960		skb_dst_set(skb, dst);
2961
2962		dst_output(net, skb->sk, skb);
2963	}
2964
2965out:
2966	xfrm_pol_put(pol);
2967	return;
2968
2969purge_queue:
2970	pq->timeout = 0;
2971	skb_queue_purge(&pq->hold_queue);
2972	xfrm_pol_put(pol);
2973}
2974
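/* Output handler installed on dummy bundles: park the packet on the
 * owning policy's hold queue (bounded by XFRM_MAX_QUEUE_LEN) and arm
 * the hold timer so that xfrm_policy_queue_process() retries the flow
 * later.
 */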
2975static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2976{
2977	unsigned long sched_next;
2978	struct dst_entry *dst = skb_dst(skb);
2979	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
2980	struct xfrm_policy *pol = xdst->pols[0];
2981	struct xfrm_policy_queue *pq = &pol->polq;
2982
2983	if (unlikely(skb_fclone_busy(sk, skb))) {
2984		kfree_skb(skb);
2985		return 0;
2986	}
2987
2988	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
2989		kfree_skb(skb);
2990		return -EAGAIN;
2991	}
2992
2993	skb_dst_force(skb);
2994
2995	spin_lock_bh(&pq->hold_queue.lock);
2996
2997	if (!pq->timeout)
2998		pq->timeout = XFRM_QUEUE_TMO_MIN;
2999
3000	sched_next = jiffies + pq->timeout;
3001
3002	if (del_timer(&pq->hold_timer)) {
3003		if (time_before(pq->hold_timer.expires, sched_next))
3004			sched_next = pq->hold_timer.expires;
3005		xfrm_pol_put(pol);
3006	}
3007
3008	__skb_queue_tail(&pq->hold_queue, skb);
3009	if (!mod_timer(&pq->hold_timer, sched_next))
3010		xfrm_pol_hold(pol);
3011
3012	spin_unlock_bh(&pq->hold_queue.lock);
3013
3014	return 0;
3015}
3016
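/* Build a placeholder bundle for a flow whose states are not yet
 * resolved.  When queueing was requested, larval drop is disabled and
 * there are templates to resolve, the bundle is marked DST_XFRM_QUEUE
 * and its output is routed through xdst_queue_output() so packets wait
 * for SA negotiation; otherwise a bare xfrm_dst is returned.
 */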
3017static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
3018						 struct xfrm_flo *xflo,
3019						 const struct flowi *fl,
3020						 int num_xfrms,
3021						 u16 family)
3022{
3023	int err;
3024	struct net_device *dev;
3025	struct dst_entry *dst;
3026	struct dst_entry *dst1;
3027	struct xfrm_dst *xdst;
3028
3029	xdst = xfrm_alloc_dst(net, family);
3030	if (IS_ERR(xdst))
3031		return xdst;
3032
3033	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
3034	    net->xfrm.sysctl_larval_drop ||
3035	    num_xfrms <= 0)
3036		return xdst;
3037
3038	dst = xflo->dst_orig;
3039	dst1 = &xdst->u.dst;
3040	dst_hold(dst);
3041	xdst->route = dst;
3042
3043	dst_copy_metrics(dst1, dst);
3044
3045	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
3046	dst1->flags |= DST_XFRM_QUEUE;
3047	dst1->lastuse = jiffies;
3048
3049	dst1->input = dst_discard;
3050	dst1->output = xdst_queue_output;
3051
3052	dst_hold(dst);
3053	xfrm_dst_set_child(xdst, dst);
3054	xdst->path = dst;
3055
3056	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
3057
3058	err = -ENODEV;
3059	dev = dst->dev;
3060	if (!dev)
3061		goto free_dst;
3062
3063	err = xfrm_fill_dst(xdst, dev, fl);
3064	if (err)
3065		goto free_dst;
3066
3067out:
3068	return xdst;
3069
3070free_dst:
3071	dst_release(dst1);
3072	xdst = ERR_PTR(err);
3073	goto out;
3074}
3075
3076static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
3077					   const struct flowi *fl,
3078					   u16 family, u8 dir,
3079					   struct xfrm_flo *xflo, u32 if_id)
3080{
3081	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3082	int num_pols = 0, num_xfrms = 0, err;
3083	struct xfrm_dst *xdst;
3084
3085	/* Resolve policies to use if we couldn't get them from
3086	 * previous cache entry */
3087	num_pols = 1;
3088	pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
3089	err = xfrm_expand_policies(fl, family, pols,
3090					   &num_pols, &num_xfrms);
3091	if (err < 0)
3092		goto inc_error;
3093	if (num_pols == 0)
3094		return NULL;
3095	if (num_xfrms <= 0)
3096		goto make_dummy_bundle;
3097
3098	xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
3099					      xflo->dst_orig);
3100	if (IS_ERR(xdst)) {
3101		err = PTR_ERR(xdst);
3102		if (err == -EREMOTE) {
3103			xfrm_pols_put(pols, num_pols);
3104			return NULL;
3105		}
3106
3107		if (err != -EAGAIN)
3108			goto error;
3109		goto make_dummy_bundle;
3110	} else if (xdst == NULL) {
3111		num_xfrms = 0;
3112		goto make_dummy_bundle;
3113	}
3114
3115	return xdst;
3116
3117make_dummy_bundle:
3118	/* We found policies, but there are no bundles to instantiate:
3119	 * either the policy blocks, it has no transformations, or
3120	 * we could not build a template (no xfrm_states). */
3121	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
3122	if (IS_ERR(xdst)) {
3123		xfrm_pols_put(pols, num_pols);
3124		return ERR_CAST(xdst);
3125	}
3126	xdst->num_pols = num_pols;
3127	xdst->num_xfrms = num_xfrms;
3128	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
3129
3130	return xdst;
3131
3132inc_error:
3133	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
3134error:
3135	xfrm_pols_put(pols, num_pols);
3136	return ERR_PTR(err);
3137}
3138
3139static struct dst_entry *make_blackhole(struct net *net, u16 family,
3140					struct dst_entry *dst_orig)
3141{
3142	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
3143	struct dst_entry *ret;
3144
3145	if (!afinfo) {
3146		dst_release(dst_orig);
3147		return ERR_PTR(-EINVAL);
3148	} else {
3149		ret = afinfo->blackhole_route(net, dst_orig);
3150	}
3151	rcu_read_unlock();
3152
3153	return ret;
3154}
3155
3156/* Finds/creates a bundle for given flow and if_id
3157 *
3158 * At the moment we eat a raw IP route. Mostly to speed up lookups
3159 * on interfaces with disabled IPsec.
3160 *
3161 * xfrm_lookup uses an if_id of 0 by default, and is provided for
3162 * compatibility
3163 */
3164struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
3165					struct dst_entry *dst_orig,
3166					const struct flowi *fl,
3167					const struct sock *sk,
3168					int flags, u32 if_id)
3169{
3170	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3171	struct xfrm_dst *xdst;
3172	struct dst_entry *dst, *route;
3173	u16 family = dst_orig->ops->family;
3174	u8 dir = XFRM_POLICY_OUT;
3175	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
3176
3177	dst = NULL;
3178	xdst = NULL;
3179	route = NULL;
3180
3181	sk = sk_const_to_full_sk(sk);
3182	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
3183		num_pols = 1;
3184		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
3185						if_id);
3186		err = xfrm_expand_policies(fl, family, pols,
3187					   &num_pols, &num_xfrms);
3188		if (err < 0)
3189			goto dropdst;
3190
3191		if (num_pols) {
3192			if (num_xfrms <= 0) {
3193				drop_pols = num_pols;
3194				goto no_transform;
3195			}
3196
3197			xdst = xfrm_resolve_and_create_bundle(
3198					pols, num_pols, fl,
3199					family, dst_orig);
3200
3201			if (IS_ERR(xdst)) {
3202				xfrm_pols_put(pols, num_pols);
3203				err = PTR_ERR(xdst);
3204				if (err == -EREMOTE)
3205					goto nopol;
3206
3207				goto dropdst;
3208			} else if (xdst == NULL) {
3209				num_xfrms = 0;
3210				drop_pols = num_pols;
3211				goto no_transform;
3212			}
3213
3214			route = xdst->route;
3215		}
3216	}
3217
3218	if (xdst == NULL) {
3219		struct xfrm_flo xflo;
3220
3221		xflo.dst_orig = dst_orig;
3222		xflo.flags = flags;
3223
3224		/* To accelerate a bit...  */
3225		if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
3226			       !net->xfrm.policy_count[XFRM_POLICY_OUT]))
3227			goto nopol;
3228
3229		xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
3230		if (xdst == NULL)
3231			goto nopol;
3232		if (IS_ERR(xdst)) {
3233			err = PTR_ERR(xdst);
3234			goto dropdst;
3235		}
3236
3237		num_pols = xdst->num_pols;
3238		num_xfrms = xdst->num_xfrms;
3239		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
3240		route = xdst->route;
3241	}
3242
3243	dst = &xdst->u.dst;
3244	if (route == NULL && num_xfrms > 0) {
3245		/* The only case in which xfrm_bundle_lookup() returns a
3246		 * bundle with a null route is when the template could
3247		 * not be resolved: policies exist, but the bundle could
3248		 * not be created because we don't yet have the
3249		 * xfrm_states. We need to wait for the KM to negotiate
3250		 * new SAs, or bail out with an error. */
3251		if (net->xfrm.sysctl_larval_drop) {
3252			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3253			err = -EREMOTE;
3254			goto error;
3255		}
3256
3257		err = -EAGAIN;
3258
3259		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3260		goto error;
3261	}
3262
3263no_transform:
3264	if (num_pols == 0)
3265		goto nopol;
3266
3267	if ((flags & XFRM_LOOKUP_ICMP) &&
3268	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
3269		err = -ENOENT;
3270		goto error;
3271	}
3272
3273	for (i = 0; i < num_pols; i++)
3274		WRITE_ONCE(pols[i]->curlft.use_time, ktime_get_real_seconds());
3275
3276	if (num_xfrms < 0) {
3277		/* Prohibit the flow */
3278		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
3279		err = -EPERM;
3280		goto error;
3281	} else if (num_xfrms > 0) {
3282		/* Flow transformed */
3283		dst_release(dst_orig);
3284	} else {
3285		/* Flow passes untransformed */
3286		dst_release(dst);
3287		dst = dst_orig;
3288	}
3289
3290ok:
3291	xfrm_pols_put(pols, drop_pols);
3292	if (dst && dst->xfrm &&
3293	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
3294		dst->flags |= DST_XFRM_TUNNEL;
3295	return dst;
3296
3297nopol:
3298	if ((!dst_orig->dev || !(dst_orig->dev->flags & IFF_LOOPBACK)) &&
3299	    net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
3300		err = -EPERM;
3301		goto error;
3302	}
3303	if (!(flags & XFRM_LOOKUP_ICMP)) {
3304		dst = dst_orig;
3305		goto ok;
3306	}
3307	err = -ENOENT;
3308error:
3309	dst_release(dst);
3310dropdst:
3311	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
3312		dst_release(dst_orig);
3313	xfrm_pols_put(pols, drop_pols);
3314	return ERR_PTR(err);
3315}
3316EXPORT_SYMBOL(xfrm_lookup_with_ifid);
3317
3318/* Main function: finds/creates a bundle for given flow.
3319 *
3320 * At the moment we eat a raw IP route. Mostly to speed up lookups
3321 * on interfaces with disabled IPsec.
3322 */
3323struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
3324			      const struct flowi *fl, const struct sock *sk,
3325			      int flags)
3326{
3327	return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
3328}
3329EXPORT_SYMBOL(xfrm_lookup);
3330
3331/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
3332 * Otherwise we may send out blackholed packets.
3333 */
3334struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
3335				    const struct flowi *fl,
3336				    const struct sock *sk, int flags)
3337{
3338	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
3339					    flags | XFRM_LOOKUP_QUEUE |
3340					    XFRM_LOOKUP_KEEP_DST_REF);
3341
3342	if (PTR_ERR(dst) == -EREMOTE)
3343		return make_blackhole(net, dst_orig->ops->family, dst_orig);
3344
3345	if (IS_ERR(dst))
3346		dst_release(dst_orig);
3347
3348	return dst;
3349}
3350EXPORT_SYMBOL(xfrm_lookup_route);
3351
3352static inline int
3353xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
3354{
3355	struct sec_path *sp = skb_sec_path(skb);
3356	struct xfrm_state *x;
3357
3358	if (!sp || idx < 0 || idx >= sp->len)
3359		return 0;
3360	x = sp->xvec[idx];
3361	if (!x->type->reject)
3362		return 0;
3363	return x->type->reject(x, skb, fl);
3364}
3365
3366/* When skb is transformed back to its "native" form, we have to
3367 * check policy restrictions. At the moment we do this in a maximally
3368 * stupid way. Shame on me. :-) Of course, connected sockets must
3369 * have the policy cached at them.
3370 */
3371
3372static inline int
3373xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
3374	      unsigned short family, u32 if_id)
3375{
3376	if (xfrm_state_kern(x))
3377		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
3378	return	x->id.proto == tmpl->id.proto &&
3379		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
3380		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
3381		x->props.mode == tmpl->mode &&
3382		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
3383		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
3384		!(x->props.mode != XFRM_MODE_TRANSPORT &&
3385		  xfrm_state_addr_cmp(tmpl, x, family)) &&
3386		(if_id == 0 || if_id == x->if_id);
3387}
3388
3389/*
3390 * 0 or a positive value is returned when validation succeeds (either a
3391 * bypass because of an optional transport-mode template, or the next
3392 * index after the secpath state that matched the template).
3393 * -1 is returned when no matching template is found.
3394 * Otherwise "-2 - errored_index" is returned.
3395 */
3396static inline int
3397xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
3398	       unsigned short family, u32 if_id)
3399{
3400	int idx = start;
3401
3402	if (tmpl->optional) {
3403		if (tmpl->mode == XFRM_MODE_TRANSPORT)
3404			return start;
3405	} else
3406		start = -1;
3407	for (; idx < sp->len; idx++) {
3408		if (xfrm_state_ok(tmpl, sp->xvec[idx], family, if_id))
3409			return ++idx;
3410		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
3411			if (idx < sp->verified_cnt) {
3412				/* Secpath entry previously verified, consider optional and
3413				 * continue searching
3414				 */
3415				continue;
3416			}
3417
3418			if (start == -1)
3419				start = -2-idx;
3420			break;
3421		}
3422	}
3423	return start;
3424}
3425
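/* Build a flowi4 from the dissected flow keys.  For reverse decodes
 * the source and destination addresses and ports are swapped.  GRE
 * keys and ICMP type/code are carried in the corresponding flowi
 * fields so that selectors can match on them; the TOS is copied with
 * the ECN bits cleared.
 */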
3426static void
3427decode_session4(const struct xfrm_flow_keys *flkeys, struct flowi *fl, bool reverse)
3428{
3429	struct flowi4 *fl4 = &fl->u.ip4;
3430
3431	memset(fl4, 0, sizeof(struct flowi4));
3432
3433	if (reverse) {
3434		fl4->saddr = flkeys->addrs.ipv4.dst;
3435		fl4->daddr = flkeys->addrs.ipv4.src;
3436		fl4->fl4_sport = flkeys->ports.dst;
3437		fl4->fl4_dport = flkeys->ports.src;
3438	} else {
3439		fl4->saddr = flkeys->addrs.ipv4.src;
3440		fl4->daddr = flkeys->addrs.ipv4.dst;
3441		fl4->fl4_sport = flkeys->ports.src;
3442		fl4->fl4_dport = flkeys->ports.dst;
3443	}
3444
3445	switch (flkeys->basic.ip_proto) {
3446	case IPPROTO_GRE:
3447		fl4->fl4_gre_key = flkeys->gre.keyid;
3448		break;
3449	case IPPROTO_ICMP:
3450		fl4->fl4_icmp_type = flkeys->icmp.type;
3451		fl4->fl4_icmp_code = flkeys->icmp.code;
3452		break;
3453	}
3454
3455	fl4->flowi4_proto = flkeys->basic.ip_proto;
3456	fl4->flowi4_tos = flkeys->ip.tos & ~INET_ECN_MASK;
3457}
3458
3459#if IS_ENABLED(CONFIG_IPV6)
3460static void
3461decode_session6(const struct xfrm_flow_keys *flkeys, struct flowi *fl, bool reverse)
3462{
3463	struct flowi6 *fl6 = &fl->u.ip6;
3464
3465	memset(fl6, 0, sizeof(struct flowi6));
3466
3467	if (reverse) {
3468		fl6->saddr = flkeys->addrs.ipv6.dst;
3469		fl6->daddr = flkeys->addrs.ipv6.src;
3470		fl6->fl6_sport = flkeys->ports.dst;
3471		fl6->fl6_dport = flkeys->ports.src;
3472	} else {
3473		fl6->saddr = flkeys->addrs.ipv6.src;
3474		fl6->daddr = flkeys->addrs.ipv6.dst;
3475		fl6->fl6_sport = flkeys->ports.src;
3476		fl6->fl6_dport = flkeys->ports.dst;
3477	}
3478
3479	switch (flkeys->basic.ip_proto) {
3480	case IPPROTO_GRE:
3481		fl6->fl6_gre_key = flkeys->gre.keyid;
3482		break;
3483	case IPPROTO_ICMPV6:
3484		fl6->fl6_icmp_type = flkeys->icmp.type;
3485		fl6->fl6_icmp_code = flkeys->icmp.code;
3486		break;
3487	}
3488
3489	fl6->flowi6_proto = flkeys->basic.ip_proto;
3490}
3491#endif
3492
3493int __xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl,
3494			  unsigned int family, int reverse)
3495{
3496	struct xfrm_flow_keys flkeys;
3497
3498	memset(&flkeys, 0, sizeof(flkeys));
3499	__skb_flow_dissect(net, skb, &xfrm_session_dissector, &flkeys,
3500			   NULL, 0, 0, 0, FLOW_DISSECTOR_F_STOP_AT_ENCAP);
3501
3502	switch (family) {
3503	case AF_INET:
3504		decode_session4(&flkeys, fl, reverse);
3505		break;
3506#if IS_ENABLED(CONFIG_IPV6)
3507	case AF_INET6:
3508		decode_session6(&flkeys, fl, reverse);
3509		break;
3510#endif
3511	default:
3512		return -EAFNOSUPPORT;
3513	}
3514
3515	fl->flowi_mark = skb->mark;
3516	if (reverse) {
3517		fl->flowi_oif = skb->skb_iif;
3518	} else {
3519		int oif = 0;
3520
3521		if (skb_dst(skb) && skb_dst(skb)->dev)
3522			oif = skb_dst(skb)->dev->ifindex;
3523
3524		fl->flowi_oif = oif;
3525	}
3526
3527	return security_xfrm_decode_session(skb, &fl->flowi_secid);
3528}
3529EXPORT_SYMBOL(__xfrm_decode_session);
3530
3531static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
3532{
3533	for (; k < sp->len; k++) {
3534		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
3535			*idxp = k;
3536			return 1;
3537		}
3538	}
3539
3540	return 0;
3541}
3542
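/* Return true if the flow describes an ICMP/ICMPv6 error that carries
 * the offending packet in its payload (destination unreachable, time
 * exceeded, or packet too big for IPv6).  Such packets may have to be
 * matched against the inner flow rather than the outer one.
 */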
3543static bool icmp_err_packet(const struct flowi *fl, unsigned short family)
3544{
3545	const struct flowi4 *fl4 = &fl->u.ip4;
3546
3547	if (family == AF_INET &&
3548	    fl4->flowi4_proto == IPPROTO_ICMP &&
3549	    (fl4->fl4_icmp_type == ICMP_DEST_UNREACH ||
3550	     fl4->fl4_icmp_type == ICMP_TIME_EXCEEDED))
3551		return true;
3552
3553#if IS_ENABLED(CONFIG_IPV6)
3554	if (family == AF_INET6) {
3555		const struct flowi6 *fl6 = &fl->u.ip6;
3556
3557		if (fl6->flowi6_proto == IPPROTO_ICMPV6 &&
3558		    (fl6->fl6_icmp_type == ICMPV6_DEST_UNREACH ||
3559		    fl6->fl6_icmp_type == ICMPV6_PKT_TOOBIG ||
3560		    fl6->fl6_icmp_type == ICMPV6_TIME_EXCEED))
3561			return true;
3562	}
3563#endif
3564	return false;
3565}
3566
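/* Decode the flow of the packet embedded in an ICMP error: clone the
 * skb, pull the outer IP and ICMP headers and run a reverse session
 * decode on what remains into @fl1.  Returns false on success, true if
 * the inner flow could not be decoded.
 */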
3567static bool xfrm_icmp_flow_decode(struct sk_buff *skb, unsigned short family,
3568				  const struct flowi *fl, struct flowi *fl1)
3569{
3570	bool ret = true;
3571	struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
3572	int hl = family == AF_INET ? (sizeof(struct iphdr) +  sizeof(struct icmphdr)) :
3573		 (sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr));
3574
3575	if (!newskb)
3576		return true;
3577
3578	if (!pskb_pull(newskb, hl))
3579		goto out;
3580
3581	skb_reset_network_header(newskb);
3582
3583	if (xfrm_decode_session_reverse(dev_net(skb->dev), newskb, fl1, family) < 0)
3584		goto out;
3585
3586	fl1->flowi_oif = fl->flowi_oif;
3587	fl1->flowi_mark = fl->flowi_mark;
3588	fl1->flowi_tos = fl->flowi_tos;
3589	nf_nat_decode_session(newskb, fl1, family);
3590	ret = false;
3591
3592out:
3593	consume_skb(newskb);
3594	return ret;
3595}
3596
3597static bool xfrm_selector_inner_icmp_match(struct sk_buff *skb, unsigned short family,
3598					   const struct xfrm_selector *sel,
3599					   const struct flowi *fl)
3600{
3601	bool ret = false;
3602
3603	if (icmp_err_packet(fl, family)) {
3604		struct flowi fl1;
3605
3606		if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
3607			return ret;
3608
3609		ret = xfrm_selector_match(sel, &fl1, family);
3610	}
3611
3612	return ret;
3613}
3614
3615static inline struct
3616xfrm_policy *xfrm_in_fwd_icmp(struct sk_buff *skb,
3617			      const struct flowi *fl, unsigned short family,
3618			      u32 if_id)
3619{
3620	struct xfrm_policy *pol = NULL;
3621
3622	if (icmp_err_packet(fl, family)) {
3623		struct flowi fl1;
3624		struct net *net = dev_net(skb->dev);
3625
3626		if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
3627			return pol;
3628
3629		pol = xfrm_policy_lookup(net, &fl1, family, XFRM_POLICY_FWD, if_id);
3630		if (IS_ERR(pol))
3631			pol = NULL;
3632	}
3633
3634	return pol;
3635}
3636
3637static inline struct
3638dst_entry *xfrm_out_fwd_icmp(struct sk_buff *skb, struct flowi *fl,
3639			     unsigned short family, struct dst_entry *dst)
3640{
3641	if (icmp_err_packet(fl, family)) {
3642		struct net *net = dev_net(skb->dev);
3643		struct dst_entry *dst2;
3644		struct flowi fl1;
3645
3646		if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
3647			return dst;
3648
3649		dst_hold(dst);
3650
3651		dst2 = xfrm_lookup(net, dst, &fl1, NULL, (XFRM_LOOKUP_QUEUE | XFRM_LOOKUP_ICMP));
3652
3653		if (IS_ERR(dst2))
3654			return dst;
3655
3656		if (dst2->xfrm) {
3657			dst_release(dst);
3658			dst = dst2;
3659		} else {
3660			dst_release(dst2);
3661		}
3662	}
3663
3664	return dst;
3665}
3666
3667int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3668			unsigned short family)
3669{
3670	struct net *net = dev_net(skb->dev);
3671	struct xfrm_policy *pol;
3672	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3673	int npols = 0;
3674	int xfrm_nr;
3675	int pi;
3676	int reverse;
3677	struct flowi fl;
3678	int xerr_idx = -1;
3679	const struct xfrm_if_cb *ifcb;
3680	struct sec_path *sp;
3681	u32 if_id = 0;
3682
3683	rcu_read_lock();
3684	ifcb = xfrm_if_get_cb();
3685
3686	if (ifcb) {
3687		struct xfrm_if_decode_session_result r;
3688
3689		if (ifcb->decode_session(skb, family, &r)) {
3690			if_id = r.if_id;
3691			net = r.net;
3692		}
3693	}
3694	rcu_read_unlock();
3695
3696	reverse = dir & ~XFRM_POLICY_MASK;
3697	dir &= XFRM_POLICY_MASK;
3698
3699	if (__xfrm_decode_session(net, skb, &fl, family, reverse) < 0) {
3700		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
3701		return 0;
3702	}
3703
3704	nf_nat_decode_session(skb, &fl, family);
3705
3706	/* First, check used SA against their selectors. */
3707	sp = skb_sec_path(skb);
3708	if (sp) {
3709		int i;
3710
3711		for (i = sp->len - 1; i >= 0; i--) {
3712			struct xfrm_state *x = sp->xvec[i];
3713			int ret = 0;
3714
3715			if (!xfrm_selector_match(&x->sel, &fl, family)) {
3716				ret = 1;
3717				if (x->props.flags & XFRM_STATE_ICMP &&
3718				    xfrm_selector_inner_icmp_match(skb, family, &x->sel, &fl))
3719					ret = 0;
3720				if (ret) {
3721					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
3722					return 0;
3723				}
3724			}
3725		}
3726	}
3727
3728	pol = NULL;
3729	sk = sk_to_full_sk(sk);
3730	if (sk && sk->sk_policy[dir]) {
3731		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
3732		if (IS_ERR(pol)) {
3733			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3734			return 0;
3735		}
3736	}
3737
3738	if (!pol)
3739		pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
3740
3741	if (IS_ERR(pol)) {
3742		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3743		return 0;
3744	}
3745
3746	if (!pol && dir == XFRM_POLICY_FWD)
3747		pol = xfrm_in_fwd_icmp(skb, &fl, family, if_id);
3748
3749	if (!pol) {
3750		const bool is_crypto_offload = sp &&
3751			(xfrm_input_state(skb)->xso.type == XFRM_DEV_OFFLOAD_CRYPTO);
3752
3753		if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
3754			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3755			return 0;
3756		}
3757
3758		if (sp && secpath_has_nontransport(sp, 0, &xerr_idx) && !is_crypto_offload) {
3759			xfrm_secpath_reject(xerr_idx, skb, &fl);
3760			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3761			return 0;
3762		}
3763		return 1;
3764	}
3765
3766	/* This lockless write can happen from different cpus. */
3767	WRITE_ONCE(pol->curlft.use_time, ktime_get_real_seconds());
3768
3769	pols[0] = pol;
3770	npols++;
3771#ifdef CONFIG_XFRM_SUB_POLICY
3772	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
3773		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
3774						    &fl, family,
3775						    XFRM_POLICY_IN, if_id);
3776		if (pols[1]) {
3777			if (IS_ERR(pols[1])) {
3778				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3779				xfrm_pol_put(pols[0]);
3780				return 0;
3781			}
3782			/* This write can happen from different cpus. */
3783			WRITE_ONCE(pols[1]->curlft.use_time,
3784				   ktime_get_real_seconds());
3785			npols++;
3786		}
3787	}
3788#endif
3789
3790	if (pol->action == XFRM_POLICY_ALLOW) {
3791		static struct sec_path dummy;
3792		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
3793		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
3794		struct xfrm_tmpl **tpp = tp;
3795		int ti = 0;
3796		int i, k;
3797
3798		sp = skb_sec_path(skb);
3799		if (!sp)
3800			sp = &dummy;
3801
3802		for (pi = 0; pi < npols; pi++) {
3803			if (pols[pi] != pol &&
3804			    pols[pi]->action != XFRM_POLICY_ALLOW) {
3805				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3806				goto reject;
3807			}
3808			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
3809				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
3810				goto reject_error;
3811			}
3812			for (i = 0; i < pols[pi]->xfrm_nr; i++)
3813				tpp[ti++] = &pols[pi]->xfrm_vec[i];
3814		}
3815		xfrm_nr = ti;
3816
3817		if (npols > 1) {
3818			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
3819			tpp = stp;
3820		}
3821
3822		/* For each tunnel xfrm, find the first matching tmpl.
3823		 * For each tmpl before that, find corresponding xfrm.
3824		 * Order is _important_. Later we will implement
3825		 * some barriers, but at the moment barriers
3826		 * are implied between each two transformations.
3827		 * Upon success, marks secpath entries as having been
3828		 * verified to allow them to be skipped in future policy
3829		 * checks (e.g. nested tunnels).
3830		 */
3831		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
3832			k = xfrm_policy_ok(tpp[i], sp, k, family, if_id);
3833			if (k < 0) {
3834				if (k < -1)
3835					/* "-2 - errored_index" returned */
3836					xerr_idx = -(2+k);
3837				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3838				goto reject;
3839			}
3840		}
3841
3842		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
3843			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3844			goto reject;
3845		}
3846
3847		xfrm_pols_put(pols, npols);
3848		sp->verified_cnt = k;
3849
3850		return 1;
3851	}
3852	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3853
3854reject:
3855	xfrm_secpath_reject(xerr_idx, skb, &fl);
3856reject_error:
3857	xfrm_pols_put(pols, npols);
3858	return 0;
3859}
3860EXPORT_SYMBOL(__xfrm_policy_check);
3861
3862int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
3863{
3864	struct net *net = dev_net(skb->dev);
3865	struct flowi fl;
3866	struct dst_entry *dst;
3867	int res = 1;
3868
3869	if (xfrm_decode_session(net, skb, &fl, family) < 0) {
3870		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3871		return 0;
3872	}
3873
3874	skb_dst_force(skb);
3875	if (!skb_dst(skb)) {
3876		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3877		return 0;
3878	}
3879
3880	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
3881	if (IS_ERR(dst)) {
3882		res = 0;
3883		dst = NULL;
3884	}
3885
3886	if (dst && !dst->xfrm)
3887		dst = xfrm_out_fwd_icmp(skb, &fl, family, dst);
3888
3889	skb_dst_set(skb, dst);
3890	return res;
3891}
3892EXPORT_SYMBOL(__xfrm_route_forward);
3893
3894/* Optimize later using cookies and generation ids. */
3895
3896static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
3897{
3898	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
3899	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
3900	 * get validated by dst_ops->check on every use.  We do this
3901	 * because when a normal route referenced by an XFRM dst is
3902	 * obsoleted, we do not go looking around for all of the parent
3903	 * XFRM dsts that reference it so that we can invalidate them.  It
3904	 * is just too much work.  Instead we make the checks here on
3905	 * every use.  For example:
3906	 *
3907	 *	XFRM dst A --> IPv4 dst X
3908	 *
3909	 * X is the "xdst->route" of A (X is also the "dst->path" of A
3910	 * in this example).  If X is marked obsolete, "A" will not
3911	 * notice.  That's what we are validating here via the
3912	 * stale_bundle() check.
3913	 *
3914	 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
3915	 * be marked on it.
3916	 * This will force stale_bundle() to fail on any xdst bundle with
3917	 * this dst linked in it.
3918	 */
3919	if (dst->obsolete < 0 && !stale_bundle(dst))
3920		return dst;
3921
3922	return NULL;
3923}
3924
3925static int stale_bundle(struct dst_entry *dst)
3926{
3927	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
3928}
3929
3930void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
3931{
3932	while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
3933		dst->dev = blackhole_netdev;
3934		dev_hold(dst->dev);
3935		dev_put(dev);
3936	}
3937}
3938EXPORT_SYMBOL(xfrm_dst_ifdown);
3939
3940static void xfrm_link_failure(struct sk_buff *skb)
3941{
3942	/* Impossible. Such a dst must be popped before it reaches the point of failure. */
3943}
3944
3945static void xfrm_negative_advice(struct sock *sk, struct dst_entry *dst)
3946{
3947	if (dst->obsolete)
3948		sk_dst_reset(sk);
3949}
3950
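/* Initialize the cached MTUs of a freshly built bundle.  For each
 * segment the child and route MTUs are cached and the segment's MTU
 * metric is set to the smaller of the state-adjusted child MTU and the
 * route MTU.
 */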
3951static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
3952{
3953	while (nr--) {
3954		struct xfrm_dst *xdst = bundle[nr];
3955		u32 pmtu, route_mtu_cached;
3956		struct dst_entry *dst;
3957
3958		dst = &xdst->u.dst;
3959		pmtu = dst_mtu(xfrm_dst_child(dst));
3960		xdst->child_mtu_cached = pmtu;
3961
3962		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
3963
3964		route_mtu_cached = dst_mtu(xdst->route);
3965		xdst->route_mtu_cached = route_mtu_cached;
3966
3967		if (pmtu > route_mtu_cached)
3968			pmtu = route_mtu_cached;
3969
3970		dst_metric_set(dst, RTAX_MTU, pmtu);
3971	}
3972}
3973
3974/* Check that the bundle accepts the flow and its components are
3975 * still valid.
3976 */
3977
3978static int xfrm_bundle_ok(struct xfrm_dst *first)
3979{
3980	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
3981	struct dst_entry *dst = &first->u.dst;
3982	struct xfrm_dst *xdst;
3983	int start_from, nr;
3984	u32 mtu;
3985
3986	if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
3987	    (dst->dev && !netif_running(dst->dev)))
3988		return 0;
3989
3990	if (dst->flags & DST_XFRM_QUEUE)
3991		return 1;
3992
3993	start_from = nr = 0;
3994	do {
3995		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
3996
3997		if (dst->xfrm->km.state != XFRM_STATE_VALID)
3998			return 0;
3999		if (xdst->xfrm_genid != dst->xfrm->genid)
4000			return 0;
4001		if (xdst->num_pols > 0 &&
4002		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
4003			return 0;
4004
4005		bundle[nr++] = xdst;
4006
4007		mtu = dst_mtu(xfrm_dst_child(dst));
4008		if (xdst->child_mtu_cached != mtu) {
4009			start_from = nr;
4010			xdst->child_mtu_cached = mtu;
4011		}
4012
4013		if (!dst_check(xdst->route, xdst->route_cookie))
4014			return 0;
4015		mtu = dst_mtu(xdst->route);
4016		if (xdst->route_mtu_cached != mtu) {
4017			start_from = nr;
4018			xdst->route_mtu_cached = mtu;
4019		}
4020
4021		dst = xfrm_dst_child(dst);
4022	} while (dst->xfrm);
4023
4024	if (likely(!start_from))
4025		return 1;
4026
4027	xdst = bundle[start_from - 1];
4028	mtu = xdst->child_mtu_cached;
4029	while (start_from--) {
4030		dst = &xdst->u.dst;
4031
4032		mtu = xfrm_state_mtu(dst->xfrm, mtu);
4033		if (mtu > xdst->route_mtu_cached)
4034			mtu = xdst->route_mtu_cached;
4035		dst_metric_set(dst, RTAX_MTU, mtu);
4036		if (!start_from)
4037			break;
4038
4039		xdst = bundle[start_from - 1];
4040		xdst->child_mtu_cached = mtu;
4041	}
4042
4043	return 1;
4044}
4045
4046static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
4047{
4048	return dst_metric_advmss(xfrm_dst_path(dst));
4049}
4050
4051static unsigned int xfrm_mtu(const struct dst_entry *dst)
4052{
4053	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
4054
4055	return mtu ? : dst_mtu(xfrm_dst_path(dst));
4056}
4057
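/* Walk down the bundle and return the address that neighbour resolution on
 * the path dst should use, substituting the tunnel endpoint (or care-of
 * address) for the inner destination where a tunnel-mode state is present.
 */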
4058static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
4059					const void *daddr)
4060{
4061	while (dst->xfrm) {
4062		const struct xfrm_state *xfrm = dst->xfrm;
4063
4064		dst = xfrm_dst_child(dst);
4065
4066		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
4067			continue;
4068		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
4069			daddr = xfrm->coaddr;
4070		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
4071			daddr = &xfrm->id.daddr;
4072	}
4073	return daddr;
4074}
4075
4076static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
4077					   struct sk_buff *skb,
4078					   const void *daddr)
4079{
4080	const struct dst_entry *path = xfrm_dst_path(dst);
4081
4082	if (!skb)
4083		daddr = xfrm_get_dst_nexthop(dst, daddr);
4084	return path->ops->neigh_lookup(path, skb, daddr);
4085}
4086
4087static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
4088{
4089	const struct dst_entry *path = xfrm_dst_path(dst);
4090
4091	daddr = xfrm_get_dst_nexthop(dst, daddr);
4092	path->ops->confirm_neigh(path, daddr);
4093}
4094
4095int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
4096{
4097	int err = 0;
4098
4099	if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
4100		return -EAFNOSUPPORT;
4101
4102	spin_lock(&xfrm_policy_afinfo_lock);
4103	if (unlikely(xfrm_policy_afinfo[family] != NULL))
4104		err = -EEXIST;
4105	else {
4106		struct dst_ops *dst_ops = afinfo->dst_ops;
4107		if (likely(dst_ops->kmem_cachep == NULL))
4108			dst_ops->kmem_cachep = xfrm_dst_cache;
4109		if (likely(dst_ops->check == NULL))
4110			dst_ops->check = xfrm_dst_check;
4111		if (likely(dst_ops->default_advmss == NULL))
4112			dst_ops->default_advmss = xfrm_default_advmss;
4113		if (likely(dst_ops->mtu == NULL))
4114			dst_ops->mtu = xfrm_mtu;
4115		if (likely(dst_ops->negative_advice == NULL))
4116			dst_ops->negative_advice = xfrm_negative_advice;
4117		if (likely(dst_ops->link_failure == NULL))
4118			dst_ops->link_failure = xfrm_link_failure;
4119		if (likely(dst_ops->neigh_lookup == NULL))
4120			dst_ops->neigh_lookup = xfrm_neigh_lookup;
4121		if (likely(!dst_ops->confirm_neigh))
4122			dst_ops->confirm_neigh = xfrm_confirm_neigh;
4123		rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
4124	}
4125	spin_unlock(&xfrm_policy_afinfo_lock);
4126
4127	return err;
4128}
4129EXPORT_SYMBOL(xfrm_policy_register_afinfo);
4130
4131void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
4132{
4133	struct dst_ops *dst_ops = afinfo->dst_ops;
4134	int i;
4135
4136	for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
4137		if (xfrm_policy_afinfo[i] != afinfo)
4138			continue;
4139		RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
4140		break;
4141	}
4142
4143	synchronize_rcu();
4144
4145	dst_ops->kmem_cachep = NULL;
4146	dst_ops->check = NULL;
4147	dst_ops->negative_advice = NULL;
4148	dst_ops->link_failure = NULL;
4149}
4150EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
4151
4152void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
4153{
4154	spin_lock(&xfrm_if_cb_lock);
4155	rcu_assign_pointer(xfrm_if_cb, ifcb);
4156	spin_unlock(&xfrm_if_cb_lock);
4157}
4158EXPORT_SYMBOL(xfrm_if_register_cb);
4159
4160void xfrm_if_unregister_cb(void)
4161{
4162	RCU_INIT_POINTER(xfrm_if_cb, NULL);
4163	synchronize_rcu();
4164}
4165EXPORT_SYMBOL(xfrm_if_unregister_cb);
4166
4167#ifdef CONFIG_XFRM_STATISTICS
4168static int __net_init xfrm_statistics_init(struct net *net)
4169{
4170	int rv;
4171	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
4172	if (!net->mib.xfrm_statistics)
4173		return -ENOMEM;
4174	rv = xfrm_proc_init(net);
4175	if (rv < 0)
4176		free_percpu(net->mib.xfrm_statistics);
4177	return rv;
4178}
4179
4180static void xfrm_statistics_fini(struct net *net)
4181{
4182	xfrm_proc_fini(net);
4183	free_percpu(net->mib.xfrm_statistics);
4184}
4185#else
4186static int __net_init xfrm_statistics_init(struct net *net)
4187{
4188	return 0;
4189}
4190
4191static void xfrm_statistics_fini(struct net *net)
4192{
4193}
4194#endif
4195
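/* Per-netns policy setup: allocate the by-index and by-destination hash
 * tables, seed the hash thresholds and initialise the resize/rebuild work.
 */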
4196static int __net_init xfrm_policy_init(struct net *net)
4197{
4198	unsigned int hmask, sz;
4199	int dir, err;
4200
4201	if (net_eq(net, &init_net)) {
4202		xfrm_dst_cache = KMEM_CACHE(xfrm_dst, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
4203		err = rhashtable_init(&xfrm_policy_inexact_table,
4204				      &xfrm_pol_inexact_params);
4205		BUG_ON(err);
4206	}
4207
4208	hmask = 8 - 1;
4209	sz = (hmask+1) * sizeof(struct hlist_head);
4210
4211	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
4212	if (!net->xfrm.policy_byidx)
4213		goto out_byidx;
4214	net->xfrm.policy_idx_hmask = hmask;
4215
4216	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4217		struct xfrm_policy_hash *htab;
4218
4219		net->xfrm.policy_count[dir] = 0;
4220		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
4221
4222		htab = &net->xfrm.policy_bydst[dir];
4223		htab->table = xfrm_hash_alloc(sz);
4224		if (!htab->table)
4225			goto out_bydst;
4226		htab->hmask = hmask;
4227		htab->dbits4 = 32;
4228		htab->sbits4 = 32;
4229		htab->dbits6 = 128;
4230		htab->sbits6 = 128;
4231	}
4232	net->xfrm.policy_hthresh.lbits4 = 32;
4233	net->xfrm.policy_hthresh.rbits4 = 32;
4234	net->xfrm.policy_hthresh.lbits6 = 128;
4235	net->xfrm.policy_hthresh.rbits6 = 128;
4236
4237	seqlock_init(&net->xfrm.policy_hthresh.lock);
4238
4239	INIT_LIST_HEAD(&net->xfrm.policy_all);
4240	INIT_LIST_HEAD(&net->xfrm.inexact_bins);
4241	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
4242	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
4243	return 0;
4244
4245out_bydst:
4246	for (dir--; dir >= 0; dir--) {
4247		struct xfrm_policy_hash *htab;
4248
4249		htab = &net->xfrm.policy_bydst[dir];
4250		xfrm_hash_free(htab->table, sz);
4251	}
4252	xfrm_hash_free(net->xfrm.policy_byidx, sz);
4253out_byidx:
4254	return -ENOMEM;
4255}
4256
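/* Per-netns policy teardown: flush the remaining policies, free the hash
 * tables and prune any leftover inexact bins.
 */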
4257static void xfrm_policy_fini(struct net *net)
4258{
4259	struct xfrm_pol_inexact_bin *b, *t;
4260	unsigned int sz;
4261	int dir;
4262
4263	flush_work(&net->xfrm.policy_hash_work);
4264#ifdef CONFIG_XFRM_SUB_POLICY
4265	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
4266#endif
4267	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
4268
4269	WARN_ON(!list_empty(&net->xfrm.policy_all));
4270
4271	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4272		struct xfrm_policy_hash *htab;
4273
4274		htab = &net->xfrm.policy_bydst[dir];
4275		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
4276		WARN_ON(!hlist_empty(htab->table));
4277		xfrm_hash_free(htab->table, sz);
4278	}
4279
4280	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
4281	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
4282	xfrm_hash_free(net->xfrm.policy_byidx, sz);
4283
4284	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4285	list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
4286		__xfrm_policy_inexact_prune_bin(b, true);
4287	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4288}
4289
4290static int __net_init xfrm_net_init(struct net *net)
4291{
4292	int rv;
4293
4294	/* Initialize the per-net locks here */
4295	spin_lock_init(&net->xfrm.xfrm_state_lock);
4296	spin_lock_init(&net->xfrm.xfrm_policy_lock);
4297	seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
4298	mutex_init(&net->xfrm.xfrm_cfg_mutex);
4299	net->xfrm.policy_default[XFRM_POLICY_IN] = XFRM_USERPOLICY_ACCEPT;
4300	net->xfrm.policy_default[XFRM_POLICY_FWD] = XFRM_USERPOLICY_ACCEPT;
4301	net->xfrm.policy_default[XFRM_POLICY_OUT] = XFRM_USERPOLICY_ACCEPT;
4302
4303	rv = xfrm_statistics_init(net);
4304	if (rv < 0)
4305		goto out_statistics;
4306	rv = xfrm_state_init(net);
4307	if (rv < 0)
4308		goto out_state;
4309	rv = xfrm_policy_init(net);
4310	if (rv < 0)
4311		goto out_policy;
4312	rv = xfrm_sysctl_init(net);
4313	if (rv < 0)
4314		goto out_sysctl;
4315
4316	rv = xfrm_nat_keepalive_net_init(net);
4317	if (rv < 0)
4318		goto out_nat_keepalive;
4319
4320	return 0;
4321
4322out_nat_keepalive:
4323	xfrm_sysctl_fini(net);
4324out_sysctl:
4325	xfrm_policy_fini(net);
4326out_policy:
4327	xfrm_state_fini(net);
4328out_state:
4329	xfrm_statistics_fini(net);
4330out_statistics:
4331	return rv;
4332}
4333
4334static void __net_exit xfrm_net_exit(struct net *net)
4335{
4336	xfrm_nat_keepalive_net_fini(net);
4337	xfrm_sysctl_fini(net);
4338	xfrm_policy_fini(net);
4339	xfrm_state_fini(net);
4340	xfrm_statistics_fini(net);
4341}
4342
4343static struct pernet_operations __net_initdata xfrm_net_ops = {
4344	.init = xfrm_net_init,
4345	.exit = xfrm_net_exit,
4346};
4347
4348static const struct flow_dissector_key xfrm_flow_dissector_keys[] = {
4349	{
4350		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
4351		.offset = offsetof(struct xfrm_flow_keys, control),
4352	},
4353	{
4354		.key_id = FLOW_DISSECTOR_KEY_BASIC,
4355		.offset = offsetof(struct xfrm_flow_keys, basic),
4356	},
4357	{
4358		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
4359		.offset = offsetof(struct xfrm_flow_keys, addrs.ipv4),
4360	},
4361	{
4362		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
4363		.offset = offsetof(struct xfrm_flow_keys, addrs.ipv6),
4364	},
4365	{
4366		.key_id = FLOW_DISSECTOR_KEY_PORTS,
4367		.offset = offsetof(struct xfrm_flow_keys, ports),
4368	},
4369	{
4370		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
4371		.offset = offsetof(struct xfrm_flow_keys, gre),
4372	},
4373	{
4374		.key_id = FLOW_DISSECTOR_KEY_IP,
4375		.offset = offsetof(struct xfrm_flow_keys, ip),
4376	},
4377	{
4378		.key_id = FLOW_DISSECTOR_KEY_ICMP,
4379		.offset = offsetof(struct xfrm_flow_keys, icmp),
4380	},
4381};
4382
4383void __init xfrm_init(void)
4384{
4385	skb_flow_dissector_init(&xfrm_session_dissector,
4386				xfrm_flow_dissector_keys,
4387				ARRAY_SIZE(xfrm_flow_dissector_keys));
4388
4389	register_pernet_subsys(&xfrm_net_ops);
4390	xfrm_dev_init();
4391	xfrm_input_init();
4392
4393#ifdef CONFIG_XFRM_ESPINTCP
4394	espintcp_init();
4395#endif
4396
4397	register_xfrm_state_bpf();
4398	xfrm_nat_keepalive_init(AF_INET);
4399}
4400
4401#ifdef CONFIG_AUDITSYSCALL
4402static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
4403					 struct audit_buffer *audit_buf)
4404{
4405	struct xfrm_sec_ctx *ctx = xp->security;
4406	struct xfrm_selector *sel = &xp->selector;
4407
4408	if (ctx)
4409		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
4410				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
4411
4412	switch (sel->family) {
4413	case AF_INET:
4414		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
4415		if (sel->prefixlen_s != 32)
4416			audit_log_format(audit_buf, " src_prefixlen=%d",
4417					 sel->prefixlen_s);
4418		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
4419		if (sel->prefixlen_d != 32)
4420			audit_log_format(audit_buf, " dst_prefixlen=%d",
4421					 sel->prefixlen_d);
4422		break;
4423	case AF_INET6:
4424		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
4425		if (sel->prefixlen_s != 128)
4426			audit_log_format(audit_buf, " src_prefixlen=%d",
4427					 sel->prefixlen_s);
4428		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
4429		if (sel->prefixlen_d != 128)
4430			audit_log_format(audit_buf, " dst_prefixlen=%d",
4431					 sel->prefixlen_d);
4432		break;
4433	}
4434}
4435
4436void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
4437{
4438	struct audit_buffer *audit_buf;
4439
4440	audit_buf = xfrm_audit_start("SPD-add");
4441	if (audit_buf == NULL)
4442		return;
4443	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4444	audit_log_format(audit_buf, " res=%u", result);
4445	xfrm_audit_common_policyinfo(xp, audit_buf);
4446	audit_log_end(audit_buf);
4447}
4448EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
4449
4450void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
4451			      bool task_valid)
4452{
4453	struct audit_buffer *audit_buf;
4454
4455	audit_buf = xfrm_audit_start("SPD-delete");
4456	if (audit_buf == NULL)
4457		return;
4458	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4459	audit_log_format(audit_buf, " res=%u", result);
4460	xfrm_audit_common_policyinfo(xp, audit_buf);
4461	audit_log_end(audit_buf);
4462}
4463EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
4464#endif
4465
4466#ifdef CONFIG_XFRM_MIGRATE
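/* Build a flow from the migration selector and look up the policy to be
 * migrated, taking a reference under RCU.
 */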
4467static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
4468						    u8 dir, u8 type, struct net *net, u32 if_id)
4469{
4470	struct xfrm_policy *pol;
4471	struct flowi fl;
4472
4473	memset(&fl, 0, sizeof(fl));
4474
4475	fl.flowi_proto = sel->proto;
4476
4477	switch (sel->family) {
4478	case AF_INET:
4479		fl.u.ip4.saddr = sel->saddr.a4;
4480		fl.u.ip4.daddr = sel->daddr.a4;
4481		if (sel->proto == IPSEC_ULPROTO_ANY)
4482			break;
4483		fl.u.flowi4_oif = sel->ifindex;
4484		fl.u.ip4.fl4_sport = sel->sport;
4485		fl.u.ip4.fl4_dport = sel->dport;
4486		break;
4487	case AF_INET6:
4488		fl.u.ip6.saddr = sel->saddr.in6;
4489		fl.u.ip6.daddr = sel->daddr.in6;
4490		if (sel->proto == IPSEC_ULPROTO_ANY)
4491			break;
4492		fl.u.flowi6_oif = sel->ifindex;
4493		fl.u.ip6.fl6_sport = sel->sport;
4494		fl.u.ip6.fl6_dport = sel->dport;
4495		break;
4496	default:
4497		return ERR_PTR(-EAFNOSUPPORT);
4498	}
4499
4500	rcu_read_lock();
4501
4502	pol = xfrm_policy_lookup_bytype(net, type, &fl, sel->family, dir, if_id);
4503	if (IS_ERR_OR_NULL(pol))
4504		goto out_unlock;
4505
4506	if (!xfrm_pol_hold_rcu(pol))
4507		pol = NULL;
4508out_unlock:
4509	rcu_read_unlock();
4510	return pol;
4511}
4512
4513static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
4514{
4515	int match = 0;
4516
4517	if (t->mode == m->mode && t->id.proto == m->proto &&
4518	    (m->reqid == 0 || t->reqid == m->reqid)) {
4519		switch (t->mode) {
4520		case XFRM_MODE_TUNNEL:
4521		case XFRM_MODE_BEET:
4522			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
4523					    m->old_family) &&
4524			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
4525					    m->old_family)) {
4526				match = 1;
4527			}
4528			break;
4529		case XFRM_MODE_TRANSPORT:
4530			/* in case of transport mode, template does not store
4531			   any IP addresses, hence we just compare mode and
4532			   protocol */
4533			match = 1;
4534			break;
4535		default:
4536			break;
4537		}
4538	}
4539	return match;
4540}
4541
4542/* update endpoint address(es) of template(s) */
4543static int xfrm_policy_migrate(struct xfrm_policy *pol,
4544			       struct xfrm_migrate *m, int num_migrate,
4545			       struct netlink_ext_ack *extack)
4546{
4547	struct xfrm_migrate *mp;
4548	int i, j, n = 0;
4549
4550	write_lock_bh(&pol->lock);
4551	if (unlikely(pol->walk.dead)) {
4552		/* target policy has been deleted */
4553		NL_SET_ERR_MSG(extack, "Target policy not found");
4554		write_unlock_bh(&pol->lock);
4555		return -ENOENT;
4556	}
4557
4558	for (i = 0; i < pol->xfrm_nr; i++) {
4559		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
4560			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
4561				continue;
4562			n++;
4563			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
4564			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
4565				continue;
4566			/* update endpoints */
4567			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
4568			       sizeof(pol->xfrm_vec[i].id.daddr));
4569			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
4570			       sizeof(pol->xfrm_vec[i].saddr));
4571			pol->xfrm_vec[i].encap_family = mp->new_family;
4572			/* flush bundles */
4573			atomic_inc(&pol->genid);
4574		}
4575	}
4576
4577	write_unlock_bh(&pol->lock);
4578
4579	if (!n)
4580		return -ENODATA;
4581
4582	return 0;
4583}
4584
4585static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate,
4586			      struct netlink_ext_ack *extack)
4587{
4588	int i, j;
4589
4590	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH) {
4591		NL_SET_ERR_MSG(extack, "Invalid number of SAs to migrate, must be 0 < num <= XFRM_MAX_DEPTH (6)");
4592		return -EINVAL;
4593	}
4594
4595	for (i = 0; i < num_migrate; i++) {
4596		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
4597		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family)) {
4598			NL_SET_ERR_MSG(extack, "Addresses in the MIGRATE attribute's list cannot be null");
4599			return -EINVAL;
4600		}
4601
4602		/* check if there is any duplicated entry */
4603		for (j = i + 1; j < num_migrate; j++) {
4604			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
4605				    sizeof(m[i].old_daddr)) &&
4606			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
4607				    sizeof(m[i].old_saddr)) &&
4608			    m[i].proto == m[j].proto &&
4609			    m[i].mode == m[j].mode &&
4610			    m[i].reqid == m[j].reqid &&
4611			    m[i].old_family == m[j].old_family) {
4612				NL_SET_ERR_MSG(extack, "Entries in the MIGRATE attribute's list must be unique");
4613				return -EINVAL;
4614			}
4615		}
4616	}
4617
4618	return 0;
4619}
4620
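/* Migrate matching states and the policy's templates to the new endpoints:
 * sanity-check the request, find the policy, clone the affected states at
 * the new addresses, update the policy templates, delete the old states and
 * announce the change via km_migrate().
 */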
4621int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
4622		 struct xfrm_migrate *m, int num_migrate,
4623		 struct xfrm_kmaddress *k, struct net *net,
4624		 struct xfrm_encap_tmpl *encap, u32 if_id,
4625		 struct netlink_ext_ack *extack)
4626{
4627	int i, err, nx_cur = 0, nx_new = 0;
4628	struct xfrm_policy *pol = NULL;
4629	struct xfrm_state *x, *xc;
4630	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
4631	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
4632	struct xfrm_migrate *mp;
4633
4634	/* Stage 0 - sanity checks */
4635	err = xfrm_migrate_check(m, num_migrate, extack);
4636	if (err < 0)
4637		goto out;
4638
4639	if (dir >= XFRM_POLICY_MAX) {
4640		NL_SET_ERR_MSG(extack, "Invalid policy direction");
4641		err = -EINVAL;
4642		goto out;
4643	}
4644
4645	/* Stage 1 - find policy */
4646	pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id);
4647	if (IS_ERR_OR_NULL(pol)) {
4648		NL_SET_ERR_MSG(extack, "Target policy not found");
4649		err = IS_ERR(pol) ? PTR_ERR(pol) : -ENOENT;
4650		goto out;
4651	}
4652
4653	/* Stage 2 - find and update state(s) */
4654	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
4655		if ((x = xfrm_migrate_state_find(mp, net, if_id))) {
4656			x_cur[nx_cur] = x;
4657			nx_cur++;
4658			xc = xfrm_state_migrate(x, mp, encap);
4659			if (xc) {
4660				x_new[nx_new] = xc;
4661				nx_new++;
4662			} else {
4663				err = -ENODATA;
4664				goto restore_state;
4665			}
4666		}
4667	}
4668
4669	/* Stage 3 - update policy */
4670	err = xfrm_policy_migrate(pol, m, num_migrate, extack);
4671	if (err < 0)
4672		goto restore_state;
4673
4674	/* Stage 4 - delete old state(s) */
4675	if (nx_cur) {
4676		xfrm_states_put(x_cur, nx_cur);
4677		xfrm_states_delete(x_cur, nx_cur);
4678	}
4679
4680	/* Stage 5 - announce */
4681	km_migrate(sel, dir, type, m, num_migrate, k, encap);
4682
4683	xfrm_pol_put(pol);
4684
4685	return 0;
4686out:
4687	return err;
4688
4689restore_state:
4690	if (pol)
4691		xfrm_pol_put(pol);
4692	if (nx_cur)
4693		xfrm_states_put(x_cur, nx_cur);
4694	if (nx_new)
4695		xfrm_states_delete(x_new, nx_new);
4696
4697	return err;
4698}
4699EXPORT_SYMBOL(xfrm_migrate);
4700#endif
   1/*
   2 * xfrm_policy.c
   3 *
   4 * Changes:
   5 *	Mitsuru KANDA @USAGI
   6 * 	Kazunori MIYAZAWA @USAGI
   7 * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
   8 * 		IPv6 support
   9 * 	Kazunori MIYAZAWA @USAGI
  10 * 	YOSHIFUJI Hideaki
  11 * 		Split up af-specific portion
  12 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
  13 *
  14 */
  15
  16#include <linux/err.h>
  17#include <linux/slab.h>
  18#include <linux/kmod.h>
  19#include <linux/list.h>
  20#include <linux/spinlock.h>
  21#include <linux/workqueue.h>
  22#include <linux/notifier.h>
  23#include <linux/netdevice.h>
  24#include <linux/netfilter.h>
  25#include <linux/module.h>
  26#include <linux/cache.h>
  27#include <linux/cpu.h>
  28#include <linux/audit.h>
 
 
 
  29#include <net/dst.h>
  30#include <net/flow.h>
 
  31#include <net/xfrm.h>
  32#include <net/ip.h>
 
 
 
 
  33#ifdef CONFIG_XFRM_STATISTICS
  34#include <net/snmp.h>
  35#endif
 
 
 
 
  36
  37#include "xfrm_hash.h"
  38
  39#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
  40#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
  41#define XFRM_MAX_QUEUE_LEN	100
  42
  43struct xfrm_flo {
  44	struct dst_entry *dst_orig;
  45	u8 flags;
  46};
  47
  48static DEFINE_PER_CPU(struct xfrm_dst *, xfrm_last_dst);
  49static struct work_struct *xfrm_pcpu_work __read_mostly;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  50static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
  51static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
  52						__read_mostly;
  53
  54static struct kmem_cache *xfrm_dst_cache __ro_after_init;
  55static __read_mostly seqcount_t xfrm_policy_hash_generation;
 
 
  56
  57static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
  58static int stale_bundle(struct dst_entry *dst);
  59static int xfrm_bundle_ok(struct xfrm_dst *xdst);
  60static void xfrm_policy_queue_process(struct timer_list *t);
  61
  62static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
  63static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
  64						int dir);
  65
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  66static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
  67{
  68	return refcount_inc_not_zero(&policy->refcnt);
  69}
  70
  71static inline bool
  72__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
  73{
  74	const struct flowi4 *fl4 = &fl->u.ip4;
  75
  76	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
  77		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
  78		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
  79		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
  80		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
  81		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
  82}
  83
  84static inline bool
  85__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
  86{
  87	const struct flowi6 *fl6 = &fl->u.ip6;
  88
  89	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
  90		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
  91		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
  92		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
  93		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
  94		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
  95}
  96
  97bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
  98			 unsigned short family)
  99{
 100	switch (family) {
 101	case AF_INET:
 102		return __xfrm4_selector_match(sel, fl);
 103	case AF_INET6:
 104		return __xfrm6_selector_match(sel, fl);
 105	}
 106	return false;
 107}
 108
 109static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
 110{
 111	const struct xfrm_policy_afinfo *afinfo;
 112
 113	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
 114		return NULL;
 115	rcu_read_lock();
 116	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
 117	if (unlikely(!afinfo))
 118		rcu_read_unlock();
 119	return afinfo;
 120}
 121
 122struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
 123				    const xfrm_address_t *saddr,
 124				    const xfrm_address_t *daddr,
 125				    int family, u32 mark)
 
 
 
 
 126{
 127	const struct xfrm_policy_afinfo *afinfo;
 128	struct dst_entry *dst;
 129
 130	afinfo = xfrm_policy_get_afinfo(family);
 131	if (unlikely(afinfo == NULL))
 132		return ERR_PTR(-EAFNOSUPPORT);
 133
 134	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
 135
 136	rcu_read_unlock();
 137
 138	return dst;
 139}
 140EXPORT_SYMBOL(__xfrm_dst_lookup);
 141
 142static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
 143						int tos, int oif,
 144						xfrm_address_t *prev_saddr,
 145						xfrm_address_t *prev_daddr,
 146						int family, u32 mark)
 147{
 
 148	struct net *net = xs_net(x);
 149	xfrm_address_t *saddr = &x->props.saddr;
 150	xfrm_address_t *daddr = &x->id.daddr;
 151	struct dst_entry *dst;
 152
 153	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
 154		saddr = x->coaddr;
 155		daddr = prev_daddr;
 156	}
 157	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
 158		saddr = prev_saddr;
 159		daddr = x->coaddr;
 160	}
 161
 162	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 163
 164	if (!IS_ERR(dst)) {
 165		if (prev_saddr != saddr)
 166			memcpy(prev_saddr, saddr,  sizeof(*prev_saddr));
 167		if (prev_daddr != daddr)
 168			memcpy(prev_daddr, daddr,  sizeof(*prev_daddr));
 169	}
 170
 171	return dst;
 172}
 173
 174static inline unsigned long make_jiffies(long secs)
 175{
 176	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
 177		return MAX_SCHEDULE_TIMEOUT-1;
 178	else
 179		return secs*HZ;
 180}
 181
 182static void xfrm_policy_timer(struct timer_list *t)
 183{
 184	struct xfrm_policy *xp = from_timer(xp, t, timer);
 185	unsigned long now = get_seconds();
 186	long next = LONG_MAX;
 187	int warn = 0;
 188	int dir;
 189
 190	read_lock(&xp->lock);
 191
 192	if (unlikely(xp->walk.dead))
 193		goto out;
 194
 195	dir = xfrm_policy_id2dir(xp->index);
 196
 197	if (xp->lft.hard_add_expires_seconds) {
 198		long tmo = xp->lft.hard_add_expires_seconds +
 199			xp->curlft.add_time - now;
 200		if (tmo <= 0)
 201			goto expired;
 202		if (tmo < next)
 203			next = tmo;
 204	}
 205	if (xp->lft.hard_use_expires_seconds) {
 206		long tmo = xp->lft.hard_use_expires_seconds +
 207			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
 208		if (tmo <= 0)
 209			goto expired;
 210		if (tmo < next)
 211			next = tmo;
 212	}
 213	if (xp->lft.soft_add_expires_seconds) {
 214		long tmo = xp->lft.soft_add_expires_seconds +
 215			xp->curlft.add_time - now;
 216		if (tmo <= 0) {
 217			warn = 1;
 218			tmo = XFRM_KM_TIMEOUT;
 219		}
 220		if (tmo < next)
 221			next = tmo;
 222	}
 223	if (xp->lft.soft_use_expires_seconds) {
 224		long tmo = xp->lft.soft_use_expires_seconds +
 225			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
 226		if (tmo <= 0) {
 227			warn = 1;
 228			tmo = XFRM_KM_TIMEOUT;
 229		}
 230		if (tmo < next)
 231			next = tmo;
 232	}
 233
 234	if (warn)
 235		km_policy_expired(xp, dir, 0, 0);
 236	if (next != LONG_MAX &&
 237	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
 238		xfrm_pol_hold(xp);
 239
 240out:
 241	read_unlock(&xp->lock);
 242	xfrm_pol_put(xp);
 243	return;
 244
 245expired:
 246	read_unlock(&xp->lock);
 247	if (!xfrm_policy_delete(xp, dir))
 248		km_policy_expired(xp, dir, 1, 0);
 249	xfrm_pol_put(xp);
 250}
 251
 252/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 253 * SPD calls.
 254 */
 255
 256struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
 257{
 258	struct xfrm_policy *policy;
 259
 260	policy = kzalloc(sizeof(struct xfrm_policy), gfp);
 261
 262	if (policy) {
 263		write_pnet(&policy->xp_net, net);
 264		INIT_LIST_HEAD(&policy->walk.all);
 
 265		INIT_HLIST_NODE(&policy->bydst);
 266		INIT_HLIST_NODE(&policy->byidx);
 267		rwlock_init(&policy->lock);
 268		refcount_set(&policy->refcnt, 1);
 269		skb_queue_head_init(&policy->polq.hold_queue);
 270		timer_setup(&policy->timer, xfrm_policy_timer, 0);
 271		timer_setup(&policy->polq.hold_timer,
 272			    xfrm_policy_queue_process, 0);
 273	}
 274	return policy;
 275}
 276EXPORT_SYMBOL(xfrm_policy_alloc);
 277
 278static void xfrm_policy_destroy_rcu(struct rcu_head *head)
 279{
 280	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
 281
 282	security_xfrm_policy_free(policy->security);
 283	kfree(policy);
 284}
 285
 286/* Destroy xfrm_policy: descendant resources must be released to this moment. */
 287
 288void xfrm_policy_destroy(struct xfrm_policy *policy)
 289{
 290	BUG_ON(!policy->walk.dead);
 291
 292	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
 293		BUG();
 294
 
 295	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
 296}
 297EXPORT_SYMBOL(xfrm_policy_destroy);
 298
 299/* Rule must be locked. Release descendant resources, announce
 300 * entry dead. The rule must be unlinked from lists to the moment.
 301 */
 302
 303static void xfrm_policy_kill(struct xfrm_policy *policy)
 304{
 
 
 
 
 
 
 305	policy->walk.dead = 1;
 
 306
 307	atomic_inc(&policy->genid);
 308
 309	if (del_timer(&policy->polq.hold_timer))
 310		xfrm_pol_put(policy);
 311	skb_queue_purge(&policy->polq.hold_queue);
 312
 313	if (del_timer(&policy->timer))
 314		xfrm_pol_put(policy);
 315
 
 
 
 
 
 
 
 316	xfrm_pol_put(policy);
 317}
 318
 319static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
 320
 321static inline unsigned int idx_hash(struct net *net, u32 index)
 322{
 323	return __idx_hash(index, net->xfrm.policy_idx_hmask);
 324}
 325
 326/* calculate policy hash thresholds */
 327static void __get_hash_thresh(struct net *net,
 328			      unsigned short family, int dir,
 329			      u8 *dbits, u8 *sbits)
 330{
 331	switch (family) {
 332	case AF_INET:
 333		*dbits = net->xfrm.policy_bydst[dir].dbits4;
 334		*sbits = net->xfrm.policy_bydst[dir].sbits4;
 335		break;
 336
 337	case AF_INET6:
 338		*dbits = net->xfrm.policy_bydst[dir].dbits6;
 339		*sbits = net->xfrm.policy_bydst[dir].sbits6;
 340		break;
 341
 342	default:
 343		*dbits = 0;
 344		*sbits = 0;
 345	}
 346}
 347
 348static struct hlist_head *policy_hash_bysel(struct net *net,
 349					    const struct xfrm_selector *sel,
 350					    unsigned short family, int dir)
 351{
 352	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 353	unsigned int hash;
 354	u8 dbits;
 355	u8 sbits;
 356
 357	__get_hash_thresh(net, family, dir, &dbits, &sbits);
 358	hash = __sel_hash(sel, family, hmask, dbits, sbits);
 359
 360	if (hash == hmask + 1)
 361		return &net->xfrm.policy_inexact[dir];
 362
 363	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
 364		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
 365}
 366
 367static struct hlist_head *policy_hash_direct(struct net *net,
 368					     const xfrm_address_t *daddr,
 369					     const xfrm_address_t *saddr,
 370					     unsigned short family, int dir)
 371{
 372	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 373	unsigned int hash;
 374	u8 dbits;
 375	u8 sbits;
 376
 377	__get_hash_thresh(net, family, dir, &dbits, &sbits);
 378	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
 379
 380	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
 381		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
 382}
 383
 384static void xfrm_dst_hash_transfer(struct net *net,
 385				   struct hlist_head *list,
 386				   struct hlist_head *ndsttable,
 387				   unsigned int nhashmask,
 388				   int dir)
 389{
 390	struct hlist_node *tmp, *entry0 = NULL;
 391	struct xfrm_policy *pol;
 392	unsigned int h0 = 0;
 393	u8 dbits;
 394	u8 sbits;
 395
 396redo:
 397	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
 398		unsigned int h;
 399
 400		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
 401		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
 402				pol->family, nhashmask, dbits, sbits);
 403		if (!entry0) {
 404			hlist_del_rcu(&pol->bydst);
 405			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
 406			h0 = h;
 407		} else {
 408			if (h != h0)
 409				continue;
 410			hlist_del_rcu(&pol->bydst);
 411			hlist_add_behind_rcu(&pol->bydst, entry0);
 412		}
 413		entry0 = &pol->bydst;
 414	}
 415	if (!hlist_empty(list)) {
 416		entry0 = NULL;
 417		goto redo;
 418	}
 419}
 420
 421static void xfrm_idx_hash_transfer(struct hlist_head *list,
 422				   struct hlist_head *nidxtable,
 423				   unsigned int nhashmask)
 424{
 425	struct hlist_node *tmp;
 426	struct xfrm_policy *pol;
 427
 428	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
 429		unsigned int h;
 430
 431		h = __idx_hash(pol->index, nhashmask);
 432		hlist_add_head(&pol->byidx, nidxtable+h);
 433	}
 434}
 435
 436static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
 437{
 438	return ((old_hmask + 1) << 1) - 1;
 439}
 440
 441static void xfrm_bydst_resize(struct net *net, int dir)
 442{
 443	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 444	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
 445	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
 446	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
 447	struct hlist_head *odst;
 448	int i;
 449
 450	if (!ndst)
 451		return;
 452
 453	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
 454	write_seqcount_begin(&xfrm_policy_hash_generation);
 455
 456	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
 457				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
 458
 459	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
 460				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
 461
 462	for (i = hmask; i >= 0; i--)
 463		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
 464
 465	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
 466	net->xfrm.policy_bydst[dir].hmask = nhashmask;
 467
 468	write_seqcount_end(&xfrm_policy_hash_generation);
 469	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 470
 471	synchronize_rcu();
 472
 473	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
 474}
 475
 476static void xfrm_byidx_resize(struct net *net, int total)
 477{
 478	unsigned int hmask = net->xfrm.policy_idx_hmask;
 479	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
 480	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
 481	struct hlist_head *oidx = net->xfrm.policy_byidx;
 482	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
 483	int i;
 484
 485	if (!nidx)
 486		return;
 487
 488	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
 489
 490	for (i = hmask; i >= 0; i--)
 491		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
 492
 493	net->xfrm.policy_byidx = nidx;
 494	net->xfrm.policy_idx_hmask = nhashmask;
 495
 496	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 497
 498	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
 499}
 500
 501static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
 502{
 503	unsigned int cnt = net->xfrm.policy_count[dir];
 504	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
 505
 506	if (total)
 507		*total += cnt;
 508
 509	if ((hmask + 1) < xfrm_policy_hashmax &&
 510	    cnt > hmask)
 511		return 1;
 512
 513	return 0;
 514}
 515
 516static inline int xfrm_byidx_should_resize(struct net *net, int total)
 517{
 518	unsigned int hmask = net->xfrm.policy_idx_hmask;
 519
 520	if ((hmask + 1) < xfrm_policy_hashmax &&
 521	    total > hmask)
 522		return 1;
 523
 524	return 0;
 525}
 526
 527void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
 528{
 529	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
 530	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
 531	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
 532	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
 533	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
 534	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
 535	si->spdhcnt = net->xfrm.policy_idx_hmask;
 536	si->spdhmcnt = xfrm_policy_hashmax;
 537}
 538EXPORT_SYMBOL(xfrm_spd_getinfo);
 539
 540static DEFINE_MUTEX(hash_resize_mutex);
 541static void xfrm_hash_resize(struct work_struct *work)
 542{
 543	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
 544	int dir, total;
 545
 546	mutex_lock(&hash_resize_mutex);
 547
 548	total = 0;
 549	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
 550		if (xfrm_bydst_should_resize(net, dir, &total))
 551			xfrm_bydst_resize(net, dir);
 552	}
 553	if (xfrm_byidx_should_resize(net, total))
 554		xfrm_byidx_resize(net, total);
 555
 556	mutex_unlock(&hash_resize_mutex);
 557}
 558
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 559static void xfrm_hash_rebuild(struct work_struct *work)
 560{
 561	struct net *net = container_of(work, struct net,
 562				       xfrm.policy_hthresh.work);
 563	unsigned int hmask;
 564	struct xfrm_policy *pol;
 565	struct xfrm_policy *policy;
 566	struct hlist_head *chain;
 567	struct hlist_head *odst;
 568	struct hlist_node *newpos;
 569	int i;
 570	int dir;
 571	unsigned seq;
 572	u8 lbits4, rbits4, lbits6, rbits6;
 573
 574	mutex_lock(&hash_resize_mutex);
 575
 576	/* read selector prefixlen thresholds */
 577	do {
 578		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
 579
 580		lbits4 = net->xfrm.policy_hthresh.lbits4;
 581		rbits4 = net->xfrm.policy_hthresh.rbits4;
 582		lbits6 = net->xfrm.policy_hthresh.lbits6;
 583		rbits6 = net->xfrm.policy_hthresh.rbits6;
 584	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
 585
 586	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 587
 588	/* reset the bydst and inexact table in all directions */
 589	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
 590		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
 591		hmask = net->xfrm.policy_bydst[dir].hmask;
 592		odst = net->xfrm.policy_bydst[dir].table;
 593		for (i = hmask; i >= 0; i--)
 594			INIT_HLIST_HEAD(odst + i);
 595		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
 596			/* dir out => dst = remote, src = local */
 597			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
 598			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
 599			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
 600			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
 601		} else {
 602			/* dir in/fwd => dst = local, src = remote */
 603			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
 604			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
 605			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
 606			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
 607		}
 608	}
 609
 610	/* re-insert all policies by order of creation */
 611	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
 612		if (policy->walk.dead ||
 613		    xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
 614			/* skip socket policies */
 615			continue;
 616		}
 
 
 617		newpos = NULL;
 
 618		chain = policy_hash_bysel(net, &policy->selector,
 619					  policy->family,
 620					  xfrm_policy_id2dir(policy->index));
 
 
 
 
 
 
 
 621		hlist_for_each_entry(pol, chain, bydst) {
 622			if (policy->priority >= pol->priority)
 623				newpos = &pol->bydst;
 624			else
 625				break;
 626		}
 627		if (newpos)
 628			hlist_add_behind(&policy->bydst, newpos);
 629		else
 630			hlist_add_head(&policy->bydst, chain);
 631	}
 632
 
 
 
 633	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 634
 635	mutex_unlock(&hash_resize_mutex);
 636}
 637
 638void xfrm_policy_hash_rebuild(struct net *net)
 639{
 640	schedule_work(&net->xfrm.policy_hthresh.work);
 641}
 642EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
 643
 644/* Generate new index... KAME seems to generate them ordered by cost
 645 * of an absolute inpredictability of ordering of rules. This will not pass. */
 646static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
 647{
 648	static u32 idx_generator;
 649
 650	for (;;) {
 651		struct hlist_head *list;
 652		struct xfrm_policy *p;
 653		u32 idx;
 654		int found;
 655
 656		if (!index) {
 657			idx = (idx_generator | dir);
 658			idx_generator += 8;
 659		} else {
 660			idx = index;
 661			index = 0;
 662		}
 663
 664		if (idx == 0)
 665			idx = 8;
 666		list = net->xfrm.policy_byidx + idx_hash(net, idx);
 667		found = 0;
 668		hlist_for_each_entry(p, list, byidx) {
 669			if (p->index == idx) {
 670				found = 1;
 671				break;
 672			}
 673		}
 674		if (!found)
 675			return idx;
 676	}
 677}
 678
 679static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
 680{
 681	u32 *p1 = (u32 *) s1;
 682	u32 *p2 = (u32 *) s2;
 683	int len = sizeof(struct xfrm_selector) / sizeof(u32);
 684	int i;
 685
 686	for (i = 0; i < len; i++) {
 687		if (p1[i] != p2[i])
 688			return 1;
 689	}
 690
 691	return 0;
 692}
 693
 694static void xfrm_policy_requeue(struct xfrm_policy *old,
 695				struct xfrm_policy *new)
 696{
 697	struct xfrm_policy_queue *pq = &old->polq;
 698	struct sk_buff_head list;
 699
 700	if (skb_queue_empty(&pq->hold_queue))
 701		return;
 702
 703	__skb_queue_head_init(&list);
 704
 705	spin_lock_bh(&pq->hold_queue.lock);
 706	skb_queue_splice_init(&pq->hold_queue, &list);
 707	if (del_timer(&pq->hold_timer))
 708		xfrm_pol_put(old);
 709	spin_unlock_bh(&pq->hold_queue.lock);
 710
 711	pq = &new->polq;
 712
 713	spin_lock_bh(&pq->hold_queue.lock);
 714	skb_queue_splice(&list, &pq->hold_queue);
 715	pq->timeout = XFRM_QUEUE_TMO_MIN;
 716	if (!mod_timer(&pq->hold_timer, jiffies))
 717		xfrm_pol_hold(new);
 718	spin_unlock_bh(&pq->hold_queue.lock);
 719}
 720
 721static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
 722				   struct xfrm_policy *pol)
 723{
 724	u32 mark = policy->mark.v & policy->mark.m;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 725
 726	if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
 727		return true;
 
 728
 729	if ((mark & pol->mark.m) == pol->mark.v &&
 730	    policy->priority == pol->priority)
 731		return true;
 732
 733	return false;
 734}
 735
 736int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
 
 
 
 
 
 
 
 
 
 
 737{
 738	struct net *net = xp_net(policy);
 739	struct xfrm_policy *pol;
 740	struct xfrm_policy *delpol;
 741	struct hlist_head *chain;
 742	struct hlist_node *newpos;
 743
 744	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
 745	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
 746	delpol = NULL;
 747	newpos = NULL;
 748	hlist_for_each_entry(pol, chain, bydst) {
 749		if (pol->type == policy->type &&
 
 750		    !selector_cmp(&pol->selector, &policy->selector) &&
 751		    xfrm_policy_mark_match(policy, pol) &&
 752		    xfrm_sec_ctx_match(pol->security, policy->security) &&
 753		    !WARN_ON(delpol)) {
 754			if (excl) {
 755				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 756				return -EEXIST;
 757			}
 758			delpol = pol;
 759			if (policy->priority > pol->priority)
 760				continue;
 761		} else if (policy->priority >= pol->priority) {
 762			newpos = &pol->bydst;
 763			continue;
 764		}
 765		if (delpol)
 766			break;
 767	}
 768	if (newpos)
 769		hlist_add_behind(&policy->bydst, newpos);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 770	else
 771		hlist_add_head(&policy->bydst, chain);
 
 
 
 
 
 
 772	__xfrm_policy_link(policy, dir);
 773
 774	/* After previous checking, family can either be AF_INET or AF_INET6 */
 775	if (policy->family == AF_INET)
 776		rt_genid_bump_ipv4(net);
 777	else
 778		rt_genid_bump_ipv6(net);
 779
 780	if (delpol) {
 781		xfrm_policy_requeue(delpol, policy);
 782		__xfrm_policy_unlink(delpol, dir);
 783	}
 784	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
 785	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
 786	policy->curlft.add_time = get_seconds();
 787	policy->curlft.use_time = 0;
 788	if (!mod_timer(&policy->timer, jiffies + HZ))
 789		xfrm_pol_hold(policy);
 790	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 791
 792	if (delpol)
 793		xfrm_policy_kill(delpol);
 794	else if (xfrm_bydst_should_resize(net, dir, NULL))
 795		schedule_work(&net->xfrm.policy_hash_work);
 796
 797	return 0;
 798}
 799EXPORT_SYMBOL(xfrm_policy_insert);
 800
 801struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
 802					  int dir, struct xfrm_selector *sel,
 803					  struct xfrm_sec_ctx *ctx, int delete,
 804					  int *err)
 805{
 806	struct xfrm_policy *pol, *ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 807	struct hlist_head *chain;
 808
 809	*err = 0;
 810	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
 811	chain = policy_hash_bysel(net, sel, sel->family, dir);
 812	ret = NULL;
 813	hlist_for_each_entry(pol, chain, bydst) {
 814		if (pol->type == type &&
 815		    (mark & pol->mark.m) == pol->mark.v &&
 816		    !selector_cmp(sel, &pol->selector) &&
 817		    xfrm_sec_ctx_match(ctx, pol->security)) {
 818			xfrm_pol_hold(pol);
 819			if (delete) {
 820				*err = security_xfrm_policy_delete(
 821								pol->security);
 822				if (*err) {
 823					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 824					return pol;
 825				}
 826				__xfrm_policy_unlink(pol, dir);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 827			}
 828			ret = pol;
 829			break;
 830		}
 
 831	}
 832	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 833
 834	if (ret && delete)
 835		xfrm_policy_kill(ret);
 
 
 836	return ret;
 837}
 838EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
 839
 840struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
 841				     int dir, u32 id, int delete, int *err)
 
 842{
 843	struct xfrm_policy *pol, *ret;
 844	struct hlist_head *chain;
 845
 846	*err = -ENOENT;
 847	if (xfrm_policy_id2dir(id) != dir)
 848		return NULL;
 849
 850	*err = 0;
 851	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
 852	chain = net->xfrm.policy_byidx + idx_hash(net, id);
 853	ret = NULL;
 854	hlist_for_each_entry(pol, chain, byidx) {
 855		if (pol->type == type && pol->index == id &&
 856		    (mark & pol->mark.m) == pol->mark.v) {
 857			xfrm_pol_hold(pol);
 858			if (delete) {
 859				*err = security_xfrm_policy_delete(
 860								pol->security);
 861				if (*err) {
 862					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 863					return pol;
 864				}
 865				__xfrm_policy_unlink(pol, dir);
 866			}
 867			ret = pol;
 868			break;
 869		}
 870	}
 871	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 872
 873	if (ret && delete)
 874		xfrm_policy_kill(ret);
 875	return ret;
 876}
 877EXPORT_SYMBOL(xfrm_policy_byid);
 878
 879#ifdef CONFIG_SECURITY_NETWORK_XFRM
 880static inline int
 881xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
 882{
 883	int dir, err = 0;
 
 884
 885	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
 886		struct xfrm_policy *pol;
 887		int i;
 
 
 888
 889		hlist_for_each_entry(pol,
 890				     &net->xfrm.policy_inexact[dir], bydst) {
 891			if (pol->type != type)
 892				continue;
 893			err = security_xfrm_policy_delete(pol->security);
 894			if (err) {
 895				xfrm_audit_policy_delete(pol, 0, task_valid);
 896				return err;
 897			}
 898		}
 899		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
 900			hlist_for_each_entry(pol,
 901					     net->xfrm.policy_bydst[dir].table + i,
 902					     bydst) {
 903				if (pol->type != type)
 904					continue;
 905				err = security_xfrm_policy_delete(
 906								pol->security);
 907				if (err) {
 908					xfrm_audit_policy_delete(pol, 0,
 909								 task_valid);
 910					return err;
 911				}
 912			}
 
 
 
 
 
 
 
 913		}
 914	}
 915	return err;
 916}
 917#else
 918static inline int
 919xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
 920{
 921	return 0;
 922}
 
 
 
 
 
 
 
 923#endif
 924
 925int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
 926{
 927	int dir, err = 0, cnt = 0;
 
 928
 929	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
 930
 931	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
 932	if (err)
 933		goto out;
 934
 935	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
 936		struct xfrm_policy *pol;
 937		int i;
 
 938
 939	again1:
 940		hlist_for_each_entry(pol,
 941				     &net->xfrm.policy_inexact[dir], bydst) {
 942			if (pol->type != type)
 943				continue;
 944			__xfrm_policy_unlink(pol, dir);
 945			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 946			cnt++;
 947
 948			xfrm_audit_policy_delete(pol, 1, task_valid);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 949
 950			xfrm_policy_kill(pol);
 
 
 
 
 951
 952			spin_lock_bh(&net->xfrm.xfrm_policy_lock);
 953			goto again1;
 954		}
 955
 956		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
 957	again2:
 958			hlist_for_each_entry(pol,
 959					     net->xfrm.policy_bydst[dir].table + i,
 960					     bydst) {
 961				if (pol->type != type)
 962					continue;
 963				__xfrm_policy_unlink(pol, dir);
 964				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 965				cnt++;
 966
 967				xfrm_audit_policy_delete(pol, 1, task_valid);
 968				xfrm_policy_kill(pol);
 
 
 969
 970				spin_lock_bh(&net->xfrm.xfrm_policy_lock);
 971				goto again2;
 972			}
 973		}
 974
 
 
 
 
 
 
 
 975	}
 976	if (!cnt)
 
 
 977		err = -ESRCH;
 978out:
 979	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 980	return err;
 981}
 982EXPORT_SYMBOL(xfrm_policy_flush);
 983
 984int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
 985		     int (*func)(struct xfrm_policy *, int, int, void*),
 986		     void *data)
 987{
 988	struct xfrm_policy *pol;
 989	struct xfrm_policy_walk_entry *x;
 990	int error = 0;
 991
 992	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
 993	    walk->type != XFRM_POLICY_TYPE_ANY)
 994		return -EINVAL;
 995
 996	if (list_empty(&walk->walk.all) && walk->seq != 0)
 997		return 0;
 998
 999	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1000	if (list_empty(&walk->walk.all))
1001		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
1002	else
1003		x = list_first_entry(&walk->walk.all,
1004				     struct xfrm_policy_walk_entry, all);
1005
1006	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1007		if (x->dead)
1008			continue;
1009		pol = container_of(x, struct xfrm_policy, walk);
1010		if (walk->type != XFRM_POLICY_TYPE_ANY &&
1011		    walk->type != pol->type)
1012			continue;
1013		error = func(pol, xfrm_policy_id2dir(pol->index),
1014			     walk->seq, data);
1015		if (error) {
1016			list_move_tail(&walk->walk.all, &x->all);
1017			goto out;
1018		}
1019		walk->seq++;
1020	}
1021	if (walk->seq == 0) {
1022		error = -ENOENT;
1023		goto out;
1024	}
1025	list_del_init(&walk->walk.all);
1026out:
1027	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1028	return error;
1029}
1030EXPORT_SYMBOL(xfrm_policy_walk);
1031
1032void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1033{
1034	INIT_LIST_HEAD(&walk->walk.all);
1035	walk->walk.dead = 1;
1036	walk->type = type;
1037	walk->seq = 0;
1038}
1039EXPORT_SYMBOL(xfrm_policy_walk_init);
1040
1041void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1042{
1043	if (list_empty(&walk->walk.all))
1044		return;
1045
1046	spin_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
1047	list_del(&walk->walk.all);
1048	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1049}
1050EXPORT_SYMBOL(xfrm_policy_walk_done);
1051
1052/*
1053 * Find policy to apply to this flow.
1054 *
1055 * Returns 0 if policy found, else an -errno.
1056 */
1057static int xfrm_policy_match(const struct xfrm_policy *pol,
1058			     const struct flowi *fl,
1059			     u8 type, u16 family, int dir)
1060{
1061	const struct xfrm_selector *sel = &pol->selector;
1062	int ret = -ESRCH;
1063	bool match;
1064
1065	if (pol->family != family ||
 
1066	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1067	    pol->type != type)
1068		return ret;
1069
1070	match = xfrm_selector_match(sel, fl, family);
1071	if (match)
1072		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
1073						  dir);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1074
1075	return ret;
1076}
1077
1078static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
1079						     const struct flowi *fl,
1080						     u16 family, u8 dir)
 
1081{
1082	int err;
 
 
1083	struct xfrm_policy *pol, *ret;
1084	const xfrm_address_t *daddr, *saddr;
1085	struct hlist_head *chain;
1086	unsigned int sequence;
1087	u32 priority;
1088
1089	daddr = xfrm_flowi_daddr(fl, family);
1090	saddr = xfrm_flowi_saddr(fl, family);
1091	if (unlikely(!daddr || !saddr))
1092		return NULL;
1093
1094	rcu_read_lock();
1095 retry:
1096	do {
1097		sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
1098		chain = policy_hash_direct(net, daddr, saddr, family, dir);
1099	} while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));
1100
1101	priority = ~0U;
1102	ret = NULL;
1103	hlist_for_each_entry_rcu(pol, chain, bydst) {
1104		err = xfrm_policy_match(pol, fl, type, family, dir);
1105		if (err) {
1106			if (err == -ESRCH)
1107				continue;
1108			else {
1109				ret = ERR_PTR(err);
1110				goto fail;
1111			}
1112		} else {
1113			ret = pol;
1114			priority = ret->priority;
1115			break;
1116		}
1117	}
1118	chain = &net->xfrm.policy_inexact[dir];
1119	hlist_for_each_entry_rcu(pol, chain, bydst) {
1120		if ((pol->priority >= priority) && ret)
1121			break;
 
 
 
1122
1123		err = xfrm_policy_match(pol, fl, type, family, dir);
1124		if (err) {
1125			if (err == -ESRCH)
1126				continue;
1127			else {
1128				ret = ERR_PTR(err);
1129				goto fail;
1130			}
1131		} else {
1132			ret = pol;
1133			break;
1134		}
1135	}
1136
1137	if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
 
1138		goto retry;
1139
1140	if (ret && !xfrm_pol_hold_rcu(ret))
1141		goto retry;
1142fail:
1143	rcu_read_unlock();
1144
1145	return ret;
1146}
1147
1148static struct xfrm_policy *
1149xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
 
1150{
1151#ifdef CONFIG_XFRM_SUB_POLICY
1152	struct xfrm_policy *pol;
1153
1154	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
 
1155	if (pol != NULL)
1156		return pol;
1157#endif
1158	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
 
1159}
1160
1161static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
1162						 const struct flowi *fl, u16 family)
 
1163{
1164	struct xfrm_policy *pol;
1165
1166	rcu_read_lock();
1167 again:
1168	pol = rcu_dereference(sk->sk_policy[dir]);
1169	if (pol != NULL) {
1170		bool match;
1171		int err = 0;
1172
1173		if (pol->family != family) {
1174			pol = NULL;
1175			goto out;
1176		}
1177
1178		match = xfrm_selector_match(&pol->selector, fl, family);
1179		if (match) {
1180			if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
 
1181				pol = NULL;
1182				goto out;
1183			}
1184			err = security_xfrm_policy_lookup(pol->security,
1185						      fl->flowi_secid,
1186						      dir);
1187			if (!err) {
1188				if (!xfrm_pol_hold_rcu(pol))
1189					goto again;
1190			} else if (err == -ESRCH) {
1191				pol = NULL;
1192			} else {
1193				pol = ERR_PTR(err);
1194			}
1195		} else
1196			pol = NULL;
1197	}
1198out:
1199	rcu_read_unlock();
1200	return pol;
1201}
1202
1203static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
1204{
1205	struct net *net = xp_net(pol);
1206
1207	list_add(&pol->walk.all, &net->xfrm.policy_all);
1208	net->xfrm.policy_count[dir]++;
1209	xfrm_pol_hold(pol);
1210}
1211
1212static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
1213						int dir)
1214{
1215	struct net *net = xp_net(pol);
1216
1217	if (list_empty(&pol->walk.all))
1218		return NULL;
1219
1220	/* Socket policies are not hashed. */
1221	if (!hlist_unhashed(&pol->bydst)) {
1222		hlist_del_rcu(&pol->bydst);
1223		hlist_del(&pol->byidx);
1224	}
1225
1226	list_del_init(&pol->walk.all);
1227	net->xfrm.policy_count[dir]--;
1228
1229	return pol;
1230}
1231
1232static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
1233{
1234	__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
1235}
1236
1237static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
1238{
1239	__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
1240}
1241
1242int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
1243{
1244	struct net *net = xp_net(pol);
1245
1246	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1247	pol = __xfrm_policy_unlink(pol, dir);
1248	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1249	if (pol) {
1250		xfrm_policy_kill(pol);
1251		return 0;
1252	}
1253	return -ENOENT;
1254}
1255EXPORT_SYMBOL(xfrm_policy_delete);
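/* Hypothetical caller sketch (not a function in this file): the minimal way
 * to use xfrm_policy_delete() above, given a policy reference obtained by the
 * caller.  example_remove_policy() and its argument are illustrative only.
 */
static int example_remove_policy(struct xfrm_policy *pol)
{
	/* Unlinks the policy from the per-net lists and schedules its
	 * destruction; -ENOENT means another path already unlinked it.
	 */
	return xfrm_policy_delete(pol, XFRM_POLICY_OUT);
}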
1256
1257int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1258{
1259	struct net *net = sock_net(sk);
1260	struct xfrm_policy *old_pol;
1261
1262#ifdef CONFIG_XFRM_SUB_POLICY
1263	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
1264		return -EINVAL;
1265#endif
1266
1267	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1268	old_pol = rcu_dereference_protected(sk->sk_policy[dir],
1269				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
1270	if (pol) {
1271		pol->curlft.add_time = get_seconds();
1272		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
1273		xfrm_sk_policy_link(pol, dir);
1274	}
1275	rcu_assign_pointer(sk->sk_policy[dir], pol);
1276	if (old_pol) {
1277		if (pol)
1278			xfrm_policy_requeue(old_pol, pol);
1279
1280		/* Unlinking always succeeds. This is the only function
1281		 * allowed to delete or replace a socket policy.
1282		 */
1283		xfrm_sk_policy_unlink(old_pol, dir);
1284	}
1285	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1286
1287	if (old_pol) {
1288		xfrm_policy_kill(old_pol);
1289	}
1290	return 0;
1291}
1292
1293static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
1294{
1295	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
1296	struct net *net = xp_net(old);
1297
1298	if (newp) {
1299		newp->selector = old->selector;
1300		if (security_xfrm_policy_clone(old->security,
1301					       &newp->security)) {
1302			kfree(newp);
1303			return NULL;  /* ENOMEM */
1304		}
1305		newp->lft = old->lft;
1306		newp->curlft = old->curlft;
1307		newp->mark = old->mark;
1308		newp->action = old->action;
1309		newp->flags = old->flags;
1310		newp->xfrm_nr = old->xfrm_nr;
1311		newp->index = old->index;
1312		newp->type = old->type;
1313		newp->family = old->family;
1314		memcpy(newp->xfrm_vec, old->xfrm_vec,
1315		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
1316		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1317		xfrm_sk_policy_link(newp, dir);
1318		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1319		xfrm_pol_put(newp);
1320	}
1321	return newp;
1322}
1323
1324int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
1325{
1326	const struct xfrm_policy *p;
1327	struct xfrm_policy *np;
1328	int i, ret = 0;
1329
1330	rcu_read_lock();
1331	for (i = 0; i < 2; i++) {
1332		p = rcu_dereference(osk->sk_policy[i]);
1333		if (p) {
1334			np = clone_policy(p, i);
1335			if (unlikely(!np)) {
1336				ret = -ENOMEM;
1337				break;
1338			}
1339			rcu_assign_pointer(sk->sk_policy[i], np);
1340		}
1341	}
1342	rcu_read_unlock();
1343	return ret;
1344}
1345
1346static int
1347xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
1348	       xfrm_address_t *remote, unsigned short family, u32 mark)
1349{
1350	int err;
1351	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1352
1353	if (unlikely(afinfo == NULL))
1354		return -EINVAL;
1355	err = afinfo->get_saddr(net, oif, local, remote, mark);
1356	rcu_read_unlock();
1357	return err;
1358}
1359
1360/* Resolve list of templates for the flow, given policy. */
1361
1362static int
1363xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
1364		      struct xfrm_state **xfrm, unsigned short family)
1365{
1366	struct net *net = xp_net(policy);
1367	int nx;
1368	int i, error;
1369	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
1370	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
1371	xfrm_address_t tmp;
1372
1373	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
1374		struct xfrm_state *x;
1375		xfrm_address_t *remote = daddr;
1376		xfrm_address_t *local  = saddr;
1377		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
1378
1379		if (tmpl->mode == XFRM_MODE_TUNNEL ||
1380		    tmpl->mode == XFRM_MODE_BEET) {
1381			remote = &tmpl->id.daddr;
1382			local = &tmpl->saddr;
1383			if (xfrm_addr_any(local, tmpl->encap_family)) {
1384				error = xfrm_get_saddr(net, fl->flowi_oif,
1385						       &tmp, remote,
1386						       tmpl->encap_family, 0);
1387				if (error)
1388					goto fail;
1389				local = &tmp;
1390			}
1391		}
1392
1393		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
1394
1395		if (x && x->km.state == XFRM_STATE_VALID) {
1396			xfrm[nx++] = x;
1397			daddr = remote;
1398			saddr = local;
1399			continue;
1400		}
1401		if (x) {
1402			error = (x->km.state == XFRM_STATE_ERROR ?
1403				 -EINVAL : -EAGAIN);
1404			xfrm_state_put(x);
1405		} else if (error == -ESRCH) {
1406			error = -EAGAIN;
1407		}
1408
1409		if (!tmpl->optional)
1410			goto fail;
1411	}
1412	return nx;
1413
1414fail:
1415	for (nx--; nx >= 0; nx--)
1416		xfrm_state_put(xfrm[nx]);
1417	return error;
1418}
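/* Worked example of the resolver above: for a policy whose single template is
 * tunnel mode with an unspecified source address, xfrm_get_saddr() first picks
 * a local address towards the tunnel endpoint, then xfrm_state_find() looks up
 * the SA.  A missing SA becomes -EAGAIN so the caller can wait for the key
 * manager, unless the template is marked optional, in which case it is simply
 * skipped.
 */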
1419
1420static int
1421xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
1422		  struct xfrm_state **xfrm, unsigned short family)
1423{
1424	struct xfrm_state *tp[XFRM_MAX_DEPTH];
1425	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
1426	int cnx = 0;
1427	int error;
1428	int ret;
1429	int i;
1430
1431	for (i = 0; i < npols; i++) {
1432		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
1433			error = -ENOBUFS;
1434			goto fail;
1435		}
1436
1437		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
1438		if (ret < 0) {
1439			error = ret;
1440			goto fail;
1441		} else
1442			cnx += ret;
1443	}
1444
1445	/* found states are sorted for outbound processing */
1446	if (npols > 1)
1447		xfrm_state_sort(xfrm, tpp, cnx, family);
1448
1449	return cnx;
1450
1451 fail:
1452	for (cnx--; cnx >= 0; cnx--)
1453		xfrm_state_put(tpp[cnx]);
1454	return error;
1455
1456}
1457
1458static int xfrm_get_tos(const struct flowi *fl, int family)
1459{
1460	const struct xfrm_policy_afinfo *afinfo;
1461	int tos;
1462
1463	afinfo = xfrm_policy_get_afinfo(family);
1464	if (!afinfo)
1465		return 0;
1466
1467	tos = afinfo->get_tos(fl);
1468
1469	rcu_read_unlock();
1470
1471	return tos;
1472}
1473
1474static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1475{
1476	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1477	struct dst_ops *dst_ops;
1478	struct xfrm_dst *xdst;
1479
1480	if (!afinfo)
1481		return ERR_PTR(-EINVAL);
1482
1483	switch (family) {
1484	case AF_INET:
1485		dst_ops = &net->xfrm.xfrm4_dst_ops;
1486		break;
1487#if IS_ENABLED(CONFIG_IPV6)
1488	case AF_INET6:
1489		dst_ops = &net->xfrm.xfrm6_dst_ops;
1490		break;
1491#endif
1492	default:
1493		BUG();
1494	}
1495	xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
1496
1497	if (likely(xdst)) {
1498		struct dst_entry *dst = &xdst->u.dst;
1499
1500		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
1501	} else
1502		xdst = ERR_PTR(-ENOBUFS);
1503
1504	rcu_read_unlock();
1505
1506	return xdst;
1507}
1508
1509static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
1510				 int nfheader_len)
1511{
1512	const struct xfrm_policy_afinfo *afinfo =
1513		xfrm_policy_get_afinfo(dst->ops->family);
1514	int err;
1515
1516	if (!afinfo)
1517		return -EINVAL;
1518
1519	err = afinfo->init_path(path, dst, nfheader_len);
1520
1521	rcu_read_unlock();
1522
1523	return err;
1524}
1525
1526static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
1527				const struct flowi *fl)
1528{
1529	const struct xfrm_policy_afinfo *afinfo =
1530		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
1531	int err;
1532
1533	if (!afinfo)
1534		return -EINVAL;
1535
1536	err = afinfo->fill_dst(xdst, dev, fl);
1537
1538	rcu_read_unlock();
1539
1540	return err;
1541}
1542
1543
1544/* Allocate chain of dst_entry's, attach known xfrm's, calculate
1545 * all the metrics... Shortly, bundle a bundle.
1546 */
1547
1548static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1549					    struct xfrm_state **xfrm,
1550					    struct xfrm_dst **bundle,
1551					    int nx,
1552					    const struct flowi *fl,
1553					    struct dst_entry *dst)
1554{
1555	struct net *net = xp_net(policy);
1556	unsigned long now = jiffies;
1557	struct net_device *dev;
1558	struct xfrm_mode *inner_mode;
1559	struct xfrm_dst *xdst_prev = NULL;
1560	struct xfrm_dst *xdst0 = NULL;
1561	int i = 0;
1562	int err;
1563	int header_len = 0;
1564	int nfheader_len = 0;
1565	int trailer_len = 0;
1566	int tos;
1567	int family = policy->selector.family;
1568	xfrm_address_t saddr, daddr;
1569
1570	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
1571
1572	tos = xfrm_get_tos(fl, family);
1573
1574	dst_hold(dst);
1575
1576	for (; i < nx; i++) {
1577		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
1578		struct dst_entry *dst1 = &xdst->u.dst;
1579
1580		err = PTR_ERR(xdst);
1581		if (IS_ERR(xdst)) {
1582			dst_release(dst);
1583			goto put_states;
1584		}
1585
1586		bundle[i] = xdst;
1587		if (!xdst_prev)
1588			xdst0 = xdst;
1589		else
1590			/* Ref count is taken during xfrm_alloc_dst()
1591			 * No need to do dst_clone() on dst1
1592			 */
1593			xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
1594
1595		if (xfrm[i]->sel.family == AF_UNSPEC) {
1596			inner_mode = xfrm_ip2inner_mode(xfrm[i],
1597							xfrm_af2proto(family));
1598			if (!inner_mode) {
1599				err = -EAFNOSUPPORT;
1600				dst_release(dst);
1601				goto put_states;
1602			}
1603		} else
1604			inner_mode = xfrm[i]->inner_mode;
1605
1606		xdst->route = dst;
1607		dst_copy_metrics(dst1, dst);
1608
1609		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
1610			family = xfrm[i]->props.family;
1611			dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
1612					      &saddr, &daddr, family,
1613					      xfrm[i]->props.output_mark);
1614			err = PTR_ERR(dst);
1615			if (IS_ERR(dst))
1616				goto put_states;
1617		} else
1618			dst_hold(dst);
1619
1620		dst1->xfrm = xfrm[i];
1621		xdst->xfrm_genid = xfrm[i]->genid;
1622
1623		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
1624		dst1->flags |= DST_HOST;
1625		dst1->lastuse = now;
1626
1627		dst1->input = dst_discard;
1628		dst1->output = inner_mode->afinfo->output;
1629
1630		xdst_prev = xdst;
1631
1632		header_len += xfrm[i]->props.header_len;
1633		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
1634			nfheader_len += xfrm[i]->props.header_len;
1635		trailer_len += xfrm[i]->props.trailer_len;
1636	}
1637
1638	xfrm_dst_set_child(xdst_prev, dst);
1639	xdst0->path = dst;
1640
1641	err = -ENODEV;
1642	dev = dst->dev;
1643	if (!dev)
1644		goto free_dst;
1645
1646	xfrm_init_path(xdst0, dst, nfheader_len);
1647	xfrm_init_pmtu(bundle, nx);
1648
1649	for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
1650	     xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
1651		err = xfrm_fill_dst(xdst_prev, dev, fl);
1652		if (err)
1653			goto free_dst;
1654
1655		xdst_prev->u.dst.header_len = header_len;
1656		xdst_prev->u.dst.trailer_len = trailer_len;
1657		header_len -= xdst_prev->u.dst.xfrm->props.header_len;
1658		trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
1659	}
1660
1661	return &xdst0->u.dst;
1662
1663put_states:
1664	for (; i < nx; i++)
1665		xfrm_state_put(xfrm[i]);
1666free_dst:
1667	if (xdst0)
1668		dst_release_immediate(&xdst0->u.dst);
1669
1670	return ERR_PTR(err);
1671}
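/* Illustrative sketch of the result for nx == 2: the bundle is a chain of
 * xfrm_dst entries linked through xfrm_dst_set_child(),
 *
 *   xdst0 (xfrm[0]) --> xdst1 (xfrm[1]) --> dst (the routing entry)
 *
 * with xdst0->path pointing at the final route and each level caching in
 * xdst->route the routing entry it was built against.  header_len and
 * trailer_len are written top-down, so every level reserves room for its own
 * transform plus all the inner ones.
 */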
1672
1673static int xfrm_expand_policies(const struct flowi *fl, u16 family,
1674				struct xfrm_policy **pols,
1675				int *num_pols, int *num_xfrms)
1676{
1677	int i;
1678
1679	if (*num_pols == 0 || !pols[0]) {
1680		*num_pols = 0;
1681		*num_xfrms = 0;
1682		return 0;
1683	}
1684	if (IS_ERR(pols[0]))
1685		return PTR_ERR(pols[0]);
1686
1687	*num_xfrms = pols[0]->xfrm_nr;
1688
1689#ifdef CONFIG_XFRM_SUB_POLICY
1690	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
1691	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
1692		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
1693						    XFRM_POLICY_TYPE_MAIN,
1694						    fl, family,
1695						    XFRM_POLICY_OUT);
1696		if (pols[1]) {
1697			if (IS_ERR(pols[1])) {
1698				xfrm_pols_put(pols, *num_pols);
1699				return PTR_ERR(pols[1]);
1700			}
1701			(*num_pols)++;
1702			(*num_xfrms) += pols[1]->xfrm_nr;
1703		}
1704	}
1705#endif
1706	for (i = 0; i < *num_pols; i++) {
1707		if (pols[i]->action != XFRM_POLICY_ALLOW) {
1708			*num_xfrms = -1;
1709			break;
1710		}
1711	}
1712
1713	return 0;
1714
1715}
1716
1717static void xfrm_last_dst_update(struct xfrm_dst *xdst, struct xfrm_dst *old)
1718{
1719	this_cpu_write(xfrm_last_dst, xdst);
1720	if (old)
1721		dst_release(&old->u.dst);
1722}
1723
1724static void __xfrm_pcpu_work_fn(void)
1725{
1726	struct xfrm_dst *old;
1727
1728	old = this_cpu_read(xfrm_last_dst);
1729	if (old && !xfrm_bundle_ok(old))
1730		xfrm_last_dst_update(NULL, old);
1731}
1732
1733static void xfrm_pcpu_work_fn(struct work_struct *work)
1734{
1735	local_bh_disable();
1736	rcu_read_lock();
1737	__xfrm_pcpu_work_fn();
1738	rcu_read_unlock();
1739	local_bh_enable();
1740}
1741
1742void xfrm_policy_cache_flush(void)
1743{
1744	struct xfrm_dst *old;
1745	bool found = false;
1746	int cpu;
1747
1748	might_sleep();
1749
1750	local_bh_disable();
1751	rcu_read_lock();
1752	for_each_possible_cpu(cpu) {
1753		old = per_cpu(xfrm_last_dst, cpu);
1754		if (old && !xfrm_bundle_ok(old)) {
1755			if (smp_processor_id() == cpu) {
1756				__xfrm_pcpu_work_fn();
1757				continue;
1758			}
1759			found = true;
1760			break;
1761		}
1762	}
1763
1764	rcu_read_unlock();
1765	local_bh_enable();
1766
1767	if (!found)
1768		return;
1769
1770	get_online_cpus();
1771
1772	for_each_possible_cpu(cpu) {
1773		bool bundle_release;
1774
1775		rcu_read_lock();
1776		old = per_cpu(xfrm_last_dst, cpu);
1777		bundle_release = old && !xfrm_bundle_ok(old);
1778		rcu_read_unlock();
1779
1780		if (!bundle_release)
1781			continue;
1782
1783		if (cpu_online(cpu)) {
1784			schedule_work_on(cpu, &xfrm_pcpu_work[cpu]);
1785			continue;
1786		}
1787
1788		rcu_read_lock();
1789		old = per_cpu(xfrm_last_dst, cpu);
1790		if (old && !xfrm_bundle_ok(old)) {
1791			per_cpu(xfrm_last_dst, cpu) = NULL;
1792			dst_release(&old->u.dst);
1793		}
1794		rcu_read_unlock();
1795	}
1796
1797	put_online_cpus();
1798}
1799
1800static bool xfrm_xdst_can_reuse(struct xfrm_dst *xdst,
1801				struct xfrm_state * const xfrm[],
1802				int num)
1803{
1804	const struct dst_entry *dst = &xdst->u.dst;
1805	int i;
1806
1807	if (xdst->num_xfrms != num)
1808		return false;
1809
1810	for (i = 0; i < num; i++) {
1811		if (!dst || dst->xfrm != xfrm[i])
1812			return false;
1813		dst = xfrm_dst_child(dst);
1814	}
1815
1816	return xfrm_bundle_ok(xdst);
1817}
1818
1819static struct xfrm_dst *
1820xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1821			       const struct flowi *fl, u16 family,
1822			       struct dst_entry *dst_orig)
1823{
1824	struct net *net = xp_net(pols[0]);
1825	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
1826	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
1827	struct xfrm_dst *xdst, *old;
1828	struct dst_entry *dst;
1829	int err;
1830
1831	/* Try to instantiate a bundle */
1832	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
1833	if (err <= 0) {
1834		if (err != 0 && err != -EAGAIN)
1835			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1836		return ERR_PTR(err);
1837	}
1838
1839	xdst = this_cpu_read(xfrm_last_dst);
1840	if (xdst &&
1841	    xdst->u.dst.dev == dst_orig->dev &&
1842	    xdst->num_pols == num_pols &&
1843	    memcmp(xdst->pols, pols,
1844		   sizeof(struct xfrm_policy *) * num_pols) == 0 &&
1845	    xfrm_xdst_can_reuse(xdst, xfrm, err)) {
1846		dst_hold(&xdst->u.dst);
1847		xfrm_pols_put(pols, num_pols);
1848		while (err > 0)
1849			xfrm_state_put(xfrm[--err]);
1850		return xdst;
1851	}
1852
1853	old = xdst;
1854
1855	dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
1856	if (IS_ERR(dst)) {
1857		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
1858		return ERR_CAST(dst);
1859	}
1860
1861	xdst = (struct xfrm_dst *)dst;
1862	xdst->num_xfrms = err;
1863	xdst->num_pols = num_pols;
1864	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
1865	xdst->policy_genid = atomic_read(&pols[0]->genid);
1866
1867	atomic_set(&xdst->u.dst.__refcnt, 2);
1868	xfrm_last_dst_update(xdst, old);
1869
1870	return xdst;
1871}
1872
1873static void xfrm_policy_queue_process(struct timer_list *t)
1874{
1875	struct sk_buff *skb;
1876	struct sock *sk;
1877	struct dst_entry *dst;
1878	struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
1879	struct net *net = xp_net(pol);
1880	struct xfrm_policy_queue *pq = &pol->polq;
1881	struct flowi fl;
1882	struct sk_buff_head list;
1883
1884	spin_lock(&pq->hold_queue.lock);
1885	skb = skb_peek(&pq->hold_queue);
1886	if (!skb) {
1887		spin_unlock(&pq->hold_queue.lock);
1888		goto out;
1889	}
1890	dst = skb_dst(skb);
1891	sk = skb->sk;
1892	xfrm_decode_session(skb, &fl, dst->ops->family);
1893	spin_unlock(&pq->hold_queue.lock);
1894
1895	dst_hold(xfrm_dst_path(dst));
1896	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
1897	if (IS_ERR(dst))
1898		goto purge_queue;
1899
1900	if (dst->flags & DST_XFRM_QUEUE) {
1901		dst_release(dst);
1902
1903		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
1904			goto purge_queue;
1905
1906		pq->timeout = pq->timeout << 1;
1907		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
1908			xfrm_pol_hold(pol);
1909		goto out;
1910	}
1911
1912	dst_release(dst);
1913
1914	__skb_queue_head_init(&list);
1915
1916	spin_lock(&pq->hold_queue.lock);
1917	pq->timeout = 0;
1918	skb_queue_splice_init(&pq->hold_queue, &list);
1919	spin_unlock(&pq->hold_queue.lock);
1920
1921	while (!skb_queue_empty(&list)) {
1922		skb = __skb_dequeue(&list);
1923
1924		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
1925		dst_hold(xfrm_dst_path(skb_dst(skb)));
1926		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
1927		if (IS_ERR(dst)) {
1928			kfree_skb(skb);
1929			continue;
1930		}
1931
1932		nf_reset(skb);
1933		skb_dst_drop(skb);
1934		skb_dst_set(skb, dst);
1935
1936		dst_output(net, skb->sk, skb);
1937	}
1938
1939out:
1940	xfrm_pol_put(pol);
1941	return;
1942
1943purge_queue:
1944	pq->timeout = 0;
1945	skb_queue_purge(&pq->hold_queue);
1946	xfrm_pol_put(pol);
1947}
1948
1949static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
1950{
1951	unsigned long sched_next;
1952	struct dst_entry *dst = skb_dst(skb);
1953	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
1954	struct xfrm_policy *pol = xdst->pols[0];
1955	struct xfrm_policy_queue *pq = &pol->polq;
1956
1957	if (unlikely(skb_fclone_busy(sk, skb))) {
1958		kfree_skb(skb);
1959		return 0;
1960	}
1961
1962	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
1963		kfree_skb(skb);
1964		return -EAGAIN;
1965	}
1966
1967	skb_dst_force(skb);
1968
1969	spin_lock_bh(&pq->hold_queue.lock);
1970
1971	if (!pq->timeout)
1972		pq->timeout = XFRM_QUEUE_TMO_MIN;
1973
1974	sched_next = jiffies + pq->timeout;
1975
1976	if (del_timer(&pq->hold_timer)) {
1977		if (time_before(pq->hold_timer.expires, sched_next))
1978			sched_next = pq->hold_timer.expires;
1979		xfrm_pol_put(pol);
1980	}
1981
1982	__skb_queue_tail(&pq->hold_queue, skb);
1983	if (!mod_timer(&pq->hold_timer, sched_next))
1984		xfrm_pol_hold(pol);
1985
1986	spin_unlock_bh(&pq->hold_queue.lock);
1987
1988	return 0;
1989}
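/* Worked example of the two functions above: the first packet queued by
 * xdst_queue_output() arms the hold timer with XFRM_QUEUE_TMO_MIN.  Each time
 * xfrm_policy_queue_process() runs and the lookup still returns a
 * DST_XFRM_QUEUE route, pq->timeout is doubled and the timer re-armed; once
 * the timeout has reached XFRM_QUEUE_TMO_MAX the hold queue is purged instead.
 * Independently, packets beyond XFRM_MAX_QUEUE_LEN are dropped on enqueue.
 */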
1990
1991static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
1992						 struct xfrm_flo *xflo,
1993						 const struct flowi *fl,
1994						 int num_xfrms,
1995						 u16 family)
1996{
1997	int err;
1998	struct net_device *dev;
1999	struct dst_entry *dst;
2000	struct dst_entry *dst1;
2001	struct xfrm_dst *xdst;
2002
2003	xdst = xfrm_alloc_dst(net, family);
2004	if (IS_ERR(xdst))
2005		return xdst;
2006
2007	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
2008	    net->xfrm.sysctl_larval_drop ||
2009	    num_xfrms <= 0)
2010		return xdst;
2011
2012	dst = xflo->dst_orig;
2013	dst1 = &xdst->u.dst;
2014	dst_hold(dst);
2015	xdst->route = dst;
2016
2017	dst_copy_metrics(dst1, dst);
2018
2019	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2020	dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
2021	dst1->lastuse = jiffies;
2022
2023	dst1->input = dst_discard;
2024	dst1->output = xdst_queue_output;
2025
2026	dst_hold(dst);
2027	xfrm_dst_set_child(xdst, dst);
2028	xdst->path = dst;
2029
2030	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
2031
2032	err = -ENODEV;
2033	dev = dst->dev;
2034	if (!dev)
2035		goto free_dst;
2036
2037	err = xfrm_fill_dst(xdst, dev, fl);
2038	if (err)
2039		goto free_dst;
2040
2041out:
2042	return xdst;
2043
2044free_dst:
2045	dst_release(dst1);
2046	xdst = ERR_PTR(err);
2047	goto out;
2048}
2049
2050static struct xfrm_dst *
2051xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, struct xfrm_flo *xflo)
2052{
2053	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2054	int num_pols = 0, num_xfrms = 0, err;
2055	struct xfrm_dst *xdst;
2056
2057	/* Resolve policies to use if we couldn't get them from
2058	 * previous cache entry */
2059	num_pols = 1;
2060	pols[0] = xfrm_policy_lookup(net, fl, family, dir);
2061	err = xfrm_expand_policies(fl, family, pols,
2062					   &num_pols, &num_xfrms);
2063	if (err < 0)
2064		goto inc_error;
2065	if (num_pols == 0)
2066		return NULL;
2067	if (num_xfrms <= 0)
2068		goto make_dummy_bundle;
2069
2070	local_bh_disable();
2071	xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
2072					      xflo->dst_orig);
2073	local_bh_enable();
2074
2075	if (IS_ERR(xdst)) {
2076		err = PTR_ERR(xdst);
2077		if (err != -EAGAIN)
2078			goto error;
2079		goto make_dummy_bundle;
2080	} else if (xdst == NULL) {
2081		num_xfrms = 0;
2082		goto make_dummy_bundle;
2083	}
2084
2085	return xdst;
2086
2087make_dummy_bundle:
2088	/* We found policies, but there are no bundles to instantiate:
2089	 * either the policy blocks, it has no transformations, or
2090	 * we could not build a template (no xfrm_states). */
2091	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
2092	if (IS_ERR(xdst)) {
2093		xfrm_pols_put(pols, num_pols);
2094		return ERR_CAST(xdst);
2095	}
2096	xdst->num_pols = num_pols;
2097	xdst->num_xfrms = num_xfrms;
2098	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2099
2100	return xdst;
2101
2102inc_error:
2103	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2104error:
2105	xfrm_pols_put(pols, num_pols);
2106	return ERR_PTR(err);
2107}
2108
2109static struct dst_entry *make_blackhole(struct net *net, u16 family,
2110					struct dst_entry *dst_orig)
2111{
2112	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2113	struct dst_entry *ret;
2114
2115	if (!afinfo) {
2116		dst_release(dst_orig);
2117		return ERR_PTR(-EINVAL);
2118	} else {
2119		ret = afinfo->blackhole_route(net, dst_orig);
2120	}
2121	rcu_read_unlock();
2122
2123	return ret;
2124}
2125
2126/* Main function: finds/creates a bundle for a given flow.
2127 *
2128 * At the moment we eat a raw IP route. Mostly to speed up lookups
2129 * on interfaces with disabled IPsec.
2130 */
2131struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2132			      const struct flowi *fl,
2133			      const struct sock *sk, int flags)
2134{
2135	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2136	struct xfrm_dst *xdst;
2137	struct dst_entry *dst, *route;
2138	u16 family = dst_orig->ops->family;
2139	u8 dir = XFRM_POLICY_OUT;
2140	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
2141
2142	dst = NULL;
2143	xdst = NULL;
2144	route = NULL;
2145
2146	sk = sk_const_to_full_sk(sk);
2147	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
2148		num_pols = 1;
2149		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family);
2150		err = xfrm_expand_policies(fl, family, pols,
2151					   &num_pols, &num_xfrms);
2152		if (err < 0)
2153			goto dropdst;
2154
2155		if (num_pols) {
2156			if (num_xfrms <= 0) {
2157				drop_pols = num_pols;
2158				goto no_transform;
2159			}
2160
2161			local_bh_disable();
2162			xdst = xfrm_resolve_and_create_bundle(
2163					pols, num_pols, fl,
2164					family, dst_orig);
2165			local_bh_enable();
2166
2167			if (IS_ERR(xdst)) {
2168				xfrm_pols_put(pols, num_pols);
2169				err = PTR_ERR(xdst);
2170				goto dropdst;
2171			} else if (xdst == NULL) {
2172				num_xfrms = 0;
2173				drop_pols = num_pols;
2174				goto no_transform;
2175			}
2176
2177			route = xdst->route;
2178		}
2179	}
2180
2181	if (xdst == NULL) {
2182		struct xfrm_flo xflo;
2183
2184		xflo.dst_orig = dst_orig;
2185		xflo.flags = flags;
2186
2187		/* To accelerate a bit...  */
2188		if ((dst_orig->flags & DST_NOXFRM) ||
2189		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
2190			goto nopol;
2191
2192		xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo);
2193		if (xdst == NULL)
2194			goto nopol;
2195		if (IS_ERR(xdst)) {
2196			err = PTR_ERR(xdst);
2197			goto dropdst;
2198		}
2199
2200		num_pols = xdst->num_pols;
2201		num_xfrms = xdst->num_xfrms;
2202		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
2203		route = xdst->route;
2204	}
2205
2206	dst = &xdst->u.dst;
2207	if (route == NULL && num_xfrms > 0) {
2208		/* The only case when xfrm_bundle_lookup() returns a
2209		 * bundle with a null route is when the template could
2210		 * not be resolved. It means policies are there, but the
2211		 * bundle could not be created, since we don't yet
2212		 * have the xfrm_states. We need to wait for the KM to
2213		 * negotiate new SAs or bail out with an error. */
2214		if (net->xfrm.sysctl_larval_drop) {
2215			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2216			err = -EREMOTE;
2217			goto error;
2218		}
2219
2220		err = -EAGAIN;
2221
2222		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2223		goto error;
2224	}
2225
2226no_transform:
2227	if (num_pols == 0)
2228		goto nopol;
2229
2230	if ((flags & XFRM_LOOKUP_ICMP) &&
2231	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
2232		err = -ENOENT;
2233		goto error;
2234	}
2235
2236	for (i = 0; i < num_pols; i++)
2237		pols[i]->curlft.use_time = get_seconds();
2238
2239	if (num_xfrms < 0) {
2240		/* Prohibit the flow */
2241		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
2242		err = -EPERM;
2243		goto error;
2244	} else if (num_xfrms > 0) {
2245		/* Flow transformed */
2246		dst_release(dst_orig);
2247	} else {
2248		/* Flow passes untransformed */
2249		dst_release(dst);
2250		dst = dst_orig;
2251	}
2252ok:
2253	xfrm_pols_put(pols, drop_pols);
2254	if (dst && dst->xfrm &&
2255	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
2256		dst->flags |= DST_XFRM_TUNNEL;
2257	return dst;
2258
2259nopol:
2260	if (!(flags & XFRM_LOOKUP_ICMP)) {
2261		dst = dst_orig;
2262		goto ok;
2263	}
2264	err = -ENOENT;
2265error:
2266	dst_release(dst);
2267dropdst:
2268	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
2269		dst_release(dst_orig);
2270	xfrm_pols_put(pols, drop_pols);
2271	return ERR_PTR(err);
2272}
2273EXPORT_SYMBOL(xfrm_lookup);
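/* Hypothetical caller sketch (not a function in this file): resolving an
 * output route through xfrm_lookup() above.  The names are illustrative;
 * net, dst_orig, fl and sk are assumed to be set up by the caller, and
 * dst_orig is consumed by the call.
 */
static struct dst_entry *example_xfrm_resolve(struct net *net,
					      struct dst_entry *dst_orig,
					      const struct flowi *fl,
					      const struct sock *sk)
{
	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk, 0);

	if (IS_ERR(dst))
		return NULL;	/* dst_orig was released; drop the packet */

	/* dst is either an xfrm bundle, or dst_orig when no policy applies */
	return dst;
}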
2274
2275/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
2276 * Otherwise we may send out blackholed packets.
2277 */
2278struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
2279				    const struct flowi *fl,
2280				    const struct sock *sk, int flags)
2281{
2282	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
2283					    flags | XFRM_LOOKUP_QUEUE |
2284					    XFRM_LOOKUP_KEEP_DST_REF);
2285
2286	if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
2287		return make_blackhole(net, dst_orig->ops->family, dst_orig);
2288
2289	return dst;
2290}
2291EXPORT_SYMBOL(xfrm_lookup_route);
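/* Hypothetical caller sketch (not a function in this file): the typical shape
 * of an xfrm_lookup_route() user - look up the plain route first, then let
 * xfrm replace it with a bundle (or a blackhole route while SAs are still
 * being negotiated).  Per the comment above, whatever comes back must
 * eventually be handed to dst_output() or released by the caller.
 */
static struct dst_entry *example_route_output(struct net *net,
					      struct dst_entry *dst_orig,
					      const struct flowi *fl,
					      const struct sock *sk)
{
	return xfrm_lookup_route(net, dst_orig, fl, sk, 0);
}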
2292
2293static inline int
2294xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
2295{
2296	struct xfrm_state *x;
2297
2298	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
2299		return 0;
2300	x = skb->sp->xvec[idx];
2301	if (!x->type->reject)
2302		return 0;
2303	return x->type->reject(x, skb, fl);
2304}
2305
2306/* When skb is transformed back to its "native" form, we have to
2307 * check policy restrictions. At the moment we make this in maximally
2308 * stupid way. Shame on me. :-) Of course, connected sockets must
2309 * have policy cached at them.
2310 */
2311
2312static inline int
2313xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
2314	      unsigned short family)
2315{
2316	if (xfrm_state_kern(x))
2317		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
2318	return	x->id.proto == tmpl->id.proto &&
2319		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
2320		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
2321		x->props.mode == tmpl->mode &&
2322		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
2323		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
2324		!(x->props.mode != XFRM_MODE_TRANSPORT &&
2325		  xfrm_state_addr_cmp(tmpl, x, family));
2326}
2327
2328/*
2329 * 0 or a positive value is returned when validation succeeds (either bypass
2330 * because of optional transport mode, or the next index of the matched
2331 * secpath state with the template).
2332 * -1 is returned when no matching template is found.
2333 * Otherwise "-2 - errored_index" is returned.
2334 */
2335static inline int
2336xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
2337	       unsigned short family)
2338{
2339	int idx = start;
2340
2341	if (tmpl->optional) {
2342		if (tmpl->mode == XFRM_MODE_TRANSPORT)
2343			return start;
2344	} else
2345		start = -1;
2346	for (; idx < sp->len; idx++) {
2347		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
2348			return ++idx;
2349		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
2350			if (start == -1)
2351				start = -2-idx;
2352			break;
2353		}
2354	}
2355	return start;
2356}
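/* Worked example of the return convention above: with sp->len == 2 and
 * start == 0, a non-optional template that matches sp->xvec[0] returns 1 (the
 * next index to try).  If nothing matches, sp->xvec[0] is transport mode and
 * sp->xvec[1] is a tunnel-mode state, the loop returns -2 - 1 == -3, from
 * which __xfrm_policy_check() recovers xerr_idx = 1 for
 * xfrm_secpath_reject().
 */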
2357
2358int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
2359			  unsigned int family, int reverse)
2360{
2361	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2362	int err;
2363
2364	if (unlikely(afinfo == NULL))
2365		return -EAFNOSUPPORT;
2366
2367	afinfo->decode_session(skb, fl, reverse);
2368	err = security_xfrm_decode_session(skb, &fl->flowi_secid);
2369	rcu_read_unlock();
2370	return err;
2371}
2372EXPORT_SYMBOL(__xfrm_decode_session);
2373
2374static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
2375{
2376	for (; k < sp->len; k++) {
2377		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
2378			*idxp = k;
2379			return 1;
2380		}
2381	}
2382
2383	return 0;
2384}
2386int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2387			unsigned short family)
2388{
2389	struct net *net = dev_net(skb->dev);
2390	struct xfrm_policy *pol;
2391	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2392	int npols = 0;
2393	int xfrm_nr;
2394	int pi;
2395	int reverse;
2396	struct flowi fl;
2397	int xerr_idx = -1;
2398
2399	reverse = dir & ~XFRM_POLICY_MASK;
2400	dir &= XFRM_POLICY_MASK;
2401
2402	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
2403		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
2404		return 0;
2405	}
2406
2407	nf_nat_decode_session(skb, &fl, family);
2408
2409	/* First, check used SA against their selectors. */
2410	if (skb->sp) {
2411		int i;
2412
2413		for (i = skb->sp->len-1; i >= 0; i--) {
2414			struct xfrm_state *x = skb->sp->xvec[i];
2415			if (!xfrm_selector_match(&x->sel, &fl, family)) {
2416				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
2417				return 0;
2418			}
2419		}
2420	}
2421
2422	pol = NULL;
2423	sk = sk_to_full_sk(sk);
2424	if (sk && sk->sk_policy[dir]) {
2425		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family);
2426		if (IS_ERR(pol)) {
2427			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2428			return 0;
2429		}
2430	}
2431
2432	if (!pol)
2433		pol = xfrm_policy_lookup(net, &fl, family, dir);
2434
2435	if (IS_ERR(pol)) {
2436		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2437		return 0;
2438	}
2439
2440	if (!pol) {
2441		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
2442			xfrm_secpath_reject(xerr_idx, skb, &fl);
2443			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
2444			return 0;
2445		}
2446		return 1;
2447	}
2448
2449	pol->curlft.use_time = get_seconds();
2450
2451	pols[0] = pol;
2452	npols++;
2453#ifdef CONFIG_XFRM_SUB_POLICY
2454	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2455		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
2456						    &fl, family,
2457						    XFRM_POLICY_IN);
2458		if (pols[1]) {
2459			if (IS_ERR(pols[1])) {
2460				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2461				return 0;
2462			}
2463			pols[1]->curlft.use_time = get_seconds();
2464			npols++;
2465		}
2466	}
2467#endif
2468
2469	if (pol->action == XFRM_POLICY_ALLOW) {
2470		struct sec_path *sp;
2471		static struct sec_path dummy;
2472		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
2473		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
2474		struct xfrm_tmpl **tpp = tp;
2475		int ti = 0;
2476		int i, k;
2477
2478		if ((sp = skb->sp) == NULL)
2479			sp = &dummy;
2480
2481		for (pi = 0; pi < npols; pi++) {
2482			if (pols[pi] != pol &&
2483			    pols[pi]->action != XFRM_POLICY_ALLOW) {
2484				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2485				goto reject;
2486			}
2487			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
2488				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
2489				goto reject_error;
2490			}
2491			for (i = 0; i < pols[pi]->xfrm_nr; i++)
2492				tpp[ti++] = &pols[pi]->xfrm_vec[i];
2493		}
2494		xfrm_nr = ti;
2495		if (npols > 1) {
2496			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net);
2497			tpp = stp;
2498		}
2499
2500		/* For each tunnel xfrm, find the first matching tmpl.
2501		 * For each tmpl before that, find the corresponding xfrm.
2502		 * Order is _important_. Later we will implement
2503		 * some barriers, but at the moment barriers
2504		 * are implied between every two transformations.
2505		 */
2506		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
2507			k = xfrm_policy_ok(tpp[i], sp, k, family);
2508			if (k < 0) {
2509				if (k < -1)
2510					/* "-2 - errored_index" returned */
2511					xerr_idx = -(2+k);
2512				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2513				goto reject;
2514			}
2515		}
2516
2517		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
2518			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2519			goto reject;
2520		}
2521
2522		xfrm_pols_put(pols, npols);
2523		return 1;
2524	}
2525	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2526
2527reject:
2528	xfrm_secpath_reject(xerr_idx, skb, &fl);
2529reject_error:
2530	xfrm_pols_put(pols, npols);
2531	return 0;
2532}
2533EXPORT_SYMBOL(__xfrm_policy_check);
2534
2535int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2536{
2537	struct net *net = dev_net(skb->dev);
2538	struct flowi fl;
2539	struct dst_entry *dst;
2540	int res = 1;
2541
2542	if (xfrm_decode_session(skb, &fl, family) < 0) {
2543		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2544		return 0;
2545	}
2546
2547	skb_dst_force(skb);
2548
2549	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
2550	if (IS_ERR(dst)) {
2551		res = 0;
2552		dst = NULL;
2553	}
2554	skb_dst_set(skb, dst);
2555	return res;
2556}
2557EXPORT_SYMBOL(__xfrm_route_forward);
2558
2559/* Optimize later using cookies and generation ids. */
2560
2561static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
2562{
2563	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
2564	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
2565	 * get validated by dst_ops->check on every use.  We do this
2566	 * because when a normal route referenced by an XFRM dst is
2567	 * obsoleted we do not go looking around for all parent
2568	 * referencing XFRM dsts so that we can invalidate them.  It
2569	 * is just too much work.  Instead we make the checks here on
2570	 * every use.  For example:
2571	 *
2572	 *	XFRM dst A --> IPv4 dst X
2573	 *
2574	 * X is the "xdst->route" of A (X is also the "dst->path" of A
2575	 * in this example).  If X is marked obsolete, "A" will not
2576	 * notice.  That's what we are validating here via the
2577	 * stale_bundle() check.
2578	 *
2579	 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
2580	 * be marked on it.
2581	 * This will force stale_bundle() to fail on any xdst bundle with
2582	 * this dst linked in it.
2583	 */
2584	if (dst->obsolete < 0 && !stale_bundle(dst))
2585		return dst;
2586
2587	return NULL;
2588}
2589
2590static int stale_bundle(struct dst_entry *dst)
2591{
2592	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
2593}
2594
2595void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
2596{
2597	while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
2598		dst->dev = dev_net(dev)->loopback_dev;
2599		dev_hold(dst->dev);
2600		dev_put(dev);
2601	}
2602}
2603EXPORT_SYMBOL(xfrm_dst_ifdown);
2604
2605static void xfrm_link_failure(struct sk_buff *skb)
2606{
2607	/* Impossible. Such a dst must be popped before it reaches the point of failure. */
2608}
2609
2610static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
2611{
2612	if (dst) {
2613		if (dst->obsolete) {
2614			dst_release(dst);
2615			dst = NULL;
2616		}
2617	}
2618	return dst;
2619}
2620
2621static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
2622{
2623	while (nr--) {
2624		struct xfrm_dst *xdst = bundle[nr];
2625		u32 pmtu, route_mtu_cached;
2626		struct dst_entry *dst;
2627
2628		dst = &xdst->u.dst;
2629		pmtu = dst_mtu(xfrm_dst_child(dst));
2630		xdst->child_mtu_cached = pmtu;
2631
2632		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
2633
2634		route_mtu_cached = dst_mtu(xdst->route);
2635		xdst->route_mtu_cached = route_mtu_cached;
2636
2637		if (pmtu > route_mtu_cached)
2638			pmtu = route_mtu_cached;
2639
2640		dst_metric_set(dst, RTAX_MTU, pmtu);
2641	}
2642}
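/* Worked example of the MTU setup above (illustrative numbers): for a
 * one-entry bundle whose child route has an MTU of 1500, xfrm_state_mtu()
 * subtracts the state's header and trailer overhead (say 58 bytes for a
 * tunnel-mode ESP SA), giving 1442; since that is below the cached route MTU
 * of 1500, the bundle dst ends up advertising RTAX_MTU == 1442.
 * xfrm_bundle_ok() redoes this whenever a cached child or route MTU changes.
 */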
2643
2644/* Check that the bundle accepts the flow and its components are
2645 * still valid.
2646 */
2647
2648static int xfrm_bundle_ok(struct xfrm_dst *first)
2649{
2650	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
2651	struct dst_entry *dst = &first->u.dst;
2652	struct xfrm_dst *xdst;
2653	int start_from, nr;
2654	u32 mtu;
2655
2656	if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
2657	    (dst->dev && !netif_running(dst->dev)))
2658		return 0;
2659
2660	if (dst->flags & DST_XFRM_QUEUE)
2661		return 1;
2662
2663	start_from = nr = 0;
2664	do {
2665		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2666
2667		if (dst->xfrm->km.state != XFRM_STATE_VALID)
2668			return 0;
2669		if (xdst->xfrm_genid != dst->xfrm->genid)
2670			return 0;
2671		if (xdst->num_pols > 0 &&
2672		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
2673			return 0;
2674
2675		bundle[nr++] = xdst;
2676
2677		mtu = dst_mtu(xfrm_dst_child(dst));
2678		if (xdst->child_mtu_cached != mtu) {
2679			start_from = nr;
2680			xdst->child_mtu_cached = mtu;
2681		}
2682
2683		if (!dst_check(xdst->route, xdst->route_cookie))
2684			return 0;
2685		mtu = dst_mtu(xdst->route);
2686		if (xdst->route_mtu_cached != mtu) {
2687			start_from = nr;
2688			xdst->route_mtu_cached = mtu;
2689		}
2690
2691		dst = xfrm_dst_child(dst);
2692	} while (dst->xfrm);
2693
2694	if (likely(!start_from))
2695		return 1;
2696
2697	xdst = bundle[start_from - 1];
2698	mtu = xdst->child_mtu_cached;
2699	while (start_from--) {
2700		dst = &xdst->u.dst;
2701
2702		mtu = xfrm_state_mtu(dst->xfrm, mtu);
2703		if (mtu > xdst->route_mtu_cached)
2704			mtu = xdst->route_mtu_cached;
2705		dst_metric_set(dst, RTAX_MTU, mtu);
2706		if (!start_from)
2707			break;
2708
2709		xdst = bundle[start_from - 1];
2710		xdst->child_mtu_cached = mtu;
2711	}
2712
2713	return 1;
2714}
2715
2716static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
2717{
2718	return dst_metric_advmss(xfrm_dst_path(dst));
2719}
2720
2721static unsigned int xfrm_mtu(const struct dst_entry *dst)
2722{
2723	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2724
2725	return mtu ? : dst_mtu(xfrm_dst_path(dst));
2726}
2727
2728static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
2729					const void *daddr)
2730{
2731	while (dst->xfrm) {
2732		const struct xfrm_state *xfrm = dst->xfrm;
2733
2734		dst = xfrm_dst_child(dst);
2735
2736		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
2737			continue;
2738		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
2739			daddr = xfrm->coaddr;
2740		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
2741			daddr = &xfrm->id.daddr;
2742	}
2743	return daddr;
2744}
2745
2746static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
2747					   struct sk_buff *skb,
2748					   const void *daddr)
2749{
2750	const struct dst_entry *path = xfrm_dst_path(dst);
2751
2752	if (!skb)
2753		daddr = xfrm_get_dst_nexthop(dst, daddr);
2754	return path->ops->neigh_lookup(path, skb, daddr);
2755}
2756
2757static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
2758{
2759	const struct dst_entry *path = xfrm_dst_path(dst);
2760
2761	daddr = xfrm_get_dst_nexthop(dst, daddr);
2762	path->ops->confirm_neigh(path, daddr);
2763}
2764
2765int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
2766{
2767	int err = 0;
2768
2769	if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
2770		return -EAFNOSUPPORT;
2771
2772	spin_lock(&xfrm_policy_afinfo_lock);
2773	if (unlikely(xfrm_policy_afinfo[family] != NULL))
2774		err = -EEXIST;
2775	else {
2776		struct dst_ops *dst_ops = afinfo->dst_ops;
2777		if (likely(dst_ops->kmem_cachep == NULL))
2778			dst_ops->kmem_cachep = xfrm_dst_cache;
2779		if (likely(dst_ops->check == NULL))
2780			dst_ops->check = xfrm_dst_check;
2781		if (likely(dst_ops->default_advmss == NULL))
2782			dst_ops->default_advmss = xfrm_default_advmss;
2783		if (likely(dst_ops->mtu == NULL))
2784			dst_ops->mtu = xfrm_mtu;
2785		if (likely(dst_ops->negative_advice == NULL))
2786			dst_ops->negative_advice = xfrm_negative_advice;
2787		if (likely(dst_ops->link_failure == NULL))
2788			dst_ops->link_failure = xfrm_link_failure;
2789		if (likely(dst_ops->neigh_lookup == NULL))
2790			dst_ops->neigh_lookup = xfrm_neigh_lookup;
2791		if (likely(!dst_ops->confirm_neigh))
2792			dst_ops->confirm_neigh = xfrm_confirm_neigh;
2793		rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
2794	}
2795	spin_unlock(&xfrm_policy_afinfo_lock);
2796
2797	return err;
2798}
2799EXPORT_SYMBOL(xfrm_policy_register_afinfo);
2800
2801void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
2802{
2803	struct dst_ops *dst_ops = afinfo->dst_ops;
2804	int i;
2805
2806	for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
2807		if (xfrm_policy_afinfo[i] != afinfo)
2808			continue;
2809		RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
2810		break;
2811	}
2812
2813	synchronize_rcu();
2814
2815	dst_ops->kmem_cachep = NULL;
2816	dst_ops->check = NULL;
2817	dst_ops->negative_advice = NULL;
2818	dst_ops->link_failure = NULL;
2819}
2820EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
2821
2822#ifdef CONFIG_XFRM_STATISTICS
2823static int __net_init xfrm_statistics_init(struct net *net)
2824{
2825	int rv;
2826	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
2827	if (!net->mib.xfrm_statistics)
2828		return -ENOMEM;
2829	rv = xfrm_proc_init(net);
2830	if (rv < 0)
2831		free_percpu(net->mib.xfrm_statistics);
2832	return rv;
2833}
2834
2835static void xfrm_statistics_fini(struct net *net)
2836{
2837	xfrm_proc_fini(net);
2838	free_percpu(net->mib.xfrm_statistics);
2839}
2840#else
2841static int __net_init xfrm_statistics_init(struct net *net)
2842{
2843	return 0;
2844}
2845
2846static void xfrm_statistics_fini(struct net *net)
2847{
2848}
2849#endif
2850
2851static int __net_init xfrm_policy_init(struct net *net)
2852{
2853	unsigned int hmask, sz;
2854	int dir;
2855
2856	if (net_eq(net, &init_net))
2857		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
2858					   sizeof(struct xfrm_dst),
2859					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2860					   NULL);
2861
2862	hmask = 8 - 1;
2863	sz = (hmask+1) * sizeof(struct hlist_head);
2864
2865	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
2866	if (!net->xfrm.policy_byidx)
2867		goto out_byidx;
2868	net->xfrm.policy_idx_hmask = hmask;
2869
2870	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
2871		struct xfrm_policy_hash *htab;
2872
2873		net->xfrm.policy_count[dir] = 0;
2874		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
2875		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
2876
2877		htab = &net->xfrm.policy_bydst[dir];
2878		htab->table = xfrm_hash_alloc(sz);
2879		if (!htab->table)
2880			goto out_bydst;
2881		htab->hmask = hmask;
2882		htab->dbits4 = 32;
2883		htab->sbits4 = 32;
2884		htab->dbits6 = 128;
2885		htab->sbits6 = 128;
2886	}
2887	net->xfrm.policy_hthresh.lbits4 = 32;
2888	net->xfrm.policy_hthresh.rbits4 = 32;
2889	net->xfrm.policy_hthresh.lbits6 = 128;
2890	net->xfrm.policy_hthresh.rbits6 = 128;
2891
2892	seqlock_init(&net->xfrm.policy_hthresh.lock);
2893
2894	INIT_LIST_HEAD(&net->xfrm.policy_all);
2895	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
2896	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
2897	return 0;
2898
2899out_bydst:
2900	for (dir--; dir >= 0; dir--) {
2901		struct xfrm_policy_hash *htab;
2902
2903		htab = &net->xfrm.policy_bydst[dir];
2904		xfrm_hash_free(htab->table, sz);
2905	}
2906	xfrm_hash_free(net->xfrm.policy_byidx, sz);
2907out_byidx:
2908	return -ENOMEM;
2909}
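/* Worked example of the sizing above: with the initial hmask of 8 - 1 = 7,
 * every per-direction bydst table and the byidx table start out with eight
 * hlist_head buckets (sz == 8 * sizeof(struct hlist_head)); the
 * policy_hash_work initialized here lets xfrm_hash_resize() grow them later
 * when needed.
 */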
2910
2911static void xfrm_policy_fini(struct net *net)
2912{
2913	unsigned int sz;
2914	int dir;
2915
2916	flush_work(&net->xfrm.policy_hash_work);
2917#ifdef CONFIG_XFRM_SUB_POLICY
2918	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
2919#endif
2920	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
2921
2922	WARN_ON(!list_empty(&net->xfrm.policy_all));
2923
2924	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
2925		struct xfrm_policy_hash *htab;
2926
2927		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
2928
2929		htab = &net->xfrm.policy_bydst[dir];
2930		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
2931		WARN_ON(!hlist_empty(htab->table));
2932		xfrm_hash_free(htab->table, sz);
2933	}
2934
2935	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
2936	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
2937	xfrm_hash_free(net->xfrm.policy_byidx, sz);
2938}
2939
2940static int __net_init xfrm_net_init(struct net *net)
2941{
2942	int rv;
2943
2944	/* Initialize the per-net locks here */
2945	spin_lock_init(&net->xfrm.xfrm_state_lock);
2946	spin_lock_init(&net->xfrm.xfrm_policy_lock);
2947	mutex_init(&net->xfrm.xfrm_cfg_mutex);
2948
2949	rv = xfrm_statistics_init(net);
2950	if (rv < 0)
2951		goto out_statistics;
2952	rv = xfrm_state_init(net);
2953	if (rv < 0)
2954		goto out_state;
2955	rv = xfrm_policy_init(net);
2956	if (rv < 0)
2957		goto out_policy;
2958	rv = xfrm_sysctl_init(net);
2959	if (rv < 0)
2960		goto out_sysctl;
2961
2962	return 0;
2963
2964out_sysctl:
2965	xfrm_policy_fini(net);
2966out_policy:
2967	xfrm_state_fini(net);
2968out_state:
2969	xfrm_statistics_fini(net);
2970out_statistics:
2971	return rv;
2972}
2973
2974static void __net_exit xfrm_net_exit(struct net *net)
2975{
2976	xfrm_sysctl_fini(net);
2977	xfrm_policy_fini(net);
2978	xfrm_state_fini(net);
2979	xfrm_statistics_fini(net);
2980}
2981
2982static struct pernet_operations __net_initdata xfrm_net_ops = {
2983	.init = xfrm_net_init,
2984	.exit = xfrm_net_exit,
2985};
2986
2987void __init xfrm_init(void)
2988{
2989	int i;
2990
2991	xfrm_pcpu_work = kmalloc_array(NR_CPUS, sizeof(*xfrm_pcpu_work),
2992				       GFP_KERNEL);
2993	BUG_ON(!xfrm_pcpu_work);
2994
2995	for (i = 0; i < NR_CPUS; i++)
2996		INIT_WORK(&xfrm_pcpu_work[i], xfrm_pcpu_work_fn);
2997
2998	register_pernet_subsys(&xfrm_net_ops);
2999	xfrm_dev_init();
3000	seqcount_init(&xfrm_policy_hash_generation);
3001	xfrm_input_init();
3002}
3003
3004#ifdef CONFIG_AUDITSYSCALL
3005static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
3006					 struct audit_buffer *audit_buf)
3007{
3008	struct xfrm_sec_ctx *ctx = xp->security;
3009	struct xfrm_selector *sel = &xp->selector;
3010
3011	if (ctx)
3012		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
3013				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
3014
3015	switch (sel->family) {
3016	case AF_INET:
3017		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
3018		if (sel->prefixlen_s != 32)
3019			audit_log_format(audit_buf, " src_prefixlen=%d",
3020					 sel->prefixlen_s);
3021		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
3022		if (sel->prefixlen_d != 32)
3023			audit_log_format(audit_buf, " dst_prefixlen=%d",
3024					 sel->prefixlen_d);
3025		break;
3026	case AF_INET6:
3027		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
3028		if (sel->prefixlen_s != 128)
3029			audit_log_format(audit_buf, " src_prefixlen=%d",
3030					 sel->prefixlen_s);
3031		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
3032		if (sel->prefixlen_d != 128)
3033			audit_log_format(audit_buf, " dst_prefixlen=%d",
3034					 sel->prefixlen_d);
3035		break;
3036	}
3037}
3038
3039void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
3040{
3041	struct audit_buffer *audit_buf;
3042
3043	audit_buf = xfrm_audit_start("SPD-add");
3044	if (audit_buf == NULL)
3045		return;
3046	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
3047	audit_log_format(audit_buf, " res=%u", result);
3048	xfrm_audit_common_policyinfo(xp, audit_buf);
3049	audit_log_end(audit_buf);
3050}
3051EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
3052
3053void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
3054			      bool task_valid)
3055{
3056	struct audit_buffer *audit_buf;
3057
3058	audit_buf = xfrm_audit_start("SPD-delete");
3059	if (audit_buf == NULL)
3060		return;
3061	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
3062	audit_log_format(audit_buf, " res=%u", result);
3063	xfrm_audit_common_policyinfo(xp, audit_buf);
3064	audit_log_end(audit_buf);
3065}
3066EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
3067#endif
3068
3069#ifdef CONFIG_XFRM_MIGRATE
3070static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
3071					const struct xfrm_selector *sel_tgt)
3072{
3073	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
3074		if (sel_tgt->family == sel_cmp->family &&
3075		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
3076				    sel_cmp->family) &&
3077		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
3078				    sel_cmp->family) &&
3079		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
3080		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
3081			return true;
3082		}
3083	} else {
3084		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
3085			return true;
3086		}
3087	}
3088	return false;
3089}
3090
3091static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
3092						    u8 dir, u8 type, struct net *net)
3093{
3094	struct xfrm_policy *pol, *ret = NULL;
3095	struct hlist_head *chain;
3096	u32 priority = ~0U;
3097
3098	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
3099	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
3100	hlist_for_each_entry(pol, chain, bydst) {
3101		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3102		    pol->type == type) {
3103			ret = pol;
3104			priority = ret->priority;
3105			break;
3106		}
3107	}
3108	chain = &net->xfrm.policy_inexact[dir];
3109	hlist_for_each_entry(pol, chain, bydst) {
3110		if ((pol->priority >= priority) && ret)
3111			break;
3112
3113		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3114		    pol->type == type) {
3115			ret = pol;
3116			break;
3117		}
3118	}
3119
3120	xfrm_pol_hold(ret);
3121
3122	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
3123
3124	return ret;
3125}
3126
3127static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
3128{
3129	int match = 0;
3130
3131	if (t->mode == m->mode && t->id.proto == m->proto &&
3132	    (m->reqid == 0 || t->reqid == m->reqid)) {
3133		switch (t->mode) {
3134		case XFRM_MODE_TUNNEL:
3135		case XFRM_MODE_BEET:
3136			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
3137					    m->old_family) &&
3138			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
3139					    m->old_family)) {
3140				match = 1;
3141			}
3142			break;
3143		case XFRM_MODE_TRANSPORT:
3144			/* in case of transport mode, the template does not
3145			   store any IP addresses, so we just compare mode
3146			   and protocol */
3147			match = 1;
3148			break;
3149		default:
3150			break;
3151		}
3152	}
3153	return match;
3154}
3155
3156/* update endpoint address(es) of template(s) */
3157static int xfrm_policy_migrate(struct xfrm_policy *pol,
3158			       struct xfrm_migrate *m, int num_migrate)
3159{
3160	struct xfrm_migrate *mp;
3161	int i, j, n = 0;
3162
3163	write_lock_bh(&pol->lock);
3164	if (unlikely(pol->walk.dead)) {
3165		/* target policy has been deleted */
3166		write_unlock_bh(&pol->lock);
3167		return -ENOENT;
3168	}
3169
3170	for (i = 0; i < pol->xfrm_nr; i++) {
3171		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
3172			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
3173				continue;
3174			n++;
3175			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
3176			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
3177				continue;
3178			/* update endpoints */
3179			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
3180			       sizeof(pol->xfrm_vec[i].id.daddr));
3181			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
3182			       sizeof(pol->xfrm_vec[i].saddr));
3183			pol->xfrm_vec[i].encap_family = mp->new_family;
3184			/* flush bundles */
3185			atomic_inc(&pol->genid);
3186		}
3187	}
3188
3189	write_unlock_bh(&pol->lock);
3190
3191	if (!n)
3192		return -ENODATA;
3193
3194	return 0;
3195}
3196
3197static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
3198{
3199	int i, j;
3200
3201	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
3202		return -EINVAL;
3203
3204	for (i = 0; i < num_migrate; i++) {
3205		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
3206		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
3207			return -EINVAL;
3208
3209		/* check if there is any duplicated entry */
3210		for (j = i + 1; j < num_migrate; j++) {
3211			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
3212				    sizeof(m[i].old_daddr)) &&
3213			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
3214				    sizeof(m[i].old_saddr)) &&
3215			    m[i].proto == m[j].proto &&
3216			    m[i].mode == m[j].mode &&
3217			    m[i].reqid == m[j].reqid &&
3218			    m[i].old_family == m[j].old_family)
3219				return -EINVAL;
3220		}
3221	}
3222
3223	return 0;
3224}
3225
3226int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
3227		 struct xfrm_migrate *m, int num_migrate,
3228		 struct xfrm_kmaddress *k, struct net *net,
3229		 struct xfrm_encap_tmpl *encap)
3230{
3231	int i, err, nx_cur = 0, nx_new = 0;
3232	struct xfrm_policy *pol = NULL;
3233	struct xfrm_state *x, *xc;
3234	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
3235	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
3236	struct xfrm_migrate *mp;
3237
3238	/* Stage 0 - sanity checks */
3239	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
3240		goto out;
3241
3242	if (dir >= XFRM_POLICY_MAX) {
3243		err = -EINVAL;
3244		goto out;
3245	}
3246
3247	/* Stage 1 - find policy */
3248	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
3249		err = -ENOENT;
3250		goto out;
3251	}
3252
3253	/* Stage 2 - find and update state(s) */
3254	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
3255		if ((x = xfrm_migrate_state_find(mp, net))) {
3256			x_cur[nx_cur] = x;
3257			nx_cur++;
3258			xc = xfrm_state_migrate(x, mp, encap);
3259			if (xc) {
3260				x_new[nx_new] = xc;
3261				nx_new++;
3262			} else {
3263				err = -ENODATA;
3264				goto restore_state;
3265			}
3266		}
3267	}
3268
3269	/* Stage 3 - update policy */
3270	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
3271		goto restore_state;
3272
3273	/* Stage 4 - delete old state(s) */
3274	if (nx_cur) {
3275		xfrm_states_put(x_cur, nx_cur);
3276		xfrm_states_delete(x_cur, nx_cur);
3277	}
3278
3279	/* Stage 5 - announce */
3280	km_migrate(sel, dir, type, m, num_migrate, k, encap);
3281
3282	xfrm_pol_put(pol);
3283
3284	return 0;
3285out:
3286	return err;
3287
3288restore_state:
3289	if (pol)
3290		xfrm_pol_put(pol);
3291	if (nx_cur)
3292		xfrm_states_put(x_cur, nx_cur);
3293	if (nx_new)
3294		xfrm_states_delete(x_new, nx_new);
3295
3296	return err;
3297}
3298EXPORT_SYMBOL(xfrm_migrate);
3299#endif