v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * x_tables core - Backend for {ip,ip6,arp}_tables
   4 *
   5 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
   6 * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
   7 *
   8 * Based on existing ip_tables code which is
   9 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
  10 *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
  11 */
  12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  13#include <linux/kernel.h>
  14#include <linux/module.h>
  15#include <linux/socket.h>
  16#include <linux/net.h>
  17#include <linux/proc_fs.h>
  18#include <linux/seq_file.h>
  19#include <linux/string.h>
  20#include <linux/vmalloc.h>
  21#include <linux/mutex.h>
  22#include <linux/mm.h>
  23#include <linux/slab.h>
  24#include <linux/audit.h>
  25#include <linux/user_namespace.h>
  26#include <net/net_namespace.h>
  27#include <net/netns/generic.h>
  28
  29#include <linux/netfilter/x_tables.h>
  30#include <linux/netfilter_arp.h>
  31#include <linux/netfilter_ipv4/ip_tables.h>
  32#include <linux/netfilter_ipv6/ip6_tables.h>
  33#include <linux/netfilter_arp/arp_tables.h>
  34
  35MODULE_LICENSE("GPL");
  36MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
  37MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
  38
  39#define XT_PCPU_BLOCK_SIZE 4096
  40#define XT_MAX_TABLE_SIZE	(512 * 1024 * 1024)
  41
  42struct xt_pernet {
  43	struct list_head tables[NFPROTO_NUMPROTO];
  44};
  45
  46struct compat_delta {
  47	unsigned int offset; /* offset in kernel */
  48	int delta; /* delta in 32bit user land */
  49};
  50
  51struct xt_af {
  52	struct mutex mutex;
  53	struct list_head match;
  54	struct list_head target;
  55#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
  56	struct mutex compat_mutex;
  57	struct compat_delta *compat_tab;
  58	unsigned int number; /* number of slots in compat_tab[] */
  59	unsigned int cur; /* number of used slots in compat_tab[] */
  60#endif
  61};
  62
  63static unsigned int xt_pernet_id __read_mostly;
  64static struct xt_af *xt __read_mostly;
  65
  66static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
  67	[NFPROTO_UNSPEC] = "x",
  68	[NFPROTO_IPV4]   = "ip",
  69	[NFPROTO_ARP]    = "arp",
  70	[NFPROTO_BRIDGE] = "eb",
  71	[NFPROTO_IPV6]   = "ip6",
  72};
  73
  74/* Registration hooks for targets. */
  75int xt_register_target(struct xt_target *target)
  76{
  77	u_int8_t af = target->family;
  78
  79	mutex_lock(&xt[af].mutex);
  80	list_add(&target->list, &xt[af].target);
  81	mutex_unlock(&xt[af].mutex);
  82	return 0;
  83}
  84EXPORT_SYMBOL(xt_register_target);
  85
  86void
  87xt_unregister_target(struct xt_target *target)
  88{
  89	u_int8_t af = target->family;
  90
  91	mutex_lock(&xt[af].mutex);
  92	list_del(&target->list);
  93	mutex_unlock(&xt[af].mutex);
  94}
  95EXPORT_SYMBOL(xt_unregister_target);
  96
  97int
  98xt_register_targets(struct xt_target *target, unsigned int n)
  99{
 100	unsigned int i;
 101	int err = 0;
 102
 103	for (i = 0; i < n; i++) {
 104		err = xt_register_target(&target[i]);
 105		if (err)
 106			goto err;
 107	}
 108	return err;
 109
 110err:
 111	if (i > 0)
 112		xt_unregister_targets(target, i);
 113	return err;
 114}
 115EXPORT_SYMBOL(xt_register_targets);
 116
 117void
 118xt_unregister_targets(struct xt_target *target, unsigned int n)
 119{
 120	while (n-- > 0)
 121		xt_unregister_target(&target[n]);
 122}
 123EXPORT_SYMBOL(xt_unregister_targets);
 124
 125int xt_register_match(struct xt_match *match)
 126{
 127	u_int8_t af = match->family;
 128
 129	mutex_lock(&xt[af].mutex);
 130	list_add(&match->list, &xt[af].match);
 131	mutex_unlock(&xt[af].mutex);
 132	return 0;
 133}
 134EXPORT_SYMBOL(xt_register_match);
 135
 136void
 137xt_unregister_match(struct xt_match *match)
 138{
 139	u_int8_t af = match->family;
 140
 141	mutex_lock(&xt[af].mutex);
 142	list_del(&match->list);
 143	mutex_unlock(&xt[af].mutex);
 144}
 145EXPORT_SYMBOL(xt_unregister_match);
 146
 147int
 148xt_register_matches(struct xt_match *match, unsigned int n)
 149{
 150	unsigned int i;
 151	int err = 0;
 152
 153	for (i = 0; i < n; i++) {
 154		err = xt_register_match(&match[i]);
 155		if (err)
 156			goto err;
 157	}
 158	return err;
 159
 160err:
 161	if (i > 0)
 162		xt_unregister_matches(match, i);
 163	return err;
 164}
 165EXPORT_SYMBOL(xt_register_matches);
 166
 167void
 168xt_unregister_matches(struct xt_match *match, unsigned int n)
 169{
 170	while (n-- > 0)
 171		xt_unregister_match(&match[n]);
 172}
 173EXPORT_SYMBOL(xt_unregister_matches);
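/*
 * Illustrative sketch (editorial, not part of this file): an extension
 * module typically registers an array of matches from its init hook and
 * unregisters it on exit.  The "foo" names and struct foo_mtinfo below
 * are assumptions for illustration only.
 *
 *	static struct xt_match foo_mt_reg[] __read_mostly = {
 *		{
 *			.name      = "foo",
 *			.revision  = 0,
 *			.family    = NFPROTO_IPV4,
 *			.match     = foo_mt,
 *			.matchsize = sizeof(struct foo_mtinfo),
 *			.me        = THIS_MODULE,
 *		},
 *	};
 *
 *	static int __init foo_mt_init(void)
 *	{
 *		return xt_register_matches(foo_mt_reg, ARRAY_SIZE(foo_mt_reg));
 *	}
 *
 *	static void __exit foo_mt_exit(void)
 *	{
 *		xt_unregister_matches(foo_mt_reg, ARRAY_SIZE(foo_mt_reg));
 *	}
 */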
 174
 175
 176/*
 177 * These are weird, but module loading must not be done with the mutex
 178 * held (since the loaded modules will register themselves), and we
 179 * have to have a single function to use.
 180 */
 181
 182/* Find match, grabs ref.  Returns ERR_PTR() on error. */
 183struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
 184{
 185	struct xt_match *m;
 186	int err = -ENOENT;
 187
 188	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
 189		return ERR_PTR(-EINVAL);
 190
 191	mutex_lock(&xt[af].mutex);
 192	list_for_each_entry(m, &xt[af].match, list) {
 193		if (strcmp(m->name, name) == 0) {
 194			if (m->revision == revision) {
 195				if (try_module_get(m->me)) {
 196					mutex_unlock(&xt[af].mutex);
 197					return m;
 198				}
 199			} else
 200				err = -EPROTOTYPE; /* Found something. */
 201		}
 202	}
 203	mutex_unlock(&xt[af].mutex);
 204
 205	if (af != NFPROTO_UNSPEC)
 206		/* Try searching again in the family-independent list */
 207		return xt_find_match(NFPROTO_UNSPEC, name, revision);
 208
 209	return ERR_PTR(err);
 210}
 211EXPORT_SYMBOL(xt_find_match);
 212
 213struct xt_match *
 214xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
 215{
 216	struct xt_match *match;
 217
 218	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
 219		return ERR_PTR(-EINVAL);
 220
 221	match = xt_find_match(nfproto, name, revision);
 222	if (IS_ERR(match)) {
 223		request_module("%st_%s", xt_prefix[nfproto], name);
 224		match = xt_find_match(nfproto, name, revision);
 225	}
 226
 227	return match;
 228}
 229EXPORT_SYMBOL_GPL(xt_request_find_match);
 230
 231/* Find target, grabs ref.  Returns ERR_PTR() on error. */
 232static struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
 233{
 234	struct xt_target *t;
 235	int err = -ENOENT;
 236
 237	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
 238		return ERR_PTR(-EINVAL);
 239
 240	mutex_lock(&xt[af].mutex);
 241	list_for_each_entry(t, &xt[af].target, list) {
 242		if (strcmp(t->name, name) == 0) {
 243			if (t->revision == revision) {
 244				if (try_module_get(t->me)) {
 245					mutex_unlock(&xt[af].mutex);
 246					return t;
 247				}
 248			} else
 249				err = -EPROTOTYPE; /* Found something. */
 250		}
 251	}
 252	mutex_unlock(&xt[af].mutex);
 253
 254	if (af != NFPROTO_UNSPEC)
 255		/* Try searching again in the family-independent list */
 256		return xt_find_target(NFPROTO_UNSPEC, name, revision);
 257
 258	return ERR_PTR(err);
 259}
 260
 261struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
 262{
 263	struct xt_target *target;
 264
 265	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
 266		return ERR_PTR(-EINVAL);
 267
 268	target = xt_find_target(af, name, revision);
 269	if (IS_ERR(target)) {
 270		request_module("%st_%s", xt_prefix[af], name);
 271		target = xt_find_target(af, name, revision);
 272	}
 273
 274	return target;
 275}
 276EXPORT_SYMBOL_GPL(xt_request_find_target);
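/*
 * Illustrative sketch (editorial): callers look an extension up by name
 * and revision and must drop the module reference that the lookup took.
 * Hypothetical caller, error handling trimmed:
 *
 *	struct xt_match *m = xt_request_find_match(NFPROTO_IPV4, "limit", 0);
 *
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	// ... use m->matchsize, m->checkentry, etc. ...
 *	module_put(m->me);	// drop the try_module_get() reference
 */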
 277
 278
 279static int xt_obj_to_user(u16 __user *psize, u16 size,
 280			  void __user *pname, const char *name,
 281			  u8 __user *prev, u8 rev)
 282{
 283	if (put_user(size, psize))
 284		return -EFAULT;
 285	if (copy_to_user(pname, name, strlen(name) + 1))
 286		return -EFAULT;
 287	if (put_user(rev, prev))
 288		return -EFAULT;
 289
 290	return 0;
 291}
 292
 293#define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE)				\
 294	xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size,	\
 295		       U->u.user.name, K->u.kernel.TYPE->name,		\
 296		       &U->u.user.revision, K->u.kernel.TYPE->revision)
 297
 298int xt_data_to_user(void __user *dst, const void *src,
 299		    int usersize, int size, int aligned_size)
 300{
 301	usersize = usersize ? : size;
 302	if (copy_to_user(dst, src, usersize))
 303		return -EFAULT;
 304	if (usersize != aligned_size &&
 305	    clear_user(dst + usersize, aligned_size - usersize))
 306		return -EFAULT;
 307
 308	return 0;
 309}
 310EXPORT_SYMBOL_GPL(xt_data_to_user);
 311
 312#define XT_DATA_TO_USER(U, K, TYPE)					\
 313	xt_data_to_user(U->data, K->data,				\
 314			K->u.kernel.TYPE->usersize,			\
 315			K->u.kernel.TYPE->TYPE##size,			\
 316			XT_ALIGN(K->u.kernel.TYPE->TYPE##size))
 317
 318int xt_match_to_user(const struct xt_entry_match *m,
 319		     struct xt_entry_match __user *u)
 320{
 321	return XT_OBJ_TO_USER(u, m, match, 0) ||
 322	       XT_DATA_TO_USER(u, m, match);
 323}
 324EXPORT_SYMBOL_GPL(xt_match_to_user);
 325
 326int xt_target_to_user(const struct xt_entry_target *t,
 327		      struct xt_entry_target __user *u)
 328{
 329	return XT_OBJ_TO_USER(u, t, target, 0) ||
 330	       XT_DATA_TO_USER(u, t, target);
 331}
 332EXPORT_SYMBOL_GPL(xt_target_to_user);
 333
 334static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
 335{
 336	const struct xt_match *m;
 337	int have_rev = 0;
 338
 339	mutex_lock(&xt[af].mutex);
 340	list_for_each_entry(m, &xt[af].match, list) {
 341		if (strcmp(m->name, name) == 0) {
 342			if (m->revision > *bestp)
 343				*bestp = m->revision;
 344			if (m->revision == revision)
 345				have_rev = 1;
 346		}
 347	}
 348	mutex_unlock(&xt[af].mutex);
 349
 350	if (af != NFPROTO_UNSPEC && !have_rev)
 351		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
 352
 353	return have_rev;
 354}
 355
 356static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
 357{
 358	const struct xt_target *t;
 359	int have_rev = 0;
 360
 361	mutex_lock(&xt[af].mutex);
 362	list_for_each_entry(t, &xt[af].target, list) {
 363		if (strcmp(t->name, name) == 0) {
 364			if (t->revision > *bestp)
 365				*bestp = t->revision;
 366			if (t->revision == revision)
 367				have_rev = 1;
 368		}
 369	}
 370	mutex_unlock(&xt[af].mutex);
 371
 372	if (af != NFPROTO_UNSPEC && !have_rev)
 373		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
 374
 375	return have_rev;
 376}
 377
 378/* Returns true, or false if no such extension exists at all */
 379int xt_find_revision(u8 af, const char *name, u8 revision, int target,
 380		     int *err)
 381{
 382	int have_rev, best = -1;
 383
 384	if (target == 1)
 385		have_rev = target_revfn(af, name, revision, &best);
 386	else
 387		have_rev = match_revfn(af, name, revision, &best);
 388
 389	/* Nothing at all?  Return 0 to try loading module. */
 390	if (best == -1) {
 391		*err = -ENOENT;
 392		return 0;
 393	}
 394
 395	*err = best;
 396	if (!have_rev)
 397		*err = -EPROTONOSUPPORT;
 398	return 1;
 399}
 400EXPORT_SYMBOL_GPL(xt_find_revision);
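/*
 * Illustrative sketch (editorial): the per-family SO_GET_REVISION
 * getsockopt handlers use this roughly as follows (simplified from the
 * ip_tables pattern; surrounding names assumed):
 *
 *	int ret;
 *
 *	if (xt_find_revision(AF_INET, rev.name, rev.revision, target, &ret))
 *		return ret;	// best known revision, or -EPROTONOSUPPORT
 *	request_module("ipt_%s", rev.name);	// nothing loaded yet; retry afterwards
 */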
 401
 402static char *
 403textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
 404{
 405	static const char *const inetbr_names[] = {
 406		"PREROUTING", "INPUT", "FORWARD",
 407		"OUTPUT", "POSTROUTING", "BROUTING",
 408	};
 409	static const char *const arp_names[] = {
 410		"INPUT", "FORWARD", "OUTPUT",
 411	};
 412	const char *const *names;
 413	unsigned int i, max;
 414	char *p = buf;
 415	bool np = false;
 416	int res;
 417
 418	names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
 419	max   = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
 420	                                   ARRAY_SIZE(inetbr_names);
 421	*p = '\0';
 422	for (i = 0; i < max; ++i) {
 423		if (!(mask & (1 << i)))
 424			continue;
 425		res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
 426		if (res > 0) {
 427			size -= res;
 428			p += res;
 429		}
 430		np = true;
 431	}
 432
 433	return buf;
 434}
 435
 436/**
 437 * xt_check_proc_name - check that name is suitable for /proc file creation
 438 *
 439 * @name: file name candidate
 440 * @size: length of buffer
 441 *
 442 * Some x_tables modules wish to create a file in /proc.
 443 * This function makes sure that the name is suitable for this
 444 * purpose: it checks that the name is NUL terminated and isn't a 'special'
 445 * name, like "..".
 446 *
 447 * returns a negative number on error or 0 if the name is usable.
 448 */
 449int xt_check_proc_name(const char *name, unsigned int size)
 450{
 451	if (name[0] == '\0')
 452		return -EINVAL;
 453
 454	if (strnlen(name, size) == size)
 455		return -ENAMETOOLONG;
 456
 457	if (strcmp(name, ".") == 0 ||
 458	    strcmp(name, "..") == 0 ||
 459	    strchr(name, '/'))
 460		return -EINVAL;
 461
 462	return 0;
 463}
 464EXPORT_SYMBOL(xt_check_proc_name);
 465
 466int xt_check_match(struct xt_mtchk_param *par,
 467		   unsigned int size, u16 proto, bool inv_proto)
 468{
 469	int ret;
 470
 471	if (XT_ALIGN(par->match->matchsize) != size &&
 472	    par->match->matchsize != -1) {
 473		/*
 474		 * ebt_among is exempt from centralized matchsize checking
 475		 * because it uses a dynamic-size data set.
 476		 */
 477		pr_err_ratelimited("%s_tables: %s.%u match: invalid size %u (kernel) != (user) %u\n",
 478				   xt_prefix[par->family], par->match->name,
 479				   par->match->revision,
 480				   XT_ALIGN(par->match->matchsize), size);
 481		return -EINVAL;
 482	}
 483	if (par->match->table != NULL &&
 484	    strcmp(par->match->table, par->table) != 0) {
 485		pr_info_ratelimited("%s_tables: %s match: only valid in %s table, not %s\n",
 486				    xt_prefix[par->family], par->match->name,
 487				    par->match->table, par->table);
 488		return -EINVAL;
 489	}
 490	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
 491		char used[64], allow[64];
 492
 493		pr_info_ratelimited("%s_tables: %s match: used from hooks %s, but only valid from %s\n",
 494				    xt_prefix[par->family], par->match->name,
 495				    textify_hooks(used, sizeof(used),
 496						  par->hook_mask, par->family),
 497				    textify_hooks(allow, sizeof(allow),
 498						  par->match->hooks,
 499						  par->family));
 500		return -EINVAL;
 501	}
 502	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
 503		pr_info_ratelimited("%s_tables: %s match: only valid for protocol %u\n",
 504				    xt_prefix[par->family], par->match->name,
 505				    par->match->proto);
 506		return -EINVAL;
 507	}
 508	if (par->match->checkentry != NULL) {
 509		ret = par->match->checkentry(par);
 510		if (ret < 0)
 511			return ret;
 512		else if (ret > 0)
 513			/* Flag up potential errors. */
 514			return -EIO;
 515	}
 516	return 0;
 517}
 518EXPORT_SYMBOL_GPL(xt_check_match);
 519
 520/** xt_check_entry_match - check that matches end before start of target
 521 *
 522 * @match: beginning of xt_entry_match
 523 * @target: beginning of this rule's target (alleged end of matches)
 524 * @alignment: alignment requirement of match structures
 525 *
 526 * Validates that all matches add up to the beginning of the target,
 527 * and that each match covers at least the base structure size.
 528 *
 529 * Return: 0 on success, negative errno on failure.
 530 */
 531static int xt_check_entry_match(const char *match, const char *target,
 532				const size_t alignment)
 533{
 534	const struct xt_entry_match *pos;
 535	int length = target - match;
 536
 537	if (length == 0) /* no matches */
 538		return 0;
 539
 540	pos = (struct xt_entry_match *)match;
 541	do {
 542		if ((unsigned long)pos % alignment)
 543			return -EINVAL;
 544
 545		if (length < (int)sizeof(struct xt_entry_match))
 546			return -EINVAL;
 547
 548		if (pos->u.match_size < sizeof(struct xt_entry_match))
 549			return -EINVAL;
 550
 551		if (pos->u.match_size > length)
 552			return -EINVAL;
 553
 554		length -= pos->u.match_size;
 555		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
 556	} while (length > 0);
 557
 558	return 0;
 559}
 560
 561/** xt_check_table_hooks - check hook entry points are sane
 562 *
 563 * @info: xt_table_info to check
 564 * @valid_hooks: hook entry points that we can enter from
 565 *
 566 * Validates that the hook entry points and underflows are set up.
 567 *
 568 * Return: 0 on success, negative errno on failure.
 569 */
 570int xt_check_table_hooks(const struct xt_table_info *info, unsigned int valid_hooks)
 571{
 572	const char *err = "unsorted underflow";
 573	unsigned int i, max_uflow, max_entry;
 574	bool check_hooks = false;
 575
 576	BUILD_BUG_ON(ARRAY_SIZE(info->hook_entry) != ARRAY_SIZE(info->underflow));
 577
 578	max_entry = 0;
 579	max_uflow = 0;
 580
 581	for (i = 0; i < ARRAY_SIZE(info->hook_entry); i++) {
 582		if (!(valid_hooks & (1 << i)))
 583			continue;
 584
 585		if (info->hook_entry[i] == 0xFFFFFFFF)
 586			return -EINVAL;
 587		if (info->underflow[i] == 0xFFFFFFFF)
 588			return -EINVAL;
 589
 590		if (check_hooks) {
 591			if (max_uflow > info->underflow[i])
 592				goto error;
 593
 594			if (max_uflow == info->underflow[i]) {
 595				err = "duplicate underflow";
 596				goto error;
 597			}
 598			if (max_entry > info->hook_entry[i]) {
 599				err = "unsorted entry";
 600				goto error;
 601			}
 602			if (max_entry == info->hook_entry[i]) {
 603				err = "duplicate entry";
 604				goto error;
 605			}
 606		}
 607		max_entry = info->hook_entry[i];
 608		max_uflow = info->underflow[i];
 609		check_hooks = true;
 610	}
 611
 612	return 0;
 613error:
 614	pr_err_ratelimited("%s at hook %d\n", err, i);
 615	return -EINVAL;
 616}
 617EXPORT_SYMBOL(xt_check_table_hooks);
 618
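/*
 * Background note (editorial): standard-target verdicts are encoded so
 * that a positive value is a jump offset into the rule blob, XT_RETURN
 * (== -NF_REPEAT - 1) returns to the calling chain, and the base
 * netfilter verdicts are stored as -verdict - 1, e.g. NF_DROP (0)
 * becomes -1 and NF_ACCEPT (1) becomes -2.  verdict_ok() below accepts
 * exactly these forms.
 */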
 619static bool verdict_ok(int verdict)
 620{
 621	if (verdict > 0)
 622		return true;
 623
 624	if (verdict < 0) {
 625		int v = -verdict - 1;
 626
 627		if (verdict == XT_RETURN)
 628			return true;
 629
 630		switch (v) {
 631		case NF_ACCEPT: return true;
 632		case NF_DROP: return true;
 633		case NF_QUEUE: return true;
 634		default:
 635			break;
 636		}
 637
 638		return false;
 639	}
 640
 641	return false;
 642}
 643
 644static bool error_tg_ok(unsigned int usersize, unsigned int kernsize,
 645			const char *msg, unsigned int msglen)
 646{
 647	return usersize == kernsize && strnlen(msg, msglen) < msglen;
 648}
 649
 650#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
 651int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
 652{
 653	struct xt_af *xp = &xt[af];
 654
 655	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
 656
 657	if (WARN_ON(!xp->compat_tab))
 658		return -ENOMEM;
 659
 660	if (xp->cur >= xp->number)
 661		return -EINVAL;
 662
 663	if (xp->cur)
 664		delta += xp->compat_tab[xp->cur - 1].delta;
 665	xp->compat_tab[xp->cur].offset = offset;
 666	xp->compat_tab[xp->cur].delta = delta;
 667	xp->cur++;
 668	return 0;
 669}
 670EXPORT_SYMBOL_GPL(xt_compat_add_offset);
 671
 672void xt_compat_flush_offsets(u_int8_t af)
 673{
 674	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
 675
 676	if (xt[af].compat_tab) {
 677		vfree(xt[af].compat_tab);
 678		xt[af].compat_tab = NULL;
 679		xt[af].number = 0;
 680		xt[af].cur = 0;
 681	}
 682}
 683EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
 684
 685int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
 686{
 687	struct compat_delta *tmp = xt[af].compat_tab;
 688	int mid, left = 0, right = xt[af].cur - 1;
 689
 690	while (left <= right) {
 691		mid = (left + right) >> 1;
 692		if (offset > tmp[mid].offset)
 693			left = mid + 1;
 694		else if (offset < tmp[mid].offset)
 695			right = mid - 1;
 696		else
 697			return mid ? tmp[mid - 1].delta : 0;
 698	}
 699	return left ? tmp[left - 1].delta : 0;
 700}
 701EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
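/*
 * Worked example (editorial): the deltas stored in compat_tab[] are
 * cumulative.  If three consecutive rules each shrink by 8 bytes when
 * translated from the 32-bit layout, xt_compat_add_offset() records
 * deltas 8, 16 and 24, and xt_compat_calc_jump(af, offset) returns the
 * total shrinkage of all rules that start before @offset, i.e. the
 * amount by which a 32-bit jump target has to be corrected.
 */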
 702
 703int xt_compat_init_offsets(u8 af, unsigned int number)
 704{
 705	size_t mem;
 706
 707	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
 708
 709	if (!number || number > (INT_MAX / sizeof(struct compat_delta)))
 710		return -EINVAL;
 711
 712	if (WARN_ON(xt[af].compat_tab))
 713		return -EINVAL;
 714
 715	mem = sizeof(struct compat_delta) * number;
 716	if (mem > XT_MAX_TABLE_SIZE)
 717		return -ENOMEM;
 718
 719	xt[af].compat_tab = vmalloc(mem);
 720	if (!xt[af].compat_tab)
 721		return -ENOMEM;
 722
 723	xt[af].number = number;
 724	xt[af].cur = 0;
 725
 726	return 0;
 727}
 728EXPORT_SYMBOL(xt_compat_init_offsets);
 729
 730int xt_compat_match_offset(const struct xt_match *match)
 731{
 732	u_int16_t csize = match->compatsize ? : match->matchsize;
 733	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
 734}
 735EXPORT_SYMBOL_GPL(xt_compat_match_offset);
 736
 737void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
 738			       unsigned int *size)
 739{
 740	const struct xt_match *match = m->u.kernel.match;
 741	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
 742	int off = xt_compat_match_offset(match);
 743	u_int16_t msize = cm->u.user.match_size;
 744	char name[sizeof(m->u.user.name)];
 745
 746	m = *dstptr;
 747	memcpy(m, cm, sizeof(*cm));
 748	if (match->compat_from_user)
 749		match->compat_from_user(m->data, cm->data);
 750	else
 751		memcpy(m->data, cm->data, msize - sizeof(*cm));
 752
 753	msize += off;
 754	m->u.user.match_size = msize;
 755	strlcpy(name, match->name, sizeof(name));
 756	module_put(match->me);
 757	strncpy(m->u.user.name, name, sizeof(m->u.user.name));
 758
 759	*size += off;
 760	*dstptr += msize;
 761}
 762EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
 763
 764#define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE)			\
 765	xt_data_to_user(U->data, K->data,				\
 766			K->u.kernel.TYPE->usersize,			\
 767			C_SIZE,						\
 768			COMPAT_XT_ALIGN(C_SIZE))
 769
 770int xt_compat_match_to_user(const struct xt_entry_match *m,
 771			    void __user **dstptr, unsigned int *size)
 772{
 773	const struct xt_match *match = m->u.kernel.match;
 774	struct compat_xt_entry_match __user *cm = *dstptr;
 775	int off = xt_compat_match_offset(match);
 776	u_int16_t msize = m->u.user.match_size - off;
 777
 778	if (XT_OBJ_TO_USER(cm, m, match, msize))
 779		return -EFAULT;
 780
 781	if (match->compat_to_user) {
 782		if (match->compat_to_user((void __user *)cm->data, m->data))
 783			return -EFAULT;
 784	} else {
 785		if (COMPAT_XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
 786			return -EFAULT;
 787	}
 788
 789	*size -= off;
 790	*dstptr += msize;
 791	return 0;
 792}
 793EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
 794
 795/* non-compat version may have padding after verdict */
 796struct compat_xt_standard_target {
 797	struct compat_xt_entry_target t;
 798	compat_uint_t verdict;
 799};
 800
 801struct compat_xt_error_target {
 802	struct compat_xt_entry_target t;
 803	char errorname[XT_FUNCTION_MAXNAMELEN];
 804};
 805
 806int xt_compat_check_entry_offsets(const void *base, const char *elems,
 807				  unsigned int target_offset,
 808				  unsigned int next_offset)
 809{
 810	long size_of_base_struct = elems - (const char *)base;
 811	const struct compat_xt_entry_target *t;
 812	const char *e = base;
 813
 814	if (target_offset < size_of_base_struct)
 815		return -EINVAL;
 816
 817	if (target_offset + sizeof(*t) > next_offset)
 818		return -EINVAL;
 819
 820	t = (void *)(e + target_offset);
 821	if (t->u.target_size < sizeof(*t))
 822		return -EINVAL;
 823
 824	if (target_offset + t->u.target_size > next_offset)
 825		return -EINVAL;
 826
 827	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
 828		const struct compat_xt_standard_target *st = (const void *)t;
 829
 830		if (COMPAT_XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
 831			return -EINVAL;
 832
 833		if (!verdict_ok(st->verdict))
 834			return -EINVAL;
 835	} else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
 836		const struct compat_xt_error_target *et = (const void *)t;
 837
 838		if (!error_tg_ok(t->u.target_size, sizeof(*et),
 839				 et->errorname, sizeof(et->errorname)))
 840			return -EINVAL;
 841	}
 842
 843	/* compat_xt_entry match has less strict alignment requirements,
 844	 * otherwise they are identical.  In case of padding differences
 845	 * we need to add compat version of xt_check_entry_match.
 846	 */
 847	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));
 848
 849	return xt_check_entry_match(elems, base + target_offset,
 850				    __alignof__(struct compat_xt_entry_match));
 851}
 852EXPORT_SYMBOL(xt_compat_check_entry_offsets);
 853#endif /* CONFIG_NETFILTER_XTABLES_COMPAT */
 854
 855/**
 856 * xt_check_entry_offsets - validate arp/ip/ip6t_entry
 857 *
 858 * @base: pointer to arp/ip/ip6t_entry
 859 * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
 860 * @target_offset: the arp/ip/ip6_t->target_offset
 861 * @next_offset: the arp/ip/ip6_t->next_offset
 862 *
 863 * validates that target_offset and next_offset are sane and that all
 864 * match sizes (if any) align with the target offset.
 865 *
 866 * This function does not validate the targets or matches themselves, it
 867 * only tests that all the offsets and sizes are correct, that all
 868 * match structures are aligned, and that the last structure ends where
 869 * the target structure begins.
 870 *
 871 * Also see xt_compat_check_entry_offsets for the CONFIG_NETFILTER_XTABLES_COMPAT version.
 872 *
 873 * The arp/ip/ip6t_entry structure @base must have passed following tests:
 874 * - it must point to a valid memory location
 875 * - base to base + next_offset must be accessible, i.e. not exceed allocated
 876 *   length.
 877 *
 878 * A well-formed entry looks like this:
 879 *
 880 * ip(6)t_entry   match [mtdata]  match [mtdata] target [tgdata] ip(6)t_entry
 881 * e->elems[]-----'                              |               |
 882 *                matchsize                      |               |
 883 *                                matchsize      |               |
 884 *                                               |               |
 885 * target_offset---------------------------------'               |
 886 * next_offset---------------------------------------------------'
 887 *
 888 * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
 889 *          This is where matches (if any) and the target reside.
 890 * target_offset: beginning of target.
 891 * next_offset: start of the next rule; also: size of this rule.
 892 * Since targets have a minimum size, target_offset + minlen <= next_offset.
 893 *
 894 * Every match stores its size; the sum of sizes must not exceed target_offset.
 895 *
 896 * Return: 0 on success, negative errno on failure.
 897 */
 898int xt_check_entry_offsets(const void *base,
 899			   const char *elems,
 900			   unsigned int target_offset,
 901			   unsigned int next_offset)
 902{
 903	long size_of_base_struct = elems - (const char *)base;
 904	const struct xt_entry_target *t;
 905	const char *e = base;
 906
 907	/* target start is within the ip/ip6/arpt_entry struct */
 908	if (target_offset < size_of_base_struct)
 909		return -EINVAL;
 910
 911	if (target_offset + sizeof(*t) > next_offset)
 912		return -EINVAL;
 913
 914	t = (void *)(e + target_offset);
 915	if (t->u.target_size < sizeof(*t))
 916		return -EINVAL;
 917
 918	if (target_offset + t->u.target_size > next_offset)
 919		return -EINVAL;
 920
 921	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
 922		const struct xt_standard_target *st = (const void *)t;
 923
 924		if (XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
 925			return -EINVAL;
 926
 927		if (!verdict_ok(st->verdict))
 928			return -EINVAL;
 929	} else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
 930		const struct xt_error_target *et = (const void *)t;
 931
 932		if (!error_tg_ok(t->u.target_size, sizeof(*et),
 933				 et->errorname, sizeof(et->errorname)))
 934			return -EINVAL;
 935	}
 936
 937	return xt_check_entry_match(elems, base + target_offset,
 938				    __alignof__(struct xt_entry_match));
 939}
 940EXPORT_SYMBOL(xt_check_entry_offsets);
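/*
 * Worked example (editorial, 64-bit sizes assumed): an ipt_entry with no
 * matches and a standard target has target_offset == sizeof(struct
 * ipt_entry) == 112 and next_offset == XT_ALIGN(target_offset +
 * sizeof(struct xt_standard_target)) == 152.  Each match inserted before
 * the target grows target_offset (and next_offset) by its u.match_size.
 */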
 941
 942/**
 943 * xt_alloc_entry_offsets - allocate array to store rule head offsets
 944 *
 945 * @size: number of entries
 946 *
 947 * Return: NULL or zeroed kmalloc'd or vmalloc'd array
 948 */
 949unsigned int *xt_alloc_entry_offsets(unsigned int size)
 950{
 951	if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int))
 952		return NULL;
 953
 954	return kvcalloc(size, sizeof(unsigned int), GFP_KERNEL);
 955
 956}
 957EXPORT_SYMBOL(xt_alloc_entry_offsets);
 958
 959/**
 960 * xt_find_jump_offset - check if target is a valid jump offset
 961 *
 962 * @offsets: array containing all valid rule start offsets of a rule blob
 963 * @target: the jump target to search for
 964 * @size: entries in @offset
 965 */
 966bool xt_find_jump_offset(const unsigned int *offsets,
 967			 unsigned int target, unsigned int size)
 968{
 969	int m, low = 0, hi = size;
 970
 971	while (hi > low) {
 972		m = (low + hi) / 2u;
 973
 974		if (offsets[m] > target)
 975			hi = m;
 976		else if (offsets[m] < target)
 977			low = m + 1;
 978		else
 979			return true;
 980	}
 981
 982	return false;
 983}
 984EXPORT_SYMBOL(xt_find_jump_offset);
 985
 986int xt_check_target(struct xt_tgchk_param *par,
 987		    unsigned int size, u16 proto, bool inv_proto)
 988{
 989	int ret;
 990
 991	if (XT_ALIGN(par->target->targetsize) != size) {
 992		pr_err_ratelimited("%s_tables: %s.%u target: invalid size %u (kernel) != (user) %u\n",
 993				   xt_prefix[par->family], par->target->name,
 994				   par->target->revision,
 995				   XT_ALIGN(par->target->targetsize), size);
 996		return -EINVAL;
 997	}
 998	if (par->target->table != NULL &&
 999	    strcmp(par->target->table, par->table) != 0) {
1000		pr_info_ratelimited("%s_tables: %s target: only valid in %s table, not %s\n",
1001				    xt_prefix[par->family], par->target->name,
1002				    par->target->table, par->table);
1003		return -EINVAL;
1004	}
1005	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
1006		char used[64], allow[64];
1007
1008		pr_info_ratelimited("%s_tables: %s target: used from hooks %s, but only usable from %s\n",
1009				    xt_prefix[par->family], par->target->name,
1010				    textify_hooks(used, sizeof(used),
1011						  par->hook_mask, par->family),
1012				    textify_hooks(allow, sizeof(allow),
1013						  par->target->hooks,
1014						  par->family));
1015		return -EINVAL;
1016	}
1017	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
1018		pr_info_ratelimited("%s_tables: %s target: only valid for protocol %u\n",
1019				    xt_prefix[par->family], par->target->name,
1020				    par->target->proto);
1021		return -EINVAL;
1022	}
1023	if (par->target->checkentry != NULL) {
1024		ret = par->target->checkentry(par);
1025		if (ret < 0)
1026			return ret;
1027		else if (ret > 0)
1028			/* Flag up potential errors. */
1029			return -EIO;
1030	}
1031	return 0;
1032}
1033EXPORT_SYMBOL_GPL(xt_check_target);
1034
1035/**
1036 * xt_copy_counters - copy counters and metadata from a sockptr_t
1037 *
1038 * @arg: src sockptr
1039 * @len: alleged size of userspace memory
1040 * @info: where to store the xt_counters_info metadata
1041 *
1042 * Copies the counter metadata from @arg and stores it in @info.
1043 *
1044 * vmallocs memory to hold the counters, then copies the counter data
1045 * from @arg to the new memory and returns a pointer to it.
1046 *
1047 * If called from a compat syscall, @info gets converted automatically to the
1048 * 64bit representation.
1049 *
1050 * @len must cover the metadata plus num_counters * sizeof(struct xt_counters).
1051 *
1052 * Return: returns pointer that caller has to test via IS_ERR().
1053 * If IS_ERR is false, caller has to vfree the pointer.
1054 */
1055void *xt_copy_counters(sockptr_t arg, unsigned int len,
1056		       struct xt_counters_info *info)
1057{
1058	size_t offset;
1059	void *mem;
1060	u64 size;
1061
1062#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
1063	if (in_compat_syscall()) {
1064		/* structures only differ in size due to alignment */
1065		struct compat_xt_counters_info compat_tmp;
1066
1067		if (len <= sizeof(compat_tmp))
1068			return ERR_PTR(-EINVAL);
1069
1070		len -= sizeof(compat_tmp);
1071		if (copy_from_sockptr(&compat_tmp, arg, sizeof(compat_tmp)) != 0)
1072			return ERR_PTR(-EFAULT);
1073
1074		memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
1075		info->num_counters = compat_tmp.num_counters;
1076		offset = sizeof(compat_tmp);
1077	} else
1078#endif
1079	{
1080		if (len <= sizeof(*info))
1081			return ERR_PTR(-EINVAL);
1082
1083		len -= sizeof(*info);
1084		if (copy_from_sockptr(info, arg, sizeof(*info)) != 0)
1085			return ERR_PTR(-EFAULT);
1086
1087		offset = sizeof(*info);
1088	}
1089	info->name[sizeof(info->name) - 1] = '\0';
1090
1091	size = sizeof(struct xt_counters);
1092	size *= info->num_counters;
1093
1094	if (size != (u64)len)
1095		return ERR_PTR(-EINVAL);
1096
1097	mem = vmalloc(len);
1098	if (!mem)
1099		return ERR_PTR(-ENOMEM);
1100
1101	if (copy_from_sockptr_offset(mem, arg, offset, len) == 0)
1102		return mem;
1103
1104	vfree(mem);
1105	return ERR_PTR(-EFAULT);
1106}
1107EXPORT_SYMBOL_GPL(xt_copy_counters);
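/*
 * Illustrative sketch of the caller side (editorial, simplified from the
 * do_add_counters() pattern in the per-family code; names assumed):
 *
 *	struct xt_counters_info tmp;
 *	struct xt_counters *paddc;
 *
 *	paddc = xt_copy_counters(arg, len, &tmp);
 *	if (IS_ERR(paddc))
 *		return PTR_ERR(paddc);
 *	// ... add tmp.num_counters entries from paddc to the table ...
 *	vfree(paddc);
 */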
1108
1109#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
1110int xt_compat_target_offset(const struct xt_target *target)
1111{
1112	u_int16_t csize = target->compatsize ? : target->targetsize;
1113	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
1114}
1115EXPORT_SYMBOL_GPL(xt_compat_target_offset);
1116
1117void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
1118				unsigned int *size)
1119{
1120	const struct xt_target *target = t->u.kernel.target;
1121	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
1122	int off = xt_compat_target_offset(target);
1123	u_int16_t tsize = ct->u.user.target_size;
1124	char name[sizeof(t->u.user.name)];
1125
1126	t = *dstptr;
1127	memcpy(t, ct, sizeof(*ct));
1128	if (target->compat_from_user)
1129		target->compat_from_user(t->data, ct->data);
1130	else
1131		memcpy(t->data, ct->data, tsize - sizeof(*ct));
1132
1133	tsize += off;
1134	t->u.user.target_size = tsize;
1135	strlcpy(name, target->name, sizeof(name));
1136	module_put(target->me);
1137	strncpy(t->u.user.name, name, sizeof(t->u.user.name));
1138
1139	*size += off;
1140	*dstptr += tsize;
1141}
1142EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
1143
1144int xt_compat_target_to_user(const struct xt_entry_target *t,
1145			     void __user **dstptr, unsigned int *size)
1146{
1147	const struct xt_target *target = t->u.kernel.target;
1148	struct compat_xt_entry_target __user *ct = *dstptr;
1149	int off = xt_compat_target_offset(target);
1150	u_int16_t tsize = t->u.user.target_size - off;
1151
1152	if (XT_OBJ_TO_USER(ct, t, target, tsize))
1153		return -EFAULT;
1154
1155	if (target->compat_to_user) {
1156		if (target->compat_to_user((void __user *)ct->data, t->data))
1157			return -EFAULT;
1158	} else {
1159		if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
1160			return -EFAULT;
1161	}
1162
1163	*size -= off;
1164	*dstptr += tsize;
1165	return 0;
1166}
1167EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
1168#endif
1169
1170struct xt_table_info *xt_alloc_table_info(unsigned int size)
1171{
1172	struct xt_table_info *info = NULL;
1173	size_t sz = sizeof(*info) + size;
1174
1175	if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
1176		return NULL;
1177
1178	info = kvmalloc(sz, GFP_KERNEL_ACCOUNT);
1179	if (!info)
1180		return NULL;
1181
1182	memset(info, 0, sizeof(*info));
1183	info->size = size;
1184	return info;
1185}
1186EXPORT_SYMBOL(xt_alloc_table_info);
1187
1188void xt_free_table_info(struct xt_table_info *info)
1189{
1190	int cpu;
1191
1192	if (info->jumpstack != NULL) {
1193		for_each_possible_cpu(cpu)
1194			kvfree(info->jumpstack[cpu]);
1195		kvfree(info->jumpstack);
1196	}
1197
1198	kvfree(info);
1199}
1200EXPORT_SYMBOL(xt_free_table_info);
1201
1202struct xt_table *xt_find_table(struct net *net, u8 af, const char *name)
1203{
1204	struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
1205	struct xt_table *t;
1206
1207	mutex_lock(&xt[af].mutex);
1208	list_for_each_entry(t, &xt_net->tables[af], list) {
1209		if (strcmp(t->name, name) == 0) {
1210			mutex_unlock(&xt[af].mutex);
1211			return t;
1212		}
1213	}
1214	mutex_unlock(&xt[af].mutex);
1215	return NULL;
1216}
1217EXPORT_SYMBOL(xt_find_table);
1218
1219/* Find table by name, grabs mutex & ref.  Returns ERR_PTR on error. */
1220struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
1221				    const char *name)
1222{
1223	struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
1224	struct xt_table *t, *found = NULL;
1225
1226	mutex_lock(&xt[af].mutex);
1227	list_for_each_entry(t, &xt_net->tables[af], list)
1228		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
1229			return t;
1230
1231	if (net == &init_net)
1232		goto out;
1233
1234	/* Table doesn't exist in this netns, re-try init */
1235	xt_net = net_generic(&init_net, xt_pernet_id);
1236	list_for_each_entry(t, &xt_net->tables[af], list) {
1237		int err;
1238
1239		if (strcmp(t->name, name))
1240			continue;
1241		if (!try_module_get(t->me))
1242			goto out;
1243		mutex_unlock(&xt[af].mutex);
1244		err = t->table_init(net);
1245		if (err < 0) {
1246			module_put(t->me);
1247			return ERR_PTR(err);
1248		}
1249
1250		found = t;
1251
1252		mutex_lock(&xt[af].mutex);
1253		break;
1254	}
1255
1256	if (!found)
1257		goto out;
1258
1259	xt_net = net_generic(net, xt_pernet_id);
1260	/* and once again: */
1261	list_for_each_entry(t, &xt_net->tables[af], list)
1262		if (strcmp(t->name, name) == 0)
1263			return t;
1264
1265	module_put(found->me);
1266 out:
1267	mutex_unlock(&xt[af].mutex);
1268	return ERR_PTR(-ENOENT);
1269}
1270EXPORT_SYMBOL_GPL(xt_find_table_lock);
1271
1272struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
1273					    const char *name)
1274{
1275	struct xt_table *t = xt_find_table_lock(net, af, name);
1276
1277#ifdef CONFIG_MODULES
1278	if (IS_ERR(t)) {
1279		int err = request_module("%stable_%s", xt_prefix[af], name);
1280		if (err < 0)
1281			return ERR_PTR(err);
1282		t = xt_find_table_lock(net, af, name);
1283	}
1284#endif
1285
1286	return t;
1287}
1288EXPORT_SYMBOL_GPL(xt_request_find_table_lock);
1289
1290void xt_table_unlock(struct xt_table *table)
1291{
1292	mutex_unlock(&xt[table->af].mutex);
1293}
1294EXPORT_SYMBOL_GPL(xt_table_unlock);
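/*
 * Illustrative sketch (editorial): xt_find_table_lock() and
 * xt_request_find_table_lock() return with the per-AF mutex held and a
 * module reference taken, so the caller pairs them with
 * xt_table_unlock() and module_put():
 *
 *	struct xt_table *t = xt_request_find_table_lock(net, AF_INET, "filter");
 *
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);
 *	// ... inspect or replace t->private ...
 *	xt_table_unlock(t);
 *	module_put(t->me);
 */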
1295
1296#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
1297void xt_compat_lock(u_int8_t af)
1298{
1299	mutex_lock(&xt[af].compat_mutex);
1300}
1301EXPORT_SYMBOL_GPL(xt_compat_lock);
1302
1303void xt_compat_unlock(u_int8_t af)
1304{
1305	mutex_unlock(&xt[af].compat_mutex);
1306}
1307EXPORT_SYMBOL_GPL(xt_compat_unlock);
1308#endif
1309
1310DEFINE_PER_CPU(seqcount_t, xt_recseq);
1311EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
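/*
 * Editorial note: packet-path writers bracket their table traversal with
 * xt_write_recseq_begin()/xt_write_recseq_end() (see
 * include/linux/netfilter/x_tables.h), which makes this per-cpu sequence
 * odd while a traversal is in flight; xt_replace_table() below waits
 * until every cpu's sequence is even again before handing the old
 * xt_table_info back to the caller for freeing.
 */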
1312
1313struct static_key xt_tee_enabled __read_mostly;
1314EXPORT_SYMBOL_GPL(xt_tee_enabled);
1315
1316static int xt_jumpstack_alloc(struct xt_table_info *i)
1317{
1318	unsigned int size;
1319	int cpu;
1320
1321	size = sizeof(void **) * nr_cpu_ids;
1322	if (size > PAGE_SIZE)
1323		i->jumpstack = kvzalloc(size, GFP_KERNEL);
1324	else
1325		i->jumpstack = kzalloc(size, GFP_KERNEL);
1326	if (i->jumpstack == NULL)
1327		return -ENOMEM;
1328
1329	/* ruleset without jumps -- no stack needed */
1330	if (i->stacksize == 0)
1331		return 0;
1332
1333	/* Jumpstack needs to be able to record two full callchains, one
1334	 * from the first rule set traversal, plus one table reentrancy
1335	 * via -j TEE without clobbering the callchain that brought us to
1336	 * TEE target.
1337	 *
1338	 * This is done by allocating two jumpstacks per cpu, on reentry
1339	 * the upper half of the stack is used.
1340	 *
1341	 * see the jumpstack setup in ipt_do_table() for more details.
1342	 */
1343	size = sizeof(void *) * i->stacksize * 2u;
1344	for_each_possible_cpu(cpu) {
1345		i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
1346			cpu_to_node(cpu));
1347		if (i->jumpstack[cpu] == NULL)
1348			/*
1349			 * Freeing will be done later on by the callers. The
1350			 * chain is: xt_replace_table -> __do_replace ->
1351			 * do_replace -> xt_free_table_info.
1352			 */
1353			return -ENOMEM;
1354	}
1355
1356	return 0;
1357}
1358
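/*
 * Editorial note: on reentry via the TEE target the traversal code
 * offsets into the upper half of the double-height jumpstack, roughly
 * jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated)
 * in ipt_do_table(), so the two call chains never clobber each other.
 */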
1359struct xt_counters *xt_counters_alloc(unsigned int counters)
1360{
1361	struct xt_counters *mem;
1362
1363	if (counters == 0 || counters > INT_MAX / sizeof(*mem))
1364		return NULL;
1365
1366	counters *= sizeof(*mem);
1367	if (counters > XT_MAX_TABLE_SIZE)
1368		return NULL;
1369
1370	return vzalloc(counters);
1371}
1372EXPORT_SYMBOL(xt_counters_alloc);
1373
1374struct xt_table_info *
1375xt_replace_table(struct xt_table *table,
1376	      unsigned int num_counters,
1377	      struct xt_table_info *newinfo,
1378	      int *error)
1379{
1380	struct xt_table_info *private;
1381	unsigned int cpu;
1382	int ret;
1383
1384	ret = xt_jumpstack_alloc(newinfo);
1385	if (ret < 0) {
1386		*error = ret;
1387		return NULL;
1388	}
1389
1390	/* Do the substitution. */
1391	local_bh_disable();
1392	private = table->private;
1393
1394	/* Check inside lock: is the old number correct? */
1395	if (num_counters != private->number) {
1396		pr_debug("num_counters != table->private->number (%u/%u)\n",
1397			 num_counters, private->number);
1398		local_bh_enable();
1399		*error = -EAGAIN;
1400		return NULL;
1401	}
1402
1403	newinfo->initial_entries = private->initial_entries;
1404	/*
1405	 * Ensure contents of newinfo are visible before assigning to
1406	 * private.
1407	 */
1408	smp_wmb();
1409	table->private = newinfo;
1410
1411	/* make sure all cpus see new ->private value */
1412	smp_mb();
1413
1414	/*
1415	 * Even though table entries have now been swapped, other CPU's
1416	 * may still be using the old entries...
1417	 */
1418	local_bh_enable();
1419
1420	/* ... so wait for even xt_recseq on all cpus */
1421	for_each_possible_cpu(cpu) {
1422		seqcount_t *s = &per_cpu(xt_recseq, cpu);
1423		u32 seq = raw_read_seqcount(s);
1424
1425		if (seq & 1) {
1426			do {
1427				cond_resched();
1428				cpu_relax();
1429			} while (seq == raw_read_seqcount(s));
1430		}
1431	}
1432
1433	audit_log_nfcfg(table->name, table->af, private->number,
1434			!private->number ? AUDIT_XT_OP_REGISTER :
1435					   AUDIT_XT_OP_REPLACE,
1436			GFP_KERNEL);
1437	return private;
1438}
1439EXPORT_SYMBOL_GPL(xt_replace_table);
1440
1441struct xt_table *xt_register_table(struct net *net,
1442				   const struct xt_table *input_table,
1443				   struct xt_table_info *bootstrap,
1444				   struct xt_table_info *newinfo)
1445{
1446	struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
1447	struct xt_table_info *private;
1448	struct xt_table *t, *table;
1449	int ret;
1450
1451	/* Don't add one object to multiple lists. */
1452	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
1453	if (!table) {
1454		ret = -ENOMEM;
1455		goto out;
1456	}
1457
1458	mutex_lock(&xt[table->af].mutex);
1459	/* Don't autoload: we'd eat our tail... */
1460	list_for_each_entry(t, &xt_net->tables[table->af], list) {
1461		if (strcmp(t->name, table->name) == 0) {
1462			ret = -EEXIST;
1463			goto unlock;
1464		}
1465	}
1466
1467	/* Simplifies replace_table code. */
1468	table->private = bootstrap;
1469
1470	if (!xt_replace_table(table, 0, newinfo, &ret))
1471		goto unlock;
1472
1473	private = table->private;
1474	pr_debug("table->private->number = %u\n", private->number);
1475
1476	/* save number of initial entries */
1477	private->initial_entries = private->number;
1478
1479	list_add(&table->list, &xt_net->tables[table->af]);
1480	mutex_unlock(&xt[table->af].mutex);
1481	return table;
1482
1483unlock:
1484	mutex_unlock(&xt[table->af].mutex);
1485	kfree(table);
1486out:
1487	return ERR_PTR(ret);
1488}
1489EXPORT_SYMBOL_GPL(xt_register_table);
1490
1491void *xt_unregister_table(struct xt_table *table)
1492{
1493	struct xt_table_info *private;
1494
1495	mutex_lock(&xt[table->af].mutex);
1496	private = table->private;
1497	list_del(&table->list);
1498	mutex_unlock(&xt[table->af].mutex);
1499	audit_log_nfcfg(table->name, table->af, private->number,
1500			AUDIT_XT_OP_UNREGISTER, GFP_KERNEL);
1501	kfree(table->ops);
1502	kfree(table);
1503
1504	return private;
1505}
1506EXPORT_SYMBOL_GPL(xt_unregister_table);
1507
1508#ifdef CONFIG_PROC_FS
1509static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
1510{
1511	u8 af = (unsigned long)PDE_DATA(file_inode(seq->file));
1512	struct net *net = seq_file_net(seq);
1513	struct xt_pernet *xt_net;
1514
1515	xt_net = net_generic(net, xt_pernet_id);
1516
1517	mutex_lock(&xt[af].mutex);
1518	return seq_list_start(&xt_net->tables[af], *pos);
1519}
1520
1521static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1522{
1523	u8 af = (unsigned long)PDE_DATA(file_inode(seq->file));
1524	struct net *net = seq_file_net(seq);
1525	struct xt_pernet *xt_net;
1526
1527	xt_net = net_generic(net, xt_pernet_id);
1528
1529	return seq_list_next(v, &xt_net->tables[af], pos);
1530}
1531
1532static void xt_table_seq_stop(struct seq_file *seq, void *v)
1533{
1534	u_int8_t af = (unsigned long)PDE_DATA(file_inode(seq->file));
1535
1536	mutex_unlock(&xt[af].mutex);
1537}
1538
1539static int xt_table_seq_show(struct seq_file *seq, void *v)
1540{
1541	struct xt_table *table = list_entry(v, struct xt_table, list);
1542
1543	if (*table->name)
1544		seq_printf(seq, "%s\n", table->name);
1545	return 0;
1546}
1547
1548static const struct seq_operations xt_table_seq_ops = {
1549	.start	= xt_table_seq_start,
1550	.next	= xt_table_seq_next,
1551	.stop	= xt_table_seq_stop,
1552	.show	= xt_table_seq_show,
1553};
1554
1555/*
1556 * Traverse state for ip{,6}_{tables,matches} to help cross
1557 * the multi-AF mutexes.
1558 */
1559struct nf_mttg_trav {
1560	struct list_head *head, *curr;
1561	uint8_t class;
1562};
1563
1564enum {
1565	MTTG_TRAV_INIT,
1566	MTTG_TRAV_NFP_UNSPEC,
1567	MTTG_TRAV_NFP_SPEC,
1568	MTTG_TRAV_DONE,
1569};
1570
1571static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
1572    bool is_target)
1573{
1574	static const uint8_t next_class[] = {
1575		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
1576		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
1577	};
1578	uint8_t nfproto = (unsigned long)PDE_DATA(file_inode(seq->file));
1579	struct nf_mttg_trav *trav = seq->private;
1580
1581	if (ppos != NULL)
1582		++(*ppos);
1583
1584	switch (trav->class) {
1585	case MTTG_TRAV_INIT:
1586		trav->class = MTTG_TRAV_NFP_UNSPEC;
1587		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
1588		trav->head = trav->curr = is_target ?
1589			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
1590 		break;
1591	case MTTG_TRAV_NFP_UNSPEC:
1592		trav->curr = trav->curr->next;
1593		if (trav->curr != trav->head)
1594			break;
1595		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
1596		mutex_lock(&xt[nfproto].mutex);
1597		trav->head = trav->curr = is_target ?
1598			&xt[nfproto].target : &xt[nfproto].match;
1599		trav->class = next_class[trav->class];
1600		break;
1601	case MTTG_TRAV_NFP_SPEC:
1602		trav->curr = trav->curr->next;
1603		if (trav->curr != trav->head)
1604			break;
1605		fallthrough;
1606	default:
1607		return NULL;
1608	}
1609	return trav;
1610}
1611
1612static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
1613    bool is_target)
1614{
1615	struct nf_mttg_trav *trav = seq->private;
1616	unsigned int j;
1617
1618	trav->class = MTTG_TRAV_INIT;
1619	for (j = 0; j < *pos; ++j)
1620		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
1621			return NULL;
1622	return trav;
1623}
1624
1625static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
1626{
1627	uint8_t nfproto = (unsigned long)PDE_DATA(file_inode(seq->file));
1628	struct nf_mttg_trav *trav = seq->private;
1629
1630	switch (trav->class) {
1631	case MTTG_TRAV_NFP_UNSPEC:
1632		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
1633		break;
1634	case MTTG_TRAV_NFP_SPEC:
1635		mutex_unlock(&xt[nfproto].mutex);
1636		break;
1637	}
1638}
1639
1640static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
1641{
1642	return xt_mttg_seq_start(seq, pos, false);
1643}
1644
1645static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
1646{
1647	return xt_mttg_seq_next(seq, v, ppos, false);
1648}
1649
1650static int xt_match_seq_show(struct seq_file *seq, void *v)
1651{
1652	const struct nf_mttg_trav *trav = seq->private;
1653	const struct xt_match *match;
1654
1655	switch (trav->class) {
1656	case MTTG_TRAV_NFP_UNSPEC:
1657	case MTTG_TRAV_NFP_SPEC:
1658		if (trav->curr == trav->head)
1659			return 0;
1660		match = list_entry(trav->curr, struct xt_match, list);
1661		if (*match->name)
1662			seq_printf(seq, "%s\n", match->name);
1663	}
1664	return 0;
1665}
1666
1667static const struct seq_operations xt_match_seq_ops = {
1668	.start	= xt_match_seq_start,
1669	.next	= xt_match_seq_next,
1670	.stop	= xt_mttg_seq_stop,
1671	.show	= xt_match_seq_show,
1672};
1673
1674static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
1675{
1676	return xt_mttg_seq_start(seq, pos, true);
1677}
1678
1679static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
1680{
1681	return xt_mttg_seq_next(seq, v, ppos, true);
1682}
1683
1684static int xt_target_seq_show(struct seq_file *seq, void *v)
1685{
1686	const struct nf_mttg_trav *trav = seq->private;
1687	const struct xt_target *target;
1688
1689	switch (trav->class) {
1690	case MTTG_TRAV_NFP_UNSPEC:
1691	case MTTG_TRAV_NFP_SPEC:
1692		if (trav->curr == trav->head)
1693			return 0;
1694		target = list_entry(trav->curr, struct xt_target, list);
1695		if (*target->name)
1696			seq_printf(seq, "%s\n", target->name);
1697	}
1698	return 0;
1699}
1700
1701static const struct seq_operations xt_target_seq_ops = {
1702	.start	= xt_target_seq_start,
1703	.next	= xt_target_seq_next,
1704	.stop	= xt_mttg_seq_stop,
1705	.show	= xt_target_seq_show,
1706};
1707
1708#define FORMAT_TABLES	"_tables_names"
1709#define FORMAT_MATCHES	"_tables_matches"
1710#define FORMAT_TARGETS	"_tables_targets"
1711
1712#endif /* CONFIG_PROC_FS */
1713
1714/**
1715 * xt_hook_ops_alloc - set up hooks for a new table
1716 * @table:	table with metadata needed to set up hooks
1717 * @fn:		Hook function
1718 *
1719 * This function will create the nf_hook_ops that the x_table needs
1720 * to hand to xt_hook_link_net().
1721 */
1722struct nf_hook_ops *
1723xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
1724{
1725	unsigned int hook_mask = table->valid_hooks;
1726	uint8_t i, num_hooks = hweight32(hook_mask);
1727	uint8_t hooknum;
1728	struct nf_hook_ops *ops;
1729
1730	if (!num_hooks)
1731		return ERR_PTR(-EINVAL);
1732
1733	ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
1734	if (ops == NULL)
1735		return ERR_PTR(-ENOMEM);
1736
1737	for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
1738	     hook_mask >>= 1, ++hooknum) {
1739		if (!(hook_mask & 1))
1740			continue;
1741		ops[i].hook     = fn;
1742		ops[i].pf       = table->af;
1743		ops[i].hooknum  = hooknum;
1744		ops[i].priority = table->priority;
1745		++i;
1746	}
1747
1748	return ops;
1749}
1750EXPORT_SYMBOL_GPL(xt_hook_ops_alloc);
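/*
 * Illustrative sketch (editorial, simplified from the iptable_* table
 * modules; "packet_filter" and "ipt_do_table" are assumed names):
 *
 *	ops = xt_hook_ops_alloc(&packet_filter, ipt_do_table);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *	// later, per netns:
 *	err = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
 */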
1751
1752int xt_proto_init(struct net *net, u_int8_t af)
1753{
1754#ifdef CONFIG_PROC_FS
1755	char buf[XT_FUNCTION_MAXNAMELEN];
1756	struct proc_dir_entry *proc;
1757	kuid_t root_uid;
1758	kgid_t root_gid;
1759#endif
1760
1761	if (af >= ARRAY_SIZE(xt_prefix))
1762		return -EINVAL;
1763
1764
1765#ifdef CONFIG_PROC_FS
1766	root_uid = make_kuid(net->user_ns, 0);
1767	root_gid = make_kgid(net->user_ns, 0);
1768
1769	strlcpy(buf, xt_prefix[af], sizeof(buf));
1770	strlcat(buf, FORMAT_TABLES, sizeof(buf));
1771	proc = proc_create_net_data(buf, 0440, net->proc_net, &xt_table_seq_ops,
1772			sizeof(struct seq_net_private),
1773			(void *)(unsigned long)af);
1774	if (!proc)
1775		goto out;
1776	if (uid_valid(root_uid) && gid_valid(root_gid))
1777		proc_set_user(proc, root_uid, root_gid);
1778
1779	strlcpy(buf, xt_prefix[af], sizeof(buf));
1780	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1781	proc = proc_create_seq_private(buf, 0440, net->proc_net,
1782			&xt_match_seq_ops, sizeof(struct nf_mttg_trav),
1783			(void *)(unsigned long)af);
1784	if (!proc)
1785		goto out_remove_tables;
1786	if (uid_valid(root_uid) && gid_valid(root_gid))
1787		proc_set_user(proc, root_uid, root_gid);
1788
1789	strlcpy(buf, xt_prefix[af], sizeof(buf));
1790	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1791	proc = proc_create_seq_private(buf, 0440, net->proc_net,
1792			 &xt_target_seq_ops, sizeof(struct nf_mttg_trav),
1793			 (void *)(unsigned long)af);
1794	if (!proc)
1795		goto out_remove_matches;
1796	if (uid_valid(root_uid) && gid_valid(root_gid))
1797		proc_set_user(proc, root_uid, root_gid);
1798#endif
1799
1800	return 0;
1801
1802#ifdef CONFIG_PROC_FS
1803out_remove_matches:
1804	strlcpy(buf, xt_prefix[af], sizeof(buf));
1805	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1806	remove_proc_entry(buf, net->proc_net);
1807
1808out_remove_tables:
1809	strlcpy(buf, xt_prefix[af], sizeof(buf));
1810	strlcat(buf, FORMAT_TABLES, sizeof(buf));
1811	remove_proc_entry(buf, net->proc_net);
1812out:
1813	return -1;
1814#endif
1815}
1816EXPORT_SYMBOL_GPL(xt_proto_init);
1817
1818void xt_proto_fini(struct net *net, u_int8_t af)
1819{
1820#ifdef CONFIG_PROC_FS
1821	char buf[XT_FUNCTION_MAXNAMELEN];
1822
1823	strlcpy(buf, xt_prefix[af], sizeof(buf));
1824	strlcat(buf, FORMAT_TABLES, sizeof(buf));
1825	remove_proc_entry(buf, net->proc_net);
1826
1827	strlcpy(buf, xt_prefix[af], sizeof(buf));
1828	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1829	remove_proc_entry(buf, net->proc_net);
1830
1831	strlcpy(buf, xt_prefix[af], sizeof(buf));
1832	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1833	remove_proc_entry(buf, net->proc_net);
1834#endif /*CONFIG_PROC_FS*/
1835}
1836EXPORT_SYMBOL_GPL(xt_proto_fini);
1837
1838/**
1839 * xt_percpu_counter_alloc - allocate x_tables rule counter
1840 *
1841 * @state: pointer to xt_percpu allocation state
1842 * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
1843 *
1844 * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
1845 * contain the address of the real (percpu) counter.
1846 *
1847 * Rule evaluation needs to use xt_get_this_cpu_counter() helper
1848 * to fetch the real percpu counter.
1849 *
1850 * To speed up allocation and improve data locality, a 4kb block is
1851 * allocated.  Freeing any counter may free an entire block, so all
1852 * counters allocated using the same state must be freed at the same
1853 * time.
1854 *
1855 * xt_percpu_counter_alloc_state contains the base address of the
1856 * allocated page and the current sub-offset.
1857 *
1858 * returns false on error.
1859 */
1860bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
1861			     struct xt_counters *counter)
1862{
1863	BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));
1864
1865	if (nr_cpu_ids <= 1)
1866		return true;
1867
1868	if (!state->mem) {
1869		state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
1870					    XT_PCPU_BLOCK_SIZE);
1871		if (!state->mem)
1872			return false;
1873	}
1874	counter->pcnt = (__force unsigned long)(state->mem + state->off);
1875	state->off += sizeof(*counter);
1876	if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
1877		state->mem = NULL;
1878		state->off = 0;
1879	}
1880	return true;
1881}
1882EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);
1883
1884void xt_percpu_counter_free(struct xt_counters *counters)
1885{
1886	unsigned long pcnt = counters->pcnt;
1887
1888	if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
1889		free_percpu((void __percpu *)pcnt);
1890}
1891EXPORT_SYMBOL_GPL(xt_percpu_counter_free);
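/*
 * Illustrative sketch (editorial): rule-checking loops thread one
 * allocation state through all rules, the packet path dereferences the
 * stashed percpu address via helpers from x_tables.h, and teardown frees
 * each counter (names simplified):
 *
 *	struct xt_percpu_counter_alloc_state state = {};
 *
 *	if (!xt_percpu_counter_alloc(&state, &e->counters))
 *		return -ENOMEM;
 *	// ... in the packet path:
 *	struct xt_counters *ctr = xt_get_this_cpu_counter(&e->counters);
 *	ADD_COUNTER(*ctr, skb->len, 1);
 *	// ... on teardown:
 *	xt_percpu_counter_free(&e->counters);
 */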
1892
1893static int __net_init xt_net_init(struct net *net)
1894{
1895	struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
1896	int i;
1897
1898	for (i = 0; i < NFPROTO_NUMPROTO; i++)
1899		INIT_LIST_HEAD(&xt_net->tables[i]);
1900	return 0;
1901}
1902
1903static void __net_exit xt_net_exit(struct net *net)
1904{
1905	struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
1906	int i;
1907
1908	for (i = 0; i < NFPROTO_NUMPROTO; i++)
1909		WARN_ON_ONCE(!list_empty(&xt_net->tables[i]));
1910}
1911
1912static struct pernet_operations xt_net_ops = {
1913	.init = xt_net_init,
1914	.exit = xt_net_exit,
1915	.id   = &xt_pernet_id,
1916	.size = sizeof(struct xt_pernet),
1917};
1918
1919static int __init xt_init(void)
1920{
1921	unsigned int i;
1922	int rv;
1923
1924	for_each_possible_cpu(i) {
1925		seqcount_init(&per_cpu(xt_recseq, i));
1926	}
1927
1928	xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
1929	if (!xt)
1930		return -ENOMEM;
1931
1932	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
1933		mutex_init(&xt[i].mutex);
1934#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
1935		mutex_init(&xt[i].compat_mutex);
1936		xt[i].compat_tab = NULL;
1937#endif
1938		INIT_LIST_HEAD(&xt[i].target);
1939		INIT_LIST_HEAD(&xt[i].match);
1940	}
1941	rv = register_pernet_subsys(&xt_net_ops);
1942	if (rv < 0)
1943		kfree(xt);
1944	return rv;
1945}
1946
1947static void __exit xt_fini(void)
1948{
1949	unregister_pernet_subsys(&xt_net_ops);
1950	kfree(xt);
1951}
1952
1953module_init(xt_init);
1954module_exit(xt_fini);
1955
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * x_tables core - Backend for {ip,ip6,arp}_tables
   4 *
   5 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
   6 * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
   7 *
   8 * Based on existing ip_tables code which is
   9 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
  10 *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
  11 */
  12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  13#include <linux/kernel.h>
  14#include <linux/module.h>
  15#include <linux/socket.h>
  16#include <linux/net.h>
  17#include <linux/proc_fs.h>
  18#include <linux/seq_file.h>
  19#include <linux/string.h>
  20#include <linux/vmalloc.h>
  21#include <linux/mutex.h>
  22#include <linux/mm.h>
  23#include <linux/slab.h>
  24#include <linux/audit.h>
  25#include <linux/user_namespace.h>
  26#include <net/net_namespace.h>
  27#include <net/netns/generic.h>
  28
  29#include <linux/netfilter/x_tables.h>
  30#include <linux/netfilter_arp.h>
  31#include <linux/netfilter_ipv4/ip_tables.h>
  32#include <linux/netfilter_ipv6/ip6_tables.h>
  33#include <linux/netfilter_arp/arp_tables.h>
  34
  35MODULE_LICENSE("GPL");
  36MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
  37MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
  38
  39#define XT_PCPU_BLOCK_SIZE 4096
  40#define XT_MAX_TABLE_SIZE	(512 * 1024 * 1024)
  41
  42struct xt_template {
  43	struct list_head list;
  44
  45	/* called when table is needed in the given netns */
  46	int (*table_init)(struct net *net);
  47
  48	struct module *me;
  49
  50	/* A unique name... */
  51	char name[XT_TABLE_MAXNAMELEN];
  52};
  53
  54static struct list_head xt_templates[NFPROTO_NUMPROTO];
  55
  56struct xt_pernet {
  57	struct list_head tables[NFPROTO_NUMPROTO];
  58};
  59
  60struct compat_delta {
  61	unsigned int offset; /* offset in kernel */
  62	int delta; /* delta in 32bit user land */
  63};
  64
  65struct xt_af {
  66	struct mutex mutex;
  67	struct list_head match;
  68	struct list_head target;
  69#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
  70	struct mutex compat_mutex;
  71	struct compat_delta *compat_tab;
  72	unsigned int number; /* number of slots in compat_tab[] */
  73	unsigned int cur; /* number of used slots in compat_tab[] */
  74#endif
  75};
  76
  77static unsigned int xt_pernet_id __read_mostly;
  78static struct xt_af *xt __read_mostly;
  79
  80static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
  81	[NFPROTO_UNSPEC] = "x",
  82	[NFPROTO_IPV4]   = "ip",
  83	[NFPROTO_ARP]    = "arp",
  84	[NFPROTO_BRIDGE] = "eb",
  85	[NFPROTO_IPV6]   = "ip6",
  86};
  87
  88/* Registration hooks for targets. */
  89int xt_register_target(struct xt_target *target)
  90{
  91	u_int8_t af = target->family;
  92
  93	mutex_lock(&xt[af].mutex);
  94	list_add(&target->list, &xt[af].target);
  95	mutex_unlock(&xt[af].mutex);
  96	return 0;
  97}
  98EXPORT_SYMBOL(xt_register_target);
  99
 100void
 101xt_unregister_target(struct xt_target *target)
 102{
 103	u_int8_t af = target->family;
 104
 105	mutex_lock(&xt[af].mutex);
 106	list_del(&target->list);
 107	mutex_unlock(&xt[af].mutex);
 108}
 109EXPORT_SYMBOL(xt_unregister_target);
 110
 111int
 112xt_register_targets(struct xt_target *target, unsigned int n)
 113{
 114	unsigned int i;
 115	int err = 0;
 116
 117	for (i = 0; i < n; i++) {
 118		err = xt_register_target(&target[i]);
 119		if (err)
 120			goto err;
 121	}
 122	return err;
 123
 124err:
 125	if (i > 0)
 126		xt_unregister_targets(target, i);
 127	return err;
 128}
 129EXPORT_SYMBOL(xt_register_targets);
 130
 131void
 132xt_unregister_targets(struct xt_target *target, unsigned int n)
 133{
 134	while (n-- > 0)
 135		xt_unregister_target(&target[n]);
 136}
 137EXPORT_SYMBOL(xt_unregister_targets);
 138
 139int xt_register_match(struct xt_match *match)
 140{
 141	u_int8_t af = match->family;
 142
 143	mutex_lock(&xt[af].mutex);
 144	list_add(&match->list, &xt[af].match);
 145	mutex_unlock(&xt[af].mutex);
 146	return 0;
 147}
 148EXPORT_SYMBOL(xt_register_match);
 149
 150void
 151xt_unregister_match(struct xt_match *match)
 152{
 153	u_int8_t af = match->family;
 154
 155	mutex_lock(&xt[af].mutex);
 156	list_del(&match->list);
 157	mutex_unlock(&xt[af].mutex);
 158}
 159EXPORT_SYMBOL(xt_unregister_match);
 160
 161int
 162xt_register_matches(struct xt_match *match, unsigned int n)
 163{
 164	unsigned int i;
 165	int err = 0;
 166
 167	for (i = 0; i < n; i++) {
 168		err = xt_register_match(&match[i]);
 169		if (err)
 170			goto err;
 171	}
 172	return err;
 173
 174err:
 175	if (i > 0)
 176		xt_unregister_matches(match, i);
 177	return err;
 178}
 179EXPORT_SYMBOL(xt_register_matches);
 180
 181void
 182xt_unregister_matches(struct xt_match *match, unsigned int n)
 183{
 184	while (n-- > 0)
 185		xt_unregister_match(&match[n]);
 186}
 187EXPORT_SYMBOL(xt_unregister_matches);
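/*
 * A minimal usage sketch of the register/unregister helpers above; the
 * "foo" match, its match function and its info struct are hypothetical:
 *
 *	static struct xt_match foo_mt_reg[] __read_mostly = {
 *		{
 *			.name      = "foo",
 *			.revision  = 0,
 *			.family    = NFPROTO_IPV4,
 *			.match     = foo_mt,
 *			.matchsize = sizeof(struct xt_foo_mtinfo),
 *			.me        = THIS_MODULE,
 *		},
 *	};
 *
 *	static int __init foo_mt_init(void)
 *	{
 *		return xt_register_matches(foo_mt_reg, ARRAY_SIZE(foo_mt_reg));
 *	}
 *
 *	static void __exit foo_mt_exit(void)
 *	{
 *		xt_unregister_matches(foo_mt_reg, ARRAY_SIZE(foo_mt_reg));
 *	}
 */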
 188
 189
 190/*
 191 * These are weird, but module loading must not be done with the mutex
 192 * held (since the loaded module will register itself), and we have to
 193 * have a single function to use.
 194 */
 195
 196/* Find match, grabs ref.  Returns ERR_PTR() on error. */
 197struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
 198{
 199	struct xt_match *m;
 200	int err = -ENOENT;
 201
 202	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
 203		return ERR_PTR(-EINVAL);
 204
 205	mutex_lock(&xt[af].mutex);
 206	list_for_each_entry(m, &xt[af].match, list) {
 207		if (strcmp(m->name, name) == 0) {
 208			if (m->revision == revision) {
 209				if (try_module_get(m->me)) {
 210					mutex_unlock(&xt[af].mutex);
 211					return m;
 212				}
 213			} else
 214				err = -EPROTOTYPE; /* Found something. */
 215		}
 216	}
 217	mutex_unlock(&xt[af].mutex);
 218
 219	if (af != NFPROTO_UNSPEC)
 220		/* Try searching again in the family-independent list */
 221		return xt_find_match(NFPROTO_UNSPEC, name, revision);
 222
 223	return ERR_PTR(err);
 224}
 225EXPORT_SYMBOL(xt_find_match);
 226
 227struct xt_match *
 228xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
 229{
 230	struct xt_match *match;
 231
 232	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
 233		return ERR_PTR(-EINVAL);
 234
 235	match = xt_find_match(nfproto, name, revision);
 236	if (IS_ERR(match)) {
 237		request_module("%st_%s", xt_prefix[nfproto], name);
 238		match = xt_find_match(nfproto, name, revision);
 239	}
 240
 241	return match;
 242}
 243EXPORT_SYMBOL_GPL(xt_request_find_match);
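/*
 * Callers use the lookup helpers above like this (sketch; the held
 * module reference must be dropped with module_put() when done):
 *
 *	struct xt_match *m;
 *
 *	m = xt_request_find_match(NFPROTO_IPV4, "conntrack", 3);
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	...
 *	module_put(m->me);
 */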
 244
 245/* Find target, grabs ref.  Returns ERR_PTR() on error. */
 246static struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
 247{
 248	struct xt_target *t;
 249	int err = -ENOENT;
 250
 251	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
 252		return ERR_PTR(-EINVAL);
 253
 254	mutex_lock(&xt[af].mutex);
 255	list_for_each_entry(t, &xt[af].target, list) {
 256		if (strcmp(t->name, name) == 0) {
 257			if (t->revision == revision) {
 258				if (try_module_get(t->me)) {
 259					mutex_unlock(&xt[af].mutex);
 260					return t;
 261				}
 262			} else
 263				err = -EPROTOTYPE; /* Found something. */
 264		}
 265	}
 266	mutex_unlock(&xt[af].mutex);
 267
 268	if (af != NFPROTO_UNSPEC)
 269		/* Try searching again in the family-independent list */
 270		return xt_find_target(NFPROTO_UNSPEC, name, revision);
 271
 272	return ERR_PTR(err);
 273}
 274
 275struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
 276{
 277	struct xt_target *target;
 278
 279	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
 280		return ERR_PTR(-EINVAL);
 281
 282	target = xt_find_target(af, name, revision);
 283	if (IS_ERR(target)) {
 284		request_module("%st_%s", xt_prefix[af], name);
 285		target = xt_find_target(af, name, revision);
 286	}
 287
 288	return target;
 289}
 290EXPORT_SYMBOL_GPL(xt_request_find_target);
 291
 292
 293static int xt_obj_to_user(u16 __user *psize, u16 size,
 294			  void __user *pname, const char *name,
 295			  u8 __user *prev, u8 rev)
 296{
 297	if (put_user(size, psize))
 298		return -EFAULT;
 299	if (copy_to_user(pname, name, strlen(name) + 1))
 300		return -EFAULT;
 301	if (put_user(rev, prev))
 302		return -EFAULT;
 303
 304	return 0;
 305}
 306
 307#define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE)				\
 308	xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size,	\
 309		       U->u.user.name, K->u.kernel.TYPE->name,		\
 310		       &U->u.user.revision, K->u.kernel.TYPE->revision)
 311
 312int xt_data_to_user(void __user *dst, const void *src,
 313		    int usersize, int size, int aligned_size)
 314{
 315	usersize = usersize ? : size;
 316	if (copy_to_user(dst, src, usersize))
 317		return -EFAULT;
 318	if (usersize != aligned_size &&
 319	    clear_user(dst + usersize, aligned_size - usersize))
 320		return -EFAULT;
 321
 322	return 0;
 323}
 324EXPORT_SYMBOL_GPL(xt_data_to_user);
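/*
 * Example: a match with ->matchsize == 40 but ->usersize == 32 has the
 * first 32 bytes copied out and the remaining 8 cleared with
 * clear_user(), so kernel-private state in the tail of the structure
 * never leaks to userspace.
 */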
 325
 326#define XT_DATA_TO_USER(U, K, TYPE)					\
 327	xt_data_to_user(U->data, K->data,				\
 328			K->u.kernel.TYPE->usersize,			\
 329			K->u.kernel.TYPE->TYPE##size,			\
 330			XT_ALIGN(K->u.kernel.TYPE->TYPE##size))
 331
 332int xt_match_to_user(const struct xt_entry_match *m,
 333		     struct xt_entry_match __user *u)
 334{
 335	return XT_OBJ_TO_USER(u, m, match, 0) ||
 336	       XT_DATA_TO_USER(u, m, match);
 337}
 338EXPORT_SYMBOL_GPL(xt_match_to_user);
 339
 340int xt_target_to_user(const struct xt_entry_target *t,
 341		      struct xt_entry_target __user *u)
 342{
 343	return XT_OBJ_TO_USER(u, t, target, 0) ||
 344	       XT_DATA_TO_USER(u, t, target);
 345}
 346EXPORT_SYMBOL_GPL(xt_target_to_user);
 347
 348static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
 349{
 350	const struct xt_match *m;
 351	int have_rev = 0;
 352
 353	mutex_lock(&xt[af].mutex);
 354	list_for_each_entry(m, &xt[af].match, list) {
 355		if (strcmp(m->name, name) == 0) {
 356			if (m->revision > *bestp)
 357				*bestp = m->revision;
 358			if (m->revision == revision)
 359				have_rev = 1;
 360		}
 361	}
 362	mutex_unlock(&xt[af].mutex);
 363
 364	if (af != NFPROTO_UNSPEC && !have_rev)
 365		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
 366
 367	return have_rev;
 368}
 369
 370static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
 371{
 372	const struct xt_target *t;
 373	int have_rev = 0;
 374
 375	mutex_lock(&xt[af].mutex);
 376	list_for_each_entry(t, &xt[af].target, list) {
 377		if (strcmp(t->name, name) == 0) {
 378			if (t->revision > *bestp)
 379				*bestp = t->revision;
 380			if (t->revision == revision)
 381				have_rev = 1;
 382		}
 383	}
 384	mutex_unlock(&xt[af].mutex);
 385
 386	if (af != NFPROTO_UNSPEC && !have_rev)
 387		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
 388
 389	return have_rev;
 390}
 391
 392/* Returns true or false (if no such extension at all) */
 393int xt_find_revision(u8 af, const char *name, u8 revision, int target,
 394		     int *err)
 395{
 396	int have_rev, best = -1;
 397
 398	if (target == 1)
 399		have_rev = target_revfn(af, name, revision, &best);
 400	else
 401		have_rev = match_revfn(af, name, revision, &best);
 402
 403	/* Nothing at all?  Return 0 to try loading module. */
 404	if (best == -1) {
 405		*err = -ENOENT;
 406		return 0;
 407	}
 408
 409	*err = best;
 410	if (!have_rev)
 411		*err = -EPROTONOSUPPORT;
 412	return 1;
 413}
 414EXPORT_SYMBOL_GPL(xt_find_revision);
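/*
 * Worked example: if a "limit" match exists only at revision 0, then
 * xt_find_revision(af, "limit", 2, 0, &err) returns 1 with
 * err == -EPROTONOSUPPORT (the name exists but not that revision),
 * while an unknown name returns 0 with err == -ENOENT so the caller
 * can try request_module() and retry.
 */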
 415
 416static char *
 417textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
 418{
 419	static const char *const inetbr_names[] = {
 420		"PREROUTING", "INPUT", "FORWARD",
 421		"OUTPUT", "POSTROUTING", "BROUTING",
 422	};
 423	static const char *const arp_names[] = {
 424		"INPUT", "FORWARD", "OUTPUT",
 425	};
 426	const char *const *names;
 427	unsigned int i, max;
 428	char *p = buf;
 429	bool np = false;
 430	int res;
 431
 432	names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
 433	max   = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
 434	                                   ARRAY_SIZE(inetbr_names);
 435	*p = '\0';
 436	for (i = 0; i < max; ++i) {
 437		if (!(mask & (1 << i)))
 438			continue;
 439		res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
 440		if (res > 0) {
 441			size -= res;
 442			p += res;
 443		}
 444		np = true;
 445	}
 446
 447	return buf;
 448}
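/*
 * Example: for nfproto != NFPROTO_ARP and
 * mask == (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_FORWARD),
 * textify_hooks() fills buf with "PREROUTING/FORWARD".
 */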
 449
 450/**
 451 * xt_check_proc_name - check that name is suitable for /proc file creation
 452 *
 453 * @name: file name candidate
 454 * @size: length of buffer
 455 *
 456 * Some x_tables modules wish to create a file in /proc.
 457 * This function makes sure that the name is suitable for this
 458 * purpose: it checks that the name is NUL terminated and isn't a
 459 * 'special' name, like "." or "..".
 460 *
 461 * returns a negative errno on error or 0 if the name is usable.
 462 */
 463int xt_check_proc_name(const char *name, unsigned int size)
 464{
 465	if (name[0] == '\0')
 466		return -EINVAL;
 467
 468	if (strnlen(name, size) == size)
 469		return -ENAMETOOLONG;
 470
 471	if (strcmp(name, ".") == 0 ||
 472	    strcmp(name, "..") == 0 ||
 473	    strchr(name, '/'))
 474		return -EINVAL;
 475
 476	return 0;
 477}
 478EXPORT_SYMBOL(xt_check_proc_name);
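/*
 * Examples: a plain name such as "foo" passes; "", ".", "..", any name
 * containing '/', and a buffer not NUL terminated within @size bytes
 * are all rejected.
 */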
 479
 480int xt_check_match(struct xt_mtchk_param *par,
 481		   unsigned int size, u16 proto, bool inv_proto)
 482{
 483	int ret;
 484
 485	if (XT_ALIGN(par->match->matchsize) != size &&
 486	    par->match->matchsize != -1) {
 487		/*
 488		 * ebt_among is exempt from centralized matchsize checking
 489		 * because it uses a dynamic-size data set.
 490		 */
 491		pr_err_ratelimited("%s_tables: %s.%u match: invalid size %u (kernel) != (user) %u\n",
 492				   xt_prefix[par->family], par->match->name,
 493				   par->match->revision,
 494				   XT_ALIGN(par->match->matchsize), size);
 495		return -EINVAL;
 496	}
 497	if (par->match->table != NULL &&
 498	    strcmp(par->match->table, par->table) != 0) {
 499		pr_info_ratelimited("%s_tables: %s match: only valid in %s table, not %s\n",
 500				    xt_prefix[par->family], par->match->name,
 501				    par->match->table, par->table);
 502		return -EINVAL;
 503	}
 504	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
 505		char used[64], allow[64];
 506
 507		pr_info_ratelimited("%s_tables: %s match: used from hooks %s, but only valid from %s\n",
 508				    xt_prefix[par->family], par->match->name,
 509				    textify_hooks(used, sizeof(used),
 510						  par->hook_mask, par->family),
 511				    textify_hooks(allow, sizeof(allow),
 512						  par->match->hooks,
 513						  par->family));
 514		return -EINVAL;
 515	}
 516	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
 517		pr_info_ratelimited("%s_tables: %s match: only valid for protocol %u\n",
 518				    xt_prefix[par->family], par->match->name,
 519				    par->match->proto);
 520		return -EINVAL;
 521	}
 522	if (par->match->checkentry != NULL) {
 523		ret = par->match->checkentry(par);
 524		if (ret < 0)
 525			return ret;
 526		else if (ret > 0)
 527			/* Flag up potential errors. */
 528			return -EIO;
 529	}
 530	return 0;
 531}
 532EXPORT_SYMBOL_GPL(xt_check_match);
 533
 534/** xt_check_entry_match - check that matches end before start of target
 535 *
 536 * @match: beginning of xt_entry_match
 537 * @target: beginning of this rules target (alleged end of matches)
 538 * @alignment: alignment requirement of match structures
 539 *
 540 * Validates that all matches add up to the beginning of the target,
 541 * and that each match covers at least the base structure size.
 542 *
 543 * Return: 0 on success, negative errno on failure.
 544 */
 545static int xt_check_entry_match(const char *match, const char *target,
 546				const size_t alignment)
 547{
 548	const struct xt_entry_match *pos;
 549	int length = target - match;
 550
 551	if (length == 0) /* no matches */
 552		return 0;
 553
 554	pos = (struct xt_entry_match *)match;
 555	do {
 556		if ((unsigned long)pos % alignment)
 557			return -EINVAL;
 558
 559		if (length < (int)sizeof(struct xt_entry_match))
 560			return -EINVAL;
 561
 562		if (pos->u.match_size < sizeof(struct xt_entry_match))
 563			return -EINVAL;
 564
 565		if (pos->u.match_size > length)
 566			return -EINVAL;
 567
 568		length -= pos->u.match_size;
 569		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
 570	} while (length > 0);
 571
 572	return 0;
 573}
 574
 575/** xt_check_table_hooks - check hook entry points are sane
 576 *
 577 * @info: xt_table_info to check
 578 * @valid_hooks: hook entry points that we can enter from
 579 *
 580 * Validates that the hook entry points and underflows are set up.
 581 *
 582 * Return: 0 on success, negative errno on failure.
 583 */
 584int xt_check_table_hooks(const struct xt_table_info *info, unsigned int valid_hooks)
 585{
 586	const char *err = "unsorted underflow";
 587	unsigned int i, max_uflow, max_entry;
 588	bool check_hooks = false;
 589
 590	BUILD_BUG_ON(ARRAY_SIZE(info->hook_entry) != ARRAY_SIZE(info->underflow));
 591
 592	max_entry = 0;
 593	max_uflow = 0;
 594
 595	for (i = 0; i < ARRAY_SIZE(info->hook_entry); i++) {
 596		if (!(valid_hooks & (1 << i)))
 597			continue;
 598
 599		if (info->hook_entry[i] == 0xFFFFFFFF)
 600			return -EINVAL;
 601		if (info->underflow[i] == 0xFFFFFFFF)
 602			return -EINVAL;
 603
 604		if (check_hooks) {
 605			if (max_uflow > info->underflow[i])
 606				goto error;
 607
 608			if (max_uflow == info->underflow[i]) {
 609				err = "duplicate underflow";
 610				goto error;
 611			}
 612			if (max_entry > info->hook_entry[i]) {
 613				err = "unsorted entry";
 614				goto error;
 615			}
 616			if (max_entry == info->hook_entry[i]) {
 617				err = "duplicate entry";
 618				goto error;
 619			}
 620		}
 621		max_entry = info->hook_entry[i];
 622		max_uflow = info->underflow[i];
 623		check_hooks = true;
 624	}
 625
 626	return 0;
 627error:
 628	pr_err_ratelimited("%s at hook %d\n", err, i);
 629	return -EINVAL;
 630}
 631EXPORT_SYMBOL(xt_check_table_hooks);
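/*
 * Example that passes: with valid_hooks == (1 << 0) | (1 << 2),
 * hook_entry = { [0] = 0, [2] = 304 } and underflow = { [0] = 152,
 * [2] = 456 } are strictly increasing across the enabled hooks.  A
 * repeated or decreasing value in either array, or a 0xFFFFFFFF
 * placeholder left in an enabled slot, is rejected.
 */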
 632
 633static bool verdict_ok(int verdict)
 634{
 635	if (verdict > 0)
 636		return true;
 637
 638	if (verdict < 0) {
 639		int v = -verdict - 1;
 640
 641		if (verdict == XT_RETURN)
 642			return true;
 643
 644		switch (v) {
 645		case NF_ACCEPT: return true;
 646		case NF_DROP: return true;
 647		case NF_QUEUE: return true;
 648		default:
 649			break;
 650		}
 651
 652		return false;
 653	}
 654
 655	return false;
 656}
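/*
 * Verdict encoding recap: a positive verdict is a jump offset into the
 * rule blob, XT_RETURN pops back to the caller, and builtin verdicts
 * are stored as -verdict - 1, so NF_DROP (0) is encoded as -1 and
 * NF_ACCEPT (1) as -2.  A verdict of 0 is always rejected.
 */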
 657
 658static bool error_tg_ok(unsigned int usersize, unsigned int kernsize,
 659			const char *msg, unsigned int msglen)
 660{
 661	return usersize == kernsize && strnlen(msg, msglen) < msglen;
 662}
 663
 664#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
 665int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
 666{
 667	struct xt_af *xp = &xt[af];
 668
 669	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
 670
 671	if (WARN_ON(!xp->compat_tab))
 672		return -ENOMEM;
 673
 674	if (xp->cur >= xp->number)
 675		return -EINVAL;
 676
 677	if (xp->cur)
 678		delta += xp->compat_tab[xp->cur - 1].delta;
 679	xp->compat_tab[xp->cur].offset = offset;
 680	xp->compat_tab[xp->cur].delta = delta;
 681	xp->cur++;
 682	return 0;
 683}
 684EXPORT_SYMBOL_GPL(xt_compat_add_offset);
 685
 686void xt_compat_flush_offsets(u_int8_t af)
 687{
 688	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
 689
 690	if (xt[af].compat_tab) {
 691		vfree(xt[af].compat_tab);
 692		xt[af].compat_tab = NULL;
 693		xt[af].number = 0;
 694		xt[af].cur = 0;
 695	}
 696}
 697EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
 698
 699int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
 700{
 701	struct compat_delta *tmp = xt[af].compat_tab;
 702	int mid, left = 0, right = xt[af].cur - 1;
 703
 704	while (left <= right) {
 705		mid = (left + right) >> 1;
 706		if (offset > tmp[mid].offset)
 707			left = mid + 1;
 708		else if (offset < tmp[mid].offset)
 709			right = mid - 1;
 710		else
 711			return mid ? tmp[mid - 1].delta : 0;
 712	}
 713	return left ? tmp[left - 1].delta : 0;
 714}
 715EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
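/*
 * Sketch: compat_tab holds cumulative deltas sorted by kernel offset,
 * e.g. { { .offset = 112, .delta = 4 }, { .offset = 240, .delta = 12 } }.
 * xt_compat_calc_jump(af, 200) then returns 4: a jump landing past the
 * first rule must be adjusted by the bytes saved up to that point.
 */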
 716
 717int xt_compat_init_offsets(u8 af, unsigned int number)
 718{
 719	size_t mem;
 720
 721	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
 722
 723	if (!number || number > (INT_MAX / sizeof(struct compat_delta)))
 724		return -EINVAL;
 725
 726	if (WARN_ON(xt[af].compat_tab))
 727		return -EINVAL;
 728
 729	mem = sizeof(struct compat_delta) * number;
 730	if (mem > XT_MAX_TABLE_SIZE)
 731		return -ENOMEM;
 732
 733	xt[af].compat_tab = vmalloc(mem);
 734	if (!xt[af].compat_tab)
 735		return -ENOMEM;
 736
 737	xt[af].number = number;
 738	xt[af].cur = 0;
 739
 740	return 0;
 741}
 742EXPORT_SYMBOL(xt_compat_init_offsets);
 743
 744int xt_compat_match_offset(const struct xt_match *match)
 745{
 746	u_int16_t csize = match->compatsize ? : match->matchsize;
 747	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
 748}
 749EXPORT_SYMBOL_GPL(xt_compat_match_offset);
 750
 751void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
 752			       unsigned int *size)
 753{
 754	const struct xt_match *match = m->u.kernel.match;
 755	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
 756	int off = xt_compat_match_offset(match);
 757	u_int16_t msize = cm->u.user.match_size;
 758	char name[sizeof(m->u.user.name)];
 759
 760	m = *dstptr;
 761	memcpy(m, cm, sizeof(*cm));
 762	if (match->compat_from_user)
 763		match->compat_from_user(m->data, cm->data);
 764	else
 765		memcpy(m->data, cm->data, msize - sizeof(*cm));
 766
 767	msize += off;
 768	m->u.user.match_size = msize;
 769	strscpy(name, match->name, sizeof(name));
 770	module_put(match->me);
 771	strscpy_pad(m->u.user.name, name, sizeof(m->u.user.name));
 772
 773	*size += off;
 774	*dstptr += msize;
 775}
 776EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
 777
 778#define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE)			\
 779	xt_data_to_user(U->data, K->data,				\
 780			K->u.kernel.TYPE->usersize,			\
 781			C_SIZE,						\
 782			COMPAT_XT_ALIGN(C_SIZE))
 783
 784int xt_compat_match_to_user(const struct xt_entry_match *m,
 785			    void __user **dstptr, unsigned int *size)
 786{
 787	const struct xt_match *match = m->u.kernel.match;
 788	struct compat_xt_entry_match __user *cm = *dstptr;
 789	int off = xt_compat_match_offset(match);
 790	u_int16_t msize = m->u.user.match_size - off;
 791
 792	if (XT_OBJ_TO_USER(cm, m, match, msize))
 793		return -EFAULT;
 794
 795	if (match->compat_to_user) {
 796		if (match->compat_to_user((void __user *)cm->data, m->data))
 797			return -EFAULT;
 798	} else {
 799		if (COMPAT_XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
 800			return -EFAULT;
 801	}
 802
 803	*size -= off;
 804	*dstptr += msize;
 805	return 0;
 806}
 807EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
 808
 809/* non-compat version may have padding after verdict */
 810struct compat_xt_standard_target {
 811	struct compat_xt_entry_target t;
 812	compat_uint_t verdict;
 813};
 814
 815struct compat_xt_error_target {
 816	struct compat_xt_entry_target t;
 817	char errorname[XT_FUNCTION_MAXNAMELEN];
 818};
 819
 820int xt_compat_check_entry_offsets(const void *base, const char *elems,
 821				  unsigned int target_offset,
 822				  unsigned int next_offset)
 823{
 824	long size_of_base_struct = elems - (const char *)base;
 825	const struct compat_xt_entry_target *t;
 826	const char *e = base;
 827
 828	if (target_offset < size_of_base_struct)
 829		return -EINVAL;
 830
 831	if (target_offset + sizeof(*t) > next_offset)
 832		return -EINVAL;
 833
 834	t = (void *)(e + target_offset);
 835	if (t->u.target_size < sizeof(*t))
 836		return -EINVAL;
 837
 838	if (target_offset + t->u.target_size > next_offset)
 839		return -EINVAL;
 840
 841	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
 842		const struct compat_xt_standard_target *st = (const void *)t;
 843
 844		if (COMPAT_XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
 845			return -EINVAL;
 846
 847		if (!verdict_ok(st->verdict))
 848			return -EINVAL;
 849	} else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
 850		const struct compat_xt_error_target *et = (const void *)t;
 851
 852		if (!error_tg_ok(t->u.target_size, sizeof(*et),
 853				 et->errorname, sizeof(et->errorname)))
 854			return -EINVAL;
 855	}
 856
 857	/* compat_xt_entry_match has less strict alignment requirements,
 858	 * otherwise the two are identical.  In case of padding differences
 859	 * we would need to add a compat version of xt_check_entry_match.
 860	 */
 861	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));
 862
 863	return xt_check_entry_match(elems, base + target_offset,
 864				    __alignof__(struct compat_xt_entry_match));
 865}
 866EXPORT_SYMBOL(xt_compat_check_entry_offsets);
 867#endif /* CONFIG_NETFILTER_XTABLES_COMPAT */
 868
 869/**
 870 * xt_check_entry_offsets - validate arp/ip/ip6t_entry
 871 *
 872 * @base: pointer to arp/ip/ip6t_entry
 873 * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
 874 * @target_offset: the arp/ip/ip6_t->target_offset
 875 * @next_offset: the arp/ip/ip6_t->next_offset
 876 *
 877 * validates that target_offset and next_offset are sane and that all
 878 * match sizes (if any) align with the target offset.
 879 *
 880 * This function does not validate the targets or matches themselves, it
 881 * only tests that all the offsets and sizes are correct, that all
 882 * match structures are aligned, and that the last structure ends where
 883 * the target structure begins.
 884 *
 885 * Also see xt_compat_check_entry_offsets() for the CONFIG_NETFILTER_XTABLES_COMPAT version.
 886 *
 887 * The arp/ip/ip6t_entry structure @base must have passed the following tests:
 888 * - it must point to a valid memory location
 889 * - base to base + next_offset must be accessible, i.e. not exceed allocated
 890 *   length.
 891 *
 892 * A well-formed entry looks like this:
 893 *
 894 * ip(6)t_entry   match [mtdata]  match [mtdata] target [tgdata] ip(6)t_entry
 895 * e->elems[]-----'                              |               |
 896 *                matchsize                      |               |
 897 *                                matchsize      |               |
 898 *                                               |               |
 899 * target_offset---------------------------------'               |
 900 * next_offset---------------------------------------------------'
 901 *
 902 * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
 903 *          This is where matches (if any) and the target reside.
 904 * target_offset: beginning of target.
 905 * next_offset: start of the next rule; also: size of this rule.
 906 * Since targets have a minimum size, target_offset + minlen <= next_offset.
 907 *
 908 * Every match stores its size, sum of sizes must not exceed target_offset.
 909 *
 910 * Return: 0 on success, negative errno on failure.
 911 */
 912int xt_check_entry_offsets(const void *base,
 913			   const char *elems,
 914			   unsigned int target_offset,
 915			   unsigned int next_offset)
 916{
 917	long size_of_base_struct = elems - (const char *)base;
 918	const struct xt_entry_target *t;
 919	const char *e = base;
 920
 921	/* target start is within the ip/ip6/arpt_entry struct */
 922	if (target_offset < size_of_base_struct)
 923		return -EINVAL;
 924
 925	if (target_offset + sizeof(*t) > next_offset)
 926		return -EINVAL;
 927
 928	t = (void *)(e + target_offset);
 929	if (t->u.target_size < sizeof(*t))
 930		return -EINVAL;
 931
 932	if (target_offset + t->u.target_size > next_offset)
 933		return -EINVAL;
 934
 935	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
 936		const struct xt_standard_target *st = (const void *)t;
 937
 938		if (XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
 939			return -EINVAL;
 940
 941		if (!verdict_ok(st->verdict))
 942			return -EINVAL;
 943	} else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
 944		const struct xt_error_target *et = (const void *)t;
 945
 946		if (!error_tg_ok(t->u.target_size, sizeof(*et),
 947				 et->errorname, sizeof(et->errorname)))
 948			return -EINVAL;
 949	}
 950
 951	return xt_check_entry_match(elems, base + target_offset,
 952				    __alignof__(struct xt_entry_match));
 953}
 954EXPORT_SYMBOL(xt_check_entry_offsets);
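/*
 * Worked example for the diagram above: let E = sizeof(struct
 * ipt_entry), so elems starts at offset E.  With a single match whose
 * u.match_size is 48, target_offset must be E + 48, and a standard
 * target additionally requires next_offset == XT_ALIGN(target_offset +
 * sizeof(struct xt_standard_target)).  Any gap or overlap between the
 * pieces is rejected.
 */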
 955
 956/**
 957 * xt_alloc_entry_offsets - allocate array to store rule head offsets
 958 *
 959 * @size: number of entries
 960 *
 961 * Return: NULL on failure, or a zeroed kmalloc'd or vmalloc'd array
 962 */
 963unsigned int *xt_alloc_entry_offsets(unsigned int size)
 964{
 965	if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int))
 966		return NULL;
 967
 968	return kvcalloc(size, sizeof(unsigned int), GFP_KERNEL);
 969
 970}
 971EXPORT_SYMBOL(xt_alloc_entry_offsets);
 972
 973/**
 974 * xt_find_jump_offset - check if target is a valid jump offset
 975 *
 976 * @offsets: array containing all valid rule start offsets of a rule blob
 977 * @target: the jump target to search for
 978 * @size: number of entries in @offsets
 979 */
 980bool xt_find_jump_offset(const unsigned int *offsets,
 981			 unsigned int target, unsigned int size)
 982{
 983	int m, low = 0, hi = size;
 984
 985	while (hi > low) {
 986		m = (low + hi) / 2u;
 987
 988		if (offsets[m] > target)
 989			hi = m;
 990		else if (offsets[m] < target)
 991			low = m + 1;
 992		else
 993			return true;
 994	}
 995
 996	return false;
 997}
 998EXPORT_SYMBOL(xt_find_jump_offset);
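/*
 * Usage sketch: the offsets array comes from xt_alloc_entry_offsets()
 * and is filled with every rule's start offset, in increasing order,
 * during the first walk of the blob; a verdict v > 0 is then accepted
 * only if xt_find_jump_offset(offsets, v, count) finds it as a rule
 * boundary.
 */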
 999
1000int xt_check_target(struct xt_tgchk_param *par,
1001		    unsigned int size, u16 proto, bool inv_proto)
1002{
1003	int ret;
1004
1005	if (XT_ALIGN(par->target->targetsize) != size) {
1006		pr_err_ratelimited("%s_tables: %s.%u target: invalid size %u (kernel) != (user) %u\n",
1007				   xt_prefix[par->family], par->target->name,
1008				   par->target->revision,
1009				   XT_ALIGN(par->target->targetsize), size);
1010		return -EINVAL;
1011	}
1012	if (par->target->table != NULL &&
1013	    strcmp(par->target->table, par->table) != 0) {
1014		pr_info_ratelimited("%s_tables: %s target: only valid in %s table, not %s\n",
1015				    xt_prefix[par->family], par->target->name,
1016				    par->target->table, par->table);
1017		return -EINVAL;
1018	}
1019	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
1020		char used[64], allow[64];
1021
1022		pr_info_ratelimited("%s_tables: %s target: used from hooks %s, but only usable from %s\n",
1023				    xt_prefix[par->family], par->target->name,
1024				    textify_hooks(used, sizeof(used),
1025						  par->hook_mask, par->family),
1026				    textify_hooks(allow, sizeof(allow),
1027						  par->target->hooks,
1028						  par->family));
1029		return -EINVAL;
1030	}
1031	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
1032		pr_info_ratelimited("%s_tables: %s target: only valid for protocol %u\n",
1033				    xt_prefix[par->family], par->target->name,
1034				    par->target->proto);
1035		return -EINVAL;
1036	}
1037	if (par->target->checkentry != NULL) {
1038		ret = par->target->checkentry(par);
1039		if (ret < 0)
1040			return ret;
1041		else if (ret > 0)
1042			/* Flag up potential errors. */
1043			return -EIO;
1044	}
1045	return 0;
1046}
1047EXPORT_SYMBOL_GPL(xt_check_target);
1048
1049/**
1050 * xt_copy_counters - copy counters and metadata from a sockptr_t
1051 *
1052 * @arg: src sockptr
1053 * @len: alleged size of userspace memory
1054 * @info: where to store the xt_counters_info metadata
1055 *
1056 * Copies the counter metadata from @arg and stores it in @info.
1057 *
1058 * vmallocs memory to hold the counters, then copies the counter data
1059 * from @arg to the new memory and returns a pointer to it.
1060 *
1061 * If called from a compat syscall, @info gets converted automatically to the
1062 * 64bit representation.
1063 *
1064 * The metadata associated with the counters is stored in @info.
1065 *
1066 * Return: pointer that the caller has to test via IS_ERR().
1067 * If IS_ERR() is false, the caller has to vfree() the pointer.
1068 */
1069void *xt_copy_counters(sockptr_t arg, unsigned int len,
1070		       struct xt_counters_info *info)
1071{
1072	size_t offset;
1073	void *mem;
1074	u64 size;
1075
1076#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
1077	if (in_compat_syscall()) {
1078		/* structures only differ in size due to alignment */
1079		struct compat_xt_counters_info compat_tmp;
1080
1081		if (len <= sizeof(compat_tmp))
1082			return ERR_PTR(-EINVAL);
1083
1084		len -= sizeof(compat_tmp);
1085		if (copy_from_sockptr(&compat_tmp, arg, sizeof(compat_tmp)) != 0)
1086			return ERR_PTR(-EFAULT);
1087
1088		memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
1089		info->num_counters = compat_tmp.num_counters;
1090		offset = sizeof(compat_tmp);
1091	} else
1092#endif
1093	{
1094		if (len <= sizeof(*info))
1095			return ERR_PTR(-EINVAL);
1096
1097		len -= sizeof(*info);
1098		if (copy_from_sockptr(info, arg, sizeof(*info)) != 0)
1099			return ERR_PTR(-EFAULT);
1100
1101		offset = sizeof(*info);
1102	}
1103	info->name[sizeof(info->name) - 1] = '\0';
1104
1105	size = sizeof(struct xt_counters);
1106	size *= info->num_counters;
1107
1108	if (size != (u64)len)
1109		return ERR_PTR(-EINVAL);
1110
1111	mem = vmalloc(len);
1112	if (!mem)
1113		return ERR_PTR(-ENOMEM);
1114
1115	if (copy_from_sockptr_offset(mem, arg, offset, len) == 0)
1116		return mem;
1117
1118	vfree(mem);
1119	return ERR_PTR(-EFAULT);
1120}
1121EXPORT_SYMBOL_GPL(xt_copy_counters);
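/*
 * Caller pattern (sketch):
 *
 *	struct xt_counters_info info;
 *	void *counters;
 *
 *	counters = xt_copy_counters(arg, len, &info);
 *	if (IS_ERR(counters))
 *		return PTR_ERR(counters);
 *	... add info.num_counters counters to the table ...
 *	vfree(counters);
 */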
1122
1123#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
1124int xt_compat_target_offset(const struct xt_target *target)
1125{
1126	u_int16_t csize = target->compatsize ? : target->targetsize;
1127	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
1128}
1129EXPORT_SYMBOL_GPL(xt_compat_target_offset);
1130
1131void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
1132				unsigned int *size)
1133{
1134	const struct xt_target *target = t->u.kernel.target;
1135	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
1136	int off = xt_compat_target_offset(target);
1137	u_int16_t tsize = ct->u.user.target_size;
1138	char name[sizeof(t->u.user.name)];
1139
1140	t = *dstptr;
1141	memcpy(t, ct, sizeof(*ct));
1142	if (target->compat_from_user)
1143		target->compat_from_user(t->data, ct->data);
1144	else
1145		unsafe_memcpy(t->data, ct->data, tsize - sizeof(*ct),
1146			      /* UAPI 0-sized destination */);
1147
1148	tsize += off;
1149	t->u.user.target_size = tsize;
1150	strscpy(name, target->name, sizeof(name));
1151	module_put(target->me);
1152	strscpy_pad(t->u.user.name, name, sizeof(t->u.user.name));
1153
1154	*size += off;
1155	*dstptr += tsize;
1156}
1157EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
1158
1159int xt_compat_target_to_user(const struct xt_entry_target *t,
1160			     void __user **dstptr, unsigned int *size)
1161{
1162	const struct xt_target *target = t->u.kernel.target;
1163	struct compat_xt_entry_target __user *ct = *dstptr;
1164	int off = xt_compat_target_offset(target);
1165	u_int16_t tsize = t->u.user.target_size - off;
1166
1167	if (XT_OBJ_TO_USER(ct, t, target, tsize))
1168		return -EFAULT;
1169
1170	if (target->compat_to_user) {
1171		if (target->compat_to_user((void __user *)ct->data, t->data))
1172			return -EFAULT;
1173	} else {
1174		if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
1175			return -EFAULT;
1176	}
1177
1178	*size -= off;
1179	*dstptr += tsize;
1180	return 0;
1181}
1182EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
1183#endif
1184
1185struct xt_table_info *xt_alloc_table_info(unsigned int size)
1186{
1187	struct xt_table_info *info = NULL;
1188	size_t sz = sizeof(*info) + size;
1189
1190	if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
1191		return NULL;
1192
1193	info = kvmalloc(sz, GFP_KERNEL_ACCOUNT);
1194	if (!info)
1195		return NULL;
1196
1197	memset(info, 0, sizeof(*info));
1198	info->size = size;
1199	return info;
1200}
1201EXPORT_SYMBOL(xt_alloc_table_info);
1202
1203void xt_free_table_info(struct xt_table_info *info)
1204{
1205	int cpu;
1206
1207	if (info->jumpstack != NULL) {
1208		for_each_possible_cpu(cpu)
1209			kvfree(info->jumpstack[cpu]);
1210		kvfree(info->jumpstack);
1211	}
1212
1213	kvfree(info);
1214}
1215EXPORT_SYMBOL(xt_free_table_info);
1216
1217struct xt_table *xt_find_table(struct net *net, u8 af, const char *name)
1218{
1219	struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
1220	struct xt_table *t;
1221
1222	mutex_lock(&xt[af].mutex);
1223	list_for_each_entry(t, &xt_net->tables[af], list) {
1224		if (strcmp(t->name, name) == 0) {
1225			mutex_unlock(&xt[af].mutex);
1226			return t;
1227		}
1228	}
1229	mutex_unlock(&xt[af].mutex);
1230	return NULL;
1231}
1232EXPORT_SYMBOL(xt_find_table);
1233
1234/* Find table by name, grabs mutex & ref.  Returns ERR_PTR on error. */
1235struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
1236				    const char *name)
1237{
1238	struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
1239	struct module *owner = NULL;
1240	struct xt_template *tmpl;
1241	struct xt_table *t;
1242
1243	mutex_lock(&xt[af].mutex);
1244	list_for_each_entry(t, &xt_net->tables[af], list)
1245		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
1246			return t;
1247
1248	/* Table doesn't exist in this netns, check larval list */
1249	list_for_each_entry(tmpl, &xt_templates[af], list) {
1250		int err;
1251
1252		if (strcmp(tmpl->name, name))
1253			continue;
1254		if (!try_module_get(tmpl->me))
1255			goto out;
1256
1257		owner = tmpl->me;
1258
1259		mutex_unlock(&xt[af].mutex);
1260		err = tmpl->table_init(net);
1261		if (err < 0) {
1262			module_put(owner);
1263			return ERR_PTR(err);
1264		}
1265
1266		mutex_lock(&xt[af].mutex);
1267		break;
1268	}
1269
1270	/* and once again: */
1271	list_for_each_entry(t, &xt_net->tables[af], list)
1272		if (strcmp(t->name, name) == 0 && owner == t->me)
1273			return t;
1274
1275	module_put(owner);
1276 out:
1277	mutex_unlock(&xt[af].mutex);
1278	return ERR_PTR(-ENOENT);
1279}
1280EXPORT_SYMBOL_GPL(xt_find_table_lock);
1281
1282struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
1283					    const char *name)
1284{
1285	struct xt_table *t = xt_find_table_lock(net, af, name);
1286
1287#ifdef CONFIG_MODULES
1288	if (IS_ERR(t)) {
1289		int err = request_module("%stable_%s", xt_prefix[af], name);
1290		if (err < 0)
1291			return ERR_PTR(err);
1292		t = xt_find_table_lock(net, af, name);
1293	}
1294#endif
1295
1296	return t;
1297}
1298EXPORT_SYMBOL_GPL(xt_request_find_table_lock);
1299
1300void xt_table_unlock(struct xt_table *table)
1301{
1302	mutex_unlock(&xt[table->af].mutex);
1303}
1304EXPORT_SYMBOL_GPL(xt_table_unlock);
1305
1306#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
1307void xt_compat_lock(u_int8_t af)
1308{
1309	mutex_lock(&xt[af].compat_mutex);
1310}
1311EXPORT_SYMBOL_GPL(xt_compat_lock);
1312
1313void xt_compat_unlock(u_int8_t af)
1314{
1315	mutex_unlock(&xt[af].compat_mutex);
1316}
1317EXPORT_SYMBOL_GPL(xt_compat_unlock);
1318#endif
1319
1320DEFINE_PER_CPU(seqcount_t, xt_recseq);
1321EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
1322
1323struct static_key xt_tee_enabled __read_mostly;
1324EXPORT_SYMBOL_GPL(xt_tee_enabled);
1325
1326static int xt_jumpstack_alloc(struct xt_table_info *i)
1327{
1328	unsigned int size;
1329	int cpu;
1330
1331	size = sizeof(void **) * nr_cpu_ids;
1332	if (size > PAGE_SIZE)
1333		i->jumpstack = kvzalloc(size, GFP_KERNEL);
1334	else
1335		i->jumpstack = kzalloc(size, GFP_KERNEL);
1336	if (i->jumpstack == NULL)
1337		return -ENOMEM;
1338
1339	/* ruleset without jumps -- no stack needed */
1340	if (i->stacksize == 0)
1341		return 0;
1342
1343	/* The jumpstack needs to be able to record two full callchains: one
1344	 * from the first ruleset traversal, plus one for table reentrancy
1345	 * via -j TEE, without clobbering the callchain that brought us to
1346	 * the TEE target.
1347	 *
1348	 * This is done by allocating two jumpstacks per cpu, on reentry
1349	 * the upper half of the stack is used.
1350	 *
1351	 * see the jumpstack setup in ipt_do_table() for more details.
1352	 */
1353	size = sizeof(void *) * i->stacksize * 2u;
1354	for_each_possible_cpu(cpu) {
1355		i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
1356			cpu_to_node(cpu));
1357		if (i->jumpstack[cpu] == NULL)
1358			/*
1359			 * Freeing will be done later on by the callers. The
1360			 * chain is: xt_replace_table -> __do_replace ->
1361			 * do_replace -> xt_free_table_info.
1362			 */
1363			return -ENOMEM;
1364	}
1365
1366	return 0;
1367}
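/*
 * Example: with i->stacksize == 8, each cpu gets room for 16 entries;
 * the first traversal uses slots 0..7 and a TEE-induced reentry uses
 * slots 8..15, so neither callchain clobbers the other.
 */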
1368
1369struct xt_counters *xt_counters_alloc(unsigned int counters)
1370{
1371	struct xt_counters *mem;
1372
1373	if (counters == 0 || counters > INT_MAX / sizeof(*mem))
1374		return NULL;
1375
1376	counters *= sizeof(*mem);
1377	if (counters > XT_MAX_TABLE_SIZE)
1378		return NULL;
1379
1380	return vzalloc(counters);
1381}
1382EXPORT_SYMBOL(xt_counters_alloc);
1383
1384struct xt_table_info *
1385xt_replace_table(struct xt_table *table,
1386	      unsigned int num_counters,
1387	      struct xt_table_info *newinfo,
1388	      int *error)
1389{
1390	struct xt_table_info *private;
1391	unsigned int cpu;
1392	int ret;
1393
1394	ret = xt_jumpstack_alloc(newinfo);
1395	if (ret < 0) {
1396		*error = ret;
1397		return NULL;
1398	}
1399
1400	/* Do the substitution. */
1401	local_bh_disable();
1402	private = table->private;
1403
1404	/* Check inside lock: is the old number correct? */
1405	if (num_counters != private->number) {
1406		pr_debug("num_counters != table->private->number (%u/%u)\n",
1407			 num_counters, private->number);
1408		local_bh_enable();
1409		*error = -EAGAIN;
1410		return NULL;
1411	}
1412
1413	newinfo->initial_entries = private->initial_entries;
1414	/*
1415	 * Ensure contents of newinfo are visible before assigning to
1416	 * private.
1417	 */
1418	smp_wmb();
1419	table->private = newinfo;
1420
1421	/* make sure all cpus see new ->private value */
1422	smp_mb();
1423
1424	/*
1425	 * Even though table entries have now been swapped, other CPU's
1426	 * may still be using the old entries...
1427	 */
1428	local_bh_enable();
1429
1430	/* ... so wait until xt_recseq is even on all cpus */
1431	for_each_possible_cpu(cpu) {
1432		seqcount_t *s = &per_cpu(xt_recseq, cpu);
1433		u32 seq = raw_read_seqcount(s);
1434
1435		if (seq & 1) {
1436			do {
1437				cond_resched();
1438				cpu_relax();
1439			} while (seq == raw_read_seqcount(s));
1440		}
1441	}
1442
1443	audit_log_nfcfg(table->name, table->af, private->number,
1444			!private->number ? AUDIT_XT_OP_REGISTER :
1445					   AUDIT_XT_OP_REPLACE,
1446			GFP_KERNEL);
1447	return private;
1448}
1449EXPORT_SYMBOL_GPL(xt_replace_table);
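/*
 * The wait loop above pairs with the packet path, which brackets each
 * table traversal roughly like this (sketch; see xt_write_recseq_begin()
 * and xt_write_recseq_end() in x_tables.h):
 *
 *	local_bh_disable();
 *	addend = xt_write_recseq_begin();    (xt_recseq becomes odd)
 *	private = READ_ONCE(table->private);
 *	... evaluate rules ...
 *	xt_write_recseq_end(addend);         (xt_recseq is even again)
 *	local_bh_enable();
 */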
1450
1451struct xt_table *xt_register_table(struct net *net,
1452				   const struct xt_table *input_table,
1453				   struct xt_table_info *bootstrap,
1454				   struct xt_table_info *newinfo)
1455{
1456	struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
1457	struct xt_table_info *private;
1458	struct xt_table *t, *table;
1459	int ret;
1460
1461	/* Don't add one object to multiple lists. */
1462	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
1463	if (!table) {
1464		ret = -ENOMEM;
1465		goto out;
1466	}
1467
1468	mutex_lock(&xt[table->af].mutex);
1469	/* Don't autoload: we'd eat our tail... */
1470	list_for_each_entry(t, &xt_net->tables[table->af], list) {
1471		if (strcmp(t->name, table->name) == 0) {
1472			ret = -EEXIST;
1473			goto unlock;
1474		}
1475	}
1476
1477	/* Simplifies replace_table code. */
1478	table->private = bootstrap;
1479
1480	if (!xt_replace_table(table, 0, newinfo, &ret))
1481		goto unlock;
1482
1483	private = table->private;
1484	pr_debug("table->private->number = %u\n", private->number);
1485
1486	/* save number of initial entries */
1487	private->initial_entries = private->number;
1488
1489	list_add(&table->list, &xt_net->tables[table->af]);
1490	mutex_unlock(&xt[table->af].mutex);
1491	return table;
1492
1493unlock:
1494	mutex_unlock(&xt[table->af].mutex);
1495	kfree(table);
1496out:
1497	return ERR_PTR(ret);
1498}
1499EXPORT_SYMBOL_GPL(xt_register_table);
1500
1501void *xt_unregister_table(struct xt_table *table)
1502{
1503	struct xt_table_info *private;
1504
1505	mutex_lock(&xt[table->af].mutex);
1506	private = table->private;
1507	list_del(&table->list);
1508	mutex_unlock(&xt[table->af].mutex);
1509	audit_log_nfcfg(table->name, table->af, private->number,
1510			AUDIT_XT_OP_UNREGISTER, GFP_KERNEL);
1511	kfree(table->ops);
1512	kfree(table);
1513
1514	return private;
1515}
1516EXPORT_SYMBOL_GPL(xt_unregister_table);
1517
1518#ifdef CONFIG_PROC_FS
1519static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
1520{
1521	u8 af = (unsigned long)pde_data(file_inode(seq->file));
1522	struct net *net = seq_file_net(seq);
1523	struct xt_pernet *xt_net;
1524
1525	xt_net = net_generic(net, xt_pernet_id);
1526
1527	mutex_lock(&xt[af].mutex);
1528	return seq_list_start(&xt_net->tables[af], *pos);
1529}
1530
1531static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1532{
1533	u8 af = (unsigned long)pde_data(file_inode(seq->file));
1534	struct net *net = seq_file_net(seq);
1535	struct xt_pernet *xt_net;
1536
1537	xt_net = net_generic(net, xt_pernet_id);
1538
1539	return seq_list_next(v, &xt_net->tables[af], pos);
1540}
1541
1542static void xt_table_seq_stop(struct seq_file *seq, void *v)
1543{
1544	u_int8_t af = (unsigned long)pde_data(file_inode(seq->file));
1545
1546	mutex_unlock(&xt[af].mutex);
1547}
1548
1549static int xt_table_seq_show(struct seq_file *seq, void *v)
1550{
1551	struct xt_table *table = list_entry(v, struct xt_table, list);
1552
1553	if (*table->name)
1554		seq_printf(seq, "%s\n", table->name);
1555	return 0;
1556}
1557
1558static const struct seq_operations xt_table_seq_ops = {
1559	.start	= xt_table_seq_start,
1560	.next	= xt_table_seq_next,
1561	.stop	= xt_table_seq_stop,
1562	.show	= xt_table_seq_show,
1563};
1564
1565/*
1566 * Traversal state for ip{,6}_{tables,matches}, used to help cross
1567 * the multi-AF mutexes.
1568 */
1569struct nf_mttg_trav {
1570	struct list_head *head, *curr;
1571	uint8_t class;
1572};
1573
1574enum {
1575	MTTG_TRAV_INIT,
1576	MTTG_TRAV_NFP_UNSPEC,
1577	MTTG_TRAV_NFP_SPEC,
1578	MTTG_TRAV_DONE,
1579};
1580
1581static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
1582    bool is_target)
1583{
1584	static const uint8_t next_class[] = {
1585		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
1586		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
1587	};
1588	uint8_t nfproto = (unsigned long)pde_data(file_inode(seq->file));
1589	struct nf_mttg_trav *trav = seq->private;
1590
1591	if (ppos != NULL)
1592		++(*ppos);
1593
1594	switch (trav->class) {
1595	case MTTG_TRAV_INIT:
1596		trav->class = MTTG_TRAV_NFP_UNSPEC;
1597		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
1598		trav->head = trav->curr = is_target ?
1599			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
1600		break;
1601	case MTTG_TRAV_NFP_UNSPEC:
1602		trav->curr = trav->curr->next;
1603		if (trav->curr != trav->head)
1604			break;
1605		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
1606		mutex_lock(&xt[nfproto].mutex);
1607		trav->head = trav->curr = is_target ?
1608			&xt[nfproto].target : &xt[nfproto].match;
1609		trav->class = next_class[trav->class];
1610		break;
1611	case MTTG_TRAV_NFP_SPEC:
1612		trav->curr = trav->curr->next;
1613		if (trav->curr != trav->head)
1614			break;
1615		fallthrough;
1616	default:
1617		return NULL;
1618	}
1619	return trav;
1620}
1621
1622static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
1623    bool is_target)
1624{
1625	struct nf_mttg_trav *trav = seq->private;
1626	unsigned int j;
1627
1628	trav->class = MTTG_TRAV_INIT;
1629	for (j = 0; j < *pos; ++j)
1630		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
1631			return NULL;
1632	return trav;
1633}
1634
1635static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
1636{
1637	uint8_t nfproto = (unsigned long)pde_data(file_inode(seq->file));
1638	struct nf_mttg_trav *trav = seq->private;
1639
1640	switch (trav->class) {
1641	case MTTG_TRAV_NFP_UNSPEC:
1642		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
1643		break;
1644	case MTTG_TRAV_NFP_SPEC:
1645		mutex_unlock(&xt[nfproto].mutex);
1646		break;
1647	}
1648}
1649
1650static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
1651{
1652	return xt_mttg_seq_start(seq, pos, false);
1653}
1654
1655static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
1656{
1657	return xt_mttg_seq_next(seq, v, ppos, false);
1658}
1659
1660static int xt_match_seq_show(struct seq_file *seq, void *v)
1661{
1662	const struct nf_mttg_trav *trav = seq->private;
1663	const struct xt_match *match;
1664
1665	switch (trav->class) {
1666	case MTTG_TRAV_NFP_UNSPEC:
1667	case MTTG_TRAV_NFP_SPEC:
1668		if (trav->curr == trav->head)
1669			return 0;
1670		match = list_entry(trav->curr, struct xt_match, list);
1671		if (*match->name)
1672			seq_printf(seq, "%s\n", match->name);
1673	}
1674	return 0;
1675}
1676
1677static const struct seq_operations xt_match_seq_ops = {
1678	.start	= xt_match_seq_start,
1679	.next	= xt_match_seq_next,
1680	.stop	= xt_mttg_seq_stop,
1681	.show	= xt_match_seq_show,
1682};
1683
1684static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
1685{
1686	return xt_mttg_seq_start(seq, pos, true);
1687}
1688
1689static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
1690{
1691	return xt_mttg_seq_next(seq, v, ppos, true);
1692}
1693
1694static int xt_target_seq_show(struct seq_file *seq, void *v)
1695{
1696	const struct nf_mttg_trav *trav = seq->private;
1697	const struct xt_target *target;
1698
1699	switch (trav->class) {
1700	case MTTG_TRAV_NFP_UNSPEC:
1701	case MTTG_TRAV_NFP_SPEC:
1702		if (trav->curr == trav->head)
1703			return 0;
1704		target = list_entry(trav->curr, struct xt_target, list);
1705		if (*target->name)
1706			seq_printf(seq, "%s\n", target->name);
1707	}
1708	return 0;
1709}
1710
1711static const struct seq_operations xt_target_seq_ops = {
1712	.start	= xt_target_seq_start,
1713	.next	= xt_target_seq_next,
1714	.stop	= xt_mttg_seq_stop,
1715	.show	= xt_target_seq_show,
1716};
1717
1718#define FORMAT_TABLES	"_tables_names"
1719#define FORMAT_MATCHES	"_tables_matches"
1720#define FORMAT_TARGETS	"_tables_targets"
1721
1722#endif /* CONFIG_PROC_FS */
1723
1724/**
1725 * xt_hook_ops_alloc - set up hooks for a new table
1726 * @table:	table with metadata needed to set up hooks
1727 * @fn:		Hook function
1728 *
1729 * This function will create the nf_hook_ops that the x_table needs
1730 * to hand to nf_register_net_hooks().
1731 */
1732struct nf_hook_ops *
1733xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
1734{
1735	unsigned int hook_mask = table->valid_hooks;
1736	uint8_t i, num_hooks = hweight32(hook_mask);
1737	uint8_t hooknum;
1738	struct nf_hook_ops *ops;
1739
1740	if (!num_hooks)
1741		return ERR_PTR(-EINVAL);
1742
1743	ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
1744	if (ops == NULL)
1745		return ERR_PTR(-ENOMEM);
1746
1747	for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
1748	     hook_mask >>= 1, ++hooknum) {
1749		if (!(hook_mask & 1))
1750			continue;
1751		ops[i].hook     = fn;
1752		ops[i].pf       = table->af;
1753		ops[i].hooknum  = hooknum;
1754		ops[i].priority = table->priority;
1755		++i;
1756	}
1757
1758	return ops;
1759}
1760EXPORT_SYMBOL_GPL(xt_hook_ops_alloc);
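/*
 * Typical use, mirroring the iptable_* modules (sketch only):
 *
 *	static struct nf_hook_ops *filter_ops __read_mostly;
 *
 *	filter_ops = xt_hook_ops_alloc(&packet_filter, ipt_do_table);
 *	if (IS_ERR(filter_ops))
 *		return PTR_ERR(filter_ops);
 *
 * The ops array is later handed to nf_register_net_hooks() once the
 * table has been set up in a given netns.
 */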
1761
1762int xt_register_template(const struct xt_table *table,
1763			 int (*table_init)(struct net *net))
1764{
1765	int ret = -EEXIST, af = table->af;
1766	struct xt_template *t;
1767
1768	mutex_lock(&xt[af].mutex);
1769
1770	list_for_each_entry(t, &xt_templates[af], list) {
1771		if (WARN_ON_ONCE(strcmp(table->name, t->name) == 0))
1772			goto out_unlock;
1773	}
1774
1775	ret = -ENOMEM;
1776	t = kzalloc(sizeof(*t), GFP_KERNEL);
1777	if (!t)
1778		goto out_unlock;
1779
1780	BUILD_BUG_ON(sizeof(t->name) != sizeof(table->name));
1781
1782	strscpy(t->name, table->name, sizeof(t->name));
1783	t->table_init = table_init;
1784	t->me = table->me;
1785	list_add(&t->list, &xt_templates[af]);
1786	ret = 0;
1787out_unlock:
1788	mutex_unlock(&xt[af].mutex);
1789	return ret;
1790}
1791EXPORT_SYMBOL_GPL(xt_register_template);
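/*
 * Usage sketch (how a table module wires this up; the names mirror
 * iptable_filter):
 *
 *	static int __init iptable_filter_init(void)
 *	{
 *		return xt_register_template(&packet_filter,
 *					    iptable_filter_table_init);
 *	}
 *
 * The table_init callback then runs lazily from xt_find_table_lock()
 * the first time a netns asks for the table by name.
 */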
1792
1793void xt_unregister_template(const struct xt_table *table)
1794{
1795	struct xt_template *t;
1796	int af = table->af;
1797
1798	mutex_lock(&xt[af].mutex);
1799	list_for_each_entry(t, &xt_templates[af], list) {
1800		if (strcmp(table->name, t->name))
1801			continue;
1802
1803		list_del(&t->list);
1804		mutex_unlock(&xt[af].mutex);
1805		kfree(t);
1806		return;
1807	}
1808
1809	mutex_unlock(&xt[af].mutex);
1810	WARN_ON_ONCE(1);
1811}
1812EXPORT_SYMBOL_GPL(xt_unregister_template);
1813
1814int xt_proto_init(struct net *net, u_int8_t af)
1815{
1816#ifdef CONFIG_PROC_FS
1817	char buf[XT_FUNCTION_MAXNAMELEN];
1818	struct proc_dir_entry *proc;
1819	kuid_t root_uid;
1820	kgid_t root_gid;
1821#endif
1822
1823	if (af >= ARRAY_SIZE(xt_prefix))
1824		return -EINVAL;
1825
1826
1827#ifdef CONFIG_PROC_FS
1828	root_uid = make_kuid(net->user_ns, 0);
1829	root_gid = make_kgid(net->user_ns, 0);
1830
1831	strscpy(buf, xt_prefix[af], sizeof(buf));
1832	strlcat(buf, FORMAT_TABLES, sizeof(buf));
1833	proc = proc_create_net_data(buf, 0440, net->proc_net, &xt_table_seq_ops,
1834			sizeof(struct seq_net_private),
1835			(void *)(unsigned long)af);
1836	if (!proc)
1837		goto out;
1838	if (uid_valid(root_uid) && gid_valid(root_gid))
1839		proc_set_user(proc, root_uid, root_gid);
1840
1841	strscpy(buf, xt_prefix[af], sizeof(buf));
1842	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1843	proc = proc_create_seq_private(buf, 0440, net->proc_net,
1844			&xt_match_seq_ops, sizeof(struct nf_mttg_trav),
1845			(void *)(unsigned long)af);
1846	if (!proc)
1847		goto out_remove_tables;
1848	if (uid_valid(root_uid) && gid_valid(root_gid))
1849		proc_set_user(proc, root_uid, root_gid);
1850
1851	strscpy(buf, xt_prefix[af], sizeof(buf));
1852	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1853	proc = proc_create_seq_private(buf, 0440, net->proc_net,
1854			 &xt_target_seq_ops, sizeof(struct nf_mttg_trav),
1855			 (void *)(unsigned long)af);
1856	if (!proc)
1857		goto out_remove_matches;
1858	if (uid_valid(root_uid) && gid_valid(root_gid))
1859		proc_set_user(proc, root_uid, root_gid);
1860#endif
1861
1862	return 0;
1863
1864#ifdef CONFIG_PROC_FS
1865out_remove_matches:
1866	strscpy(buf, xt_prefix[af], sizeof(buf));
1867	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1868	remove_proc_entry(buf, net->proc_net);
1869
1870out_remove_tables:
1871	strscpy(buf, xt_prefix[af], sizeof(buf));
1872	strlcat(buf, FORMAT_TABLES, sizeof(buf));
1873	remove_proc_entry(buf, net->proc_net);
1874out:
1875	return -1;
1876#endif
1877}
1878EXPORT_SYMBOL_GPL(xt_proto_init);
1879
1880void xt_proto_fini(struct net *net, u_int8_t af)
1881{
1882#ifdef CONFIG_PROC_FS
1883	char buf[XT_FUNCTION_MAXNAMELEN];
1884
1885	strscpy(buf, xt_prefix[af], sizeof(buf));
1886	strlcat(buf, FORMAT_TABLES, sizeof(buf));
1887	remove_proc_entry(buf, net->proc_net);
1888
1889	strscpy(buf, xt_prefix[af], sizeof(buf));
1890	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1891	remove_proc_entry(buf, net->proc_net);
1892
1893	strscpy(buf, xt_prefix[af], sizeof(buf));
1894	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1895	remove_proc_entry(buf, net->proc_net);
1896#endif /*CONFIG_PROC_FS*/
1897}
1898EXPORT_SYMBOL_GPL(xt_proto_fini);
1899
1900/**
1901 * xt_percpu_counter_alloc - allocate x_tables rule counter
1902 *
1903 * @state: pointer to xt_percpu allocation state
1904 * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
1905 *
1906 * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
1907 * contain the address of the real (percpu) counter.
1908 *
1909 * Rule evaluation needs to use xt_get_this_cpu_counter() helper
1910 * to fetch the real percpu counter.
1911 *
1912 * To speed up allocation and improve data locality, a 4kb block is
1913 * allocated.  Freeing any counter may free an entire block, so all
1914 * counters allocated using the same state must be freed at the same
1915 * time.
1916 *
1917 * xt_percpu_counter_alloc_state contains the base address of the
1918 * allocated page and the current sub-offset.
1919 *
1920 * returns false on error.
1921 */
1922bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
1923			     struct xt_counters *counter)
1924{
1925	BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));
1926
1927	if (nr_cpu_ids <= 1)
1928		return true;
1929
1930	if (!state->mem) {
1931		state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
1932					    XT_PCPU_BLOCK_SIZE);
1933		if (!state->mem)
1934			return false;
1935	}
1936	counter->pcnt = (__force unsigned long)(state->mem + state->off);
1937	state->off += sizeof(*counter);
1938	if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
1939		state->mem = NULL;
1940		state->off = 0;
1941	}
1942	return true;
1943}
1944EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);
1945
1946void xt_percpu_counter_free(struct xt_counters *counters)
1947{
1948	unsigned long pcnt = counters->pcnt;
1949
1950	if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
1951		free_percpu((void __percpu *)pcnt);
1952}
1953EXPORT_SYMBOL_GPL(xt_percpu_counter_free);
1954
1955static int __net_init xt_net_init(struct net *net)
1956{
1957	struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
1958	int i;
1959
1960	for (i = 0; i < NFPROTO_NUMPROTO; i++)
1961		INIT_LIST_HEAD(&xt_net->tables[i]);
1962	return 0;
1963}
1964
1965static void __net_exit xt_net_exit(struct net *net)
1966{
1967	struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
1968	int i;
1969
1970	for (i = 0; i < NFPROTO_NUMPROTO; i++)
1971		WARN_ON_ONCE(!list_empty(&xt_net->tables[i]));
1972}
1973
1974static struct pernet_operations xt_net_ops = {
1975	.init = xt_net_init,
1976	.exit = xt_net_exit,
1977	.id   = &xt_pernet_id,
1978	.size = sizeof(struct xt_pernet),
1979};
1980
1981static int __init xt_init(void)
1982{
1983	unsigned int i;
1984	int rv;
1985
1986	for_each_possible_cpu(i) {
1987		seqcount_init(&per_cpu(xt_recseq, i));
1988	}
1989
1990	xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
1991	if (!xt)
1992		return -ENOMEM;
1993
1994	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
1995		mutex_init(&xt[i].mutex);
1996#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
1997		mutex_init(&xt[i].compat_mutex);
1998		xt[i].compat_tab = NULL;
1999#endif
2000		INIT_LIST_HEAD(&xt[i].target);
2001		INIT_LIST_HEAD(&xt[i].match);
2002		INIT_LIST_HEAD(&xt_templates[i]);
2003	}
2004	rv = register_pernet_subsys(&xt_net_ops);
2005	if (rv < 0)
2006		kfree(xt);
2007	return rv;
2008}
2009
2010static void __exit xt_fini(void)
2011{
2012	unregister_pernet_subsys(&xt_net_ops);
2013	kfree(xt);
2014}
2015
2016module_init(xt_init);
2017module_exit(xt_fini);