net/netfilter/x_tables.c, v3.5.6
   1/*
   2 * x_tables core - Backend for {ip,ip6,arp}_tables
   3 *
   4 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
   5 *
   6 * Based on existing ip_tables code which is
   7 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
   8 *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License version 2 as
  12 * published by the Free Software Foundation.
  13 *
  14 */
  15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  16#include <linux/kernel.h>
  17#include <linux/module.h>
  18#include <linux/socket.h>
  19#include <linux/net.h>
  20#include <linux/proc_fs.h>
  21#include <linux/seq_file.h>
  22#include <linux/string.h>
  23#include <linux/vmalloc.h>
  24#include <linux/mutex.h>
  25#include <linux/mm.h>
  26#include <linux/slab.h>
  27#include <linux/audit.h>
  28#include <net/net_namespace.h>
  29
  30#include <linux/netfilter/x_tables.h>
  31#include <linux/netfilter_arp.h>
  32#include <linux/netfilter_ipv4/ip_tables.h>
  33#include <linux/netfilter_ipv6/ip6_tables.h>
  34#include <linux/netfilter_arp/arp_tables.h>
  35
  36MODULE_LICENSE("GPL");
  37MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
  38MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
  39
  40#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
  41
  42struct compat_delta {
  43	unsigned int offset; /* offset in kernel */
  44	int delta; /* delta in 32bit user land */
  45};
  46
  47struct xt_af {
  48	struct mutex mutex;
  49	struct list_head match;
  50	struct list_head target;
  51#ifdef CONFIG_COMPAT
  52	struct mutex compat_mutex;
  53	struct compat_delta *compat_tab;
  54	unsigned int number; /* number of slots in compat_tab[] */
  55	unsigned int cur; /* number of used slots in compat_tab[] */
  56#endif
  57};
  58
  59static struct xt_af *xt;
  60
  61static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
  62	[NFPROTO_UNSPEC] = "x",
  63	[NFPROTO_IPV4]   = "ip",
  64	[NFPROTO_ARP]    = "arp",
  65	[NFPROTO_BRIDGE] = "eb",
  66	[NFPROTO_IPV6]   = "ip6",
  67};
  68
  69/* Allow this many total (re)entries. */
  70static const unsigned int xt_jumpstack_multiplier = 2;
  71
  72/* Registration hooks for targets. */
  73int
  74xt_register_target(struct xt_target *target)
  75{
  76	u_int8_t af = target->family;
  77	int ret;
  78
  79	ret = mutex_lock_interruptible(&xt[af].mutex);
  80	if (ret != 0)
  81		return ret;
  82	list_add(&target->list, &xt[af].target);
  83	mutex_unlock(&xt[af].mutex);
  84	return ret;
  85}
  86EXPORT_SYMBOL(xt_register_target);
  87
  88void
  89xt_unregister_target(struct xt_target *target)
  90{
  91	u_int8_t af = target->family;
  92
  93	mutex_lock(&xt[af].mutex);
  94	list_del(&target->list);
  95	mutex_unlock(&xt[af].mutex);
  96}
  97EXPORT_SYMBOL(xt_unregister_target);
  98
  99int
 100xt_register_targets(struct xt_target *target, unsigned int n)
 101{
 102	unsigned int i;
 103	int err = 0;
 104
 105	for (i = 0; i < n; i++) {
 106		err = xt_register_target(&target[i]);
 107		if (err)
 108			goto err;
 109	}
 110	return err;
 111
 112err:
 113	if (i > 0)
 114		xt_unregister_targets(target, i);
 115	return err;
 116}
 117EXPORT_SYMBOL(xt_register_targets);
 118
 119void
 120xt_unregister_targets(struct xt_target *target, unsigned int n)
 121{
 122	while (n-- > 0)
 123		xt_unregister_target(&target[n]);
 124}
 125EXPORT_SYMBOL(xt_unregister_targets);
 126
 127int
 128xt_register_match(struct xt_match *match)
 129{
 130	u_int8_t af = match->family;
 131	int ret;
 132
 133	ret = mutex_lock_interruptible(&xt[af].mutex);
 134	if (ret != 0)
 135		return ret;
 136
 137	list_add(&match->list, &xt[af].match);
 138	mutex_unlock(&xt[af].mutex);
 139
 140	return ret;
 141}
 142EXPORT_SYMBOL(xt_register_match);
 143
 144void
 145xt_unregister_match(struct xt_match *match)
 146{
 147	u_int8_t af = match->family;
 148
 149	mutex_lock(&xt[af].mutex);
 150	list_del(&match->list);
 151	mutex_unlock(&xt[af].mutex);
 152}
 153EXPORT_SYMBOL(xt_unregister_match);
 154
 155int
 156xt_register_matches(struct xt_match *match, unsigned int n)
 157{
 158	unsigned int i;
 159	int err = 0;
 160
 161	for (i = 0; i < n; i++) {
 162		err = xt_register_match(&match[i]);
 163		if (err)
 164			goto err;
 165	}
 166	return err;
 167
 168err:
 169	if (i > 0)
 170		xt_unregister_matches(match, i);
 171	return err;
 172}
 173EXPORT_SYMBOL(xt_register_matches);
 174
 175void
 176xt_unregister_matches(struct xt_match *match, unsigned int n)
 177{
 178	while (n-- > 0)
 179		xt_unregister_match(&match[n]);
 180}
 181EXPORT_SYMBOL(xt_unregister_matches);
 182
 183
 184/*
 185 * These are weird, but module loading must not be done with mutex
 186 * held (since they will register), and we have to have a single
 187 * function to use.
 188 */
 189
 190/* Find match, grabs ref.  Returns ERR_PTR() on error. */
 191struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
 192{
 193	struct xt_match *m;
 194	int err = -ENOENT;
 195
 196	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
 197		return ERR_PTR(-EINTR);
 198
 199	list_for_each_entry(m, &xt[af].match, list) {
 200		if (strcmp(m->name, name) == 0) {
 201			if (m->revision == revision) {
 202				if (try_module_get(m->me)) {
 203					mutex_unlock(&xt[af].mutex);
 204					return m;
 205				}
 206			} else
 207				err = -EPROTOTYPE; /* Found something. */
 208		}
 209	}
 210	mutex_unlock(&xt[af].mutex);
 211
 212	if (af != NFPROTO_UNSPEC)
 213		/* Try searching again in the family-independent list */
 214		return xt_find_match(NFPROTO_UNSPEC, name, revision);
 215
 216	return ERR_PTR(err);
 217}
 218EXPORT_SYMBOL(xt_find_match);
 219
 220struct xt_match *
 221xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
 222{
 223	struct xt_match *match;
 224
 225	match = xt_find_match(nfproto, name, revision);
 226	if (IS_ERR(match)) {
 227		request_module("%st_%s", xt_prefix[nfproto], name);
 228		match = xt_find_match(nfproto, name, revision);
 229	}
 230
 231	return match;
 232}
 233EXPORT_SYMBOL_GPL(xt_request_find_match);
 234
 235/* Find target, grabs ref.  Returns ERR_PTR() on error. */
 236struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
 237{
 238	struct xt_target *t;
 239	int err = -ENOENT;
 240
 241	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
 242		return ERR_PTR(-EINTR);
 243
 244	list_for_each_entry(t, &xt[af].target, list) {
 245		if (strcmp(t->name, name) == 0) {
 246			if (t->revision == revision) {
 247				if (try_module_get(t->me)) {
 248					mutex_unlock(&xt[af].mutex);
 249					return t;
 250				}
 251			} else
 252				err = -EPROTOTYPE; /* Found something. */
 253		}
 254	}
 255	mutex_unlock(&xt[af].mutex);
 256
 257	if (af != NFPROTO_UNSPEC)
 258		/* Try searching again in the family-independent list */
 259		return xt_find_target(NFPROTO_UNSPEC, name, revision);
 260
 261	return ERR_PTR(err);
 262}
 263EXPORT_SYMBOL(xt_find_target);
 264
 265struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
 266{
 267	struct xt_target *target;
 268
 269	target = xt_find_target(af, name, revision);
 270	if (IS_ERR(target)) {
 271		request_module("%st_%s", xt_prefix[af], name);
 272		target = xt_find_target(af, name, revision);
 273	}
 274
 275	return target;
 276}
 277EXPORT_SYMBOL_GPL(xt_request_find_target);
 278
 279static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
 280{
 281	const struct xt_match *m;
 282	int have_rev = 0;
 283
 284	list_for_each_entry(m, &xt[af].match, list) {
 285		if (strcmp(m->name, name) == 0) {
 286			if (m->revision > *bestp)
 287				*bestp = m->revision;
 288			if (m->revision == revision)
 289				have_rev = 1;
 290		}
 291	}
 292
 293	if (af != NFPROTO_UNSPEC && !have_rev)
 294		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
 295
 296	return have_rev;
 297}
 298
 299static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
 300{
 301	const struct xt_target *t;
 302	int have_rev = 0;
 303
 304	list_for_each_entry(t, &xt[af].target, list) {
 305		if (strcmp(t->name, name) == 0) {
 306			if (t->revision > *bestp)
 307				*bestp = t->revision;
 308			if (t->revision == revision)
 309				have_rev = 1;
 310		}
 311	}
 312
 313	if (af != NFPROTO_UNSPEC && !have_rev)
 314		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
 315
 316	return have_rev;
 317}
 318
 319/* Returns true or false (if no such extension at all) */
 320int xt_find_revision(u8 af, const char *name, u8 revision, int target,
 321		     int *err)
 322{
 323	int have_rev, best = -1;
 324
 325	if (mutex_lock_interruptible(&xt[af].mutex) != 0) {
 326		*err = -EINTR;
 327		return 1;
 328	}
 329	if (target == 1)
 330		have_rev = target_revfn(af, name, revision, &best);
 331	else
 332		have_rev = match_revfn(af, name, revision, &best);
 333	mutex_unlock(&xt[af].mutex);
 334
 335	/* Nothing at all?  Return 0 to try loading module. */
 336	if (best == -1) {
 337		*err = -ENOENT;
 338		return 0;
 339	}
 340
 341	*err = best;
 342	if (!have_rev)
 343		*err = -EPROTONOSUPPORT;
 344	return 1;
 345}
 346EXPORT_SYMBOL_GPL(xt_find_revision);
 347
 348static char *textify_hooks(char *buf, size_t size, unsigned int mask)
 349{
 350	static const char *const names[] = {
 351		"PREROUTING", "INPUT", "FORWARD",
 352		"OUTPUT", "POSTROUTING", "BROUTING",
 353	};
 354	unsigned int i;
 355	char *p = buf;
 356	bool np = false;
 357	int res;
 358
 359	*p = '\0';
 360	for (i = 0; i < ARRAY_SIZE(names); ++i) {
 361		if (!(mask & (1 << i)))
 362			continue;
 363		res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
 364		if (res > 0) {
 365			size -= res;
 366			p += res;
 367		}
 368		np = true;
 369	}
 370
 371	return buf;
 372}
 373
 374int xt_check_match(struct xt_mtchk_param *par,
 375		   unsigned int size, u_int8_t proto, bool inv_proto)
 376{
 377	int ret;
 378
 379	if (XT_ALIGN(par->match->matchsize) != size &&
 380	    par->match->matchsize != -1) {
 381		/*
 382		 * ebt_among is exempt from centralized matchsize checking
 383		 * because it uses a dynamic-size data set.
 384		 */
 385		pr_err("%s_tables: %s.%u match: invalid size "
 386		       "%u (kernel) != (user) %u\n",
 387		       xt_prefix[par->family], par->match->name,
 388		       par->match->revision,
 389		       XT_ALIGN(par->match->matchsize), size);
 390		return -EINVAL;
 391	}
 392	if (par->match->table != NULL &&
 393	    strcmp(par->match->table, par->table) != 0) {
 394		pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
 395		       xt_prefix[par->family], par->match->name,
 396		       par->match->table, par->table);
 397		return -EINVAL;
 398	}
 399	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
 400		char used[64], allow[64];
 401
 402		pr_err("%s_tables: %s match: used from hooks %s, but only "
 403		       "valid from %s\n",
 404		       xt_prefix[par->family], par->match->name,
 405		       textify_hooks(used, sizeof(used), par->hook_mask),
 406		       textify_hooks(allow, sizeof(allow), par->match->hooks));
 407		return -EINVAL;
 408	}
 409	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
 410		pr_err("%s_tables: %s match: only valid for protocol %u\n",
 411		       xt_prefix[par->family], par->match->name,
 412		       par->match->proto);
 413		return -EINVAL;
 414	}
 415	if (par->match->checkentry != NULL) {
 416		ret = par->match->checkentry(par);
 417		if (ret < 0)
 418			return ret;
 419		else if (ret > 0)
 420			/* Flag up potential errors. */
 421			return -EIO;
 422	}
 423	return 0;
 424}
 425EXPORT_SYMBOL_GPL(xt_check_match);
 426
 427#ifdef CONFIG_COMPAT
 428int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
 429{
 430	struct xt_af *xp = &xt[af];
 431
 432	if (!xp->compat_tab) {
 433		if (!xp->number)
 434			return -EINVAL;
 435		xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
 436		if (!xp->compat_tab)
 437			return -ENOMEM;
 438		xp->cur = 0;
 439	}
 440
 441	if (xp->cur >= xp->number)
 442		return -EINVAL;
 443
 444	if (xp->cur)
 445		delta += xp->compat_tab[xp->cur - 1].delta;
 446	xp->compat_tab[xp->cur].offset = offset;
 447	xp->compat_tab[xp->cur].delta = delta;
 448	xp->cur++;
 449	return 0;
 450}
 451EXPORT_SYMBOL_GPL(xt_compat_add_offset);
 452
 453void xt_compat_flush_offsets(u_int8_t af)
 454{
 455	if (xt[af].compat_tab) {
 456		vfree(xt[af].compat_tab);
 457		xt[af].compat_tab = NULL;
 458		xt[af].number = 0;
 459		xt[af].cur = 0;
 460	}
 461}
 462EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
 463
 464int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
 465{
 466	struct compat_delta *tmp = xt[af].compat_tab;
 467	int mid, left = 0, right = xt[af].cur - 1;
 468
 469	while (left <= right) {
 470		mid = (left + right) >> 1;
 471		if (offset > tmp[mid].offset)
 472			left = mid + 1;
 473		else if (offset < tmp[mid].offset)
 474			right = mid - 1;
 475		else
 476			return mid ? tmp[mid - 1].delta : 0;
 477	}
 478	return left ? tmp[left - 1].delta : 0;
 479}
 480EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
 481
 482void xt_compat_init_offsets(u_int8_t af, unsigned int number)
 483{
 484	xt[af].number = number;
 485	xt[af].cur = 0;
 486}
 487EXPORT_SYMBOL(xt_compat_init_offsets);
 488
 489int xt_compat_match_offset(const struct xt_match *match)
 490{
 491	u_int16_t csize = match->compatsize ? : match->matchsize;
 492	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
 493}
 494EXPORT_SYMBOL_GPL(xt_compat_match_offset);
 495
 496int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
 497			      unsigned int *size)
 498{
 499	const struct xt_match *match = m->u.kernel.match;
 500	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
 501	int pad, off = xt_compat_match_offset(match);
 502	u_int16_t msize = cm->u.user.match_size;
 503
 504	m = *dstptr;
 505	memcpy(m, cm, sizeof(*cm));
 506	if (match->compat_from_user)
 507		match->compat_from_user(m->data, cm->data);
 508	else
 509		memcpy(m->data, cm->data, msize - sizeof(*cm));
 510	pad = XT_ALIGN(match->matchsize) - match->matchsize;
 511	if (pad > 0)
 512		memset(m->data + match->matchsize, 0, pad);
 513
 514	msize += off;
 515	m->u.user.match_size = msize;
 516
 517	*size += off;
 518	*dstptr += msize;
 519	return 0;
 520}
 521EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
 522
 523int xt_compat_match_to_user(const struct xt_entry_match *m,
 524			    void __user **dstptr, unsigned int *size)
 525{
 526	const struct xt_match *match = m->u.kernel.match;
 527	struct compat_xt_entry_match __user *cm = *dstptr;
 528	int off = xt_compat_match_offset(match);
 529	u_int16_t msize = m->u.user.match_size - off;
 530
 531	if (copy_to_user(cm, m, sizeof(*cm)) ||
 532	    put_user(msize, &cm->u.user.match_size) ||
 533	    copy_to_user(cm->u.user.name, m->u.kernel.match->name,
 534			 strlen(m->u.kernel.match->name) + 1))
 535		return -EFAULT;
 536
 537	if (match->compat_to_user) {
 538		if (match->compat_to_user((void __user *)cm->data, m->data))
 539			return -EFAULT;
 540	} else {
 541		if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
 542			return -EFAULT;
 543	}
 544
 545	*size -= off;
 546	*dstptr += msize;
 547	return 0;
 548}
 549EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
 550#endif /* CONFIG_COMPAT */
 551
 552int xt_check_target(struct xt_tgchk_param *par,
 553		    unsigned int size, u_int8_t proto, bool inv_proto)
 554{
 555	int ret;
 556
 557	if (XT_ALIGN(par->target->targetsize) != size) {
 558		pr_err("%s_tables: %s.%u target: invalid size "
 559		       "%u (kernel) != (user) %u\n",
 560		       xt_prefix[par->family], par->target->name,
 561		       par->target->revision,
 562		       XT_ALIGN(par->target->targetsize), size);
 563		return -EINVAL;
 564	}
 565	if (par->target->table != NULL &&
 566	    strcmp(par->target->table, par->table) != 0) {
 567		pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
 568		       xt_prefix[par->family], par->target->name,
 569		       par->target->table, par->table);
 570		return -EINVAL;
 571	}
 572	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
 573		char used[64], allow[64];
 574
 575		pr_err("%s_tables: %s target: used from hooks %s, but only "
 576		       "usable from %s\n",
 577		       xt_prefix[par->family], par->target->name,
 578		       textify_hooks(used, sizeof(used), par->hook_mask),
 579		       textify_hooks(allow, sizeof(allow), par->target->hooks));
 580		return -EINVAL;
 581	}
 582	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
 583		pr_err("%s_tables: %s target: only valid for protocol %u\n",
 584		       xt_prefix[par->family], par->target->name,
 585		       par->target->proto);
 586		return -EINVAL;
 587	}
 588	if (par->target->checkentry != NULL) {
 589		ret = par->target->checkentry(par);
 590		if (ret < 0)
 591			return ret;
 592		else if (ret > 0)
 593			/* Flag up potential errors. */
 594			return -EIO;
 595	}
 596	return 0;
 597}
 598EXPORT_SYMBOL_GPL(xt_check_target);
 599
 600#ifdef CONFIG_COMPAT
 601int xt_compat_target_offset(const struct xt_target *target)
 602{
 603	u_int16_t csize = target->compatsize ? : target->targetsize;
 604	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
 605}
 606EXPORT_SYMBOL_GPL(xt_compat_target_offset);
 607
 608void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
 609				unsigned int *size)
 610{
 611	const struct xt_target *target = t->u.kernel.target;
 612	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
 613	int pad, off = xt_compat_target_offset(target);
 614	u_int16_t tsize = ct->u.user.target_size;
 615
 616	t = *dstptr;
 617	memcpy(t, ct, sizeof(*ct));
 618	if (target->compat_from_user)
 619		target->compat_from_user(t->data, ct->data);
 620	else
 621		memcpy(t->data, ct->data, tsize - sizeof(*ct));
 622	pad = XT_ALIGN(target->targetsize) - target->targetsize;
 623	if (pad > 0)
 624		memset(t->data + target->targetsize, 0, pad);
 625
 626	tsize += off;
 627	t->u.user.target_size = tsize;
 628
 629	*size += off;
 630	*dstptr += tsize;
 631}
 632EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
 633
 634int xt_compat_target_to_user(const struct xt_entry_target *t,
 635			     void __user **dstptr, unsigned int *size)
 636{
 637	const struct xt_target *target = t->u.kernel.target;
 638	struct compat_xt_entry_target __user *ct = *dstptr;
 639	int off = xt_compat_target_offset(target);
 640	u_int16_t tsize = t->u.user.target_size - off;
 641
 642	if (copy_to_user(ct, t, sizeof(*ct)) ||
 643	    put_user(tsize, &ct->u.user.target_size) ||
 644	    copy_to_user(ct->u.user.name, t->u.kernel.target->name,
 645			 strlen(t->u.kernel.target->name) + 1))
 646		return -EFAULT;
 647
 648	if (target->compat_to_user) {
 649		if (target->compat_to_user((void __user *)ct->data, t->data))
 650			return -EFAULT;
 651	} else {
 652		if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
 653			return -EFAULT;
 654	}
 655
 656	*size -= off;
 657	*dstptr += tsize;
 658	return 0;
 659}
 660EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
 661#endif
 662
 663struct xt_table_info *xt_alloc_table_info(unsigned int size)
 664{
 665	struct xt_table_info *newinfo;
 666	int cpu;
 667
 668	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
 669	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
 670		return NULL;
 671
 672	newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
 673	if (!newinfo)
 674		return NULL;
 675
 676	newinfo->size = size;
 677
 678	for_each_possible_cpu(cpu) {
 679		if (size <= PAGE_SIZE)
 680			newinfo->entries[cpu] = kmalloc_node(size,
 681							GFP_KERNEL,
 682							cpu_to_node(cpu));
 683		else
 684			newinfo->entries[cpu] = vmalloc_node(size,
 685							cpu_to_node(cpu));
 686
 687		if (newinfo->entries[cpu] == NULL) {
 688			xt_free_table_info(newinfo);
 689			return NULL;
 690		}
 691	}
 692
 693	return newinfo;
 694}
 695EXPORT_SYMBOL(xt_alloc_table_info);
 696
 697void xt_free_table_info(struct xt_table_info *info)
 698{
 699	int cpu;
 700
 701	for_each_possible_cpu(cpu) {
 702		if (info->size <= PAGE_SIZE)
 703			kfree(info->entries[cpu]);
 704		else
 705			vfree(info->entries[cpu]);
 706	}
 707
 708	if (info->jumpstack != NULL) {
 709		if (sizeof(void *) * info->stacksize > PAGE_SIZE) {
 710			for_each_possible_cpu(cpu)
 711				vfree(info->jumpstack[cpu]);
 712		} else {
 713			for_each_possible_cpu(cpu)
 714				kfree(info->jumpstack[cpu]);
 715		}
 716	}
 717
 718	if (sizeof(void **) * nr_cpu_ids > PAGE_SIZE)
 719		vfree(info->jumpstack);
 720	else
 721		kfree(info->jumpstack);
 722
 723	free_percpu(info->stackptr);
 724
 725	kfree(info);
 726}
 727EXPORT_SYMBOL(xt_free_table_info);
 728
 729/* Find table by name, grabs mutex & ref.  Returns ERR_PTR() on error. */
 730struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
 731				    const char *name)
 732{
 733	struct xt_table *t;
 734
 735	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
 736		return ERR_PTR(-EINTR);
 737
 738	list_for_each_entry(t, &net->xt.tables[af], list)
 739		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
 740			return t;
 741	mutex_unlock(&xt[af].mutex);
 742	return NULL;
 743}
 744EXPORT_SYMBOL_GPL(xt_find_table_lock);
 745
 746void xt_table_unlock(struct xt_table *table)
 747{
 748	mutex_unlock(&xt[table->af].mutex);
 749}
 750EXPORT_SYMBOL_GPL(xt_table_unlock);
 751
 752#ifdef CONFIG_COMPAT
 753void xt_compat_lock(u_int8_t af)
 754{
 755	mutex_lock(&xt[af].compat_mutex);
 756}
 757EXPORT_SYMBOL_GPL(xt_compat_lock);
 758
 759void xt_compat_unlock(u_int8_t af)
 760{
 761	mutex_unlock(&xt[af].compat_mutex);
 762}
 763EXPORT_SYMBOL_GPL(xt_compat_unlock);
 764#endif
 765
 766DEFINE_PER_CPU(seqcount_t, xt_recseq);
 767EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
 768
 769static int xt_jumpstack_alloc(struct xt_table_info *i)
 770{
 771	unsigned int size;
 772	int cpu;
 773
 774	i->stackptr = alloc_percpu(unsigned int);
 775	if (i->stackptr == NULL)
 776		return -ENOMEM;
 777
 778	size = sizeof(void **) * nr_cpu_ids;
 779	if (size > PAGE_SIZE)
 780		i->jumpstack = vzalloc(size);
 781	else
 782		i->jumpstack = kzalloc(size, GFP_KERNEL);
 783	if (i->jumpstack == NULL)
 784		return -ENOMEM;
 785
 786	i->stacksize *= xt_jumpstack_multiplier;
 787	size = sizeof(void *) * i->stacksize;
 788	for_each_possible_cpu(cpu) {
 789		if (size > PAGE_SIZE)
 790			i->jumpstack[cpu] = vmalloc_node(size,
 791				cpu_to_node(cpu));
 792		else
 793			i->jumpstack[cpu] = kmalloc_node(size,
 794				GFP_KERNEL, cpu_to_node(cpu));
 795		if (i->jumpstack[cpu] == NULL)
 796			/*
 797			 * Freeing will be done later on by the callers. The
 798			 * chain is: xt_replace_table -> __do_replace ->
 799			 * do_replace -> xt_free_table_info.
 800			 */
 801			return -ENOMEM;
 802	}
 803
 804	return 0;
 805}
 806
 807struct xt_table_info *
 808xt_replace_table(struct xt_table *table,
 809	      unsigned int num_counters,
 810	      struct xt_table_info *newinfo,
 811	      int *error)
 812{
 813	struct xt_table_info *private;
 814	int ret;
 815
 816	ret = xt_jumpstack_alloc(newinfo);
 817	if (ret < 0) {
 818		*error = ret;
 819		return NULL;
 820	}
 821
 822	/* Do the substitution. */
 823	local_bh_disable();
 824	private = table->private;
 825
 826	/* Check inside lock: is the old number correct? */
 827	if (num_counters != private->number) {
 828		pr_debug("num_counters != table->private->number (%u/%u)\n",
 829			 num_counters, private->number);
 830		local_bh_enable();
 831		*error = -EAGAIN;
 832		return NULL;
 833	}
 834
 835	table->private = newinfo;
 836	newinfo->initial_entries = private->initial_entries;
 837
 838	/*
 839	 * Even though table entries have now been swapped, other CPU's
 840	 * may still be using the old entries. This is okay, because
 841	 * resynchronization happens because of the locking done
 842	 * during the get_counters() routine.
 843	 */
 844	local_bh_enable();
 845
 846#ifdef CONFIG_AUDIT
 847	if (audit_enabled) {
 848		struct audit_buffer *ab;
 849
 850		ab = audit_log_start(current->audit_context, GFP_KERNEL,
 851				     AUDIT_NETFILTER_CFG);
 852		if (ab) {
 853			audit_log_format(ab, "table=%s family=%u entries=%u",
 854					 table->name, table->af,
 855					 private->number);
 856			audit_log_end(ab);
 857		}
 858	}
 859#endif
 860
 861	return private;
 862}
 863EXPORT_SYMBOL_GPL(xt_replace_table);
 864
 865struct xt_table *xt_register_table(struct net *net,
 866				   const struct xt_table *input_table,
 867				   struct xt_table_info *bootstrap,
 868				   struct xt_table_info *newinfo)
 869{
 870	int ret;
 871	struct xt_table_info *private;
 872	struct xt_table *t, *table;
 873
 874	/* Don't add one object to multiple lists. */
 875	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
 876	if (!table) {
 877		ret = -ENOMEM;
 878		goto out;
 879	}
 880
 881	ret = mutex_lock_interruptible(&xt[table->af].mutex);
 882	if (ret != 0)
 883		goto out_free;
 884
 885	/* Don't autoload: we'd eat our tail... */
 886	list_for_each_entry(t, &net->xt.tables[table->af], list) {
 887		if (strcmp(t->name, table->name) == 0) {
 888			ret = -EEXIST;
 889			goto unlock;
 890		}
 891	}
 892
 893	/* Simplifies replace_table code. */
 894	table->private = bootstrap;
 895
 896	if (!xt_replace_table(table, 0, newinfo, &ret))
 897		goto unlock;
 898
 899	private = table->private;
 900	pr_debug("table->private->number = %u\n", private->number);
 901
 902	/* save number of initial entries */
 903	private->initial_entries = private->number;
 904
 905	list_add(&table->list, &net->xt.tables[table->af]);
 906	mutex_unlock(&xt[table->af].mutex);
 907	return table;
 908
 909 unlock:
 910	mutex_unlock(&xt[table->af].mutex);
 911out_free:
 912	kfree(table);
 913out:
 914	return ERR_PTR(ret);
 915}
 916EXPORT_SYMBOL_GPL(xt_register_table);
 917
 918void *xt_unregister_table(struct xt_table *table)
 919{
 920	struct xt_table_info *private;
 921
 922	mutex_lock(&xt[table->af].mutex);
 923	private = table->private;
 924	list_del(&table->list);
 925	mutex_unlock(&xt[table->af].mutex);
 926	kfree(table);
 927
 928	return private;
 929}
 930EXPORT_SYMBOL_GPL(xt_unregister_table);
 931
 932#ifdef CONFIG_PROC_FS
 933struct xt_names_priv {
 934	struct seq_net_private p;
 935	u_int8_t af;
 936};
 937static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
 938{
 939	struct xt_names_priv *priv = seq->private;
 940	struct net *net = seq_file_net(seq);
 941	u_int8_t af = priv->af;
 942
 943	mutex_lock(&xt[af].mutex);
 944	return seq_list_start(&net->xt.tables[af], *pos);
 945}
 946
 947static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 948{
 949	struct xt_names_priv *priv = seq->private;
 950	struct net *net = seq_file_net(seq);
 951	u_int8_t af = priv->af;
 952
 953	return seq_list_next(v, &net->xt.tables[af], pos);
 954}
 955
 956static void xt_table_seq_stop(struct seq_file *seq, void *v)
 957{
 958	struct xt_names_priv *priv = seq->private;
 959	u_int8_t af = priv->af;
 960
 961	mutex_unlock(&xt[af].mutex);
 962}
 963
 964static int xt_table_seq_show(struct seq_file *seq, void *v)
 965{
 966	struct xt_table *table = list_entry(v, struct xt_table, list);
 967
 968	if (strlen(table->name))
 969		return seq_printf(seq, "%s\n", table->name);
 970	else
 971		return 0;
 972}
 973
 974static const struct seq_operations xt_table_seq_ops = {
 975	.start	= xt_table_seq_start,
 976	.next	= xt_table_seq_next,
 977	.stop	= xt_table_seq_stop,
 978	.show	= xt_table_seq_show,
 979};
 980
 981static int xt_table_open(struct inode *inode, struct file *file)
 982{
 983	int ret;
 984	struct xt_names_priv *priv;
 985
 986	ret = seq_open_net(inode, file, &xt_table_seq_ops,
 987			   sizeof(struct xt_names_priv));
 988	if (!ret) {
 989		priv = ((struct seq_file *)file->private_data)->private;
 990		priv->af = (unsigned long)PDE(inode)->data;
 991	}
 992	return ret;
 993}
 994
 995static const struct file_operations xt_table_ops = {
 996	.owner	 = THIS_MODULE,
 997	.open	 = xt_table_open,
 998	.read	 = seq_read,
 999	.llseek	 = seq_lseek,
1000	.release = seq_release_net,
1001};
1002
1003/*
1004 * Traverse state for ip{,6}_{tables,matches} for helping crossing
1005 * the multi-AF mutexes.
1006 */
1007struct nf_mttg_trav {
1008	struct list_head *head, *curr;
1009	uint8_t class, nfproto;
1010};
1011
1012enum {
1013	MTTG_TRAV_INIT,
1014	MTTG_TRAV_NFP_UNSPEC,
1015	MTTG_TRAV_NFP_SPEC,
1016	MTTG_TRAV_DONE,
1017};
1018
1019static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
1020    bool is_target)
1021{
1022	static const uint8_t next_class[] = {
1023		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
1024		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
1025	};
1026	struct nf_mttg_trav *trav = seq->private;
1027
1028	switch (trav->class) {
1029	case MTTG_TRAV_INIT:
1030		trav->class = MTTG_TRAV_NFP_UNSPEC;
1031		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
1032		trav->head = trav->curr = is_target ?
1033			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
1034 		break;
1035	case MTTG_TRAV_NFP_UNSPEC:
1036		trav->curr = trav->curr->next;
1037		if (trav->curr != trav->head)
1038			break;
1039		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
1040		mutex_lock(&xt[trav->nfproto].mutex);
1041		trav->head = trav->curr = is_target ?
1042			&xt[trav->nfproto].target : &xt[trav->nfproto].match;
1043		trav->class = next_class[trav->class];
1044		break;
1045	case MTTG_TRAV_NFP_SPEC:
1046		trav->curr = trav->curr->next;
1047		if (trav->curr != trav->head)
1048			break;
1049		/* fallthru, _stop will unlock */
1050	default:
1051		return NULL;
1052	}
1053
1054	if (ppos != NULL)
1055		++*ppos;
1056	return trav;
1057}
1058
1059static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
1060    bool is_target)
1061{
1062	struct nf_mttg_trav *trav = seq->private;
1063	unsigned int j;
1064
1065	trav->class = MTTG_TRAV_INIT;
1066	for (j = 0; j < *pos; ++j)
1067		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
1068			return NULL;
1069	return trav;
1070}
1071
1072static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
1073{
1074	struct nf_mttg_trav *trav = seq->private;
1075
1076	switch (trav->class) {
1077	case MTTG_TRAV_NFP_UNSPEC:
1078		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
1079		break;
1080	case MTTG_TRAV_NFP_SPEC:
1081		mutex_unlock(&xt[trav->nfproto].mutex);
1082		break;
1083	}
1084}
1085
1086static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
1087{
1088	return xt_mttg_seq_start(seq, pos, false);
1089}
1090
1091static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
1092{
1093	return xt_mttg_seq_next(seq, v, ppos, false);
1094}
1095
1096static int xt_match_seq_show(struct seq_file *seq, void *v)
1097{
1098	const struct nf_mttg_trav *trav = seq->private;
1099	const struct xt_match *match;
1100
1101	switch (trav->class) {
1102	case MTTG_TRAV_NFP_UNSPEC:
1103	case MTTG_TRAV_NFP_SPEC:
1104		if (trav->curr == trav->head)
1105			return 0;
1106		match = list_entry(trav->curr, struct xt_match, list);
1107		return (*match->name == '\0') ? 0 :
1108		       seq_printf(seq, "%s\n", match->name);
1109	}
1110	return 0;
1111}
1112
1113static const struct seq_operations xt_match_seq_ops = {
1114	.start	= xt_match_seq_start,
1115	.next	= xt_match_seq_next,
1116	.stop	= xt_mttg_seq_stop,
1117	.show	= xt_match_seq_show,
1118};
1119
1120static int xt_match_open(struct inode *inode, struct file *file)
1121{
1122	struct seq_file *seq;
1123	struct nf_mttg_trav *trav;
1124	int ret;
1125
1126	trav = kmalloc(sizeof(*trav), GFP_KERNEL);
1127	if (trav == NULL)
1128		return -ENOMEM;
1129
1130	ret = seq_open(file, &xt_match_seq_ops);
1131	if (ret < 0) {
1132		kfree(trav);
1133		return ret;
1134	}
1135
1136	seq = file->private_data;
1137	seq->private = trav;
1138	trav->nfproto = (unsigned long)PDE(inode)->data;
1139	return 0;
1140}
1141
1142static const struct file_operations xt_match_ops = {
1143	.owner	 = THIS_MODULE,
1144	.open	 = xt_match_open,
1145	.read	 = seq_read,
1146	.llseek	 = seq_lseek,
1147	.release = seq_release_private,
1148};
1149
1150static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
1151{
1152	return xt_mttg_seq_start(seq, pos, true);
1153}
1154
1155static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
1156{
1157	return xt_mttg_seq_next(seq, v, ppos, true);
1158}
1159
1160static int xt_target_seq_show(struct seq_file *seq, void *v)
1161{
1162	const struct nf_mttg_trav *trav = seq->private;
1163	const struct xt_target *target;
1164
1165	switch (trav->class) {
1166	case MTTG_TRAV_NFP_UNSPEC:
1167	case MTTG_TRAV_NFP_SPEC:
1168		if (trav->curr == trav->head)
1169			return 0;
1170		target = list_entry(trav->curr, struct xt_target, list);
1171		return (*target->name == '\0') ? 0 :
1172		       seq_printf(seq, "%s\n", target->name);
1173	}
1174	return 0;
1175}
1176
1177static const struct seq_operations xt_target_seq_ops = {
1178	.start	= xt_target_seq_start,
1179	.next	= xt_target_seq_next,
1180	.stop	= xt_mttg_seq_stop,
1181	.show	= xt_target_seq_show,
1182};
1183
1184static int xt_target_open(struct inode *inode, struct file *file)
1185{
1186	struct seq_file *seq;
1187	struct nf_mttg_trav *trav;
1188	int ret;
1189
1190	trav = kmalloc(sizeof(*trav), GFP_KERNEL);
1191	if (trav == NULL)
1192		return -ENOMEM;
1193
1194	ret = seq_open(file, &xt_target_seq_ops);
1195	if (ret < 0) {
1196		kfree(trav);
1197		return ret;
1198	}
1199
1200	seq = file->private_data;
1201	seq->private = trav;
1202	trav->nfproto = (unsigned long)PDE(inode)->data;
1203	return 0;
1204}
1205
1206static const struct file_operations xt_target_ops = {
1207	.owner	 = THIS_MODULE,
1208	.open	 = xt_target_open,
1209	.read	 = seq_read,
1210	.llseek	 = seq_lseek,
1211	.release = seq_release_private,
1212};
1213
1214#define FORMAT_TABLES	"_tables_names"
1215#define	FORMAT_MATCHES	"_tables_matches"
1216#define FORMAT_TARGETS 	"_tables_targets"
1217
1218#endif /* CONFIG_PROC_FS */
1219
1220/**
1221 * xt_hook_link - set up hooks for a new table
1222 * @table:	table with metadata needed to set up hooks
1223 * @fn:		Hook function
1224 *
1225 * This function will take care of creating and registering the necessary
1226 * Netfilter hooks for XT tables.
1227 */
1228struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn)
1229{
1230	unsigned int hook_mask = table->valid_hooks;
1231	uint8_t i, num_hooks = hweight32(hook_mask);
1232	uint8_t hooknum;
1233	struct nf_hook_ops *ops;
1234	int ret;
1235
1236	ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
1237	if (ops == NULL)
1238		return ERR_PTR(-ENOMEM);
1239
1240	for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
1241	     hook_mask >>= 1, ++hooknum) {
1242		if (!(hook_mask & 1))
1243			continue;
1244		ops[i].hook     = fn;
1245		ops[i].owner    = table->me;
1246		ops[i].pf       = table->af;
1247		ops[i].hooknum  = hooknum;
1248		ops[i].priority = table->priority;
1249		++i;
1250	}
1251
1252	ret = nf_register_hooks(ops, num_hooks);
1253	if (ret < 0) {
1254		kfree(ops);
1255		return ERR_PTR(ret);
1256	}
1257
1258	return ops;
1259}
1260EXPORT_SYMBOL_GPL(xt_hook_link);
1261
1262/**
1263 * xt_hook_unlink - remove hooks for a table
1264 * @table:	table the hooks were set up for
1265 * @ops:	nf_hook_ops array as returned by xt_hook_link
1266 */
1267void xt_hook_unlink(const struct xt_table *table, struct nf_hook_ops *ops)
1268{
1269	nf_unregister_hooks(ops, hweight32(table->valid_hooks));
1270	kfree(ops);
1271}
1272EXPORT_SYMBOL_GPL(xt_hook_unlink);
1273
1274int xt_proto_init(struct net *net, u_int8_t af)
1275{
1276#ifdef CONFIG_PROC_FS
1277	char buf[XT_FUNCTION_MAXNAMELEN];
1278	struct proc_dir_entry *proc;
1279#endif
1280
1281	if (af >= ARRAY_SIZE(xt_prefix))
1282		return -EINVAL;
1283
1284
1285#ifdef CONFIG_PROC_FS
1286	strlcpy(buf, xt_prefix[af], sizeof(buf));
1287	strlcat(buf, FORMAT_TABLES, sizeof(buf));
1288	proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
1289				(void *)(unsigned long)af);
1290	if (!proc)
1291		goto out;
1292
1293	strlcpy(buf, xt_prefix[af], sizeof(buf));
1294	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1295	proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
1296				(void *)(unsigned long)af);
1297	if (!proc)
1298		goto out_remove_tables;
1299
1300	strlcpy(buf, xt_prefix[af], sizeof(buf));
1301	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1302	proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
1303				(void *)(unsigned long)af);
1304	if (!proc)
1305		goto out_remove_matches;
1306#endif
1307
1308	return 0;
1309
1310#ifdef CONFIG_PROC_FS
1311out_remove_matches:
1312	strlcpy(buf, xt_prefix[af], sizeof(buf));
1313	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1314	proc_net_remove(net, buf);
1315
1316out_remove_tables:
1317	strlcpy(buf, xt_prefix[af], sizeof(buf));
1318	strlcat(buf, FORMAT_TABLES, sizeof(buf));
1319	proc_net_remove(net, buf);
1320out:
1321	return -1;
1322#endif
1323}
1324EXPORT_SYMBOL_GPL(xt_proto_init);
1325
1326void xt_proto_fini(struct net *net, u_int8_t af)
1327{
1328#ifdef CONFIG_PROC_FS
1329	char buf[XT_FUNCTION_MAXNAMELEN];
1330
1331	strlcpy(buf, xt_prefix[af], sizeof(buf));
1332	strlcat(buf, FORMAT_TABLES, sizeof(buf));
1333	proc_net_remove(net, buf);
1334
1335	strlcpy(buf, xt_prefix[af], sizeof(buf));
1336	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1337	proc_net_remove(net, buf);
1338
1339	strlcpy(buf, xt_prefix[af], sizeof(buf));
1340	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1341	proc_net_remove(net, buf);
1342#endif /*CONFIG_PROC_FS*/
1343}
1344EXPORT_SYMBOL_GPL(xt_proto_fini);
1345
1346static int __net_init xt_net_init(struct net *net)
1347{
1348	int i;
1349
1350	for (i = 0; i < NFPROTO_NUMPROTO; i++)
1351		INIT_LIST_HEAD(&net->xt.tables[i]);
1352	return 0;
1353}
1354
1355static struct pernet_operations xt_net_ops = {
1356	.init = xt_net_init,
1357};
1358
1359static int __init xt_init(void)
1360{
1361	unsigned int i;
1362	int rv;
1363
1364	for_each_possible_cpu(i) {
1365		seqcount_init(&per_cpu(xt_recseq, i));
1366	}
1367
1368	xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
1369	if (!xt)
1370		return -ENOMEM;
1371
1372	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
1373		mutex_init(&xt[i].mutex);
1374#ifdef CONFIG_COMPAT
1375		mutex_init(&xt[i].compat_mutex);
1376		xt[i].compat_tab = NULL;
1377#endif
1378		INIT_LIST_HEAD(&xt[i].target);
1379		INIT_LIST_HEAD(&xt[i].match);
1380	}
1381	rv = register_pernet_subsys(&xt_net_ops);
1382	if (rv < 0)
1383		kfree(xt);
1384	return rv;
1385}
1386
1387static void __exit xt_fini(void)
1388{
1389	unregister_pernet_subsys(&xt_net_ops);
1390	kfree(xt);
1391}
1392
1393module_init(xt_init);
1394module_exit(xt_fini);
1395
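
Below is a minimal usage sketch, not part of x_tables.c: a hypothetical extension module registering a target with the backend above. The "FOO" target and the foo_tg/foo_tg_reg names are illustrative only.

/* hypothetical example module built against the x_tables API shown above */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>

static unsigned int
foo_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
	/* a no-op target: hand the packet back to rule traversal */
	return XT_CONTINUE;
}

static struct xt_target foo_tg_reg __read_mostly = {
	.name     = "FOO",
	.revision = 0,
	.family   = NFPROTO_UNSPEC,	/* goes on the family-independent "x" list */
	.target   = foo_tg,
	.me       = THIS_MODULE,
};

static int __init foo_tg_init(void)
{
	/* appends to xt[NFPROTO_UNSPEC].target while holding that family's mutex */
	return xt_register_target(&foo_tg_reg);
}

static void __exit foo_tg_exit(void)
{
	xt_unregister_target(&foo_tg_reg);
}

module_init(foo_tg_init);
module_exit(foo_tg_exit);
MODULE_LICENSE("GPL");
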
net/netfilter/x_tables.c, v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * x_tables core - Backend for {ip,ip6,arp}_tables
   4 *
   5 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
   6 * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
   7 *
   8 * Based on existing ip_tables code which is
   9 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
  10 *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
  11 */
  12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  13#include <linux/kernel.h>
  14#include <linux/module.h>
  15#include <linux/socket.h>
  16#include <linux/net.h>
  17#include <linux/proc_fs.h>
  18#include <linux/seq_file.h>
  19#include <linux/string.h>
  20#include <linux/vmalloc.h>
  21#include <linux/mutex.h>
  22#include <linux/mm.h>
  23#include <linux/slab.h>
  24#include <linux/audit.h>
  25#include <linux/user_namespace.h>
  26#include <net/net_namespace.h>
  27
  28#include <linux/netfilter/x_tables.h>
  29#include <linux/netfilter_arp.h>
  30#include <linux/netfilter_ipv4/ip_tables.h>
  31#include <linux/netfilter_ipv6/ip6_tables.h>
  32#include <linux/netfilter_arp/arp_tables.h>
  33
  34MODULE_LICENSE("GPL");
  35MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
  36MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
  37
  38#define XT_PCPU_BLOCK_SIZE 4096
  39#define XT_MAX_TABLE_SIZE	(512 * 1024 * 1024)
  40
  41struct compat_delta {
  42	unsigned int offset; /* offset in kernel */
  43	int delta; /* delta in 32bit user land */
  44};
  45
  46struct xt_af {
  47	struct mutex mutex;
  48	struct list_head match;
  49	struct list_head target;
  50#ifdef CONFIG_COMPAT
  51	struct mutex compat_mutex;
  52	struct compat_delta *compat_tab;
  53	unsigned int number; /* number of slots in compat_tab[] */
  54	unsigned int cur; /* number of used slots in compat_tab[] */
  55#endif
  56};
  57
  58static struct xt_af *xt;
  59
  60static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
  61	[NFPROTO_UNSPEC] = "x",
  62	[NFPROTO_IPV4]   = "ip",
  63	[NFPROTO_ARP]    = "arp",
  64	[NFPROTO_BRIDGE] = "eb",
  65	[NFPROTO_IPV6]   = "ip6",
  66};
  67
  68/* Registration hooks for targets. */
  69int xt_register_target(struct xt_target *target)
  70{
  71	u_int8_t af = target->family;
  72
  73	mutex_lock(&xt[af].mutex);
  74	list_add(&target->list, &xt[af].target);
  75	mutex_unlock(&xt[af].mutex);
  76	return 0;
  77}
  78EXPORT_SYMBOL(xt_register_target);
  79
  80void
  81xt_unregister_target(struct xt_target *target)
  82{
  83	u_int8_t af = target->family;
  84
  85	mutex_lock(&xt[af].mutex);
  86	list_del(&target->list);
  87	mutex_unlock(&xt[af].mutex);
  88}
  89EXPORT_SYMBOL(xt_unregister_target);
  90
  91int
  92xt_register_targets(struct xt_target *target, unsigned int n)
  93{
  94	unsigned int i;
  95	int err = 0;
  96
  97	for (i = 0; i < n; i++) {
  98		err = xt_register_target(&target[i]);
  99		if (err)
 100			goto err;
 101	}
 102	return err;
 103
 104err:
 105	if (i > 0)
 106		xt_unregister_targets(target, i);
 107	return err;
 108}
 109EXPORT_SYMBOL(xt_register_targets);
 110
 111void
 112xt_unregister_targets(struct xt_target *target, unsigned int n)
 113{
 114	while (n-- > 0)
 115		xt_unregister_target(&target[n]);
 116}
 117EXPORT_SYMBOL(xt_unregister_targets);
 118
 119int xt_register_match(struct xt_match *match)
 120{
 121	u_int8_t af = match->family;
 122
 123	mutex_lock(&xt[af].mutex);
 124	list_add(&match->list, &xt[af].match);
 125	mutex_unlock(&xt[af].mutex);
 126	return 0;
 127}
 128EXPORT_SYMBOL(xt_register_match);
 129
 130void
 131xt_unregister_match(struct xt_match *match)
 132{
 133	u_int8_t af = match->family;
 134
 135	mutex_lock(&xt[af].mutex);
 136	list_del(&match->list);
 137	mutex_unlock(&xt[af].mutex);
 138}
 139EXPORT_SYMBOL(xt_unregister_match);
 140
 141int
 142xt_register_matches(struct xt_match *match, unsigned int n)
 143{
 144	unsigned int i;
 145	int err = 0;
 146
 147	for (i = 0; i < n; i++) {
 148		err = xt_register_match(&match[i]);
 149		if (err)
 150			goto err;
 151	}
 152	return err;
 153
 154err:
 155	if (i > 0)
 156		xt_unregister_matches(match, i);
 157	return err;
 158}
 159EXPORT_SYMBOL(xt_register_matches);
 160
 161void
 162xt_unregister_matches(struct xt_match *match, unsigned int n)
 163{
 164	while (n-- > 0)
 165		xt_unregister_match(&match[n]);
 166}
 167EXPORT_SYMBOL(xt_unregister_matches);
 168
 169
 170/*
 171 * These are weird, but module loading must not be done with mutex
 172 * held (since they will register), and we have to have a single
 173 * function to use.
 174 */
 175
 176/* Find match, grabs ref.  Returns ERR_PTR() on error. */
 177struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
 178{
 179	struct xt_match *m;
 180	int err = -ENOENT;
 181
 182	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
 183		return ERR_PTR(-EINVAL);
 184
 185	mutex_lock(&xt[af].mutex);
 186	list_for_each_entry(m, &xt[af].match, list) {
 187		if (strcmp(m->name, name) == 0) {
 188			if (m->revision == revision) {
 189				if (try_module_get(m->me)) {
 190					mutex_unlock(&xt[af].mutex);
 191					return m;
 192				}
 193			} else
 194				err = -EPROTOTYPE; /* Found something. */
 195		}
 196	}
 197	mutex_unlock(&xt[af].mutex);
 198
 199	if (af != NFPROTO_UNSPEC)
 200		/* Try searching again in the family-independent list */
 201		return xt_find_match(NFPROTO_UNSPEC, name, revision);
 202
 203	return ERR_PTR(err);
 204}
 205EXPORT_SYMBOL(xt_find_match);
 206
 207struct xt_match *
 208xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
 209{
 210	struct xt_match *match;
 211
 212	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
 213		return ERR_PTR(-EINVAL);
 214
 215	match = xt_find_match(nfproto, name, revision);
 216	if (IS_ERR(match)) {
 217		request_module("%st_%s", xt_prefix[nfproto], name);
 218		match = xt_find_match(nfproto, name, revision);
 219	}
 220
 221	return match;
 222}
 223EXPORT_SYMBOL_GPL(xt_request_find_match);
 224
 225/* Find target, grabs ref.  Returns ERR_PTR() on error. */
 226static struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
 227{
 228	struct xt_target *t;
 229	int err = -ENOENT;
 230
 231	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
 232		return ERR_PTR(-EINVAL);
 233
 234	mutex_lock(&xt[af].mutex);
 235	list_for_each_entry(t, &xt[af].target, list) {
 236		if (strcmp(t->name, name) == 0) {
 237			if (t->revision == revision) {
 238				if (try_module_get(t->me)) {
 239					mutex_unlock(&xt[af].mutex);
 240					return t;
 241				}
 242			} else
 243				err = -EPROTOTYPE; /* Found something. */
 244		}
 245	}
 246	mutex_unlock(&xt[af].mutex);
 247
 248	if (af != NFPROTO_UNSPEC)
 249		/* Try searching again in the family-independent list */
 250		return xt_find_target(NFPROTO_UNSPEC, name, revision);
 251
 252	return ERR_PTR(err);
 253}
 254
 255struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
 256{
 257	struct xt_target *target;
 258
 259	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
 260		return ERR_PTR(-EINVAL);
 261
 262	target = xt_find_target(af, name, revision);
 263	if (IS_ERR(target)) {
 264		request_module("%st_%s", xt_prefix[af], name);
 265		target = xt_find_target(af, name, revision);
 266	}
 267
 268	return target;
 269}
 270EXPORT_SYMBOL_GPL(xt_request_find_target);
 271
 272
 273static int xt_obj_to_user(u16 __user *psize, u16 size,
 274			  void __user *pname, const char *name,
 275			  u8 __user *prev, u8 rev)
 276{
 277	if (put_user(size, psize))
 278		return -EFAULT;
 279	if (copy_to_user(pname, name, strlen(name) + 1))
 280		return -EFAULT;
 281	if (put_user(rev, prev))
 282		return -EFAULT;
 283
 284	return 0;
 285}
 286
 287#define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE)				\
 288	xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size,	\
 289		       U->u.user.name, K->u.kernel.TYPE->name,		\
 290		       &U->u.user.revision, K->u.kernel.TYPE->revision)
 291
 292int xt_data_to_user(void __user *dst, const void *src,
 293		    int usersize, int size, int aligned_size)
 294{
 295	usersize = usersize ? : size;
 296	if (copy_to_user(dst, src, usersize))
 297		return -EFAULT;
 298	if (usersize != aligned_size &&
 299	    clear_user(dst + usersize, aligned_size - usersize))
 300		return -EFAULT;
 301
 302	return 0;
 303}
 304EXPORT_SYMBOL_GPL(xt_data_to_user);
 305
 306#define XT_DATA_TO_USER(U, K, TYPE)					\
 307	xt_data_to_user(U->data, K->data,				\
 308			K->u.kernel.TYPE->usersize,			\
 309			K->u.kernel.TYPE->TYPE##size,			\
 310			XT_ALIGN(K->u.kernel.TYPE->TYPE##size))
 311
 312int xt_match_to_user(const struct xt_entry_match *m,
 313		     struct xt_entry_match __user *u)
 314{
 315	return XT_OBJ_TO_USER(u, m, match, 0) ||
 316	       XT_DATA_TO_USER(u, m, match);
 317}
 318EXPORT_SYMBOL_GPL(xt_match_to_user);
 319
 320int xt_target_to_user(const struct xt_entry_target *t,
 321		      struct xt_entry_target __user *u)
 322{
 323	return XT_OBJ_TO_USER(u, t, target, 0) ||
 324	       XT_DATA_TO_USER(u, t, target);
 325}
 326EXPORT_SYMBOL_GPL(xt_target_to_user);
 327
 328static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
 329{
 330	const struct xt_match *m;
 331	int have_rev = 0;
 332
 333	list_for_each_entry(m, &xt[af].match, list) {
 334		if (strcmp(m->name, name) == 0) {
 335			if (m->revision > *bestp)
 336				*bestp = m->revision;
 337			if (m->revision == revision)
 338				have_rev = 1;
 339		}
 340	}
 341
 342	if (af != NFPROTO_UNSPEC && !have_rev)
 343		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
 344
 345	return have_rev;
 346}
 347
 348static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
 349{
 350	const struct xt_target *t;
 351	int have_rev = 0;
 352
 353	list_for_each_entry(t, &xt[af].target, list) {
 354		if (strcmp(t->name, name) == 0) {
 355			if (t->revision > *bestp)
 356				*bestp = t->revision;
 357			if (t->revision == revision)
 358				have_rev = 1;
 359		}
 360	}
 361
 362	if (af != NFPROTO_UNSPEC && !have_rev)
 363		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
 364
 365	return have_rev;
 366}
 367
 368/* Returns true or false (if no such extension at all) */
 369int xt_find_revision(u8 af, const char *name, u8 revision, int target,
 370		     int *err)
 371{
 372	int have_rev, best = -1;
 373
 374	mutex_lock(&xt[af].mutex);
 375	if (target == 1)
 376		have_rev = target_revfn(af, name, revision, &best);
 377	else
 378		have_rev = match_revfn(af, name, revision, &best);
 379	mutex_unlock(&xt[af].mutex);
 380
 381	/* Nothing at all?  Return 0 to try loading module. */
 382	if (best == -1) {
 383		*err = -ENOENT;
 384		return 0;
 385	}
 386
 387	*err = best;
 388	if (!have_rev)
 389		*err = -EPROTONOSUPPORT;
 390	return 1;
 391}
 392EXPORT_SYMBOL_GPL(xt_find_revision);
 393
 394static char *
 395textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
 396{
 397	static const char *const inetbr_names[] = {
 398		"PREROUTING", "INPUT", "FORWARD",
 399		"OUTPUT", "POSTROUTING", "BROUTING",
 400	};
 401	static const char *const arp_names[] = {
 402		"INPUT", "FORWARD", "OUTPUT",
 403	};
 404	const char *const *names;
 405	unsigned int i, max;
 406	char *p = buf;
 407	bool np = false;
 408	int res;
 409
 410	names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
 411	max   = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
 412	                                   ARRAY_SIZE(inetbr_names);
 413	*p = '\0';
 414	for (i = 0; i < max; ++i) {
 415		if (!(mask & (1 << i)))
 416			continue;
 417		res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
 418		if (res > 0) {
 419			size -= res;
 420			p += res;
 421		}
 422		np = true;
 423	}
 424
 425	return buf;
 426}
 427
 428/**
 429 * xt_check_proc_name - check that name is suitable for /proc file creation
 430 *
 431 * @name: file name candidate
 432 * @size: length of buffer
 433 *
 434 * some x_tables modules wish to create a file in /proc.
 435 * This function makes sure that the name is suitable for this
 436 * purpose, it checks that name is NUL terminated and isn't a 'special'
 437 * name, like "..".
 438 *
 439 * returns negative number on error or 0 if name is useable.
 440 */
 441int xt_check_proc_name(const char *name, unsigned int size)
 442{
 443	if (name[0] == '\0')
 444		return -EINVAL;
 445
 446	if (strnlen(name, size) == size)
 447		return -ENAMETOOLONG;
 448
 449	if (strcmp(name, ".") == 0 ||
 450	    strcmp(name, "..") == 0 ||
 451	    strchr(name, '/'))
 452		return -EINVAL;
 453
 454	return 0;
 455}
 456EXPORT_SYMBOL(xt_check_proc_name);
 457
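
/*
 * Usage sketch, not part of x_tables.c: a hypothetical match extension whose
 * ->checkentry uses xt_check_proc_name() to validate a user-supplied /proc
 * file name before creating the file. struct foo_mtinfo and foo_mt_check are
 * illustrative names only.
 */
#include <linux/netfilter/x_tables.h>

struct foo_mtinfo {
	char name[32];		/* file name copied in from the ruleset */
};

static int foo_mt_check(const struct xt_mtchk_param *par)
{
	const struct foo_mtinfo *info = par->matchinfo;
	int ret;

	/* rejects empty or unterminated names, ".", ".." and anything with '/' */
	ret = xt_check_proc_name(info->name, sizeof(info->name));
	if (ret < 0)
		return ret;

	/* info->name is now safe to use for a /proc entry */
	return 0;
}
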
 458int xt_check_match(struct xt_mtchk_param *par,
 459		   unsigned int size, u16 proto, bool inv_proto)
 460{
 461	int ret;
 462
 463	if (XT_ALIGN(par->match->matchsize) != size &&
 464	    par->match->matchsize != -1) {
 465		/*
 466		 * ebt_among is exempt from centralized matchsize checking
 467		 * because it uses a dynamic-size data set.
 468		 */
 469		pr_err_ratelimited("%s_tables: %s.%u match: invalid size %u (kernel) != (user) %u\n",
 470				   xt_prefix[par->family], par->match->name,
 471				   par->match->revision,
 472				   XT_ALIGN(par->match->matchsize), size);
 473		return -EINVAL;
 474	}
 475	if (par->match->table != NULL &&
 476	    strcmp(par->match->table, par->table) != 0) {
 477		pr_info_ratelimited("%s_tables: %s match: only valid in %s table, not %s\n",
 478				    xt_prefix[par->family], par->match->name,
 479				    par->match->table, par->table);
 480		return -EINVAL;
 481	}
 482	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
 483		char used[64], allow[64];
 484
 485		pr_info_ratelimited("%s_tables: %s match: used from hooks %s, but only valid from %s\n",
 486				    xt_prefix[par->family], par->match->name,
 487				    textify_hooks(used, sizeof(used),
 488						  par->hook_mask, par->family),
 489				    textify_hooks(allow, sizeof(allow),
 490						  par->match->hooks,
 491						  par->family));
 492		return -EINVAL;
 493	}
 494	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
 495		pr_info_ratelimited("%s_tables: %s match: only valid for protocol %u\n",
 496				    xt_prefix[par->family], par->match->name,
 497				    par->match->proto);
 498		return -EINVAL;
 499	}
 500	if (par->match->checkentry != NULL) {
 501		ret = par->match->checkentry(par);
 502		if (ret < 0)
 503			return ret;
 504		else if (ret > 0)
 505			/* Flag up potential errors. */
 506			return -EIO;
 507	}
 508	return 0;
 509}
 510EXPORT_SYMBOL_GPL(xt_check_match);
 511
 512/** xt_check_entry_match - check that matches end before start of target
 513 *
 514 * @match: beginning of xt_entry_match
 515 * @target: beginning of this rules target (alleged end of matches)
 516 * @alignment: alignment requirement of match structures
 517 *
 518 * Validates that all matches add up to the beginning of the target,
 519 * and that each match covers at least the base structure size.
 520 *
 521 * Return: 0 on success, negative errno on failure.
 522 */
 523static int xt_check_entry_match(const char *match, const char *target,
 524				const size_t alignment)
 525{
 526	const struct xt_entry_match *pos;
 527	int length = target - match;
 528
 529	if (length == 0) /* no matches */
 530		return 0;
 531
 532	pos = (struct xt_entry_match *)match;
 533	do {
 534		if ((unsigned long)pos % alignment)
 535			return -EINVAL;
 536
 537		if (length < (int)sizeof(struct xt_entry_match))
 538			return -EINVAL;
 539
 540		if (pos->u.match_size < sizeof(struct xt_entry_match))
 541			return -EINVAL;
 542
 543		if (pos->u.match_size > length)
 544			return -EINVAL;
 545
 546		length -= pos->u.match_size;
 547		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
 548	} while (length > 0);
 549
 550	return 0;
 551}
 552
 553/** xt_check_table_hooks - check hook entry points are sane
 554 *
 555 * @info xt_table_info to check
 556 * @valid_hooks - hook entry points that we can enter from
 557 *
 558 * Validates that the hook entry and underflows points are set up.
 559 *
 560 * Return: 0 on success, negative errno on failure.
 561 */
 562int xt_check_table_hooks(const struct xt_table_info *info, unsigned int valid_hooks)
 563{
 564	const char *err = "unsorted underflow";
 565	unsigned int i, max_uflow, max_entry;
 566	bool check_hooks = false;
 567
 568	BUILD_BUG_ON(ARRAY_SIZE(info->hook_entry) != ARRAY_SIZE(info->underflow));
 569
 570	max_entry = 0;
 571	max_uflow = 0;
 572
 573	for (i = 0; i < ARRAY_SIZE(info->hook_entry); i++) {
 574		if (!(valid_hooks & (1 << i)))
 575			continue;
 576
 577		if (info->hook_entry[i] == 0xFFFFFFFF)
 578			return -EINVAL;
 579		if (info->underflow[i] == 0xFFFFFFFF)
 580			return -EINVAL;
 581
 582		if (check_hooks) {
 583			if (max_uflow > info->underflow[i])
 584				goto error;
 585
 586			if (max_uflow == info->underflow[i]) {
 587				err = "duplicate underflow";
 588				goto error;
 589			}
 590			if (max_entry > info->hook_entry[i]) {
 591				err = "unsorted entry";
 592				goto error;
 593			}
 594			if (max_entry == info->hook_entry[i]) {
 595				err = "duplicate entry";
 596				goto error;
 597			}
 598		}
 599		max_entry = info->hook_entry[i];
 600		max_uflow = info->underflow[i];
 601		check_hooks = true;
 602	}
 603
 604	return 0;
 605error:
 606	pr_err_ratelimited("%s at hook %d\n", err, i);
 607	return -EINVAL;
 608}
 609EXPORT_SYMBOL(xt_check_table_hooks);
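
/*
 * Example (editorial sketch): what xt_check_table_hooks() expects from a
 * three-chain filter table.  The offsets are invented; the point is that
 * every slot selected by valid_hooks is initialized (not 0xFFFFFFFF) and
 * that hook entries and underflows are strictly increasing.
 */
static int example_check_filter_hooks(const struct xt_table_info *info)
{
	const unsigned int valid_hooks = (1 << NF_INET_LOCAL_IN) |
					 (1 << NF_INET_FORWARD) |
					 (1 << NF_INET_LOCAL_OUT);

	/* e.g. hook_entry = underflow = { [LOCAL_IN]=0, [FORWARD]=152,
	 * [LOCAL_OUT]=304 } passes; swapping any two values does not.
	 */
	return xt_check_table_hooks(info, valid_hooks);
}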
 610
 611static bool verdict_ok(int verdict)
 612{
 613	if (verdict > 0)
 614		return true;
 615
 616	if (verdict < 0) {
 617		int v = -verdict - 1;
 618
 619		if (verdict == XT_RETURN)
 620			return true;
 621
 622		switch (v) {
 623		case NF_ACCEPT: return true;
 624		case NF_DROP: return true;
 625		case NF_QUEUE: return true;
 626		default:
 627			break;
 628		}
 629
 630		return false;
 631	}
 632
 633	return false;
 634}
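
/*
 * Example (editorial note): the encodings accepted above.  A positive
 * verdict is a jump offset into the rule blob; negative values encode a
 * netfilter verdict as -(NF_xxx) - 1; XT_RETURN (== -NF_REPEAT - 1) pops
 * back to the calling chain.
 *
 *	verdict_ok(-NF_ACCEPT - 1)	-> true   (-j ACCEPT)
 *	verdict_ok(-NF_DROP - 1)	-> true   (-j DROP)
 *	verdict_ok(XT_RETURN)		-> true   (-j RETURN)
 *	verdict_ok(0)			-> false  (not a valid encoding)
 */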
 635
 636static bool error_tg_ok(unsigned int usersize, unsigned int kernsize,
 637			const char *msg, unsigned int msglen)
 638{
 639	return usersize == kernsize && strnlen(msg, msglen) < msglen;
 640}
 641
 642#ifdef CONFIG_COMPAT
 643int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
 644{
 645	struct xt_af *xp = &xt[af];
 646
 647	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
 648
 649	if (WARN_ON(!xp->compat_tab))
 650		return -ENOMEM;
 651
 652	if (xp->cur >= xp->number)
 653		return -EINVAL;
 654
 655	if (xp->cur)
 656		delta += xp->compat_tab[xp->cur - 1].delta;
 657	xp->compat_tab[xp->cur].offset = offset;
 658	xp->compat_tab[xp->cur].delta = delta;
 659	xp->cur++;
 660	return 0;
 661}
 662EXPORT_SYMBOL_GPL(xt_compat_add_offset);
 663
 664void xt_compat_flush_offsets(u_int8_t af)
 665{
 666	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
 667
 668	if (xt[af].compat_tab) {
 669		vfree(xt[af].compat_tab);
 670		xt[af].compat_tab = NULL;
 671		xt[af].number = 0;
 672		xt[af].cur = 0;
 673	}
 674}
 675EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
 676
 677int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
 678{
 679	struct compat_delta *tmp = xt[af].compat_tab;
 680	int mid, left = 0, right = xt[af].cur - 1;
 681
 682	while (left <= right) {
 683		mid = (left + right) >> 1;
 684		if (offset > tmp[mid].offset)
 685			left = mid + 1;
 686		else if (offset < tmp[mid].offset)
 687			right = mid - 1;
 688		else
 689			return mid ? tmp[mid - 1].delta : 0;
 690	}
 691	return left ? tmp[left - 1].delta : 0;
 692}
 693EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
 694
 695int xt_compat_init_offsets(u8 af, unsigned int number)
 696{
 697	size_t mem;
 698
 699	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
 700
 701	if (!number || number > (INT_MAX / sizeof(struct compat_delta)))
 702		return -EINVAL;
 703
 704	if (WARN_ON(xt[af].compat_tab))
 705		return -EINVAL;
 706
 707	mem = sizeof(struct compat_delta) * number;
 708	if (mem > XT_MAX_TABLE_SIZE)
 709		return -ENOMEM;
 710
 711	xt[af].compat_tab = vmalloc(mem);
 712	if (!xt[af].compat_tab)
 713		return -ENOMEM;
 714
 715	xt[af].number = number;
 716	xt[af].cur = 0;
 717
 718	return 0;
 719}
 720EXPORT_SYMBOL(xt_compat_init_offsets);
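
/*
 * Example (editorial sketch): the intended life cycle of the compat
 * offset table, as used by translate_compat_table() in the per-family
 * code.  The offsets and deltas below are invented.
 */
static void example_compat_offsets(void)
{
	xt_compat_lock(NFPROTO_IPV4);

	/* one slot per rule in the 32bit blob */
	if (xt_compat_init_offsets(NFPROTO_IPV4, 2))
		goto out_unlock;

	/* rule at compat offset 0 grows by 4 bytes, the next one by 8 */
	xt_compat_add_offset(NFPROTO_IPV4, 0, 4);
	xt_compat_add_offset(NFPROTO_IPV4, 112, 8);

	/* growth accumulated before offset 112, i.e. 4 here */
	pr_debug("jump delta: %d\n", xt_compat_calc_jump(NFPROTO_IPV4, 112));

	xt_compat_flush_offsets(NFPROTO_IPV4);
out_unlock:
	xt_compat_unlock(NFPROTO_IPV4);
}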
 721
 722int xt_compat_match_offset(const struct xt_match *match)
 723{
 724	u_int16_t csize = match->compatsize ? : match->matchsize;
 725	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
 726}
 727EXPORT_SYMBOL_GPL(xt_compat_match_offset);
 728
 729void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
 730			       unsigned int *size)
 731{
 732	const struct xt_match *match = m->u.kernel.match;
 733	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
 734	int pad, off = xt_compat_match_offset(match);
 735	u_int16_t msize = cm->u.user.match_size;
 736	char name[sizeof(m->u.user.name)];
 737
 738	m = *dstptr;
 739	memcpy(m, cm, sizeof(*cm));
 740	if (match->compat_from_user)
 741		match->compat_from_user(m->data, cm->data);
 742	else
 743		memcpy(m->data, cm->data, msize - sizeof(*cm));
 744	pad = XT_ALIGN(match->matchsize) - match->matchsize;
 745	if (pad > 0)
 746		memset(m->data + match->matchsize, 0, pad);
 747
 748	msize += off;
 749	m->u.user.match_size = msize;
 750	strlcpy(name, match->name, sizeof(name));
 751	module_put(match->me);
 752	strncpy(m->u.user.name, name, sizeof(m->u.user.name));
 753
 754	*size += off;
 755	*dstptr += msize;
 756}
 757EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
 758
 759#define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE)			\
 760	xt_data_to_user(U->data, K->data,				\
 761			K->u.kernel.TYPE->usersize,			\
 762			C_SIZE,						\
 763			COMPAT_XT_ALIGN(C_SIZE))
 764
 765int xt_compat_match_to_user(const struct xt_entry_match *m,
 766			    void __user **dstptr, unsigned int *size)
 767{
 768	const struct xt_match *match = m->u.kernel.match;
 769	struct compat_xt_entry_match __user *cm = *dstptr;
 770	int off = xt_compat_match_offset(match);
 771	u_int16_t msize = m->u.user.match_size - off;
 772
 773	if (XT_OBJ_TO_USER(cm, m, match, msize))
 774		return -EFAULT;
 775
 776	if (match->compat_to_user) {
 777		if (match->compat_to_user((void __user *)cm->data, m->data))
 778			return -EFAULT;
 779	} else {
 780		if (COMPAT_XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
 781			return -EFAULT;
 782	}
 783
 784	*size -= off;
 785	*dstptr += msize;
 786	return 0;
 787}
 788EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
 789
 790/* non-compat version may have padding after verdict */
 791struct compat_xt_standard_target {
 792	struct compat_xt_entry_target t;
 793	compat_uint_t verdict;
 794};
 795
 796struct compat_xt_error_target {
 797	struct compat_xt_entry_target t;
 798	char errorname[XT_FUNCTION_MAXNAMELEN];
 799};
 800
 801int xt_compat_check_entry_offsets(const void *base, const char *elems,
 802				  unsigned int target_offset,
 803				  unsigned int next_offset)
 804{
 805	long size_of_base_struct = elems - (const char *)base;
 806	const struct compat_xt_entry_target *t;
 807	const char *e = base;
 808
 809	if (target_offset < size_of_base_struct)
 810		return -EINVAL;
 811
 812	if (target_offset + sizeof(*t) > next_offset)
 813		return -EINVAL;
 814
 815	t = (void *)(e + target_offset);
 816	if (t->u.target_size < sizeof(*t))
 817		return -EINVAL;
 818
 819	if (target_offset + t->u.target_size > next_offset)
 820		return -EINVAL;
 821
 822	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
 823		const struct compat_xt_standard_target *st = (const void *)t;
 824
 825		if (COMPAT_XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
 826			return -EINVAL;
 827
 828		if (!verdict_ok(st->verdict))
 829			return -EINVAL;
 830	} else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
 831		const struct compat_xt_error_target *et = (const void *)t;
 832
 833		if (!error_tg_ok(t->u.target_size, sizeof(*et),
 834				 et->errorname, sizeof(et->errorname)))
 835			return -EINVAL;
 836	}
 837
 838	/* compat_xt_entry match has less strict alignment requirements,
 839	 * otherwise they are identical.  In case of padding differences
 840	 * we need to add compat version of xt_check_entry_match.
 841	 */
 842	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));
 843
 844	return xt_check_entry_match(elems, base + target_offset,
 845				    __alignof__(struct compat_xt_entry_match));
 846}
 847EXPORT_SYMBOL(xt_compat_check_entry_offsets);
 848#endif /* CONFIG_COMPAT */
 849
 850/**
 851 * xt_check_entry_offsets - validate arp/ip/ip6t_entry
 852 *
 853 * @base: pointer to arp/ip/ip6t_entry
 854 * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
 855 * @target_offset: the arp/ip/ip6_t->target_offset
 856 * @next_offset: the arp/ip/ip6_t->next_offset
 857 *
 858 * Validates that target_offset and next_offset are sane and that all
 859 * match sizes (if any) align with the target offset.
 860 *
 861 * This function does not validate the targets or matches themselves, it
 862 * only tests that all the offsets and sizes are correct, that all
 863 * match structures are aligned, and that the last structure ends where
 864 * the target structure begins.
 865 *
 866 * Also see xt_compat_check_entry_offsets() for the CONFIG_COMPAT version.
 867 *
 868 * The arp/ip/ip6t_entry structure @base must have passed the following tests:
 869 * - it must point to a valid memory location
 870 * - base to base + next_offset must be accessible, i.e. not exceed allocated
 871 *   length.
 872 *
 873 * A well-formed entry looks like this:
 874 *
 875 * ip(6)t_entry   match [mtdata]  match [mtdata] target [tgdata] ip(6)t_entry
 876 * e->elems[]-----'                              |               |
 877 *                matchsize                      |               |
 878 *                                matchsize      |               |
 879 *                                               |               |
 880 * target_offset---------------------------------'               |
 881 * next_offset---------------------------------------------------'
 882 *
 883 * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
 884 *          This is where matches (if any) and the target reside.
 885 * target_offset: beginning of target.
 886 * next_offset: start of the next rule; also: size of this rule.
 887 * Since targets have a minimum size, target_offset + minlen <= next_offset.
 888 *
 889 * Every match stores its size, sum of sizes must not exceed target_offset.
 890 *
 891 * Return: 0 on success, negative errno on failure.
 892 */
 893int xt_check_entry_offsets(const void *base,
 894			   const char *elems,
 895			   unsigned int target_offset,
 896			   unsigned int next_offset)
 897{
 898	long size_of_base_struct = elems - (const char *)base;
 899	const struct xt_entry_target *t;
 900	const char *e = base;
 901
 902	/* target start is within the ip/ip6/arpt_entry struct */
 903	if (target_offset < size_of_base_struct)
 904		return -EINVAL;
 905
 906	if (target_offset + sizeof(*t) > next_offset)
 907		return -EINVAL;
 908
 909	t = (void *)(e + target_offset);
 910	if (t->u.target_size < sizeof(*t))
 911		return -EINVAL;
 912
 913	if (target_offset + t->u.target_size > next_offset)
 914		return -EINVAL;
 915
 916	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
 917		const struct xt_standard_target *st = (const void *)t;
 918
 919		if (XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
 920			return -EINVAL;
 921
 922		if (!verdict_ok(st->verdict))
 923			return -EINVAL;
 924	} else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
 925		const struct xt_error_target *et = (const void *)t;
 926
 927		if (!error_tg_ok(t->u.target_size, sizeof(*et),
 928				 et->errorname, sizeof(et->errorname)))
 929			return -EINVAL;
 930	}
 931
 932	return xt_check_entry_match(elems, base + target_offset,
 933				    __alignof__(struct xt_entry_match));
 934}
 935EXPORT_SYMBOL(xt_check_entry_offsets);
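
/*
 * Example (editorial sketch): the smallest well-formed IPv4 rule as seen
 * by xt_check_entry_offsets() -- no matches, just a standard target that
 * carries the verdict.  The struct and helper names are hypothetical;
 * struct ipt_entry and struct xt_standard_target are the real UAPI types.
 */
struct example_policy_rule {
	struct ipt_entry entry;
	struct xt_standard_target target;
};

static void example_fill_policy_rule(struct example_policy_rule *r, int verdict)
{
	memset(r, 0, sizeof(*r));

	/* no matches: the target starts right after the base structure */
	r->entry.target_offset = sizeof(r->entry);
	r->entry.next_offset   = sizeof(*r);

	r->target.target.u.user.target_size = sizeof(r->target);
	strlcpy(r->target.target.u.user.name, XT_STANDARD_TARGET,
		sizeof(r->target.target.u.user.name));
	r->target.verdict = verdict;	/* e.g. -NF_ACCEPT - 1 */

	/* this layout satisfies
	 *   xt_check_entry_offsets(&r->entry, r->entry.elems,
	 *			    r->entry.target_offset,
	 *			    r->entry.next_offset) == 0
	 */
}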
 936
 937/**
 938 * xt_alloc_entry_offsets - allocate array to store rule head offsets
 939 *
 940 * @size: number of entries
 941 *
 942 * Return: NULL or zeroed kmalloc'd or vmalloc'd array
 943 */
 944unsigned int *xt_alloc_entry_offsets(unsigned int size)
 945{
 946	if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int))
 947		return NULL;
 948
 949	return kvcalloc(size, sizeof(unsigned int), GFP_KERNEL);
 950
 951}
 952EXPORT_SYMBOL(xt_alloc_entry_offsets);
 953
 954/**
 955 * xt_find_jump_offset - check if target is a valid jump offset
 956 *
 957 * @offsets: array containing all valid rule start offsets of a rule blob
 958 * @target: the jump target to search for
 959 * @size: number of entries in @offsets
 960 */
 961bool xt_find_jump_offset(const unsigned int *offsets,
 962			 unsigned int target, unsigned int size)
 963{
 964	int m, low = 0, hi = size;
 965
 966	while (hi > low) {
 967		m = (low + hi) / 2u;
 968
 969		if (offsets[m] > target)
 970			hi = m;
 971		else if (offsets[m] < target)
 972			low = m + 1;
 973		else
 974			return true;
 975	}
 976
 977	return false;
 978}
 979EXPORT_SYMBOL(xt_find_jump_offset);
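
/*
 * Example (editorial sketch): how the two helpers above combine when a
 * ruleset is validated -- collect every rule start offset while walking
 * the blob, then check that each jump verdict lands exactly on one of
 * them.  The offsets here are invented.
 */
static bool example_jump_is_valid(unsigned int jump_target)
{
	unsigned int *offsets;
	bool ok;

	offsets = xt_alloc_entry_offsets(3);
	if (!offsets)
		return false;

	/* normally recorded while iterating the entries */
	offsets[0] = 0;
	offsets[1] = 152;
	offsets[2] = 304;

	ok = xt_find_jump_offset(offsets, jump_target, 3);

	kvfree(offsets);
	return ok;
}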
 980
 981int xt_check_target(struct xt_tgchk_param *par,
 982		    unsigned int size, u16 proto, bool inv_proto)
 983{
 984	int ret;
 985
 986	if (XT_ALIGN(par->target->targetsize) != size) {
 987		pr_err_ratelimited("%s_tables: %s.%u target: invalid size %u (kernel) != (user) %u\n",
 988				   xt_prefix[par->family], par->target->name,
 989				   par->target->revision,
 990				   XT_ALIGN(par->target->targetsize), size);
 991		return -EINVAL;
 992	}
 993	if (par->target->table != NULL &&
 994	    strcmp(par->target->table, par->table) != 0) {
 995		pr_info_ratelimited("%s_tables: %s target: only valid in %s table, not %s\n",
 996				    xt_prefix[par->family], par->target->name,
 997				    par->target->table, par->table);
 998		return -EINVAL;
 999	}
1000	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
1001		char used[64], allow[64];
1002
1003		pr_info_ratelimited("%s_tables: %s target: used from hooks %s, but only usable from %s\n",
1004				    xt_prefix[par->family], par->target->name,
1005				    textify_hooks(used, sizeof(used),
1006						  par->hook_mask, par->family),
1007				    textify_hooks(allow, sizeof(allow),
1008						  par->target->hooks,
1009						  par->family));
1010		return -EINVAL;
1011	}
1012	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
1013		pr_info_ratelimited("%s_tables: %s target: only valid for protocol %u\n",
1014				    xt_prefix[par->family], par->target->name,
1015				    par->target->proto);
1016		return -EINVAL;
1017	}
1018	if (par->target->checkentry != NULL) {
1019		ret = par->target->checkentry(par);
1020		if (ret < 0)
1021			return ret;
1022		else if (ret > 0)
1023			/* Flag up potential errors. */
1024			return -EIO;
1025	}
1026	return 0;
1027}
1028EXPORT_SYMBOL_GPL(xt_check_target);
1029
1030/**
1031 * xt_copy_counters - copy counters and metadata from a sockptr_t
1032 *
1033 * @arg: src sockptr
1034 * @len: alleged size of userspace memory
1035 * @info: where to store the xt_counters_info metadata
1036 *
1037 * Copies the counter metadata from @arg and stores it in @info.
1038 *
1039 * vmallocs memory to hold the counters, then copies the counter data
1040 * from @arg to the new memory and returns a pointer to it.
1041 *
1042 * If called from a compat syscall, @info gets converted automatically to the
1043 * 64bit representation.
1044 *
1045 * The metadata associated with the counters is stored in @info.
1046 *
1047 * Return: pointer that the caller has to test via IS_ERR().
1048 * If IS_ERR() is false, the caller must vfree() the pointer.
1049 */
1050void *xt_copy_counters(sockptr_t arg, unsigned int len,
1051		       struct xt_counters_info *info)
1052{
1053	size_t offset;
1054	void *mem;
1055	u64 size;
1056
1057#ifdef CONFIG_COMPAT
1058	if (in_compat_syscall()) {
1059		/* structures only differ in size due to alignment */
1060		struct compat_xt_counters_info compat_tmp;
1061
1062		if (len <= sizeof(compat_tmp))
1063			return ERR_PTR(-EINVAL);
1064
1065		len -= sizeof(compat_tmp);
1066		if (copy_from_sockptr(&compat_tmp, arg, sizeof(compat_tmp)) != 0)
1067			return ERR_PTR(-EFAULT);
1068
1069		memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
1070		info->num_counters = compat_tmp.num_counters;
1071		offset = sizeof(compat_tmp);
1072	} else
1073#endif
1074	{
1075		if (len <= sizeof(*info))
1076			return ERR_PTR(-EINVAL);
1077
1078		len -= sizeof(*info);
1079		if (copy_from_sockptr(info, arg, sizeof(*info)) != 0)
1080			return ERR_PTR(-EFAULT);
1081
1082		offset = sizeof(*info);
1083	}
1084	info->name[sizeof(info->name) - 1] = '\0';
1085
1086	size = sizeof(struct xt_counters);
1087	size *= info->num_counters;
1088
1089	if (size != (u64)len)
1090		return ERR_PTR(-EINVAL);
1091
1092	mem = vmalloc(len);
1093	if (!mem)
1094		return ERR_PTR(-ENOMEM);
1095
1096	if (copy_from_sockptr_offset(mem, arg, offset, len) == 0)
1097		return mem;
1098
1099	vfree(mem);
1100	return ERR_PTR(-EFAULT);
1101}
1102EXPORT_SYMBOL_GPL(xt_copy_counters);
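
/*
 * Example (editorial sketch): the calling convention for
 * xt_copy_counters(), mirroring the ADD_COUNTERS handling in the
 * per-family code.  @tmp receives the 64bit metadata no matter whether
 * the caller is a compat task.
 */
static int example_do_add_counters(sockptr_t arg, unsigned int len)
{
	struct xt_counters_info tmp;
	struct xt_counters *paddc;

	paddc = xt_copy_counters(arg, len, &tmp);
	if (IS_ERR(paddc))
		return PTR_ERR(paddc);

	/* ... add tmp.num_counters entries from paddc to the table ... */

	vfree(paddc);
	return 0;
}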
1103
1104#ifdef CONFIG_COMPAT
1105int xt_compat_target_offset(const struct xt_target *target)
1106{
1107	u_int16_t csize = target->compatsize ? : target->targetsize;
1108	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
1109}
1110EXPORT_SYMBOL_GPL(xt_compat_target_offset);
1111
1112void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
1113				unsigned int *size)
1114{
1115	const struct xt_target *target = t->u.kernel.target;
1116	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
1117	int pad, off = xt_compat_target_offset(target);
1118	u_int16_t tsize = ct->u.user.target_size;
1119	char name[sizeof(t->u.user.name)];
1120
1121	t = *dstptr;
1122	memcpy(t, ct, sizeof(*ct));
1123	if (target->compat_from_user)
1124		target->compat_from_user(t->data, ct->data);
1125	else
1126		memcpy(t->data, ct->data, tsize - sizeof(*ct));
1127	pad = XT_ALIGN(target->targetsize) - target->targetsize;
1128	if (pad > 0)
1129		memset(t->data + target->targetsize, 0, pad);
1130
1131	tsize += off;
1132	t->u.user.target_size = tsize;
1133	strlcpy(name, target->name, sizeof(name));
1134	module_put(target->me);
1135	strncpy(t->u.user.name, name, sizeof(t->u.user.name));
1136
1137	*size += off;
1138	*dstptr += tsize;
1139}
1140EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
1141
1142int xt_compat_target_to_user(const struct xt_entry_target *t,
1143			     void __user **dstptr, unsigned int *size)
1144{
1145	const struct xt_target *target = t->u.kernel.target;
1146	struct compat_xt_entry_target __user *ct = *dstptr;
1147	int off = xt_compat_target_offset(target);
1148	u_int16_t tsize = t->u.user.target_size - off;
1149
1150	if (XT_OBJ_TO_USER(ct, t, target, tsize))
1151		return -EFAULT;
1152
1153	if (target->compat_to_user) {
1154		if (target->compat_to_user((void __user *)ct->data, t->data))
1155			return -EFAULT;
1156	} else {
1157		if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
1158			return -EFAULT;
1159	}
1160
1161	*size -= off;
1162	*dstptr += tsize;
1163	return 0;
1164}
1165EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
1166#endif
1167
1168struct xt_table_info *xt_alloc_table_info(unsigned int size)
1169{
1170	struct xt_table_info *info = NULL;
1171	size_t sz = sizeof(*info) + size;
1172
1173	if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
1174		return NULL;
1175
1176	info = kvmalloc(sz, GFP_KERNEL_ACCOUNT);
1177	if (!info)
1178		return NULL;
1179
1180	memset(info, 0, sizeof(*info));
1181	info->size = size;
1182	return info;
1183}
1184EXPORT_SYMBOL(xt_alloc_table_info);
1185
1186void xt_free_table_info(struct xt_table_info *info)
1187{
1188	int cpu;
1189
1190	if (info->jumpstack != NULL) {
1191		for_each_possible_cpu(cpu)
1192			kvfree(info->jumpstack[cpu]);
1193		kvfree(info->jumpstack);
1194	}
1195
1196	kvfree(info);
1197}
1198EXPORT_SYMBOL(xt_free_table_info);
1199
1200/* Find table by name, grabs mutex & ref.  Returns ERR_PTR on error. */
1201struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
1202				    const char *name)
1203{
1204	struct xt_table *t, *found = NULL;
1205
1206	mutex_lock(&xt[af].mutex);
1207	list_for_each_entry(t, &net->xt.tables[af], list)
1208		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
1209			return t;
1210
1211	if (net == &init_net)
1212		goto out;
1213
1214	/* Table doesn't exist in this netns, re-try init */
1215	list_for_each_entry(t, &init_net.xt.tables[af], list) {
1216		int err;
1217
1218		if (strcmp(t->name, name))
1219			continue;
1220		if (!try_module_get(t->me))
1221			goto out;
1222		mutex_unlock(&xt[af].mutex);
1223		err = t->table_init(net);
1224		if (err < 0) {
1225			module_put(t->me);
1226			return ERR_PTR(err);
1227		}
1228
1229		found = t;
1230
1231		mutex_lock(&xt[af].mutex);
1232		break;
1233	}
1234
1235	if (!found)
1236		goto out;
1237
1238	/* and once again: */
1239	list_for_each_entry(t, &net->xt.tables[af], list)
1240		if (strcmp(t->name, name) == 0)
1241			return t;
1242
1243	module_put(found->me);
1244 out:
1245	mutex_unlock(&xt[af].mutex);
1246	return ERR_PTR(-ENOENT);
1247}
1248EXPORT_SYMBOL_GPL(xt_find_table_lock);
1249
1250struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
1251					    const char *name)
1252{
1253	struct xt_table *t = xt_find_table_lock(net, af, name);
1254
1255#ifdef CONFIG_MODULES
1256	if (IS_ERR(t)) {
1257		int err = request_module("%stable_%s", xt_prefix[af], name);
1258		if (err < 0)
1259			return ERR_PTR(err);
1260		t = xt_find_table_lock(net, af, name);
1261	}
1262#endif
1263
1264	return t;
1265}
1266EXPORT_SYMBOL_GPL(xt_request_find_table_lock);
1267
1268void xt_table_unlock(struct xt_table *table)
1269{
1270	mutex_unlock(&xt[table->af].mutex);
1271}
1272EXPORT_SYMBOL_GPL(xt_table_unlock);
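
/*
 * Example (editorial sketch): the usual bracket around a table lookup,
 * as in the per-family get/set paths.  xt_table_unlock() drops the
 * mutex taken by xt_find_table_lock(); the module reference must be
 * dropped separately.
 */
static int example_peek_filter_table(struct net *net)
{
	struct xt_table *t;

	t = xt_request_find_table_lock(net, NFPROTO_IPV4, "filter");
	if (IS_ERR(t))
		return PTR_ERR(t);

	/* table->private may be inspected while xt[af].mutex is held */
	pr_debug("filter table has %u entries\n", t->private->number);

	xt_table_unlock(t);
	module_put(t->me);
	return 0;
}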
1273
1274#ifdef CONFIG_COMPAT
1275void xt_compat_lock(u_int8_t af)
1276{
1277	mutex_lock(&xt[af].compat_mutex);
1278}
1279EXPORT_SYMBOL_GPL(xt_compat_lock);
1280
1281void xt_compat_unlock(u_int8_t af)
1282{
1283	mutex_unlock(&xt[af].compat_mutex);
1284}
1285EXPORT_SYMBOL_GPL(xt_compat_unlock);
1286#endif
1287
1288DEFINE_PER_CPU(seqcount_t, xt_recseq);
1289EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
1290
1291struct static_key xt_tee_enabled __read_mostly;
1292EXPORT_SYMBOL_GPL(xt_tee_enabled);
1293
1294static int xt_jumpstack_alloc(struct xt_table_info *i)
1295{
1296	unsigned int size;
1297	int cpu;
1298
1299	size = sizeof(void **) * nr_cpu_ids;
1300	if (size > PAGE_SIZE)
1301		i->jumpstack = kvzalloc(size, GFP_KERNEL);
1302	else
1303		i->jumpstack = kzalloc(size, GFP_KERNEL);
1304	if (i->jumpstack == NULL)
1305		return -ENOMEM;
1306
1307	/* ruleset without jumps -- no stack needed */
1308	if (i->stacksize == 0)
1309		return 0;
1310
1311	/* Jumpstack needs to be able to record two full callchains, one
1312	 * from the first rule set traversal, plus one table reentrancy
1313	 * via -j TEE without clobbering the callchain that brought us to
1314	 * the TEE target.
1315	 *
1316	 * This is done by allocating two jumpstacks per cpu, on reentry
1317	 * the upper half of the stack is used.
1318	 *
1319	 * see the jumpstack setup in ipt_do_table() for more details.
1320	 */
1321	size = sizeof(void *) * i->stacksize * 2u;
1322	for_each_possible_cpu(cpu) {
1323		i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
1324			cpu_to_node(cpu));
1325		if (i->jumpstack[cpu] == NULL)
1326			/*
1327			 * Freeing will be done later on by the callers. The
1328			 * chain is: xt_replace_table -> __do_replace ->
1329			 * do_replace -> xt_free_table_info.
1330			 */
1331			return -ENOMEM;
1332	}
1333
1334	return 0;
1335}
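
/*
 * Example (editorial note): the "upper half on reentry" scheme set up
 * above is consumed by the traversers; ipt_do_table() picks its stack
 * roughly like this (quoted loosely from net/ipv4/netfilter/ip_tables.c):
 *
 *	jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
 *	if (static_key_false(&xt_tee_enabled))
 *		jumpstack += private->stacksize *
 *			     __this_cpu_read(nf_skb_duplicated);
 *
 * so a TEE-duplicated packet re-enters the table with its own
 * private->stacksize slots instead of clobbering the original chain.
 */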
1336
1337struct xt_counters *xt_counters_alloc(unsigned int counters)
1338{
1339	struct xt_counters *mem;
1340
1341	if (counters == 0 || counters > INT_MAX / sizeof(*mem))
1342		return NULL;
1343
1344	counters *= sizeof(*mem);
1345	if (counters > XT_MAX_TABLE_SIZE)
1346		return NULL;
1347
1348	return vzalloc(counters);
1349}
1350EXPORT_SYMBOL(xt_counters_alloc);
1351
1352struct xt_table_info *
1353xt_replace_table(struct xt_table *table,
1354	      unsigned int num_counters,
1355	      struct xt_table_info *newinfo,
1356	      int *error)
1357{
1358	struct xt_table_info *private;
1359	unsigned int cpu;
1360	int ret;
1361
1362	ret = xt_jumpstack_alloc(newinfo);
1363	if (ret < 0) {
1364		*error = ret;
1365		return NULL;
1366	}
1367
1368	/* Do the substitution. */
1369	local_bh_disable();
1370	private = table->private;
1371
1372	/* Check inside lock: is the old number correct? */
1373	if (num_counters != private->number) {
1374		pr_debug("num_counters != table->private->number (%u/%u)\n",
1375			 num_counters, private->number);
1376		local_bh_enable();
1377		*error = -EAGAIN;
1378		return NULL;
1379	}
1380
1381	newinfo->initial_entries = private->initial_entries;
1382	/*
1383	 * Ensure contents of newinfo are visible before assigning to
1384	 * private.
1385	 */
1386	smp_wmb();
1387	table->private = newinfo;
1388
1389	/* make sure all cpus see new ->private value */
1390	smp_wmb();
1391
1392	/*
1393	 * Even though table entries have now been swapped, other CPUs
1394	 * may still be using the old entries...
1395	 */
1396	local_bh_enable();
1397
1398	/* ... so wait for even xt_recseq on all cpus */
1399	for_each_possible_cpu(cpu) {
1400		seqcount_t *s = &per_cpu(xt_recseq, cpu);
1401		u32 seq = raw_read_seqcount(s);
1402
1403		if (seq & 1) {
1404			do {
1405				cond_resched();
1406				cpu_relax();
1407			} while (seq == raw_read_seqcount(s));
1408		}
1409	}
1410
1411	audit_log_nfcfg(table->name, table->af, private->number,
1412			!private->number ? AUDIT_XT_OP_REGISTER :
1413					   AUDIT_XT_OP_REPLACE,
1414			GFP_KERNEL);
1415	return private;
1416}
1417EXPORT_SYMBOL_GPL(xt_replace_table);
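
/*
 * Example (editorial sketch): the packet-path reader that the seqcount
 * wait above synchronizes with.  Real traversers bracket their use of
 * table->private with the xt_write_recseq_* helpers from x_tables.h,
 * roughly like this:
 */
static unsigned int example_read_table(struct xt_table *table)
{
	const struct xt_table_info *private;
	unsigned int addend, n;

	local_bh_disable();
	addend = xt_write_recseq_begin();	/* xt_recseq becomes odd */

	private = READ_ONCE(table->private);
	/* ... walk private->entries here ... */
	n = private->number;

	xt_write_recseq_end(addend);		/* even again, wait can end */
	local_bh_enable();

	return n;
}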
1418
1419struct xt_table *xt_register_table(struct net *net,
1420				   const struct xt_table *input_table,
1421				   struct xt_table_info *bootstrap,
1422				   struct xt_table_info *newinfo)
1423{
1424	int ret;
1425	struct xt_table_info *private;
1426	struct xt_table *t, *table;
1427
1428	/* Don't add one object to multiple lists. */
1429	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
1430	if (!table) {
1431		ret = -ENOMEM;
1432		goto out;
1433	}
1434
1435	mutex_lock(&xt[table->af].mutex);
1436	/* Don't autoload: we'd eat our tail... */
1437	list_for_each_entry(t, &net->xt.tables[table->af], list) {
1438		if (strcmp(t->name, table->name) == 0) {
1439			ret = -EEXIST;
1440			goto unlock;
1441		}
1442	}
1443
1444	/* Simplifies replace_table code. */
1445	table->private = bootstrap;
1446
1447	if (!xt_replace_table(table, 0, newinfo, &ret))
1448		goto unlock;
1449
1450	private = table->private;
1451	pr_debug("table->private->number = %u\n", private->number);
1452
1453	/* save number of initial entries */
1454	private->initial_entries = private->number;
1455
1456	list_add(&table->list, &net->xt.tables[table->af]);
1457	mutex_unlock(&xt[table->af].mutex);
1458	return table;
1459
1460unlock:
1461	mutex_unlock(&xt[table->af].mutex);
1462	kfree(table);
1463out:
1464	return ERR_PTR(ret);
1465}
1466EXPORT_SYMBOL_GPL(xt_register_table);
1467
1468void *xt_unregister_table(struct xt_table *table)
1469{
1470	struct xt_table_info *private;
1471
1472	mutex_lock(&xt[table->af].mutex);
1473	private = table->private;
1474	list_del(&table->list);
1475	mutex_unlock(&xt[table->af].mutex);
1476	audit_log_nfcfg(table->name, table->af, private->number,
1477			AUDIT_XT_OP_UNREGISTER, GFP_KERNEL);
1478	kfree(table);
1479
1480	return private;
1481}
1482EXPORT_SYMBOL_GPL(xt_unregister_table);
1483
1484#ifdef CONFIG_PROC_FS
1485static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
1486{
1487	struct net *net = seq_file_net(seq);
1488	u_int8_t af = (unsigned long)PDE_DATA(file_inode(seq->file));
1489
1490	mutex_lock(&xt[af].mutex);
1491	return seq_list_start(&net->xt.tables[af], *pos);
1492}
1493
1494static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1495{
1496	struct net *net = seq_file_net(seq);
1497	u_int8_t af = (unsigned long)PDE_DATA(file_inode(seq->file));
1498
1499	return seq_list_next(v, &net->xt.tables[af], pos);
1500}
1501
1502static void xt_table_seq_stop(struct seq_file *seq, void *v)
1503{
1504	u_int8_t af = (unsigned long)PDE_DATA(file_inode(seq->file));
1505
1506	mutex_unlock(&xt[af].mutex);
1507}
1508
1509static int xt_table_seq_show(struct seq_file *seq, void *v)
1510{
1511	struct xt_table *table = list_entry(v, struct xt_table, list);
1512
1513	if (*table->name)
1514		seq_printf(seq, "%s\n", table->name);
1515	return 0;
1516}
1517
1518static const struct seq_operations xt_table_seq_ops = {
1519	.start	= xt_table_seq_start,
1520	.next	= xt_table_seq_next,
1521	.stop	= xt_table_seq_stop,
1522	.show	= xt_table_seq_show,
1523};
1524
1525/*
1526 * Traverse state for ip{,6}_{tables,matches}, used to help cross
1527 * the multi-AF mutexes.
1528 */
1529struct nf_mttg_trav {
1530	struct list_head *head, *curr;
1531	uint8_t class;
1532};
1533
1534enum {
1535	MTTG_TRAV_INIT,
1536	MTTG_TRAV_NFP_UNSPEC,
1537	MTTG_TRAV_NFP_SPEC,
1538	MTTG_TRAV_DONE,
1539};
1540
1541static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
1542    bool is_target)
1543{
1544	static const uint8_t next_class[] = {
1545		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
1546		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
1547	};
1548	uint8_t nfproto = (unsigned long)PDE_DATA(file_inode(seq->file));
1549	struct nf_mttg_trav *trav = seq->private;
1550
1551	if (ppos != NULL)
1552		++(*ppos);
1553
1554	switch (trav->class) {
1555	case MTTG_TRAV_INIT:
1556		trav->class = MTTG_TRAV_NFP_UNSPEC;
1557		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
1558		trav->head = trav->curr = is_target ?
1559			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
1560		break;
1561	case MTTG_TRAV_NFP_UNSPEC:
1562		trav->curr = trav->curr->next;
1563		if (trav->curr != trav->head)
1564			break;
1565		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
1566		mutex_lock(&xt[nfproto].mutex);
1567		trav->head = trav->curr = is_target ?
1568			&xt[nfproto].target : &xt[nfproto].match;
1569		trav->class = next_class[trav->class];
1570		break;
1571	case MTTG_TRAV_NFP_SPEC:
1572		trav->curr = trav->curr->next;
1573		if (trav->curr != trav->head)
1574			break;
1575		fallthrough;
1576	default:
1577		return NULL;
1578	}
1579	return trav;
1580}
1581
1582static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
1583    bool is_target)
1584{
1585	struct nf_mttg_trav *trav = seq->private;
1586	unsigned int j;
1587
1588	trav->class = MTTG_TRAV_INIT;
1589	for (j = 0; j < *pos; ++j)
1590		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
1591			return NULL;
1592	return trav;
1593}
1594
1595static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
1596{
1597	uint8_t nfproto = (unsigned long)PDE_DATA(file_inode(seq->file));
1598	struct nf_mttg_trav *trav = seq->private;
1599
1600	switch (trav->class) {
1601	case MTTG_TRAV_NFP_UNSPEC:
1602		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
1603		break;
1604	case MTTG_TRAV_NFP_SPEC:
1605		mutex_unlock(&xt[nfproto].mutex);
1606		break;
1607	}
1608}
1609
1610static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
1611{
1612	return xt_mttg_seq_start(seq, pos, false);
1613}
1614
1615static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
1616{
1617	return xt_mttg_seq_next(seq, v, ppos, false);
1618}
1619
1620static int xt_match_seq_show(struct seq_file *seq, void *v)
1621{
1622	const struct nf_mttg_trav *trav = seq->private;
1623	const struct xt_match *match;
1624
1625	switch (trav->class) {
1626	case MTTG_TRAV_NFP_UNSPEC:
1627	case MTTG_TRAV_NFP_SPEC:
1628		if (trav->curr == trav->head)
1629			return 0;
1630		match = list_entry(trav->curr, struct xt_match, list);
1631		if (*match->name)
1632			seq_printf(seq, "%s\n", match->name);
1633	}
1634	return 0;
1635}
1636
1637static const struct seq_operations xt_match_seq_ops = {
1638	.start	= xt_match_seq_start,
1639	.next	= xt_match_seq_next,
1640	.stop	= xt_mttg_seq_stop,
1641	.show	= xt_match_seq_show,
1642};
1643
1644static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
1645{
1646	return xt_mttg_seq_start(seq, pos, true);
1647}
1648
1649static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
1650{
1651	return xt_mttg_seq_next(seq, v, ppos, true);
1652}
1653
1654static int xt_target_seq_show(struct seq_file *seq, void *v)
1655{
1656	const struct nf_mttg_trav *trav = seq->private;
1657	const struct xt_target *target;
1658
1659	switch (trav->class) {
1660	case MTTG_TRAV_NFP_UNSPEC:
1661	case MTTG_TRAV_NFP_SPEC:
1662		if (trav->curr == trav->head)
1663			return 0;
1664		target = list_entry(trav->curr, struct xt_target, list);
1665		if (*target->name)
1666			seq_printf(seq, "%s\n", target->name);
1667	}
1668	return 0;
1669}
1670
1671static const struct seq_operations xt_target_seq_ops = {
1672	.start	= xt_target_seq_start,
1673	.next	= xt_target_seq_next,
1674	.stop	= xt_mttg_seq_stop,
1675	.show	= xt_target_seq_show,
1676};
1677
1678#define FORMAT_TABLES	"_tables_names"
1679#define FORMAT_MATCHES	"_tables_matches"
1680#define FORMAT_TARGETS	"_tables_targets"
1681
1682#endif /* CONFIG_PROC_FS */
1683
1684/**
1685 * xt_hook_ops_alloc - set up hooks for a new table
1686 * @table:	table with metadata needed to set up hooks
1687 * @fn:		Hook function
1688 *
1689 * This function will create the nf_hook_ops that the x_table needs
1690 * to hand to nf_register_net_hooks().
1691 */
1692struct nf_hook_ops *
1693xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
1694{
1695	unsigned int hook_mask = table->valid_hooks;
1696	uint8_t i, num_hooks = hweight32(hook_mask);
1697	uint8_t hooknum;
1698	struct nf_hook_ops *ops;
1699
1700	if (!num_hooks)
1701		return ERR_PTR(-EINVAL);
1702
1703	ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
1704	if (ops == NULL)
1705		return ERR_PTR(-ENOMEM);
1706
1707	for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
1708	     hook_mask >>= 1, ++hooknum) {
1709		if (!(hook_mask & 1))
1710			continue;
1711		ops[i].hook     = fn;
1712		ops[i].pf       = table->af;
1713		ops[i].hooknum  = hooknum;
1714		ops[i].priority = table->priority;
1715		++i;
1716	}
1717
1718	return ops;
1719}
1720EXPORT_SYMBOL_GPL(xt_hook_ops_alloc);
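
/*
 * Example (editorial sketch): how a table module consumes
 * xt_hook_ops_alloc(), in the style of iptable_filter and friends.
 * "example_table_setup" and its parameters are hypothetical; a real
 * module also keeps @ops around for nf_unregister_net_hooks() later.
 */
static int example_table_setup(struct net *net,
			       const struct xt_table *table_tmpl,
			       nf_hookfn *fn)
{
	struct nf_hook_ops *ops;
	int ret;

	ops = xt_hook_ops_alloc(table_tmpl, fn);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	ret = nf_register_net_hooks(net, ops,
				    hweight32(table_tmpl->valid_hooks));
	if (ret)
		kfree(ops);	/* ops came from kcalloc() above */

	return ret;
}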
1721
1722int xt_proto_init(struct net *net, u_int8_t af)
1723{
1724#ifdef CONFIG_PROC_FS
1725	char buf[XT_FUNCTION_MAXNAMELEN];
1726	struct proc_dir_entry *proc;
1727	kuid_t root_uid;
1728	kgid_t root_gid;
1729#endif
1730
1731	if (af >= ARRAY_SIZE(xt_prefix))
1732		return -EINVAL;
1733
1734
1735#ifdef CONFIG_PROC_FS
1736	root_uid = make_kuid(net->user_ns, 0);
1737	root_gid = make_kgid(net->user_ns, 0);
1738
1739	strlcpy(buf, xt_prefix[af], sizeof(buf));
1740	strlcat(buf, FORMAT_TABLES, sizeof(buf));
1741	proc = proc_create_net_data(buf, 0440, net->proc_net, &xt_table_seq_ops,
1742			sizeof(struct seq_net_private),
1743			(void *)(unsigned long)af);
1744	if (!proc)
1745		goto out;
1746	if (uid_valid(root_uid) && gid_valid(root_gid))
1747		proc_set_user(proc, root_uid, root_gid);
1748
1749	strlcpy(buf, xt_prefix[af], sizeof(buf));
1750	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1751	proc = proc_create_seq_private(buf, 0440, net->proc_net,
1752			&xt_match_seq_ops, sizeof(struct nf_mttg_trav),
1753			(void *)(unsigned long)af);
1754	if (!proc)
1755		goto out_remove_tables;
1756	if (uid_valid(root_uid) && gid_valid(root_gid))
1757		proc_set_user(proc, root_uid, root_gid);
1758
1759	strlcpy(buf, xt_prefix[af], sizeof(buf));
1760	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1761	proc = proc_create_seq_private(buf, 0440, net->proc_net,
1762			 &xt_target_seq_ops, sizeof(struct nf_mttg_trav),
1763			 (void *)(unsigned long)af);
1764	if (!proc)
1765		goto out_remove_matches;
1766	if (uid_valid(root_uid) && gid_valid(root_gid))
1767		proc_set_user(proc, root_uid, root_gid);
1768#endif
1769
1770	return 0;
1771
1772#ifdef CONFIG_PROC_FS
1773out_remove_matches:
1774	strlcpy(buf, xt_prefix[af], sizeof(buf));
1775	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1776	remove_proc_entry(buf, net->proc_net);
1777
1778out_remove_tables:
1779	strlcpy(buf, xt_prefix[af], sizeof(buf));
1780	strlcat(buf, FORMAT_TABLES, sizeof(buf));
1781	remove_proc_entry(buf, net->proc_net);
1782out:
1783	return -1;
1784#endif
1785}
1786EXPORT_SYMBOL_GPL(xt_proto_init);
1787
1788void xt_proto_fini(struct net *net, u_int8_t af)
1789{
1790#ifdef CONFIG_PROC_FS
1791	char buf[XT_FUNCTION_MAXNAMELEN];
1792
1793	strlcpy(buf, xt_prefix[af], sizeof(buf));
1794	strlcat(buf, FORMAT_TABLES, sizeof(buf));
1795	remove_proc_entry(buf, net->proc_net);
1796
1797	strlcpy(buf, xt_prefix[af], sizeof(buf));
1798	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1799	remove_proc_entry(buf, net->proc_net);
1800
1801	strlcpy(buf, xt_prefix[af], sizeof(buf));
1802	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1803	remove_proc_entry(buf, net->proc_net);
1804#endif /*CONFIG_PROC_FS*/
1805}
1806EXPORT_SYMBOL_GPL(xt_proto_fini);
1807
1808/**
1809 * xt_percpu_counter_alloc - allocate x_tables rule counter
1810 *
1811 * @state: pointer to xt_percpu allocation state
1812 * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
1813 *
1814 * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
1815 * contain the address of the real (percpu) counter.
1816 *
1817 * Rule evaluation needs to use xt_get_this_cpu_counter() helper
1818 * to fetch the real percpu counter.
1819 *
1820 * To speed up allocation and improve data locality, a 4kb block is
1821 * allocated.  Freeing any counter may free an entire block, so all
1822 * counters allocated using the same state must be freed at the same
1823 * time.
1824 *
1825 * xt_percpu_counter_alloc_state contains the base address of the
1826 * allocated page and the current sub-offset.
1827 *
1828 * Return: false on error.
1829 */
1830bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
1831			     struct xt_counters *counter)
1832{
1833	BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));
1834
1835	if (nr_cpu_ids <= 1)
1836		return true;
1837
1838	if (!state->mem) {
1839		state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
1840					    XT_PCPU_BLOCK_SIZE);
1841		if (!state->mem)
1842			return false;
1843	}
1844	counter->pcnt = (__force unsigned long)(state->mem + state->off);
1845	state->off += sizeof(*counter);
1846	if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
1847		state->mem = NULL;
1848		state->off = 0;
1849	}
1850	return true;
1851}
1852EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);
1853
1854void xt_percpu_counter_free(struct xt_counters *counters)
1855{
1856	unsigned long pcnt = counters->pcnt;
1857
1858	if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
1859		free_percpu((void __percpu *)pcnt);
1860}
1861EXPORT_SYMBOL_GPL(xt_percpu_counter_free);
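
/*
 * Example (editorial sketch): the per-packet side of the scheme
 * described above.  Rule traversal never adds to counters->pcnt
 * directly; it resolves the real per-cpu slot first, using
 * xt_get_this_cpu_counter() and ADD_COUNTER() from x_tables.h.
 */
static void example_bump_counter(struct xt_counters *counters,
				 unsigned int pktlen)
{
	struct xt_counters *counter;

	counter = xt_get_this_cpu_counter(counters);
	ADD_COUNTER(*counter, pktlen, 1);	/* bytes, packets */
}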
1862
1863static int __net_init xt_net_init(struct net *net)
1864{
1865	int i;
1866
1867	for (i = 0; i < NFPROTO_NUMPROTO; i++)
1868		INIT_LIST_HEAD(&net->xt.tables[i]);
1869	return 0;
1870}
1871
1872static void __net_exit xt_net_exit(struct net *net)
1873{
1874	int i;
1875
1876	for (i = 0; i < NFPROTO_NUMPROTO; i++)
1877		WARN_ON_ONCE(!list_empty(&net->xt.tables[i]));
1878}
1879
1880static struct pernet_operations xt_net_ops = {
1881	.init = xt_net_init,
1882	.exit = xt_net_exit,
1883};
1884
1885static int __init xt_init(void)
1886{
1887	unsigned int i;
1888	int rv;
1889
1890	for_each_possible_cpu(i) {
1891		seqcount_init(&per_cpu(xt_recseq, i));
1892	}
1893
1894	xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
1895	if (!xt)
1896		return -ENOMEM;
1897
1898	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
1899		mutex_init(&xt[i].mutex);
1900#ifdef CONFIG_COMPAT
1901		mutex_init(&xt[i].compat_mutex);
1902		xt[i].compat_tab = NULL;
1903#endif
1904		INIT_LIST_HEAD(&xt[i].target);
1905		INIT_LIST_HEAD(&xt[i].match);
1906	}
1907	rv = register_pernet_subsys(&xt_net_ops);
1908	if (rv < 0)
1909		kfree(xt);
1910	return rv;
1911}
1912
1913static void __exit xt_fini(void)
1914{
1915	unregister_pernet_subsys(&xt_net_ops);
1916	kfree(xt);
1917}
1918
1919module_init(xt_init);
1920module_exit(xt_fini);
1921