/* net/bridge/netfilter/ebtables.c — Linux v4.6 (web-viewer navigation text removed) */
   1/*
   2 *  ebtables
   3 *
   4 *  Author:
   5 *  Bart De Schuymer		<bdschuym@pandora.be>
   6 *
   7 *  ebtables.c,v 2.0, July, 2002
   8 *
   9 *  This code is strongly inspired by the iptables code which is
  10 *  Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
  11 *
  12 *  This program is free software; you can redistribute it and/or
  13 *  modify it under the terms of the GNU General Public License
  14 *  as published by the Free Software Foundation; either version
  15 *  2 of the License, or (at your option) any later version.
  16 */
  17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  18#include <linux/kmod.h>
  19#include <linux/module.h>
  20#include <linux/vmalloc.h>
  21#include <linux/netfilter/x_tables.h>
  22#include <linux/netfilter_bridge/ebtables.h>
  23#include <linux/spinlock.h>
  24#include <linux/mutex.h>
  25#include <linux/slab.h>
  26#include <asm/uaccess.h>
  27#include <linux/smp.h>
  28#include <linux/cpumask.h>
  29#include <linux/audit.h>
  30#include <net/sock.h>
  31/* needed for logical [in,out]-dev filtering */
  32#include "../br_private.h"
  33
/* Loud diagnostic used when userspace hands us a malformed ruleset; goes to
 * the kernel log.  Swap in the empty definition below to compile it out.
 */
#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
					 "report to author: "format, ## args)
/* #define BUGPRINT(format, args...) */
  37
/* Each cpu has its own set of counters, so there is no need for write_lock in
 * the softirq
 * For reading or updating the counters, the user context needs to
 * get a write_lock
 */

/* The size of each set of counters is altered to get cache alignment */
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
/* size in bytes of one cpu's counter set for a table with n entries */
#define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
/* start of cpu-local counter set number "cpu" inside counter block c */
#define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
				 COUNTER_OFFSET(n) * cpu))
  52static DEFINE_MUTEX(ebt_mutex);
  53
  54#ifdef CONFIG_COMPAT
  55static void ebt_standard_compat_from_user(void *dst, const void *src)
  56{
  57	int v = *(compat_int_t *)src;
  58
  59	if (v >= 0)
  60		v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
  61	memcpy(dst, &v, sizeof(v));
  62}
  63
  64static int ebt_standard_compat_to_user(void __user *dst, const void *src)
  65{
  66	compat_int_t cv = *(int *)src;
  67
  68	if (cv >= 0)
  69		cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
  70	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
  71}
  72#endif
  73
  74
/* The standard target: carries only an int verdict (accept/drop/continue/
 * return or a jump offset).  It deliberately has no ->target function;
 * ebt_do_table() special-cases a NULL target function as "standard".
 */
static struct xt_target ebt_standard_target = {
	.name       = "standard",
	.revision   = 0,
	.family     = NFPROTO_BRIDGE,
	.targetsize = sizeof(int),
#ifdef CONFIG_COMPAT
	.compatsize = sizeof(compat_int_t),
	.compat_from_user = ebt_standard_compat_from_user,
	.compat_to_user =  ebt_standard_compat_to_user,
#endif
};
  86
  87static inline int
  88ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
  89	       struct xt_action_param *par)
  90{
  91	par->target   = w->u.watcher;
  92	par->targinfo = w->data;
  93	w->u.watcher->target(skb, par);
  94	/* watchers don't give a verdict */
  95	return 0;
  96}
  97
  98static inline int
  99ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
 100	     struct xt_action_param *par)
 101{
 102	par->match     = m->u.match;
 103	par->matchinfo = m->data;
 104	return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
 105}
 106
 107static inline int
 108ebt_dev_check(const char *entry, const struct net_device *device)
 109{
 110	int i = 0;
 111	const char *devname;
 112
 113	if (*entry == '\0')
 114		return 0;
 115	if (!device)
 116		return 1;
 117	devname = device->name;
 118	/* 1 is the wildcard token */
 119	while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
 120		i++;
 121	return devname[i] != entry[i] && entry[i] != 1;
 122}
 123
/* XOR a test result with its per-rule inversion bit; relies on a local
 * variable "e" (the rule) being in scope at the expansion site.
 */
#define FWINV2(bool, invflg) ((bool) ^ !!(e->invflags & invflg))
/* process standard matches
 * Returns 0 when the rule's built-in criteria (protocol, in/out device,
 * logical bridge device, source/destination MAC) all match, 1 otherwise.
 */
static inline int
ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
		const struct net_device *in, const struct net_device *out)
{
	const struct ethhdr *h = eth_hdr(skb);
	const struct net_bridge_port *p;
	__be16 ethproto;
	int verdict, i;

	/* a VLAN-tagged frame is matched on the 802.1Q ethertype itself */
	if (skb_vlan_tag_present(skb))
		ethproto = htons(ETH_P_8021Q);
	else
		ethproto = h->h_proto;

	if (e->bitmask & EBT_802_3) {
		if (FWINV2(eth_proto_is_802_3(ethproto), EBT_IPROTO))
			return 1;
	} else if (!(e->bitmask & EBT_NOPROTO) &&
	   FWINV2(e->ethproto != ethproto, EBT_IPROTO))
		return 1;

	if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
		return 1;
	if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
		return 1;
	/* rcu_read_lock()ed by nf_hook_slow */
	if (in && (p = br_port_get_rcu(in)) != NULL &&
	    FWINV2(ebt_dev_check(e->logical_in, p->br->dev), EBT_ILOGICALIN))
		return 1;
	if (out && (p = br_port_get_rcu(out)) != NULL &&
	    FWINV2(ebt_dev_check(e->logical_out, p->br->dev), EBT_ILOGICALOUT))
		return 1;

	if (e->bitmask & EBT_SOURCEMAC) {
		/* accumulate per-byte differences under the mask; 0 == equal */
		verdict = 0;
		for (i = 0; i < 6; i++)
			verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
			   e->sourcemsk[i];
		if (FWINV2(verdict != 0, EBT_ISOURCE))
			return 1;
	}
	if (e->bitmask & EBT_DESTMAC) {
		verdict = 0;
		for (i = 0; i < 6; i++)
			verdict |= (h->h_dest[i] ^ e->destmac[i]) &
			   e->destmsk[i];
		if (FWINV2(verdict != 0, EBT_IDEST))
			return 1;
	}
	return 0;
}
 177
 178static inline
 179struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
 180{
 181	return (void *)entry + entry->next_offset;
 182}
 183
/* Do some firewalling
 * Core packet walk: runs the chain for the given hook under the table's
 * read lock, following jumps into user defined chains with an explicit
 * per-cpu stack (private->chainstack), and maps the final ebtables verdict
 * onto an NF_* verdict.
 */
unsigned int ebt_do_table(struct sk_buff *skb,
			  const struct nf_hook_state *state,
			  struct ebt_table *table)
{
	unsigned int hook = state->hook;
	int i, nentries;
	struct ebt_entry *point;		/* current rule */
	struct ebt_counter *counter_base, *cb_base;
	const struct ebt_entry_target *t;
	int verdict, sp = 0;			/* sp = chain-stack depth */
	struct ebt_chainstack *cs;
	struct ebt_entries *chaininfo;		/* current chain header */
	const char *base;
	const struct ebt_table_info *private;
	struct xt_action_param acpar;

	acpar.family  = NFPROTO_BRIDGE;
	acpar.net     = state->net;
	acpar.in      = state->in;
	acpar.out     = state->out;
	acpar.hotdrop = false;
	acpar.hooknum = hook;

	read_lock_bh(&table->lock);
	private = table->private;
	/* this cpu's private slice of the counter array */
	cb_base = COUNTER_BASE(private->counters, private->nentries,
	   smp_processor_id());
	if (private->chainstack)
		cs = private->chainstack[smp_processor_id()];
	else
		cs = NULL;
	chaininfo = private->hook_entry[hook];
	nentries = private->hook_entry[hook]->nentries;
	point = (struct ebt_entry *)(private->hook_entry[hook]->data);
	counter_base = cb_base + private->hook_entry[hook]->counter_offset;
	/* base for chain jumps */
	base = private->entries;
	i = 0;
	while (i < nentries) {
		if (ebt_basic_match(point, skb, state->in, state->out))
			goto letscontinue;

		if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
			goto letscontinue;
		if (acpar.hotdrop) {
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}

		/* increase counter */
		(*(counter_base + i)).pcnt++;
		(*(counter_base + i)).bcnt += skb->len;

		/* these should only watch: not modify, nor tell us
		 * what to do with the packet
		 */
		EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);

		t = (struct ebt_entry_target *)
		   (((char *)point) + point->target_offset);
		/* standard target */
		if (!t->u.target->target)
			verdict = ((struct ebt_standard_target *)t)->verdict;
		else {
			acpar.target   = t->u.target;
			acpar.targinfo = t->data;
			verdict = t->u.target->target(skb, &acpar);
		}
		if (verdict == EBT_ACCEPT) {
			read_unlock_bh(&table->lock);
			return NF_ACCEPT;
		}
		if (verdict == EBT_DROP) {
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}
		if (verdict == EBT_RETURN) {
letsreturn:
#ifdef CONFIG_NETFILTER_DEBUG
			if (sp == 0) {
				BUGPRINT("RETURN on base chain");
				/* act like this is EBT_CONTINUE */
				goto letscontinue;
			}
#endif
			/* pop one frame off the chain stack */
			sp--;
			/* put all the local variables right */
			i = cs[sp].n;
			chaininfo = cs[sp].chaininfo;
			nentries = chaininfo->nentries;
			point = cs[sp].e;
			counter_base = cb_base +
			   chaininfo->counter_offset;
			continue;
		}
		if (verdict == EBT_CONTINUE)
			goto letscontinue;
#ifdef CONFIG_NETFILTER_DEBUG
		if (verdict < 0) {
			BUGPRINT("bogus standard verdict\n");
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}
#endif
		/* jump to a udc: verdict >= 0 is a byte offset from base;
		 * save the resume position on the chain stack first
		 */
		cs[sp].n = i + 1;
		cs[sp].chaininfo = chaininfo;
		cs[sp].e = ebt_next_entry(point);
		i = 0;
		chaininfo = (struct ebt_entries *) (base + verdict);
#ifdef CONFIG_NETFILTER_DEBUG
		if (chaininfo->distinguisher) {
			BUGPRINT("jump to non-chain\n");
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}
#endif
		nentries = chaininfo->nentries;
		point = (struct ebt_entry *)chaininfo->data;
		counter_base = cb_base + chaininfo->counter_offset;
		sp++;
		continue;
letscontinue:
		point = ebt_next_entry(point);
		i++;
	}

	/* I actually like this :) */
	if (chaininfo->policy == EBT_RETURN)
		goto letsreturn;
	if (chaininfo->policy == EBT_ACCEPT) {
		read_unlock_bh(&table->lock);
		return NF_ACCEPT;
	}
	read_unlock_bh(&table->lock);
	return NF_DROP;
}
 322
/* If it succeeds, returns element and locks mutex
 * On failure the mutex is released, *error is set to -ENOENT and NULL is
 * returned.  The caller that gets a non-NULL result owns the mutex.
 */
static inline void *
find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
			struct mutex *mutex)
{
	/* every object on these lists starts with a list_head immediately
	 * followed by its name, so one generic layout serves the lookup
	 */
	struct {
		struct list_head list;
		char name[EBT_FUNCTION_MAXNAMELEN];
	} *e;

	mutex_lock(mutex);
	list_for_each_entry(e, head, list) {
		if (strcmp(e->name, name) == 0)
			/* found: return with the mutex still held */
			return e;
	}
	*error = -ENOENT;
	mutex_unlock(mutex);
	return NULL;
}
 342
/* Like find_inlist_lock_noload(), but on a miss requests the module
 * "<prefix><name>" and retries the lookup once.
 */
static void *
find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
		 int *error, struct mutex *mutex)
{
	return try_then_request_module(
			find_inlist_lock_noload(head, name, error, mutex),
			"%s%s", prefix, name);
}
 351
/* Look up an ebtables table by name in @net; auto-loads "ebtable_<name>"
 * if needed.  On success the returned table is protected by *mutex, which
 * is left held for the caller.
 */
static inline struct ebt_table *
find_table_lock(struct net *net, const char *name, int *error,
		struct mutex *mutex)
{
	return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
				"ebtable_", error, mutex);
}
 359
 360static inline int
 361ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
 362		unsigned int *cnt)
 363{
 364	const struct ebt_entry *e = par->entryinfo;
 365	struct xt_match *match;
 366	size_t left = ((char *)e + e->watchers_offset) - (char *)m;
 367	int ret;
 368
 369	if (left < sizeof(struct ebt_entry_match) ||
 370	    left - sizeof(struct ebt_entry_match) < m->match_size)
 371		return -EINVAL;
 372
 373	match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
 374	if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) {
 375		request_module("ebt_%s", m->u.name);
 376		match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
 377	}
 378	if (IS_ERR(match))
 379		return PTR_ERR(match);
 380	m->u.match = match;
 381
 382	par->match     = match;
 383	par->matchinfo = m->data;
 384	ret = xt_check_match(par, m->match_size,
 385	      e->ethproto, e->invflags & EBT_IPROTO);
 386	if (ret < 0) {
 387		module_put(match->me);
 388		return ret;
 389	}
 390
 391	(*cnt)++;
 392	return 0;
 393}
 394
/* Validate one watcher extension of a rule, resolve it to its xt_target
 * (xt_request_find_target auto-loads the module) and run its checkentry
 * hook.  On success a module reference is held and *cnt is incremented
 * so a later failure can unwind exactly this many watchers.
 */
static inline int
ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
		  unsigned int *cnt)
{
	const struct ebt_entry *e = par->entryinfo;
	struct xt_target *watcher;
	/* room between this watcher and the start of the target */
	size_t left = ((char *)e + e->target_offset) - (char *)w;
	int ret;

	if (left < sizeof(struct ebt_entry_watcher) ||
	   left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
		return -EINVAL;

	watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
	if (IS_ERR(watcher))
		return PTR_ERR(watcher);
	w->u.watcher = watcher;

	par->target   = watcher;
	par->targinfo = w->data;
	ret = xt_check_target(par, w->watcher_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		module_put(watcher->me);
		return ret;
	}

	(*cnt)++;
	return 0;
}
 425
/* Walk the entry blob copied from userspace, verifying that the claimed
 * chain-start offsets (repl->hook_entry[]) and per-entry next_offset
 * values stay inside entries_size, and record the kernel-side addresses
 * of the base-chain headers in newinfo->hook_entry[].
 */
static int ebt_verify_pointers(const struct ebt_replace *repl,
			       struct ebt_table_info *newinfo)
{
	unsigned int limit = repl->entries_size;
	unsigned int valid_hooks = repl->valid_hooks;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		newinfo->hook_entry[i] = NULL;

	newinfo->entries_size = repl->entries_size;
	newinfo->nentries = repl->nentries;

	while (offset < limit) {
		size_t left = limit - offset;
		struct ebt_entry *e = (void *)newinfo->entries + offset;

		/* must at least be able to read e->bitmask safely */
		if (left < sizeof(unsigned int))
			break;

		/* is this offset one of the advertised chain starts? */
		for (i = 0; i < NF_BR_NUMHOOKS; i++) {
			if ((valid_hooks & (1 << i)) == 0)
				continue;
			if ((char __user *)repl->hook_entry[i] ==
			     repl->entries + offset)
				break;
		}

		if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
			/* chain header (struct ebt_entries) */
			if (e->bitmask != 0) {
				/* we make userspace set this right,
				 * so there is no misunderstanding
				 */
				BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
					 "in distinguisher\n");
				return -EINVAL;
			}
			if (i != NF_BR_NUMHOOKS)
				newinfo->hook_entry[i] = (struct ebt_entries *)e;
			if (left < sizeof(struct ebt_entries))
				break;
			offset += sizeof(struct ebt_entries);
		} else {
			/* ordinary rule: next_offset must be sane and fit */
			if (left < sizeof(struct ebt_entry))
				break;
			if (left < e->next_offset)
				break;
			if (e->next_offset < sizeof(struct ebt_entry))
				return -EINVAL;
			offset += e->next_offset;
		}
	}
	/* the walk must consume the blob exactly, no tail left over */
	if (offset != limit) {
		BUGPRINT("entries_size too small\n");
		return -EINVAL;
	}

	/* check if all valid hooks have a chain */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if (!newinfo->hook_entry[i] &&
		   (valid_hooks & (1 << i))) {
			BUGPRINT("Valid hook without chain\n");
			return -EINVAL;
		}
	}
	return 0;
}
 494
/* this one is very careful, as it is the first function
 * to parse the userspace data
 * Called via EBT_ENTRY_ITERATE for every entry/chain-header.  Tracks:
 *   *n        - entries the current chain claims to hold
 *   *cnt      - entries counted so far in the current chain
 *   *totalcnt - entries counted over all chains
 *   *udc_cnt  - number of user defined chains seen
 */
static inline int
ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
			       const struct ebt_table_info *newinfo,
			       unsigned int *n, unsigned int *cnt,
			       unsigned int *totalcnt, unsigned int *udc_cnt)
{
	int i;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if ((void *)e == (void *)newinfo->hook_entry[i])
			break;
	}
	/* beginning of a new chain
	 * if i == NF_BR_NUMHOOKS it must be a user defined chain
	 */
	if (i != NF_BR_NUMHOOKS || !e->bitmask) {
		/* this checks if the previous chain has as many entries
		 * as it said it has
		 */
		if (*n != *cnt) {
			BUGPRINT("nentries does not equal the nr of entries "
				 "in the chain\n");
			return -EINVAL;
		}
		if (((struct ebt_entries *)e)->policy != EBT_DROP &&
		   ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
			/* only RETURN from udc */
			if (i != NF_BR_NUMHOOKS ||
			   ((struct ebt_entries *)e)->policy != EBT_RETURN) {
				BUGPRINT("bad policy\n");
				return -EINVAL;
			}
		}
		if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
			(*udc_cnt)++;
		/* counter_offset must equal the running entry total so the
		 * per-chain counter slices line up in the counter array
		 */
		if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
			BUGPRINT("counter_offset != totalcnt");
			return -EINVAL;
		}
		*n = ((struct ebt_entries *)e)->nentries;
		*cnt = 0;
		return 0;
	}
	/* a plain old entry, heh */
	if (sizeof(struct ebt_entry) > e->watchers_offset ||
	   e->watchers_offset > e->target_offset ||
	   e->target_offset >= e->next_offset) {
		BUGPRINT("entry offsets not in right order\n");
		return -EINVAL;
	}
	/* this is not checked anywhere else */
	if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
		BUGPRINT("target size too small\n");
		return -EINVAL;
	}
	(*cnt)++;
	(*totalcnt)++;
	return 0;
}
 557
/* Per-user-defined-chain bookkeeping for the loop check:
 * cs       - saved traversal state (chain header, position, return entry)
 * from     - index of the chain we were entered from (-1 for base chain)
 * hookmask - set of base chains from which this chain is reachable
 */
struct ebt_cl_stack {
	struct ebt_chainstack cs;
	int from;
	unsigned int hookmask;
};
 563
 564/* We need these positions to check that the jumps to a different part of the
 
 565 * entries is a jump to the beginning of a new chain.
 566 */
 567static inline int
 568ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
 569		      unsigned int *n, struct ebt_cl_stack *udc)
 570{
 571	int i;
 572
 573	/* we're only interested in chain starts */
 574	if (e->bitmask)
 575		return 0;
 576	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
 577		if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
 578			break;
 579	}
 580	/* only care about udc */
 581	if (i != NF_BR_NUMHOOKS)
 582		return 0;
 583
 584	udc[*n].cs.chaininfo = (struct ebt_entries *)e;
 585	/* these initialisations are depended on later in check_chainloops() */
 586	udc[*n].cs.n = 0;
 587	udc[*n].hookmask = 0;
 588
 589	(*n)++;
 590	return 0;
 591}
 592
 593static inline int
 594ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
 595{
 596	struct xt_mtdtor_param par;
 597
 598	if (i && (*i)-- == 0)
 599		return 1;
 600
 601	par.net       = net;
 602	par.match     = m->u.match;
 603	par.matchinfo = m->data;
 604	par.family    = NFPROTO_BRIDGE;
 605	if (par.match->destroy != NULL)
 606		par.match->destroy(&par);
 607	module_put(par.match->me);
 608	return 0;
 609}
 610
 611static inline int
 612ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
 613{
 614	struct xt_tgdtor_param par;
 615
 616	if (i && (*i)-- == 0)
 617		return 1;
 618
 619	par.net      = net;
 620	par.target   = w->u.watcher;
 621	par.targinfo = w->data;
 622	par.family   = NFPROTO_BRIDGE;
 623	if (par.target->destroy != NULL)
 624		par.target->destroy(&par);
 625	module_put(par.target->me);
 626	return 0;
 627}
 628
/* Tear down one rule: unwind all its watchers and matches, then destroy
 * its target and drop the target's module reference.  A non-NULL *cnt
 * limits cleanup to the first *cnt rules (used when ebt_check_entry
 * failed part-way through a table).
 */
static inline int
ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
{
	struct xt_tgdtor_param par;
	struct ebt_entry_target *t;

	/* chain headers (bitmask == 0) carry no extensions */
	if (e->bitmask == 0)
		return 0;
	/* we're done */
	if (cnt && (*cnt)-- == 0)
		return 1;
	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);

	par.net      = net;
	par.target   = t->u.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_BRIDGE;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	return 0;
}
 653
/* Full per-rule validation: flag sanity, hook-mask computation, then
 * checkentry for every match, watcher and the target.  On failure all
 * extensions checked so far (i matches, j watchers) are unwound.  On
 * success *cnt is incremented so translate_table() can unwind later.
 */
static inline int
ebt_check_entry(struct ebt_entry *e, struct net *net,
		const struct ebt_table_info *newinfo,
		const char *name, unsigned int *cnt,
		struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
{
	struct ebt_entry_target *t;
	struct xt_target *target;
	unsigned int i, j, hook = 0, hookmask = 0;
	size_t gap;
	int ret;
	struct xt_mtchk_param mtpar;
	struct xt_tgchk_param tgpar;

	/* don't mess with the struct ebt_entries */
	if (e->bitmask == 0)
		return 0;

	if (e->bitmask & ~EBT_F_MASK) {
		BUGPRINT("Unknown flag for bitmask\n");
		return -EINVAL;
	}
	if (e->invflags & ~EBT_INV_MASK) {
		BUGPRINT("Unknown flag for inv bitmask\n");
		return -EINVAL;
	}
	if ((e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3)) {
		BUGPRINT("NOPROTO & 802_3 not allowed\n");
		return -EINVAL;
	}
	/* what hook do we belong to?
	 * hook_entry[] is address-ordered, so the last header below e
	 * is the base chain containing (or preceding) this rule
	 */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if (!newinfo->hook_entry[i])
			continue;
		if ((char *)newinfo->hook_entry[i] < (char *)e)
			hook = i;
		else
			break;
	}
	/* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
	 * a base chain
	 */
	if (i < NF_BR_NUMHOOKS)
		hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
	else {
		/* rule lives in a udc: inherit that chain's reachability */
		for (i = 0; i < udc_cnt; i++)
			if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
				break;
		if (i == 0)
			hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
		else
			hookmask = cl_s[i - 1].hookmask;
	}
	i = 0;

	mtpar.net	= tgpar.net       = net;
	mtpar.table     = tgpar.table     = name;
	mtpar.entryinfo = tgpar.entryinfo = e;
	mtpar.hook_mask = tgpar.hook_mask = hookmask;
	mtpar.family    = tgpar.family    = NFPROTO_BRIDGE;
	ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
	if (ret != 0)
		goto cleanup_matches;
	j = 0;
	ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
	if (ret != 0)
		goto cleanup_watchers;
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
	/* gap = room available for the target structure */
	gap = e->next_offset - e->target_offset;

	target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto cleanup_watchers;
	}

	t->u.target = target;
	if (t->u.target == &ebt_standard_target) {
		if (gap < sizeof(struct ebt_standard_target)) {
			BUGPRINT("Standard target size too big\n");
			ret = -EFAULT;
			goto cleanup_watchers;
		}
		/* verdicts below -NUM_STANDARD_TARGETS are meaningless */
		if (((struct ebt_standard_target *)t)->verdict <
		   -NUM_STANDARD_TARGETS) {
			BUGPRINT("Invalid standard target\n");
			ret = -EFAULT;
			goto cleanup_watchers;
		}
	} else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
		module_put(t->u.target->me);
		ret = -EFAULT;
		goto cleanup_watchers;
	}

	tgpar.target   = target;
	tgpar.targinfo = t->data;
	ret = xt_check_target(&tgpar, t->target_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		module_put(target->me);
		goto cleanup_watchers;
	}
	(*cnt)++;
	return 0;
cleanup_watchers:
	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
cleanup_matches:
	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
	return ret;
}
 765
/* checks for loops and sets the hook mask for udc
 * the hook mask for udc tells us from which base chains the udc can be
 * accessed. This mask is a parameter to the check() functions of the extensions
 *
 * Depth-first traversal from one base chain, using cl_s[] as an explicit
 * stack: cs.n doubles as the "currently on the path" marker for loop
 * detection.  Returns 0 if loop-free, -1 on any malformed jump or loop.
 */
static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
			    unsigned int udc_cnt, unsigned int hooknr, char *base)
{
	int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
	const struct ebt_entry *e = (struct ebt_entry *)chain->data;
	const struct ebt_entry_target *t;

	while (pos < nentries || chain_nr != -1) {
		/* end of udc, go back one 'recursion' step */
		if (pos == nentries) {
			/* put back values of the time when this chain was called */
			e = cl_s[chain_nr].cs.e;
			if (cl_s[chain_nr].from != -1)
				nentries =
				cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
			else
				nentries = chain->nentries;
			pos = cl_s[chain_nr].cs.n;
			/* make sure we won't see a loop that isn't one */
			cl_s[chain_nr].cs.n = 0;
			chain_nr = cl_s[chain_nr].from;
			if (pos == nentries)
				continue;
		}
		t = (struct ebt_entry_target *)
		   (((char *)e) + e->target_offset);
		/* only standard targets can jump to another chain */
		if (strcmp(t->u.name, EBT_STANDARD_TARGET))
			goto letscontinue;
		if (e->target_offset + sizeof(struct ebt_standard_target) >
		   e->next_offset) {
			BUGPRINT("Standard target size too big\n");
			return -1;
		}
		verdict = ((struct ebt_standard_target *)t)->verdict;
		if (verdict >= 0) { /* jump to another chain */
			struct ebt_entries *hlp2 =
			   (struct ebt_entries *)(base + verdict);
			/* the destination must be a known udc header */
			for (i = 0; i < udc_cnt; i++)
				if (hlp2 == cl_s[i].cs.chaininfo)
					break;
			/* bad destination or loop */
			if (i == udc_cnt) {
				BUGPRINT("bad destination\n");
				return -1;
			}
			/* nonzero cs.n means the chain is on the current
			 * traversal path: jumping back into it is a loop
			 */
			if (cl_s[i].cs.n) {
				BUGPRINT("loop\n");
				return -1;
			}
			/* already verified from this base chain: skip */
			if (cl_s[i].hookmask & (1 << hooknr))
				goto letscontinue;
			/* this can't be 0, so the loop test is correct */
			cl_s[i].cs.n = pos + 1;
			pos = 0;
			cl_s[i].cs.e = ebt_next_entry(e);
			e = (struct ebt_entry *)(hlp2->data);
			nentries = hlp2->nentries;
			cl_s[i].from = chain_nr;
			chain_nr = i;
			/* this udc is accessible from the base chain for hooknr */
			cl_s[i].hookmask |= (1 << hooknr);
			continue;
		}
letscontinue:
		e = ebt_next_entry(e);
		pos++;
	}
	return 0;
}
 839
/* do the parsing of the table/chains/entries/matches/watchers/targets, heh
 * Orchestrates full validation of a new table image: chain ordering,
 * per-chain entry counts, udc collection, chain-loop detection, per-cpu
 * chainstack allocation and finally per-rule extension checks.
 */
static int translate_table(struct net *net, const char *name,
			   struct ebt_table_info *newinfo)
{
	unsigned int i, j, k, udc_cnt;
	int ret;
	struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */

	/* find the first valid hook; its chain must open the entry blob */
	i = 0;
	while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
		i++;
	if (i == NF_BR_NUMHOOKS) {
		BUGPRINT("No valid hooks specified\n");
		return -EINVAL;
	}
	if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
		BUGPRINT("Chains don't start at beginning\n");
		return -EINVAL;
	}
	/* make sure chains are ordered after each other in same order
	 * as their corresponding hooks
	 */
	for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
		if (!newinfo->hook_entry[j])
			continue;
		if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
			BUGPRINT("Hook order must be followed\n");
			return -EINVAL;
		}
		i = j;
	}

	/* do some early checkings and initialize some things */
	i = 0; /* holds the expected nr. of entries for the chain */
	j = 0; /* holds the up to now counted entries for the chain */
	k = 0; /* holds the total nr. of entries, should equal
		* newinfo->nentries afterwards
		*/
	udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
	   ebt_check_entry_size_and_hooks, newinfo,
	   &i, &j, &k, &udc_cnt);

	if (ret != 0)
		return ret;

	if (i != j) {
		BUGPRINT("nentries does not equal the nr of entries in the "
			 "(last) chain\n");
		return -EINVAL;
	}
	if (k != newinfo->nentries) {
		BUGPRINT("Total nentries is wrong\n");
		return -EINVAL;
	}

	/* get the location of the udc, put them in an array
	 * while we're at it, allocate the chainstack
	 */
	if (udc_cnt) {
		/* this will get free'd in do_replace()/ebt_register_table()
		 * if an error occurs
		 */
		newinfo->chainstack =
			vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
		if (!newinfo->chainstack)
			return -ENOMEM;
		for_each_possible_cpu(i) {
			newinfo->chainstack[i] =
			  vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
			if (!newinfo->chainstack[i]) {
				/* unwind the per-cpu stacks allocated so far */
				while (i)
					vfree(newinfo->chainstack[--i]);
				vfree(newinfo->chainstack);
				newinfo->chainstack = NULL;
				return -ENOMEM;
			}
		}

		cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
		if (!cl_s)
			return -ENOMEM;
		i = 0; /* the i'th udc */
		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
		   ebt_get_udc_positions, newinfo, &i, cl_s);
		/* sanity check */
		if (i != udc_cnt) {
			BUGPRINT("i != udc_cnt\n");
			vfree(cl_s);
			return -EFAULT;
		}
	}

	/* Check for loops */
	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		if (newinfo->hook_entry[i])
			if (check_chainloops(newinfo->hook_entry[i],
			   cl_s, udc_cnt, i, newinfo->entries)) {
				vfree(cl_s);
				return -EINVAL;
			}

	/* we now know the following (along with E=mc²):
	 *  - the nr of entries in each chain is right
	 *  - the size of the allocated space is right
	 *  - all valid hooks have a corresponding chain
	 *  - there are no loops
	 *  - wrong data can still be on the level of a single entry
	 *  - could be there are jumps to places that are not the
	 *    beginning of a chain. This can only occur in chains that
	 *    are not accessible from any base chains, so we don't care.
	 */

	/* used to know what we need to clean up if something goes wrong */
	i = 0;
	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
	   ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
	if (ret != 0) {
		/* unwind only the i entries that passed ebt_check_entry */
		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
				  ebt_cleanup_entry, net, &i);
	}
	vfree(cl_s);
	return ret;
}
 964
 965/* called under write_lock */
 966static void get_counters(const struct ebt_counter *oldcounters,
 967			 struct ebt_counter *counters, unsigned int nentries)
 968{
 969	int i, cpu;
 970	struct ebt_counter *counter_base;
 971
 972	/* counters of cpu 0 */
 973	memcpy(counters, oldcounters,
 974	       sizeof(struct ebt_counter) * nentries);
 975
 976	/* add other counters to those of cpu 0 */
 977	for_each_possible_cpu(cpu) {
 978		if (cpu == 0)
 979			continue;
 980		counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
 981		for (i = 0; i < nentries; i++) {
 982			counters[i].pcnt += counter_base[i].pcnt;
 983			counters[i].bcnt += counter_base[i].bcnt;
 984		}
 985	}
 986}
 987
/* Commit a table replacement: swap @newinfo in as the private data of
 * the table named in @repl, snapshot the old counters for userspace
 * (when requested) and tear down the old table contents.
 *
 * Returns 0 on success.  On failure everything translate_table() set up
 * inside @newinfo (chainstack, extension references) is undone here;
 * the caller still owns and frees newinfo and newinfo->entries.
 */
static int do_replace_finish(struct net *net, struct ebt_replace *repl,
			      struct ebt_table_info *newinfo)
{
	int ret, i;
	struct ebt_counter *counterstmp = NULL;
	/* used to be able to unlock earlier */
	struct ebt_table_info *table;
	struct ebt_table *t;

	/* the user wants counters back
	 * the check on the size is done later, when we have the lock
	 */
	if (repl->num_counters) {
		unsigned long size = repl->num_counters * sizeof(*counterstmp);
		counterstmp = vmalloc(size);
		if (!counterstmp)
			return -ENOMEM;
	}

	newinfo->chainstack = NULL;
	ret = ebt_verify_pointers(repl, newinfo);
	if (ret != 0)
		goto free_counterstmp;

	ret = translate_table(net, repl->name, newinfo);

	if (ret != 0)
		goto free_counterstmp;

	t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
	if (!t) {
		ret = -ENOENT;
		goto free_iterate;
	}

	/* the table doesn't like it */
	if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
		goto free_unlock;

	if (repl->num_counters && repl->num_counters != t->private->nentries) {
		BUGPRINT("Wrong nr. of counters requested\n");
		ret = -EINVAL;
		goto free_unlock;
	}

	/* we have the mutex lock, so no danger in reading this pointer */
	table = t->private;
	/* make sure the table can only be rmmod'ed if it contains no rules */
	if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
		ret = -ENOENT;
		goto free_unlock;
	} else if (table->nentries && !newinfo->nentries)
		module_put(t->me);
	/* we need an atomic snapshot of the counters */
	write_lock_bh(&t->lock);
	if (repl->num_counters)
		get_counters(t->private->counters, counterstmp,
		   t->private->nentries);

	/* swap in the new table under the write lock so nobody observes a
	 * half-switched table
	 */
	t->private = newinfo;
	write_unlock_bh(&t->lock);
	mutex_unlock(&ebt_mutex);
	/* so, a user can change the chains while having messed up her counter
	 * allocation. Only reason why this is done is because this way the lock
	 * is held only once, while this doesn't bring the kernel into a
	 * dangerous state.
	 */
	if (repl->num_counters &&
	   copy_to_user(repl->counters, counterstmp,
	   repl->num_counters * sizeof(struct ebt_counter))) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n");
	}

	/* decrease module count and free resources */
	EBT_ENTRY_ITERATE(table->entries, table->entries_size,
			  ebt_cleanup_entry, net, NULL);

	vfree(table->entries);
	if (table->chainstack) {
		for_each_possible_cpu(i)
			vfree(table->chainstack[i]);
		vfree(table->chainstack);
	}
	vfree(table);

	vfree(counterstmp);

#ifdef CONFIG_AUDIT
	if (audit_enabled) {
		struct audit_buffer *ab;

		ab = audit_log_start(current->audit_context, GFP_KERNEL,
				     AUDIT_NETFILTER_CFG);
		if (ab) {
			audit_log_format(ab, "table=%s family=%u entries=%u",
					 repl->name, AF_BRIDGE, repl->nentries);
			audit_log_end(ab);
		}
	}
#endif
	return ret;

free_unlock:
	mutex_unlock(&ebt_mutex);
free_iterate:
	EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
			  ebt_cleanup_entry, net, NULL);
free_counterstmp:
	vfree(counterstmp);
	/* can be initialized in translate_table() */
	if (newinfo->chainstack) {
		for_each_possible_cpu(i)
			vfree(newinfo->chainstack[i]);
		vfree(newinfo->chainstack);
	}
	return ret;
}
1106
/* replace the table */
/* EBT_SO_SET_ENTRIES handler: copy the replacement header and entry
 * blob in from userspace, sanity-check the advertised sizes, then let
 * do_replace_finish() validate and install the new table.
 */
static int do_replace(struct net *net, const void __user *user,
		      unsigned int len)
{
	int ret, countersize;
	struct ebt_table_info *newinfo;
	struct ebt_replace tmp;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	if (len != sizeof(tmp) + tmp.entries_size) {
		BUGPRINT("Wrong len argument\n");
		return -EINVAL;
	}

	if (tmp.entries_size == 0) {
		BUGPRINT("Entries_size never zero\n");
		return -EINVAL;
	}
	/* overflow check */
	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
		return -ENOMEM;

	/* don't trust userspace to NUL-terminate the table name */
	tmp.name[sizeof(tmp.name) - 1] = 0;

	/* per-cpu, cache-aligned counter sets live right behind newinfo */
	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	if (!newinfo)
		return -ENOMEM;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	newinfo->entries = vmalloc(tmp.entries_size);
	if (!newinfo->entries) {
		ret = -ENOMEM;
		goto free_newinfo;
	}
	if (copy_from_user(
	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
		BUGPRINT("Couldn't copy entries from userspace\n");
		ret = -EFAULT;
		goto free_entries;
	}

	ret = do_replace_finish(net, &tmp, newinfo);
	if (ret == 0)
		return ret;
free_entries:
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
	return ret;
}
1165
/* Register a copy of @input_table in @net and return that copy.
 *
 * The caller's template (including its entry blob) is duplicated and
 * translated, so the template itself stays untouched.  Returns an
 * ERR_PTR() on failure.
 */
struct ebt_table *
ebt_register_table(struct net *net, const struct ebt_table *input_table)
{
	struct ebt_table_info *newinfo;
	struct ebt_table *t, *table;
	struct ebt_replace_kernel *repl;
	int ret, i, countersize;
	void *p;

	if (input_table == NULL || (repl = input_table->table) == NULL ||
	    repl->entries == NULL || repl->entries_size == 0 ||
	    repl->counters != NULL || input_table->private != NULL) {
		BUGPRINT("Bad table data for ebt_register_table!!!\n");
		return ERR_PTR(-EINVAL);
	}

	/* Don't add one table to multiple lists. */
	table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	ret = -ENOMEM;
	if (!newinfo)
		goto free_table;

	p = vmalloc(repl->entries_size);
	if (!p)
		goto free_newinfo;

	memcpy(p, repl->entries, repl->entries_size);
	newinfo->entries = p;

	newinfo->entries_size = repl->entries_size;
	newinfo->nentries = repl->nentries;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	/* fill in newinfo and parse the entries */
	newinfo->chainstack = NULL;
	/* rebase the hook entry pointers from the template blob into our copy */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if ((repl->valid_hooks & (1 << i)) == 0)
			newinfo->hook_entry[i] = NULL;
		else
			newinfo->hook_entry[i] = p +
				((char *)repl->hook_entry[i] - repl->entries);
	}
	ret = translate_table(net, repl->name, newinfo);
	if (ret != 0) {
		BUGPRINT("Translate_table failed\n");
		goto free_chainstack;
	}

	if (table->check && table->check(newinfo, table->valid_hooks)) {
		BUGPRINT("The table doesn't like its own initial data, lol\n");
		ret = -EINVAL;
		goto free_chainstack;
	}

	table->private = newinfo;
	rwlock_init(&table->lock);
	mutex_lock(&ebt_mutex);



	/* refuse duplicate table names within this netns */
	list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			BUGPRINT("Table name already exists\n");
			goto free_unlock;
		}
	}

	/* Hold a reference count if the chains aren't empty */
	if (newinfo->nentries && !try_module_get(table->me)) {
		ret = -ENOENT;
		goto free_unlock;
	}
	list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
	mutex_unlock(&ebt_mutex);
	return table;
free_unlock:
	mutex_unlock(&ebt_mutex);
free_chainstack:
	if (newinfo->chainstack) {
		for_each_possible_cpu(i)
			vfree(newinfo->chainstack[i]);
		vfree(newinfo->chainstack);
	}
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
free_table:
	kfree(table);
out:
	return ERR_PTR(ret);
}
1264
/* Remove @table from its per-net list and release everything that
 * ebt_register_table() / do_replace_finish() set up for it.
 */
void ebt_unregister_table(struct net *net, struct ebt_table *table)
{
	int i;

	if (!table) {
		BUGPRINT("Request to unregister NULL table!!!\n");
		return;
	}
	mutex_lock(&ebt_mutex);
	list_del(&table->list);
	mutex_unlock(&ebt_mutex);
	/* release per-entry resources before freeing the blob itself */
	EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
			  ebt_cleanup_entry, net, NULL);
	if (table->private->nentries)
		module_put(table->me);
	vfree(table->private->entries);
	if (table->private->chainstack) {
		for_each_possible_cpu(i)
			vfree(table->private->chainstack[i]);
		vfree(table->private->chainstack);
	}
	vfree(table->private);
	kfree(table);
}
1289
/* userspace just supplied us with counters */
/* EBT_SO_SET_COUNTERS worker: add the user-supplied counter values to
 * the cpu-0 counter set of table @name.  @num_counters must match the
 * table's current number of entries.
 */
static int do_update_counters(struct net *net, const char *name,
				struct ebt_counter __user *counters,
				unsigned int num_counters,
				const void __user *user, unsigned int len)
{
	int i, ret;
	struct ebt_counter *tmp;
	struct ebt_table *t;

	if (num_counters == 0)
		return -EINVAL;

	tmp = vmalloc(num_counters * sizeof(*tmp));
	if (!tmp)
		return -ENOMEM;

	t = find_table_lock(net, name, &ret, &ebt_mutex);
	if (!t)
		goto free_tmp;

	if (num_counters != t->private->nentries) {
		BUGPRINT("Wrong nr of counters\n");
		ret = -EINVAL;
		goto unlock_mutex;
	}

	if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
		ret = -EFAULT;
		goto unlock_mutex;
	}

	/* we want an atomic add of the counters */
	write_lock_bh(&t->lock);

	/* we add to the counters of the first cpu */
	for (i = 0; i < num_counters; i++) {
		t->private->counters[i].pcnt += tmp[i].pcnt;
		t->private->counters[i].bcnt += tmp[i].bcnt;
	}

	write_unlock_bh(&t->lock);
	ret = 0;
unlock_mutex:
	mutex_unlock(&ebt_mutex);
free_tmp:
	vfree(tmp);
	return ret;
}
1339
1340static int update_counters(struct net *net, const void __user *user,
1341			    unsigned int len)
1342{
1343	struct ebt_replace hlp;
1344
1345	if (copy_from_user(&hlp, user, sizeof(hlp)))
1346		return -EFAULT;
1347
1348	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1349		return -EINVAL;
1350
1351	return do_update_counters(net, hlp.name, hlp.counters,
1352				hlp.num_counters, user, len);
1353}
1354
1355static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1356				     const char *base, char __user *ubase)
1357{
1358	char __user *hlp = ubase + ((char *)m - base);
1359	char name[EBT_FUNCTION_MAXNAMELEN] = {};
1360
1361	/* ebtables expects 32 bytes long names but xt_match names are 29 bytes
1362	 * long. Copy 29 bytes and fill remaining bytes with zeroes.
1363	 */
1364	strlcpy(name, m->u.match->name, sizeof(name));
1365	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
1366		return -EFAULT;
1367	return 0;
1368}
1369
1370static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1371				       const char *base, char __user *ubase)
1372{
1373	char __user *hlp = ubase + ((char *)w - base);
1374	char name[EBT_FUNCTION_MAXNAMELEN] = {};
1375
1376	strlcpy(name, w->u.watcher->name, sizeof(name));
1377	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
1378		return -EFAULT;
1379	return 0;
1380}
1381
/* Rewrite the kernel extension pointers inside a copied-out entry with
 * the plain names userspace expects, for every match, watcher and the
 * target.  @base/@ubase are the kernel and user start of the entries
 * blob; offsets inside both blobs are identical.
 */
static inline int ebt_make_names(struct ebt_entry *e, const char *base,
				 char __user *ubase)
{
	int ret;
	char __user *hlp;
	const struct ebt_entry_target *t;
	char name[EBT_FUNCTION_MAXNAMELEN] = {};

	/* bitmask == 0 marks a chain header, which holds no extensions */
	if (e->bitmask == 0)
		return 0;

	hlp = ubase + (((char *)e + e->target_offset) - base);
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);

	ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
	if (ret != 0)
		return ret;
	ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
	if (ret != 0)
		return ret;
	/* target name is zero-padded to 32 bytes like match names */
	strlcpy(name, t->u.target->name, sizeof(name));
	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
		return -EFAULT;
	return 0;
}
1407
/* Copy a consistent counter snapshot to userspace.
 *
 * @num_counters is what userspace asked for; it must be 0 (no counters
 * wanted) or equal @nentries.  The snapshot is taken under the table
 * write lock so concurrent updates cannot tear it.
 */
static int copy_counters_to_user(struct ebt_table *t,
				 const struct ebt_counter *oldcounters,
				 void __user *user, unsigned int num_counters,
				 unsigned int nentries)
{
	struct ebt_counter *counterstmp;
	int ret = 0;

	/* userspace might not need the counters */
	if (num_counters == 0)
		return 0;

	if (num_counters != nentries) {
		BUGPRINT("Num_counters wrong\n");
		return -EINVAL;
	}

	counterstmp = vmalloc(nentries * sizeof(*counterstmp));
	if (!counterstmp)
		return -ENOMEM;

	write_lock_bh(&t->lock);
	get_counters(oldcounters, counterstmp, nentries);
	write_unlock_bh(&t->lock);

	if (copy_to_user(user, counterstmp,
	   nentries * sizeof(struct ebt_counter)))
		ret = -EFAULT;
	vfree(counterstmp);
	return ret;
}
1439
/* called with ebt_mutex locked */
/* EBT_SO_GET_ENTRIES / EBT_SO_GET_INIT_ENTRIES worker: copy the live
 * (or registration-time) entries plus an optional counter snapshot to
 * userspace, then patch the extension names into the copied-out blob.
 */
static int copy_everything_to_user(struct ebt_table *t, void __user *user,
				   const int *len, int cmd)
{
	struct ebt_replace tmp;
	const struct ebt_counter *oldcounters;
	unsigned int entries_size, nentries;
	int ret;
	char *entries;

	if (cmd == EBT_SO_GET_ENTRIES) {
		/* live ruleset */
		entries_size = t->private->entries_size;
		nentries = t->private->nentries;
		entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		/* pristine template the table was registered with */
		entries_size = t->table->entries_size;
		nentries = t->table->nentries;
		entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (*len != sizeof(struct ebt_replace) + entries_size +
	   (tmp.num_counters ? nentries * sizeof(struct ebt_counter) : 0))
		return -EINVAL;

	if (tmp.nentries != nentries) {
		BUGPRINT("Nentries wrong\n");
		return -EINVAL;
	}

	if (tmp.entries_size != entries_size) {
		BUGPRINT("Wrong size\n");
		return -EINVAL;
	}

	ret = copy_counters_to_user(t, oldcounters, tmp.counters,
					tmp.num_counters, nentries);
	if (ret)
		return ret;

	if (copy_to_user(tmp.entries, entries, entries_size)) {
		BUGPRINT("Couldn't copy entries to userspace\n");
		return -EFAULT;
	}
	/* set the match/watcher/target names right */
	return EBT_ENTRY_ITERATE(entries, entries_size,
	   ebt_make_names, entries, tmp.entries);
}
1492
1493static int do_ebt_set_ctl(struct sock *sk,
1494	int cmd, void __user *user, unsigned int len)
1495{
1496	int ret;
1497	struct net *net = sock_net(sk);
1498
1499	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1500		return -EPERM;
1501
1502	switch (cmd) {
1503	case EBT_SO_SET_ENTRIES:
1504		ret = do_replace(net, user, len);
1505		break;
1506	case EBT_SO_SET_COUNTERS:
1507		ret = update_counters(net, user, len);
1508		break;
1509	default:
1510		ret = -EINVAL;
1511	}
1512	return ret;
1513}
1514
/* getsockopt entry point for EBT_SO_GET_*: look up the named table
 * under ebt_mutex and return either the header info or the full
 * ruleset.  Every switch arm is responsible for dropping the mutex
 * that find_table_lock() acquired.
 */
static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;
	struct ebt_replace tmp;
	struct ebt_table *t;
	struct net *net = sock_net(sk);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* don't trust userspace to NUL-terminate the table name */
	tmp.name[sizeof(tmp.name) - 1] = '\0';

	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	switch (cmd) {
	case EBT_SO_GET_INFO:
	case EBT_SO_GET_INIT_INFO:
		if (*len != sizeof(struct ebt_replace)) {
			ret = -EINVAL;
			mutex_unlock(&ebt_mutex);
			break;
		}
		if (cmd == EBT_SO_GET_INFO) {
			tmp.nentries = t->private->nentries;
			tmp.entries_size = t->private->entries_size;
			tmp.valid_hooks = t->valid_hooks;
		} else {
			/* INIT_INFO reports the registration-time template */
			tmp.nentries = t->table->nentries;
			tmp.entries_size = t->table->entries_size;
			tmp.valid_hooks = t->table->valid_hooks;
		}
		mutex_unlock(&ebt_mutex);
		if (copy_to_user(user, &tmp, *len) != 0) {
			BUGPRINT("c2u Didn't work\n");
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;

	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		ret = copy_everything_to_user(t, user, len, cmd);
		mutex_unlock(&ebt_mutex);
		break;

	default:
		mutex_unlock(&ebt_mutex);
		ret = -EINVAL;
	}

	return ret;
}
1573
1574#ifdef CONFIG_COMPAT
/* 32 bit-userspace compatibility definitions. */
/* Mirror of struct ebt_replace as a 32-bit process lays it out:
 * pointers shrink to compat_uptr_t, the other fields are 32-bit wide
 * in both ABIs.
 */
struct compat_ebt_replace {
	char name[EBT_TABLE_MAXNAMELEN];
	compat_uint_t valid_hooks;
	compat_uint_t nentries;
	compat_uint_t entries_size;
	/* start of the chains */
	compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
	/* nr of counters userspace expects back */
	compat_uint_t num_counters;
	/* where the kernel will put the old counters. */
	compat_uptr_t counters;
	compat_uptr_t entries;
};
1589
/* struct ebt_entry_match, _target and _watcher have same layout */
struct compat_ebt_entry_mwt {
	union {
		char name[EBT_FUNCTION_MAXNAMELEN];	/* as sent by userspace */
		compat_uptr_t ptr;
	} u;
	compat_uint_t match_size;	/* size of the extension payload */
	compat_uint_t data[0];		/* payload follows the header */
};
1599
/* account for possible padding between match_size and ->data */
static int ebt_compat_entry_padsize(void)
{
	/* the native header may only be as large as or larger than the
	 * compat one, so the difference below can never go negative
	 */
	BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
	return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
}
1608
/* size delta between the native and compat representation of a match */
static int ebt_compat_match_offset(const struct xt_match *match,
				   unsigned int userlen)
{
	/* ebt_among needs special handling. The kernel .matchsize is
	 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
	 * value is expected.
	 * Example: userspace sends 4500, ebt_among.c wants 4504.
	 */
	if (unlikely(match->matchsize == -1))
		return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
	return xt_compat_match_offset(match);
}
1621
/* Copy one match to userspace in 32-bit layout: plain name instead of
 * the kernel pointer, compat-shrunk payload, adjusted match_size.
 * Advances *dstptr past the written data and reduces *size by the
 * bytes saved relative to the native layout.
 */
static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
				unsigned int *size)
{
	const struct xt_match *match = m->u.match;
	struct compat_ebt_entry_mwt __user *cm = *dstptr;
	int off = ebt_compat_match_offset(match, m->match_size);
	compat_uint_t msize = m->match_size - off;

	BUG_ON(off >= m->match_size);

	if (copy_to_user(cm->u.name, match->name,
	    strlen(match->name) + 1) || put_user(msize, &cm->match_size))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user(cm->data, m->data))
			return -EFAULT;
	} else if (copy_to_user(cm->data, m->data, msize))
			return -EFAULT;

	*size -= ebt_compat_entry_padsize() + off;
	*dstptr = cm->data;
	*dstptr += msize;
	return 0;
}
1647
/* Copy one target to userspace in 32-bit layout; mirrors
 * compat_match_to_user() but with the target's fixed compat delta.
 * Also used for watchers via compat_watcher_to_user().
 */
static int compat_target_to_user(struct ebt_entry_target *t,
				 void __user **dstptr,
				 unsigned int *size)
{
	const struct xt_target *target = t->u.target;
	struct compat_ebt_entry_mwt __user *cm = *dstptr;
	int off = xt_compat_target_offset(target);
	compat_uint_t tsize = t->target_size - off;

	BUG_ON(off >= t->target_size);

	if (copy_to_user(cm->u.name, target->name,
	    strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user(cm->data, t->data))
			return -EFAULT;
	} else if (copy_to_user(cm->data, t->data, tsize))
		return -EFAULT;

	*size -= ebt_compat_entry_padsize() + off;
	*dstptr = cm->data;
	*dstptr += tsize;
	return 0;
}
1674
1675static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1676				  void __user **dstptr,
1677				  unsigned int *size)
1678{
1679	return compat_target_to_user((struct ebt_entry_target *)w,
1680							dstptr, size);
1681}
1682
/* Copy one ebt_entry (or chain header) to userspace in compat layout.
 *
 * The fixed part of the entry is copied first; the watchers/target/next
 * offsets are patched in afterwards, because every match, watcher and
 * target may shrink when converted to its 32-bit representation.
 */
static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
				unsigned int *size)
{
	struct ebt_entry_target *t;
	struct ebt_entry __user *ce;
	u32 watchers_offset, target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	/* bitmask == 0: chain header, copied through verbatim */
	if (e->bitmask == 0) {
		if (*size < sizeof(struct ebt_entries))
			return -EINVAL;
		if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
			return -EFAULT;

		*dstptr += sizeof(struct ebt_entries);
		*size -= sizeof(struct ebt_entries);
		return 0;
	}

	if (*size < sizeof(*ce))
		return -EINVAL;

	ce = (struct ebt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(*ce)))
		return -EFAULT;

	origsize = *size;
	*dstptr += sizeof(*ce);

	ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
	if (ret)
		return ret;
	/* origsize - *size is how much the copied-out data shrank so far */
	watchers_offset = e->watchers_offset - (origsize - *size);

	ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
	if (ret)
		return ret;
	target_offset = e->target_offset - (origsize - *size);

	t = (struct ebt_entry_target *) ((char *) e + e->target_offset);

	ret = compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);

	if (put_user(watchers_offset, &ce->watchers_offset) ||
	    put_user(target_offset, &ce->target_offset) ||
	    put_user(next_offset, &ce->next_offset))
		return -EFAULT;

	*size -= sizeof(*ce);
	return 0;
}
1738
1739static int compat_calc_match(struct ebt_entry_match *m, int *off)
1740{
1741	*off += ebt_compat_match_offset(m->u.match, m->match_size);
1742	*off += ebt_compat_entry_padsize();
1743	return 0;
1744}
1745
1746static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1747{
1748	*off += xt_compat_target_offset(w->u.watcher);
1749	*off += ebt_compat_entry_padsize();
1750	return 0;
1751}
1752
/* Account the compat size delta of one entry: record it via
 * xt_compat_add_offset(), shrink newinfo->entries_size, and shift the
 * compat hook entry points that lie beyond this entry.
 */
static int compat_calc_entry(const struct ebt_entry *e,
			     const struct ebt_table_info *info,
			     const void *base,
			     struct compat_ebt_replace *newinfo)
{
	const struct ebt_entry_target *t;
	unsigned int entry_offset;
	int off, ret, i;

	/* chain headers carry no matches/watchers/target, nothing shrinks */
	if (e->bitmask == 0)
		return 0;

	off = 0;
	entry_offset = (void *)e - base;

	EBT_MATCH_ITERATE(e, compat_calc_match, &off);
	EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);

	t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);

	off += xt_compat_target_offset(t->u.target);
	off += ebt_compat_entry_padsize();

	newinfo->entries_size -= off;

	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		const void *hookptr = info->hook_entry[i];
		/* NOTE(review): 'base - hookptr' yields a pointer difference
		 * that is then re-cast to a pointer for the comparison;
		 * 'hookptr - base' looks like the intended expression --
		 * verify against the compat hook fixups before changing.
		 */
		if (info->hook_entry[i] &&
		    (e < (struct ebt_entry *)(base - hookptr))) {
			newinfo->hook_entry[i] -= off;
			pr_debug("0x%08X -> 0x%08X\n",
					newinfo->hook_entry[i] + off,
					newinfo->hook_entry[i]);
		}
	}

	return 0;
}
1795
1796
/* Compute the compat entries_size for @info and populate the per-entry
 * offset table through compat_calc_entry().
 */
static int compat_table_info(const struct ebt_table_info *info,
			     struct compat_ebt_replace *newinfo)
{
	unsigned int size = info->entries_size;
	const void *entries = info->entries;

	/* starts at the native size; compat_calc_entry() subtracts deltas */
	newinfo->entries_size = size;

	xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
							entries, newinfo);
}
1809
/* Compat counterpart of copy_everything_to_user(): dump the live or
 * registration-time ruleset to a 32-bit process, counters first, then
 * the compat-shrunk entries blob.
 */
static int compat_copy_everything_to_user(struct ebt_table *t,
					  void __user *user, int *len, int cmd)
{
	struct compat_ebt_replace repl, tmp;
	struct ebt_counter *oldcounters;
	struct ebt_table_info tinfo;
	int ret;
	void __user *pos;

	memset(&tinfo, 0, sizeof(tinfo));

	if (cmd == EBT_SO_GET_ENTRIES) {
		/* live ruleset */
		tinfo.entries_size = t->private->entries_size;
		tinfo.nentries = t->private->nentries;
		tinfo.entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		/* pristine template the table was registered with */
		tinfo.entries_size = t->table->entries_size;
		tinfo.nentries = t->table->nentries;
		tinfo.entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (tmp.nentries != tinfo.nentries ||
	   (tmp.num_counters && tmp.num_counters != tinfo.nentries))
		return -EINVAL;

	/* size the compat blob so the *len check below is exact */
	memcpy(&repl, &tmp, sizeof(repl));
	if (cmd == EBT_SO_GET_ENTRIES)
		ret = compat_table_info(t->private, &repl);
	else
		ret = compat_table_info(&tinfo, &repl);
	if (ret)
		return ret;

	if (*len != sizeof(tmp) + repl.entries_size +
	   (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
		pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
				*len, tinfo.entries_size, repl.entries_size);
		return -EINVAL;
	}

	/* userspace might not need the counters */
	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
					tmp.num_counters, tinfo.nentries);
	if (ret)
		return ret;

	pos = compat_ptr(tmp.entries);
	return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
			compat_copy_entry_to_user, &pos, &tmp.entries_size);
}
1865
/* State threaded through the compat -> native translation helpers.
 * While buf_kern_start is NULL the helpers only account sizes; once it
 * is set they also copy the translated data into the kernel buffer.
 */
struct ebt_entries_buf_state {
	char *buf_kern_start;	/* kernel buffer to copy (translated) data to */
	u32 buf_kern_len;	/* total size of kernel buffer */
	u32 buf_kern_offset;	/* amount of data copied so far */
	u32 buf_user_offset;	/* read position in userspace buffer */
};
1872
1873static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1874{
1875	state->buf_kern_offset += sz;
1876	return state->buf_kern_offset >= sz ? 0 : -EINVAL;
1877}
1878
/* Append @sz bytes of @data to the kernel buffer, or merely account for
 * them when running the counting-only pass (buf_kern_start == NULL).
 */
static int ebt_buf_add(struct ebt_entries_buf_state *state,
		       void *data, unsigned int sz)
{
	if (state->buf_kern_start == NULL)
		goto count_only;

	BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);

	memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);

 count_only:
	state->buf_user_offset += sz;
	return ebt_buf_count(state, sz);
}
1893
/* Append @sz zero bytes of padding to the kernel buffer. */
static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
{
	char *b = state->buf_kern_start;

	BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);

	if (b != NULL && sz > 0)
		memset(b + state->buf_kern_offset, 0, sz);
	/* do not adjust ->buf_user_offset here, we added kernel-side padding */
	return ebt_buf_count(state, sz);
}
1905
/* which kind of extension compat_mtw_from_user() is translating */
enum compat_mwt {
	EBT_COMPAT_MATCH,
	EBT_COMPAT_WATCHER,
	EBT_COMPAT_TARGET,
};
1911
/* Translate one 32-bit match/watcher/target into native layout.
 *
 * Looks the extension up by name, converts or copies its payload into
 * the kernel buffer (only when state->buf_kern_start is set; the first
 * pass merely accounts sizes) and zero-fills the XT_ALIGN tail.
 *
 * Returns the userspace bytes consumed (mwt->match_size) plus the size
 * delta needed in the native buffer, or a negative errno.
 */
static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
				enum compat_mwt compat_mwt,
				struct ebt_entries_buf_state *state,
				const unsigned char *base)
{
	char name[EBT_FUNCTION_MAXNAMELEN];
	struct xt_match *match;
	struct xt_target *wt;
	void *dst = NULL;
	int off, pad = 0;
	unsigned int size_kern, match_size = mwt->match_size;

	strlcpy(name, mwt->u.name, sizeof(name));

	if (state->buf_kern_start)
		dst = state->buf_kern_start + state->buf_kern_offset;

	switch (compat_mwt) {
	case EBT_COMPAT_MATCH:
		match = xt_request_find_match(NFPROTO_BRIDGE, name, 0);
		if (IS_ERR(match))
			return PTR_ERR(match);

		off = ebt_compat_match_offset(match, match_size);
		if (dst) {
			if (match->compat_from_user)
				match->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = match->matchsize;
		/* variable-size match (ebt_among registers matchsize -1) */
		if (unlikely(size_kern == -1))
			size_kern = match_size;
		module_put(match->me);
		break;
	case EBT_COMPAT_WATCHER: /* fallthrough */
	case EBT_COMPAT_TARGET:
		wt = xt_request_find_target(NFPROTO_BRIDGE, name, 0);
		if (IS_ERR(wt))
			return PTR_ERR(wt);
		off = xt_compat_target_offset(wt);

		if (dst) {
			if (wt->compat_from_user)
				wt->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = wt->targetsize;
		module_put(wt->me);
		break;

	default:
		return -EINVAL;
	}

	state->buf_kern_offset += match_size + off;
	state->buf_user_offset += match_size;
	pad = XT_ALIGN(size_kern) - size_kern;

	/* zero-fill the alignment padding after the extension data */
	if (pad > 0 && dst) {
		BUG_ON(state->buf_kern_len <= pad);
		BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
		memset(dst + size_kern, 0, pad);
	}
	return off + match_size;
}
1981
/* return size of all matches, watchers or target, including necessary
 * alignment and padding.
 *
 * @match32 points at the first compat match/watcher/target in a region
 * of @size_left bytes; each element is translated via
 * compat_mtw_from_user().  Returns the total growth (native size minus
 * compat size) or a negative errno.
 */
static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
			unsigned int size_left, enum compat_mwt type,
			struct ebt_entries_buf_state *state, const void *base)
{
	int growth = 0;
	char *buf;

	if (size_left == 0)
		return 0;

	buf = (char *) match32;

	while (size_left >= sizeof(*match32)) {
		struct ebt_entry_match *match_kern;
		int ret;

		/* remember where the native header lands so match_size can
		 * be patched after the payload is translated
		 */
		match_kern = (struct ebt_entry_match *) state->buf_kern_start;
		if (match_kern) {
			char *tmp;
			tmp = state->buf_kern_start + state->buf_kern_offset;
			match_kern = (struct ebt_entry_match *) tmp;
		}
		ret = ebt_buf_add(state, buf, sizeof(*match32));
		if (ret < 0)
			return ret;
		size_left -= sizeof(*match32);

		/* add padding before match->data (if any) */
		ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
		if (ret < 0)
			return ret;

		if (match32->match_size > size_left)
			return -EINVAL;

		size_left -= match32->match_size;

		ret = compat_mtw_from_user(match32, type, state, base);
		if (ret < 0)
			return ret;

		BUG_ON(ret < match32->match_size);
		growth += ret - match32->match_size;
		growth += ebt_compat_entry_padsize();

		buf += sizeof(*match32);
		buf += match32->match_size;

		if (match_kern)
			match_kern->match_size = ret;

		/* an entry has exactly one target; trailing data is bogus */
		WARN_ON(type == EBT_COMPAT_TARGET && size_left);
		match32 = (struct compat_ebt_entry_mwt *) buf;
	}

	return growth;
}
2042
/* called for all ebt_entry structures. */
/* Translate one compat ebt_entry (or chain header) into native layout,
 * updating @state and shrinking *total by the compat bytes consumed.
 */
static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
			  unsigned int *total,
			  struct ebt_entries_buf_state *state)
{
	unsigned int i, j, startoff, new_offset = 0;
	/* stores match/watchers/targets & offset of next struct ebt_entry: */
	unsigned int offsets[4];
	unsigned int *offsets_update = NULL;
	int ret;
	char *buf_start;

	if (*total < sizeof(struct ebt_entries))
		return -EINVAL;

	/* bitmask == 0 marks a chain header; copy it through unchanged */
	if (!entry->bitmask) {
		*total -= sizeof(struct ebt_entries);
		return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
	}
	if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
		return -EINVAL;

	startoff = state->buf_user_offset;
	/* pull in most part of ebt_entry, it does not need to be changed. */
	ret = ebt_buf_add(state, entry,
			offsetof(struct ebt_entry, watchers_offset));
	if (ret < 0)
		return ret;

	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
	memcpy(&offsets[1], &entry->watchers_offset,
			sizeof(offsets) - sizeof(offsets[0]));

	if (state->buf_kern_start) {
		buf_start = state->buf_kern_start + state->buf_kern_offset;
		offsets_update = (unsigned int *) buf_start;
	}
	ret = ebt_buf_add(state, &offsets[1],
			sizeof(offsets) - sizeof(offsets[0]));
	if (ret < 0)
		return ret;
	buf_start = (char *) entry;
	/* 0: matches offset, always follows ebt_entry.
	 * 1: watchers offset, from ebt_entry structure
	 * 2: target offset, from ebt_entry structure
	 * 3: next ebt_entry offset, from ebt_entry structure
	 *
	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
	 */
	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
		struct compat_ebt_entry_mwt *match32;
		unsigned int size;
		char *buf = buf_start;

		buf = buf_start + offsets[i];
		if (offsets[i] > offsets[j])
			return -EINVAL;

		match32 = (struct compat_ebt_entry_mwt *) buf;
		size = offsets[j] - offsets[i];
		ret = ebt_size_mwt(match32, size, i, state, base);
		if (ret < 0)
			return ret;
		new_offset += ret;
		if (offsets_update && new_offset) {
			pr_debug("change offset %d to %d\n",
				offsets_update[i], offsets[j] + new_offset);
			offsets_update[i] = offsets[j] + new_offset;
		}
	}

	if (state->buf_kern_start == NULL) {
		unsigned int offset = buf_start - (char *) base;

		/* counting pass: record this entry's growth for pass two */
		ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset);
		if (ret < 0)
			return ret;
	}

	startoff = state->buf_user_offset - startoff;

	BUG_ON(*total < startoff);
	*total -= startoff;
	return 0;
}
2128
2129/* repl->entries_size is the size of the ebt_entry blob in userspace.
 
2130 * It might need more memory when copied to a 64 bit kernel in case
2131 * userspace is 32-bit. So, first task: find out how much memory is needed.
2132 *
2133 * Called before validation is performed.
2134 */
static int compat_copy_entries(unsigned char *data, unsigned int size_user,
				struct ebt_entries_buf_state *state)
{
	unsigned int size_remaining = size_user;
	int ret;

	/* size_entry_mwt() consumes input and decrements size_remaining;
	 * when state->buf_kern_start is NULL this is a sizing-only pass */
	ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
					&size_remaining, state);
	if (ret < 0)
		return ret;

	/* the iterator must have consumed the whole blob */
	WARN_ON(size_remaining);
	/* bytes needed for the native (64-bit) representation */
	return state->buf_kern_offset;
}
2149
2150
/* Copy a 32-bit struct compat_ebt_replace from userspace and translate it
 * into the native struct ebt_replace.  Returns 0 or a negative errno.
 */
static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
					    void __user *user, unsigned int len)
{
	struct compat_ebt_replace tmp;
	int i;

	if (len < sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* len must cover the header plus exactly entries_size bytes */
	if (len != sizeof(tmp) + tmp.entries_size)
		return -EINVAL;

	if (tmp.entries_size == 0)
		return -EINVAL;

	/* overflow guards for the later per-cpu counter allocation */
	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
		return -ENOMEM;

	/* the leading fields up to hook_entry share one layout */
	memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));

	/* starting with hook_entry, 32 vs. 64 bit structures are different */
	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);

	repl->num_counters = tmp.num_counters;
	repl->counters = compat_ptr(tmp.counters);
	repl->entries = compat_ptr(tmp.entries);
	return 0;
}
2186
/* Compat (32-bit userland) counterpart of do_replace(): translate the
 * userspace entry blob to the native layout with two passes of
 * compat_copy_entries() (size, then copy), fix up hook pointers, and hand
 * the result to do_replace_finish().
 */
static int compat_do_replace(struct net *net, void __user *user,
			     unsigned int len)
{
	int ret, i, countersize, size64;
	struct ebt_table_info *newinfo;
	struct ebt_replace tmp;
	struct ebt_entries_buf_state state;
	void *entries_tmp;

	ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
	if (ret) {
		/* try real handler in case userland supplied needed padding */
		if (ret == -EINVAL && do_replace(net, user, len) == 0)
			ret = 0;
		return ret;
	}

	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	if (!newinfo)
		return -ENOMEM;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	memset(&state, 0, sizeof(state));

	newinfo->entries = vmalloc(tmp.entries_size);
	if (!newinfo->entries) {
		ret = -ENOMEM;
		goto free_newinfo;
	}
	if (copy_from_user(
	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
		ret = -EFAULT;
		goto free_entries;
	}

	entries_tmp = newinfo->entries;

	xt_compat_lock(NFPROTO_BRIDGE);

	xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
	/* pass 1: buf_kern_start is NULL, so this only computes size64 */
	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	if (ret < 0)
		goto out_unlock;

	pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
		tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
		xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));

	size64 = ret;
	newinfo->entries = vmalloc(size64);
	if (!newinfo->entries) {
		vfree(entries_tmp);
		ret = -ENOMEM;
		goto out_unlock;
	}

	memset(&state, 0, sizeof(state));
	state.buf_kern_start = newinfo->entries;
	state.buf_kern_len = size64;

	/* pass 2: actually write the translated entries */
	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	BUG_ON(ret < 0);	/* parses same data again */

	vfree(entries_tmp);
	tmp.entries_size = size64;

	/* shift each hook start by the accumulated size delta */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		char __user *usrptr;
		if (tmp.hook_entry[i]) {
			unsigned int delta;
			usrptr = (char __user *) tmp.hook_entry[i];
			delta = usrptr - tmp.entries;
			usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
			tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
		}
	}

	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);

	ret = do_replace_finish(net, &tmp, newinfo);
	if (ret == 0)
		return ret;
free_entries:
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
	return ret;
out_unlock:
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	goto free_entries;
}
2283
2284static int compat_update_counters(struct net *net, void __user *user,
2285				  unsigned int len)
2286{
2287	struct compat_ebt_replace hlp;
2288
2289	if (copy_from_user(&hlp, user, sizeof(hlp)))
2290		return -EFAULT;
2291
2292	/* try real handler in case userland supplied needed padding */
2293	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2294		return update_counters(net, user, len);
2295
2296	return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2297					hlp.num_counters, user, len);
2298}
2299
2300static int compat_do_ebt_set_ctl(struct sock *sk,
2301		int cmd, void __user *user, unsigned int len)
2302{
2303	int ret;
2304	struct net *net = sock_net(sk);
2305
2306	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2307		return -EPERM;
2308
2309	switch (cmd) {
2310	case EBT_SO_SET_ENTRIES:
2311		ret = compat_do_replace(net, user, len);
2312		break;
2313	case EBT_SO_SET_COUNTERS:
2314		ret = compat_update_counters(net, user, len);
2315		break;
2316	default:
2317		ret = -EINVAL;
2318	}
2319	return ret;
2320}
2321
/* getsockopt() dispatcher for 32-bit userland.  Holds ebt_mutex (via
 * find_table_lock) and the xt compat lock across the switch; all exits
 * after find_table_lock go through the common unlock tail.
 */
static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
		void __user *user, int *len)
{
	int ret;
	struct compat_ebt_replace tmp;
	struct ebt_table *t;
	struct net *net = sock_net(sk);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* try real handler in case userland supplied needed padding */
	if ((cmd == EBT_SO_GET_INFO ||
	     cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
			return do_ebt_get_ctl(sk, cmd, user, len);

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	tmp.name[sizeof(tmp.name) - 1] = '\0';

	/* on success this returns with ebt_mutex held */
	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	xt_compat_lock(NFPROTO_BRIDGE);
	switch (cmd) {
	case EBT_SO_GET_INFO:
		tmp.nentries = t->private->nentries;
		/* computes the 32-bit entries_size for the live table */
		ret = compat_table_info(t->private, &tmp);
		if (ret)
			goto out;
		tmp.valid_hooks = t->valid_hooks;

		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_INIT_INFO:
		/* report the initial (registration-time) table, not the
		 * currently loaded ruleset */
		tmp.nentries = t->table->nentries;
		tmp.entries_size = t->table->entries_size;
		tmp.valid_hooks = t->table->valid_hooks;

		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		/* try real handler first in case of userland-side padding.
		 * in case we are dealing with an 'ordinary' 32 bit binary
		 * without 64bit compatibility padding, this will fail right
		 * after copy_from_user when the *len argument is validated.
		 *
		 * the compat_ variant needs to do one pass over the kernel
		 * data set to adjust for size differences before it the check.
		 */
		if (copy_everything_to_user(t, user, len, cmd) == 0)
			ret = 0;
		else
			ret = compat_copy_everything_to_user(t, user, len, cmd);
		break;
	default:
		ret = -EINVAL;
	}
 out:
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	mutex_unlock(&ebt_mutex);
	return ret;
}
2397#endif
2398
/* sockopt registration: ebtables commands arrive through the generic
 * netfilter sockopt interface on PF_INET sockets, in the EBT_BASE_CTL
 * command range; compat handlers serve 32-bit userland on 64-bit kernels.
 */
static struct nf_sockopt_ops ebt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= EBT_BASE_CTL,
	.set_optmax	= EBT_SO_SET_MAX + 1,
	.set		= do_ebt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ebt_set_ctl,
#endif
	.get_optmin	= EBT_BASE_CTL,
	.get_optmax	= EBT_SO_GET_MAX + 1,
	.get		= do_ebt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ebt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2415
2416static int __init ebtables_init(void)
2417{
2418	int ret;
2419
2420	ret = xt_register_target(&ebt_standard_target);
2421	if (ret < 0)
2422		return ret;
2423	ret = nf_register_sockopt(&ebt_sockopts);
2424	if (ret < 0) {
2425		xt_unregister_target(&ebt_standard_target);
2426		return ret;
2427	}
2428
2429	printk(KERN_INFO "Ebtables v2.0 registered\n");
2430	return 0;
2431}
2432
/* Module exit: tear down in reverse registration order — sockopt
 * interface first so no new commands can arrive, then the target.
 */
static void __exit ebtables_fini(void)
{
	nf_unregister_sockopt(&ebt_sockopts);
	xt_unregister_target(&ebt_standard_target);
	printk(KERN_INFO "Ebtables v2.0 unregistered\n");
}
2439
2440EXPORT_SYMBOL(ebt_register_table);
2441EXPORT_SYMBOL(ebt_unregister_table);
2442EXPORT_SYMBOL(ebt_do_table);
2443module_init(ebtables_init);
2444module_exit(ebtables_fini);
2445MODULE_LICENSE("GPL");
v3.15
   1/*
   2 *  ebtables
   3 *
   4 *  Author:
   5 *  Bart De Schuymer		<bdschuym@pandora.be>
   6 *
   7 *  ebtables.c,v 2.0, July, 2002
   8 *
   9 *  This code is strongly inspired by the iptables code which is
  10 *  Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
  11 *
  12 *  This program is free software; you can redistribute it and/or
  13 *  modify it under the terms of the GNU General Public License
  14 *  as published by the Free Software Foundation; either version
  15 *  2 of the License, or (at your option) any later version.
  16 */
  17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  18#include <linux/kmod.h>
  19#include <linux/module.h>
  20#include <linux/vmalloc.h>
  21#include <linux/netfilter/x_tables.h>
  22#include <linux/netfilter_bridge/ebtables.h>
  23#include <linux/spinlock.h>
  24#include <linux/mutex.h>
  25#include <linux/slab.h>
  26#include <asm/uaccess.h>
  27#include <linux/smp.h>
  28#include <linux/cpumask.h>
 
  29#include <net/sock.h>
  30/* needed for logical [in,out]-dev filtering */
  31#include "../br_private.h"
  32
  33#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
  34					 "report to author: "format, ## args)
  35/* #define BUGPRINT(format, args...) */
  36
  37/*
  38 * Each cpu has its own set of counters, so there is no need for write_lock in
  39 * the softirq
  40 * For reading or updating the counters, the user context needs to
  41 * get a write_lock
  42 */
  43
  44/* The size of each set of counters is altered to get cache alignment */
  45#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
  46#define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
  47#define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
  48   COUNTER_OFFSET(n) * cpu))
  49
  50
  51
  52static DEFINE_MUTEX(ebt_mutex);
  53
  54#ifdef CONFIG_COMPAT
  55static void ebt_standard_compat_from_user(void *dst, const void *src)
  56{
  57	int v = *(compat_int_t *)src;
  58
  59	if (v >= 0)
  60		v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
  61	memcpy(dst, &v, sizeof(v));
  62}
  63
  64static int ebt_standard_compat_to_user(void __user *dst, const void *src)
  65{
  66	compat_int_t cv = *(int *)src;
  67
  68	if (cv >= 0)
  69		cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
  70	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
  71}
  72#endif
  73
  74
/* The "standard" target: carries a plain int verdict (accept/drop/
 * continue/return or a chain-jump offset).  target() is left NULL so
 * ebt_do_table() recognizes it and reads the verdict directly.
 */
static struct xt_target ebt_standard_target = {
	.name       = "standard",
	.revision   = 0,
	.family     = NFPROTO_BRIDGE,
	.targetsize = sizeof(int),
#ifdef CONFIG_COMPAT
	.compatsize = sizeof(compat_int_t),
	.compat_from_user = ebt_standard_compat_from_user,
	.compat_to_user =  ebt_standard_compat_to_user,
#endif
};
  86
/* Run one watcher extension against the skb. */
static inline int
ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
	       struct xt_action_param *par)
{
	par->target   = w->u.watcher;
	par->targinfo = w->data;
	w->u.watcher->target(skb, par);
	/* watchers don't give a verdict */
	return 0;
}
  97
/* Run one match extension; returns EBT_MATCH (0) when the packet
 * matches, EBT_NOMATCH otherwise, so EBT_MATCH_ITERATE stops on the
 * first non-matching extension.
 */
static inline int
ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
	     struct xt_action_param *par)
{
	par->match     = m->u.match;
	par->matchinfo = m->data;
	return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
}
 106
 107static inline int
 108ebt_dev_check(const char *entry, const struct net_device *device)
 109{
 110	int i = 0;
 111	const char *devname;
 112
 113	if (*entry == '\0')
 114		return 0;
 115	if (!device)
 116		return 1;
 117	devname = device->name;
 118	/* 1 is the wildcard token */
 119	while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
 120		i++;
 121	return devname[i] != entry[i] && entry[i] != 1;
 122}
 123
/* evaluate a boolean, honouring the rule's per-field inversion flag */
#define FWINV2(bool, invflg) ((bool) ^ !!(e->invflags & invflg))
/* process standard matches */
static inline int
ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
                const struct net_device *in, const struct net_device *out)
{
	const struct ethhdr *h = eth_hdr(skb);
	const struct net_bridge_port *p;
	__be16 ethproto;
	int verdict, i;

	/* a VLAN-accelerated frame is matched as 802.1Q, not its inner proto */
	if (vlan_tx_tag_present(skb))
		ethproto = htons(ETH_P_8021Q);
	else
		ethproto = h->h_proto;

	if (e->bitmask & EBT_802_3) {
		/* 802.3 frames carry a length (< ETH_P_802_3_MIN), not a proto */
		if (FWINV2(ntohs(ethproto) >= ETH_P_802_3_MIN, EBT_IPROTO))
			return 1;
	} else if (!(e->bitmask & EBT_NOPROTO) &&
	   FWINV2(e->ethproto != ethproto, EBT_IPROTO))
		return 1;

	if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
		return 1;
	if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
		return 1;
	/* rcu_read_lock()ed by nf_hook_slow */
	if (in && (p = br_port_get_rcu(in)) != NULL &&
	    FWINV2(ebt_dev_check(e->logical_in, p->br->dev), EBT_ILOGICALIN))
		return 1;
	if (out && (p = br_port_get_rcu(out)) != NULL &&
	    FWINV2(ebt_dev_check(e->logical_out, p->br->dev), EBT_ILOGICALOUT))
		return 1;

	if (e->bitmask & EBT_SOURCEMAC) {
		/* masked MAC compare: verdict != 0 means mismatch */
		verdict = 0;
		for (i = 0; i < 6; i++)
			verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
			   e->sourcemsk[i];
		if (FWINV2(verdict != 0, EBT_ISOURCE) )
			return 1;
	}
	if (e->bitmask & EBT_DESTMAC) {
		verdict = 0;
		for (i = 0; i < 6; i++)
			verdict |= (h->h_dest[i] ^ e->destmac[i]) &
			   e->destmsk[i];
		if (FWINV2(verdict != 0, EBT_IDEST) )
			return 1;
	}
	/* 0 == all basic fields matched */
	return 0;
}
 177
 178static inline __pure
 179struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
 180{
 181	return (void *)entry + entry->next_offset;
 182}
 183
/* Do some firewalling */
/* Core rule interpreter: walk the chain for @hook, evaluating basic
 * matches, match/watcher extensions and the target of each entry, with
 * an explicit chainstack (cs/sp) for jumps into user-defined chains.
 * Runs under table->lock read-locked; counters are per-cpu.
 */
unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
   const struct net_device *in, const struct net_device *out,
   struct ebt_table *table)
{
	int i, nentries;
	struct ebt_entry *point;
	struct ebt_counter *counter_base, *cb_base;
	const struct ebt_entry_target *t;
	int verdict, sp = 0;
	struct ebt_chainstack *cs;
	struct ebt_entries *chaininfo;
	const char *base;
	const struct ebt_table_info *private;
	struct xt_action_param acpar;

	acpar.family  = NFPROTO_BRIDGE;
	acpar.in      = in;
	acpar.out     = out;
	acpar.hotdrop = false;
	acpar.hooknum = hook;

	read_lock_bh(&table->lock);
	private = table->private;
	cb_base = COUNTER_BASE(private->counters, private->nentries,
	   smp_processor_id());
	if (private->chainstack)
		cs = private->chainstack[smp_processor_id()];
	else
		cs = NULL;
	chaininfo = private->hook_entry[hook];
	nentries = private->hook_entry[hook]->nentries;
	point = (struct ebt_entry *)(private->hook_entry[hook]->data);
	counter_base = cb_base + private->hook_entry[hook]->counter_offset;
	/* base for chain jumps */
	base = private->entries;
	i = 0;
	while (i < nentries) {
		if (ebt_basic_match(point, skb, in, out))
			goto letscontinue;

		if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
			goto letscontinue;
		if (acpar.hotdrop) {
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}

		/* increase counter */
		(*(counter_base + i)).pcnt++;
		(*(counter_base + i)).bcnt += skb->len;

		/* these should only watch: not modify, nor tell us
		   what to do with the packet */
		EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);

		t = (struct ebt_entry_target *)
		   (((char *)point) + point->target_offset);
		/* standard target */
		if (!t->u.target->target)
			verdict = ((struct ebt_standard_target *)t)->verdict;
		else {
			acpar.target   = t->u.target;
			acpar.targinfo = t->data;
			verdict = t->u.target->target(skb, &acpar);
		}
		if (verdict == EBT_ACCEPT) {
			read_unlock_bh(&table->lock);
			return NF_ACCEPT;
		}
		if (verdict == EBT_DROP) {
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}
		if (verdict == EBT_RETURN) {
letsreturn:
#ifdef CONFIG_NETFILTER_DEBUG
			if (sp == 0) {
				BUGPRINT("RETURN on base chain");
				/* act like this is EBT_CONTINUE */
				goto letscontinue;
			}
#endif
			sp--;
			/* put all the local variables right */
			i = cs[sp].n;
			chaininfo = cs[sp].chaininfo;
			nentries = chaininfo->nentries;
			point = cs[sp].e;
			counter_base = cb_base +
			   chaininfo->counter_offset;
			continue;
		}
		if (verdict == EBT_CONTINUE)
			goto letscontinue;
#ifdef CONFIG_NETFILTER_DEBUG
		if (verdict < 0) {
			BUGPRINT("bogus standard verdict\n");
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}
#endif
		/* jump to a udc: verdict >= 0 is an offset from base */
		cs[sp].n = i + 1;
		cs[sp].chaininfo = chaininfo;
		cs[sp].e = ebt_next_entry(point);
		i = 0;
		chaininfo = (struct ebt_entries *) (base + verdict);
#ifdef CONFIG_NETFILTER_DEBUG
		if (chaininfo->distinguisher) {
			BUGPRINT("jump to non-chain\n");
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}
#endif
		nentries = chaininfo->nentries;
		point = (struct ebt_entry *)chaininfo->data;
		counter_base = cb_base + chaininfo->counter_offset;
		sp++;
		continue;
letscontinue:
		point = ebt_next_entry(point);
		i++;
	}

	/* end of chain reached: apply the chain's policy */
	/* I actually like this :) */
	if (chaininfo->policy == EBT_RETURN)
		goto letsreturn;
	if (chaininfo->policy == EBT_ACCEPT) {
		read_unlock_bh(&table->lock);
		return NF_ACCEPT;
	}
	read_unlock_bh(&table->lock);
	return NF_DROP;
}
 319
/* If it succeeds, returns element and locks mutex */
/* Look up an object by name in @head; relies on every list member
 * starting with a list_head followed by a fixed-size name field.
 * On failure *error is set and the mutex is released.
 */
static inline void *
find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
   struct mutex *mutex)
{
	struct {
		struct list_head list;
		char name[EBT_FUNCTION_MAXNAMELEN];
	} *e;

	*error = mutex_lock_interruptible(mutex);
	if (*error != 0)
		return NULL;

	list_for_each_entry(e, head, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}
	*error = -ENOENT;
	mutex_unlock(mutex);
	return NULL;
}
 342
/* As find_inlist_lock_noload(), but on ENOENT tries to load the module
 * "<prefix><name>" and retries the lookup.
 */
static void *
find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
   int *error, struct mutex *mutex)
{
	return try_then_request_module(
			find_inlist_lock_noload(head, name, error, mutex),
			"%s%s", prefix, name);
}
 351
/* Find an ebtables table by name in @net; on success ebt_mutex is held.
 * Auto-loads "ebtable_<name>" if the table is not registered yet.
 */
static inline struct ebt_table *
find_table_lock(struct net *net, const char *name, int *error,
		struct mutex *mutex)
{
	return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
				"ebtable_", error, mutex);
}
 359
/* Validate one match extension of an entry: bounds-check it against the
 * watchers area, resolve the xt_match by name (taking a module ref), and
 * run its checkentry hook.  *cnt counts successfully checked matches so
 * the caller can unwind exactly that many on a later failure.
 */
static inline int
ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
		unsigned int *cnt)
{
	const struct ebt_entry *e = par->entryinfo;
	struct xt_match *match;
	/* bytes from this match to the start of the watchers area */
	size_t left = ((char *)e + e->watchers_offset) - (char *)m;
	int ret;

	if (left < sizeof(struct ebt_entry_match) ||
	    left - sizeof(struct ebt_entry_match) < m->match_size)
		return -EINVAL;

	match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
	if (IS_ERR(match))
		return PTR_ERR(match);
	m->u.match = match;

	par->match     = match;
	par->matchinfo = m->data;
	ret = xt_check_match(par, m->match_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		module_put(match->me);
		return ret;
	}

	(*cnt)++;
	return 0;
}
 390
/* Validate one watcher extension: bounds-check against the target area,
 * resolve the xt_target (module ref taken) and run its checkentry hook.
 * *cnt counts checked watchers for cleanup on failure.
 */
static inline int
ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
		  unsigned int *cnt)
{
	const struct ebt_entry *e = par->entryinfo;
	struct xt_target *watcher;
	/* bytes from this watcher to the start of the target */
	size_t left = ((char *)e + e->target_offset) - (char *)w;
	int ret;

	if (left < sizeof(struct ebt_entry_watcher) ||
	   left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
		return -EINVAL;

	watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
	if (IS_ERR(watcher))
		return PTR_ERR(watcher);
	w->u.watcher = watcher;

	par->target   = watcher;
	par->targinfo = w->data;
	ret = xt_check_target(par, w->watcher_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		module_put(watcher->me);
		return ret;
	}

	(*cnt)++;
	return 0;
}
 421
/* First structural pass over the (untrusted) entry blob: locate the
 * kernel-side start of every valid hook chain and verify the blob is a
 * well-formed sequence of ebt_entries headers and ebt_entry rules that
 * exactly fills entries_size.  Returns 0 or -EINVAL.
 */
static int ebt_verify_pointers(const struct ebt_replace *repl,
			       struct ebt_table_info *newinfo)
{
	unsigned int limit = repl->entries_size;
	unsigned int valid_hooks = repl->valid_hooks;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		newinfo->hook_entry[i] = NULL;

	newinfo->entries_size = repl->entries_size;
	newinfo->nentries = repl->nentries;

	while (offset < limit) {
		size_t left = limit - offset;
		struct ebt_entry *e = (void *)newinfo->entries + offset;

		if (left < sizeof(unsigned int))
			break;

		/* did userspace declare a hook chain starting here? */
		for (i = 0; i < NF_BR_NUMHOOKS; i++) {
			if ((valid_hooks & (1 << i)) == 0)
				continue;
			if ((char __user *)repl->hook_entry[i] ==
			     repl->entries + offset)
				break;
		}

		if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
			/* chain header (struct ebt_entries) */
			if (e->bitmask != 0) {
				/* we make userspace set this right,
				   so there is no misunderstanding */
				BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
					 "in distinguisher\n");
				return -EINVAL;
			}
			if (i != NF_BR_NUMHOOKS)
				newinfo->hook_entry[i] = (struct ebt_entries *)e;
			if (left < sizeof(struct ebt_entries))
				break;
			offset += sizeof(struct ebt_entries);
		} else {
			/* ordinary rule (struct ebt_entry) */
			if (left < sizeof(struct ebt_entry))
				break;
			if (left < e->next_offset)
				break;
			if (e->next_offset < sizeof(struct ebt_entry))
				return -EINVAL;
			offset += e->next_offset;
		}
	}
	if (offset != limit) {
		BUGPRINT("entries_size too small\n");
		return -EINVAL;
	}

	/* check if all valid hooks have a chain */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if (!newinfo->hook_entry[i] &&
		   (valid_hooks & (1 << i))) {
			BUGPRINT("Valid hook without chain\n");
			return -EINVAL;
		}
	}
	return 0;
}
 489
/*
 * this one is very careful, as it is the first function
 * to parse the userspace data
 */
/* Per-element structural check run via EBT_ENTRY_ITERATE: validates
 * chain headers (policy, counter_offset, nentries bookkeeping) and the
 * internal offset ordering of ordinary rules.  Updates the running
 * counters *n (expected entries in current chain), *cnt (entries seen
 * in it), *totalcnt and *udc_cnt (user-defined chains).
 */
static inline int
ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
   const struct ebt_table_info *newinfo,
   unsigned int *n, unsigned int *cnt,
   unsigned int *totalcnt, unsigned int *udc_cnt)
{
	int i;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if ((void *)e == (void *)newinfo->hook_entry[i])
			break;
	}
	/* beginning of a new chain
	   if i == NF_BR_NUMHOOKS it must be a user defined chain */
	if (i != NF_BR_NUMHOOKS || !e->bitmask) {
		/* this checks if the previous chain has as many entries
		   as it said it has */
		if (*n != *cnt) {
			BUGPRINT("nentries does not equal the nr of entries "
				 "in the chain\n");
			return -EINVAL;
		}
		if (((struct ebt_entries *)e)->policy != EBT_DROP &&
		   ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
			/* only RETURN from udc */
			if (i != NF_BR_NUMHOOKS ||
			   ((struct ebt_entries *)e)->policy != EBT_RETURN) {
				BUGPRINT("bad policy\n");
				return -EINVAL;
			}
		}
		if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
			(*udc_cnt)++;
		if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
			BUGPRINT("counter_offset != totalcnt");
			return -EINVAL;
		}
		*n = ((struct ebt_entries *)e)->nentries;
		*cnt = 0;
		return 0;
	}
	/* a plain old entry, heh */
	if (sizeof(struct ebt_entry) > e->watchers_offset ||
	   e->watchers_offset > e->target_offset ||
	   e->target_offset >= e->next_offset) {
		BUGPRINT("entry offsets not in right order\n");
		return -EINVAL;
	}
	/* this is not checked anywhere else */
	if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
		BUGPRINT("target size too small\n");
		return -EINVAL;
	}
	(*cnt)++;
	(*totalcnt)++;
	return 0;
}
 551
/* Per-user-defined-chain bookkeeping used by check_chainloops(). */
struct ebt_cl_stack
{
	struct ebt_chainstack cs;	/* chain info + resume position */
	int from;			/* index of the calling chain, -1 for base */
	unsigned int hookmask;		/* base chains this udc is reachable from */
};
 558
/*
 * we need these positions to check that the jumps to a different part of the
 * entries is a jump to the beginning of a new chain.
 */
/* Iterator callback: record the position of every user-defined chain
 * header into udc[], incrementing *n per chain found.  Hook chains and
 * ordinary entries are skipped.
 */
static inline int
ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
   unsigned int *n, struct ebt_cl_stack *udc)
{
	int i;

	/* we're only interested in chain starts */
	if (e->bitmask)
		return 0;
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
			break;
	}
	/* only care about udc */
	if (i != NF_BR_NUMHOOKS)
		return 0;

	udc[*n].cs.chaininfo = (struct ebt_entries *)e;
	/* these initialisations are depended on later in check_chainloops() */
	udc[*n].cs.n = 0;
	udc[*n].hookmask = 0;

	(*n)++;
	return 0;
}
 588
 589static inline int
 590ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
 591{
 592	struct xt_mtdtor_param par;
 593
 594	if (i && (*i)-- == 0)
 595		return 1;
 596
 597	par.net       = net;
 598	par.match     = m->u.match;
 599	par.matchinfo = m->data;
 600	par.family    = NFPROTO_BRIDGE;
 601	if (par.match->destroy != NULL)
 602		par.match->destroy(&par);
 603	module_put(par.match->me);
 604	return 0;
 605}
 606
 607static inline int
 608ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
 609{
 610	struct xt_tgdtor_param par;
 611
 612	if (i && (*i)-- == 0)
 613		return 1;
 614
 615	par.net      = net;
 616	par.target   = w->u.watcher;
 617	par.targinfo = w->data;
 618	par.family   = NFPROTO_BRIDGE;
 619	if (par.target->destroy != NULL)
 620		par.target->destroy(&par);
 621	module_put(par.target->me);
 622	return 0;
 623}
 624
/* Tear down one rule: clean up all its watchers and matches, then
 * destroy its target and drop the module references.  Chain headers
 * (bitmask == 0) are skipped; non-NULL @cnt limits cleanup to the
 * first *cnt rules.
 */
static inline int
ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
{
	struct xt_tgdtor_param par;
	struct ebt_entry_target *t;

	if (e->bitmask == 0)
		return 0;
	/* we're done */
	if (cnt && (*cnt)-- == 0)
		return 1;
	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);

	par.net      = net;
	par.target   = t->u.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_BRIDGE;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	return 0;
}
 649
/* Full semantic validation of one rule: flag sanity, hook-mask
 * computation, then checkentry for every match, watcher and the target.
 * On failure, exactly the already-checked matches (i) and watchers (j)
 * are unwound via the cleanup labels.  *cnt counts fully-checked rules
 * so the caller can unwind with ebt_cleanup_entry().
 */
static inline int
ebt_check_entry(struct ebt_entry *e, struct net *net,
   const struct ebt_table_info *newinfo,
   const char *name, unsigned int *cnt,
   struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
{
	struct ebt_entry_target *t;
	struct xt_target *target;
	unsigned int i, j, hook = 0, hookmask = 0;
	size_t gap;
	int ret;
	struct xt_mtchk_param mtpar;
	struct xt_tgchk_param tgpar;

	/* don't mess with the struct ebt_entries */
	if (e->bitmask == 0)
		return 0;

	if (e->bitmask & ~EBT_F_MASK) {
		BUGPRINT("Unknown flag for bitmask\n");
		return -EINVAL;
	}
	if (e->invflags & ~EBT_INV_MASK) {
		BUGPRINT("Unknown flag for inv bitmask\n");
		return -EINVAL;
	}
	if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
		BUGPRINT("NOPROTO & 802_3 not allowed\n");
		return -EINVAL;
	}
	/* what hook do we belong to? */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if (!newinfo->hook_entry[i])
			continue;
		if ((char *)newinfo->hook_entry[i] < (char *)e)
			hook = i;
		else
			break;
	}
	/* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
	   a base chain */
	if (i < NF_BR_NUMHOOKS)
		hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
	else {
		/* rule lives in a udc: inherit that chain's reachability */
		for (i = 0; i < udc_cnt; i++)
			if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
				break;
		if (i == 0)
			hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
		else
			hookmask = cl_s[i - 1].hookmask;
	}
	i = 0;

	mtpar.net	= tgpar.net       = net;
	mtpar.table     = tgpar.table     = name;
	mtpar.entryinfo = tgpar.entryinfo = e;
	mtpar.hook_mask = tgpar.hook_mask = hookmask;
	mtpar.family    = tgpar.family    = NFPROTO_BRIDGE;
	ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
	if (ret != 0)
		goto cleanup_matches;
	j = 0;
	ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
	if (ret != 0)
		goto cleanup_watchers;
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
	/* room available for the target's data */
	gap = e->next_offset - e->target_offset;

	target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto cleanup_watchers;
	}

	t->u.target = target;
	if (t->u.target == &ebt_standard_target) {
		if (gap < sizeof(struct ebt_standard_target)) {
			BUGPRINT("Standard target size too big\n");
			ret = -EFAULT;
			goto cleanup_watchers;
		}
		if (((struct ebt_standard_target *)t)->verdict <
		   -NUM_STANDARD_TARGETS) {
			BUGPRINT("Invalid standard target\n");
			ret = -EFAULT;
			goto cleanup_watchers;
		}
	} else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
		module_put(t->u.target->me);
		ret = -EFAULT;
		goto cleanup_watchers;
	}

	tgpar.target   = target;
	tgpar.targinfo = t->data;
	ret = xt_check_target(&tgpar, t->target_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		module_put(target->me);
		goto cleanup_watchers;
	}
	(*cnt)++;
	return 0;
cleanup_watchers:
	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
cleanup_matches:
	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
	return ret;
}
 760
 761/*
 762 * checks for loops and sets the hook mask for udc
 763 * the hook mask for udc tells us from which base chains the udc can be
 764 * accessed. This mask is a parameter to the check() functions of the extensions
 765 */
/* Iterative depth-first walk over every rule reachable from @chain, with
 * cl_s[] serving as the explicit call stack; rejects jumps that loop back
 * into a chain on the current path or that do not land on a known
 * user-defined chain. As a side effect, records in cl_s[i].hookmask which
 * base hooks can reach each udc. Returns 0 if loop-free, -1 on error.
 */
static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
   unsigned int udc_cnt, unsigned int hooknr, char *base)
{
	int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
	const struct ebt_entry *e = (struct ebt_entry *)chain->data;
	const struct ebt_entry_target *t;

	while (pos < nentries || chain_nr != -1) {
		/* end of udc, go back one 'recursion' step */
		if (pos == nentries) {
			/* put back values of the time when this chain was called */
			e = cl_s[chain_nr].cs.e;
			if (cl_s[chain_nr].from != -1)
				nentries =
				cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
			else
				nentries = chain->nentries;
			pos = cl_s[chain_nr].cs.n;
			/* make sure we won't see a loop that isn't one */
			cl_s[chain_nr].cs.n = 0;
			chain_nr = cl_s[chain_nr].from;
			if (pos == nentries)
				continue;
		}
		t = (struct ebt_entry_target *)
		   (((char *)e) + e->target_offset);
		/* only the standard target can jump to another chain */
		if (strcmp(t->u.name, EBT_STANDARD_TARGET))
			goto letscontinue;
		if (e->target_offset + sizeof(struct ebt_standard_target) >
		   e->next_offset) {
			BUGPRINT("Standard target size too big\n");
			return -1;
		}
		verdict = ((struct ebt_standard_target *)t)->verdict;
		if (verdict >= 0) { /* jump to another chain */
			struct ebt_entries *hlp2 =
			   (struct ebt_entries *)(base + verdict);
			for (i = 0; i < udc_cnt; i++)
				if (hlp2 == cl_s[i].cs.chaininfo)
					break;
			/* bad destination or loop */
			if (i == udc_cnt) {
				BUGPRINT("bad destination\n");
				return -1;
			}
			/* non-zero cs.n means chain i is on the current path */
			if (cl_s[i].cs.n) {
				BUGPRINT("loop\n");
				return -1;
			}
			/* already verified from this hook, no need to descend */
			if (cl_s[i].hookmask & (1 << hooknr))
				goto letscontinue;
			/* this can't be 0, so the loop test is correct */
			cl_s[i].cs.n = pos + 1;
			pos = 0;
			cl_s[i].cs.e = ebt_next_entry(e);
			e = (struct ebt_entry *)(hlp2->data);
			nentries = hlp2->nentries;
			cl_s[i].from = chain_nr;
			chain_nr = i;
			/* this udc is accessible from the base chain for hooknr */
			cl_s[i].hookmask |= (1 << hooknr);
			continue;
		}
letscontinue:
		e = ebt_next_entry(e);
		pos++;
	}
	return 0;
}
 835
 836/* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
static int translate_table(struct net *net, const char *name,
			   struct ebt_table_info *newinfo)
{
	unsigned int i, j, k, udc_cnt;
	int ret;
	struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */

	/* find the first base hook that actually has a chain */
	i = 0;
	while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
		i++;
	if (i == NF_BR_NUMHOOKS) {
		BUGPRINT("No valid hooks specified\n");
		return -EINVAL;
	}
	if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
		BUGPRINT("Chains don't start at beginning\n");
		return -EINVAL;
	}
	/* make sure chains are ordered after each other in same order
	   as their corresponding hooks */
	for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
		if (!newinfo->hook_entry[j])
			continue;
		if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
			BUGPRINT("Hook order must be followed\n");
			return -EINVAL;
		}
		i = j;
	}

	/* do some early checkings and initialize some things */
	i = 0; /* holds the expected nr. of entries for the chain */
	j = 0; /* holds the up to now counted entries for the chain */
	k = 0; /* holds the total nr. of entries, should equal
		  newinfo->nentries afterwards */
	udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
	   ebt_check_entry_size_and_hooks, newinfo,
	   &i, &j, &k, &udc_cnt);

	if (ret != 0)
		return ret;

	if (i != j) {
		BUGPRINT("nentries does not equal the nr of entries in the "
			 "(last) chain\n");
		return -EINVAL;
	}
	if (k != newinfo->nentries) {
		BUGPRINT("Total nentries is wrong\n");
		return -EINVAL;
	}

	/* get the location of the udc, put them in an array
	   while we're at it, allocate the chainstack */
	if (udc_cnt) {
		/* this will get free'd in do_replace()/ebt_register_table()
		   if an error occurs */
		newinfo->chainstack =
			vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
		if (!newinfo->chainstack)
			return -ENOMEM;
		/* one per-cpu stack, each deep enough for every udc */
		for_each_possible_cpu(i) {
			newinfo->chainstack[i] =
			  vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
			if (!newinfo->chainstack[i]) {
				while (i)
					vfree(newinfo->chainstack[--i]);
				vfree(newinfo->chainstack);
				newinfo->chainstack = NULL;
				return -ENOMEM;
			}
		}

		cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
		if (!cl_s)
			return -ENOMEM;
		i = 0; /* the i'th udc */
		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
		   ebt_get_udc_positions, newinfo, &i, cl_s);
		/* sanity check */
		if (i != udc_cnt) {
			BUGPRINT("i != udc_cnt\n");
			vfree(cl_s);
			return -EFAULT;
		}
	}

	/* Check for loops */
	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		if (newinfo->hook_entry[i])
			if (check_chainloops(newinfo->hook_entry[i],
			   cl_s, udc_cnt, i, newinfo->entries)) {
				vfree(cl_s);
				return -EINVAL;
			}

	/* we now know the following (along with E=mc²):
	   - the nr of entries in each chain is right
	   - the size of the allocated space is right
	   - all valid hooks have a corresponding chain
	   - there are no loops
	   - wrong data can still be on the level of a single entry
	   - could be there are jumps to places that are not the
	     beginning of a chain. This can only occur in chains that
	     are not accessible from any base chains, so we don't care. */

	/* used to know what we need to clean up if something goes wrong */
	i = 0;
	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
	   ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
	if (ret != 0) {
		/* undo the extensions taken by the first i entries */
		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
				  ebt_cleanup_entry, net, &i);
	}
	vfree(cl_s);
	return ret;
}
 955
 956/* called under write_lock */
 957static void get_counters(const struct ebt_counter *oldcounters,
 958   struct ebt_counter *counters, unsigned int nentries)
 959{
 960	int i, cpu;
 961	struct ebt_counter *counter_base;
 962
 963	/* counters of cpu 0 */
 964	memcpy(counters, oldcounters,
 965	       sizeof(struct ebt_counter) * nentries);
 966
 967	/* add other counters to those of cpu 0 */
 968	for_each_possible_cpu(cpu) {
 969		if (cpu == 0)
 970			continue;
 971		counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
 972		for (i = 0; i < nentries; i++) {
 973			counters[i].pcnt += counter_base[i].pcnt;
 974			counters[i].bcnt += counter_base[i].bcnt;
 975		}
 976	}
 977}
 978
/* Swap a fully copied-in replacement table into place. On success the old
 * table's entries/counters/chainstack are released; on failure @newinfo's
 * entries are cleaned up (the caller still frees newinfo itself).
 * Takes ebt_mutex and t->lock internally; called with no locks held.
 */
static int do_replace_finish(struct net *net, struct ebt_replace *repl,
			      struct ebt_table_info *newinfo)
{
	int ret, i;
	struct ebt_counter *counterstmp = NULL;
	/* used to be able to unlock earlier */
	struct ebt_table_info *table;
	struct ebt_table *t;

	/* the user wants counters back
	   the check on the size is done later, when we have the lock */
	if (repl->num_counters) {
		unsigned long size = repl->num_counters * sizeof(*counterstmp);
		counterstmp = vmalloc(size);
		if (!counterstmp)
			return -ENOMEM;
	}

	newinfo->chainstack = NULL;
	ret = ebt_verify_pointers(repl, newinfo);
	if (ret != 0)
		goto free_counterstmp;

	ret = translate_table(net, repl->name, newinfo);

	if (ret != 0)
		goto free_counterstmp;

	/* returns with ebt_mutex held on success */
	t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
	if (!t) {
		ret = -ENOENT;
		goto free_iterate;
	}

	/* the table doesn't like it */
	if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
		goto free_unlock;

	if (repl->num_counters && repl->num_counters != t->private->nentries) {
		BUGPRINT("Wrong nr. of counters requested\n");
		ret = -EINVAL;
		goto free_unlock;
	}

	/* we have the mutex lock, so no danger in reading this pointer */
	table = t->private;
	/* make sure the table can only be rmmod'ed if it contains no rules */
	if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
		ret = -ENOENT;
		goto free_unlock;
	} else if (table->nentries && !newinfo->nentries)
		module_put(t->me);
	/* we need an atomic snapshot of the counters */
	write_lock_bh(&t->lock);
	if (repl->num_counters)
		get_counters(t->private->counters, counterstmp,
		   t->private->nentries);

	t->private = newinfo;
	write_unlock_bh(&t->lock);
	mutex_unlock(&ebt_mutex);
	/* so, a user can change the chains while having messed up her counter
	   allocation. Only reason why this is done is because this way the lock
	   is held only once, while this doesn't bring the kernel into a
	   dangerous state. */
	if (repl->num_counters &&
	   copy_to_user(repl->counters, counterstmp,
	   repl->num_counters * sizeof(struct ebt_counter))) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n");
	}

	/* decrease module count and free resources */
	EBT_ENTRY_ITERATE(table->entries, table->entries_size,
			  ebt_cleanup_entry, net, NULL);

	vfree(table->entries);
	if (table->chainstack) {
		for_each_possible_cpu(i)
			vfree(table->chainstack[i]);
		vfree(table->chainstack);
	}
	vfree(table);

	vfree(counterstmp);

	return ret;

free_unlock:
	mutex_unlock(&ebt_mutex);
free_iterate:
	EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
			  ebt_cleanup_entry, net, NULL);
free_counterstmp:
	vfree(counterstmp);
	/* can be initialized in translate_table() */
	if (newinfo->chainstack) {
		for_each_possible_cpu(i)
			vfree(newinfo->chainstack[i]);
		vfree(newinfo->chainstack);
	}
	return ret;
}
1081
1082/* replace the table */
1083static int do_replace(struct net *net, const void __user *user,
1084		      unsigned int len)
1085{
1086	int ret, countersize;
1087	struct ebt_table_info *newinfo;
1088	struct ebt_replace tmp;
1089
1090	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1091		return -EFAULT;
1092
1093	if (len != sizeof(tmp) + tmp.entries_size) {
1094		BUGPRINT("Wrong len argument\n");
1095		return -EINVAL;
1096	}
1097
1098	if (tmp.entries_size == 0) {
1099		BUGPRINT("Entries_size never zero\n");
1100		return -EINVAL;
1101	}
1102	/* overflow check */
1103	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
1104			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
1105		return -ENOMEM;
1106	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
1107		return -ENOMEM;
1108
1109	tmp.name[sizeof(tmp.name) - 1] = 0;
1110
1111	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1112	newinfo = vmalloc(sizeof(*newinfo) + countersize);
1113	if (!newinfo)
1114		return -ENOMEM;
1115
1116	if (countersize)
1117		memset(newinfo->counters, 0, countersize);
1118
1119	newinfo->entries = vmalloc(tmp.entries_size);
1120	if (!newinfo->entries) {
1121		ret = -ENOMEM;
1122		goto free_newinfo;
1123	}
1124	if (copy_from_user(
1125	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
1126		BUGPRINT("Couldn't copy entries from userspace\n");
1127		ret = -EFAULT;
1128		goto free_entries;
1129	}
1130
1131	ret = do_replace_finish(net, &tmp, newinfo);
1132	if (ret == 0)
1133		return ret;
1134free_entries:
1135	vfree(newinfo->entries);
1136free_newinfo:
1137	vfree(newinfo);
1138	return ret;
1139}
1140
/* Register a built-in table template in @net: duplicate the template,
 * translate its entries and link it on the per-net NFPROTO_BRIDGE list.
 * Returns the live table or an ERR_PTR(); never returns NULL.
 */
struct ebt_table *
ebt_register_table(struct net *net, const struct ebt_table *input_table)
{
	struct ebt_table_info *newinfo;
	struct ebt_table *t, *table;
	struct ebt_replace_kernel *repl;
	int ret, i, countersize;
	void *p;

	if (input_table == NULL || (repl = input_table->table) == NULL ||
	    repl->entries == NULL || repl->entries_size == 0 ||
	    repl->counters != NULL || input_table->private != NULL) {
		BUGPRINT("Bad table data for ebt_register_table!!!\n");
		return ERR_PTR(-EINVAL);
	}

	/* Don't add one table to multiple lists. */
	table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	ret = -ENOMEM;
	if (!newinfo)
		goto free_table;

	p = vmalloc(repl->entries_size);
	if (!p)
		goto free_newinfo;

	memcpy(p, repl->entries, repl->entries_size);
	newinfo->entries = p;

	newinfo->entries_size = repl->entries_size;
	newinfo->nentries = repl->nentries;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	/* fill in newinfo and parse the entries */
	newinfo->chainstack = NULL;
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if ((repl->valid_hooks & (1 << i)) == 0)
			newinfo->hook_entry[i] = NULL;
		else
			/* rebase the template's hook pointers onto our copy */
			newinfo->hook_entry[i] = p +
				((char *)repl->hook_entry[i] - repl->entries);
	}
	ret = translate_table(net, repl->name, newinfo);
	if (ret != 0) {
		BUGPRINT("Translate_table failed\n");
		goto free_chainstack;
	}

	if (table->check && table->check(newinfo, table->valid_hooks)) {
		BUGPRINT("The table doesn't like its own initial data, lol\n");
		ret = -EINVAL;
		goto free_chainstack;
	}

	table->private = newinfo;
	rwlock_init(&table->lock);
	ret = mutex_lock_interruptible(&ebt_mutex);
	if (ret != 0)
		goto free_chainstack;

	/* table names must be unique per net namespace */
	list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			BUGPRINT("Table name already exists\n");
			goto free_unlock;
		}
	}

	/* Hold a reference count if the chains aren't empty */
	if (newinfo->nentries && !try_module_get(table->me)) {
		ret = -ENOENT;
		goto free_unlock;
	}
	list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
	mutex_unlock(&ebt_mutex);
	return table;
free_unlock:
	mutex_unlock(&ebt_mutex);
free_chainstack:
	if (newinfo->chainstack) {
		for_each_possible_cpu(i)
			vfree(newinfo->chainstack[i]);
		vfree(newinfo->chainstack);
	}
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
free_table:
	kfree(table);
out:
	return ERR_PTR(ret);
}
1242
1243void ebt_unregister_table(struct net *net, struct ebt_table *table)
1244{
1245	int i;
1246
1247	if (!table) {
1248		BUGPRINT("Request to unregister NULL table!!!\n");
1249		return;
1250	}
1251	mutex_lock(&ebt_mutex);
1252	list_del(&table->list);
1253	mutex_unlock(&ebt_mutex);
1254	EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
1255			  ebt_cleanup_entry, net, NULL);
1256	if (table->private->nentries)
1257		module_put(table->me);
1258	vfree(table->private->entries);
1259	if (table->private->chainstack) {
1260		for_each_possible_cpu(i)
1261			vfree(table->private->chainstack[i]);
1262		vfree(table->private->chainstack);
1263	}
1264	vfree(table->private);
1265	kfree(table);
1266}
1267
1268/* userspace just supplied us with counters */
/* Add the user-supplied counter values onto the named table's counters.
 * @num_counters must match the table's nentries exactly.
 * Returns 0 on success or a negative errno.
 */
static int do_update_counters(struct net *net, const char *name,
				struct ebt_counter __user *counters,
				unsigned int num_counters,
				const void __user *user, unsigned int len)
{
	int i, ret;
	struct ebt_counter *tmp;
	struct ebt_table *t;

	if (num_counters == 0)
		return -EINVAL;

	tmp = vmalloc(num_counters * sizeof(*tmp));
	if (!tmp)
		return -ENOMEM;

	/* returns with ebt_mutex held on success */
	t = find_table_lock(net, name, &ret, &ebt_mutex);
	if (!t)
		goto free_tmp;

	if (num_counters != t->private->nentries) {
		BUGPRINT("Wrong nr of counters\n");
		ret = -EINVAL;
		goto unlock_mutex;
	}

	if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
		ret = -EFAULT;
		goto unlock_mutex;
	}

	/* we want an atomic add of the counters */
	write_lock_bh(&t->lock);

	/* we add to the counters of the first cpu */
	for (i = 0; i < num_counters; i++) {
		t->private->counters[i].pcnt += tmp[i].pcnt;
		t->private->counters[i].bcnt += tmp[i].bcnt;
	}

	write_unlock_bh(&t->lock);
	ret = 0;
unlock_mutex:
	mutex_unlock(&ebt_mutex);
free_tmp:
	vfree(tmp);
	return ret;
}
1317
1318static int update_counters(struct net *net, const void __user *user,
1319			    unsigned int len)
1320{
1321	struct ebt_replace hlp;
1322
1323	if (copy_from_user(&hlp, user, sizeof(hlp)))
1324		return -EFAULT;
1325
1326	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1327		return -EINVAL;
1328
1329	return do_update_counters(net, hlp.name, hlp.counters,
1330				hlp.num_counters, user, len);
1331}
1332
1333static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1334    const char *base, char __user *ubase)
1335{
1336	char __user *hlp = ubase + ((char *)m - base);
1337	char name[EBT_FUNCTION_MAXNAMELEN] = {};
1338
1339	/* ebtables expects 32 bytes long names but xt_match names are 29 bytes
1340	   long. Copy 29 bytes and fill remaining bytes with zeroes. */
 
1341	strlcpy(name, m->u.match->name, sizeof(name));
1342	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
1343		return -EFAULT;
1344	return 0;
1345}
1346
1347static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1348    const char *base, char __user *ubase)
1349{
1350	char __user *hlp = ubase + ((char *)w - base);
1351	char name[EBT_FUNCTION_MAXNAMELEN] = {};
1352
1353	strlcpy(name, w->u.watcher->name, sizeof(name));
1354	if (copy_to_user(hlp , name, EBT_FUNCTION_MAXNAMELEN))
1355		return -EFAULT;
1356	return 0;
1357}
1358
1359static inline int
1360ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
1361{
1362	int ret;
1363	char __user *hlp;
1364	const struct ebt_entry_target *t;
1365	char name[EBT_FUNCTION_MAXNAMELEN] = {};
1366
1367	if (e->bitmask == 0)
1368		return 0;
1369
1370	hlp = ubase + (((char *)e + e->target_offset) - base);
1371	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
1372
1373	ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
1374	if (ret != 0)
1375		return ret;
1376	ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
1377	if (ret != 0)
1378		return ret;
1379	strlcpy(name, t->u.target->name, sizeof(name));
1380	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
1381		return -EFAULT;
1382	return 0;
1383}
1384
1385static int copy_counters_to_user(struct ebt_table *t,
1386				  const struct ebt_counter *oldcounters,
1387				  void __user *user, unsigned int num_counters,
1388				  unsigned int nentries)
1389{
1390	struct ebt_counter *counterstmp;
1391	int ret = 0;
1392
1393	/* userspace might not need the counters */
1394	if (num_counters == 0)
1395		return 0;
1396
1397	if (num_counters != nentries) {
1398		BUGPRINT("Num_counters wrong\n");
1399		return -EINVAL;
1400	}
1401
1402	counterstmp = vmalloc(nentries * sizeof(*counterstmp));
1403	if (!counterstmp)
1404		return -ENOMEM;
1405
1406	write_lock_bh(&t->lock);
1407	get_counters(oldcounters, counterstmp, nentries);
1408	write_unlock_bh(&t->lock);
1409
1410	if (copy_to_user(user, counterstmp,
1411	   nentries * sizeof(struct ebt_counter)))
1412		ret = -EFAULT;
1413	vfree(counterstmp);
1414	return ret;
1415}
1416
1417/* called with ebt_mutex locked */
1418static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1419    const int *len, int cmd)
1420{
1421	struct ebt_replace tmp;
1422	const struct ebt_counter *oldcounters;
1423	unsigned int entries_size, nentries;
1424	int ret;
1425	char *entries;
1426
1427	if (cmd == EBT_SO_GET_ENTRIES) {
1428		entries_size = t->private->entries_size;
1429		nentries = t->private->nentries;
1430		entries = t->private->entries;
1431		oldcounters = t->private->counters;
1432	} else {
1433		entries_size = t->table->entries_size;
1434		nentries = t->table->nentries;
1435		entries = t->table->entries;
1436		oldcounters = t->table->counters;
1437	}
1438
1439	if (copy_from_user(&tmp, user, sizeof(tmp)))
1440		return -EFAULT;
1441
1442	if (*len != sizeof(struct ebt_replace) + entries_size +
1443	   (tmp.num_counters ? nentries * sizeof(struct ebt_counter) : 0))
1444		return -EINVAL;
1445
1446	if (tmp.nentries != nentries) {
1447		BUGPRINT("Nentries wrong\n");
1448		return -EINVAL;
1449	}
1450
1451	if (tmp.entries_size != entries_size) {
1452		BUGPRINT("Wrong size\n");
1453		return -EINVAL;
1454	}
1455
1456	ret = copy_counters_to_user(t, oldcounters, tmp.counters,
1457					tmp.num_counters, nentries);
1458	if (ret)
1459		return ret;
1460
1461	if (copy_to_user(tmp.entries, entries, entries_size)) {
1462		BUGPRINT("Couldn't copy entries to userspace\n");
1463		return -EFAULT;
1464	}
1465	/* set the match/watcher/target names right */
1466	return EBT_ENTRY_ITERATE(entries, entries_size,
1467	   ebt_make_names, entries, tmp.entries);
1468}
1469
1470static int do_ebt_set_ctl(struct sock *sk,
1471	int cmd, void __user *user, unsigned int len)
1472{
1473	int ret;
1474	struct net *net = sock_net(sk);
1475
1476	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1477		return -EPERM;
1478
1479	switch (cmd) {
1480	case EBT_SO_SET_ENTRIES:
1481		ret = do_replace(net, user, len);
1482		break;
1483	case EBT_SO_SET_COUNTERS:
1484		ret = update_counters(net, user, len);
1485		break;
1486	default:
1487		ret = -EINVAL;
1488	}
1489	return ret;
1490}
1491
1492static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1493{
1494	int ret;
1495	struct ebt_replace tmp;
1496	struct ebt_table *t;
1497	struct net *net = sock_net(sk);
1498
1499	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1500		return -EPERM;
1501
1502	if (copy_from_user(&tmp, user, sizeof(tmp)))
1503		return -EFAULT;
1504
 
 
1505	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
1506	if (!t)
1507		return ret;
1508
1509	switch (cmd) {
1510	case EBT_SO_GET_INFO:
1511	case EBT_SO_GET_INIT_INFO:
1512		if (*len != sizeof(struct ebt_replace)) {
1513			ret = -EINVAL;
1514			mutex_unlock(&ebt_mutex);
1515			break;
1516		}
1517		if (cmd == EBT_SO_GET_INFO) {
1518			tmp.nentries = t->private->nentries;
1519			tmp.entries_size = t->private->entries_size;
1520			tmp.valid_hooks = t->valid_hooks;
1521		} else {
1522			tmp.nentries = t->table->nentries;
1523			tmp.entries_size = t->table->entries_size;
1524			tmp.valid_hooks = t->table->valid_hooks;
1525		}
1526		mutex_unlock(&ebt_mutex);
1527		if (copy_to_user(user, &tmp, *len) != 0) {
1528			BUGPRINT("c2u Didn't work\n");
1529			ret = -EFAULT;
1530			break;
1531		}
1532		ret = 0;
1533		break;
1534
1535	case EBT_SO_GET_ENTRIES:
1536	case EBT_SO_GET_INIT_ENTRIES:
1537		ret = copy_everything_to_user(t, user, len, cmd);
1538		mutex_unlock(&ebt_mutex);
1539		break;
1540
1541	default:
1542		mutex_unlock(&ebt_mutex);
1543		ret = -EINVAL;
1544	}
1545
1546	return ret;
1547}
1548
1549#ifdef CONFIG_COMPAT
1550/* 32 bit-userspace compatibility definitions. */
/* layout mirror of struct ebt_replace as seen by 32-bit userspace:
 * pointer members are compat_uptr_t, integers compat_uint_t
 */
struct compat_ebt_replace {
	char name[EBT_TABLE_MAXNAMELEN];
	compat_uint_t valid_hooks;
	compat_uint_t nentries;
	compat_uint_t entries_size;
	/* start of the chains */
	compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
	/* nr of counters userspace expects back */
	compat_uint_t num_counters;
	/* where the kernel will put the old counters. */
	compat_uptr_t counters;
	compat_uptr_t entries;
};
1564
1565/* struct ebt_entry_match, _target and _watcher have same layout */
struct compat_ebt_entry_mwt {
	union {
		char name[EBT_FUNCTION_MAXNAMELEN];	/* extension name from userspace */
		compat_uptr_t ptr;
	} u;
	compat_uint_t match_size;	/* bytes in data[], excluding this header */
	compat_uint_t data[0];		/* extension-private configuration */
};
1574
1575/* account for possible padding between match_size and ->data */
static int ebt_compat_entry_padsize(void)
{
	/* the native mwt header must never be smaller than the compat one */
	BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
	/* bytes of padding needed after the compat header so ->data lands
	 * at the native XT_ALIGN()ed offset
	 */
	return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
}
1583
/* Size delta between the native and compat representation of one match's
 * payload; subtracted from the native size when copying to 32-bit userland.
 */
static int ebt_compat_match_offset(const struct xt_match *match,
				   unsigned int userlen)
{
	/*
	 * ebt_among needs special handling. The kernel .matchsize is
	 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
	 * value is expected.
	 * Example: userspace sends 4500, ebt_among.c wants 4504.
	 */
	if (unlikely(match->matchsize == -1))
		return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
	return xt_compat_match_offset(match);
}
1597
/* Emit one match in compat layout at *dstptr, advancing *dstptr past the
 * written data and shrinking *size by the native/compat size difference.
 */
static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
				unsigned int *size)
{
	const struct xt_match *match = m->u.match;
	struct compat_ebt_entry_mwt __user *cm = *dstptr;
	int off = ebt_compat_match_offset(match, m->match_size);
	/* payload shrinks by the layout delta when going to 32 bit */
	compat_uint_t msize = m->match_size - off;

	BUG_ON(off >= m->match_size);

	if (copy_to_user(cm->u.name, match->name,
	    strlen(match->name) + 1) || put_user(msize, &cm->match_size))
		return -EFAULT;

	/* let the extension translate its payload if it knows how */
	if (match->compat_to_user) {
		if (match->compat_to_user(cm->data, m->data))
			return -EFAULT;
	} else if (copy_to_user(cm->data, m->data, msize))
			return -EFAULT;

	*size -= ebt_compat_entry_padsize() + off;
	*dstptr = cm->data;
	*dstptr += msize;
	return 0;
}
1623
/* Emit one target in compat layout at *dstptr; bookkeeping mirrors
 * compat_match_to_user().
 */
static int compat_target_to_user(struct ebt_entry_target *t,
				 void __user **dstptr,
				 unsigned int *size)
{
	const struct xt_target *target = t->u.target;
	struct compat_ebt_entry_mwt __user *cm = *dstptr;
	int off = xt_compat_target_offset(target);
	compat_uint_t tsize = t->target_size - off;

	BUG_ON(off >= t->target_size);

	if (copy_to_user(cm->u.name, target->name,
	    strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
		return -EFAULT;

	/* prefer the extension's own translation hook when present */
	if (target->compat_to_user) {
		if (target->compat_to_user(cm->data, t->data))
			return -EFAULT;
	} else if (copy_to_user(cm->data, t->data, tsize))
		return -EFAULT;

	*size -= ebt_compat_entry_padsize() + off;
	*dstptr = cm->data;
	*dstptr += tsize;
	return 0;
}
1650
1651static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1652				  void __user **dstptr,
1653				  unsigned int *size)
1654{
1655	return compat_target_to_user((struct ebt_entry_target *)w,
1656							dstptr, size);
1657}
1658
/* Copy one entry (or chain-header pseudo entry) to userspace in compat
 * layout, then fix up the entry's internal offsets to account for the
 * size shrinkage of the converted matches/watchers/target.
 */
static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
				unsigned int *size)
{
	struct ebt_entry_target *t;
	struct ebt_entry __user *ce;
	u32 watchers_offset, target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	/* bitmask == 0 marks a chain header: copied verbatim, no extensions */
	if (e->bitmask == 0) {
		if (*size < sizeof(struct ebt_entries))
			return -EINVAL;
		if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
			return -EFAULT;

		*dstptr += sizeof(struct ebt_entries);
		*size -= sizeof(struct ebt_entries);
		return 0;
	}

	if (*size < sizeof(*ce))
		return -EINVAL;

	ce = (struct ebt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(*ce)))
		return -EFAULT;

	origsize = *size;
	*dstptr += sizeof(*ce);

	/* (origsize - *size) tracks total shrinkage accumulated so far */
	ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
	if (ret)
		return ret;
	watchers_offset = e->watchers_offset - (origsize - *size);

	ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
	if (ret)
		return ret;
	target_offset = e->target_offset - (origsize - *size);

	t = (struct ebt_entry_target *) ((char *) e + e->target_offset);

	ret = compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);

	/* patch the offsets in the already-copied entry header */
	if (put_user(watchers_offset, &ce->watchers_offset) ||
	    put_user(target_offset, &ce->target_offset) ||
	    put_user(next_offset, &ce->next_offset))
		return -EFAULT;

	*size -= sizeof(*ce);
	return 0;
}
1714
1715static int compat_calc_match(struct ebt_entry_match *m, int *off)
1716{
1717	*off += ebt_compat_match_offset(m->u.match, m->match_size);
1718	*off += ebt_compat_entry_padsize();
1719	return 0;
1720}
1721
1722static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1723{
1724	*off += xt_compat_target_offset(w->u.watcher);
1725	*off += ebt_compat_entry_padsize();
1726	return 0;
1727}
1728
/* Compute how much smaller one entry becomes in compat layout, record the
 * per-entry delta with xt_compat_add_offset() and shrink the reported
 * entries_size and any hook_entry offsets that lie past this entry.
 */
static int compat_calc_entry(const struct ebt_entry *e,
			     const struct ebt_table_info *info,
			     const void *base,
			     struct compat_ebt_replace *newinfo)
{
	const struct ebt_entry_target *t;
	unsigned int entry_offset;
	int off, ret, i;

	/* chain headers keep the same size in both layouts */
	if (e->bitmask == 0)
		return 0;

	off = 0;
	entry_offset = (void *)e - base;

	EBT_MATCH_ITERATE(e, compat_calc_match, &off);
	EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);

	t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);

	off += xt_compat_target_offset(t->u.target);
	off += ebt_compat_entry_padsize();

	newinfo->entries_size -= off;

	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		const void *hookptr = info->hook_entry[i];
		/* NOTE(review): 'base - hookptr' looks inverted — hook_entry
		 * pointers are expected to lie at or after base, so
		 * 'hookptr - base' would seem intended; verify against the
		 * native hook_entry layout before touching this.
		 */
		if (info->hook_entry[i] &&
		    (e < (struct ebt_entry *)(base - hookptr))) {
			newinfo->hook_entry[i] -= off;
			pr_debug("0x%08X -> 0x%08X\n",
					newinfo->hook_entry[i] + off,
					newinfo->hook_entry[i]);
		}
	}

	return 0;
}
1771
1772
1773static int compat_table_info(const struct ebt_table_info *info,
1774			     struct compat_ebt_replace *newinfo)
1775{
1776	unsigned int size = info->entries_size;
1777	const void *entries = info->entries;
1778
1779	newinfo->entries_size = size;
1780
1781	xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
1782	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1783							entries, newinfo);
1784}
1785
/* Compat counterpart of copy_everything_to_user(): report the ruleset
 * (live or initial) to 32-bit userspace, translating every entry.
 * Called with ebt_mutex held.
 */
static int compat_copy_everything_to_user(struct ebt_table *t,
					  void __user *user, int *len, int cmd)
{
	struct compat_ebt_replace repl, tmp;
	struct ebt_counter *oldcounters;
	struct ebt_table_info tinfo;
	int ret;
	void __user *pos;

	memset(&tinfo, 0, sizeof(tinfo));

	/* GET_ENTRIES reads the live ruleset, else the registered template */
	if (cmd == EBT_SO_GET_ENTRIES) {
		tinfo.entries_size = t->private->entries_size;
		tinfo.nentries = t->private->nentries;
		tinfo.entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		tinfo.entries_size = t->table->entries_size;
		tinfo.nentries = t->table->nentries;
		tinfo.entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (tmp.nentries != tinfo.nentries ||
	   (tmp.num_counters && tmp.num_counters != tinfo.nentries))
		return -EINVAL;

	/* work out the compat size of the blob before checking *len */
	memcpy(&repl, &tmp, sizeof(repl));
	if (cmd == EBT_SO_GET_ENTRIES)
		ret = compat_table_info(t->private, &repl);
	else
		ret = compat_table_info(&tinfo, &repl);
	if (ret)
		return ret;

	if (*len != sizeof(tmp) + repl.entries_size +
	   (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
		pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
				*len, tinfo.entries_size, repl.entries_size);
		return -EINVAL;
	}

	/* userspace might not need the counters */
	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
					tmp.num_counters, tinfo.nentries);
	if (ret)
		return ret;

	pos = compat_ptr(tmp.entries);
	return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
			compat_copy_entry_to_user, &pos, &tmp.entries_size);
}
1841
/* Cursor state for the two-pass compat->native translation: pass one
 * runs with buf_kern_start == NULL and only counts the required size,
 * pass two writes the translated blob into the kernel buffer.
 */
struct ebt_entries_buf_state {
	char *buf_kern_start;	/* kernel buffer to copy (translated) data to */
	u32 buf_kern_len;	/* total size of kernel buffer */
	u32 buf_kern_offset;	/* amount of data copied so far */
	u32 buf_user_offset;	/* read position in userspace buffer */
};
1848
1849static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1850{
1851	state->buf_kern_offset += sz;
1852	return state->buf_kern_offset >= sz ? 0 : -EINVAL;
1853}
1854
1855static int ebt_buf_add(struct ebt_entries_buf_state *state,
1856		       void *data, unsigned int sz)
1857{
1858	if (state->buf_kern_start == NULL)
1859		goto count_only;
1860
1861	BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
1862
1863	memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1864
1865 count_only:
1866	state->buf_user_offset += sz;
1867	return ebt_buf_count(state, sz);
1868}
1869
1870static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1871{
1872	char *b = state->buf_kern_start;
1873
1874	BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
1875
1876	if (b != NULL && sz > 0)
1877		memset(b + state->buf_kern_offset, 0, sz);
1878	/* do not adjust ->buf_user_offset here, we added kernel-side padding */
1879	return ebt_buf_count(state, sz);
1880}
1881
/* which kind of extension a compat_ebt_entry_mwt blob describes */
enum compat_mwt {
	EBT_COMPAT_MATCH,	/* xt_match payload */
	EBT_COMPAT_WATCHER,	/* xt_target used as watcher (no verdict) */
	EBT_COMPAT_TARGET,	/* xt_target providing the verdict */
};
1887
/* Translate one compat match/watcher/target payload to its native
 * layout (via the extension's compat_from_user hook, or a plain copy)
 * and advance the buffer cursors.
 *
 * Returns the native payload size including alignment growth ("off"),
 * or a negative errno if the extension cannot be found.
 */
static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
				enum compat_mwt compat_mwt,
				struct ebt_entries_buf_state *state,
				const unsigned char *base)
{
	char name[EBT_FUNCTION_MAXNAMELEN];
	struct xt_match *match;
	struct xt_target *wt;
	void *dst = NULL;
	int off, pad = 0;
	unsigned int size_kern, match_size = mwt->match_size;

	/* strlcpy guarantees NUL termination even for a hostile name */
	strlcpy(name, mwt->u.name, sizeof(name));

	/* dst stays NULL during the sizing pass: nothing is written then */
	if (state->buf_kern_start)
		dst = state->buf_kern_start + state->buf_kern_offset;

	switch (compat_mwt) {
	case EBT_COMPAT_MATCH:
		/* takes a module reference; dropped again below */
		match = xt_request_find_match(NFPROTO_BRIDGE, name, 0);
		if (IS_ERR(match))
			return PTR_ERR(match);

		off = ebt_compat_match_offset(match, match_size);
		if (dst) {
			if (match->compat_from_user)
				match->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = match->matchsize;
		/* matchsize == -1 means "variable size": keep user's size */
		if (unlikely(size_kern == -1))
			size_kern = match_size;
		module_put(match->me);
		break;
	case EBT_COMPAT_WATCHER: /* fallthrough */
	case EBT_COMPAT_TARGET:
		wt = xt_request_find_target(NFPROTO_BRIDGE, name, 0);
		if (IS_ERR(wt))
			return PTR_ERR(wt);
		off = xt_compat_target_offset(wt);

		if (dst) {
			if (wt->compat_from_user)
				wt->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = wt->targetsize;
		module_put(wt->me);
		break;

	default:
		return -EINVAL;
	}

	state->buf_kern_offset += match_size + off;
	state->buf_user_offset += match_size;
	pad = XT_ALIGN(size_kern) - size_kern;

	/* zero the alignment tail so no kernel memory leaks to userspace
	 * on a later copy-out.
	 * NOTE(review): these BUG_ONs fire on inconsistent size math;
	 * later kernels reject such input gracefully instead — consider
	 * backporting that hardening.
	 */
	if (pad > 0 && dst) {
		BUG_ON(state->buf_kern_len <= pad);
		BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
		memset(dst + size_kern, 0, pad);
	}
	return off + match_size;
}
1957
1958/*
1959 * return size of all matches, watchers or target, including necessary
1960 * alignment and padding.
1961 */
1962static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
1963			unsigned int size_left, enum compat_mwt type,
1964			struct ebt_entries_buf_state *state, const void *base)
1965{
1966	int growth = 0;
1967	char *buf;
1968
1969	if (size_left == 0)
1970		return 0;
1971
1972	buf = (char *) match32;
1973
1974	while (size_left >= sizeof(*match32)) {
1975		struct ebt_entry_match *match_kern;
1976		int ret;
1977
1978		match_kern = (struct ebt_entry_match *) state->buf_kern_start;
1979		if (match_kern) {
1980			char *tmp;
1981			tmp = state->buf_kern_start + state->buf_kern_offset;
1982			match_kern = (struct ebt_entry_match *) tmp;
1983		}
1984		ret = ebt_buf_add(state, buf, sizeof(*match32));
1985		if (ret < 0)
1986			return ret;
1987		size_left -= sizeof(*match32);
1988
1989		/* add padding before match->data (if any) */
1990		ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
1991		if (ret < 0)
1992			return ret;
1993
1994		if (match32->match_size > size_left)
1995			return -EINVAL;
1996
1997		size_left -= match32->match_size;
1998
1999		ret = compat_mtw_from_user(match32, type, state, base);
2000		if (ret < 0)
2001			return ret;
2002
2003		BUG_ON(ret < match32->match_size);
2004		growth += ret - match32->match_size;
2005		growth += ebt_compat_entry_padsize();
2006
2007		buf += sizeof(*match32);
2008		buf += match32->match_size;
2009
2010		if (match_kern)
2011			match_kern->match_size = ret;
2012
2013		WARN_ON(type == EBT_COMPAT_TARGET && size_left);
2014		match32 = (struct compat_ebt_entry_mwt *) buf;
2015	}
2016
2017	return growth;
2018}
2019
2020/* called for all ebt_entry structures. */
2021static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2022			  unsigned int *total,
2023			  struct ebt_entries_buf_state *state)
2024{
2025	unsigned int i, j, startoff, new_offset = 0;
2026	/* stores match/watchers/targets & offset of next struct ebt_entry: */
2027	unsigned int offsets[4];
2028	unsigned int *offsets_update = NULL;
2029	int ret;
2030	char *buf_start;
2031
2032	if (*total < sizeof(struct ebt_entries))
2033		return -EINVAL;
2034
2035	if (!entry->bitmask) {
2036		*total -= sizeof(struct ebt_entries);
2037		return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
2038	}
2039	if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
2040		return -EINVAL;
2041
2042	startoff = state->buf_user_offset;
2043	/* pull in most part of ebt_entry, it does not need to be changed. */
2044	ret = ebt_buf_add(state, entry,
2045			offsetof(struct ebt_entry, watchers_offset));
2046	if (ret < 0)
2047		return ret;
2048
2049	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
2050	memcpy(&offsets[1], &entry->watchers_offset,
2051			sizeof(offsets) - sizeof(offsets[0]));
2052
2053	if (state->buf_kern_start) {
2054		buf_start = state->buf_kern_start + state->buf_kern_offset;
2055		offsets_update = (unsigned int *) buf_start;
2056	}
2057	ret = ebt_buf_add(state, &offsets[1],
2058			sizeof(offsets) - sizeof(offsets[0]));
2059	if (ret < 0)
2060		return ret;
2061	buf_start = (char *) entry;
2062	/*
2063	 * 0: matches offset, always follows ebt_entry.
2064	 * 1: watchers offset, from ebt_entry structure
2065	 * 2: target offset, from ebt_entry structure
2066	 * 3: next ebt_entry offset, from ebt_entry structure
2067	 *
2068	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
2069	 */
2070	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
2071		struct compat_ebt_entry_mwt *match32;
2072		unsigned int size;
2073		char *buf = buf_start;
2074
2075		buf = buf_start + offsets[i];
2076		if (offsets[i] > offsets[j])
2077			return -EINVAL;
2078
2079		match32 = (struct compat_ebt_entry_mwt *) buf;
2080		size = offsets[j] - offsets[i];
2081		ret = ebt_size_mwt(match32, size, i, state, base);
2082		if (ret < 0)
2083			return ret;
2084		new_offset += ret;
2085		if (offsets_update && new_offset) {
2086			pr_debug("change offset %d to %d\n",
2087				offsets_update[i], offsets[j] + new_offset);
2088			offsets_update[i] = offsets[j] + new_offset;
2089		}
2090	}
2091
2092	if (state->buf_kern_start == NULL) {
2093		unsigned int offset = buf_start - (char *) base;
2094
2095		ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset);
2096		if (ret < 0)
2097			return ret;
2098	}
2099
2100	startoff = state->buf_user_offset - startoff;
2101
2102	BUG_ON(*total < startoff);
2103	*total -= startoff;
2104	return 0;
2105}
2106
2107/*
2108 * repl->entries_size is the size of the ebt_entry blob in userspace.
2109 * It might need more memory when copied to a 64 bit kernel in case
2110 * userspace is 32-bit. So, first task: find out how much memory is needed.
2111 *
2112 * Called before validation is performed.
2113 */
2114static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2115				struct ebt_entries_buf_state *state)
2116{
2117	unsigned int size_remaining = size_user;
2118	int ret;
2119
2120	ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2121					&size_remaining, state);
2122	if (ret < 0)
2123		return ret;
2124
2125	WARN_ON(size_remaining);
2126	return state->buf_kern_offset;
2127}
2128
2129
/* Read a struct compat_ebt_replace from userspace and expand it into
 * the native struct ebt_replace, validating lengths and guarding the
 * counter allocations against integer overflow.
 */
static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
					    void __user *user, unsigned int len)
{
	struct compat_ebt_replace tmp;
	int i;

	if (len < sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* header must be followed by exactly entries_size bytes */
	if (len != sizeof(tmp) + tmp.entries_size)
		return -EINVAL;

	if (tmp.entries_size == 0)
		return -EINVAL;

	/* overflow check for the per-cpu counter area allocated later
	 * (mirrors the check in the native do_replace path).
	 */
	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
		return -ENOMEM;

	/* fields before hook_entry have identical layout in both ABIs */
	memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));

	/* starting with hook_entry, 32 vs. 64 bit structures are different */
	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);

	repl->num_counters = tmp.num_counters;
	repl->counters = compat_ptr(tmp.counters);
	repl->entries = compat_ptr(tmp.entries);
	return 0;
}
2165
/* Compat counterpart of do_replace(): pulls in a 32 bit ruleset,
 * translates it to the native layout in two passes (size, then copy)
 * and hands the result to the common do_replace_finish() path.
 */
static int compat_do_replace(struct net *net, void __user *user,
			     unsigned int len)
{
	int ret, i, countersize, size64;
	struct ebt_table_info *newinfo;
	struct ebt_replace tmp;
	struct ebt_entries_buf_state state;
	void *entries_tmp;

	ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
	if (ret) {
		/* try real handler in case userland supplied needed padding */
		if (ret == -EINVAL && do_replace(net, user, len) == 0)
			ret = 0;
		return ret;
	}

	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	if (!newinfo)
		return -ENOMEM;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	memset(&state, 0, sizeof(state));

	/* stage the raw 32 bit blob in a temporary buffer first */
	newinfo->entries = vmalloc(tmp.entries_size);
	if (!newinfo->entries) {
		ret = -ENOMEM;
		goto free_newinfo;
	}
	if (copy_from_user(
	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
		ret = -EFAULT;
		goto free_entries;
	}

	entries_tmp = newinfo->entries;

	/* xt compat offset bookkeeping is protected by this lock */
	xt_compat_lock(NFPROTO_BRIDGE);

	xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
	/* pass 1: compute required native size (state has no buffer) */
	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	if (ret < 0)
		goto out_unlock;

	pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
		tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
		xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));

	size64 = ret;
	newinfo->entries = vmalloc(size64);
	if (!newinfo->entries) {
		vfree(entries_tmp);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* pass 2: same walk, this time writing into the native buffer */
	memset(&state, 0, sizeof(state));
	state.buf_kern_start = newinfo->entries;
	state.buf_kern_len = size64;

	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	/* NOTE(review): later kernels turn this into a graceful error
	 * path; a BUG here is only safe if pass 2 truly cannot diverge.
	 */
	BUG_ON(ret < 0);	/* parses same data again */

	vfree(entries_tmp);
	tmp.entries_size = size64;

	/* translate the userland hook entry pointers by the cumulative
	 * growth recorded during pass 1.
	 */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		char __user *usrptr;
		if (tmp.hook_entry[i]) {
			unsigned int delta;
			usrptr = (char __user *) tmp.hook_entry[i];
			delta = usrptr - tmp.entries;
			usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
			tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
		}
	}

	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);

	/* on success, ownership of newinfo passes to the table */
	ret = do_replace_finish(net, &tmp, newinfo);
	if (ret == 0)
		return ret;
free_entries:
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
	return ret;
out_unlock:
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	goto free_entries;
}
2262
2263static int compat_update_counters(struct net *net, void __user *user,
2264				  unsigned int len)
2265{
2266	struct compat_ebt_replace hlp;
2267
2268	if (copy_from_user(&hlp, user, sizeof(hlp)))
2269		return -EFAULT;
2270
2271	/* try real handler in case userland supplied needed padding */
2272	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2273		return update_counters(net, user, len);
2274
2275	return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2276					hlp.num_counters, user, len);
2277}
2278
2279static int compat_do_ebt_set_ctl(struct sock *sk,
2280		int cmd, void __user *user, unsigned int len)
2281{
2282	int ret;
2283	struct net *net = sock_net(sk);
2284
2285	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2286		return -EPERM;
2287
2288	switch (cmd) {
2289	case EBT_SO_SET_ENTRIES:
2290		ret = compat_do_replace(net, user, len);
2291		break;
2292	case EBT_SO_SET_COUNTERS:
2293		ret = compat_update_counters(net, user, len);
2294		break;
2295	default:
2296		ret = -EINVAL;
2297  }
2298	return ret;
2299}
2300
/* getsockopt() dispatcher for 32 bit callers.  Holds ebt_mutex (via
 * find_table_lock) and the xt compat lock while translating table
 * data; both are released on the common exit path.
 */
static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
		void __user *user, int *len)
{
	int ret;
	struct compat_ebt_replace tmp;
	struct ebt_table *t;
	struct net *net = sock_net(sk);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* try real handler in case userland supplied needed padding */
	if ((cmd == EBT_SO_GET_INFO ||
	     cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
			return do_ebt_get_ctl(sk, cmd, user, len);

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* takes ebt_mutex on success; released at the bottom */
	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	xt_compat_lock(NFPROTO_BRIDGE);
	switch (cmd) {
	case EBT_SO_GET_INFO:
		tmp.nentries = t->private->nentries;
		/* recompute entries_size for the compat layout */
		ret = compat_table_info(t->private, &tmp);
		if (ret)
			goto out;
		tmp.valid_hooks = t->valid_hooks;

		/* *len == sizeof(tmp), checked above */
		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_INIT_INFO:
		tmp.nentries = t->table->nentries;
		tmp.entries_size = t->table->entries_size;
		tmp.valid_hooks = t->table->valid_hooks;

		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		/*
		 * try real handler first in case of userland-side padding.
		 * in case we are dealing with an 'ordinary' 32 bit binary
		 * without 64bit compatibility padding, this will fail right
		 * after copy_from_user when the *len argument is validated.
		 *
		 * the compat_ variant needs to do one pass over the kernel
		 * data set to adjust for size differences before it the check.
		 */
		if (copy_everything_to_user(t, user, len, cmd) == 0)
			ret = 0;
		else
			ret = compat_copy_everything_to_user(t, user, len, cmd);
		break;
	default:
		ret = -EINVAL;
	}
 out:
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	mutex_unlock(&ebt_mutex);
	return ret;
}
2375#endif
2376
/* sockopt registration: routes the EBT_SO_* get/set range (on any
 * PF_INET socket) to the handlers above, with compat variants for
 * 32 bit userland on 64 bit kernels.
 */
static struct nf_sockopt_ops ebt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= EBT_BASE_CTL,
	.set_optmax	= EBT_SO_SET_MAX + 1,
	.set		= do_ebt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ebt_set_ctl,
#endif
	.get_optmin	= EBT_BASE_CTL,
	.get_optmax	= EBT_SO_GET_MAX + 1,
	.get		= do_ebt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ebt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2393
2394static int __init ebtables_init(void)
2395{
2396	int ret;
2397
2398	ret = xt_register_target(&ebt_standard_target);
2399	if (ret < 0)
2400		return ret;
2401	ret = nf_register_sockopt(&ebt_sockopts);
2402	if (ret < 0) {
2403		xt_unregister_target(&ebt_standard_target);
2404		return ret;
2405	}
2406
2407	printk(KERN_INFO "Ebtables v2.0 registered\n");
2408	return 0;
2409}
2410
/* Module exit: tear down in reverse registration order — sockopt
 * interface first (so no new user requests arrive), then the
 * standard target.
 */
static void __exit ebtables_fini(void)
{
	nf_unregister_sockopt(&ebt_sockopts);
	xt_unregister_target(&ebt_standard_target);
	printk(KERN_INFO "Ebtables v2.0 unregistered\n");
}
2417
/* Exported for the per-table modules (ebtable_filter, ebtable_nat,
 * ebtable_broute) that register their tables and evaluate rules.
 */
EXPORT_SYMBOL(ebt_register_table);
EXPORT_SYMBOL(ebt_unregister_table);
EXPORT_SYMBOL(ebt_do_table);
module_init(ebtables_init);
module_exit(ebtables_fini);
MODULE_LICENSE("GPL");