Linux Audio

Check our new training course

Loading...
v3.1
   1/*
   2 *  ebtables
   3 *
   4 *  Author:
   5 *  Bart De Schuymer		<bdschuym@pandora.be>
   6 *
   7 *  ebtables.c,v 2.0, July, 2002
   8 *
   9 *  This code is strongly inspired by the iptables code which is
  10 *  Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
  11 *
  12 *  This program is free software; you can redistribute it and/or
  13 *  modify it under the terms of the GNU General Public License
  14 *  as published by the Free Software Foundation; either version
  15 *  2 of the License, or (at your option) any later version.
  16 */
  17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  18#include <linux/kmod.h>
  19#include <linux/module.h>
  20#include <linux/vmalloc.h>
  21#include <linux/netfilter/x_tables.h>
  22#include <linux/netfilter_bridge/ebtables.h>
  23#include <linux/spinlock.h>
  24#include <linux/mutex.h>
  25#include <linux/slab.h>
  26#include <asm/uaccess.h>
  27#include <linux/smp.h>
  28#include <linux/cpumask.h>
 
  29#include <net/sock.h>
  30/* needed for logical [in,out]-dev filtering */
  31#include "../br_private.h"
  32
/* Log an internal consistency problem (malformed userspace input or a
 * genuine kernel bug) to the kernel log. */
#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
					 "report to author: "format, ## args)
/* #define BUGPRINT(format, args...) */

/*
 * Each cpu has its own set of counters, so there is no need for write_lock in
 * the softirq
 * For reading or updating the counters, the user context needs to
 * get a write_lock
 */

/* The size of each set of counters is altered to get cache alignment */
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
/* byte size of one cache-aligned per-cpu slice of n counters */
#define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
/* start of the counter slice of the given cpu, within array c of n entries */
#define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
   COUNTER_OFFSET(n) * cpu))
  49
  50
  51
  52static DEFINE_MUTEX(ebt_mutex);
  53
  54#ifdef CONFIG_COMPAT
  55static void ebt_standard_compat_from_user(void *dst, const void *src)
  56{
  57	int v = *(compat_int_t *)src;
  58
  59	if (v >= 0)
  60		v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
  61	memcpy(dst, &v, sizeof(v));
  62}
  63
  64static int ebt_standard_compat_to_user(void __user *dst, const void *src)
  65{
  66	compat_int_t cv = *(int *)src;
  67
  68	if (cv >= 0)
  69		cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
  70	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
  71}
  72#endif
  73
  74
/* The built-in "standard" target; its verdict is handled directly in
 * ebt_do_table() (the target callback is left NULL). */
static struct xt_target ebt_standard_target = {
	.name       = "standard",
	.revision   = 0,
	.family     = NFPROTO_BRIDGE,
	.targetsize = sizeof(int),	/* just the verdict */
#ifdef CONFIG_COMPAT
	.compatsize = sizeof(compat_int_t),
	.compat_from_user = ebt_standard_compat_from_user,
	.compat_to_user =  ebt_standard_compat_to_user,
#endif
};
  86
  87static inline int
  88ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
  89	       struct xt_action_param *par)
  90{
  91	par->target   = w->u.watcher;
  92	par->targinfo = w->data;
  93	w->u.watcher->target(skb, par);
  94	/* watchers don't give a verdict */
  95	return 0;
  96}
  97
  98static inline int
  99ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
 100	     struct xt_action_param *par)
 101{
 102	par->match     = m->u.match;
 103	par->matchinfo = m->data;
 104	return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
 105}
 106
 107static inline int
 108ebt_dev_check(const char *entry, const struct net_device *device)
 109{
 110	int i = 0;
 111	const char *devname;
 112
 113	if (*entry == '\0')
 114		return 0;
 115	if (!device)
 116		return 1;
 117	devname = device->name;
 118	/* 1 is the wildcard token */
 119	while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
 120		i++;
 121	return (devname[i] != entry[i] && entry[i] != 1);
 122}
 123
/* NOTE: non-hygienic macro; it relies on a local 'e' (the rule) being in
 * scope and XORs the condition with the rule's inversion flag. */
#define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
/* process standard matches: protocol, in/out device, logical bridge
 * device and masked source/destination MAC.  Returns 1 if the frame
 * does NOT match rule e, 0 if it does. */
static inline int
ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
                const struct net_device *in, const struct net_device *out)
{
	const struct ethhdr *h = eth_hdr(skb);
	const struct net_bridge_port *p;
	__be16 ethproto;
	int verdict, i;

	/* a VLAN-accelerated frame is matched on the 802.1Q proto number */
	if (vlan_tx_tag_present(skb))
		ethproto = htons(ETH_P_8021Q);
	else
		ethproto = h->h_proto;

	if (e->bitmask & EBT_802_3) {
		/* 802.3 frames carry a length (< 1536) in the proto field */
		if (FWINV2(ntohs(ethproto) >= 1536, EBT_IPROTO))
			return 1;
	} else if (!(e->bitmask & EBT_NOPROTO) &&
	   FWINV2(e->ethproto != ethproto, EBT_IPROTO))
		return 1;

	if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
		return 1;
	if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
		return 1;
	/* rcu_read_lock()ed by nf_hook_slow */
	if (in && (p = br_port_get_rcu(in)) != NULL &&
	    FWINV2(ebt_dev_check(e->logical_in, p->br->dev), EBT_ILOGICALIN))
		return 1;
	if (out && (p = br_port_get_rcu(out)) != NULL &&
	    FWINV2(ebt_dev_check(e->logical_out, p->br->dev), EBT_ILOGICALOUT))
		return 1;

	if (e->bitmask & EBT_SOURCEMAC) {
		/* masked comparison of the 6 source MAC bytes */
		verdict = 0;
		for (i = 0; i < 6; i++)
			verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
			   e->sourcemsk[i];
		if (FWINV2(verdict != 0, EBT_ISOURCE) )
			return 1;
	}
	if (e->bitmask & EBT_DESTMAC) {
		/* masked comparison of the 6 destination MAC bytes */
		verdict = 0;
		for (i = 0; i < 6; i++)
			verdict |= (h->h_dest[i] ^ e->destmac[i]) &
			   e->destmsk[i];
		if (FWINV2(verdict != 0, EBT_IDEST) )
			return 1;
	}
	return 0;
}
 177
 178static inline __pure
 179struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
 180{
 181	return (void *)entry + entry->next_offset;
 182}
 183
/* Do some firewalling: run skb through the chain attached to the given
 * hook of the given table and return NF_ACCEPT or NF_DROP.  Jumps to
 * user defined chains are tracked on this cpu's chainstack (cs). */
unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
   const struct net_device *in, const struct net_device *out,
   struct ebt_table *table)
{
	int i, nentries;
	struct ebt_entry *point;
	struct ebt_counter *counter_base, *cb_base;
	const struct ebt_entry_target *t;
	int verdict, sp = 0;
	struct ebt_chainstack *cs;
	struct ebt_entries *chaininfo;
	const char *base;
	const struct ebt_table_info *private;
	struct xt_action_param acpar;

	acpar.family  = NFPROTO_BRIDGE;
	acpar.in      = in;
	acpar.out     = out;
	acpar.hotdrop = false;
	acpar.hooknum = hook;

	read_lock_bh(&table->lock);
	private = table->private;
	/* this cpu's slice of the per-cpu counter array */
	cb_base = COUNTER_BASE(private->counters, private->nentries,
	   smp_processor_id());
	if (private->chainstack)
		cs = private->chainstack[smp_processor_id()];
	else
		cs = NULL;
	chaininfo = private->hook_entry[hook];
	nentries = private->hook_entry[hook]->nentries;
	point = (struct ebt_entry *)(private->hook_entry[hook]->data);
	counter_base = cb_base + private->hook_entry[hook]->counter_offset;
	/* base for chain jumps */
	base = private->entries;
	i = 0;
	while (i < nentries) {
		if (ebt_basic_match(point, skb, in, out))
			goto letscontinue;

		if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
			goto letscontinue;
		if (acpar.hotdrop) {
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}

		/* increase counter */
		(*(counter_base + i)).pcnt++;
		(*(counter_base + i)).bcnt += skb->len;

		/* these should only watch: not modify, nor tell us
		   what to do with the packet */
		EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);

		t = (struct ebt_entry_target *)
		   (((char *)point) + point->target_offset);
		/* standard target: its verdict is stored inline, no callback */
		if (!t->u.target->target)
			verdict = ((struct ebt_standard_target *)t)->verdict;
		else {
			acpar.target   = t->u.target;
			acpar.targinfo = t->data;
			verdict = t->u.target->target(skb, &acpar);
		}
		if (verdict == EBT_ACCEPT) {
			read_unlock_bh(&table->lock);
			return NF_ACCEPT;
		}
		if (verdict == EBT_DROP) {
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}
		if (verdict == EBT_RETURN) {
letsreturn:
#ifdef CONFIG_NETFILTER_DEBUG
			if (sp == 0) {
				BUGPRINT("RETURN on base chain");
				/* act like this is EBT_CONTINUE */
				goto letscontinue;
			}
#endif
			sp--;
			/* put all the local variables right: resume the
			   calling chain where we left it */
			i = cs[sp].n;
			chaininfo = cs[sp].chaininfo;
			nentries = chaininfo->nentries;
			point = cs[sp].e;
			counter_base = cb_base +
			   chaininfo->counter_offset;
			continue;
		}
		if (verdict == EBT_CONTINUE)
			goto letscontinue;
#ifdef CONFIG_NETFILTER_DEBUG
		if (verdict < 0) {
			BUGPRINT("bogus standard verdict\n");
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}
#endif
		/* jump to a udc: push the current position and restart
		   at the start of the chain at offset 'verdict' */
		cs[sp].n = i + 1;
		cs[sp].chaininfo = chaininfo;
		cs[sp].e = ebt_next_entry(point);
		i = 0;
		chaininfo = (struct ebt_entries *) (base + verdict);
#ifdef CONFIG_NETFILTER_DEBUG
		if (chaininfo->distinguisher) {
			BUGPRINT("jump to non-chain\n");
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}
#endif
		nentries = chaininfo->nentries;
		point = (struct ebt_entry *)chaininfo->data;
		counter_base = cb_base + chaininfo->counter_offset;
		sp++;
		continue;
letscontinue:
		point = ebt_next_entry(point);
		i++;
	}

	/* end of chain without a verdict: apply the chain policy */
	if (chaininfo->policy == EBT_RETURN)
		goto letsreturn;
	if (chaininfo->policy == EBT_ACCEPT) {
		read_unlock_bh(&table->lock);
		return NF_ACCEPT;
	}
	read_unlock_bh(&table->lock);
	return NF_DROP;
}
 319
 320/* If it succeeds, returns element and locks mutex */
 321static inline void *
 322find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
 323   struct mutex *mutex)
 324{
 325	struct {
 326		struct list_head list;
 327		char name[EBT_FUNCTION_MAXNAMELEN];
 328	} *e;
 329
 330	*error = mutex_lock_interruptible(mutex);
 331	if (*error != 0)
 332		return NULL;
 333
 334	list_for_each_entry(e, head, list) {
 335		if (strcmp(e->name, name) == 0)
 336			return e;
 337	}
 338	*error = -ENOENT;
 339	mutex_unlock(mutex);
 340	return NULL;
 341}
 342
/* Like find_inlist_lock_noload(), but on a failed first lookup tries to
 * load the module "<prefix><name>" and looks up again. */
static void *
find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
   int *error, struct mutex *mutex)
{
	return try_then_request_module(
			find_inlist_lock_noload(head, name, error, mutex),
			"%s%s", prefix, name);
}
 351
/* Find the named ebtables table of @net, auto-loading "ebtable_<name>"
 * if needed; on success the table is returned with ebt_mutex held. */
static inline struct ebt_table *
find_table_lock(struct net *net, const char *name, int *error,
		struct mutex *mutex)
{
	return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
				"ebtable_", error, mutex);
}
 359
/* Validate one match extension of the entry in par->entryinfo: check it
 * fits in the room before watchers_offset, bind the xt_match (taking a
 * module reference) and run its checkentry hook.  On success *cnt is
 * incremented so cleanup on a later error knows how far we got. */
static inline int
ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
		unsigned int *cnt)
{
	const struct ebt_entry *e = par->entryinfo;
	struct xt_match *match;
	size_t left = ((char *)e + e->watchers_offset) - (char *)m;
	int ret;

	/* match header plus payload must fit before the watchers */
	if (left < sizeof(struct ebt_entry_match) ||
	    left - sizeof(struct ebt_entry_match) < m->match_size)
		return -EINVAL;

	match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
	if (IS_ERR(match))
		return PTR_ERR(match);
	m->u.match = match;

	par->match     = match;
	par->matchinfo = m->data;
	ret = xt_check_match(par, m->match_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		/* drop the reference xt_request_find_match took */
		module_put(match->me);
		return ret;
	}

	(*cnt)++;
	return 0;
}
 390
/* Validate one watcher extension of the entry in par->entryinfo: check
 * it fits in the room before target_offset, bind the xt_target (taking
 * a module reference) and run its checkentry hook.  *cnt counts the
 * watchers validated so far for cleanup on error. */
static inline int
ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
		  unsigned int *cnt)
{
	const struct ebt_entry *e = par->entryinfo;
	struct xt_target *watcher;
	size_t left = ((char *)e + e->target_offset) - (char *)w;
	int ret;

	/* watcher header plus payload must fit before the target */
	if (left < sizeof(struct ebt_entry_watcher) ||
	   left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
		return -EINVAL;

	watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
	if (IS_ERR(watcher))
		return PTR_ERR(watcher);
	w->u.watcher = watcher;

	par->target   = watcher;
	par->targinfo = w->data;
	ret = xt_check_target(par, w->watcher_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		/* drop the reference xt_request_find_target took */
		module_put(watcher->me);
		return ret;
	}

	(*cnt)++;
	return 0;
}
 421
/* First structural pass over the blob copied from userspace: walk the
 * chain headers and entries, translate the hook entry points announced
 * in @repl into kernel pointers in @newinfo, and verify that nothing
 * runs past entries_size.  Returns 0 or a negative errno. */
static int ebt_verify_pointers(const struct ebt_replace *repl,
			       struct ebt_table_info *newinfo)
{
	unsigned int limit = repl->entries_size;
	unsigned int valid_hooks = repl->valid_hooks;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		newinfo->hook_entry[i] = NULL;

	newinfo->entries_size = repl->entries_size;
	newinfo->nentries = repl->nentries;

	while (offset < limit) {
		size_t left = limit - offset;
		struct ebt_entry *e = (void *)newinfo->entries + offset;

		if (left < sizeof(unsigned int))
			break;

		/* does one of the announced hook entry points claim
		   this offset? */
		for (i = 0; i < NF_BR_NUMHOOKS; i++) {
			if ((valid_hooks & (1 << i)) == 0)
				continue;
			if ((char __user *)repl->hook_entry[i] ==
			     repl->entries + offset)
				break;
		}

		if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
			/* this is a chain header (struct ebt_entries) */
			if (e->bitmask != 0) {
				/* we make userspace set this right,
				   so there is no misunderstanding */
				BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
					 "in distinguisher\n");
				return -EINVAL;
			}
			if (i != NF_BR_NUMHOOKS)
				newinfo->hook_entry[i] = (struct ebt_entries *)e;
			if (left < sizeof(struct ebt_entries))
				break;
			offset += sizeof(struct ebt_entries);
		} else {
			/* this is a real rule */
			if (left < sizeof(struct ebt_entry))
				break;
			if (left < e->next_offset)
				break;
			if (e->next_offset < sizeof(struct ebt_entry))
				return -EINVAL;
			offset += e->next_offset;
		}
	}
	if (offset != limit) {
		BUGPRINT("entries_size too small\n");
		return -EINVAL;
	}

	/* check if all valid hooks have a chain */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if (!newinfo->hook_entry[i] &&
		   (valid_hooks & (1 << i))) {
			BUGPRINT("Valid hook without chain\n");
			return -EINVAL;
		}
	}
	return 0;
}
 489
/*
 * this one is very careful, as it is the first function
 * to parse the userspace data
 *
 * Per entry: validates chain headers and per-rule offsets while
 * counting rules (*cnt within the current chain, *totalcnt overall)
 * and user defined chains (*udc_cnt).  *n carries the nentries value
 * the current chain header announced.
 */
static inline int
ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
   const struct ebt_table_info *newinfo,
   unsigned int *n, unsigned int *cnt,
   unsigned int *totalcnt, unsigned int *udc_cnt)
{
	int i;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if ((void *)e == (void *)newinfo->hook_entry[i])
			break;
	}
	/* beginning of a new chain
	   if i == NF_BR_NUMHOOKS it must be a user defined chain */
	if (i != NF_BR_NUMHOOKS || !e->bitmask) {
		/* this checks if the previous chain has as many entries
		   as it said it has */
		if (*n != *cnt) {
			BUGPRINT("nentries does not equal the nr of entries "
				 "in the chain\n");
			return -EINVAL;
		}
		if (((struct ebt_entries *)e)->policy != EBT_DROP &&
		   ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
			/* only RETURN from udc */
			if (i != NF_BR_NUMHOOKS ||
			   ((struct ebt_entries *)e)->policy != EBT_RETURN) {
				BUGPRINT("bad policy\n");
				return -EINVAL;
			}
		}
		if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
			(*udc_cnt)++;
		if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
			BUGPRINT("counter_offset != totalcnt");
			return -EINVAL;
		}
		*n = ((struct ebt_entries *)e)->nentries;
		*cnt = 0;
		return 0;
	}
	/* a plain old entry, heh */
	if (sizeof(struct ebt_entry) > e->watchers_offset ||
	   e->watchers_offset > e->target_offset ||
	   e->target_offset >= e->next_offset) {
		BUGPRINT("entry offsets not in right order\n");
		return -EINVAL;
	}
	/* this is not checked anywhere else */
	if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
		BUGPRINT("target size too small\n");
		return -EINVAL;
	}
	(*cnt)++;
	(*totalcnt)++;
	return 0;
}
 551
/* Per user defined chain bookkeeping used during loop checking. */
struct ebt_cl_stack
{
	struct ebt_chainstack cs;	/* position to resume after the udc */
	int from;			/* index of the calling chain, -1 = base chain */
	unsigned int hookmask;		/* base chains this udc is reachable from */
};
 558
/*
 * we need these positions to check that the jumps to a different part of the
 * entries is a jump to the beginning of a new chain.
 *
 * Records in udc[] the chain header of each user defined chain (a chain
 * start not claimed by any hook entry); *n is the running udc index.
 */
static inline int
ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
   unsigned int *n, struct ebt_cl_stack *udc)
{
	int i;

	/* we're only interested in chain starts */
	if (e->bitmask)
		return 0;
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
			break;
	}
	/* only care about udc */
	if (i != NF_BR_NUMHOOKS)
		return 0;

	udc[*n].cs.chaininfo = (struct ebt_entries *)e;
	/* these initialisations are depended on later in check_chainloops() */
	udc[*n].cs.n = 0;
	udc[*n].hookmask = 0;

	(*n)++;
	return 0;
}
 588
 589static inline int
 590ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
 591{
 592	struct xt_mtdtor_param par;
 593
 594	if (i && (*i)-- == 0)
 595		return 1;
 596
 597	par.net       = net;
 598	par.match     = m->u.match;
 599	par.matchinfo = m->data;
 600	par.family    = NFPROTO_BRIDGE;
 601	if (par.match->destroy != NULL)
 602		par.match->destroy(&par);
 603	module_put(par.match->me);
 604	return 0;
 605}
 606
 607static inline int
 608ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
 609{
 610	struct xt_tgdtor_param par;
 611
 612	if (i && (*i)-- == 0)
 613		return 1;
 614
 615	par.net      = net;
 616	par.target   = w->u.watcher;
 617	par.targinfo = w->data;
 618	par.family   = NFPROTO_BRIDGE;
 619	if (par.target->destroy != NULL)
 620		par.target->destroy(&par);
 621	module_put(par.target->me);
 622	return 0;
 623}
 624
/* Release everything attached to one rule: all watchers, all matches
 * and the target, dropping each module reference.  A non-NULL *cnt
 * limits how many entries are cleaned (1 stops the iteration). */
static inline int
ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
{
	struct xt_tgdtor_param par;
	struct ebt_entry_target *t;

	/* chain headers have bitmask == 0 and own no extensions */
	if (e->bitmask == 0)
		return 0;
	/* we're done */
	if (cnt && (*cnt)-- == 0)
		return 1;
	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);

	par.net      = net;
	par.target   = t->u.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_BRIDGE;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	return 0;
}
 649
/* Full per-rule validation: flag sanity, hook mask computation, and
 * binding + checkentry of all matches, watchers and the target.  On
 * success *cnt is incremented; on error all extensions bound so far
 * for this rule are unwound. */
static inline int
ebt_check_entry(struct ebt_entry *e, struct net *net,
   const struct ebt_table_info *newinfo,
   const char *name, unsigned int *cnt,
   struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
{
	struct ebt_entry_target *t;
	struct xt_target *target;
	unsigned int i, j, hook = 0, hookmask = 0;
	size_t gap;
	int ret;
	struct xt_mtchk_param mtpar;
	struct xt_tgchk_param tgpar;

	/* don't mess with the struct ebt_entries */
	if (e->bitmask == 0)
		return 0;

	if (e->bitmask & ~EBT_F_MASK) {
		BUGPRINT("Unknown flag for bitmask\n");
		return -EINVAL;
	}
	if (e->invflags & ~EBT_INV_MASK) {
		BUGPRINT("Unknown flag for inv bitmask\n");
		return -EINVAL;
	}
	if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
		BUGPRINT("NOPROTO & 802_3 not allowed\n");
		return -EINVAL;
	}
	/* what hook do we belong to? the last hook entry before us */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if (!newinfo->hook_entry[i])
			continue;
		if ((char *)newinfo->hook_entry[i] < (char *)e)
			hook = i;
		else
			break;
	}
	/* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
	   a base chain */
	if (i < NF_BR_NUMHOOKS)
		hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
	else {
		/* in a udc: inherit the reachability mask computed by
		   check_chainloops() for the enclosing udc */
		for (i = 0; i < udc_cnt; i++)
			if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
				break;
		if (i == 0)
			hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
		else
			hookmask = cl_s[i - 1].hookmask;
	}
	i = 0;

	mtpar.net	= tgpar.net       = net;
	mtpar.table     = tgpar.table     = name;
	mtpar.entryinfo = tgpar.entryinfo = e;
	mtpar.hook_mask = tgpar.hook_mask = hookmask;
	mtpar.family    = tgpar.family    = NFPROTO_BRIDGE;
	ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
	if (ret != 0)
		goto cleanup_matches;
	j = 0;
	ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
	if (ret != 0)
		goto cleanup_watchers;
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
	gap = e->next_offset - e->target_offset;

	target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto cleanup_watchers;
	}

	t->u.target = target;
	if (t->u.target == &ebt_standard_target) {
		if (gap < sizeof(struct ebt_standard_target)) {
			BUGPRINT("Standard target size too big\n");
			ret = -EFAULT;
			goto cleanup_watchers;
		}
		if (((struct ebt_standard_target *)t)->verdict <
		   -NUM_STANDARD_TARGETS) {
			BUGPRINT("Invalid standard target\n");
			ret = -EFAULT;
			goto cleanup_watchers;
		}
	} else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
		/* target payload doesn't fit before next_offset */
		module_put(t->u.target->me);
		ret = -EFAULT;
		goto cleanup_watchers;
	}

	tgpar.target   = target;
	tgpar.targinfo = t->data;
	ret = xt_check_target(&tgpar, t->target_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		module_put(target->me);
		goto cleanup_watchers;
	}
	(*cnt)++;
	return 0;
cleanup_watchers:
	/* unwind only the j watchers / i matches validated above */
	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
cleanup_matches:
	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
	return ret;
}
 760
/*
 * checks for loops and sets the hook mask for udc
 * the hook mask for udc tells us from which base chains the udc can be
 * accessed. This mask is a parameter to the check() functions of the extensions
 *
 * Iterative depth-first walk from the base chain for hooknr; cl_s[]
 * serves as the explicit call stack (cs.n != 0 marks a chain as being
 * on the current path, which is the loop test).  Returns 0 or -1.
 */
static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
   unsigned int udc_cnt, unsigned int hooknr, char *base)
{
	int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
	const struct ebt_entry *e = (struct ebt_entry *)chain->data;
	const struct ebt_entry_target *t;

	while (pos < nentries || chain_nr != -1) {
		/* end of udc, go back one 'recursion' step */
		if (pos == nentries) {
			/* put back values of the time when this chain was called */
			e = cl_s[chain_nr].cs.e;
			if (cl_s[chain_nr].from != -1)
				nentries =
				cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
			else
				nentries = chain->nentries;
			pos = cl_s[chain_nr].cs.n;
			/* make sure we won't see a loop that isn't one */
			cl_s[chain_nr].cs.n = 0;
			chain_nr = cl_s[chain_nr].from;
			if (pos == nentries)
				continue;
		}
		t = (struct ebt_entry_target *)
		   (((char *)e) + e->target_offset);
		/* only standard targets can jump */
		if (strcmp(t->u.name, EBT_STANDARD_TARGET))
			goto letscontinue;
		if (e->target_offset + sizeof(struct ebt_standard_target) >
		   e->next_offset) {
			BUGPRINT("Standard target size too big\n");
			return -1;
		}
		verdict = ((struct ebt_standard_target *)t)->verdict;
		if (verdict >= 0) { /* jump to another chain */
			struct ebt_entries *hlp2 =
			   (struct ebt_entries *)(base + verdict);
			for (i = 0; i < udc_cnt; i++)
				if (hlp2 == cl_s[i].cs.chaininfo)
					break;
			/* bad destination or loop */
			if (i == udc_cnt) {
				BUGPRINT("bad destination\n");
				return -1;
			}
			if (cl_s[i].cs.n) {
				BUGPRINT("loop\n");
				return -1;
			}
			/* already verified from this hook: skip the descent */
			if (cl_s[i].hookmask & (1 << hooknr))
				goto letscontinue;
			/* this can't be 0, so the loop test is correct */
			cl_s[i].cs.n = pos + 1;
			pos = 0;
			cl_s[i].cs.e = ebt_next_entry(e);
			e = (struct ebt_entry *)(hlp2->data);
			nentries = hlp2->nentries;
			cl_s[i].from = chain_nr;
			chain_nr = i;
			/* this udc is accessible from the base chain for hooknr */
			cl_s[i].hookmask |= (1 << hooknr);
			continue;
		}
letscontinue:
		e = ebt_next_entry(e);
		pos++;
	}
	return 0;
}
 835
/* do the parsing of the table/chains/entries/matches/watchers/targets, heh
 *
 * Full validation of @newinfo after ebt_verify_pointers(): chain order,
 * entry counts, per-cpu chainstack allocation, loop checking and
 * per-rule extension binding.  Returns 0 or a negative errno; on errno
 * newinfo->chainstack may be allocated and is freed by the caller. */
static int translate_table(struct net *net, const char *name,
			   struct ebt_table_info *newinfo)
{
	unsigned int i, j, k, udc_cnt;
	int ret;
	struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */

	i = 0;
	while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
		i++;
	if (i == NF_BR_NUMHOOKS) {
		BUGPRINT("No valid hooks specified\n");
		return -EINVAL;
	}
	if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
		BUGPRINT("Chains don't start at beginning\n");
		return -EINVAL;
	}
	/* make sure chains are ordered after each other in same order
	   as their corresponding hooks */
	for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
		if (!newinfo->hook_entry[j])
			continue;
		if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
			BUGPRINT("Hook order must be followed\n");
			return -EINVAL;
		}
		i = j;
	}

	/* do some early checkings and initialize some things */
	i = 0; /* holds the expected nr. of entries for the chain */
	j = 0; /* holds the up to now counted entries for the chain */
	k = 0; /* holds the total nr. of entries, should equal
		  newinfo->nentries afterwards */
	udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
	   ebt_check_entry_size_and_hooks, newinfo,
	   &i, &j, &k, &udc_cnt);

	if (ret != 0)
		return ret;

	if (i != j) {
		BUGPRINT("nentries does not equal the nr of entries in the "
			 "(last) chain\n");
		return -EINVAL;
	}
	if (k != newinfo->nentries) {
		BUGPRINT("Total nentries is wrong\n");
		return -EINVAL;
	}

	/* get the location of the udc, put them in an array
	   while we're at it, allocate the chainstack */
	if (udc_cnt) {
		/* this will get free'd in do_replace()/ebt_register_table()
		   if an error occurs */
		newinfo->chainstack =
			vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
		if (!newinfo->chainstack)
			return -ENOMEM;
		for_each_possible_cpu(i) {
			newinfo->chainstack[i] =
			  vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
			if (!newinfo->chainstack[i]) {
				/* undo the per-cpu allocations made so far */
				while (i)
					vfree(newinfo->chainstack[--i]);
				vfree(newinfo->chainstack);
				newinfo->chainstack = NULL;
				return -ENOMEM;
			}
		}

		cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
		if (!cl_s)
			return -ENOMEM;
		i = 0; /* the i'th udc */
		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
		   ebt_get_udc_positions, newinfo, &i, cl_s);
		/* sanity check */
		if (i != udc_cnt) {
			BUGPRINT("i != udc_cnt\n");
			vfree(cl_s);
			return -EFAULT;
		}
	}

	/* Check for loops */
	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		if (newinfo->hook_entry[i])
			if (check_chainloops(newinfo->hook_entry[i],
			   cl_s, udc_cnt, i, newinfo->entries)) {
				vfree(cl_s);
				return -EINVAL;
			}

	/* we now know the following (along with E=mc²):
	   - the nr of entries in each chain is right
	   - the size of the allocated space is right
	   - all valid hooks have a corresponding chain
	   - there are no loops
	   - wrong data can still be on the level of a single entry
	   - could be there are jumps to places that are not the
	     beginning of a chain. This can only occur in chains that
	     are not accessible from any base chains, so we don't care. */

	/* used to know what we need to clean up if something goes wrong */
	i = 0;
	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
	   ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
	if (ret != 0) {
		/* unwind only the i entries that were fully checked */
		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
				  ebt_cleanup_entry, net, &i);
	}
	vfree(cl_s);
	return ret;
}
 955
/* called under write_lock
 * Fold all per-cpu counter slices of @oldcounters into the flat
 * @counters array of @nentries entries (cpu 0's slice is the base). */
static void get_counters(const struct ebt_counter *oldcounters,
   struct ebt_counter *counters, unsigned int nentries)
{
	int i, cpu;
	struct ebt_counter *counter_base;

	/* counters of cpu 0 */
	memcpy(counters, oldcounters,
	       sizeof(struct ebt_counter) * nentries);

	/* add other counters to those of cpu 0 */
	for_each_possible_cpu(cpu) {
		if (cpu == 0)
			continue;
		counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
		for (i = 0; i < nentries; i++) {
			counters[i].pcnt += counter_base[i].pcnt;
			counters[i].bcnt += counter_base[i].bcnt;
		}
	}
}
 978
/* Validate @newinfo (already copied from userspace), swap it in as the
 * active ruleset of the table named in @repl, copy the old counters
 * back to userspace if requested and free the old table.  On error all
 * resources attached to @newinfo are released here. */
static int do_replace_finish(struct net *net, struct ebt_replace *repl,
			      struct ebt_table_info *newinfo)
{
	int ret, i;
	struct ebt_counter *counterstmp = NULL;
	/* used to be able to unlock earlier */
	struct ebt_table_info *table;
	struct ebt_table *t;

	/* the user wants counters back
	   the check on the size is done later, when we have the lock */
	if (repl->num_counters) {
		unsigned long size = repl->num_counters * sizeof(*counterstmp);
		counterstmp = vmalloc(size);
		if (!counterstmp)
			return -ENOMEM;
	}

	newinfo->chainstack = NULL;
	ret = ebt_verify_pointers(repl, newinfo);
	if (ret != 0)
		goto free_counterstmp;

	ret = translate_table(net, repl->name, newinfo);

	if (ret != 0)
		goto free_counterstmp;

	t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
	if (!t) {
		ret = -ENOENT;
		goto free_iterate;
	}

	/* the table doesn't like it */
	if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
		goto free_unlock;

	if (repl->num_counters && repl->num_counters != t->private->nentries) {
		BUGPRINT("Wrong nr. of counters requested\n");
		ret = -EINVAL;
		goto free_unlock;
	}

	/* we have the mutex lock, so no danger in reading this pointer */
	table = t->private;
	/* make sure the table can only be rmmod'ed if it contains no rules */
	if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
		ret = -ENOENT;
		goto free_unlock;
	} else if (table->nentries && !newinfo->nentries)
		module_put(t->me);
	/* we need an atomic snapshot of the counters */
	write_lock_bh(&t->lock);
	if (repl->num_counters)
		get_counters(t->private->counters, counterstmp,
		   t->private->nentries);

	t->private = newinfo;
	write_unlock_bh(&t->lock);
	mutex_unlock(&ebt_mutex);
	/* so, a user can change the chains while having messed up her counter
	   allocation. Only reason why this is done is because this way the lock
	   is held only once, while this doesn't bring the kernel into a
	   dangerous state. */
	if (repl->num_counters &&
	   copy_to_user(repl->counters, counterstmp,
	   repl->num_counters * sizeof(struct ebt_counter))) {
		ret = -EFAULT;
	}
	else
		ret = 0;

	/* decrease module count and free resources */
	EBT_ENTRY_ITERATE(table->entries, table->entries_size,
			  ebt_cleanup_entry, net, NULL);

	vfree(table->entries);
	if (table->chainstack) {
		for_each_possible_cpu(i)
			vfree(table->chainstack[i]);
		vfree(table->chainstack);
	}
	vfree(table);

	vfree(counterstmp);
	return ret;

free_unlock:
	mutex_unlock(&ebt_mutex);
free_iterate:
	EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
			  ebt_cleanup_entry, net, NULL);
free_counterstmp:
	vfree(counterstmp);
	/* can be initialized in translate_table() */
	if (newinfo->chainstack) {
		for_each_possible_cpu(i)
			vfree(newinfo->chainstack[i]);
		vfree(newinfo->chainstack);
	}
	return ret;
}
1082
/* replace the table */
/* EBT_SO_SET_ENTRIES handler: copy the ebt_replace header and the entries
 * blob from userspace, sanity-check the announced sizes (including
 * multiplication overflow of the per-cpu counter area), then let
 * do_replace_finish() validate and swap in the new ruleset.
 * Returns 0 or a negative errno.
 */
static int do_replace(struct net *net, const void __user *user,
		      unsigned int len)
{
	int ret, countersize;
	struct ebt_table_info *newinfo;
	struct ebt_replace tmp;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* total option length must cover header plus the entries blob */
	if (len != sizeof(tmp) + tmp.entries_size) {
		BUGPRINT("Wrong len argument\n");
		return -EINVAL;
	}

	if (tmp.entries_size == 0) {
		BUGPRINT("Entries_size never zero\n");
		return -EINVAL;
	}
	/* overflow check */
	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
		return -ENOMEM;

	/* user-supplied table name may lack a NUL terminator */
	tmp.name[sizeof(tmp.name) - 1] = 0;

	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	if (!newinfo)
		return -ENOMEM;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	newinfo->entries = vmalloc(tmp.entries_size);
	if (!newinfo->entries) {
		ret = -ENOMEM;
		goto free_newinfo;
	}
	if (copy_from_user(
	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
		BUGPRINT("Couldn't copy entries from userspace\n");
		ret = -EFAULT;
		goto free_entries;
	}

	/* on success the old ruleset was freed inside do_replace_finish();
	 * on failure we still own (and free) the new blob */
	ret = do_replace_finish(net, &tmp, newinfo);
	if (ret == 0)
		return ret;
free_entries:
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
	return ret;
}
1141
/* Register a kernel-provided table template with the given namespace.
 *
 * The template (@input_table) is duplicated with kmemdup() so a single
 * definition can be registered in multiple namespaces; its initial
 * ruleset is copied, translated and validated before the table is
 * published on net->xt.tables[NFPROTO_BRIDGE].
 *
 * Returns the newly registered table, or an ERR_PTR() on failure.
 */
struct ebt_table *
ebt_register_table(struct net *net, const struct ebt_table *input_table)
{
	struct ebt_table_info *newinfo;
	struct ebt_table *t, *table;
	struct ebt_replace_kernel *repl;
	int ret, i, countersize;
	void *p;

	if (input_table == NULL || (repl = input_table->table) == NULL ||
	    repl->entries == NULL || repl->entries_size == 0 ||
	    repl->counters != NULL || input_table->private != NULL) {
		BUGPRINT("Bad table data for ebt_register_table!!!\n");
		return ERR_PTR(-EINVAL);
	}

	/* Don't add one table to multiple lists. */
	table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	ret = -ENOMEM;
	if (!newinfo)
		goto free_table;

	p = vmalloc(repl->entries_size);
	if (!p)
		goto free_newinfo;

	memcpy(p, repl->entries, repl->entries_size);
	newinfo->entries = p;

	newinfo->entries_size = repl->entries_size;
	newinfo->nentries = repl->nentries;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	/* fill in newinfo and parse the entries */
	newinfo->chainstack = NULL;
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		/* rebase each valid hook pointer onto the copied blob */
		if ((repl->valid_hooks & (1 << i)) == 0)
			newinfo->hook_entry[i] = NULL;
		else
			newinfo->hook_entry[i] = p +
				((char *)repl->hook_entry[i] - repl->entries);
	}
	ret = translate_table(net, repl->name, newinfo);
	if (ret != 0) {
		BUGPRINT("Translate_table failed\n");
		goto free_chainstack;
	}

	if (table->check && table->check(newinfo, table->valid_hooks)) {
		BUGPRINT("The table doesn't like its own initial data, lol\n");
		ret = -EINVAL;
		goto free_chainstack;
	}

	table->private = newinfo;
	rwlock_init(&table->lock);
	ret = mutex_lock_interruptible(&ebt_mutex);
	if (ret != 0)
		goto free_chainstack;

	/* reject duplicate table names within this namespace */
	list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			BUGPRINT("Table name already exists\n");
			goto free_unlock;
		}
	}

	/* Hold a reference count if the chains aren't empty */
	if (newinfo->nentries && !try_module_get(table->me)) {
		ret = -ENOENT;
		goto free_unlock;
	}
	list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
	mutex_unlock(&ebt_mutex);
	return table;
free_unlock:
	mutex_unlock(&ebt_mutex);
free_chainstack:
	if (newinfo->chainstack) {
		for_each_possible_cpu(i)
			vfree(newinfo->chainstack[i]);
		vfree(newinfo->chainstack);
	}
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
free_table:
	kfree(table);
out:
	return ERR_PTR(ret);
}
1243
1244void ebt_unregister_table(struct net *net, struct ebt_table *table)
 
1245{
1246	int i;
1247
1248	if (!table) {
1249		BUGPRINT("Request to unregister NULL table!!!\n");
1250		return;
1251	}
1252	mutex_lock(&ebt_mutex);
1253	list_del(&table->list);
1254	mutex_unlock(&ebt_mutex);
1255	EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
1256			  ebt_cleanup_entry, net, NULL);
1257	if (table->private->nentries)
1258		module_put(table->me);
1259	vfree(table->private->entries);
1260	if (table->private->chainstack) {
1261		for_each_possible_cpu(i)
1262			vfree(table->private->chainstack[i]);
1263		vfree(table->private->chainstack);
1264	}
1265	vfree(table->private);
1266	kfree(table);
1267}
1268
1269/* userspace just supplied us with counters */
1270static int do_update_counters(struct net *net, const char *name,
1271				struct ebt_counter __user *counters,
1272				unsigned int num_counters,
1273				const void __user *user, unsigned int len)
1274{
1275	int i, ret;
1276	struct ebt_counter *tmp;
1277	struct ebt_table *t;
1278
1279	if (num_counters == 0)
1280		return -EINVAL;
1281
1282	tmp = vmalloc(num_counters * sizeof(*tmp));
1283	if (!tmp)
1284		return -ENOMEM;
1285
1286	t = find_table_lock(net, name, &ret, &ebt_mutex);
1287	if (!t)
1288		goto free_tmp;
1289
1290	if (num_counters != t->private->nentries) {
1291		BUGPRINT("Wrong nr of counters\n");
1292		ret = -EINVAL;
1293		goto unlock_mutex;
1294	}
1295
1296	if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
1297		ret = -EFAULT;
1298		goto unlock_mutex;
1299	}
1300
1301	/* we want an atomic add of the counters */
1302	write_lock_bh(&t->lock);
1303
1304	/* we add to the counters of the first cpu */
1305	for (i = 0; i < num_counters; i++) {
1306		t->private->counters[i].pcnt += tmp[i].pcnt;
1307		t->private->counters[i].bcnt += tmp[i].bcnt;
1308	}
1309
1310	write_unlock_bh(&t->lock);
1311	ret = 0;
1312unlock_mutex:
1313	mutex_unlock(&ebt_mutex);
1314free_tmp:
1315	vfree(tmp);
1316	return ret;
1317}
1318
1319static int update_counters(struct net *net, const void __user *user,
1320			    unsigned int len)
1321{
1322	struct ebt_replace hlp;
1323
1324	if (copy_from_user(&hlp, user, sizeof(hlp)))
1325		return -EFAULT;
1326
1327	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1328		return -EINVAL;
1329
1330	return do_update_counters(net, hlp.name, hlp.counters,
1331				hlp.num_counters, user, len);
1332}
1333
1334static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1335    const char *base, char __user *ubase)
 
1336{
1337	char __user *hlp = ubase + ((char *)m - base);
1338	if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
 
 
 
 
 
 
 
 
 
1339		return -EFAULT;
 
1340	return 0;
1341}
1342
1343static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1344    const char *base, char __user *ubase)
1345{
1346	char __user *hlp = ubase + ((char *)w - base);
1347	if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
1348		return -EFAULT;
1349	return 0;
1350}
1351
1352static inline int
1353ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
 
 
 
 
 
 
 
 
 
1354{
1355	int ret;
1356	char __user *hlp;
1357	const struct ebt_entry_target *t;
1358
1359	if (e->bitmask == 0)
 
 
 
 
1360		return 0;
 
 
 
 
1361
1362	hlp = ubase + (((char *)e + e->target_offset) - base);
1363	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
1364
1365	ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
1366	if (ret != 0)
1367		return ret;
1368	ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
1369	if (ret != 0)
1370		return ret;
1371	if (copy_to_user(hlp, t->u.target->name, EBT_FUNCTION_MAXNAMELEN))
1372		return -EFAULT;
 
 
 
 
1373	return 0;
1374}
1375
1376static int copy_counters_to_user(struct ebt_table *t,
1377				  const struct ebt_counter *oldcounters,
1378				  void __user *user, unsigned int num_counters,
1379				  unsigned int nentries)
1380{
1381	struct ebt_counter *counterstmp;
1382	int ret = 0;
1383
1384	/* userspace might not need the counters */
1385	if (num_counters == 0)
1386		return 0;
1387
1388	if (num_counters != nentries) {
1389		BUGPRINT("Num_counters wrong\n");
1390		return -EINVAL;
1391	}
1392
1393	counterstmp = vmalloc(nentries * sizeof(*counterstmp));
1394	if (!counterstmp)
1395		return -ENOMEM;
1396
1397	write_lock_bh(&t->lock);
1398	get_counters(oldcounters, counterstmp, nentries);
1399	write_unlock_bh(&t->lock);
1400
1401	if (copy_to_user(user, counterstmp,
1402	   nentries * sizeof(struct ebt_counter)))
1403		ret = -EFAULT;
1404	vfree(counterstmp);
1405	return ret;
1406}
1407
/* called with ebt_mutex locked */
/* Dump either the live ruleset (EBT_SO_GET_ENTRIES) or the pristine
 * registration template (EBT_SO_GET_INIT_ENTRIES) to userspace:
 * counters first, then the raw entries blob, then overwrite the kernel
 * extension pointers embedded in the blob with their names.
 */
static int copy_everything_to_user(struct ebt_table *t, void __user *user,
    const int *len, int cmd)
{
	struct ebt_replace tmp;
	const struct ebt_counter *oldcounters;
	unsigned int entries_size, nentries;
	int ret;
	char *entries;

	if (cmd == EBT_SO_GET_ENTRIES) {
		entries_size = t->private->entries_size;
		nentries = t->private->nentries;
		entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		entries_size = t->table->entries_size;
		nentries = t->table->nentries;
		entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* userspace must have announced exactly the size we will write */
	if (*len != sizeof(struct ebt_replace) + entries_size +
	   (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0))
		return -EINVAL;

	if (tmp.nentries != nentries) {
		BUGPRINT("Nentries wrong\n");
		return -EINVAL;
	}

	if (tmp.entries_size != entries_size) {
		BUGPRINT("Wrong size\n");
		return -EINVAL;
	}

	ret = copy_counters_to_user(t, oldcounters, tmp.counters,
					tmp.num_counters, nentries);
	if (ret)
		return ret;

	if (copy_to_user(tmp.entries, entries, entries_size)) {
		BUGPRINT("Couldn't copy entries to userspace\n");
		return -EFAULT;
	}
	/* set the match/watcher/target names right */
	return EBT_ENTRY_ITERATE(entries, entries_size,
	   ebt_make_names, entries, tmp.entries);
}
1460
1461static int do_ebt_set_ctl(struct sock *sk,
1462	int cmd, void __user *user, unsigned int len)
1463{
1464	int ret;
 
1465
1466	if (!capable(CAP_NET_ADMIN))
1467		return -EPERM;
1468
1469	switch(cmd) {
1470	case EBT_SO_SET_ENTRIES:
1471		ret = do_replace(sock_net(sk), user, len);
1472		break;
1473	case EBT_SO_SET_COUNTERS:
1474		ret = update_counters(sock_net(sk), user, len);
1475		break;
1476	default:
1477		ret = -EINVAL;
1478	}
1479	return ret;
1480}
1481
1482static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1483{
1484	int ret;
1485	struct ebt_replace tmp;
1486	struct ebt_table *t;
 
1487
1488	if (!capable(CAP_NET_ADMIN))
1489		return -EPERM;
1490
1491	if (copy_from_user(&tmp, user, sizeof(tmp)))
1492		return -EFAULT;
1493
1494	t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
 
 
1495	if (!t)
1496		return ret;
1497
1498	switch(cmd) {
1499	case EBT_SO_GET_INFO:
1500	case EBT_SO_GET_INIT_INFO:
1501		if (*len != sizeof(struct ebt_replace)){
1502			ret = -EINVAL;
1503			mutex_unlock(&ebt_mutex);
1504			break;
1505		}
1506		if (cmd == EBT_SO_GET_INFO) {
1507			tmp.nentries = t->private->nentries;
1508			tmp.entries_size = t->private->entries_size;
1509			tmp.valid_hooks = t->valid_hooks;
1510		} else {
1511			tmp.nentries = t->table->nentries;
1512			tmp.entries_size = t->table->entries_size;
1513			tmp.valid_hooks = t->table->valid_hooks;
1514		}
1515		mutex_unlock(&ebt_mutex);
1516		if (copy_to_user(user, &tmp, *len) != 0){
1517			BUGPRINT("c2u Didn't work\n");
1518			ret = -EFAULT;
1519			break;
1520		}
1521		ret = 0;
1522		break;
1523
1524	case EBT_SO_GET_ENTRIES:
1525	case EBT_SO_GET_INIT_ENTRIES:
1526		ret = copy_everything_to_user(t, user, len, cmd);
1527		mutex_unlock(&ebt_mutex);
1528		break;
1529
1530	default:
1531		mutex_unlock(&ebt_mutex);
1532		ret = -EINVAL;
1533	}
1534
1535	return ret;
1536}
1537
1538#ifdef CONFIG_COMPAT
/* 32 bit-userspace compatibility definitions. */
/* 32-bit layout of struct ebt_replace: identical field order, but all
 * userspace pointers are compat_uptr_t so the structure matches what a
 * 32-bit ebtables binary passes through the socket option interface.
 */
struct compat_ebt_replace {
	char name[EBT_TABLE_MAXNAMELEN];
	compat_uint_t valid_hooks;
	compat_uint_t nentries;
	compat_uint_t entries_size;
	/* start of the chains */
	compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
	/* nr of counters userspace expects back */
	compat_uint_t num_counters;
	/* where the kernel will put the old counters. */
	compat_uptr_t counters;
	compat_uptr_t entries;
};
1553
/* struct ebt_entry_match, _target and _watcher have same layout */
/* 32-bit on-the-wire header shared by matches, watchers and targets:
 * an extension name (or, kernel-side, a pointer) followed by the payload
 * size and the variable-length payload itself.
 */
struct compat_ebt_entry_mwt {
	union {
		char name[EBT_FUNCTION_MAXNAMELEN];
		compat_uptr_t ptr;
	} u;
	compat_uint_t match_size;
	compat_uint_t data[0];	/* old-style flexible array: payload */
};
1563
/* account for possible padding between match_size and ->data */
/* The native (64-bit) struct ebt_entry_match is XT_ALIGN()ed and can be
 * larger than the 32-bit compat header; the difference must be inserted
 * as zero padding when translating.  The BUILD_BUG_ON guards the
 * assumption that the native header is never the smaller one.
 */
static int ebt_compat_entry_padsize(void)
{
	BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
	return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
}
1572
1573static int ebt_compat_match_offset(const struct xt_match *match,
1574				   unsigned int userlen)
1575{
1576	/*
1577	 * ebt_among needs special handling. The kernel .matchsize is
1578	 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1579	 * value is expected.
1580	 * Example: userspace sends 4500, ebt_among.c wants 4504.
1581	 */
1582	if (unlikely(match->matchsize == -1))
1583		return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
1584	return xt_compat_match_offset(match);
1585}
1586
1587static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1588				unsigned int *size)
1589{
1590	const struct xt_match *match = m->u.match;
1591	struct compat_ebt_entry_mwt __user *cm = *dstptr;
1592	int off = ebt_compat_match_offset(match, m->match_size);
1593	compat_uint_t msize = m->match_size - off;
1594
1595	BUG_ON(off >= m->match_size);
 
1596
1597	if (copy_to_user(cm->u.name, match->name,
1598	    strlen(match->name) + 1) || put_user(msize, &cm->match_size))
 
1599		return -EFAULT;
1600
1601	if (match->compat_to_user) {
1602		if (match->compat_to_user(cm->data, m->data))
1603			return -EFAULT;
1604	} else if (copy_to_user(cm->data, m->data, msize))
 
 
1605			return -EFAULT;
 
1606
1607	*size -= ebt_compat_entry_padsize() + off;
1608	*dstptr = cm->data;
1609	*dstptr += msize;
1610	return 0;
1611}
1612
1613static int compat_target_to_user(struct ebt_entry_target *t,
1614				 void __user **dstptr,
1615				 unsigned int *size)
1616{
1617	const struct xt_target *target = t->u.target;
1618	struct compat_ebt_entry_mwt __user *cm = *dstptr;
1619	int off = xt_compat_target_offset(target);
1620	compat_uint_t tsize = t->target_size - off;
1621
1622	BUG_ON(off >= t->target_size);
 
1623
1624	if (copy_to_user(cm->u.name, target->name,
1625	    strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
 
1626		return -EFAULT;
1627
1628	if (target->compat_to_user) {
1629		if (target->compat_to_user(cm->data, t->data))
1630			return -EFAULT;
1631	} else if (copy_to_user(cm->data, t->data, tsize))
1632		return -EFAULT;
 
 
 
1633
1634	*size -= ebt_compat_entry_padsize() + off;
1635	*dstptr = cm->data;
1636	*dstptr += tsize;
1637	return 0;
1638}
1639
1640static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1641				  void __user **dstptr,
1642				  unsigned int *size)
1643{
1644	return compat_target_to_user((struct ebt_entry_target *)w,
1645							dstptr, size);
1646}
1647
/* Translate one kernel (64-bit) ebt_entry into its 32-bit form in the
 * userspace buffer at *dstptr, advancing *dstptr and shrinking *size.
 * Chain headers (bitmask == 0) are copied verbatim; real entries get
 * their matches, watchers and target converted and the three offset
 * fields rewritten for the smaller compat layout.
 */
static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
				unsigned int *size)
{
	struct ebt_entry_target *t;
	struct ebt_entry __user *ce;
	u32 watchers_offset, target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	if (e->bitmask == 0) {
		/* chain header (struct ebt_entries): no translation needed */
		if (*size < sizeof(struct ebt_entries))
			return -EINVAL;
		if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
			return -EFAULT;

		*dstptr += sizeof(struct ebt_entries);
		*size -= sizeof(struct ebt_entries);
		return 0;
	}

	if (*size < sizeof(*ce))
		return -EINVAL;

	ce = (struct ebt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(*ce)))
		return -EFAULT;

	origsize = *size;
	*dstptr += sizeof(*ce);

	ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
	if (ret)
		return ret;
	/* (origsize - *size) tracks how much the blob has shrunk so far */
	watchers_offset = e->watchers_offset - (origsize - *size);

	ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
	if (ret)
		return ret;
	target_offset = e->target_offset - (origsize - *size);

	t = (struct ebt_entry_target *) ((char *) e + e->target_offset);

	ret = compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);

	/* patch the offsets in the already-copied entry header */
	if (put_user(watchers_offset, &ce->watchers_offset) ||
	    put_user(target_offset, &ce->target_offset) ||
	    put_user(next_offset, &ce->next_offset))
		return -EFAULT;

	*size -= sizeof(*ce);
	return 0;
}
1703
1704static int compat_calc_match(struct ebt_entry_match *m, int *off)
1705{
1706	*off += ebt_compat_match_offset(m->u.match, m->match_size);
1707	*off += ebt_compat_entry_padsize();
1708	return 0;
1709}
1710
1711static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1712{
1713	*off += xt_compat_target_offset(w->u.watcher);
1714	*off += ebt_compat_entry_padsize();
1715	return 0;
1716}
1717
/* Compute, for one kernel entry, how much smaller its 32-bit form is,
 * record that delta in the xt compat offset table, and shrink
 * newinfo->entries_size plus the affected hook offsets accordingly.
 * Chain headers (bitmask == 0) are size-identical and skipped.
 */
static int compat_calc_entry(const struct ebt_entry *e,
			     const struct ebt_table_info *info,
			     const void *base,
			     struct compat_ebt_replace *newinfo)
{
	const struct ebt_entry_target *t;
	unsigned int entry_offset;
	int off, ret, i;

	if (e->bitmask == 0)
		return 0;

	off = 0;
	entry_offset = (void *)e - base;

	EBT_MATCH_ITERATE(e, compat_calc_match, &off);
	EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);

	t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);

	off += xt_compat_target_offset(t->u.target);
	off += ebt_compat_entry_padsize();

	newinfo->entries_size -= off;

	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		const void *hookptr = info->hook_entry[i];
		/* NOTE(review): "base - hookptr" is a pointer difference
		 * cast back to a pointer, which looks suspicious — this is
		 * presumably meant to shrink the offsets of hooks starting
		 * after @e.  Verify against the iptables compat code.
		 */
		if (info->hook_entry[i] &&
		    (e < (struct ebt_entry *)(base - hookptr))) {
			newinfo->hook_entry[i] -= off;
			pr_debug("0x%08X -> 0x%08X\n",
					newinfo->hook_entry[i] + off,
					newinfo->hook_entry[i]);
		}
	}

	return 0;
}
1760
1761
/* Build the compat view of @info: start from the native entries_size and
 * let compat_calc_entry() subtract each entry's shrinkage while recording
 * per-entry deltas in the xt compat offset table.
 */
static int compat_table_info(const struct ebt_table_info *info,
			     struct compat_ebt_replace *newinfo)
{
	unsigned int size = info->entries_size;
	const void *entries = info->entries;

	newinfo->entries_size = size;

	xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
							entries, newinfo);
}
1774
/* Dump a table (or its registration template) to a 32-bit client:
 * validate the sizes it announced against the computed compat sizes,
 * copy the counters, then stream every entry through
 * compat_copy_entry_to_user().  Called with ebt_mutex held.
 */
static int compat_copy_everything_to_user(struct ebt_table *t,
					  void __user *user, int *len, int cmd)
{
	struct compat_ebt_replace repl, tmp;
	struct ebt_counter *oldcounters;
	struct ebt_table_info tinfo;
	int ret;
	void __user *pos;

	memset(&tinfo, 0, sizeof(tinfo));

	if (cmd == EBT_SO_GET_ENTRIES) {
		/* live ruleset */
		tinfo.entries_size = t->private->entries_size;
		tinfo.nentries = t->private->nentries;
		tinfo.entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		/* pristine template the table was registered with */
		tinfo.entries_size = t->table->entries_size;
		tinfo.nentries = t->table->nentries;
		tinfo.entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (tmp.nentries != tinfo.nentries ||
	   (tmp.num_counters && tmp.num_counters != tinfo.nentries))
		return -EINVAL;

	memcpy(&repl, &tmp, sizeof(repl));
	if (cmd == EBT_SO_GET_ENTRIES)
		ret = compat_table_info(t->private, &repl);
	else
		ret = compat_table_info(&tinfo, &repl);
	if (ret)
		return ret;

	if (*len != sizeof(tmp) + repl.entries_size +
	   (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
		pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
				*len, tinfo.entries_size, repl.entries_size);
		return -EINVAL;
	}

	/* userspace might not need the counters */
	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
					tmp.num_counters, tinfo.nentries);
	if (ret)
		return ret;

	pos = compat_ptr(tmp.entries);
	return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
			compat_copy_entry_to_user, &pos, &tmp.entries_size);
}
1830
/* Bookkeeping for the 32-to-64 bit entry translation.  The translation
 * runs twice: a first "count only" pass with buf_kern_start == NULL to
 * size the native buffer, then a second pass that actually fills it.
 */
struct ebt_entries_buf_state {
	char *buf_kern_start;	/* kernel buffer to copy (translated) data to */
	u32 buf_kern_len;	/* total size of kernel buffer */
	u32 buf_kern_offset;	/* amount of data copied so far */
	u32 buf_user_offset;	/* read position in userspace buffer */
};
1837
1838static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1839{
1840	state->buf_kern_offset += sz;
1841	return state->buf_kern_offset >= sz ? 0 : -EINVAL;
1842}
1843
1844static int ebt_buf_add(struct ebt_entries_buf_state *state,
1845		       void *data, unsigned int sz)
1846{
1847	if (state->buf_kern_start == NULL)
1848		goto count_only;
1849
1850	BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
 
1851
1852	memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1853
1854 count_only:
1855	state->buf_user_offset += sz;
1856	return ebt_buf_count(state, sz);
1857}
1858
1859static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1860{
1861	char *b = state->buf_kern_start;
1862
1863	BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
 
1864
1865	if (b != NULL && sz > 0)
1866		memset(b + state->buf_kern_offset, 0, sz);
1867	/* do not adjust ->buf_user_offset here, we added kernel-side padding */
1868	return ebt_buf_count(state, sz);
1869}
1870
/* which kind of extension header compat_mtw_from_user() is translating */
enum compat_mwt {
	EBT_COMPAT_MATCH,
	EBT_COMPAT_WATCHER,
	EBT_COMPAT_TARGET,
};
1876
1877static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1878				enum compat_mwt compat_mwt,
1879				struct ebt_entries_buf_state *state,
1880				const unsigned char *base)
1881{
1882	char name[EBT_FUNCTION_MAXNAMELEN];
1883	struct xt_match *match;
1884	struct xt_target *wt;
1885	void *dst = NULL;
1886	int off, pad = 0;
1887	unsigned int size_kern, match_size = mwt->match_size;
1888
1889	strlcpy(name, mwt->u.name, sizeof(name));
 
1890
1891	if (state->buf_kern_start)
1892		dst = state->buf_kern_start + state->buf_kern_offset;
1893
1894	switch (compat_mwt) {
1895	case EBT_COMPAT_MATCH:
1896		match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
1897						name, 0), "ebt_%s", name);
1898		if (match == NULL)
1899			return -ENOENT;
1900		if (IS_ERR(match))
1901			return PTR_ERR(match);
1902
1903		off = ebt_compat_match_offset(match, match_size);
1904		if (dst) {
1905			if (match->compat_from_user)
1906				match->compat_from_user(dst, mwt->data);
1907			else
1908				memcpy(dst, mwt->data, match_size);
1909		}
1910
1911		size_kern = match->matchsize;
1912		if (unlikely(size_kern == -1))
1913			size_kern = match_size;
1914		module_put(match->me);
1915		break;
1916	case EBT_COMPAT_WATCHER: /* fallthrough */
1917	case EBT_COMPAT_TARGET:
1918		wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE,
1919						name, 0), "ebt_%s", name);
1920		if (wt == NULL)
1921			return -ENOENT;
1922		if (IS_ERR(wt))
1923			return PTR_ERR(wt);
1924		off = xt_compat_target_offset(wt);
1925
1926		if (dst) {
1927			if (wt->compat_from_user)
1928				wt->compat_from_user(dst, mwt->data);
1929			else
1930				memcpy(dst, mwt->data, match_size);
1931		}
1932
1933		size_kern = wt->targetsize;
1934		module_put(wt->me);
1935		break;
1936
1937	default:
1938		return -EINVAL;
1939	}
1940
1941	state->buf_kern_offset += match_size + off;
1942	state->buf_user_offset += match_size;
1943	pad = XT_ALIGN(size_kern) - size_kern;
1944
1945	if (pad > 0 && dst) {
1946		BUG_ON(state->buf_kern_len <= pad);
1947		BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
 
 
1948		memset(dst + size_kern, 0, pad);
1949	}
1950	return off + match_size;
1951}
1952
1953/*
1954 * return size of all matches, watchers or target, including necessary
1955 * alignment and padding.
1956 */
1957static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
1958			unsigned int size_left, enum compat_mwt type,
1959			struct ebt_entries_buf_state *state, const void *base)
1960{
1961	int growth = 0;
1962	char *buf;
1963
1964	if (size_left == 0)
1965		return 0;
1966
1967	buf = (char *) match32;
1968
1969	while (size_left >= sizeof(*match32)) {
1970		struct ebt_entry_match *match_kern;
1971		int ret;
1972
1973		match_kern = (struct ebt_entry_match *) state->buf_kern_start;
1974		if (match_kern) {
1975			char *tmp;
1976			tmp = state->buf_kern_start + state->buf_kern_offset;
1977			match_kern = (struct ebt_entry_match *) tmp;
1978		}
1979		ret = ebt_buf_add(state, buf, sizeof(*match32));
1980		if (ret < 0)
1981			return ret;
1982		size_left -= sizeof(*match32);
1983
1984		/* add padding before match->data (if any) */
1985		ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
1986		if (ret < 0)
1987			return ret;
1988
1989		if (match32->match_size > size_left)
1990			return -EINVAL;
1991
1992		size_left -= match32->match_size;
1993
1994		ret = compat_mtw_from_user(match32, type, state, base);
1995		if (ret < 0)
1996			return ret;
1997
1998		BUG_ON(ret < match32->match_size);
 
1999		growth += ret - match32->match_size;
2000		growth += ebt_compat_entry_padsize();
2001
2002		buf += sizeof(*match32);
2003		buf += match32->match_size;
2004
2005		if (match_kern)
2006			match_kern->match_size = ret;
2007
2008		WARN_ON(type == EBT_COMPAT_TARGET && size_left);
 
 
2009		match32 = (struct compat_ebt_entry_mwt *) buf;
2010	}
2011
2012	return growth;
2013}
2014
2015/* called for all ebt_entry structures. */
2016static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2017			  unsigned int *total,
2018			  struct ebt_entries_buf_state *state)
2019{
2020	unsigned int i, j, startoff, new_offset = 0;
2021	/* stores match/watchers/targets & offset of next struct ebt_entry: */
2022	unsigned int offsets[4];
2023	unsigned int *offsets_update = NULL;
2024	int ret;
2025	char *buf_start;
2026
2027	if (*total < sizeof(struct ebt_entries))
2028		return -EINVAL;
2029
2030	if (!entry->bitmask) {
2031		*total -= sizeof(struct ebt_entries);
2032		return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
2033	}
2034	if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
2035		return -EINVAL;
2036
2037	startoff = state->buf_user_offset;
2038	/* pull in most part of ebt_entry, it does not need to be changed. */
2039	ret = ebt_buf_add(state, entry,
2040			offsetof(struct ebt_entry, watchers_offset));
2041	if (ret < 0)
2042		return ret;
2043
2044	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
2045	memcpy(&offsets[1], &entry->watchers_offset,
2046			sizeof(offsets) - sizeof(offsets[0]));
2047
2048	if (state->buf_kern_start) {
2049		buf_start = state->buf_kern_start + state->buf_kern_offset;
2050		offsets_update = (unsigned int *) buf_start;
2051	}
2052	ret = ebt_buf_add(state, &offsets[1],
2053			sizeof(offsets) - sizeof(offsets[0]));
2054	if (ret < 0)
2055		return ret;
2056	buf_start = (char *) entry;
2057	/*
2058	 * 0: matches offset, always follows ebt_entry.
2059	 * 1: watchers offset, from ebt_entry structure
2060	 * 2: target offset, from ebt_entry structure
2061	 * 3: next ebt_entry offset, from ebt_entry structure
2062	 *
2063	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
2064	 */
 
 
 
 
 
 
 
 
 
 
 
 
 
2065	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
2066		struct compat_ebt_entry_mwt *match32;
2067		unsigned int size;
2068		char *buf = buf_start;
2069
2070		buf = buf_start + offsets[i];
2071		if (offsets[i] > offsets[j])
2072			return -EINVAL;
2073
2074		match32 = (struct compat_ebt_entry_mwt *) buf;
2075		size = offsets[j] - offsets[i];
2076		ret = ebt_size_mwt(match32, size, i, state, base);
2077		if (ret < 0)
2078			return ret;
2079		new_offset += ret;
2080		if (offsets_update && new_offset) {
2081			pr_debug("change offset %d to %d\n",
2082				offsets_update[i], offsets[j] + new_offset);
2083			offsets_update[i] = offsets[j] + new_offset;
2084		}
2085	}
2086
2087	if (state->buf_kern_start == NULL) {
2088		unsigned int offset = buf_start - (char *) base;
2089
2090		ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset);
2091		if (ret < 0)
2092			return ret;
2093	}
2094
2095	startoff = state->buf_user_offset - startoff;
2096
2097	BUG_ON(*total < startoff);
 
2098	*total -= startoff;
2099	return 0;
2100}
2101
2102/*
2103 * repl->entries_size is the size of the ebt_entry blob in userspace.
2104 * It might need more memory when copied to a 64 bit kernel in case
2105 * userspace is 32-bit. So, first task: find out how much memory is needed.
2106 *
2107 * Called before validation is performed.
2108 */
2109static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2110				struct ebt_entries_buf_state *state)
2111{
2112	unsigned int size_remaining = size_user;
2113	int ret;
2114
2115	ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2116					&size_remaining, state);
2117	if (ret < 0)
2118		return ret;
2119
2120	WARN_ON(size_remaining);
2121	return state->buf_kern_offset;
2122}
2123
2124
2125static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
2126					    void __user *user, unsigned int len)
2127{
2128	struct compat_ebt_replace tmp;
2129	int i;
2130
2131	if (len < sizeof(tmp))
2132		return -EINVAL;
2133
2134	if (copy_from_user(&tmp, user, sizeof(tmp)))
2135		return -EFAULT;
2136
2137	if (len != sizeof(tmp) + tmp.entries_size)
2138		return -EINVAL;
2139
2140	if (tmp.entries_size == 0)
2141		return -EINVAL;
2142
2143	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
2144			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
2145		return -ENOMEM;
2146	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
2147		return -ENOMEM;
2148
2149	memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
2150
2151	/* starting with hook_entry, 32 vs. 64 bit structures are different */
2152	for (i = 0; i < NF_BR_NUMHOOKS; i++)
2153		repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);
2154
2155	repl->num_counters = tmp.num_counters;
2156	repl->counters = compat_ptr(tmp.counters);
2157	repl->entries = compat_ptr(tmp.entries);
2158	return 0;
2159}
2160
2161static int compat_do_replace(struct net *net, void __user *user,
2162			     unsigned int len)
2163{
2164	int ret, i, countersize, size64;
2165	struct ebt_table_info *newinfo;
2166	struct ebt_replace tmp;
2167	struct ebt_entries_buf_state state;
2168	void *entries_tmp;
2169
2170	ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
2171	if (ret) {
2172		/* try real handler in case userland supplied needed padding */
2173		if (ret == -EINVAL && do_replace(net, user, len) == 0)
2174			ret = 0;
2175		return ret;
2176	}
2177
2178	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
2179	newinfo = vmalloc(sizeof(*newinfo) + countersize);
2180	if (!newinfo)
2181		return -ENOMEM;
2182
2183	if (countersize)
2184		memset(newinfo->counters, 0, countersize);
2185
2186	memset(&state, 0, sizeof(state));
2187
2188	newinfo->entries = vmalloc(tmp.entries_size);
2189	if (!newinfo->entries) {
2190		ret = -ENOMEM;
2191		goto free_newinfo;
2192	}
2193	if (copy_from_user(
2194	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
2195		ret = -EFAULT;
2196		goto free_entries;
2197	}
2198
2199	entries_tmp = newinfo->entries;
2200
2201	xt_compat_lock(NFPROTO_BRIDGE);
2202
2203	xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
 
 
2204	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2205	if (ret < 0)
2206		goto out_unlock;
2207
2208	pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
2209		tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
2210		xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));
2211
2212	size64 = ret;
2213	newinfo->entries = vmalloc(size64);
2214	if (!newinfo->entries) {
2215		vfree(entries_tmp);
2216		ret = -ENOMEM;
2217		goto out_unlock;
2218	}
2219
2220	memset(&state, 0, sizeof(state));
2221	state.buf_kern_start = newinfo->entries;
2222	state.buf_kern_len = size64;
2223
2224	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2225	BUG_ON(ret < 0);	/* parses same data again */
 
2226
2227	vfree(entries_tmp);
2228	tmp.entries_size = size64;
2229
2230	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
2231		char __user *usrptr;
2232		if (tmp.hook_entry[i]) {
2233			unsigned int delta;
2234			usrptr = (char __user *) tmp.hook_entry[i];
2235			delta = usrptr - tmp.entries;
2236			usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
2237			tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
2238		}
2239	}
2240
2241	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2242	xt_compat_unlock(NFPROTO_BRIDGE);
2243
2244	ret = do_replace_finish(net, &tmp, newinfo);
2245	if (ret == 0)
2246		return ret;
2247free_entries:
2248	vfree(newinfo->entries);
2249free_newinfo:
2250	vfree(newinfo);
2251	return ret;
2252out_unlock:
2253	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2254	xt_compat_unlock(NFPROTO_BRIDGE);
2255	goto free_entries;
2256}
2257
2258static int compat_update_counters(struct net *net, void __user *user,
2259				  unsigned int len)
2260{
2261	struct compat_ebt_replace hlp;
2262
2263	if (copy_from_user(&hlp, user, sizeof(hlp)))
2264		return -EFAULT;
2265
2266	/* try real handler in case userland supplied needed padding */
2267	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2268		return update_counters(net, user, len);
2269
2270	return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2271					hlp.num_counters, user, len);
2272}
2273
2274static int compat_do_ebt_set_ctl(struct sock *sk,
2275		int cmd, void __user *user, unsigned int len)
2276{
2277	int ret;
 
2278
2279	if (!capable(CAP_NET_ADMIN))
2280		return -EPERM;
2281
2282	switch (cmd) {
2283	case EBT_SO_SET_ENTRIES:
2284		ret = compat_do_replace(sock_net(sk), user, len);
2285		break;
2286	case EBT_SO_SET_COUNTERS:
2287		ret = compat_update_counters(sock_net(sk), user, len);
2288		break;
2289	default:
2290		ret = -EINVAL;
2291  }
2292	return ret;
2293}
2294
/* Compat getsockopt() entry point.
 *
 * Locking: find_table_lock() returns with ebt_mutex held on success; the
 * common exit path below releases it together with the xt compat lock, so
 * every switch arm must fall through to "out" (or break) rather than return.
 */
static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
		void __user *user, int *len)
{
	int ret;
	struct compat_ebt_replace tmp;
	struct ebt_table *t;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* try real handler in case userland supplied needed padding */
	if ((cmd == EBT_SO_GET_INFO ||
	     cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
			return do_ebt_get_ctl(sk, cmd, user, len);

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* on success, ebt_mutex is held until the exit path below */
	t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	xt_compat_lock(NFPROTO_BRIDGE);
	switch (cmd) {
	case EBT_SO_GET_INFO:
		tmp.nentries = t->private->nentries;
		/* recompute entries_size for the 32-bit view of the table */
		ret = compat_table_info(t->private, &tmp);
		if (ret)
			goto out;
		tmp.valid_hooks = t->valid_hooks;

		/* *len == sizeof(tmp) was enforced above */
		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_INIT_INFO:
		/* report the pristine (initial) table, not the live one */
		tmp.nentries = t->table->nentries;
		tmp.entries_size = t->table->entries_size;
		tmp.valid_hooks = t->table->valid_hooks;

		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		/*
		 * try real handler first in case of userland-side padding.
		 * in case we are dealing with an 'ordinary' 32 bit binary
		 * without 64bit compatibility padding, this will fail right
		 * after copy_from_user when the *len argument is validated.
		 *
		 * the compat_ variant needs to do one pass over the kernel
		 * data set to adjust for size differences before it the check.
		 */
		if (copy_everything_to_user(t, user, len, cmd) == 0)
			ret = 0;
		else
			ret = compat_copy_everything_to_user(t, user, len, cmd);
		break;
	default:
		ret = -EINVAL;
	}
 out:
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	mutex_unlock(&ebt_mutex);
	return ret;
}
2368#endif
2369
/* sockopt registration: routes EBT_SO_SET_*/EBT_SO_GET_* setsockopt and
 * getsockopt calls on PF_INET sockets to the handlers above, with separate
 * entry points for 32-bit userland when CONFIG_COMPAT is enabled.
 */
static struct nf_sockopt_ops ebt_sockopts =
{
	.pf		= PF_INET,
	.set_optmin	= EBT_BASE_CTL,
	.set_optmax	= EBT_SO_SET_MAX + 1,
	.set		= do_ebt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ebt_set_ctl,
#endif
	.get_optmin	= EBT_BASE_CTL,
	.get_optmax	= EBT_SO_GET_MAX + 1,
	.get		= do_ebt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ebt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2387
2388static int __init ebtables_init(void)
2389{
2390	int ret;
2391
2392	ret = xt_register_target(&ebt_standard_target);
2393	if (ret < 0)
2394		return ret;
2395	ret = nf_register_sockopt(&ebt_sockopts);
2396	if (ret < 0) {
2397		xt_unregister_target(&ebt_standard_target);
2398		return ret;
2399	}
2400
2401	printk(KERN_INFO "Ebtables v2.0 registered\n");
2402	return 0;
2403}
2404
/* Module exit: tear down in reverse registration order — sockopt interface
 * first (no new userland requests), then the standard target.
 */
static void __exit ebtables_fini(void)
{
	nf_unregister_sockopt(&ebt_sockopts);
	xt_unregister_target(&ebt_standard_target);
	printk(KERN_INFO "Ebtables v2.0 unregistered\n");
}
2411
/* public API used by the per-table modules (ebtable_filter, ebtable_nat, ...) */
EXPORT_SYMBOL(ebt_register_table);
EXPORT_SYMBOL(ebt_unregister_table);
EXPORT_SYMBOL(ebt_do_table);
module_init(ebtables_init);
module_exit(ebtables_fini);
MODULE_LICENSE("GPL");
v4.17
   1/*
   2 *  ebtables
   3 *
   4 *  Author:
   5 *  Bart De Schuymer		<bdschuym@pandora.be>
   6 *
   7 *  ebtables.c,v 2.0, July, 2002
   8 *
   9 *  This code is strongly inspired by the iptables code which is
  10 *  Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
  11 *
  12 *  This program is free software; you can redistribute it and/or
  13 *  modify it under the terms of the GNU General Public License
  14 *  as published by the Free Software Foundation; either version
  15 *  2 of the License, or (at your option) any later version.
  16 */
  17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  18#include <linux/kmod.h>
  19#include <linux/module.h>
  20#include <linux/vmalloc.h>
  21#include <linux/netfilter/x_tables.h>
  22#include <linux/netfilter_bridge/ebtables.h>
  23#include <linux/spinlock.h>
  24#include <linux/mutex.h>
  25#include <linux/slab.h>
  26#include <linux/uaccess.h>
  27#include <linux/smp.h>
  28#include <linux/cpumask.h>
  29#include <linux/audit.h>
  30#include <net/sock.h>
  31/* needed for logical [in,out]-dev filtering */
  32#include "../br_private.h"
  33
/* loud diagnostic for malformed userspace input; compiled out by swapping in
 * the empty definition below
 */
#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
					 "report to author: "format, ## args)
/* #define BUGPRINT(format, args...) */
  37
  38/* Each cpu has its own set of counters, so there is no need for write_lock in
 
  39 * the softirq
  40 * For reading or updating the counters, the user context needs to
  41 * get a write_lock
  42 */
  43
  44/* The size of each set of counters is altered to get cache alignment */
  45#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
  46#define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
  47#define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
  48				 COUNTER_OFFSET(n) * cpu))
  49
  50
  51
  52static DEFINE_MUTEX(ebt_mutex);
  53
  54#ifdef CONFIG_COMPAT
  55static void ebt_standard_compat_from_user(void *dst, const void *src)
  56{
  57	int v = *(compat_int_t *)src;
  58
  59	if (v >= 0)
  60		v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
  61	memcpy(dst, &v, sizeof(v));
  62}
  63
  64static int ebt_standard_compat_to_user(void __user *dst, const void *src)
  65{
  66	compat_int_t cv = *(int *)src;
  67
  68	if (cv >= 0)
  69		cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
  70	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
  71}
  72#endif
  73
  74
/* The built-in "standard" target: its targetsize is a single verdict int
 * (ACCEPT/DROP/CONTINUE/RETURN or a jump offset).  The compat hooks convert
 * the verdict between 32- and 64-bit entry layouts.
 */
static struct xt_target ebt_standard_target = {
	.name       = "standard",
	.revision   = 0,
	.family     = NFPROTO_BRIDGE,
	.targetsize = sizeof(int),
#ifdef CONFIG_COMPAT
	.compatsize = sizeof(compat_int_t),
	.compat_from_user = ebt_standard_compat_from_user,
	.compat_to_user =  ebt_standard_compat_to_user,
#endif
};
  86
  87static inline int
  88ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
  89	       struct xt_action_param *par)
  90{
  91	par->target   = w->u.watcher;
  92	par->targinfo = w->data;
  93	w->u.watcher->target(skb, par);
  94	/* watchers don't give a verdict */
  95	return 0;
  96}
  97
  98static inline int
  99ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
 100	     struct xt_action_param *par)
 101{
 102	par->match     = m->u.match;
 103	par->matchinfo = m->data;
 104	return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
 105}
 106
 107static inline int
 108ebt_dev_check(const char *entry, const struct net_device *device)
 109{
 110	int i = 0;
 111	const char *devname;
 112
 113	if (*entry == '\0')
 114		return 0;
 115	if (!device)
 116		return 1;
 117	devname = device->name;
 118	/* 1 is the wildcard token */
 119	while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
 120		i++;
 121	return devname[i] != entry[i] && entry[i] != 1;
 122}
 123
 
/* process standard matches
 *
 * Checks the fixed part of a rule (protocol, in/out device, logical bridge
 * device, source/dest MAC) against the packet.  Each check honours its
 * EBT_I* inversion flag via NF_INVF.  Returns 0 if the rule matches,
 * 1 on the first failing check.
 */
static inline int
ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
		const struct net_device *in, const struct net_device *out)
{
	const struct ethhdr *h = eth_hdr(skb);
	const struct net_bridge_port *p;
	__be16 ethproto;

	/* a VLAN-tagged frame is matched as 802.1Q regardless of inner proto */
	if (skb_vlan_tag_present(skb))
		ethproto = htons(ETH_P_8021Q);
	else
		ethproto = h->h_proto;

	if (e->bitmask & EBT_802_3) {
		if (NF_INVF(e, EBT_IPROTO, eth_proto_is_802_3(ethproto)))
			return 1;
	} else if (!(e->bitmask & EBT_NOPROTO) &&
		   NF_INVF(e, EBT_IPROTO, e->ethproto != ethproto))
		return 1;

	if (NF_INVF(e, EBT_IIN, ebt_dev_check(e->in, in)))
		return 1;
	if (NF_INVF(e, EBT_IOUT, ebt_dev_check(e->out, out)))
		return 1;
	/* rcu_read_lock()ed by nf_hook_thresh */
	if (in && (p = br_port_get_rcu(in)) != NULL &&
	    NF_INVF(e, EBT_ILOGICALIN,
		    ebt_dev_check(e->logical_in, p->br->dev)))
		return 1;
	if (out && (p = br_port_get_rcu(out)) != NULL &&
	    NF_INVF(e, EBT_ILOGICALOUT,
		    ebt_dev_check(e->logical_out, p->br->dev)))
		return 1;

	if (e->bitmask & EBT_SOURCEMAC) {
		if (NF_INVF(e, EBT_ISOURCE,
			    !ether_addr_equal_masked(h->h_source, e->sourcemac,
						     e->sourcemsk)))
			return 1;
	}
	if (e->bitmask & EBT_DESTMAC) {
		if (NF_INVF(e, EBT_IDEST,
			    !ether_addr_equal_masked(h->h_dest, e->destmac,
						     e->destmsk)))
			return 1;
	}
	return 0;
}
 173
 174static inline
 175struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
 176{
 177	return (void *)entry + entry->next_offset;
 178}
 179
/* Do some firewalling
 *
 * The per-packet evaluation loop.  Walks the chain for the current hook,
 * running matches, watchers and the target of each rule, and follows jumps
 * into user-defined chains via the per-cpu chainstack (cs/sp).  Runs under
 * the table's read lock; counters are per-cpu so no further locking is
 * needed for ADD_COUNTER (see the comment at the top of this file).
 */
unsigned int ebt_do_table(struct sk_buff *skb,
			  const struct nf_hook_state *state,
			  struct ebt_table *table)
{
	unsigned int hook = state->hook;
	int i, nentries;
	struct ebt_entry *point;
	struct ebt_counter *counter_base, *cb_base;
	const struct ebt_entry_target *t;
	int verdict, sp = 0;	/* sp: depth in the udc chainstack */
	struct ebt_chainstack *cs;
	struct ebt_entries *chaininfo;
	const char *base;
	const struct ebt_table_info *private;
	struct xt_action_param acpar;

	acpar.state   = state;
	acpar.hotdrop = false;

	read_lock_bh(&table->lock);
	private = table->private;
	/* this cpu's slice of the counter array */
	cb_base = COUNTER_BASE(private->counters, private->nentries,
	   smp_processor_id());
	if (private->chainstack)
		cs = private->chainstack[smp_processor_id()];
	else
		cs = NULL;
	chaininfo = private->hook_entry[hook];
	nentries = private->hook_entry[hook]->nentries;
	point = (struct ebt_entry *)(private->hook_entry[hook]->data);
	counter_base = cb_base + private->hook_entry[hook]->counter_offset;
	/* base for chain jumps */
	base = private->entries;
	i = 0;
	while (i < nentries) {
		/* any basic-header mismatch skips straight to the next rule */
		if (ebt_basic_match(point, skb, state->in, state->out))
			goto letscontinue;

		if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
			goto letscontinue;
		if (acpar.hotdrop) {
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}

		/* rule matched: bump its packet/byte counters */
		ADD_COUNTER(*(counter_base + i), 1, skb->len);

		/* these should only watch: not modify, nor tell us
		 * what to do with the packet
		 */
		EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);

		t = (struct ebt_entry_target *)
		   (((char *)point) + point->target_offset);
		/* standard target */
		if (!t->u.target->target)
			verdict = ((struct ebt_standard_target *)t)->verdict;
		else {
			acpar.target   = t->u.target;
			acpar.targinfo = t->data;
			verdict = t->u.target->target(skb, &acpar);
		}
		if (verdict == EBT_ACCEPT) {
			read_unlock_bh(&table->lock);
			return NF_ACCEPT;
		}
		if (verdict == EBT_DROP) {
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}
		if (verdict == EBT_RETURN) {
letsreturn:
			/* RETURN at stack depth 0 means broken ruleset */
			if (WARN(sp == 0, "RETURN on base chain")) {
				/* act like this is EBT_CONTINUE */
				goto letscontinue;
			}

			sp--;
			/* put all the local variables right */
			i = cs[sp].n;
			chaininfo = cs[sp].chaininfo;
			nentries = chaininfo->nentries;
			point = cs[sp].e;
			counter_base = cb_base +
			   chaininfo->counter_offset;
			continue;
		}
		if (verdict == EBT_CONTINUE)
			goto letscontinue;

		if (WARN(verdict < 0, "bogus standard verdict\n")) {
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}

		/* jump to a udc: verdict is the target chain's offset */
		cs[sp].n = i + 1;
		cs[sp].chaininfo = chaininfo;
		cs[sp].e = ebt_next_entry(point);
		i = 0;
		chaininfo = (struct ebt_entries *) (base + verdict);

		/* distinguisher is 0 only for real chain headers */
		if (WARN(chaininfo->distinguisher, "jump to non-chain\n")) {
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}

		nentries = chaininfo->nentries;
		point = (struct ebt_entry *)chaininfo->data;
		counter_base = cb_base + chaininfo->counter_offset;
		sp++;
		continue;
letscontinue:
		point = ebt_next_entry(point);
		i++;
	}

	/* end of chain: apply the chain policy */
	/* I actually like this :) */
	if (chaininfo->policy == EBT_RETURN)
		goto letsreturn;
	if (chaininfo->policy == EBT_ACCEPT) {
		read_unlock_bh(&table->lock);
		return NF_ACCEPT;
	}
	read_unlock_bh(&table->lock);
	return NF_DROP;
}
 308
 309/* If it succeeds, returns element and locks mutex */
 310static inline void *
 311find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
 312			struct mutex *mutex)
 313{
 314	struct {
 315		struct list_head list;
 316		char name[EBT_FUNCTION_MAXNAMELEN];
 317	} *e;
 318
 319	mutex_lock(mutex);
 
 
 
 320	list_for_each_entry(e, head, list) {
 321		if (strcmp(e->name, name) == 0)
 322			return e;
 323	}
 324	*error = -ENOENT;
 325	mutex_unlock(mutex);
 326	return NULL;
 327}
 328
/* Look up "name" in "head"; if absent, request_module("<prefix><name>") and
 * retry once (try_then_request_module).  On success the mutex is held.
 */
static void *
find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
		 int *error, struct mutex *mutex)
{
	return try_then_request_module(
			find_inlist_lock_noload(head, name, error, mutex),
			"%s%s", prefix, name);
}
 337
 338static inline struct ebt_table *
 339find_table_lock(struct net *net, const char *name, int *error,
 340		struct mutex *mutex)
 341{
 342	return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
 343				"ebtable_", error, mutex);
 344}
 345
/* Validate one match extension of a rule and take a reference on its module.
 * On success increments *cnt (used to unwind exactly this many matches on a
 * later failure).  Returns 0 or a negative errno.
 */
static inline int
ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
		unsigned int *cnt)
{
	const struct ebt_entry *e = par->entryinfo;
	struct xt_match *match;
	/* room from this match up to the start of the watchers */
	size_t left = ((char *)e + e->watchers_offset) - (char *)m;
	int ret;

	if (left < sizeof(struct ebt_entry_match) ||
	    left - sizeof(struct ebt_entry_match) < m->match_size)
		return -EINVAL;

	match = xt_find_match(NFPROTO_BRIDGE, m->u.name, m->u.revision);
	if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) {
		/* wrong family or not found: drop any ref we got, try to
		 * load the ebt_-prefixed module and look up again
		 */
		if (!IS_ERR(match))
			module_put(match->me);
		request_module("ebt_%s", m->u.name);
		match = xt_find_match(NFPROTO_BRIDGE, m->u.name, m->u.revision);
	}
	if (IS_ERR(match))
		return PTR_ERR(match);
	m->u.match = match;

	par->match     = match;
	par->matchinfo = m->data;
	ret = xt_check_match(par, m->match_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		/* validation failed: release the module reference */
		module_put(match->me);
		return ret;
	}

	(*cnt)++;
	return 0;
}
 382
 383static inline int
 384ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
 385		  unsigned int *cnt)
 386{
 387	const struct ebt_entry *e = par->entryinfo;
 388	struct xt_target *watcher;
 389	size_t left = ((char *)e + e->target_offset) - (char *)w;
 390	int ret;
 391
 392	if (left < sizeof(struct ebt_entry_watcher) ||
 393	   left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
 394		return -EINVAL;
 395
 396	watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
 397	if (IS_ERR(watcher))
 398		return PTR_ERR(watcher);
 399	w->u.watcher = watcher;
 400
 401	par->target   = watcher;
 402	par->targinfo = w->data;
 403	ret = xt_check_target(par, w->watcher_size,
 404	      e->ethproto, e->invflags & EBT_IPROTO);
 405	if (ret < 0) {
 406		module_put(watcher->me);
 407		return ret;
 408	}
 409
 410	(*cnt)++;
 411	return 0;
 412}
 413
/* First structural pass over the (already copied) entry blob: walk it once,
 * record the kernel addresses of the chain headers named in repl->hook_entry,
 * and verify that headers and entries tile the blob exactly.  No match/
 * watcher/target content is examined here — that happens later.
 */
static int ebt_verify_pointers(const struct ebt_replace *repl,
			       struct ebt_table_info *newinfo)
{
	unsigned int limit = repl->entries_size;
	unsigned int valid_hooks = repl->valid_hooks;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		newinfo->hook_entry[i] = NULL;

	newinfo->entries_size = repl->entries_size;
	newinfo->nentries = repl->nentries;

	while (offset < limit) {
		size_t left = limit - offset;
		struct ebt_entry *e = (void *)newinfo->entries + offset;

		/* too small to even hold the bitmask/distinguisher word */
		if (left < sizeof(unsigned int))
			break;

		/* does userspace claim a chain header starts here? */
		for (i = 0; i < NF_BR_NUMHOOKS; i++) {
			if ((valid_hooks & (1 << i)) == 0)
				continue;
			if ((char __user *)repl->hook_entry[i] ==
			     repl->entries + offset)
				break;
		}

		if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
			/* chain header (base or user-defined) */
			if (e->bitmask != 0) {
				/* we make userspace set this right,
				 * so there is no misunderstanding
				 */
				BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
					 "in distinguisher\n");
				return -EINVAL;
			}
			if (i != NF_BR_NUMHOOKS)
				newinfo->hook_entry[i] = (struct ebt_entries *)e;
			if (left < sizeof(struct ebt_entries))
				break;
			offset += sizeof(struct ebt_entries);
		} else {
			/* ordinary rule: next_offset must stay in bounds and
			 * be large enough to make forward progress
			 */
			if (left < sizeof(struct ebt_entry))
				break;
			if (left < e->next_offset)
				break;
			if (e->next_offset < sizeof(struct ebt_entry))
				return -EINVAL;
			offset += e->next_offset;
		}
	}
	/* the walk must consume the blob exactly */
	if (offset != limit) {
		BUGPRINT("entries_size too small\n");
		return -EINVAL;
	}

	/* check if all valid hooks have a chain */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if (!newinfo->hook_entry[i] &&
		   (valid_hooks & (1 << i))) {
			BUGPRINT("Valid hook without chain\n");
			return -EINVAL;
		}
	}
	return 0;
}
 482
/* this one is very careful, as it is the first function
 * to parse the userspace data
 *
 * Per-item callback of the second pass: validates each chain header's policy
 * and counter_offset, and each rule's internal offset ordering.
 *   *n        - nentries claimed by the current chain header
 *   *cnt      - rules seen so far in the current chain
 *   *totalcnt - rules seen so far over the whole table
 *   *udc_cnt  - number of user-defined chains found
 */
static inline int
ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
			       const struct ebt_table_info *newinfo,
			       unsigned int *n, unsigned int *cnt,
			       unsigned int *totalcnt, unsigned int *udc_cnt)
{
	int i;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if ((void *)e == (void *)newinfo->hook_entry[i])
			break;
	}
	/* beginning of a new chain
	 * if i == NF_BR_NUMHOOKS it must be a user defined chain
	 */
	if (i != NF_BR_NUMHOOKS || !e->bitmask) {
		/* this checks if the previous chain has as many entries
		 * as it said it has
		 */
		if (*n != *cnt) {
			BUGPRINT("nentries does not equal the nr of entries "
				 "in the chain\n");
			return -EINVAL;
		}
		if (((struct ebt_entries *)e)->policy != EBT_DROP &&
		   ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
			/* only RETURN from udc */
			if (i != NF_BR_NUMHOOKS ||
			   ((struct ebt_entries *)e)->policy != EBT_RETURN) {
				BUGPRINT("bad policy\n");
				return -EINVAL;
			}
		}
		if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
			(*udc_cnt)++;
		/* counter_offset must equal the number of rules preceding
		 * this chain, table-wide
		 */
		if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
			BUGPRINT("counter_offset != totalcnt");
			return -EINVAL;
		}
		*n = ((struct ebt_entries *)e)->nentries;
		*cnt = 0;
		return 0;
	}
	/* a plain old entry, heh */
	if (sizeof(struct ebt_entry) > e->watchers_offset ||
	   e->watchers_offset > e->target_offset ||
	   e->target_offset >= e->next_offset) {
		BUGPRINT("entry offsets not in right order\n");
		return -EINVAL;
	}
	/* this is not checked anywhere else */
	if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
		BUGPRINT("target size too small\n");
		return -EINVAL;
	}
	(*cnt)++;
	(*totalcnt)++;
	return 0;
}
 545
/* Bookkeeping for the chain-loop checker: one slot per user-defined chain. */
struct ebt_cl_stack {
	struct ebt_chainstack cs;	/* chain header, entry, visit marker */
	int from;			/* index of the calling chain, -1 = base */
	unsigned int hookmask;		/* base hooks this udc is reachable from */
};
 551
 552/* We need these positions to check that the jumps to a different part of the
 
 553 * entries is a jump to the beginning of a new chain.
 554 */
 555static inline int
 556ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
 557		      unsigned int *n, struct ebt_cl_stack *udc)
 558{
 559	int i;
 560
 561	/* we're only interested in chain starts */
 562	if (e->bitmask)
 563		return 0;
 564	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
 565		if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
 566			break;
 567	}
 568	/* only care about udc */
 569	if (i != NF_BR_NUMHOOKS)
 570		return 0;
 571
 572	udc[*n].cs.chaininfo = (struct ebt_entries *)e;
 573	/* these initialisations are depended on later in check_chainloops() */
 574	udc[*n].cs.n = 0;
 575	udc[*n].hookmask = 0;
 576
 577	(*n)++;
 578	return 0;
 579}
 580
 581static inline int
 582ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
 583{
 584	struct xt_mtdtor_param par;
 585
 586	if (i && (*i)-- == 0)
 587		return 1;
 588
 589	par.net       = net;
 590	par.match     = m->u.match;
 591	par.matchinfo = m->data;
 592	par.family    = NFPROTO_BRIDGE;
 593	if (par.match->destroy != NULL)
 594		par.match->destroy(&par);
 595	module_put(par.match->me);
 596	return 0;
 597}
 598
 599static inline int
 600ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
 601{
 602	struct xt_tgdtor_param par;
 603
 604	if (i && (*i)-- == 0)
 605		return 1;
 606
 607	par.net      = net;
 608	par.target   = w->u.watcher;
 609	par.targinfo = w->data;
 610	par.family   = NFPROTO_BRIDGE;
 611	if (par.target->destroy != NULL)
 612		par.target->destroy(&par);
 613	module_put(par.target->me);
 614	return 0;
 615}
 616
/* Tear down one rule: all its watchers and matches first, then its target,
 * releasing every module reference taken during check.  A non-NULL cnt
 * limits cleanup to the first *cnt rules (partial-failure unwind).
 */
static inline int
ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
{
	struct xt_tgdtor_param par;
	struct ebt_entry_target *t;

	/* chain headers have bitmask 0 and own no extensions */
	if (e->bitmask == 0)
		return 0;
	/* we're done */
	if (cnt && (*cnt)-- == 0)
		return 1;
	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);

	par.net      = net;
	par.target   = t->u.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_BRIDGE;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	return 0;
}
 641
/* Fully validate one rule: flags, hook membership, every match and watcher,
 * and finally the target.  Takes module references for everything accepted;
 * on failure unwinds via the goto labels so exactly the accepted extensions
 * are released.  *cnt counts fully-validated rules for the caller's unwind.
 */
static inline int
ebt_check_entry(struct ebt_entry *e, struct net *net,
		const struct ebt_table_info *newinfo,
		const char *name, unsigned int *cnt,
		struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
{
	struct ebt_entry_target *t;
	struct xt_target *target;
	unsigned int i, j, hook = 0, hookmask = 0;
	size_t gap;
	int ret;
	struct xt_mtchk_param mtpar;
	struct xt_tgchk_param tgpar;

	/* don't mess with the struct ebt_entries */
	if (e->bitmask == 0)
		return 0;

	if (e->bitmask & ~EBT_F_MASK) {
		BUGPRINT("Unknown flag for bitmask\n");
		return -EINVAL;
	}
	if (e->invflags & ~EBT_INV_MASK) {
		BUGPRINT("Unknown flag for inv bitmask\n");
		return -EINVAL;
	}
	if ((e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3)) {
		BUGPRINT("NOPROTO & 802_3 not allowed\n");
		return -EINVAL;
	}
	/* what hook do we belong to? */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if (!newinfo->hook_entry[i])
			continue;
		if ((char *)newinfo->hook_entry[i] < (char *)e)
			hook = i;
		else
			break;
	}
	/* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
	 * a base chain
	 */
	if (i < NF_BR_NUMHOOKS)
		hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
	else {
		/* rule lives in a user-defined chain: inherit the hookmask
		 * check_chainloops() computed for that chain
		 */
		for (i = 0; i < udc_cnt; i++)
			if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
				break;
		if (i == 0)
			hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
		else
			hookmask = cl_s[i - 1].hookmask;
	}
	i = 0;

	/* shared check parameters for matches and watchers/target */
	mtpar.net	= tgpar.net       = net;
	mtpar.table     = tgpar.table     = name;
	mtpar.entryinfo = tgpar.entryinfo = e;
	mtpar.hook_mask = tgpar.hook_mask = hookmask;
	mtpar.family    = tgpar.family    = NFPROTO_BRIDGE;
	ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
	if (ret != 0)
		goto cleanup_matches;
	j = 0;
	ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
	if (ret != 0)
		goto cleanup_watchers;
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
	/* gap: bytes available for the target (header + data) */
	gap = e->next_offset - e->target_offset;

	target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto cleanup_watchers;
	}

	t->u.target = target;
	if (t->u.target == &ebt_standard_target) {
		if (gap < sizeof(struct ebt_standard_target)) {
			BUGPRINT("Standard target size too big\n");
			ret = -EFAULT;
			goto cleanup_watchers;
		}
		if (((struct ebt_standard_target *)t)->verdict <
		   -NUM_STANDARD_TARGETS {
			BUGPRINT("Invalid standard target\n");
			ret = -EFAULT;
			goto cleanup_watchers;
		}
	} else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
		/* claimed target data doesn't fit in the entry */
		module_put(t->u.target->me);
		ret = -EFAULT;
		goto cleanup_watchers;
	}

	tgpar.target   = target;
	tgpar.targinfo = t->data;
	ret = xt_check_target(&tgpar, t->target_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		module_put(target->me);
		goto cleanup_watchers;
	}
	(*cnt)++;
	return 0;
cleanup_watchers:
	/* release only the j watchers / i matches that were accepted */
	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
cleanup_matches:
	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
	return ret;
}
 753
 754/* checks for loops and sets the hook mask for udc
 
 755 * the hook mask for udc tells us from which base chains the udc can be
 756 * accessed. This mask is a parameter to the check() functions of the extensions
 757 */
static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
			    unsigned int udc_cnt, unsigned int hooknr, char *base)
{
	int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
	const struct ebt_entry *e = (struct ebt_entry *)chain->data;
	const struct ebt_entry_target *t;

	/* Depth-first walk of the base chain and every user-defined chain
	 * (udc) reachable from it.  cl_s acts as an explicit call stack:
	 * cl_s[i].cs saves the entry/position to resume at when returning
	 * from udc i, and cl_s[i].from records the udc (or -1 for the base
	 * chain) that jumped into it.  Returns 0 on success, -1 on a bad
	 * jump destination, a loop, or a malformed standard target.
	 */
	while (pos < nentries || chain_nr != -1) {
		/* end of udc, go back one 'recursion' step */
		if (pos == nentries) {
			/* put back values of the time when this chain was called */
			e = cl_s[chain_nr].cs.e;
			if (cl_s[chain_nr].from != -1)
				nentries =
				cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
			else
				nentries = chain->nentries;
			pos = cl_s[chain_nr].cs.n;
			/* make sure we won't see a loop that isn't one */
			cl_s[chain_nr].cs.n = 0;
			chain_nr = cl_s[chain_nr].from;
			if (pos == nentries)
				continue;
		}
		/* only standard targets can jump to another chain */
		t = (struct ebt_entry_target *)
		   (((char *)e) + e->target_offset);
		if (strcmp(t->u.name, EBT_STANDARD_TARGET))
			goto letscontinue;
		if (e->target_offset + sizeof(struct ebt_standard_target) >
		   e->next_offset) {
			BUGPRINT("Standard target size too big\n");
			return -1;
		}
		verdict = ((struct ebt_standard_target *)t)->verdict;
		if (verdict >= 0) { /* jump to another chain */
			/* a non-negative verdict is an offset (from base) to
			 * the destination chain's struct ebt_entries header
			 */
			struct ebt_entries *hlp2 =
			   (struct ebt_entries *)(base + verdict);
			for (i = 0; i < udc_cnt; i++)
				if (hlp2 == cl_s[i].cs.chaininfo)
					break;
			/* bad destination or loop */
			if (i == udc_cnt) {
				BUGPRINT("bad destination\n");
				return -1;
			}
			/* cs.n != 0 means udc i is already on the call stack */
			if (cl_s[i].cs.n) {
				BUGPRINT("loop\n");
				return -1;
			}
			/* already visited from this hook: no need to recurse */
			if (cl_s[i].hookmask & (1 << hooknr))
				goto letscontinue;
			/* this can't be 0, so the loop test is correct */
			cl_s[i].cs.n = pos + 1;
			pos = 0;
			cl_s[i].cs.e = ebt_next_entry(e);
			e = (struct ebt_entry *)(hlp2->data);
			nentries = hlp2->nentries;
			cl_s[i].from = chain_nr;
			chain_nr = i;
			/* this udc is accessible from the base chain for hooknr */
			cl_s[i].hookmask |= (1 << hooknr);
			continue;
		}
letscontinue:
		e = ebt_next_entry(e);
		pos++;
	}
	return 0;
}
 827
/* do the parsing of the table/chains/entries/matches/watchers/targets, heh
 *
 * Validates newinfo (already converted to kernel pointers by
 * ebt_verify_pointers()) and binds all extensions.  Returns 0 on success;
 * on failure all extension references taken so far have been dropped, but
 * newinfo->chainstack (if allocated) is left for the caller to free.
 */
static int translate_table(struct net *net, const char *name,
			   struct ebt_table_info *newinfo)
{
	unsigned int i, j, k, udc_cnt;
	int ret;
	struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */

	/* the first valid hook must own the very first chain in the blob */
	i = 0;
	while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
		i++;
	if (i == NF_BR_NUMHOOKS) {
		BUGPRINT("No valid hooks specified\n");
		return -EINVAL;
	}
	if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
		BUGPRINT("Chains don't start at beginning\n");
		return -EINVAL;
	}
	/* make sure chains are ordered after each other in same order
	 * as their corresponding hooks
	 */
	for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
		if (!newinfo->hook_entry[j])
			continue;
		if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
			BUGPRINT("Hook order must be followed\n");
			return -EINVAL;
		}
		i = j;
	}

	/* do some early checkings and initialize some things */
	i = 0; /* holds the expected nr. of entries for the chain */
	j = 0; /* holds the up to now counted entries for the chain */
	k = 0; /* holds the total nr. of entries, should equal
		* newinfo->nentries afterwards
		*/
	udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
	   ebt_check_entry_size_and_hooks, newinfo,
	   &i, &j, &k, &udc_cnt);

	if (ret != 0)
		return ret;

	if (i != j) {
		BUGPRINT("nentries does not equal the nr of entries in the "
			 "(last) chain\n");
		return -EINVAL;
	}
	if (k != newinfo->nentries) {
		BUGPRINT("Total nentries is wrong\n");
		return -EINVAL;
	}

	/* get the location of the udc, put them in an array
	 * while we're at it, allocate the chainstack
	 */
	if (udc_cnt) {
		/* this will get free'd in do_replace()/ebt_register_table()
		 * if an error occurs
		 */
		newinfo->chainstack =
			vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
		if (!newinfo->chainstack)
			return -ENOMEM;
		/* one jump stack per possible cpu, udc_cnt entries deep */
		for_each_possible_cpu(i) {
			newinfo->chainstack[i] =
			  vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
			if (!newinfo->chainstack[i]) {
				while (i)
					vfree(newinfo->chainstack[--i]);
				vfree(newinfo->chainstack);
				newinfo->chainstack = NULL;
				return -ENOMEM;
			}
		}

		/* per-udc bookkeeping consumed by check_chainloops() below */
		cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
		if (!cl_s)
			return -ENOMEM;
		i = 0; /* the i'th udc */
		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
		   ebt_get_udc_positions, newinfo, &i, cl_s);
		/* sanity check */
		if (i != udc_cnt) {
			BUGPRINT("i != udc_cnt\n");
			vfree(cl_s);
			return -EFAULT;
		}
	}

	/* Check for loops */
	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		if (newinfo->hook_entry[i])
			if (check_chainloops(newinfo->hook_entry[i],
			   cl_s, udc_cnt, i, newinfo->entries)) {
				vfree(cl_s);
				return -EINVAL;
			}

	/* we now know the following (along with E=mc²):
	 *  - the nr of entries in each chain is right
	 *  - the size of the allocated space is right
	 *  - all valid hooks have a corresponding chain
	 *  - there are no loops
	 *  - wrong data can still be on the level of a single entry
	 *  - could be there are jumps to places that are not the
	 *    beginning of a chain. This can only occur in chains that
	 *    are not accessible from any base chains, so we don't care.
	 */

	/* used to know what we need to clean up if something goes wrong */
	i = 0;
	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
	   ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
	if (ret != 0) {
		/* unwind only the i entries that passed ebt_check_entry() */
		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
				  ebt_cleanup_entry, net, &i);
	}
	vfree(cl_s);
	return ret;
}
 952
 953/* called under write_lock */
 954static void get_counters(const struct ebt_counter *oldcounters,
 955			 struct ebt_counter *counters, unsigned int nentries)
 956{
 957	int i, cpu;
 958	struct ebt_counter *counter_base;
 959
 960	/* counters of cpu 0 */
 961	memcpy(counters, oldcounters,
 962	       sizeof(struct ebt_counter) * nentries);
 963
 964	/* add other counters to those of cpu 0 */
 965	for_each_possible_cpu(cpu) {
 966		if (cpu == 0)
 967			continue;
 968		counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
 969		for (i = 0; i < nentries; i++)
 970			ADD_COUNTER(counters[i], counter_base[i].pcnt,
 971				    counter_base[i].bcnt);
 
 972	}
 973}
 974
/* Translate @newinfo and install it as the live ruleset of the table named
 * in @repl, handing the old counters back to userspace when requested.
 * On success the old table's resources are freed here; on failure the
 * caller still owns newinfo's buffers.
 */
static int do_replace_finish(struct net *net, struct ebt_replace *repl,
			      struct ebt_table_info *newinfo)
{
	int ret, i;
	struct ebt_counter *counterstmp = NULL;
	/* used to be able to unlock earlier */
	struct ebt_table_info *table;
	struct ebt_table *t;

	/* the user wants counters back
	 * the check on the size is done later, when we have the lock
	 */
	if (repl->num_counters) {
		unsigned long size = repl->num_counters * sizeof(*counterstmp);
		counterstmp = vmalloc(size);
		if (!counterstmp)
			return -ENOMEM;
	}

	/* turn the userspace offsets into kernel pointers, then validate */
	newinfo->chainstack = NULL;
	ret = ebt_verify_pointers(repl, newinfo);
	if (ret != 0)
		goto free_counterstmp;

	ret = translate_table(net, repl->name, newinfo);

	if (ret != 0)
		goto free_counterstmp;

	/* on success this returns with ebt_mutex held */
	t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
	if (!t) {
		ret = -ENOENT;
		goto free_iterate;
	}

	/* the table doesn't like it */
	if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
		goto free_unlock;

	if (repl->num_counters && repl->num_counters != t->private->nentries) {
		BUGPRINT("Wrong nr. of counters requested\n");
		ret = -EINVAL;
		goto free_unlock;
	}

	/* we have the mutex lock, so no danger in reading this pointer */
	table = t->private;
	/* make sure the table can only be rmmod'ed if it contains no rules */
	if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
		ret = -ENOENT;
		goto free_unlock;
	} else if (table->nentries && !newinfo->nentries)
		module_put(t->me);
	/* we need an atomic snapshot of the counters */
	write_lock_bh(&t->lock);
	if (repl->num_counters)
		get_counters(t->private->counters, counterstmp,
		   t->private->nentries);

	/* swap in the new ruleset under the write lock */
	t->private = newinfo;
	write_unlock_bh(&t->lock);
	mutex_unlock(&ebt_mutex);
	/* so, a user can change the chains while having messed up her counter
	 * allocation. Only reason why this is done is because this way the lock
	 * is held only once, while this doesn't bring the kernel into a
	 * dangerous state.
	 */
	if (repl->num_counters &&
	   copy_to_user(repl->counters, counterstmp,
	   repl->num_counters * sizeof(struct ebt_counter))) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n");
	}



	/* decrease module count and free resources */
	EBT_ENTRY_ITERATE(table->entries, table->entries_size,
			  ebt_cleanup_entry, net, NULL);

	vfree(table->entries);
	if (table->chainstack) {
		for_each_possible_cpu(i)
			vfree(table->chainstack[i]);
		vfree(table->chainstack);
	}
	vfree(table);

	vfree(counterstmp);

#ifdef CONFIG_AUDIT
	if (audit_enabled) {
		audit_log(current->audit_context, GFP_KERNEL,
			  AUDIT_NETFILTER_CFG,
			  "table=%s family=%u entries=%u",
			  repl->name, AF_BRIDGE, repl->nentries);
	}
#endif
	return ret;

free_unlock:
	mutex_unlock(&ebt_mutex);
free_iterate:
	/* drop the extension references taken by translate_table() */
	EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
			  ebt_cleanup_entry, net, NULL);
free_counterstmp:
	vfree(counterstmp);
	/* can be initialized in translate_table() */
	if (newinfo->chainstack) {
		for_each_possible_cpu(i)
			vfree(newinfo->chainstack[i]);
		vfree(newinfo->chainstack);
	}
	return ret;
}
1088
/* replace the table
 *
 * EBT_SO_SET_ENTRIES handler: copy the replacement header and entry blob
 * from userspace, sanity-check the sizes, then let do_replace_finish()
 * translate and install it.
 */
static int do_replace(struct net *net, const void __user *user,
		      unsigned int len)
{
	int ret, countersize;
	struct ebt_table_info *newinfo;
	struct ebt_replace tmp;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* the entry blob must directly follow the header */
	if (len != sizeof(tmp) + tmp.entries_size) {
		BUGPRINT("Wrong len argument\n");
		return -EINVAL;
	}

	if (tmp.entries_size == 0) {
		BUGPRINT("Entries_size never zero\n");
		return -EINVAL;
	}
	/* overflow check */
	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
		return -ENOMEM;

	/* don't trust userspace to NUL-terminate the table name */
	tmp.name[sizeof(tmp.name) - 1] = 0;

	/* per-cpu counters are laid out right behind the table_info header */
	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	if (!newinfo)
		return -ENOMEM;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	newinfo->entries = vmalloc(tmp.entries_size);
	if (!newinfo->entries) {
		ret = -ENOMEM;
		goto free_newinfo;
	}
	if (copy_from_user(
	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
		BUGPRINT("Couldn't copy entries from userspace\n");
		ret = -EFAULT;
		goto free_entries;
	}

	/* on success newinfo is installed as the live table; on failure we
	 * still own (and must free) both buffers
	 */
	ret = do_replace_finish(net, &tmp, newinfo);
	if (ret == 0)
		return ret;
free_entries:
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
	return ret;
}
1147
1148static void __ebt_unregister_table(struct net *net, struct ebt_table *table)
1149{
1150	int i;
1151
1152	mutex_lock(&ebt_mutex);
1153	list_del(&table->list);
1154	mutex_unlock(&ebt_mutex);
1155	EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
1156			  ebt_cleanup_entry, net, NULL);
1157	if (table->private->nentries)
1158		module_put(table->me);
1159	vfree(table->private->entries);
1160	if (table->private->chainstack) {
1161		for_each_possible_cpu(i)
1162			vfree(table->private->chainstack[i]);
1163		vfree(table->private->chainstack);
1164	}
1165	vfree(table->private);
1166	kfree(table);
1167}
1168
/* Register a table template with a netns: duplicate @input_table, build a
 * private ruleset from its struct ebt_replace_kernel, add it to the netns
 * table list and (optionally) register its netfilter hooks.  On success
 * *res points at the new table.
 */
int ebt_register_table(struct net *net, const struct ebt_table *input_table,
		       const struct nf_hook_ops *ops, struct ebt_table **res)
{
	struct ebt_table_info *newinfo;
	struct ebt_table *t, *table;
	struct ebt_replace_kernel *repl;
	int ret, i, countersize;
	void *p;

	if (input_table == NULL || (repl = input_table->table) == NULL ||
	    repl->entries == NULL || repl->entries_size == 0 ||
	    repl->counters != NULL || input_table->private != NULL) {
		BUGPRINT("Bad table data for ebt_register_table!!!\n");
		return -EINVAL;
	}

	/* Don't add one table to multiple lists. */
	table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	/* per-cpu counters live right behind the table_info header */
	countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	ret = -ENOMEM;
	if (!newinfo)
		goto free_table;

	p = vmalloc(repl->entries_size);
	if (!p)
		goto free_newinfo;

	memcpy(p, repl->entries, repl->entries_size);
	newinfo->entries = p;

	newinfo->entries_size = repl->entries_size;
	newinfo->nentries = repl->nentries;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	/* fill in newinfo and parse the entries */
	newinfo->chainstack = NULL;
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if ((repl->valid_hooks & (1 << i)) == 0)
			newinfo->hook_entry[i] = NULL;
		else
			/* rebase the template's hook pointer onto our copy */
			newinfo->hook_entry[i] = p +
				((char *)repl->hook_entry[i] - repl->entries);
	}
	ret = translate_table(net, repl->name, newinfo);
	if (ret != 0) {
		BUGPRINT("Translate_table failed\n");
		goto free_chainstack;
	}

	if (table->check && table->check(newinfo, table->valid_hooks)) {
		BUGPRINT("The table doesn't like its own initial data, lol\n");
		ret = -EINVAL;
		goto free_chainstack;
	}

	table->private = newinfo;
	rwlock_init(&table->lock);
	mutex_lock(&ebt_mutex);



	/* reject duplicate table names within this netns */
	list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			BUGPRINT("Table name already exists\n");
			goto free_unlock;
		}
	}

	/* Hold a reference count if the chains aren't empty */
	if (newinfo->nentries && !try_module_get(table->me)) {
		ret = -ENOENT;
		goto free_unlock;
	}
	list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
	mutex_unlock(&ebt_mutex);

	WRITE_ONCE(*res, table);

	if (!ops)
		return 0;

	ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
	if (ret) {
		__ebt_unregister_table(net, table);
		*res = NULL;
	}

	return ret;
free_unlock:
	mutex_unlock(&ebt_mutex);
free_chainstack:
	if (newinfo->chainstack) {
		for_each_possible_cpu(i)
			vfree(newinfo->chainstack[i]);
		vfree(newinfo->chainstack);
	}
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
free_table:
	kfree(table);
out:
	return ret;
}
1279
1280void ebt_unregister_table(struct net *net, struct ebt_table *table,
1281			  const struct nf_hook_ops *ops)
1282{
1283	if (ops)
1284		nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
1285	__ebt_unregister_table(net, table);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1286}
1287
1288/* userspace just supplied us with counters */
1289static int do_update_counters(struct net *net, const char *name,
1290				struct ebt_counter __user *counters,
1291				unsigned int num_counters,
1292				const void __user *user, unsigned int len)
1293{
1294	int i, ret;
1295	struct ebt_counter *tmp;
1296	struct ebt_table *t;
1297
1298	if (num_counters == 0)
1299		return -EINVAL;
1300
1301	tmp = vmalloc(num_counters * sizeof(*tmp));
1302	if (!tmp)
1303		return -ENOMEM;
1304
1305	t = find_table_lock(net, name, &ret, &ebt_mutex);
1306	if (!t)
1307		goto free_tmp;
1308
1309	if (num_counters != t->private->nentries) {
1310		BUGPRINT("Wrong nr of counters\n");
1311		ret = -EINVAL;
1312		goto unlock_mutex;
1313	}
1314
1315	if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
1316		ret = -EFAULT;
1317		goto unlock_mutex;
1318	}
1319
1320	/* we want an atomic add of the counters */
1321	write_lock_bh(&t->lock);
1322
1323	/* we add to the counters of the first cpu */
1324	for (i = 0; i < num_counters; i++)
1325		ADD_COUNTER(t->private->counters[i], tmp[i].pcnt, tmp[i].bcnt);
 
 
1326
1327	write_unlock_bh(&t->lock);
1328	ret = 0;
1329unlock_mutex:
1330	mutex_unlock(&ebt_mutex);
1331free_tmp:
1332	vfree(tmp);
1333	return ret;
1334}
1335
1336static int update_counters(struct net *net, const void __user *user,
1337			    unsigned int len)
1338{
1339	struct ebt_replace hlp;
1340
1341	if (copy_from_user(&hlp, user, sizeof(hlp)))
1342		return -EFAULT;
1343
1344	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1345		return -EINVAL;
1346
1347	return do_update_counters(net, hlp.name, hlp.counters,
1348				hlp.num_counters, user, len);
1349}
1350
1351static inline int ebt_obj_to_user(char __user *um, const char *_name,
1352				  const char *data, int entrysize,
1353				  int usersize, int datasize, u8 revision)
1354{
1355	char name[EBT_EXTENSION_MAXNAMELEN] = {0};
1356
1357	/* ebtables expects 31 bytes long names but xt_match names are 29 bytes
1358	 * long. Copy 29 bytes and fill remaining bytes with zeroes.
1359	 */
1360	strlcpy(name, _name, sizeof(name));
1361	if (copy_to_user(um, name, EBT_EXTENSION_MAXNAMELEN) ||
1362	    put_user(revision, (u8 __user *)(um + EBT_EXTENSION_MAXNAMELEN)) ||
1363	    put_user(datasize, (int __user *)(um + EBT_EXTENSION_MAXNAMELEN + 1)) ||
1364	    xt_data_to_user(um + entrysize, data, usersize, datasize,
1365			    XT_ALIGN(datasize)))
1366		return -EFAULT;
1367
1368	return 0;
1369}
1370
1371static inline int ebt_match_to_user(const struct ebt_entry_match *m,
1372				    const char *base, char __user *ubase)
1373{
1374	return ebt_obj_to_user(ubase + ((char *)m - base),
1375			       m->u.match->name, m->data, sizeof(*m),
1376			       m->u.match->usersize, m->match_size,
1377			       m->u.match->revision);
1378}
1379
1380static inline int ebt_watcher_to_user(const struct ebt_entry_watcher *w,
1381				      const char *base, char __user *ubase)
1382{
1383	return ebt_obj_to_user(ubase + ((char *)w - base),
1384			       w->u.watcher->name, w->data, sizeof(*w),
1385			       w->u.watcher->usersize, w->watcher_size,
1386			       w->u.watcher->revision);
1387}
1388
1389static inline int ebt_entry_to_user(struct ebt_entry *e, const char *base,
1390				    char __user *ubase)
1391{
1392	int ret;
1393	char __user *hlp;
1394	const struct ebt_entry_target *t;
1395
1396	if (e->bitmask == 0) {
1397		/* special case !EBT_ENTRY_OR_ENTRIES */
1398		if (copy_to_user(ubase + ((char *)e - base), e,
1399				 sizeof(struct ebt_entries)))
1400			return -EFAULT;
1401		return 0;
1402	}
1403
1404	if (copy_to_user(ubase + ((char *)e - base), e, sizeof(*e)))
1405		return -EFAULT;
1406
1407	hlp = ubase + (((char *)e + e->target_offset) - base);
1408	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
1409
1410	ret = EBT_MATCH_ITERATE(e, ebt_match_to_user, base, ubase);
1411	if (ret != 0)
1412		return ret;
1413	ret = EBT_WATCHER_ITERATE(e, ebt_watcher_to_user, base, ubase);
1414	if (ret != 0)
1415		return ret;
1416	ret = ebt_obj_to_user(hlp, t->u.target->name, t->data, sizeof(*t),
1417			      t->u.target->usersize, t->target_size,
1418			      t->u.target->revision);
1419	if (ret != 0)
1420		return ret;
1421
1422	return 0;
1423}
1424
1425static int copy_counters_to_user(struct ebt_table *t,
1426				 const struct ebt_counter *oldcounters,
1427				 void __user *user, unsigned int num_counters,
1428				 unsigned int nentries)
1429{
1430	struct ebt_counter *counterstmp;
1431	int ret = 0;
1432
1433	/* userspace might not need the counters */
1434	if (num_counters == 0)
1435		return 0;
1436
1437	if (num_counters != nentries) {
1438		BUGPRINT("Num_counters wrong\n");
1439		return -EINVAL;
1440	}
1441
1442	counterstmp = vmalloc(nentries * sizeof(*counterstmp));
1443	if (!counterstmp)
1444		return -ENOMEM;
1445
1446	write_lock_bh(&t->lock);
1447	get_counters(oldcounters, counterstmp, nentries);
1448	write_unlock_bh(&t->lock);
1449
1450	if (copy_to_user(user, counterstmp,
1451	   nentries * sizeof(struct ebt_counter)))
1452		ret = -EFAULT;
1453	vfree(counterstmp);
1454	return ret;
1455}
1456
/* called with ebt_mutex locked
 *
 * EBT_SO_GET_ENTRIES / EBT_SO_GET_INIT_ENTRIES handler: copy either the
 * live ruleset (t->private) or the initial template (t->table) back to
 * userspace, including counters when requested.
 */
static int copy_everything_to_user(struct ebt_table *t, void __user *user,
				   const int *len, int cmd)
{
	struct ebt_replace tmp;
	const struct ebt_counter *oldcounters;
	unsigned int entries_size, nentries;
	int ret;
	char *entries;

	if (cmd == EBT_SO_GET_ENTRIES) {
		/* live table */
		entries_size = t->private->entries_size;
		nentries = t->private->nentries;
		entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		/* initial (registration-time) table */
		entries_size = t->table->entries_size;
		nentries = t->table->nentries;
		entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* userspace must have sized its buffer exactly */
	if (*len != sizeof(struct ebt_replace) + entries_size +
	   (tmp.num_counters ? nentries * sizeof(struct ebt_counter) : 0))
		return -EINVAL;

	if (tmp.nentries != nentries) {
		BUGPRINT("Nentries wrong\n");
		return -EINVAL;
	}

	if (tmp.entries_size != entries_size) {
		BUGPRINT("Wrong size\n");
		return -EINVAL;
	}

	ret = copy_counters_to_user(t, oldcounters, tmp.counters,
					tmp.num_counters, nentries);
	if (ret)
		return ret;




	/* set the match/watcher/target names right */
	return EBT_ENTRY_ITERATE(entries, entries_size,
	   ebt_entry_to_user, entries, tmp.entries);
}
1505
1506static int do_ebt_set_ctl(struct sock *sk,
1507	int cmd, void __user *user, unsigned int len)
1508{
1509	int ret;
1510	struct net *net = sock_net(sk);
1511
1512	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1513		return -EPERM;
1514
1515	switch (cmd) {
1516	case EBT_SO_SET_ENTRIES:
1517		ret = do_replace(net, user, len);
1518		break;
1519	case EBT_SO_SET_COUNTERS:
1520		ret = update_counters(net, user, len);
1521		break;
1522	default:
1523		ret = -EINVAL;
1524	}
1525	return ret;
1526}
1527
/* getsockopt() entry point: report table info or copy a full ruleset back
 * to userspace.  Requires CAP_NET_ADMIN; each branch is responsible for
 * dropping ebt_mutex (taken by find_table_lock()) itself.
 */
static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;
	struct ebt_replace tmp;
	struct ebt_table *t;
	struct net *net = sock_net(sk);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* don't trust userspace to NUL-terminate the table name */
	tmp.name[sizeof(tmp.name) - 1] = '\0';

	/* returns with ebt_mutex held on success */
	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	switch (cmd) {
	case EBT_SO_GET_INFO:
	case EBT_SO_GET_INIT_INFO:
		if (*len != sizeof(struct ebt_replace)) {
			ret = -EINVAL;
			mutex_unlock(&ebt_mutex);
			break;
		}
		/* GET_INFO reports the live table, GET_INIT_INFO the
		 * registration-time template
		 */
		if (cmd == EBT_SO_GET_INFO) {
			tmp.nentries = t->private->nentries;
			tmp.entries_size = t->private->entries_size;
			tmp.valid_hooks = t->valid_hooks;
		} else {
			tmp.nentries = t->table->nentries;
			tmp.entries_size = t->table->entries_size;
			tmp.valid_hooks = t->table->valid_hooks;
		}
		mutex_unlock(&ebt_mutex);
		if (copy_to_user(user, &tmp, *len) != 0) {
			BUGPRINT("c2u Didn't work\n");
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;

	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		ret = copy_everything_to_user(t, user, len, cmd);
		mutex_unlock(&ebt_mutex);
		break;

	default:
		mutex_unlock(&ebt_mutex);
		ret = -EINVAL;
	}

	return ret;
}
1586
1587#ifdef CONFIG_COMPAT
1588/* 32 bit-userspace compatibility definitions. */
/* compat image of struct ebt_replace: same field order as the native
 * struct, but every userspace pointer is a 32-bit compat_uptr_t
 */
struct compat_ebt_replace {
	char name[EBT_TABLE_MAXNAMELEN];
	compat_uint_t valid_hooks;
	compat_uint_t nentries;
	compat_uint_t entries_size;
	/* start of the chains */
	compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
	/* nr of counters userspace expects back */
	compat_uint_t num_counters;
	/* where the kernel will put the old counters. */
	compat_uptr_t counters;
	compat_uptr_t entries;
};
1602
/* struct ebt_entry_match, _target and _watcher have same layout */
struct compat_ebt_entry_mwt {
	union {
		struct {
			char name[EBT_EXTENSION_MAXNAMELEN];
			u8 revision;
		};
		compat_uptr_t ptr;	/* kernel-side: resolved extension */
	} u;
	compat_uint_t match_size;	/* payload size as seen by 32-bit userspace */
	compat_uint_t data[0];		/* extension payload follows */
};
1615
1616/* account for possible padding between match_size and ->data */
1617static int ebt_compat_entry_padsize(void)
1618{
1619	BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
1620			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
1621	return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
1622			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
1623}
1624
1625static int ebt_compat_match_offset(const struct xt_match *match,
1626				   unsigned int userlen)
1627{
1628	/* ebt_among needs special handling. The kernel .matchsize is
 
1629	 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1630	 * value is expected.
1631	 * Example: userspace sends 4500, ebt_among.c wants 4504.
1632	 */
1633	if (unlikely(match->matchsize == -1))
1634		return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
1635	return xt_compat_match_offset(match);
1636}
1637
/* Convert one match to its compat representation at *dstptr, advancing
 * *dstptr past the written data and shrinking *size by the native/compat
 * delta.
 */
static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
				unsigned int *size)
{
	const struct xt_match *match = m->u.match;
	struct compat_ebt_entry_mwt __user *cm = *dstptr;
	int off = ebt_compat_match_offset(match, m->match_size);
	compat_uint_t msize = m->match_size - off;

	/* a delta >= the native size would make msize wrap */
	if (WARN_ON(off >= m->match_size))
		return -EINVAL;

	if (copy_to_user(cm->u.name, match->name, strlen(match->name) + 1) ||
	    put_user(match->revision, &cm->u.revision) ||
	    put_user(msize, &cm->match_size))
		return -EFAULT;

	/* let the extension convert its own payload if it knows how */
	if (match->compat_to_user) {
		if (match->compat_to_user(cm->data, m->data))
			return -EFAULT;
	} else {
		if (xt_data_to_user(cm->data, m->data, match->usersize, msize,
				    COMPAT_XT_ALIGN(msize)))
			return -EFAULT;
	}

	/* consumed: header padding delta plus the payload size delta */
	*size -= ebt_compat_entry_padsize() + off;
	*dstptr = cm->data;
	*dstptr += msize;
	return 0;
}
1668
/* Convert one target to its compat representation at *dstptr, advancing
 * *dstptr past the written data and shrinking *size by the native/compat
 * delta.
 */
static int compat_target_to_user(struct ebt_entry_target *t,
				 void __user **dstptr,
				 unsigned int *size)
{
	const struct xt_target *target = t->u.target;
	struct compat_ebt_entry_mwt __user *cm = *dstptr;
	int off = xt_compat_target_offset(target);
	compat_uint_t tsize = t->target_size - off;

	/* a delta >= the native size would make tsize wrap */
	if (WARN_ON(off >= t->target_size))
		return -EINVAL;

	if (copy_to_user(cm->u.name, target->name, strlen(target->name) + 1) ||
	    put_user(target->revision, &cm->u.revision) ||
	    put_user(tsize, &cm->match_size))
		return -EFAULT;

	/* let the extension convert its own payload if it knows how */
	if (target->compat_to_user) {
		if (target->compat_to_user(cm->data, t->data))
			return -EFAULT;
	} else {
		if (xt_data_to_user(cm->data, t->data, target->usersize, tsize,
				    COMPAT_XT_ALIGN(tsize)))
			return -EFAULT;
	}

	/* consumed: header padding delta plus the payload size delta */
	*size -= ebt_compat_entry_padsize() + off;
	*dstptr = cm->data;
	*dstptr += tsize;
	return 0;
}
1700
1701static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1702				  void __user **dstptr,
1703				  unsigned int *size)
1704{
1705	return compat_target_to_user((struct ebt_entry_target *)w,
1706							dstptr, size);
1707}
1708
/* Convert one entry (or chain header) to its compat layout at *dstptr.
 * The entry's watchers/target/next offsets shrink by however much the
 * preceding extensions shrank, so they are recomputed from the running
 * (origsize - *size) delta and patched into the copied entry afterwards.
 */
static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
				unsigned int *size)
{
	struct ebt_entry_target *t;
	struct ebt_entry __user *ce;
	u32 watchers_offset, target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	/* bitmask == 0 marks a struct ebt_entries chain header */
	if (e->bitmask == 0) {
		if (*size < sizeof(struct ebt_entries))
			return -EINVAL;
		if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
			return -EFAULT;

		*dstptr += sizeof(struct ebt_entries);
		*size -= sizeof(struct ebt_entries);
		return 0;
	}

	if (*size < sizeof(*ce))
		return -EINVAL;

	ce = *dstptr;
	if (copy_to_user(ce, e, sizeof(*ce)))
		return -EFAULT;

	origsize = *size;
	*dstptr += sizeof(*ce);

	ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
	if (ret)
		return ret;
	/* offset shrinks by whatever the matches shrank so far */
	watchers_offset = e->watchers_offset - (origsize - *size);

	ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
	if (ret)
		return ret;
	target_offset = e->target_offset - (origsize - *size);

	t = (struct ebt_entry_target *) ((char *) e + e->target_offset);

	ret = compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);

	/* patch the recomputed offsets into the already-copied entry */
	if (put_user(watchers_offset, &ce->watchers_offset) ||
	    put_user(target_offset, &ce->target_offset) ||
	    put_user(next_offset, &ce->next_offset))
		return -EFAULT;

	*size -= sizeof(*ce);
	return 0;
}
1764
1765static int compat_calc_match(struct ebt_entry_match *m, int *off)
1766{
1767	*off += ebt_compat_match_offset(m->u.match, m->match_size);
1768	*off += ebt_compat_entry_padsize();
1769	return 0;
1770}
1771
1772static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1773{
1774	*off += xt_compat_target_offset(w->u.watcher);
1775	*off += ebt_compat_entry_padsize();
1776	return 0;
1777}
1778
/* Compute how much smaller one entry becomes in the compat layout,
 * register that delta with x_tables, shrink newinfo->entries_size and
 * adjust any hook entry points that lie beyond this entry.
 */
static int compat_calc_entry(const struct ebt_entry *e,
			     const struct ebt_table_info *info,
			     const void *base,
			     struct compat_ebt_replace *newinfo)
{
	const struct ebt_entry_target *t;
	unsigned int entry_offset;
	int off, ret, i;

	/* chain headers (bitmask == 0) have the same size in both layouts */
	if (e->bitmask == 0)
		return 0;

	off = 0;
	entry_offset = (void *)e - base;

	/* sum the shrinkage of all attached extensions */
	EBT_MATCH_ITERATE(e, compat_calc_match, &off);
	EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);

	t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);

	off += xt_compat_target_offset(t->u.target);
	off += ebt_compat_entry_padsize();

	newinfo->entries_size -= off;

	/* remember the per-entry delta for later offset translation */
	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		const void *hookptr = info->hook_entry[i];
		/* NOTE(review): the '(base - hookptr)' operand order looks
		 * inverted compared to the intuitive 'hookptr - base' entry
		 * offset — confirm against upstream before changing.
		 */
		if (info->hook_entry[i] &&
		    (e < (struct ebt_entry *)(base - hookptr))) {
			newinfo->hook_entry[i] -= off;
			pr_debug("0x%08X -> 0x%08X\n",
					newinfo->hook_entry[i] + off,
					newinfo->hook_entry[i]);
		}
	}

	return 0;
}
1821
1822
1823static int compat_table_info(const struct ebt_table_info *info,
1824			     struct compat_ebt_replace *newinfo)
1825{
1826	unsigned int size = info->entries_size;
1827	const void *entries = info->entries;
1828
1829	newinfo->entries_size = size;
1830	if (info->nentries) {
1831		int ret = xt_compat_init_offsets(NFPROTO_BRIDGE,
1832						 info->nentries);
1833		if (ret)
1834			return ret;
1835	}
1836
 
1837	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1838							entries, newinfo);
1839}
1840
/* Copy a whole table (replace header, optional counters and all entries)
 * to a 32-bit userspace in compat layout.  cmd selects between the live
 * ruleset (EBT_SO_GET_ENTRIES) and the pristine initial table.
 */
static int compat_copy_everything_to_user(struct ebt_table *t,
					  void __user *user, int *len, int cmd)
{
	struct compat_ebt_replace repl, tmp;
	struct ebt_counter *oldcounters;
	struct ebt_table_info tinfo;
	int ret;
	void __user *pos;

	memset(&tinfo, 0, sizeof(tinfo));

	if (cmd == EBT_SO_GET_ENTRIES) {
		/* live table */
		tinfo.entries_size = t->private->entries_size;
		tinfo.nentries = t->private->nentries;
		tinfo.entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		/* initial (registration-time) table */
		tinfo.entries_size = t->table->entries_size;
		tinfo.nentries = t->table->nentries;
		tinfo.entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* userspace's idea of the table must match ours */
	if (tmp.nentries != tinfo.nentries ||
	   (tmp.num_counters && tmp.num_counters != tinfo.nentries))
		return -EINVAL;

	/* compute compat entries_size into repl (tmp stays untouched) */
	memcpy(&repl, &tmp, sizeof(repl));
	if (cmd == EBT_SO_GET_ENTRIES)
		ret = compat_table_info(t->private, &repl);
	else
		ret = compat_table_info(&tinfo, &repl);
	if (ret)
		return ret;

	if (*len != sizeof(tmp) + repl.entries_size +
	   (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
		pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
				*len, tinfo.entries_size, repl.entries_size);
		return -EINVAL;
	}

	/* userspace might not need the counters */
	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
					tmp.num_counters, tinfo.nentries);
	if (ret)
		return ret;

	pos = compat_ptr(tmp.entries);
	return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
			compat_copy_entry_to_user, &pos, &tmp.entries_size);
}
1896
/* Bookkeeping for the two-pass compat translation: pass one runs with
 * buf_kern_start == NULL and only measures sizes, pass two actually copies.
 */
struct ebt_entries_buf_state {
	char *buf_kern_start;	/* kernel buffer to copy (translated) data to */
	u32 buf_kern_len;	/* total size of kernel buffer */
	u32 buf_kern_offset;	/* amount of data copied so far */
	u32 buf_user_offset;	/* read position in userspace buffer */
};
1903
1904static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1905{
1906	state->buf_kern_offset += sz;
1907	return state->buf_kern_offset >= sz ? 0 : -EINVAL;
1908}
1909
1910static int ebt_buf_add(struct ebt_entries_buf_state *state,
1911		       void *data, unsigned int sz)
1912{
1913	if (state->buf_kern_start == NULL)
1914		goto count_only;
1915
1916	if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len))
1917		return -EINVAL;
1918
1919	memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1920
1921 count_only:
1922	state->buf_user_offset += sz;
1923	return ebt_buf_count(state, sz);
1924}
1925
1926static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1927{
1928	char *b = state->buf_kern_start;
1929
1930	if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len))
1931		return -EINVAL;
1932
1933	if (b != NULL && sz > 0)
1934		memset(b + state->buf_kern_offset, 0, sz);
1935	/* do not adjust ->buf_user_offset here, we added kernel-side padding */
1936	return ebt_buf_count(state, sz);
1937}
1938
/* Which extension kind compat_mtw_from_user() is translating. */
enum compat_mwt {
	EBT_COMPAT_MATCH,
	EBT_COMPAT_WATCHER,
	EBT_COMPAT_TARGET,
};
1944
/* Translate one compat match/watcher/target from the user image, looking
 * the extension up by name/revision.  On the copy pass (dst != NULL) the
 * payload is converted into the kernel buffer; on the sizing pass only the
 * offsets are advanced.  Returns the kernel-side size of the payload
 * (off + match_size) or a negative errno.
 */
static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
				enum compat_mwt compat_mwt,
				struct ebt_entries_buf_state *state,
				const unsigned char *base)
{
	char name[EBT_EXTENSION_MAXNAMELEN];
	struct xt_match *match;
	struct xt_target *wt;
	void *dst = NULL;
	int off, pad = 0;
	unsigned int size_kern, match_size = mwt->match_size;

	/* rejects unterminated/overlong extension names */
	if (strscpy(name, mwt->u.name, sizeof(name)) < 0)
		return -EINVAL;

	if (state->buf_kern_start)
		dst = state->buf_kern_start + state->buf_kern_offset;

	switch (compat_mwt) {
	case EBT_COMPAT_MATCH:
		/* may load the extension module on demand */
		match = xt_request_find_match(NFPROTO_BRIDGE, name,
					      mwt->u.revision);
		if (IS_ERR(match))
			return PTR_ERR(match);

		off = ebt_compat_match_offset(match, match_size);
		if (dst) {
			if (match->compat_from_user)
				match->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = match->matchsize;
		if (unlikely(size_kern == -1))
			size_kern = match_size;	/* wildcard-size extension */
		module_put(match->me);
		break;
	case EBT_COMPAT_WATCHER: /* fallthrough */
	case EBT_COMPAT_TARGET:
		wt = xt_request_find_target(NFPROTO_BRIDGE, name,
					    mwt->u.revision);
		if (IS_ERR(wt))
			return PTR_ERR(wt);
		off = xt_compat_target_offset(wt);

		if (dst) {
			if (wt->compat_from_user)
				wt->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = wt->targetsize;
		module_put(wt->me);
		break;

	default:
		return -EINVAL;
	}

	state->buf_kern_offset += match_size + off;
	state->buf_user_offset += match_size;
	pad = XT_ALIGN(size_kern) - size_kern;

	/* zero the alignment tail so no kernel memory leaks to userspace */
	if (pad > 0 && dst) {
		if (WARN_ON(state->buf_kern_len <= pad))
			return -EINVAL;
		if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad))
			return -EINVAL;
		memset(dst + size_kern, 0, pad);
	}
	return off + match_size;
}
2019
/* return size of all matches, watchers or target, including necessary
 * alignment and padding.
 */
static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
			unsigned int size_left, enum compat_mwt type,
			struct ebt_entries_buf_state *state, const void *base)
{
	int growth = 0;
	char *buf;

	if (size_left == 0)
		return 0;

	buf = (char *) match32;

	/* walk the consecutive compat match/watcher/target headers */
	while (size_left >= sizeof(*match32)) {
		struct ebt_entry_match *match_kern;
		int ret;

		/* on the copy pass, remember where the kernel-side header
		 * lands so its match_size can be fixed up afterwards
		 */
		match_kern = (struct ebt_entry_match *) state->buf_kern_start;
		if (match_kern) {
			char *tmp;
			tmp = state->buf_kern_start + state->buf_kern_offset;
			match_kern = (struct ebt_entry_match *) tmp;
		}
		ret = ebt_buf_add(state, buf, sizeof(*match32));
		if (ret < 0)
			return ret;
		size_left -= sizeof(*match32);

		/* add padding before match->data (if any) */
		ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
		if (ret < 0)
			return ret;

		/* payload must fit in what remains of this entry */
		if (match32->match_size > size_left)
			return -EINVAL;

		size_left -= match32->match_size;

		ret = compat_mtw_from_user(match32, type, state, base);
		if (ret < 0)
			return ret;

		/* translation may grow the payload, never shrink it */
		if (WARN_ON(ret < match32->match_size))
			return -EINVAL;
		growth += ret - match32->match_size;
		growth += ebt_compat_entry_padsize();

		buf += sizeof(*match32);
		buf += match32->match_size;

		if (match_kern)
			match_kern->match_size = ret;

		/* an entry has exactly one target; nothing may follow it */
		if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
			return -EINVAL;

		match32 = (struct compat_ebt_entry_mwt *) buf;
	}

	return growth;
}
2083
/* called for all ebt_entry structures. */
static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
			  unsigned int *total,
			  struct ebt_entries_buf_state *state)
{
	unsigned int i, j, startoff, new_offset = 0;
	/* stores match/watchers/targets & offset of next struct ebt_entry: */
	unsigned int offsets[4];
	unsigned int *offsets_update = NULL;
	int ret;
	char *buf_start;

	if (*total < sizeof(struct ebt_entries))
		return -EINVAL;

	/* bitmask == 0: this is a chain header, copy it through verbatim */
	if (!entry->bitmask) {
		*total -= sizeof(struct ebt_entries);
		return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
	}
	if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
		return -EINVAL;

	startoff = state->buf_user_offset;
	/* pull in most part of ebt_entry, it does not need to be changed. */
	ret = ebt_buf_add(state, entry,
			offsetof(struct ebt_entry, watchers_offset));
	if (ret < 0)
		return ret;

	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
	memcpy(&offsets[1], &entry->watchers_offset,
			sizeof(offsets) - sizeof(offsets[0]));

	/* on the copy pass, remember where the three offset fields land in
	 * the kernel buffer so they can be rewritten after translation
	 */
	if (state->buf_kern_start) {
		buf_start = state->buf_kern_start + state->buf_kern_offset;
		offsets_update = (unsigned int *) buf_start;
	}
	ret = ebt_buf_add(state, &offsets[1],
			sizeof(offsets) - sizeof(offsets[0]));
	if (ret < 0)
		return ret;
	buf_start = (char *) entry;
	/* 0: matches offset, always follows ebt_entry.
	 * 1: watchers offset, from ebt_entry structure
	 * 2: target offset, from ebt_entry structure
	 * 3: next ebt_entry offset, from ebt_entry structure
	 *
	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
	 */
	for (i = 0; i < 4 ; ++i) {
		if (offsets[i] > *total)
			return -EINVAL;

		if (i < 3 && offsets[i] == *total)
			return -EINVAL;

		if (i == 0)
			continue;
		/* offsets must be monotonically non-decreasing */
		if (offsets[i-1] > offsets[i])
			return -EINVAL;
	}

	/* translate each section: matches, watchers, then the target */
	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
		struct compat_ebt_entry_mwt *match32;
		unsigned int size;
		char *buf = buf_start + offsets[i];

		if (offsets[i] > offsets[j])
			return -EINVAL;

		match32 = (struct compat_ebt_entry_mwt *) buf;
		size = offsets[j] - offsets[i];
		/* i doubles as the enum compat_mwt section type here */
		ret = ebt_size_mwt(match32, size, i, state, base);
		if (ret < 0)
			return ret;
		new_offset += ret;
		if (offsets_update && new_offset) {
			pr_debug("change offset %d to %d\n",
				offsets_update[i], offsets[j] + new_offset);
			offsets_update[i] = offsets[j] + new_offset;
		}
	}

	/* sizing pass: record this entry's cumulative size delta */
	if (state->buf_kern_start == NULL) {
		unsigned int offset = buf_start - (char *) base;

		ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset);
		if (ret < 0)
			return ret;
	}

	/* startoff now holds how many user bytes this entry consumed */
	startoff = state->buf_user_offset - startoff;

	if (WARN_ON(*total < startoff))
		return -EINVAL;
	*total -= startoff;
	return 0;
}
2182
2183/* repl->entries_size is the size of the ebt_entry blob in userspace.
 
2184 * It might need more memory when copied to a 64 bit kernel in case
2185 * userspace is 32-bit. So, first task: find out how much memory is needed.
2186 *
2187 * Called before validation is performed.
2188 */
2189static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2190				struct ebt_entries_buf_state *state)
2191{
2192	unsigned int size_remaining = size_user;
2193	int ret;
2194
2195	ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2196					&size_remaining, state);
2197	if (ret < 0)
2198		return ret;
2199
2200	WARN_ON(size_remaining);
2201	return state->buf_kern_offset;
2202}
2203
2204
2205static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
2206					    void __user *user, unsigned int len)
2207{
2208	struct compat_ebt_replace tmp;
2209	int i;
2210
2211	if (len < sizeof(tmp))
2212		return -EINVAL;
2213
2214	if (copy_from_user(&tmp, user, sizeof(tmp)))
2215		return -EFAULT;
2216
2217	if (len != sizeof(tmp) + tmp.entries_size)
2218		return -EINVAL;
2219
2220	if (tmp.entries_size == 0)
2221		return -EINVAL;
2222
2223	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
2224			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
2225		return -ENOMEM;
2226	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
2227		return -ENOMEM;
2228
2229	memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
2230
2231	/* starting with hook_entry, 32 vs. 64 bit structures are different */
2232	for (i = 0; i < NF_BR_NUMHOOKS; i++)
2233		repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);
2234
2235	repl->num_counters = tmp.num_counters;
2236	repl->counters = compat_ptr(tmp.counters);
2237	repl->entries = compat_ptr(tmp.entries);
2238	return 0;
2239}
2240
2241static int compat_do_replace(struct net *net, void __user *user,
2242			     unsigned int len)
2243{
2244	int ret, i, countersize, size64;
2245	struct ebt_table_info *newinfo;
2246	struct ebt_replace tmp;
2247	struct ebt_entries_buf_state state;
2248	void *entries_tmp;
2249
2250	ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
2251	if (ret) {
2252		/* try real handler in case userland supplied needed padding */
2253		if (ret == -EINVAL && do_replace(net, user, len) == 0)
2254			ret = 0;
2255		return ret;
2256	}
2257
2258	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
2259	newinfo = vmalloc(sizeof(*newinfo) + countersize);
2260	if (!newinfo)
2261		return -ENOMEM;
2262
2263	if (countersize)
2264		memset(newinfo->counters, 0, countersize);
2265
2266	memset(&state, 0, sizeof(state));
2267
2268	newinfo->entries = vmalloc(tmp.entries_size);
2269	if (!newinfo->entries) {
2270		ret = -ENOMEM;
2271		goto free_newinfo;
2272	}
2273	if (copy_from_user(
2274	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
2275		ret = -EFAULT;
2276		goto free_entries;
2277	}
2278
2279	entries_tmp = newinfo->entries;
2280
2281	xt_compat_lock(NFPROTO_BRIDGE);
2282
2283	ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
2284	if (ret < 0)
2285		goto out_unlock;
2286	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2287	if (ret < 0)
2288		goto out_unlock;
2289
2290	pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
2291		tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
2292		xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));
2293
2294	size64 = ret;
2295	newinfo->entries = vmalloc(size64);
2296	if (!newinfo->entries) {
2297		vfree(entries_tmp);
2298		ret = -ENOMEM;
2299		goto out_unlock;
2300	}
2301
2302	memset(&state, 0, sizeof(state));
2303	state.buf_kern_start = newinfo->entries;
2304	state.buf_kern_len = size64;
2305
2306	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2307	if (WARN_ON(ret < 0))
2308		goto out_unlock;
2309
2310	vfree(entries_tmp);
2311	tmp.entries_size = size64;
2312
2313	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
2314		char __user *usrptr;
2315		if (tmp.hook_entry[i]) {
2316			unsigned int delta;
2317			usrptr = (char __user *) tmp.hook_entry[i];
2318			delta = usrptr - tmp.entries;
2319			usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
2320			tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
2321		}
2322	}
2323
2324	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2325	xt_compat_unlock(NFPROTO_BRIDGE);
2326
2327	ret = do_replace_finish(net, &tmp, newinfo);
2328	if (ret == 0)
2329		return ret;
2330free_entries:
2331	vfree(newinfo->entries);
2332free_newinfo:
2333	vfree(newinfo);
2334	return ret;
2335out_unlock:
2336	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2337	xt_compat_unlock(NFPROTO_BRIDGE);
2338	goto free_entries;
2339}
2340
2341static int compat_update_counters(struct net *net, void __user *user,
2342				  unsigned int len)
2343{
2344	struct compat_ebt_replace hlp;
2345
2346	if (copy_from_user(&hlp, user, sizeof(hlp)))
2347		return -EFAULT;
2348
2349	/* try real handler in case userland supplied needed padding */
2350	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2351		return update_counters(net, user, len);
2352
2353	return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2354					hlp.num_counters, user, len);
2355}
2356
2357static int compat_do_ebt_set_ctl(struct sock *sk,
2358		int cmd, void __user *user, unsigned int len)
2359{
2360	int ret;
2361	struct net *net = sock_net(sk);
2362
2363	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2364		return -EPERM;
2365
2366	switch (cmd) {
2367	case EBT_SO_SET_ENTRIES:
2368		ret = compat_do_replace(net, user, len);
2369		break;
2370	case EBT_SO_SET_COUNTERS:
2371		ret = compat_update_counters(net, user, len);
2372		break;
2373	default:
2374		ret = -EINVAL;
2375	}
2376	return ret;
2377}
2378
/* getsockopt dispatcher for 32-bit userland; requires CAP_NET_ADMIN.
 * Falls back to the native handler when userland appears to have supplied
 * native (64-bit padded) structures.
 */
static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
		void __user *user, int *len)
{
	int ret;
	struct compat_ebt_replace tmp;
	struct ebt_table *t;
	struct net *net = sock_net(sk);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* try real handler in case userland supplied needed padding */
	if ((cmd == EBT_SO_GET_INFO ||
	     cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
			return do_ebt_get_ctl(sk, cmd, user, len);

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	tmp.name[sizeof(tmp.name) - 1] = '\0';

	/* on success, returns with ebt_mutex held */
	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	xt_compat_lock(NFPROTO_BRIDGE);
	switch (cmd) {
	case EBT_SO_GET_INFO:
		tmp.nentries = t->private->nentries;
		/* recompute entries_size for the compat layout */
		ret = compat_table_info(t->private, &tmp);
		if (ret)
			goto out;
		tmp.valid_hooks = t->valid_hooks;

		/* *len == sizeof(tmp) is guaranteed by the check above */
		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_INIT_INFO:
		tmp.nentries = t->table->nentries;
		tmp.entries_size = t->table->entries_size;
		tmp.valid_hooks = t->table->valid_hooks;

		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		/* try real handler first in case of userland-side padding.
		 * in case we are dealing with an 'ordinary' 32 bit binary
		 * without 64bit compatibility padding, this will fail right
		 * after copy_from_user when the *len argument is validated.
		 *
		 * the compat_ variant needs to do one pass over the kernel
		 * data set to adjust for size differences before it the check.
		 */
		if (copy_everything_to_user(t, user, len, cmd) == 0)
			ret = 0;
		else
			ret = compat_copy_everything_to_user(t, user, len, cmd);
		break;
	default:
		ret = -EINVAL;
	}
 out:
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	mutex_unlock(&ebt_mutex);
	return ret;
}
2454#endif
2455
/* sockopt glue: ebtables is driven through [gs]etsockopt on PF_INET
 * sockets (historical quirk -- there is no bridge-specific PF here).
 */
static struct nf_sockopt_ops ebt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= EBT_BASE_CTL,
	.set_optmax	= EBT_SO_SET_MAX + 1,
	.set		= do_ebt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ebt_set_ctl,
#endif
	.get_optmin	= EBT_BASE_CTL,
	.get_optmax	= EBT_SO_GET_MAX + 1,
	.get		= do_ebt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ebt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2472
2473static int __init ebtables_init(void)
2474{
2475	int ret;
2476
2477	ret = xt_register_target(&ebt_standard_target);
2478	if (ret < 0)
2479		return ret;
2480	ret = nf_register_sockopt(&ebt_sockopts);
2481	if (ret < 0) {
2482		xt_unregister_target(&ebt_standard_target);
2483		return ret;
2484	}
2485
 
2486	return 0;
2487}
2488
/* Module unload: tear down in reverse registration order. */
static void __exit ebtables_fini(void)
{
	nf_unregister_sockopt(&ebt_sockopts);
	xt_unregister_target(&ebt_standard_target);
}
2494
/* exported for the per-table modules (ebtable_filter, ebtable_nat, ...) */
EXPORT_SYMBOL(ebt_register_table);
EXPORT_SYMBOL(ebt_unregister_table);
EXPORT_SYMBOL(ebt_do_table);
module_init(ebtables_init);
module_exit(ebtables_fini);
MODULE_LICENSE("GPL");