Linux Audio

Check our new training course

Loading...
v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  ebtables
   4 *
   5 *  Author:
   6 *  Bart De Schuymer		<bdschuym@pandora.be>
   7 *
   8 *  ebtables.c,v 2.0, July, 2002
   9 *
  10 *  This code is strongly inspired by the iptables code which is
  11 *  Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 
 
 
 
 
  12 */
  13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14#include <linux/kmod.h>
  15#include <linux/module.h>
  16#include <linux/vmalloc.h>
  17#include <linux/netfilter/x_tables.h>
  18#include <linux/netfilter_bridge/ebtables.h>
  19#include <linux/spinlock.h>
  20#include <linux/mutex.h>
  21#include <linux/slab.h>
  22#include <linux/uaccess.h>
  23#include <linux/smp.h>
  24#include <linux/cpumask.h>
  25#include <linux/audit.h>
  26#include <net/sock.h>
  27#include <net/netns/generic.h>
  28/* needed for logical [in,out]-dev filtering */
  29#include "../br_private.h"
  30
  31/* Each cpu has its own set of counters, so there is no need for write_lock in
 
 
 
 
 
  32 * the softirq
  33 * For reading or updating the counters, the user context needs to
  34 * get a write_lock
  35 */
  36
/* The size of each set of counters is altered to get cache alignment */
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
/* byte size of one cpu's set of n counters, padded to a cache line */
#define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
/* first counter of the set belonging to 'cpu' inside the shared block 'c' */
#define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
				 COUNTER_OFFSET(n) * cpu))
  42
/* per-network-namespace state: the ebt_table instances registered in it */
struct ebt_pernet {
	struct list_head tables;
};
  46
/* registration record for a table that can be instantiated on demand
 * in a network namespace (see find_inlist_lock_noload())
 */
struct ebt_template {
	struct list_head list;
	char name[EBT_TABLE_MAXNAMELEN];
	struct module *owner;
	/* called when table is needed in the given netns */
	int (*table_init)(struct net *net);
};
  54
static unsigned int ebt_pernet_id __read_mostly;	/* net_generic() key */
static LIST_HEAD(template_tables);	/* all registered table templates */
static DEFINE_MUTEX(ebt_mutex);		/* serializes table/template list access */
  58
  59#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
  60static void ebt_standard_compat_from_user(void *dst, const void *src)
  61{
  62	int v = *(compat_int_t *)src;
  63
  64	if (v >= 0)
  65		v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
  66	memcpy(dst, &v, sizeof(v));
  67}
  68
  69static int ebt_standard_compat_to_user(void __user *dst, const void *src)
  70{
  71	compat_int_t cv = *(int *)src;
  72
  73	if (cv >= 0)
  74		cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
  75	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
  76}
  77#endif
  78
  79
/* built-in "standard" target: it has no ->target hook, which is what
 * ebt_do_table() tests to read the verdict straight from the entry
 */
static struct xt_target ebt_standard_target = {
	.name       = "standard",
	.revision   = 0,
	.family     = NFPROTO_BRIDGE,
	.targetsize = sizeof(int),
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
	.compatsize = sizeof(compat_int_t),
	.compat_from_user = ebt_standard_compat_from_user,
	.compat_to_user =  ebt_standard_compat_to_user,
#endif
};
  91
  92static inline int
  93ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
  94	       struct xt_action_param *par)
  95{
  96	par->target   = w->u.watcher;
  97	par->targinfo = w->data;
  98	w->u.watcher->target(skb, par);
  99	/* watchers don't give a verdict */
 100	return 0;
 101}
 102
 103static inline int
 104ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
 105	     struct xt_action_param *par)
 106{
 107	par->match     = m->u.match;
 108	par->matchinfo = m->data;
 109	return !m->u.match->match(skb, par);
 110}
 111
 112static inline int
 113ebt_dev_check(const char *entry, const struct net_device *device)
 114{
 115	int i = 0;
 116	const char *devname;
 117
 118	if (*entry == '\0')
 119		return 0;
 120	if (!device)
 121		return 1;
 122	devname = device->name;
 123	/* 1 is the wildcard token */
 124	while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
 125		i++;
 126	return devname[i] != entry[i] && entry[i] != 1;
 127}
 128
 
/* process standard matches
 * Returns 0 when the entry's built-in criteria (protocol, in/out device,
 * logical bridge device, source/dest MAC) all match the frame, nonzero
 * otherwise. Each NF_INVF() honours the rule's per-field invert flag.
 */
static inline int
ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
		const struct net_device *in, const struct net_device *out)
{
	const struct ethhdr *h = eth_hdr(skb);
	const struct net_bridge_port *p;
	__be16 ethproto;

	/* a vlan-tagged frame is matched on the 802.1Q ethertype itself */
	if (skb_vlan_tag_present(skb))
		ethproto = htons(ETH_P_8021Q);
	else
		ethproto = h->h_proto;

	if (e->bitmask & EBT_802_3) {
		if (NF_INVF(e, EBT_IPROTO, eth_proto_is_802_3(ethproto)))
			return 1;
	} else if (!(e->bitmask & EBT_NOPROTO) &&
		   NF_INVF(e, EBT_IPROTO, e->ethproto != ethproto))
		return 1;

	if (NF_INVF(e, EBT_IIN, ebt_dev_check(e->in, in)))
		return 1;
	if (NF_INVF(e, EBT_IOUT, ebt_dev_check(e->out, out)))
		return 1;
	/* rcu_read_lock()ed by nf_hook_thresh */
	if (in && (p = br_port_get_rcu(in)) != NULL &&
	    NF_INVF(e, EBT_ILOGICALIN,
		    ebt_dev_check(e->logical_in, p->br->dev)))
		return 1;
	if (out && (p = br_port_get_rcu(out)) != NULL &&
	    NF_INVF(e, EBT_ILOGICALOUT,
		    ebt_dev_check(e->logical_out, p->br->dev)))
		return 1;

	if (e->bitmask & EBT_SOURCEMAC) {
		if (NF_INVF(e, EBT_ISOURCE,
			    !ether_addr_equal_masked(h->h_source, e->sourcemac,
						     e->sourcemsk)))
			return 1;
	}
	if (e->bitmask & EBT_DESTMAC) {
		if (NF_INVF(e, EBT_IDEST,
			    !ether_addr_equal_masked(h->h_dest, e->destmac,
						     e->destmsk)))
			return 1;
	}
	return 0;
}
 178
/* advance to the entry that follows @entry in the flat rules blob */
static inline
struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
 184
/* const-qualified wrapper around ebt_get_target() */
static inline const struct ebt_entry_target *
ebt_get_target_c(const struct ebt_entry *e)
{
	return ebt_get_target((struct ebt_entry *)e);
}
 190
/* Do some firewalling
 * Main packet-evaluation loop: walks the chain for the current hook,
 * running matches, watchers and targets, following jumps into user
 * defined chains via the per-cpu chainstack, until a verdict
 * (NF_ACCEPT/NF_DROP) is reached. Runs under the table's read lock.
 */
unsigned int ebt_do_table(void *priv, struct sk_buff *skb,
			  const struct nf_hook_state *state)
{
	struct ebt_table *table = priv;
	unsigned int hook = state->hook;
	int i, nentries;
	struct ebt_entry *point;
	struct ebt_counter *counter_base, *cb_base;
	const struct ebt_entry_target *t;
	int verdict, sp = 0;
	struct ebt_chainstack *cs;
	struct ebt_entries *chaininfo;
	const char *base;
	const struct ebt_table_info *private;
	struct xt_action_param acpar;

	acpar.state   = state;
	acpar.hotdrop = false;

	read_lock_bh(&table->lock);
	private = table->private;
	/* this cpu's slice of the counter block */
	cb_base = COUNTER_BASE(private->counters, private->nentries,
	   smp_processor_id());
	/* chainstack exists only if the table has user defined chains */
	if (private->chainstack)
		cs = private->chainstack[smp_processor_id()];
	else
		cs = NULL;
	chaininfo = private->hook_entry[hook];
	nentries = private->hook_entry[hook]->nentries;
	point = (struct ebt_entry *)(private->hook_entry[hook]->data);
	counter_base = cb_base + private->hook_entry[hook]->counter_offset;
	/* base for chain jumps */
	base = private->entries;
	i = 0;
	while (i < nentries) {
		if (ebt_basic_match(point, skb, state->in, state->out))
			goto letscontinue;

		if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
			goto letscontinue;
		if (acpar.hotdrop) {
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}

		/* entry matched: account bytes and packets */
		ADD_COUNTER(*(counter_base + i), skb->len, 1);

		/* these should only watch: not modify, nor tell us
		 * what to do with the packet
		 */
		EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);

		t = ebt_get_target_c(point);
		/* standard target */
		if (!t->u.target->target)
			verdict = ((struct ebt_standard_target *)t)->verdict;
		else {
			acpar.target   = t->u.target;
			acpar.targinfo = t->data;
			verdict = t->u.target->target(skb, &acpar);
		}
		if (verdict == EBT_ACCEPT) {
			read_unlock_bh(&table->lock);
			return NF_ACCEPT;
		}
		if (verdict == EBT_DROP) {
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}
		if (verdict == EBT_RETURN) {
letsreturn:
			if (WARN(sp == 0, "RETURN on base chain")) {
				/* act like this is EBT_CONTINUE */
				goto letscontinue;
			}

			/* pop the caller chain's saved position */
			sp--;
			/* put all the local variables right */
			i = cs[sp].n;
			chaininfo = cs[sp].chaininfo;
			nentries = chaininfo->nentries;
			point = cs[sp].e;
			counter_base = cb_base +
			   chaininfo->counter_offset;
			continue;
		}
		if (verdict == EBT_CONTINUE)
			goto letscontinue;

		if (WARN(verdict < 0, "bogus standard verdict\n")) {
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}

		/* jump to a udc */
		cs[sp].n = i + 1;
		cs[sp].chaininfo = chaininfo;
		cs[sp].e = ebt_next_entry(point);
		i = 0;
		chaininfo = (struct ebt_entries *) (base + verdict);

		if (WARN(chaininfo->distinguisher, "jump to non-chain\n")) {
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}

		nentries = chaininfo->nentries;
		point = (struct ebt_entry *)chaininfo->data;
		counter_base = cb_base + chaininfo->counter_offset;
		sp++;
		continue;
letscontinue:
		point = ebt_next_entry(point);
		i++;
	}

	/* chain exhausted: apply the chain policy */
	/* I actually like this :) */
	if (chaininfo->policy == EBT_RETURN)
		goto letsreturn;
	if (chaininfo->policy == EBT_ACCEPT) {
		read_unlock_bh(&table->lock);
		return NF_ACCEPT;
	}
	read_unlock_bh(&table->lock);
	return NF_DROP;
}
 318
/* If it succeeds, returns element and locks mutex
 * Looks @name up in the netns table list; if absent, tries to
 * instantiate it from a registered template. On failure, returns NULL
 * with *error set and the mutex released.
 */
static inline void *
find_inlist_lock_noload(struct net *net, const char *name, int *error,
			struct mutex *mutex)
{
	struct ebt_pernet *ebt_net = net_generic(net, ebt_pernet_id);
	struct ebt_template *tmpl;
	struct ebt_table *table;

	mutex_lock(mutex);
	list_for_each_entry(table, &ebt_net->tables, list) {
		if (strcmp(table->name, name) == 0)
			return table;
	}

	list_for_each_entry(tmpl, &template_tables, list) {
		if (strcmp(name, tmpl->name) == 0) {
			struct module *owner = tmpl->owner;

			/* pin the template's module before dropping the
			 * mutex so it cannot go away under us
			 */
			if (!try_module_get(owner))
				goto out;

			/* table_init() may sleep/allocate; call unlocked */
			mutex_unlock(mutex);

			*error = tmpl->table_init(net);
			if (*error) {
				module_put(owner);
				return NULL;
			}

			mutex_lock(mutex);
			module_put(owner);
			break;
		}
	}

	/* retry: the template init should have registered the table */
	list_for_each_entry(table, &ebt_net->tables, list) {
		if (strcmp(table->name, name) == 0)
			return table;
	}

out:
	*error = -ENOENT;
	mutex_unlock(mutex);
	return NULL;
}
 365
/* as find_inlist_lock_noload(), but request_module("<prefix><name>")
 * and retry once if the first lookup fails
 */
static void *
find_inlist_lock(struct net *net, const char *name, const char *prefix,
		 int *error, struct mutex *mutex)
{
	return try_then_request_module(
			find_inlist_lock_noload(net, name, error, mutex),
			"%s%s", prefix, name);
}
 374
/* find a table by name, auto-loading "ebtable_<name>" if needed;
 * on success the mutex is held on return
 */
static inline struct ebt_table *
find_table_lock(struct net *net, const char *name, int *error,
		struct mutex *mutex)
{
	return find_inlist_lock(net, name, "ebtable_", error, mutex);
}
 381
 382static inline void ebt_free_table_info(struct ebt_table_info *info)
 383{
 384	int i;
 385
 386	if (info->chainstack) {
 387		for_each_possible_cpu(i)
 388			vfree(info->chainstack[i]);
 389		vfree(info->chainstack);
 390	}
 391}
/* validate one match extension of an entry: bounds-check its record,
 * resolve (and pin) the xt_match, then run xt_check_match() on it.
 * On success *cnt is incremented so the caller knows how many matches
 * to clean up on a later failure.
 */
static inline int
ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
		unsigned int *cnt)
{
	const struct ebt_entry *e = par->entryinfo;
	struct xt_match *match;
	size_t left = ((char *)e + e->watchers_offset) - (char *)m;
	int ret;

	/* the match record must fit inside the entry's match area */
	if (left < sizeof(struct ebt_entry_match) ||
	    left - sizeof(struct ebt_entry_match) < m->match_size)
		return -EINVAL;

	match = xt_find_match(NFPROTO_BRIDGE, m->u.name, m->u.revision);
	if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) {
		/* drop a wrong-family hit and retry after loading "ebt_<name>" */
		if (!IS_ERR(match))
			module_put(match->me);
		request_module("ebt_%s", m->u.name);
		match = xt_find_match(NFPROTO_BRIDGE, m->u.name, m->u.revision);
	}
	if (IS_ERR(match))
		return PTR_ERR(match);
	m->u.match = match;

	par->match     = match;
	par->matchinfo = m->data;
	ret = xt_check_match(par, m->match_size,
	      ntohs(e->ethproto), e->invflags & EBT_IPROTO);
	if (ret < 0) {
		module_put(match->me);
		return ret;
	}

	(*cnt)++;
	return 0;
}
 428
/* validate one watcher extension of an entry: bounds-check its record,
 * resolve (and pin) the xt_target used as a watcher, then run
 * xt_check_target(). On success *cnt is incremented for cleanup
 * accounting.
 */
static inline int
ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
		  unsigned int *cnt)
{
	const struct ebt_entry *e = par->entryinfo;
	struct xt_target *watcher;
	size_t left = ((char *)e + e->target_offset) - (char *)w;
	int ret;

	/* the watcher record must fit inside the entry's watcher area */
	if (left < sizeof(struct ebt_entry_watcher) ||
	   left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
		return -EINVAL;

	watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
	if (IS_ERR(watcher))
		return PTR_ERR(watcher);

	/* only bridge-family targets may act as ebtables watchers */
	if (watcher->family != NFPROTO_BRIDGE) {
		module_put(watcher->me);
		return -ENOENT;
	}

	w->u.watcher = watcher;

	par->target   = watcher;
	par->targinfo = w->data;
	ret = xt_check_target(par, w->watcher_size,
	      ntohs(e->ethproto), e->invflags & EBT_IPROTO);
	if (ret < 0) {
		module_put(watcher->me);
		return ret;
	}

	(*cnt)++;
	return 0;
}
 465
/* first-pass structural validation of the userspace blob: walk the
 * entries area, record where each valid hook's chain header lives in
 * newinfo->hook_entry[], and verify that chain headers and entries tile
 * the blob exactly with no gaps or overruns.
 */
static int ebt_verify_pointers(const struct ebt_replace *repl,
			       struct ebt_table_info *newinfo)
{
	unsigned int limit = repl->entries_size;
	unsigned int valid_hooks = repl->valid_hooks;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		newinfo->hook_entry[i] = NULL;

	newinfo->entries_size = repl->entries_size;
	newinfo->nentries = repl->nentries;

	while (offset < limit) {
		size_t left = limit - offset;
		struct ebt_entry *e = (void *)newinfo->entries + offset;

		if (left < sizeof(unsigned int))
			break;

		/* is this offset the userspace-declared start of a chain? */
		for (i = 0; i < NF_BR_NUMHOOKS; i++) {
			if ((valid_hooks & (1 << i)) == 0)
				continue;
			if ((char __user *)repl->hook_entry[i] ==
			     repl->entries + offset)
				break;
		}

		if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
			/* chain header: bitmask must be 0 */
			if (e->bitmask != 0) {
				/* we make userspace set this right,
				 * so there is no misunderstanding
				 */
				return -EINVAL;
			}
			if (i != NF_BR_NUMHOOKS)
				newinfo->hook_entry[i] = (struct ebt_entries *)e;
			if (left < sizeof(struct ebt_entries))
				break;
			offset += sizeof(struct ebt_entries);
		} else {
			/* ordinary rule entry: next_offset must be sane */
			if (left < sizeof(struct ebt_entry))
				break;
			if (left < e->next_offset)
				break;
			if (e->next_offset < sizeof(struct ebt_entry))
				return -EINVAL;
			offset += e->next_offset;
		}
	}
	/* the walk must consume the blob exactly */
	if (offset != limit)
		return -EINVAL;

	/* check if all valid hooks have a chain */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if (!newinfo->hook_entry[i] &&
		   (valid_hooks & (1 << i)))
			return -EINVAL;
	}
	return 0;
}
 528
/* this one is very careful, as it is the first function
 * to parse the userspace data
 * Per-item pass over the blob: validates chain headers (policy, counter
 * offset, per-chain entry count) and entry offsets, and tallies chain
 * sizes (*n), per-chain progress (*cnt), totals (*totalcnt) and the
 * number of user defined chains (*udc_cnt).
 */
static inline int
ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
			       const struct ebt_table_info *newinfo,
			       unsigned int *n, unsigned int *cnt,
			       unsigned int *totalcnt, unsigned int *udc_cnt)
{
	int i;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if ((void *)e == (void *)newinfo->hook_entry[i])
			break;
	}
	/* beginning of a new chain
	 * if i == NF_BR_NUMHOOKS it must be a user defined chain
	 */
	if (i != NF_BR_NUMHOOKS || !e->bitmask) {
		/* this checks if the previous chain has as many entries
		 * as it said it has
		 */
		if (*n != *cnt)
			return -EINVAL;

		if (((struct ebt_entries *)e)->policy != EBT_DROP &&
		   ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
			/* only RETURN from udc */
			if (i != NF_BR_NUMHOOKS ||
			   ((struct ebt_entries *)e)->policy != EBT_RETURN)
				return -EINVAL;
		}
		if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
			(*udc_cnt)++;
		/* counter offsets must follow the global entry numbering */
		if (((struct ebt_entries *)e)->counter_offset != *totalcnt)
			return -EINVAL;
		*n = ((struct ebt_entries *)e)->nentries;
		*cnt = 0;
		return 0;
	}
	/* a plain old entry, heh */
	if (sizeof(struct ebt_entry) > e->watchers_offset ||
	   e->watchers_offset > e->target_offset ||
	   e->target_offset >= e->next_offset)
		return -EINVAL;

	/* this is not checked anywhere else */
	if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target))
		return -EINVAL;

	(*cnt)++;
	(*totalcnt)++;
	return 0;
}
 583
/* loop-check bookkeeping for one user defined chain */
struct ebt_cl_stack {
	struct ebt_chainstack cs;
	int from;		/* index of the chain we jumped from, -1 if none */
	unsigned int hookmask;	/* base chains this udc is reachable from */
};
 589
 590/* We need these positions to check that the jumps to a different part of the
 
 591 * entries is a jump to the beginning of a new chain.
 592 */
 593static inline int
 594ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
 595		      unsigned int *n, struct ebt_cl_stack *udc)
 596{
 597	int i;
 598
 599	/* we're only interested in chain starts */
 600	if (e->bitmask)
 601		return 0;
 602	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
 603		if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
 604			break;
 605	}
 606	/* only care about udc */
 607	if (i != NF_BR_NUMHOOKS)
 608		return 0;
 609
 610	udc[*n].cs.chaininfo = (struct ebt_entries *)e;
 611	/* these initialisations are depended on later in check_chainloops() */
 612	udc[*n].cs.n = 0;
 613	udc[*n].hookmask = 0;
 614
 615	(*n)++;
 616	return 0;
 617}
 618
 619static inline int
 620ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
 621{
 622	struct xt_mtdtor_param par;
 623
 624	if (i && (*i)-- == 0)
 625		return 1;
 626
 627	par.net       = net;
 628	par.match     = m->u.match;
 629	par.matchinfo = m->data;
 630	par.family    = NFPROTO_BRIDGE;
 631	if (par.match->destroy != NULL)
 632		par.match->destroy(&par);
 633	module_put(par.match->me);
 634	return 0;
 635}
 636
 637static inline int
 638ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
 639{
 640	struct xt_tgdtor_param par;
 641
 642	if (i && (*i)-- == 0)
 643		return 1;
 644
 645	par.net      = net;
 646	par.target   = w->u.watcher;
 647	par.targinfo = w->data;
 648	par.family   = NFPROTO_BRIDGE;
 649	if (par.target->destroy != NULL)
 650		par.target->destroy(&par);
 651	module_put(par.target->me);
 652	return 0;
 653}
 654
/* tear down one fully-initialised entry: its watchers, matches and
 * target. When @cnt is non-NULL, stop (return 1) after *cnt entries —
 * used to unwind exactly the entries that were set up before a failure.
 */
static inline int
ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
{
	struct xt_tgdtor_param par;
	struct ebt_entry_target *t;

	/* chain headers carry no extensions */
	if (e->bitmask == 0)
		return 0;
	/* we're done */
	if (cnt && (*cnt)-- == 0)
		return 1;
	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
	t = ebt_get_target(e);

	par.net      = net;
	par.target   = t->u.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_BRIDGE;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	return 0;
}
 679
/* full validation of one rule entry: flag sanity, hook-mask computation,
 * match/watcher/target resolution and checking. On success pins the
 * extensions' modules and increments *cnt; on failure unwinds exactly
 * the matches/watchers initialised so far.
 */
static inline int
ebt_check_entry(struct ebt_entry *e, struct net *net,
		const struct ebt_table_info *newinfo,
		const char *name, unsigned int *cnt,
		struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
{
	struct ebt_entry_target *t;
	struct xt_target *target;
	unsigned int i, j, hook = 0, hookmask = 0;
	size_t gap;
	int ret;
	struct xt_mtchk_param mtpar;
	struct xt_tgchk_param tgpar;

	/* don't mess with the struct ebt_entries */
	if (e->bitmask == 0)
		return 0;

	if (e->bitmask & ~EBT_F_MASK)
		return -EINVAL;

	if (e->invflags & ~EBT_INV_MASK)
		return -EINVAL;

	/* EBT_NOPROTO and EBT_802_3 are mutually exclusive */
	if ((e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3))
		return -EINVAL;

	/* what hook do we belong to? */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if (!newinfo->hook_entry[i])
			continue;
		if ((char *)newinfo->hook_entry[i] < (char *)e)
			hook = i;
		else
			break;
	}
	/* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
	 * a base chain
	 */
	if (i < NF_BR_NUMHOOKS)
		hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
	else {
		/* entry lives in a udc: inherit that chain's hookmask */
		for (i = 0; i < udc_cnt; i++)
			if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
				break;
		if (i == 0)
			hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
		else
			hookmask = cl_s[i - 1].hookmask;
	}
	i = 0;

	memset(&mtpar, 0, sizeof(mtpar));
	memset(&tgpar, 0, sizeof(tgpar));
	mtpar.net	= tgpar.net       = net;
	mtpar.table     = tgpar.table     = name;
	mtpar.entryinfo = tgpar.entryinfo = e;
	mtpar.hook_mask = tgpar.hook_mask = hookmask;
	mtpar.family    = tgpar.family    = NFPROTO_BRIDGE;
	ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
	if (ret != 0)
		goto cleanup_matches;
	j = 0;
	ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
	if (ret != 0)
		goto cleanup_watchers;
	t = ebt_get_target(e);
	gap = e->next_offset - e->target_offset;

	target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto cleanup_watchers;
	}

	/* Reject UNSPEC, xtables verdicts/return values are incompatible */
	if (target->family != NFPROTO_BRIDGE) {
		module_put(target->me);
		ret = -ENOENT;
		goto cleanup_watchers;
	}

	t->u.target = target;
	if (t->u.target == &ebt_standard_target) {
		/* standard target: verdict must fit and be in range */
		if (gap < sizeof(struct ebt_standard_target)) {
			ret = -EFAULT;
			goto cleanup_watchers;
		}
		if (((struct ebt_standard_target *)t)->verdict <
		   -NUM_STANDARD_TARGETS) {
			ret = -EFAULT;
			goto cleanup_watchers;
		}
	} else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
		module_put(t->u.target->me);
		ret = -EFAULT;
		goto cleanup_watchers;
	}

	tgpar.target   = target;
	tgpar.targinfo = t->data;
	ret = xt_check_target(&tgpar, t->target_size,
	      ntohs(e->ethproto), e->invflags & EBT_IPROTO);
	if (ret < 0) {
		module_put(target->me);
		goto cleanup_watchers;
	}
	(*cnt)++;
	return 0;
cleanup_watchers:
	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
cleanup_matches:
	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
	return ret;
}
 795
/* checks for loops and sets the hook mask for udc
 * the hook mask for udc tells us from which base chains the udc can be
 * accessed. This mask is a parameter to the check() functions of the extensions
 * Implemented as an iterative depth-first walk over standard-target
 * jumps, using cl_s[] both as the visit stack and the loop marker
 * (cs.n != 0 means "currently on the path"). Returns 0 or -1 on a bad
 * jump/loop.
 */
static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
			    unsigned int udc_cnt, unsigned int hooknr, char *base)
{
	int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
	const struct ebt_entry *e = (struct ebt_entry *)chain->data;
	const struct ebt_entry_target *t;

	while (pos < nentries || chain_nr != -1) {
		/* end of udc, go back one 'recursion' step */
		if (pos == nentries) {
			/* put back values of the time when this chain was called */
			e = cl_s[chain_nr].cs.e;
			if (cl_s[chain_nr].from != -1)
				nentries =
				cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
			else
				nentries = chain->nentries;
			pos = cl_s[chain_nr].cs.n;
			/* make sure we won't see a loop that isn't one */
			cl_s[chain_nr].cs.n = 0;
			chain_nr = cl_s[chain_nr].from;
			if (pos == nentries)
				continue;
		}
		t = ebt_get_target_c(e);
		/* only the standard target can jump */
		if (strcmp(t->u.name, EBT_STANDARD_TARGET))
			goto letscontinue;
		if (e->target_offset + sizeof(struct ebt_standard_target) >
		   e->next_offset)
			return -1;

		verdict = ((struct ebt_standard_target *)t)->verdict;
		if (verdict >= 0) { /* jump to another chain */
			struct ebt_entries *hlp2 =
			   (struct ebt_entries *)(base + verdict);
			for (i = 0; i < udc_cnt; i++)
				if (hlp2 == cl_s[i].cs.chaininfo)
					break;
			/* bad destination or loop */
			if (i == udc_cnt)
				return -1;

			/* destination already on the current path: a loop */
			if (cl_s[i].cs.n)
				return -1;

			/* already checked from this base chain: skip */
			if (cl_s[i].hookmask & (1 << hooknr))
				goto letscontinue;
			/* this can't be 0, so the loop test is correct */
			cl_s[i].cs.n = pos + 1;
			pos = 0;
			cl_s[i].cs.e = ebt_next_entry(e);
			e = (struct ebt_entry *)(hlp2->data);
			nentries = hlp2->nentries;
			cl_s[i].from = chain_nr;
			chain_nr = i;
			/* this udc is accessible from the base chain for hooknr */
			cl_s[i].hookmask |= (1 << hooknr);
			continue;
		}
letscontinue:
		e = ebt_next_entry(e);
		pos++;
	}
	return 0;
}
 865
/* do the parsing of the table/chains/entries/matches/watchers/targets, heh
 * Validates chain ordering, entry counts and jump targets, allocates the
 * per-cpu chainstacks when user defined chains exist, and finally runs
 * ebt_check_entry() over every entry (unwinding on failure).
 */
static int translate_table(struct net *net, const char *name,
			   struct ebt_table_info *newinfo)
{
	unsigned int i, j, k, udc_cnt;
	int ret;
	struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */

	/* at least one base chain must exist, and the first one must
	 * start at the very beginning of the entries blob
	 */
	i = 0;
	while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
		i++;
	if (i == NF_BR_NUMHOOKS)
		return -EINVAL;

	if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries)
		return -EINVAL;

	/* make sure chains are ordered after each other in same order
	 * as their corresponding hooks
	 */
	for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
		if (!newinfo->hook_entry[j])
			continue;
		if (newinfo->hook_entry[j] <= newinfo->hook_entry[i])
			return -EINVAL;

		i = j;
	}

	/* do some early checkings and initialize some things */
	i = 0; /* holds the expected nr. of entries for the chain */
	j = 0; /* holds the up to now counted entries for the chain */
	k = 0; /* holds the total nr. of entries, should equal
		* newinfo->nentries afterwards
		*/
	udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
	   ebt_check_entry_size_and_hooks, newinfo,
	   &i, &j, &k, &udc_cnt);

	if (ret != 0)
		return ret;

	/* the last chain must also be complete */
	if (i != j)
		return -EINVAL;

	if (k != newinfo->nentries)
		return -EINVAL;

	/* get the location of the udc, put them in an array
	 * while we're at it, allocate the chainstack
	 */
	if (udc_cnt) {
		/* this will get free'd in do_replace()/ebt_register_table()
		 * if an error occurs
		 */
		newinfo->chainstack =
			vmalloc(array_size(nr_cpu_ids,
					   sizeof(*(newinfo->chainstack))));
		if (!newinfo->chainstack)
			return -ENOMEM;
		for_each_possible_cpu(i) {
			newinfo->chainstack[i] =
			  vmalloc_node(array_size(udc_cnt,
					  sizeof(*(newinfo->chainstack[0]))),
				       cpu_to_node(i));
			if (!newinfo->chainstack[i]) {
				/* unwind the per-cpu allocations made so far */
				while (i)
					vfree(newinfo->chainstack[--i]);
				vfree(newinfo->chainstack);
				newinfo->chainstack = NULL;
				return -ENOMEM;
			}
		}

		cl_s = vmalloc(array_size(udc_cnt, sizeof(*cl_s)));
		if (!cl_s)
			return -ENOMEM;
		i = 0; /* the i'th udc */
		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
		   ebt_get_udc_positions, newinfo, &i, cl_s);
		/* sanity check */
		if (i != udc_cnt) {
			vfree(cl_s);
			return -EFAULT;
		}
	}

	/* Check for loops */
	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		if (newinfo->hook_entry[i])
			if (check_chainloops(newinfo->hook_entry[i],
			   cl_s, udc_cnt, i, newinfo->entries)) {
				vfree(cl_s);
				return -EINVAL;
			}

	/* we now know the following (along with E=mc²):
	 *  - the nr of entries in each chain is right
	 *  - the size of the allocated space is right
	 *  - all valid hooks have a corresponding chain
	 *  - there are no loops
	 *  - wrong data can still be on the level of a single entry
	 *  - could be there are jumps to places that are not the
	 *    beginning of a chain. This can only occur in chains that
	 *    are not accessible from any base chains, so we don't care.
	 */

	/* used to know what we need to clean up if something goes wrong */
	i = 0;
	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
	   ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
	if (ret != 0) {
		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
				  ebt_cleanup_entry, net, &i);
	}
	vfree(cl_s);
	return ret;
}
 985
/* called under write_lock
 * Sum every cpu's counter set into @counters (cpu 0's set is copied,
 * the others are accumulated on top).
 */
static void get_counters(const struct ebt_counter *oldcounters,
			 struct ebt_counter *counters, unsigned int nentries)
{
	int i, cpu;
	struct ebt_counter *counter_base;

	/* counters of cpu 0 */
	memcpy(counters, oldcounters,
	       sizeof(struct ebt_counter) * nentries);

	/* add other counters to those of cpu 0 */
	for_each_possible_cpu(cpu) {
		if (cpu == 0)
			continue;
		counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
		for (i = 0; i < nentries; i++)
			ADD_COUNTER(counters[i], counter_base[i].bcnt,
				    counter_base[i].pcnt);
	}
}
1007
/* second half of a table replacement: validate @newinfo, atomically swap
 * it into the live table under the write lock, hand the old counters
 * back to userspace, and free the old table. On any failure, all of
 * @newinfo's resources built so far are released.
 */
static int do_replace_finish(struct net *net, struct ebt_replace *repl,
			      struct ebt_table_info *newinfo)
{
	int ret;
	struct ebt_counter *counterstmp = NULL;
	/* used to be able to unlock earlier */
	struct ebt_table_info *table;
	struct ebt_table *t;

	/* the user wants counters back
	 * the check on the size is done later, when we have the lock
	 */
	if (repl->num_counters) {
		unsigned long size = repl->num_counters * sizeof(*counterstmp);
		counterstmp = vmalloc(size);
		if (!counterstmp)
			return -ENOMEM;
	}

	newinfo->chainstack = NULL;
	ret = ebt_verify_pointers(repl, newinfo);
	if (ret != 0)
		goto free_counterstmp;

	ret = translate_table(net, repl->name, newinfo);

	if (ret != 0)
		goto free_counterstmp;

	/* on success ebt_mutex is held until after the pointer swap */
	t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
	if (!t) {
		ret = -ENOENT;
		goto free_iterate;
	}

	if (repl->valid_hooks != t->valid_hooks) {
		ret = -EINVAL;
		goto free_unlock;
	}

	if (repl->num_counters && repl->num_counters != t->private->nentries) {
		ret = -EINVAL;
		goto free_unlock;
	}

	/* we have the mutex lock, so no danger in reading this pointer */
	table = t->private;
	/* make sure the table can only be rmmod'ed if it contains no rules */
	if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
		ret = -ENOENT;
		goto free_unlock;
	} else if (table->nentries && !newinfo->nentries)
		module_put(t->me);
	/* we need an atomic snapshot of the counters */
	write_lock_bh(&t->lock);
	if (repl->num_counters)
		get_counters(t->private->counters, counterstmp,
		   t->private->nentries);

	t->private = newinfo;
	write_unlock_bh(&t->lock);
	mutex_unlock(&ebt_mutex);
	/* so, a user can change the chains while having messed up her counter
	 * allocation. Only reason why this is done is because this way the lock
	 * is held only once, while this doesn't bring the kernel into a
	 * dangerous state.
	 */
	if (repl->num_counters &&
	   copy_to_user(repl->counters, counterstmp,
	   array_size(repl->num_counters, sizeof(struct ebt_counter)))) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n");
	}

	/* decrease module count and free resources */
	EBT_ENTRY_ITERATE(table->entries, table->entries_size,
			  ebt_cleanup_entry, net, NULL);

	vfree(table->entries);
	ebt_free_table_info(table);
	vfree(table);
	vfree(counterstmp);

	audit_log_nfcfg(repl->name, AF_BRIDGE, repl->nentries,
			AUDIT_XT_OP_REPLACE, GFP_KERNEL);
	return 0;

free_unlock:
	mutex_unlock(&ebt_mutex);
free_iterate:
	EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
			  ebt_cleanup_entry, net, NULL);
free_counterstmp:
	vfree(counterstmp);
	/* can be initialized in translate_table() */
	ebt_free_table_info(newinfo);
	return ret;
}
1106
/* replace the table */
/* First stage of EBT_SO_SET_ENTRIES: copy the ebt_replace header and the
 * entries blob from userspace into freshly allocated kernel memory, then
 * hand off to do_replace_finish() for validation and the actual swap.
 * Returns 0 on success or a negative errno.
 */
static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
{
	int ret, countersize;
	struct ebt_table_info *newinfo;
	struct ebt_replace tmp;

	if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
		return -EFAULT;

	/* total optlen must be exactly header + entries blob */
	if (len != sizeof(tmp) + tmp.entries_size)
		return -EINVAL;

	if (tmp.entries_size == 0)
		return -EINVAL;

	/* overflow check */
	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
		return -ENOMEM;

	/* userspace data: force NUL termination of the table name */
	tmp.name[sizeof(tmp.name) - 1] = 0;

	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
	newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT);
	if (!newinfo)
		return -ENOMEM;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT);
	if (!newinfo->entries) {
		ret = -ENOMEM;
		goto free_newinfo;
	}
	if (copy_from_user(
	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
		ret = -EFAULT;
		goto free_entries;
	}

	/* on success, newinfo ownership passed to the table */
	ret = do_replace_finish(net, &tmp, newinfo);
	if (ret == 0)
		return ret;
free_entries:
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
	return ret;
}
1160
/* Remove @table from its per-netns list and free every resource it owns:
 * entries (with per-entry cleanup), counters/table_info, ops and the table
 * itself.  Drops the module reference held for non-empty tables.  The
 * netfilter hooks must already be unregistered by the caller.
 */
static void __ebt_unregister_table(struct net *net, struct ebt_table *table)
{
	mutex_lock(&ebt_mutex);
	list_del(&table->list);
	mutex_unlock(&ebt_mutex);
	audit_log_nfcfg(table->name, AF_BRIDGE, table->private->nentries,
			AUDIT_XT_OP_UNREGISTER, GFP_KERNEL);
	EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
			  ebt_cleanup_entry, net, NULL);
	/* non-empty tables pinned their owner module at registration time */
	if (table->private->nentries)
		module_put(table->me);
	vfree(table->private->entries);
	ebt_free_table_info(table->private);
	vfree(table->private);
	kfree(table->ops);
	kfree(table);
}
1178
/* Register a kernel-provided table (e.g. filter/nat/broute) in @net.
 * Duplicates @input_table and its initial ruleset, translates the entries,
 * duplicates @template_ops (one nf_hook_ops per valid hook, priv pointing
 * at the new table) and registers the hooks.  Returns 0 on success or a
 * negative errno; on failure everything allocated here is released.
 */
int ebt_register_table(struct net *net, const struct ebt_table *input_table,
		       const struct nf_hook_ops *template_ops)
{
	struct ebt_pernet *ebt_net = net_generic(net, ebt_pernet_id);
	struct ebt_table_info *newinfo;
	struct ebt_table *t, *table;
	struct nf_hook_ops *ops;
	unsigned int num_ops;
	struct ebt_replace_kernel *repl;
	int ret, i, countersize;
	void *p;

	/* sanity-check the template: it must carry entries, no counters,
	 * and must not already be live (private set)
	 */
	if (input_table == NULL || (repl = input_table->table) == NULL ||
	    repl->entries == NULL || repl->entries_size == 0 ||
	    repl->counters != NULL || input_table->private != NULL)
		return -EINVAL;

	/* Don't add one table to multiple lists. */
	table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	ret = -ENOMEM;
	if (!newinfo)
		goto free_table;

	p = vmalloc(repl->entries_size);
	if (!p)
		goto free_newinfo;

	memcpy(p, repl->entries, repl->entries_size);
	newinfo->entries = p;

	newinfo->entries_size = repl->entries_size;
	newinfo->nentries = repl->nentries;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	/* fill in newinfo and parse the entries */
	newinfo->chainstack = NULL;
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if ((repl->valid_hooks & (1 << i)) == 0)
			newinfo->hook_entry[i] = NULL;
		else
			/* rebase the hook pointer into our copied blob */
			newinfo->hook_entry[i] = p +
				((char *)repl->hook_entry[i] - repl->entries);
	}
	ret = translate_table(net, repl->name, newinfo);
	if (ret != 0)
		goto free_chainstack;

	table->private = newinfo;
	rwlock_init(&table->lock);
	mutex_lock(&ebt_mutex);
	/* reject duplicate table names within this netns */
	list_for_each_entry(t, &ebt_net->tables, list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto free_unlock;
		}
	}

	/* Hold a reference count if the chains aren't empty */
	if (newinfo->nentries && !try_module_get(table->me)) {
		ret = -ENOENT;
		goto free_unlock;
	}

	num_ops = hweight32(table->valid_hooks);
	if (num_ops == 0) {
		ret = -EINVAL;
		goto free_unlock;
	}

	ops = kmemdup(template_ops, sizeof(*ops) * num_ops, GFP_KERNEL);
	if (!ops) {
		ret = -ENOMEM;
		if (newinfo->nentries)
			module_put(table->me);
		goto free_unlock;
	}

	for (i = 0; i < num_ops; i++)
		ops[i].priv = table;

	list_add(&table->list, &ebt_net->tables);
	mutex_unlock(&ebt_mutex);

	table->ops = ops;
	ret = nf_register_net_hooks(net, ops, num_ops);
	if (ret)
		/* table is already on the list; full teardown path */
		__ebt_unregister_table(net, table);

	audit_log_nfcfg(repl->name, AF_BRIDGE, repl->nentries,
			AUDIT_XT_OP_REGISTER, GFP_KERNEL);
	return ret;
free_unlock:
	mutex_unlock(&ebt_mutex);
free_chainstack:
	ebt_free_table_info(newinfo);
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
free_table:
	kfree(table);
out:
	return ret;
}
1291
1292int ebt_register_template(const struct ebt_table *t, int (*table_init)(struct net *net))
1293{
1294	struct ebt_template *tmpl;
1295
1296	mutex_lock(&ebt_mutex);
1297	list_for_each_entry(tmpl, &template_tables, list) {
1298		if (WARN_ON_ONCE(strcmp(t->name, tmpl->name) == 0)) {
1299			mutex_unlock(&ebt_mutex);
1300			return -EEXIST;
1301		}
1302	}
1303
1304	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
1305	if (!tmpl) {
1306		mutex_unlock(&ebt_mutex);
1307		return -ENOMEM;
1308	}
1309
1310	tmpl->table_init = table_init;
1311	strscpy(tmpl->name, t->name, sizeof(tmpl->name));
1312	tmpl->owner = t->me;
1313	list_add(&tmpl->list, &template_tables);
1314
1315	mutex_unlock(&ebt_mutex);
1316	return 0;
1317}
1318EXPORT_SYMBOL(ebt_register_template);
1319
1320void ebt_unregister_template(const struct ebt_table *t)
1321{
1322	struct ebt_template *tmpl;
1323
1324	mutex_lock(&ebt_mutex);
1325	list_for_each_entry(tmpl, &template_tables, list) {
1326		if (strcmp(t->name, tmpl->name))
1327			continue;
1328
1329		list_del(&tmpl->list);
1330		mutex_unlock(&ebt_mutex);
1331		kfree(tmpl);
1332		return;
1333	}
1334
1335	mutex_unlock(&ebt_mutex);
1336	WARN_ON_ONCE(1);
1337}
1338EXPORT_SYMBOL(ebt_unregister_template);
1339
1340static struct ebt_table *__ebt_find_table(struct net *net, const char *name)
1341{
1342	struct ebt_pernet *ebt_net = net_generic(net, ebt_pernet_id);
1343	struct ebt_table *t;
1344
1345	mutex_lock(&ebt_mutex);
1346
1347	list_for_each_entry(t, &ebt_net->tables, list) {
1348		if (strcmp(t->name, name) == 0) {
1349			mutex_unlock(&ebt_mutex);
1350			return t;
1351		}
1352	}
1353
1354	mutex_unlock(&ebt_mutex);
1355	return NULL;
1356}
1357
1358void ebt_unregister_table_pre_exit(struct net *net, const char *name)
1359{
1360	struct ebt_table *table = __ebt_find_table(net, name);
1361
1362	if (table)
1363		nf_unregister_net_hooks(net, table->ops, hweight32(table->valid_hooks));
1364}
1365EXPORT_SYMBOL(ebt_unregister_table_pre_exit);
1366
/* Netns exit stage: free the named table and all of its resources.  The
 * hooks must already be gone (see ebt_unregister_table_pre_exit()).
 */
void ebt_unregister_table(struct net *net, const char *name)
{
	struct ebt_table *table = __ebt_find_table(net, name);

	if (!table)
		return;

	__ebt_unregister_table(net, table);
}
1374
/* userspace just supplied us with counters */
/* EBT_SO_SET_COUNTERS helper: add the user-supplied byte/packet deltas to
 * the named table's counters.  @num_counters must equal the table's rule
 * count.  Returns 0 or a negative errno.
 */
static int do_update_counters(struct net *net, const char *name,
			      struct ebt_counter __user *counters,
			      unsigned int num_counters, unsigned int len)
{
	int i, ret;
	struct ebt_counter *tmp;
	struct ebt_table *t;

	if (num_counters == 0)
		return -EINVAL;

	tmp = vmalloc(array_size(num_counters, sizeof(*tmp)));
	if (!tmp)
		return -ENOMEM;

	/* on success, t is returned with ebt_mutex held */
	t = find_table_lock(net, name, &ret, &ebt_mutex);
	if (!t)
		goto free_tmp;

	if (num_counters != t->private->nentries) {
		ret = -EINVAL;
		goto unlock_mutex;
	}

	if (copy_from_user(tmp, counters,
			   array_size(num_counters, sizeof(*counters)))) {
		ret = -EFAULT;
		goto unlock_mutex;
	}

	/* we want an atomic add of the counters */
	write_lock_bh(&t->lock);

	/* we add to the counters of the first cpu */
	for (i = 0; i < num_counters; i++)
		ADD_COUNTER(t->private->counters[i], tmp[i].bcnt, tmp[i].pcnt);

	write_unlock_bh(&t->lock);
	ret = 0;
unlock_mutex:
	mutex_unlock(&ebt_mutex);
free_tmp:
	vfree(tmp);
	return ret;
}
1421
1422static int update_counters(struct net *net, sockptr_t arg, unsigned int len)
 
1423{
1424	struct ebt_replace hlp;
1425
1426	if (copy_from_sockptr(&hlp, arg, sizeof(hlp)))
1427		return -EFAULT;
1428
1429	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1430		return -EINVAL;
1431
1432	return do_update_counters(net, hlp.name, hlp.counters,
1433				  hlp.num_counters, len);
1434}
1435
/* Copy one match/watcher/target extension back to userspace at @um in the
 * ebtables wire layout: padded name, revision byte, data size, then the
 * (usersize-masked, XT_ALIGNed) payload.  Returns 0 or -EFAULT.
 */
static inline int ebt_obj_to_user(char __user *um, const char *_name,
				  const char *data, int entrysize,
				  int usersize, int datasize, u8 revision)
{
	char name[EBT_EXTENSION_MAXNAMELEN] = {0};

	/* ebtables expects 31 bytes long names but xt_match names are 29 bytes
	 * long. Copy 29 bytes and fill remaining bytes with zeroes.
	 */
	strscpy(name, _name, sizeof(name));
	if (copy_to_user(um, name, EBT_EXTENSION_MAXNAMELEN) ||
	    put_user(revision, (u8 __user *)(um + EBT_EXTENSION_MAXNAMELEN)) ||
	    put_user(datasize, (int __user *)(um + EBT_EXTENSION_MAXNAMELEN + 1)) ||
	    xt_data_to_user(um + entrysize, data, usersize, datasize,
			    XT_ALIGN(datasize)))
		return -EFAULT;

	return 0;
}
1455
1456static inline int ebt_match_to_user(const struct ebt_entry_match *m,
1457				    const char *base, char __user *ubase)
1458{
1459	return ebt_obj_to_user(ubase + ((char *)m - base),
1460			       m->u.match->name, m->data, sizeof(*m),
1461			       m->u.match->usersize, m->match_size,
1462			       m->u.match->revision);
1463}
1464
1465static inline int ebt_watcher_to_user(const struct ebt_entry_watcher *w,
1466				      const char *base, char __user *ubase)
1467{
1468	return ebt_obj_to_user(ubase + ((char *)w - base),
1469			       w->u.watcher->name, w->data, sizeof(*w),
1470			       w->u.watcher->usersize, w->watcher_size,
1471			       w->u.watcher->revision);
1472}
1473
1474static inline int ebt_entry_to_user(struct ebt_entry *e, const char *base,
1475				    char __user *ubase)
1476{
1477	int ret;
1478	char __user *hlp;
1479	const struct ebt_entry_target *t;
1480
1481	if (e->bitmask == 0) {
1482		/* special case !EBT_ENTRY_OR_ENTRIES */
1483		if (copy_to_user(ubase + ((char *)e - base), e,
1484				 sizeof(struct ebt_entries)))
1485			return -EFAULT;
1486		return 0;
1487	}
1488
1489	if (copy_to_user(ubase + ((char *)e - base), e, sizeof(*e)))
1490		return -EFAULT;
1491
1492	hlp = ubase + (((char *)e + e->target_offset) - base);
1493	t = ebt_get_target_c(e);
1494
1495	ret = EBT_MATCH_ITERATE(e, ebt_match_to_user, base, ubase);
1496	if (ret != 0)
1497		return ret;
1498	ret = EBT_WATCHER_ITERATE(e, ebt_watcher_to_user, base, ubase);
1499	if (ret != 0)
1500		return ret;
1501	ret = ebt_obj_to_user(hlp, t->u.target->name, t->data, sizeof(*t),
1502			      t->u.target->usersize, t->target_size,
1503			      t->u.target->revision);
1504	if (ret != 0)
1505		return ret;
1506
1507	return 0;
1508}
1509
1510static int copy_counters_to_user(struct ebt_table *t,
1511				 const struct ebt_counter *oldcounters,
1512				 void __user *user, unsigned int num_counters,
1513				 unsigned int nentries)
1514{
1515	struct ebt_counter *counterstmp;
1516	int ret = 0;
1517
1518	/* userspace might not need the counters */
1519	if (num_counters == 0)
1520		return 0;
1521
1522	if (num_counters != nentries)
 
1523		return -EINVAL;
 
1524
1525	counterstmp = vmalloc(array_size(nentries, sizeof(*counterstmp)));
1526	if (!counterstmp)
1527		return -ENOMEM;
1528
1529	write_lock_bh(&t->lock);
1530	get_counters(oldcounters, counterstmp, nentries);
1531	write_unlock_bh(&t->lock);
1532
1533	if (copy_to_user(user, counterstmp,
1534	    array_size(nentries, sizeof(struct ebt_counter))))
1535		ret = -EFAULT;
1536	vfree(counterstmp);
1537	return ret;
1538}
1539
/* called with ebt_mutex locked */
/* Handle EBT_SO_GET_ENTRIES / EBT_SO_GET_INIT_ENTRIES: copy the live
 * ruleset (t->private) or the initial template (t->table) plus optional
 * counters back to userspace, after validating the sizes userspace passed
 * in.  Returns 0 or a negative errno.
 */
static int copy_everything_to_user(struct ebt_table *t, void __user *user,
				   const int *len, int cmd)
{
	struct ebt_replace tmp;
	const struct ebt_counter *oldcounters;
	unsigned int entries_size, nentries;
	int ret;
	char *entries;

	if (cmd == EBT_SO_GET_ENTRIES) {
		entries_size = t->private->entries_size;
		nentries = t->private->nentries;
		entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		/* ..._GET_INIT_ENTRIES: the pristine template ruleset */
		entries_size = t->table->entries_size;
		nentries = t->table->nentries;
		entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* userspace buffer must be header + entries (+ counters if asked) */
	if (*len != sizeof(struct ebt_replace) + entries_size +
	   (tmp.num_counters ? nentries * sizeof(struct ebt_counter) : 0))
		return -EINVAL;

	if (tmp.nentries != nentries)
		return -EINVAL;

	if (tmp.entries_size != entries_size)
		return -EINVAL;

	ret = copy_counters_to_user(t, oldcounters, tmp.counters,
					tmp.num_counters, nentries);
	if (ret)
		return ret;

	/* set the match/watcher/target names right */
	return EBT_ENTRY_ITERATE(entries, entries_size,
	   ebt_entry_to_user, entries, tmp.entries);
}
1584
1585#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* 32 bit-userspace compatibility definitions. */
/* Same layout as struct ebt_replace but with 32-bit pointers/uints, as a
 * 32-bit ebtables binary lays it out.
 */
struct compat_ebt_replace {
	char name[EBT_TABLE_MAXNAMELEN];
	compat_uint_t valid_hooks;
	compat_uint_t nentries;
	compat_uint_t entries_size;
	/* start of the chains */
	compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
	/* nr of counters userspace expects back */
	compat_uint_t num_counters;
	/* where the kernel will put the old counters. */
	compat_uptr_t counters;
	compat_uptr_t entries;
};
1600
/* struct ebt_entry_match, _target and _watcher have same layout */
struct compat_ebt_entry_mwt {
	union {
		struct {
			char name[EBT_EXTENSION_MAXNAMELEN];
			u8 revision;
		};
		/* resolved kernel-side pointer, overlaying the name */
		compat_uptr_t ptr;
	} u;
	compat_uint_t match_size;
	/* payload; aligned as the 32-bit ABI aligns it */
	compat_uint_t data[] __aligned(__alignof__(struct compat_ebt_replace));
};
1613
1614/* account for possible padding between match_size and ->data */
1615static int ebt_compat_entry_padsize(void)
1616{
1617	BUILD_BUG_ON(sizeof(struct ebt_entry_match) <
1618			sizeof(struct compat_ebt_entry_mwt));
1619	return (int) sizeof(struct ebt_entry_match) -
1620			sizeof(struct compat_ebt_entry_mwt);
1621}
1622
1623static int ebt_compat_match_offset(const struct xt_match *match,
1624				   unsigned int userlen)
1625{
1626	/* ebt_among needs special handling. The kernel .matchsize is
 
1627	 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1628	 * value is expected.
1629	 * Example: userspace sends 4500, ebt_among.c wants 4504.
1630	 */
1631	if (unlikely(match->matchsize == -1))
1632		return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
1633	return xt_compat_match_offset(match);
1634}
1635
/* Convert one match to the 32-bit layout at *dstptr, advancing *dstptr and
 * shrinking *size by the native/compat size difference.  Returns 0 or a
 * negative errno.
 */
static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
				unsigned int *size)
{
	const struct xt_match *match = m->u.match;
	struct compat_ebt_entry_mwt __user *cm = *dstptr;
	int off = ebt_compat_match_offset(match, m->match_size);
	/* size of the payload as 32-bit userspace sees it */
	compat_uint_t msize = m->match_size - off;

	if (WARN_ON(off >= m->match_size))
		return -EINVAL;

	if (copy_to_user(cm->u.name, match->name, strlen(match->name) + 1) ||
	    put_user(match->revision, &cm->u.revision) ||
	    put_user(msize, &cm->match_size))
		return -EFAULT;

	if (match->compat_to_user) {
		/* extension provides its own 64->32 translation */
		if (match->compat_to_user(cm->data, m->data))
			return -EFAULT;
	} else {
		if (xt_data_to_user(cm->data, m->data, match->usersize, msize,
				    COMPAT_XT_ALIGN(msize)))
			return -EFAULT;
	}

	*size -= ebt_compat_entry_padsize() + off;
	*dstptr = cm->data;
	*dstptr += msize;
	return 0;
}
1666
/* Convert one target to the 32-bit layout at *dstptr; mirrors
 * compat_match_to_user() but uses xt_compat_target_offset().
 */
static int compat_target_to_user(struct ebt_entry_target *t,
				 void __user **dstptr,
				 unsigned int *size)
{
	const struct xt_target *target = t->u.target;
	struct compat_ebt_entry_mwt __user *cm = *dstptr;
	int off = xt_compat_target_offset(target);
	/* size of the payload as 32-bit userspace sees it */
	compat_uint_t tsize = t->target_size - off;

	if (WARN_ON(off >= t->target_size))
		return -EINVAL;

	if (copy_to_user(cm->u.name, target->name, strlen(target->name) + 1) ||
	    put_user(target->revision, &cm->u.revision) ||
	    put_user(tsize, &cm->match_size))
		return -EFAULT;

	if (target->compat_to_user) {
		/* extension provides its own 64->32 translation */
		if (target->compat_to_user(cm->data, t->data))
			return -EFAULT;
	} else {
		if (xt_data_to_user(cm->data, t->data, target->usersize, tsize,
				    COMPAT_XT_ALIGN(tsize)))
			return -EFAULT;
	}

	*size -= ebt_compat_entry_padsize() + off;
	*dstptr = cm->data;
	*dstptr += tsize;
	return 0;
}
1698
1699static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1700				  void __user **dstptr,
1701				  unsigned int *size)
1702{
1703	return compat_target_to_user((struct ebt_entry_target *)w,
1704							dstptr, size);
1705}
1706
/* Copy one entry (or chain header) to 32-bit userspace at *dstptr,
 * converting all matches/watchers/target and then patching the entry's
 * offset fields to account for the shrunken compat sizes.
 */
static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
				unsigned int *size)
{
	struct ebt_entry_target *t;
	struct ebt_entry __user *ce;
	u32 watchers_offset, target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	/* bitmask == 0 marks a chain header, copied verbatim */
	if (e->bitmask == 0) {
		if (*size < sizeof(struct ebt_entries))
			return -EINVAL;
		if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
			return -EFAULT;

		*dstptr += sizeof(struct ebt_entries);
		*size -= sizeof(struct ebt_entries);
		return 0;
	}

	if (*size < sizeof(*ce))
		return -EINVAL;

	ce = *dstptr;
	if (copy_to_user(ce, e, sizeof(*ce)))
		return -EFAULT;

	origsize = *size;
	*dstptr += sizeof(*ce);

	ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
	if (ret)
		return ret;
	/* (origsize - *size) is how much the blob shrank so far */
	watchers_offset = e->watchers_offset - (origsize - *size);

	ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
	if (ret)
		return ret;
	target_offset = e->target_offset - (origsize - *size);

	t = ebt_get_target(e);

	ret = compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);

	/* fix up the offsets in the already-copied entry header */
	if (put_user(watchers_offset, &ce->watchers_offset) ||
	    put_user(target_offset, &ce->target_offset) ||
	    put_user(next_offset, &ce->next_offset))
		return -EFAULT;

	*size -= sizeof(*ce);
	return 0;
}
1762
1763static int compat_calc_match(struct ebt_entry_match *m, int *off)
1764{
1765	*off += ebt_compat_match_offset(m->u.match, m->match_size);
1766	*off += ebt_compat_entry_padsize();
1767	return 0;
1768}
1769
1770static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1771{
1772	*off += xt_compat_target_offset(w->u.watcher);
1773	*off += ebt_compat_entry_padsize();
1774	return 0;
1775}
1776
/* For one entry, compute the total native-vs-compat size delta, record it
 * in the xt compat offset table, shrink newinfo->entries_size accordingly
 * and adjust any hook entry points that lie beyond this entry.
 */
static int compat_calc_entry(const struct ebt_entry *e,
			     const struct ebt_table_info *info,
			     const void *base,
			     struct compat_ebt_replace *newinfo)
{
	const struct ebt_entry_target *t;
	unsigned int entry_offset;
	int off, ret, i;

	/* chain headers have the same size in both layouts */
	if (e->bitmask == 0)
		return 0;

	off = 0;
	entry_offset = (void *)e - base;

	EBT_MATCH_ITERATE(e, compat_calc_match, &off);
	EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);

	t = ebt_get_target_c(e);

	off += xt_compat_target_offset(t->u.target);
	off += ebt_compat_entry_padsize();

	newinfo->entries_size -= off;

	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		const void *hookptr = info->hook_entry[i];
		/* NOTE(review): `base - hookptr` is pointer-difference
		 * arithmetic reinterpreted as a pointer; looks like it
		 * selects entries preceding this hook's start — confirm
		 * against upstream history before touching.
		 */
		if (info->hook_entry[i] &&
		    (e < (struct ebt_entry *)(base - hookptr))) {
			newinfo->hook_entry[i] -= off;
			pr_debug("0x%08X -> 0x%08X\n",
					newinfo->hook_entry[i] + off,
					newinfo->hook_entry[i]);
		}
	}

	return 0;
}
1819
1820static int ebt_compat_init_offsets(unsigned int number)
1821{
1822	if (number > INT_MAX)
1823		return -EINVAL;
1824
1825	/* also count the base chain policies */
1826	number += NF_BR_NUMHOOKS;
1827
1828	return xt_compat_init_offsets(NFPROTO_BRIDGE, number);
1829}
1830
1831static int compat_table_info(const struct ebt_table_info *info,
1832			     struct compat_ebt_replace *newinfo)
1833{
1834	unsigned int size = info->entries_size;
1835	const void *entries = info->entries;
1836	int ret;
1837
1838	newinfo->entries_size = size;
1839	ret = ebt_compat_init_offsets(info->nentries);
1840	if (ret)
1841		return ret;
1842
 
1843	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1844							entries, newinfo);
1845}
1846
/* Compat counterpart of copy_everything_to_user(): return the live or
 * initial ruleset (plus optional counters) to a 32-bit process, converting
 * every entry on the way out.
 */
static int compat_copy_everything_to_user(struct ebt_table *t,
					  void __user *user, int *len, int cmd)
{
	struct compat_ebt_replace repl, tmp;
	struct ebt_counter *oldcounters;
	struct ebt_table_info tinfo;
	int ret;
	void __user *pos;

	memset(&tinfo, 0, sizeof(tinfo));

	if (cmd == EBT_SO_GET_ENTRIES) {
		tinfo.entries_size = t->private->entries_size;
		tinfo.nentries = t->private->nentries;
		tinfo.entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		/* ..._GET_INIT_ENTRIES: the pristine template ruleset */
		tinfo.entries_size = t->table->entries_size;
		tinfo.nentries = t->table->nentries;
		tinfo.entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (tmp.nentries != tinfo.nentries ||
	   (tmp.num_counters && tmp.num_counters != tinfo.nentries))
		return -EINVAL;

	memcpy(&repl, &tmp, sizeof(repl));
	/* compute compat entries_size into repl */
	if (cmd == EBT_SO_GET_ENTRIES)
		ret = compat_table_info(t->private, &repl);
	else
		ret = compat_table_info(&tinfo, &repl);
	if (ret)
		return ret;

	if (*len != sizeof(tmp) + repl.entries_size +
	   (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
		pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
				*len, tinfo.entries_size, repl.entries_size);
		return -EINVAL;
	}

	/* userspace might not need the counters */
	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
					tmp.num_counters, tinfo.nentries);
	if (ret)
		return ret;

	pos = compat_ptr(tmp.entries);
	return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
			compat_copy_entry_to_user, &pos, &tmp.entries_size);
}
1902
/* State shared between the measuring pass (buf_kern_start == NULL) and the
 * copying pass of the compat 32->64 entry translation.
 */
struct ebt_entries_buf_state {
	char *buf_kern_start;	/* kernel buffer to copy (translated) data to */
	u32 buf_kern_len;	/* total size of kernel buffer */
	u32 buf_kern_offset;	/* amount of data copied so far */
	u32 buf_user_offset;	/* read position in userspace buffer */
};
1909
1910static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1911{
1912	state->buf_kern_offset += sz;
1913	return state->buf_kern_offset >= sz ? 0 : -EINVAL;
1914}
1915
1916static int ebt_buf_add(struct ebt_entries_buf_state *state,
1917		       const void *data, unsigned int sz)
1918{
1919	if (state->buf_kern_start == NULL)
1920		goto count_only;
1921
1922	if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len))
1923		return -EINVAL;
1924
1925	memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1926
1927 count_only:
1928	state->buf_user_offset += sz;
1929	return ebt_buf_count(state, sz);
1930}
1931
1932static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1933{
1934	char *b = state->buf_kern_start;
1935
1936	if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len))
1937		return -EINVAL;
1938
1939	if (b != NULL && sz > 0)
1940		memset(b + state->buf_kern_offset, 0, sz);
1941	/* do not adjust ->buf_user_offset here, we added kernel-side padding */
1942	return ebt_buf_count(state, sz);
1943}
1944
/* Which kind of extension a compat_ebt_entry_mwt blob represents. */
enum compat_mwt {
	EBT_COMPAT_MATCH,
	EBT_COMPAT_WATCHER,
	EBT_COMPAT_TARGET,
};
1950
/* Translate one 32-bit match/watcher/target blob to the native layout.
 * Looks up the extension by name/revision, copies (or converts) its data
 * into the kernel buffer when one is present, zero-pads up to XT_ALIGN and
 * returns the native size consumed (off + match_size) or a negative errno.
 */
static int compat_mtw_from_user(const struct compat_ebt_entry_mwt *mwt,
				enum compat_mwt compat_mwt,
				struct ebt_entries_buf_state *state,
				const unsigned char *base)
{
	char name[EBT_EXTENSION_MAXNAMELEN];
	struct xt_match *match;
	struct xt_target *wt;
	void *dst = NULL;
	int off, pad = 0;
	unsigned int size_kern, match_size = mwt->match_size;

	/* reject unterminated names from userspace */
	if (strscpy(name, mwt->u.name, sizeof(name)) < 0)
		return -EINVAL;

	if (state->buf_kern_start)
		dst = state->buf_kern_start + state->buf_kern_offset;

	switch (compat_mwt) {
	case EBT_COMPAT_MATCH:
		match = xt_request_find_match(NFPROTO_BRIDGE, name,
					      mwt->u.revision);
		if (IS_ERR(match))
			return PTR_ERR(match);

		off = ebt_compat_match_offset(match, match_size);
		if (dst) {
			if (match->compat_from_user)
				match->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = match->matchsize;
		/* -1 means runtime-sized (ebt_among) */
		if (unlikely(size_kern == -1))
			size_kern = match_size;
		module_put(match->me);
		break;
	case EBT_COMPAT_WATCHER:
	case EBT_COMPAT_TARGET:
		wt = xt_request_find_target(NFPROTO_BRIDGE, name,
					    mwt->u.revision);
		if (IS_ERR(wt))
			return PTR_ERR(wt);
		off = xt_compat_target_offset(wt);

		if (dst) {
			if (wt->compat_from_user)
				wt->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = wt->targetsize;
		module_put(wt->me);
		break;

	default:
		return -EINVAL;
	}

	state->buf_kern_offset += match_size + off;
	state->buf_user_offset += match_size;
	pad = XT_ALIGN(size_kern) - size_kern;

	/* zero the alignment padding after the kernel-side data */
	if (pad > 0 && dst) {
		if (WARN_ON(state->buf_kern_len <= pad))
			return -EINVAL;
		if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad))
			return -EINVAL;
		memset(dst + size_kern, 0, pad);
	}
	return off + match_size;
}
2025
/* return size of all matches, watchers or target, including necessary
 * alignment and padding.
 */
/* Walk the run of @type blobs occupying @size_left bytes at @match32,
 * translating each via compat_mtw_from_user().  Returns the total native
 * growth in bytes, or a negative errno.
 */
static int ebt_size_mwt(const struct compat_ebt_entry_mwt *match32,
			unsigned int size_left, enum compat_mwt type,
			struct ebt_entries_buf_state *state, const void *base)
{
	const char *buf = (const char *)match32;
	int growth = 0;

	if (size_left == 0)
		return 0;

	do {
		struct ebt_entry_match *match_kern;
		int ret;

		if (size_left < sizeof(*match32))
			return -EINVAL;

		/* remember where the native header lands so we can patch
		 * its match_size after translation (copy pass only)
		 */
		match_kern = (struct ebt_entry_match *) state->buf_kern_start;
		if (match_kern) {
			char *tmp;
			tmp = state->buf_kern_start + state->buf_kern_offset;
			match_kern = (struct ebt_entry_match *) tmp;
		}
		ret = ebt_buf_add(state, buf, sizeof(*match32));
		if (ret < 0)
			return ret;
		size_left -= sizeof(*match32);

		/* add padding before match->data (if any) */
		ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
		if (ret < 0)
			return ret;

		if (match32->match_size > size_left)
			return -EINVAL;

		size_left -= match32->match_size;

		ret = compat_mtw_from_user(match32, type, state, base);
		if (ret < 0)
			return ret;

		if (WARN_ON(ret < match32->match_size))
			return -EINVAL;
		growth += ret - match32->match_size;
		growth += ebt_compat_entry_padsize();

		buf += sizeof(*match32);
		buf += match32->match_size;

		if (match_kern)
			match_kern->match_size = ret;

		match32 = (struct compat_ebt_entry_mwt *) buf;
	} while (size_left);

	return growth;
}
2087
/* called for all ebt_entry structures. */
/* Translate one compat entry (or chain header) into the native layout,
 * validating and fixing up its matches/watchers/target offsets.  During
 * the measuring pass the native offset delta is recorded in the xt compat
 * offset table.  Returns 0 or a negative errno.
 */
static int size_entry_mwt(const struct ebt_entry *entry, const unsigned char *base,
			  unsigned int *total,
			  struct ebt_entries_buf_state *state)
{
	unsigned int i, j, startoff, next_expected_off, new_offset = 0;
	/* stores match/watchers/targets & offset of next struct ebt_entry: */
	unsigned int offsets[4];
	unsigned int *offsets_update = NULL;
	int ret;
	char *buf_start;

	if (*total < sizeof(struct ebt_entries))
		return -EINVAL;

	/* bitmask == 0: chain header, same layout in both ABIs */
	if (!entry->bitmask) {
		*total -= sizeof(struct ebt_entries);
		return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
	}
	if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
		return -EINVAL;

	startoff = state->buf_user_offset;
	/* pull in most part of ebt_entry, it does not need to be changed. */
	ret = ebt_buf_add(state, entry,
			offsetof(struct ebt_entry, watchers_offset));
	if (ret < 0)
		return ret;

	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
	memcpy(&offsets[1], &entry->offsets, sizeof(entry->offsets));

	if (state->buf_kern_start) {
		buf_start = state->buf_kern_start + state->buf_kern_offset;
		offsets_update = (unsigned int *) buf_start;
	}
	ret = ebt_buf_add(state, &offsets[1],
			sizeof(offsets) - sizeof(offsets[0]));
	if (ret < 0)
		return ret;
	buf_start = (char *) entry;
	/* 0: matches offset, always follows ebt_entry.
	 * 1: watchers offset, from ebt_entry structure
	 * 2: target offset, from ebt_entry structure
	 * 3: next ebt_entry offset, from ebt_entry structure
	 *
	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
	 */
	for (i = 0; i < 4 ; ++i) {
		if (offsets[i] > *total)
			return -EINVAL;

		if (i < 3 && offsets[i] == *total)
			return -EINVAL;

		if (i == 0)
			continue;
		/* offsets must be monotonically increasing */
		if (offsets[i-1] > offsets[i])
			return -EINVAL;
	}

	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
		struct compat_ebt_entry_mwt *match32;
		unsigned int size;
		char *buf = buf_start + offsets[i];

		if (offsets[i] > offsets[j])
			return -EINVAL;

		match32 = (struct compat_ebt_entry_mwt *) buf;
		size = offsets[j] - offsets[i];
		/* i doubles as the enum compat_mwt for this region */
		ret = ebt_size_mwt(match32, size, i, state, base);
		if (ret < 0)
			return ret;
		new_offset += ret;
		if (offsets_update && new_offset) {
			pr_debug("change offset %d to %d\n",
				offsets_update[i], offsets[j] + new_offset);
			offsets_update[i] = offsets[j] + new_offset;
		}
	}

	if (state->buf_kern_start == NULL) {
		unsigned int offset = buf_start - (char *) base;

		ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset);
		if (ret < 0)
			return ret;
	}

	/* the translated entry must consume exactly next_offset user bytes */
	next_expected_off = state->buf_user_offset - startoff;
	if (next_expected_off != entry->next_offset)
		return -EINVAL;

	if (*total < entry->next_offset)
		return -EINVAL;
	*total -= entry->next_offset;
	return 0;
}
2187
2188/* repl->entries_size is the size of the ebt_entry blob in userspace.

2189 * It might need more memory when copied to a 64 bit kernel in case
2190 * userspace is 32-bit. So, first task: find out how much memory is needed.
2191 *
2192 * Called before validation is performed.
2193 *
 * Walks every compat entry via size_entry_mwt(); depending on whether
 * state->buf_kern_start is set this is the sizing pass or the copy pass.
 * Returns the required/produced kernel buffer size on success, or a
 * negative errno (including -EINVAL if the blob was not fully consumed).
 */
2194static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2195				struct ebt_entries_buf_state *state)
2196{
2197	unsigned int size_remaining = size_user;
2198	int ret;
2199
2200	ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2201					&size_remaining, state);
2202	if (ret < 0)
2203		return ret;
2204
	/* every byte of the user blob must belong to some entry */
2205	if (size_remaining)
2206		return -EINVAL;
2207
2208	return state->buf_kern_offset;
2209}
2210
2211
/* Read a 32-bit struct compat_ebt_replace from userspace and expand it
 * into a native struct ebt_replace.  Validates len and guards the
 * counter-allocation arithmetic against integer overflow.
 * Returns 0 on success or a negative errno.
 */
2212static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
2213					     sockptr_t arg, unsigned int len)
2214{
2215	struct compat_ebt_replace tmp;
2216	int i;
2217
2218	if (len < sizeof(tmp))
2219		return -EINVAL;
2220
2221	if (copy_from_sockptr(&tmp, arg, sizeof(tmp)))
2222		return -EFAULT;
2223
2224	if (len != sizeof(tmp) + tmp.entries_size)
2225		return -EINVAL;
2226
2227	if (tmp.entries_size == 0)
2228		return -EINVAL;
2229
	/* overflow checks mirror do_replace(): bound nentries/num_counters
	 * so the later countersize/vmalloc computations cannot wrap */
2230	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
2231			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
2232		return -ENOMEM;
2233	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
2234		return -ENOMEM;
2235
2236	memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
2237
2238	/* starting with hook_entry, 32 vs. 64 bit structures are different */
2239	for (i = 0; i < NF_BR_NUMHOOKS; i++)
2240		repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);
2241
2242	repl->num_counters = tmp.num_counters;
2243	repl->counters = compat_ptr(tmp.counters);
2244	repl->entries = compat_ptr(tmp.entries);
2245	return 0;
2246}
2247
/* EBT_SO_SET_ENTRIES handler for 32-bit userspace on a 64-bit kernel.
 * Copies the compat replace header, translates the 32-bit entry blob into
 * its 64-bit layout (two compat_copy_entries() passes), fixes up the hook
 * entry pointers, then hands off to the common do_replace_finish().
 * Returns 0 on success or a negative errno.
 */
2248static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)

2249{
2250	int ret, i, countersize, size64;
2251	struct ebt_table_info *newinfo;
2252	struct ebt_replace tmp;
2253	struct ebt_entries_buf_state state;
2254	void *entries_tmp;
2255
2256	ret = compat_copy_ebt_replace_from_user(&tmp, arg, len);
2257	if (ret) {
2258		/* try real handler in case userland supplied needed padding */
2259		if (ret == -EINVAL && do_replace(net, arg, len) == 0)
2260			ret = 0;
2261		return ret;
2262	}
2263
2264	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
2265	newinfo = vmalloc(sizeof(*newinfo) + countersize);
2266	if (!newinfo)
2267		return -ENOMEM;
2268
2269	if (countersize)
2270		memset(newinfo->counters, 0, countersize);
2271
2272	memset(&state, 0, sizeof(state));
2273
2274	newinfo->entries = vmalloc(tmp.entries_size);
2275	if (!newinfo->entries) {
2276		ret = -ENOMEM;
2277		goto free_newinfo;
2278	}
2279	if (copy_from_user(
2280	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
2281		ret = -EFAULT;
2282		goto free_entries;
2283	}
2284
2285	entries_tmp = newinfo->entries;
2286
	/* xt compat lock protects the per-protocol offset records used to
	 * translate 32-bit jump offsets below */
2287	xt_compat_lock(NFPROTO_BRIDGE);
2288
2289	ret = ebt_compat_init_offsets(tmp.nentries);
2290	if (ret < 0)
2291		goto out_unlock;
2292
	/* pass 1: compute required 64-bit size (buf_kern_start == NULL) */
2293	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2294	if (ret < 0)
2295		goto out_unlock;
2296
2297	pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
2298		tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
2299		xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));
2300
2301	size64 = ret;
2302	newinfo->entries = vmalloc(size64);
2303	if (!newinfo->entries) {
2304		vfree(entries_tmp);
2305		ret = -ENOMEM;
2306		goto out_unlock;
2307	}
2308
2309	memset(&state, 0, sizeof(state));
2310	state.buf_kern_start = newinfo->entries;
2311	state.buf_kern_len = size64;
2312
	/* pass 2: same walk, but now actually copy/translate the entries */
2313	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2314	if (WARN_ON(ret < 0)) {
2315		vfree(entries_tmp);
2316		goto out_unlock;
2317	}
2318
2319	vfree(entries_tmp);
2320	tmp.entries_size = size64;
2321
	/* adjust hook entry points for the 32->64 bit size growth */
2322	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
2323		char __user *usrptr;
2324		if (tmp.hook_entry[i]) {
2325			unsigned int delta;
2326			usrptr = (char __user *) tmp.hook_entry[i];
2327			delta = usrptr - tmp.entries;
2328			usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
2329			tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
2330		}
2331	}
2332
2333	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2334	xt_compat_unlock(NFPROTO_BRIDGE);
2335
2336	ret = do_replace_finish(net, &tmp, newinfo);
2337	if (ret == 0)
2338		return ret;
2339free_entries:
2340	vfree(newinfo->entries);
2341free_newinfo:
2342	vfree(newinfo);
2343	return ret;
2344out_unlock:
2345	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2346	xt_compat_unlock(NFPROTO_BRIDGE);
2347	goto free_entries;
2348}
2349
/* EBT_SO_SET_COUNTERS handler for 32-bit userspace: reads the compat
 * replace header and forwards to the common do_update_counters().
 * Falls back to the native update_counters() when len suggests the caller
 * already used 64-bit layout padding.
 */
2350static int compat_update_counters(struct net *net, sockptr_t arg,
2351				  unsigned int len)
2352{
2353	struct compat_ebt_replace hlp;
2354
2355	if (copy_from_sockptr(&hlp, arg, sizeof(hlp)))
2356		return -EFAULT;
2357
2358	/* try real handler in case userland supplied needed padding */
2359	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2360		return update_counters(net, arg, len);
2361
2362	return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2363				  hlp.num_counters, len);




















2364}
2365
/* Compat getsockopt handler: serves EBT_SO_GET_* requests to 32-bit
 * userspace, converting table info/entries to the compat layout.
 * Returns 0 on success or a negative errno; drops ebt_mutex (taken by
 * find_table_lock) and the xt compat lock before returning.
 */
2366static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
2367		void __user *user, int *len)
2368{
2369	int ret;
2370	struct compat_ebt_replace tmp;
2371	struct ebt_table *t;
2372	struct net *net = sock_net(sk);
2373
2374	if ((cmd == EBT_SO_GET_INFO || cmd == EBT_SO_GET_INIT_INFO) &&
2375	    *len != sizeof(struct compat_ebt_replace))
2376		return -EINVAL;




2377
2378	if (copy_from_user(&tmp, user, sizeof(tmp)))
2379		return -EFAULT;
2380
	/* table name comes from userspace: force NUL termination */
2381	tmp.name[sizeof(tmp.name) - 1] = '\0';
2382
2383	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
2384	if (!t)
2385		return ret;
2386
2387	xt_compat_lock(NFPROTO_BRIDGE);
2388	switch (cmd) {
2389	case EBT_SO_GET_INFO:
2390		tmp.nentries = t->private->nentries;
2391		ret = compat_table_info(t->private, &tmp);
2392		if (ret)
2393			goto out;
2394		tmp.valid_hooks = t->valid_hooks;
2395
2396		if (copy_to_user(user, &tmp, *len) != 0) {
2397			ret = -EFAULT;
2398			break;
2399		}
2400		ret = 0;
2401		break;
2402	case EBT_SO_GET_INIT_INFO:
2403		tmp.nentries = t->table->nentries;
2404		tmp.entries_size = t->table->entries_size;
2405		tmp.valid_hooks = t->table->valid_hooks;
2406
2407		if (copy_to_user(user, &tmp, *len) != 0) {
2408			ret = -EFAULT;
2409			break;
2410		}
2411		ret = 0;
2412		break;
2413	case EBT_SO_GET_ENTRIES:
2414	case EBT_SO_GET_INIT_ENTRIES:
2415		/* try real handler first in case of userland-side padding.

2416		 * in case we are dealing with an 'ordinary' 32 bit binary
2417		 * without 64bit compatibility padding, this will fail right
2418		 * after copy_from_user when the *len argument is validated.
2419		 *
2420		 * the compat_ variant needs to do one pass over the kernel
2421		 * data set to adjust for size differences before it the check.
2422		 */
2423		if (copy_everything_to_user(t, user, len, cmd) == 0)
2424			ret = 0;
2425		else
2426			ret = compat_copy_everything_to_user(t, user, len, cmd);
2427		break;
2428	default:
2429		ret = -EINVAL;
2430	}
2431 out:
2432	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2433	xt_compat_unlock(NFPROTO_BRIDGE);
2434	mutex_unlock(&ebt_mutex);
2435	return ret;
2436}
2437#endif
2438
/* Native getsockopt handler for EBT_SO_GET_*.  Requires CAP_NET_ADMIN in
 * the socket's user namespace; diverts 32-bit callers to the compat path
 * unless their request already matches the native layout.
 * Returns 0 on success or a negative errno.
 */
2439static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2440{
2441	struct net *net = sock_net(sk);
2442	struct ebt_replace tmp;
2443	struct ebt_table *t;
2444	int ret;
2445
2446	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2447		return -EPERM;
2448
2449#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
2450	/* try real handler in case userland supplied needed padding */
2451	if (in_compat_syscall() &&
2452	    ((cmd != EBT_SO_GET_INFO && cmd != EBT_SO_GET_INIT_INFO) ||
2453	     *len != sizeof(tmp)))
2454		return compat_do_ebt_get_ctl(sk, cmd, user, len);
2455#endif
2456
2457	if (copy_from_user(&tmp, user, sizeof(tmp)))
2458		return -EFAULT;
2459
	/* table name comes from userspace: force NUL termination */
2460	tmp.name[sizeof(tmp.name) - 1] = '\0';
2461
	/* on success, ebt_mutex is held and must be dropped on every path */
2462	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
2463	if (!t)
2464		return ret;
2465
2466	switch (cmd) {
2467	case EBT_SO_GET_INFO:
2468	case EBT_SO_GET_INIT_INFO:
2469		if (*len != sizeof(struct ebt_replace)) {
2470			ret = -EINVAL;
2471			mutex_unlock(&ebt_mutex);
2472			break;
2473		}
2474		if (cmd == EBT_SO_GET_INFO) {
2475			tmp.nentries = t->private->nentries;
2476			tmp.entries_size = t->private->entries_size;
2477			tmp.valid_hooks = t->valid_hooks;
2478		} else {
2479			tmp.nentries = t->table->nentries;
2480			tmp.entries_size = t->table->entries_size;
2481			tmp.valid_hooks = t->table->valid_hooks;
2482		}
2483		mutex_unlock(&ebt_mutex);
2484		if (copy_to_user(user, &tmp, *len) != 0) {
2485			ret = -EFAULT;
2486			break;
2487		}
2488		ret = 0;
2489		break;
2490
2491	case EBT_SO_GET_ENTRIES:
2492	case EBT_SO_GET_INIT_ENTRIES:
2493		ret = copy_everything_to_user(t, user, len, cmd);
2494		mutex_unlock(&ebt_mutex);
2495		break;
2496
2497	default:
2498		mutex_unlock(&ebt_mutex);
2499		ret = -EINVAL;
2500	}
2501
2502	return ret;
2503}
2504
/* Setsockopt dispatcher for EBT_SO_SET_*.  Requires CAP_NET_ADMIN in the
 * socket's user namespace and routes 32-bit callers to the compat
 * variants when the kernel was built with xtables compat support.
 */
2505static int do_ebt_set_ctl(struct sock *sk, int cmd, sockptr_t arg,
2506		unsigned int len)
2507{
2508	struct net *net = sock_net(sk);
2509	int ret;
2510
2511	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2512		return -EPERM;
2513
2514	switch (cmd) {
2515	case EBT_SO_SET_ENTRIES:
2516#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
2517		if (in_compat_syscall())
2518			ret = compat_do_replace(net, arg, len);
2519		else
2520#endif
2521			ret = do_replace(net, arg, len);
2522		break;
2523	case EBT_SO_SET_COUNTERS:
2524#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
2525		if (in_compat_syscall())
2526			ret = compat_update_counters(net, arg, len);
2527		else
2528#endif
2529			ret = update_counters(net, arg, len);
2530		break;
2531	default:
2532		ret = -EINVAL;
2533	}
2534	return ret;
2535}
2536
/* Netfilter sockopt registration: wires the EBT_SO_* get/set ranges on
 * PF_INET sockets to the handlers above. */
2537static struct nf_sockopt_ops ebt_sockopts = {
2538	.pf		= PF_INET,
2539	.set_optmin	= EBT_BASE_CTL,
2540	.set_optmax	= EBT_SO_SET_MAX + 1,
2541	.set		= do_ebt_set_ctl,



2542	.get_optmin	= EBT_BASE_CTL,
2543	.get_optmax	= EBT_SO_GET_MAX + 1,
2544	.get		= do_ebt_get_ctl,



2545	.owner		= THIS_MODULE,
2546};
2547
/* Per-netns init: start each namespace with an empty ebtables table list. */
2548static int __net_init ebt_pernet_init(struct net *net)
2549{
2550	struct ebt_pernet *ebt_net = net_generic(net, ebt_pernet_id);
2551
2552	INIT_LIST_HEAD(&ebt_net->tables);
2553	return 0;
2554}
2555
/* Registers struct ebt_pernet as per-netns generic data (id/size). */
2556static struct pernet_operations ebt_net_ops = {
2557	.init = ebt_pernet_init,
2558	.id   = &ebt_pernet_id,
2559	.size = sizeof(struct ebt_pernet),
2560};
2561
/* Module init: register the standard target, the sockopt interface and
 * the per-netns subsystem, unwinding in reverse order on failure. */
2562static int __init ebtables_init(void)
2563{
2564	int ret;
2565
2566	ret = xt_register_target(&ebt_standard_target);
2567	if (ret < 0)
2568		return ret;
2569	ret = nf_register_sockopt(&ebt_sockopts);
2570	if (ret < 0) {
2571		xt_unregister_target(&ebt_standard_target);
2572		return ret;
2573	}
2574
2575	ret = register_pernet_subsys(&ebt_net_ops);
2576	if (ret < 0) {
2577		nf_unregister_sockopt(&ebt_sockopts);
2578		xt_unregister_target(&ebt_standard_target);
2579		return ret;
2580	}
2581
2582	return 0;
2583}
2584
/* Module exit: tear down everything ebtables_init() registered. */
2585static void ebtables_fini(void)
2586{
2587	nf_unregister_sockopt(&ebt_sockopts);
2588	xt_unregister_target(&ebt_standard_target);
2589	unregister_pernet_subsys(&ebt_net_ops);
2590}
2591
2592EXPORT_SYMBOL(ebt_register_table);
2593EXPORT_SYMBOL(ebt_unregister_table);
2594EXPORT_SYMBOL(ebt_do_table);
2595module_init(ebtables_init);
2596module_exit(ebtables_fini);
2597MODULE_LICENSE("GPL");
2598MODULE_DESCRIPTION("ebtables legacy core");
v3.1

/* NOTE(review): everything below is an older (kernel v3.1) copy of this
 * file, retained from the original cross-reference listing for comparison
 * with the v6.8 code above. */
   1/*
   2 *  ebtables
   3 *
   4 *  Author:
   5 *  Bart De Schuymer		<bdschuym@pandora.be>
   6 *
   7 *  ebtables.c,v 2.0, July, 2002
   8 *
   9 *  This code is strongly inspired by the iptables code which is
  10 *  Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
  11 *
  12 *  This program is free software; you can redistribute it and/or
  13 *  modify it under the terms of the GNU General Public License
  14 *  as published by the Free Software Foundation; either version
  15 *  2 of the License, or (at your option) any later version.
  16 */
  17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  18#include <linux/kmod.h>
  19#include <linux/module.h>
  20#include <linux/vmalloc.h>
  21#include <linux/netfilter/x_tables.h>
  22#include <linux/netfilter_bridge/ebtables.h>
  23#include <linux/spinlock.h>
  24#include <linux/mutex.h>
  25#include <linux/slab.h>
  26#include <asm/uaccess.h>
  27#include <linux/smp.h>
  28#include <linux/cpumask.h>
 
  29#include <net/sock.h>
 
  30/* needed for logical [in,out]-dev filtering */
  31#include "../br_private.h"
  32
  33#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
  34					 "report to author: "format, ## args)
  35/* #define BUGPRINT(format, args...) */
  36
  37/*
  38 * Each cpu has its own set of counters, so there is no need for write_lock in
  39 * the softirq
  40 * For reading or updating the counters, the user context needs to
  41 * get a write_lock
  42 */
  43
  44/* The size of each set of counters is altered to get cache alignment */
  45#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
  46#define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
  47#define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
  48   COUNTER_OFFSET(n) * cpu))
  49
 
 
 
  50
 
 
 
 
 
 
 
  51
 
 
  52static DEFINE_MUTEX(ebt_mutex);
  53
  54#ifdef CONFIG_COMPAT
/* Translate a 32-bit standard-target verdict to the native layout:
 * non-negative verdicts are jump offsets and must be adjusted for the
 * 32->64 bit entry size growth. */
  55static void ebt_standard_compat_from_user(void *dst, const void *src)
  56{
  57	int v = *(compat_int_t *)src;
  58
  59	if (v >= 0)
  60		v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
  61	memcpy(dst, &v, sizeof(v));
  62}
  63
/* Inverse of ebt_standard_compat_from_user(): shrink a native jump offset
 * back to its 32-bit value before copying it out to userspace. */
  64static int ebt_standard_compat_to_user(void __user *dst, const void *src)
  65{
  66	compat_int_t cv = *(int *)src;
  67
  68	if (cv >= 0)
  69		cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
  70	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
  71}
  72#endif
  73
  74
/* The built-in "standard" target whose data is just an int verdict;
 * its .target is NULL, which ebt_do_table() uses to detect it. */
  75static struct xt_target ebt_standard_target = {
  76	.name       = "standard",
  77	.revision   = 0,
  78	.family     = NFPROTO_BRIDGE,
  79	.targetsize = sizeof(int),
  80#ifdef CONFIG_COMPAT
  81	.compatsize = sizeof(compat_int_t),
  82	.compat_from_user = ebt_standard_compat_from_user,
  83	.compat_to_user =  ebt_standard_compat_to_user,
  84#endif
  85};
  86
/* Run one watcher target on the skb; watchers only observe, so the
 * return value of the target is discarded and 0 is always returned. */
  87static inline int
  88ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
  89	       struct xt_action_param *par)
  90{
  91	par->target   = w->u.watcher;
  92	par->targinfo = w->data;
  93	w->u.watcher->target(skb, par);
  94	/* watchers don't give a verdict */
  95	return 0;
  96}
  97
/* Run one extension match on the skb and map its boolean result onto the
 * EBT_MATCH / EBT_NOMATCH iteration codes. */
  98static inline int
  99ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
 100	     struct xt_action_param *par)
 101{
 102	par->match     = m->u.match;
 103	par->matchinfo = m->data;
 104	return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
 105}
 106
/* Compare a rule's interface name against a device.
 * Returns 0 on match (empty rule name matches anything), non-zero on
 * mismatch.  A byte value of 1 in the rule name is the wildcard token,
 * matching any remaining suffix of the device name. */
 107static inline int
 108ebt_dev_check(const char *entry, const struct net_device *device)
 109{
 110	int i = 0;
 111	const char *devname;
 112
 113	if (*entry == '\0')
 114		return 0;
 115	if (!device)
 116		return 1;
 117	devname = device->name;
 118	/* 1 is the wildcard token */
 119	while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
 120		i++;
 121	return (devname[i] != entry[i] && entry[i] != 1);
 122}
 123
/* FWINV2: apply the rule's inversion flag to a boolean condition. */
 124#define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
 125/* process standard matches */
/* Evaluate the built-in (non-extension) matches of one rule: protocol,
 * in/out device, logical bridge in/out device, source/dest MAC + mask.
 * Returns 0 when the frame matches the rule, 1 otherwise. */
 126static inline int
 127ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
 128                const struct net_device *in, const struct net_device *out)
 129{
 130	const struct ethhdr *h = eth_hdr(skb);
 131	const struct net_bridge_port *p;
 132	__be16 ethproto;
 133	int verdict, i;
 134
	/* a VLAN tag in the skb metadata means the frame is 802.1Q */
 135	if (vlan_tx_tag_present(skb))
 136		ethproto = htons(ETH_P_8021Q);
 137	else
 138		ethproto = h->h_proto;
 139
 140	if (e->bitmask & EBT_802_3) {
		/* 802.3 frames carry a length (< 1536) in the proto field */
 141		if (FWINV2(ntohs(ethproto) >= 1536, EBT_IPROTO))
 142			return 1;
 143	} else if (!(e->bitmask & EBT_NOPROTO) &&
 144	   FWINV2(e->ethproto != ethproto, EBT_IPROTO))
 145		return 1;
 146
 147	if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
 148		return 1;
 149	if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
 150		return 1;
 151	/* rcu_read_lock()ed by nf_hook_slow */
 152	if (in && (p = br_port_get_rcu(in)) != NULL &&
 153	    FWINV2(ebt_dev_check(e->logical_in, p->br->dev), EBT_ILOGICALIN))

 154		return 1;
 155	if (out && (p = br_port_get_rcu(out)) != NULL &&
 156	    FWINV2(ebt_dev_check(e->logical_out, p->br->dev), EBT_ILOGICALOUT))

 157		return 1;
 158
 159	if (e->bitmask & EBT_SOURCEMAC) {
		/* verdict accumulates any masked byte difference: 0 == match */
 160		verdict = 0;
 161		for (i = 0; i < 6; i++)
 162			verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
 163			   e->sourcemsk[i];
 164		if (FWINV2(verdict != 0, EBT_ISOURCE) )
 165			return 1;
 166	}
 167	if (e->bitmask & EBT_DESTMAC) {
 168		verdict = 0;
 169		for (i = 0; i < 6; i++)
 170			verdict |= (h->h_dest[i] ^ e->destmac[i]) &
 171			   e->destmsk[i];
 172		if (FWINV2(verdict != 0, EBT_IDEST) )
 173			return 1;
 174	}
 175	return 0;
 176}
 177
/* Step to the next rule: entries are laid out back-to-back, linked by
 * their self-relative next_offset. */
 178static inline __pure
 179struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
 180{
 181	return (void *)entry + entry->next_offset;
 182}
 183
 
 
 
 
 
 
 184/* Do some firewalling */
/* Traverse the chain attached to @hook for one skb and return the
 * netfilter verdict (NF_ACCEPT or NF_DROP).  Walks entries linearly,
 * follows jumps into user-defined chains via a per-cpu chainstack, and
 * bumps per-cpu packet/byte counters under the table's read lock. */
 185unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
 186   const struct net_device *in, const struct net_device *out,
 187   struct ebt_table *table)
 188{


 189	int i, nentries;
 190	struct ebt_entry *point;
 191	struct ebt_counter *counter_base, *cb_base;
 192	const struct ebt_entry_target *t;
 193	int verdict, sp = 0;
 194	struct ebt_chainstack *cs;
 195	struct ebt_entries *chaininfo;
 196	const char *base;
 197	const struct ebt_table_info *private;
 198	struct xt_action_param acpar;
 199
 200	acpar.family  = NFPROTO_BRIDGE;
 201	acpar.in      = in;
 202	acpar.out     = out;
 203	acpar.hotdrop = false;
 204	acpar.hooknum = hook;
 205
	/* read lock: counters are per-cpu, so softirq readers don't race */
 206	read_lock_bh(&table->lock);
 207	private = table->private;
 208	cb_base = COUNTER_BASE(private->counters, private->nentries,
 209	   smp_processor_id());
 210	if (private->chainstack)
 211		cs = private->chainstack[smp_processor_id()];
 212	else
 213		cs = NULL;
 214	chaininfo = private->hook_entry[hook];
 215	nentries = private->hook_entry[hook]->nentries;
 216	point = (struct ebt_entry *)(private->hook_entry[hook]->data);
 217	counter_base = cb_base + private->hook_entry[hook]->counter_offset;
 218	/* base for chain jumps */
 219	base = private->entries;
 220	i = 0;
 221	while (i < nentries) {
 222		if (ebt_basic_match(point, skb, in, out))
 223			goto letscontinue;
 224
 225		if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
 226			goto letscontinue;
 227		if (acpar.hotdrop) {
 228			read_unlock_bh(&table->lock);
 229			return NF_DROP;
 230		}
 231
 232		/* increase counter */
 233		(*(counter_base + i)).pcnt++;
 234		(*(counter_base + i)).bcnt += skb->len;
 235
 236		/* these should only watch: not modify, nor tell us
 237		   what to do with the packet */

 238		EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);
 239
 240		t = (struct ebt_entry_target *)
 241		   (((char *)point) + point->target_offset);
 242		/* standard target */
 243		if (!t->u.target->target)
 244			verdict = ((struct ebt_standard_target *)t)->verdict;
 245		else {
 246			acpar.target   = t->u.target;
 247			acpar.targinfo = t->data;
 248			verdict = t->u.target->target(skb, &acpar);
 249		}
 250		if (verdict == EBT_ACCEPT) {
 251			read_unlock_bh(&table->lock);
 252			return NF_ACCEPT;
 253		}
 254		if (verdict == EBT_DROP) {
 255			read_unlock_bh(&table->lock);
 256			return NF_DROP;
 257		}
 258		if (verdict == EBT_RETURN) {
 259letsreturn:
 260#ifdef CONFIG_NETFILTER_DEBUG
 261			if (sp == 0) {
 262				BUGPRINT("RETURN on base chain");
 263				/* act like this is EBT_CONTINUE */
 264				goto letscontinue;
 265			}
 266#endif
			/* pop the chainstack: resume in the calling chain */
 267			sp--;
 268			/* put all the local variables right */
 269			i = cs[sp].n;
 270			chaininfo = cs[sp].chaininfo;
 271			nentries = chaininfo->nentries;
 272			point = cs[sp].e;
 273			counter_base = cb_base +
 274			   chaininfo->counter_offset;
 275			continue;
 276		}
 277		if (verdict == EBT_CONTINUE)
 278			goto letscontinue;
 279#ifdef CONFIG_NETFILTER_DEBUG
 280		if (verdict < 0) {
 281			BUGPRINT("bogus standard verdict\n");
 282			read_unlock_bh(&table->lock);
 283			return NF_DROP;
 284		}
 285#endif
 286		/* jump to a udc */
		/* a non-negative verdict is a byte offset into the entry blob */
 287		cs[sp].n = i + 1;
 288		cs[sp].chaininfo = chaininfo;
 289		cs[sp].e = ebt_next_entry(point);
 290		i = 0;
 291		chaininfo = (struct ebt_entries *) (base + verdict);
 292#ifdef CONFIG_NETFILTER_DEBUG
 293		if (chaininfo->distinguisher) {
 294			BUGPRINT("jump to non-chain\n");
 295			read_unlock_bh(&table->lock);
 296			return NF_DROP;
 297		}
 298#endif
 299		nentries = chaininfo->nentries;
 300		point = (struct ebt_entry *)chaininfo->data;
 301		counter_base = cb_base + chaininfo->counter_offset;
 302		sp++;
 303		continue;
 304letscontinue:
 305		point = ebt_next_entry(point);
 306		i++;
 307	}
 308
 309	/* I actually like this :) */
	/* end of chain: apply the chain policy */
 310	if (chaininfo->policy == EBT_RETURN)
 311		goto letsreturn;
 312	if (chaininfo->policy == EBT_ACCEPT) {
 313		read_unlock_bh(&table->lock);
 314		return NF_ACCEPT;
 315	}
 316	read_unlock_bh(&table->lock);
 317	return NF_DROP;
 318}
 319
 320/* If it succeeds, returns element and locks mutex */
/* Look up an entry by name in a list whose elements begin with
 * { list_head; char name[] }.  On success the mutex stays locked and the
 * element is returned; on failure NULL is returned with *error set and
 * the mutex released.  No module autoloading is attempted here. */
 321static inline void *
 322find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
 323   struct mutex *mutex)
 324{
	/* generic view of the common header shared by tables/targets/... */
 325	struct {
 326		struct list_head list;
 327		char name[EBT_FUNCTION_MAXNAMELEN];
 328	} *e;
 329
 330	*error = mutex_lock_interruptible(mutex);
 331	if (*error != 0)
 332		return NULL;
 333
 334	list_for_each_entry(e, head, list) {
 335		if (strcmp(e->name, name) == 0)
 336			return e;






















 337	}


 338	*error = -ENOENT;
 339	mutex_unlock(mutex);
 340	return NULL;
 341}
 342
/* Like find_inlist_lock_noload(), but on a miss requests the module
 * "<prefix><name>" and retries once. */
 343static void *
 344find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
 345   int *error, struct mutex *mutex)
 346{
 347	return try_then_request_module(
 348			find_inlist_lock_noload(head, name, error, mutex),
 349			"%s%s", prefix, name);
 350}
 351
/* Find an ebtables table by name in @net, autoloading "ebtable_<name>"
 * if needed; on success the passed mutex is held by the caller. */
 352static inline struct ebt_table *
 353find_table_lock(struct net *net, const char *name, int *error,
 354		struct mutex *mutex)
 355{
 356	return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
 357				"ebtable_", error, mutex);
 358}
 359
 
 
 
 
 
 
 
 
 
 
/* Validate one extension match of a rule: bounds-check its size against
 * the watchers offset, resolve the xt match by name (autoloading if
 * needed), and run xt_check_match().  Increments *cnt on success so the
 * caller can clean up exactly the validated matches on a later failure. */
 360static inline int
 361ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
 362		unsigned int *cnt)
 363{
 364	const struct ebt_entry *e = par->entryinfo;
 365	struct xt_match *match;
 366	size_t left = ((char *)e + e->watchers_offset) - (char *)m;
 367	int ret;
 368
 369	if (left < sizeof(struct ebt_entry_match) ||
 370	    left - sizeof(struct ebt_entry_match) < m->match_size)
 371		return -EINVAL;
 372
 373	match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);






 374	if (IS_ERR(match))
 375		return PTR_ERR(match);
 376	m->u.match = match;
 377
 378	par->match     = match;
 379	par->matchinfo = m->data;
 380	ret = xt_check_match(par, m->match_size,
 381	      e->ethproto, e->invflags & EBT_IPROTO);
 382	if (ret < 0) {
		/* drop the module ref taken by xt_request_find_match() */
 383		module_put(match->me);
 384		return ret;
 385	}
 386
 387	(*cnt)++;
 388	return 0;
 389}
 390
/* Validate one watcher of a rule, mirroring ebt_check_match(): size is
 * bounded by the target offset and the watcher is resolved/checked as an
 * xt target.  Increments *cnt on success for partial-cleanup accounting. */
 391static inline int
 392ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
 393		  unsigned int *cnt)
 394{
 395	const struct ebt_entry *e = par->entryinfo;
 396	struct xt_target *watcher;
 397	size_t left = ((char *)e + e->target_offset) - (char *)w;
 398	int ret;
 399
 400	if (left < sizeof(struct ebt_entry_watcher) ||
 401	   left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
 402		return -EINVAL;
 403
 404	watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
 405	if (IS_ERR(watcher))
 406		return PTR_ERR(watcher);






 407	w->u.watcher = watcher;
 408
 409	par->target   = watcher;
 410	par->targinfo = w->data;
 411	ret = xt_check_target(par, w->watcher_size,
 412	      e->ethproto, e->invflags & EBT_IPROTO);
 413	if (ret < 0) {
		/* drop the module ref taken by xt_request_find_target() */
 414		module_put(watcher->me);
 415		return ret;
 416	}
 417
 418	(*cnt)++;
 419	return 0;
 420}
 421
/* First structural pass over the user-supplied entry blob: walk it once,
 * resolve each valid hook's userspace hook_entry pointer to the matching
 * chain header inside newinfo->entries, and verify the blob is a clean
 * sequence of chain headers and entries covering exactly entries_size
 * bytes.  Returns 0 or -EINVAL. */
 422static int ebt_verify_pointers(const struct ebt_replace *repl,
 423			       struct ebt_table_info *newinfo)
 424{
 425	unsigned int limit = repl->entries_size;
 426	unsigned int valid_hooks = repl->valid_hooks;
 427	unsigned int offset = 0;
 428	int i;
 429
 430	for (i = 0; i < NF_BR_NUMHOOKS; i++)
 431		newinfo->hook_entry[i] = NULL;
 432
 433	newinfo->entries_size = repl->entries_size;
 434	newinfo->nentries = repl->nentries;
 435
 436	while (offset < limit) {
 437		size_t left = limit - offset;
 438		struct ebt_entry *e = (void *)newinfo->entries + offset;
 439
 440		if (left < sizeof(unsigned int))
 441			break;
 442
		/* does this offset correspond to a hook's chain start? */
 443		for (i = 0; i < NF_BR_NUMHOOKS; i++) {
 444			if ((valid_hooks & (1 << i)) == 0)
 445				continue;
 446			if ((char __user *)repl->hook_entry[i] ==
 447			     repl->entries + offset)
 448				break;
 449		}
 450
 451		if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
			/* chain header: its bitmask field (distinguisher)
			 * must be zero to be unambiguous */
 452			if (e->bitmask != 0) {
 453				/* we make userspace set this right,
 454				   so there is no misunderstanding */
 455				BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
 456					 "in distinguisher\n");
 457				return -EINVAL;
 458			}
 459			if (i != NF_BR_NUMHOOKS)
 460				newinfo->hook_entry[i] = (struct ebt_entries *)e;
 461			if (left < sizeof(struct ebt_entries))
 462				break;
 463			offset += sizeof(struct ebt_entries);
 464		} else {
 465			if (left < sizeof(struct ebt_entry))
 466				break;
 467			if (left < e->next_offset)
 468				break;
 469			if (e->next_offset < sizeof(struct ebt_entry))
 470				return -EINVAL;
 471			offset += e->next_offset;
 472		}
 473	}
	/* the walk must consume the blob exactly, no trailing bytes */
 474	if (offset != limit) {
 475		BUGPRINT("entries_size too small\n");
 476		return -EINVAL;
 477	}
 478
 479	/* check if all valid hooks have a chain */
 480	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
 481		if (!newinfo->hook_entry[i] &&
 482		   (valid_hooks & (1 << i))) {
 483			BUGPRINT("Valid hook without chain\n");
 484			return -EINVAL;
 485		}
 486	}
 487	return 0;
 488}
 489
 490/*
 491 * this one is very careful, as it is the first function
 492 * to parse the userspace data
 493 *
 * Per-element structural validation: distinguishes chain headers from
 * rules, checks chain policies and counter offsets, validates a rule's
 * internal watchers/target/next offsets, and maintains the running counts
 * (@n expected entries in current chain, @cnt seen so far, @totalcnt all
 * rules, @udc_cnt user-defined chains). Returns 0 or -EINVAL. */
 494static inline int
 495ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
 496   const struct ebt_table_info *newinfo,
 497   unsigned int *n, unsigned int *cnt,
 498   unsigned int *totalcnt, unsigned int *udc_cnt)
 499{
 500	int i;
 501
 502	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
 503		if ((void *)e == (void *)newinfo->hook_entry[i])
 504			break;
 505	}
 506	/* beginning of a new chain
 507	   if i == NF_BR_NUMHOOKS it must be a user defined chain */

 508	if (i != NF_BR_NUMHOOKS || !e->bitmask) {
 509		/* this checks if the previous chain has as many entries
 510		   as it said it has */
 511		if (*n != *cnt) {
 512			BUGPRINT("nentries does not equal the nr of entries "
 513				 "in the chain\n");
 514			return -EINVAL;
 515		}
 516		if (((struct ebt_entries *)e)->policy != EBT_DROP &&
 517		   ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
 518			/* only RETURN from udc */
 519			if (i != NF_BR_NUMHOOKS ||
 520			   ((struct ebt_entries *)e)->policy != EBT_RETURN) {
 521				BUGPRINT("bad policy\n");
 522				return -EINVAL;
 523			}
 524		}
 525		if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
 526			(*udc_cnt)++;
 527		if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
 528			BUGPRINT("counter_offset != totalcnt");
 529			return -EINVAL;
 530		}
 531		*n = ((struct ebt_entries *)e)->nentries;
 532		*cnt = 0;
 533		return 0;
 534	}
 535	/* a plain old entry, heh */
 536	if (sizeof(struct ebt_entry) > e->watchers_offset ||
 537	   e->watchers_offset > e->target_offset ||
 538	   e->target_offset >= e->next_offset) {
 539		BUGPRINT("entry offsets not in right order\n");
 540		return -EINVAL;
 541	}
 542	/* this is not checked anywhere else */
 543	if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
 544		BUGPRINT("target size too small\n");
 545		return -EINVAL;
 546	}
 547	(*cnt)++;
 548	(*totalcnt)++;
 549	return 0;
 550}
 551
/* Chain-loop-check bookkeeping: one record per user-defined chain, with
 * its chainstack slot, the index it was entered from, and the set of base
 * hooks that can reach it. */
 552struct ebt_cl_stack
 553{
 554	struct ebt_chainstack cs;
 555	int from;
 556	unsigned int hookmask;
 557};
 558
 559/*
 560 * we need these positions to check that the jumps to a different part of the
 561 * entries is a jump to the beginning of a new chain.
 562 *
 * Records the position of each user-defined chain header into @udc,
 * skipping rules and the base-hook chains.  Never fails (returns 0 so the
 * EBT_ENTRY_ITERATE walk continues). */
 563static inline int
 564ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
 565   unsigned int *n, struct ebt_cl_stack *udc)
 566{
 567	int i;
 568
 569	/* we're only interested in chain starts */
 570	if (e->bitmask)
 571		return 0;
 572	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
 573		if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
 574			break;
 575	}
 576	/* only care about udc */
 577	if (i != NF_BR_NUMHOOKS)
 578		return 0;
 579
 580	udc[*n].cs.chaininfo = (struct ebt_entries *)e;
 581	/* these initialisations are depended on later in check_chainloops() */
 582	udc[*n].cs.n = 0;
 583	udc[*n].hookmask = 0;
 584
 585	(*n)++;
 586	return 0;
 587}
 588
/* Destroy one match and drop its module reference.  When @i is non-NULL
 * only the first *i matches are cleaned (partial cleanup after a failed
 * validation); returning 1 stops the iteration. */
 589static inline int
 590ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
 591{
 592	struct xt_mtdtor_param par;
 593
 594	if (i && (*i)-- == 0)
 595		return 1;
 596
 597	par.net       = net;
 598	par.match     = m->u.match;
 599	par.matchinfo = m->data;
 600	par.family    = NFPROTO_BRIDGE;
 601	if (par.match->destroy != NULL)
 602		par.match->destroy(&par);
 603	module_put(par.match->me);
 604	return 0;
 605}
 606
/* Destroy one watcher and drop its module reference; same partial-cleanup
 * convention via @i as ebt_cleanup_match(). */
 607static inline int
 608ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
 609{
 610	struct xt_tgdtor_param par;
 611
 612	if (i && (*i)-- == 0)
 613		return 1;
 614
 615	par.net      = net;
 616	par.target   = w->u.watcher;
 617	par.targinfo = w->data;
 618	par.family   = NFPROTO_BRIDGE;
 619	if (par.target->destroy != NULL)
 620		par.target->destroy(&par);
 621	module_put(par.target->me);
 622	return 0;
 623}
 624
/* release one rule: destroy its watchers, matches and target and drop
 * their module references. With cnt != NULL only the first *cnt entries
 * are cleaned (undo path after a partially failed translate_table()).
 */
static inline int
ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
{
	struct xt_tgdtor_param par;
	struct ebt_entry_target *t;

	/* skip chain descriptors (struct ebt_entries) */
	if (e->bitmask == 0)
		return 0;
	/* we're done */
	if (cnt && (*cnt)-- == 0)
		return 1;
	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);

	par.net      = net;
	par.target   = t->u.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_BRIDGE;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	return 0;
}
 649
/* validate one rule: flags, hook membership, matches, watchers and the
 * target. On success the extension modules are held and *cnt is bumped;
 * on failure everything checked so far is cleaned up again via the goto
 * labels at the bottom.
 */
static inline int
ebt_check_entry(struct ebt_entry *e, struct net *net,
   const struct ebt_table_info *newinfo,
   const char *name, unsigned int *cnt,
   struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
{
	struct ebt_entry_target *t;
	struct xt_target *target;
	unsigned int i, j, hook = 0, hookmask = 0;
	size_t gap;
	int ret;
	struct xt_mtchk_param mtpar;
	struct xt_tgchk_param tgpar;

	/* don't mess with the struct ebt_entries */
	if (e->bitmask == 0)
		return 0;

	if (e->bitmask & ~EBT_F_MASK) {
		BUGPRINT("Unknown flag for bitmask\n");
		return -EINVAL;
	}
	if (e->invflags & ~EBT_INV_MASK) {
		BUGPRINT("Unknown flag for inv bitmask\n");
		return -EINVAL;
	}
	if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
		BUGPRINT("NOPROTO & 802_3 not allowed\n");
		return -EINVAL;
	}
	/* what hook do we belong to? (hooks are ordered in memory, so the
	 * last hook_entry below us is ours) */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if (!newinfo->hook_entry[i])
			continue;
		if ((char *)newinfo->hook_entry[i] < (char *)e)
			hook = i;
		else
			break;
	}
	/* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
	   a base chain */
	if (i < NF_BR_NUMHOOKS)
		hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
	else {
		/* the rule sits in a user defined chain: inherit the
		 * reachability mask computed by check_chainloops() for the
		 * udc that contains this entry */
		for (i = 0; i < udc_cnt; i++)
			if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
				break;
		if (i == 0)
			hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
		else
			hookmask = cl_s[i - 1].hookmask;
	}
	i = 0;

	/* matches and watchers/targets share most check parameters */
	mtpar.net	= tgpar.net       = net;
	mtpar.table     = tgpar.table     = name;
	mtpar.entryinfo = tgpar.entryinfo = e;
	mtpar.hook_mask = tgpar.hook_mask = hookmask;
	mtpar.family    = tgpar.family    = NFPROTO_BRIDGE;
	ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
	if (ret != 0)
		goto cleanup_matches;
	j = 0;
	ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
	if (ret != 0)
		goto cleanup_watchers;
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
	gap = e->next_offset - e->target_offset;

	target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto cleanup_watchers;
	}

	t->u.target = target;
	if (t->u.target == &ebt_standard_target) {
		if (gap < sizeof(struct ebt_standard_target)) {
			BUGPRINT("Standard target size too big\n");
			ret = -EFAULT;
			goto cleanup_watchers;
		}
		if (((struct ebt_standard_target *)t)->verdict <
		   -NUM_STANDARD_TARGETS) {
			BUGPRINT("Invalid standard target\n");
			ret = -EFAULT;
			goto cleanup_watchers;
		}
	} else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
		/* target data would run past the end of this entry */
		module_put(t->u.target->me);
		ret = -EFAULT;
		goto cleanup_watchers;
	}

	tgpar.target   = target;
	tgpar.targinfo = t->data;
	ret = xt_check_target(&tgpar, t->target_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		module_put(target->me);
		goto cleanup_watchers;
	}
	(*cnt)++;
	return 0;
cleanup_watchers:
	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
cleanup_matches:
	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
	return ret;
}
 760
 761/*
 762 * checks for loops and sets the hook mask for udc
 763 * the hook mask for udc tells us from which base chains the udc can be
 764 * accessed. This mask is a parameter to the check() functions of the extensions
 765 */
 766static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
 767   unsigned int udc_cnt, unsigned int hooknr, char *base)
 768{
 769	int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
 770	const struct ebt_entry *e = (struct ebt_entry *)chain->data;
 771	const struct ebt_entry_target *t;
 772
 773	while (pos < nentries || chain_nr != -1) {
 774		/* end of udc, go back one 'recursion' step */
 775		if (pos == nentries) {
 776			/* put back values of the time when this chain was called */
 777			e = cl_s[chain_nr].cs.e;
 778			if (cl_s[chain_nr].from != -1)
 779				nentries =
 780				cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
 781			else
 782				nentries = chain->nentries;
 783			pos = cl_s[chain_nr].cs.n;
 784			/* make sure we won't see a loop that isn't one */
 785			cl_s[chain_nr].cs.n = 0;
 786			chain_nr = cl_s[chain_nr].from;
 787			if (pos == nentries)
 788				continue;
 789		}
 790		t = (struct ebt_entry_target *)
 791		   (((char *)e) + e->target_offset);
 792		if (strcmp(t->u.name, EBT_STANDARD_TARGET))
 793			goto letscontinue;
 794		if (e->target_offset + sizeof(struct ebt_standard_target) >
 795		   e->next_offset) {
 796			BUGPRINT("Standard target size too big\n");
 797			return -1;
 798		}
 799		verdict = ((struct ebt_standard_target *)t)->verdict;
 800		if (verdict >= 0) { /* jump to another chain */
 801			struct ebt_entries *hlp2 =
 802			   (struct ebt_entries *)(base + verdict);
 803			for (i = 0; i < udc_cnt; i++)
 804				if (hlp2 == cl_s[i].cs.chaininfo)
 805					break;
 806			/* bad destination or loop */
 807			if (i == udc_cnt) {
 808				BUGPRINT("bad destination\n");
 809				return -1;
 810			}
 811			if (cl_s[i].cs.n) {
 812				BUGPRINT("loop\n");
 813				return -1;
 814			}
 815			if (cl_s[i].hookmask & (1 << hooknr))
 816				goto letscontinue;
 817			/* this can't be 0, so the loop test is correct */
 818			cl_s[i].cs.n = pos + 1;
 819			pos = 0;
 820			cl_s[i].cs.e = ebt_next_entry(e);
 821			e = (struct ebt_entry *)(hlp2->data);
 822			nentries = hlp2->nentries;
 823			cl_s[i].from = chain_nr;
 824			chain_nr = i;
 825			/* this udc is accessible from the base chain for hooknr */
 826			cl_s[i].hookmask |= (1 << hooknr);
 827			continue;
 828		}
 829letscontinue:
 830		e = ebt_next_entry(e);
 831		pos++;
 832	}
 833	return 0;
 834}
 835
/* do the parsing of the table/chains/entries/matches/watchers/targets, heh
 *
 * On success all extension modules referenced by the entries are held.
 * On failure everything allocated here is freed again, except
 * newinfo->chainstack which the callers (do_replace_finish() /
 * ebt_register_table()) free on the error path.
 */
static int translate_table(struct net *net, const char *name,
			   struct ebt_table_info *newinfo)
{
	unsigned int i, j, k, udc_cnt;
	int ret;
	struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */

	/* the first valid hook's chain must sit at the very start of the blob */
	i = 0;
	while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
		i++;
	if (i == NF_BR_NUMHOOKS) {
		BUGPRINT("No valid hooks specified\n");
		return -EINVAL;
	}
	if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
		BUGPRINT("Chains don't start at beginning\n");
		return -EINVAL;
	}
	/* make sure chains are ordered after each other in same order
	   as their corresponding hooks */
	for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
		if (!newinfo->hook_entry[j])
			continue;
		if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
			BUGPRINT("Hook order must be followed\n");
			return -EINVAL;
		}
		i = j;
	}

	/* do some early checkings and initialize some things */
	i = 0; /* holds the expected nr. of entries for the chain */
	j = 0; /* holds the up to now counted entries for the chain */
	k = 0; /* holds the total nr. of entries, should equal
		  newinfo->nentries afterwards */
	udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
	   ebt_check_entry_size_and_hooks, newinfo,
	   &i, &j, &k, &udc_cnt);

	if (ret != 0)
		return ret;

	if (i != j) {
		BUGPRINT("nentries does not equal the nr of entries in the "
			 "(last) chain\n");
		return -EINVAL;
	}
	if (k != newinfo->nentries) {
		BUGPRINT("Total nentries is wrong\n");
		return -EINVAL;
	}

	/* get the location of the udc, put them in an array
	   while we're at it, allocate the chainstack */
	if (udc_cnt) {
		/* this will get free'd in do_replace()/ebt_register_table()
		   if an error occurs */
		newinfo->chainstack =
			vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
		if (!newinfo->chainstack)
			return -ENOMEM;
		for_each_possible_cpu(i) {
			newinfo->chainstack[i] =
			  vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
			if (!newinfo->chainstack[i]) {
				/* free the per-cpu stacks allocated so far */
				while (i)
					vfree(newinfo->chainstack[--i]);
				vfree(newinfo->chainstack);
				newinfo->chainstack = NULL;
				return -ENOMEM;
			}
		}

		cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
		if (!cl_s)
			return -ENOMEM;
		i = 0; /* the i'th udc */
		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
		   ebt_get_udc_positions, newinfo, &i, cl_s);
		/* sanity check */
		if (i != udc_cnt) {
			BUGPRINT("i != udc_cnt\n");
			vfree(cl_s);
			return -EFAULT;
		}
	}

	/* Check for loops */
	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		if (newinfo->hook_entry[i])
			if (check_chainloops(newinfo->hook_entry[i],
			   cl_s, udc_cnt, i, newinfo->entries)) {
				vfree(cl_s);
				return -EINVAL;
			}

	/* we now know the following (along with E=mc²):
	   - the nr of entries in each chain is right
	   - the size of the allocated space is right
	   - all valid hooks have a corresponding chain
	   - there are no loops
	   - wrong data can still be on the level of a single entry
	   - could be there are jumps to places that are not the
	     beginning of a chain. This can only occur in chains that
	     are not accessible from any base chains, so we don't care. */

	/* used to know what we need to clean up if something goes wrong */
	i = 0;
	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
	   ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
	if (ret != 0) {
		/* undo only the i entries that passed ebt_check_entry() */
		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
				  ebt_cleanup_entry, net, &i);
	}
	vfree(cl_s);
	return ret;
}
 955
 956/* called under write_lock */
 957static void get_counters(const struct ebt_counter *oldcounters,
 958   struct ebt_counter *counters, unsigned int nentries)
 959{
 960	int i, cpu;
 961	struct ebt_counter *counter_base;
 962
 963	/* counters of cpu 0 */
 964	memcpy(counters, oldcounters,
 965	       sizeof(struct ebt_counter) * nentries);
 966
 967	/* add other counters to those of cpu 0 */
 968	for_each_possible_cpu(cpu) {
 969		if (cpu == 0)
 970			continue;
 971		counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
 972		for (i = 0; i < nentries; i++) {
 973			counters[i].pcnt += counter_base[i].pcnt;
 974			counters[i].bcnt += counter_base[i].bcnt;
 975		}
 976	}
 977}
 978
/* swap in a validated replacement table: translate newinfo, exchange it
 * under the table lock, hand the old counters back to userspace and free
 * the previous table contents. On success ownership of newinfo (and its
 * entries) passes to the table; on failure the caller still owns them,
 * except chainstack which is freed here.
 */
static int do_replace_finish(struct net *net, struct ebt_replace *repl,
			      struct ebt_table_info *newinfo)
{
	int ret, i;
	struct ebt_counter *counterstmp = NULL;
	/* used to be able to unlock earlier */
	struct ebt_table_info *table;
	struct ebt_table *t;

	/* the user wants counters back
	   the check on the size is done later, when we have the lock */
	if (repl->num_counters) {
		unsigned long size = repl->num_counters * sizeof(*counterstmp);
		counterstmp = vmalloc(size);
		if (!counterstmp)
			return -ENOMEM;
	}

	newinfo->chainstack = NULL;
	ret = ebt_verify_pointers(repl, newinfo);
	if (ret != 0)
		goto free_counterstmp;

	ret = translate_table(net, repl->name, newinfo);

	if (ret != 0)
		goto free_counterstmp;

	/* returns with ebt_mutex held on success */
	t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
	if (!t) {
		ret = -ENOENT;
		goto free_iterate;
	}

	/* the table doesn't like it */
	if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
		goto free_unlock;

	if (repl->num_counters && repl->num_counters != t->private->nentries) {
		BUGPRINT("Wrong nr. of counters requested\n");
		ret = -EINVAL;
		goto free_unlock;
	}

	/* we have the mutex lock, so no danger in reading this pointer */
	table = t->private;
	/* make sure the table can only be rmmod'ed if it contains no rules */
	if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
		ret = -ENOENT;
		goto free_unlock;
	} else if (table->nentries && !newinfo->nentries)
		module_put(t->me);
	/* we need an atomic snapshot of the counters */
	write_lock_bh(&t->lock);
	if (repl->num_counters)
		get_counters(t->private->counters, counterstmp,
		   t->private->nentries);

	t->private = newinfo;
	write_unlock_bh(&t->lock);
	mutex_unlock(&ebt_mutex);
	/* so, a user can change the chains while having messed up her counter
	   allocation. Only reason why this is done is because this way the lock
	   is held only once, while this doesn't bring the kernel into a
	   dangerous state. */
	if (repl->num_counters &&
	   copy_to_user(repl->counters, counterstmp,
	   repl->num_counters * sizeof(struct ebt_counter))) {
		ret = -EFAULT;
	}
	else
		ret = 0;

	/* decrease module count and free resources */
	EBT_ENTRY_ITERATE(table->entries, table->entries_size,
			  ebt_cleanup_entry, net, NULL);

	vfree(table->entries);
	if (table->chainstack) {
		for_each_possible_cpu(i)
			vfree(table->chainstack[i]);
		vfree(table->chainstack);
	}
	vfree(table);

	vfree(counterstmp);
	return ret;

free_unlock:
	mutex_unlock(&ebt_mutex);
free_iterate:
	EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
			  ebt_cleanup_entry, net, NULL);
free_counterstmp:
	vfree(counterstmp);
	/* can be initialized in translate_table() */
	if (newinfo->chainstack) {
		for_each_possible_cpu(i)
			vfree(newinfo->chainstack[i]);
		vfree(newinfo->chainstack);
	}
	return ret;
}
1082
/* replace the table
 *
 * setsockopt(EBT_SO_SET_ENTRIES) entry point: copy the header and entry
 * blob from userspace, sanity-check the sizes, then hand off to
 * do_replace_finish().
 */
static int do_replace(struct net *net, const void __user *user,
		      unsigned int len)
{
	int ret, countersize;
	struct ebt_table_info *newinfo;
	struct ebt_replace tmp;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	if (len != sizeof(tmp) + tmp.entries_size) {
		BUGPRINT("Wrong len argument\n");
		return -EINVAL;
	}

	if (tmp.entries_size == 0) {
		BUGPRINT("Entries_size never zero\n");
		return -EINVAL;
	}
	/* overflow check */
	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
		return -ENOMEM;

	/* guarantee a NUL-terminated name for find_table_lock() */
	tmp.name[sizeof(tmp.name) - 1] = 0;

	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	if (!newinfo)
		return -ENOMEM;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	newinfo->entries = vmalloc(tmp.entries_size);
	if (!newinfo->entries) {
		ret = -ENOMEM;
		goto free_newinfo;
	}
	if (copy_from_user(
	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
		BUGPRINT("Couldn't copy entries from userspace\n");
		ret = -EFAULT;
		goto free_entries;
	}

	/* on success do_replace_finish() has taken ownership of newinfo */
	ret = do_replace_finish(net, &tmp, newinfo);
	if (ret == 0)
		return ret;
free_entries:
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
	return ret;
}
1141
/* register a table template in 'net': duplicate the template, copy and
 * translate its initial entries, then link the new table into the
 * per-family table list. Returns the new table or an ERR_PTR().
 */
struct ebt_table *
ebt_register_table(struct net *net, const struct ebt_table *input_table)
{
	struct ebt_table_info *newinfo;
	struct ebt_table *t, *table;
	struct ebt_replace_kernel *repl;
	int ret, i, countersize;
	void *p;

	if (input_table == NULL || (repl = input_table->table) == NULL ||
	    repl->entries == NULL || repl->entries_size == 0 ||
	    repl->counters != NULL || input_table->private != NULL) {
		BUGPRINT("Bad table data for ebt_register_table!!!\n");
		return ERR_PTR(-EINVAL);
	}

	/* Don't add one table to multiple lists. */
	table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	ret = -ENOMEM;
	if (!newinfo)
		goto free_table;

	/* work on a private copy of the template's entries */
	p = vmalloc(repl->entries_size);
	if (!p)
		goto free_newinfo;

	memcpy(p, repl->entries, repl->entries_size);
	newinfo->entries = p;

	newinfo->entries_size = repl->entries_size;
	newinfo->nentries = repl->nentries;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	/* fill in newinfo and parse the entries */
	newinfo->chainstack = NULL;
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if ((repl->valid_hooks & (1 << i)) == 0)
			newinfo->hook_entry[i] = NULL;
		else
			/* rebase the hook pointer onto the private copy */
			newinfo->hook_entry[i] = p +
				((char *)repl->hook_entry[i] - repl->entries);
	}
	ret = translate_table(net, repl->name, newinfo);
	if (ret != 0) {
		BUGPRINT("Translate_table failed\n");
		goto free_chainstack;
	}

	if (table->check && table->check(newinfo, table->valid_hooks)) {
		BUGPRINT("The table doesn't like its own initial data, lol\n");
		ret = -EINVAL;
		goto free_chainstack;
	}

	table->private = newinfo;
	rwlock_init(&table->lock);
	ret = mutex_lock_interruptible(&ebt_mutex);
	if (ret != 0)
		goto free_chainstack;

	/* reject duplicate table names */
	list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			BUGPRINT("Table name already exists\n");
			goto free_unlock;
		}
	}

	/* Hold a reference count if the chains aren't empty */
	if (newinfo->nentries && !try_module_get(table->me)) {
		ret = -ENOENT;
		goto free_unlock;
	}
	list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
	mutex_unlock(&ebt_mutex);
	return table;

free_unlock:
	mutex_unlock(&ebt_mutex);
free_chainstack:
	if (newinfo->chainstack) {
		for_each_possible_cpu(i)
			vfree(newinfo->chainstack[i]);
		vfree(newinfo->chainstack);
	}
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
free_table:
	kfree(table);
out:
	return ERR_PTR(ret);
}
 
1243
/* remove 'table' from its net's table list and free all rules, counters
 * and chainstacks belonging to it
 */
void ebt_unregister_table(struct net *net, struct ebt_table *table)
{
	int i;

	if (!table) {
		BUGPRINT("Request to unregister NULL table!!!\n");
		return;
	}
	mutex_lock(&ebt_mutex);
	list_del(&table->list);
	mutex_unlock(&ebt_mutex);
	/* run the extension destructors and drop their module refs */
	EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
			  ebt_cleanup_entry, net, NULL);
	/* the table module ref was taken when a non-empty ruleset was installed */
	if (table->private->nentries)
		module_put(table->me);
	vfree(table->private->entries);
	if (table->private->chainstack) {
		for_each_possible_cpu(i)
			vfree(table->private->chainstack[i]);
		vfree(table->private->chainstack);
	}
	vfree(table->private);
	kfree(table);
}
1268
/* userspace just supplied us with counters
 *
 * 'user'/'len' are unused here; the caller (update_counters()) has
 * already validated the overall length against num_counters.
 */
static int do_update_counters(struct net *net, const char *name,
				struct ebt_counter __user *counters,
				unsigned int num_counters,
				const void __user *user, unsigned int len)
{
	int i, ret;
	struct ebt_counter *tmp;
	struct ebt_table *t;

	if (num_counters == 0)
		return -EINVAL;

	tmp = vmalloc(num_counters * sizeof(*tmp));
	if (!tmp)
		return -ENOMEM;

	/* returns with ebt_mutex held on success */
	t = find_table_lock(net, name, &ret, &ebt_mutex);
	if (!t)
		goto free_tmp;

	/* userspace must supply exactly one counter per rule */
	if (num_counters != t->private->nentries) {
		BUGPRINT("Wrong nr of counters\n");
		ret = -EINVAL;
		goto unlock_mutex;
	}

	if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
		ret = -EFAULT;
		goto unlock_mutex;
	}

	/* we want an atomic add of the counters */
	write_lock_bh(&t->lock);

	/* we add to the counters of the first cpu */
	for (i = 0; i < num_counters; i++) {
		t->private->counters[i].pcnt += tmp[i].pcnt;
		t->private->counters[i].bcnt += tmp[i].bcnt;
	}

	write_unlock_bh(&t->lock);
	ret = 0;
unlock_mutex:
	mutex_unlock(&ebt_mutex);
free_tmp:
	vfree(tmp);
	return ret;
}
1318
1319static int update_counters(struct net *net, const void __user *user,
1320			    unsigned int len)
1321{
1322	struct ebt_replace hlp;
1323
1324	if (copy_from_user(&hlp, user, sizeof(hlp)))
1325		return -EFAULT;
1326
1327	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1328		return -EINVAL;
1329
1330	return do_update_counters(net, hlp.name, hlp.counters,
1331				hlp.num_counters, user, len);
1332}
1333
1334static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1335    const char *base, char __user *ubase)
 
1336{
1337	char __user *hlp = ubase + ((char *)m - base);
1338	if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
 
 
 
 
 
 
 
 
 
1339		return -EFAULT;
 
1340	return 0;
1341}
1342
1343static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1344    const char *base, char __user *ubase)
 
 
 
 
 
 
 
 
 
1345{
1346	char __user *hlp = ubase + ((char *)w - base);
1347	if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
1348		return -EFAULT;
1349	return 0;
1350}
1351
1352static inline int
1353ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
1354{
1355	int ret;
1356	char __user *hlp;
1357	const struct ebt_entry_target *t;
1358
1359	if (e->bitmask == 0)
 
 
 
 
1360		return 0;
 
 
 
 
1361
1362	hlp = ubase + (((char *)e + e->target_offset) - base);
1363	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
1364
1365	ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
1366	if (ret != 0)
1367		return ret;
1368	ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
1369	if (ret != 0)
1370		return ret;
1371	if (copy_to_user(hlp, t->u.target->name, EBT_FUNCTION_MAXNAMELEN))
1372		return -EFAULT;
 
 
 
 
1373	return 0;
1374}
1375
1376static int copy_counters_to_user(struct ebt_table *t,
1377				  const struct ebt_counter *oldcounters,
1378				  void __user *user, unsigned int num_counters,
1379				  unsigned int nentries)
1380{
1381	struct ebt_counter *counterstmp;
1382	int ret = 0;
1383
1384	/* userspace might not need the counters */
1385	if (num_counters == 0)
1386		return 0;
1387
1388	if (num_counters != nentries) {
1389		BUGPRINT("Num_counters wrong\n");
1390		return -EINVAL;
1391	}
1392
1393	counterstmp = vmalloc(nentries * sizeof(*counterstmp));
1394	if (!counterstmp)
1395		return -ENOMEM;
1396
1397	write_lock_bh(&t->lock);
1398	get_counters(oldcounters, counterstmp, nentries);
1399	write_unlock_bh(&t->lock);
1400
1401	if (copy_to_user(user, counterstmp,
1402	   nentries * sizeof(struct ebt_counter)))
1403		ret = -EFAULT;
1404	vfree(counterstmp);
1405	return ret;
1406}
1407
/* called with ebt_mutex locked
 *
 * copy the whole ruleset (and optionally the summed counters) back to
 * userspace for EBT_SO_GET_ENTRIES / EBT_SO_GET_INIT_ENTRIES.
 */
static int copy_everything_to_user(struct ebt_table *t, void __user *user,
    const int *len, int cmd)
{
	struct ebt_replace tmp;
	const struct ebt_counter *oldcounters;
	unsigned int entries_size, nentries;
	int ret;
	char *entries;

	/* GET_ENTRIES reports the live ruleset, GET_INIT_ENTRIES the
	 * pristine template the table was registered with */
	if (cmd == EBT_SO_GET_ENTRIES) {
		entries_size = t->private->entries_size;
		nentries = t->private->nentries;
		entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		entries_size = t->table->entries_size;
		nentries = t->table->nentries;
		entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* buffer must fit header + entries (+ counters if requested) */
	if (*len != sizeof(struct ebt_replace) + entries_size +
	   (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0))
		return -EINVAL;

	if (tmp.nentries != nentries) {
		BUGPRINT("Nentries wrong\n");
		return -EINVAL;
	}

	if (tmp.entries_size != entries_size) {
		BUGPRINT("Wrong size\n");
		return -EINVAL;
	}

	ret = copy_counters_to_user(t, oldcounters, tmp.counters,
					tmp.num_counters, nentries);
	if (ret)
		return ret;

	if (copy_to_user(tmp.entries, entries, entries_size)) {
		BUGPRINT("Couldn't copy entries to userspace\n");
		return -EFAULT;
	}
	/* set the match/watcher/target names right */
	return EBT_ENTRY_ITERATE(entries, entries_size,
	   ebt_make_names, entries, tmp.entries);
}
1460
1461static int do_ebt_set_ctl(struct sock *sk,
1462	int cmd, void __user *user, unsigned int len)
1463{
1464	int ret;
1465
1466	if (!capable(CAP_NET_ADMIN))
1467		return -EPERM;
1468
1469	switch(cmd) {
1470	case EBT_SO_SET_ENTRIES:
1471		ret = do_replace(sock_net(sk), user, len);
1472		break;
1473	case EBT_SO_SET_COUNTERS:
1474		ret = update_counters(sock_net(sk), user, len);
1475		break;
1476	default:
1477		ret = -EINVAL;
1478	}
1479	return ret;
1480}
1481
/* getsockopt() dispatcher for the EBT_SO_GET_* commands */
static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;
	struct ebt_replace tmp;
	struct ebt_table *t;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* returns with ebt_mutex held on success */
	t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	switch(cmd) {
	case EBT_SO_GET_INFO:
	case EBT_SO_GET_INIT_INFO:
		if (*len != sizeof(struct ebt_replace)){
			ret = -EINVAL;
			mutex_unlock(&ebt_mutex);
			break;
		}
		/* INFO reports the live table, INIT_INFO the registered
		 * template */
		if (cmd == EBT_SO_GET_INFO) {
			tmp.nentries = t->private->nentries;
			tmp.entries_size = t->private->entries_size;
			tmp.valid_hooks = t->valid_hooks;
		} else {
			tmp.nentries = t->table->nentries;
			tmp.entries_size = t->table->entries_size;
			tmp.valid_hooks = t->table->valid_hooks;
		}
		mutex_unlock(&ebt_mutex);
		if (copy_to_user(user, &tmp, *len) != 0){
			BUGPRINT("c2u Didn't work\n");
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;

	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		ret = copy_everything_to_user(t, user, len, cmd);
		mutex_unlock(&ebt_mutex);
		break;

	default:
		mutex_unlock(&ebt_mutex);
		ret = -EINVAL;
	}

	return ret;
}
1537
1538#ifdef CONFIG_COMPAT
1539/* 32 bit-userspace compatibility definitions. */
/* mirror of struct ebt_replace with all pointers shrunk to compat_uptr_t */
struct compat_ebt_replace {
	char name[EBT_TABLE_MAXNAMELEN];
	compat_uint_t valid_hooks;
	compat_uint_t nentries;
	compat_uint_t entries_size;
	/* start of the chains */
	compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
	/* nr of counters userspace expects back */
	compat_uint_t num_counters;
	/* where the kernel will put the old counters. */
	compat_uptr_t counters;
	compat_uptr_t entries;	/* userspace pointer to the rule blob */
};
1553
/* struct ebt_entry_match, _target and _watcher have same layout */
struct compat_ebt_entry_mwt {
	union {
		char name[EBT_FUNCTION_MAXNAMELEN];	/* extension name from userspace */
		compat_uptr_t ptr;			/* resolved extension (kernel side) */
	} u;
	compat_uint_t match_size;	/* size of data[] */
	compat_uint_t data[0];		/* extension private data */
};
1563
/* account for possible padding between match_size and ->data */
static int ebt_compat_entry_padsize(void)
{
	/* the native header may never be smaller than the compat one,
	 * otherwise the difference below would go negative */
	BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
	return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
}
1572
/* size delta between the native and the compat representation of one
 * match of 'userlen' bytes */
static int ebt_compat_match_offset(const struct xt_match *match,
				   unsigned int userlen)
{
	/*
	 * ebt_among needs special handling. The kernel .matchsize is
	 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
	 * value is expected.
	 * Example: userspace sends 4500, ebt_among.c wants 4504.
	 */
	if (unlikely(match->matchsize == -1))
		return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
	return xt_compat_match_offset(match);
}
1586
1587static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1588				unsigned int *size)
1589{
1590	const struct xt_match *match = m->u.match;
1591	struct compat_ebt_entry_mwt __user *cm = *dstptr;
1592	int off = ebt_compat_match_offset(match, m->match_size);
1593	compat_uint_t msize = m->match_size - off;
1594
1595	BUG_ON(off >= m->match_size);
 
1596
1597	if (copy_to_user(cm->u.name, match->name,
1598	    strlen(match->name) + 1) || put_user(msize, &cm->match_size))
 
1599		return -EFAULT;
1600
1601	if (match->compat_to_user) {
1602		if (match->compat_to_user(cm->data, m->data))
1603			return -EFAULT;
1604	} else if (copy_to_user(cm->data, m->data, msize))
 
 
1605			return -EFAULT;
 
1606
1607	*size -= ebt_compat_entry_padsize() + off;
1608	*dstptr = cm->data;
1609	*dstptr += msize;
1610	return 0;
1611}
1612
1613static int compat_target_to_user(struct ebt_entry_target *t,
1614				 void __user **dstptr,
1615				 unsigned int *size)
1616{
1617	const struct xt_target *target = t->u.target;
1618	struct compat_ebt_entry_mwt __user *cm = *dstptr;
1619	int off = xt_compat_target_offset(target);
1620	compat_uint_t tsize = t->target_size - off;
1621
1622	BUG_ON(off >= t->target_size);
 
1623
1624	if (copy_to_user(cm->u.name, target->name,
1625	    strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
 
1626		return -EFAULT;
1627
1628	if (target->compat_to_user) {
1629		if (target->compat_to_user(cm->data, t->data))
1630			return -EFAULT;
1631	} else if (copy_to_user(cm->data, t->data, tsize))
1632		return -EFAULT;
 
 
 
1633
1634	*size -= ebt_compat_entry_padsize() + off;
1635	*dstptr = cm->data;
1636	*dstptr += tsize;
1637	return 0;
1638}
1639
1640static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1641				  void __user **dstptr,
1642				  unsigned int *size)
1643{
1644	return compat_target_to_user((struct ebt_entry_target *)w,
1645							dstptr, size);
1646}
1647
/* convert one rule into its compat layout at *dstptr; the rule's
 * internal offsets are recomputed because matches/watchers/target shrink
 * in the compat form. *size tracks the remaining destination space.
 */
static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
				unsigned int *size)
{
	struct ebt_entry_target *t;
	struct ebt_entry __user *ce;
	u32 watchers_offset, target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	/* chain descriptors are copied verbatim */
	if (e->bitmask == 0) {
		if (*size < sizeof(struct ebt_entries))
			return -EINVAL;
		if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
			return -EFAULT;

		*dstptr += sizeof(struct ebt_entries);
		*size -= sizeof(struct ebt_entries);
		return 0;
	}

	if (*size < sizeof(*ce))
		return -EINVAL;

	ce = (struct ebt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(*ce)))
		return -EFAULT;

	origsize = *size;
	*dstptr += sizeof(*ce);

	/* origsize - *size is how much the data shrank so far; subtract
	 * it from each native offset to get the compat offset */
	ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
	if (ret)
		return ret;
	watchers_offset = e->watchers_offset - (origsize - *size);

	ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
	if (ret)
		return ret;
	target_offset = e->target_offset - (origsize - *size);

	t = (struct ebt_entry_target *) ((char *) e + e->target_offset);

	ret = compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);

	/* fix up the offsets that were copied verbatim above */
	if (put_user(watchers_offset, &ce->watchers_offset) ||
	    put_user(target_offset, &ce->target_offset) ||
	    put_user(next_offset, &ce->next_offset))
		return -EFAULT;

	*size -= sizeof(*ce);
	return 0;
}
1703
1704static int compat_calc_match(struct ebt_entry_match *m, int *off)
1705{
1706	*off += ebt_compat_match_offset(m->u.match, m->match_size);
1707	*off += ebt_compat_entry_padsize();
1708	return 0;
1709}
1710
1711static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1712{
1713	*off += xt_compat_target_offset(w->u.watcher);
1714	*off += ebt_compat_entry_padsize();
1715	return 0;
1716}
1717
/* For one native entry, compute how much smaller its compat form is
 * (match/watcher padding plus the target delta), shrink
 * newinfo->entries_size accordingly, record the per-entry delta for
 * later xt_compat_calc_jump() lookups, and shift any hook entry points
 * located after this entry.  Chain headers (bitmask == 0) have
 * identical layouts and contribute nothing.
 */
static int compat_calc_entry(const struct ebt_entry *e,
			     const struct ebt_table_info *info,
			     const void *base,
			     struct compat_ebt_replace *newinfo)
{
	const struct ebt_entry_target *t;
	unsigned int entry_offset;
	int off, ret, i;

	if (e->bitmask == 0)
		return 0;

	off = 0;
	entry_offset = (void *)e - base;

	EBT_MATCH_ITERATE(e, compat_calc_match, &off);
	EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);

	t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);

	off += xt_compat_target_offset(t->u.target);
	off += ebt_compat_entry_padsize();

	newinfo->entries_size -= off;

	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		const void *hookptr = info->hook_entry[i];
		/* NOTE(review): 'base - hookptr' subtracts two pointers and
		 * casts the difference back to a pointer for the comparison;
		 * presumably meant as "entry precedes this hook's entry
		 * point" -- confirm against upstream before changing.
		 */
		if (info->hook_entry[i] &&
		    (e < (struct ebt_entry *)(base - hookptr))) {
			newinfo->hook_entry[i] -= off;
			pr_debug("0x%08X -> 0x%08X\n",
					newinfo->hook_entry[i] + off,
					newinfo->hook_entry[i]);
		}
	}

	return 0;
}
1760
 
 
 
 
 
 
 
 
 
 
1761
1762static int compat_table_info(const struct ebt_table_info *info,
1763			     struct compat_ebt_replace *newinfo)
1764{
1765	unsigned int size = info->entries_size;
1766	const void *entries = info->entries;
 
1767
1768	newinfo->entries_size = size;
 
 
 
1769
1770	xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
1771	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1772							entries, newinfo);
1773}
1774
/* Compat handler for EBT_SO_GET_ENTRIES / EBT_SO_GET_INIT_ENTRIES:
 * copy the counters and the (translated) entry blob out to a 32-bit
 * userspace buffer.  *len must exactly match the compat-adjusted sizes.
 */
static int compat_copy_everything_to_user(struct ebt_table *t,
					  void __user *user, int *len, int cmd)
{
	struct compat_ebt_replace repl, tmp;
	struct ebt_counter *oldcounters;
	struct ebt_table_info tinfo;
	int ret;
	void __user *pos;

	memset(&tinfo, 0, sizeof(tinfo));

	/* GET_ENTRIES reads the live table; GET_INIT_ENTRIES reads the
	 * pristine template the table was registered with.
	 */
	if (cmd == EBT_SO_GET_ENTRIES) {
		tinfo.entries_size = t->private->entries_size;
		tinfo.nentries = t->private->nentries;
		tinfo.entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		tinfo.entries_size = t->table->entries_size;
		tinfo.nentries = t->table->nentries;
		tinfo.entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (tmp.nentries != tinfo.nentries ||
	   (tmp.num_counters && tmp.num_counters != tinfo.nentries))
		return -EINVAL;

	/* compute the compat entries_size (shrinks vs. the native size) */
	memcpy(&repl, &tmp, sizeof(repl));
	if (cmd == EBT_SO_GET_ENTRIES)
		ret = compat_table_info(t->private, &repl);
	else
		ret = compat_table_info(&tinfo, &repl);
	if (ret)
		return ret;

	if (*len != sizeof(tmp) + repl.entries_size +
	   (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
		pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
				*len, tinfo.entries_size, repl.entries_size);
		return -EINVAL;
	}

	/* userspace might not need the counters */
	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
					tmp.num_counters, tinfo.nentries);
	if (ret)
		return ret;

	pos = compat_ptr(tmp.entries);
	return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
			compat_copy_entry_to_user, &pos, &tmp.entries_size);
}
1830
/* Two-pass translation state for compat_copy_entries(): the first pass
 * runs with buf_kern_start == NULL and only sizes the native blob; the
 * second pass writes the translated data into the allocated buffer.
 */
struct ebt_entries_buf_state {
	char *buf_kern_start;	/* kernel buffer to copy (translated) data to */
	u32 buf_kern_len;	/* total size of kernel buffer */
	u32 buf_kern_offset;	/* amount of data copied so far */
	u32 buf_user_offset;	/* read position in userspace buffer */
};
1837
1838static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1839{
1840	state->buf_kern_offset += sz;
1841	return state->buf_kern_offset >= sz ? 0 : -EINVAL;
1842}
1843
1844static int ebt_buf_add(struct ebt_entries_buf_state *state,
1845		       void *data, unsigned int sz)
1846{
1847	if (state->buf_kern_start == NULL)
1848		goto count_only;
1849
1850	BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
 
1851
1852	memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1853
1854 count_only:
1855	state->buf_user_offset += sz;
1856	return ebt_buf_count(state, sz);
1857}
1858
1859static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1860{
1861	char *b = state->buf_kern_start;
1862
1863	BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
 
1864
1865	if (b != NULL && sz > 0)
1866		memset(b + state->buf_kern_offset, 0, sz);
1867	/* do not adjust ->buf_user_offset here, we added kernel-side padding */
1868	return ebt_buf_count(state, sz);
1869}
1870
/* which kind of compat object compat_mtw_from_user() is translating */
enum compat_mwt {
	EBT_COMPAT_MATCH,
	EBT_COMPAT_WATCHER,
	EBT_COMPAT_TARGET,
};
1876
1877static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1878				enum compat_mwt compat_mwt,
1879				struct ebt_entries_buf_state *state,
1880				const unsigned char *base)
1881{
1882	char name[EBT_FUNCTION_MAXNAMELEN];
1883	struct xt_match *match;
1884	struct xt_target *wt;
1885	void *dst = NULL;
1886	int off, pad = 0;
1887	unsigned int size_kern, match_size = mwt->match_size;
1888
1889	strlcpy(name, mwt->u.name, sizeof(name));
 
1890
1891	if (state->buf_kern_start)
1892		dst = state->buf_kern_start + state->buf_kern_offset;
1893
1894	switch (compat_mwt) {
1895	case EBT_COMPAT_MATCH:
1896		match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
1897						name, 0), "ebt_%s", name);
1898		if (match == NULL)
1899			return -ENOENT;
1900		if (IS_ERR(match))
1901			return PTR_ERR(match);
1902
1903		off = ebt_compat_match_offset(match, match_size);
1904		if (dst) {
1905			if (match->compat_from_user)
1906				match->compat_from_user(dst, mwt->data);
1907			else
1908				memcpy(dst, mwt->data, match_size);
1909		}
1910
1911		size_kern = match->matchsize;
1912		if (unlikely(size_kern == -1))
1913			size_kern = match_size;
1914		module_put(match->me);
1915		break;
1916	case EBT_COMPAT_WATCHER: /* fallthrough */
1917	case EBT_COMPAT_TARGET:
1918		wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE,
1919						name, 0), "ebt_%s", name);
1920		if (wt == NULL)
1921			return -ENOENT;
1922		if (IS_ERR(wt))
1923			return PTR_ERR(wt);
1924		off = xt_compat_target_offset(wt);
1925
1926		if (dst) {
1927			if (wt->compat_from_user)
1928				wt->compat_from_user(dst, mwt->data);
1929			else
1930				memcpy(dst, mwt->data, match_size);
1931		}
1932
1933		size_kern = wt->targetsize;
1934		module_put(wt->me);
1935		break;
1936
1937	default:
1938		return -EINVAL;
1939	}
1940
1941	state->buf_kern_offset += match_size + off;
1942	state->buf_user_offset += match_size;
1943	pad = XT_ALIGN(size_kern) - size_kern;
1944
1945	if (pad > 0 && dst) {
1946		BUG_ON(state->buf_kern_len <= pad);
1947		BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
 
 
1948		memset(dst + size_kern, 0, pad);
1949	}
1950	return off + match_size;
1951}
1952
1953/*
1954 * return size of all matches, watchers or target, including necessary
1955 * alignment and padding.
1956 */
1957static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
1958			unsigned int size_left, enum compat_mwt type,
1959			struct ebt_entries_buf_state *state, const void *base)
1960{
 
1961	int growth = 0;
1962	char *buf;
1963
1964	if (size_left == 0)
1965		return 0;
1966
1967	buf = (char *) match32;
1968
1969	while (size_left >= sizeof(*match32)) {
1970		struct ebt_entry_match *match_kern;
1971		int ret;
1972
 
 
 
1973		match_kern = (struct ebt_entry_match *) state->buf_kern_start;
1974		if (match_kern) {
1975			char *tmp;
1976			tmp = state->buf_kern_start + state->buf_kern_offset;
1977			match_kern = (struct ebt_entry_match *) tmp;
1978		}
1979		ret = ebt_buf_add(state, buf, sizeof(*match32));
1980		if (ret < 0)
1981			return ret;
1982		size_left -= sizeof(*match32);
1983
1984		/* add padding before match->data (if any) */
1985		ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
1986		if (ret < 0)
1987			return ret;
1988
1989		if (match32->match_size > size_left)
1990			return -EINVAL;
1991
1992		size_left -= match32->match_size;
1993
1994		ret = compat_mtw_from_user(match32, type, state, base);
1995		if (ret < 0)
1996			return ret;
1997
1998		BUG_ON(ret < match32->match_size);
 
1999		growth += ret - match32->match_size;
2000		growth += ebt_compat_entry_padsize();
2001
2002		buf += sizeof(*match32);
2003		buf += match32->match_size;
2004
2005		if (match_kern)
2006			match_kern->match_size = ret;
2007
2008		WARN_ON(type == EBT_COMPAT_TARGET && size_left);
2009		match32 = (struct compat_ebt_entry_mwt *) buf;
2010	}
2011
2012	return growth;
2013}
2014
2015/* called for all ebt_entry structures. */
2016static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2017			  unsigned int *total,
2018			  struct ebt_entries_buf_state *state)
2019{
2020	unsigned int i, j, startoff, new_offset = 0;
2021	/* stores match/watchers/targets & offset of next struct ebt_entry: */
2022	unsigned int offsets[4];
2023	unsigned int *offsets_update = NULL;
2024	int ret;
2025	char *buf_start;
2026
2027	if (*total < sizeof(struct ebt_entries))
2028		return -EINVAL;
2029
2030	if (!entry->bitmask) {
2031		*total -= sizeof(struct ebt_entries);
2032		return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
2033	}
2034	if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
2035		return -EINVAL;
2036
2037	startoff = state->buf_user_offset;
2038	/* pull in most part of ebt_entry, it does not need to be changed. */
2039	ret = ebt_buf_add(state, entry,
2040			offsetof(struct ebt_entry, watchers_offset));
2041	if (ret < 0)
2042		return ret;
2043
2044	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
2045	memcpy(&offsets[1], &entry->watchers_offset,
2046			sizeof(offsets) - sizeof(offsets[0]));
2047
2048	if (state->buf_kern_start) {
2049		buf_start = state->buf_kern_start + state->buf_kern_offset;
2050		offsets_update = (unsigned int *) buf_start;
2051	}
2052	ret = ebt_buf_add(state, &offsets[1],
2053			sizeof(offsets) - sizeof(offsets[0]));
2054	if (ret < 0)
2055		return ret;
2056	buf_start = (char *) entry;
2057	/*
2058	 * 0: matches offset, always follows ebt_entry.
2059	 * 1: watchers offset, from ebt_entry structure
2060	 * 2: target offset, from ebt_entry structure
2061	 * 3: next ebt_entry offset, from ebt_entry structure
2062	 *
2063	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
2064	 */
 
 
 
 
 
 
 
 
 
 
 
 
 
2065	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
2066		struct compat_ebt_entry_mwt *match32;
2067		unsigned int size;
2068		char *buf = buf_start;
2069
2070		buf = buf_start + offsets[i];
2071		if (offsets[i] > offsets[j])
2072			return -EINVAL;
2073
2074		match32 = (struct compat_ebt_entry_mwt *) buf;
2075		size = offsets[j] - offsets[i];
2076		ret = ebt_size_mwt(match32, size, i, state, base);
2077		if (ret < 0)
2078			return ret;
2079		new_offset += ret;
2080		if (offsets_update && new_offset) {
2081			pr_debug("change offset %d to %d\n",
2082				offsets_update[i], offsets[j] + new_offset);
2083			offsets_update[i] = offsets[j] + new_offset;
2084		}
2085	}
2086
2087	if (state->buf_kern_start == NULL) {
2088		unsigned int offset = buf_start - (char *) base;
2089
2090		ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset);
2091		if (ret < 0)
2092			return ret;
2093	}
2094
2095	startoff = state->buf_user_offset - startoff;
 
 
2096
2097	BUG_ON(*total < startoff);
2098	*total -= startoff;
 
2099	return 0;
2100}
2101
2102/*
2103 * repl->entries_size is the size of the ebt_entry blob in userspace.
2104 * It might need more memory when copied to a 64 bit kernel in case
2105 * userspace is 32-bit. So, first task: find out how much memory is needed.
2106 *
2107 * Called before validation is performed.
2108 */
2109static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2110				struct ebt_entries_buf_state *state)
2111{
2112	unsigned int size_remaining = size_user;
2113	int ret;
2114
2115	ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2116					&size_remaining, state);
2117	if (ret < 0)
2118		return ret;
2119
2120	WARN_ON(size_remaining);
 
 
2121	return state->buf_kern_offset;
2122}
2123
2124
/* Convert a 32-bit (compat) ebt_replace header from userspace into the
 * native struct ebt_replace, sanity-checking the counts against
 * overflow before anything is sized from them.  Returns 0 or a
 * negative errno; -EINVAL lets compat_do_replace() retry the native
 * handler in case userland already supplied padded structures.
 */
static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
					    void __user *user, unsigned int len)
{
	struct compat_ebt_replace tmp;
	int i;

	if (len < sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (len != sizeof(tmp) + tmp.entries_size)
		return -EINVAL;

	if (tmp.entries_size == 0)
		return -EINVAL;

	/* overflow checks mirror the ones in the native do_replace() */
	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
		return -ENOMEM;

	memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));

	/* starting with hook_entry, 32 vs. 64 bit structures are different */
	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);

	repl->num_counters = tmp.num_counters;
	repl->counters = compat_ptr(tmp.counters);
	repl->entries = compat_ptr(tmp.entries);
	return 0;
}
2160
2161static int compat_do_replace(struct net *net, void __user *user,
2162			     unsigned int len)
2163{
2164	int ret, i, countersize, size64;
2165	struct ebt_table_info *newinfo;
2166	struct ebt_replace tmp;
2167	struct ebt_entries_buf_state state;
2168	void *entries_tmp;
2169
2170	ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
2171	if (ret) {
2172		/* try real handler in case userland supplied needed padding */
2173		if (ret == -EINVAL && do_replace(net, user, len) == 0)
2174			ret = 0;
2175		return ret;
2176	}
2177
2178	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
2179	newinfo = vmalloc(sizeof(*newinfo) + countersize);
2180	if (!newinfo)
2181		return -ENOMEM;
2182
2183	if (countersize)
2184		memset(newinfo->counters, 0, countersize);
2185
2186	memset(&state, 0, sizeof(state));
2187
2188	newinfo->entries = vmalloc(tmp.entries_size);
2189	if (!newinfo->entries) {
2190		ret = -ENOMEM;
2191		goto free_newinfo;
2192	}
2193	if (copy_from_user(
2194	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
2195		ret = -EFAULT;
2196		goto free_entries;
2197	}
2198
2199	entries_tmp = newinfo->entries;
2200
2201	xt_compat_lock(NFPROTO_BRIDGE);
2202
2203	xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
 
 
 
2204	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2205	if (ret < 0)
2206		goto out_unlock;
2207
2208	pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
2209		tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
2210		xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));
2211
2212	size64 = ret;
2213	newinfo->entries = vmalloc(size64);
2214	if (!newinfo->entries) {
2215		vfree(entries_tmp);
2216		ret = -ENOMEM;
2217		goto out_unlock;
2218	}
2219
2220	memset(&state, 0, sizeof(state));
2221	state.buf_kern_start = newinfo->entries;
2222	state.buf_kern_len = size64;
2223
2224	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2225	BUG_ON(ret < 0);	/* parses same data again */
 
 
 
2226
2227	vfree(entries_tmp);
2228	tmp.entries_size = size64;
2229
2230	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
2231		char __user *usrptr;
2232		if (tmp.hook_entry[i]) {
2233			unsigned int delta;
2234			usrptr = (char __user *) tmp.hook_entry[i];
2235			delta = usrptr - tmp.entries;
2236			usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
2237			tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
2238		}
2239	}
2240
2241	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2242	xt_compat_unlock(NFPROTO_BRIDGE);
2243
2244	ret = do_replace_finish(net, &tmp, newinfo);
2245	if (ret == 0)
2246		return ret;
2247free_entries:
2248	vfree(newinfo->entries);
2249free_newinfo:
2250	vfree(newinfo);
2251	return ret;
2252out_unlock:
2253	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2254	xt_compat_unlock(NFPROTO_BRIDGE);
2255	goto free_entries;
2256}
2257
2258static int compat_update_counters(struct net *net, void __user *user,
2259				  unsigned int len)
2260{
2261	struct compat_ebt_replace hlp;
2262
2263	if (copy_from_user(&hlp, user, sizeof(hlp)))
2264		return -EFAULT;
2265
2266	/* try real handler in case userland supplied needed padding */
2267	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2268		return update_counters(net, user, len);
2269
2270	return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2271					hlp.num_counters, user, len);
2272}
2273
2274static int compat_do_ebt_set_ctl(struct sock *sk,
2275		int cmd, void __user *user, unsigned int len)
2276{
2277	int ret;
2278
2279	if (!capable(CAP_NET_ADMIN))
2280		return -EPERM;
2281
2282	switch (cmd) {
2283	case EBT_SO_SET_ENTRIES:
2284		ret = compat_do_replace(sock_net(sk), user, len);
2285		break;
2286	case EBT_SO_SET_COUNTERS:
2287		ret = compat_update_counters(sock_net(sk), user, len);
2288		break;
2289	default:
2290		ret = -EINVAL;
2291  }
2292	return ret;
2293}
2294
/* Compat getsockopt dispatcher.  INFO/INIT_INFO report compat-adjusted
 * sizes; ENTRIES/INIT_ENTRIES first try the native copy-out (for
 * userland that already pads to 64-bit layouts) and fall back to the
 * translating compat path.  Requires CAP_NET_ADMIN.
 */
static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
		void __user *user, int *len)
{
	int ret;
	struct compat_ebt_replace tmp;
	struct ebt_table *t;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* try real handler in case userland supplied needed padding */
	if ((cmd == EBT_SO_GET_INFO ||
	     cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
			return do_ebt_get_ctl(sk, cmd, user, len);

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* on success the table is returned with ebt_mutex held */
	t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	xt_compat_lock(NFPROTO_BRIDGE);
	switch (cmd) {
	case EBT_SO_GET_INFO:
		tmp.nentries = t->private->nentries;
		/* shrinks tmp.entries_size to the compat size */
		ret = compat_table_info(t->private, &tmp);
		if (ret)
			goto out;
		tmp.valid_hooks = t->valid_hooks;

		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_INIT_INFO:
		tmp.nentries = t->table->nentries;
		tmp.entries_size = t->table->entries_size;
		tmp.valid_hooks = t->table->valid_hooks;

		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		/*
		 * try real handler first in case of userland-side padding.
		 * in case we are dealing with an 'ordinary' 32 bit binary
		 * without 64bit compatibility padding, this will fail right
		 * after copy_from_user when the *len argument is validated.
		 *
		 * the compat_ variant needs to do one pass over the kernel
		 * data set to adjust for size differences before it the check.
		 */
		if (copy_everything_to_user(t, user, len, cmd) == 0)
			ret = 0;
		else
			ret = compat_copy_everything_to_user(t, user, len, cmd);
		break;
	default:
		ret = -EINVAL;
	}
 out:
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	mutex_unlock(&ebt_mutex);
	return ret;
}
2368#endif
2369
/* ebtables is a bridge-layer facility but historically registers its
 * get/setsockopt command range on PF_INET sockets.
 */
static struct nf_sockopt_ops ebt_sockopts =
{
	.pf		= PF_INET,
	.set_optmin	= EBT_BASE_CTL,
	.set_optmax	= EBT_SO_SET_MAX + 1,
	.set		= do_ebt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ebt_set_ctl,
#endif
	.get_optmin	= EBT_BASE_CTL,
	.get_optmax	= EBT_SO_GET_MAX + 1,
	.get		= do_ebt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ebt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2387
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2388static int __init ebtables_init(void)
2389{
2390	int ret;
2391
2392	ret = xt_register_target(&ebt_standard_target);
2393	if (ret < 0)
2394		return ret;
2395	ret = nf_register_sockopt(&ebt_sockopts);
2396	if (ret < 0) {
2397		xt_unregister_target(&ebt_standard_target);
2398		return ret;
2399	}
2400
2401	printk(KERN_INFO "Ebtables v2.0 registered\n");
 
 
 
 
 
 
2402	return 0;
2403}
2404
2405static void __exit ebtables_fini(void)
2406{
2407	nf_unregister_sockopt(&ebt_sockopts);
2408	xt_unregister_target(&ebt_standard_target);
2409	printk(KERN_INFO "Ebtables v2.0 unregistered\n");
2410}
2411
/* exported for the per-table modules (ebtable_filter, ebtable_nat, ...) */
EXPORT_SYMBOL(ebt_register_table);
EXPORT_SYMBOL(ebt_unregister_table);
EXPORT_SYMBOL(ebt_do_table);
module_init(ebtables_init);
module_exit(ebtables_fini);
MODULE_LICENSE("GPL");