v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  ebtables
   4 *
   5 *  Author:
   6 *  Bart De Schuymer		<bdschuym@pandora.be>
   7 *
   8 *  ebtables.c,v 2.0, July, 2002
   9 *
  10 *  This code is strongly inspired by the iptables code which is
  11 *  Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
  12 */
  13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14#include <linux/kmod.h>
  15#include <linux/module.h>
  16#include <linux/vmalloc.h>
  17#include <linux/netfilter/x_tables.h>
  18#include <linux/netfilter_bridge/ebtables.h>
  19#include <linux/spinlock.h>
  20#include <linux/mutex.h>
  21#include <linux/slab.h>
  22#include <linux/uaccess.h>
  23#include <linux/smp.h>
  24#include <linux/cpumask.h>
  25#include <linux/audit.h>
  26#include <net/sock.h>
  27/* needed for logical [in,out]-dev filtering */
  28#include "../br_private.h"
  29
  30/* Each cpu has its own set of counters, so there is no need for write_lock in
  31 * the softirq
  32 * For reading or updating the counters, the user context needs to
  33 * get a write_lock
  34 */
  35
  36/* The size of each set of counters is altered to get cache alignment */
  37#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
  38#define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
  39#define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
  40				 COUNTER_OFFSET(n) * cpu))
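/* Layout example (figures assume SMP_CACHE_BYTES == 64 and a 16-byte
 * struct ebt_counter, as on a typical 64-bit build): for a table with
 * n == 10 entries, COUNTER_OFFSET(10) == SMP_ALIGN(160) == 192, so cpu 0
 * uses bytes [0, 160) of the counter area and cpu 1's block starts at
 * byte 192, i.e. COUNTER_BASE(c, 10, 1) == (struct ebt_counter *)((char *)c + 192).
 */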
  41
  42
  43
  44static DEFINE_MUTEX(ebt_mutex);
  45
  46#ifdef CONFIG_COMPAT
  47static void ebt_standard_compat_from_user(void *dst, const void *src)
  48{
  49	int v = *(compat_int_t *)src;
  50
  51	if (v >= 0)
  52		v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
  53	memcpy(dst, &v, sizeof(v));
  54}
  55
  56static int ebt_standard_compat_to_user(void __user *dst, const void *src)
  57{
  58	compat_int_t cv = *(int *)src;
  59
  60	if (cv >= 0)
  61		cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
  62	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
  63}
  64#endif
  65
  66
  67static struct xt_target ebt_standard_target = {
  68	.name       = "standard",
  69	.revision   = 0,
  70	.family     = NFPROTO_BRIDGE,
  71	.targetsize = sizeof(int),
  72#ifdef CONFIG_COMPAT
  73	.compatsize = sizeof(compat_int_t),
  74	.compat_from_user = ebt_standard_compat_from_user,
  75	.compat_to_user =  ebt_standard_compat_to_user,
  76#endif
  77};
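/* Note on the standard target (as used by ebt_do_table() and
 * check_chainloops() below): its target data is a single int verdict.
 * The negative values EBT_ACCEPT, EBT_DROP, EBT_CONTINUE and EBT_RETURN
 * are the special verdicts; a verdict >= 0 is interpreted as a byte
 * offset, relative to the start of the entries blob, of a user defined
 * chain to jump to.
 */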
  78
  79static inline int
  80ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
  81	       struct xt_action_param *par)
  82{
  83	par->target   = w->u.watcher;
  84	par->targinfo = w->data;
  85	w->u.watcher->target(skb, par);
  86	/* watchers don't give a verdict */
  87	return 0;
  88}
  89
  90static inline int
  91ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
  92	     struct xt_action_param *par)
  93{
  94	par->match     = m->u.match;
  95	par->matchinfo = m->data;
  96	return !m->u.match->match(skb, par);
  97}
  98
  99static inline int
 100ebt_dev_check(const char *entry, const struct net_device *device)
 101{
 102	int i = 0;
 103	const char *devname;
 104
 105	if (*entry == '\0')
 106		return 0;
 107	if (!device)
 108		return 1;
 109	devname = device->name;
 110	/* 1 is the wildcard token */
 111	while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
 112		i++;
 113	return devname[i] != entry[i] && entry[i] != 1;
 114}
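/* Worked example (hypothetical rule): with an interface pattern of "eth"
 * followed by the wildcard byte 1, ebt_dev_check() returns 0 (match) for
 * "eth0", "eth1", ... and non-zero for "wlan0".  An empty pattern matches
 * any (or no) device, while a named pattern never matches when the skb has
 * no such device attached.
 */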
 115
 116/* process standard matches */
 117static inline int
 118ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
 119		const struct net_device *in, const struct net_device *out)
 120{
 121	const struct ethhdr *h = eth_hdr(skb);
 122	const struct net_bridge_port *p;
 123	__be16 ethproto;
 124
 125	if (skb_vlan_tag_present(skb))
 126		ethproto = htons(ETH_P_8021Q);
 127	else
 128		ethproto = h->h_proto;
 129
 130	if (e->bitmask & EBT_802_3) {
 131		if (NF_INVF(e, EBT_IPROTO, eth_proto_is_802_3(ethproto)))
 132			return 1;
 133	} else if (!(e->bitmask & EBT_NOPROTO) &&
 134		   NF_INVF(e, EBT_IPROTO, e->ethproto != ethproto))
 135		return 1;
 136
 137	if (NF_INVF(e, EBT_IIN, ebt_dev_check(e->in, in)))
 138		return 1;
 139	if (NF_INVF(e, EBT_IOUT, ebt_dev_check(e->out, out)))
 140		return 1;
 141	/* rcu_read_lock()ed by nf_hook_thresh */
 142	if (in && (p = br_port_get_rcu(in)) != NULL &&
 143	    NF_INVF(e, EBT_ILOGICALIN,
 144		    ebt_dev_check(e->logical_in, p->br->dev)))
 145		return 1;
 146	if (out && (p = br_port_get_rcu(out)) != NULL &&
 147	    NF_INVF(e, EBT_ILOGICALOUT,
 148		    ebt_dev_check(e->logical_out, p->br->dev)))
 149		return 1;
 150
 151	if (e->bitmask & EBT_SOURCEMAC) {
 152		if (NF_INVF(e, EBT_ISOURCE,
 153			    !ether_addr_equal_masked(h->h_source, e->sourcemac,
 154						     e->sourcemsk)))
 155			return 1;
 156	}
 157	if (e->bitmask & EBT_DESTMAC) {
 158		if (NF_INVF(e, EBT_IDEST,
 159			    !ether_addr_equal_masked(h->h_dest, e->destmac,
 160						     e->destmsk)))
 161			return 1;
 162	}
 163	return 0;
 164}
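/* Example of the NF_INVF() logic above (hypothetical rule): with
 * e->ethproto == htons(ETH_P_IP) and EBT_IPROTO clear in e->invflags only
 * IPv4 frames pass this check; with the EBT_IPROTO inversion bit set the
 * same rule matches every protocol except IPv4.  A return value of 1 means
 * "this rule does not apply", 0 means all basic criteria hold.
 */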
 165
 166static inline
 167struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
 168{
 169	return (void *)entry + entry->next_offset;
 170}
 171
 172static inline const struct ebt_entry_target *
 173ebt_get_target_c(const struct ebt_entry *e)
 174{
 175	return ebt_get_target((struct ebt_entry *)e);
 176}
 177
 178/* Do some firewalling */
 179unsigned int ebt_do_table(struct sk_buff *skb,
 180			  const struct nf_hook_state *state,
 181			  struct ebt_table *table)
 182{
 183	unsigned int hook = state->hook;
 184	int i, nentries;
 185	struct ebt_entry *point;
 186	struct ebt_counter *counter_base, *cb_base;
 187	const struct ebt_entry_target *t;
 188	int verdict, sp = 0;
 189	struct ebt_chainstack *cs;
 190	struct ebt_entries *chaininfo;
 191	const char *base;
 192	const struct ebt_table_info *private;
 193	struct xt_action_param acpar;
 194
 195	acpar.state   = state;
 196	acpar.hotdrop = false;
 197
 198	read_lock_bh(&table->lock);
 199	private = table->private;
 200	cb_base = COUNTER_BASE(private->counters, private->nentries,
 201	   smp_processor_id());
 202	if (private->chainstack)
 203		cs = private->chainstack[smp_processor_id()];
 204	else
 205		cs = NULL;
 206	chaininfo = private->hook_entry[hook];
 207	nentries = private->hook_entry[hook]->nentries;
 208	point = (struct ebt_entry *)(private->hook_entry[hook]->data);
 209	counter_base = cb_base + private->hook_entry[hook]->counter_offset;
 210	/* base for chain jumps */
 211	base = private->entries;
 212	i = 0;
 213	while (i < nentries) {
 214		if (ebt_basic_match(point, skb, state->in, state->out))
 215			goto letscontinue;
 216
 217		if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
 218			goto letscontinue;
 219		if (acpar.hotdrop) {
 220			read_unlock_bh(&table->lock);
 221			return NF_DROP;
 222		}
 223
 224		ADD_COUNTER(*(counter_base + i), skb->len, 1);
 225
 226		/* these should only watch: not modify, nor tell us
 227		 * what to do with the packet
 228		 */
 229		EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);
 230
 231		t = ebt_get_target_c(point);
 232		/* standard target */
 233		if (!t->u.target->target)
 234			verdict = ((struct ebt_standard_target *)t)->verdict;
 235		else {
 236			acpar.target   = t->u.target;
 237			acpar.targinfo = t->data;
 238			verdict = t->u.target->target(skb, &acpar);
 239		}
 240		if (verdict == EBT_ACCEPT) {
 241			read_unlock_bh(&table->lock);
 242			return NF_ACCEPT;
 243		}
 244		if (verdict == EBT_DROP) {
 245			read_unlock_bh(&table->lock);
 246			return NF_DROP;
 247		}
 248		if (verdict == EBT_RETURN) {
 249letsreturn:
 250			if (WARN(sp == 0, "RETURN on base chain")) {
 251				/* act like this is EBT_CONTINUE */
 252				goto letscontinue;
 253			}
 254
 255			sp--;
 256			/* put all the local variables right */
 257			i = cs[sp].n;
 258			chaininfo = cs[sp].chaininfo;
 259			nentries = chaininfo->nentries;
 260			point = cs[sp].e;
 261			counter_base = cb_base +
 262			   chaininfo->counter_offset;
 263			continue;
 264		}
 265		if (verdict == EBT_CONTINUE)
 266			goto letscontinue;
 267
 268		if (WARN(verdict < 0, "bogus standard verdict\n")) {
 269			read_unlock_bh(&table->lock);
 270			return NF_DROP;
 271		}
 272
 273		/* jump to a udc */
 274		cs[sp].n = i + 1;
 275		cs[sp].chaininfo = chaininfo;
 276		cs[sp].e = ebt_next_entry(point);
 277		i = 0;
 278		chaininfo = (struct ebt_entries *) (base + verdict);
 279
 280		if (WARN(chaininfo->distinguisher, "jump to non-chain\n")) {
 281			read_unlock_bh(&table->lock);
 282			return NF_DROP;
 283		}
 284
 285		nentries = chaininfo->nentries;
 286		point = (struct ebt_entry *)chaininfo->data;
 287		counter_base = cb_base + chaininfo->counter_offset;
 288		sp++;
 289		continue;
 290letscontinue:
 291		point = ebt_next_entry(point);
 292		i++;
 293	}
 294
 295	/* I actually like this :) */
 296	if (chaininfo->policy == EBT_RETURN)
 297		goto letsreturn;
 298	if (chaininfo->policy == EBT_ACCEPT) {
 299		read_unlock_bh(&table->lock);
 300		return NF_ACCEPT;
 301	}
 302	read_unlock_bh(&table->lock);
 303	return NF_DROP;
 304}
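/* Usage sketch (illustrative; get_filter_table() is only a stand-in for
 * however a table module looks up its per-netns struct ebt_table, compare
 * the ebtable_filter/ebtable_nat modules):
 *
 *	static unsigned int
 *	ebt_filter_hook(void *priv, struct sk_buff *skb,
 *			const struct nf_hook_state *state)
 *	{
 *		return ebt_do_table(skb, state, get_filter_table(state->net));
 *	}
 *
 * The NF_ACCEPT/NF_DROP verdict is handed straight back to the netfilter
 * core.
 */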
 305
 306/* If it succeeds, returns element and locks mutex */
 307static inline void *
 308find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
 309			struct mutex *mutex)
 310{
 311	struct {
 312		struct list_head list;
 313		char name[EBT_FUNCTION_MAXNAMELEN];
 314	} *e;
 315
 316	mutex_lock(mutex);
 317	list_for_each_entry(e, head, list) {
 318		if (strcmp(e->name, name) == 0)
 319			return e;
 320	}
 321	*error = -ENOENT;
 322	mutex_unlock(mutex);
 323	return NULL;
 324}
 325
 326static void *
 327find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
 328		 int *error, struct mutex *mutex)
 329{
 330	return try_then_request_module(
 331			find_inlist_lock_noload(head, name, error, mutex),
 332			"%s%s", prefix, name);
 333}
 334
 335static inline struct ebt_table *
 336find_table_lock(struct net *net, const char *name, int *error,
 337		struct mutex *mutex)
 338{
 339	return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
 340				"ebtable_", error, mutex);
 341}
 342
 343static inline void ebt_free_table_info(struct ebt_table_info *info)
 344{
 345	int i;
 346
 347	if (info->chainstack) {
 348		for_each_possible_cpu(i)
 349			vfree(info->chainstack[i]);
 350		vfree(info->chainstack);
 351	}
 352}
 353static inline int
 354ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
 355		unsigned int *cnt)
 356{
 357	const struct ebt_entry *e = par->entryinfo;
 358	struct xt_match *match;
 359	size_t left = ((char *)e + e->watchers_offset) - (char *)m;
 360	int ret;
 361
 362	if (left < sizeof(struct ebt_entry_match) ||
 363	    left - sizeof(struct ebt_entry_match) < m->match_size)
 364		return -EINVAL;
 365
 366	match = xt_find_match(NFPROTO_BRIDGE, m->u.name, m->u.revision);
 367	if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) {
 368		if (!IS_ERR(match))
 369			module_put(match->me);
 370		request_module("ebt_%s", m->u.name);
 371		match = xt_find_match(NFPROTO_BRIDGE, m->u.name, m->u.revision);
 372	}
 373	if (IS_ERR(match))
 374		return PTR_ERR(match);
 375	m->u.match = match;
 376
 377	par->match     = match;
 378	par->matchinfo = m->data;
 379	ret = xt_check_match(par, m->match_size,
 380	      ntohs(e->ethproto), e->invflags & EBT_IPROTO);
 381	if (ret < 0) {
 382		module_put(match->me);
 383		return ret;
 384	}
 385
 386	(*cnt)++;
 387	return 0;
 388}
 389
 390static inline int
 391ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
 392		  unsigned int *cnt)
 393{
 394	const struct ebt_entry *e = par->entryinfo;
 395	struct xt_target *watcher;
 396	size_t left = ((char *)e + e->target_offset) - (char *)w;
 397	int ret;
 398
 399	if (left < sizeof(struct ebt_entry_watcher) ||
 400	   left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
 401		return -EINVAL;
 402
 403	watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
 404	if (IS_ERR(watcher))
 405		return PTR_ERR(watcher);
 406
 407	if (watcher->family != NFPROTO_BRIDGE) {
 408		module_put(watcher->me);
 409		return -ENOENT;
 410	}
 411
 412	w->u.watcher = watcher;
 413
 414	par->target   = watcher;
 415	par->targinfo = w->data;
 416	ret = xt_check_target(par, w->watcher_size,
 417	      ntohs(e->ethproto), e->invflags & EBT_IPROTO);
 418	if (ret < 0) {
 419		module_put(watcher->me);
 420		return ret;
 421	}
 422
 423	(*cnt)++;
 424	return 0;
 425}
 426
 427static int ebt_verify_pointers(const struct ebt_replace *repl,
 428			       struct ebt_table_info *newinfo)
 429{
 430	unsigned int limit = repl->entries_size;
 431	unsigned int valid_hooks = repl->valid_hooks;
 432	unsigned int offset = 0;
 433	int i;
 434
 435	for (i = 0; i < NF_BR_NUMHOOKS; i++)
 436		newinfo->hook_entry[i] = NULL;
 437
 438	newinfo->entries_size = repl->entries_size;
 439	newinfo->nentries = repl->nentries;
 440
 441	while (offset < limit) {
 442		size_t left = limit - offset;
 443		struct ebt_entry *e = (void *)newinfo->entries + offset;
 444
 445		if (left < sizeof(unsigned int))
 446			break;
 447
 448		for (i = 0; i < NF_BR_NUMHOOKS; i++) {
 449			if ((valid_hooks & (1 << i)) == 0)
 450				continue;
 451			if ((char __user *)repl->hook_entry[i] ==
 452			     repl->entries + offset)
 453				break;
 454		}
 455
 456		if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
 457			if (e->bitmask != 0) {
 458				/* we make userspace set this right,
 459				 * so there is no misunderstanding
 460				 */
 461				return -EINVAL;
 462			}
 463			if (i != NF_BR_NUMHOOKS)
 464				newinfo->hook_entry[i] = (struct ebt_entries *)e;
 465			if (left < sizeof(struct ebt_entries))
 466				break;
 467			offset += sizeof(struct ebt_entries);
 468		} else {
 469			if (left < sizeof(struct ebt_entry))
 470				break;
 471			if (left < e->next_offset)
 472				break;
 473			if (e->next_offset < sizeof(struct ebt_entry))
 474				return -EINVAL;
 475			offset += e->next_offset;
 476		}
 477	}
 478	if (offset != limit)
 479		return -EINVAL;
 480
 481	/* check if all valid hooks have a chain */
 482	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
 483		if (!newinfo->hook_entry[i] &&
 484		   (valid_hooks & (1 << i)))
 485			return -EINVAL;
 486	}
 487	return 0;
 488}
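/* Example of a layout this accepts (hypothetical two-chain table): the
 * entries blob starts with a struct ebt_entries for, say, the LOCAL_IN
 * hook (so repl->hook_entry[NF_BR_LOCAL_IN] == repl->entries + 0),
 * followed by that chain's rules, then a second struct ebt_entries with
 * bitmask 0 that no hook_entry slot points to (a user defined chain) and
 * its rules.  A hook pointer landing on a real entry, a chain header with
 * a non-zero bitmask, offsets running past entries_size, or a valid hook
 * without a chain all yield -EINVAL.
 */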
 489
 490/* this one is very careful, as it is the first function
 491 * to parse the userspace data
 492 */
 493static inline int
 494ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
 495			       const struct ebt_table_info *newinfo,
 496			       unsigned int *n, unsigned int *cnt,
 497			       unsigned int *totalcnt, unsigned int *udc_cnt)
 498{
 499	int i;
 500
 501	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
 502		if ((void *)e == (void *)newinfo->hook_entry[i])
 503			break;
 504	}
 505	/* beginning of a new chain
 506	 * if i == NF_BR_NUMHOOKS it must be a user defined chain
 507	 */
 508	if (i != NF_BR_NUMHOOKS || !e->bitmask) {
 509		/* this checks if the previous chain has as many entries
 510		 * as it said it has
 511		 */
 512		if (*n != *cnt)
 513			return -EINVAL;
 514
 515		if (((struct ebt_entries *)e)->policy != EBT_DROP &&
 516		   ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
 517			/* only RETURN from udc */
 518			if (i != NF_BR_NUMHOOKS ||
 519			   ((struct ebt_entries *)e)->policy != EBT_RETURN)
 520				return -EINVAL;
 521		}
 522		if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
 523			(*udc_cnt)++;
 524		if (((struct ebt_entries *)e)->counter_offset != *totalcnt)
 525			return -EINVAL;
 526		*n = ((struct ebt_entries *)e)->nentries;
 527		*cnt = 0;
 528		return 0;
 529	}
 530	/* a plain old entry, heh */
 531	if (sizeof(struct ebt_entry) > e->watchers_offset ||
 532	   e->watchers_offset > e->target_offset ||
 533	   e->target_offset >= e->next_offset)
 534		return -EINVAL;
 535
 536	/* this is not checked anywhere else */
 537	if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target))
 538		return -EINVAL;
 539
 540	(*cnt)++;
 541	(*totalcnt)++;
 542	return 0;
 543}
 544
 545struct ebt_cl_stack {
 546	struct ebt_chainstack cs;
 547	int from;
 548	unsigned int hookmask;
 549};
 550
  551/* We need these positions to check that a jump to a different part of the
 552 * entries is a jump to the beginning of a new chain.
 553 */
 554static inline int
 555ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
 556		      unsigned int *n, struct ebt_cl_stack *udc)
 557{
 558	int i;
 559
 560	/* we're only interested in chain starts */
 561	if (e->bitmask)
 562		return 0;
 563	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
 564		if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
 565			break;
 566	}
 567	/* only care about udc */
 568	if (i != NF_BR_NUMHOOKS)
 569		return 0;
 570
 571	udc[*n].cs.chaininfo = (struct ebt_entries *)e;
 572	/* these initialisations are depended on later in check_chainloops() */
 573	udc[*n].cs.n = 0;
 574	udc[*n].hookmask = 0;
 575
 576	(*n)++;
 577	return 0;
 578}
 579
 580static inline int
 581ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
 582{
 583	struct xt_mtdtor_param par;
 584
 585	if (i && (*i)-- == 0)
 586		return 1;
 587
 588	par.net       = net;
 589	par.match     = m->u.match;
 590	par.matchinfo = m->data;
 591	par.family    = NFPROTO_BRIDGE;
 592	if (par.match->destroy != NULL)
 593		par.match->destroy(&par);
 594	module_put(par.match->me);
 595	return 0;
 596}
 597
 598static inline int
 599ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
 600{
 601	struct xt_tgdtor_param par;
 602
 603	if (i && (*i)-- == 0)
 604		return 1;
 605
 606	par.net      = net;
 607	par.target   = w->u.watcher;
 608	par.targinfo = w->data;
 609	par.family   = NFPROTO_BRIDGE;
 610	if (par.target->destroy != NULL)
 611		par.target->destroy(&par);
 612	module_put(par.target->me);
 613	return 0;
 614}
 615
 616static inline int
 617ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
 618{
 619	struct xt_tgdtor_param par;
 620	struct ebt_entry_target *t;
 621
 622	if (e->bitmask == 0)
 623		return 0;
 624	/* we're done */
 625	if (cnt && (*cnt)-- == 0)
 626		return 1;
 627	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
 628	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
 629	t = ebt_get_target(e);
 630
 631	par.net      = net;
 632	par.target   = t->u.target;
 633	par.targinfo = t->data;
 634	par.family   = NFPROTO_BRIDGE;
 635	if (par.target->destroy != NULL)
 636		par.target->destroy(&par);
 637	module_put(par.target->me);
 638	return 0;
 639}
 640
 641static inline int
 642ebt_check_entry(struct ebt_entry *e, struct net *net,
 643		const struct ebt_table_info *newinfo,
 644		const char *name, unsigned int *cnt,
 645		struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
 646{
 647	struct ebt_entry_target *t;
 648	struct xt_target *target;
 649	unsigned int i, j, hook = 0, hookmask = 0;
 650	size_t gap;
 651	int ret;
 652	struct xt_mtchk_param mtpar;
 653	struct xt_tgchk_param tgpar;
 654
 655	/* don't mess with the struct ebt_entries */
 656	if (e->bitmask == 0)
 657		return 0;
 658
 659	if (e->bitmask & ~EBT_F_MASK)
 660		return -EINVAL;
 661
 662	if (e->invflags & ~EBT_INV_MASK)
 663		return -EINVAL;
 664
 665	if ((e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3))
 666		return -EINVAL;
 667
 668	/* what hook do we belong to? */
 669	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
 670		if (!newinfo->hook_entry[i])
 671			continue;
 672		if ((char *)newinfo->hook_entry[i] < (char *)e)
 673			hook = i;
 674		else
 675			break;
 676	}
 677	/* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
 678	 * a base chain
 679	 */
 680	if (i < NF_BR_NUMHOOKS)
 681		hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
 682	else {
 683		for (i = 0; i < udc_cnt; i++)
 684			if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
 685				break;
 686		if (i == 0)
 687			hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
 688		else
 689			hookmask = cl_s[i - 1].hookmask;
 690	}
 691	i = 0;
 692
 693	memset(&mtpar, 0, sizeof(mtpar));
 694	memset(&tgpar, 0, sizeof(tgpar));
 695	mtpar.net	= tgpar.net       = net;
 696	mtpar.table     = tgpar.table     = name;
 697	mtpar.entryinfo = tgpar.entryinfo = e;
 698	mtpar.hook_mask = tgpar.hook_mask = hookmask;
 699	mtpar.family    = tgpar.family    = NFPROTO_BRIDGE;
 700	ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
 701	if (ret != 0)
 702		goto cleanup_matches;
 703	j = 0;
 704	ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
 705	if (ret != 0)
 706		goto cleanup_watchers;
 707	t = ebt_get_target(e);
 708	gap = e->next_offset - e->target_offset;
 709
 710	target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
 711	if (IS_ERR(target)) {
 712		ret = PTR_ERR(target);
 713		goto cleanup_watchers;
 714	}
 715
 716	/* Reject UNSPEC, xtables verdicts/return values are incompatible */
 717	if (target->family != NFPROTO_BRIDGE) {
 718		module_put(target->me);
 719		ret = -ENOENT;
 720		goto cleanup_watchers;
 721	}
 722
 723	t->u.target = target;
 724	if (t->u.target == &ebt_standard_target) {
 725		if (gap < sizeof(struct ebt_standard_target)) {
 726			ret = -EFAULT;
 727			goto cleanup_watchers;
 728		}
 729		if (((struct ebt_standard_target *)t)->verdict <
 730		   -NUM_STANDARD_TARGETS) {
 731			ret = -EFAULT;
 732			goto cleanup_watchers;
 733		}
 734	} else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
 735		module_put(t->u.target->me);
 736		ret = -EFAULT;
 737		goto cleanup_watchers;
 738	}
 739
 740	tgpar.target   = target;
 741	tgpar.targinfo = t->data;
 742	ret = xt_check_target(&tgpar, t->target_size,
 743	      ntohs(e->ethproto), e->invflags & EBT_IPROTO);
 744	if (ret < 0) {
 745		module_put(target->me);
 746		goto cleanup_watchers;
 747	}
 748	(*cnt)++;
 749	return 0;
 750cleanup_watchers:
 751	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
 752cleanup_matches:
 753	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
 754	return ret;
 755}
 756
 757/* checks for loops and sets the hook mask for udc
 758 * the hook mask for udc tells us from which base chains the udc can be
 759 * accessed. This mask is a parameter to the check() functions of the extensions
 760 */
 761static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
 762			    unsigned int udc_cnt, unsigned int hooknr, char *base)
 763{
 764	int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
 765	const struct ebt_entry *e = (struct ebt_entry *)chain->data;
 766	const struct ebt_entry_target *t;
 767
 768	while (pos < nentries || chain_nr != -1) {
 769		/* end of udc, go back one 'recursion' step */
 770		if (pos == nentries) {
 771			/* put back values of the time when this chain was called */
 772			e = cl_s[chain_nr].cs.e;
 773			if (cl_s[chain_nr].from != -1)
 774				nentries =
 775				cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
 776			else
 777				nentries = chain->nentries;
 778			pos = cl_s[chain_nr].cs.n;
 779			/* make sure we won't see a loop that isn't one */
 780			cl_s[chain_nr].cs.n = 0;
 781			chain_nr = cl_s[chain_nr].from;
 782			if (pos == nentries)
 783				continue;
 784		}
 785		t = ebt_get_target_c(e);
 786		if (strcmp(t->u.name, EBT_STANDARD_TARGET))
 787			goto letscontinue;
 788		if (e->target_offset + sizeof(struct ebt_standard_target) >
 789		   e->next_offset)
 790			return -1;
 791
 792		verdict = ((struct ebt_standard_target *)t)->verdict;
 793		if (verdict >= 0) { /* jump to another chain */
 794			struct ebt_entries *hlp2 =
 795			   (struct ebt_entries *)(base + verdict);
 796			for (i = 0; i < udc_cnt; i++)
 797				if (hlp2 == cl_s[i].cs.chaininfo)
 798					break;
 799			/* bad destination or loop */
 800			if (i == udc_cnt)
 801				return -1;
 802
 803			if (cl_s[i].cs.n)
 804				return -1;
 805
 806			if (cl_s[i].hookmask & (1 << hooknr))
 807				goto letscontinue;
 808			/* this can't be 0, so the loop test is correct */
 809			cl_s[i].cs.n = pos + 1;
 810			pos = 0;
 811			cl_s[i].cs.e = ebt_next_entry(e);
 812			e = (struct ebt_entry *)(hlp2->data);
 813			nentries = hlp2->nentries;
 814			cl_s[i].from = chain_nr;
 815			chain_nr = i;
 816			/* this udc is accessible from the base chain for hooknr */
 817			cl_s[i].hookmask |= (1 << hooknr);
 818			continue;
 819		}
 820letscontinue:
 821		e = ebt_next_entry(e);
 822		pos++;
 823	}
 824	return 0;
 825}
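/* Loop detection example (hypothetical ruleset): if user defined chain A
 * jumps to chain B and B jumps back to A, the walk enters A and sets
 * cl_s[A].cs.n non-zero, descends into B, and then finds cl_s[A].cs.n
 * already non-zero when B's jump is examined, so -1 is returned and the
 * table is rejected before it could ever loop at runtime.
 */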
 826
 827/* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
 828static int translate_table(struct net *net, const char *name,
 829			   struct ebt_table_info *newinfo)
 830{
 831	unsigned int i, j, k, udc_cnt;
 832	int ret;
 833	struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */
 834
 835	i = 0;
 836	while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
 837		i++;
 838	if (i == NF_BR_NUMHOOKS)
 839		return -EINVAL;
 840
 841	if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries)
 842		return -EINVAL;
 843
 844	/* make sure chains are ordered after each other in same order
 845	 * as their corresponding hooks
 846	 */
 847	for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
 848		if (!newinfo->hook_entry[j])
 849			continue;
 850		if (newinfo->hook_entry[j] <= newinfo->hook_entry[i])
 851			return -EINVAL;
 852
 853		i = j;
 854	}
 855
  856	/* do some early checks and initialize some things */
  857	i = 0; /* holds the expected nr. of entries for the chain */
  858	j = 0; /* holds the number of entries counted so far for the chain */
 859	k = 0; /* holds the total nr. of entries, should equal
 860		* newinfo->nentries afterwards
 861		*/
 862	udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
 863	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
 864	   ebt_check_entry_size_and_hooks, newinfo,
 865	   &i, &j, &k, &udc_cnt);
 866
 867	if (ret != 0)
 868		return ret;
 869
 870	if (i != j)
 871		return -EINVAL;
 872
 873	if (k != newinfo->nentries)
 874		return -EINVAL;
 875
 876	/* get the location of the udc, put them in an array
 877	 * while we're at it, allocate the chainstack
 878	 */
 879	if (udc_cnt) {
 880		/* this will get free'd in do_replace()/ebt_register_table()
 881		 * if an error occurs
 882		 */
 883		newinfo->chainstack =
 884			vmalloc(array_size(nr_cpu_ids,
 885					   sizeof(*(newinfo->chainstack))));
 886		if (!newinfo->chainstack)
 887			return -ENOMEM;
 888		for_each_possible_cpu(i) {
 889			newinfo->chainstack[i] =
 890			  vmalloc(array_size(udc_cnt, sizeof(*(newinfo->chainstack[0]))));
 891			if (!newinfo->chainstack[i]) {
 892				while (i)
 893					vfree(newinfo->chainstack[--i]);
 894				vfree(newinfo->chainstack);
 895				newinfo->chainstack = NULL;
 896				return -ENOMEM;
 897			}
 898		}
 899
 900		cl_s = vmalloc(array_size(udc_cnt, sizeof(*cl_s)));
 901		if (!cl_s)
 902			return -ENOMEM;
 903		i = 0; /* the i'th udc */
 904		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
 905		   ebt_get_udc_positions, newinfo, &i, cl_s);
 906		/* sanity check */
 907		if (i != udc_cnt) {
 908			vfree(cl_s);
 909			return -EFAULT;
 910		}
 911	}
 912
 913	/* Check for loops */
 914	for (i = 0; i < NF_BR_NUMHOOKS; i++)
 915		if (newinfo->hook_entry[i])
 916			if (check_chainloops(newinfo->hook_entry[i],
 917			   cl_s, udc_cnt, i, newinfo->entries)) {
 918				vfree(cl_s);
 919				return -EINVAL;
 920			}
 921
 922	/* we now know the following (along with E=mc²):
 923	 *  - the nr of entries in each chain is right
 924	 *  - the size of the allocated space is right
 925	 *  - all valid hooks have a corresponding chain
 926	 *  - there are no loops
 927	 *  - wrong data can still be on the level of a single entry
 928	 *  - could be there are jumps to places that are not the
 929	 *    beginning of a chain. This can only occur in chains that
 930	 *    are not accessible from any base chains, so we don't care.
 931	 */
 932
 933	/* used to know what we need to clean up if something goes wrong */
 934	i = 0;
 935	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
 936	   ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
 937	if (ret != 0) {
 938		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
 939				  ebt_cleanup_entry, net, &i);
 940	}
 941	vfree(cl_s);
 942	return ret;
 943}
 944
 945/* called under write_lock */
 946static void get_counters(const struct ebt_counter *oldcounters,
 947			 struct ebt_counter *counters, unsigned int nentries)
 948{
 949	int i, cpu;
 950	struct ebt_counter *counter_base;
 951
 952	/* counters of cpu 0 */
 953	memcpy(counters, oldcounters,
 954	       sizeof(struct ebt_counter) * nentries);
 955
 956	/* add other counters to those of cpu 0 */
 957	for_each_possible_cpu(cpu) {
 958		if (cpu == 0)
 959			continue;
 960		counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
 961		for (i = 0; i < nentries; i++)
 962			ADD_COUNTER(counters[i], counter_base[i].bcnt,
 963				    counter_base[i].pcnt);
 964	}
 965}
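/* Illustrative note: with the COUNTER_BASE() layout from the top of this
 * file, cpu c's copy of entry i lives at
 * COUNTER_BASE(oldcounters, nentries, c)[i].  This function folds every
 * cpu's pcnt/bcnt into the flat counters[0..nentries-1] array, starting
 * from cpu 0's block, which is copied wholesale.
 */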
 966
 967static int do_replace_finish(struct net *net, struct ebt_replace *repl,
 968			      struct ebt_table_info *newinfo)
 969{
 970	int ret;
 971	struct ebt_counter *counterstmp = NULL;
 972	/* used to be able to unlock earlier */
 973	struct ebt_table_info *table;
 974	struct ebt_table *t;
 975
 976	/* the user wants counters back
 977	 * the check on the size is done later, when we have the lock
 978	 */
 979	if (repl->num_counters) {
 980		unsigned long size = repl->num_counters * sizeof(*counterstmp);
 981		counterstmp = vmalloc(size);
 982		if (!counterstmp)
 983			return -ENOMEM;
 984	}
 985
 986	newinfo->chainstack = NULL;
 987	ret = ebt_verify_pointers(repl, newinfo);
 988	if (ret != 0)
 989		goto free_counterstmp;
 990
 991	ret = translate_table(net, repl->name, newinfo);
 992
 993	if (ret != 0)
 994		goto free_counterstmp;
 995
 996	t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
 997	if (!t) {
 998		ret = -ENOENT;
 999		goto free_iterate;
1000	}
1001
1002	/* the table doesn't like it */
1003	if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
1004		goto free_unlock;
1005
1006	if (repl->num_counters && repl->num_counters != t->private->nentries) {
1007		ret = -EINVAL;
1008		goto free_unlock;
1009	}
1010
1011	/* we have the mutex lock, so no danger in reading this pointer */
1012	table = t->private;
1013	/* make sure the table can only be rmmod'ed if it contains no rules */
1014	if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
1015		ret = -ENOENT;
1016		goto free_unlock;
1017	} else if (table->nentries && !newinfo->nentries)
1018		module_put(t->me);
1019	/* we need an atomic snapshot of the counters */
1020	write_lock_bh(&t->lock);
1021	if (repl->num_counters)
1022		get_counters(t->private->counters, counterstmp,
1023		   t->private->nentries);
1024
1025	t->private = newinfo;
1026	write_unlock_bh(&t->lock);
1027	mutex_unlock(&ebt_mutex);
 1028	/* so a user can end up with the new chains installed but a failed
 1029	 * counter copy. The only reason it is done this way is that the lock
 1030	 * then needs to be held only once, and a failed copy does not put the
 1031	 * kernel into a dangerous state.
1032	 */
1033	if (repl->num_counters &&
1034	   copy_to_user(repl->counters, counterstmp,
1035	   repl->num_counters * sizeof(struct ebt_counter))) {
1036		/* Silent error, can't fail, new table is already in place */
1037		net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n");
1038	}
1039
1040	/* decrease module count and free resources */
1041	EBT_ENTRY_ITERATE(table->entries, table->entries_size,
1042			  ebt_cleanup_entry, net, NULL);
1043
1044	vfree(table->entries);
1045	ebt_free_table_info(table);
1046	vfree(table);
1047	vfree(counterstmp);
1048
1049	audit_log_nfcfg(repl->name, AF_BRIDGE, repl->nentries,
1050			AUDIT_XT_OP_REPLACE, GFP_KERNEL);
1051	return ret;
1052
1053free_unlock:
1054	mutex_unlock(&ebt_mutex);
1055free_iterate:
1056	EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
1057			  ebt_cleanup_entry, net, NULL);
1058free_counterstmp:
1059	vfree(counterstmp);
1060	/* can be initialized in translate_table() */
1061	ebt_free_table_info(newinfo);
1062	return ret;
1063}
1064
1065/* replace the table */
1066static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
1067{
1068	int ret, countersize;
1069	struct ebt_table_info *newinfo;
1070	struct ebt_replace tmp;
1071
1072	if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
1073		return -EFAULT;
1074
1075	if (len != sizeof(tmp) + tmp.entries_size)
1076		return -EINVAL;
1077
1078	if (tmp.entries_size == 0)
1079		return -EINVAL;
1080
1081	/* overflow check */
1082	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
1083			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
1084		return -ENOMEM;
1085	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
1086		return -ENOMEM;
1087
1088	tmp.name[sizeof(tmp.name) - 1] = 0;
1089
1090	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1091	newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT);
1092	if (!newinfo)
1093		return -ENOMEM;
1094
1095	if (countersize)
1096		memset(newinfo->counters, 0, countersize);
1097
1098	newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT);
1099	if (!newinfo->entries) {
1100		ret = -ENOMEM;
1101		goto free_newinfo;
1102	}
1103	if (copy_from_user(
1104	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
1105		ret = -EFAULT;
1106		goto free_entries;
1107	}
1108
1109	ret = do_replace_finish(net, &tmp, newinfo);
1110	if (ret == 0)
1111		return ret;
1112free_entries:
1113	vfree(newinfo->entries);
1114free_newinfo:
1115	vfree(newinfo);
1116	return ret;
1117}
1118
1119static void __ebt_unregister_table(struct net *net, struct ebt_table *table)
1120{
1121	mutex_lock(&ebt_mutex);
1122	list_del(&table->list);
1123	mutex_unlock(&ebt_mutex);
1124	audit_log_nfcfg(table->name, AF_BRIDGE, table->private->nentries,
1125			AUDIT_XT_OP_UNREGISTER, GFP_KERNEL);
1126	EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
1127			  ebt_cleanup_entry, net, NULL);
1128	if (table->private->nentries)
1129		module_put(table->me);
1130	vfree(table->private->entries);
1131	ebt_free_table_info(table->private);
1132	vfree(table->private);
1133	kfree(table);
1134}
1135
1136int ebt_register_table(struct net *net, const struct ebt_table *input_table,
1137		       const struct nf_hook_ops *ops, struct ebt_table **res)
1138{
1139	struct ebt_table_info *newinfo;
1140	struct ebt_table *t, *table;
1141	struct ebt_replace_kernel *repl;
1142	int ret, i, countersize;
1143	void *p;
1144
1145	if (input_table == NULL || (repl = input_table->table) == NULL ||
1146	    repl->entries == NULL || repl->entries_size == 0 ||
1147	    repl->counters != NULL || input_table->private != NULL)
1148		return -EINVAL;
1149
1150	/* Don't add one table to multiple lists. */
1151	table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
1152	if (!table) {
1153		ret = -ENOMEM;
1154		goto out;
1155	}
1156
1157	countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
1158	newinfo = vmalloc(sizeof(*newinfo) + countersize);
1159	ret = -ENOMEM;
1160	if (!newinfo)
1161		goto free_table;
1162
1163	p = vmalloc(repl->entries_size);
1164	if (!p)
1165		goto free_newinfo;
1166
1167	memcpy(p, repl->entries, repl->entries_size);
1168	newinfo->entries = p;
1169
1170	newinfo->entries_size = repl->entries_size;
1171	newinfo->nentries = repl->nentries;
1172
1173	if (countersize)
1174		memset(newinfo->counters, 0, countersize);
1175
1176	/* fill in newinfo and parse the entries */
1177	newinfo->chainstack = NULL;
1178	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1179		if ((repl->valid_hooks & (1 << i)) == 0)
1180			newinfo->hook_entry[i] = NULL;
1181		else
1182			newinfo->hook_entry[i] = p +
1183				((char *)repl->hook_entry[i] - repl->entries);
1184	}
1185	ret = translate_table(net, repl->name, newinfo);
1186	if (ret != 0)
1187		goto free_chainstack;
1188
1189	if (table->check && table->check(newinfo, table->valid_hooks)) {
1190		ret = -EINVAL;
1191		goto free_chainstack;
1192	}
1193
1194	table->private = newinfo;
1195	rwlock_init(&table->lock);
1196	mutex_lock(&ebt_mutex);
1197	list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
1198		if (strcmp(t->name, table->name) == 0) {
1199			ret = -EEXIST;
1200			goto free_unlock;
1201		}
1202	}
1203
1204	/* Hold a reference count if the chains aren't empty */
1205	if (newinfo->nentries && !try_module_get(table->me)) {
1206		ret = -ENOENT;
1207		goto free_unlock;
1208	}
1209	list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
1210	mutex_unlock(&ebt_mutex);
1211
1212	WRITE_ONCE(*res, table);
1213	ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
1214	if (ret) {
1215		__ebt_unregister_table(net, table);
1216		*res = NULL;
1217	}
1218
1219	audit_log_nfcfg(repl->name, AF_BRIDGE, repl->nentries,
1220			AUDIT_XT_OP_REGISTER, GFP_KERNEL);
1221	return ret;
1222free_unlock:
1223	mutex_unlock(&ebt_mutex);
1224free_chainstack:
1225	ebt_free_table_info(newinfo);
1226	vfree(newinfo->entries);
1227free_newinfo:
1228	vfree(newinfo);
1229free_table:
1230	kfree(table);
1231out:
1232	return ret;
1233}
1234
1235void ebt_unregister_table(struct net *net, struct ebt_table *table,
1236			  const struct nf_hook_ops *ops)
1237{
1238	nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
1239	__ebt_unregister_table(net, table);
1240}
1241
1242/* userspace just supplied us with counters */
1243static int do_update_counters(struct net *net, const char *name,
1244			      struct ebt_counter __user *counters,
1245			      unsigned int num_counters, unsigned int len)
1246{
1247	int i, ret;
1248	struct ebt_counter *tmp;
1249	struct ebt_table *t;
1250
1251	if (num_counters == 0)
1252		return -EINVAL;
1253
1254	tmp = vmalloc(array_size(num_counters, sizeof(*tmp)));
1255	if (!tmp)
1256		return -ENOMEM;
1257
1258	t = find_table_lock(net, name, &ret, &ebt_mutex);
1259	if (!t)
1260		goto free_tmp;
1261
1262	if (num_counters != t->private->nentries) {
1263		ret = -EINVAL;
1264		goto unlock_mutex;
1265	}
1266
1267	if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
1268		ret = -EFAULT;
1269		goto unlock_mutex;
1270	}
1271
1272	/* we want an atomic add of the counters */
1273	write_lock_bh(&t->lock);
1274
1275	/* we add to the counters of the first cpu */
1276	for (i = 0; i < num_counters; i++)
1277		ADD_COUNTER(t->private->counters[i], tmp[i].bcnt, tmp[i].pcnt);
1278
1279	write_unlock_bh(&t->lock);
1280	ret = 0;
1281unlock_mutex:
1282	mutex_unlock(&ebt_mutex);
1283free_tmp:
1284	vfree(tmp);
1285	return ret;
1286}
1287
1288static int update_counters(struct net *net, sockptr_t arg, unsigned int len)
1289{
1290	struct ebt_replace hlp;
1291
1292	if (copy_from_sockptr(&hlp, arg, sizeof(hlp)))
1293		return -EFAULT;
1294
1295	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1296		return -EINVAL;
1297
1298	return do_update_counters(net, hlp.name, hlp.counters,
1299				  hlp.num_counters, len);
1300}
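/* Wire format sketch for this setsockopt (illustrative, matching the checks
 * above): the caller passes a struct ebt_replace whose num_counters and
 * counters fields describe a user buffer of num_counters struct ebt_counter
 * entries; len must equal sizeof(struct ebt_replace) +
 * num_counters * sizeof(struct ebt_counter), and num_counters has to match
 * the table's nentries for the add to be accepted.
 */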
1301
1302static inline int ebt_obj_to_user(char __user *um, const char *_name,
1303				  const char *data, int entrysize,
1304				  int usersize, int datasize, u8 revision)
1305{
1306	char name[EBT_EXTENSION_MAXNAMELEN] = {0};
1307
1308	/* ebtables expects 31 bytes long names but xt_match names are 29 bytes
1309	 * long. Copy 29 bytes and fill remaining bytes with zeroes.
1310	 */
1311	strlcpy(name, _name, sizeof(name));
1312	if (copy_to_user(um, name, EBT_EXTENSION_MAXNAMELEN) ||
1313	    put_user(revision, (u8 __user *)(um + EBT_EXTENSION_MAXNAMELEN)) ||
1314	    put_user(datasize, (int __user *)(um + EBT_EXTENSION_MAXNAMELEN + 1)) ||
1315	    xt_data_to_user(um + entrysize, data, usersize, datasize,
1316			    XT_ALIGN(datasize)))
1317		return -EFAULT;
1318
1319	return 0;
1320}
1321
1322static inline int ebt_match_to_user(const struct ebt_entry_match *m,
1323				    const char *base, char __user *ubase)
1324{
1325	return ebt_obj_to_user(ubase + ((char *)m - base),
1326			       m->u.match->name, m->data, sizeof(*m),
1327			       m->u.match->usersize, m->match_size,
1328			       m->u.match->revision);
1329}
1330
1331static inline int ebt_watcher_to_user(const struct ebt_entry_watcher *w,
1332				      const char *base, char __user *ubase)
1333{
1334	return ebt_obj_to_user(ubase + ((char *)w - base),
1335			       w->u.watcher->name, w->data, sizeof(*w),
1336			       w->u.watcher->usersize, w->watcher_size,
1337			       w->u.watcher->revision);
1338}
1339
1340static inline int ebt_entry_to_user(struct ebt_entry *e, const char *base,
1341				    char __user *ubase)
1342{
1343	int ret;
1344	char __user *hlp;
1345	const struct ebt_entry_target *t;
1346
1347	if (e->bitmask == 0) {
1348		/* special case !EBT_ENTRY_OR_ENTRIES */
1349		if (copy_to_user(ubase + ((char *)e - base), e,
1350				 sizeof(struct ebt_entries)))
1351			return -EFAULT;
1352		return 0;
1353	}
1354
1355	if (copy_to_user(ubase + ((char *)e - base), e, sizeof(*e)))
1356		return -EFAULT;
1357
1358	hlp = ubase + (((char *)e + e->target_offset) - base);
1359	t = ebt_get_target_c(e);
1360
1361	ret = EBT_MATCH_ITERATE(e, ebt_match_to_user, base, ubase);
1362	if (ret != 0)
1363		return ret;
1364	ret = EBT_WATCHER_ITERATE(e, ebt_watcher_to_user, base, ubase);
1365	if (ret != 0)
1366		return ret;
1367	ret = ebt_obj_to_user(hlp, t->u.target->name, t->data, sizeof(*t),
1368			      t->u.target->usersize, t->target_size,
1369			      t->u.target->revision);
1370	if (ret != 0)
1371		return ret;
1372
1373	return 0;
1374}
1375
1376static int copy_counters_to_user(struct ebt_table *t,
1377				 const struct ebt_counter *oldcounters,
1378				 void __user *user, unsigned int num_counters,
1379				 unsigned int nentries)
1380{
1381	struct ebt_counter *counterstmp;
1382	int ret = 0;
1383
1384	/* userspace might not need the counters */
1385	if (num_counters == 0)
1386		return 0;
1387
1388	if (num_counters != nentries)
1389		return -EINVAL;
1390
1391	counterstmp = vmalloc(array_size(nentries, sizeof(*counterstmp)));
1392	if (!counterstmp)
1393		return -ENOMEM;
1394
1395	write_lock_bh(&t->lock);
1396	get_counters(oldcounters, counterstmp, nentries);
1397	write_unlock_bh(&t->lock);
1398
1399	if (copy_to_user(user, counterstmp,
1400	   nentries * sizeof(struct ebt_counter)))
1401		ret = -EFAULT;
1402	vfree(counterstmp);
1403	return ret;
1404}
1405
1406/* called with ebt_mutex locked */
1407static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1408				   const int *len, int cmd)
1409{
1410	struct ebt_replace tmp;
1411	const struct ebt_counter *oldcounters;
1412	unsigned int entries_size, nentries;
1413	int ret;
1414	char *entries;
1415
1416	if (cmd == EBT_SO_GET_ENTRIES) {
1417		entries_size = t->private->entries_size;
1418		nentries = t->private->nentries;
1419		entries = t->private->entries;
1420		oldcounters = t->private->counters;
1421	} else {
1422		entries_size = t->table->entries_size;
1423		nentries = t->table->nentries;
1424		entries = t->table->entries;
1425		oldcounters = t->table->counters;
1426	}
1427
1428	if (copy_from_user(&tmp, user, sizeof(tmp)))
1429		return -EFAULT;
1430
1431	if (*len != sizeof(struct ebt_replace) + entries_size +
1432	   (tmp.num_counters ? nentries * sizeof(struct ebt_counter) : 0))
1433		return -EINVAL;
1434
1435	if (tmp.nentries != nentries)
1436		return -EINVAL;
1437
1438	if (tmp.entries_size != entries_size)
1439		return -EINVAL;
1440
1441	ret = copy_counters_to_user(t, oldcounters, tmp.counters,
1442					tmp.num_counters, nentries);
1443	if (ret)
1444		return ret;
1445
1446	/* set the match/watcher/target names right */
1447	return EBT_ENTRY_ITERATE(entries, entries_size,
1448	   ebt_entry_to_user, entries, tmp.entries);
1449}
1450
1451#ifdef CONFIG_COMPAT
1452/* 32 bit-userspace compatibility definitions. */
1453struct compat_ebt_replace {
1454	char name[EBT_TABLE_MAXNAMELEN];
1455	compat_uint_t valid_hooks;
1456	compat_uint_t nentries;
1457	compat_uint_t entries_size;
1458	/* start of the chains */
1459	compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
1460	/* nr of counters userspace expects back */
1461	compat_uint_t num_counters;
1462	/* where the kernel will put the old counters. */
1463	compat_uptr_t counters;
1464	compat_uptr_t entries;
1465};
1466
1467/* struct ebt_entry_match, _target and _watcher have same layout */
1468struct compat_ebt_entry_mwt {
1469	union {
1470		struct {
1471			char name[EBT_EXTENSION_MAXNAMELEN];
1472			u8 revision;
1473		};
1474		compat_uptr_t ptr;
1475	} u;
1476	compat_uint_t match_size;
1477	compat_uint_t data[] __aligned(__alignof__(struct compat_ebt_replace));
1478};
1479
1480/* account for possible padding between match_size and ->data */
1481static int ebt_compat_entry_padsize(void)
1482{
1483	BUILD_BUG_ON(sizeof(struct ebt_entry_match) <
1484			sizeof(struct compat_ebt_entry_mwt));
1485	return (int) sizeof(struct ebt_entry_match) -
1486			sizeof(struct compat_ebt_entry_mwt);
1487}
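/* Worked example (sizes assume a common 64-bit build): struct ebt_entry_match
 * aligns its data[] to struct ebt_replace, which contains pointers, so the
 * struct is padded to 40 bytes; the compat variant aligns data[] to the
 * 4-byte-aligned struct compat_ebt_replace and stays at 36 bytes, giving a
 * padsize of 4.  On builds where both alignments coincide the padsize is 0.
 */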
1488
1489static int ebt_compat_match_offset(const struct xt_match *match,
1490				   unsigned int userlen)
1491{
1492	/* ebt_among needs special handling. The kernel .matchsize is
1493	 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1494	 * value is expected.
1495	 * Example: userspace sends 4500, ebt_among.c wants 4504.
1496	 */
1497	if (unlikely(match->matchsize == -1))
1498		return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
1499	return xt_compat_match_offset(match);
1500}
1501
1502static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1503				unsigned int *size)
1504{
1505	const struct xt_match *match = m->u.match;
1506	struct compat_ebt_entry_mwt __user *cm = *dstptr;
1507	int off = ebt_compat_match_offset(match, m->match_size);
1508	compat_uint_t msize = m->match_size - off;
1509
1510	if (WARN_ON(off >= m->match_size))
1511		return -EINVAL;
1512
1513	if (copy_to_user(cm->u.name, match->name, strlen(match->name) + 1) ||
1514	    put_user(match->revision, &cm->u.revision) ||
1515	    put_user(msize, &cm->match_size))
1516		return -EFAULT;
1517
1518	if (match->compat_to_user) {
1519		if (match->compat_to_user(cm->data, m->data))
1520			return -EFAULT;
1521	} else {
1522		if (xt_data_to_user(cm->data, m->data, match->usersize, msize,
1523				    COMPAT_XT_ALIGN(msize)))
1524			return -EFAULT;
1525	}
1526
1527	*size -= ebt_compat_entry_padsize() + off;
1528	*dstptr = cm->data;
1529	*dstptr += msize;
1530	return 0;
1531}
1532
1533static int compat_target_to_user(struct ebt_entry_target *t,
1534				 void __user **dstptr,
1535				 unsigned int *size)
1536{
1537	const struct xt_target *target = t->u.target;
1538	struct compat_ebt_entry_mwt __user *cm = *dstptr;
1539	int off = xt_compat_target_offset(target);
1540	compat_uint_t tsize = t->target_size - off;
1541
1542	if (WARN_ON(off >= t->target_size))
1543		return -EINVAL;
1544
1545	if (copy_to_user(cm->u.name, target->name, strlen(target->name) + 1) ||
1546	    put_user(target->revision, &cm->u.revision) ||
1547	    put_user(tsize, &cm->match_size))
1548		return -EFAULT;
1549
1550	if (target->compat_to_user) {
1551		if (target->compat_to_user(cm->data, t->data))
1552			return -EFAULT;
1553	} else {
1554		if (xt_data_to_user(cm->data, t->data, target->usersize, tsize,
1555				    COMPAT_XT_ALIGN(tsize)))
1556			return -EFAULT;
1557	}
1558
1559	*size -= ebt_compat_entry_padsize() + off;
1560	*dstptr = cm->data;
1561	*dstptr += tsize;
1562	return 0;
1563}
1564
1565static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1566				  void __user **dstptr,
1567				  unsigned int *size)
1568{
1569	return compat_target_to_user((struct ebt_entry_target *)w,
1570							dstptr, size);
1571}
1572
1573static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
1574				unsigned int *size)
1575{
1576	struct ebt_entry_target *t;
1577	struct ebt_entry __user *ce;
1578	u32 watchers_offset, target_offset, next_offset;
1579	compat_uint_t origsize;
1580	int ret;
1581
1582	if (e->bitmask == 0) {
1583		if (*size < sizeof(struct ebt_entries))
1584			return -EINVAL;
1585		if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
1586			return -EFAULT;
1587
1588		*dstptr += sizeof(struct ebt_entries);
1589		*size -= sizeof(struct ebt_entries);
1590		return 0;
1591	}
1592
1593	if (*size < sizeof(*ce))
1594		return -EINVAL;
1595
1596	ce = *dstptr;
1597	if (copy_to_user(ce, e, sizeof(*ce)))
1598		return -EFAULT;
1599
1600	origsize = *size;
1601	*dstptr += sizeof(*ce);
1602
1603	ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
1604	if (ret)
1605		return ret;
1606	watchers_offset = e->watchers_offset - (origsize - *size);
1607
1608	ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
1609	if (ret)
1610		return ret;
1611	target_offset = e->target_offset - (origsize - *size);
1612
1613	t = ebt_get_target(e);
1614
1615	ret = compat_target_to_user(t, dstptr, size);
1616	if (ret)
1617		return ret;
1618	next_offset = e->next_offset - (origsize - *size);
1619
1620	if (put_user(watchers_offset, &ce->watchers_offset) ||
1621	    put_user(target_offset, &ce->target_offset) ||
1622	    put_user(next_offset, &ce->next_offset))
1623		return -EFAULT;
1624
1625	*size -= sizeof(*ce);
1626	return 0;
1627}
1628
1629static int compat_calc_match(struct ebt_entry_match *m, int *off)
1630{
1631	*off += ebt_compat_match_offset(m->u.match, m->match_size);
1632	*off += ebt_compat_entry_padsize();
1633	return 0;
1634}
1635
1636static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1637{
1638	*off += xt_compat_target_offset(w->u.watcher);
1639	*off += ebt_compat_entry_padsize();
1640	return 0;
1641}
1642
1643static int compat_calc_entry(const struct ebt_entry *e,
1644			     const struct ebt_table_info *info,
1645			     const void *base,
1646			     struct compat_ebt_replace *newinfo)
1647{
1648	const struct ebt_entry_target *t;
1649	unsigned int entry_offset;
1650	int off, ret, i;
1651
1652	if (e->bitmask == 0)
1653		return 0;
1654
1655	off = 0;
1656	entry_offset = (void *)e - base;
1657
1658	EBT_MATCH_ITERATE(e, compat_calc_match, &off);
1659	EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);
1660
1661	t = ebt_get_target_c(e);
1662
1663	off += xt_compat_target_offset(t->u.target);
1664	off += ebt_compat_entry_padsize();
1665
1666	newinfo->entries_size -= off;
1667
1668	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
1669	if (ret)
1670		return ret;
1671
1672	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1673		const void *hookptr = info->hook_entry[i];
1674		if (info->hook_entry[i] &&
1675		    (e < (struct ebt_entry *)(base - hookptr))) {
1676			newinfo->hook_entry[i] -= off;
1677			pr_debug("0x%08X -> 0x%08X\n",
1678					newinfo->hook_entry[i] + off,
1679					newinfo->hook_entry[i]);
1680		}
1681	}
1682
1683	return 0;
1684}
1685
1686static int ebt_compat_init_offsets(unsigned int number)
1687{
1688	if (number > INT_MAX)
1689		return -EINVAL;
1690
1691	/* also count the base chain policies */
1692	number += NF_BR_NUMHOOKS;
1693
1694	return xt_compat_init_offsets(NFPROTO_BRIDGE, number);
1695}
1696
1697static int compat_table_info(const struct ebt_table_info *info,
1698			     struct compat_ebt_replace *newinfo)
1699{
1700	unsigned int size = info->entries_size;
1701	const void *entries = info->entries;
1702	int ret;
1703
1704	newinfo->entries_size = size;
1705	ret = ebt_compat_init_offsets(info->nentries);
1706	if (ret)
1707		return ret;
1708
1709	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1710							entries, newinfo);
1711}
1712
1713static int compat_copy_everything_to_user(struct ebt_table *t,
1714					  void __user *user, int *len, int cmd)
1715{
1716	struct compat_ebt_replace repl, tmp;
1717	struct ebt_counter *oldcounters;
1718	struct ebt_table_info tinfo;
1719	int ret;
1720	void __user *pos;
1721
1722	memset(&tinfo, 0, sizeof(tinfo));
1723
1724	if (cmd == EBT_SO_GET_ENTRIES) {
1725		tinfo.entries_size = t->private->entries_size;
1726		tinfo.nentries = t->private->nentries;
1727		tinfo.entries = t->private->entries;
1728		oldcounters = t->private->counters;
1729	} else {
1730		tinfo.entries_size = t->table->entries_size;
1731		tinfo.nentries = t->table->nentries;
1732		tinfo.entries = t->table->entries;
1733		oldcounters = t->table->counters;
1734	}
1735
1736	if (copy_from_user(&tmp, user, sizeof(tmp)))
1737		return -EFAULT;
1738
1739	if (tmp.nentries != tinfo.nentries ||
1740	   (tmp.num_counters && tmp.num_counters != tinfo.nentries))
1741		return -EINVAL;
1742
1743	memcpy(&repl, &tmp, sizeof(repl));
1744	if (cmd == EBT_SO_GET_ENTRIES)
1745		ret = compat_table_info(t->private, &repl);
1746	else
1747		ret = compat_table_info(&tinfo, &repl);
1748	if (ret)
1749		return ret;
1750
1751	if (*len != sizeof(tmp) + repl.entries_size +
1752	   (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
1753		pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
1754				*len, tinfo.entries_size, repl.entries_size);
1755		return -EINVAL;
1756	}
1757
1758	/* userspace might not need the counters */
1759	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
1760					tmp.num_counters, tinfo.nentries);
1761	if (ret)
1762		return ret;
1763
1764	pos = compat_ptr(tmp.entries);
1765	return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
1766			compat_copy_entry_to_user, &pos, &tmp.entries_size);
1767}
1768
1769struct ebt_entries_buf_state {
1770	char *buf_kern_start;	/* kernel buffer to copy (translated) data to */
1771	u32 buf_kern_len;	/* total size of kernel buffer */
1772	u32 buf_kern_offset;	/* amount of data copied so far */
1773	u32 buf_user_offset;	/* read position in userspace buffer */
1774};
1775
1776static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1777{
1778	state->buf_kern_offset += sz;
1779	return state->buf_kern_offset >= sz ? 0 : -EINVAL;
1780}
1781
1782static int ebt_buf_add(struct ebt_entries_buf_state *state,
1783		       const void *data, unsigned int sz)
1784{
1785	if (state->buf_kern_start == NULL)
1786		goto count_only;
1787
1788	if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len))
1789		return -EINVAL;
1790
1791	memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1792
1793 count_only:
1794	state->buf_user_offset += sz;
1795	return ebt_buf_count(state, sz);
1796}
1797
1798static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1799{
1800	char *b = state->buf_kern_start;
1801
1802	if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len))
1803		return -EINVAL;
1804
1805	if (b != NULL && sz > 0)
1806		memset(b + state->buf_kern_offset, 0, sz);
1807	/* do not adjust ->buf_user_offset here, we added kernel-side padding */
1808	return ebt_buf_count(state, sz);
1809}
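/* Note: ebt_buf_add() and ebt_buf_add_pad() double as a dry run.  With
 * state->buf_kern_start == NULL nothing is copied and only the offsets are
 * advanced, so the same walk can first be used to size the translated
 * (native layout) blob before a second pass copies into the allocated
 * kernel buffer.
 */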
1810
1811enum compat_mwt {
1812	EBT_COMPAT_MATCH,
1813	EBT_COMPAT_WATCHER,
1814	EBT_COMPAT_TARGET,
1815};
1816
1817static int compat_mtw_from_user(const struct compat_ebt_entry_mwt *mwt,
1818				enum compat_mwt compat_mwt,
1819				struct ebt_entries_buf_state *state,
1820				const unsigned char *base)
1821{
1822	char name[EBT_EXTENSION_MAXNAMELEN];
1823	struct xt_match *match;
1824	struct xt_target *wt;
1825	void *dst = NULL;
1826	int off, pad = 0;
1827	unsigned int size_kern, match_size = mwt->match_size;
1828
1829	if (strscpy(name, mwt->u.name, sizeof(name)) < 0)
1830		return -EINVAL;
1831
1832	if (state->buf_kern_start)
1833		dst = state->buf_kern_start + state->buf_kern_offset;
1834
1835	switch (compat_mwt) {
1836	case EBT_COMPAT_MATCH:
1837		match = xt_request_find_match(NFPROTO_BRIDGE, name,
1838					      mwt->u.revision);
1839		if (IS_ERR(match))
1840			return PTR_ERR(match);
1841
1842		off = ebt_compat_match_offset(match, match_size);
1843		if (dst) {
1844			if (match->compat_from_user)
1845				match->compat_from_user(dst, mwt->data);
1846			else
1847				memcpy(dst, mwt->data, match_size);
1848		}
1849
1850		size_kern = match->matchsize;
1851		if (unlikely(size_kern == -1))
1852			size_kern = match_size;
1853		module_put(match->me);
1854		break;
1855	case EBT_COMPAT_WATCHER:
1856	case EBT_COMPAT_TARGET:
1857		wt = xt_request_find_target(NFPROTO_BRIDGE, name,
1858					    mwt->u.revision);
1859		if (IS_ERR(wt))
1860			return PTR_ERR(wt);
1861		off = xt_compat_target_offset(wt);
1862
1863		if (dst) {
1864			if (wt->compat_from_user)
1865				wt->compat_from_user(dst, mwt->data);
1866			else
1867				memcpy(dst, mwt->data, match_size);
1868		}
1869
1870		size_kern = wt->targetsize;
1871		module_put(wt->me);
1872		break;
1873
1874	default:
1875		return -EINVAL;
1876	}
1877
1878	state->buf_kern_offset += match_size + off;
1879	state->buf_user_offset += match_size;
1880	pad = XT_ALIGN(size_kern) - size_kern;
1881
1882	if (pad > 0 && dst) {
1883		if (WARN_ON(state->buf_kern_len <= pad))
1884			return -EINVAL;
1885		if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad))
1886			return -EINVAL;
1887		memset(dst + size_kern, 0, pad);
1888	}
1889	return off + match_size;
1890}
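/* The value returned above is match_size plus "off", the extra room the
 * 64-bit kernel image needs for this extension (roughly the difference
 * between the XT_ALIGN()ed kernel ->matchsize/->targetsize and the 32-bit
 * payload).  ebt_size_mwt() below accumulates these deltas as "growth",
 * size_entry_mwt() registers the per-entry growth via xt_compat_add_offset(),
 * and compat_do_replace() later re-bases hook entry points (and the standard
 * target re-bases verdicts) with xt_compat_calc_jump() against the grown
 * table.
 */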
1891
1892/* return size of all matches, watchers or target, including necessary
1893 * alignment and padding.
1894 */
1895static int ebt_size_mwt(const struct compat_ebt_entry_mwt *match32,
1896			unsigned int size_left, enum compat_mwt type,
1897			struct ebt_entries_buf_state *state, const void *base)
1898{
1899	const char *buf = (const char *)match32;
1900	int growth = 0;
 
1901
1902	if (size_left == 0)
1903		return 0;
1904
1905	do {
1906		struct ebt_entry_match *match_kern;
1907		int ret;
1908
1909		if (size_left < sizeof(*match32))
1910			return -EINVAL;
1911
1912		match_kern = (struct ebt_entry_match *) state->buf_kern_start;
1913		if (match_kern) {
1914			char *tmp;
1915			tmp = state->buf_kern_start + state->buf_kern_offset;
1916			match_kern = (struct ebt_entry_match *) tmp;
1917		}
1918		ret = ebt_buf_add(state, buf, sizeof(*match32));
1919		if (ret < 0)
1920			return ret;
1921		size_left -= sizeof(*match32);
1922
1923		/* add padding before match->data (if any) */
1924		ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
1925		if (ret < 0)
1926			return ret;
1927
1928		if (match32->match_size > size_left)
1929			return -EINVAL;
1930
1931		size_left -= match32->match_size;
1932
1933		ret = compat_mtw_from_user(match32, type, state, base);
1934		if (ret < 0)
1935			return ret;
1936
1937		if (WARN_ON(ret < match32->match_size))
1938			return -EINVAL;
1939		growth += ret - match32->match_size;
1940		growth += ebt_compat_entry_padsize();
1941
1942		buf += sizeof(*match32);
1943		buf += match32->match_size;
1944
1945		if (match_kern)
1946			match_kern->match_size = ret;
1947
 
1948		match32 = (struct compat_ebt_entry_mwt *) buf;
1949	} while (size_left);
1950
1951	return growth;
1952}
1953
1954/* called for all ebt_entry structures. */
1955static int size_entry_mwt(const struct ebt_entry *entry, const unsigned char *base,
1956			  unsigned int *total,
1957			  struct ebt_entries_buf_state *state)
1958{
1959	unsigned int i, j, startoff, next_expected_off, new_offset = 0;
1960	/* stores match/watchers/targets & offset of next struct ebt_entry: */
1961	unsigned int offsets[4];
1962	unsigned int *offsets_update = NULL;
1963	int ret;
1964	char *buf_start;
1965
1966	if (*total < sizeof(struct ebt_entries))
1967		return -EINVAL;
1968
1969	if (!entry->bitmask) {
1970		*total -= sizeof(struct ebt_entries);
1971		return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
1972	}
1973	if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
1974		return -EINVAL;
1975
1976	startoff = state->buf_user_offset;
1977	/* pull in most part of ebt_entry, it does not need to be changed. */
1978	ret = ebt_buf_add(state, entry,
1979			offsetof(struct ebt_entry, watchers_offset));
1980	if (ret < 0)
1981		return ret;
1982
1983	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
1984	memcpy(&offsets[1], &entry->watchers_offset,
1985			sizeof(offsets) - sizeof(offsets[0]));
1986
1987	if (state->buf_kern_start) {
1988		buf_start = state->buf_kern_start + state->buf_kern_offset;
1989		offsets_update = (unsigned int *) buf_start;
1990	}
1991	ret = ebt_buf_add(state, &offsets[1],
1992			sizeof(offsets) - sizeof(offsets[0]));
1993	if (ret < 0)
1994		return ret;
1995	buf_start = (char *) entry;
1996	/* 0: matches offset, always follows ebt_entry.
1997	 * 1: watchers offset, from ebt_entry structure
1998	 * 2: target offset, from ebt_entry structure
1999	 * 3: next ebt_entry offset, from ebt_entry structure
2000	 *
2001	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
2002	 */
2003	for (i = 0; i < 4 ; ++i) {
2004		if (offsets[i] > *total)
2005			return -EINVAL;
2006
2007		if (i < 3 && offsets[i] == *total)
2008			return -EINVAL;
2009
2010		if (i == 0)
2011			continue;
2012		if (offsets[i-1] > offsets[i])
2013			return -EINVAL;
2014	}
2015
2016	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
2017		struct compat_ebt_entry_mwt *match32;
2018		unsigned int size;
2019		char *buf = buf_start + offsets[i];
2020
 
2021		if (offsets[i] > offsets[j])
2022			return -EINVAL;
2023
2024		match32 = (struct compat_ebt_entry_mwt *) buf;
2025		size = offsets[j] - offsets[i];
2026		ret = ebt_size_mwt(match32, size, i, state, base);
2027		if (ret < 0)
2028			return ret;
2029		new_offset += ret;
2030		if (offsets_update && new_offset) {
2031			pr_debug("change offset %d to %d\n",
2032				offsets_update[i], offsets[j] + new_offset);
2033			offsets_update[i] = offsets[j] + new_offset;
2034		}
2035	}
2036
2037	if (state->buf_kern_start == NULL) {
2038		unsigned int offset = buf_start - (char *) base;
2039
2040		ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset);
2041		if (ret < 0)
2042			return ret;
2043	}
2044
2045	next_expected_off = state->buf_user_offset - startoff;
2046	if (next_expected_off != entry->next_offset)
2047		return -EINVAL;
2048
2049	if (*total < entry->next_offset)
2050		return -EINVAL;
2051	*total -= entry->next_offset;
2052	return 0;
2053}
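/* Layout handled by size_entry_mwt(), all offsets relative to the start of
 * the rule (a sketch):
 *
 *	0            sizeof(ebt_entry)  watchers_offset  target_offset  next_offset
 *	| ebt_entry  | matches ...      | watchers ...    | target ...   |
 *
 * The three variable-length regions are walked via ebt_size_mwt(), each
 * extension may grow to its 64-bit size, and the amount of 32-bit data
 * consumed must end up exactly at next_offset (the next_expected_off check
 * above).
 */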
2054
2055/* repl->entries_size is the size of the ebt_entry blob in userspace.
2056 * It might need more memory when copied to a 64 bit kernel in case
2057 * userspace is 32-bit. So, first task: find out how much memory is needed.
2058 *
2059 * Called before validation is performed.
2060 */
2061static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2062				struct ebt_entries_buf_state *state)
2063{
2064	unsigned int size_remaining = size_user;
2065	int ret;
2066
2067	ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2068					&size_remaining, state);
2069	if (ret < 0)
2070		return ret;
2071
2072	if (size_remaining)
2073		return -EINVAL;
2074
2075	return state->buf_kern_offset;
2076}
2077
2078
2079static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
2080					     sockptr_t arg, unsigned int len)
2081{
2082	struct compat_ebt_replace tmp;
2083	int i;
2084
2085	if (len < sizeof(tmp))
2086		return -EINVAL;
2087
2088	if (copy_from_sockptr(&tmp, arg, sizeof(tmp)))
2089		return -EFAULT;
2090
2091	if (len != sizeof(tmp) + tmp.entries_size)
2092		return -EINVAL;
2093
2094	if (tmp.entries_size == 0)
2095		return -EINVAL;
2096
2097	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
2098			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
2099		return -ENOMEM;
2100	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
2101		return -ENOMEM;
2102
2103	memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
2104
2105	/* starting with hook_entry, 32 vs. 64 bit structures are different */
2106	for (i = 0; i < NF_BR_NUMHOOKS; i++)
2107		repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);
2108
2109	repl->num_counters = tmp.num_counters;
2110	repl->counters = compat_ptr(tmp.counters);
2111	repl->entries = compat_ptr(tmp.entries);
2112	return 0;
2113}
2114
2115static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
2116{
2117	int ret, i, countersize, size64;
2118	struct ebt_table_info *newinfo;
2119	struct ebt_replace tmp;
2120	struct ebt_entries_buf_state state;
2121	void *entries_tmp;
2122
2123	ret = compat_copy_ebt_replace_from_user(&tmp, arg, len);
2124	if (ret) {
2125		/* try real handler in case userland supplied needed padding */
2126		if (ret == -EINVAL && do_replace(net, arg, len) == 0)
2127			ret = 0;
2128		return ret;
2129	}
2130
2131	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
2132	newinfo = vmalloc(sizeof(*newinfo) + countersize);
2133	if (!newinfo)
2134		return -ENOMEM;
2135
2136	if (countersize)
2137		memset(newinfo->counters, 0, countersize);
2138
2139	memset(&state, 0, sizeof(state));
2140
2141	newinfo->entries = vmalloc(tmp.entries_size);
2142	if (!newinfo->entries) {
2143		ret = -ENOMEM;
2144		goto free_newinfo;
2145	}
2146	if (copy_from_user(
2147	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
2148		ret = -EFAULT;
2149		goto free_entries;
2150	}
2151
2152	entries_tmp = newinfo->entries;
2153
2154	xt_compat_lock(NFPROTO_BRIDGE);
2155
2156	ret = ebt_compat_init_offsets(tmp.nentries);
2157	if (ret < 0)
2158		goto out_unlock;
2159
2160	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2161	if (ret < 0)
2162		goto out_unlock;
2163
2164	pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
2165		tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
2166		xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));
2167
2168	size64 = ret;
2169	newinfo->entries = vmalloc(size64);
2170	if (!newinfo->entries) {
2171		vfree(entries_tmp);
2172		ret = -ENOMEM;
2173		goto out_unlock;
2174	}
2175
2176	memset(&state, 0, sizeof(state));
2177	state.buf_kern_start = newinfo->entries;
2178	state.buf_kern_len = size64;
2179
2180	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2181	if (WARN_ON(ret < 0)) {
2182		vfree(entries_tmp);
2183		goto out_unlock;
2184	}
2185
2186	vfree(entries_tmp);
2187	tmp.entries_size = size64;
2188
2189	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
2190		char __user *usrptr;
2191		if (tmp.hook_entry[i]) {
2192			unsigned int delta;
2193			usrptr = (char __user *) tmp.hook_entry[i];
2194			delta = usrptr - tmp.entries;
2195			usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
2196			tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
2197		}
2198	}
2199
2200	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2201	xt_compat_unlock(NFPROTO_BRIDGE);
2202
2203	ret = do_replace_finish(net, &tmp, newinfo);
2204	if (ret == 0)
2205		return ret;
2206free_entries:
2207	vfree(newinfo->entries);
2208free_newinfo:
2209	vfree(newinfo);
2210	return ret;
2211out_unlock:
2212	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2213	xt_compat_unlock(NFPROTO_BRIDGE);
2214	goto free_entries;
2215}
2216
2217static int compat_update_counters(struct net *net, sockptr_t arg,
2218				  unsigned int len)
2219{
2220	struct compat_ebt_replace hlp;
2221
2222	if (copy_from_sockptr(&hlp, arg, sizeof(hlp)))
2223		return -EFAULT;
2224
2225	/* try real handler in case userland supplied needed padding */
2226	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2227		return update_counters(net, arg, len);
2228
2229	return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2230				  hlp.num_counters, len);
2231}
2232
2233static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
2234		void __user *user, int *len)
2235{
2236	int ret;
2237	struct compat_ebt_replace tmp;
2238	struct ebt_table *t;
2239	struct net *net = sock_net(sk);
2240
2241	if ((cmd == EBT_SO_GET_INFO || cmd == EBT_SO_GET_INIT_INFO) &&
2242	    *len != sizeof(struct compat_ebt_replace))
2243		return -EINVAL;
2244
2245	if (copy_from_user(&tmp, user, sizeof(tmp)))
2246		return -EFAULT;
2247
2248	tmp.name[sizeof(tmp.name) - 1] = '\0';
2249
2250	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
2251	if (!t)
2252		return ret;
2253
2254	xt_compat_lock(NFPROTO_BRIDGE);
2255	switch (cmd) {
2256	case EBT_SO_GET_INFO:
2257		tmp.nentries = t->private->nentries;
2258		ret = compat_table_info(t->private, &tmp);
2259		if (ret)
2260			goto out;
2261		tmp.valid_hooks = t->valid_hooks;
2262
2263		if (copy_to_user(user, &tmp, *len) != 0) {
2264			ret = -EFAULT;
2265			break;
2266		}
2267		ret = 0;
2268		break;
2269	case EBT_SO_GET_INIT_INFO:
2270		tmp.nentries = t->table->nentries;
2271		tmp.entries_size = t->table->entries_size;
2272		tmp.valid_hooks = t->table->valid_hooks;
2273
2274		if (copy_to_user(user, &tmp, *len) != 0) {
2275			ret = -EFAULT;
2276			break;
2277		}
2278		ret = 0;
2279		break;
2280	case EBT_SO_GET_ENTRIES:
2281	case EBT_SO_GET_INIT_ENTRIES:
2282		/* try real handler first in case of userland-side padding.
2283		 * in case we are dealing with an 'ordinary' 32 bit binary
2284		 * without 64bit compatibility padding, this will fail right
2285		 * after copy_from_user when the *len argument is validated.
2286		 *
2287		 * the compat_ variant needs to do one pass over the kernel
2288		 * data set to adjust for size differences before the check.
2289		 */
2290		if (copy_everything_to_user(t, user, len, cmd) == 0)
2291			ret = 0;
2292		else
2293			ret = compat_copy_everything_to_user(t, user, len, cmd);
2294		break;
2295	default:
2296		ret = -EINVAL;
2297	}
2298 out:
2299	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2300	xt_compat_unlock(NFPROTO_BRIDGE);
2301	mutex_unlock(&ebt_mutex);
2302	return ret;
2303}
2304#endif
2305
2306static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2307{
2308	struct net *net = sock_net(sk);
2309	struct ebt_replace tmp;
2310	struct ebt_table *t;
2311	int ret;
2312
2313	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2314		return -EPERM;
2315
2316#ifdef CONFIG_COMPAT
2317	/* try real handler in case userland supplied needed padding */
2318	if (in_compat_syscall() &&
2319	    ((cmd != EBT_SO_GET_INFO && cmd != EBT_SO_GET_INIT_INFO) ||
2320	     *len != sizeof(tmp)))
2321		return compat_do_ebt_get_ctl(sk, cmd, user, len);
2322#endif
2323
2324	if (copy_from_user(&tmp, user, sizeof(tmp)))
2325		return -EFAULT;
2326
2327	tmp.name[sizeof(tmp.name) - 1] = '\0';
2328
2329	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
2330	if (!t)
2331		return ret;
2332
2333	switch (cmd) {
2334	case EBT_SO_GET_INFO:
2335	case EBT_SO_GET_INIT_INFO:
2336		if (*len != sizeof(struct ebt_replace)) {
2337			ret = -EINVAL;
2338			mutex_unlock(&ebt_mutex);
2339			break;
2340		}
2341		if (cmd == EBT_SO_GET_INFO) {
2342			tmp.nentries = t->private->nentries;
2343			tmp.entries_size = t->private->entries_size;
2344			tmp.valid_hooks = t->valid_hooks;
2345		} else {
2346			tmp.nentries = t->table->nentries;
2347			tmp.entries_size = t->table->entries_size;
2348			tmp.valid_hooks = t->table->valid_hooks;
2349		}
2350		mutex_unlock(&ebt_mutex);
2351		if (copy_to_user(user, &tmp, *len) != 0) {
2352			ret = -EFAULT;
2353			break;
2354		}
2355		ret = 0;
2356		break;
2357
2358	case EBT_SO_GET_ENTRIES:
2359	case EBT_SO_GET_INIT_ENTRIES:
2360		ret = copy_everything_to_user(t, user, len, cmd);
2361		mutex_unlock(&ebt_mutex);
2362		break;
2363
2364	default:
2365		mutex_unlock(&ebt_mutex);
2366		ret = -EINVAL;
2367	}
2368
2369	return ret;
2370}
2371
2372static int do_ebt_set_ctl(struct sock *sk, int cmd, sockptr_t arg,
2373		unsigned int len)
2374{
2375	struct net *net = sock_net(sk);
2376	int ret;
2377
2378	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2379		return -EPERM;
2380
2381	switch (cmd) {
2382	case EBT_SO_SET_ENTRIES:
2383#ifdef CONFIG_COMPAT
2384		if (in_compat_syscall())
2385			ret = compat_do_replace(net, arg, len);
2386		else
2387#endif
2388			ret = do_replace(net, arg, len);
2389		break;
2390	case EBT_SO_SET_COUNTERS:
2391#ifdef CONFIG_COMPAT
2392		if (in_compat_syscall())
2393			ret = compat_update_counters(net, arg, len);
2394		else
2395#endif
2396			ret = update_counters(net, arg, len);
2397		break;
2398	default:
2399		ret = -EINVAL;
2400	}
2401	return ret;
2402}
2403
2404static struct nf_sockopt_ops ebt_sockopts = {
2405	.pf		= PF_INET,
2406	.set_optmin	= EBT_BASE_CTL,
2407	.set_optmax	= EBT_SO_SET_MAX + 1,
2408	.set		= do_ebt_set_ctl,
2409	.get_optmin	= EBT_BASE_CTL,
2410	.get_optmax	= EBT_SO_GET_MAX + 1,
2411	.get		= do_ebt_get_ctl,
2412	.owner		= THIS_MODULE,
2413};
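/* The handlers wired up above sit behind the generic netfilter sockopt
 * interface on an AF_INET socket.  A minimal userspace sketch (assumed
 * usage, error handling omitted, needs CAP_NET_ADMIN) would be:
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <linux/netfilter_bridge/ebtables.h>
 *
 *	struct ebt_replace info = { 0 };
 *	socklen_t len = sizeof(info);
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *
 *	strncpy(info.name, "filter", sizeof(info.name) - 1);
 *	getsockopt(fd, IPPROTO_IP, EBT_SO_GET_INFO, &info, &len);
 *	(info.nentries and info.entries_size now describe the "filter" table)
 *
 * This mirrors what ebtables(8) does before pulling the rule blob with
 * EBT_SO_GET_ENTRIES.
 */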
2414
2415static int __init ebtables_init(void)
2416{
2417	int ret;
2418
2419	ret = xt_register_target(&ebt_standard_target);
2420	if (ret < 0)
2421		return ret;
2422	ret = nf_register_sockopt(&ebt_sockopts);
2423	if (ret < 0) {
2424		xt_unregister_target(&ebt_standard_target);
2425		return ret;
2426	}
2427
 
2428	return 0;
2429}
2430
2431static void __exit ebtables_fini(void)
2432{
2433	nf_unregister_sockopt(&ebt_sockopts);
2434	xt_unregister_target(&ebt_standard_target);
 
2435}
2436
2437EXPORT_SYMBOL(ebt_register_table);
2438EXPORT_SYMBOL(ebt_unregister_table);
2439EXPORT_SYMBOL(ebt_do_table);
2440module_init(ebtables_init);
2441module_exit(ebtables_fini);
2442MODULE_LICENSE("GPL");
v3.15
 
   1/*
   2 *  ebtables
   3 *
   4 *  Author:
   5 *  Bart De Schuymer		<bdschuym@pandora.be>
   6 *
   7 *  ebtables.c,v 2.0, July, 2002
   8 *
   9 *  This code is strongly inspired by the iptables code which is
  10 *  Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
  11 *
  12 *  This program is free software; you can redistribute it and/or
  13 *  modify it under the terms of the GNU General Public License
  14 *  as published by the Free Software Foundation; either version
  15 *  2 of the License, or (at your option) any later version.
  16 */
  17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  18#include <linux/kmod.h>
  19#include <linux/module.h>
  20#include <linux/vmalloc.h>
  21#include <linux/netfilter/x_tables.h>
  22#include <linux/netfilter_bridge/ebtables.h>
  23#include <linux/spinlock.h>
  24#include <linux/mutex.h>
  25#include <linux/slab.h>
  26#include <asm/uaccess.h>
  27#include <linux/smp.h>
  28#include <linux/cpumask.h>
 
  29#include <net/sock.h>
  30/* needed for logical [in,out]-dev filtering */
  31#include "../br_private.h"
  32
  33#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
  34					 "report to author: "format, ## args)
  35/* #define BUGPRINT(format, args...) */
  36
  37/*
  38 * Each cpu has its own set of counters, so there is no need for write_lock in
  39 * the softirq
  40 * For reading or updating the counters, the user context needs to
  41 * get a write_lock
  42 */
  43
  44/* The size of each set of counters is altered to get cache alignment */
  45#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
  46#define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
  47#define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
  48   COUNTER_OFFSET(n) * cpu))
  49
  50
  51
  52static DEFINE_MUTEX(ebt_mutex);
  53
  54#ifdef CONFIG_COMPAT
  55static void ebt_standard_compat_from_user(void *dst, const void *src)
  56{
  57	int v = *(compat_int_t *)src;
  58
  59	if (v >= 0)
  60		v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
  61	memcpy(dst, &v, sizeof(v));
  62}
  63
  64static int ebt_standard_compat_to_user(void __user *dst, const void *src)
  65{
  66	compat_int_t cv = *(int *)src;
  67
  68	if (cv >= 0)
  69		cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
  70	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
  71}
  72#endif
  73
  74
  75static struct xt_target ebt_standard_target = {
  76	.name       = "standard",
  77	.revision   = 0,
  78	.family     = NFPROTO_BRIDGE,
  79	.targetsize = sizeof(int),
  80#ifdef CONFIG_COMPAT
  81	.compatsize = sizeof(compat_int_t),
  82	.compat_from_user = ebt_standard_compat_from_user,
  83	.compat_to_user =  ebt_standard_compat_to_user,
  84#endif
  85};
  86
  87static inline int
  88ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
  89	       struct xt_action_param *par)
  90{
  91	par->target   = w->u.watcher;
  92	par->targinfo = w->data;
  93	w->u.watcher->target(skb, par);
  94	/* watchers don't give a verdict */
  95	return 0;
  96}
  97
  98static inline int
  99ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
 100	     struct xt_action_param *par)
 101{
 102	par->match     = m->u.match;
 103	par->matchinfo = m->data;
 104	return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
 105}
 106
 107static inline int
 108ebt_dev_check(const char *entry, const struct net_device *device)
 109{
 110	int i = 0;
 111	const char *devname;
 112
 113	if (*entry == '\0')
 114		return 0;
 115	if (!device)
 116		return 1;
 117	devname = device->name;
 118	/* 1 is the wildcard token */
 119	while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
 120		i++;
 121	return devname[i] != entry[i] && entry[i] != 1;
 122}
 123
 124#define FWINV2(bool, invflg) ((bool) ^ !!(e->invflags & invflg))
 125/* process standard matches */
 126static inline int
 127ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
 128                const struct net_device *in, const struct net_device *out)
 129{
 130	const struct ethhdr *h = eth_hdr(skb);
 131	const struct net_bridge_port *p;
 132	__be16 ethproto;
 133	int verdict, i;
 134
 135	if (vlan_tx_tag_present(skb))
 136		ethproto = htons(ETH_P_8021Q);
 137	else
 138		ethproto = h->h_proto;
 139
 140	if (e->bitmask & EBT_802_3) {
 141		if (FWINV2(ntohs(ethproto) >= ETH_P_802_3_MIN, EBT_IPROTO))
 142			return 1;
 143	} else if (!(e->bitmask & EBT_NOPROTO) &&
 144	   FWINV2(e->ethproto != ethproto, EBT_IPROTO))
 145		return 1;
 146
 147	if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
 148		return 1;
 149	if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
 150		return 1;
 151	/* rcu_read_lock()ed by nf_hook_slow */
 152	if (in && (p = br_port_get_rcu(in)) != NULL &&
 153	    FWINV2(ebt_dev_check(e->logical_in, p->br->dev), EBT_ILOGICALIN))
 154		return 1;
 155	if (out && (p = br_port_get_rcu(out)) != NULL &&
 156	    FWINV2(ebt_dev_check(e->logical_out, p->br->dev), EBT_ILOGICALOUT))
 157		return 1;
 158
 159	if (e->bitmask & EBT_SOURCEMAC) {
 160		verdict = 0;
 161		for (i = 0; i < 6; i++)
 162			verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
 163			   e->sourcemsk[i];
 164		if (FWINV2(verdict != 0, EBT_ISOURCE) )
 165			return 1;
 166	}
 167	if (e->bitmask & EBT_DESTMAC) {
 168		verdict = 0;
 169		for (i = 0; i < 6; i++)
 170			verdict |= (h->h_dest[i] ^ e->destmac[i]) &
 171			   e->destmsk[i];
 172		if (FWINV2(verdict != 0, EBT_IDEST) )
 173			return 1;
 174	}
 175	return 0;
 176}
 177
 178static inline __pure
 179struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
 180{
 181	return (void *)entry + entry->next_offset;
 182}
 183
 184/* Do some firewalling */
 185unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
 186   const struct net_device *in, const struct net_device *out,
 187   struct ebt_table *table)
 188{
 
 189	int i, nentries;
 190	struct ebt_entry *point;
 191	struct ebt_counter *counter_base, *cb_base;
 192	const struct ebt_entry_target *t;
 193	int verdict, sp = 0;
 194	struct ebt_chainstack *cs;
 195	struct ebt_entries *chaininfo;
 196	const char *base;
 197	const struct ebt_table_info *private;
 198	struct xt_action_param acpar;
 199
 200	acpar.family  = NFPROTO_BRIDGE;
 201	acpar.in      = in;
 202	acpar.out     = out;
 203	acpar.hotdrop = false;
 204	acpar.hooknum = hook;
 205
 206	read_lock_bh(&table->lock);
 207	private = table->private;
 208	cb_base = COUNTER_BASE(private->counters, private->nentries,
 209	   smp_processor_id());
 210	if (private->chainstack)
 211		cs = private->chainstack[smp_processor_id()];
 212	else
 213		cs = NULL;
 214	chaininfo = private->hook_entry[hook];
 215	nentries = private->hook_entry[hook]->nentries;
 216	point = (struct ebt_entry *)(private->hook_entry[hook]->data);
 217	counter_base = cb_base + private->hook_entry[hook]->counter_offset;
 218	/* base for chain jumps */
 219	base = private->entries;
 220	i = 0;
 221	while (i < nentries) {
 222		if (ebt_basic_match(point, skb, in, out))
 223			goto letscontinue;
 224
 225		if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
 226			goto letscontinue;
 227		if (acpar.hotdrop) {
 228			read_unlock_bh(&table->lock);
 229			return NF_DROP;
 230		}
 231
 232		/* increase counter */
 233		(*(counter_base + i)).pcnt++;
 234		(*(counter_base + i)).bcnt += skb->len;
 235
 236		/* these should only watch: not modify, nor tell us
 237		   what to do with the packet */
 238		EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);
 239
 240		t = (struct ebt_entry_target *)
 241		   (((char *)point) + point->target_offset);
 242		/* standard target */
 243		if (!t->u.target->target)
 244			verdict = ((struct ebt_standard_target *)t)->verdict;
 245		else {
 246			acpar.target   = t->u.target;
 247			acpar.targinfo = t->data;
 248			verdict = t->u.target->target(skb, &acpar);
 249		}
 250		if (verdict == EBT_ACCEPT) {
 251			read_unlock_bh(&table->lock);
 252			return NF_ACCEPT;
 253		}
 254		if (verdict == EBT_DROP) {
 255			read_unlock_bh(&table->lock);
 256			return NF_DROP;
 257		}
 258		if (verdict == EBT_RETURN) {
 259letsreturn:
 260#ifdef CONFIG_NETFILTER_DEBUG
 261			if (sp == 0) {
 262				BUGPRINT("RETURN on base chain");
 263				/* act like this is EBT_CONTINUE */
 264				goto letscontinue;
 265			}
 266#endif
 267			sp--;
 268			/* put all the local variables right */
 269			i = cs[sp].n;
 270			chaininfo = cs[sp].chaininfo;
 271			nentries = chaininfo->nentries;
 272			point = cs[sp].e;
 273			counter_base = cb_base +
 274			   chaininfo->counter_offset;
 275			continue;
 276		}
 277		if (verdict == EBT_CONTINUE)
 278			goto letscontinue;
 279#ifdef CONFIG_NETFILTER_DEBUG
 280		if (verdict < 0) {
 281			BUGPRINT("bogus standard verdict\n");
 282			read_unlock_bh(&table->lock);
 283			return NF_DROP;
 284		}
 285#endif
 286		/* jump to a udc */
 287		cs[sp].n = i + 1;
 288		cs[sp].chaininfo = chaininfo;
 289		cs[sp].e = ebt_next_entry(point);
 290		i = 0;
 291		chaininfo = (struct ebt_entries *) (base + verdict);
 292#ifdef CONFIG_NETFILTER_DEBUG
 293		if (chaininfo->distinguisher) {
 294			BUGPRINT("jump to non-chain\n");
 295			read_unlock_bh(&table->lock);
 296			return NF_DROP;
 297		}
 298#endif
 299		nentries = chaininfo->nentries;
 300		point = (struct ebt_entry *)chaininfo->data;
 301		counter_base = cb_base + chaininfo->counter_offset;
 302		sp++;
 303		continue;
 304letscontinue:
 305		point = ebt_next_entry(point);
 306		i++;
 307	}
 308
 309	/* I actually like this :) */
 310	if (chaininfo->policy == EBT_RETURN)
 311		goto letsreturn;
 312	if (chaininfo->policy == EBT_ACCEPT) {
 313		read_unlock_bh(&table->lock);
 314		return NF_ACCEPT;
 315	}
 316	read_unlock_bh(&table->lock);
 317	return NF_DROP;
 318}
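/* The verdict values tested above come from the ebtables uapi header:
 * EBT_ACCEPT, EBT_DROP, EBT_CONTINUE and EBT_RETURN are small negative
 * numbers, while any verdict >= 0 is taken as a byte offset (relative to
 * private->entries, the "base" pointer) of the user defined chain to jump
 * to, hence the "chaininfo = (struct ebt_entries *)(base + verdict)" step
 * in the loop above.
 */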
 319
 320/* If it succeeds, returns element and locks mutex */
 321static inline void *
 322find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
 323   struct mutex *mutex)
 324{
 325	struct {
 326		struct list_head list;
 327		char name[EBT_FUNCTION_MAXNAMELEN];
 328	} *e;
 329
 330	*error = mutex_lock_interruptible(mutex);
 331	if (*error != 0)
 332		return NULL;
 333
 334	list_for_each_entry(e, head, list) {
 335		if (strcmp(e->name, name) == 0)
 336			return e;
 337	}
 338	*error = -ENOENT;
 339	mutex_unlock(mutex);
 340	return NULL;
 341}
 342
 343static void *
 344find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
 345   int *error, struct mutex *mutex)
 346{
 347	return try_then_request_module(
 348			find_inlist_lock_noload(head, name, error, mutex),
 349			"%s%s", prefix, name);
 350}
 351
 352static inline struct ebt_table *
 353find_table_lock(struct net *net, const char *name, int *error,
 354		struct mutex *mutex)
 355{
 356	return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
 357				"ebtable_", error, mutex);
 358}
 359
 
 
 
 
 
 
 
 
 
 
 360static inline int
 361ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
 362		unsigned int *cnt)
 363{
 364	const struct ebt_entry *e = par->entryinfo;
 365	struct xt_match *match;
 366	size_t left = ((char *)e + e->watchers_offset) - (char *)m;
 367	int ret;
 368
 369	if (left < sizeof(struct ebt_entry_match) ||
 370	    left - sizeof(struct ebt_entry_match) < m->match_size)
 371		return -EINVAL;
 372
 373	match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
 374	if (IS_ERR(match))
 375		return PTR_ERR(match);
 376	m->u.match = match;
 377
 378	par->match     = match;
 379	par->matchinfo = m->data;
 380	ret = xt_check_match(par, m->match_size,
 381	      e->ethproto, e->invflags & EBT_IPROTO);
 382	if (ret < 0) {
 383		module_put(match->me);
 384		return ret;
 385	}
 386
 387	(*cnt)++;
 388	return 0;
 389}
 390
 391static inline int
 392ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
 393		  unsigned int *cnt)
 394{
 395	const struct ebt_entry *e = par->entryinfo;
 396	struct xt_target *watcher;
 397	size_t left = ((char *)e + e->target_offset) - (char *)w;
 398	int ret;
 399
 400	if (left < sizeof(struct ebt_entry_watcher) ||
 401	   left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
 402		return -EINVAL;
 403
 404	watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
 405	if (IS_ERR(watcher))
 406		return PTR_ERR(watcher);
 407	w->u.watcher = watcher;
 408
 409	par->target   = watcher;
 410	par->targinfo = w->data;
 411	ret = xt_check_target(par, w->watcher_size,
 412	      e->ethproto, e->invflags & EBT_IPROTO);
 413	if (ret < 0) {
 414		module_put(watcher->me);
 415		return ret;
 416	}
 417
 418	(*cnt)++;
 419	return 0;
 420}
 421
 422static int ebt_verify_pointers(const struct ebt_replace *repl,
 423			       struct ebt_table_info *newinfo)
 424{
 425	unsigned int limit = repl->entries_size;
 426	unsigned int valid_hooks = repl->valid_hooks;
 427	unsigned int offset = 0;
 428	int i;
 429
 430	for (i = 0; i < NF_BR_NUMHOOKS; i++)
 431		newinfo->hook_entry[i] = NULL;
 432
 433	newinfo->entries_size = repl->entries_size;
 434	newinfo->nentries = repl->nentries;
 435
 436	while (offset < limit) {
 437		size_t left = limit - offset;
 438		struct ebt_entry *e = (void *)newinfo->entries + offset;
 439
 440		if (left < sizeof(unsigned int))
 441			break;
 442
 443		for (i = 0; i < NF_BR_NUMHOOKS; i++) {
 444			if ((valid_hooks & (1 << i)) == 0)
 445				continue;
 446			if ((char __user *)repl->hook_entry[i] ==
 447			     repl->entries + offset)
 448				break;
 449		}
 450
 451		if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
 452			if (e->bitmask != 0) {
 453				/* we make userspace set this right,
 454				   so there is no misunderstanding */
 455				BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
 456					 "in distinguisher\n");
 457				return -EINVAL;
 458			}
 459			if (i != NF_BR_NUMHOOKS)
 460				newinfo->hook_entry[i] = (struct ebt_entries *)e;
 461			if (left < sizeof(struct ebt_entries))
 462				break;
 463			offset += sizeof(struct ebt_entries);
 464		} else {
 465			if (left < sizeof(struct ebt_entry))
 466				break;
 467			if (left < e->next_offset)
 468				break;
 469			if (e->next_offset < sizeof(struct ebt_entry))
 470				return -EINVAL;
 471			offset += e->next_offset;
 472		}
 473	}
 474	if (offset != limit) {
 475		BUGPRINT("entries_size too small\n");
 476		return -EINVAL;
 477	}
 478
 479	/* check if all valid hooks have a chain */
 480	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
 481		if (!newinfo->hook_entry[i] &&
 482		   (valid_hooks & (1 << i))) {
 483			BUGPRINT("Valid hook without chain\n");
 484			return -EINVAL;
 485		}
 486	}
 487	return 0;
 488}
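/* What ebt_verify_pointers() walked is a flat blob holding two record
 * types: struct ebt_entries chain headers (bitmask == 0, one per base or
 * user defined chain), each followed by their struct ebt_entry rules
 * (bitmask has EBT_ENTRY_OR_ENTRIES set).  Sketch for one base chain with
 * two rules:
 *
 *	[ebt_entries][ebt_entry #0 ...][ebt_entry #1 ...]
 *	^ hook_entry[hook] points here
 *
 * Only the hook pointers and the next_offset chaining are checked here;
 * the contents of each entry are validated later by translate_table() and
 * ebt_check_entry().
 */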
 489
 490/*
 491 * this one is very careful, as it is the first function
 492 * to parse the userspace data
 493 */
 494static inline int
 495ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
 496   const struct ebt_table_info *newinfo,
 497   unsigned int *n, unsigned int *cnt,
 498   unsigned int *totalcnt, unsigned int *udc_cnt)
 499{
 500	int i;
 501
 502	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
 503		if ((void *)e == (void *)newinfo->hook_entry[i])
 504			break;
 505	}
 506	/* beginning of a new chain
 507	   if i == NF_BR_NUMHOOKS it must be a user defined chain */
 508	if (i != NF_BR_NUMHOOKS || !e->bitmask) {
 509		/* this checks if the previous chain has as many entries
 510		   as it said it has */
 511		if (*n != *cnt) {
 512			BUGPRINT("nentries does not equal the nr of entries "
 513				 "in the chain\n");
 514			return -EINVAL;
 515		}
 516		if (((struct ebt_entries *)e)->policy != EBT_DROP &&
 517		   ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
 518			/* only RETURN from udc */
 519			if (i != NF_BR_NUMHOOKS ||
 520			   ((struct ebt_entries *)e)->policy != EBT_RETURN) {
 521				BUGPRINT("bad policy\n");
 522				return -EINVAL;
 523			}
 524		}
 525		if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
 526			(*udc_cnt)++;
 527		if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
 528			BUGPRINT("counter_offset != totalcnt");
 529			return -EINVAL;
 530		}
 531		*n = ((struct ebt_entries *)e)->nentries;
 532		*cnt = 0;
 533		return 0;
 534	}
 535	/* a plain old entry, heh */
 536	if (sizeof(struct ebt_entry) > e->watchers_offset ||
 537	   e->watchers_offset > e->target_offset ||
 538	   e->target_offset >= e->next_offset) {
 539		BUGPRINT("entry offsets not in right order\n");
 540		return -EINVAL;
 541	}
 542	/* this is not checked anywhere else */
 543	if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
 544		BUGPRINT("target size too small\n");
 545		return -EINVAL;
 546	}
 547	(*cnt)++;
 548	(*totalcnt)++;
 549	return 0;
 550}
 551
 552struct ebt_cl_stack
 553{
 554	struct ebt_chainstack cs;
 555	int from;
 556	unsigned int hookmask;
 557};
 558
 559/*
 560 * we need these positions to check that the jumps to a different part of the
 561 * entries is a jump to the beginning of a new chain.
 562 */
 563static inline int
 564ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
 565   unsigned int *n, struct ebt_cl_stack *udc)
 566{
 567	int i;
 568
 569	/* we're only interested in chain starts */
 570	if (e->bitmask)
 571		return 0;
 572	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
 573		if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
 574			break;
 575	}
 576	/* only care about udc */
 577	if (i != NF_BR_NUMHOOKS)
 578		return 0;
 579
 580	udc[*n].cs.chaininfo = (struct ebt_entries *)e;
 581	/* these initialisations are depended on later in check_chainloops() */
 582	udc[*n].cs.n = 0;
 583	udc[*n].hookmask = 0;
 584
 585	(*n)++;
 586	return 0;
 587}
 588
 589static inline int
 590ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
 591{
 592	struct xt_mtdtor_param par;
 593
 594	if (i && (*i)-- == 0)
 595		return 1;
 596
 597	par.net       = net;
 598	par.match     = m->u.match;
 599	par.matchinfo = m->data;
 600	par.family    = NFPROTO_BRIDGE;
 601	if (par.match->destroy != NULL)
 602		par.match->destroy(&par);
 603	module_put(par.match->me);
 604	return 0;
 605}
 606
 607static inline int
 608ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
 609{
 610	struct xt_tgdtor_param par;
 611
 612	if (i && (*i)-- == 0)
 613		return 1;
 614
 615	par.net      = net;
 616	par.target   = w->u.watcher;
 617	par.targinfo = w->data;
 618	par.family   = NFPROTO_BRIDGE;
 619	if (par.target->destroy != NULL)
 620		par.target->destroy(&par);
 621	module_put(par.target->me);
 622	return 0;
 623}
 624
 625static inline int
 626ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
 627{
 628	struct xt_tgdtor_param par;
 629	struct ebt_entry_target *t;
 630
 631	if (e->bitmask == 0)
 632		return 0;
 633	/* we're done */
 634	if (cnt && (*cnt)-- == 0)
 635		return 1;
 636	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
 637	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
 638	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
 639
 640	par.net      = net;
 641	par.target   = t->u.target;
 642	par.targinfo = t->data;
 643	par.family   = NFPROTO_BRIDGE;
 644	if (par.target->destroy != NULL)
 645		par.target->destroy(&par);
 646	module_put(par.target->me);
 647	return 0;
 648}
 649
 650static inline int
 651ebt_check_entry(struct ebt_entry *e, struct net *net,
 652   const struct ebt_table_info *newinfo,
 653   const char *name, unsigned int *cnt,
 654   struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
 655{
 656	struct ebt_entry_target *t;
 657	struct xt_target *target;
 658	unsigned int i, j, hook = 0, hookmask = 0;
 659	size_t gap;
 660	int ret;
 661	struct xt_mtchk_param mtpar;
 662	struct xt_tgchk_param tgpar;
 663
 664	/* don't mess with the struct ebt_entries */
 665	if (e->bitmask == 0)
 666		return 0;
 667
 668	if (e->bitmask & ~EBT_F_MASK) {
 669		BUGPRINT("Unknown flag for bitmask\n");
 670		return -EINVAL;
 671	}
 672	if (e->invflags & ~EBT_INV_MASK) {
 673		BUGPRINT("Unknown flag for inv bitmask\n");
 674		return -EINVAL;
 675	}
 676	if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
 677		BUGPRINT("NOPROTO & 802_3 not allowed\n");
 678		return -EINVAL;
 679	}
 680	/* what hook do we belong to? */
 681	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
 682		if (!newinfo->hook_entry[i])
 683			continue;
 684		if ((char *)newinfo->hook_entry[i] < (char *)e)
 685			hook = i;
 686		else
 687			break;
 688	}
 689	/* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
 690	   a base chain */
 691	if (i < NF_BR_NUMHOOKS)
 692		hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
 693	else {
 694		for (i = 0; i < udc_cnt; i++)
 695			if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
 696				break;
 697		if (i == 0)
 698			hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
 699		else
 700			hookmask = cl_s[i - 1].hookmask;
 701	}
 702	i = 0;
 703
 704	mtpar.net	= tgpar.net       = net;
 705	mtpar.table     = tgpar.table     = name;
 706	mtpar.entryinfo = tgpar.entryinfo = e;
 707	mtpar.hook_mask = tgpar.hook_mask = hookmask;
 708	mtpar.family    = tgpar.family    = NFPROTO_BRIDGE;
 709	ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
 710	if (ret != 0)
 711		goto cleanup_matches;
 712	j = 0;
 713	ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
 714	if (ret != 0)
 715		goto cleanup_watchers;
 716	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
 717	gap = e->next_offset - e->target_offset;
 718
 719	target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
 720	if (IS_ERR(target)) {
 721		ret = PTR_ERR(target);
 722		goto cleanup_watchers;
 723	}
 724
 725	t->u.target = target;
 726	if (t->u.target == &ebt_standard_target) {
 727		if (gap < sizeof(struct ebt_standard_target)) {
 728			BUGPRINT("Standard target size too big\n");
 729			ret = -EFAULT;
 730			goto cleanup_watchers;
 731		}
 732		if (((struct ebt_standard_target *)t)->verdict <
 733		   -NUM_STANDARD_TARGETS) {
 734			BUGPRINT("Invalid standard target\n");
 735			ret = -EFAULT;
 736			goto cleanup_watchers;
 737		}
 738	} else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
 739		module_put(t->u.target->me);
 740		ret = -EFAULT;
 741		goto cleanup_watchers;
 742	}
 743
 744	tgpar.target   = target;
 745	tgpar.targinfo = t->data;
 746	ret = xt_check_target(&tgpar, t->target_size,
 747	      e->ethproto, e->invflags & EBT_IPROTO);
 748	if (ret < 0) {
 749		module_put(target->me);
 750		goto cleanup_watchers;
 751	}
 752	(*cnt)++;
 753	return 0;
 754cleanup_watchers:
 755	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
 756cleanup_matches:
 757	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
 758	return ret;
 759}
 760
 761/*
 762 * checks for loops and sets the hook mask for udc
 763 * the hook mask for udc tells us from which base chains the udc can be
 764 * accessed. This mask is a parameter to the check() functions of the extensions
 765 */
 766static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
 767   unsigned int udc_cnt, unsigned int hooknr, char *base)
 768{
 769	int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
 770	const struct ebt_entry *e = (struct ebt_entry *)chain->data;
 771	const struct ebt_entry_target *t;
 772
 773	while (pos < nentries || chain_nr != -1) {
 774		/* end of udc, go back one 'recursion' step */
 775		if (pos == nentries) {
 776			/* put back values of the time when this chain was called */
 777			e = cl_s[chain_nr].cs.e;
 778			if (cl_s[chain_nr].from != -1)
 779				nentries =
 780				cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
 781			else
 782				nentries = chain->nentries;
 783			pos = cl_s[chain_nr].cs.n;
 784			/* make sure we won't see a loop that isn't one */
 785			cl_s[chain_nr].cs.n = 0;
 786			chain_nr = cl_s[chain_nr].from;
 787			if (pos == nentries)
 788				continue;
 789		}
 790		t = (struct ebt_entry_target *)
 791		   (((char *)e) + e->target_offset);
 792		if (strcmp(t->u.name, EBT_STANDARD_TARGET))
 793			goto letscontinue;
 794		if (e->target_offset + sizeof(struct ebt_standard_target) >
 795		   e->next_offset) {
 796			BUGPRINT("Standard target size too big\n");
 797			return -1;
 798		}
 799		verdict = ((struct ebt_standard_target *)t)->verdict;
 800		if (verdict >= 0) { /* jump to another chain */
 801			struct ebt_entries *hlp2 =
 802			   (struct ebt_entries *)(base + verdict);
 803			for (i = 0; i < udc_cnt; i++)
 804				if (hlp2 == cl_s[i].cs.chaininfo)
 805					break;
 806			/* bad destination or loop */
 807			if (i == udc_cnt) {
 808				BUGPRINT("bad destination\n");
 809				return -1;
 810			}
 811			if (cl_s[i].cs.n) {
 812				BUGPRINT("loop\n");
 813				return -1;
 814			}
 815			if (cl_s[i].hookmask & (1 << hooknr))
 816				goto letscontinue;
 817			/* this can't be 0, so the loop test is correct */
 818			cl_s[i].cs.n = pos + 1;
 819			pos = 0;
 820			cl_s[i].cs.e = ebt_next_entry(e);
 821			e = (struct ebt_entry *)(hlp2->data);
 822			nentries = hlp2->nentries;
 823			cl_s[i].from = chain_nr;
 824			chain_nr = i;
 825			/* this udc is accessible from the base chain for hooknr */
 826			cl_s[i].hookmask |= (1 << hooknr);
 827			continue;
 828		}
 829letscontinue:
 830		e = ebt_next_entry(e);
 831		pos++;
 832	}
 833	return 0;
 834}
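/* Worked example for the loop check above: say user defined chain A jumps
 * to chain B and B jumps back to A.  Entering a chain sets its cs.n to
 * pos + 1 (never zero), so the second jump to A finds cl_s[A].cs.n != 0
 * while A is still being traversed and the function returns -1 ("loop").
 * cl_s[i].hookmask meanwhile collects the base chains from which chain i
 * is reachable; ebt_check_entry() hands that mask to the extensions'
 * checkentry() via hook_mask.
 */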
 835
 836/* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
 837static int translate_table(struct net *net, const char *name,
 838			   struct ebt_table_info *newinfo)
 839{
 840	unsigned int i, j, k, udc_cnt;
 841	int ret;
 842	struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */
 843
 844	i = 0;
 845	while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
 846		i++;
 847	if (i == NF_BR_NUMHOOKS) {
 848		BUGPRINT("No valid hooks specified\n");
 849		return -EINVAL;
 850	}
 851	if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
 852		BUGPRINT("Chains don't start at beginning\n");
 853		return -EINVAL;
 854	}
 855	/* make sure chains are ordered after each other in the same order
 856	   as their corresponding hooks */
 857	for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
 858		if (!newinfo->hook_entry[j])
 859			continue;
 860		if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
 861			BUGPRINT("Hook order must be followed\n");
 862			return -EINVAL;
 863		}
 864		i = j;
 865	}
 866
 867	/* do some early checkings and initialize some things */
 868	i = 0; /* holds the expected nr. of entries for the chain */
 869	j = 0; /* holds the up to now counted entries for the chain */
 870	k = 0; /* holds the total nr. of entries, should equal
 871		  newinfo->nentries afterwards */
 872	udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
 873	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
 874	   ebt_check_entry_size_and_hooks, newinfo,
 875	   &i, &j, &k, &udc_cnt);
 876
 877	if (ret != 0)
 878		return ret;
 879
 880	if (i != j) {
 881		BUGPRINT("nentries does not equal the nr of entries in the "
 882			 "(last) chain\n");
 883		return -EINVAL;
 884	}
 885	if (k != newinfo->nentries) {
 886		BUGPRINT("Total nentries is wrong\n");
 887		return -EINVAL;
 888	}
 889
 890	/* get the location of the udc, put them in an array
 891	   while we're at it, allocate the chainstack */
 892	if (udc_cnt) {
 893		/* this will get free'd in do_replace()/ebt_register_table()
 894	   if an error occurs */
 895		newinfo->chainstack =
 896			vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
 897		if (!newinfo->chainstack)
 898			return -ENOMEM;
 899		for_each_possible_cpu(i) {
 900			newinfo->chainstack[i] =
 901			  vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
 902			if (!newinfo->chainstack[i]) {
 903				while (i)
 904					vfree(newinfo->chainstack[--i]);
 905				vfree(newinfo->chainstack);
 906				newinfo->chainstack = NULL;
 907				return -ENOMEM;
 908			}
 909		}
 910
 911		cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
 912		if (!cl_s)
 913			return -ENOMEM;
 914		i = 0; /* the i'th udc */
 915		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
 916		   ebt_get_udc_positions, newinfo, &i, cl_s);
 917		/* sanity check */
 918		if (i != udc_cnt) {
 919			BUGPRINT("i != udc_cnt\n");
 920			vfree(cl_s);
 921			return -EFAULT;
 922		}
 923	}
 924
 925	/* Check for loops */
 926	for (i = 0; i < NF_BR_NUMHOOKS; i++)
 927		if (newinfo->hook_entry[i])
 928			if (check_chainloops(newinfo->hook_entry[i],
 929			   cl_s, udc_cnt, i, newinfo->entries)) {
 930				vfree(cl_s);
 931				return -EINVAL;
 932			}
 933
 934	/* we now know the following (along with E=mc²):
 935	   - the nr of entries in each chain is right
 936	   - the size of the allocated space is right
 937	   - all valid hooks have a corresponding chain
 938	   - there are no loops
 939	   - wrong data can still be on the level of a single entry
 940	   - could be there are jumps to places that are not the
 941	     beginning of a chain. This can only occur in chains that
 942	     are not accessible from any base chains, so we don't care. */
 
 943
 944	/* used to know what we need to clean up if something goes wrong */
 945	i = 0;
 946	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
 947	   ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
 948	if (ret != 0) {
 949		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
 950				  ebt_cleanup_entry, net, &i);
 951	}
 952	vfree(cl_s);
 953	return ret;
 954}
 955
 956/* called under write_lock */
 957static void get_counters(const struct ebt_counter *oldcounters,
 958   struct ebt_counter *counters, unsigned int nentries)
 959{
 960	int i, cpu;
 961	struct ebt_counter *counter_base;
 962
 963	/* counters of cpu 0 */
 964	memcpy(counters, oldcounters,
 965	       sizeof(struct ebt_counter) * nentries);
 966
 967	/* add other counters to those of cpu 0 */
 968	for_each_possible_cpu(cpu) {
 969		if (cpu == 0)
 970			continue;
 971		counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
 972		for (i = 0; i < nentries; i++) {
 973			counters[i].pcnt += counter_base[i].pcnt;
 974			counters[i].bcnt += counter_base[i].bcnt;
 975		}
 976	}
 977}
 978
 979static int do_replace_finish(struct net *net, struct ebt_replace *repl,
 980			      struct ebt_table_info *newinfo)
 981{
 982	int ret, i;
 983	struct ebt_counter *counterstmp = NULL;
 984	/* used to be able to unlock earlier */
 985	struct ebt_table_info *table;
 986	struct ebt_table *t;
 987
 988	/* the user wants counters back
 989	   the check on the size is done later, when we have the lock */
 990	if (repl->num_counters) {
 991		unsigned long size = repl->num_counters * sizeof(*counterstmp);
 992		counterstmp = vmalloc(size);
 993		if (!counterstmp)
 994			return -ENOMEM;
 995	}
 996
 997	newinfo->chainstack = NULL;
 998	ret = ebt_verify_pointers(repl, newinfo);
 999	if (ret != 0)
1000		goto free_counterstmp;
1001
1002	ret = translate_table(net, repl->name, newinfo);
1003
1004	if (ret != 0)
1005		goto free_counterstmp;
1006
1007	t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
1008	if (!t) {
1009		ret = -ENOENT;
1010		goto free_iterate;
1011	}
1012
1013	/* the table doesn't like it */
1014	if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
1015		goto free_unlock;
1016
1017	if (repl->num_counters && repl->num_counters != t->private->nentries) {
1018		BUGPRINT("Wrong nr. of counters requested\n");
1019		ret = -EINVAL;
1020		goto free_unlock;
1021	}
1022
1023	/* we have the mutex lock, so no danger in reading this pointer */
1024	table = t->private;
1025	/* make sure the table can only be rmmod'ed if it contains no rules */
1026	if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
1027		ret = -ENOENT;
1028		goto free_unlock;
1029	} else if (table->nentries && !newinfo->nentries)
1030		module_put(t->me);
1031	/* we need an atomic snapshot of the counters */
1032	write_lock_bh(&t->lock);
1033	if (repl->num_counters)
1034		get_counters(t->private->counters, counterstmp,
1035		   t->private->nentries);
1036
1037	t->private = newinfo;
1038	write_unlock_bh(&t->lock);
1039	mutex_unlock(&ebt_mutex);
1040	/* so, a user can change the chains while having messed up her counter
1041	   allocation. Only reason why this is done is because this way the lock
1042	   is held only once, while this doesn't bring the kernel into a
1043	   dangerous state. */
1044	if (repl->num_counters &&
1045	   copy_to_user(repl->counters, counterstmp,
1046	   repl->num_counters * sizeof(struct ebt_counter))) {
1047		/* Silent error, can't fail, new table is already in place */
1048		net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n");
1049	}
1050
1051	/* decrease module count and free resources */
1052	EBT_ENTRY_ITERATE(table->entries, table->entries_size,
1053			  ebt_cleanup_entry, net, NULL);
1054
1055	vfree(table->entries);
1056	if (table->chainstack) {
1057		for_each_possible_cpu(i)
1058			vfree(table->chainstack[i]);
1059		vfree(table->chainstack);
1060	}
1061	vfree(table);
 
1062
1063	vfree(counterstmp);
 
1064	return ret;
1065
1066free_unlock:
1067	mutex_unlock(&ebt_mutex);
1068free_iterate:
1069	EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
1070			  ebt_cleanup_entry, net, NULL);
1071free_counterstmp:
1072	vfree(counterstmp);
1073	/* can be initialized in translate_table() */
1074	if (newinfo->chainstack) {
1075		for_each_possible_cpu(i)
1076			vfree(newinfo->chainstack[i]);
1077		vfree(newinfo->chainstack);
1078	}
1079	return ret;
1080}
1081
1082/* replace the table */
1083static int do_replace(struct net *net, const void __user *user,
1084		      unsigned int len)
1085{
1086	int ret, countersize;
1087	struct ebt_table_info *newinfo;
1088	struct ebt_replace tmp;
1089
1090	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1091		return -EFAULT;
1092
1093	if (len != sizeof(tmp) + tmp.entries_size) {
1094		BUGPRINT("Wrong len argument\n");
1095		return -EINVAL;
1096	}
1097
1098	if (tmp.entries_size == 0) {
1099		BUGPRINT("Entries_size never zero\n");
1100		return -EINVAL;
1101	}
1102	/* overflow check */
1103	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
1104			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
1105		return -ENOMEM;
1106	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
1107		return -ENOMEM;
1108
1109	tmp.name[sizeof(tmp.name) - 1] = 0;
1110
1111	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1112	newinfo = vmalloc(sizeof(*newinfo) + countersize);
1113	if (!newinfo)
1114		return -ENOMEM;
1115
1116	if (countersize)
1117		memset(newinfo->counters, 0, countersize);
1118
1119	newinfo->entries = vmalloc(tmp.entries_size);
1120	if (!newinfo->entries) {
1121		ret = -ENOMEM;
1122		goto free_newinfo;
1123	}
1124	if (copy_from_user(
1125	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
1126		BUGPRINT("Couldn't copy entries from userspace\n");
1127		ret = -EFAULT;
1128		goto free_entries;
1129	}
1130
1131	ret = do_replace_finish(net, &tmp, newinfo);
1132	if (ret == 0)
1133		return ret;
1134free_entries:
1135	vfree(newinfo->entries);
1136free_newinfo:
1137	vfree(newinfo);
1138	return ret;
1139}
1140
1141struct ebt_table *
1142ebt_register_table(struct net *net, const struct ebt_table *input_table)
1143{
1144	struct ebt_table_info *newinfo;
1145	struct ebt_table *t, *table;
1146	struct ebt_replace_kernel *repl;
1147	int ret, i, countersize;
1148	void *p;
1149
1150	if (input_table == NULL || (repl = input_table->table) == NULL ||
1151	    repl->entries == NULL || repl->entries_size == 0 ||
1152	    repl->counters != NULL || input_table->private != NULL) {
1153		BUGPRINT("Bad table data for ebt_register_table!!!\n");
1154		return ERR_PTR(-EINVAL);
1155	}
1156
1157	/* Don't add one table to multiple lists. */
1158	table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
1159	if (!table) {
1160		ret = -ENOMEM;
1161		goto out;
1162	}
1163
1164	countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
1165	newinfo = vmalloc(sizeof(*newinfo) + countersize);
1166	ret = -ENOMEM;
1167	if (!newinfo)
1168		goto free_table;
1169
1170	p = vmalloc(repl->entries_size);
1171	if (!p)
1172		goto free_newinfo;
1173
1174	memcpy(p, repl->entries, repl->entries_size);
1175	newinfo->entries = p;
1176
1177	newinfo->entries_size = repl->entries_size;
1178	newinfo->nentries = repl->nentries;
1179
1180	if (countersize)
1181		memset(newinfo->counters, 0, countersize);
1182
1183	/* fill in newinfo and parse the entries */
1184	newinfo->chainstack = NULL;
1185	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1186		if ((repl->valid_hooks & (1 << i)) == 0)
1187			newinfo->hook_entry[i] = NULL;
1188		else
1189			newinfo->hook_entry[i] = p +
1190				((char *)repl->hook_entry[i] - repl->entries);
1191	}
1192	ret = translate_table(net, repl->name, newinfo);
1193	if (ret != 0) {
1194		BUGPRINT("Translate_table failed\n");
1195		goto free_chainstack;
1196	}
1197
1198	if (table->check && table->check(newinfo, table->valid_hooks)) {
1199		BUGPRINT("The table doesn't like its own initial data, lol\n");
1200		ret = -EINVAL;
1201		goto free_chainstack;
1202	}
1203
1204	table->private = newinfo;
1205	rwlock_init(&table->lock);
1206	ret = mutex_lock_interruptible(&ebt_mutex);
1207	if (ret != 0)
1208		goto free_chainstack;
1209
1210	list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
1211		if (strcmp(t->name, table->name) == 0) {
1212			ret = -EEXIST;
1213			BUGPRINT("Table name already exists\n");
1214			goto free_unlock;
1215		}
1216	}
1217
1218	/* Hold a reference count if the chains aren't empty */
1219	if (newinfo->nentries && !try_module_get(table->me)) {
1220		ret = -ENOENT;
1221		goto free_unlock;
1222	}
1223	list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
1224	mutex_unlock(&ebt_mutex);
1225	return table;
1226free_unlock:
1227	mutex_unlock(&ebt_mutex);
1228free_chainstack:
1229	if (newinfo->chainstack) {
1230		for_each_possible_cpu(i)
1231			vfree(newinfo->chainstack[i]);
1232		vfree(newinfo->chainstack);
1233	}
1234	vfree(newinfo->entries);
1235free_newinfo:
1236	vfree(newinfo);
1237free_table:
1238	kfree(table);
1239out:
1240	return ERR_PTR(ret);
1241}
1242
1243void ebt_unregister_table(struct net *net, struct ebt_table *table)
1244{
1245	int i;
1246
1247	if (!table) {
1248		BUGPRINT("Request to unregister NULL table!!!\n");
1249		return;
1250	}
1251	mutex_lock(&ebt_mutex);
1252	list_del(&table->list);
1253	mutex_unlock(&ebt_mutex);
1254	EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
1255			  ebt_cleanup_entry, net, NULL);
1256	if (table->private->nentries)
1257		module_put(table->me);
1258	vfree(table->private->entries);
1259	if (table->private->chainstack) {
1260		for_each_possible_cpu(i)
1261			vfree(table->private->chainstack[i]);
1262		vfree(table->private->chainstack);
1263	}
1264	vfree(table->private);
1265	kfree(table);
1266}
1267
1268/* userspace just supplied us with counters */
1269static int do_update_counters(struct net *net, const char *name,
1270				struct ebt_counter __user *counters,
1271				unsigned int num_counters,
1272				const void __user *user, unsigned int len)
1273{
1274	int i, ret;
1275	struct ebt_counter *tmp;
1276	struct ebt_table *t;
1277
1278	if (num_counters == 0)
1279		return -EINVAL;
1280
1281	tmp = vmalloc(num_counters * sizeof(*tmp));
1282	if (!tmp)
1283		return -ENOMEM;
1284
1285	t = find_table_lock(net, name, &ret, &ebt_mutex);
1286	if (!t)
1287		goto free_tmp;
1288
1289	if (num_counters != t->private->nentries) {
1290		BUGPRINT("Wrong nr of counters\n");
1291		ret = -EINVAL;
1292		goto unlock_mutex;
1293	}
1294
1295	if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
1296		ret = -EFAULT;
1297		goto unlock_mutex;
1298	}
1299
1300	/* we want an atomic add of the counters */
1301	write_lock_bh(&t->lock);
1302
1303	/* we add to the counters of the first cpu */
1304	for (i = 0; i < num_counters; i++) {
1305		t->private->counters[i].pcnt += tmp[i].pcnt;
1306		t->private->counters[i].bcnt += tmp[i].bcnt;
1307	}
1308
1309	write_unlock_bh(&t->lock);
1310	ret = 0;
1311unlock_mutex:
1312	mutex_unlock(&ebt_mutex);
1313free_tmp:
1314	vfree(tmp);
1315	return ret;
1316}
1317
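/* EBT_SO_SET_COUNTERS entry point: the sockopt length must be exactly the
 * ebt_replace header plus num_counters ebt_counter slots, i.e.
 * len == sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter).
 */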
1318static int update_counters(struct net *net, const void __user *user,
1319			    unsigned int len)
1320{
1321	struct ebt_replace hlp;
1322
1323	if (copy_from_user(&hlp, user, sizeof(hlp)))
1324		return -EFAULT;
1325
1326	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1327		return -EINVAL;
1328
1329	return do_update_counters(net, hlp.name, hlp.counters,
1330				hlp.num_counters, user, len);
1331}
1332
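/* Helpers for copying a table back to userspace: the kernel keeps live
 * xt_match/xt_target pointers inside each entry, so the name fields are
 * rewritten from the bound extensions before entries reach userspace.
 */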
1333static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1334    const char *base, char __user *ubase)
1335{
1336	char __user *hlp = ubase + ((char *)m - base);
1337	char name[EBT_FUNCTION_MAXNAMELEN] = {};
1338
1339	/* ebtables expects 32-byte names, but xt_match names are only 29 bytes
1340	 * long; copy 29 bytes and fill the remaining bytes with zeroes. */
1341	strlcpy(name, m->u.match->name, sizeof(name));
1342	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
1343		return -EFAULT;
1344	return 0;
1345}
1346
1347static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1348    const char *base, char __user *ubase)
1349{
1350	char __user *hlp = ubase + ((char *)w - base);
1351	char name[EBT_FUNCTION_MAXNAMELEN] = {};
1352
1353	strlcpy(name, w->u.watcher->name, sizeof(name));
1354	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
1355		return -EFAULT;
1356	return 0;
1357}
1358
1359static inline int
1360ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
1361{
1362	int ret;
1363	char __user *hlp;
1364	const struct ebt_entry_target *t;
1365	char name[EBT_FUNCTION_MAXNAMELEN] = {};
1366
1367	if (e->bitmask == 0)
1368		return 0;
1369
1370	hlp = ubase + (((char *)e + e->target_offset) - base);
1371	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
1372
1373	ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
1374	if (ret != 0)
1375		return ret;
1376	ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
1377	if (ret != 0)
1378		return ret;
1379	strlcpy(name, t->u.target->name, sizeof(name));
1380	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
1381		return -EFAULT;
1382	return 0;
1383}
1384
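/* Snapshot the counters into a temporary buffer under the table write lock
 * (get_counters() folds the per-cpu counters into it) and copy the result
 * to userspace; num_counters == 0 means userspace does not want them.
 */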
1385static int copy_counters_to_user(struct ebt_table *t,
1386				  const struct ebt_counter *oldcounters,
1387				  void __user *user, unsigned int num_counters,
1388				  unsigned int nentries)
1389{
1390	struct ebt_counter *counterstmp;
1391	int ret = 0;
1392
1393	/* userspace might not need the counters */
1394	if (num_counters == 0)
1395		return 0;
1396
1397	if (num_counters != nentries) {
1398		BUGPRINT("Num_counters wrong\n");
1399		return -EINVAL;
1400	}
1401
1402	counterstmp = vmalloc(nentries * sizeof(*counterstmp));
1403	if (!counterstmp)
1404		return -ENOMEM;
1405
1406	write_lock_bh(&t->lock);
1407	get_counters(oldcounters, counterstmp, nentries);
1408	write_unlock_bh(&t->lock);
1409
1410	if (copy_to_user(user, counterstmp,
1411	   nentries * sizeof(struct ebt_counter)))
1412		ret = -EFAULT;
1413	vfree(counterstmp);
1414	return ret;
1415}
1416
1417/* called with ebt_mutex locked */
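/* EBT_SO_GET_ENTRIES returns the live table (t->private), while
 * EBT_SO_GET_INIT_ENTRIES returns the initial template the table was
 * registered with (t->table); both paths check *len, nentries and
 * entries_size against what userspace announced in its ebt_replace.
 */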
1418static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1419    const int *len, int cmd)
1420{
1421	struct ebt_replace tmp;
1422	const struct ebt_counter *oldcounters;
1423	unsigned int entries_size, nentries;
1424	int ret;
1425	char *entries;
1426
1427	if (cmd == EBT_SO_GET_ENTRIES) {
1428		entries_size = t->private->entries_size;
1429		nentries = t->private->nentries;
1430		entries = t->private->entries;
1431		oldcounters = t->private->counters;
1432	} else {
1433		entries_size = t->table->entries_size;
1434		nentries = t->table->nentries;
1435		entries = t->table->entries;
1436		oldcounters = t->table->counters;
1437	}
1438
1439	if (copy_from_user(&tmp, user, sizeof(tmp)))
1440		return -EFAULT;
1441
1442	if (*len != sizeof(struct ebt_replace) + entries_size +
1443	   (tmp.num_counters ? nentries * sizeof(struct ebt_counter) : 0))
1444		return -EINVAL;
1445
1446	if (tmp.nentries != nentries) {
1447		BUGPRINT("Nentries wrong\n");
1448		return -EINVAL;
1449	}
1450
1451	if (tmp.entries_size != entries_size) {
1452		BUGPRINT("Wrong size\n");
1453		return -EINVAL;
1454	}
1455
1456	ret = copy_counters_to_user(t, oldcounters, tmp.counters,
1457					tmp.num_counters, nentries);
1458	if (ret)
1459		return ret;
1460
1461	if (copy_to_user(tmp.entries, entries, entries_size)) {
1462		BUGPRINT("Couldn't copy entries to userspace\n");
1463		return -EFAULT;
1464	}
1465	/* set the match/watcher/target names right */
1466	return EBT_ENTRY_ITERATE(entries, entries_size,
1467	   ebt_make_names, entries, tmp.entries);
1468}
1469
1470static int do_ebt_set_ctl(struct sock *sk,
1471	int cmd, void __user *user, unsigned int len)
1472{
1473	int ret;
1474	struct net *net = sock_net(sk);
1475
1476	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1477		return -EPERM;
1478
1479	switch (cmd) {
1480	case EBT_SO_SET_ENTRIES:
1481		ret = do_replace(net, user, len);
1482		break;
1483	case EBT_SO_SET_COUNTERS:
1484		ret = update_counters(net, user, len);
1485		break;
1486	default:
1487		ret = -EINVAL;
1488	}
1489	return ret;
1490}
1491
1492static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1493{
1494	int ret;
1495	struct ebt_replace tmp;
1496	struct ebt_table *t;
1497	struct net *net = sock_net(sk);
1498
1499	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1500		return -EPERM;
1501
1502	if (copy_from_user(&tmp, user, sizeof(tmp)))
1503		return -EFAULT;
1504
1505	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
1506	if (!t)
1507		return ret;
1508
1509	switch (cmd) {
1510	case EBT_SO_GET_INFO:
1511	case EBT_SO_GET_INIT_INFO:
1512		if (*len != sizeof(struct ebt_replace)) {
1513			ret = -EINVAL;
1514			mutex_unlock(&ebt_mutex);
1515			break;
1516		}
1517		if (cmd == EBT_SO_GET_INFO) {
1518			tmp.nentries = t->private->nentries;
1519			tmp.entries_size = t->private->entries_size;
1520			tmp.valid_hooks = t->valid_hooks;
1521		} else {
1522			tmp.nentries = t->table->nentries;
1523			tmp.entries_size = t->table->entries_size;
1524			tmp.valid_hooks = t->table->valid_hooks;
1525		}
1526		mutex_unlock(&ebt_mutex);
1527		if (copy_to_user(user, &tmp, *len) != 0) {
1528			BUGPRINT("c2u Didn't work\n");
1529			ret = -EFAULT;
1530			break;
1531		}
1532		ret = 0;
1533		break;
1534
1535	case EBT_SO_GET_ENTRIES:
1536	case EBT_SO_GET_INIT_ENTRIES:
1537		ret = copy_everything_to_user(t, user, len, cmd);
1538		mutex_unlock(&ebt_mutex);
1539		break;
1540
1541	default:
1542		mutex_unlock(&ebt_mutex);
1543		ret = -EINVAL;
1544	}
1545
1546	return ret;
1547}
1548
1549#ifdef CONFIG_COMPAT
1550/* 32-bit userspace compatibility definitions. */
1551struct compat_ebt_replace {
1552	char name[EBT_TABLE_MAXNAMELEN];
1553	compat_uint_t valid_hooks;
1554	compat_uint_t nentries;
1555	compat_uint_t entries_size;
1556	/* start of the chains */
1557	compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
1558	/* nr of counters userspace expects back */
1559	compat_uint_t num_counters;
1560	/* where the kernel will put the old counters. */
1561	compat_uptr_t counters;
1562	compat_uptr_t entries;
1563};
1564
1565/* struct ebt_entry_match, _target and _watcher have same layout */
1566struct compat_ebt_entry_mwt {
1567	union {
1568		char name[EBT_FUNCTION_MAXNAMELEN];
1569		compat_uptr_t ptr;
1570	} u;
1571	compat_uint_t match_size;
1572	compat_uint_t data[0];
1573};
1574
1575/* account for possible padding between match_size and ->data */
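/* Both layouts start with a 32-byte name union followed by a 32-bit size
 * field; the difference comes purely from the trailing alignment (XT_ALIGN
 * vs. COMPAT_XT_ALIGN).  On x86-64, for instance, this typically works out
 * to 40 - 36 = 4 bytes, but the exact value is architecture-dependent.
 */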
1576static int ebt_compat_entry_padsize(void)
1577{
1578	BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
1579			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
1580	return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
1581			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
1582}
1583
1584static int ebt_compat_match_offset(const struct xt_match *match,
1585				   unsigned int userlen)
1586{
1587	/*
1588	 * ebt_among needs special handling. The kernel .matchsize is
1589	 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1590	 * value is expected.
1591	 * Example: userspace sends 4500, ebt_among.c wants 4504.
1592	 */
1593	if (unlikely(match->matchsize == -1))
1594		return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
1595	return xt_compat_match_offset(match);
1596}
1597
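/* Convert one kernel-side match back to the 32-bit layout: the advertised
 * match_size is shrunk by ebt_compat_match_offset(), and the extension's
 * own ->compat_to_user hook is used when it provides one, otherwise the
 * payload is copied verbatim.
 */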
1598static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1599				unsigned int *size)
1600{
1601	const struct xt_match *match = m->u.match;
1602	struct compat_ebt_entry_mwt __user *cm = *dstptr;
1603	int off = ebt_compat_match_offset(match, m->match_size);
1604	compat_uint_t msize = m->match_size - off;
1605
1606	BUG_ON(off >= m->match_size);
1607
1608	if (copy_to_user(cm->u.name, match->name,
1609	    strlen(match->name) + 1) || put_user(msize, &cm->match_size))
1610		return -EFAULT;
1611
1612	if (match->compat_to_user) {
1613		if (match->compat_to_user(cm->data, m->data))
1614			return -EFAULT;
1615	} else if (copy_to_user(cm->data, m->data, msize))
1616			return -EFAULT;
1617
1618	*size -= ebt_compat_entry_padsize() + off;
1619	*dstptr = cm->data;
1620	*dstptr += msize;
1621	return 0;
1622}
1623
1624static int compat_target_to_user(struct ebt_entry_target *t,
1625				 void __user **dstptr,
1626				 unsigned int *size)
1627{
1628	const struct xt_target *target = t->u.target;
1629	struct compat_ebt_entry_mwt __user *cm = *dstptr;
1630	int off = xt_compat_target_offset(target);
1631	compat_uint_t tsize = t->target_size - off;
1632
1633	BUG_ON(off >= t->target_size);
1634
1635	if (copy_to_user(cm->u.name, target->name,
1636	    strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
1637		return -EFAULT;
1638
1639	if (target->compat_to_user) {
1640		if (target->compat_to_user(cm->data, t->data))
1641			return -EFAULT;
1642	} else if (copy_to_user(cm->data, t->data, tsize))
1643		return -EFAULT;
1644
1645	*size -= ebt_compat_entry_padsize() + off;
1646	*dstptr = cm->data;
1647	*dstptr += tsize;
1648	return 0;
1649}
1650
1651static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1652				  void __user **dstptr,
1653				  unsigned int *size)
1654{
1655	return compat_target_to_user((struct ebt_entry_target *)w,
1656							dstptr, size);
1657}
1658
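/* Copy one ebt_entry (or an ebt_entries chain header when bitmask == 0) to
 * a 32-bit user buffer.  Matches, watchers and the target shrink during
 * conversion, so watchers_offset/target_offset/next_offset are recomputed
 * from the shrinkage (origsize - *size) and patched in with put_user().
 */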
1659static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
1660				unsigned int *size)
1661{
1662	struct ebt_entry_target *t;
1663	struct ebt_entry __user *ce;
1664	u32 watchers_offset, target_offset, next_offset;
1665	compat_uint_t origsize;
1666	int ret;
1667
1668	if (e->bitmask == 0) {
1669		if (*size < sizeof(struct ebt_entries))
1670			return -EINVAL;
1671		if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
1672			return -EFAULT;
1673
1674		*dstptr += sizeof(struct ebt_entries);
1675		*size -= sizeof(struct ebt_entries);
1676		return 0;
1677	}
1678
1679	if (*size < sizeof(*ce))
1680		return -EINVAL;
1681
1682	ce = (struct ebt_entry __user *)*dstptr;
1683	if (copy_to_user(ce, e, sizeof(*ce)))
1684		return -EFAULT;
1685
1686	origsize = *size;
1687	*dstptr += sizeof(*ce);
1688
1689	ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
1690	if (ret)
1691		return ret;
1692	watchers_offset = e->watchers_offset - (origsize - *size);
1693
1694	ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
1695	if (ret)
1696		return ret;
1697	target_offset = e->target_offset - (origsize - *size);
1698
1699	t = (struct ebt_entry_target *) ((char *) e + e->target_offset);
1700
1701	ret = compat_target_to_user(t, dstptr, size);
1702	if (ret)
1703		return ret;
1704	next_offset = e->next_offset - (origsize - *size);
1705
1706	if (put_user(watchers_offset, &ce->watchers_offset) ||
1707	    put_user(target_offset, &ce->target_offset) ||
1708	    put_user(next_offset, &ce->next_offset))
1709		return -EFAULT;
1710
1711	*size -= sizeof(*ce);
1712	return 0;
1713}
1714
1715static int compat_calc_match(struct ebt_entry_match *m, int *off)
1716{
1717	*off += ebt_compat_match_offset(m->u.match, m->match_size);
1718	*off += ebt_compat_entry_padsize();
1719	return 0;
1720}
1721
1722static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1723{
1724	*off += xt_compat_target_offset(w->u.watcher);
1725	*off += ebt_compat_entry_padsize();
1726	return 0;
1727}
1728
1729static int compat_calc_entry(const struct ebt_entry *e,
1730			     const struct ebt_table_info *info,
1731			     const void *base,
1732			     struct compat_ebt_replace *newinfo)
1733{
1734	const struct ebt_entry_target *t;
1735	unsigned int entry_offset;
1736	int off, ret, i;
1737
1738	if (e->bitmask == 0)
1739		return 0;
1740
1741	off = 0;
1742	entry_offset = (void *)e - base;
1743
1744	EBT_MATCH_ITERATE(e, compat_calc_match, &off);
1745	EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);
1746
1747	t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);
1748
1749	off += xt_compat_target_offset(t->u.target);
1750	off += ebt_compat_entry_padsize();
1751
1752	newinfo->entries_size -= off;
1753
1754	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
1755	if (ret)
1756		return ret;
1757
1758	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1759		const void *hookptr = info->hook_entry[i];
1760		if (info->hook_entry[i] &&
1761		    (e < (struct ebt_entry *)(base - hookptr))) {
1762			newinfo->hook_entry[i] -= off;
1763			pr_debug("0x%08X -> 0x%08X\n",
1764					newinfo->hook_entry[i] + off,
1765					newinfo->hook_entry[i]);
1766		}
1767	}
1768
1769	return 0;
1770}
1771
1772
1773static int compat_table_info(const struct ebt_table_info *info,
1774			     struct compat_ebt_replace *newinfo)
1775{
1776	unsigned int size = info->entries_size;
1777	const void *entries = info->entries;
1778
1779	newinfo->entries_size = size;
1780
1781	xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
1782	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1783							entries, newinfo);
1784}
1785
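/* 32-bit variant of copy_everything_to_user(): compat_table_info() first
 * computes the shrunken entries_size and the per-entry offset deltas, then
 * the entries are walked with compat_copy_entry_to_user() into the user
 * buffer at tmp.entries.
 */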
1786static int compat_copy_everything_to_user(struct ebt_table *t,
1787					  void __user *user, int *len, int cmd)
1788{
1789	struct compat_ebt_replace repl, tmp;
1790	struct ebt_counter *oldcounters;
1791	struct ebt_table_info tinfo;
1792	int ret;
1793	void __user *pos;
1794
1795	memset(&tinfo, 0, sizeof(tinfo));
1796
1797	if (cmd == EBT_SO_GET_ENTRIES) {
1798		tinfo.entries_size = t->private->entries_size;
1799		tinfo.nentries = t->private->nentries;
1800		tinfo.entries = t->private->entries;
1801		oldcounters = t->private->counters;
1802	} else {
1803		tinfo.entries_size = t->table->entries_size;
1804		tinfo.nentries = t->table->nentries;
1805		tinfo.entries = t->table->entries;
1806		oldcounters = t->table->counters;
1807	}
1808
1809	if (copy_from_user(&tmp, user, sizeof(tmp)))
1810		return -EFAULT;
1811
1812	if (tmp.nentries != tinfo.nentries ||
1813	   (tmp.num_counters && tmp.num_counters != tinfo.nentries))
1814		return -EINVAL;
1815
1816	memcpy(&repl, &tmp, sizeof(repl));
1817	if (cmd == EBT_SO_GET_ENTRIES)
1818		ret = compat_table_info(t->private, &repl);
1819	else
1820		ret = compat_table_info(&tinfo, &repl);
1821	if (ret)
1822		return ret;
1823
1824	if (*len != sizeof(tmp) + repl.entries_size +
1825	   (tmp.num_counters ? tinfo.nentries * sizeof(struct ebt_counter) : 0)) {
1826		pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
1827				*len, tinfo.entries_size, repl.entries_size);
1828		return -EINVAL;
1829	}
1830
1831	/* userspace might not need the counters */
1832	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
1833					tmp.num_counters, tinfo.nentries);
1834	if (ret)
1835		return ret;
1836
1837	pos = compat_ptr(tmp.entries);
1838	return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
1839			compat_copy_entry_to_user, &pos, &tmp.entries_size);
1840}
1841
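/* State for translating a 32-bit entries blob into native layout.  The
 * translation runs twice: a first pass with buf_kern_start == NULL only
 * computes the required size (see the count_only label below), a second
 * pass with a real buffer performs the actual copy.
 */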
1842struct ebt_entries_buf_state {
1843	char *buf_kern_start;	/* kernel buffer to copy (translated) data to */
1844	u32 buf_kern_len;	/* total size of kernel buffer */
1845	u32 buf_kern_offset;	/* amount of data copied so far */
1846	u32 buf_user_offset;	/* read position in userspace buffer */
1847};
1848
1849static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1850{
1851	state->buf_kern_offset += sz;
1852	return state->buf_kern_offset >= sz ? 0 : -EINVAL;
1853}
1854
1855static int ebt_buf_add(struct ebt_entries_buf_state *state,
1856		       void *data, unsigned int sz)
1857{
1858	if (state->buf_kern_start == NULL)
1859		goto count_only;
1860
1861	BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
1862
1863	memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1864
1865 count_only:
1866	state->buf_user_offset += sz;
1867	return ebt_buf_count(state, sz);
1868}
1869
1870static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1871{
1872	char *b = state->buf_kern_start;
1873
1874	BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
1875
1876	if (b != NULL && sz > 0)
1877		memset(b + state->buf_kern_offset, 0, sz);
1878	/* do not adjust ->buf_user_offset here, we added kernel-side padding */
1879	return ebt_buf_count(state, sz);
1880}
1881
1882enum compat_mwt {
1883	EBT_COMPAT_MATCH,
1884	EBT_COMPAT_WATCHER,
1885	EBT_COMPAT_TARGET,
1886};
1887
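/* Translate a single 32-bit match, watcher or target into native layout:
 * look up the extension by name, let its ->compat_from_user hook (or a
 * plain memcpy) fill the destination, zero the XT_ALIGN() tail padding,
 * and return the translated size (userspace size plus the offset).
 */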
1888static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1889				enum compat_mwt compat_mwt,
1890				struct ebt_entries_buf_state *state,
1891				const unsigned char *base)
1892{
1893	char name[EBT_FUNCTION_MAXNAMELEN];
1894	struct xt_match *match;
1895	struct xt_target *wt;
1896	void *dst = NULL;
1897	int off, pad = 0;
1898	unsigned int size_kern, match_size = mwt->match_size;
1899
1900	strlcpy(name, mwt->u.name, sizeof(name));
1901
1902	if (state->buf_kern_start)
1903		dst = state->buf_kern_start + state->buf_kern_offset;
1904
1905	switch (compat_mwt) {
1906	case EBT_COMPAT_MATCH:
1907		match = xt_request_find_match(NFPROTO_BRIDGE, name, 0);
1908		if (IS_ERR(match))
1909			return PTR_ERR(match);
1910
1911		off = ebt_compat_match_offset(match, match_size);
1912		if (dst) {
1913			if (match->compat_from_user)
1914				match->compat_from_user(dst, mwt->data);
1915			else
1916				memcpy(dst, mwt->data, match_size);
1917		}
1918
1919		size_kern = match->matchsize;
1920		if (unlikely(size_kern == -1))
1921			size_kern = match_size;
1922		module_put(match->me);
1923		break;
1924	case EBT_COMPAT_WATCHER: /* fallthrough */
1925	case EBT_COMPAT_TARGET:
1926		wt = xt_request_find_target(NFPROTO_BRIDGE, name, 0);
1927		if (IS_ERR(wt))
1928			return PTR_ERR(wt);
1929		off = xt_compat_target_offset(wt);
1930
1931		if (dst) {
1932			if (wt->compat_from_user)
1933				wt->compat_from_user(dst, mwt->data);
1934			else
1935				memcpy(dst, mwt->data, match_size);
1936		}
1937
1938		size_kern = wt->targetsize;
1939		module_put(wt->me);
1940		break;
1941
1942	default:
1943		return -EINVAL;
1944	}
1945
1946	state->buf_kern_offset += match_size + off;
1947	state->buf_user_offset += match_size;
1948	pad = XT_ALIGN(size_kern) - size_kern;
1949
1950	if (pad > 0 && dst) {
1951		BUG_ON(state->buf_kern_len <= pad);
1952		BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
1953		memset(dst + size_kern, 0, pad);
1954	}
1955	return off + match_size;
1956}
1957
1958/*
1959 * return size of all matches, watchers or target, including necessary
1960 * alignment and padding.
1961 */
1962static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
1963			unsigned int size_left, enum compat_mwt type,
1964			struct ebt_entries_buf_state *state, const void *base)
1965{
1966	int growth = 0;
1967	char *buf;
1968
1969	if (size_left == 0)
1970		return 0;
1971
1972	buf = (char *) match32;
1973
1974	while (size_left >= sizeof(*match32)) {
1975		struct ebt_entry_match *match_kern;
1976		int ret;
1977
1978		match_kern = (struct ebt_entry_match *) state->buf_kern_start;
1979		if (match_kern) {
1980			char *tmp;
1981			tmp = state->buf_kern_start + state->buf_kern_offset;
1982			match_kern = (struct ebt_entry_match *) tmp;
1983		}
1984		ret = ebt_buf_add(state, buf, sizeof(*match32));
1985		if (ret < 0)
1986			return ret;
1987		size_left -= sizeof(*match32);
1988
1989		/* add padding before match->data (if any) */
1990		ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
1991		if (ret < 0)
1992			return ret;
1993
1994		if (match32->match_size > size_left)
1995			return -EINVAL;
1996
1997		size_left -= match32->match_size;
1998
1999		ret = compat_mtw_from_user(match32, type, state, base);
2000		if (ret < 0)
2001			return ret;
2002
2003		BUG_ON(ret < match32->match_size);
2004		growth += ret - match32->match_size;
2005		growth += ebt_compat_entry_padsize();
2006
2007		buf += sizeof(*match32);
2008		buf += match32->match_size;
2009
2010		if (match_kern)
2011			match_kern->match_size = ret;
2012
2013		WARN_ON(type == EBT_COMPAT_TARGET && size_left);
2014		match32 = (struct compat_ebt_entry_mwt *) buf;
2015	}
2016
2017	return growth;
2018}
2019
2020/* called for all ebt_entry structures. */
2021static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2022			  unsigned int *total,
2023			  struct ebt_entries_buf_state *state)
2024{
2025	unsigned int i, j, startoff, new_offset = 0;
2026	/* stores match/watchers/targets & offset of next struct ebt_entry: */
2027	unsigned int offsets[4];
2028	unsigned int *offsets_update = NULL;
2029	int ret;
2030	char *buf_start;
2031
2032	if (*total < sizeof(struct ebt_entries))
2033		return -EINVAL;
2034
2035	if (!entry->bitmask) {
2036		*total -= sizeof(struct ebt_entries);
2037		return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
2038	}
2039	if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
2040		return -EINVAL;
2041
2042	startoff = state->buf_user_offset;
2043	/* pull in most of the ebt_entry; it does not need to be changed. */
2044	ret = ebt_buf_add(state, entry,
2045			offsetof(struct ebt_entry, watchers_offset));
2046	if (ret < 0)
2047		return ret;
2048
2049	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
2050	memcpy(&offsets[1], &entry->watchers_offset,
2051			sizeof(offsets) - sizeof(offsets[0]));
2052
2053	if (state->buf_kern_start) {
2054		buf_start = state->buf_kern_start + state->buf_kern_offset;
2055		offsets_update = (unsigned int *) buf_start;
2056	}
2057	ret = ebt_buf_add(state, &offsets[1],
2058			sizeof(offsets) - sizeof(offsets[0]));
2059	if (ret < 0)
2060		return ret;
2061	buf_start = (char *) entry;
2062	/*
2063	 * 0: matches offset, always follows ebt_entry.
2064	 * 1: watchers offset, from ebt_entry structure
2065	 * 2: target offset, from ebt_entry structure
2066	 * 3: next ebt_entry offset, from ebt_entry structure
2067	 *
2068	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
2069	 */
2070	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
2071		struct compat_ebt_entry_mwt *match32;
2072		unsigned int size;
2073		char *buf = buf_start;
2074
2075		buf = buf_start + offsets[i];
2076		if (offsets[i] > offsets[j])
2077			return -EINVAL;
2078
2079		match32 = (struct compat_ebt_entry_mwt *) buf;
2080		size = offsets[j] - offsets[i];
2081		ret = ebt_size_mwt(match32, size, i, state, base);
2082		if (ret < 0)
2083			return ret;
2084		new_offset += ret;
2085		if (offsets_update && new_offset) {
2086			pr_debug("change offset %d to %d\n",
2087				offsets_update[i], offsets[j] + new_offset);
2088			offsets_update[i] = offsets[j] + new_offset;
2089		}
2090	}
2091
2092	if (state->buf_kern_start == NULL) {
2093		unsigned int offset = buf_start - (char *) base;
2094
2095		ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset);
2096		if (ret < 0)
2097			return ret;
2098	}
2099
2100	startoff = state->buf_user_offset - startoff;
2101
2102	BUG_ON(*total < startoff);
2103	*total -= startoff;
2104	return 0;
2105}
2106
2107/*
2108 * repl->entries_size is the size of the ebt_entry blob in userspace.
2109 * It might need more memory when copied to a 64 bit kernel in case
2110 * userspace is 32-bit. So, first task: find out how much memory is needed.
2111 *
2112 * Called before validation is performed.
2113 */
2114static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2115				struct ebt_entries_buf_state *state)
2116{
2117	unsigned int size_remaining = size_user;
2118	int ret;
2119
2120	ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2121					&size_remaining, state);
2122	if (ret < 0)
2123		return ret;
2124
2125	WARN_ON(size_remaining);
2126	return state->buf_kern_offset;
2127}
2128
2129
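/* Convert a 32-bit ebt_replace header into the native structure.  The flat
 * members up to hook_entry have identical layout and are memcpy()ed; the
 * hook_entry, counters and entries pointers differ in width and are
 * converted with compat_ptr().
 */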
2130static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
2131					    void __user *user, unsigned int len)
2132{
2133	struct compat_ebt_replace tmp;
2134	int i;
2135
2136	if (len < sizeof(tmp))
2137		return -EINVAL;
2138
2139	if (copy_from_user(&tmp, user, sizeof(tmp)))
2140		return -EFAULT;
2141
2142	if (len != sizeof(tmp) + tmp.entries_size)
2143		return -EINVAL;
2144
2145	if (tmp.entries_size == 0)
2146		return -EINVAL;
2147
2148	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
2149			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
2150		return -ENOMEM;
2151	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
2152		return -ENOMEM;
2153
2154	memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
2155
2156	/* starting with hook_entry, 32 vs. 64 bit structures are different */
2157	for (i = 0; i < NF_BR_NUMHOOKS; i++)
2158		repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);
2159
2160	repl->num_counters = tmp.num_counters;
2161	repl->counters = compat_ptr(tmp.counters);
2162	repl->entries = compat_ptr(tmp.entries);
2163	return 0;
2164}
2165
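/* 32-bit EBT_SO_SET_ENTRIES: convert the header, copy the user blob, run
 * compat_copy_entries() once to learn the native size (size64), allocate
 * the final buffer and run it again to translate, then rebase the
 * hook_entry pointers via xt_compat_calc_jump() before handing everything
 * to do_replace_finish().
 */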
2166static int compat_do_replace(struct net *net, void __user *user,
2167			     unsigned int len)
2168{
2169	int ret, i, countersize, size64;
2170	struct ebt_table_info *newinfo;
2171	struct ebt_replace tmp;
2172	struct ebt_entries_buf_state state;
2173	void *entries_tmp;
2174
2175	ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
2176	if (ret) {
2177		/* try real handler in case userland supplied needed padding */
2178		if (ret == -EINVAL && do_replace(net, user, len) == 0)
2179			ret = 0;
2180		return ret;
2181	}
2182
2183	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
2184	newinfo = vmalloc(sizeof(*newinfo) + countersize);
2185	if (!newinfo)
2186		return -ENOMEM;
2187
2188	if (countersize)
2189		memset(newinfo->counters, 0, countersize);
2190
2191	memset(&state, 0, sizeof(state));
2192
2193	newinfo->entries = vmalloc(tmp.entries_size);
2194	if (!newinfo->entries) {
2195		ret = -ENOMEM;
2196		goto free_newinfo;
2197	}
2198	if (copy_from_user(
2199	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
2200		ret = -EFAULT;
2201		goto free_entries;
2202	}
2203
2204	entries_tmp = newinfo->entries;
2205
2206	xt_compat_lock(NFPROTO_BRIDGE);
2207
2208	xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
2209	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2210	if (ret < 0)
2211		goto out_unlock;
2212
2213	pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
2214		tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
2215		xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));
2216
2217	size64 = ret;
2218	newinfo->entries = vmalloc(size64);
2219	if (!newinfo->entries) {
2220		vfree(entries_tmp);
2221		ret = -ENOMEM;
2222		goto out_unlock;
2223	}
2224
2225	memset(&state, 0, sizeof(state));
2226	state.buf_kern_start = newinfo->entries;
2227	state.buf_kern_len = size64;
2228
2229	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2230	BUG_ON(ret < 0);	/* parses same data again */
2231
2232	vfree(entries_tmp);
2233	tmp.entries_size = size64;
2234
2235	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
2236		char __user *usrptr;
2237		if (tmp.hook_entry[i]) {
2238			unsigned int delta;
2239			usrptr = (char __user *) tmp.hook_entry[i];
2240			delta = usrptr - tmp.entries;
2241			usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
2242			tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
2243		}
2244	}
2245
2246	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2247	xt_compat_unlock(NFPROTO_BRIDGE);
2248
2249	ret = do_replace_finish(net, &tmp, newinfo);
2250	if (ret == 0)
2251		return ret;
2252free_entries:
2253	vfree(newinfo->entries);
2254free_newinfo:
2255	vfree(newinfo);
2256	return ret;
2257out_unlock:
2258	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2259	xt_compat_unlock(NFPROTO_BRIDGE);
2260	goto free_entries;
2261}
2262
2263static int compat_update_counters(struct net *net, void __user *user,
2264				  unsigned int len)
2265{
2266	struct compat_ebt_replace hlp;
2267
2268	if (copy_from_user(&hlp, user, sizeof(hlp)))
2269		return -EFAULT;
2270
2271	/* try real handler in case userland supplied needed padding */
2272	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2273		return update_counters(net, user, len);
2274
2275	return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2276					hlp.num_counters, user, len);
2277}
2278
2279static int compat_do_ebt_set_ctl(struct sock *sk,
2280		int cmd, void __user *user, unsigned int len)
2281{
2282	int ret;
2283	struct net *net = sock_net(sk);
2284
2285	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2286		return -EPERM;
2287
2288	switch (cmd) {
2289	case EBT_SO_SET_ENTRIES:
2290		ret = compat_do_replace(net, user, len);
2291		break;
2292	case EBT_SO_SET_COUNTERS:
2293		ret = compat_update_counters(net, user, len);
2294		break;
2295	default:
2296		ret = -EINVAL;
2297	}
2298	return ret;
2299}
2300
2301static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
2302		void __user *user, int *len)
2303{
2304	int ret;
2305	struct compat_ebt_replace tmp;
2306	struct ebt_table *t;
2307	struct net *net = sock_net(sk);
2308
2309	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2310		return -EPERM;
2311
2312	/* try real handler in case userland supplied needed padding */
2313	if ((cmd == EBT_SO_GET_INFO ||
2314	     cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
2315			return do_ebt_get_ctl(sk, cmd, user, len);
2316
2317	if (copy_from_user(&tmp, user, sizeof(tmp)))
2318		return -EFAULT;
2319
2320	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
2321	if (!t)
2322		return ret;
2323
2324	xt_compat_lock(NFPROTO_BRIDGE);
2325	switch (cmd) {
2326	case EBT_SO_GET_INFO:
2327		tmp.nentries = t->private->nentries;
2328		ret = compat_table_info(t->private, &tmp);
2329		if (ret)
2330			goto out;
2331		tmp.valid_hooks = t->valid_hooks;
2332
2333		if (copy_to_user(user, &tmp, *len) != 0) {
2334			ret = -EFAULT;
2335			break;
2336		}
2337		ret = 0;
2338		break;
2339	case EBT_SO_GET_INIT_INFO:
2340		tmp.nentries = t->table->nentries;
2341		tmp.entries_size = t->table->entries_size;
2342		tmp.valid_hooks = t->table->valid_hooks;
2343
2344		if (copy_to_user(user, &tmp, *len) != 0) {
2345			ret = -EFAULT;
2346			break;
2347		}
2348		ret = 0;
2349		break;
2350	case EBT_SO_GET_ENTRIES:
2351	case EBT_SO_GET_INIT_ENTRIES:
2352		/*
2353		 * try real handler first in case of userland-side padding.
2354		 * in case we are dealing with an 'ordinary' 32 bit binary
2355		 * without 64bit compatibility padding, this will fail right
2356		 * after copy_from_user when the *len argument is validated.
2357		 *
2358		 * the compat_ variant needs to do one pass over the kernel
2359		 * data set to adjust for size differences before the check.
2360		 */
2361		if (copy_everything_to_user(t, user, len, cmd) == 0)
2362			ret = 0;
2363		else
2364			ret = compat_copy_everything_to_user(t, user, len, cmd);
2365		break;
2366	default:
2367		ret = -EINVAL;
2368	}
2369 out:
2370	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2371	xt_compat_unlock(NFPROTO_BRIDGE);
2372	mutex_unlock(&ebt_mutex);
2373	return ret;
2374}
2375#endif
2376
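/* ebtables is configured via get/setsockopt on ordinary PF_INET sockets;
 * the compat_* handlers above are wired in here for 32-bit userspace
 * running on 64-bit kernels.
 */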
2377static struct nf_sockopt_ops ebt_sockopts = {
2378	.pf		= PF_INET,
2379	.set_optmin	= EBT_BASE_CTL,
2380	.set_optmax	= EBT_SO_SET_MAX + 1,
2381	.set		= do_ebt_set_ctl,
2382#ifdef CONFIG_COMPAT
2383	.compat_set	= compat_do_ebt_set_ctl,
2384#endif
2385	.get_optmin	= EBT_BASE_CTL,
2386	.get_optmax	= EBT_SO_GET_MAX + 1,
2387	.get		= do_ebt_get_ctl,
2388#ifdef CONFIG_COMPAT
2389	.compat_get	= compat_do_ebt_get_ctl,
2390#endif
2391	.owner		= THIS_MODULE,
2392};
2393
2394static int __init ebtables_init(void)
2395{
2396	int ret;
2397
2398	ret = xt_register_target(&ebt_standard_target);
2399	if (ret < 0)
2400		return ret;
2401	ret = nf_register_sockopt(&ebt_sockopts);
2402	if (ret < 0) {
2403		xt_unregister_target(&ebt_standard_target);
2404		return ret;
2405	}
2406
2407	printk(KERN_INFO "Ebtables v2.0 registered\n");
2408	return 0;
2409}
2410
2411static void __exit ebtables_fini(void)
2412{
2413	nf_unregister_sockopt(&ebt_sockopts);
2414	xt_unregister_target(&ebt_standard_target);
2415	printk(KERN_INFO "Ebtables v2.0 unregistered\n");
2416}
2417
2418EXPORT_SYMBOL(ebt_register_table);
2419EXPORT_SYMBOL(ebt_unregister_table);
2420EXPORT_SYMBOL(ebt_do_table);
2421module_init(ebtables_init);
2422module_exit(ebtables_fini);
2423MODULE_LICENSE("GPL");