   1/*
   2 * Packet matching code for ARP packets.
   3 *
   4 * Based heavily, if not almost entirely, upon ip_tables.c framework.
   5 *
   6 * Some ARP specific bits are:
   7 *
   8 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
   9 *
  10 */
  11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  12#include <linux/kernel.h>
  13#include <linux/skbuff.h>
  14#include <linux/netdevice.h>
  15#include <linux/capability.h>
  16#include <linux/if_arp.h>
  17#include <linux/kmod.h>
  18#include <linux/vmalloc.h>
  19#include <linux/proc_fs.h>
  20#include <linux/module.h>
  21#include <linux/init.h>
  22#include <linux/mutex.h>
  23#include <linux/err.h>
  24#include <net/compat.h>
  25#include <net/sock.h>
  26#include <asm/uaccess.h>
  27
  28#include <linux/netfilter/x_tables.h>
  29#include <linux/netfilter_arp/arp_tables.h>
  30#include "../../netfilter/xt_repldata.h"
  31
  32MODULE_LICENSE("GPL");
  33MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
  34MODULE_DESCRIPTION("arptables core");
  35
  36/*#define DEBUG_ARP_TABLES*/
  37/*#define DEBUG_ARP_TABLES_USER*/
  38
  39#ifdef DEBUG_ARP_TABLES
  40#define dprintf(format, args...)  printk(format , ## args)
  41#else
  42#define dprintf(format, args...)
  43#endif
  44
  45#ifdef DEBUG_ARP_TABLES_USER
  46#define duprintf(format, args...) printk(format , ## args)
  47#else
  48#define duprintf(format, args...)
  49#endif
  50
  51#ifdef CONFIG_NETFILTER_DEBUG
  52#define ARP_NF_ASSERT(x)	WARN_ON(!(x))
  53#else
  54#define ARP_NF_ASSERT(x)
  55#endif
  56
  57void *arpt_alloc_initial_table(const struct xt_table *info)
  58{
  59	return xt_alloc_initial_table(arpt, ARPT);
  60}
  61EXPORT_SYMBOL_GPL(arpt_alloc_initial_table);
  62
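     /*
      * Compare one hardware address from the ARP payload against the
      * rule's address/mask pair.  Returns 0 on match and non-zero on
      * mismatch; only the first ARPT_DEV_ADDR_LEN_MAX bytes are examined.
      */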
  63static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
  64				      const char *hdr_addr, int len)
  65{
  66	int i, ret;
  67
  68	if (len > ARPT_DEV_ADDR_LEN_MAX)
  69		len = ARPT_DEV_ADDR_LEN_MAX;
  70
  71	ret = 0;
  72	for (i = 0; i < len; i++)
  73		ret |= (hdr_addr[i] ^ ap->addr[i]) & ap->mask[i];
  74
  75	return ret != 0;
  76}
  77
  78/*
   79 * Unfortunately, _b and _mask are not aligned to an int (or long int).
   80 * Some arches don't care; unrolling the loop is a win on them.
   81 * For other arches, we only have 16-bit alignment.
  82 */
  83static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask)
  84{
  85#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
  86	unsigned long ret = ifname_compare_aligned(_a, _b, _mask);
  87#else
  88	unsigned long ret = 0;
  89	const u16 *a = (const u16 *)_a;
  90	const u16 *b = (const u16 *)_b;
  91	const u16 *mask = (const u16 *)_mask;
  92	int i;
  93
  94	for (i = 0; i < IFNAMSIZ/sizeof(u16); i++)
  95		ret |= (a[i] ^ b[i]) & mask[i];
  96#endif
  97	return ret;
  98}
  99
 100/* Returns whether packet matches rule or not. */
 101static inline int arp_packet_match(const struct arphdr *arphdr,
 102				   struct net_device *dev,
 103				   const char *indev,
 104				   const char *outdev,
 105				   const struct arpt_arp *arpinfo)
 106{
 107	const char *arpptr = (char *)(arphdr + 1);
 108	const char *src_devaddr, *tgt_devaddr;
 109	__be32 src_ipaddr, tgt_ipaddr;
 110	long ret;
 111
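     /*
      * FWINV(cond, flag) XORs the raw comparison result with the state
      * of the corresponding ARPT_INV_* bit in arpinfo->invflags, so a
      * set inversion flag negates the test.
      */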
 112#define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg)))
 113
 114	if (FWINV((arphdr->ar_op & arpinfo->arpop_mask) != arpinfo->arpop,
 115		  ARPT_INV_ARPOP)) {
 116		dprintf("ARP operation field mismatch.\n");
 117		dprintf("ar_op: %04x info->arpop: %04x info->arpop_mask: %04x\n",
 118			arphdr->ar_op, arpinfo->arpop, arpinfo->arpop_mask);
 119		return 0;
 120	}
 121
 122	if (FWINV((arphdr->ar_hrd & arpinfo->arhrd_mask) != arpinfo->arhrd,
 123		  ARPT_INV_ARPHRD)) {
 124		dprintf("ARP hardware address format mismatch.\n");
 125		dprintf("ar_hrd: %04x info->arhrd: %04x info->arhrd_mask: %04x\n",
 126			arphdr->ar_hrd, arpinfo->arhrd, arpinfo->arhrd_mask);
 127		return 0;
 128	}
 129
 130	if (FWINV((arphdr->ar_pro & arpinfo->arpro_mask) != arpinfo->arpro,
 131		  ARPT_INV_ARPPRO)) {
 132		dprintf("ARP protocol address format mismatch.\n");
 133		dprintf("ar_pro: %04x info->arpro: %04x info->arpro_mask: %04x\n",
 134			arphdr->ar_pro, arpinfo->arpro, arpinfo->arpro_mask);
 135		return 0;
 136	}
 137
 138	if (FWINV((arphdr->ar_hln & arpinfo->arhln_mask) != arpinfo->arhln,
 139		  ARPT_INV_ARPHLN)) {
 140		dprintf("ARP hardware address length mismatch.\n");
 141		dprintf("ar_hln: %02x info->arhln: %02x info->arhln_mask: %02x\n",
 142			arphdr->ar_hln, arpinfo->arhln, arpinfo->arhln_mask);
 143		return 0;
 144	}
 145
 146	src_devaddr = arpptr;
 147	arpptr += dev->addr_len;
 148	memcpy(&src_ipaddr, arpptr, sizeof(u32));
 149	arpptr += sizeof(u32);
 150	tgt_devaddr = arpptr;
 151	arpptr += dev->addr_len;
 152	memcpy(&tgt_ipaddr, arpptr, sizeof(u32));
 153
 154	if (FWINV(arp_devaddr_compare(&arpinfo->src_devaddr, src_devaddr, dev->addr_len),
 155		  ARPT_INV_SRCDEVADDR) ||
 156	    FWINV(arp_devaddr_compare(&arpinfo->tgt_devaddr, tgt_devaddr, dev->addr_len),
 157		  ARPT_INV_TGTDEVADDR)) {
 158		dprintf("Source or target device address mismatch.\n");
 159
 160		return 0;
 161	}
 162
 163	if (FWINV((src_ipaddr & arpinfo->smsk.s_addr) != arpinfo->src.s_addr,
 164		  ARPT_INV_SRCIP) ||
 165	    FWINV(((tgt_ipaddr & arpinfo->tmsk.s_addr) != arpinfo->tgt.s_addr),
 166		  ARPT_INV_TGTIP)) {
 167		dprintf("Source or target IP address mismatch.\n");
 168
 169		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
 170			&src_ipaddr,
 171			&arpinfo->smsk.s_addr,
 172			&arpinfo->src.s_addr,
 173			arpinfo->invflags & ARPT_INV_SRCIP ? " (INV)" : "");
 174		dprintf("TGT: %pI4 Mask: %pI4 Target: %pI4.%s\n",
 175			&tgt_ipaddr,
 176			&arpinfo->tmsk.s_addr,
 177			&arpinfo->tgt.s_addr,
 178			arpinfo->invflags & ARPT_INV_TGTIP ? " (INV)" : "");
 179		return 0;
 180	}
 181
 182	/* Look for ifname matches.  */
 183	ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask);
 184
 185	if (FWINV(ret != 0, ARPT_INV_VIA_IN)) {
 186		dprintf("VIA in mismatch (%s vs %s).%s\n",
 187			indev, arpinfo->iniface,
 188			arpinfo->invflags&ARPT_INV_VIA_IN ?" (INV)":"");
 189		return 0;
 190	}
 191
 192	ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask);
 193
 194	if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) {
 195		dprintf("VIA out mismatch (%s vs %s).%s\n",
 196			outdev, arpinfo->outiface,
 197			arpinfo->invflags&ARPT_INV_VIA_OUT ?" (INV)":"");
 198		return 0;
 199	}
 200
 201	return 1;
 202#undef FWINV
 203}
 204
 205static inline int arp_checkentry(const struct arpt_arp *arp)
 206{
 207	if (arp->flags & ~ARPT_F_MASK) {
 208		duprintf("Unknown flag bits set: %08X\n",
 209			 arp->flags & ~ARPT_F_MASK);
 210		return 0;
 211	}
 212	if (arp->invflags & ~ARPT_INV_MASK) {
 213		duprintf("Unknown invflag bits set: %08X\n",
 214			 arp->invflags & ~ARPT_INV_MASK);
 215		return 0;
 216	}
 217
 218	return 1;
 219}
 220
 221static unsigned int
 222arpt_error(struct sk_buff *skb, const struct xt_action_param *par)
 223{
 224	net_err_ratelimited("arp_tables: error: '%s'\n",
 225			    (const char *)par->targinfo);
 226
 227	return NF_DROP;
 228}
 229
 230static inline const struct xt_entry_target *
 231arpt_get_target_c(const struct arpt_entry *e)
 232{
 233	return arpt_get_target((struct arpt_entry *)e);
 234}
 235
 236static inline struct arpt_entry *
 237get_entry(const void *base, unsigned int offset)
 238{
 239	return (struct arpt_entry *)(base + offset);
 240}
 241
 242static inline __pure
 243struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry)
 244{
 245	return (void *)entry + entry->next_offset;
 246}
 247
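     /*
      * Main traversal routine, called from the NF_ARP_* hook functions.
      * Walks this CPU's copy of the rule blob for the given hook,
      * matching each entry and running its target until an absolute
      * verdict is reached.  Standard-target jumps and returns are
      * handled in line, with the back pointer kept in comefrom, and the
      * walk is bracketed by xt_write_recseq so counter readers can
      * detect a concurrent update.
      */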
 248unsigned int arpt_do_table(struct sk_buff *skb,
 249			   unsigned int hook,
 250			   const struct net_device *in,
 251			   const struct net_device *out,
 252			   struct xt_table *table)
 253{
 254	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
 255	unsigned int verdict = NF_DROP;
 256	const struct arphdr *arp;
 257	struct arpt_entry *e, *back;
 258	const char *indev, *outdev;
 259	void *table_base;
 260	const struct xt_table_info *private;
 261	struct xt_action_param acpar;
 262	unsigned int addend;
 263
 264	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
 265		return NF_DROP;
 266
 267	indev = in ? in->name : nulldevname;
 268	outdev = out ? out->name : nulldevname;
 269
 270	local_bh_disable();
 271	addend = xt_write_recseq_begin();
 272	private = table->private;
 273	table_base = private->entries[smp_processor_id()];
 274
 275	e = get_entry(table_base, private->hook_entry[hook]);
 276	back = get_entry(table_base, private->underflow[hook]);
 277
 278	acpar.in      = in;
 279	acpar.out     = out;
 280	acpar.hooknum = hook;
 281	acpar.family  = NFPROTO_ARP;
 282	acpar.hotdrop = false;
 283
 284	arp = arp_hdr(skb);
 285	do {
 286		const struct xt_entry_target *t;
 287
 288		if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
 289			e = arpt_next_entry(e);
 290			continue;
 291		}
 292
 293		ADD_COUNTER(e->counters, arp_hdr_len(skb->dev), 1);
 294
 295		t = arpt_get_target_c(e);
 296
 297		/* Standard target? */
 298		if (!t->u.kernel.target->target) {
 299			int v;
 300
 301			v = ((struct xt_standard_target *)t)->verdict;
 302			if (v < 0) {
 303				/* Pop from stack? */
 304				if (v != XT_RETURN) {
 305					verdict = (unsigned int)(-v) - 1;
 306					break;
 307				}
 308				e = back;
 309				back = get_entry(table_base, back->comefrom);
 310				continue;
 311			}
 312			if (table_base + v
 313			    != arpt_next_entry(e)) {
 314				/* Save old back ptr in next entry */
 315				struct arpt_entry *next = arpt_next_entry(e);
 316				next->comefrom = (void *)back - table_base;
 317
 318				/* set back pointer to next entry */
 319				back = next;
 320			}
 321
 322			e = get_entry(table_base, v);
 323			continue;
 324		}
 325
  326		/* Targets which reenter must return
  327		 * absolute verdicts.
  328		 */
 329		acpar.target   = t->u.kernel.target;
 330		acpar.targinfo = t->data;
 331		verdict = t->u.kernel.target->target(skb, &acpar);
 332
 333		/* Target might have changed stuff. */
 334		arp = arp_hdr(skb);
 335
 336		if (verdict == XT_CONTINUE)
 337			e = arpt_next_entry(e);
 338		else
 339			/* Verdict */
 340			break;
 341	} while (!acpar.hotdrop);
 342	xt_write_recseq_end(addend);
 343	local_bh_enable();
 344
 345	if (acpar.hotdrop)
 346		return NF_DROP;
 347	else
 348		return verdict;
 349}
 350
 351/* All zeroes == unconditional rule. */
 352static inline bool unconditional(const struct arpt_arp *arp)
 353{
 354	static const struct arpt_arp uncond;
 355
 356	return memcmp(arp, &uncond, sizeof(uncond)) == 0;
 357}
 358
 359/* Figures out from what hook each rule can be called: returns 0 if
 360 * there are loops.  Puts hook bitmask in comefrom.
 361 */
 362static int mark_source_chains(const struct xt_table_info *newinfo,
 363			      unsigned int valid_hooks, void *entry0)
 364{
 365	unsigned int hook;
 366
 367	/* No recursion; use packet counter to save back ptrs (reset
 368	 * to 0 as we leave), and comefrom to save source hook bitmask.
 369	 */
 370	for (hook = 0; hook < NF_ARP_NUMHOOKS; hook++) {
 371		unsigned int pos = newinfo->hook_entry[hook];
 372		struct arpt_entry *e
 373			= (struct arpt_entry *)(entry0 + pos);
 374
 375		if (!(valid_hooks & (1 << hook)))
 376			continue;
 377
 378		/* Set initial back pointer. */
 379		e->counters.pcnt = pos;
 380
 381		for (;;) {
 382			const struct xt_standard_target *t
 383				= (void *)arpt_get_target_c(e);
 384			int visited = e->comefrom & (1 << hook);
 385
 386			if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) {
 387				pr_notice("arptables: loop hook %u pos %u %08X.\n",
 388				       hook, pos, e->comefrom);
 389				return 0;
 390			}
 391			e->comefrom
 392				|= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));
 393
 394			/* Unconditional return/END. */
 395			if ((e->target_offset == sizeof(struct arpt_entry) &&
 396			     (strcmp(t->target.u.user.name,
 397				     XT_STANDARD_TARGET) == 0) &&
 398			     t->verdict < 0 && unconditional(&e->arp)) ||
 399			    visited) {
 400				unsigned int oldpos, size;
 401
 402				if ((strcmp(t->target.u.user.name,
 403					    XT_STANDARD_TARGET) == 0) &&
 404				    t->verdict < -NF_MAX_VERDICT - 1) {
 405					duprintf("mark_source_chains: bad "
 406						"negative verdict (%i)\n",
 407								t->verdict);
 408					return 0;
 409				}
 410
 411				/* Return: backtrack through the last
 412				 * big jump.
 413				 */
 414				do {
 415					e->comefrom ^= (1<<NF_ARP_NUMHOOKS);
 416					oldpos = pos;
 417					pos = e->counters.pcnt;
 418					e->counters.pcnt = 0;
 419
 420					/* We're at the start. */
 421					if (pos == oldpos)
 422						goto next;
 423
 424					e = (struct arpt_entry *)
 425						(entry0 + pos);
 426				} while (oldpos == pos + e->next_offset);
 427
 428				/* Move along one */
 429				size = e->next_offset;
 430				e = (struct arpt_entry *)
 431					(entry0 + pos + size);
 432				e->counters.pcnt = pos;
 433				pos += size;
 434			} else {
 435				int newpos = t->verdict;
 436
 437				if (strcmp(t->target.u.user.name,
 438					   XT_STANDARD_TARGET) == 0 &&
 439				    newpos >= 0) {
 440					if (newpos > newinfo->size -
 441						sizeof(struct arpt_entry)) {
 442						duprintf("mark_source_chains: "
 443							"bad verdict (%i)\n",
 444								newpos);
 445						return 0;
 446					}
 447
  448					/* This is a jump; chase it. */
 449					duprintf("Jump rule %u -> %u\n",
 450						 pos, newpos);
 451				} else {
 452					/* ... this is a fallthru */
 453					newpos = pos + e->next_offset;
 454				}
 455				e = (struct arpt_entry *)
 456					(entry0 + newpos);
 457				e->counters.pcnt = pos;
 458				pos = newpos;
 459			}
 460		}
 461		next:
 462		duprintf("Finished chain %u\n", hook);
 463	}
 464	return 1;
 465}
 466
 467static inline int check_entry(const struct arpt_entry *e, const char *name)
 468{
 469	const struct xt_entry_target *t;
 470
 471	if (!arp_checkentry(&e->arp)) {
 472		duprintf("arp_tables: arp check failed %p %s.\n", e, name);
 473		return -EINVAL;
 474	}
 475
 476	if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset)
 477		return -EINVAL;
 478
 479	t = arpt_get_target_c(e);
 480	if (e->target_offset + t->u.target_size > e->next_offset)
 481		return -EINVAL;
 482
 483	return 0;
 484}
 485
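     /*
      * Bind the rule to its target extension: xt_check_target() verifies
      * the targinfo size, hook mask and any target-specific constraints
      * via the extension's checkentry hook.
      */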
 486static inline int check_target(struct arpt_entry *e, const char *name)
 487{
 488	struct xt_entry_target *t = arpt_get_target(e);
 489	int ret;
 490	struct xt_tgchk_param par = {
 491		.table     = name,
 492		.entryinfo = e,
 493		.target    = t->u.kernel.target,
 494		.targinfo  = t->data,
 495		.hook_mask = e->comefrom,
 496		.family    = NFPROTO_ARP,
 497	};
 498
 499	ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
 500	if (ret < 0) {
 501		duprintf("arp_tables: check failed for `%s'.\n",
 502			 t->u.kernel.target->name);
 503		return ret;
 504	}
 505	return 0;
 506}
 507
 508static inline int
 509find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
 510{
 511	struct xt_entry_target *t;
 512	struct xt_target *target;
 513	int ret;
 514
 515	ret = check_entry(e, name);
 516	if (ret)
 517		return ret;
 518
 519	t = arpt_get_target(e);
 520	target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
 521					t->u.user.revision);
 522	if (IS_ERR(target)) {
 523		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
 524		ret = PTR_ERR(target);
 525		goto out;
 526	}
 527	t->u.kernel.target = target;
 528
 529	ret = check_target(e, name);
 530	if (ret)
 531		goto err;
 532	return 0;
 533err:
 534	module_put(t->u.kernel.target->me);
 535out:
 536	return ret;
 537}
 538
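     /*
      * An underflow (base chain policy) must be an unconditional STANDARD
      * rule with an absolute ACCEPT or DROP verdict.  Verdicts are stored
      * as -(NF_<verdict>) - 1, hence the decoding below.
      */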
 539static bool check_underflow(const struct arpt_entry *e)
 540{
 541	const struct xt_entry_target *t;
 542	unsigned int verdict;
 543
 544	if (!unconditional(&e->arp))
 545		return false;
 546	t = arpt_get_target_c(e);
 547	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
 548		return false;
 549	verdict = ((struct xt_standard_target *)t)->verdict;
 550	verdict = -verdict - 1;
 551	return verdict == NF_DROP || verdict == NF_ACCEPT;
 552}
 553
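     /*
      * Check one entry's alignment, bounds and minimum size, and record
      * any hook entry point or underflow that begins at this offset.
      */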
 554static inline int check_entry_size_and_hooks(struct arpt_entry *e,
 555					     struct xt_table_info *newinfo,
 556					     const unsigned char *base,
 557					     const unsigned char *limit,
 558					     const unsigned int *hook_entries,
 559					     const unsigned int *underflows,
 560					     unsigned int valid_hooks)
 561{
 562	unsigned int h;
 563
 564	if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
 565	    (unsigned char *)e + sizeof(struct arpt_entry) >= limit) {
 566		duprintf("Bad offset %p\n", e);
 567		return -EINVAL;
 568	}
 569
 570	if (e->next_offset
 571	    < sizeof(struct arpt_entry) + sizeof(struct xt_entry_target)) {
 572		duprintf("checking: element %p size %u\n",
 573			 e, e->next_offset);
 574		return -EINVAL;
 575	}
 576
 577	/* Check hooks & underflows */
 578	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
 579		if (!(valid_hooks & (1 << h)))
 580			continue;
 581		if ((unsigned char *)e - base == hook_entries[h])
 582			newinfo->hook_entry[h] = hook_entries[h];
 583		if ((unsigned char *)e - base == underflows[h]) {
 584			if (!check_underflow(e)) {
 585				pr_err("Underflows must be unconditional and "
 586				       "use the STANDARD target with "
 587				       "ACCEPT/DROP\n");
 588				return -EINVAL;
 589			}
 590			newinfo->underflow[h] = underflows[h];
 591		}
 592	}
 593
 594	/* Clear counters and comefrom */
 595	e->counters = ((struct xt_counters) { 0, 0 });
 596	e->comefrom = 0;
 597	return 0;
 598}
 599
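     /* Run the target's destructor, if any, and drop its module reference. */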
 600static inline void cleanup_entry(struct arpt_entry *e)
 601{
 602	struct xt_tgdtor_param par;
 603	struct xt_entry_target *t;
 604
 605	t = arpt_get_target(e);
 606	par.target   = t->u.kernel.target;
 607	par.targinfo = t->data;
 608	par.family   = NFPROTO_ARP;
 609	if (par.target->destroy != NULL)
 610		par.target->destroy(&par);
 611	module_put(par.target->me);
 612}
 613
 614/* Checks and translates the user-supplied table segment (held in
 615 * newinfo).
 616 */
 617static int translate_table(struct xt_table_info *newinfo, void *entry0,
 618                           const struct arpt_replace *repl)
 619{
 620	struct arpt_entry *iter;
 621	unsigned int i;
 622	int ret = 0;
 623
 624	newinfo->size = repl->size;
 625	newinfo->number = repl->num_entries;
 626
 627	/* Init all hooks to impossible value. */
 628	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
 629		newinfo->hook_entry[i] = 0xFFFFFFFF;
 630		newinfo->underflow[i] = 0xFFFFFFFF;
 631	}
 632
 633	duprintf("translate_table: size %u\n", newinfo->size);
 634	i = 0;
 635
 636	/* Walk through entries, checking offsets. */
 637	xt_entry_foreach(iter, entry0, newinfo->size) {
 638		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
 639						 entry0 + repl->size,
 640						 repl->hook_entry,
 641						 repl->underflow,
 642						 repl->valid_hooks);
 643		if (ret != 0)
 644			break;
 645		++i;
 646		if (strcmp(arpt_get_target(iter)->u.user.name,
 647		    XT_ERROR_TARGET) == 0)
 648			++newinfo->stacksize;
 649	}
 650	duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
 651	if (ret != 0)
 652		return ret;
 653
 654	if (i != repl->num_entries) {
 655		duprintf("translate_table: %u not %u entries\n",
 656			 i, repl->num_entries);
 657		return -EINVAL;
 658	}
 659
 660	/* Check hooks all assigned */
 661	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
 662		/* Only hooks which are valid */
 663		if (!(repl->valid_hooks & (1 << i)))
 664			continue;
 665		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
 666			duprintf("Invalid hook entry %u %u\n",
 667				 i, repl->hook_entry[i]);
 668			return -EINVAL;
 669		}
 670		if (newinfo->underflow[i] == 0xFFFFFFFF) {
 671			duprintf("Invalid underflow %u %u\n",
 672				 i, repl->underflow[i]);
 673			return -EINVAL;
 674		}
 675	}
 676
 677	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
 678		duprintf("Looping hook\n");
 679		return -ELOOP;
 680	}
 681
 682	/* Finally, each sanity check must pass */
 683	i = 0;
 684	xt_entry_foreach(iter, entry0, newinfo->size) {
 685		ret = find_check_entry(iter, repl->name, repl->size);
 686		if (ret != 0)
 687			break;
 688		++i;
 689	}
 690
 691	if (ret != 0) {
 692		xt_entry_foreach(iter, entry0, newinfo->size) {
 693			if (i-- == 0)
 694				break;
 695			cleanup_entry(iter);
 696		}
 697		return ret;
 698	}
 699
 700	/* And one copy for every other CPU */
 701	for_each_possible_cpu(i) {
 702		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
 703			memcpy(newinfo->entries[i], entry0, newinfo->size);
 704	}
 705
 706	return ret;
 707}
 708
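     /*
      * Sum the per-CPU counters into the caller's array, re-reading each
      * entry under the xt_recseq sequence counter so a concurrent writer
      * cannot leave us with a torn bcnt/pcnt pair.
      */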
 709static void get_counters(const struct xt_table_info *t,
 710			 struct xt_counters counters[])
 711{
 712	struct arpt_entry *iter;
 713	unsigned int cpu;
 714	unsigned int i;
 715
 716	for_each_possible_cpu(cpu) {
 717		seqcount_t *s = &per_cpu(xt_recseq, cpu);
 718
 719		i = 0;
 720		xt_entry_foreach(iter, t->entries[cpu], t->size) {
 721			u64 bcnt, pcnt;
 722			unsigned int start;
 723
 724			do {
 725				start = read_seqcount_begin(s);
 726				bcnt = iter->counters.bcnt;
 727				pcnt = iter->counters.pcnt;
 728			} while (read_seqcount_retry(s, start));
 729
 730			ADD_COUNTER(counters[i], bcnt, pcnt);
 731			++i;
 732		}
 733	}
 734}
 735
 736static struct xt_counters *alloc_counters(const struct xt_table *table)
 737{
 738	unsigned int countersize;
 739	struct xt_counters *counters;
 740	const struct xt_table_info *private = table->private;
 741
  742	/* We need an atomic snapshot of the counters: the rest doesn't
  743	 * change (other than comefrom, which userspace doesn't care
 744	 * about).
 745	 */
 746	countersize = sizeof(struct xt_counters) * private->number;
 747	counters = vzalloc(countersize);
 748
 749	if (counters == NULL)
 750		return ERR_PTR(-ENOMEM);
 751
 752	get_counters(private, counters);
 753
 754	return counters;
 755}
 756
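     /*
      * Copy the raw rule blob to userspace, then go back and patch in the
      * summed counters and each target's canonical kernel name so the dump
      * matches what userspace expects.
      */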
 757static int copy_entries_to_user(unsigned int total_size,
 758				const struct xt_table *table,
 759				void __user *userptr)
 760{
 761	unsigned int off, num;
 762	const struct arpt_entry *e;
 763	struct xt_counters *counters;
 764	struct xt_table_info *private = table->private;
 765	int ret = 0;
 766	void *loc_cpu_entry;
 767
 768	counters = alloc_counters(table);
 769	if (IS_ERR(counters))
 770		return PTR_ERR(counters);
 771
 772	loc_cpu_entry = private->entries[raw_smp_processor_id()];
 773	/* ... then copy entire thing ... */
 774	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
 775		ret = -EFAULT;
 776		goto free_counters;
 777	}
 778
 779	/* FIXME: use iterator macros --RR */
 780	/* ... then go back and fix counters and names */
 781	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
 782		const struct xt_entry_target *t;
 783
 784		e = (struct arpt_entry *)(loc_cpu_entry + off);
 785		if (copy_to_user(userptr + off
 786				 + offsetof(struct arpt_entry, counters),
 787				 &counters[num],
 788				 sizeof(counters[num])) != 0) {
 789			ret = -EFAULT;
 790			goto free_counters;
 791		}
 792
 793		t = arpt_get_target_c(e);
 794		if (copy_to_user(userptr + off + e->target_offset
 795				 + offsetof(struct xt_entry_target,
 796					    u.user.name),
 797				 t->u.kernel.target->name,
 798				 strlen(t->u.kernel.target->name)+1) != 0) {
 799			ret = -EFAULT;
 800			goto free_counters;
 801		}
 802	}
 803
 804 free_counters:
 805	vfree(counters);
 806	return ret;
 807}
 808
 809#ifdef CONFIG_COMPAT
 810static void compat_standard_from_user(void *dst, const void *src)
 811{
 812	int v = *(compat_int_t *)src;
 813
 814	if (v > 0)
 815		v += xt_compat_calc_jump(NFPROTO_ARP, v);
 816	memcpy(dst, &v, sizeof(v));
 817}
 818
 819static int compat_standard_to_user(void __user *dst, const void *src)
 820{
 821	compat_int_t cv = *(int *)src;
 822
 823	if (cv > 0)
 824		cv -= xt_compat_calc_jump(NFPROTO_ARP, cv);
 825	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
 826}
 827
 828static int compat_calc_entry(const struct arpt_entry *e,
 829			     const struct xt_table_info *info,
 830			     const void *base, struct xt_table_info *newinfo)
 831{
 832	const struct xt_entry_target *t;
 833	unsigned int entry_offset;
 834	int off, i, ret;
 835
 836	off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
 837	entry_offset = (void *)e - base;
 838
 839	t = arpt_get_target_c(e);
 840	off += xt_compat_target_offset(t->u.kernel.target);
 841	newinfo->size -= off;
 842	ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
 843	if (ret)
 844		return ret;
 845
 846	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
 847		if (info->hook_entry[i] &&
 848		    (e < (struct arpt_entry *)(base + info->hook_entry[i])))
 849			newinfo->hook_entry[i] -= off;
 850		if (info->underflow[i] &&
 851		    (e < (struct arpt_entry *)(base + info->underflow[i])))
 852			newinfo->underflow[i] -= off;
 853	}
 854	return 0;
 855}
 856
 857static int compat_table_info(const struct xt_table_info *info,
 858			     struct xt_table_info *newinfo)
 859{
 860	struct arpt_entry *iter;
 861	void *loc_cpu_entry;
 862	int ret;
 863
 864	if (!newinfo || !info)
 865		return -EINVAL;
 866
  867	/* we don't care about newinfo->entries[] */
 868	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
 869	newinfo->initial_entries = 0;
 870	loc_cpu_entry = info->entries[raw_smp_processor_id()];
 871	xt_compat_init_offsets(NFPROTO_ARP, info->number);
 872	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
 873		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
 874		if (ret != 0)
 875			return ret;
 876	}
 877	return 0;
 878}
 879#endif
 880
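     /*
      * ARPT_SO_GET_INFO: look up (and, if necessary, modprobe) the named
      * table and return its valid hooks, hook entry points, underflows,
      * entry count and size.
      */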
 881static int get_info(struct net *net, void __user *user,
 882                    const int *len, int compat)
 883{
 884	char name[XT_TABLE_MAXNAMELEN];
 885	struct xt_table *t;
 886	int ret;
 887
 888	if (*len != sizeof(struct arpt_getinfo)) {
  889		duprintf("length %u != %zu\n", *len,
 890			 sizeof(struct arpt_getinfo));
 891		return -EINVAL;
 892	}
 893
 894	if (copy_from_user(name, user, sizeof(name)) != 0)
 895		return -EFAULT;
 896
 897	name[XT_TABLE_MAXNAMELEN-1] = '\0';
 898#ifdef CONFIG_COMPAT
 899	if (compat)
 900		xt_compat_lock(NFPROTO_ARP);
 901#endif
 902	t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
 903				    "arptable_%s", name);
 904	if (t && !IS_ERR(t)) {
 905		struct arpt_getinfo info;
 906		const struct xt_table_info *private = t->private;
 907#ifdef CONFIG_COMPAT
 908		struct xt_table_info tmp;
 909
 910		if (compat) {
 911			ret = compat_table_info(private, &tmp);
 912			xt_compat_flush_offsets(NFPROTO_ARP);
 913			private = &tmp;
 914		}
 915#endif
 916		memset(&info, 0, sizeof(info));
 917		info.valid_hooks = t->valid_hooks;
 918		memcpy(info.hook_entry, private->hook_entry,
 919		       sizeof(info.hook_entry));
 920		memcpy(info.underflow, private->underflow,
 921		       sizeof(info.underflow));
 922		info.num_entries = private->number;
 923		info.size = private->size;
 924		strcpy(info.name, name);
 925
 926		if (copy_to_user(user, &info, *len) != 0)
 927			ret = -EFAULT;
 928		else
 929			ret = 0;
 930		xt_table_unlock(t);
 931		module_put(t->me);
 932	} else
 933		ret = t ? PTR_ERR(t) : -ENOENT;
 934#ifdef CONFIG_COMPAT
 935	if (compat)
 936		xt_compat_unlock(NFPROTO_ARP);
 937#endif
 938	return ret;
 939}
 940
 941static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
 942		       const int *len)
 943{
 944	int ret;
 945	struct arpt_get_entries get;
 946	struct xt_table *t;
 947
 948	if (*len < sizeof(get)) {
  949		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
 950		return -EINVAL;
 951	}
 952	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
 953		return -EFAULT;
 954	if (*len != sizeof(struct arpt_get_entries) + get.size) {
  955		duprintf("get_entries: %u != %zu\n", *len,
 956			 sizeof(struct arpt_get_entries) + get.size);
 957		return -EINVAL;
 958	}
 959
 960	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
 961	if (t && !IS_ERR(t)) {
 962		const struct xt_table_info *private = t->private;
 963
 964		duprintf("t->private->number = %u\n",
 965			 private->number);
 966		if (get.size == private->size)
 967			ret = copy_entries_to_user(private->size,
 968						   t, uptr->entrytable);
 969		else {
 970			duprintf("get_entries: I've got %u not %u!\n",
 971				 private->size, get.size);
 972			ret = -EAGAIN;
 973		}
 974		module_put(t->me);
 975		xt_table_unlock(t);
 976	} else
 977		ret = t ? PTR_ERR(t) : -ENOENT;
 978
 979	return ret;
 980}
 981
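     /*
      * Common path for the native and compat SO_SET_REPLACE calls: swap in
      * the new table info under the xt table lock, snapshot the old
      * counters, clean up the old entries and copy the counters back to
      * userspace.
      */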
 982static int __do_replace(struct net *net, const char *name,
 983			unsigned int valid_hooks,
 984			struct xt_table_info *newinfo,
 985			unsigned int num_counters,
 986			void __user *counters_ptr)
 987{
 988	int ret;
 989	struct xt_table *t;
 990	struct xt_table_info *oldinfo;
 991	struct xt_counters *counters;
 992	void *loc_cpu_old_entry;
 993	struct arpt_entry *iter;
 994
 995	ret = 0;
 996	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 997	if (!counters) {
 998		ret = -ENOMEM;
 999		goto out;
1000	}
1001
1002	t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
1003				    "arptable_%s", name);
1004	if (!t || IS_ERR(t)) {
1005		ret = t ? PTR_ERR(t) : -ENOENT;
1006		goto free_newinfo_counters_untrans;
1007	}
1008
1009	/* You lied! */
1010	if (valid_hooks != t->valid_hooks) {
1011		duprintf("Valid hook crap: %08X vs %08X\n",
1012			 valid_hooks, t->valid_hooks);
1013		ret = -EINVAL;
1014		goto put_module;
1015	}
1016
1017	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1018	if (!oldinfo)
1019		goto put_module;
1020
1021	/* Update module usage count based on number of rules */
1022	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1023		oldinfo->number, oldinfo->initial_entries, newinfo->number);
1024	if ((oldinfo->number > oldinfo->initial_entries) ||
1025	    (newinfo->number <= oldinfo->initial_entries))
1026		module_put(t->me);
1027	if ((oldinfo->number > oldinfo->initial_entries) &&
1028	    (newinfo->number <= oldinfo->initial_entries))
1029		module_put(t->me);
1030
1031	/* Get the old counters, and synchronize with replace */
1032	get_counters(oldinfo, counters);
1033
1034	/* Decrease module usage counts and free resource */
1035	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1036	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1037		cleanup_entry(iter);
1038
1039	xt_free_table_info(oldinfo);
1040	if (copy_to_user(counters_ptr, counters,
1041			 sizeof(struct xt_counters) * num_counters) != 0)
1042		ret = -EFAULT;
1043	vfree(counters);
1044	xt_table_unlock(t);
1045	return ret;
1046
1047 put_module:
1048	module_put(t->me);
1049	xt_table_unlock(t);
1050 free_newinfo_counters_untrans:
1051	vfree(counters);
1052 out:
1053	return ret;
1054}
1055
1056static int do_replace(struct net *net, const void __user *user,
1057                      unsigned int len)
1058{
1059	int ret;
1060	struct arpt_replace tmp;
1061	struct xt_table_info *newinfo;
1062	void *loc_cpu_entry;
1063	struct arpt_entry *iter;
1064
1065	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1066		return -EFAULT;
1067
1068	/* overflow check */
1069	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1070		return -ENOMEM;
1071	tmp.name[sizeof(tmp.name)-1] = 0;
1072
1073	newinfo = xt_alloc_table_info(tmp.size);
1074	if (!newinfo)
1075		return -ENOMEM;
1076
1077	/* choose the copy that is on our node/cpu */
1078	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1079	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1080			   tmp.size) != 0) {
1081		ret = -EFAULT;
1082		goto free_newinfo;
1083	}
1084
1085	ret = translate_table(newinfo, loc_cpu_entry, &tmp);
1086	if (ret != 0)
1087		goto free_newinfo;
1088
1089	duprintf("arp_tables: Translated table\n");
1090
1091	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1092			   tmp.num_counters, tmp.counters);
1093	if (ret)
1094		goto free_newinfo_untrans;
1095	return 0;
1096
1097 free_newinfo_untrans:
1098	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1099		cleanup_entry(iter);
1100 free_newinfo:
1101	xt_free_table_info(newinfo);
1102	return ret;
1103}
1104
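     /*
      * ARPT_SO_SET_ADD_COUNTERS: add the user-supplied byte/packet deltas
      * to this CPU's copy of the table, inside a recseq write section.
      */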
1105static int do_add_counters(struct net *net, const void __user *user,
1106			   unsigned int len, int compat)
1107{
1108	unsigned int i, curcpu;
1109	struct xt_counters_info tmp;
1110	struct xt_counters *paddc;
1111	unsigned int num_counters;
1112	const char *name;
1113	int size;
1114	void *ptmp;
1115	struct xt_table *t;
1116	const struct xt_table_info *private;
1117	int ret = 0;
1118	void *loc_cpu_entry;
1119	struct arpt_entry *iter;
1120	unsigned int addend;
1121#ifdef CONFIG_COMPAT
1122	struct compat_xt_counters_info compat_tmp;
1123
1124	if (compat) {
1125		ptmp = &compat_tmp;
1126		size = sizeof(struct compat_xt_counters_info);
1127	} else
1128#endif
1129	{
1130		ptmp = &tmp;
1131		size = sizeof(struct xt_counters_info);
1132	}
1133
1134	if (copy_from_user(ptmp, user, size) != 0)
1135		return -EFAULT;
1136
1137#ifdef CONFIG_COMPAT
1138	if (compat) {
1139		num_counters = compat_tmp.num_counters;
1140		name = compat_tmp.name;
1141	} else
1142#endif
1143	{
1144		num_counters = tmp.num_counters;
1145		name = tmp.name;
1146	}
1147
1148	if (len != size + num_counters * sizeof(struct xt_counters))
1149		return -EINVAL;
1150
1151	paddc = vmalloc(len - size);
1152	if (!paddc)
1153		return -ENOMEM;
1154
1155	if (copy_from_user(paddc, user + size, len - size) != 0) {
1156		ret = -EFAULT;
1157		goto free;
1158	}
1159
1160	t = xt_find_table_lock(net, NFPROTO_ARP, name);
1161	if (!t || IS_ERR(t)) {
1162		ret = t ? PTR_ERR(t) : -ENOENT;
1163		goto free;
1164	}
1165
1166	local_bh_disable();
1167	private = t->private;
1168	if (private->number != num_counters) {
1169		ret = -EINVAL;
1170		goto unlock_up_free;
1171	}
1172
1173	i = 0;
1174	/* Choose the copy that is on our node */
1175	curcpu = smp_processor_id();
1176	loc_cpu_entry = private->entries[curcpu];
1177	addend = xt_write_recseq_begin();
1178	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1179		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1180		++i;
1181	}
1182	xt_write_recseq_end(addend);
1183 unlock_up_free:
1184	local_bh_enable();
1185	xt_table_unlock(t);
1186	module_put(t->me);
1187 free:
1188	vfree(paddc);
1189
1190	return ret;
1191}
1192
1193#ifdef CONFIG_COMPAT
1194static inline void compat_release_entry(struct compat_arpt_entry *e)
1195{
1196	struct xt_entry_target *t;
1197
1198	t = compat_arpt_get_target(e);
1199	module_put(t->u.kernel.target->me);
1200}
1201
1202static inline int
1203check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
1204				  struct xt_table_info *newinfo,
1205				  unsigned int *size,
1206				  const unsigned char *base,
1207				  const unsigned char *limit,
1208				  const unsigned int *hook_entries,
1209				  const unsigned int *underflows,
1210				  const char *name)
1211{
1212	struct xt_entry_target *t;
1213	struct xt_target *target;
1214	unsigned int entry_offset;
1215	int ret, off, h;
1216
1217	duprintf("check_compat_entry_size_and_hooks %p\n", e);
1218	if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
1219	    (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) {
1220		duprintf("Bad offset %p, limit = %p\n", e, limit);
1221		return -EINVAL;
1222	}
1223
1224	if (e->next_offset < sizeof(struct compat_arpt_entry) +
1225			     sizeof(struct compat_xt_entry_target)) {
1226		duprintf("checking: element %p size %u\n",
1227			 e, e->next_offset);
1228		return -EINVAL;
1229	}
1230
 1231	/* For the purposes of check_entry, casting the compat entry is fine. */
1232	ret = check_entry((struct arpt_entry *)e, name);
1233	if (ret)
1234		return ret;
1235
1236	off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
1237	entry_offset = (void *)e - (void *)base;
1238
1239	t = compat_arpt_get_target(e);
1240	target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
1241					t->u.user.revision);
1242	if (IS_ERR(target)) {
1243		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1244			 t->u.user.name);
1245		ret = PTR_ERR(target);
1246		goto out;
1247	}
1248	t->u.kernel.target = target;
1249
1250	off += xt_compat_target_offset(target);
1251	*size += off;
1252	ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
1253	if (ret)
1254		goto release_target;
1255
1256	/* Check hooks & underflows */
1257	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
1258		if ((unsigned char *)e - base == hook_entries[h])
1259			newinfo->hook_entry[h] = hook_entries[h];
1260		if ((unsigned char *)e - base == underflows[h])
1261			newinfo->underflow[h] = underflows[h];
1262	}
1263
1264	/* Clear counters and comefrom */
1265	memset(&e->counters, 0, sizeof(e->counters));
1266	e->comefrom = 0;
1267	return 0;
1268
1269release_target:
1270	module_put(t->u.kernel.target->me);
1271out:
1272	return ret;
1273}
1274
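     /*
      * Expand one compat entry into native layout at *dstptr, converting
      * the target with xt_compat_target_from_user() and adjusting the
      * entry offsets and hook/underflow positions for the size difference.
      */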
1275static int
1276compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
1277			    unsigned int *size, const char *name,
1278			    struct xt_table_info *newinfo, unsigned char *base)
1279{
1280	struct xt_entry_target *t;
1281	struct xt_target *target;
1282	struct arpt_entry *de;
1283	unsigned int origsize;
1284	int ret, h;
1285
1286	ret = 0;
1287	origsize = *size;
1288	de = (struct arpt_entry *)*dstptr;
1289	memcpy(de, e, sizeof(struct arpt_entry));
1290	memcpy(&de->counters, &e->counters, sizeof(e->counters));
1291
1292	*dstptr += sizeof(struct arpt_entry);
1293	*size += sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
1294
1295	de->target_offset = e->target_offset - (origsize - *size);
1296	t = compat_arpt_get_target(e);
1297	target = t->u.kernel.target;
1298	xt_compat_target_from_user(t, dstptr, size);
1299
1300	de->next_offset = e->next_offset - (origsize - *size);
1301	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
1302		if ((unsigned char *)de - base < newinfo->hook_entry[h])
1303			newinfo->hook_entry[h] -= origsize - *size;
1304		if ((unsigned char *)de - base < newinfo->underflow[h])
1305			newinfo->underflow[h] -= origsize - *size;
1306	}
1307	return ret;
1308}
1309
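     /*
      * Convert a 32-bit userspace blob into the native layout: size-check
      * every compat entry, expand each one into a freshly allocated
      * xt_table_info, then re-run the chain and target checks on the
      * result.
      */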
1310static int translate_compat_table(const char *name,
1311				  unsigned int valid_hooks,
1312				  struct xt_table_info **pinfo,
1313				  void **pentry0,
1314				  unsigned int total_size,
1315				  unsigned int number,
1316				  unsigned int *hook_entries,
1317				  unsigned int *underflows)
1318{
1319	unsigned int i, j;
1320	struct xt_table_info *newinfo, *info;
1321	void *pos, *entry0, *entry1;
1322	struct compat_arpt_entry *iter0;
1323	struct arpt_entry *iter1;
1324	unsigned int size;
1325	int ret = 0;
1326
1327	info = *pinfo;
1328	entry0 = *pentry0;
1329	size = total_size;
1330	info->number = number;
1331
1332	/* Init all hooks to impossible value. */
1333	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
1334		info->hook_entry[i] = 0xFFFFFFFF;
1335		info->underflow[i] = 0xFFFFFFFF;
1336	}
1337
1338	duprintf("translate_compat_table: size %u\n", info->size);
1339	j = 0;
1340	xt_compat_lock(NFPROTO_ARP);
1341	xt_compat_init_offsets(NFPROTO_ARP, number);
1342	/* Walk through entries, checking offsets. */
1343	xt_entry_foreach(iter0, entry0, total_size) {
1344		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1345							entry0,
1346							entry0 + total_size,
1347							hook_entries,
1348							underflows,
1349							name);
1350		if (ret != 0)
1351			goto out_unlock;
1352		++j;
1353	}
1354
1355	ret = -EINVAL;
1356	if (j != number) {
1357		duprintf("translate_compat_table: %u not %u entries\n",
1358			 j, number);
1359		goto out_unlock;
1360	}
1361
1362	/* Check hooks all assigned */
1363	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
1364		/* Only hooks which are valid */
1365		if (!(valid_hooks & (1 << i)))
1366			continue;
1367		if (info->hook_entry[i] == 0xFFFFFFFF) {
1368			duprintf("Invalid hook entry %u %u\n",
1369				 i, hook_entries[i]);
1370			goto out_unlock;
1371		}
1372		if (info->underflow[i] == 0xFFFFFFFF) {
1373			duprintf("Invalid underflow %u %u\n",
1374				 i, underflows[i]);
1375			goto out_unlock;
1376		}
1377	}
1378
1379	ret = -ENOMEM;
1380	newinfo = xt_alloc_table_info(size);
1381	if (!newinfo)
1382		goto out_unlock;
1383
1384	newinfo->number = number;
1385	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
1386		newinfo->hook_entry[i] = info->hook_entry[i];
1387		newinfo->underflow[i] = info->underflow[i];
1388	}
1389	entry1 = newinfo->entries[raw_smp_processor_id()];
1390	pos = entry1;
1391	size = total_size;
1392	xt_entry_foreach(iter0, entry0, total_size) {
1393		ret = compat_copy_entry_from_user(iter0, &pos, &size,
1394						  name, newinfo, entry1);
1395		if (ret != 0)
1396			break;
1397	}
1398	xt_compat_flush_offsets(NFPROTO_ARP);
1399	xt_compat_unlock(NFPROTO_ARP);
1400	if (ret)
1401		goto free_newinfo;
1402
1403	ret = -ELOOP;
1404	if (!mark_source_chains(newinfo, valid_hooks, entry1))
1405		goto free_newinfo;
1406
1407	i = 0;
1408	xt_entry_foreach(iter1, entry1, newinfo->size) {
1409		ret = check_target(iter1, name);
1410		if (ret != 0)
1411			break;
1412		++i;
1413		if (strcmp(arpt_get_target(iter1)->u.user.name,
1414		    XT_ERROR_TARGET) == 0)
1415			++newinfo->stacksize;
1416	}
1417	if (ret) {
1418		/*
1419		 * The first i matches need cleanup_entry (calls ->destroy)
1420		 * because they had called ->check already. The other j-i
1421		 * entries need only release.
1422		 */
1423		int skip = i;
1424		j -= i;
1425		xt_entry_foreach(iter0, entry0, newinfo->size) {
1426			if (skip-- > 0)
1427				continue;
1428			if (j-- == 0)
1429				break;
1430			compat_release_entry(iter0);
1431		}
1432		xt_entry_foreach(iter1, entry1, newinfo->size) {
1433			if (i-- == 0)
1434				break;
1435			cleanup_entry(iter1);
1436		}
1437		xt_free_table_info(newinfo);
1438		return ret;
1439	}
1440
1441	/* And one copy for every other CPU */
1442	for_each_possible_cpu(i)
1443		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1444			memcpy(newinfo->entries[i], entry1, newinfo->size);
1445
1446	*pinfo = newinfo;
1447	*pentry0 = entry1;
1448	xt_free_table_info(info);
1449	return 0;
1450
1451free_newinfo:
1452	xt_free_table_info(newinfo);
1453out:
1454	xt_entry_foreach(iter0, entry0, total_size) {
1455		if (j-- == 0)
1456			break;
1457		compat_release_entry(iter0);
1458	}
1459	return ret;
1460out_unlock:
1461	xt_compat_flush_offsets(NFPROTO_ARP);
1462	xt_compat_unlock(NFPROTO_ARP);
1463	goto out;
1464}
1465
1466struct compat_arpt_replace {
1467	char				name[XT_TABLE_MAXNAMELEN];
1468	u32				valid_hooks;
1469	u32				num_entries;
1470	u32				size;
1471	u32				hook_entry[NF_ARP_NUMHOOKS];
1472	u32				underflow[NF_ARP_NUMHOOKS];
1473	u32				num_counters;
1474	compat_uptr_t			counters;
1475	struct compat_arpt_entry	entries[0];
1476};
1477
1478static int compat_do_replace(struct net *net, void __user *user,
1479			     unsigned int len)
1480{
1481	int ret;
1482	struct compat_arpt_replace tmp;
1483	struct xt_table_info *newinfo;
1484	void *loc_cpu_entry;
1485	struct arpt_entry *iter;
1486
1487	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1488		return -EFAULT;
1489
1490	/* overflow check */
1491	if (tmp.size >= INT_MAX / num_possible_cpus())
1492		return -ENOMEM;
1493	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1494		return -ENOMEM;
1495	tmp.name[sizeof(tmp.name)-1] = 0;
1496
1497	newinfo = xt_alloc_table_info(tmp.size);
1498	if (!newinfo)
1499		return -ENOMEM;
1500
1501	/* choose the copy that is on our node/cpu */
1502	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1503	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) {
1504		ret = -EFAULT;
1505		goto free_newinfo;
1506	}
1507
1508	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1509				     &newinfo, &loc_cpu_entry, tmp.size,
1510				     tmp.num_entries, tmp.hook_entry,
1511				     tmp.underflow);
1512	if (ret != 0)
1513		goto free_newinfo;
1514
1515	duprintf("compat_do_replace: Translated table\n");
1516
1517	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1518			   tmp.num_counters, compat_ptr(tmp.counters));
1519	if (ret)
1520		goto free_newinfo_untrans;
1521	return 0;
1522
1523 free_newinfo_untrans:
1524	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1525		cleanup_entry(iter);
1526 free_newinfo:
1527	xt_free_table_info(newinfo);
1528	return ret;
1529}
1530
1531static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user,
1532				  unsigned int len)
1533{
1534	int ret;
1535
1536	if (!capable(CAP_NET_ADMIN))
1537		return -EPERM;
1538
1539	switch (cmd) {
1540	case ARPT_SO_SET_REPLACE:
1541		ret = compat_do_replace(sock_net(sk), user, len);
1542		break;
1543
1544	case ARPT_SO_SET_ADD_COUNTERS:
1545		ret = do_add_counters(sock_net(sk), user, len, 1);
1546		break;
1547
1548	default:
1549		duprintf("do_arpt_set_ctl:  unknown request %i\n", cmd);
1550		ret = -EINVAL;
1551	}
1552
1553	return ret;
1554}
1555
1556static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
1557				     compat_uint_t *size,
1558				     struct xt_counters *counters,
1559				     unsigned int i)
1560{
1561	struct xt_entry_target *t;
1562	struct compat_arpt_entry __user *ce;
1563	u_int16_t target_offset, next_offset;
1564	compat_uint_t origsize;
1565	int ret;
1566
1567	origsize = *size;
1568	ce = (struct compat_arpt_entry __user *)*dstptr;
1569	if (copy_to_user(ce, e, sizeof(struct arpt_entry)) != 0 ||
1570	    copy_to_user(&ce->counters, &counters[i],
1571	    sizeof(counters[i])) != 0)
1572		return -EFAULT;
1573
1574	*dstptr += sizeof(struct compat_arpt_entry);
1575	*size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
1576
1577	target_offset = e->target_offset - (origsize - *size);
1578
1579	t = arpt_get_target(e);
1580	ret = xt_compat_target_to_user(t, dstptr, size);
1581	if (ret)
1582		return ret;
1583	next_offset = e->next_offset - (origsize - *size);
1584	if (put_user(target_offset, &ce->target_offset) != 0 ||
1585	    put_user(next_offset, &ce->next_offset) != 0)
1586		return -EFAULT;
1587	return 0;
1588}
1589
1590static int compat_copy_entries_to_user(unsigned int total_size,
1591				       struct xt_table *table,
1592				       void __user *userptr)
1593{
1594	struct xt_counters *counters;
1595	const struct xt_table_info *private = table->private;
1596	void __user *pos;
1597	unsigned int size;
1598	int ret = 0;
1599	void *loc_cpu_entry;
1600	unsigned int i = 0;
1601	struct arpt_entry *iter;
1602
1603	counters = alloc_counters(table);
1604	if (IS_ERR(counters))
1605		return PTR_ERR(counters);
1606
1607	/* choose the copy on our node/cpu */
1608	loc_cpu_entry = private->entries[raw_smp_processor_id()];
1609	pos = userptr;
1610	size = total_size;
1611	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1612		ret = compat_copy_entry_to_user(iter, &pos,
1613						&size, counters, i++);
1614		if (ret != 0)
1615			break;
1616	}
1617	vfree(counters);
1618	return ret;
1619}
1620
1621struct compat_arpt_get_entries {
1622	char name[XT_TABLE_MAXNAMELEN];
1623	compat_uint_t size;
1624	struct compat_arpt_entry entrytable[0];
1625};
1626
1627static int compat_get_entries(struct net *net,
1628			      struct compat_arpt_get_entries __user *uptr,
1629			      int *len)
1630{
1631	int ret;
1632	struct compat_arpt_get_entries get;
1633	struct xt_table *t;
1634
1635	if (*len < sizeof(get)) {
1636		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1637		return -EINVAL;
1638	}
1639	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1640		return -EFAULT;
1641	if (*len != sizeof(struct compat_arpt_get_entries) + get.size) {
1642		duprintf("compat_get_entries: %u != %zu\n",
1643			 *len, sizeof(get) + get.size);
1644		return -EINVAL;
1645	}
1646
1647	xt_compat_lock(NFPROTO_ARP);
1648	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
1649	if (t && !IS_ERR(t)) {
1650		const struct xt_table_info *private = t->private;
1651		struct xt_table_info info;
1652
1653		duprintf("t->private->number = %u\n", private->number);
1654		ret = compat_table_info(private, &info);
1655		if (!ret && get.size == info.size) {
1656			ret = compat_copy_entries_to_user(private->size,
1657							  t, uptr->entrytable);
1658		} else if (!ret) {
1659			duprintf("compat_get_entries: I've got %u not %u!\n",
1660				 private->size, get.size);
1661			ret = -EAGAIN;
1662		}
1663		xt_compat_flush_offsets(NFPROTO_ARP);
1664		module_put(t->me);
1665		xt_table_unlock(t);
1666	} else
1667		ret = t ? PTR_ERR(t) : -ENOENT;
1668
1669	xt_compat_unlock(NFPROTO_ARP);
1670	return ret;
1671}
1672
1673static int do_arpt_get_ctl(struct sock *, int, void __user *, int *);
1674
1675static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
1676				  int *len)
1677{
1678	int ret;
1679
1680	if (!capable(CAP_NET_ADMIN))
1681		return -EPERM;
1682
1683	switch (cmd) {
1684	case ARPT_SO_GET_INFO:
1685		ret = get_info(sock_net(sk), user, len, 1);
1686		break;
1687	case ARPT_SO_GET_ENTRIES:
1688		ret = compat_get_entries(sock_net(sk), user, len);
1689		break;
1690	default:
1691		ret = do_arpt_get_ctl(sk, cmd, user, len);
1692	}
1693	return ret;
1694}
1695#endif
1696
1697static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1698{
1699	int ret;
1700
1701	if (!capable(CAP_NET_ADMIN))
1702		return -EPERM;
1703
1704	switch (cmd) {
1705	case ARPT_SO_SET_REPLACE:
1706		ret = do_replace(sock_net(sk), user, len);
1707		break;
1708
1709	case ARPT_SO_SET_ADD_COUNTERS:
1710		ret = do_add_counters(sock_net(sk), user, len, 0);
1711		break;
1712
1713	default:
1714		duprintf("do_arpt_set_ctl:  unknown request %i\n", cmd);
1715		ret = -EINVAL;
1716	}
1717
1718	return ret;
1719}
1720
1721static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1722{
1723	int ret;
1724
1725	if (!capable(CAP_NET_ADMIN))
1726		return -EPERM;
1727
1728	switch (cmd) {
1729	case ARPT_SO_GET_INFO:
1730		ret = get_info(sock_net(sk), user, len, 0);
1731		break;
1732
1733	case ARPT_SO_GET_ENTRIES:
1734		ret = get_entries(sock_net(sk), user, len);
1735		break;
1736
1737	case ARPT_SO_GET_REVISION_TARGET: {
1738		struct xt_get_revision rev;
1739
1740		if (*len != sizeof(rev)) {
1741			ret = -EINVAL;
1742			break;
1743		}
1744		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
1745			ret = -EFAULT;
1746			break;
1747		}
1748		rev.name[sizeof(rev.name)-1] = 0;
1749
1750		try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name,
1751							 rev.revision, 1, &ret),
1752					"arpt_%s", rev.name);
1753		break;
1754	}
1755
1756	default:
1757		duprintf("do_arpt_get_ctl: unknown request %i\n", cmd);
1758		ret = -EINVAL;
1759	}
1760
1761	return ret;
1762}
1763
1764struct xt_table *arpt_register_table(struct net *net,
1765				     const struct xt_table *table,
1766				     const struct arpt_replace *repl)
1767{
1768	int ret;
1769	struct xt_table_info *newinfo;
1770	struct xt_table_info bootstrap = {0};
1771	void *loc_cpu_entry;
1772	struct xt_table *new_table;
1773
1774	newinfo = xt_alloc_table_info(repl->size);
1775	if (!newinfo) {
1776		ret = -ENOMEM;
1777		goto out;
1778	}
1779
1780	/* choose the copy on our node/cpu */
1781	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1782	memcpy(loc_cpu_entry, repl->entries, repl->size);
1783
1784	ret = translate_table(newinfo, loc_cpu_entry, repl);
1785	duprintf("arpt_register_table: translate table gives %d\n", ret);
1786	if (ret != 0)
1787		goto out_free;
1788
1789	new_table = xt_register_table(net, table, &bootstrap, newinfo);
1790	if (IS_ERR(new_table)) {
1791		ret = PTR_ERR(new_table);
1792		goto out_free;
1793	}
1794	return new_table;
1795
1796out_free:
1797	xt_free_table_info(newinfo);
1798out:
1799	return ERR_PTR(ret);
1800}
1801
1802void arpt_unregister_table(struct xt_table *table)
1803{
1804	struct xt_table_info *private;
1805	void *loc_cpu_entry;
1806	struct module *table_owner = table->me;
1807	struct arpt_entry *iter;
1808
1809	private = xt_unregister_table(table);
1810
1811	/* Decrease module usage counts and free resources */
1812	loc_cpu_entry = private->entries[raw_smp_processor_id()];
1813	xt_entry_foreach(iter, loc_cpu_entry, private->size)
1814		cleanup_entry(iter);
1815	if (private->number > private->initial_entries)
1816		module_put(table_owner);
1817	xt_free_table_info(private);
1818}
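     /*
      * Illustrative sketch only, not taken from this file: a table module
      * such as arptable_filter typically builds its initial ruleset with
      * arpt_alloc_initial_table(), registers it per net namespace through
      * arpt_register_table(), and calls arpt_do_table() from its ARP hook
      * functions.  The names below (packet_filter, FILTER_VALID_HOOKS,
      * hooknum, in, out) are placeholders:
      *
      *	static const struct xt_table packet_filter = {
      *		.name		= "filter",
      *		.valid_hooks	= FILTER_VALID_HOOKS,
      *		.me		= THIS_MODULE,
      *		.af		= NFPROTO_ARP,
      *	};
      *
      *	repl = arpt_alloc_initial_table(&packet_filter);
      *	table = arpt_register_table(net, &packet_filter, repl);
      *	kfree(repl);
      *
      *	and, from an NF_ARP_IN/OUT/FORWARD hook function:
      *
      *	verdict = arpt_do_table(skb, hooknum, in, out, table);
      */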
1819
1820/* The built-in targets: standard (NULL) and error. */
1821static struct xt_target arpt_builtin_tg[] __read_mostly = {
1822	{
1823		.name             = XT_STANDARD_TARGET,
1824		.targetsize       = sizeof(int),
1825		.family           = NFPROTO_ARP,
1826#ifdef CONFIG_COMPAT
1827		.compatsize       = sizeof(compat_int_t),
1828		.compat_from_user = compat_standard_from_user,
1829		.compat_to_user   = compat_standard_to_user,
1830#endif
1831	},
1832	{
1833		.name             = XT_ERROR_TARGET,
1834		.target           = arpt_error,
1835		.targetsize       = XT_FUNCTION_MAXNAMELEN,
1836		.family           = NFPROTO_ARP,
1837	},
1838};
1839
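     /*
      * Userspace (arptables) drives everything through get/setsockopt
      * calls in the ARPT_BASE_CTL range on PF_INET sockets; they are
      * dispatched to the do_arpt_*_ctl handlers above.
      */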
1840static struct nf_sockopt_ops arpt_sockopts = {
1841	.pf		= PF_INET,
1842	.set_optmin	= ARPT_BASE_CTL,
1843	.set_optmax	= ARPT_SO_SET_MAX+1,
1844	.set		= do_arpt_set_ctl,
1845#ifdef CONFIG_COMPAT
1846	.compat_set	= compat_do_arpt_set_ctl,
1847#endif
1848	.get_optmin	= ARPT_BASE_CTL,
1849	.get_optmax	= ARPT_SO_GET_MAX+1,
1850	.get		= do_arpt_get_ctl,
1851#ifdef CONFIG_COMPAT
1852	.compat_get	= compat_do_arpt_get_ctl,
1853#endif
1854	.owner		= THIS_MODULE,
1855};
1856
1857static int __net_init arp_tables_net_init(struct net *net)
1858{
1859	return xt_proto_init(net, NFPROTO_ARP);
1860}
1861
1862static void __net_exit arp_tables_net_exit(struct net *net)
1863{
1864	xt_proto_fini(net, NFPROTO_ARP);
1865}
1866
1867static struct pernet_operations arp_tables_net_ops = {
1868	.init = arp_tables_net_init,
1869	.exit = arp_tables_net_exit,
1870};
1871
1872static int __init arp_tables_init(void)
1873{
1874	int ret;
1875
1876	ret = register_pernet_subsys(&arp_tables_net_ops);
1877	if (ret < 0)
1878		goto err1;
1879
1880	/* No one else will be downing sem now, so we won't sleep */
1881	ret = xt_register_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
1882	if (ret < 0)
1883		goto err2;
1884
1885	/* Register setsockopt */
1886	ret = nf_register_sockopt(&arpt_sockopts);
1887	if (ret < 0)
1888		goto err4;
1889
1890	printk(KERN_INFO "arp_tables: (C) 2002 David S. Miller\n");
1891	return 0;
1892
1893err4:
1894	xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
1895err2:
1896	unregister_pernet_subsys(&arp_tables_net_ops);
1897err1:
1898	return ret;
1899}
1900
1901static void __exit arp_tables_fini(void)
1902{
1903	nf_unregister_sockopt(&arpt_sockopts);
1904	xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
1905	unregister_pernet_subsys(&arp_tables_net_ops);
1906}
1907
1908EXPORT_SYMBOL(arpt_register_table);
1909EXPORT_SYMBOL(arpt_unregister_table);
1910EXPORT_SYMBOL(arpt_do_table);
1911
1912module_init(arp_tables_init);
1913module_exit(arp_tables_fini);