1/*
2 * x_tables core - Backend for {ip,ip6,arp}_tables
3 *
4 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
5 *
6 * Based on existing ip_tables code which is
7 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
8 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/socket.h>
19#include <linux/net.h>
20#include <linux/proc_fs.h>
21#include <linux/seq_file.h>
22#include <linux/string.h>
23#include <linux/vmalloc.h>
24#include <linux/mutex.h>
25#include <linux/mm.h>
26#include <linux/slab.h>
27#include <linux/audit.h>
28#include <net/net_namespace.h>
29
30#include <linux/netfilter/x_tables.h>
31#include <linux/netfilter_arp.h>
32#include <linux/netfilter_ipv4/ip_tables.h>
33#include <linux/netfilter_ipv6/ip6_tables.h>
34#include <linux/netfilter_arp/arp_tables.h>
35
36MODULE_LICENSE("GPL");
37MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
38MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
39
40#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
41
42struct compat_delta {
43 unsigned int offset; /* offset in kernel */
44 int delta; /* delta in 32bit user land */
45};
46
47struct xt_af {
48 struct mutex mutex;
49 struct list_head match;
50 struct list_head target;
51#ifdef CONFIG_COMPAT
52 struct mutex compat_mutex;
53 struct compat_delta *compat_tab;
54 unsigned int number; /* number of slots in compat_tab[] */
55 unsigned int cur; /* number of used slots in compat_tab[] */
56#endif
57};
58
59static struct xt_af *xt;
60
61static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
62 [NFPROTO_UNSPEC] = "x",
63 [NFPROTO_IPV4] = "ip",
64 [NFPROTO_ARP] = "arp",
65 [NFPROTO_BRIDGE] = "eb",
66 [NFPROTO_IPV6] = "ip6",
67};
68
69/* Allow this many total (re)entries. */
70static const unsigned int xt_jumpstack_multiplier = 2;
71
72/* Registration hooks for targets. */
73int
74xt_register_target(struct xt_target *target)
75{
76 u_int8_t af = target->family;
77 int ret;
78
79 ret = mutex_lock_interruptible(&xt[af].mutex);
80 if (ret != 0)
81 return ret;
82 list_add(&target->list, &xt[af].target);
83 mutex_unlock(&xt[af].mutex);
84 return ret;
85}
86EXPORT_SYMBOL(xt_register_target);
87
88void
89xt_unregister_target(struct xt_target *target)
90{
91 u_int8_t af = target->family;
92
93 mutex_lock(&xt[af].mutex);
94 list_del(&target->list);
95 mutex_unlock(&xt[af].mutex);
96}
97EXPORT_SYMBOL(xt_unregister_target);
98
99int
100xt_register_targets(struct xt_target *target, unsigned int n)
101{
102 unsigned int i;
103 int err = 0;
104
105 for (i = 0; i < n; i++) {
106 err = xt_register_target(&target[i]);
107 if (err)
108 goto err;
109 }
110 return err;
111
112err:
113 if (i > 0)
114 xt_unregister_targets(target, i);
115 return err;
116}
117EXPORT_SYMBOL(xt_register_targets);
118
119void
120xt_unregister_targets(struct xt_target *target, unsigned int n)
121{
122 while (n-- > 0)
123 xt_unregister_target(&target[n]);
124}
125EXPORT_SYMBOL(xt_unregister_targets);
126
127int
128xt_register_match(struct xt_match *match)
129{
130 u_int8_t af = match->family;
131 int ret;
132
133 ret = mutex_lock_interruptible(&xt[af].mutex);
134 if (ret != 0)
135 return ret;
136
137 list_add(&match->list, &xt[af].match);
138 mutex_unlock(&xt[af].mutex);
139
140 return ret;
141}
142EXPORT_SYMBOL(xt_register_match);
143
144void
145xt_unregister_match(struct xt_match *match)
146{
147 u_int8_t af = match->family;
148
149 mutex_lock(&xt[af].mutex);
150 list_del(&match->list);
151 mutex_unlock(&xt[af].mutex);
152}
153EXPORT_SYMBOL(xt_unregister_match);
154
155int
156xt_register_matches(struct xt_match *match, unsigned int n)
157{
158 unsigned int i;
159 int err = 0;
160
161 for (i = 0; i < n; i++) {
162 err = xt_register_match(&match[i]);
163 if (err)
164 goto err;
165 }
166 return err;
167
168err:
169 if (i > 0)
170 xt_unregister_matches(match, i);
171 return err;
172}
173EXPORT_SYMBOL(xt_register_matches);
174
175void
176xt_unregister_matches(struct xt_match *match, unsigned int n)
177{
178 while (n-- > 0)
179 xt_unregister_match(&match[n]);
180}
181EXPORT_SYMBOL(xt_unregister_matches);
182
183
184/*
185 * These are weird, but module loading must not be done with mutex
186 * held (since they will register), and we have to have a single
187 * function to use.
188 */
189
190/* Find match, grabs ref. Returns ERR_PTR() on error. */
191struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
192{
193 struct xt_match *m;
194 int err = -ENOENT;
195
196 if (mutex_lock_interruptible(&xt[af].mutex) != 0)
197 return ERR_PTR(-EINTR);
198
199 list_for_each_entry(m, &xt[af].match, list) {
200 if (strcmp(m->name, name) == 0) {
201 if (m->revision == revision) {
202 if (try_module_get(m->me)) {
203 mutex_unlock(&xt[af].mutex);
204 return m;
205 }
206 } else
207 err = -EPROTOTYPE; /* Found something. */
208 }
209 }
210 mutex_unlock(&xt[af].mutex);
211
212 if (af != NFPROTO_UNSPEC)
213 /* Try searching again in the family-independent list */
214 return xt_find_match(NFPROTO_UNSPEC, name, revision);
215
216 return ERR_PTR(err);
217}
218EXPORT_SYMBOL(xt_find_match);
219
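/*
 * Like xt_find_match(), but if the extension is not registered yet,
 * try to load the "<prefix>t_<name>" module via request_module() and
 * look the match up a second time.
 */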
220struct xt_match *
221xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
222{
223 struct xt_match *match;
224
225 match = xt_find_match(nfproto, name, revision);
226 if (IS_ERR(match)) {
227 request_module("%st_%s", xt_prefix[nfproto], name);
228 match = xt_find_match(nfproto, name, revision);
229 }
230
231 return match;
232}
233EXPORT_SYMBOL_GPL(xt_request_find_match);
234
235/* Find target, grabs ref. Returns ERR_PTR() on error. */
236struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
237{
238 struct xt_target *t;
239 int err = -ENOENT;
240
241 if (mutex_lock_interruptible(&xt[af].mutex) != 0)
242 return ERR_PTR(-EINTR);
243
244 list_for_each_entry(t, &xt[af].target, list) {
245 if (strcmp(t->name, name) == 0) {
246 if (t->revision == revision) {
247 if (try_module_get(t->me)) {
248 mutex_unlock(&xt[af].mutex);
249 return t;
250 }
251 } else
252 err = -EPROTOTYPE; /* Found something. */
253 }
254 }
255 mutex_unlock(&xt[af].mutex);
256
257 if (af != NFPROTO_UNSPEC)
258 /* Try searching again in the family-independent list */
259 return xt_find_target(NFPROTO_UNSPEC, name, revision);
260
261 return ERR_PTR(err);
262}
263EXPORT_SYMBOL(xt_find_target);
264
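/*
 * Like xt_find_target(), but attempts to auto-load the extension
 * module via request_module() before retrying the lookup.
 */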
265struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
266{
267 struct xt_target *target;
268
269 target = xt_find_target(af, name, revision);
270 if (IS_ERR(target)) {
271 request_module("%st_%s", xt_prefix[af], name);
272 target = xt_find_target(af, name, revision);
273 }
274
275 return target;
276}
277EXPORT_SYMBOL_GPL(xt_request_find_target);
278
279static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
280{
281 const struct xt_match *m;
282 int have_rev = 0;
283
284 list_for_each_entry(m, &xt[af].match, list) {
285 if (strcmp(m->name, name) == 0) {
286 if (m->revision > *bestp)
287 *bestp = m->revision;
288 if (m->revision == revision)
289 have_rev = 1;
290 }
291 }
292
293 if (af != NFPROTO_UNSPEC && !have_rev)
294 return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
295
296 return have_rev;
297}
298
299static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
300{
301 const struct xt_target *t;
302 int have_rev = 0;
303
304 list_for_each_entry(t, &xt[af].target, list) {
305 if (strcmp(t->name, name) == 0) {
306 if (t->revision > *bestp)
307 *bestp = t->revision;
308 if (t->revision == revision)
309 have_rev = 1;
310 }
311 }
312
313 if (af != NFPROTO_UNSPEC && !have_rev)
314 return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
315
316 return have_rev;
317}
318
319/* Returns true or false (if no such extension at all) */
320int xt_find_revision(u8 af, const char *name, u8 revision, int target,
321 int *err)
322{
323 int have_rev, best = -1;
324
325 if (mutex_lock_interruptible(&xt[af].mutex) != 0) {
326 *err = -EINTR;
327 return 1;
328 }
329 if (target == 1)
330 have_rev = target_revfn(af, name, revision, &best);
331 else
332 have_rev = match_revfn(af, name, revision, &best);
333 mutex_unlock(&xt[af].mutex);
334
335 /* Nothing at all? Return 0 to try loading module. */
336 if (best == -1) {
337 *err = -ENOENT;
338 return 0;
339 }
340
341 *err = best;
342 if (!have_rev)
343 *err = -EPROTONOSUPPORT;
344 return 1;
345}
346EXPORT_SYMBOL_GPL(xt_find_revision);
347
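/*
 * Render the hook bitmask as a '/'-separated list of hook names
 * (e.g. "INPUT/FORWARD") into @buf; used for the error messages in
 * xt_check_match()/xt_check_target() below.
 */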
348static char *textify_hooks(char *buf, size_t size, unsigned int mask)
349{
350 static const char *const names[] = {
351 "PREROUTING", "INPUT", "FORWARD",
352 "OUTPUT", "POSTROUTING", "BROUTING",
353 };
354 unsigned int i;
355 char *p = buf;
356 bool np = false;
357 int res;
358
359 *p = '\0';
360 for (i = 0; i < ARRAY_SIZE(names); ++i) {
361 if (!(mask & (1 << i)))
362 continue;
363 res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
364 if (res > 0) {
365 size -= res;
366 p += res;
367 }
368 np = true;
369 }
370
371 return buf;
372}
373
374int xt_check_match(struct xt_mtchk_param *par,
375 unsigned int size, u_int8_t proto, bool inv_proto)
376{
377 int ret;
378
379 if (XT_ALIGN(par->match->matchsize) != size &&
380 par->match->matchsize != -1) {
381 /*
382 * ebt_among is exempt from centralized matchsize checking
383 * because it uses a dynamic-size data set.
384 */
385 pr_err("%s_tables: %s.%u match: invalid size "
386 "%u (kernel) != (user) %u\n",
387 xt_prefix[par->family], par->match->name,
388 par->match->revision,
389 XT_ALIGN(par->match->matchsize), size);
390 return -EINVAL;
391 }
392 if (par->match->table != NULL &&
393 strcmp(par->match->table, par->table) != 0) {
394 pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
395 xt_prefix[par->family], par->match->name,
396 par->match->table, par->table);
397 return -EINVAL;
398 }
399 if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
400 char used[64], allow[64];
401
402 pr_err("%s_tables: %s match: used from hooks %s, but only "
403 "valid from %s\n",
404 xt_prefix[par->family], par->match->name,
405 textify_hooks(used, sizeof(used), par->hook_mask),
406 textify_hooks(allow, sizeof(allow), par->match->hooks));
407 return -EINVAL;
408 }
409 if (par->match->proto && (par->match->proto != proto || inv_proto)) {
410 pr_err("%s_tables: %s match: only valid for protocol %u\n",
411 xt_prefix[par->family], par->match->name,
412 par->match->proto);
413 return -EINVAL;
414 }
415 if (par->match->checkentry != NULL) {
416 ret = par->match->checkentry(par);
417 if (ret < 0)
418 return ret;
419 else if (ret > 0)
420 /* Flag up potential errors. */
421 return -EIO;
422 }
423 return 0;
424}
425EXPORT_SYMBOL_GPL(xt_check_match);
426
427#ifdef CONFIG_COMPAT
428int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
429{
430 struct xt_af *xp = &xt[af];
431
432 if (!xp->compat_tab) {
433 if (!xp->number)
434 return -EINVAL;
435 xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
436 if (!xp->compat_tab)
437 return -ENOMEM;
438 xp->cur = 0;
439 }
440
441 if (xp->cur >= xp->number)
442 return -EINVAL;
443
444 if (xp->cur)
445 delta += xp->compat_tab[xp->cur - 1].delta;
446 xp->compat_tab[xp->cur].offset = offset;
447 xp->compat_tab[xp->cur].delta = delta;
448 xp->cur++;
449 return 0;
450}
451EXPORT_SYMBOL_GPL(xt_compat_add_offset);
452
453void xt_compat_flush_offsets(u_int8_t af)
454{
455 if (xt[af].compat_tab) {
456 vfree(xt[af].compat_tab);
457 xt[af].compat_tab = NULL;
458 xt[af].number = 0;
459 xt[af].cur = 0;
460 }
461}
462EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
463
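/*
 * Binary-search compat_tab[] and return the delta accumulated by all
 * entries whose offset lies below @offset (0 if there are none).
 */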
464int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
465{
466 struct compat_delta *tmp = xt[af].compat_tab;
467 int mid, left = 0, right = xt[af].cur - 1;
468
469 while (left <= right) {
470 mid = (left + right) >> 1;
471 if (offset > tmp[mid].offset)
472 left = mid + 1;
473 else if (offset < tmp[mid].offset)
474 right = mid - 1;
475 else
476 return mid ? tmp[mid - 1].delta : 0;
477 }
478 return left ? tmp[left - 1].delta : 0;
479}
480EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
481
482void xt_compat_init_offsets(u_int8_t af, unsigned int number)
483{
484 xt[af].number = number;
485 xt[af].cur = 0;
486}
487EXPORT_SYMBOL(xt_compat_init_offsets);
488
489int xt_compat_match_offset(const struct xt_match *match)
490{
491 u_int16_t csize = match->compatsize ? : match->matchsize;
492 return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
493}
494EXPORT_SYMBOL_GPL(xt_compat_match_offset);
495
496int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
497 unsigned int *size)
498{
499 const struct xt_match *match = m->u.kernel.match;
500 struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
501 int pad, off = xt_compat_match_offset(match);
502 u_int16_t msize = cm->u.user.match_size;
503
504 m = *dstptr;
505 memcpy(m, cm, sizeof(*cm));
506 if (match->compat_from_user)
507 match->compat_from_user(m->data, cm->data);
508 else
509 memcpy(m->data, cm->data, msize - sizeof(*cm));
510 pad = XT_ALIGN(match->matchsize) - match->matchsize;
511 if (pad > 0)
512 memset(m->data + match->matchsize, 0, pad);
513
514 msize += off;
515 m->u.user.match_size = msize;
516
517 *size += off;
518 *dstptr += msize;
519 return 0;
520}
521EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
522
523int xt_compat_match_to_user(const struct xt_entry_match *m,
524 void __user **dstptr, unsigned int *size)
525{
526 const struct xt_match *match = m->u.kernel.match;
527 struct compat_xt_entry_match __user *cm = *dstptr;
528 int off = xt_compat_match_offset(match);
529 u_int16_t msize = m->u.user.match_size - off;
530
531 if (copy_to_user(cm, m, sizeof(*cm)) ||
532 put_user(msize, &cm->u.user.match_size) ||
533 copy_to_user(cm->u.user.name, m->u.kernel.match->name,
534 strlen(m->u.kernel.match->name) + 1))
535 return -EFAULT;
536
537 if (match->compat_to_user) {
538 if (match->compat_to_user((void __user *)cm->data, m->data))
539 return -EFAULT;
540 } else {
541 if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
542 return -EFAULT;
543 }
544
545 *size -= off;
546 *dstptr += msize;
547 return 0;
548}
549EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
550#endif /* CONFIG_COMPAT */
551
552int xt_check_target(struct xt_tgchk_param *par,
553 unsigned int size, u_int8_t proto, bool inv_proto)
554{
555 int ret;
556
557 if (XT_ALIGN(par->target->targetsize) != size) {
558 pr_err("%s_tables: %s.%u target: invalid size "
559 "%u (kernel) != (user) %u\n",
560 xt_prefix[par->family], par->target->name,
561 par->target->revision,
562 XT_ALIGN(par->target->targetsize), size);
563 return -EINVAL;
564 }
565 if (par->target->table != NULL &&
566 strcmp(par->target->table, par->table) != 0) {
567 pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
568 xt_prefix[par->family], par->target->name,
569 par->target->table, par->table);
570 return -EINVAL;
571 }
572 if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
573 char used[64], allow[64];
574
575 pr_err("%s_tables: %s target: used from hooks %s, but only "
576 "usable from %s\n",
577 xt_prefix[par->family], par->target->name,
578 textify_hooks(used, sizeof(used), par->hook_mask),
579 textify_hooks(allow, sizeof(allow), par->target->hooks));
580 return -EINVAL;
581 }
582 if (par->target->proto && (par->target->proto != proto || inv_proto)) {
583 pr_err("%s_tables: %s target: only valid for protocol %u\n",
584 xt_prefix[par->family], par->target->name,
585 par->target->proto);
586 return -EINVAL;
587 }
588 if (par->target->checkentry != NULL) {
589 ret = par->target->checkentry(par);
590 if (ret < 0)
591 return ret;
592 else if (ret > 0)
593 /* Flag up potential errors. */
594 return -EIO;
595 }
596 return 0;
597}
598EXPORT_SYMBOL_GPL(xt_check_target);
599
600#ifdef CONFIG_COMPAT
601int xt_compat_target_offset(const struct xt_target *target)
602{
603 u_int16_t csize = target->compatsize ? : target->targetsize;
604 return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
605}
606EXPORT_SYMBOL_GPL(xt_compat_target_offset);
607
608void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
609 unsigned int *size)
610{
611 const struct xt_target *target = t->u.kernel.target;
612 struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
613 int pad, off = xt_compat_target_offset(target);
614 u_int16_t tsize = ct->u.user.target_size;
615
616 t = *dstptr;
617 memcpy(t, ct, sizeof(*ct));
618 if (target->compat_from_user)
619 target->compat_from_user(t->data, ct->data);
620 else
621 memcpy(t->data, ct->data, tsize - sizeof(*ct));
622 pad = XT_ALIGN(target->targetsize) - target->targetsize;
623 if (pad > 0)
624 memset(t->data + target->targetsize, 0, pad);
625
626 tsize += off;
627 t->u.user.target_size = tsize;
628
629 *size += off;
630 *dstptr += tsize;
631}
632EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
633
634int xt_compat_target_to_user(const struct xt_entry_target *t,
635 void __user **dstptr, unsigned int *size)
636{
637 const struct xt_target *target = t->u.kernel.target;
638 struct compat_xt_entry_target __user *ct = *dstptr;
639 int off = xt_compat_target_offset(target);
640 u_int16_t tsize = t->u.user.target_size - off;
641
642 if (copy_to_user(ct, t, sizeof(*ct)) ||
643 put_user(tsize, &ct->u.user.target_size) ||
644 copy_to_user(ct->u.user.name, t->u.kernel.target->name,
645 strlen(t->u.kernel.target->name) + 1))
646 return -EFAULT;
647
648 if (target->compat_to_user) {
649 if (target->compat_to_user((void __user *)ct->data, t->data))
650 return -EFAULT;
651 } else {
652 if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
653 return -EFAULT;
654 }
655
656 *size -= off;
657 *dstptr += tsize;
658 return 0;
659}
660EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
661#endif
662
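/*
 * Allocate a table_info structure plus one copy of the rule blob per
 * possible CPU; small blobs come from kmalloc, larger ones from vmalloc.
 */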
663struct xt_table_info *xt_alloc_table_info(unsigned int size)
664{
665 struct xt_table_info *newinfo;
666 int cpu;
667
668 /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
669 if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
670 return NULL;
671
672 newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
673 if (!newinfo)
674 return NULL;
675
676 newinfo->size = size;
677
678 for_each_possible_cpu(cpu) {
679 if (size <= PAGE_SIZE)
680 newinfo->entries[cpu] = kmalloc_node(size,
681 GFP_KERNEL,
682 cpu_to_node(cpu));
683 else
684 newinfo->entries[cpu] = vmalloc_node(size,
685 cpu_to_node(cpu));
686
687 if (newinfo->entries[cpu] == NULL) {
688 xt_free_table_info(newinfo);
689 return NULL;
690 }
691 }
692
693 return newinfo;
694}
695EXPORT_SYMBOL(xt_alloc_table_info);
696
697void xt_free_table_info(struct xt_table_info *info)
698{
699 int cpu;
700
701 for_each_possible_cpu(cpu) {
702 if (info->size <= PAGE_SIZE)
703 kfree(info->entries[cpu]);
704 else
705 vfree(info->entries[cpu]);
706 }
707
708 if (info->jumpstack != NULL) {
709 if (sizeof(void *) * info->stacksize > PAGE_SIZE) {
710 for_each_possible_cpu(cpu)
711 vfree(info->jumpstack[cpu]);
712 } else {
713 for_each_possible_cpu(cpu)
714 kfree(info->jumpstack[cpu]);
715 }
716 }
717
718 if (sizeof(void **) * nr_cpu_ids > PAGE_SIZE)
719 vfree(info->jumpstack);
720 else
721 kfree(info->jumpstack);
722
723 free_percpu(info->stackptr);
724
725 kfree(info);
726}
727EXPORT_SYMBOL(xt_free_table_info);
728
729/* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */
730struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
731 const char *name)
732{
733 struct xt_table *t;
734
735 if (mutex_lock_interruptible(&xt[af].mutex) != 0)
736 return ERR_PTR(-EINTR);
737
738 list_for_each_entry(t, &net->xt.tables[af], list)
739 if (strcmp(t->name, name) == 0 && try_module_get(t->me))
740 return t;
741 mutex_unlock(&xt[af].mutex);
742 return NULL;
743}
744EXPORT_SYMBOL_GPL(xt_find_table_lock);
745
746void xt_table_unlock(struct xt_table *table)
747{
748 mutex_unlock(&xt[table->af].mutex);
749}
750EXPORT_SYMBOL_GPL(xt_table_unlock);
751
752#ifdef CONFIG_COMPAT
753void xt_compat_lock(u_int8_t af)
754{
755 mutex_lock(&xt[af].compat_mutex);
756}
757EXPORT_SYMBOL_GPL(xt_compat_lock);
758
759void xt_compat_unlock(u_int8_t af)
760{
761 mutex_unlock(&xt[af].compat_mutex);
762}
763EXPORT_SYMBOL_GPL(xt_compat_unlock);
764#endif
765
766DEFINE_PER_CPU(seqcount_t, xt_recseq);
767EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
768
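/*
 * Allocate the per-CPU jump stacks used during table traversal when
 * rules jump to user-defined chains.
 */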
769static int xt_jumpstack_alloc(struct xt_table_info *i)
770{
771 unsigned int size;
772 int cpu;
773
774 i->stackptr = alloc_percpu(unsigned int);
775 if (i->stackptr == NULL)
776 return -ENOMEM;
777
778 size = sizeof(void **) * nr_cpu_ids;
779 if (size > PAGE_SIZE)
780 i->jumpstack = vzalloc(size);
781 else
782 i->jumpstack = kzalloc(size, GFP_KERNEL);
783 if (i->jumpstack == NULL)
784 return -ENOMEM;
785
786 i->stacksize *= xt_jumpstack_multiplier;
787 size = sizeof(void *) * i->stacksize;
788 for_each_possible_cpu(cpu) {
789 if (size > PAGE_SIZE)
790 i->jumpstack[cpu] = vmalloc_node(size,
791 cpu_to_node(cpu));
792 else
793 i->jumpstack[cpu] = kmalloc_node(size,
794 GFP_KERNEL, cpu_to_node(cpu));
795 if (i->jumpstack[cpu] == NULL)
796 /*
797 * Freeing will be done later on by the callers. The
798 * chain is: xt_replace_table -> __do_replace ->
799 * do_replace -> xt_free_table_info.
800 */
801 return -ENOMEM;
802 }
803
804 return 0;
805}
806
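/*
 * Swap @newinfo in as the table's private data under local_bh_disable()
 * and return the old xt_table_info; on failure, set *error and return NULL.
 */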
807struct xt_table_info *
808xt_replace_table(struct xt_table *table,
809 unsigned int num_counters,
810 struct xt_table_info *newinfo,
811 int *error)
812{
813 struct xt_table_info *private;
814 int ret;
815
816 ret = xt_jumpstack_alloc(newinfo);
817 if (ret < 0) {
818 *error = ret;
819 return NULL;
820 }
821
822 /* Do the substitution. */
823 local_bh_disable();
824 private = table->private;
825
826 /* Check inside lock: is the old number correct? */
827 if (num_counters != private->number) {
828 pr_debug("num_counters != table->private->number (%u/%u)\n",
829 num_counters, private->number);
830 local_bh_enable();
831 *error = -EAGAIN;
832 return NULL;
833 }
834
835 table->private = newinfo;
836 newinfo->initial_entries = private->initial_entries;
837
838 /*
839	 * Even though table entries have now been swapped, other CPUs
840 * may still be using the old entries. This is okay, because
841 * resynchronization happens because of the locking done
842 * during the get_counters() routine.
843 */
844 local_bh_enable();
845
846#ifdef CONFIG_AUDIT
847 if (audit_enabled) {
848 struct audit_buffer *ab;
849
850 ab = audit_log_start(current->audit_context, GFP_KERNEL,
851 AUDIT_NETFILTER_CFG);
852 if (ab) {
853 audit_log_format(ab, "table=%s family=%u entries=%u",
854 table->name, table->af,
855 private->number);
856 audit_log_end(ab);
857 }
858 }
859#endif
860
861 return private;
862}
863EXPORT_SYMBOL_GPL(xt_replace_table);
864
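/*
 * Register a table in the given netns: duplicate @input_table, install
 * @newinfo via xt_replace_table() and link the result into net->xt.tables.
 * Returns the new table, or an ERR_PTR() on failure.
 */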
865struct xt_table *xt_register_table(struct net *net,
866 const struct xt_table *input_table,
867 struct xt_table_info *bootstrap,
868 struct xt_table_info *newinfo)
869{
870 int ret;
871 struct xt_table_info *private;
872 struct xt_table *t, *table;
873
874 /* Don't add one object to multiple lists. */
875 table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
876 if (!table) {
877 ret = -ENOMEM;
878 goto out;
879 }
880
881 ret = mutex_lock_interruptible(&xt[table->af].mutex);
882 if (ret != 0)
883 goto out_free;
884
885 /* Don't autoload: we'd eat our tail... */
886 list_for_each_entry(t, &net->xt.tables[table->af], list) {
887 if (strcmp(t->name, table->name) == 0) {
888 ret = -EEXIST;
889 goto unlock;
890 }
891 }
892
893 /* Simplifies replace_table code. */
894 table->private = bootstrap;
895
896 if (!xt_replace_table(table, 0, newinfo, &ret))
897 goto unlock;
898
899 private = table->private;
900 pr_debug("table->private->number = %u\n", private->number);
901
902 /* save number of initial entries */
903 private->initial_entries = private->number;
904
905 list_add(&table->list, &net->xt.tables[table->af]);
906 mutex_unlock(&xt[table->af].mutex);
907 return table;
908
909 unlock:
910 mutex_unlock(&xt[table->af].mutex);
911out_free:
912 kfree(table);
913out:
914 return ERR_PTR(ret);
915}
916EXPORT_SYMBOL_GPL(xt_register_table);
917
918void *xt_unregister_table(struct xt_table *table)
919{
920 struct xt_table_info *private;
921
922 mutex_lock(&xt[table->af].mutex);
923 private = table->private;
924 list_del(&table->list);
925 mutex_unlock(&xt[table->af].mutex);
926 kfree(table);
927
928 return private;
929}
930EXPORT_SYMBOL_GPL(xt_unregister_table);
931
932#ifdef CONFIG_PROC_FS
933struct xt_names_priv {
934 struct seq_net_private p;
935 u_int8_t af;
936};
937static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
938{
939 struct xt_names_priv *priv = seq->private;
940 struct net *net = seq_file_net(seq);
941 u_int8_t af = priv->af;
942
943 mutex_lock(&xt[af].mutex);
944 return seq_list_start(&net->xt.tables[af], *pos);
945}
946
947static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
948{
949 struct xt_names_priv *priv = seq->private;
950 struct net *net = seq_file_net(seq);
951 u_int8_t af = priv->af;
952
953 return seq_list_next(v, &net->xt.tables[af], pos);
954}
955
956static void xt_table_seq_stop(struct seq_file *seq, void *v)
957{
958 struct xt_names_priv *priv = seq->private;
959 u_int8_t af = priv->af;
960
961 mutex_unlock(&xt[af].mutex);
962}
963
964static int xt_table_seq_show(struct seq_file *seq, void *v)
965{
966 struct xt_table *table = list_entry(v, struct xt_table, list);
967
968 if (strlen(table->name))
969 return seq_printf(seq, "%s\n", table->name);
970 else
971 return 0;
972}
973
974static const struct seq_operations xt_table_seq_ops = {
975 .start = xt_table_seq_start,
976 .next = xt_table_seq_next,
977 .stop = xt_table_seq_stop,
978 .show = xt_table_seq_show,
979};
980
981static int xt_table_open(struct inode *inode, struct file *file)
982{
983 int ret;
984 struct xt_names_priv *priv;
985
986 ret = seq_open_net(inode, file, &xt_table_seq_ops,
987 sizeof(struct xt_names_priv));
988 if (!ret) {
989 priv = ((struct seq_file *)file->private_data)->private;
990 priv->af = (unsigned long)PDE(inode)->data;
991 }
992 return ret;
993}
994
995static const struct file_operations xt_table_ops = {
996 .owner = THIS_MODULE,
997 .open = xt_table_open,
998 .read = seq_read,
999 .llseek = seq_lseek,
1000 .release = seq_release_net,
1001};
1002
1003/*
1004 * Traverse state for ip{,6}_{tables,matches} to help cross
1005 * the multi-AF mutexes.
1006 */
1007struct nf_mttg_trav {
1008 struct list_head *head, *curr;
1009 uint8_t class, nfproto;
1010};
1011
1012enum {
1013 MTTG_TRAV_INIT,
1014 MTTG_TRAV_NFP_UNSPEC,
1015 MTTG_TRAV_NFP_SPEC,
1016 MTTG_TRAV_DONE,
1017};
1018
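/*
 * Advance the traversal state machine: walk the NFPROTO_UNSPEC list
 * first, then the family-specific list, taking and releasing the
 * corresponding per-family mutex at each transition.
 */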
1019static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
1020 bool is_target)
1021{
1022 static const uint8_t next_class[] = {
1023 [MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
1024 [MTTG_TRAV_NFP_SPEC] = MTTG_TRAV_DONE,
1025 };
1026 struct nf_mttg_trav *trav = seq->private;
1027
1028 switch (trav->class) {
1029 case MTTG_TRAV_INIT:
1030 trav->class = MTTG_TRAV_NFP_UNSPEC;
1031 mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
1032 trav->head = trav->curr = is_target ?
1033 &xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
1034 break;
1035 case MTTG_TRAV_NFP_UNSPEC:
1036 trav->curr = trav->curr->next;
1037 if (trav->curr != trav->head)
1038 break;
1039 mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
1040 mutex_lock(&xt[trav->nfproto].mutex);
1041 trav->head = trav->curr = is_target ?
1042 &xt[trav->nfproto].target : &xt[trav->nfproto].match;
1043 trav->class = next_class[trav->class];
1044 break;
1045 case MTTG_TRAV_NFP_SPEC:
1046 trav->curr = trav->curr->next;
1047 if (trav->curr != trav->head)
1048 break;
1049 /* fallthru, _stop will unlock */
1050 default:
1051 return NULL;
1052 }
1053
1054 if (ppos != NULL)
1055 ++*ppos;
1056 return trav;
1057}
1058
1059static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
1060 bool is_target)
1061{
1062 struct nf_mttg_trav *trav = seq->private;
1063 unsigned int j;
1064
1065 trav->class = MTTG_TRAV_INIT;
1066 for (j = 0; j < *pos; ++j)
1067 if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
1068 return NULL;
1069 return trav;
1070}
1071
1072static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
1073{
1074 struct nf_mttg_trav *trav = seq->private;
1075
1076 switch (trav->class) {
1077 case MTTG_TRAV_NFP_UNSPEC:
1078 mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
1079 break;
1080 case MTTG_TRAV_NFP_SPEC:
1081 mutex_unlock(&xt[trav->nfproto].mutex);
1082 break;
1083 }
1084}
1085
1086static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
1087{
1088 return xt_mttg_seq_start(seq, pos, false);
1089}
1090
1091static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
1092{
1093 return xt_mttg_seq_next(seq, v, ppos, false);
1094}
1095
1096static int xt_match_seq_show(struct seq_file *seq, void *v)
1097{
1098 const struct nf_mttg_trav *trav = seq->private;
1099 const struct xt_match *match;
1100
1101 switch (trav->class) {
1102 case MTTG_TRAV_NFP_UNSPEC:
1103 case MTTG_TRAV_NFP_SPEC:
1104 if (trav->curr == trav->head)
1105 return 0;
1106 match = list_entry(trav->curr, struct xt_match, list);
1107 return (*match->name == '\0') ? 0 :
1108 seq_printf(seq, "%s\n", match->name);
1109 }
1110 return 0;
1111}
1112
1113static const struct seq_operations xt_match_seq_ops = {
1114 .start = xt_match_seq_start,
1115 .next = xt_match_seq_next,
1116 .stop = xt_mttg_seq_stop,
1117 .show = xt_match_seq_show,
1118};
1119
1120static int xt_match_open(struct inode *inode, struct file *file)
1121{
1122 struct seq_file *seq;
1123 struct nf_mttg_trav *trav;
1124 int ret;
1125
1126 trav = kmalloc(sizeof(*trav), GFP_KERNEL);
1127 if (trav == NULL)
1128 return -ENOMEM;
1129
1130 ret = seq_open(file, &xt_match_seq_ops);
1131 if (ret < 0) {
1132 kfree(trav);
1133 return ret;
1134 }
1135
1136 seq = file->private_data;
1137 seq->private = trav;
1138 trav->nfproto = (unsigned long)PDE(inode)->data;
1139 return 0;
1140}
1141
1142static const struct file_operations xt_match_ops = {
1143 .owner = THIS_MODULE,
1144 .open = xt_match_open,
1145 .read = seq_read,
1146 .llseek = seq_lseek,
1147 .release = seq_release_private,
1148};
1149
1150static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
1151{
1152 return xt_mttg_seq_start(seq, pos, true);
1153}
1154
1155static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
1156{
1157 return xt_mttg_seq_next(seq, v, ppos, true);
1158}
1159
1160static int xt_target_seq_show(struct seq_file *seq, void *v)
1161{
1162 const struct nf_mttg_trav *trav = seq->private;
1163 const struct xt_target *target;
1164
1165 switch (trav->class) {
1166 case MTTG_TRAV_NFP_UNSPEC:
1167 case MTTG_TRAV_NFP_SPEC:
1168 if (trav->curr == trav->head)
1169 return 0;
1170 target = list_entry(trav->curr, struct xt_target, list);
1171 return (*target->name == '\0') ? 0 :
1172 seq_printf(seq, "%s\n", target->name);
1173 }
1174 return 0;
1175}
1176
1177static const struct seq_operations xt_target_seq_ops = {
1178 .start = xt_target_seq_start,
1179 .next = xt_target_seq_next,
1180 .stop = xt_mttg_seq_stop,
1181 .show = xt_target_seq_show,
1182};
1183
1184static int xt_target_open(struct inode *inode, struct file *file)
1185{
1186 struct seq_file *seq;
1187 struct nf_mttg_trav *trav;
1188 int ret;
1189
1190 trav = kmalloc(sizeof(*trav), GFP_KERNEL);
1191 if (trav == NULL)
1192 return -ENOMEM;
1193
1194 ret = seq_open(file, &xt_target_seq_ops);
1195 if (ret < 0) {
1196 kfree(trav);
1197 return ret;
1198 }
1199
1200 seq = file->private_data;
1201 seq->private = trav;
1202 trav->nfproto = (unsigned long)PDE(inode)->data;
1203 return 0;
1204}
1205
1206static const struct file_operations xt_target_ops = {
1207 .owner = THIS_MODULE,
1208 .open = xt_target_open,
1209 .read = seq_read,
1210 .llseek = seq_lseek,
1211 .release = seq_release_private,
1212};
1213
1214#define FORMAT_TABLES "_tables_names"
1215#define FORMAT_MATCHES "_tables_matches"
1216#define FORMAT_TARGETS "_tables_targets"
1217
1218#endif /* CONFIG_PROC_FS */
1219
1220/**
1221 * xt_hook_link - set up hooks for a new table
1222 * @table: table with metadata needed to set up hooks
1223 * @fn: Hook function
1224 *
1225 * This function will take care of creating and registering the necessary
1226 * Netfilter hooks for XT tables.
1227 */
1228struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn)
1229{
1230 unsigned int hook_mask = table->valid_hooks;
1231 uint8_t i, num_hooks = hweight32(hook_mask);
1232 uint8_t hooknum;
1233 struct nf_hook_ops *ops;
1234 int ret;
1235
1236 ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
1237 if (ops == NULL)
1238 return ERR_PTR(-ENOMEM);
1239
1240 for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
1241 hook_mask >>= 1, ++hooknum) {
1242 if (!(hook_mask & 1))
1243 continue;
1244 ops[i].hook = fn;
1245 ops[i].owner = table->me;
1246 ops[i].pf = table->af;
1247 ops[i].hooknum = hooknum;
1248 ops[i].priority = table->priority;
1249 ++i;
1250 }
1251
1252 ret = nf_register_hooks(ops, num_hooks);
1253 if (ret < 0) {
1254 kfree(ops);
1255 return ERR_PTR(ret);
1256 }
1257
1258 return ops;
1259}
1260EXPORT_SYMBOL_GPL(xt_hook_link);
1261
1262/**
1263 * xt_hook_unlink - remove hooks for a table
1264 * @table: table whose hooks were set up by xt_hook_link
1265 * @ops: nf_hook_ops array as returned by xt_hook_link
1266 */
1267void xt_hook_unlink(const struct xt_table *table, struct nf_hook_ops *ops)
1268{
1269 nf_unregister_hooks(ops, hweight32(table->valid_hooks));
1270 kfree(ops);
1271}
1272EXPORT_SYMBOL_GPL(xt_hook_unlink);
1273
1274int xt_proto_init(struct net *net, u_int8_t af)
1275{
1276#ifdef CONFIG_PROC_FS
1277 char buf[XT_FUNCTION_MAXNAMELEN];
1278 struct proc_dir_entry *proc;
1279#endif
1280
1281 if (af >= ARRAY_SIZE(xt_prefix))
1282 return -EINVAL;
1283
1284
1285#ifdef CONFIG_PROC_FS
1286 strlcpy(buf, xt_prefix[af], sizeof(buf));
1287 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1288 proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
1289 (void *)(unsigned long)af);
1290 if (!proc)
1291 goto out;
1292
1293 strlcpy(buf, xt_prefix[af], sizeof(buf));
1294 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1295 proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
1296 (void *)(unsigned long)af);
1297 if (!proc)
1298 goto out_remove_tables;
1299
1300 strlcpy(buf, xt_prefix[af], sizeof(buf));
1301 strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1302 proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
1303 (void *)(unsigned long)af);
1304 if (!proc)
1305 goto out_remove_matches;
1306#endif
1307
1308 return 0;
1309
1310#ifdef CONFIG_PROC_FS
1311out_remove_matches:
1312 strlcpy(buf, xt_prefix[af], sizeof(buf));
1313 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1314 proc_net_remove(net, buf);
1315
1316out_remove_tables:
1317 strlcpy(buf, xt_prefix[af], sizeof(buf));
1318 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1319 proc_net_remove(net, buf);
1320out:
1321 return -1;
1322#endif
1323}
1324EXPORT_SYMBOL_GPL(xt_proto_init);
1325
1326void xt_proto_fini(struct net *net, u_int8_t af)
1327{
1328#ifdef CONFIG_PROC_FS
1329 char buf[XT_FUNCTION_MAXNAMELEN];
1330
1331 strlcpy(buf, xt_prefix[af], sizeof(buf));
1332 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1333 proc_net_remove(net, buf);
1334
1335 strlcpy(buf, xt_prefix[af], sizeof(buf));
1336 strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1337 proc_net_remove(net, buf);
1338
1339 strlcpy(buf, xt_prefix[af], sizeof(buf));
1340 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1341 proc_net_remove(net, buf);
1342#endif /*CONFIG_PROC_FS*/
1343}
1344EXPORT_SYMBOL_GPL(xt_proto_fini);
1345
1346static int __net_init xt_net_init(struct net *net)
1347{
1348 int i;
1349
1350 for (i = 0; i < NFPROTO_NUMPROTO; i++)
1351 INIT_LIST_HEAD(&net->xt.tables[i]);
1352 return 0;
1353}
1354
1355static struct pernet_operations xt_net_ops = {
1356 .init = xt_net_init,
1357};
1358
1359static int __init xt_init(void)
1360{
1361 unsigned int i;
1362 int rv;
1363
1364 for_each_possible_cpu(i) {
1365 seqcount_init(&per_cpu(xt_recseq, i));
1366 }
1367
1368 xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
1369 if (!xt)
1370 return -ENOMEM;
1371
1372 for (i = 0; i < NFPROTO_NUMPROTO; i++) {
1373 mutex_init(&xt[i].mutex);
1374#ifdef CONFIG_COMPAT
1375 mutex_init(&xt[i].compat_mutex);
1376 xt[i].compat_tab = NULL;
1377#endif
1378 INIT_LIST_HEAD(&xt[i].target);
1379 INIT_LIST_HEAD(&xt[i].match);
1380 }
1381 rv = register_pernet_subsys(&xt_net_ops);
1382 if (rv < 0)
1383 kfree(xt);
1384 return rv;
1385}
1386
1387static void __exit xt_fini(void)
1388{
1389 unregister_pernet_subsys(&xt_net_ops);
1390 kfree(xt);
1391}
1392
1393module_init(xt_init);
1394module_exit(xt_fini);
1395
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * x_tables core - Backend for {ip,ip6,arp}_tables
4 *
5 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
6 * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
7 *
8 * Based on existing ip_tables code which is
9 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
10 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
11 */
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/socket.h>
16#include <linux/net.h>
17#include <linux/proc_fs.h>
18#include <linux/seq_file.h>
19#include <linux/string.h>
20#include <linux/vmalloc.h>
21#include <linux/mutex.h>
22#include <linux/mm.h>
23#include <linux/slab.h>
24#include <linux/audit.h>
25#include <linux/user_namespace.h>
26#include <net/net_namespace.h>
27#include <net/netns/generic.h>
28
29#include <linux/netfilter/x_tables.h>
30#include <linux/netfilter_arp.h>
31#include <linux/netfilter_ipv4/ip_tables.h>
32#include <linux/netfilter_ipv6/ip6_tables.h>
33#include <linux/netfilter_arp/arp_tables.h>
34
35MODULE_LICENSE("GPL");
36MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
37MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
38
39#define XT_PCPU_BLOCK_SIZE 4096
40#define XT_MAX_TABLE_SIZE (512 * 1024 * 1024)
41
42struct xt_template {
43 struct list_head list;
44
45 /* called when table is needed in the given netns */
46 int (*table_init)(struct net *net);
47
48 struct module *me;
49
50 /* A unique name... */
51 char name[XT_TABLE_MAXNAMELEN];
52};
53
54static struct list_head xt_templates[NFPROTO_NUMPROTO];
55
56struct xt_pernet {
57 struct list_head tables[NFPROTO_NUMPROTO];
58};
59
60struct compat_delta {
61 unsigned int offset; /* offset in kernel */
62 int delta; /* delta in 32bit user land */
63};
64
65struct xt_af {
66 struct mutex mutex;
67 struct list_head match;
68 struct list_head target;
69#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
70 struct mutex compat_mutex;
71 struct compat_delta *compat_tab;
72 unsigned int number; /* number of slots in compat_tab[] */
73 unsigned int cur; /* number of used slots in compat_tab[] */
74#endif
75};
76
77static unsigned int xt_pernet_id __read_mostly;
78static struct xt_af *xt __read_mostly;
79
80static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
81 [NFPROTO_UNSPEC] = "x",
82 [NFPROTO_IPV4] = "ip",
83 [NFPROTO_ARP] = "arp",
84 [NFPROTO_BRIDGE] = "eb",
85 [NFPROTO_IPV6] = "ip6",
86};
87
88/* Registration hooks for targets. */
89int xt_register_target(struct xt_target *target)
90{
91 u_int8_t af = target->family;
92
93 mutex_lock(&xt[af].mutex);
94 list_add(&target->list, &xt[af].target);
95 mutex_unlock(&xt[af].mutex);
96 return 0;
97}
98EXPORT_SYMBOL(xt_register_target);
99
100void
101xt_unregister_target(struct xt_target *target)
102{
103 u_int8_t af = target->family;
104
105 mutex_lock(&xt[af].mutex);
106 list_del(&target->list);
107 mutex_unlock(&xt[af].mutex);
108}
109EXPORT_SYMBOL(xt_unregister_target);
110
111int
112xt_register_targets(struct xt_target *target, unsigned int n)
113{
114 unsigned int i;
115 int err = 0;
116
117 for (i = 0; i < n; i++) {
118 err = xt_register_target(&target[i]);
119 if (err)
120 goto err;
121 }
122 return err;
123
124err:
125 if (i > 0)
126 xt_unregister_targets(target, i);
127 return err;
128}
129EXPORT_SYMBOL(xt_register_targets);
130
131void
132xt_unregister_targets(struct xt_target *target, unsigned int n)
133{
134 while (n-- > 0)
135 xt_unregister_target(&target[n]);
136}
137EXPORT_SYMBOL(xt_unregister_targets);
138
139int xt_register_match(struct xt_match *match)
140{
141 u_int8_t af = match->family;
142
143 mutex_lock(&xt[af].mutex);
144 list_add(&match->list, &xt[af].match);
145 mutex_unlock(&xt[af].mutex);
146 return 0;
147}
148EXPORT_SYMBOL(xt_register_match);
149
150void
151xt_unregister_match(struct xt_match *match)
152{
153 u_int8_t af = match->family;
154
155 mutex_lock(&xt[af].mutex);
156 list_del(&match->list);
157 mutex_unlock(&xt[af].mutex);
158}
159EXPORT_SYMBOL(xt_unregister_match);
160
161int
162xt_register_matches(struct xt_match *match, unsigned int n)
163{
164 unsigned int i;
165 int err = 0;
166
167 for (i = 0; i < n; i++) {
168 err = xt_register_match(&match[i]);
169 if (err)
170 goto err;
171 }
172 return err;
173
174err:
175 if (i > 0)
176 xt_unregister_matches(match, i);
177 return err;
178}
179EXPORT_SYMBOL(xt_register_matches);
180
181void
182xt_unregister_matches(struct xt_match *match, unsigned int n)
183{
184 while (n-- > 0)
185 xt_unregister_match(&match[n]);
186}
187EXPORT_SYMBOL(xt_unregister_matches);
188
189
190/*
191 * These are weird, but module loading must not be done with mutex
192 * held (since they will register), and we have to have a single
193 * function to use.
194 */
195
196/* Find match, grabs ref. Returns ERR_PTR() on error. */
197struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
198{
199 struct xt_match *m;
200 int err = -ENOENT;
201
202 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
203 return ERR_PTR(-EINVAL);
204
205 mutex_lock(&xt[af].mutex);
206 list_for_each_entry(m, &xt[af].match, list) {
207 if (strcmp(m->name, name) == 0) {
208 if (m->revision == revision) {
209 if (try_module_get(m->me)) {
210 mutex_unlock(&xt[af].mutex);
211 return m;
212 }
213 } else
214 err = -EPROTOTYPE; /* Found something. */
215 }
216 }
217 mutex_unlock(&xt[af].mutex);
218
219 if (af != NFPROTO_UNSPEC)
220 /* Try searching again in the family-independent list */
221 return xt_find_match(NFPROTO_UNSPEC, name, revision);
222
223 return ERR_PTR(err);
224}
225EXPORT_SYMBOL(xt_find_match);
226
227struct xt_match *
228xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
229{
230 struct xt_match *match;
231
232 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
233 return ERR_PTR(-EINVAL);
234
235 match = xt_find_match(nfproto, name, revision);
236 if (IS_ERR(match)) {
237 request_module("%st_%s", xt_prefix[nfproto], name);
238 match = xt_find_match(nfproto, name, revision);
239 }
240
241 return match;
242}
243EXPORT_SYMBOL_GPL(xt_request_find_match);
244
245/* Find target, grabs ref. Returns ERR_PTR() on error. */
246static struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
247{
248 struct xt_target *t;
249 int err = -ENOENT;
250
251 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
252 return ERR_PTR(-EINVAL);
253
254 mutex_lock(&xt[af].mutex);
255 list_for_each_entry(t, &xt[af].target, list) {
256 if (strcmp(t->name, name) == 0) {
257 if (t->revision == revision) {
258 if (try_module_get(t->me)) {
259 mutex_unlock(&xt[af].mutex);
260 return t;
261 }
262 } else
263 err = -EPROTOTYPE; /* Found something. */
264 }
265 }
266 mutex_unlock(&xt[af].mutex);
267
268 if (af != NFPROTO_UNSPEC)
269 /* Try searching again in the family-independent list */
270 return xt_find_target(NFPROTO_UNSPEC, name, revision);
271
272 return ERR_PTR(err);
273}
274
275struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
276{
277 struct xt_target *target;
278
279 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
280 return ERR_PTR(-EINVAL);
281
282 target = xt_find_target(af, name, revision);
283 if (IS_ERR(target)) {
284 request_module("%st_%s", xt_prefix[af], name);
285 target = xt_find_target(af, name, revision);
286 }
287
288 return target;
289}
290EXPORT_SYMBOL_GPL(xt_request_find_target);
291
292
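/*
 * Copy an extension's size, name and revision to the userspace
 * representation of a match/target entry.
 */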
293static int xt_obj_to_user(u16 __user *psize, u16 size,
294 void __user *pname, const char *name,
295 u8 __user *prev, u8 rev)
296{
297 if (put_user(size, psize))
298 return -EFAULT;
299 if (copy_to_user(pname, name, strlen(name) + 1))
300 return -EFAULT;
301 if (put_user(rev, prev))
302 return -EFAULT;
303
304 return 0;
305}
306
307#define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE) \
308 xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size, \
309 U->u.user.name, K->u.kernel.TYPE->name, \
310 &U->u.user.revision, K->u.kernel.TYPE->revision)
311
312int xt_data_to_user(void __user *dst, const void *src,
313 int usersize, int size, int aligned_size)
314{
315 usersize = usersize ? : size;
316 if (copy_to_user(dst, src, usersize))
317 return -EFAULT;
318 if (usersize != aligned_size &&
319 clear_user(dst + usersize, aligned_size - usersize))
320 return -EFAULT;
321
322 return 0;
323}
324EXPORT_SYMBOL_GPL(xt_data_to_user);
325
326#define XT_DATA_TO_USER(U, K, TYPE) \
327 xt_data_to_user(U->data, K->data, \
328 K->u.kernel.TYPE->usersize, \
329 K->u.kernel.TYPE->TYPE##size, \
330 XT_ALIGN(K->u.kernel.TYPE->TYPE##size))
331
332int xt_match_to_user(const struct xt_entry_match *m,
333 struct xt_entry_match __user *u)
334{
335 return XT_OBJ_TO_USER(u, m, match, 0) ||
336 XT_DATA_TO_USER(u, m, match);
337}
338EXPORT_SYMBOL_GPL(xt_match_to_user);
339
340int xt_target_to_user(const struct xt_entry_target *t,
341 struct xt_entry_target __user *u)
342{
343 return XT_OBJ_TO_USER(u, t, target, 0) ||
344 XT_DATA_TO_USER(u, t, target);
345}
346EXPORT_SYMBOL_GPL(xt_target_to_user);
347
348static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
349{
350 const struct xt_match *m;
351 int have_rev = 0;
352
353 mutex_lock(&xt[af].mutex);
354 list_for_each_entry(m, &xt[af].match, list) {
355 if (strcmp(m->name, name) == 0) {
356 if (m->revision > *bestp)
357 *bestp = m->revision;
358 if (m->revision == revision)
359 have_rev = 1;
360 }
361 }
362 mutex_unlock(&xt[af].mutex);
363
364 if (af != NFPROTO_UNSPEC && !have_rev)
365 return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
366
367 return have_rev;
368}
369
370static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
371{
372 const struct xt_target *t;
373 int have_rev = 0;
374
375 mutex_lock(&xt[af].mutex);
376 list_for_each_entry(t, &xt[af].target, list) {
377 if (strcmp(t->name, name) == 0) {
378 if (t->revision > *bestp)
379 *bestp = t->revision;
380 if (t->revision == revision)
381 have_rev = 1;
382 }
383 }
384 mutex_unlock(&xt[af].mutex);
385
386 if (af != NFPROTO_UNSPEC && !have_rev)
387 return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
388
389 return have_rev;
390}
391
392/* Returns true or false (if no such extension at all) */
393int xt_find_revision(u8 af, const char *name, u8 revision, int target,
394 int *err)
395{
396 int have_rev, best = -1;
397
398 if (target == 1)
399 have_rev = target_revfn(af, name, revision, &best);
400 else
401 have_rev = match_revfn(af, name, revision, &best);
402
403 /* Nothing at all? Return 0 to try loading module. */
404 if (best == -1) {
405 *err = -ENOENT;
406 return 0;
407 }
408
409 *err = best;
410 if (!have_rev)
411 *err = -EPROTONOSUPPORT;
412 return 1;
413}
414EXPORT_SYMBOL_GPL(xt_find_revision);
415
416static char *
417textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
418{
419 static const char *const inetbr_names[] = {
420 "PREROUTING", "INPUT", "FORWARD",
421 "OUTPUT", "POSTROUTING", "BROUTING",
422 };
423 static const char *const arp_names[] = {
424 "INPUT", "FORWARD", "OUTPUT",
425 };
426 const char *const *names;
427 unsigned int i, max;
428 char *p = buf;
429 bool np = false;
430 int res;
431
432 names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
433 max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
434 ARRAY_SIZE(inetbr_names);
435 *p = '\0';
436 for (i = 0; i < max; ++i) {
437 if (!(mask & (1 << i)))
438 continue;
439 res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
440 if (res > 0) {
441 size -= res;
442 p += res;
443 }
444 np = true;
445 }
446
447 return buf;
448}
449
450/**
451 * xt_check_proc_name - check that name is suitable for /proc file creation
452 *
453 * @name: file name candidate
454 * @size: length of buffer
455 *
456 * Some x_tables modules wish to create a file in /proc.
457 * This function makes sure that the name is suitable for this
458 * purpose: it checks that the name is NUL terminated and isn't a 'special'
459 * name, like "..".
460 *
461 * Returns a negative number on error or 0 if the name is usable.
462 */
463int xt_check_proc_name(const char *name, unsigned int size)
464{
465 if (name[0] == '\0')
466 return -EINVAL;
467
468 if (strnlen(name, size) == size)
469 return -ENAMETOOLONG;
470
471 if (strcmp(name, ".") == 0 ||
472 strcmp(name, "..") == 0 ||
473 strchr(name, '/'))
474 return -EINVAL;
475
476 return 0;
477}
478EXPORT_SYMBOL(xt_check_proc_name);
479
480int xt_check_match(struct xt_mtchk_param *par,
481 unsigned int size, u16 proto, bool inv_proto)
482{
483 int ret;
484
485 if (XT_ALIGN(par->match->matchsize) != size &&
486 par->match->matchsize != -1) {
487 /*
488 * ebt_among is exempt from centralized matchsize checking
489 * because it uses a dynamic-size data set.
490 */
491 pr_err_ratelimited("%s_tables: %s.%u match: invalid size %u (kernel) != (user) %u\n",
492 xt_prefix[par->family], par->match->name,
493 par->match->revision,
494 XT_ALIGN(par->match->matchsize), size);
495 return -EINVAL;
496 }
497 if (par->match->table != NULL &&
498 strcmp(par->match->table, par->table) != 0) {
499 pr_info_ratelimited("%s_tables: %s match: only valid in %s table, not %s\n",
500 xt_prefix[par->family], par->match->name,
501 par->match->table, par->table);
502 return -EINVAL;
503 }
504 if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
505 char used[64], allow[64];
506
507 pr_info_ratelimited("%s_tables: %s match: used from hooks %s, but only valid from %s\n",
508 xt_prefix[par->family], par->match->name,
509 textify_hooks(used, sizeof(used),
510 par->hook_mask, par->family),
511 textify_hooks(allow, sizeof(allow),
512 par->match->hooks,
513 par->family));
514 return -EINVAL;
515 }
516 if (par->match->proto && (par->match->proto != proto || inv_proto)) {
517 pr_info_ratelimited("%s_tables: %s match: only valid for protocol %u\n",
518 xt_prefix[par->family], par->match->name,
519 par->match->proto);
520 return -EINVAL;
521 }
522 if (par->match->checkentry != NULL) {
523 ret = par->match->checkentry(par);
524 if (ret < 0)
525 return ret;
526 else if (ret > 0)
527 /* Flag up potential errors. */
528 return -EIO;
529 }
530 return 0;
531}
532EXPORT_SYMBOL_GPL(xt_check_match);
533
534/** xt_check_entry_match - check that matches end before start of target
535 *
536 * @match: beginning of xt_entry_match
537 * @target: beginning of this rules target (alleged end of matches)
538 * @alignment: alignment requirement of match structures
539 *
540 * Validates that all matches add up to the beginning of the target,
541 * and that each match covers at least the base structure size.
542 *
543 * Return: 0 on success, negative errno on failure.
544 */
545static int xt_check_entry_match(const char *match, const char *target,
546 const size_t alignment)
547{
548 const struct xt_entry_match *pos;
549 int length = target - match;
550
551 if (length == 0) /* no matches */
552 return 0;
553
554 pos = (struct xt_entry_match *)match;
555 do {
556 if ((unsigned long)pos % alignment)
557 return -EINVAL;
558
559 if (length < (int)sizeof(struct xt_entry_match))
560 return -EINVAL;
561
562 if (pos->u.match_size < sizeof(struct xt_entry_match))
563 return -EINVAL;
564
565 if (pos->u.match_size > length)
566 return -EINVAL;
567
568 length -= pos->u.match_size;
569 pos = ((void *)((char *)(pos) + (pos)->u.match_size));
570 } while (length > 0);
571
572 return 0;
573}
574
575/** xt_check_table_hooks - check hook entry points are sane
576 *
577 * @info: xt_table_info to check
578 * @valid_hooks: hook entry points that we can enter from
579 *
580 * Validates that the hook entry and underflow points are set up.
581 *
582 * Return: 0 on success, negative errno on failure.
583 */
584int xt_check_table_hooks(const struct xt_table_info *info, unsigned int valid_hooks)
585{
586 const char *err = "unsorted underflow";
587 unsigned int i, max_uflow, max_entry;
588 bool check_hooks = false;
589
590 BUILD_BUG_ON(ARRAY_SIZE(info->hook_entry) != ARRAY_SIZE(info->underflow));
591
592 max_entry = 0;
593 max_uflow = 0;
594
595 for (i = 0; i < ARRAY_SIZE(info->hook_entry); i++) {
596 if (!(valid_hooks & (1 << i)))
597 continue;
598
599 if (info->hook_entry[i] == 0xFFFFFFFF)
600 return -EINVAL;
601 if (info->underflow[i] == 0xFFFFFFFF)
602 return -EINVAL;
603
604 if (check_hooks) {
605 if (max_uflow > info->underflow[i])
606 goto error;
607
608 if (max_uflow == info->underflow[i]) {
609 err = "duplicate underflow";
610 goto error;
611 }
612 if (max_entry > info->hook_entry[i]) {
613 err = "unsorted entry";
614 goto error;
615 }
616 if (max_entry == info->hook_entry[i]) {
617 err = "duplicate entry";
618 goto error;
619 }
620 }
621 max_entry = info->hook_entry[i];
622 max_uflow = info->underflow[i];
623 check_hooks = true;
624 }
625
626 return 0;
627error:
628 pr_err_ratelimited("%s at hook %d\n", err, i);
629 return -EINVAL;
630}
631EXPORT_SYMBOL(xt_check_table_hooks);
632
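/*
 * A standard-target verdict is valid if it is a positive jump offset,
 * XT_RETURN, or one of the negative encodings of NF_ACCEPT, NF_DROP or
 * NF_QUEUE (i.e. -verdict - 1 maps to one of those values).
 */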
633static bool verdict_ok(int verdict)
634{
635 if (verdict > 0)
636 return true;
637
638 if (verdict < 0) {
639 int v = -verdict - 1;
640
641 if (verdict == XT_RETURN)
642 return true;
643
644 switch (v) {
645 case NF_ACCEPT: return true;
646 case NF_DROP: return true;
647 case NF_QUEUE: return true;
648 default:
649 break;
650 }
651
652 return false;
653 }
654
655 return false;
656}
657
658static bool error_tg_ok(unsigned int usersize, unsigned int kernsize,
659 const char *msg, unsigned int msglen)
660{
661 return usersize == kernsize && strnlen(msg, msglen) < msglen;
662}
663
664#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
665int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
666{
667 struct xt_af *xp = &xt[af];
668
669 WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
670
671 if (WARN_ON(!xp->compat_tab))
672 return -ENOMEM;
673
674 if (xp->cur >= xp->number)
675 return -EINVAL;
676
677 if (xp->cur)
678 delta += xp->compat_tab[xp->cur - 1].delta;
679 xp->compat_tab[xp->cur].offset = offset;
680 xp->compat_tab[xp->cur].delta = delta;
681 xp->cur++;
682 return 0;
683}
684EXPORT_SYMBOL_GPL(xt_compat_add_offset);
685
686void xt_compat_flush_offsets(u_int8_t af)
687{
688 WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
689
690 if (xt[af].compat_tab) {
691 vfree(xt[af].compat_tab);
692 xt[af].compat_tab = NULL;
693 xt[af].number = 0;
694 xt[af].cur = 0;
695 }
696}
697EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
698
699int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
700{
701 struct compat_delta *tmp = xt[af].compat_tab;
702 int mid, left = 0, right = xt[af].cur - 1;
703
704 while (left <= right) {
705 mid = (left + right) >> 1;
706 if (offset > tmp[mid].offset)
707 left = mid + 1;
708 else if (offset < tmp[mid].offset)
709 right = mid - 1;
710 else
711 return mid ? tmp[mid - 1].delta : 0;
712 }
713 return left ? tmp[left - 1].delta : 0;
714}
715EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
716
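/*
 * Allocate compat_tab[] for @number delta entries (bounded by
 * XT_MAX_TABLE_SIZE); must be called with the compat_mutex held.
 */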
717int xt_compat_init_offsets(u8 af, unsigned int number)
718{
719 size_t mem;
720
721 WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
722
723 if (!number || number > (INT_MAX / sizeof(struct compat_delta)))
724 return -EINVAL;
725
726 if (WARN_ON(xt[af].compat_tab))
727 return -EINVAL;
728
729 mem = sizeof(struct compat_delta) * number;
730 if (mem > XT_MAX_TABLE_SIZE)
731 return -ENOMEM;
732
733 xt[af].compat_tab = vmalloc(mem);
734 if (!xt[af].compat_tab)
735 return -ENOMEM;
736
737 xt[af].number = number;
738 xt[af].cur = 0;
739
740 return 0;
741}
742EXPORT_SYMBOL(xt_compat_init_offsets);
743
744int xt_compat_match_offset(const struct xt_match *match)
745{
746 u_int16_t csize = match->compatsize ? : match->matchsize;
747 return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
748}
749EXPORT_SYMBOL_GPL(xt_compat_match_offset);
750
751void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
752 unsigned int *size)
753{
754 const struct xt_match *match = m->u.kernel.match;
755 struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
756 int off = xt_compat_match_offset(match);
757 u_int16_t msize = cm->u.user.match_size;
758 char name[sizeof(m->u.user.name)];
759
760 m = *dstptr;
761 memcpy(m, cm, sizeof(*cm));
762 if (match->compat_from_user)
763 match->compat_from_user(m->data, cm->data);
764 else
765 memcpy(m->data, cm->data, msize - sizeof(*cm));
766
767 msize += off;
768 m->u.user.match_size = msize;
769 strscpy(name, match->name, sizeof(name));
770 module_put(match->me);
771 strscpy_pad(m->u.user.name, name, sizeof(m->u.user.name));
772
773 *size += off;
774 *dstptr += msize;
775}
776EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
777
778#define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE) \
779 xt_data_to_user(U->data, K->data, \
780 K->u.kernel.TYPE->usersize, \
781 C_SIZE, \
782 COMPAT_XT_ALIGN(C_SIZE))
783
784int xt_compat_match_to_user(const struct xt_entry_match *m,
785 void __user **dstptr, unsigned int *size)
786{
787 const struct xt_match *match = m->u.kernel.match;
788 struct compat_xt_entry_match __user *cm = *dstptr;
789 int off = xt_compat_match_offset(match);
790 u_int16_t msize = m->u.user.match_size - off;
791
792 if (XT_OBJ_TO_USER(cm, m, match, msize))
793 return -EFAULT;
794
795 if (match->compat_to_user) {
796 if (match->compat_to_user((void __user *)cm->data, m->data))
797 return -EFAULT;
798 } else {
799 if (COMPAT_XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
800 return -EFAULT;
801 }
802
803 *size -= off;
804 *dstptr += msize;
805 return 0;
806}
807EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
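
/* Summary of the conversion flow: xt_compat_match_from_user() reads a
 * 32-bit xt_entry_match and writes its 64-bit equivalent at *dstptr,
 * growing the entry by xt_compat_match_offset() bytes and advancing
 * *dstptr and *size; xt_compat_match_to_user() is the inverse and
 * shrinks the entry while copying it out to a compat user buffer.
 * The target helpers further below mirror this exactly.
 */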
808
809/* non-compat version may have padding after verdict */
810struct compat_xt_standard_target {
811 struct compat_xt_entry_target t;
812 compat_uint_t verdict;
813};
814
815struct compat_xt_error_target {
816 struct compat_xt_entry_target t;
817 char errorname[XT_FUNCTION_MAXNAMELEN];
818};
819
820int xt_compat_check_entry_offsets(const void *base, const char *elems,
821 unsigned int target_offset,
822 unsigned int next_offset)
823{
824 long size_of_base_struct = elems - (const char *)base;
825 const struct compat_xt_entry_target *t;
826 const char *e = base;
827
828 if (target_offset < size_of_base_struct)
829 return -EINVAL;
830
831 if (target_offset + sizeof(*t) > next_offset)
832 return -EINVAL;
833
834 t = (void *)(e + target_offset);
835 if (t->u.target_size < sizeof(*t))
836 return -EINVAL;
837
838 if (target_offset + t->u.target_size > next_offset)
839 return -EINVAL;
840
841 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
842 const struct compat_xt_standard_target *st = (const void *)t;
843
844 if (COMPAT_XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
845 return -EINVAL;
846
847 if (!verdict_ok(st->verdict))
848 return -EINVAL;
849 } else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
850 const struct compat_xt_error_target *et = (const void *)t;
851
852 if (!error_tg_ok(t->u.target_size, sizeof(*et),
853 et->errorname, sizeof(et->errorname)))
854 return -EINVAL;
855 }
856
	/* compat_xt_entry_match has less strict alignment requirements,
	 * otherwise the two are identical. In case of padding differences
	 * we would need to add a compat version of xt_check_entry_match.
860 */
861 BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));
862
863 return xt_check_entry_match(elems, base + target_offset,
864 __alignof__(struct compat_xt_entry_match));
865}
866EXPORT_SYMBOL(xt_compat_check_entry_offsets);
867#endif /* CONFIG_NETFILTER_XTABLES_COMPAT */
868
869/**
870 * xt_check_entry_offsets - validate arp/ip/ip6t_entry
871 *
872 * @base: pointer to arp/ip/ip6t_entry
873 * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
 * @target_offset: the arp/ip/ip6t_entry->target_offset
 * @next_offset: the arp/ip/ip6t_entry->next_offset
876 *
877 * validates that target_offset and next_offset are sane and that all
878 * match sizes (if any) align with the target offset.
879 *
 * This function does not validate the targets or matches themselves; it
881 * only tests that all the offsets and sizes are correct, that all
882 * match structures are aligned, and that the last structure ends where
883 * the target structure begins.
884 *
 * Also see xt_compat_check_entry_offsets() for the CONFIG_NETFILTER_XTABLES_COMPAT version.
886 *
 * The arp/ip/ip6t_entry structure @base must have passed the following tests:
888 * - it must point to a valid memory location
889 * - base to base + next_offset must be accessible, i.e. not exceed allocated
890 * length.
891 *
892 * A well-formed entry looks like this:
893 *
 * ip(6)t_entry   match [mtdata]  match [mtdata]     target [tgdata] ip(6)t_entry
 * e->elems[]-----'                              |               |
 *                matchsize                      |               |
 *                                matchsize      |               |
 *                                               |               |
 * target_offset---------------------------------'               |
 * next_offset---------------------------------------------------'
901 *
902 * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
903 * This is where matches (if any) and the target reside.
904 * target_offset: beginning of target.
905 * next_offset: start of the next rule; also: size of this rule.
906 * Since targets have a minimum size, target_offset + minlen <= next_offset.
907 *
 * Every match stores its size; the sum of sizes must not exceed target_offset.
909 *
910 * Return: 0 on success, negative errno on failure.
911 */
912int xt_check_entry_offsets(const void *base,
913 const char *elems,
914 unsigned int target_offset,
915 unsigned int next_offset)
916{
917 long size_of_base_struct = elems - (const char *)base;
918 const struct xt_entry_target *t;
919 const char *e = base;
920
921 /* target start is within the ip/ip6/arpt_entry struct */
922 if (target_offset < size_of_base_struct)
923 return -EINVAL;
924
925 if (target_offset + sizeof(*t) > next_offset)
926 return -EINVAL;
927
928 t = (void *)(e + target_offset);
929 if (t->u.target_size < sizeof(*t))
930 return -EINVAL;
931
932 if (target_offset + t->u.target_size > next_offset)
933 return -EINVAL;
934
935 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
936 const struct xt_standard_target *st = (const void *)t;
937
938 if (XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
939 return -EINVAL;
940
941 if (!verdict_ok(st->verdict))
942 return -EINVAL;
943 } else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
944 const struct xt_error_target *et = (const void *)t;
945
946 if (!error_tg_ok(t->u.target_size, sizeof(*et),
947 et->errorname, sizeof(et->errorname)))
948 return -EINVAL;
949 }
950
951 return xt_check_entry_match(elems, base + target_offset,
952 __alignof__(struct xt_entry_match));
953}
954EXPORT_SYMBOL(xt_check_entry_offsets);
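
/* Illustrative sketch only (not the actual ip_tables code; the helper
 * name walk_entries() and its arguments are hypothetical) of how a
 * per-family translate routine is expected to drive this check over a
 * rule blob:
 *
 *	static int walk_entries(const void *blob, unsigned int total_size)
 *	{
 *		unsigned int offset = 0;
 *
 *		while (offset < total_size) {
 *			const struct ipt_entry *e = blob + offset;
 *			int ret;
 *
 *			if (offset + sizeof(*e) > total_size ||
 *			    e->next_offset > total_size - offset)
 *				return -EINVAL;
 *
 *			ret = xt_check_entry_offsets(e, e->elems,
 *						     e->target_offset,
 *						     e->next_offset);
 *			if (ret)
 *				return ret;
 *
 *			offset += e->next_offset;
 *		}
 *		return 0;
 *	}
 *
 * Bounding e->next_offset before the call matters because the helper
 * assumes base..base + next_offset is accessible.
 */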
955
956/**
957 * xt_alloc_entry_offsets - allocate array to store rule head offsets
958 *
959 * @size: number of entries
960 *
 * Return: NULL, or a zeroed array allocated with kvcalloc() (free with kvfree())
962 */
963unsigned int *xt_alloc_entry_offsets(unsigned int size)
964{
965 if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int))
966 return NULL;
967
	return kvcalloc(size, sizeof(unsigned int), GFP_KERNEL);
}
971EXPORT_SYMBOL(xt_alloc_entry_offsets);
972
973/**
974 * xt_find_jump_offset - check if target is a valid jump offset
975 *
976 * @offsets: array containing all valid rule start offsets of a rule blob
977 * @target: the jump target to search for
 * @size: number of entries in @offsets
979 */
980bool xt_find_jump_offset(const unsigned int *offsets,
981 unsigned int target, unsigned int size)
982{
983 int m, low = 0, hi = size;
984
985 while (hi > low) {
986 m = (low + hi) / 2u;
987
988 if (offsets[m] > target)
989 hi = m;
990 else if (offsets[m] < target)
991 low = m + 1;
992 else
993 return true;
994 }
995
996 return false;
997}
998EXPORT_SYMBOL(xt_find_jump_offset);
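
/* How the two helpers above pair up in the per-family translate code
 * (simplified; "offsets", "cnt", "entry0" and "newpos" are illustrative
 * names, not part of this file):
 *
 *	offsets = xt_alloc_entry_offsets(newinfo->number);
 *	if (!offsets)
 *		return -ENOMEM;
 *
 *	// first pass over the blob: remember where every rule starts
 *	offsets[cnt++] = (void *)e - entry0;
 *
 *	// second pass: a standard-target jump is only valid if it lands
 *	// exactly on one of the recorded rule heads
 *	if (!xt_find_jump_offset(offsets, newpos, newinfo->number))
 *		goto err;
 *
 *	kvfree(offsets);
 *
 * xt_find_jump_offset() does a binary search, so it relies on @offsets
 * being sorted in ascending order; filling it in blob order guarantees
 * that.
 */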
999
1000int xt_check_target(struct xt_tgchk_param *par,
1001 unsigned int size, u16 proto, bool inv_proto)
1002{
1003 int ret;
1004
1005 if (XT_ALIGN(par->target->targetsize) != size) {
1006 pr_err_ratelimited("%s_tables: %s.%u target: invalid size %u (kernel) != (user) %u\n",
1007 xt_prefix[par->family], par->target->name,
1008 par->target->revision,
1009 XT_ALIGN(par->target->targetsize), size);
1010 return -EINVAL;
1011 }
1012 if (par->target->table != NULL &&
1013 strcmp(par->target->table, par->table) != 0) {
1014 pr_info_ratelimited("%s_tables: %s target: only valid in %s table, not %s\n",
1015 xt_prefix[par->family], par->target->name,
1016 par->target->table, par->table);
1017 return -EINVAL;
1018 }
1019 if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
1020 char used[64], allow[64];
1021
1022 pr_info_ratelimited("%s_tables: %s target: used from hooks %s, but only usable from %s\n",
1023 xt_prefix[par->family], par->target->name,
1024 textify_hooks(used, sizeof(used),
1025 par->hook_mask, par->family),
1026 textify_hooks(allow, sizeof(allow),
1027 par->target->hooks,
1028 par->family));
1029 return -EINVAL;
1030 }
1031 if (par->target->proto && (par->target->proto != proto || inv_proto)) {
1032 pr_info_ratelimited("%s_tables: %s target: only valid for protocol %u\n",
1033 xt_prefix[par->family], par->target->name,
1034 par->target->proto);
1035 return -EINVAL;
1036 }
1037 if (par->target->checkentry != NULL) {
1038 ret = par->target->checkentry(par);
1039 if (ret < 0)
1040 return ret;
1041 else if (ret > 0)
1042 /* Flag up potential errors. */
1043 return -EIO;
1044 }
1045 return 0;
1046}
1047EXPORT_SYMBOL_GPL(xt_check_target);
1048
1049/**
1050 * xt_copy_counters - copy counters and metadata from a sockptr_t
1051 *
1052 * @arg: src sockptr
1053 * @len: alleged size of userspace memory
1054 * @info: where to store the xt_counters_info metadata
1055 *
 * Copies the counter metadata from @arg and stores it in @info.
 *
 * vmallocs memory to hold the counters, then copies the counter data
 * from @arg to the new memory and returns a pointer to it.
 *
 * If called from a compat syscall, @info gets converted automatically to the
 * 64bit representation.
 *
 * Return: pointer that the caller has to test via IS_ERR().
 * If IS_ERR() is false, the caller has to vfree() the pointer.
1068 */
1069void *xt_copy_counters(sockptr_t arg, unsigned int len,
1070 struct xt_counters_info *info)
1071{
1072 size_t offset;
1073 void *mem;
1074 u64 size;
1075
1076#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
1077 if (in_compat_syscall()) {
1078 /* structures only differ in size due to alignment */
1079 struct compat_xt_counters_info compat_tmp;
1080
1081 if (len <= sizeof(compat_tmp))
1082 return ERR_PTR(-EINVAL);
1083
1084 len -= sizeof(compat_tmp);
1085 if (copy_from_sockptr(&compat_tmp, arg, sizeof(compat_tmp)) != 0)
1086 return ERR_PTR(-EFAULT);
1087
1088 memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
1089 info->num_counters = compat_tmp.num_counters;
1090 offset = sizeof(compat_tmp);
1091 } else
1092#endif
1093 {
1094 if (len <= sizeof(*info))
1095 return ERR_PTR(-EINVAL);
1096
1097 len -= sizeof(*info);
1098 if (copy_from_sockptr(info, arg, sizeof(*info)) != 0)
1099 return ERR_PTR(-EFAULT);
1100
1101 offset = sizeof(*info);
1102 }
1103 info->name[sizeof(info->name) - 1] = '\0';
1104
1105 size = sizeof(struct xt_counters);
1106 size *= info->num_counters;
1107
1108 if (size != (u64)len)
1109 return ERR_PTR(-EINVAL);
1110
1111 mem = vmalloc(len);
1112 if (!mem)
1113 return ERR_PTR(-ENOMEM);
1114
1115 if (copy_from_sockptr_offset(mem, arg, offset, len) == 0)
1116 return mem;
1117
1118 vfree(mem);
1119 return ERR_PTR(-EFAULT);
1120}
1121EXPORT_SYMBOL_GPL(xt_copy_counters);
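
/* Expected calling convention, sketched after the do_add_counters()
 * implementations in the per-family code (simplified):
 *
 *	struct xt_counters_info tmp;
 *	struct xt_counters *paddc;
 *
 *	paddc = xt_copy_counters(arg, len, &tmp);
 *	if (IS_ERR(paddc))
 *		return PTR_ERR(paddc);
 *
 *	// ... add tmp.num_counters entries from paddc to the table ...
 *
 *	vfree(paddc);
 */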
1122
1123#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
1124int xt_compat_target_offset(const struct xt_target *target)
1125{
1126 u_int16_t csize = target->compatsize ? : target->targetsize;
1127 return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
1128}
1129EXPORT_SYMBOL_GPL(xt_compat_target_offset);
1130
1131void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
1132 unsigned int *size)
1133{
1134 const struct xt_target *target = t->u.kernel.target;
1135 struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
1136 int off = xt_compat_target_offset(target);
1137 u_int16_t tsize = ct->u.user.target_size;
1138 char name[sizeof(t->u.user.name)];
1139
1140 t = *dstptr;
1141 memcpy(t, ct, sizeof(*ct));
1142 if (target->compat_from_user)
1143 target->compat_from_user(t->data, ct->data);
1144 else
1145 unsafe_memcpy(t->data, ct->data, tsize - sizeof(*ct),
1146 /* UAPI 0-sized destination */);
1147
1148 tsize += off;
1149 t->u.user.target_size = tsize;
1150 strscpy(name, target->name, sizeof(name));
1151 module_put(target->me);
1152 strscpy_pad(t->u.user.name, name, sizeof(t->u.user.name));
1153
1154 *size += off;
1155 *dstptr += tsize;
1156}
1157EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
1158
1159int xt_compat_target_to_user(const struct xt_entry_target *t,
1160 void __user **dstptr, unsigned int *size)
1161{
1162 const struct xt_target *target = t->u.kernel.target;
1163 struct compat_xt_entry_target __user *ct = *dstptr;
1164 int off = xt_compat_target_offset(target);
1165 u_int16_t tsize = t->u.user.target_size - off;
1166
1167 if (XT_OBJ_TO_USER(ct, t, target, tsize))
1168 return -EFAULT;
1169
1170 if (target->compat_to_user) {
1171 if (target->compat_to_user((void __user *)ct->data, t->data))
1172 return -EFAULT;
1173 } else {
1174 if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
1175 return -EFAULT;
1176 }
1177
1178 *size -= off;
1179 *dstptr += tsize;
1180 return 0;
1181}
1182EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
1183#endif
1184
1185struct xt_table_info *xt_alloc_table_info(unsigned int size)
1186{
1187 struct xt_table_info *info = NULL;
1188 size_t sz = sizeof(*info) + size;
1189
1190 if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
1191 return NULL;
1192
1193 info = kvmalloc(sz, GFP_KERNEL_ACCOUNT);
1194 if (!info)
1195 return NULL;
1196
1197 memset(info, 0, sizeof(*info));
1198 info->size = size;
1199 return info;
1200}
1201EXPORT_SYMBOL(xt_alloc_table_info);
1202
1203void xt_free_table_info(struct xt_table_info *info)
1204{
1205 int cpu;
1206
1207 if (info->jumpstack != NULL) {
1208 for_each_possible_cpu(cpu)
1209 kvfree(info->jumpstack[cpu]);
1210 kvfree(info->jumpstack);
1211 }
1212
1213 kvfree(info);
1214}
1215EXPORT_SYMBOL(xt_free_table_info);
1216
1217struct xt_table *xt_find_table(struct net *net, u8 af, const char *name)
1218{
1219 struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
1220 struct xt_table *t;
1221
1222 mutex_lock(&xt[af].mutex);
1223 list_for_each_entry(t, &xt_net->tables[af], list) {
1224 if (strcmp(t->name, name) == 0) {
1225 mutex_unlock(&xt[af].mutex);
1226 return t;
1227 }
1228 }
1229 mutex_unlock(&xt[af].mutex);
1230 return NULL;
1231}
1232EXPORT_SYMBOL(xt_find_table);
1233
1234/* Find table by name, grabs mutex & ref. Returns ERR_PTR on error. */
1235struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
1236 const char *name)
1237{
1238 struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
1239 struct module *owner = NULL;
1240 struct xt_template *tmpl;
1241 struct xt_table *t;
1242
1243 mutex_lock(&xt[af].mutex);
1244 list_for_each_entry(t, &xt_net->tables[af], list)
1245 if (strcmp(t->name, name) == 0 && try_module_get(t->me))
1246 return t;
1247
1248 /* Table doesn't exist in this netns, check larval list */
1249 list_for_each_entry(tmpl, &xt_templates[af], list) {
1250 int err;
1251
1252 if (strcmp(tmpl->name, name))
1253 continue;
1254 if (!try_module_get(tmpl->me))
1255 goto out;
1256
1257 owner = tmpl->me;
1258
1259 mutex_unlock(&xt[af].mutex);
1260 err = tmpl->table_init(net);
1261 if (err < 0) {
1262 module_put(owner);
1263 return ERR_PTR(err);
1264 }
1265
1266 mutex_lock(&xt[af].mutex);
1267 break;
1268 }
1269
1270 /* and once again: */
1271 list_for_each_entry(t, &xt_net->tables[af], list)
1272 if (strcmp(t->name, name) == 0 && owner == t->me)
1273 return t;
1274
1275 module_put(owner);
1276 out:
1277 mutex_unlock(&xt[af].mutex);
1278 return ERR_PTR(-ENOENT);
1279}
1280EXPORT_SYMBOL_GPL(xt_find_table_lock);
1281
1282struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
1283 const char *name)
1284{
1285 struct xt_table *t = xt_find_table_lock(net, af, name);
1286
1287#ifdef CONFIG_MODULES
1288 if (IS_ERR(t)) {
1289 int err = request_module("%stable_%s", xt_prefix[af], name);
1290 if (err < 0)
1291 return ERR_PTR(err);
1292 t = xt_find_table_lock(net, af, name);
1293 }
1294#endif
1295
1296 return t;
1297}
1298EXPORT_SYMBOL_GPL(xt_request_find_table_lock);
1299
1300void xt_table_unlock(struct xt_table *table)
1301{
1302 mutex_unlock(&xt[table->af].mutex);
1303}
1304EXPORT_SYMBOL_GPL(xt_table_unlock);
1305
1306#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
1307void xt_compat_lock(u_int8_t af)
1308{
1309 mutex_lock(&xt[af].compat_mutex);
1310}
1311EXPORT_SYMBOL_GPL(xt_compat_lock);
1312
1313void xt_compat_unlock(u_int8_t af)
1314{
1315 mutex_unlock(&xt[af].compat_mutex);
1316}
1317EXPORT_SYMBOL_GPL(xt_compat_unlock);
1318#endif
1319
1320DEFINE_PER_CPU(seqcount_t, xt_recseq);
1321EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
1322
1323struct static_key xt_tee_enabled __read_mostly;
1324EXPORT_SYMBOL_GPL(xt_tee_enabled);
1325
1326static int xt_jumpstack_alloc(struct xt_table_info *i)
1327{
1328 unsigned int size;
1329 int cpu;
1330
1331 size = sizeof(void **) * nr_cpu_ids;
1332 if (size > PAGE_SIZE)
1333 i->jumpstack = kvzalloc(size, GFP_KERNEL);
1334 else
1335 i->jumpstack = kzalloc(size, GFP_KERNEL);
1336 if (i->jumpstack == NULL)
1337 return -ENOMEM;
1338
1339 /* ruleset without jumps -- no stack needed */
1340 if (i->stacksize == 0)
1341 return 0;
1342
1343 /* Jumpstack needs to be able to record two full callchains, one
1344 * from the first rule set traversal, plus one table reentrancy
	 * via -j TEE without clobbering the callchain that brought us to
	 * the TEE target.
	 *
	 * This is done by allocating two jumpstacks per cpu; on reentry
1349 * the upper half of the stack is used.
1350 *
1351 * see the jumpstack setup in ipt_do_table() for more details.
1352 */
1353 size = sizeof(void *) * i->stacksize * 2u;
1354 for_each_possible_cpu(cpu) {
1355 i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
1356 cpu_to_node(cpu));
1357 if (i->jumpstack[cpu] == NULL)
1358 /*
1359 * Freeing will be done later on by the callers. The
1360 * chain is: xt_replace_table -> __do_replace ->
1361 * do_replace -> xt_free_table_info.
1362 */
1363 return -ENOMEM;
1364 }
1365
1366 return 0;
1367}
1368
1369struct xt_counters *xt_counters_alloc(unsigned int counters)
1370{
1371 struct xt_counters *mem;
1372
1373 if (counters == 0 || counters > INT_MAX / sizeof(*mem))
1374 return NULL;
1375
1376 counters *= sizeof(*mem);
1377 if (counters > XT_MAX_TABLE_SIZE)
1378 return NULL;
1379
1380 return vzalloc(counters);
1381}
1382EXPORT_SYMBOL(xt_counters_alloc);
1383
1384struct xt_table_info *
1385xt_replace_table(struct xt_table *table,
1386 unsigned int num_counters,
1387 struct xt_table_info *newinfo,
1388 int *error)
1389{
1390 struct xt_table_info *private;
1391 unsigned int cpu;
1392 int ret;
1393
1394 ret = xt_jumpstack_alloc(newinfo);
1395 if (ret < 0) {
1396 *error = ret;
1397 return NULL;
1398 }
1399
1400 /* Do the substitution. */
1401 local_bh_disable();
1402 private = table->private;
1403
1404 /* Check inside lock: is the old number correct? */
1405 if (num_counters != private->number) {
1406 pr_debug("num_counters != table->private->number (%u/%u)\n",
1407 num_counters, private->number);
1408 local_bh_enable();
1409 *error = -EAGAIN;
1410 return NULL;
1411 }
1412
1413 newinfo->initial_entries = private->initial_entries;
1414 /*
1415 * Ensure contents of newinfo are visible before assigning to
1416 * private.
1417 */
1418 smp_wmb();
1419 table->private = newinfo;
1420
1421 /* make sure all cpus see new ->private value */
1422 smp_mb();
1423
1424 /*
1425 * Even though table entries have now been swapped, other CPU's
1426 * may still be using the old entries...
1427 */
1428 local_bh_enable();
1429
1430 /* ... so wait for even xt_recseq on all cpus */
1431 for_each_possible_cpu(cpu) {
1432 seqcount_t *s = &per_cpu(xt_recseq, cpu);
1433 u32 seq = raw_read_seqcount(s);
1434
1435 if (seq & 1) {
1436 do {
1437 cond_resched();
1438 cpu_relax();
1439 } while (seq == raw_read_seqcount(s));
1440 }
1441 }
1442
1443 audit_log_nfcfg(table->name, table->af, private->number,
1444 !private->number ? AUDIT_XT_OP_REGISTER :
1445 AUDIT_XT_OP_REPLACE,
1446 GFP_KERNEL);
1447 return private;
1448}
1449EXPORT_SYMBOL_GPL(xt_replace_table);
1450
1451struct xt_table *xt_register_table(struct net *net,
1452 const struct xt_table *input_table,
1453 struct xt_table_info *bootstrap,
1454 struct xt_table_info *newinfo)
1455{
1456 struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
1457 struct xt_table_info *private;
1458 struct xt_table *t, *table;
1459 int ret;
1460
1461 /* Don't add one object to multiple lists. */
1462 table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
1463 if (!table) {
1464 ret = -ENOMEM;
1465 goto out;
1466 }
1467
1468 mutex_lock(&xt[table->af].mutex);
1469 /* Don't autoload: we'd eat our tail... */
1470 list_for_each_entry(t, &xt_net->tables[table->af], list) {
1471 if (strcmp(t->name, table->name) == 0) {
1472 ret = -EEXIST;
1473 goto unlock;
1474 }
1475 }
1476
1477 /* Simplifies replace_table code. */
1478 table->private = bootstrap;
1479
1480 if (!xt_replace_table(table, 0, newinfo, &ret))
1481 goto unlock;
1482
1483 private = table->private;
1484 pr_debug("table->private->number = %u\n", private->number);
1485
1486 /* save number of initial entries */
1487 private->initial_entries = private->number;
1488
1489 list_add(&table->list, &xt_net->tables[table->af]);
1490 mutex_unlock(&xt[table->af].mutex);
1491 return table;
1492
1493unlock:
1494 mutex_unlock(&xt[table->af].mutex);
1495 kfree(table);
1496out:
1497 return ERR_PTR(ret);
1498}
1499EXPORT_SYMBOL_GPL(xt_register_table);
1500
1501void *xt_unregister_table(struct xt_table *table)
1502{
1503 struct xt_table_info *private;
1504
1505 mutex_lock(&xt[table->af].mutex);
1506 private = table->private;
1507 list_del(&table->list);
1508 mutex_unlock(&xt[table->af].mutex);
1509 audit_log_nfcfg(table->name, table->af, private->number,
1510 AUDIT_XT_OP_UNREGISTER, GFP_KERNEL);
1511 kfree(table->ops);
1512 kfree(table);
1513
1514 return private;
1515}
1516EXPORT_SYMBOL_GPL(xt_unregister_table);
1517
1518#ifdef CONFIG_PROC_FS
1519static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
1520{
1521 u8 af = (unsigned long)pde_data(file_inode(seq->file));
1522 struct net *net = seq_file_net(seq);
1523 struct xt_pernet *xt_net;
1524
1525 xt_net = net_generic(net, xt_pernet_id);
1526
1527 mutex_lock(&xt[af].mutex);
1528 return seq_list_start(&xt_net->tables[af], *pos);
1529}
1530
1531static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1532{
1533 u8 af = (unsigned long)pde_data(file_inode(seq->file));
1534 struct net *net = seq_file_net(seq);
1535 struct xt_pernet *xt_net;
1536
1537 xt_net = net_generic(net, xt_pernet_id);
1538
1539 return seq_list_next(v, &xt_net->tables[af], pos);
1540}
1541
1542static void xt_table_seq_stop(struct seq_file *seq, void *v)
1543{
1544 u_int8_t af = (unsigned long)pde_data(file_inode(seq->file));
1545
1546 mutex_unlock(&xt[af].mutex);
1547}
1548
1549static int xt_table_seq_show(struct seq_file *seq, void *v)
1550{
1551 struct xt_table *table = list_entry(v, struct xt_table, list);
1552
1553 if (*table->name)
1554 seq_printf(seq, "%s\n", table->name);
1555 return 0;
1556}
1557
1558static const struct seq_operations xt_table_seq_ops = {
1559 .start = xt_table_seq_start,
1560 .next = xt_table_seq_next,
1561 .stop = xt_table_seq_stop,
1562 .show = xt_table_seq_show,
1563};
1564
1565/*
 * Traversal state for the *_tables_matches and *_tables_targets /proc
 * files; it helps with crossing the multi-AF mutexes.
1568 */
1569struct nf_mttg_trav {
1570 struct list_head *head, *curr;
1571 uint8_t class;
1572};
1573
1574enum {
1575 MTTG_TRAV_INIT,
1576 MTTG_TRAV_NFP_UNSPEC,
1577 MTTG_TRAV_NFP_SPEC,
1578 MTTG_TRAV_DONE,
1579};
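
/* The traversal is a small state machine: MTTG_TRAV_INIT takes the
 * NFPROTO_UNSPEC mutex and starts on the family-independent list; once
 * that list wraps around, the UNSPEC mutex is dropped, the mutex of the
 * /proc file's own family is taken and the walk continues on that
 * family's list (MTTG_TRAV_NFP_SPEC).  xt_mttg_seq_stop() releases
 * whichever mutex is still held, so userspace sees the two lists as one
 * seamless sequence.
 */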
1580
1581static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
1582 bool is_target)
1583{
1584 static const uint8_t next_class[] = {
1585 [MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
1586 [MTTG_TRAV_NFP_SPEC] = MTTG_TRAV_DONE,
1587 };
1588 uint8_t nfproto = (unsigned long)pde_data(file_inode(seq->file));
1589 struct nf_mttg_trav *trav = seq->private;
1590
1591 if (ppos != NULL)
1592 ++(*ppos);
1593
1594 switch (trav->class) {
1595 case MTTG_TRAV_INIT:
1596 trav->class = MTTG_TRAV_NFP_UNSPEC;
1597 mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
1598 trav->head = trav->curr = is_target ?
1599 &xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
1600 break;
1601 case MTTG_TRAV_NFP_UNSPEC:
1602 trav->curr = trav->curr->next;
1603 if (trav->curr != trav->head)
1604 break;
1605 mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
1606 mutex_lock(&xt[nfproto].mutex);
1607 trav->head = trav->curr = is_target ?
1608 &xt[nfproto].target : &xt[nfproto].match;
1609 trav->class = next_class[trav->class];
1610 break;
1611 case MTTG_TRAV_NFP_SPEC:
1612 trav->curr = trav->curr->next;
1613 if (trav->curr != trav->head)
1614 break;
1615 fallthrough;
1616 default:
1617 return NULL;
1618 }
1619 return trav;
1620}
1621
1622static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
1623 bool is_target)
1624{
1625 struct nf_mttg_trav *trav = seq->private;
1626 unsigned int j;
1627
1628 trav->class = MTTG_TRAV_INIT;
1629 for (j = 0; j < *pos; ++j)
1630 if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
1631 return NULL;
1632 return trav;
1633}
1634
1635static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
1636{
1637 uint8_t nfproto = (unsigned long)pde_data(file_inode(seq->file));
1638 struct nf_mttg_trav *trav = seq->private;
1639
1640 switch (trav->class) {
1641 case MTTG_TRAV_NFP_UNSPEC:
1642 mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
1643 break;
1644 case MTTG_TRAV_NFP_SPEC:
1645 mutex_unlock(&xt[nfproto].mutex);
1646 break;
1647 }
1648}
1649
1650static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
1651{
1652 return xt_mttg_seq_start(seq, pos, false);
1653}
1654
1655static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
1656{
1657 return xt_mttg_seq_next(seq, v, ppos, false);
1658}
1659
1660static int xt_match_seq_show(struct seq_file *seq, void *v)
1661{
1662 const struct nf_mttg_trav *trav = seq->private;
1663 const struct xt_match *match;
1664
1665 switch (trav->class) {
1666 case MTTG_TRAV_NFP_UNSPEC:
1667 case MTTG_TRAV_NFP_SPEC:
1668 if (trav->curr == trav->head)
1669 return 0;
1670 match = list_entry(trav->curr, struct xt_match, list);
1671 if (*match->name)
1672 seq_printf(seq, "%s\n", match->name);
1673 }
1674 return 0;
1675}
1676
1677static const struct seq_operations xt_match_seq_ops = {
1678 .start = xt_match_seq_start,
1679 .next = xt_match_seq_next,
1680 .stop = xt_mttg_seq_stop,
1681 .show = xt_match_seq_show,
1682};
1683
1684static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
1685{
1686 return xt_mttg_seq_start(seq, pos, true);
1687}
1688
1689static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
1690{
1691 return xt_mttg_seq_next(seq, v, ppos, true);
1692}
1693
1694static int xt_target_seq_show(struct seq_file *seq, void *v)
1695{
1696 const struct nf_mttg_trav *trav = seq->private;
1697 const struct xt_target *target;
1698
1699 switch (trav->class) {
1700 case MTTG_TRAV_NFP_UNSPEC:
1701 case MTTG_TRAV_NFP_SPEC:
1702 if (trav->curr == trav->head)
1703 return 0;
1704 target = list_entry(trav->curr, struct xt_target, list);
1705 if (*target->name)
1706 seq_printf(seq, "%s\n", target->name);
1707 }
1708 return 0;
1709}
1710
1711static const struct seq_operations xt_target_seq_ops = {
1712 .start = xt_target_seq_start,
1713 .next = xt_target_seq_next,
1714 .stop = xt_mttg_seq_stop,
1715 .show = xt_target_seq_show,
1716};
1717
1718#define FORMAT_TABLES "_tables_names"
1719#define FORMAT_MATCHES "_tables_matches"
1720#define FORMAT_TARGETS "_tables_targets"
1721
1722#endif /* CONFIG_PROC_FS */
1723
1724/**
1725 * xt_hook_ops_alloc - set up hooks for a new table
1726 * @table: table with metadata needed to set up hooks
1727 * @fn: Hook function
1728 *
 * This function will create the nf_hook_ops that the x_tables core and
 * the per-family table code need in order to register the table's hooks.
1731 */
1732struct nf_hook_ops *
1733xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
1734{
1735 unsigned int hook_mask = table->valid_hooks;
1736 uint8_t i, num_hooks = hweight32(hook_mask);
1737 uint8_t hooknum;
1738 struct nf_hook_ops *ops;
1739
1740 if (!num_hooks)
1741 return ERR_PTR(-EINVAL);
1742
1743 ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
1744 if (ops == NULL)
1745 return ERR_PTR(-ENOMEM);
1746
1747 for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
1748 hook_mask >>= 1, ++hooknum) {
1749 if (!(hook_mask & 1))
1750 continue;
1751 ops[i].hook = fn;
1752 ops[i].pf = table->af;
1753 ops[i].hooknum = hooknum;
1754 ops[i].priority = table->priority;
1755 ++i;
1756 }
1757
1758 return ops;
1759}
1760EXPORT_SYMBOL_GPL(xt_hook_ops_alloc);
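
/* Sketch of the expected usage by a table module (illustrative;
 * "packet_foo", "foo_ops" and iptable_foo_init() are hypothetical
 * names modelled on iptable_filter and friends):
 *
 *	static struct nf_hook_ops *foo_ops __read_mostly;
 *
 *	static int __init iptable_foo_init(void)
 *	{
 *		foo_ops = xt_hook_ops_alloc(&packet_foo, ipt_do_table);
 *		if (IS_ERR(foo_ops))
 *			return PTR_ERR(foo_ops);
 *		// the per-net setup later hands foo_ops to the netfilter
 *		// core (nf_register_net_hooks()) when the table is
 *		// instantiated in a namespace
 *		...
 *	}
 *
 * The caller owns the returned array, one nf_hook_ops per bit set in
 * table->valid_hooks, and frees it with kfree() when it is no longer
 * needed.
 */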
1761
1762int xt_register_template(const struct xt_table *table,
1763 int (*table_init)(struct net *net))
1764{
1765 int ret = -EEXIST, af = table->af;
1766 struct xt_template *t;
1767
1768 mutex_lock(&xt[af].mutex);
1769
1770 list_for_each_entry(t, &xt_templates[af], list) {
1771 if (WARN_ON_ONCE(strcmp(table->name, t->name) == 0))
1772 goto out_unlock;
1773 }
1774
1775 ret = -ENOMEM;
1776 t = kzalloc(sizeof(*t), GFP_KERNEL);
1777 if (!t)
1778 goto out_unlock;
1779
1780 BUILD_BUG_ON(sizeof(t->name) != sizeof(table->name));
1781
1782 strscpy(t->name, table->name, sizeof(t->name));
1783 t->table_init = table_init;
1784 t->me = table->me;
1785 list_add(&t->list, &xt_templates[af]);
1786 ret = 0;
1787out_unlock:
1788 mutex_unlock(&xt[af].mutex);
1789 return ret;
1790}
1791EXPORT_SYMBOL_GPL(xt_register_template);
1792
1793void xt_unregister_template(const struct xt_table *table)
1794{
1795 struct xt_template *t;
1796 int af = table->af;
1797
1798 mutex_lock(&xt[af].mutex);
1799 list_for_each_entry(t, &xt_templates[af], list) {
1800 if (strcmp(table->name, t->name))
1801 continue;
1802
1803 list_del(&t->list);
1804 mutex_unlock(&xt[af].mutex);
1805 kfree(t);
1806 return;
1807 }
1808
1809 mutex_unlock(&xt[af].mutex);
1810 WARN_ON_ONCE(1);
1811}
1812EXPORT_SYMBOL_GPL(xt_unregister_template);
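
/* Templates act as "larval" tables: a table module registers its name
 * and an init callback once at module load, and xt_find_table_lock()
 * above consults this list to instantiate the table on demand in a
 * network namespace the first time it is requested by name.
 * Unregistering the template only removes the blueprint; tables that
 * were already instantiated in namespaces are torn down separately by
 * the owning module.
 */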
1813
1814int xt_proto_init(struct net *net, u_int8_t af)
1815{
1816#ifdef CONFIG_PROC_FS
1817 char buf[XT_FUNCTION_MAXNAMELEN];
1818 struct proc_dir_entry *proc;
1819 kuid_t root_uid;
1820 kgid_t root_gid;
1821#endif
1822
1823 if (af >= ARRAY_SIZE(xt_prefix))
1824 return -EINVAL;
1825
1826
1827#ifdef CONFIG_PROC_FS
1828 root_uid = make_kuid(net->user_ns, 0);
1829 root_gid = make_kgid(net->user_ns, 0);
1830
1831 strscpy(buf, xt_prefix[af], sizeof(buf));
1832 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1833 proc = proc_create_net_data(buf, 0440, net->proc_net, &xt_table_seq_ops,
1834 sizeof(struct seq_net_private),
1835 (void *)(unsigned long)af);
1836 if (!proc)
1837 goto out;
1838 if (uid_valid(root_uid) && gid_valid(root_gid))
1839 proc_set_user(proc, root_uid, root_gid);
1840
1841 strscpy(buf, xt_prefix[af], sizeof(buf));
1842 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1843 proc = proc_create_seq_private(buf, 0440, net->proc_net,
1844 &xt_match_seq_ops, sizeof(struct nf_mttg_trav),
1845 (void *)(unsigned long)af);
1846 if (!proc)
1847 goto out_remove_tables;
1848 if (uid_valid(root_uid) && gid_valid(root_gid))
1849 proc_set_user(proc, root_uid, root_gid);
1850
1851 strscpy(buf, xt_prefix[af], sizeof(buf));
1852 strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1853 proc = proc_create_seq_private(buf, 0440, net->proc_net,
1854 &xt_target_seq_ops, sizeof(struct nf_mttg_trav),
1855 (void *)(unsigned long)af);
1856 if (!proc)
1857 goto out_remove_matches;
1858 if (uid_valid(root_uid) && gid_valid(root_gid))
1859 proc_set_user(proc, root_uid, root_gid);
1860#endif
1861
1862 return 0;
1863
1864#ifdef CONFIG_PROC_FS
1865out_remove_matches:
1866 strscpy(buf, xt_prefix[af], sizeof(buf));
1867 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1868 remove_proc_entry(buf, net->proc_net);
1869
1870out_remove_tables:
1871 strscpy(buf, xt_prefix[af], sizeof(buf));
1872 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1873 remove_proc_entry(buf, net->proc_net);
1874out:
1875 return -1;
1876#endif
1877}
1878EXPORT_SYMBOL_GPL(xt_proto_init);
1879
1880void xt_proto_fini(struct net *net, u_int8_t af)
1881{
1882#ifdef CONFIG_PROC_FS
1883 char buf[XT_FUNCTION_MAXNAMELEN];
1884
1885 strscpy(buf, xt_prefix[af], sizeof(buf));
1886 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1887 remove_proc_entry(buf, net->proc_net);
1888
1889 strscpy(buf, xt_prefix[af], sizeof(buf));
1890 strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1891 remove_proc_entry(buf, net->proc_net);
1892
1893 strscpy(buf, xt_prefix[af], sizeof(buf));
1894 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1895 remove_proc_entry(buf, net->proc_net);
1896#endif /*CONFIG_PROC_FS*/
1897}
1898EXPORT_SYMBOL_GPL(xt_proto_fini);
1899
1900/**
1901 * xt_percpu_counter_alloc - allocate x_tables rule counter
1902 *
1903 * @state: pointer to xt_percpu allocation state
1904 * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
1905 *
1906 * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
1907 * contain the address of the real (percpu) counter.
1908 *
 * Rule evaluation needs to use the xt_get_this_cpu_counter() helper
1910 * to fetch the real percpu counter.
1911 *
1912 * To speed up allocation and improve data locality, a 4kb block is
1913 * allocated. Freeing any counter may free an entire block, so all
1914 * counters allocated using the same state must be freed at the same
1915 * time.
1916 *
1917 * xt_percpu_counter_alloc_state contains the base address of the
1918 * allocated page and the current sub-offset.
1919 *
1920 * returns false on error.
1921 */
1922bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
1923 struct xt_counters *counter)
1924{
1925 BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));
1926
1927 if (nr_cpu_ids <= 1)
1928 return true;
1929
1930 if (!state->mem) {
1931 state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
1932 XT_PCPU_BLOCK_SIZE);
1933 if (!state->mem)
1934 return false;
1935 }
1936 counter->pcnt = (__force unsigned long)(state->mem + state->off);
1937 state->off += sizeof(*counter);
1938 if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
1939 state->mem = NULL;
1940 state->off = 0;
1941 }
1942 return true;
1943}
1944EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);
1945
1946void xt_percpu_counter_free(struct xt_counters *counters)
1947{
1948 unsigned long pcnt = counters->pcnt;
1949
1950 if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
1951 free_percpu((void __percpu *)pcnt);
1952}
1953EXPORT_SYMBOL_GPL(xt_percpu_counter_free);
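
/* Sketch of how the two helpers above pair up (simplified; the real
 * callers are the per-family find_check_entry()/cleanup_entry()
 * routines):
 *
 *	struct xt_percpu_counter_alloc_state state = { };
 *
 *	// during table translation, once per rule:
 *	if (!xt_percpu_counter_alloc(&state, &e->counters))
 *		return -ENOMEM;
 *
 *	// on rule teardown:
 *	xt_percpu_counter_free(&e->counters);
 *
 * Counters are carved out of XT_PCPU_BLOCK_SIZE-aligned percpu blocks,
 * so only the counter that sits at the very start of a block triggers
 * free_percpu(); the rest of the block goes away with it, which is why
 * all counters allocated through one state must be released together.
 */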
1954
1955static int __net_init xt_net_init(struct net *net)
1956{
1957 struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
1958 int i;
1959
1960 for (i = 0; i < NFPROTO_NUMPROTO; i++)
1961 INIT_LIST_HEAD(&xt_net->tables[i]);
1962 return 0;
1963}
1964
1965static void __net_exit xt_net_exit(struct net *net)
1966{
1967 struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
1968 int i;
1969
1970 for (i = 0; i < NFPROTO_NUMPROTO; i++)
1971 WARN_ON_ONCE(!list_empty(&xt_net->tables[i]));
1972}
1973
1974static struct pernet_operations xt_net_ops = {
1975 .init = xt_net_init,
1976 .exit = xt_net_exit,
1977 .id = &xt_pernet_id,
1978 .size = sizeof(struct xt_pernet),
1979};
1980
1981static int __init xt_init(void)
1982{
1983 unsigned int i;
1984 int rv;
1985
1986 for_each_possible_cpu(i) {
1987 seqcount_init(&per_cpu(xt_recseq, i));
1988 }
1989
1990 xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
1991 if (!xt)
1992 return -ENOMEM;
1993
1994 for (i = 0; i < NFPROTO_NUMPROTO; i++) {
1995 mutex_init(&xt[i].mutex);
1996#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
1997 mutex_init(&xt[i].compat_mutex);
1998 xt[i].compat_tab = NULL;
1999#endif
2000 INIT_LIST_HEAD(&xt[i].target);
2001 INIT_LIST_HEAD(&xt[i].match);
2002 INIT_LIST_HEAD(&xt_templates[i]);
2003 }
2004 rv = register_pernet_subsys(&xt_net_ops);
2005 if (rv < 0)
2006 kfree(xt);
2007 return rv;
2008}
2009
2010static void __exit xt_fini(void)
2011{
2012 unregister_pernet_subsys(&xt_net_ops);
2013 kfree(xt);
2014}
2015
2016module_init(xt_init);
2017module_exit(xt_fini);