/*
 * net/sched/ipt.c	iptables target interface
 *
 * TODO: Add other tables. For now we only support the ipv4 table targets
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Copyright:	Jamal Hadi Salim (2002-4)
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <linux/tc_act/tc_ipt.h>
#include <net/tc_act/tc_ipt.h>

#include <linux/netfilter_ipv4/ip_tables.h>


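/* Legacy storage for this action: a shared hash table of instances keyed by
 * the action index masked with IPT_TAB_MASK, protected by ipt_lock.
 */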
#define IPT_TAB_MASK	15
static struct tcf_common *tcf_ipt_ht[IPT_TAB_MASK + 1];
static u32 ipt_idx_gen;
static DEFINE_RWLOCK(ipt_lock);

static struct tcf_hashinfo ipt_hash_info = {
	.htab = tcf_ipt_ht,
	.hmask = IPT_TAB_MASK,
	.lock = &ipt_lock,
};

static int ipt_init_target(struct xt_entry_target *t, char *table,
			   unsigned int hook)
{
	struct xt_tgchk_param par;
	struct xt_target *target;
	int ret = 0;

	target = xt_request_find_target(AF_INET, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target))
		return PTR_ERR(target);

	t->u.kernel.target = target;
	par.table = table;
	par.entryinfo = NULL;
	par.target = target;
	par.targinfo = t->data;
	par.hook_mask = hook;
	par.family = NFPROTO_IPV4;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
	if (ret < 0) {
		module_put(t->u.kernel.target->me);
		return ret;
	}
	return 0;
}

static void ipt_destroy_target(struct xt_entry_target *t)
{
	struct xt_tgdtor_param par = {
		.target = t->u.kernel.target,
		.targinfo = t->data,
	};
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}

static int tcf_ipt_release(struct tcf_ipt *ipt, int bind)
{
	int ret = 0;
	if (ipt) {
		if (bind)
			ipt->tcf_bindcnt--;
		ipt->tcf_refcnt--;
		if (ipt->tcf_bindcnt <= 0 && ipt->tcf_refcnt <= 0) {
			ipt_destroy_target(ipt->tcfi_t);
			kfree(ipt->tcfi_tname);
			kfree(ipt->tcfi_t);
			tcf_hash_destroy(&ipt->common, &ipt_hash_info);
			ret = ACT_P_DELETED;
		}
	}
	return ret;
}

static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
	[TCA_IPT_TABLE] = { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_IPT_HOOK] = { .type = NLA_U32 },
	[TCA_IPT_INDEX] = { .type = NLA_U32 },
	[TCA_IPT_TARG] = { .len = sizeof(struct xt_entry_target) },
};

static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est,
			struct tc_action *a, int ovr, int bind)
{
	struct nlattr *tb[TCA_IPT_MAX + 1];
	struct tcf_ipt *ipt;
	struct tcf_common *pc;
	struct xt_entry_target *td, *t;
	char *tname;
	int ret = 0, err;
	u32 hook = 0;
	u32 index = 0;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_IPT_MAX, nla, ipt_policy);
	if (err < 0)
		return err;

	if (tb[TCA_IPT_HOOK] == NULL)
		return -EINVAL;
	if (tb[TCA_IPT_TARG] == NULL)
		return -EINVAL;

	td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
	if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size)
		return -EINVAL;

	if (tb[TCA_IPT_INDEX] != NULL)
		index = nla_get_u32(tb[TCA_IPT_INDEX]);

	pc = tcf_hash_check(index, a, bind, &ipt_hash_info);
	if (!pc) {
		pc = tcf_hash_create(index, est, a, sizeof(*ipt), bind,
				     &ipt_idx_gen, &ipt_hash_info);
		if (IS_ERR(pc))
			return PTR_ERR(pc);
		ret = ACT_P_CREATED;
	} else {
		if (!ovr) {
			tcf_ipt_release(to_ipt(pc), bind);
			return -EEXIST;
		}
	}
	ipt = to_ipt(pc);

	hook = nla_get_u32(tb[TCA_IPT_HOOK]);

	err = -ENOMEM;
	tname = kmalloc(IFNAMSIZ, GFP_KERNEL);
	if (unlikely(!tname))
		goto err1;
	if (tb[TCA_IPT_TABLE] == NULL ||
	    nla_strlcpy(tname, tb[TCA_IPT_TABLE], IFNAMSIZ) >= IFNAMSIZ)
		strcpy(tname, "mangle");

	t = kmemdup(td, td->u.target_size, GFP_KERNEL);
	if (unlikely(!t))
		goto err2;

	err = ipt_init_target(t, tname, hook);
	if (err < 0)
		goto err3;

	spin_lock_bh(&ipt->tcf_lock);
	if (ret != ACT_P_CREATED) {
		ipt_destroy_target(ipt->tcfi_t);
		kfree(ipt->tcfi_tname);
		kfree(ipt->tcfi_t);
	}
	ipt->tcfi_tname = tname;
	ipt->tcfi_t = t;
	ipt->tcfi_hook = hook;
	spin_unlock_bh(&ipt->tcf_lock);
	if (ret == ACT_P_CREATED)
		tcf_hash_insert(pc, &ipt_hash_info);
	return ret;

err3:
	kfree(t);
err2:
	kfree(tname);
err1:
	kfree(pc);
	return err;
}

static int tcf_ipt_cleanup(struct tc_action *a, int bind)
{
	struct tcf_ipt *ipt = a->priv;
	return tcf_ipt_release(ipt, bind);
}

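/* Classic packet path: run the configured xt target on the skb under the
 * per-action lock and translate the netfilter verdict into a TC action code.
 */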
static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
		   struct tcf_result *res)
{
	int ret = 0, result = 0;
	struct tcf_ipt *ipt = a->priv;
	struct xt_action_param par;

	if (skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			return TC_ACT_UNSPEC;
	}

	spin_lock(&ipt->tcf_lock);

	ipt->tcf_tm.lastuse = jiffies;
	bstats_update(&ipt->tcf_bstats, skb);

	/* yes, we have to worry about both in and out dev
	 * worry later - danger - this API seems to have changed
	 * from earlier kernels
	 */
	par.in = skb->dev;
	par.out = NULL;
	par.hooknum = ipt->tcfi_hook;
	par.target = ipt->tcfi_t->u.kernel.target;
	par.targinfo = ipt->tcfi_t->data;
	ret = par.target->target(skb, &par);

	switch (ret) {
	case NF_ACCEPT:
		result = TC_ACT_OK;
		break;
	case NF_DROP:
		result = TC_ACT_SHOT;
		ipt->tcf_qstats.drops++;
		break;
	case XT_CONTINUE:
		result = TC_ACT_PIPE;
		break;
	default:
		net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n",
				       ret);
		result = TC_POLICE_OK;
		break;
	}
	spin_unlock(&ipt->tcf_lock);
	return result;

}

static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ipt *ipt = a->priv;
	struct xt_entry_target *t;
	struct tcf_t tm;
	struct tc_cnt c;

	/* for simple targets kernel size == user size
	 * user name = target name
	 * to be foolproof you should not assume this
	 */

	t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
	if (unlikely(!t))
		goto nla_put_failure;

	c.bindcnt = ipt->tcf_bindcnt - bind;
	c.refcnt = ipt->tcf_refcnt - ref;
	strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);

	if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
	    nla_put_u32(skb, TCA_IPT_INDEX, ipt->tcf_index) ||
	    nla_put_u32(skb, TCA_IPT_HOOK, ipt->tcfi_hook) ||
	    nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) ||
	    nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname))
		goto nla_put_failure;
	tm.install = jiffies_to_clock_t(jiffies - ipt->tcf_tm.install);
	tm.lastuse = jiffies_to_clock_t(jiffies - ipt->tcf_tm.lastuse);
	tm.expires = jiffies_to_clock_t(ipt->tcf_tm.expires);
	if (nla_put(skb, TCA_IPT_TM, sizeof(tm), &tm))
		goto nla_put_failure;
	kfree(t);
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	kfree(t);
	return -1;
}

static struct tc_action_ops act_ipt_ops = {
	.kind = "ipt",
	.hinfo = &ipt_hash_info,
	.type = TCA_ACT_IPT,
	.capab = TCA_CAP_NONE,
	.owner = THIS_MODULE,
	.act = tcf_ipt,
	.dump = tcf_ipt_dump,
	.cleanup = tcf_ipt_cleanup,
	.lookup = tcf_hash_search,
	.init = tcf_ipt_init,
	.walk = tcf_generic_walker
};

MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
MODULE_DESCRIPTION("Iptables target actions");
MODULE_LICENSE("GPL");

static int __init ipt_init_module(void)
{
	return tcf_register_action(&act_ipt_ops);
}

static void __exit ipt_cleanup_module(void)
{
	tcf_unregister_action(&act_ipt_ops);
}

module_init(ipt_init_module);
module_exit(ipt_cleanup_module);

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_ipt.c	iptables target interface
 *
 * TODO: Add other tables. For now we only support the ipv4 table targets
 *
 * Copyright:	Jamal Hadi Salim (2002-13)
 */
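/* Typical usage from iproute2 (illustrative only; exact syntax depends on the
 * iproute2 version and the classifier in use):
 *   tc filter add dev eth0 parent ffff: protocol ip u32 match u32 0 0 \
 *           action ipt -j DROP
 */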

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <linux/tc_act/tc_ipt.h>
#include <net/tc_act/tc_ipt.h>

#include <linux/netfilter_ipv4/ip_tables.h>


static unsigned int ipt_net_id;
static struct tc_action_ops act_ipt_ops;

static unsigned int xt_net_id;
static struct tc_action_ops act_xt_ops;

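/* Resolve the requested xt target by name and revision, then validate the
 * user-supplied target parameters with xt_check_target(). On failure the
 * reference on the target module is dropped again.
 */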
static int ipt_init_target(struct net *net, struct xt_entry_target *t,
			   char *table, unsigned int hook)
{
	struct xt_tgchk_param par;
	struct xt_target *target;
	struct ipt_entry e = {};
	int ret = 0;

	target = xt_request_find_target(AF_INET, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target))
		return PTR_ERR(target);

	t->u.kernel.target = target;
	memset(&par, 0, sizeof(par));
	par.net = net;
	par.table = table;
	par.entryinfo = &e;
	par.target = target;
	par.targinfo = t->data;
	par.hook_mask = hook;
	par.family = NFPROTO_IPV4;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
	if (ret < 0) {
		module_put(t->u.kernel.target->me);
		return ret;
	}
	return 0;
}

static void ipt_destroy_target(struct xt_entry_target *t, struct net *net)
{
	struct xt_tgdtor_param par = {
		.target = t->u.kernel.target,
		.targinfo = t->data,
		.family = NFPROTO_IPV4,
		.net = net,
	};
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}

static void tcf_ipt_release(struct tc_action *a)
{
	struct tcf_ipt *ipt = to_ipt(a);

	if (ipt->tcfi_t) {
		ipt_destroy_target(ipt->tcfi_t, a->idrinfo->net);
		kfree(ipt->tcfi_t);
	}
	kfree(ipt->tcfi_tname);
}

static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
	[TCA_IPT_TABLE] = { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_IPT_HOOK] = { .type = NLA_U32 },
	[TCA_IPT_INDEX] = { .type = NLA_U32 },
	[TCA_IPT_TARG] = { .len = sizeof(struct xt_entry_target) },
};

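/* Common init path for the "ipt" and "xt" flavours: parse the TCA_IPT_*
 * attributes, create a new action instance in the per-netns IDR or update an
 * existing one, and attach the validated xt target to it.
 */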
static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
			  struct nlattr *est, struct tc_action **a,
			  const struct tc_action_ops *ops, int ovr, int bind,
			  struct tcf_proto *tp)
{
	struct tc_action_net *tn = net_generic(net, id);
	struct nlattr *tb[TCA_IPT_MAX + 1];
	struct tcf_ipt *ipt;
	struct xt_entry_target *td, *t;
	char *tname;
	bool exists = false;
	int ret = 0, err;
	u32 hook = 0;
	u32 index = 0;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_IPT_MAX, nla, ipt_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_IPT_INDEX] != NULL)
		index = nla_get_u32(tb[TCA_IPT_INDEX]);

	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	if (tb[TCA_IPT_HOOK] == NULL || tb[TCA_IPT_TARG] == NULL) {
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		return -EINVAL;
	}

	td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
	if (nla_len(tb[TCA_IPT_TARG]) != td->u.target_size) {
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		return -EINVAL;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, index, est, a, ops, bind,
				     false);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else {
		if (bind)	/* don't override defaults */
			return 0;

		if (!ovr) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	hook = nla_get_u32(tb[TCA_IPT_HOOK]);

	err = -ENOMEM;
	tname = kmalloc(IFNAMSIZ, GFP_KERNEL);
	if (unlikely(!tname))
		goto err1;
	if (tb[TCA_IPT_TABLE] == NULL ||
	    nla_strlcpy(tname, tb[TCA_IPT_TABLE], IFNAMSIZ) >= IFNAMSIZ)
		strcpy(tname, "mangle");

	t = kmemdup(td, td->u.target_size, GFP_KERNEL);
	if (unlikely(!t))
		goto err2;

	err = ipt_init_target(net, t, tname, hook);
	if (err < 0)
		goto err3;

	ipt = to_ipt(*a);

	spin_lock_bh(&ipt->tcf_lock);
	if (ret != ACT_P_CREATED) {
		ipt_destroy_target(ipt->tcfi_t, net);
		kfree(ipt->tcfi_tname);
		kfree(ipt->tcfi_t);
	}
	ipt->tcfi_tname = tname;
	ipt->tcfi_t = t;
	ipt->tcfi_hook = hook;
	spin_unlock_bh(&ipt->tcf_lock);
	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);
	return ret;

err3:
	kfree(t);
err2:
	kfree(tname);
err1:
	tcf_idr_release(*a, bind);
	return err;
}

static int tcf_ipt_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **a, int ovr,
			int bind, bool rtnl_held, struct tcf_proto *tp,
			struct netlink_ext_ack *extack)
{
	return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,
			      bind, tp);
}

static int tcf_xt_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a, int ovr,
		       int bind, bool unlocked, struct tcf_proto *tp,
		       struct netlink_ext_ack *extack)
{
	return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,
			      bind, tp);
}

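/* Packet path: run the configured xt target on the skb under the action lock
 * and map the netfilter verdict (NF_ACCEPT / NF_DROP / XT_CONTINUE) to a TC
 * action code.
 */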
static int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a,
		       struct tcf_result *res)
{
	int ret = 0, result = 0;
	struct tcf_ipt *ipt = to_ipt(a);
	struct xt_action_param par;
	struct nf_hook_state state = {
		.net = dev_net(skb->dev),
		.in = skb->dev,
		.hook = ipt->tcfi_hook,
		.pf = NFPROTO_IPV4,
	};

	if (skb_unclone(skb, GFP_ATOMIC))
		return TC_ACT_UNSPEC;

	spin_lock(&ipt->tcf_lock);

	tcf_lastuse_update(&ipt->tcf_tm);
	bstats_update(&ipt->tcf_bstats, skb);

	/* yes, we have to worry about both in and out dev
	 * worry later - danger - this API seems to have changed
	 * from earlier kernels
	 */
	par.state = &state;
	par.target = ipt->tcfi_t->u.kernel.target;
	par.targinfo = ipt->tcfi_t->data;
	ret = par.target->target(skb, &par);

	switch (ret) {
	case NF_ACCEPT:
		result = TC_ACT_OK;
		break;
	case NF_DROP:
		result = TC_ACT_SHOT;
		ipt->tcf_qstats.drops++;
		break;
	case XT_CONTINUE:
		result = TC_ACT_PIPE;
		break;
	default:
		net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n",
				       ret);
		result = TC_ACT_OK;
		break;
	}
	spin_unlock(&ipt->tcf_lock);
	return result;

}

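/* Dump the action configuration to netlink: a copy of the target blob, the
 * action index, hook, table name, counters and timestamps.
 */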
static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ipt *ipt = to_ipt(a);
	struct xt_entry_target *t;
	struct tcf_t tm;
	struct tc_cnt c;

	/* for simple targets kernel size == user size
	 * user name = target name
	 * to be foolproof you should not assume this
	 */

	spin_lock_bh(&ipt->tcf_lock);
	t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
	if (unlikely(!t))
		goto nla_put_failure;

	c.bindcnt = atomic_read(&ipt->tcf_bindcnt) - bind;
	c.refcnt = refcount_read(&ipt->tcf_refcnt) - ref;
	strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);

	if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
	    nla_put_u32(skb, TCA_IPT_INDEX, ipt->tcf_index) ||
	    nla_put_u32(skb, TCA_IPT_HOOK, ipt->tcfi_hook) ||
	    nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) ||
	    nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname))
		goto nla_put_failure;

	tcf_tm_dump(&tm, &ipt->tcf_tm);
	if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD))
		goto nla_put_failure;

	spin_unlock_bh(&ipt->tcf_lock);
	kfree(t);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(&ipt->tcf_lock);
	nlmsg_trim(skb, b);
	kfree(t);
	return -1;
}

static int tcf_ipt_walker(struct net *net, struct sk_buff *skb,
			  struct netlink_callback *cb, int type,
			  const struct tc_action_ops *ops,
			  struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_ipt_ops = {
	.kind = "ipt",
	.id = TCA_ID_IPT,
	.owner = THIS_MODULE,
	.act = tcf_ipt_act,
	.dump = tcf_ipt_dump,
	.cleanup = tcf_ipt_release,
	.init = tcf_ipt_init,
	.walk = tcf_ipt_walker,
	.lookup = tcf_ipt_search,
	.size = sizeof(struct tcf_ipt),
};

static __net_init int ipt_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tc_action_net_init(net, tn, &act_ipt_ops);
}

static void __net_exit ipt_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, ipt_net_id);
}

static struct pernet_operations ipt_net_ops = {
	.init = ipt_init_net,
	.exit_batch = ipt_exit_net,
	.id = &ipt_net_id,
	.size = sizeof(struct tc_action_net),
};

static int tcf_xt_walker(struct net *net, struct sk_buff *skb,
			 struct netlink_callback *cb, int type,
			 const struct tc_action_ops *ops,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_xt_ops = {
	.kind = "xt",
	.id = TCA_ID_XT,
	.owner = THIS_MODULE,
	.act = tcf_ipt_act,
	.dump = tcf_ipt_dump,
	.cleanup = tcf_ipt_release,
	.init = tcf_xt_init,
	.walk = tcf_xt_walker,
	.lookup = tcf_xt_search,
	.size = sizeof(struct tcf_ipt),
};

static __net_init int xt_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tc_action_net_init(net, tn, &act_xt_ops);
}

static void __net_exit xt_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, xt_net_id);
}

static struct pernet_operations xt_net_ops = {
	.init = xt_init_net,
	.exit_batch = xt_exit_net,
	.id = &xt_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");
MODULE_DESCRIPTION("Iptables target actions");
MODULE_LICENSE("GPL");
MODULE_ALIAS("act_xt");

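/* Register both the "ipt" and "xt" action flavours; they share the same
 * implementation and differ only in the action kind and per-netns id.
 */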
static int __init ipt_init_module(void)
{
	int ret1, ret2;

	ret1 = tcf_register_action(&act_xt_ops, &xt_net_ops);
	if (ret1 < 0)
		pr_err("Failed to load xt action\n");

	ret2 = tcf_register_action(&act_ipt_ops, &ipt_net_ops);
	if (ret2 < 0)
		pr_err("Failed to load ipt action\n");

	if (ret1 < 0 && ret2 < 0)
		return ret1;
	else
		return 0;
}

static void __exit ipt_cleanup_module(void)
{
	tcf_unregister_action(&act_ipt_ops, &ipt_net_ops);
	tcf_unregister_action(&act_xt_ops, &xt_net_ops);
}

module_init(ipt_init_module);
module_exit(ipt_cleanup_module);