/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */

/* Only one queue handler may be registered per network namespace;
 * nfnetlink_queue is the only in-kernel backend. */
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
{
        /* should never happen, we only have one queueing backend in kernel */
        WARN_ON(rcu_access_pointer(net->nf.queue_handler));
        rcu_assign_pointer(net->nf.queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);

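/*
 * Illustrative sketch (not part of this file): a backend such as
 * nfnetlink_queue registers itself roughly like
 *
 *      static const struct nf_queue_handler nfqh = {
 *              .outfn        = nfqnl_enqueue_packet,
 *              .nf_hook_drop = nfqnl_nf_hook_drop,
 *      };
 *      nf_register_queue_handler(net, &nfqh);
 *
 * The .outfn and .nf_hook_drop callbacks are the ones invoked from
 * __nf_queue() and nf_queue_nf_hook_drop() below; the nfqnl_* names are
 * only an example of what a backend might supply.
 */
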
/* The caller must flush their queue before this */
void nf_unregister_queue_handler(struct net *net)
{
        RCU_INIT_POINTER(net->nf.queue_handler, NULL);
}
EXPORT_SYMBOL(nf_unregister_queue_handler);

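/* Drop the device and socket references taken by nf_queue_entry_get_refs(). */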
static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
        struct nf_hook_state *state = &entry->state;

        /* Release those devices we held, or Alexey will kill me. */
        if (state->in)
                dev_put(state->in);
        if (state->out)
                dev_put(state->out);
        if (state->sk)
                sock_put(state->sk);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->physin)
                dev_put(entry->physin);
        if (entry->physout)
                dev_put(entry->physout);
#endif
}

void nf_queue_entry_free(struct nf_queue_entry *entry)
{
        nf_queue_entry_release_refs(entry);
        kfree(entry);
}
EXPORT_SYMBOL_GPL(nf_queue_entry_free);

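/* Cache the bridge physical in/out devices (if any) from the skb's
 * bridge info in the entry, so nf_queue_entry_get_refs() and
 * nf_queue_entry_release_refs() can pin and release them together with
 * the rest of the hook state.
 */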
static void __nf_queue_entry_init_physdevs(struct nf_queue_entry *entry)
{
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        const struct sk_buff *skb = entry->skb;
        struct nf_bridge_info *nf_bridge;

        nf_bridge = nf_bridge_info_get(skb);
        if (nf_bridge) {
                entry->physin = nf_bridge_get_physindev(skb);
                entry->physout = nf_bridge_get_physoutdev(skb);
        } else {
                entry->physin = NULL;
                entry->physout = NULL;
        }
#endif
}

/* Bump dev refs so they don't vanish while packet is out */
void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
        struct nf_hook_state *state = &entry->state;

        if (state->in)
                dev_hold(state->in);
        if (state->out)
                dev_hold(state->out);
        if (state->sk)
                sock_hold(state->sk);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->physin)
                dev_hold(entry->physin);
        if (entry->physout)
                dev_hold(entry->physout);
#endif
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);

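/* Ask the registered queue handler to drop every packet it has queued
 * for this network namespace, e.g. when the hooks those packets were
 * queued from are being removed.
 */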
void nf_queue_nf_hook_drop(struct net *net)
{
        const struct nf_queue_handler *qh;

        rcu_read_lock();
        qh = rcu_dereference(net->nf.queue_handler);
        if (qh)
                qh->nf_hook_drop(net);
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_queue_nf_hook_drop);

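/* For locally generated (LOCAL_OUT) packets, stash the fields that key
 * the route (addresses, tos, mark) in the queue entry, so the route can
 * be recomputed via nf_reroute() on reinject if the verdict handler
 * rewrote them.  nf_ip6_saveroute() below is the IPv6 variant.
 */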
static void nf_ip_saveroute(const struct sk_buff *skb,
                            struct nf_queue_entry *entry)
{
        struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);

        if (entry->state.hook == NF_INET_LOCAL_OUT) {
                const struct iphdr *iph = ip_hdr(skb);

                rt_info->tos = iph->tos;
                rt_info->daddr = iph->daddr;
                rt_info->saddr = iph->saddr;
                rt_info->mark = skb->mark;
        }
}

static void nf_ip6_saveroute(const struct sk_buff *skb,
                             struct nf_queue_entry *entry)
{
        struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

        if (entry->state.hook == NF_INET_LOCAL_OUT) {
                const struct ipv6hdr *iph = ipv6_hdr(skb);

                rt_info->daddr = iph->daddr;
                rt_info->saddr = iph->saddr;
                rt_info->mark = skb->mark;
        }
}

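/* Hand one packet to the registered queue handler: allocate a queue
 * entry with room for the per-family route key, pin the dst, devices
 * and socket, save the route key, then call the handler's ->outfn().
 * Returns a negative errno if no handler is registered, allocation
 * fails, the dst cannot be pinned, or the handler rejects the packet.
 */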
static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
                      unsigned int index, unsigned int queuenum)
{
        struct nf_queue_entry *entry = NULL;
        const struct nf_queue_handler *qh;
        struct net *net = state->net;
        unsigned int route_key_size;
        int status;

        /* QUEUE == DROP if no one is waiting, to be safe. */
        qh = rcu_dereference(net->nf.queue_handler);
        if (!qh)
                return -ESRCH;

        switch (state->pf) {
        case AF_INET:
                route_key_size = sizeof(struct ip_rt_info);
                break;
        case AF_INET6:
                route_key_size = sizeof(struct ip6_rt_info);
                break;
        default:
                route_key_size = 0;
                break;
        }

        entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
        if (!entry)
                return -ENOMEM;

        if (skb_dst(skb) && !skb_dst_force(skb)) {
                kfree(entry);
                return -ENETDOWN;
        }

        *entry = (struct nf_queue_entry) {
                .skb = skb,
                .state = *state,
                .hook_index = index,
                .size = sizeof(*entry) + route_key_size,
        };

        __nf_queue_entry_init_physdevs(entry);

        nf_queue_entry_get_refs(entry);

        switch (entry->state.pf) {
        case AF_INET:
                nf_ip_saveroute(skb, entry);
                break;
        case AF_INET6:
                nf_ip6_saveroute(skb, entry);
                break;
        }

        status = qh->outfn(entry, queuenum);
        if (status < 0) {
                nf_queue_entry_free(entry);
                return status;
        }

        return 0;
}

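/* The queue number travels in the upper bits of the verdict word
 * (verdict >> NF_VERDICT_QBITS).  If no handler is registered and the
 * rule set NF_VERDICT_FLAG_QUEUE_BYPASS, returning 1 tells the caller
 * to continue with the next hook instead of dropping the packet.
 */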
/* Packets leaving via this function must come back through nf_reinject(). */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
             unsigned int index, unsigned int verdict)
{
        int ret;

        ret = __nf_queue(skb, state, index, verdict >> NF_VERDICT_QBITS);
        if (ret < 0) {
                if (ret == -ESRCH &&
                    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
                        return 1;
                kfree_skb(skb);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(nf_queue);

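/* Run the remaining hooks of this chain, starting at *index.  Stops at
 * the first verdict other than NF_ACCEPT; NF_REPEAT re-runs the same
 * hook.  *index is left pointing at the hook that issued the verdict.
 */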
static unsigned int nf_iterate(struct sk_buff *skb,
                               struct nf_hook_state *state,
                               const struct nf_hook_entries *hooks,
                               unsigned int *index)
{
        const struct nf_hook_entry *hook;
        unsigned int verdict, i = *index;

        while (i < hooks->num_hook_entries) {
                hook = &hooks->hooks[i];
repeat:
                verdict = nf_hook_entry_hookfn(hook, skb, state);
                if (verdict != NF_ACCEPT) {
                        *index = i;
                        if (verdict != NF_REPEAT)
                                return verdict;
                        goto repeat;
                }
                i++;
        }

        *index = i;
        return NF_ACCEPT;
}

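/* Look up the RCU-protected hook list for the given family and hook
 * number.  Families other than bridge, IPv4 and IPv6 trigger a one-time
 * warning and return NULL.
 */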
static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
{
        switch (pf) {
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
        case NFPROTO_BRIDGE:
                return rcu_dereference(net->nf.hooks_bridge[hooknum]);
#endif
        case NFPROTO_IPV4:
                return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
        case NFPROTO_IPV6:
                return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
        default:
                WARN_ON_ONCE(1);
                return NULL;
        }

        return NULL;
}

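/* Resume a packet that was queued to userspace.  The verdict handler's
 * decision is applied here: NF_REPEAT re-runs the hook that queued the
 * packet, NF_ACCEPT reroutes locally generated packets if needed and
 * continues with the remaining hooks, NF_QUEUE hands the packet back to
 * the queue handler, and anything else drops it.  The queue entry is
 * freed in all cases.
 */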
/* Caller must hold rcu read-side lock */
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
        const struct nf_hook_entry *hook_entry;
        const struct nf_hook_entries *hooks;
        struct sk_buff *skb = entry->skb;
        const struct net *net;
        unsigned int i;
        int err;
        u8 pf;

        net = entry->state.net;
        pf = entry->state.pf;

        hooks = nf_hook_entries_head(net, pf, entry->state.hook);

        i = entry->hook_index;
        if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
                kfree_skb(skb);
                nf_queue_entry_free(entry);
                return;
        }

        hook_entry = &hooks->hooks[i];

        /* Continue traversal iff userspace said ok... */
        if (verdict == NF_REPEAT)
                verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);

        if (verdict == NF_ACCEPT) {
                if (nf_reroute(skb, entry) < 0)
                        verdict = NF_DROP;
        }

        if (verdict == NF_ACCEPT) {
next_hook:
                ++i;
                verdict = nf_iterate(skb, &entry->state, hooks, &i);
        }

        switch (verdict & NF_VERDICT_MASK) {
        case NF_ACCEPT:
        case NF_STOP:
                local_bh_disable();
                entry->state.okfn(entry->state.net, entry->state.sk, skb);
                local_bh_enable();
                break;
        case NF_QUEUE:
                err = nf_queue(skb, &entry->state, i, verdict);
                if (err == 1)
                        goto next_hook;
                break;
        case NF_STOLEN:
                break;
        default:
                kfree_skb(skb);
        }

        nf_queue_entry_free(entry);
}
EXPORT_SYMBOL(nf_reinject);