/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */

/* Only one queue handler may be registered at a time; it is installed here
 * and looked up under RCU by __nf_queue(). */
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
{
        /* should never happen, we only have one queueing backend in kernel */
        WARN_ON(rcu_access_pointer(net->nf.queue_handler));
        rcu_assign_pointer(net->nf.queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);

/* The caller must flush their queue before this */
void nf_unregister_queue_handler(struct net *net)
{
        RCU_INIT_POINTER(net->nf.queue_handler, NULL);
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
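
/*
 * Illustrative sketch only: how a queueing backend could attach itself to a
 * network namespace with the two calls above.  The function and structure
 * names below are hypothetical (in-tree, nfnetlink_queue plays this role);
 * the block is kept under "#if 0" so it is never compiled.
 */
#if 0
static int example_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
{
        /* A real backend would hand the packet to userspace here and keep
         * the entry until a verdict comes back via nf_reinject().
         */
        return -EOPNOTSUPP;
}

static void example_hook_drop(struct net *net, const struct nf_hook_entry *hook)
{
        /* A real backend would drop every entry still queued via this hook. */
}

static const struct nf_queue_handler example_qh = {
        .outfn          = example_outfn,
        .nf_hook_drop   = example_hook_drop,
};

static int __net_init example_net_init(struct net *net)
{
        nf_register_queue_handler(net, &example_qh);
        return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
        /* The backend must flush its queue before unregistering. */
        nf_unregister_queue_handler(net);
}
#endif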

void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
        struct nf_hook_state *state = &entry->state;

        /* Release those devices we held, or Alexey will kill me. */
        if (state->in)
                dev_put(state->in);
        if (state->out)
                dev_put(state->out);
        if (state->sk)
                sock_put(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->skb->nf_bridge) {
                struct net_device *physdev;

                physdev = nf_bridge_get_physindev(entry->skb);
                if (physdev)
                        dev_put(physdev);
                physdev = nf_bridge_get_physoutdev(entry->skb);
                if (physdev)
                        dev_put(physdev);
        }
#endif
}
EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);

/* Bump dev refs so they don't vanish while packet is out */
void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
        struct nf_hook_state *state = &entry->state;

        if (state->in)
                dev_hold(state->in);
        if (state->out)
                dev_hold(state->out);
        if (state->sk)
                sock_hold(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->skb->nf_bridge) {
                struct net_device *physdev;

                physdev = nf_bridge_get_physindev(entry->skb);
                if (physdev)
                        dev_hold(physdev);
                physdev = nf_bridge_get_physoutdev(entry->skb);
                if (physdev)
                        dev_hold(physdev);
        }
#endif
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);

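/* Tell the queue handler that a hook entry is going away so it can drop any
 * packets still queued through it.
 */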
void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry)
{
        const struct nf_queue_handler *qh;

        rcu_read_lock();
        qh = rcu_dereference(net->nf.queue_handler);
        if (qh)
                qh->nf_hook_drop(net, entry);
        rcu_read_unlock();
}

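/* Allocate a queue entry, grab the references the packet needs while it is
 * out of the stack, save the route and hand everything to the registered
 * queue handler.
 */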
static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
                      struct nf_hook_entry *hook_entry, unsigned int queuenum)
{
        int status = -ENOENT;
        struct nf_queue_entry *entry = NULL;
        const struct nf_afinfo *afinfo;
        const struct nf_queue_handler *qh;
        struct net *net = state->net;

        /* QUEUE == DROP if no one is waiting, to be safe. */
        qh = rcu_dereference(net->nf.queue_handler);
        if (!qh) {
                status = -ESRCH;
                goto err;
        }

        afinfo = nf_get_afinfo(state->pf);
        if (!afinfo)
                goto err;

        entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
        if (!entry) {
                status = -ENOMEM;
                goto err;
        }

        *entry = (struct nf_queue_entry) {
                .skb    = skb,
                .state  = *state,
                .hook   = hook_entry,
                .size   = sizeof(*entry) + afinfo->route_key_size,
        };

        nf_queue_entry_get_refs(entry);
        skb_dst_force(skb);
        afinfo->saveroute(skb, entry);
        status = qh->outfn(entry, queuenum);

        if (status < 0) {
                nf_queue_entry_release_refs(entry);
                goto err;
        }

        return 0;

err:
        kfree(entry);
        return status;
}

/* Packets leaving via this function must come back through nf_reinject(). */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
             struct nf_hook_entry **entryp, unsigned int verdict)
{
        struct nf_hook_entry *entry = *entryp;
        int ret;

        ret = __nf_queue(skb, state, entry, verdict >> NF_VERDICT_QBITS);
        if (ret < 0) {
                if (ret == -ESRCH &&
                    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS)) {
                        *entryp = rcu_dereference(entry->next);
                        return 1;
                }
                kfree_skb(skb);
        }

        return 0;
}

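/*
 * Illustrative sketch only: how a hook function can request queueing.  The
 * queue number and the bypass flag are packed into the verdict and decoded
 * above by nf_queue()/__nf_queue().  The hook name is hypothetical; kept
 * under "#if 0" so it is never compiled.
 */
#if 0
static unsigned int example_hookfn(void *priv, struct sk_buff *skb,
                                   const struct nf_hook_state *state)
{
        /* Send the packet to userspace queue 3; if no handler is listening
         * (-ESRCH), continue with the next hook instead of dropping.
         */
        return NF_QUEUE_NR(3) | NF_VERDICT_FLAG_QUEUE_BYPASS;
}
#endif
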
static unsigned int nf_iterate(struct sk_buff *skb,
                               struct nf_hook_state *state,
                               struct nf_hook_entry **entryp)
{
        unsigned int verdict;

        do {
repeat:
                verdict = nf_hook_entry_hookfn((*entryp), skb, state);
                if (verdict != NF_ACCEPT) {
                        if (verdict != NF_REPEAT)
                                return verdict;
                        goto repeat;
                }
                *entryp = rcu_dereference((*entryp)->next);
        } while (*entryp);

        return NF_ACCEPT;
}

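/* Called by the queue handler once userspace has delivered its verdict:
 * resume hook traversal (or re-run the hook for NF_REPEAT), then accept,
 * drop, re-queue or steal the packet accordingly.
 */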
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
        struct nf_hook_entry *hook_entry = entry->hook;
        struct sk_buff *skb = entry->skb;
        const struct nf_afinfo *afinfo;
        int err;

        nf_queue_entry_release_refs(entry);

        /* Continue traversal iff userspace said ok... */
        if (verdict == NF_REPEAT)
                verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);

        if (verdict == NF_ACCEPT) {
                afinfo = nf_get_afinfo(entry->state.pf);
                if (!afinfo || afinfo->reroute(entry->state.net, skb, entry) < 0)
                        verdict = NF_DROP;
        }

        if (verdict == NF_ACCEPT) {
                hook_entry = rcu_dereference(hook_entry->next);
                if (hook_entry)
next_hook:
                        verdict = nf_iterate(skb, &entry->state, &hook_entry);
        }

        switch (verdict & NF_VERDICT_MASK) {
        case NF_ACCEPT:
        case NF_STOP:
okfn:
                local_bh_disable();
                entry->state.okfn(entry->state.net, entry->state.sk, skb);
                local_bh_enable();
                break;
        case NF_QUEUE:
                err = nf_queue(skb, &entry->state, &hook_entry, verdict);
                if (err == 1) {
                        if (hook_entry)
                                goto next_hook;
                        goto okfn;
                }
                break;
        case NF_STOLEN:
                break;
        default:
                kfree_skb(skb);
        }

        kfree(entry);
}
EXPORT_SYMBOL(nf_reinject);
/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */
static const struct nf_queue_handler __rcu *queue_handler __read_mostly;

/* Only one queue handler may be registered at a time; it is installed here
 * and looked up under RCU by nf_queue(). */
void nf_register_queue_handler(const struct nf_queue_handler *qh)
{
        /* should never happen, we only have one queueing backend in kernel */
        WARN_ON(rcu_access_pointer(queue_handler));
        rcu_assign_pointer(queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);

/* The caller must flush their queue before this */
void nf_unregister_queue_handler(void)
{
        RCU_INIT_POINTER(queue_handler, NULL);
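        /* Wait for concurrent rcu_dereference(queue_handler) users to finish
         * before the backend goes away.
         */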
        synchronize_rcu();
}
EXPORT_SYMBOL(nf_unregister_queue_handler);

void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
        struct nf_hook_state *state = &entry->state;

        /* Release those devices we held, or Alexey will kill me. */
        if (state->in)
                dev_put(state->in);
        if (state->out)
                dev_put(state->out);
        if (state->sk)
                sock_put(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->skb->nf_bridge) {
                struct net_device *physdev;

                physdev = nf_bridge_get_physindev(entry->skb);
                if (physdev)
                        dev_put(physdev);
                physdev = nf_bridge_get_physoutdev(entry->skb);
                if (physdev)
                        dev_put(physdev);
        }
#endif
}
EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);

/* Bump dev refs so they don't vanish while packet is out */
void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
        struct nf_hook_state *state = &entry->state;

        if (state->in)
                dev_hold(state->in);
        if (state->out)
                dev_hold(state->out);
        if (state->sk)
                sock_hold(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->skb->nf_bridge) {
                struct net_device *physdev;

                physdev = nf_bridge_get_physindev(entry->skb);
                if (physdev)
                        dev_hold(physdev);
                physdev = nf_bridge_get_physoutdev(entry->skb);
                if (physdev)
                        dev_hold(physdev);
        }
#endif
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);

void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops)
{
        const struct nf_queue_handler *qh;

        rcu_read_lock();
        qh = rcu_dereference(queue_handler);
        if (qh)
                qh->nf_hook_drop(net, ops);
        rcu_read_unlock();
}

/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 */
int nf_queue(struct sk_buff *skb,
             struct nf_hook_ops *elem,
             struct nf_hook_state *state,
             unsigned int queuenum)
{
        int status = -ENOENT;
        struct nf_queue_entry *entry = NULL;
        const struct nf_afinfo *afinfo;
        const struct nf_queue_handler *qh;

        /* QUEUE == DROP if no one is waiting, to be safe. */
        qh = rcu_dereference(queue_handler);
        if (!qh) {
                status = -ESRCH;
                goto err;
        }

        afinfo = nf_get_afinfo(state->pf);
        if (!afinfo)
                goto err;

        entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
        if (!entry) {
                status = -ENOMEM;
                goto err;
        }

        *entry = (struct nf_queue_entry) {
                .skb    = skb,
                .elem   = elem,
                .state  = *state,
                .size   = sizeof(*entry) + afinfo->route_key_size,
        };

        nf_queue_entry_get_refs(entry);
        skb_dst_force(skb);
        afinfo->saveroute(skb, entry);
        status = qh->outfn(entry, queuenum);

        if (status < 0) {
                nf_queue_entry_release_refs(entry);
                goto err;
        }

        return 0;

err:
        kfree(entry);
        return status;
}

void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
        struct sk_buff *skb = entry->skb;
        struct nf_hook_ops *elem = entry->elem;
        const struct nf_afinfo *afinfo;
        int err;

        nf_queue_entry_release_refs(entry);

        /* Continue traversal iff userspace said ok... */
        if (verdict == NF_REPEAT)
                verdict = elem->hook(elem->priv, skb, &entry->state);

        if (verdict == NF_ACCEPT) {
                afinfo = nf_get_afinfo(entry->state.pf);
                if (!afinfo || afinfo->reroute(entry->state.net, skb, entry) < 0)
                        verdict = NF_DROP;
        }

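        /* Lower the priority threshold so nf_iterate() below does not skip
         * any of the remaining hooks when traversal resumes.
         */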
        entry->state.thresh = INT_MIN;

        if (verdict == NF_ACCEPT) {
        next_hook:
                verdict = nf_iterate(entry->state.hook_list,
                                     skb, &entry->state, &elem);
        }

        switch (verdict & NF_VERDICT_MASK) {
        case NF_ACCEPT:
        case NF_STOP:
                local_bh_disable();
                entry->state.okfn(entry->state.net, entry->state.sk, skb);
                local_bh_enable();
                break;
        case NF_QUEUE:
                err = nf_queue(skb, elem, &entry->state,
                               verdict >> NF_VERDICT_QBITS);
                if (err < 0) {
                        if (err == -ESRCH &&
                            (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
                                goto next_hook;
                        kfree_skb(skb);
                }
                break;
        case NF_STOLEN:
                break;
        default:
                kfree_skb(skb);
        }

        kfree(entry);
}
EXPORT_SYMBOL(nf_reinject);