net/netfilter/nf_queue.c, v4.10.11
/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */

/* Only one queue handler may be registered at a time; registering a
 * second one is a bug and trips the WARN_ON below. */
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
{
	/* should never happen, we only have one queueing backend in kernel */
	WARN_ON(rcu_access_pointer(net->nf.queue_handler));
	rcu_assign_pointer(net->nf.queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);

/* The caller must flush their queue before this */
void nf_unregister_queue_handler(struct net *net)
{
	RCU_INIT_POINTER(net->nf.queue_handler, NULL);
}
EXPORT_SYMBOL(nf_unregister_queue_handler);

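The file itself never shows a caller of this API, so here is a minimal registration sketch modeled on nfnetlink_queue, the single in-kernel backend the comment above refers to. The my_* names are hypothetical; only the struct nf_queue_handler layout (outfn plus, in this series, a two-argument nf_hook_drop) and nf_register_queue_handler() come from this file and include/net/netfilter/nf_queue.h.

static int my_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
{
	/* hand the packet to the backend; a negative return makes
	 * __nf_queue() below release the entry's references and free it */
	return -ENOTSUPP;	/* placeholder */
}

static void my_hook_drop(struct net *net, const struct nf_hook_entry *hook)
{
	/* flush queued packets that are still waiting on this hook */
}

static const struct nf_queue_handler my_qh = {
	.outfn		= my_outfn,
	.nf_hook_drop	= my_hook_drop,
};

static int __net_init my_net_init(struct net *net)
{
	nf_register_queue_handler(net, &my_qh);
	return 0;
}
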
void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
	struct nf_hook_state *state = &entry->state;

	/* Release those devices we held, or Alexey will kill me. */
	if (state->in)
		dev_put(state->in);
	if (state->out)
		dev_put(state->out);
	if (state->sk)
		sock_put(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (entry->skb->nf_bridge) {
		struct net_device *physdev;

		physdev = nf_bridge_get_physindev(entry->skb);
		if (physdev)
			dev_put(physdev);
		physdev = nf_bridge_get_physoutdev(entry->skb);
		if (physdev)
			dev_put(physdev);
	}
#endif
}
EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);

/* Bump dev refs so they don't vanish while packet is out */
void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
	struct nf_hook_state *state = &entry->state;

	if (state->in)
		dev_hold(state->in);
	if (state->out)
		dev_hold(state->out);
	if (state->sk)
		sock_hold(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (entry->skb->nf_bridge) {
		struct net_device *physdev;

		physdev = nf_bridge_get_physindev(entry->skb);
		if (physdev)
			dev_hold(physdev);
		physdev = nf_bridge_get_physoutdev(entry->skb);
		if (physdev)
			dev_hold(physdev);
	}
#endif
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);

void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry)
{
	const struct nf_queue_handler *qh;

	rcu_read_lock();
	qh = rcu_dereference(net->nf.queue_handler);
	if (qh)
		qh->nf_hook_drop(net, entry);
	rcu_read_unlock();
}

static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
		      struct nf_hook_entry *hook_entry, unsigned int queuenum)
{
	int status = -ENOENT;
	struct nf_queue_entry *entry = NULL;
	const struct nf_afinfo *afinfo;
	const struct nf_queue_handler *qh;
	struct net *net = state->net;

	/* QUEUE == DROP if no one is waiting, to be safe. */
	qh = rcu_dereference(net->nf.queue_handler);
	if (!qh) {
		status = -ESRCH;
		goto err;
	}

	afinfo = nf_get_afinfo(state->pf);
	if (!afinfo)
		goto err;

	entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
	if (!entry) {
		status = -ENOMEM;
		goto err;
	}

	*entry = (struct nf_queue_entry) {
		.skb	= skb,
		.state	= *state,
		.hook	= hook_entry,
		.size	= sizeof(*entry) + afinfo->route_key_size,
	};

	nf_queue_entry_get_refs(entry);
	skb_dst_force(skb);
	afinfo->saveroute(skb, entry);
	status = qh->outfn(entry, queuenum);

	if (status < 0) {
		nf_queue_entry_release_refs(entry);
		goto err;
	}

	return 0;

err:
	kfree(entry);
	return status;
}

/* Packets leaving via this function must come back through nf_reinject(). */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
	     struct nf_hook_entry **entryp, unsigned int verdict)
{
	struct nf_hook_entry *entry = *entryp;
	int ret;

	ret = __nf_queue(skb, state, entry, verdict >> NF_VERDICT_QBITS);
	if (ret < 0) {
		if (ret == -ESRCH &&
		    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS)) {
			*entryp = rcu_dereference(entry->next);
			return 1;
		}
		kfree_skb(skb);
	}

	return 0;
}

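For context, a sketch of how the verdict that nf_queue() unpacks is built on the hook side. NF_QUEUE_NR() and NF_VERDICT_FLAG_QUEUE_BYPASS come from include/uapi/linux/netfilter.h; the queue number 21 is purely illustrative. NF_QUEUE_NR() packs the queue number into the upper 16 bits, which is exactly what `verdict >> NF_VERDICT_QBITS` above recovers.

/* queue to queue 21; the packet is dropped if no handler is listening */
unsigned int v_strict = NF_QUEUE_NR(21);

/* same, but continue to the next hook if no handler is listening
 * (this is the -ESRCH + NF_VERDICT_FLAG_QUEUE_BYPASS path above) */
unsigned int v_bypass = NF_QUEUE_NR(21) | NF_VERDICT_FLAG_QUEUE_BYPASS;
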
static unsigned int nf_iterate(struct sk_buff *skb,
			       struct nf_hook_state *state,
			       struct nf_hook_entry **entryp)
{
	unsigned int verdict;

	do {
repeat:
		verdict = nf_hook_entry_hookfn((*entryp), skb, state);
		if (verdict != NF_ACCEPT) {
			if (verdict != NF_REPEAT)
				return verdict;
			goto repeat;
		}
		*entryp = rcu_dereference((*entryp)->next);
	} while (*entryp);

	return NF_ACCEPT;
}

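nf_iterate() walks ordinary netfilter hook functions. As a reminder of what one of those looks like, here is a minimal sketch against the 4.x API; the my_* names are hypothetical, while struct nf_hook_ops and nf_register_net_hook() are the real interface from linux/netfilter.h.

static unsigned int my_hook(void *priv, struct sk_buff *skb,
			    const struct nf_hook_state *state)
{
	/* any of NF_ACCEPT, NF_DROP, NF_QUEUE, NF_REPEAT, NF_STOLEN */
	return NF_ACCEPT;
}

static struct nf_hook_ops my_ops = {
	.hook		= my_hook,
	.pf		= NFPROTO_IPV4,
	.hooknum	= NF_INET_PRE_ROUTING,
	.priority	= NF_IP_PRI_FIRST,
};

/* err = nf_register_net_hook(net, &my_ops); */
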
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	struct nf_hook_entry *hook_entry = entry->hook;
	struct sk_buff *skb = entry->skb;
	const struct nf_afinfo *afinfo;
	int err;

	nf_queue_entry_release_refs(entry);

	/* Continue traversal iff userspace said ok... */
	if (verdict == NF_REPEAT)
		verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);

	if (verdict == NF_ACCEPT) {
		afinfo = nf_get_afinfo(entry->state.pf);
		if (!afinfo || afinfo->reroute(entry->state.net, skb, entry) < 0)
			verdict = NF_DROP;
	}

	if (verdict == NF_ACCEPT) {
		hook_entry = rcu_dereference(hook_entry->next);
		if (hook_entry)
next_hook:
			verdict = nf_iterate(skb, &entry->state, &hook_entry);
	}

	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
	case NF_STOP:
okfn:
		local_bh_disable();
		entry->state.okfn(entry->state.net, entry->state.sk, skb);
		local_bh_enable();
		break;
	case NF_QUEUE:
		err = nf_queue(skb, &entry->state, &hook_entry, verdict);
		if (err == 1) {
			if (hook_entry)
				goto next_hook;
			goto okfn;
		}
		break;
	case NF_STOLEN:
		break;
	default:
		kfree_skb(skb);
	}

	kfree(entry);
}
EXPORT_SYMBOL(nf_reinject);

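A note on the diff to the next listing: in v5.4, struct nf_queue_entry no longer caches a struct nf_hook_entry pointer (the .hook field used above) but stores a plain .hook_index, which nf_reinject() resolves back to a hook through nf_hook_entries_head(). As far as I can tell, this is so that a queued packet does not pin a pointer into a hook table that may be replaced and freed while the packet sits in userspace.
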
net/netfilter/nf_queue.c, v5.4
/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */

/* Only one queue handler may be registered at a time; registering a
 * second one is a bug and trips the WARN_ON below. */
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
{
	/* should never happen, we only have one queueing backend in kernel */
	WARN_ON(rcu_access_pointer(net->nf.queue_handler));
	rcu_assign_pointer(net->nf.queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);

/* The caller must flush their queue before this */
void nf_unregister_queue_handler(struct net *net)
{
	RCU_INIT_POINTER(net->nf.queue_handler, NULL);
}
EXPORT_SYMBOL(nf_unregister_queue_handler);

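For comparison with v4.10.11 above: in this series the handler's nf_hook_drop callback has lost its per-hook argument, matching the nf_queue_nf_hook_drop() call below. A sketch of the declaration as I read it from include/net/netfilter/nf_queue.h:

struct nf_queue_handler {
	int	(*outfn)(struct nf_queue_entry *entry,
			 unsigned int queuenum);
	void	(*nf_hook_drop)(struct net *net);
};
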
static void nf_queue_entry_release_br_nf_refs(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

	if (nf_bridge) {
		struct net_device *physdev;

		physdev = nf_bridge_get_physindev(skb);
		if (physdev)
			dev_put(physdev);
		physdev = nf_bridge_get_physoutdev(skb);
		if (physdev)
			dev_put(physdev);
	}
#endif
}

void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
	struct nf_hook_state *state = &entry->state;

	/* Release those devices we held, or Alexey will kill me. */
	if (state->in)
		dev_put(state->in);
	if (state->out)
		dev_put(state->out);
	if (state->sk)
		sock_put(state->sk);

	nf_queue_entry_release_br_nf_refs(entry->skb);
}
EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);

static void nf_queue_entry_get_br_nf_refs(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

	if (nf_bridge) {
		struct net_device *physdev;

		physdev = nf_bridge_get_physindev(skb);
		if (physdev)
			dev_hold(physdev);
		physdev = nf_bridge_get_physoutdev(skb);
		if (physdev)
			dev_hold(physdev);
	}
#endif
}

/* Bump dev refs so they don't vanish while packet is out */
void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
	struct nf_hook_state *state = &entry->state;

	if (state->in)
		dev_hold(state->in);
	if (state->out)
		dev_hold(state->out);
	if (state->sk)
		sock_hold(state->sk);

	nf_queue_entry_get_br_nf_refs(entry->skb);
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);

void nf_queue_nf_hook_drop(struct net *net)
{
	const struct nf_queue_handler *qh;

	rcu_read_lock();
	qh = rcu_dereference(net->nf.queue_handler);
	if (qh)
		qh->nf_hook_drop(net);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_queue_nf_hook_drop);

static void nf_ip_saveroute(const struct sk_buff *skb,
			    struct nf_queue_entry *entry)
{
	struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->state.hook == NF_INET_LOCAL_OUT) {
		const struct iphdr *iph = ip_hdr(skb);

		rt_info->tos = iph->tos;
		rt_info->daddr = iph->daddr;
		rt_info->saddr = iph->saddr;
		rt_info->mark = skb->mark;
	}
}

static void nf_ip6_saveroute(const struct sk_buff *skb,
			     struct nf_queue_entry *entry)
{
	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->state.hook == NF_INET_LOCAL_OUT) {
		const struct ipv6hdr *iph = ipv6_hdr(skb);

		rt_info->daddr = iph->daddr;
		rt_info->saddr = iph->saddr;
		rt_info->mark = skb->mark;
	}
}

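These helpers only record the routing key; the consuming side is nf_reroute(), called from nf_reinject() below. A rough sketch of the IPv4 case (my_ip_reroute is a hypothetical name; cf. nf_ip_reroute() in net/ipv4/netfilter.c): if userspace changed any of the saved fields while the packet was queued, the route must be recomputed before the packet continues.

static int my_ip_reroute(struct sk_buff *skb,
			 const struct nf_queue_entry *entry)
{
	const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->state.hook == NF_INET_LOCAL_OUT) {
		const struct iphdr *iph = ip_hdr(skb);

		/* reroute only if tos/mark/daddr/saddr changed in userspace */
		if (!(iph->tos == rt_info->tos &&
		      skb->mark == rt_info->mark &&
		      iph->daddr == rt_info->daddr &&
		      iph->saddr == rt_info->saddr))
			return ip_route_me_harder(entry->state.net, skb,
						  RTN_UNSPEC);
	}
	return 0;
}
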
static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
		      unsigned int index, unsigned int queuenum)
{
	int status = -ENOENT;
	struct nf_queue_entry *entry = NULL;
	const struct nf_queue_handler *qh;
	struct net *net = state->net;
	unsigned int route_key_size;

	/* QUEUE == DROP if no one is waiting, to be safe. */
	qh = rcu_dereference(net->nf.queue_handler);
	if (!qh) {
		status = -ESRCH;
		goto err;
	}

	switch (state->pf) {
	case AF_INET:
		route_key_size = sizeof(struct ip_rt_info);
		break;
	case AF_INET6:
		route_key_size = sizeof(struct ip6_rt_info);
		break;
	default:
		route_key_size = 0;
		break;
	}

	entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
	if (!entry) {
		status = -ENOMEM;
		goto err;
	}

	if (!skb_dst_force(skb) && state->hook != NF_INET_PRE_ROUTING) {
		status = -ENETDOWN;
		goto err;
	}

	*entry = (struct nf_queue_entry) {
		.skb	= skb,
		.state	= *state,
		.hook_index = index,
		.size	= sizeof(*entry) + route_key_size,
	};

	nf_queue_entry_get_refs(entry);

	switch (entry->state.pf) {
	case AF_INET:
		nf_ip_saveroute(skb, entry);
		break;
	case AF_INET6:
		nf_ip6_saveroute(skb, entry);
		break;
	}

	status = qh->outfn(entry, queuenum);

	if (status < 0) {
		nf_queue_entry_release_refs(entry);
		goto err;
	}

	return 0;

err:
	kfree(entry);
	return status;
}

/* Packets leaving via this function must come back through nf_reinject(). */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
	     unsigned int index, unsigned int verdict)
{
	int ret;

	ret = __nf_queue(skb, state, index, verdict >> NF_VERDICT_QBITS);
	if (ret < 0) {
		if (ret == -ESRCH &&
		    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
			return 1;
		kfree_skb(skb);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nf_queue);

static unsigned int nf_iterate(struct sk_buff *skb,
			       struct nf_hook_state *state,
			       const struct nf_hook_entries *hooks,
			       unsigned int *index)
{
	const struct nf_hook_entry *hook;
	unsigned int verdict, i = *index;

	while (i < hooks->num_hook_entries) {
		hook = &hooks->hooks[i];
repeat:
		verdict = nf_hook_entry_hookfn(hook, skb, state);
		if (verdict != NF_ACCEPT) {
			*index = i;
			if (verdict != NF_REPEAT)
				return verdict;
			goto repeat;
		}
		i++;
	}

	*index = i;
	return NF_ACCEPT;
}

static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
{
	switch (pf) {
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	case NFPROTO_BRIDGE:
		return rcu_dereference(net->nf.hooks_bridge[hooknum]);
#endif
	case NFPROTO_IPV4:
		return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
	case NFPROTO_IPV6:
		return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
	default:
		WARN_ON_ONCE(1);
		return NULL;
	}

	return NULL;
}

/* Caller must hold rcu read-side lock */
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	const struct nf_hook_entry *hook_entry;
	const struct nf_hook_entries *hooks;
	struct sk_buff *skb = entry->skb;
	const struct net *net;
	unsigned int i;
	int err;
	u8 pf;

	net = entry->state.net;
	pf = entry->state.pf;

	hooks = nf_hook_entries_head(net, pf, entry->state.hook);

	nf_queue_entry_release_refs(entry);

	i = entry->hook_index;
	if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
		kfree_skb(skb);
		kfree(entry);
		return;
	}

	hook_entry = &hooks->hooks[i];

	/* Continue traversal iff userspace said ok... */
	if (verdict == NF_REPEAT)
		verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);

	if (verdict == NF_ACCEPT) {
		if (nf_reroute(skb, entry) < 0)
			verdict = NF_DROP;
	}

	if (verdict == NF_ACCEPT) {
next_hook:
		++i;
		verdict = nf_iterate(skb, &entry->state, hooks, &i);
	}

	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
	case NF_STOP:
		local_bh_disable();
		entry->state.okfn(entry->state.net, entry->state.sk, skb);
		local_bh_enable();
		break;
	case NF_QUEUE:
		err = nf_queue(skb, &entry->state, i, verdict);
		if (err == 1)
			goto next_hook;
		break;
	case NF_STOLEN:
		break;
	default:
		kfree_skb(skb);
	}

	kfree(entry);
}
EXPORT_SYMBOL(nf_reinject);
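
For completeness, the userspace peer of nf_reinject(): every packet handed out by the queue handler must come back with a verdict, which libnetfilter_queue expresses via nfq_set_verdict(). A minimal sketch; the queue number 0 and the unconditional NF_ACCEPT are illustrative, and it would be paired with a rule such as `iptables -A OUTPUT -j NFQUEUE --queue-num 0`.

#include <stdint.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter.h>	/* NF_ACCEPT */
#include <libnetfilter_queue/libnetfilter_queue.h>

static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
	      struct nfq_data *nfa, void *data)
{
	struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);
	uint32_t id = ph ? ntohl(ph->packet_id) : 0;

	/* every queued packet has to be reinjected with a verdict */
	return nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
}

int main(void)
{
	struct nfq_handle *h = nfq_open();
	struct nfq_q_handle *qh;
	char buf[4096];
	ssize_t n;
	int fd;

	if (!h)
		exit(1);
	qh = nfq_create_queue(h, 0, &cb, NULL);	/* queue number 0 */
	if (!qh)
		exit(1);
	nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);

	fd = nfq_fd(h);
	while ((n = recv(fd, buf, sizeof(buf), 0)) > 0)
		nfq_handle_packet(h, buf, n);	/* dispatches to cb() */

	nfq_destroy_queue(qh);
	nfq_close(h);
	return 0;
}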