net/netfilter/nf_queue.c (v4.10.11)
/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */

/* There is only one queueing backend per netns; a duplicate
 * registration would be a bug, hence the WARN_ON below. */
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
{
	/* should never happen, we only have one queueing backend in kernel */
	WARN_ON(rcu_access_pointer(net->nf.queue_handler));
	rcu_assign_pointer(net->nf.queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);

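/*
 * Editorial sketch (not part of the original file): roughly how a
 * queueing backend such as nfnetlink_queue would plug into the hook
 * above. Names prefixed example_ are hypothetical; the callback
 * signatures follow struct nf_queue_handler as used by this file
 * (.outfn and .nf_hook_drop).
 */
#if 0	/* illustration only */
static int example_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
{
	/* A real backend hands the packet to userspace and returns 0;
	 * a negative return makes __nf_queue() release the entry. This
	 * degenerate sketch reinjects immediately with an accept verdict. */
	nf_reinject(entry, NF_ACCEPT);
	return 0;
}

static void example_hook_drop(struct net *net, const struct nf_hook_entry *hooks)
{
	/* Flush queued entries that still reference the dying hook. */
}

static const struct nf_queue_handler example_qh = {
	.outfn		= example_outfn,
	.nf_hook_drop	= example_hook_drop,
};

/* Per-netns registration and teardown:
 *	nf_register_queue_handler(net, &example_qh);
 *	...
 *	nf_unregister_queue_handler(net);
 */
#endif
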
/* The caller must flush their queue before this */
void nf_unregister_queue_handler(struct net *net)
{
	RCU_INIT_POINTER(net->nf.queue_handler, NULL);
}
EXPORT_SYMBOL(nf_unregister_queue_handler);

void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
	struct nf_hook_state *state = &entry->state;

	/* Release those devices we held, or Alexey will kill me. */
	if (state->in)
		dev_put(state->in);
	if (state->out)
		dev_put(state->out);
	if (state->sk)
		sock_put(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (entry->skb->nf_bridge) {
		struct net_device *physdev;

		physdev = nf_bridge_get_physindev(entry->skb);
		if (physdev)
			dev_put(physdev);
		physdev = nf_bridge_get_physoutdev(entry->skb);
		if (physdev)
			dev_put(physdev);
	}
#endif
}
EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);

/* Bump dev refs so they don't vanish while packet is out */
void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
	struct nf_hook_state *state = &entry->state;

	if (state->in)
		dev_hold(state->in);
	if (state->out)
		dev_hold(state->out);
	if (state->sk)
		sock_hold(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (entry->skb->nf_bridge) {
		struct net_device *physdev;

		physdev = nf_bridge_get_physindev(entry->skb);
		if (physdev)
			dev_hold(physdev);
		physdev = nf_bridge_get_physoutdev(entry->skb);
		if (physdev)
			dev_hold(physdev);
	}
#endif
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);

void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry)
{
	const struct nf_queue_handler *qh;

	rcu_read_lock();
	qh = rcu_dereference(net->nf.queue_handler);
	if (qh)
		qh->nf_hook_drop(net, entry);
	rcu_read_unlock();
}

static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
		      struct nf_hook_entry *hook_entry, unsigned int queuenum)
{
	int status = -ENOENT;
	struct nf_queue_entry *entry = NULL;
	const struct nf_afinfo *afinfo;
	const struct nf_queue_handler *qh;
	struct net *net = state->net;

	/* QUEUE == DROP if no one is waiting, to be safe. */
	qh = rcu_dereference(net->nf.queue_handler);
	if (!qh) {
		status = -ESRCH;
		goto err;
	}

	afinfo = nf_get_afinfo(state->pf);
	if (!afinfo)
		goto err;

	entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
	if (!entry) {
		status = -ENOMEM;
		goto err;
	}

	*entry = (struct nf_queue_entry) {
		.skb	= skb,
		.state	= *state,
		.hook	= hook_entry,
		.size	= sizeof(*entry) + afinfo->route_key_size,
	};

	nf_queue_entry_get_refs(entry);
	skb_dst_force(skb);
	afinfo->saveroute(skb, entry);
	status = qh->outfn(entry, queuenum);

	if (status < 0) {
		nf_queue_entry_release_refs(entry);
		goto err;
	}

	return 0;

err:
	kfree(entry);
	return status;
}

/* Packets leaving via this function must come back through nf_reinject(). */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
	     struct nf_hook_entry **entryp, unsigned int verdict)
{
	struct nf_hook_entry *entry = *entryp;
	int ret;

	ret = __nf_queue(skb, state, entry, verdict >> NF_VERDICT_QBITS);
	if (ret < 0) {
		if (ret == -ESRCH &&
		    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS)) {
			*entryp = rcu_dereference(entry->next);
			return 1;
		}
		kfree_skb(skb);
	}

	return 0;
}

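/*
 * Editorial note (not in the original file): the queue number and the
 * bypass flag travel inside the verdict word itself. A hook function
 * targeting queue 3 that prefers to continue traversal when no backend
 * is listening would, using the uapi NF_QUEUE_NR() macro, return:
 *
 *	return NF_QUEUE_NR(3) | NF_VERDICT_FLAG_QUEUE_BYPASS;
 *
 * nf_queue() above recovers the queue number with
 * verdict >> NF_VERDICT_QBITS, and -ESRCH plus the bypass flag turns
 * "no handler registered" into "step to the next hook" (return 1)
 * instead of dropping the packet.
 */
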
static unsigned int nf_iterate(struct sk_buff *skb,
			       struct nf_hook_state *state,
			       struct nf_hook_entry **entryp)
{
	unsigned int verdict;

	do {
repeat:
		verdict = nf_hook_entry_hookfn((*entryp), skb, state);
		if (verdict != NF_ACCEPT) {
			if (verdict != NF_REPEAT)
				return verdict;
			goto repeat;
		}
		*entryp = rcu_dereference((*entryp)->next);
	} while (*entryp);

	return NF_ACCEPT;
}

void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	struct nf_hook_entry *hook_entry = entry->hook;
	struct sk_buff *skb = entry->skb;
	const struct nf_afinfo *afinfo;
	int err;

	nf_queue_entry_release_refs(entry);

	/* Continue traversal iff userspace said ok... */
	if (verdict == NF_REPEAT)
		verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);

	if (verdict == NF_ACCEPT) {
		afinfo = nf_get_afinfo(entry->state.pf);
		if (!afinfo || afinfo->reroute(entry->state.net, skb, entry) < 0)
			verdict = NF_DROP;
	}

	if (verdict == NF_ACCEPT) {
		hook_entry = rcu_dereference(hook_entry->next);
		if (hook_entry)
next_hook:
			verdict = nf_iterate(skb, &entry->state, &hook_entry);
	}

	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
	case NF_STOP:
okfn:
		local_bh_disable();
		entry->state.okfn(entry->state.net, entry->state.sk, skb);
		local_bh_enable();
		break;
	case NF_QUEUE:
		err = nf_queue(skb, &entry->state, &hook_entry, verdict);
		if (err == 1) {
			if (hook_entry)
				goto next_hook;
			goto okfn;
		}
		break;
	case NF_STOLEN:
		break;
	default:
		kfree_skb(skb);
	}

	kfree(entry);
}
EXPORT_SYMBOL(nf_reinject);
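
/*
 * Editorial sketch (not part of the original file): the contract stated
 * at the top of the file - every queued packet must be reinjected - is
 * discharged by the backend calling nf_reinject() once a verdict is
 * known, e.g.:
 *
 *	nf_reinject(entry, NF_ACCEPT);	resume traversal at the next hook
 *	nf_reinject(entry, NF_DROP);	free the skb
 *	nf_reinject(entry, NF_STOLEN);	ownership of the skb was taken
 *
 * In every case nf_reinject() releases the device/socket references
 * taken by nf_queue_entry_get_refs() and frees the entry itself.
 */
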
net/netfilter/nf_queue.c (v3.1)

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * A queue handler may be registered for each protocol.  Each is protected by
 * a long term mutex.  The handler must provide an outfn() to accept packets
 * for queueing and must reinject all packets it receives, no matter what.
 */
static const struct nf_queue_handler __rcu *queue_handler[NFPROTO_NUMPROTO] __read_mostly;

static DEFINE_MUTEX(queue_handler_mutex);

/* return EBUSY when somebody else is registered, return EEXIST if the
 * same handler is registered, return 0 in case of success. */
int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
{
	int ret;
	const struct nf_queue_handler *old;

	if (pf >= ARRAY_SIZE(queue_handler))
		return -EINVAL;

	mutex_lock(&queue_handler_mutex);
	old = rcu_dereference_protected(queue_handler[pf],
					lockdep_is_held(&queue_handler_mutex));
	if (old == qh)
		ret = -EEXIST;
	else if (old)
		ret = -EBUSY;
	else {
		rcu_assign_pointer(queue_handler[pf], qh);
		ret = 0;
	}
	mutex_unlock(&queue_handler_mutex);

	return ret;
}
EXPORT_SYMBOL(nf_register_queue_handler);

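/*
 * Editorial sketch (not part of the original file): in this era the
 * backend registered per protocol family and the handler carried a
 * name for the /proc output below. Names prefixed example_ are
 * hypothetical.
 */
#if 0	/* illustration only */
static int example_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
{
	/* Degenerate sketch: reinject immediately with an accept verdict. */
	nf_reinject(entry, NF_ACCEPT);
	return 0;
}

static const struct nf_queue_handler example_qh = {
	.name	= "example",
	.outfn	= example_outfn,
};

static int __init example_init(void)
{
	int ret = nf_register_queue_handler(NFPROTO_IPV4, &example_qh);

	if (ret == -EBUSY)	/* another backend owns this pf slot */
		return ret;
	if (ret == -EEXIST)	/* we were already registered */
		ret = 0;
	return ret;
}
#endif
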
/* The caller must flush their queue before this */
int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
{
	const struct nf_queue_handler *old;

	if (pf >= ARRAY_SIZE(queue_handler))
		return -EINVAL;

	mutex_lock(&queue_handler_mutex);
	old = rcu_dereference_protected(queue_handler[pf],
					lockdep_is_held(&queue_handler_mutex));
	if (old && old != qh) {
		mutex_unlock(&queue_handler_mutex);
		return -EINVAL;
	}

	rcu_assign_pointer(queue_handler[pf], NULL);
	mutex_unlock(&queue_handler_mutex);

	synchronize_rcu();

	return 0;
}
EXPORT_SYMBOL(nf_unregister_queue_handler);

void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
{
	u_int8_t pf;

	mutex_lock(&queue_handler_mutex);
	for (pf = 0; pf < ARRAY_SIZE(queue_handler); pf++) {
		if (rcu_dereference_protected(
				queue_handler[pf],
				lockdep_is_held(&queue_handler_mutex)
				) == qh)
			rcu_assign_pointer(queue_handler[pf], NULL);
	}
	mutex_unlock(&queue_handler_mutex);

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);

static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
	/* Release those devices we held, or Alexey will kill me. */
	if (entry->indev)
		dev_put(entry->indev);
	if (entry->outdev)
		dev_put(entry->outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
	if (entry->skb->nf_bridge) {
		struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;

		if (nf_bridge->physindev)
			dev_put(nf_bridge->physindev);
		if (nf_bridge->physoutdev)
			dev_put(nf_bridge->physoutdev);
	}
#endif
	/* Drop reference to owner of hook which queued us. */
	module_put(entry->elem->owner);
}

/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 */
static int __nf_queue(struct sk_buff *skb,
		      struct list_head *elem,
		      u_int8_t pf, unsigned int hook,
		      struct net_device *indev,
		      struct net_device *outdev,
		      int (*okfn)(struct sk_buff *),
		      unsigned int queuenum)
{
	int status = -ENOENT;
	struct nf_queue_entry *entry = NULL;
#ifdef CONFIG_BRIDGE_NETFILTER
	struct net_device *physindev;
	struct net_device *physoutdev;
#endif
	const struct nf_afinfo *afinfo;
	const struct nf_queue_handler *qh;

	/* QUEUE == DROP if no one is waiting, to be safe. */
	rcu_read_lock();

	qh = rcu_dereference(queue_handler[pf]);
	if (!qh) {
		status = -ESRCH;
		goto err_unlock;
	}

	afinfo = nf_get_afinfo(pf);
	if (!afinfo)
		goto err_unlock;

	entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
	if (!entry) {
		status = -ENOMEM;
		goto err_unlock;
	}

	*entry = (struct nf_queue_entry) {
		.skb	= skb,
		.elem	= list_entry(elem, struct nf_hook_ops, list),
		.pf	= pf,
		.hook	= hook,
		.indev	= indev,
		.outdev	= outdev,
		.okfn	= okfn,
	};

	/* If it's going away, ignore hook. */
	if (!try_module_get(entry->elem->owner)) {
		status = -ECANCELED;
		goto err_unlock;
	}
	/* Bump dev refs so they don't vanish while packet is out */
	if (indev)
		dev_hold(indev);
	if (outdev)
		dev_hold(outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge) {
		physindev = skb->nf_bridge->physindev;
		if (physindev)
			dev_hold(physindev);
		physoutdev = skb->nf_bridge->physoutdev;
		if (physoutdev)
			dev_hold(physoutdev);
	}
#endif
	skb_dst_force(skb);
	afinfo->saveroute(skb, entry);
	status = qh->outfn(entry, queuenum);

	rcu_read_unlock();

	if (status < 0) {
		nf_queue_entry_release_refs(entry);
		goto err;
	}

	return 0;

err_unlock:
	rcu_read_unlock();
err:
	kfree(entry);
	return status;
}

int nf_queue(struct sk_buff *skb,
	     struct list_head *elem,
	     u_int8_t pf, unsigned int hook,
	     struct net_device *indev,
	     struct net_device *outdev,
	     int (*okfn)(struct sk_buff *),
	     unsigned int queuenum)
{
	struct sk_buff *segs;
	int err;
	unsigned int queued;

	if (!skb_is_gso(skb))
		return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
				  queuenum);

	switch (pf) {
	case NFPROTO_IPV4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case NFPROTO_IPV6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	}

	segs = skb_gso_segment(skb, 0);
	/* Does not use PTR_ERR to limit the number of error codes that can be
	 * returned by nf_queue.  For instance, callers rely on -ECANCELED to
	 * mean 'ignore this hook'.
	 */
	if (IS_ERR(segs))
		return -EINVAL;

	queued = 0;
	err = 0;
	do {
		struct sk_buff *nskb = segs->next;

		segs->next = NULL;
		if (err == 0)
			err = __nf_queue(segs, elem, pf, hook, indev,
					 outdev, okfn, queuenum);
		if (err == 0)
			queued++;
		else
			kfree_skb(segs);
		segs = nskb;
	} while (segs);

	/* also free orig skb if only some segments were queued */
	if (unlikely(err && queued))
		err = 0;
	if (err == 0)
		kfree_skb(skb);
	return err;
}

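/*
 * Editorial note (not in the original file): for GSO packets the loop
 * above queues each software-built segment as its own nf_queue_entry,
 * so userspace sees wire-sized packets rather than one oversized
 * aggregate. Once at least one segment has been queued, errors on the
 * remaining segments are masked and the original skb is freed, since
 * responsibility for the data has already been handed off.
 */
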
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	struct sk_buff *skb = entry->skb;
	struct list_head *elem = &entry->elem->list;
	const struct nf_afinfo *afinfo;
	int err;

	rcu_read_lock();

	nf_queue_entry_release_refs(entry);

	/* Continue traversal iff userspace said ok... */
	if (verdict == NF_REPEAT) {
		elem = elem->prev;
		verdict = NF_ACCEPT;
	}

	if (verdict == NF_ACCEPT) {
		afinfo = nf_get_afinfo(entry->pf);
		if (!afinfo || afinfo->reroute(skb, entry) < 0)
			verdict = NF_DROP;
	}

	if (verdict == NF_ACCEPT) {
	next_hook:
		verdict = nf_iterate(&nf_hooks[entry->pf][entry->hook],
				     skb, entry->hook,
				     entry->indev, entry->outdev, &elem,
				     entry->okfn, INT_MIN);
	}

	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
	case NF_STOP:
		local_bh_disable();
		entry->okfn(skb);
		local_bh_enable();
		break;
	case NF_QUEUE:
		err = __nf_queue(skb, elem, entry->pf, entry->hook,
				 entry->indev, entry->outdev, entry->okfn,
				 verdict >> NF_VERDICT_QBITS);
		if (err < 0) {
			if (err == -ECANCELED)
				goto next_hook;
			if (err == -ESRCH &&
			   (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
				goto next_hook;
			kfree_skb(skb);
		}
		break;
	case NF_STOLEN:
		break;
	default:
		kfree_skb(skb);
	}
	rcu_read_unlock();
	kfree(entry);
}
EXPORT_SYMBOL(nf_reinject);

#ifdef CONFIG_PROC_FS
static void *seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos >= ARRAY_SIZE(queue_handler))
		return NULL;

	return pos;
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;

	if (*pos >= ARRAY_SIZE(queue_handler))
		return NULL;

	return pos;
}

static void seq_stop(struct seq_file *s, void *v)
{
}

static int seq_show(struct seq_file *s, void *v)
{
	int ret;
	loff_t *pos = v;
	const struct nf_queue_handler *qh;

	rcu_read_lock();
	qh = rcu_dereference(queue_handler[*pos]);
	if (!qh)
		ret = seq_printf(s, "%2lld NONE\n", *pos);
	else
		ret = seq_printf(s, "%2lld %s\n", *pos, qh->name);
	rcu_read_unlock();

	return ret;
}

static const struct seq_operations nfqueue_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqueue_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nfqueue_seq_ops);
}

static const struct file_operations nfqueue_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqueue_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
#endif /* PROC_FS */
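
/*
 * Editorial note (not in the original file): /proc/net/netfilter/nf_queue
 * prints one line per protocol family via seq_show() above, e.g. (the
 * registered handler name shown here is illustrative):
 *
 *	 0 NONE
 *	 1 NONE
 *	 2 nf_queue
 *	 3 NONE
 *
 * where pf 2 is NFPROTO_IPV4 and "nf_queue" would be the .name supplied
 * by a backend such as nfnetlink_queue.
 */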

int __init netfilter_queue_init(void)
{
#ifdef CONFIG_PROC_FS
	if (!proc_create("nf_queue", S_IRUGO,
			 proc_net_netfilter, &nfqueue_file_ops))
		return -1;
#endif
	return 0;
}