net/netfilter/nf_queue.c (v3.5.6)
  1#include <linux/kernel.h>
  2#include <linux/slab.h>
  3#include <linux/init.h>
  4#include <linux/module.h>
  5#include <linux/proc_fs.h>
  6#include <linux/skbuff.h>
  7#include <linux/netfilter.h>
  8#include <linux/seq_file.h>
  9#include <linux/rcupdate.h>
 10#include <net/protocol.h>
 11#include <net/netfilter/nf_queue.h>
 12#include <net/dst.h>
 13
 14#include "nf_internals.h"
 15
 16/*
 17 * A queue handler may be registered for each protocol.  Each is protected by
 18 * long term mutex.  The handler must provide an outfn() to accept packets
 19 * for queueing and must reinject all packets it receives, no matter what.
 20 */
 21static const struct nf_queue_handler __rcu *queue_handler[NFPROTO_NUMPROTO] __read_mostly;
 22
 23static DEFINE_MUTEX(queue_handler_mutex);
 24
 25/* return EBUSY when somebody else is registered, return EEXIST if the
 26 * same handler is registered, return 0 in case of success. */
 27int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
 28{
 29	int ret;
 30	const struct nf_queue_handler *old;
 31
 32	if (pf >= ARRAY_SIZE(queue_handler))
 33		return -EINVAL;
 34
 35	mutex_lock(&queue_handler_mutex);
 36	old = rcu_dereference_protected(queue_handler[pf],
 37					lockdep_is_held(&queue_handler_mutex));
 38	if (old == qh)
 39		ret = -EEXIST;
 40	else if (old)
 41		ret = -EBUSY;
 42	else {
 43		rcu_assign_pointer(queue_handler[pf], qh);
 44		ret = 0;
 45	}
 46	mutex_unlock(&queue_handler_mutex);
 47
 48	return ret;
 49}
 50EXPORT_SYMBOL(nf_register_queue_handler);
 51
 52/* The caller must flush their queue before this */
 53int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
 54{
 55	const struct nf_queue_handler *old;
 56
 57	if (pf >= ARRAY_SIZE(queue_handler))
 58		return -EINVAL;
 59
 60	mutex_lock(&queue_handler_mutex);
 61	old = rcu_dereference_protected(queue_handler[pf],
 62					lockdep_is_held(&queue_handler_mutex));
 63	if (old && old != qh) {
 64		mutex_unlock(&queue_handler_mutex);
 65		return -EINVAL;
 66	}
 67
 68	RCU_INIT_POINTER(queue_handler[pf], NULL);
 69	mutex_unlock(&queue_handler_mutex);
 70
 71	synchronize_rcu();
 72
 73	return 0;
 74}
 75EXPORT_SYMBOL(nf_unregister_queue_handler);
 76
 77void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
 78{
 79	u_int8_t pf;
 80
 81	mutex_lock(&queue_handler_mutex);
 82	for (pf = 0; pf < ARRAY_SIZE(queue_handler); pf++)  {
 83		if (rcu_dereference_protected(
 84				queue_handler[pf],
 85				lockdep_is_held(&queue_handler_mutex)
 86				) == qh)
 87			RCU_INIT_POINTER(queue_handler[pf], NULL);
 88	}
 89	mutex_unlock(&queue_handler_mutex);
 90
 91	synchronize_rcu();
 92}
 93EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);
 94
 95static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
 96{
 97	/* Release those devices we held, or Alexey will kill me. */
 98	if (entry->indev)
 99		dev_put(entry->indev);
100	if (entry->outdev)
101		dev_put(entry->outdev);
102#ifdef CONFIG_BRIDGE_NETFILTER
103	if (entry->skb->nf_bridge) {
104		struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;
105
106		if (nf_bridge->physindev)
107			dev_put(nf_bridge->physindev);
108		if (nf_bridge->physoutdev)
109			dev_put(nf_bridge->physoutdev);
110	}
111#endif
112	/* Drop reference to owner of hook which queued us. */
113	module_put(entry->elem->owner);
114}
115
116/*
117 * Any packet that leaves via this function must come back
118 * through nf_reinject().
119 */
120static int __nf_queue(struct sk_buff *skb,
121		      struct list_head *elem,
122		      u_int8_t pf, unsigned int hook,
123		      struct net_device *indev,
124		      struct net_device *outdev,
125		      int (*okfn)(struct sk_buff *),
126		      unsigned int queuenum)
127{
128	int status = -ENOENT;
129	struct nf_queue_entry *entry = NULL;
130#ifdef CONFIG_BRIDGE_NETFILTER
131	struct net_device *physindev;
132	struct net_device *physoutdev;
133#endif
134	const struct nf_afinfo *afinfo;
135	const struct nf_queue_handler *qh;
136
137	/* QUEUE == DROP if no one is waiting, to be safe. */
138	rcu_read_lock();
139
140	qh = rcu_dereference(queue_handler[pf]);
141	if (!qh) {
142		status = -ESRCH;
143		goto err_unlock;
144	}
145
146	afinfo = nf_get_afinfo(pf);
147	if (!afinfo)
148		goto err_unlock;
149
150	entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
151	if (!entry) {
152		status = -ENOMEM;
153		goto err_unlock;
154	}
155
156	*entry = (struct nf_queue_entry) {
157		.skb	= skb,
158		.elem	= list_entry(elem, struct nf_hook_ops, list),
159		.pf	= pf,
160		.hook	= hook,
161		.indev	= indev,
162		.outdev	= outdev,
163		.okfn	= okfn,
164	};
165
166	/* If it's going away, ignore hook. */
167	if (!try_module_get(entry->elem->owner)) {
168		status = -ECANCELED;
169		goto err_unlock;
170	}
171	/* Bump dev refs so they don't vanish while packet is out */
172	if (indev)
173		dev_hold(indev);
174	if (outdev)
175		dev_hold(outdev);
176#ifdef CONFIG_BRIDGE_NETFILTER
177	if (skb->nf_bridge) {
178		physindev = skb->nf_bridge->physindev;
179		if (physindev)
180			dev_hold(physindev);
181		physoutdev = skb->nf_bridge->physoutdev;
182		if (physoutdev)
183			dev_hold(physoutdev);
184	}
185#endif
186	skb_dst_force(skb);
187	afinfo->saveroute(skb, entry);
188	status = qh->outfn(entry, queuenum);
189
190	rcu_read_unlock();
191
192	if (status < 0) {
193		nf_queue_entry_release_refs(entry);
194		goto err;
195	}
196
197	return 0;
198
199err_unlock:
200	rcu_read_unlock();
201err:
202	kfree(entry);
203	return status;
204}
205
206#ifdef CONFIG_BRIDGE_NETFILTER
207/* When called from bridge netfilter, skb->data must point to MAC header
208 * before calling skb_gso_segment(). Else, original MAC header is lost
209 * and segmented skbs will be sent to wrong destination.
210 */
211static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
212{
213	if (skb->nf_bridge)
214		__skb_push(skb, skb->network_header - skb->mac_header);
215}
216
217static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
218{
219	if (skb->nf_bridge)
220		__skb_pull(skb, skb->network_header - skb->mac_header);
221}
222#else
223#define nf_bridge_adjust_skb_data(s) do {} while (0)
224#define nf_bridge_adjust_segmented_data(s) do {} while (0)
225#endif
226
227int nf_queue(struct sk_buff *skb,
228	     struct list_head *elem,
229	     u_int8_t pf, unsigned int hook,
230	     struct net_device *indev,
231	     struct net_device *outdev,
232	     int (*okfn)(struct sk_buff *),
233	     unsigned int queuenum)
234{
235	struct sk_buff *segs;
236	int err = -EINVAL;
237	unsigned int queued;
238
239	if (!skb_is_gso(skb))
240		return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
241				  queuenum);
242
243	switch (pf) {
244	case NFPROTO_IPV4:
245		skb->protocol = htons(ETH_P_IP);
246		break;
247	case NFPROTO_IPV6:
248		skb->protocol = htons(ETH_P_IPV6);
249		break;
250	}
251
252	nf_bridge_adjust_skb_data(skb);
253	segs = skb_gso_segment(skb, 0);
254	/* Does not use PTR_ERR to limit the number of error codes that can be
255	 * returned by nf_queue.  For instance, callers rely on -ECANCELED to mean
256	 * 'ignore this hook'.
257	 */
258	if (IS_ERR(segs))
259		goto out_err;
260	queued = 0;
261	err = 0;
262	do {
263		struct sk_buff *nskb = segs->next;
264
265		segs->next = NULL;
266		if (err == 0) {
267			nf_bridge_adjust_segmented_data(segs);
268			err = __nf_queue(segs, elem, pf, hook, indev,
269					   outdev, okfn, queuenum);
270		}
271		if (err == 0)
272			queued++;
273		else
274			kfree_skb(segs);
275		segs = nskb;
276	} while (segs);
277
278	if (queued) {
279		kfree_skb(skb);
280		return 0;
281	}
282  out_err:
283	nf_bridge_adjust_segmented_data(skb);
284	return err;
285}
286
287void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
288{
289	struct sk_buff *skb = entry->skb;
290	struct list_head *elem = &entry->elem->list;
291	const struct nf_afinfo *afinfo;
292	int err;
293
294	rcu_read_lock();
295
296	nf_queue_entry_release_refs(entry);
297
298	/* Continue traversal iff userspace said ok... */
299	if (verdict == NF_REPEAT) {
300		elem = elem->prev;
301		verdict = NF_ACCEPT;
302	}
303
304	if (verdict == NF_ACCEPT) {
305		afinfo = nf_get_afinfo(entry->pf);
306		if (!afinfo || afinfo->reroute(skb, entry) < 0)
307			verdict = NF_DROP;
308	}
309
310	if (verdict == NF_ACCEPT) {
311	next_hook:
312		verdict = nf_iterate(&nf_hooks[entry->pf][entry->hook],
313				     skb, entry->hook,
314				     entry->indev, entry->outdev, &elem,
315				     entry->okfn, INT_MIN);
316	}
317
318	switch (verdict & NF_VERDICT_MASK) {
319	case NF_ACCEPT:
320	case NF_STOP:
321		local_bh_disable();
322		entry->okfn(skb);
323		local_bh_enable();
324		break;
325	case NF_QUEUE:
326		err = __nf_queue(skb, elem, entry->pf, entry->hook,
327				 entry->indev, entry->outdev, entry->okfn,
328				 verdict >> NF_VERDICT_QBITS);
329		if (err < 0) {
330			if (err == -ECANCELED)
331				goto next_hook;
332			if (err == -ESRCH &&
333			   (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
334				goto next_hook;
335			kfree_skb(skb);
336		}
337		break;
338	case NF_STOLEN:
339		break;
340	default:
341		kfree_skb(skb);
342	}
343	rcu_read_unlock();
344	kfree(entry);
345}
346EXPORT_SYMBOL(nf_reinject);
347
348#ifdef CONFIG_PROC_FS
349static void *seq_start(struct seq_file *seq, loff_t *pos)
350{
351	if (*pos >= ARRAY_SIZE(queue_handler))
352		return NULL;
353
354	return pos;
355}
356
357static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
358{
359	(*pos)++;
360
361	if (*pos >= ARRAY_SIZE(queue_handler))
362		return NULL;
363
364	return pos;
365}
366
367static void seq_stop(struct seq_file *s, void *v)
368{
369
370}
371
372static int seq_show(struct seq_file *s, void *v)
373{
374	int ret;
375	loff_t *pos = v;
376	const struct nf_queue_handler *qh;
377
378	rcu_read_lock();
379	qh = rcu_dereference(queue_handler[*pos]);
380	if (!qh)
381		ret = seq_printf(s, "%2lld NONE\n", *pos);
382	else
383		ret = seq_printf(s, "%2lld %s\n", *pos, qh->name);
384	rcu_read_unlock();
385
386	return ret;
387}
388
389static const struct seq_operations nfqueue_seq_ops = {
390	.start	= seq_start,
391	.next	= seq_next,
392	.stop	= seq_stop,
393	.show	= seq_show,
394};
395
396static int nfqueue_open(struct inode *inode, struct file *file)
397{
398	return seq_open(file, &nfqueue_seq_ops);
399}
400
401static const struct file_operations nfqueue_file_ops = {
402	.owner	 = THIS_MODULE,
403	.open	 = nfqueue_open,
404	.read	 = seq_read,
405	.llseek	 = seq_lseek,
406	.release = seq_release,
407};
408#endif /* PROC_FS */
409
410
411int __init netfilter_queue_init(void)
412{
413#ifdef CONFIG_PROC_FS
414	if (!proc_create("nf_queue", S_IRUGO,
415			 proc_net_netfilter, &nfqueue_file_ops))
416		return -1;
417#endif
418	return 0;
419}
420
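The v3.5.6 listing above exposes a per-protocol-family registration API: a queueing backend supplies an outfn() callback via nf_register_queue_handler(), and every nf_queue_entry handed to that callback must eventually come back through nf_reinject(). The fragment below is a minimal, hypothetical sketch of such a backend, not code from the kernel tree; the module name is made up, and it reinjects immediately with NF_ACCEPT, whereas the real in-tree backend (nfnetlink_queue) parks each entry until userspace returns a verdict.

#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_queue.h>

/* Illustrative outfn(): accept the packet straight away.  A production
 * backend would store the entry and call nf_reinject() later. */
static int demo_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
{
	nf_reinject(entry, NF_ACCEPT);
	return 0;
}

static const struct nf_queue_handler demo_qh = {
	.name	= "demo",
	.outfn	= demo_outfn,
};

static int __init demo_init(void)
{
	/* -EBUSY if another backend owns IPv4, -EEXIST if we already registered. */
	return nf_register_queue_handler(NFPROTO_IPV4, &demo_qh);
}

static void __exit demo_exit(void)
{
	/* The caller is expected to have flushed its queue first. */
	nf_unregister_queue_handler(NFPROTO_IPV4, &demo_qh);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Registering for NFPROTO_IPV4 only covers IPv4 hooks; an IPv6 backend would register NFPROTO_IPV6 separately, which is exactly the per-family coupling the v6.2 code below removes.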
net/netfilter/nf_queue.c (v6.2)
  1/*
  2 * Rusty Russell (C)2000 -- This code is GPL.
  3 * Patrick McHardy (c) 2006-2012
  4 */
  5
  6#include <linux/kernel.h>
  7#include <linux/slab.h>
  8#include <linux/init.h>
  9#include <linux/module.h>
 10#include <linux/proc_fs.h>
 11#include <linux/skbuff.h>
 12#include <linux/netfilter.h>
 13#include <linux/netfilter_ipv4.h>
 14#include <linux/netfilter_ipv6.h>
 15#include <linux/netfilter_bridge.h>
 16#include <linux/seq_file.h>
 17#include <linux/rcupdate.h>
 18#include <net/protocol.h>
 19#include <net/netfilter/nf_queue.h>
 20#include <net/dst.h>
 21
 22#include "nf_internals.h"
 23
 24static const struct nf_queue_handler __rcu *nf_queue_handler;
 25
 26/*
 27 * Hook for nfnetlink_queue to register its queue handler.
 28 * We do this so that most of the NFQUEUE code can be modular.
 29 *
 30 * Once the queue is registered it must reinject all packets it
 31 * receives, no matter what.
 32 */
 33
 34void nf_register_queue_handler(const struct nf_queue_handler *qh)
 35{
 36	/* should never happen, we only have one queueing backend in kernel */
 37	WARN_ON(rcu_access_pointer(nf_queue_handler));
 38	rcu_assign_pointer(nf_queue_handler, qh);
 39}
 40EXPORT_SYMBOL(nf_register_queue_handler);
 41
 42/* The caller must flush their queue before this */
 43void nf_unregister_queue_handler(void)
 44{
 45	RCU_INIT_POINTER(nf_queue_handler, NULL);
 46}
 47EXPORT_SYMBOL(nf_unregister_queue_handler);
 48
 49static void nf_queue_sock_put(struct sock *sk)
 50{
 51#ifdef CONFIG_INET
 52	sock_gen_put(sk);
 53#else
 54	sock_put(sk);
 55#endif
 56}
 57
 58static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
 59{
 60	struct nf_hook_state *state = &entry->state;
 61
 62	/* Release those devices we held, or Alexey will kill me. */
 63	dev_put(state->in);
 64	dev_put(state->out);
 65	if (state->sk)
 66		nf_queue_sock_put(state->sk);
 67
 68#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 69	dev_put(entry->physin);
 70	dev_put(entry->physout);
 71#endif
 72}
 73
 74void nf_queue_entry_free(struct nf_queue_entry *entry)
 75{
 76	nf_queue_entry_release_refs(entry);
 77	kfree(entry);
 78}
 79EXPORT_SYMBOL_GPL(nf_queue_entry_free);
 80
 81static void __nf_queue_entry_init_physdevs(struct nf_queue_entry *entry)
 82{
 83#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 84	const struct sk_buff *skb = entry->skb;
 85	struct nf_bridge_info *nf_bridge;
 86
 87	nf_bridge = nf_bridge_info_get(skb);
 88	if (nf_bridge) {
 89		entry->physin = nf_bridge_get_physindev(skb);
 90		entry->physout = nf_bridge_get_physoutdev(skb);
 91	} else {
 92		entry->physin = NULL;
 93		entry->physout = NULL;
 94	}
 95#endif
 96}
 97
 98/* Bump dev refs so they don't vanish while packet is out */
 99bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
100{
101	struct nf_hook_state *state = &entry->state;
102
103	if (state->sk && !refcount_inc_not_zero(&state->sk->sk_refcnt))
104		return false;
105
106	dev_hold(state->in);
107	dev_hold(state->out);
108
109#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
110	dev_hold(entry->physin);
111	dev_hold(entry->physout);
112#endif
113	return true;
114}
115EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
116
117void nf_queue_nf_hook_drop(struct net *net)
118{
119	const struct nf_queue_handler *qh;
120
121	rcu_read_lock();
122	qh = rcu_dereference(nf_queue_handler);
123	if (qh)
124		qh->nf_hook_drop(net);
125	rcu_read_unlock();
126}
127EXPORT_SYMBOL_GPL(nf_queue_nf_hook_drop);
128
129static void nf_ip_saveroute(const struct sk_buff *skb,
130			    struct nf_queue_entry *entry)
131{
132	struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
133
134	if (entry->state.hook == NF_INET_LOCAL_OUT) {
135		const struct iphdr *iph = ip_hdr(skb);
136
137		rt_info->tos = iph->tos;
138		rt_info->daddr = iph->daddr;
139		rt_info->saddr = iph->saddr;
140		rt_info->mark = skb->mark;
141	}
142}
143
144static void nf_ip6_saveroute(const struct sk_buff *skb,
145			     struct nf_queue_entry *entry)
146{
147	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
148
149	if (entry->state.hook == NF_INET_LOCAL_OUT) {
150		const struct ipv6hdr *iph = ipv6_hdr(skb);
151
152		rt_info->daddr = iph->daddr;
153		rt_info->saddr = iph->saddr;
154		rt_info->mark = skb->mark;
155	}
156}
157
158static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
159		      unsigned int index, unsigned int queuenum)
160{
161	struct nf_queue_entry *entry = NULL;
162	const struct nf_queue_handler *qh;
163	unsigned int route_key_size;
164	int status;
165
166	/* QUEUE == DROP if no one is waiting, to be safe. */
167	qh = rcu_dereference(nf_queue_handler);
168	if (!qh)
169		return -ESRCH;
170
171	switch (state->pf) {
172	case AF_INET:
173		route_key_size = sizeof(struct ip_rt_info);
174		break;
175	case AF_INET6:
176		route_key_size = sizeof(struct ip6_rt_info);
177		break;
178	default:
179		route_key_size = 0;
180		break;
181	}
182
183	if (skb_sk_is_prefetched(skb)) {
184		struct sock *sk = skb->sk;
185
186		if (!sk_is_refcounted(sk)) {
187			if (!refcount_inc_not_zero(&sk->sk_refcnt))
188				return -ENOTCONN;
189
190			/* drop refcount on skb_orphan */
191			skb->destructor = sock_edemux;
192		}
193	}
194
195	entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
196	if (!entry)
197		return -ENOMEM;
198
199	if (skb_dst(skb) && !skb_dst_force(skb)) {
200		kfree(entry);
201		return -ENETDOWN;
202	}
203
204	*entry = (struct nf_queue_entry) {
205		.skb	= skb,
206		.state	= *state,
207		.hook_index = index,
208		.size	= sizeof(*entry) + route_key_size,
209	};
210
211	__nf_queue_entry_init_physdevs(entry);
212
213	if (!nf_queue_entry_get_refs(entry)) {
214		kfree(entry);
215		return -ENOTCONN;
216	}
217
218	switch (entry->state.pf) {
219	case AF_INET:
220		nf_ip_saveroute(skb, entry);
221		break;
222	case AF_INET6:
223		nf_ip6_saveroute(skb, entry);
224		break;
225	}
226
227	status = qh->outfn(entry, queuenum);
228	if (status < 0) {
229		nf_queue_entry_free(entry);
230		return status;
231	}
232
233	return 0;
234}
235
236/* Packets leaving via this function must come back through nf_reinject(). */
237int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
238	     unsigned int index, unsigned int verdict)
239{
240	int ret;
241
242	ret = __nf_queue(skb, state, index, verdict >> NF_VERDICT_QBITS);
243	if (ret < 0) {
244		if (ret == -ESRCH &&
245		    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
246			return 1;
247		kfree_skb(skb);
248	}
249
250	return 0;
251}
252EXPORT_SYMBOL_GPL(nf_queue);
253
254static unsigned int nf_iterate(struct sk_buff *skb,
255			       struct nf_hook_state *state,
256			       const struct nf_hook_entries *hooks,
257			       unsigned int *index)
258{
259	const struct nf_hook_entry *hook;
260	unsigned int verdict, i = *index;
261
262	while (i < hooks->num_hook_entries) {
263		hook = &hooks->hooks[i];
264repeat:
265		verdict = nf_hook_entry_hookfn(hook, skb, state);
266		if (verdict != NF_ACCEPT) {
267			*index = i;
268			if (verdict != NF_REPEAT)
269				return verdict;
270			goto repeat;
271		}
272		i++;
273	}
274
275	*index = i;
276	return NF_ACCEPT;
277}
278
279static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
280{
281	switch (pf) {
282#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
283	case NFPROTO_BRIDGE:
284		return rcu_dereference(net->nf.hooks_bridge[hooknum]);
285#endif
286	case NFPROTO_IPV4:
287		return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
288	case NFPROTO_IPV6:
289		return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
290	default:
291		WARN_ON_ONCE(1);
292		return NULL;
293	}
294
295	return NULL;
296}
297
298/* Caller must hold rcu read-side lock */
299void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
300{
301	const struct nf_hook_entry *hook_entry;
302	const struct nf_hook_entries *hooks;
303	struct sk_buff *skb = entry->skb;
304	const struct net *net;
305	unsigned int i;
306	int err;
307	u8 pf;
308
309	net = entry->state.net;
310	pf = entry->state.pf;
311
312	hooks = nf_hook_entries_head(net, pf, entry->state.hook);
313
314	i = entry->hook_index;
315	if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
316		kfree_skb(skb);
317		nf_queue_entry_free(entry);
318		return;
319	}
320
321	hook_entry = &hooks->hooks[i];
322
323	/* Continue traversal iff userspace said ok... */
324	if (verdict == NF_REPEAT)
325		verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);
326
327	if (verdict == NF_ACCEPT) {
328		if (nf_reroute(skb, entry) < 0)
329			verdict = NF_DROP;
330	}
331
332	if (verdict == NF_ACCEPT) {
333next_hook:
334		++i;
335		verdict = nf_iterate(skb, &entry->state, hooks, &i);
336	}
337
338	switch (verdict & NF_VERDICT_MASK) {
339	case NF_ACCEPT:
340	case NF_STOP:
341		local_bh_disable();
342		entry->state.okfn(entry->state.net, entry->state.sk, skb);
343		local_bh_enable();
344		break;
345	case NF_QUEUE:
346		err = nf_queue(skb, &entry->state, i, verdict);
347		if (err == 1)
348			goto next_hook;
349		break;
350	case NF_STOLEN:
351		break;
352	default:
353		kfree_skb(skb);
354	}
355
356	nf_queue_entry_free(entry);
357}
358EXPORT_SYMBOL(nf_reinject);
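By v6.2 the per-family handler array and its mutex are gone: there is a single global handler slot (nf_queue_handler), entries carry a full nf_hook_state plus a numeric hook_index instead of a list pointer, and the handler must also provide an nf_hook_drop() callback so queued packets can be flushed when a network namespace tears its hooks down. A minimal, hypothetical backend against this API could look like the sketch below; the names are illustrative, and as before the only in-tree backend is nfnetlink_queue, which defers reinjection until userspace supplies a verdict.

#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_queue.h>

/* Illustrative outfn(): accept immediately.  nf_reinject() expects the RCU
 * read-side lock, which is held on the hook traversal path that calls us. */
static int demo_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
{
	nf_reinject(entry, NF_ACCEPT);
	return 0;
}

/* Called via nf_queue_nf_hook_drop(); a real backend drops every queued
 * entry whose state.net matches @net. */
static void demo_hook_drop(struct net *net)
{
}

static const struct nf_queue_handler demo_qh = {
	.outfn		= demo_outfn,
	.nf_hook_drop	= demo_hook_drop,
};

static int __init demo_init(void)
{
	/* Single global slot: the core WARNs if a handler is already set. */
	nf_register_queue_handler(&demo_qh);
	return 0;
}

static void __exit demo_exit(void)
{
	/* As in v3.5.6, the backend must flush its queue before this. */
	nf_unregister_queue_handler();
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");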