v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * This is a module which is used for queueing packets and communicating with
   4 * userspace via nfnetlink.
   5 *
   6 * (C) 2005 by Harald Welte <laforge@netfilter.org>
   7 * (C) 2007 by Patrick McHardy <kaber@trash.net>
   8 *
   9 * Based on the old ipv4-only ip_queue.c:
  10 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
  11 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
  12 */
  13
  14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  15
  16#include <linux/module.h>
  17#include <linux/skbuff.h>
  18#include <linux/init.h>
  19#include <linux/spinlock.h>
  20#include <linux/slab.h>
  21#include <linux/notifier.h>
  22#include <linux/netdevice.h>
  23#include <linux/netfilter.h>
  24#include <linux/proc_fs.h>
  25#include <linux/netfilter_ipv4.h>
  26#include <linux/netfilter_ipv6.h>
  27#include <linux/netfilter_bridge.h>
  28#include <linux/netfilter/nfnetlink.h>
  29#include <linux/netfilter/nfnetlink_queue.h>
  30#include <linux/netfilter/nf_conntrack_common.h>
  31#include <linux/list.h>
  32#include <net/sock.h>
  33#include <net/tcp_states.h>
  34#include <net/netfilter/nf_queue.h>
  35#include <net/netns/generic.h>
  36
  37#include <linux/atomic.h>
  38
  39#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
  40#include "../bridge/br_private.h"
  41#endif
  42
  43#if IS_ENABLED(CONFIG_NF_CONNTRACK)
  44#include <net/netfilter/nf_conntrack.h>
  45#endif
  46
  47#define NFQNL_QMAX_DEFAULT 1024
  48
  49/* We're using struct nlattr which has 16bit nla_len. Note that nla_len
  50 * includes the header length. Thus, the maximum packet length that we
  51 * support is 65531 bytes. We send truncated packets if the specified length
  52 * is larger than that.  Userspace can check for presence of NFQA_CAP_LEN
  53 * attribute to detect truncation.
  54 */
  55#define NFQNL_MAX_COPY_RANGE (0xffff - NLA_HDRLEN)
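/* Worked example (illustrative note, not in the original file): NLA_HDRLEN
 * is NLA_ALIGN(sizeof(struct nlattr)) = 4 bytes, so NFQNL_MAX_COPY_RANGE is
 * 0xffff - 4 = 65531, the figure quoted in the comment above. A larger
 * requested copy_range is clamped to this, and a truncated packet carries
 * NFQA_CAP_LEN with the original length so userspace can detect it.
 */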
  56
  57struct nfqnl_instance {
  58	struct hlist_node hlist;		/* global list of queues */
  59	struct rcu_head rcu;
  60
  61	u32 peer_portid;
  62	unsigned int queue_maxlen;
  63	unsigned int copy_range;
  64	unsigned int queue_dropped;
  65	unsigned int queue_user_dropped;
  66
  67
  68	u_int16_t queue_num;			/* number of this queue */
  69	u_int8_t copy_mode;
  70	u_int32_t flags;			/* Set using NFQA_CFG_FLAGS */
  71/*
  72 * Following fields are dirtied for each queued packet,
  73 * keep them in same cache line if possible.
  74 */
  75	spinlock_t	lock	____cacheline_aligned_in_smp;
  76	unsigned int	queue_total;
  77	unsigned int	id_sequence;		/* 'sequence' of pkt ids */
  78	struct list_head queue_list;		/* packets in queue */
  79};
  80
  81typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);
  82
  83static unsigned int nfnl_queue_net_id __read_mostly;
  84
  85#define INSTANCE_BUCKETS	16
  86struct nfnl_queue_net {
  87	spinlock_t instances_lock;
  88	struct hlist_head instance_table[INSTANCE_BUCKETS];
  89};
  90
  91static struct nfnl_queue_net *nfnl_queue_pernet(struct net *net)
  92{
  93	return net_generic(net, nfnl_queue_net_id);
  94}
  95
  96static inline u_int8_t instance_hashfn(u_int16_t queue_num)
  97{
  98	return ((queue_num >> 8) ^ queue_num) % INSTANCE_BUCKETS;
  99}
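/* Worked example (illustrative note, not in the original file): the hash
 * folds the high byte of the queue number into the low byte, then reduces
 * modulo INSTANCE_BUCKETS (16). Queue number 0x1234 lands in bucket
 * (0x12 ^ 0x34) % 16 = 0x26 % 16 = 6.
 */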
 100
 101static struct nfqnl_instance *
 102instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num)
 103{
 104	struct hlist_head *head;
 105	struct nfqnl_instance *inst;
 106
 107	head = &q->instance_table[instance_hashfn(queue_num)];
 108	hlist_for_each_entry_rcu(inst, head, hlist) {
 109		if (inst->queue_num == queue_num)
 110			return inst;
 111	}
 112	return NULL;
 113}
 114
 115static struct nfqnl_instance *
 116instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid)
 117{
 118	struct nfqnl_instance *inst;
 119	unsigned int h;
 120	int err;
 121
 122	spin_lock(&q->instances_lock);
 123	if (instance_lookup(q, queue_num)) {
 124		err = -EEXIST;
 125		goto out_unlock;
 126	}
 127
 128	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
 129	if (!inst) {
 130		err = -ENOMEM;
 131		goto out_unlock;
 132	}
 133
 134	inst->queue_num = queue_num;
 135	inst->peer_portid = portid;
 136	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
 137	inst->copy_range = NFQNL_MAX_COPY_RANGE;
 138	inst->copy_mode = NFQNL_COPY_NONE;
 139	spin_lock_init(&inst->lock);
 140	INIT_LIST_HEAD(&inst->queue_list);
 141
 142	if (!try_module_get(THIS_MODULE)) {
 143		err = -EAGAIN;
 144		goto out_free;
 145	}
 146
 147	h = instance_hashfn(queue_num);
 148	hlist_add_head_rcu(&inst->hlist, &q->instance_table[h]);
 149
 150	spin_unlock(&q->instances_lock);
 151
 152	return inst;
 153
 154out_free:
 155	kfree(inst);
 156out_unlock:
 157	spin_unlock(&q->instances_lock);
 158	return ERR_PTR(err);
 159}
 160
 161static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
 162			unsigned long data);
 163
 164static void
 165instance_destroy_rcu(struct rcu_head *head)
 166{
 167	struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
 168						   rcu);
 169
 170	nfqnl_flush(inst, NULL, 0);
 171	kfree(inst);
 172	module_put(THIS_MODULE);
 173}
 174
 175static void
 176__instance_destroy(struct nfqnl_instance *inst)
 177{
 178	hlist_del_rcu(&inst->hlist);
 179	call_rcu(&inst->rcu, instance_destroy_rcu);
 180}
 181
 182static void
 183instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst)
 184{
 185	spin_lock(&q->instances_lock);
 186	__instance_destroy(inst);
 187	spin_unlock(&q->instances_lock);
 188}
 189
 190static inline void
 191__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
 192{
 193       list_add_tail(&entry->list, &queue->queue_list);
 194       queue->queue_total++;
 195}
 196
 197static void
 198__dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
 199{
 200	list_del(&entry->list);
 201	queue->queue_total--;
 202}
 203
 204static struct nf_queue_entry *
 205find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
 206{
 207	struct nf_queue_entry *entry = NULL, *i;
 208
 209	spin_lock_bh(&queue->lock);
 210
 211	list_for_each_entry(i, &queue->queue_list, list) {
 212		if (i->id == id) {
 213			entry = i;
 214			break;
 215		}
 216	}
 217
 218	if (entry)
 219		__dequeue_entry(queue, entry);
 220
 221	spin_unlock_bh(&queue->lock);
 222
 223	return entry;
 224}
 225
 226static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 227{
 228	struct nf_ct_hook *ct_hook;
 229	int err;
 230
 231	if (verdict == NF_ACCEPT ||
 232	    verdict == NF_REPEAT ||
 233	    verdict == NF_STOP) {
 234		rcu_read_lock();
 235		ct_hook = rcu_dereference(nf_ct_hook);
 236		if (ct_hook) {
 237			err = ct_hook->update(entry->state.net, entry->skb);
 238			if (err < 0)
 239				verdict = NF_DROP;
 240		}
 241		rcu_read_unlock();
 242	}
 243	nf_reinject(entry, verdict);
 244}
 245
 246static void
 247nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
 248{
 249	struct nf_queue_entry *entry, *next;
 250
 251	spin_lock_bh(&queue->lock);
 252	list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
 253		if (!cmpfn || cmpfn(entry, data)) {
 254			list_del(&entry->list);
 255			queue->queue_total--;
 256			nfqnl_reinject(entry, NF_DROP);
 257		}
 258	}
 259	spin_unlock_bh(&queue->lock);
 260}
 261
 262static int
 263nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet,
 264		      bool csum_verify)
 265{
 266	__u32 flags = 0;
 267
 268	if (packet->ip_summed == CHECKSUM_PARTIAL)
 269		flags = NFQA_SKB_CSUMNOTREADY;
 270	else if (csum_verify)
 271		flags = NFQA_SKB_CSUM_NOTVERIFIED;
 272
 273	if (skb_is_gso(packet))
 274		flags |= NFQA_SKB_GSO;
 275
 276	return flags ? nla_put_be32(nlskb, NFQA_SKB_INFO, htonl(flags)) : 0;
 277}
 278
 279static int nfqnl_put_sk_uidgid(struct sk_buff *skb, struct sock *sk)
 280{
 281	const struct cred *cred;
 282
 283	if (!sk_fullsock(sk))
 284		return 0;
 285
 286	read_lock_bh(&sk->sk_callback_lock);
 287	if (sk->sk_socket && sk->sk_socket->file) {
 288		cred = sk->sk_socket->file->f_cred;
 289		if (nla_put_be32(skb, NFQA_UID,
 290		    htonl(from_kuid_munged(&init_user_ns, cred->fsuid))))
 291			goto nla_put_failure;
 292		if (nla_put_be32(skb, NFQA_GID,
 293		    htonl(from_kgid_munged(&init_user_ns, cred->fsgid))))
 294			goto nla_put_failure;
 295	}
 296	read_unlock_bh(&sk->sk_callback_lock);
 297	return 0;
 298
 299nla_put_failure:
 300	read_unlock_bh(&sk->sk_callback_lock);
 301	return -1;
 302}
 303
 304static u32 nfqnl_get_sk_secctx(struct sk_buff *skb, char **secdata)
 305{
 306	u32 seclen = 0;
 307#if IS_ENABLED(CONFIG_NETWORK_SECMARK)
 308	if (!skb || !sk_fullsock(skb->sk))
 309		return 0;
 310
 311	read_lock_bh(&skb->sk->sk_callback_lock);
 312
 313	if (skb->secmark)
 314		security_secid_to_secctx(skb->secmark, secdata, &seclen);
 315
 316	read_unlock_bh(&skb->sk->sk_callback_lock);
 317#endif
 318	return seclen;
 319}
 320
 321static u32 nfqnl_get_bridge_size(struct nf_queue_entry *entry)
 322{
 323	struct sk_buff *entskb = entry->skb;
 324	u32 nlalen = 0;
 325
 326	if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
 327		return 0;
 328
 329	if (skb_vlan_tag_present(entskb))
 330		nlalen += nla_total_size(nla_total_size(sizeof(__be16)) +
 331					 nla_total_size(sizeof(__be16)));
 332
 333	if (entskb->network_header > entskb->mac_header)
 334		nlalen += nla_total_size((entskb->network_header -
 335					  entskb->mac_header));
 336
 337	return nlalen;
 338}
 339
 340static int nfqnl_put_bridge(struct nf_queue_entry *entry, struct sk_buff *skb)
 341{
 342	struct sk_buff *entskb = entry->skb;
 343
 344	if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
 345		return 0;
 346
 347	if (skb_vlan_tag_present(entskb)) {
 348		struct nlattr *nest;
 349
 350		nest = nla_nest_start(skb, NFQA_VLAN);
 351		if (!nest)
 352			goto nla_put_failure;
 353
 354		if (nla_put_be16(skb, NFQA_VLAN_TCI, htons(entskb->vlan_tci)) ||
 355		    nla_put_be16(skb, NFQA_VLAN_PROTO, entskb->vlan_proto))
 356			goto nla_put_failure;
 357
 358		nla_nest_end(skb, nest);
 359	}
 360
 361	if (entskb->mac_header < entskb->network_header) {
 362		int len = (int)(entskb->network_header - entskb->mac_header);
 363
 364		if (nla_put(skb, NFQA_L2HDR, len, skb_mac_header(entskb)))
 365			goto nla_put_failure;
 366	}
 367
 368	return 0;
 369
 370nla_put_failure:
 371	return -1;
 372}
 373
 374static struct sk_buff *
 375nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 376			   struct nf_queue_entry *entry,
 377			   __be32 **packet_id_ptr)
 378{
 379	size_t size;
 380	size_t data_len = 0, cap_len = 0;
 381	unsigned int hlen = 0;
 382	struct sk_buff *skb;
 383	struct nlattr *nla;
 384	struct nfqnl_msg_packet_hdr *pmsg;
 385	struct nlmsghdr *nlh;
 386	struct sk_buff *entskb = entry->skb;
 387	struct net_device *indev;
 388	struct net_device *outdev;
 389	struct nf_conn *ct = NULL;
 390	enum ip_conntrack_info ctinfo;
 391	struct nfnl_ct_hook *nfnl_ct;
 392	bool csum_verify;
 393	char *secdata = NULL;
 394	u32 seclen = 0;
 395
 396	size = nlmsg_total_size(sizeof(struct nfgenmsg))
 397		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
 398		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
 399		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
 400#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 401		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
 402		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
 403#endif
 404		+ nla_total_size(sizeof(u_int32_t))	/* mark */
 405		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
 406		+ nla_total_size(sizeof(u_int32_t))	/* skbinfo */
 407		+ nla_total_size(sizeof(u_int32_t));	/* cap_len */
 408
 409	if (entskb->tstamp)
 410		size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));
 411
 412	size += nfqnl_get_bridge_size(entry);
 413
 414	if (entry->state.hook <= NF_INET_FORWARD ||
 415	   (entry->state.hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
 416		csum_verify = !skb_csum_unnecessary(entskb);
 417	else
 418		csum_verify = false;
 419
 420	outdev = entry->state.out;
 421
 422	switch ((enum nfqnl_config_mode)READ_ONCE(queue->copy_mode)) {
 423	case NFQNL_COPY_META:
 424	case NFQNL_COPY_NONE:
 425		break;
 426
 427	case NFQNL_COPY_PACKET:
 428		if (!(queue->flags & NFQA_CFG_F_GSO) &&
 429		    entskb->ip_summed == CHECKSUM_PARTIAL &&
 430		    skb_checksum_help(entskb))
 431			return NULL;
 432
 433		data_len = READ_ONCE(queue->copy_range);
 434		if (data_len > entskb->len)
 435			data_len = entskb->len;
 436
 437		hlen = skb_zerocopy_headlen(entskb);
 438		hlen = min_t(unsigned int, hlen, data_len);
 439		size += sizeof(struct nlattr) + hlen;
 440		cap_len = entskb->len;
 441		break;
 442	}
 443
 444	nfnl_ct = rcu_dereference(nfnl_ct_hook);
 445
 446#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 447	if (queue->flags & NFQA_CFG_F_CONNTRACK) {
 448		if (nfnl_ct != NULL) {
 449			ct = nf_ct_get(entskb, &ctinfo);
 450			if (ct != NULL)
 451				size += nfnl_ct->build_size(ct);
 452		}
 453	}
 454#endif
 455
 456	if (queue->flags & NFQA_CFG_F_UID_GID) {
 457		size += (nla_total_size(sizeof(u_int32_t))	/* uid */
 458			+ nla_total_size(sizeof(u_int32_t)));	/* gid */
 459	}
 460
 461	if ((queue->flags & NFQA_CFG_F_SECCTX) && entskb->sk) {
 462		seclen = nfqnl_get_sk_secctx(entskb, &secdata);
 463		if (seclen)
 464			size += nla_total_size(seclen);
 465	}
 466
 467	skb = alloc_skb(size, GFP_ATOMIC);
 468	if (!skb) {
 469		skb_tx_error(entskb);
 470		goto nlmsg_failure;
 471	}
 472
 473	nlh = nfnl_msg_put(skb, 0, 0,
 474			   nfnl_msg_type(NFNL_SUBSYS_QUEUE, NFQNL_MSG_PACKET),
 475			   0, entry->state.pf, NFNETLINK_V0,
 476			   htons(queue->queue_num));
 477	if (!nlh) {
 478		skb_tx_error(entskb);
 479		kfree_skb(skb);
 480		goto nlmsg_failure;
 481	}
 482
 483	nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
 484	pmsg = nla_data(nla);
 485	pmsg->hw_protocol	= entskb->protocol;
 486	pmsg->hook		= entry->state.hook;
 487	*packet_id_ptr		= &pmsg->packet_id;
 488
 489	indev = entry->state.in;
 490	if (indev) {
 491#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 492		if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
 493			goto nla_put_failure;
 494#else
 495		if (entry->state.pf == PF_BRIDGE) {
 496			/* Case 1: indev is physical input device, we need to
 497			 * look for bridge group (when called from
 498			 * netfilter_bridge) */
 499			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
 500					 htonl(indev->ifindex)) ||
 501			/* this is the bridge group "brX" */
 502			/* rcu_read_lock()ed by __nf_queue */
 503			    nla_put_be32(skb, NFQA_IFINDEX_INDEV,
 504					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
 505				goto nla_put_failure;
 506		} else {
 507			int physinif;
 508
 509			/* Case 2: indev is bridge group, we need to look for
 510			 * physical device (when called from ipv4) */
 511			if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
 512					 htonl(indev->ifindex)))
 513				goto nla_put_failure;
 514
 515			physinif = nf_bridge_get_physinif(entskb);
 516			if (physinif &&
 517			    nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
 518					 htonl(physinif)))
 519				goto nla_put_failure;
 520		}
 521#endif
 522	}
 523
 524	if (outdev) {
 525#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 526		if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
 527			goto nla_put_failure;
 528#else
 529		if (entry->state.pf == PF_BRIDGE) {
 530			/* Case 1: outdev is physical output device, we need to
 531			 * look for bridge group (when called from
 532			 * netfilter_bridge) */
 533			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
 534					 htonl(outdev->ifindex)) ||
 535			/* this is the bridge group "brX" */
 536			/* rcu_read_lock()ed by __nf_queue */
 537			    nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
 538					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
 539				goto nla_put_failure;
 540		} else {
 541			int physoutif;
 542
 543			/* Case 2: outdev is bridge group, we need to look for
 544			 * physical output device (when called from ipv4) */
 545			if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
 546					 htonl(outdev->ifindex)))
 547				goto nla_put_failure;
 548
 549			physoutif = nf_bridge_get_physoutif(entskb);
 550			if (physoutif &&
 551			    nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
 552					 htonl(physoutif)))
 553				goto nla_put_failure;
 554		}
 555#endif
 556	}
 557
 558	if (entskb->mark &&
 559	    nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
 560		goto nla_put_failure;
 561
 562	if (indev && entskb->dev &&
 563	    entskb->mac_header != entskb->network_header) {
 564		struct nfqnl_msg_packet_hw phw;
 565		int len;
 566
 567		memset(&phw, 0, sizeof(phw));
 568		len = dev_parse_header(entskb, phw.hw_addr);
 569		if (len) {
 570			phw.hw_addrlen = htons(len);
 571			if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
 572				goto nla_put_failure;
 573		}
 574	}
 575
 576	if (nfqnl_put_bridge(entry, skb) < 0)
 577		goto nla_put_failure;
 578
 579	if (entry->state.hook <= NF_INET_FORWARD && entskb->tstamp) {
 580		struct nfqnl_msg_packet_timestamp ts;
 581		struct timespec64 kts = ktime_to_timespec64(entskb->tstamp);
 582
 583		ts.sec = cpu_to_be64(kts.tv_sec);
 584		ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);
 585
 586		if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
 587			goto nla_put_failure;
 588	}
 589
 590	if ((queue->flags & NFQA_CFG_F_UID_GID) && entskb->sk &&
 591	    nfqnl_put_sk_uidgid(skb, entskb->sk) < 0)
 592		goto nla_put_failure;
 593
 594	if (seclen && nla_put(skb, NFQA_SECCTX, seclen, secdata))
 595		goto nla_put_failure;
 596
 597	if (ct && nfnl_ct->build(skb, ct, ctinfo, NFQA_CT, NFQA_CT_INFO) < 0)
 598		goto nla_put_failure;
 599
 600	if (cap_len > data_len &&
 601	    nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
 602		goto nla_put_failure;
 603
 604	if (nfqnl_put_packet_info(skb, entskb, csum_verify))
 605		goto nla_put_failure;
 606
 607	if (data_len) {
 608		struct nlattr *nla;
 609
 610		if (skb_tailroom(skb) < sizeof(*nla) + hlen)
 611			goto nla_put_failure;
 612
 613		nla = skb_put(skb, sizeof(*nla));
 614		nla->nla_type = NFQA_PAYLOAD;
 615		nla->nla_len = nla_attr_size(data_len);
 616
 617		if (skb_zerocopy(skb, entskb, data_len, hlen))
 618			goto nla_put_failure;
 619	}
 620
 621	nlh->nlmsg_len = skb->len;
 622	if (seclen)
 623		security_release_secctx(secdata, seclen);
 624	return skb;
 625
 626nla_put_failure:
 627	skb_tx_error(entskb);
 628	kfree_skb(skb);
 629	net_err_ratelimited("nf_queue: error creating packet message\n");
 630nlmsg_failure:
 631	if (seclen)
 632		security_release_secctx(secdata, seclen);
 633	return NULL;
 634}
 635
 636static bool nf_ct_drop_unconfirmed(const struct nf_queue_entry *entry)
 637{
 638#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 639	static const unsigned long flags = IPS_CONFIRMED | IPS_DYING;
 640	const struct nf_conn *ct = (void *)skb_nfct(entry->skb);
 641
 642	if (ct && ((ct->status & flags) == IPS_DYING))
 643		return true;
 644#endif
 645	return false;
 646}
 647
 648static int
 649__nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
 650			struct nf_queue_entry *entry)
 651{
 652	struct sk_buff *nskb;
 653	int err = -ENOBUFS;
 654	__be32 *packet_id_ptr;
 655	int failopen = 0;
 656
 657	nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr);
 658	if (nskb == NULL) {
 659		err = -ENOMEM;
 660		goto err_out;
 661	}
 662	spin_lock_bh(&queue->lock);
 663
 664	if (nf_ct_drop_unconfirmed(entry))
 665		goto err_out_free_nskb;
 666
 667	if (queue->queue_total >= queue->queue_maxlen) {
 668		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
 669			failopen = 1;
 670			err = 0;
 671		} else {
 672			queue->queue_dropped++;
  673			net_warn_ratelimited("nf_queue: full at %d entries, dropping packet(s)\n",
 674					     queue->queue_total);
 675		}
 676		goto err_out_free_nskb;
 677	}
 678	entry->id = ++queue->id_sequence;
 679	*packet_id_ptr = htonl(entry->id);
 680
 681	/* nfnetlink_unicast will either free the nskb or add it to a socket */
 682	err = nfnetlink_unicast(nskb, net, queue->peer_portid);
 683	if (err < 0) {
 684		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
 685			failopen = 1;
 686			err = 0;
 687		} else {
 688			queue->queue_user_dropped++;
 689		}
 690		goto err_out_unlock;
 691	}
 692
 693	__enqueue_entry(queue, entry);
 694
 695	spin_unlock_bh(&queue->lock);
 696	return 0;
 697
 698err_out_free_nskb:
 699	kfree_skb(nskb);
 700err_out_unlock:
 701	spin_unlock_bh(&queue->lock);
 702	if (failopen)
 703		nfqnl_reinject(entry, NF_ACCEPT);
 704err_out:
 705	return err;
 706}
 707
 708static struct nf_queue_entry *
 709nf_queue_entry_dup(struct nf_queue_entry *e)
 710{
 711	struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
 712	if (entry)
 713		nf_queue_entry_get_refs(entry);
 714	return entry;
 715}
 716
 717#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 718/* When called from bridge netfilter, skb->data must point to MAC header
 719 * before calling skb_gso_segment(). Else, original MAC header is lost
 720 * and segmented skbs will be sent to wrong destination.
 721 */
 722static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
 723{
 724	if (nf_bridge_info_get(skb))
 725		__skb_push(skb, skb->network_header - skb->mac_header);
 726}
 727
 728static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
 729{
 730	if (nf_bridge_info_get(skb))
 731		__skb_pull(skb, skb->network_header - skb->mac_header);
 732}
 733#else
 734#define nf_bridge_adjust_skb_data(s) do {} while (0)
 735#define nf_bridge_adjust_segmented_data(s) do {} while (0)
 736#endif
 737
 738static int
 739__nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
 740			   struct sk_buff *skb, struct nf_queue_entry *entry)
 741{
 742	int ret = -ENOMEM;
 743	struct nf_queue_entry *entry_seg;
 744
 745	nf_bridge_adjust_segmented_data(skb);
 746
 747	if (skb->next == NULL) { /* last packet, no need to copy entry */
 748		struct sk_buff *gso_skb = entry->skb;
 749		entry->skb = skb;
 750		ret = __nfqnl_enqueue_packet(net, queue, entry);
 751		if (ret)
 752			entry->skb = gso_skb;
 753		return ret;
 754	}
 755
 756	skb_mark_not_on_list(skb);
 757
 758	entry_seg = nf_queue_entry_dup(entry);
 759	if (entry_seg) {
 760		entry_seg->skb = skb;
 761		ret = __nfqnl_enqueue_packet(net, queue, entry_seg);
 762		if (ret)
 763			nf_queue_entry_free(entry_seg);
 764	}
 765	return ret;
 766}
 767
 768static int
 769nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 770{
 771	unsigned int queued;
 772	struct nfqnl_instance *queue;
 773	struct sk_buff *skb, *segs, *nskb;
 774	int err = -ENOBUFS;
 775	struct net *net = entry->state.net;
 776	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
 777
 778	/* rcu_read_lock()ed by nf_hook_thresh */
 779	queue = instance_lookup(q, queuenum);
 780	if (!queue)
 781		return -ESRCH;
 782
 783	if (queue->copy_mode == NFQNL_COPY_NONE)
 784		return -EINVAL;
 785
 786	skb = entry->skb;
 787
 788	switch (entry->state.pf) {
 789	case NFPROTO_IPV4:
 790		skb->protocol = htons(ETH_P_IP);
 791		break;
 792	case NFPROTO_IPV6:
 793		skb->protocol = htons(ETH_P_IPV6);
 794		break;
 795	}
 796
 797	if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb))
 798		return __nfqnl_enqueue_packet(net, queue, entry);
 799
 800	nf_bridge_adjust_skb_data(skb);
 801	segs = skb_gso_segment(skb, 0);
 802	/* Does not use PTR_ERR to limit the number of error codes that can be
 803	 * returned by nf_queue.  For instance, callers rely on -ESRCH to
 804	 * mean 'ignore this hook'.
 805	 */
 806	if (IS_ERR_OR_NULL(segs))
 807		goto out_err;
 808	queued = 0;
 809	err = 0;
 810	skb_list_walk_safe(segs, segs, nskb) {
 811		if (err == 0)
 812			err = __nfqnl_enqueue_packet_gso(net, queue,
 813							segs, entry);
 814		if (err == 0)
 815			queued++;
 816		else
 817			kfree_skb(segs);
 818	}
 819
 820	if (queued) {
 821		if (err) /* some segments are already queued */
 822			nf_queue_entry_free(entry);
 823		kfree_skb(skb);
 824		return 0;
 825	}
 826 out_err:
 827	nf_bridge_adjust_segmented_data(skb);
 828	return err;
 829}
 830
 831static int
 832nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
 833{
 834	struct sk_buff *nskb;
 835
 836	if (diff < 0) {
 837		if (pskb_trim(e->skb, data_len))
 838			return -ENOMEM;
 839	} else if (diff > 0) {
 840		if (data_len > 0xFFFF)
 841			return -EINVAL;
 842		if (diff > skb_tailroom(e->skb)) {
 843			nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
 844					       diff, GFP_ATOMIC);
 845			if (!nskb)
 846				return -ENOMEM;
 847			kfree_skb(e->skb);
 848			e->skb = nskb;
 849		}
 850		skb_put(e->skb, diff);
 851	}
 852	if (skb_ensure_writable(e->skb, data_len))
 853		return -ENOMEM;
 854	skb_copy_to_linear_data(e->skb, data, data_len);
 855	e->skb->ip_summed = CHECKSUM_NONE;
 856	return 0;
 857}
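/* Illustrative note, not in the original file: nfqnl_mangle() installs a
 * replacement payload supplied by userspace in NFQA_PAYLOAD, where diff is
 * the new length minus the old. Replacing a 100-byte packet's payload with
 * 60 bytes gives diff = -40 and the skb is trimmed to 60; replacing it with
 * 140 bytes gives diff = +40 and the skb tail is grown (reallocating when
 * tailroom is short) before the new data is copied in and ip_summed is
 * reset to CHECKSUM_NONE.
 */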
 858
 859static int
 860nfqnl_set_mode(struct nfqnl_instance *queue,
 861	       unsigned char mode, unsigned int range)
 862{
 863	int status = 0;
 864
 865	spin_lock_bh(&queue->lock);
 866	switch (mode) {
 867	case NFQNL_COPY_NONE:
 868	case NFQNL_COPY_META:
 869		queue->copy_mode = mode;
 870		queue->copy_range = 0;
 871		break;
 872
 873	case NFQNL_COPY_PACKET:
 874		queue->copy_mode = mode;
 875		if (range == 0 || range > NFQNL_MAX_COPY_RANGE)
 876			queue->copy_range = NFQNL_MAX_COPY_RANGE;
 877		else
 878			queue->copy_range = range;
 879		break;
 880
 881	default:
 882		status = -EINVAL;
 883
 884	}
 885	spin_unlock_bh(&queue->lock);
 886
 887	return status;
 888}
 889
 890static int
 891dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
 892{
 893#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 894	int physinif, physoutif;
 895
 896	physinif = nf_bridge_get_physinif(entry->skb);
 897	physoutif = nf_bridge_get_physoutif(entry->skb);
 898
 899	if (physinif == ifindex || physoutif == ifindex)
 900		return 1;
 901#endif
 902	if (entry->state.in)
 903		if (entry->state.in->ifindex == ifindex)
 904			return 1;
 905	if (entry->state.out)
 906		if (entry->state.out->ifindex == ifindex)
 907			return 1;
 908
 909	return 0;
 910}
 911
 912/* drop all packets with either indev or outdev == ifindex from all queue
 913 * instances */
 914static void
 915nfqnl_dev_drop(struct net *net, int ifindex)
 916{
 917	int i;
 918	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
 919
 920	rcu_read_lock();
 921
 922	for (i = 0; i < INSTANCE_BUCKETS; i++) {
 923		struct nfqnl_instance *inst;
 924		struct hlist_head *head = &q->instance_table[i];
 925
 926		hlist_for_each_entry_rcu(inst, head, hlist)
 927			nfqnl_flush(inst, dev_cmp, ifindex);
 928	}
 929
 930	rcu_read_unlock();
 931}
 932
 933static int
 934nfqnl_rcv_dev_event(struct notifier_block *this,
 935		    unsigned long event, void *ptr)
 936{
 937	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 938
 939	/* Drop any packets associated with the downed device */
 940	if (event == NETDEV_DOWN)
 941		nfqnl_dev_drop(dev_net(dev), dev->ifindex);
 942	return NOTIFY_DONE;
 943}
 944
 945static struct notifier_block nfqnl_dev_notifier = {
 946	.notifier_call	= nfqnl_rcv_dev_event,
 947};
 948
 949static void nfqnl_nf_hook_drop(struct net *net)
 950{
 951	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
 952	int i;
 953
 954	for (i = 0; i < INSTANCE_BUCKETS; i++) {
 955		struct nfqnl_instance *inst;
 956		struct hlist_head *head = &q->instance_table[i];
 957
 958		hlist_for_each_entry_rcu(inst, head, hlist)
 959			nfqnl_flush(inst, NULL, 0);
 960	}
 961}
 962
 963static int
 964nfqnl_rcv_nl_event(struct notifier_block *this,
 965		   unsigned long event, void *ptr)
 966{
 967	struct netlink_notify *n = ptr;
 968	struct nfnl_queue_net *q = nfnl_queue_pernet(n->net);
 969
 970	if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
 971		int i;
 972
 973		/* destroy all instances for this portid */
 974		spin_lock(&q->instances_lock);
 975		for (i = 0; i < INSTANCE_BUCKETS; i++) {
 976			struct hlist_node *t2;
 977			struct nfqnl_instance *inst;
 978			struct hlist_head *head = &q->instance_table[i];
 979
 980			hlist_for_each_entry_safe(inst, t2, head, hlist) {
 981				if (n->portid == inst->peer_portid)
 982					__instance_destroy(inst);
 983			}
 984		}
 985		spin_unlock(&q->instances_lock);
 986	}
 987	return NOTIFY_DONE;
 988}
 989
 990static struct notifier_block nfqnl_rtnl_notifier = {
 991	.notifier_call	= nfqnl_rcv_nl_event,
 992};
 993
 994static const struct nla_policy nfqa_vlan_policy[NFQA_VLAN_MAX + 1] = {
 995	[NFQA_VLAN_TCI]		= { .type = NLA_U16},
 996	[NFQA_VLAN_PROTO]	= { .type = NLA_U16},
 997};
 998
 999static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
1000	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
1001	[NFQA_MARK]		= { .type = NLA_U32 },
1002	[NFQA_PAYLOAD]		= { .type = NLA_UNSPEC },
1003	[NFQA_CT]		= { .type = NLA_UNSPEC },
1004	[NFQA_EXP]		= { .type = NLA_UNSPEC },
1005	[NFQA_VLAN]		= { .type = NLA_NESTED },
1006};
1007
1008static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
1009	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
1010	[NFQA_MARK]		= { .type = NLA_U32 },
1011};
1012
1013static struct nfqnl_instance *
1014verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, u32 nlportid)
1015{
1016	struct nfqnl_instance *queue;
1017
1018	queue = instance_lookup(q, queue_num);
1019	if (!queue)
1020		return ERR_PTR(-ENODEV);
1021
1022	if (queue->peer_portid != nlportid)
1023		return ERR_PTR(-EPERM);
1024
1025	return queue;
1026}
1027
1028static struct nfqnl_msg_verdict_hdr*
1029verdicthdr_get(const struct nlattr * const nfqa[])
1030{
1031	struct nfqnl_msg_verdict_hdr *vhdr;
1032	unsigned int verdict;
1033
1034	if (!nfqa[NFQA_VERDICT_HDR])
1035		return NULL;
1036
1037	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
1038	verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
1039	if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
1040		return NULL;
1041	return vhdr;
1042}
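/* Illustrative note, not in the original file: the verdict constants checked
 * above come from include/uapi/linux/netfilter.h: NF_DROP (0), NF_ACCEPT (1),
 * NF_STOLEN (2), NF_QUEUE (3), NF_REPEAT (4) and NF_STOP (5); NF_MAX_VERDICT
 * equals NF_STOP. NF_STOLEN is refused here, presumably because a userspace
 * verdict cannot meaningfully take ownership of a queued skb.
 */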
1043
1044static int nfq_id_after(unsigned int id, unsigned int max)
1045{
1046	return (int)(id - max) > 0;
1047}
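/* Worked example (illustrative note, not in the original file): the cast to
 * int makes this comparison robust across u32 wraparound of the packet-id
 * sequence. With max = 0xfffffffe and id = 2, (int)(2 - 0xfffffffe) = 4 > 0,
 * so id 2 correctly counts as "after" max; with id = 5 and max = 10,
 * (int)(5 - 10) = -5, so it does not.
 */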
1048
1049static int nfqnl_recv_verdict_batch(struct sk_buff *skb,
1050				    const struct nfnl_info *info,
1051				    const struct nlattr * const nfqa[])
1052{
1053	struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
1054	u16 queue_num = ntohs(info->nfmsg->res_id);
1055	struct nf_queue_entry *entry, *tmp;
1056	struct nfqnl_msg_verdict_hdr *vhdr;
1057	struct nfqnl_instance *queue;
1058	unsigned int verdict, maxid;
1059	LIST_HEAD(batch_list);
1060
1061	queue = verdict_instance_lookup(q, queue_num,
1062					NETLINK_CB(skb).portid);
1063	if (IS_ERR(queue))
1064		return PTR_ERR(queue);
1065
1066	vhdr = verdicthdr_get(nfqa);
1067	if (!vhdr)
1068		return -EINVAL;
1069
1070	verdict = ntohl(vhdr->verdict);
1071	maxid = ntohl(vhdr->id);
1072
1073	spin_lock_bh(&queue->lock);
1074
1075	list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) {
1076		if (nfq_id_after(entry->id, maxid))
1077			break;
1078		__dequeue_entry(queue, entry);
1079		list_add_tail(&entry->list, &batch_list);
1080	}
1081
1082	spin_unlock_bh(&queue->lock);
1083
1084	if (list_empty(&batch_list))
1085		return -ENOENT;
1086
1087	list_for_each_entry_safe(entry, tmp, &batch_list, list) {
1088		if (nfqa[NFQA_MARK])
1089			entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
1090
1091		nfqnl_reinject(entry, verdict);
1092	}
1093	return 0;
1094}
1095
1096static struct nf_conn *nfqnl_ct_parse(struct nfnl_ct_hook *nfnl_ct,
1097				      const struct nlmsghdr *nlh,
1098				      const struct nlattr * const nfqa[],
1099				      struct nf_queue_entry *entry,
1100				      enum ip_conntrack_info *ctinfo)
1101{
1102#if IS_ENABLED(CONFIG_NF_CONNTRACK)
1103	struct nf_conn *ct;
1104
1105	ct = nf_ct_get(entry->skb, ctinfo);
1106	if (ct == NULL)
1107		return NULL;
1108
1109	if (nfnl_ct->parse(nfqa[NFQA_CT], ct) < 0)
1110		return NULL;
1111
1112	if (nfqa[NFQA_EXP])
1113		nfnl_ct->attach_expect(nfqa[NFQA_EXP], ct,
1114				      NETLINK_CB(entry->skb).portid,
1115				      nlmsg_report(nlh));
1116	return ct;
1117#else
1118	return NULL;
1119#endif
1120}
1121
1122static int nfqa_parse_bridge(struct nf_queue_entry *entry,
1123			     const struct nlattr * const nfqa[])
1124{
1125	if (nfqa[NFQA_VLAN]) {
1126		struct nlattr *tb[NFQA_VLAN_MAX + 1];
1127		int err;
1128
1129		err = nla_parse_nested_deprecated(tb, NFQA_VLAN_MAX,
1130						  nfqa[NFQA_VLAN],
1131						  nfqa_vlan_policy, NULL);
1132		if (err < 0)
1133			return err;
1134
1135		if (!tb[NFQA_VLAN_TCI] || !tb[NFQA_VLAN_PROTO])
1136			return -EINVAL;
1137
1138		__vlan_hwaccel_put_tag(entry->skb,
1139			nla_get_be16(tb[NFQA_VLAN_PROTO]),
1140			ntohs(nla_get_be16(tb[NFQA_VLAN_TCI])));
1141	}
1142
1143	if (nfqa[NFQA_L2HDR]) {
1144		int mac_header_len = entry->skb->network_header -
1145			entry->skb->mac_header;
1146
1147		if (mac_header_len != nla_len(nfqa[NFQA_L2HDR]))
1148			return -EINVAL;
1149		else if (mac_header_len > 0)
1150			memcpy(skb_mac_header(entry->skb),
1151			       nla_data(nfqa[NFQA_L2HDR]),
1152			       mac_header_len);
1153	}
1154
1155	return 0;
1156}
1157
1158static int nfqnl_recv_verdict(struct sk_buff *skb, const struct nfnl_info *info,
1159			      const struct nlattr * const nfqa[])
1160{
1161	struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
1162	u_int16_t queue_num = ntohs(info->nfmsg->res_id);
1163	struct nfqnl_msg_verdict_hdr *vhdr;
1164	enum ip_conntrack_info ctinfo;
1165	struct nfqnl_instance *queue;
1166	struct nf_queue_entry *entry;
1167	struct nfnl_ct_hook *nfnl_ct;
1168	struct nf_conn *ct = NULL;
1169	unsigned int verdict;
1170	int err;
1171
1172	queue = verdict_instance_lookup(q, queue_num,
1173					NETLINK_CB(skb).portid);
1174	if (IS_ERR(queue))
1175		return PTR_ERR(queue);
1176
1177	vhdr = verdicthdr_get(nfqa);
1178	if (!vhdr)
1179		return -EINVAL;
1180
1181	verdict = ntohl(vhdr->verdict);
1182
1183	entry = find_dequeue_entry(queue, ntohl(vhdr->id));
1184	if (entry == NULL)
1185		return -ENOENT;
1186
1187	/* rcu lock already held from nfnl->call_rcu. */
1188	nfnl_ct = rcu_dereference(nfnl_ct_hook);
1189
1190	if (nfqa[NFQA_CT]) {
1191		if (nfnl_ct != NULL)
1192			ct = nfqnl_ct_parse(nfnl_ct, info->nlh, nfqa, entry,
1193					    &ctinfo);
1194	}
1195
1196	if (entry->state.pf == PF_BRIDGE) {
1197		err = nfqa_parse_bridge(entry, nfqa);
1198		if (err < 0)
1199			return err;
1200	}
1201
1202	if (nfqa[NFQA_PAYLOAD]) {
1203		u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
1204		int diff = payload_len - entry->skb->len;
1205
1206		if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
1207				 payload_len, entry, diff) < 0)
1208			verdict = NF_DROP;
1209
1210		if (ct && diff)
1211			nfnl_ct->seq_adjust(entry->skb, ct, ctinfo, diff);
1212	}
1213
1214	if (nfqa[NFQA_MARK])
1215		entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
1216
1217	nfqnl_reinject(entry, verdict);
1218	return 0;
1219}
1220
1221static int nfqnl_recv_unsupp(struct sk_buff *skb, const struct nfnl_info *info,
1222			     const struct nlattr * const cda[])
1223{
1224	return -ENOTSUPP;
1225}
1226
1227static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
1228	[NFQA_CFG_CMD]		= { .len = sizeof(struct nfqnl_msg_config_cmd) },
1229	[NFQA_CFG_PARAMS]	= { .len = sizeof(struct nfqnl_msg_config_params) },
1230	[NFQA_CFG_QUEUE_MAXLEN]	= { .type = NLA_U32 },
1231	[NFQA_CFG_MASK]		= { .type = NLA_U32 },
1232	[NFQA_CFG_FLAGS]	= { .type = NLA_U32 },
1233};
1234
1235static const struct nf_queue_handler nfqh = {
1236	.outfn		= nfqnl_enqueue_packet,
1237	.nf_hook_drop	= nfqnl_nf_hook_drop,
1238};
1239
1240static int nfqnl_recv_config(struct sk_buff *skb, const struct nfnl_info *info,
1241			     const struct nlattr * const nfqa[])
1242{
1243	struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
1244	u_int16_t queue_num = ntohs(info->nfmsg->res_id);
1245	struct nfqnl_msg_config_cmd *cmd = NULL;
1246	struct nfqnl_instance *queue;
1247	__u32 flags = 0, mask = 0;
1248	int ret = 0;
1249
1250	if (nfqa[NFQA_CFG_CMD]) {
1251		cmd = nla_data(nfqa[NFQA_CFG_CMD]);
1252
1253		/* Obsolete commands without queue context */
1254		switch (cmd->command) {
1255		case NFQNL_CFG_CMD_PF_BIND: return 0;
1256		case NFQNL_CFG_CMD_PF_UNBIND: return 0;
1257		}
1258	}
1259
 1260	/* Check that we support these flags in the first place; dependencies
 1261	 * must be there too, so as not to break atomicity.
1262	 */
1263	if (nfqa[NFQA_CFG_FLAGS]) {
1264		if (!nfqa[NFQA_CFG_MASK]) {
1265			/* A mask is needed to specify which flags are being
1266			 * changed.
1267			 */
1268			return -EINVAL;
1269		}
1270
1271		flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS]));
1272		mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK]));
1273
1274		if (flags >= NFQA_CFG_F_MAX)
1275			return -EOPNOTSUPP;
1276
1277#if !IS_ENABLED(CONFIG_NETWORK_SECMARK)
1278		if (flags & mask & NFQA_CFG_F_SECCTX)
1279			return -EOPNOTSUPP;
1280#endif
1281		if ((flags & mask & NFQA_CFG_F_CONNTRACK) &&
1282		    !rcu_access_pointer(nfnl_ct_hook)) {
1283#ifdef CONFIG_MODULES
1284			nfnl_unlock(NFNL_SUBSYS_QUEUE);
1285			request_module("ip_conntrack_netlink");
1286			nfnl_lock(NFNL_SUBSYS_QUEUE);
1287			if (rcu_access_pointer(nfnl_ct_hook))
1288				return -EAGAIN;
1289#endif
1290			return -EOPNOTSUPP;
1291		}
1292	}
1293
1294	rcu_read_lock();
1295	queue = instance_lookup(q, queue_num);
1296	if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
1297		ret = -EPERM;
1298		goto err_out_unlock;
1299	}
1300
1301	if (cmd != NULL) {
1302		switch (cmd->command) {
1303		case NFQNL_CFG_CMD_BIND:
1304			if (queue) {
1305				ret = -EBUSY;
1306				goto err_out_unlock;
1307			}
1308			queue = instance_create(q, queue_num,
1309						NETLINK_CB(skb).portid);
1310			if (IS_ERR(queue)) {
1311				ret = PTR_ERR(queue);
1312				goto err_out_unlock;
1313			}
1314			break;
1315		case NFQNL_CFG_CMD_UNBIND:
1316			if (!queue) {
1317				ret = -ENODEV;
1318				goto err_out_unlock;
1319			}
1320			instance_destroy(q, queue);
1321			goto err_out_unlock;
1322		case NFQNL_CFG_CMD_PF_BIND:
1323		case NFQNL_CFG_CMD_PF_UNBIND:
1324			break;
1325		default:
1326			ret = -ENOTSUPP;
1327			goto err_out_unlock;
1328		}
1329	}
1330
1331	if (!queue) {
1332		ret = -ENODEV;
1333		goto err_out_unlock;
1334	}
1335
1336	if (nfqa[NFQA_CFG_PARAMS]) {
1337		struct nfqnl_msg_config_params *params =
1338			nla_data(nfqa[NFQA_CFG_PARAMS]);
1339
1340		nfqnl_set_mode(queue, params->copy_mode,
1341				ntohl(params->copy_range));
1342	}
1343
1344	if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
1345		__be32 *queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
1346
1347		spin_lock_bh(&queue->lock);
1348		queue->queue_maxlen = ntohl(*queue_maxlen);
1349		spin_unlock_bh(&queue->lock);
1350	}
1351
1352	if (nfqa[NFQA_CFG_FLAGS]) {
1353		spin_lock_bh(&queue->lock);
1354		queue->flags &= ~mask;
1355		queue->flags |= flags & mask;
1356		spin_unlock_bh(&queue->lock);
1357	}
1358
1359err_out_unlock:
1360	rcu_read_unlock();
1361	return ret;
1362}
1363
1364static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
1365	[NFQNL_MSG_PACKET]	= {
1366		.call		= nfqnl_recv_unsupp,
1367		.type		= NFNL_CB_RCU,
1368		.attr_count	= NFQA_MAX,
1369	},
1370	[NFQNL_MSG_VERDICT]	= {
1371		.call		= nfqnl_recv_verdict,
1372		.type		= NFNL_CB_RCU,
1373		.attr_count	= NFQA_MAX,
1374		.policy		= nfqa_verdict_policy
1375	},
1376	[NFQNL_MSG_CONFIG]	= {
1377		.call		= nfqnl_recv_config,
1378		.type		= NFNL_CB_MUTEX,
1379		.attr_count	= NFQA_CFG_MAX,
1380		.policy		= nfqa_cfg_policy
1381	},
1382	[NFQNL_MSG_VERDICT_BATCH] = {
1383		.call		= nfqnl_recv_verdict_batch,
1384		.type		= NFNL_CB_RCU,
1385		.attr_count	= NFQA_MAX,
1386		.policy		= nfqa_verdict_batch_policy
1387	},
1388};
1389
1390static const struct nfnetlink_subsystem nfqnl_subsys = {
1391	.name		= "nf_queue",
1392	.subsys_id	= NFNL_SUBSYS_QUEUE,
1393	.cb_count	= NFQNL_MSG_MAX,
1394	.cb		= nfqnl_cb,
1395};
1396
1397#ifdef CONFIG_PROC_FS
1398struct iter_state {
1399	struct seq_net_private p;
1400	unsigned int bucket;
1401};
1402
1403static struct hlist_node *get_first(struct seq_file *seq)
1404{
1405	struct iter_state *st = seq->private;
1406	struct net *net;
1407	struct nfnl_queue_net *q;
1408
1409	if (!st)
1410		return NULL;
1411
1412	net = seq_file_net(seq);
1413	q = nfnl_queue_pernet(net);
1414	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
1415		if (!hlist_empty(&q->instance_table[st->bucket]))
1416			return q->instance_table[st->bucket].first;
1417	}
1418	return NULL;
1419}
1420
1421static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
1422{
1423	struct iter_state *st = seq->private;
1424	struct net *net = seq_file_net(seq);
1425
1426	h = h->next;
1427	while (!h) {
1428		struct nfnl_queue_net *q;
1429
1430		if (++st->bucket >= INSTANCE_BUCKETS)
1431			return NULL;
1432
1433		q = nfnl_queue_pernet(net);
1434		h = q->instance_table[st->bucket].first;
1435	}
1436	return h;
1437}
1438
1439static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
1440{
1441	struct hlist_node *head;
1442	head = get_first(seq);
1443
1444	if (head)
1445		while (pos && (head = get_next(seq, head)))
1446			pos--;
1447	return pos ? NULL : head;
1448}
1449
1450static void *seq_start(struct seq_file *s, loff_t *pos)
1451	__acquires(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
1452{
1453	spin_lock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
1454	return get_idx(s, *pos);
1455}
1456
1457static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
1458{
1459	(*pos)++;
1460	return get_next(s, v);
1461}
1462
1463static void seq_stop(struct seq_file *s, void *v)
1464	__releases(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
1465{
1466	spin_unlock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
1467}
1468
1469static int seq_show(struct seq_file *s, void *v)
1470{
1471	const struct nfqnl_instance *inst = v;
1472
1473	seq_printf(s, "%5u %6u %5u %1u %5u %5u %5u %8u %2d\n",
1474		   inst->queue_num,
1475		   inst->peer_portid, inst->queue_total,
1476		   inst->copy_mode, inst->copy_range,
1477		   inst->queue_dropped, inst->queue_user_dropped,
1478		   inst->id_sequence, 1);
1479	return 0;
1480}
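/* Illustrative note, not in the original file: this is the line format of
 * /proc/net/netfilter/nfnetlink_queue. Columns: queue number, peer portid,
 * current queue length, copy mode, copy range, packets dropped with the
 * queue full, packets dropped on failed delivery to userspace, the
 * packet-id sequence counter, and a constant 1.
 */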
1481
1482static const struct seq_operations nfqnl_seq_ops = {
1483	.start	= seq_start,
1484	.next	= seq_next,
1485	.stop	= seq_stop,
1486	.show	= seq_show,
1487};
1488#endif /* PROC_FS */
1489
1490static int __net_init nfnl_queue_net_init(struct net *net)
1491{
1492	unsigned int i;
1493	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
1494
1495	for (i = 0; i < INSTANCE_BUCKETS; i++)
1496		INIT_HLIST_HEAD(&q->instance_table[i]);
1497
1498	spin_lock_init(&q->instances_lock);
1499
1500#ifdef CONFIG_PROC_FS
1501	if (!proc_create_net("nfnetlink_queue", 0440, net->nf.proc_netfilter,
1502			&nfqnl_seq_ops, sizeof(struct iter_state)))
1503		return -ENOMEM;
1504#endif
1505	nf_register_queue_handler(net, &nfqh);
1506	return 0;
1507}
1508
1509static void __net_exit nfnl_queue_net_exit(struct net *net)
1510{
1511	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
1512	unsigned int i;
1513
1514	nf_unregister_queue_handler(net);
1515#ifdef CONFIG_PROC_FS
1516	remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
1517#endif
1518	for (i = 0; i < INSTANCE_BUCKETS; i++)
1519		WARN_ON_ONCE(!hlist_empty(&q->instance_table[i]));
1520}
1521
1522static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list)
1523{
1524	synchronize_rcu();
1525}
1526
1527static struct pernet_operations nfnl_queue_net_ops = {
1528	.init		= nfnl_queue_net_init,
1529	.exit		= nfnl_queue_net_exit,
1530	.exit_batch	= nfnl_queue_net_exit_batch,
1531	.id		= &nfnl_queue_net_id,
1532	.size		= sizeof(struct nfnl_queue_net),
1533};
1534
1535static int __init nfnetlink_queue_init(void)
1536{
1537	int status;
1538
1539	status = register_pernet_subsys(&nfnl_queue_net_ops);
1540	if (status < 0) {
1541		pr_err("failed to register pernet ops\n");
1542		goto out;
1543	}
1544
1545	netlink_register_notifier(&nfqnl_rtnl_notifier);
1546	status = nfnetlink_subsys_register(&nfqnl_subsys);
1547	if (status < 0) {
1548		pr_err("failed to create netlink socket\n");
1549		goto cleanup_netlink_notifier;
1550	}
1551
1552	status = register_netdevice_notifier(&nfqnl_dev_notifier);
1553	if (status < 0) {
1554		pr_err("failed to register netdevice notifier\n");
1555		goto cleanup_netlink_subsys;
1556	}
1557
1558	return status;
1559
1560cleanup_netlink_subsys:
1561	nfnetlink_subsys_unregister(&nfqnl_subsys);
1562cleanup_netlink_notifier:
1563	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
1564	unregister_pernet_subsys(&nfnl_queue_net_ops);
1565out:
1566	return status;
1567}
1568
1569static void __exit nfnetlink_queue_fini(void)
1570{
1571	unregister_netdevice_notifier(&nfqnl_dev_notifier);
1572	nfnetlink_subsys_unregister(&nfqnl_subsys);
1573	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
1574	unregister_pernet_subsys(&nfnl_queue_net_ops);
1575
1576	rcu_barrier(); /* Wait for completion of call_rcu()'s */
1577}
1578
1579MODULE_DESCRIPTION("netfilter packet queue handler");
1580MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
1581MODULE_LICENSE("GPL");
1582MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);
1583
1584module_init(nfnetlink_queue_init);
1585module_exit(nfnetlink_queue_fini);
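Before the second version of the file, a sketch of the other side of the protocol. The program below is a minimal, illustrative userspace consumer written against libnetfilter_queue's documented API; the queue number (0) and the iptables rule in the comment are arbitrary choices, and error handling is pared down. Binding the queue sends NFQNL_CFG_CMD_BIND (handled by nfqnl_recv_config() above), nfq_set_mode() maps to NFQA_CFG_PARAMS with NFQNL_COPY_PACKET, and each verdict becomes an NFQNL_MSG_VERDICT message processed by nfqnl_recv_verdict(). The PF_BIND/PF_UNBIND commands that older examples send first are obsolete no-ops in both versions shown.

/* Minimal nfnetlink_queue consumer; pair with a rule such as:
 *   iptables -A INPUT -p icmp -j NFQUEUE --queue-num 0
 */
#include <stdint.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/netfilter.h>		/* NF_ACCEPT */
#include <libnetfilter_queue/libnetfilter_queue.h>

static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
	      struct nfq_data *nfa, void *data)
{
	struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);
	uint32_t id = ph ? ntohl(ph->packet_id) : 0;

	/* Reinject unmodified: becomes an NFQNL_MSG_VERDICT message. */
	return nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
}

int main(void)
{
	struct nfq_handle *h = nfq_open();
	struct nfq_q_handle *qh;
	char buf[65536];
	int fd, rv;

	if (!h)
		exit(1);

	qh = nfq_create_queue(h, 0, &cb, NULL);	/* queue 0 is arbitrary */
	if (!qh)
		exit(1);

	/* A 0xffff range is clamped by the kernel to NFQNL_MAX_COPY_RANGE. */
	nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);

	fd = nfq_fd(h);
	while ((rv = recv(fd, buf, sizeof(buf), 0)) >= 0)
		nfq_handle_packet(h, buf, rv);

	nfq_destroy_queue(qh);
	nfq_close(h);
	return 0;
}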
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * This is a module which is used for queueing packets and communicating with
   4 * userspace via nfnetlink.
   5 *
   6 * (C) 2005 by Harald Welte <laforge@netfilter.org>
   7 * (C) 2007 by Patrick McHardy <kaber@trash.net>
   8 *
   9 * Based on the old ipv4-only ip_queue.c:
  10 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
  11 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
  12 */
  13
  14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  15
  16#include <linux/module.h>
  17#include <linux/skbuff.h>
  18#include <linux/init.h>
  19#include <linux/spinlock.h>
  20#include <linux/slab.h>
  21#include <linux/notifier.h>
  22#include <linux/netdevice.h>
  23#include <linux/netfilter.h>
  24#include <linux/proc_fs.h>
  25#include <linux/netfilter_ipv4.h>
  26#include <linux/netfilter_ipv6.h>
  27#include <linux/netfilter_bridge.h>
  28#include <linux/netfilter/nfnetlink.h>
  29#include <linux/netfilter/nfnetlink_queue.h>
  30#include <linux/netfilter/nf_conntrack_common.h>
  31#include <linux/list.h>
  32#include <linux/cgroup-defs.h>
  33#include <net/gso.h>
  34#include <net/sock.h>
  35#include <net/tcp_states.h>
  36#include <net/netfilter/nf_queue.h>
  37#include <net/netns/generic.h>
  38
  39#include <linux/atomic.h>
  40
  41#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
  42#include "../bridge/br_private.h"
  43#endif
  44
  45#if IS_ENABLED(CONFIG_NF_CONNTRACK)
  46#include <net/netfilter/nf_conntrack.h>
  47#endif
  48
  49#define NFQNL_QMAX_DEFAULT 1024
  50
  51/* We're using struct nlattr which has 16bit nla_len. Note that nla_len
  52 * includes the header length. Thus, the maximum packet length that we
  53 * support is 65531 bytes. We send truncated packets if the specified length
  54 * is larger than that.  Userspace can check for presence of NFQA_CAP_LEN
  55 * attribute to detect truncation.
  56 */
  57#define NFQNL_MAX_COPY_RANGE (0xffff - NLA_HDRLEN)
  58
  59struct nfqnl_instance {
  60	struct hlist_node hlist;		/* global list of queues */
  61	struct rcu_head rcu;
  62
  63	u32 peer_portid;
  64	unsigned int queue_maxlen;
  65	unsigned int copy_range;
  66	unsigned int queue_dropped;
  67	unsigned int queue_user_dropped;
  68
  69
  70	u_int16_t queue_num;			/* number of this queue */
  71	u_int8_t copy_mode;
  72	u_int32_t flags;			/* Set using NFQA_CFG_FLAGS */
  73/*
  74 * Following fields are dirtied for each queued packet,
  75 * keep them in same cache line if possible.
  76 */
  77	spinlock_t	lock	____cacheline_aligned_in_smp;
  78	unsigned int	queue_total;
  79	unsigned int	id_sequence;		/* 'sequence' of pkt ids */
  80	struct list_head queue_list;		/* packets in queue */
  81};
  82
  83typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);
  84
  85static unsigned int nfnl_queue_net_id __read_mostly;
  86
  87#define INSTANCE_BUCKETS	16
  88struct nfnl_queue_net {
  89	spinlock_t instances_lock;
  90	struct hlist_head instance_table[INSTANCE_BUCKETS];
  91};
  92
  93static struct nfnl_queue_net *nfnl_queue_pernet(struct net *net)
  94{
  95	return net_generic(net, nfnl_queue_net_id);
  96}
  97
  98static inline u_int8_t instance_hashfn(u_int16_t queue_num)
  99{
 100	return ((queue_num >> 8) ^ queue_num) % INSTANCE_BUCKETS;
 101}
 102
 103static struct nfqnl_instance *
 104instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num)
 105{
 106	struct hlist_head *head;
 107	struct nfqnl_instance *inst;
 108
 109	head = &q->instance_table[instance_hashfn(queue_num)];
 110	hlist_for_each_entry_rcu(inst, head, hlist) {
 111		if (inst->queue_num == queue_num)
 112			return inst;
 113	}
 114	return NULL;
 115}
 116
 117static struct nfqnl_instance *
 118instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid)
 119{
 120	struct nfqnl_instance *inst;
 121	unsigned int h;
 122	int err;
 123
 124	spin_lock(&q->instances_lock);
 125	if (instance_lookup(q, queue_num)) {
 126		err = -EEXIST;
 127		goto out_unlock;
 128	}
 129
 130	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
 131	if (!inst) {
 132		err = -ENOMEM;
 133		goto out_unlock;
 134	}
 135
 136	inst->queue_num = queue_num;
 137	inst->peer_portid = portid;
 138	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
 139	inst->copy_range = NFQNL_MAX_COPY_RANGE;
 140	inst->copy_mode = NFQNL_COPY_NONE;
 141	spin_lock_init(&inst->lock);
 142	INIT_LIST_HEAD(&inst->queue_list);
 143
 144	if (!try_module_get(THIS_MODULE)) {
 145		err = -EAGAIN;
 146		goto out_free;
 147	}
 148
 149	h = instance_hashfn(queue_num);
 150	hlist_add_head_rcu(&inst->hlist, &q->instance_table[h]);
 151
 152	spin_unlock(&q->instances_lock);
 153
 154	return inst;
 155
 156out_free:
 157	kfree(inst);
 158out_unlock:
 159	spin_unlock(&q->instances_lock);
 160	return ERR_PTR(err);
 161}
 162
 163static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
 164			unsigned long data);
 165
 166static void
 167instance_destroy_rcu(struct rcu_head *head)
 168{
 169	struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
 170						   rcu);
 171
 172	rcu_read_lock();
 173	nfqnl_flush(inst, NULL, 0);
 174	rcu_read_unlock();
 175	kfree(inst);
 176	module_put(THIS_MODULE);
 177}
 178
 179static void
 180__instance_destroy(struct nfqnl_instance *inst)
 181{
 182	hlist_del_rcu(&inst->hlist);
 183	call_rcu(&inst->rcu, instance_destroy_rcu);
 184}
 185
 186static void
 187instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst)
 188{
 189	spin_lock(&q->instances_lock);
 190	__instance_destroy(inst);
 191	spin_unlock(&q->instances_lock);
 192}
 193
 194static inline void
 195__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
 196{
 197       list_add_tail(&entry->list, &queue->queue_list);
 198       queue->queue_total++;
 199}
 200
 201static void
 202__dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
 203{
 204	list_del(&entry->list);
 205	queue->queue_total--;
 206}
 207
 208static struct nf_queue_entry *
 209find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
 210{
 211	struct nf_queue_entry *entry = NULL, *i;
 212
 213	spin_lock_bh(&queue->lock);
 214
 215	list_for_each_entry(i, &queue->queue_list, list) {
 216		if (i->id == id) {
 217			entry = i;
 218			break;
 219		}
 220	}
 221
 222	if (entry)
 223		__dequeue_entry(queue, entry);
 224
 225	spin_unlock_bh(&queue->lock);
 226
 227	return entry;
 228}
 229
 230static unsigned int nf_iterate(struct sk_buff *skb,
 231			       struct nf_hook_state *state,
 232			       const struct nf_hook_entries *hooks,
 233			       unsigned int *index)
 234{
 235	const struct nf_hook_entry *hook;
 236	unsigned int verdict, i = *index;
 237
 238	while (i < hooks->num_hook_entries) {
 239		hook = &hooks->hooks[i];
 240repeat:
 241		verdict = nf_hook_entry_hookfn(hook, skb, state);
 242		if (verdict != NF_ACCEPT) {
 243			*index = i;
 244			if (verdict != NF_REPEAT)
 245				return verdict;
 246			goto repeat;
 247		}
 248		i++;
 249	}
 250
 251	*index = i;
 252	return NF_ACCEPT;
 253}
 254
 255static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
 256{
 257	switch (pf) {
 258#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
 259	case NFPROTO_BRIDGE:
 260		return rcu_dereference(net->nf.hooks_bridge[hooknum]);
 261#endif
 262	case NFPROTO_IPV4:
 263		return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
 264	case NFPROTO_IPV6:
 265		return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
 266	default:
 267		WARN_ON_ONCE(1);
 268		return NULL;
 269	}
 270
 271	return NULL;
 272}
 273
 274static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
 275{
 276#ifdef CONFIG_INET
 277	const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
 278
 279	if (entry->state.hook == NF_INET_LOCAL_OUT) {
 280		const struct iphdr *iph = ip_hdr(skb);
 281
 282		if (!(iph->tos == rt_info->tos &&
 283		      skb->mark == rt_info->mark &&
 284		      iph->daddr == rt_info->daddr &&
 285		      iph->saddr == rt_info->saddr))
 286			return ip_route_me_harder(entry->state.net, entry->state.sk,
 287						  skb, RTN_UNSPEC);
 288	}
 289#endif
 290	return 0;
 291}
 292
 293static int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry)
 294{
 295	const struct nf_ipv6_ops *v6ops;
 296	int ret = 0;
 297
 298	switch (entry->state.pf) {
 299	case AF_INET:
 300		ret = nf_ip_reroute(skb, entry);
 301		break;
 302	case AF_INET6:
 303		v6ops = rcu_dereference(nf_ipv6_ops);
 304		if (v6ops)
 305			ret = v6ops->reroute(skb, entry);
 306		break;
 307	}
 308	return ret;
 309}
 310
 311/* caller must hold rcu read-side lock */
 312static void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 313{
 314	const struct nf_hook_entry *hook_entry;
 315	const struct nf_hook_entries *hooks;
 316	struct sk_buff *skb = entry->skb;
 317	const struct net *net;
 318	unsigned int i;
 319	int err;
 320	u8 pf;
 321
 322	net = entry->state.net;
 323	pf = entry->state.pf;
 324
 325	hooks = nf_hook_entries_head(net, pf, entry->state.hook);
 326
 327	i = entry->hook_index;
 328	if (!hooks || i >= hooks->num_hook_entries) {
 329		kfree_skb_reason(skb, SKB_DROP_REASON_NETFILTER_DROP);
 330		nf_queue_entry_free(entry);
 331		return;
 332	}
 333
 334	hook_entry = &hooks->hooks[i];
 335
 336	/* Continue traversal iff userspace said ok... */
 337	if (verdict == NF_REPEAT)
 338		verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);
 339
 340	if (verdict == NF_ACCEPT) {
 341		if (nf_reroute(skb, entry) < 0)
 342			verdict = NF_DROP;
 343	}
 344
 345	if (verdict == NF_ACCEPT) {
 346next_hook:
 347		++i;
 348		verdict = nf_iterate(skb, &entry->state, hooks, &i);
 349	}
 350
 351	switch (verdict & NF_VERDICT_MASK) {
 352	case NF_ACCEPT:
 353	case NF_STOP:
 354		local_bh_disable();
 355		entry->state.okfn(entry->state.net, entry->state.sk, skb);
 356		local_bh_enable();
 357		break;
 358	case NF_QUEUE:
 359		err = nf_queue(skb, &entry->state, i, verdict);
 360		if (err == 1)
 361			goto next_hook;
 362		break;
 363	case NF_STOLEN:
 364		break;
 365	default:
 366		kfree_skb(skb);
 367	}
 368
 369	nf_queue_entry_free(entry);
 370}
 371
 372static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 373{
 374	const struct nf_ct_hook *ct_hook;
 375
 376	if (verdict == NF_ACCEPT ||
 377	    verdict == NF_REPEAT ||
 378	    verdict == NF_STOP) {
 379		unsigned int ct_verdict = verdict;
 380
 381		rcu_read_lock();
 382		ct_hook = rcu_dereference(nf_ct_hook);
 383		if (ct_hook)
 384			ct_verdict = ct_hook->update(entry->state.net, entry->skb);
 385		rcu_read_unlock();
 386
 387		switch (ct_verdict & NF_VERDICT_MASK) {
 388		case NF_ACCEPT:
 389			/* follow userspace verdict, could be REPEAT */
 390			break;
 391		case NF_STOLEN:
 392			nf_queue_entry_free(entry);
 393			return;
 394		default:
 395			verdict = ct_verdict & NF_VERDICT_MASK;
 396			break;
 397		}
 398	}
 399	nf_reinject(entry, verdict);
 400}
 401
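/* Drop (reinject with NF_DROP) every queued entry that matches cmpfn;
 * a NULL cmpfn flushes the entire queue.
 */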
 402static void
 403nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
 404{
 405	struct nf_queue_entry *entry, *next;
 406
 407	spin_lock_bh(&queue->lock);
 408	list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
 409		if (!cmpfn || cmpfn(entry, data)) {
 410			list_del(&entry->list);
 411			queue->queue_total--;
 412			nfqnl_reinject(entry, NF_DROP);
 413		}
 414	}
 415	spin_unlock_bh(&queue->lock);
 416}
 417
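/* Emit NFQA_SKB_INFO so userspace can tell whether the checksum is
 * incomplete or unverified and whether the packet is GSO.
 */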
 418static int
 419nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet,
 420		      bool csum_verify)
 421{
 422	__u32 flags = 0;
 423
 424	if (packet->ip_summed == CHECKSUM_PARTIAL)
 425		flags = NFQA_SKB_CSUMNOTREADY;
 426	else if (csum_verify)
 427		flags = NFQA_SKB_CSUM_NOTVERIFIED;
 428
 429	if (skb_is_gso(packet))
 430		flags |= NFQA_SKB_GSO;
 431
 432	return flags ? nla_put_be32(nlskb, NFQA_SKB_INFO, htonl(flags)) : 0;
 433}
 434
 435static int nfqnl_put_sk_uidgid(struct sk_buff *skb, struct sock *sk)
 436{
 437	const struct cred *cred;
 438
 439	if (!sk_fullsock(sk))
 440		return 0;
 441
 442	read_lock_bh(&sk->sk_callback_lock);
 443	if (sk->sk_socket && sk->sk_socket->file) {
 444		cred = sk->sk_socket->file->f_cred;
 445		if (nla_put_be32(skb, NFQA_UID,
 446		    htonl(from_kuid_munged(&init_user_ns, cred->fsuid))))
 447			goto nla_put_failure;
 448		if (nla_put_be32(skb, NFQA_GID,
 449		    htonl(from_kgid_munged(&init_user_ns, cred->fsgid))))
 450			goto nla_put_failure;
 451	}
 452	read_unlock_bh(&sk->sk_callback_lock);
 453	return 0;
 454
 455nla_put_failure:
 456	read_unlock_bh(&sk->sk_callback_lock);
 457	return -1;
 458}
 459
 460static int nfqnl_put_sk_classid(struct sk_buff *skb, struct sock *sk)
 461{
 462#if IS_ENABLED(CONFIG_CGROUP_NET_CLASSID)
 463	if (sk && sk_fullsock(sk)) {
 464		u32 classid = sock_cgroup_classid(&sk->sk_cgrp_data);
 465
 466		if (classid && nla_put_be32(skb, NFQA_CGROUP_CLASSID, htonl(classid)))
 467			return -1;
 468	}
 469#endif
 470	return 0;
 471}
 472
 473static u32 nfqnl_get_sk_secctx(struct sk_buff *skb, char **secdata)
 474{
 475	u32 seclen = 0;
 476#if IS_ENABLED(CONFIG_NETWORK_SECMARK)
 477	if (!skb || !sk_fullsock(skb->sk))
 478		return 0;
 479
 480	read_lock_bh(&skb->sk->sk_callback_lock);
 481
 482	if (skb->secmark)
 483		security_secid_to_secctx(skb->secmark, secdata, &seclen);
 484
 485	read_unlock_bh(&skb->sk->sk_callback_lock);
 486#endif
 487	return seclen;
 488}
 489
 490static u32 nfqnl_get_bridge_size(struct nf_queue_entry *entry)
 491{
 492	struct sk_buff *entskb = entry->skb;
 493	u32 nlalen = 0;
 494
 495	if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
 496		return 0;
 497
 498	if (skb_vlan_tag_present(entskb))
 499		nlalen += nla_total_size(nla_total_size(sizeof(__be16)) +
 500					 nla_total_size(sizeof(__be16)));
 501
 502	if (entskb->network_header > entskb->mac_header)
 503		nlalen += nla_total_size((entskb->network_header -
 504					  entskb->mac_header));
 505
 506	return nlalen;
 507}
 508
 509static int nfqnl_put_bridge(struct nf_queue_entry *entry, struct sk_buff *skb)
 510{
 511	struct sk_buff *entskb = entry->skb;
 512
 513	if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
 514		return 0;
 515
 516	if (skb_vlan_tag_present(entskb)) {
 517		struct nlattr *nest;
 518
 519		nest = nla_nest_start(skb, NFQA_VLAN);
 520		if (!nest)
 521			goto nla_put_failure;
 522
 523		if (nla_put_be16(skb, NFQA_VLAN_TCI, htons(entskb->vlan_tci)) ||
 524		    nla_put_be16(skb, NFQA_VLAN_PROTO, entskb->vlan_proto))
 525			goto nla_put_failure;
 526
 527		nla_nest_end(skb, nest);
 528	}
 529
 530	if (entskb->mac_header < entskb->network_header) {
 531		int len = (int)(entskb->network_header - entskb->mac_header);
 532
 533		if (nla_put(skb, NFQA_L2HDR, len, skb_mac_header(entskb)))
 534			goto nla_put_failure;
 535	}
 536
 537	return 0;
 538
 539nla_put_failure:
 540	return -1;
 541}
 542
 543static int nf_queue_checksum_help(struct sk_buff *entskb)
 544{
 545	if (skb_csum_is_sctp(entskb))
 546		return skb_crc32c_csum_help(entskb);
 547
 548	return skb_checksum_help(entskb);
 549}
 550
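/* Build the NFQNL_MSG_PACKET netlink message for a queued entry. The
 * allocation size is computed up front for all attributes that may be
 * emitted; *packet_id_ptr is pointed at the packet id field inside the
 * message so the caller can fill it in later, under the queue lock.
 */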
 551static struct sk_buff *
 552nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 553			   struct nf_queue_entry *entry,
 554			   __be32 **packet_id_ptr)
 555{
 556	size_t size;
 557	size_t data_len = 0, cap_len = 0;
 558	unsigned int hlen = 0;
 559	struct sk_buff *skb;
 560	struct nlattr *nla;
 561	struct nfqnl_msg_packet_hdr *pmsg;
 562	struct nlmsghdr *nlh;
 563	struct sk_buff *entskb = entry->skb;
 564	struct net_device *indev;
 565	struct net_device *outdev;
 566	struct nf_conn *ct = NULL;
 567	enum ip_conntrack_info ctinfo = 0;
 568	const struct nfnl_ct_hook *nfnl_ct;
 569	bool csum_verify;
 570	char *secdata = NULL;
 571	u32 seclen = 0;
 572	ktime_t tstamp;
 573
 574	size = nlmsg_total_size(sizeof(struct nfgenmsg))
 575		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
 576		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
 577		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
 578#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 579		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
 580		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
 581#endif
 582		+ nla_total_size(sizeof(u_int32_t))	/* mark */
 583		+ nla_total_size(sizeof(u_int32_t))	/* priority */
 584		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
 585		+ nla_total_size(sizeof(u_int32_t))	/* skbinfo */
 586#if IS_ENABLED(CONFIG_CGROUP_NET_CLASSID)
 587		+ nla_total_size(sizeof(u_int32_t))	/* classid */
 588#endif
 589		+ nla_total_size(sizeof(u_int32_t));	/* cap_len */
 590
 591	tstamp = skb_tstamp_cond(entskb, false);
 592	if (tstamp)
 593		size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));
 594
 595	size += nfqnl_get_bridge_size(entry);
 596
 597	if (entry->state.hook <= NF_INET_FORWARD ||
 598	   (entry->state.hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
 599		csum_verify = !skb_csum_unnecessary(entskb);
 600	else
 601		csum_verify = false;
 602
 603	outdev = entry->state.out;
 604
 605	switch ((enum nfqnl_config_mode)READ_ONCE(queue->copy_mode)) {
 606	case NFQNL_COPY_META:
 607	case NFQNL_COPY_NONE:
 608		break;
 609
 610	case NFQNL_COPY_PACKET:
 611		if (!(queue->flags & NFQA_CFG_F_GSO) &&
 612		    entskb->ip_summed == CHECKSUM_PARTIAL &&
 613		    nf_queue_checksum_help(entskb))
 614			return NULL;
 615
 616		data_len = READ_ONCE(queue->copy_range);
 617		if (data_len > entskb->len)
 618			data_len = entskb->len;
 619
 620		hlen = skb_zerocopy_headlen(entskb);
 621		hlen = min_t(unsigned int, hlen, data_len);
 622		size += sizeof(struct nlattr) + hlen;
 623		cap_len = entskb->len;
 624		break;
 625	}
 626
 627	nfnl_ct = rcu_dereference(nfnl_ct_hook);
 628
 629#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 630	if (queue->flags & NFQA_CFG_F_CONNTRACK) {
 631		if (nfnl_ct != NULL) {
 632			ct = nf_ct_get(entskb, &ctinfo);
 633			if (ct != NULL)
 634				size += nfnl_ct->build_size(ct);
 635		}
 636	}
 637#endif
 638
 639	if (queue->flags & NFQA_CFG_F_UID_GID) {
 640		size += (nla_total_size(sizeof(u_int32_t))	/* uid */
 641			+ nla_total_size(sizeof(u_int32_t)));	/* gid */
 642	}
 643
 644	if ((queue->flags & NFQA_CFG_F_SECCTX) && entskb->sk) {
 645		seclen = nfqnl_get_sk_secctx(entskb, &secdata);
 646		if (seclen)
 647			size += nla_total_size(seclen);
 648	}
 649
 650	skb = alloc_skb(size, GFP_ATOMIC);
 651	if (!skb) {
 652		skb_tx_error(entskb);
 653		goto nlmsg_failure;
 654	}
 655
 656	nlh = nfnl_msg_put(skb, 0, 0,
 657			   nfnl_msg_type(NFNL_SUBSYS_QUEUE, NFQNL_MSG_PACKET),
 658			   0, entry->state.pf, NFNETLINK_V0,
 659			   htons(queue->queue_num));
 660	if (!nlh) {
 661		skb_tx_error(entskb);
 662		kfree_skb(skb);
 663		goto nlmsg_failure;
 664	}
 665
 666	nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
 667	pmsg = nla_data(nla);
 668	pmsg->hw_protocol	= entskb->protocol;
 669	pmsg->hook		= entry->state.hook;
 670	*packet_id_ptr		= &pmsg->packet_id;
 671
 672	indev = entry->state.in;
 673	if (indev) {
 674#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 675		if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
 676			goto nla_put_failure;
 677#else
 678		if (entry->state.pf == PF_BRIDGE) {
 679			/* Case 1: indev is physical input device, we need to
 680			 * look for bridge group (when called from
 681			 * netfilter_bridge) */
 682			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
 683					 htonl(indev->ifindex)) ||
 684			/* this is the bridge group "brX" */
 685			/* rcu_read_lock()ed by __nf_queue */
 686			    nla_put_be32(skb, NFQA_IFINDEX_INDEV,
 687					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
 688				goto nla_put_failure;
 689		} else {
 690			int physinif;
 691
 692			/* Case 2: indev is bridge group, we need to look for
 693			 * physical device (when called from ipv4) */
 694			if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
 695					 htonl(indev->ifindex)))
 696				goto nla_put_failure;
 697
 698			physinif = nf_bridge_get_physinif(entskb);
 699			if (physinif &&
 700			    nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
 701					 htonl(physinif)))
 702				goto nla_put_failure;
 703		}
 704#endif
 705	}
 706
 707	if (outdev) {
 708#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 709		if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
 710			goto nla_put_failure;
 711#else
 712		if (entry->state.pf == PF_BRIDGE) {
 713			/* Case 1: outdev is physical output device, we need to
 714			 * look for bridge group (when called from
 715			 * netfilter_bridge) */
 716			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
 717					 htonl(outdev->ifindex)) ||
 718			/* this is the bridge group "brX" */
 719			/* rcu_read_lock()ed by __nf_queue */
 720			    nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
 721					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
 722				goto nla_put_failure;
 723		} else {
 724			int physoutif;
 725
 726			/* Case 2: outdev is bridge group, we need to look for
 727			 * physical output device (when called from ipv4) */
 728			if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
 729					 htonl(outdev->ifindex)))
 730				goto nla_put_failure;
 731
 732			physoutif = nf_bridge_get_physoutif(entskb);
 733			if (physoutif &&
 734			    nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
 735					 htonl(physoutif)))
 736				goto nla_put_failure;
 737		}
 738#endif
 739	}
 740
 741	if (entskb->mark &&
 742	    nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
 743		goto nla_put_failure;
 744
 745	if (entskb->priority &&
 746	    nla_put_be32(skb, NFQA_PRIORITY, htonl(entskb->priority)))
 747		goto nla_put_failure;
 748
 749	if (indev && entskb->dev &&
 750	    skb_mac_header_was_set(entskb) &&
 751	    skb_mac_header_len(entskb) != 0) {
 752		struct nfqnl_msg_packet_hw phw;
 753		int len;
 754
 755		memset(&phw, 0, sizeof(phw));
 756		len = dev_parse_header(entskb, phw.hw_addr);
 757		if (len) {
 758			phw.hw_addrlen = htons(len);
 759			if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
 760				goto nla_put_failure;
 761		}
 762	}
 763
 764	if (nfqnl_put_bridge(entry, skb) < 0)
 765		goto nla_put_failure;
 766
 767	if (entry->state.hook <= NF_INET_FORWARD && tstamp) {
 768		struct nfqnl_msg_packet_timestamp ts;
 769		struct timespec64 kts = ktime_to_timespec64(tstamp);
 770
 771		ts.sec = cpu_to_be64(kts.tv_sec);
 772		ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);
 773
 774		if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
 775			goto nla_put_failure;
 776	}
 777
 778	if ((queue->flags & NFQA_CFG_F_UID_GID) && entskb->sk &&
 779	    nfqnl_put_sk_uidgid(skb, entskb->sk) < 0)
 780		goto nla_put_failure;
 781
 782	if (nfqnl_put_sk_classid(skb, entskb->sk) < 0)
 783		goto nla_put_failure;
 784
 785	if (seclen && nla_put(skb, NFQA_SECCTX, seclen, secdata))
 786		goto nla_put_failure;
 787
 788	if (ct && nfnl_ct->build(skb, ct, ctinfo, NFQA_CT, NFQA_CT_INFO) < 0)
 789		goto nla_put_failure;
 790
 791	if (cap_len > data_len &&
 792	    nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
 793		goto nla_put_failure;
 794
 795	if (nfqnl_put_packet_info(skb, entskb, csum_verify))
 796		goto nla_put_failure;
 797
 798	if (data_len) {
 799		struct nlattr *nla;
 800
 801		if (skb_tailroom(skb) < sizeof(*nla) + hlen)
 802			goto nla_put_failure;
 803
 804		nla = skb_put(skb, sizeof(*nla));
 805		nla->nla_type = NFQA_PAYLOAD;
 806		nla->nla_len = nla_attr_size(data_len);
 807
 808		if (skb_zerocopy(skb, entskb, data_len, hlen))
 809			goto nla_put_failure;
 810	}
 811
 812	nlh->nlmsg_len = skb->len;
 813	if (seclen)
 814		security_release_secctx(secdata, seclen);
 815	return skb;
 816
 817nla_put_failure:
 818	skb_tx_error(entskb);
 819	kfree_skb(skb);
 820	net_err_ratelimited("nf_queue: error creating packet message\n");
 821nlmsg_failure:
 822	if (seclen)
 823		security_release_secctx(secdata, seclen);
 824	return NULL;
 825}
 826
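/* Returns true if the entry must be dropped because its conntrack entry
 * is dying, or because an unconfirmed conntrack entry turned out to be
 * shared with a clone (see the comment below).
 */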
 827static bool nf_ct_drop_unconfirmed(const struct nf_queue_entry *entry)
 828{
 829#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 830	static const unsigned long flags = IPS_CONFIRMED | IPS_DYING;
 831	struct nf_conn *ct = (void *)skb_nfct(entry->skb);
 832	unsigned long status;
 833	unsigned int use;
 834
 835	if (!ct)
 836		return false;
 837
 838	status = READ_ONCE(ct->status);
 839	if ((status & flags) == IPS_DYING)
 840		return true;
 841
 842	if (status & IPS_CONFIRMED)
 843		return false;
 844
 845	/* in some cases skb_clone() can occur after initial conntrack
 846	 * pickup, but conntrack assumes exclusive skb->_nfct ownership for
 847	 * unconfirmed entries.
 848	 *
 849	 * This happens for br_netfilter and with ip multicast routing.
 850	 * This can't be solved with serialization here because one clone could
 851	 * have been queued for local delivery.
 852	 */
 853	use = refcount_read(&ct->ct_general.use);
 854	if (likely(use == 1))
 855		return false;
 856
 857	/* Can't decrement further? Exclusive ownership. */
 858	if (!refcount_dec_not_one(&ct->ct_general.use))
 859		return false;
 860
 861	skb_set_nfct(entry->skb, 0);
 862	/* No nf_ct_put(): we already decremented .use and it cannot
 863	 * drop down to 0.
 864	 */
 865	return true;
 866#endif
 867	return false;
 868}
 869
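/* Enqueue a single entry on a queue instance and unicast the packet
 * message to the bound peer. With NFQA_CFG_F_FAIL_OPEN, packets that
 * cannot be queued are reinjected with NF_ACCEPT instead of dropped.
 */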
 870static int
 871__nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
 872			struct nf_queue_entry *entry)
 873{
 874	struct sk_buff *nskb;
 875	int err = -ENOBUFS;
 876	__be32 *packet_id_ptr;
 877	int failopen = 0;
 878
 879	nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr);
 880	if (nskb == NULL) {
 881		err = -ENOMEM;
 882		goto err_out;
 883	}
 884	spin_lock_bh(&queue->lock);
 885
 886	if (nf_ct_drop_unconfirmed(entry))
 887		goto err_out_free_nskb;
 888
 889	if (queue->queue_total >= queue->queue_maxlen) {
 890		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
 891			failopen = 1;
 892			err = 0;
 893		} else {
 894			queue->queue_dropped++;
 895			net_warn_ratelimited("nf_queue: full at %d entries, dropping packet(s)\n",
 896					     queue->queue_total);
 897		}
 898		goto err_out_free_nskb;
 899	}
 900	entry->id = ++queue->id_sequence;
 901	*packet_id_ptr = htonl(entry->id);
 902
 903	/* nfnetlink_unicast will either free the nskb or add it to a socket */
 904	err = nfnetlink_unicast(nskb, net, queue->peer_portid);
 905	if (err < 0) {
 906		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
 907			failopen = 1;
 908			err = 0;
 909		} else {
 910			queue->queue_user_dropped++;
 911		}
 912		goto err_out_unlock;
 913	}
 914
 915	__enqueue_entry(queue, entry);
 916
 917	spin_unlock_bh(&queue->lock);
 918	return 0;
 919
 920err_out_free_nskb:
 921	kfree_skb(nskb);
 922err_out_unlock:
 923	spin_unlock_bh(&queue->lock);
 924	if (failopen)
 925		nfqnl_reinject(entry, NF_ACCEPT);
 926err_out:
 927	return err;
 928}
 929
 930static struct nf_queue_entry *
 931nf_queue_entry_dup(struct nf_queue_entry *e)
 932{
 933	struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
 934
 935	if (!entry)
 936		return NULL;
 937
 938	if (nf_queue_entry_get_refs(entry))
 939		return entry;
 940
 941	kfree(entry);
 942	return NULL;
 943}
 944
 945#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 946/* When called from bridge netfilter, skb->data must point to MAC header
 947 * before calling skb_gso_segment(). Otherwise the original MAC header
 948 * is lost and the segmented skbs will be sent to the wrong destination.
 949 */
 950static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
 951{
 952	if (nf_bridge_info_get(skb))
 953		__skb_push(skb, skb->network_header - skb->mac_header);
 954}
 955
 956static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
 957{
 958	if (nf_bridge_info_get(skb))
 959		__skb_pull(skb, skb->network_header - skb->mac_header);
 960}
 961#else
 962#define nf_bridge_adjust_skb_data(s) do {} while (0)
 963#define nf_bridge_adjust_segmented_data(s) do {} while (0)
 964#endif
 965
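/* Enqueue one GSO segment. Every segment except the last needs its own
 * copy of the queue entry; the last segment reuses the original entry.
 */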
 966static int
 967__nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
 968			   struct sk_buff *skb, struct nf_queue_entry *entry)
 969{
 970	int ret = -ENOMEM;
 971	struct nf_queue_entry *entry_seg;
 972
 973	nf_bridge_adjust_segmented_data(skb);
 974
 975	if (skb->next == NULL) { /* last packet, no need to copy entry */
 976		struct sk_buff *gso_skb = entry->skb;
 977		entry->skb = skb;
 978		ret = __nfqnl_enqueue_packet(net, queue, entry);
 979		if (ret)
 980			entry->skb = gso_skb;
 981		return ret;
 982	}
 983
 984	skb_mark_not_on_list(skb);
 985
 986	entry_seg = nf_queue_entry_dup(entry);
 987	if (entry_seg) {
 988		entry_seg->skb = skb;
 989		ret = __nfqnl_enqueue_packet(net, queue, entry_seg);
 990		if (ret)
 991			nf_queue_entry_free(entry_seg);
 992	}
 993	return ret;
 994}
 995
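/* Queue handler entry point (nf_queue_handler::outfn). GSO skbs are
 * software-segmented and each segment is queued separately, unless
 * userspace opted in to receiving GSO packets via NFQA_CFG_F_GSO.
 */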
 996static int
 997nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 998{
 999	unsigned int queued;
1000	struct nfqnl_instance *queue;
1001	struct sk_buff *skb, *segs, *nskb;
1002	int err = -ENOBUFS;
1003	struct net *net = entry->state.net;
1004	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
1005
1006	/* rcu_read_lock()ed by nf_hook_thresh */
1007	queue = instance_lookup(q, queuenum);
1008	if (!queue)
1009		return -ESRCH;
1010
1011	if (queue->copy_mode == NFQNL_COPY_NONE)
1012		return -EINVAL;
1013
1014	skb = entry->skb;
1015
1016	switch (entry->state.pf) {
1017	case NFPROTO_IPV4:
1018		skb->protocol = htons(ETH_P_IP);
1019		break;
1020	case NFPROTO_IPV6:
1021		skb->protocol = htons(ETH_P_IPV6);
1022		break;
1023	}
1024
1025	if (!skb_is_gso(skb) || ((queue->flags & NFQA_CFG_F_GSO) && !skb_is_gso_sctp(skb)))
1026		return __nfqnl_enqueue_packet(net, queue, entry);
1027
1028	nf_bridge_adjust_skb_data(skb);
1029	segs = skb_gso_segment(skb, 0);
1030	/* Does not use PTR_ERR to limit the number of error codes that can be
1031	 * returned by nf_queue.  For instance, callers rely on -ESRCH to
1032	 * mean 'ignore this hook'.
1033	 */
1034	if (IS_ERR_OR_NULL(segs))
1035		goto out_err;
1036	queued = 0;
1037	err = 0;
1038	skb_list_walk_safe(segs, segs, nskb) {
1039		if (err == 0)
1040			err = __nfqnl_enqueue_packet_gso(net, queue,
1041							segs, entry);
1042		if (err == 0)
1043			queued++;
1044		else
1045			kfree_skb(segs);
1046	}
1047
1048	if (queued) {
1049		if (err) /* some segments are already queued */
1050			nf_queue_entry_free(entry);
1051		kfree_skb(skb);
1052		return 0;
1053	}
1054 out_err:
1055	nf_bridge_adjust_segmented_data(skb);
1056	return err;
1057}
1058
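/* Replace the queued packet's payload with data supplied by userspace,
 * shrinking or growing the skb as needed. ip_summed is reset to
 * CHECKSUM_NONE because the payload has changed.
 */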
1059static int
1060nfqnl_mangle(void *data, unsigned int data_len, struct nf_queue_entry *e, int diff)
1061{
1062	struct sk_buff *nskb;
1063
1064	if (diff < 0) {
1065		unsigned int min_len = skb_transport_offset(e->skb);
1066
1067		if (data_len < min_len)
1068			return -EINVAL;
1069
1070		if (pskb_trim(e->skb, data_len))
1071			return -ENOMEM;
1072	} else if (diff > 0) {
1073		if (data_len > 0xFFFF)
1074			return -EINVAL;
1075		if (diff > skb_tailroom(e->skb)) {
1076			nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
1077					       diff, GFP_ATOMIC);
1078			if (!nskb)
1079				return -ENOMEM;
1080			kfree_skb(e->skb);
1081			e->skb = nskb;
1082		}
1083		skb_put(e->skb, diff);
1084	}
1085	if (skb_ensure_writable(e->skb, data_len))
1086		return -ENOMEM;
1087	skb_copy_to_linear_data(e->skb, data, data_len);
1088	e->skb->ip_summed = CHECKSUM_NONE;
1089	return 0;
1090}
1091
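/* Apply a copy mode requested via NFQA_CFG_PARAMS, clamping the copy
 * range to NFQNL_MAX_COPY_RANGE for NFQNL_COPY_PACKET.
 */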
1092static int
1093nfqnl_set_mode(struct nfqnl_instance *queue,
1094	       unsigned char mode, unsigned int range)
1095{
1096	int status = 0;
1097
1098	spin_lock_bh(&queue->lock);
1099	switch (mode) {
1100	case NFQNL_COPY_NONE:
1101	case NFQNL_COPY_META:
1102		queue->copy_mode = mode;
1103		queue->copy_range = 0;
1104		break;
1105
1106	case NFQNL_COPY_PACKET:
1107		queue->copy_mode = mode;
1108		if (range == 0 || range > NFQNL_MAX_COPY_RANGE)
1109			queue->copy_range = NFQNL_MAX_COPY_RANGE;
1110		else
1111			queue->copy_range = range;
1112		break;
1113
1114	default:
1115		status = -EINVAL;
1116
1117	}
1118	spin_unlock_bh(&queue->lock);
1119
1120	return status;
1121}
1122
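/* Match helper for nfqnl_flush(): does this entry reference the given
 * interface index, either directly or via a bridge physdev?
 */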
1123static int
1124dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
1125{
1126#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1127	int physinif, physoutif;
1128
1129	physinif = nf_bridge_get_physinif(entry->skb);
1130	physoutif = nf_bridge_get_physoutif(entry->skb);
1131
1132	if (physinif == ifindex || physoutif == ifindex)
1133		return 1;
1134#endif
1135	if (entry->state.in)
1136		if (entry->state.in->ifindex == ifindex)
1137			return 1;
1138	if (entry->state.out)
1139		if (entry->state.out->ifindex == ifindex)
1140			return 1;
1141
1142	return 0;
1143}
1144
1145/* drop all packets with either indev or outdev == ifindex from all queue
1146 * instances */
1147static void
1148nfqnl_dev_drop(struct net *net, int ifindex)
1149{
1150	int i;
1151	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
1152
1153	rcu_read_lock();
1154
1155	for (i = 0; i < INSTANCE_BUCKETS; i++) {
1156		struct nfqnl_instance *inst;
1157		struct hlist_head *head = &q->instance_table[i];
1158
1159		hlist_for_each_entry_rcu(inst, head, hlist)
1160			nfqnl_flush(inst, dev_cmp, ifindex);
1161	}
1162
1163	rcu_read_unlock();
1164}
1165
1166static int
1167nfqnl_rcv_dev_event(struct notifier_block *this,
1168		    unsigned long event, void *ptr)
1169{
1170	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1171
1172	/* Drop any packets associated with the downed device */
1173	if (event == NETDEV_DOWN)
1174		nfqnl_dev_drop(dev_net(dev), dev->ifindex);
1175	return NOTIFY_DONE;
1176}
1177
1178static struct notifier_block nfqnl_dev_notifier = {
1179	.notifier_call	= nfqnl_rcv_dev_event,
1180};
1181
1182static void nfqnl_nf_hook_drop(struct net *net)
1183{
1184	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
1185	int i;
1186
1187	/* This function is also called on net namespace error unwind,
1188	 * when pernet_ops->init() failed and ->exit() functions of the
1189	 * previous pernet_ops get called.
1190	 *
1191	 * This may result in a call to nfqnl_nf_hook_drop() before
1192	 * struct nfnl_queue_net was allocated.
1193	 */
1194	if (!q)
1195		return;
1196
1197	for (i = 0; i < INSTANCE_BUCKETS; i++) {
1198		struct nfqnl_instance *inst;
1199		struct hlist_head *head = &q->instance_table[i];
1200
1201		hlist_for_each_entry_rcu(inst, head, hlist)
1202			nfqnl_flush(inst, NULL, 0);
1203	}
1204}
1205
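/* On NETLINK_URELEASE, destroy every queue instance bound to the portid
 * of the socket that was closed.
 */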
1206static int
1207nfqnl_rcv_nl_event(struct notifier_block *this,
1208		   unsigned long event, void *ptr)
1209{
1210	struct netlink_notify *n = ptr;
1211	struct nfnl_queue_net *q = nfnl_queue_pernet(n->net);
1212
1213	if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
1214		int i;
1215
1216		/* destroy all instances for this portid */
1217		spin_lock(&q->instances_lock);
1218		for (i = 0; i < INSTANCE_BUCKETS; i++) {
1219			struct hlist_node *t2;
1220			struct nfqnl_instance *inst;
1221			struct hlist_head *head = &q->instance_table[i];
1222
1223			hlist_for_each_entry_safe(inst, t2, head, hlist) {
1224				if (n->portid == inst->peer_portid)
1225					__instance_destroy(inst);
1226			}
1227		}
1228		spin_unlock(&q->instances_lock);
1229	}
1230	return NOTIFY_DONE;
1231}
1232
1233static struct notifier_block nfqnl_rtnl_notifier = {
1234	.notifier_call	= nfqnl_rcv_nl_event,
1235};
1236
1237static const struct nla_policy nfqa_vlan_policy[NFQA_VLAN_MAX + 1] = {
1238	[NFQA_VLAN_TCI]		= { .type = NLA_U16},
1239	[NFQA_VLAN_PROTO]	= { .type = NLA_U16},
1240};
1241
1242static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
1243	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
1244	[NFQA_MARK]		= { .type = NLA_U32 },
1245	[NFQA_PAYLOAD]		= { .type = NLA_UNSPEC },
1246	[NFQA_CT]		= { .type = NLA_UNSPEC },
1247	[NFQA_EXP]		= { .type = NLA_UNSPEC },
1248	[NFQA_VLAN]		= { .type = NLA_NESTED },
1249	[NFQA_PRIORITY]		= { .type = NLA_U32 },
1250};
1251
1252static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
1253	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
1254	[NFQA_MARK]		= { .type = NLA_U32 },
1255	[NFQA_PRIORITY]		= { .type = NLA_U32 },
1256};
1257
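/* Look up a queue instance for a verdict request; only the peer that
 * bound the queue may issue verdicts on it.
 */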
1258static struct nfqnl_instance *
1259verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, u32 nlportid)
1260{
1261	struct nfqnl_instance *queue;
1262
1263	queue = instance_lookup(q, queue_num);
1264	if (!queue)
1265		return ERR_PTR(-ENODEV);
1266
1267	if (queue->peer_portid != nlportid)
1268		return ERR_PTR(-EPERM);
1269
1270	return queue;
1271}
1272
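/* Extract and sanity-check the verdict header; NF_STOLEN and
 * out-of-range verdicts from userspace are rejected.
 */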
1273static struct nfqnl_msg_verdict_hdr*
1274verdicthdr_get(const struct nlattr * const nfqa[])
1275{
1276	struct nfqnl_msg_verdict_hdr *vhdr;
1277	unsigned int verdict;
1278
1279	if (!nfqa[NFQA_VERDICT_HDR])
1280		return NULL;
1281
1282	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
1283	verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
1284	if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
1285		return NULL;
1286	return vhdr;
1287}
1288
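/* Wrap-around safe "id is after max" comparison, analogous to
 * time_after().
 */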
1289static int nfq_id_after(unsigned int id, unsigned int max)
1290{
1291	return (int)(id - max) > 0;
1292}
1293
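/* NFQNL_MSG_VERDICT_BATCH: apply one verdict to all queued entries whose
 * id is not after the id given in the verdict header.
 */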
1294static int nfqnl_recv_verdict_batch(struct sk_buff *skb,
1295				    const struct nfnl_info *info,
1296				    const struct nlattr * const nfqa[])
1297{
1298	struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
1299	u16 queue_num = ntohs(info->nfmsg->res_id);
1300	struct nf_queue_entry *entry, *tmp;
1301	struct nfqnl_msg_verdict_hdr *vhdr;
1302	struct nfqnl_instance *queue;
1303	unsigned int verdict, maxid;
1304	LIST_HEAD(batch_list);
1305
1306	queue = verdict_instance_lookup(q, queue_num,
1307					NETLINK_CB(skb).portid);
1308	if (IS_ERR(queue))
1309		return PTR_ERR(queue);
1310
1311	vhdr = verdicthdr_get(nfqa);
1312	if (!vhdr)
1313		return -EINVAL;
1314
1315	verdict = ntohl(vhdr->verdict);
1316	maxid = ntohl(vhdr->id);
1317
1318	spin_lock_bh(&queue->lock);
1319
1320	list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) {
1321		if (nfq_id_after(entry->id, maxid))
1322			break;
1323		__dequeue_entry(queue, entry);
1324		list_add_tail(&entry->list, &batch_list);
1325	}
1326
1327	spin_unlock_bh(&queue->lock);
1328
1329	if (list_empty(&batch_list))
1330		return -ENOENT;
1331
1332	list_for_each_entry_safe(entry, tmp, &batch_list, list) {
1333		if (nfqa[NFQA_MARK])
1334			entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
1335
1336		if (nfqa[NFQA_PRIORITY])
1337			entry->skb->priority = ntohl(nla_get_be32(nfqa[NFQA_PRIORITY]));
1338
1339		nfqnl_reinject(entry, verdict);
1340	}
1341	return 0;
1342}
1343
1344static struct nf_conn *nfqnl_ct_parse(const struct nfnl_ct_hook *nfnl_ct,
1345				      const struct nlmsghdr *nlh,
1346				      const struct nlattr * const nfqa[],
1347				      struct nf_queue_entry *entry,
1348				      enum ip_conntrack_info *ctinfo)
1349{
1350#if IS_ENABLED(CONFIG_NF_CONNTRACK)
1351	struct nf_conn *ct;
1352
1353	ct = nf_ct_get(entry->skb, ctinfo);
1354	if (ct == NULL)
1355		return NULL;
1356
1357	if (nfnl_ct->parse(nfqa[NFQA_CT], ct) < 0)
1358		return NULL;
1359
1360	if (nfqa[NFQA_EXP])
1361		nfnl_ct->attach_expect(nfqa[NFQA_EXP], ct,
1362				      NETLINK_CB(entry->skb).portid,
1363				      nlmsg_report(nlh));
1364	return ct;
1365#else
1366	return NULL;
1367#endif
1368}
1369
1370static int nfqa_parse_bridge(struct nf_queue_entry *entry,
1371			     const struct nlattr * const nfqa[])
1372{
1373	if (nfqa[NFQA_VLAN]) {
1374		struct nlattr *tb[NFQA_VLAN_MAX + 1];
1375		int err;
1376
1377		err = nla_parse_nested_deprecated(tb, NFQA_VLAN_MAX,
1378						  nfqa[NFQA_VLAN],
1379						  nfqa_vlan_policy, NULL);
1380		if (err < 0)
1381			return err;
1382
1383		if (!tb[NFQA_VLAN_TCI] || !tb[NFQA_VLAN_PROTO])
1384			return -EINVAL;
1385
1386		__vlan_hwaccel_put_tag(entry->skb,
1387			nla_get_be16(tb[NFQA_VLAN_PROTO]),
1388			ntohs(nla_get_be16(tb[NFQA_VLAN_TCI])));
1389	}
1390
1391	if (nfqa[NFQA_L2HDR]) {
1392		int mac_header_len = entry->skb->network_header -
1393			entry->skb->mac_header;
1394
1395		if (mac_header_len != nla_len(nfqa[NFQA_L2HDR]))
1396			return -EINVAL;
1397		else if (mac_header_len > 0)
1398			memcpy(skb_mac_header(entry->skb),
1399			       nla_data(nfqa[NFQA_L2HDR]),
1400			       mac_header_len);
1401	}
1402
1403	return 0;
1404}
1405
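/* NFQNL_MSG_VERDICT: dequeue the entry with the given packet id, apply
 * the optional conntrack, bridge, payload, mark and priority updates,
 * then reinject the packet with the requested verdict.
 */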
1406static int nfqnl_recv_verdict(struct sk_buff *skb, const struct nfnl_info *info,
1407			      const struct nlattr * const nfqa[])
1408{
1409	struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
1410	u_int16_t queue_num = ntohs(info->nfmsg->res_id);
1411	const struct nfnl_ct_hook *nfnl_ct;
1412	struct nfqnl_msg_verdict_hdr *vhdr;
1413	enum ip_conntrack_info ctinfo;
1414	struct nfqnl_instance *queue;
1415	struct nf_queue_entry *entry;
1416	struct nf_conn *ct = NULL;
1417	unsigned int verdict;
1418	int err;
1419
1420	queue = verdict_instance_lookup(q, queue_num,
1421					NETLINK_CB(skb).portid);
1422	if (IS_ERR(queue))
1423		return PTR_ERR(queue);
1424
1425	vhdr = verdicthdr_get(nfqa);
1426	if (!vhdr)
1427		return -EINVAL;
1428
1429	verdict = ntohl(vhdr->verdict);
1430
1431	entry = find_dequeue_entry(queue, ntohl(vhdr->id));
1432	if (entry == NULL)
1433		return -ENOENT;
1434
1435	/* rcu lock already held from nfnl->call_rcu. */
1436	nfnl_ct = rcu_dereference(nfnl_ct_hook);
1437
1438	if (nfqa[NFQA_CT]) {
1439		if (nfnl_ct != NULL)
1440			ct = nfqnl_ct_parse(nfnl_ct, info->nlh, nfqa, entry,
1441					    &ctinfo);
1442	}
1443
1444	if (entry->state.pf == PF_BRIDGE) {
1445		err = nfqa_parse_bridge(entry, nfqa);
1446		if (err < 0)
1447			return err;
1448	}
1449
1450	if (nfqa[NFQA_PAYLOAD]) {
1451		u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
1452		int diff = payload_len - entry->skb->len;
1453
1454		if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
1455				 payload_len, entry, diff) < 0)
1456			verdict = NF_DROP;
1457
1458		if (ct && diff)
1459			nfnl_ct->seq_adjust(entry->skb, ct, ctinfo, diff);
1460	}
1461
1462	if (nfqa[NFQA_MARK])
1463		entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
1464
1465	if (nfqa[NFQA_PRIORITY])
1466		entry->skb->priority = ntohl(nla_get_be32(nfqa[NFQA_PRIORITY]));
1467
1468	nfqnl_reinject(entry, verdict);
1469	return 0;
1470}
1471
1472static int nfqnl_recv_unsupp(struct sk_buff *skb, const struct nfnl_info *info,
1473			     const struct nlattr * const cda[])
1474{
1475	return -ENOTSUPP;
1476}
1477
1478static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
1479	[NFQA_CFG_CMD]		= { .len = sizeof(struct nfqnl_msg_config_cmd) },
1480	[NFQA_CFG_PARAMS]	= { .len = sizeof(struct nfqnl_msg_config_params) },
1481	[NFQA_CFG_QUEUE_MAXLEN]	= { .type = NLA_U32 },
1482	[NFQA_CFG_MASK]		= { .type = NLA_U32 },
1483	[NFQA_CFG_FLAGS]	= { .type = NLA_U32 },
1484};
1485
1486static const struct nf_queue_handler nfqh = {
1487	.outfn		= nfqnl_enqueue_packet,
1488	.nf_hook_drop	= nfqnl_nf_hook_drop,
1489};
1490
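/* NFQNL_MSG_CONFIG: bind/unbind queue instances and update queue
 * parameters. Flags are validated before the instance lookup so that a
 * failing request leaves the existing configuration untouched.
 */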
1491static int nfqnl_recv_config(struct sk_buff *skb, const struct nfnl_info *info,
1492			     const struct nlattr * const nfqa[])
1493{
1494	struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
1495	u_int16_t queue_num = ntohs(info->nfmsg->res_id);
1496	struct nfqnl_msg_config_cmd *cmd = NULL;
1497	struct nfqnl_instance *queue;
1498	__u32 flags = 0, mask = 0;
1499	int ret = 0;
1500
1501	if (nfqa[NFQA_CFG_CMD]) {
1502		cmd = nla_data(nfqa[NFQA_CFG_CMD]);
1503
1504		/* Obsolete commands without queue context */
1505		switch (cmd->command) {
1506		case NFQNL_CFG_CMD_PF_BIND: return 0;
1507		case NFQNL_CFG_CMD_PF_UNBIND: return 0;
1508		}
1509	}
1510
1511	/* Check if we support these flags in the first place; dependencies
1512	 * must be checked here too, so that atomicity is not broken.
1513	 */
1514	if (nfqa[NFQA_CFG_FLAGS]) {
1515		if (!nfqa[NFQA_CFG_MASK]) {
1516			/* A mask is needed to specify which flags are being
1517			 * changed.
1518			 */
1519			return -EINVAL;
1520		}
1521
1522		flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS]));
1523		mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK]));
1524
1525		if (flags >= NFQA_CFG_F_MAX)
1526			return -EOPNOTSUPP;
1527
1528#if !IS_ENABLED(CONFIG_NETWORK_SECMARK)
1529		if (flags & mask & NFQA_CFG_F_SECCTX)
1530			return -EOPNOTSUPP;
1531#endif
1532		if ((flags & mask & NFQA_CFG_F_CONNTRACK) &&
1533		    !rcu_access_pointer(nfnl_ct_hook)) {
1534#ifdef CONFIG_MODULES
1535			nfnl_unlock(NFNL_SUBSYS_QUEUE);
1536			request_module("ip_conntrack_netlink");
1537			nfnl_lock(NFNL_SUBSYS_QUEUE);
1538			if (rcu_access_pointer(nfnl_ct_hook))
1539				return -EAGAIN;
1540#endif
1541			return -EOPNOTSUPP;
1542		}
1543	}
1544
1545	rcu_read_lock();
1546	queue = instance_lookup(q, queue_num);
1547	if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
1548		ret = -EPERM;
1549		goto err_out_unlock;
1550	}
1551
1552	if (cmd != NULL) {
1553		switch (cmd->command) {
1554		case NFQNL_CFG_CMD_BIND:
1555			if (queue) {
1556				ret = -EBUSY;
1557				goto err_out_unlock;
1558			}
1559			queue = instance_create(q, queue_num,
1560						NETLINK_CB(skb).portid);
1561			if (IS_ERR(queue)) {
1562				ret = PTR_ERR(queue);
1563				goto err_out_unlock;
1564			}
1565			break;
1566		case NFQNL_CFG_CMD_UNBIND:
1567			if (!queue) {
1568				ret = -ENODEV;
1569				goto err_out_unlock;
1570			}
1571			instance_destroy(q, queue);
1572			goto err_out_unlock;
1573		case NFQNL_CFG_CMD_PF_BIND:
1574		case NFQNL_CFG_CMD_PF_UNBIND:
1575			break;
1576		default:
1577			ret = -ENOTSUPP;
1578			goto err_out_unlock;
1579		}
1580	}
1581
1582	if (!queue) {
1583		ret = -ENODEV;
1584		goto err_out_unlock;
1585	}
1586
1587	if (nfqa[NFQA_CFG_PARAMS]) {
1588		struct nfqnl_msg_config_params *params =
1589			nla_data(nfqa[NFQA_CFG_PARAMS]);
1590
1591		nfqnl_set_mode(queue, params->copy_mode,
1592				ntohl(params->copy_range));
1593	}
1594
1595	if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
1596		__be32 *queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
1597
1598		spin_lock_bh(&queue->lock);
1599		queue->queue_maxlen = ntohl(*queue_maxlen);
1600		spin_unlock_bh(&queue->lock);
1601	}
1602
1603	if (nfqa[NFQA_CFG_FLAGS]) {
1604		spin_lock_bh(&queue->lock);
1605		queue->flags &= ~mask;
1606		queue->flags |= flags & mask;
1607		spin_unlock_bh(&queue->lock);
1608	}
1609
1610err_out_unlock:
1611	rcu_read_unlock();
1612	return ret;
1613}
1614
1615static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
1616	[NFQNL_MSG_PACKET]	= {
1617		.call		= nfqnl_recv_unsupp,
1618		.type		= NFNL_CB_RCU,
1619		.attr_count	= NFQA_MAX,
1620	},
1621	[NFQNL_MSG_VERDICT]	= {
1622		.call		= nfqnl_recv_verdict,
1623		.type		= NFNL_CB_RCU,
1624		.attr_count	= NFQA_MAX,
1625		.policy		= nfqa_verdict_policy
1626	},
1627	[NFQNL_MSG_CONFIG]	= {
1628		.call		= nfqnl_recv_config,
1629		.type		= NFNL_CB_MUTEX,
1630		.attr_count	= NFQA_CFG_MAX,
1631		.policy		= nfqa_cfg_policy
1632	},
1633	[NFQNL_MSG_VERDICT_BATCH] = {
1634		.call		= nfqnl_recv_verdict_batch,
1635		.type		= NFNL_CB_RCU,
1636		.attr_count	= NFQA_MAX,
1637		.policy		= nfqa_verdict_batch_policy
1638	},
1639};
1640
1641static const struct nfnetlink_subsystem nfqnl_subsys = {
1642	.name		= "nf_queue",
1643	.subsys_id	= NFNL_SUBSYS_QUEUE,
1644	.cb_count	= NFQNL_MSG_MAX,
1645	.cb		= nfqnl_cb,
1646};
1647
1648#ifdef CONFIG_PROC_FS
1649struct iter_state {
1650	struct seq_net_private p;
1651	unsigned int bucket;
1652};
1653
1654static struct hlist_node *get_first(struct seq_file *seq)
1655{
1656	struct iter_state *st = seq->private;
1657	struct net *net;
1658	struct nfnl_queue_net *q;
1659
1660	if (!st)
1661		return NULL;
1662
1663	net = seq_file_net(seq);
1664	q = nfnl_queue_pernet(net);
1665	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
1666		if (!hlist_empty(&q->instance_table[st->bucket]))
1667			return q->instance_table[st->bucket].first;
1668	}
1669	return NULL;
1670}
1671
1672static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
1673{
1674	struct iter_state *st = seq->private;
1675	struct net *net = seq_file_net(seq);
1676
1677	h = h->next;
1678	while (!h) {
1679		struct nfnl_queue_net *q;
1680
1681		if (++st->bucket >= INSTANCE_BUCKETS)
1682			return NULL;
1683
1684		q = nfnl_queue_pernet(net);
1685		h = q->instance_table[st->bucket].first;
1686	}
1687	return h;
1688}
1689
1690static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
1691{
1692	struct hlist_node *head;
1693	head = get_first(seq);
1694
1695	if (head)
1696		while (pos && (head = get_next(seq, head)))
1697			pos--;
1698	return pos ? NULL : head;
1699}
1700
1701static void *seq_start(struct seq_file *s, loff_t *pos)
1702	__acquires(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
1703{
1704	spin_lock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
1705	return get_idx(s, *pos);
1706}
1707
1708static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
1709{
1710	(*pos)++;
1711	return get_next(s, v);
1712}
1713
1714static void seq_stop(struct seq_file *s, void *v)
1715	__releases(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
1716{
1717	spin_unlock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
1718}
1719
1720static int seq_show(struct seq_file *s, void *v)
1721{
1722	const struct nfqnl_instance *inst = v;
1723
1724	seq_printf(s, "%5u %6u %5u %1u %5u %5u %5u %8u %2d\n",
1725		   inst->queue_num,
1726		   inst->peer_portid, inst->queue_total,
1727		   inst->copy_mode, inst->copy_range,
1728		   inst->queue_dropped, inst->queue_user_dropped,
1729		   inst->id_sequence, 1);
1730	return 0;
1731}
1732
1733static const struct seq_operations nfqnl_seq_ops = {
1734	.start	= seq_start,
1735	.next	= seq_next,
1736	.stop	= seq_stop,
1737	.show	= seq_show,
1738};
1739#endif /* PROC_FS */
1740
1741static int __net_init nfnl_queue_net_init(struct net *net)
1742{
1743	unsigned int i;
1744	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
1745
1746	for (i = 0; i < INSTANCE_BUCKETS; i++)
1747		INIT_HLIST_HEAD(&q->instance_table[i]);
1748
1749	spin_lock_init(&q->instances_lock);
1750
1751#ifdef CONFIG_PROC_FS
1752	if (!proc_create_net("nfnetlink_queue", 0440, net->nf.proc_netfilter,
1753			&nfqnl_seq_ops, sizeof(struct iter_state)))
1754		return -ENOMEM;
1755#endif
1756	return 0;
1757}
1758
1759static void __net_exit nfnl_queue_net_exit(struct net *net)
1760{
1761	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
1762	unsigned int i;
1763
1764#ifdef CONFIG_PROC_FS
1765	remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
1766#endif
1767	for (i = 0; i < INSTANCE_BUCKETS; i++)
1768		WARN_ON_ONCE(!hlist_empty(&q->instance_table[i]));
1769}
1770
1771static struct pernet_operations nfnl_queue_net_ops = {
1772	.init		= nfnl_queue_net_init,
1773	.exit		= nfnl_queue_net_exit,
1774	.id		= &nfnl_queue_net_id,
1775	.size		= sizeof(struct nfnl_queue_net),
1776};
1777
1778static int __init nfnetlink_queue_init(void)
1779{
1780	int status;
1781
1782	status = register_pernet_subsys(&nfnl_queue_net_ops);
1783	if (status < 0) {
1784		pr_err("failed to register pernet ops\n");
1785		goto out;
1786	}
1787
1788	netlink_register_notifier(&nfqnl_rtnl_notifier);
1789	status = nfnetlink_subsys_register(&nfqnl_subsys);
1790	if (status < 0) {
1791		pr_err("failed to create netlink socket\n");
1792		goto cleanup_netlink_notifier;
1793	}
1794
1795	status = register_netdevice_notifier(&nfqnl_dev_notifier);
1796	if (status < 0) {
1797		pr_err("failed to register netdevice notifier\n");
1798		goto cleanup_netlink_subsys;
1799	}
1800
1801	nf_register_queue_handler(&nfqh);
1802
1803	return status;
1804
1805cleanup_netlink_subsys:
1806	nfnetlink_subsys_unregister(&nfqnl_subsys);
1807cleanup_netlink_notifier:
1808	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
1809	unregister_pernet_subsys(&nfnl_queue_net_ops);
1810out:
1811	return status;
1812}
1813
1814static void __exit nfnetlink_queue_fini(void)
1815{
1816	nf_unregister_queue_handler();
1817	unregister_netdevice_notifier(&nfqnl_dev_notifier);
1818	nfnetlink_subsys_unregister(&nfqnl_subsys);
1819	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
1820	unregister_pernet_subsys(&nfnl_queue_net_ops);
1821
1822	rcu_barrier(); /* Wait for completion of call_rcu()'s */
1823}
1824
1825MODULE_DESCRIPTION("netfilter packet queue handler");
1826MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
1827MODULE_LICENSE("GPL");
1828MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);
1829
1830module_init(nfnetlink_queue_init);
1831module_exit(nfnetlink_queue_fini);