net/netfilter/nfnetlink_queue.c (Linux v4.10.11)
   1/*
   2 * This is a module which is used for queueing packets and communicating with
   3 * userspace via nfnetlink.
   4 *
   5 * (C) 2005 by Harald Welte <laforge@netfilter.org>
   6 * (C) 2007 by Patrick McHardy <kaber@trash.net>
   7 *
   8 * Based on the old ipv4-only ip_queue.c:
   9 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
  10 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
  11 *
  12 * This program is free software; you can redistribute it and/or modify
  13 * it under the terms of the GNU General Public License version 2 as
  14 * published by the Free Software Foundation.
  15 *
  16 */
  17#include <linux/module.h>
  18#include <linux/skbuff.h>
  19#include <linux/init.h>
  20#include <linux/spinlock.h>
  21#include <linux/slab.h>
  22#include <linux/notifier.h>
  23#include <linux/netdevice.h>
  24#include <linux/netfilter.h>
  25#include <linux/proc_fs.h>
  26#include <linux/netfilter_ipv4.h>
  27#include <linux/netfilter_ipv6.h>
  28#include <linux/netfilter_bridge.h>
  29#include <linux/netfilter/nfnetlink.h>
  30#include <linux/netfilter/nfnetlink_queue.h>
  31#include <linux/netfilter/nf_conntrack_common.h>
  32#include <linux/list.h>
  33#include <net/sock.h>
  34#include <net/tcp_states.h>
  35#include <net/netfilter/nf_queue.h>
  36#include <net/netns/generic.h>
  37
  38#include <linux/atomic.h>
  39
  40#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
  41#include "../bridge/br_private.h"
  42#endif
  43
  44#define NFQNL_QMAX_DEFAULT 1024
  45
  46/* We're using struct nlattr which has 16bit nla_len. Note that nla_len
  47 * includes the header length. Thus, the maximum packet length that we
  48 * support is 65531 bytes. We send truncated packets if the specified length
  49 * is larger than that.  Userspace can check for presence of NFQA_CAP_LEN
  50 * attribute to detect truncation.
  51 */
  52#define NFQNL_MAX_COPY_RANGE (0xffff - NLA_HDRLEN)
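/* NLA_HDRLEN is the aligned size of struct nlattr (4 bytes), so the cap
 * above works out to 65535 - 4 = 65531 bytes, matching the comment. */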
  53
  54struct nfqnl_instance {
  55	struct hlist_node hlist;		/* global list of queues */
  56	struct rcu_head rcu;
  57
  58	u32 peer_portid;
  59	unsigned int queue_maxlen;
  60	unsigned int copy_range;
  61	unsigned int queue_dropped;
  62	unsigned int queue_user_dropped;
  63
  64
  65	u_int16_t queue_num;			/* number of this queue */
  66	u_int8_t copy_mode;
  67	u_int32_t flags;			/* Set using NFQA_CFG_FLAGS */
  68/*
  69 * Following fields are dirtied for each queued packet,
  70 * keep them in same cache line if possible.
  71 */
  72	spinlock_t	lock	____cacheline_aligned_in_smp;
  73	unsigned int	queue_total;
  74	unsigned int	id_sequence;		/* 'sequence' of pkt ids */
  75	struct list_head queue_list;		/* packets in queue */
  76};
  77
  78typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);
  79
  80static unsigned int nfnl_queue_net_id __read_mostly;
  81
  82#define INSTANCE_BUCKETS	16
  83struct nfnl_queue_net {
  84	spinlock_t instances_lock;
  85	struct hlist_head instance_table[INSTANCE_BUCKETS];
  86};
  87
  88static struct nfnl_queue_net *nfnl_queue_pernet(struct net *net)
  89{
  90	return net_generic(net, nfnl_queue_net_id);
  91}
  92
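/* Bucket selection folds the high byte of the 16-bit queue number into
 * the low byte before reducing modulo INSTANCE_BUCKETS, so queue numbers
 * differing only in their high byte still spread across all 16 buckets. */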
  93static inline u_int8_t instance_hashfn(u_int16_t queue_num)
  94{
  95	return ((queue_num >> 8) ^ queue_num) % INSTANCE_BUCKETS;
  96}
  97
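/* The hash chains are RCU-protected: callers must hold either
 * rcu_read_lock() or q->instances_lock across the lookup. */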
  98static struct nfqnl_instance *
  99instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num)
 100{
 101	struct hlist_head *head;
 102	struct nfqnl_instance *inst;
 103
 104	head = &q->instance_table[instance_hashfn(queue_num)];
 105	hlist_for_each_entry_rcu(inst, head, hlist) {
 106		if (inst->queue_num == queue_num)
 107			return inst;
 108	}
 109	return NULL;
 110}
 111
 112static struct nfqnl_instance *
 113instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid)
 114{
 115	struct nfqnl_instance *inst;
 116	unsigned int h;
 117	int err;
 118
 119	spin_lock(&q->instances_lock);
 120	if (instance_lookup(q, queue_num)) {
 121		err = -EEXIST;
 122		goto out_unlock;
 123	}
 124
 125	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
 126	if (!inst) {
 127		err = -ENOMEM;
 128		goto out_unlock;
 129	}
 130
 131	inst->queue_num = queue_num;
 132	inst->peer_portid = portid;
 133	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
 134	inst->copy_range = NFQNL_MAX_COPY_RANGE;
 135	inst->copy_mode = NFQNL_COPY_NONE;
 136	spin_lock_init(&inst->lock);
 137	INIT_LIST_HEAD(&inst->queue_list);
 138
 139	if (!try_module_get(THIS_MODULE)) {
 140		err = -EAGAIN;
 141		goto out_free;
 142	}
 143
 144	h = instance_hashfn(queue_num);
 145	hlist_add_head_rcu(&inst->hlist, &q->instance_table[h]);
 146
 147	spin_unlock(&q->instances_lock);
 148
 149	return inst;
 150
 151out_free:
 152	kfree(inst);
 153out_unlock:
 154	spin_unlock(&q->instances_lock);
 155	return ERR_PTR(err);
 156}
 157
 158static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
 159			unsigned long data);
 160
 161static void
 162instance_destroy_rcu(struct rcu_head *head)
 163{
 164	struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
 165						   rcu);
 166
 167	nfqnl_flush(inst, NULL, 0);
 168	kfree(inst);
 169	module_put(THIS_MODULE);
 170}
 171
 172static void
 173__instance_destroy(struct nfqnl_instance *inst)
 174{
 175	hlist_del_rcu(&inst->hlist);
 176	call_rcu(&inst->rcu, instance_destroy_rcu);
 177}
 178
 179static void
 180instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst)
 181{
 182	spin_lock(&q->instances_lock);
 183	__instance_destroy(inst);
 184	spin_unlock(&q->instances_lock);
 185}
 186
 187static inline void
 188__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
 189{
 190       list_add_tail(&entry->list, &queue->queue_list);
 191       queue->queue_total++;
 192}
 193
 194static void
 195__dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
 196{
 197	list_del(&entry->list);
 198	queue->queue_total--;
 199}
 200
 201static struct nf_queue_entry *
 202find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
 203{
 204	struct nf_queue_entry *entry = NULL, *i;
 205
 206	spin_lock_bh(&queue->lock);
 207
 208	list_for_each_entry(i, &queue->queue_list, list) {
 209		if (i->id == id) {
 210			entry = i;
 211			break;
 212		}
 213	}
 214
 215	if (entry)
 216		__dequeue_entry(queue, entry);
 217
 218	spin_unlock_bh(&queue->lock);
 219
 220	return entry;
 221}
 222
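/* Drop every queued entry that matches cmpfn (or all entries when cmpfn
 * is NULL), handing each one back to the stack with an NF_DROP verdict. */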
 223static void
 224nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
 225{
 226	struct nf_queue_entry *entry, *next;
 227
 228	spin_lock_bh(&queue->lock);
 229	list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
 230		if (!cmpfn || cmpfn(entry, data)) {
 231			list_del(&entry->list);
 232			queue->queue_total--;
 233			nf_reinject(entry, NF_DROP);
 234		}
 235	}
 236	spin_unlock_bh(&queue->lock);
 237}
 238
 239static int
 240nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet,
 241		      bool csum_verify)
 242{
 243	__u32 flags = 0;
 244
 245	if (packet->ip_summed == CHECKSUM_PARTIAL)
 246		flags = NFQA_SKB_CSUMNOTREADY;
 247	else if (csum_verify)
 248		flags = NFQA_SKB_CSUM_NOTVERIFIED;
 249
 250	if (skb_is_gso(packet))
 251		flags |= NFQA_SKB_GSO;
 252
 253	return flags ? nla_put_be32(nlskb, NFQA_SKB_INFO, htonl(flags)) : 0;
 254}
 255
 256static int nfqnl_put_sk_uidgid(struct sk_buff *skb, struct sock *sk)
 257{
 258	const struct cred *cred;
 259
 260	if (!sk_fullsock(sk))
 261		return 0;
 262
 263	read_lock_bh(&sk->sk_callback_lock);
 264	if (sk->sk_socket && sk->sk_socket->file) {
 265		cred = sk->sk_socket->file->f_cred;
 266		if (nla_put_be32(skb, NFQA_UID,
 267		    htonl(from_kuid_munged(&init_user_ns, cred->fsuid))))
 268			goto nla_put_failure;
 269		if (nla_put_be32(skb, NFQA_GID,
 270		    htonl(from_kgid_munged(&init_user_ns, cred->fsgid))))
 271			goto nla_put_failure;
 272	}
 273	read_unlock_bh(&sk->sk_callback_lock);
 274	return 0;
 275
 276nla_put_failure:
 277	read_unlock_bh(&sk->sk_callback_lock);
 278	return -1;
 279}
 280
 281static u32 nfqnl_get_sk_secctx(struct sk_buff *skb, char **secdata)
 282{
 283	u32 seclen = 0;
 284#if IS_ENABLED(CONFIG_NETWORK_SECMARK)
 285	if (!skb || !sk_fullsock(skb->sk))
 286		return 0;
 287
 288	read_lock_bh(&skb->sk->sk_callback_lock);
 289
 290	if (skb->secmark)
 291		security_secid_to_secctx(skb->secmark, secdata, &seclen);
 292
 293	read_unlock_bh(&skb->sk->sk_callback_lock);
 294#endif
 295	return seclen;
 296}
 297
 298static u32 nfqnl_get_bridge_size(struct nf_queue_entry *entry)
 299{
 300	struct sk_buff *entskb = entry->skb;
 301	u32 nlalen = 0;
 302
 303	if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
 304		return 0;
 305
 306	if (skb_vlan_tag_present(entskb))
 307		nlalen += nla_total_size(nla_total_size(sizeof(__be16)) +
 308					 nla_total_size(sizeof(__be16)));
 309
 310	if (entskb->network_header > entskb->mac_header)
 311		nlalen += nla_total_size((entskb->network_header -
 312					  entskb->mac_header));
 313
 314	return nlalen;
 315}
 316
 317static int nfqnl_put_bridge(struct nf_queue_entry *entry, struct sk_buff *skb)
 318{
 319	struct sk_buff *entskb = entry->skb;
 320
 321	if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
 322		return 0;
 323
 324	if (skb_vlan_tag_present(entskb)) {
 325		struct nlattr *nest;
 326
 327		nest = nla_nest_start(skb, NFQA_VLAN | NLA_F_NESTED);
 328		if (!nest)
 329			goto nla_put_failure;
 330
 331		if (nla_put_be16(skb, NFQA_VLAN_TCI, htons(entskb->vlan_tci)) ||
 332		    nla_put_be16(skb, NFQA_VLAN_PROTO, entskb->vlan_proto))
 333			goto nla_put_failure;
 334
 335		nla_nest_end(skb, nest);
 336	}
 337
 338	if (entskb->mac_header < entskb->network_header) {
 339		int len = (int)(entskb->network_header - entskb->mac_header);
 340
 341		if (nla_put(skb, NFQA_L2HDR, len, skb_mac_header(entskb)))
 342			goto nla_put_failure;
 343	}
 344
 345	return 0;
 346
 347nla_put_failure:
 348	return -1;
 349}
 350
 351static struct sk_buff *
 352nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 353			   struct nf_queue_entry *entry,
 354			   __be32 **packet_id_ptr)
 355{
 356	size_t size;
 357	size_t data_len = 0, cap_len = 0;
 358	unsigned int hlen = 0;
 359	struct sk_buff *skb;
 360	struct nlattr *nla;
 361	struct nfqnl_msg_packet_hdr *pmsg;
 362	struct nlmsghdr *nlh;
 363	struct nfgenmsg *nfmsg;
 364	struct sk_buff *entskb = entry->skb;
 365	struct net_device *indev;
 366	struct net_device *outdev;
 367	struct nf_conn *ct = NULL;
 368	enum ip_conntrack_info uninitialized_var(ctinfo);
 369	struct nfnl_ct_hook *nfnl_ct;
 370	bool csum_verify;
 371	char *secdata = NULL;
 372	u32 seclen = 0;
 373
 374	size =    nlmsg_total_size(sizeof(struct nfgenmsg))
 375		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
 376		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
 377		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
 378#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 379		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
 380		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
 381#endif
 382		+ nla_total_size(sizeof(u_int32_t))	/* mark */
 383		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
 384		+ nla_total_size(sizeof(u_int32_t))	/* skbinfo */
 385		+ nla_total_size(sizeof(u_int32_t));	/* cap_len */
 386
 387	if (entskb->tstamp)
 388		size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));
 389
 390	size += nfqnl_get_bridge_size(entry);
 391
 392	if (entry->state.hook <= NF_INET_FORWARD ||
 393	   (entry->state.hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
 394		csum_verify = !skb_csum_unnecessary(entskb);
 395	else
 396		csum_verify = false;
 397
 398	outdev = entry->state.out;
 399
 400	switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
 401	case NFQNL_COPY_META:
 402	case NFQNL_COPY_NONE:
 403		break;
 404
 405	case NFQNL_COPY_PACKET:
 406		if (!(queue->flags & NFQA_CFG_F_GSO) &&
 407		    entskb->ip_summed == CHECKSUM_PARTIAL &&
 408		    skb_checksum_help(entskb))
 409			return NULL;
 410
 411		data_len = ACCESS_ONCE(queue->copy_range);
 412		if (data_len > entskb->len)
 413			data_len = entskb->len;
 414
 415		hlen = skb_zerocopy_headlen(entskb);
 416		hlen = min_t(unsigned int, hlen, data_len);
 417		size += sizeof(struct nlattr) + hlen;
 418		cap_len = entskb->len;
 419		break;
 420	}
 421
 422	nfnl_ct = rcu_dereference(nfnl_ct_hook);
 423
 424	if (queue->flags & NFQA_CFG_F_CONNTRACK) {
 425		if (nfnl_ct != NULL) {
 426			ct = nfnl_ct->get_ct(entskb, &ctinfo);
 427			if (ct != NULL)
 428				size += nfnl_ct->build_size(ct);
 429		}
 430	}
 431
 432	if (queue->flags & NFQA_CFG_F_UID_GID) {
 433		size +=  (nla_total_size(sizeof(u_int32_t))	/* uid */
 434			+ nla_total_size(sizeof(u_int32_t)));	/* gid */
 435	}
 436
 437	if ((queue->flags & NFQA_CFG_F_SECCTX) && entskb->sk) {
 438		seclen = nfqnl_get_sk_secctx(entskb, &secdata);
 439		if (seclen)
 440			size += nla_total_size(seclen);
 441	}
 442
 443	skb = alloc_skb(size, GFP_ATOMIC);
 444	if (!skb) {
 445		skb_tx_error(entskb);
 446		return NULL;
 447	}
 448
 449	nlh = nlmsg_put(skb, 0, 0,
 450			NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
 451			sizeof(struct nfgenmsg), 0);
 452	if (!nlh) {
 453		skb_tx_error(entskb);
 454		kfree_skb(skb);
 455		return NULL;
 456	}
 457	nfmsg = nlmsg_data(nlh);
 458	nfmsg->nfgen_family = entry->state.pf;
 459	nfmsg->version = NFNETLINK_V0;
 460	nfmsg->res_id = htons(queue->queue_num);
 461
 462	nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
 463	pmsg = nla_data(nla);
 464	pmsg->hw_protocol	= entskb->protocol;
 465	pmsg->hook		= entry->state.hook;
 466	*packet_id_ptr		= &pmsg->packet_id;
 467
 468	indev = entry->state.in;
 469	if (indev) {
 470#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 471		if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
 472			goto nla_put_failure;
 473#else
 474		if (entry->state.pf == PF_BRIDGE) {
 475			/* Case 1: indev is physical input device, we need to
 476			 * look for bridge group (when called from
 477			 * netfilter_bridge) */
 478			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
 479					 htonl(indev->ifindex)) ||
 480			/* this is the bridge group "brX" */
 481			/* rcu_read_lock()ed by __nf_queue */
 482			    nla_put_be32(skb, NFQA_IFINDEX_INDEV,
 483					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
 484				goto nla_put_failure;
 485		} else {
 486			int physinif;
 487
 488			/* Case 2: indev is bridge group, we need to look for
 489			 * physical device (when called from ipv4) */
 490			if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
 491					 htonl(indev->ifindex)))
 492				goto nla_put_failure;
 493
 494			physinif = nf_bridge_get_physinif(entskb);
 495			if (physinif &&
 496			    nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
 497					 htonl(physinif)))
 498				goto nla_put_failure;
 499		}
 500#endif
 501	}
 502
 503	if (outdev) {
 504#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 505		if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
 506			goto nla_put_failure;
 507#else
 508		if (entry->state.pf == PF_BRIDGE) {
 509			/* Case 1: outdev is physical output device, we need to
 510			 * look for bridge group (when called from
 511			 * netfilter_bridge) */
 512			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
 513					 htonl(outdev->ifindex)) ||
 514			/* this is the bridge group "brX" */
 515			/* rcu_read_lock()ed by __nf_queue */
 516			    nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
 517					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
 518				goto nla_put_failure;
 519		} else {
 520			int physoutif;
 521
 522			/* Case 2: outdev is bridge group, we need to look for
 523			 * physical output device (when called from ipv4) */
 524			if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
 525					 htonl(outdev->ifindex)))
 526				goto nla_put_failure;
 527
 528			physoutif = nf_bridge_get_physoutif(entskb);
 529			if (physoutif &&
 530			    nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
 531					 htonl(physoutif)))
 532				goto nla_put_failure;
 533		}
 534#endif
 535	}
 536
 537	if (entskb->mark &&
 538	    nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
 539		goto nla_put_failure;
 540
 541	if (indev && entskb->dev &&
 542	    entskb->mac_header != entskb->network_header) {
 543		struct nfqnl_msg_packet_hw phw;
 544		int len;
 545
 546		memset(&phw, 0, sizeof(phw));
 547		len = dev_parse_header(entskb, phw.hw_addr);
 548		if (len) {
 549			phw.hw_addrlen = htons(len);
 550			if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
 551				goto nla_put_failure;
 552		}
 553	}
 554
 555	if (nfqnl_put_bridge(entry, skb) < 0)
 556		goto nla_put_failure;
 557
 558	if (entskb->tstamp) {
 559		struct nfqnl_msg_packet_timestamp ts;
 560		struct timespec64 kts = ktime_to_timespec64(entskb->tstamp);
 561
 562		ts.sec = cpu_to_be64(kts.tv_sec);
 563		ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);
 564
 565		if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
 566			goto nla_put_failure;
 567	}
 568
 569	if ((queue->flags & NFQA_CFG_F_UID_GID) && entskb->sk &&
 570	    nfqnl_put_sk_uidgid(skb, entskb->sk) < 0)
 571		goto nla_put_failure;
 572
 573	if (seclen && nla_put(skb, NFQA_SECCTX, seclen, secdata))
 574		goto nla_put_failure;
 575
 576	if (ct && nfnl_ct->build(skb, ct, ctinfo, NFQA_CT, NFQA_CT_INFO) < 0)
 577		goto nla_put_failure;
 578
 579	if (cap_len > data_len &&
 580	    nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
 581		goto nla_put_failure;
 582
 583	if (nfqnl_put_packet_info(skb, entskb, csum_verify))
 584		goto nla_put_failure;
 585
 586	if (data_len) {
 587		struct nlattr *nla;
 588
 589		if (skb_tailroom(skb) < sizeof(*nla) + hlen)
 590			goto nla_put_failure;
 591
 592		nla = (struct nlattr *)skb_put(skb, sizeof(*nla));
 593		nla->nla_type = NFQA_PAYLOAD;
 594		nla->nla_len = nla_attr_size(data_len);
 595
 596		if (skb_zerocopy(skb, entskb, data_len, hlen))
 597			goto nla_put_failure;
 598	}
 599
 600	nlh->nlmsg_len = skb->len;
 601	return skb;
 602
 603nla_put_failure:
 604	skb_tx_error(entskb);
 605	kfree_skb(skb);
 606	net_err_ratelimited("nf_queue: error creating packet message\n");
 607	return NULL;
 608}
 609
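/* Build a netlink message for one packet and unicast it to the bound
 * userspace listener.  With NFQA_CFG_F_FAIL_OPEN set, a full queue or a
 * failed unicast accepts the packet instead of dropping it. */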
 610static int
 611__nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
 612			struct nf_queue_entry *entry)
 613{
 614	struct sk_buff *nskb;
 615	int err = -ENOBUFS;
 616	__be32 *packet_id_ptr;
 617	int failopen = 0;
 618
 619	nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr);
 620	if (nskb == NULL) {
 621		err = -ENOMEM;
 622		goto err_out;
 623	}
 624	spin_lock_bh(&queue->lock);
 625
 626	if (queue->queue_total >= queue->queue_maxlen) {
 627		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
 628			failopen = 1;
 629			err = 0;
 630		} else {
 631			queue->queue_dropped++;
  632			net_warn_ratelimited("nf_queue: full at %d entries, dropping packet(s)\n",
 633					     queue->queue_total);
 634		}
 635		goto err_out_free_nskb;
 636	}
 637	entry->id = ++queue->id_sequence;
 638	*packet_id_ptr = htonl(entry->id);
 639
 640	/* nfnetlink_unicast will either free the nskb or add it to a socket */
 641	err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
 642	if (err < 0) {
 643		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
 644			failopen = 1;
 645			err = 0;
 646		} else {
 647			queue->queue_user_dropped++;
 648		}
 649		goto err_out_unlock;
 650	}
 651
 652	__enqueue_entry(queue, entry);
 653
 654	spin_unlock_bh(&queue->lock);
 655	return 0;
 656
 657err_out_free_nskb:
 658	kfree_skb(nskb);
 659err_out_unlock:
 660	spin_unlock_bh(&queue->lock);
 661	if (failopen)
 662		nf_reinject(entry, NF_ACCEPT);
 663err_out:
 664	return err;
 665}
 666
 667static struct nf_queue_entry *
 668nf_queue_entry_dup(struct nf_queue_entry *e)
 669{
 670	struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
 671	if (entry)
 672		nf_queue_entry_get_refs(entry);
 673	return entry;
 674}
 675
 676#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 677/* When called from bridge netfilter, skb->data must point to MAC header
 678 * before calling skb_gso_segment(). Else, original MAC header is lost
 679 * and segmented skbs will be sent to wrong destination.
 680 */
 681static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
 682{
 683	if (skb->nf_bridge)
 684		__skb_push(skb, skb->network_header - skb->mac_header);
 685}
 686
 687static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
 688{
 689	if (skb->nf_bridge)
 690		__skb_pull(skb, skb->network_header - skb->mac_header);
 691}
 692#else
 693#define nf_bridge_adjust_skb_data(s) do {} while (0)
 694#define nf_bridge_adjust_segmented_data(s) do {} while (0)
 695#endif
 696
 697static void free_entry(struct nf_queue_entry *entry)
 698{
 699	nf_queue_entry_release_refs(entry);
 700	kfree(entry);
 701}
 702
 703static int
 704__nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
 705			   struct sk_buff *skb, struct nf_queue_entry *entry)
 706{
 707	int ret = -ENOMEM;
 708	struct nf_queue_entry *entry_seg;
 709
 710	nf_bridge_adjust_segmented_data(skb);
 711
 712	if (skb->next == NULL) { /* last packet, no need to copy entry */
 713		struct sk_buff *gso_skb = entry->skb;
 714		entry->skb = skb;
 715		ret = __nfqnl_enqueue_packet(net, queue, entry);
 716		if (ret)
 717			entry->skb = gso_skb;
 718		return ret;
 719	}
 720
 721	skb->next = NULL;
 722
 723	entry_seg = nf_queue_entry_dup(entry);
 724	if (entry_seg) {
 725		entry_seg->skb = skb;
 726		ret = __nfqnl_enqueue_packet(net, queue, entry_seg);
 727		if (ret)
 728			free_entry(entry_seg);
 729	}
 730	return ret;
 731}
 732
 733static int
 734nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 735{
 736	unsigned int queued;
 737	struct nfqnl_instance *queue;
 738	struct sk_buff *skb, *segs;
 739	int err = -ENOBUFS;
 740	struct net *net = entry->state.net;
 741	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
 742
 743	/* rcu_read_lock()ed by nf_hook_thresh */
 744	queue = instance_lookup(q, queuenum);
 745	if (!queue)
 746		return -ESRCH;
 747
 748	if (queue->copy_mode == NFQNL_COPY_NONE)
 749		return -EINVAL;
 750
 751	skb = entry->skb;
 752
 753	switch (entry->state.pf) {
 754	case NFPROTO_IPV4:
 755		skb->protocol = htons(ETH_P_IP);
 756		break;
 757	case NFPROTO_IPV6:
 758		skb->protocol = htons(ETH_P_IPV6);
 759		break;
 760	}
 761
 762	if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb))
 763		return __nfqnl_enqueue_packet(net, queue, entry);
 764
 765	nf_bridge_adjust_skb_data(skb);
 766	segs = skb_gso_segment(skb, 0);
 767	/* Does not use PTR_ERR to limit the number of error codes that can be
 768	 * returned by nf_queue.  For instance, callers rely on -ESRCH to
 769	 * mean 'ignore this hook'.
 770	 */
 771	if (IS_ERR_OR_NULL(segs))
 772		goto out_err;
 773	queued = 0;
 774	err = 0;
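	/* Walk the singly-linked segment list: each segment is unlinked and
	 * queued as its own entry; segments that cannot be queued are freed. */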
 775	do {
 776		struct sk_buff *nskb = segs->next;
 777		if (err == 0)
 778			err = __nfqnl_enqueue_packet_gso(net, queue,
 779							segs, entry);
 780		if (err == 0)
 781			queued++;
 782		else
 783			kfree_skb(segs);
 784		segs = nskb;
 785	} while (segs);
 786
 787	if (queued) {
 788		if (err) /* some segments are already queued */
 789			free_entry(entry);
 790		kfree_skb(skb);
 791		return 0;
 792	}
 793 out_err:
 794	nf_bridge_adjust_segmented_data(skb);
 795	return err;
 796}
 797
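/* Replace the payload of a queued skb with data_len bytes supplied by
 * userspace.  diff is the signed length change: negative trims the skb,
 * positive grows it, reallocating when the tailroom is too small. */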
 798static int
 799nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
 800{
 801	struct sk_buff *nskb;
 802
 803	if (diff < 0) {
 804		if (pskb_trim(e->skb, data_len))
 805			return -ENOMEM;
 806	} else if (diff > 0) {
 807		if (data_len > 0xFFFF)
 808			return -EINVAL;
 809		if (diff > skb_tailroom(e->skb)) {
 810			nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
 811					       diff, GFP_ATOMIC);
 812			if (!nskb) {
 813				printk(KERN_WARNING "nf_queue: OOM "
 814				      "in mangle, dropping packet\n");
 815				return -ENOMEM;
 816			}
 817			kfree_skb(e->skb);
 818			e->skb = nskb;
 819		}
 820		skb_put(e->skb, diff);
 821	}
 822	if (!skb_make_writable(e->skb, data_len))
 823		return -ENOMEM;
 824	skb_copy_to_linear_data(e->skb, data, data_len);
 825	e->skb->ip_summed = CHECKSUM_NONE;
 826	return 0;
 827}
 828
 829static int
 830nfqnl_set_mode(struct nfqnl_instance *queue,
 831	       unsigned char mode, unsigned int range)
 832{
 833	int status = 0;
 834
 835	spin_lock_bh(&queue->lock);
 836	switch (mode) {
 837	case NFQNL_COPY_NONE:
 838	case NFQNL_COPY_META:
 839		queue->copy_mode = mode;
 840		queue->copy_range = 0;
 841		break;
 842
 843	case NFQNL_COPY_PACKET:
 844		queue->copy_mode = mode;
 845		if (range == 0 || range > NFQNL_MAX_COPY_RANGE)
 846			queue->copy_range = NFQNL_MAX_COPY_RANGE;
 847		else
 848			queue->copy_range = range;
 849		break;
 850
 851	default:
 852		status = -EINVAL;
 853
 854	}
 855	spin_unlock_bh(&queue->lock);
 856
 857	return status;
 858}
 859
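/* Return 1 if the entry's input or output device, or (for bridged
 * traffic) either physical port, carries the given ifindex. */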
 860static int
 861dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
 862{
 863	if (entry->state.in)
 864		if (entry->state.in->ifindex == ifindex)
 865			return 1;
 866	if (entry->state.out)
 867		if (entry->state.out->ifindex == ifindex)
 868			return 1;
 869#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 870	if (entry->skb->nf_bridge) {
 871		int physinif, physoutif;
 872
 873		physinif = nf_bridge_get_physinif(entry->skb);
 874		physoutif = nf_bridge_get_physoutif(entry->skb);
 875
 876		if (physinif == ifindex || physoutif == ifindex)
 877			return 1;
 878	}
 879#endif
 880	return 0;
 881}
 882
 883/* drop all packets with either indev or outdev == ifindex from all queue
 884 * instances */
 885static void
 886nfqnl_dev_drop(struct net *net, int ifindex)
 887{
 888	int i;
 889	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
 890
 891	rcu_read_lock();
 892
 893	for (i = 0; i < INSTANCE_BUCKETS; i++) {
 894		struct nfqnl_instance *inst;
 895		struct hlist_head *head = &q->instance_table[i];
 896
 897		hlist_for_each_entry_rcu(inst, head, hlist)
 898			nfqnl_flush(inst, dev_cmp, ifindex);
 899	}
 900
 901	rcu_read_unlock();
 902}
 903
 904static int
 905nfqnl_rcv_dev_event(struct notifier_block *this,
 906		    unsigned long event, void *ptr)
 907{
 908	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 909
 910	/* Drop any packets associated with the downed device */
 911	if (event == NETDEV_DOWN)
 912		nfqnl_dev_drop(dev_net(dev), dev->ifindex);
 913	return NOTIFY_DONE;
 914}
 915
 916static struct notifier_block nfqnl_dev_notifier = {
 917	.notifier_call	= nfqnl_rcv_dev_event,
 918};
 919
 920static int nf_hook_cmp(struct nf_queue_entry *entry, unsigned long entry_ptr)
 921{
 922	return rcu_access_pointer(entry->hook) ==
 923		(struct nf_hook_entry *)entry_ptr;
 924}
 925
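/* Called when a netfilter hook is unregistered: flush, from every
 * instance, all entries that were queued by that hook. */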
 926static void nfqnl_nf_hook_drop(struct net *net,
 927			       const struct nf_hook_entry *hook)
 928{
 929	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
 930	int i;
 931
 932	rcu_read_lock();
 933	for (i = 0; i < INSTANCE_BUCKETS; i++) {
 934		struct nfqnl_instance *inst;
 935		struct hlist_head *head = &q->instance_table[i];
 936
 937		hlist_for_each_entry_rcu(inst, head, hlist)
 938			nfqnl_flush(inst, nf_hook_cmp, (unsigned long)hook);
 939	}
 940	rcu_read_unlock();
 941}
 942
 943static int
 944nfqnl_rcv_nl_event(struct notifier_block *this,
 945		   unsigned long event, void *ptr)
 946{
 947	struct netlink_notify *n = ptr;
 948	struct nfnl_queue_net *q = nfnl_queue_pernet(n->net);
 949
 950	if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
 951		int i;
 952
 953		/* destroy all instances for this portid */
 954		spin_lock(&q->instances_lock);
 955		for (i = 0; i < INSTANCE_BUCKETS; i++) {
 956			struct hlist_node *t2;
 957			struct nfqnl_instance *inst;
 958			struct hlist_head *head = &q->instance_table[i];
 959
 960			hlist_for_each_entry_safe(inst, t2, head, hlist) {
 961				if (n->portid == inst->peer_portid)
 962					__instance_destroy(inst);
 963			}
 964		}
 965		spin_unlock(&q->instances_lock);
 966	}
 967	return NOTIFY_DONE;
 968}
 969
 970static struct notifier_block nfqnl_rtnl_notifier = {
 971	.notifier_call	= nfqnl_rcv_nl_event,
 972};
 973
 974static const struct nla_policy nfqa_vlan_policy[NFQA_VLAN_MAX + 1] = {
 975	[NFQA_VLAN_TCI]		= { .type = NLA_U16},
 976	[NFQA_VLAN_PROTO]	= { .type = NLA_U16},
 977};
 978
 979static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
 980	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
 981	[NFQA_MARK]		= { .type = NLA_U32 },
 982	[NFQA_PAYLOAD]		= { .type = NLA_UNSPEC },
 983	[NFQA_CT]		= { .type = NLA_UNSPEC },
 984	[NFQA_EXP]		= { .type = NLA_UNSPEC },
 985	[NFQA_VLAN]		= { .type = NLA_NESTED },
 986};
 987
 988static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
 989	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
 990	[NFQA_MARK]		= { .type = NLA_U32 },
 991};
 992
 993static struct nfqnl_instance *
 994verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, u32 nlportid)
 995{
 996	struct nfqnl_instance *queue;
 997
 998	queue = instance_lookup(q, queue_num);
 999	if (!queue)
1000		return ERR_PTR(-ENODEV);
1001
1002	if (queue->peer_portid != nlportid)
1003		return ERR_PTR(-EPERM);
1004
1005	return queue;
1006}
1007
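/* Extract and sanity-check the verdict header; NF_STOLEN and
 * out-of-range verdicts are rejected. */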
1008static struct nfqnl_msg_verdict_hdr*
1009verdicthdr_get(const struct nlattr * const nfqa[])
1010{
1011	struct nfqnl_msg_verdict_hdr *vhdr;
1012	unsigned int verdict;
1013
1014	if (!nfqa[NFQA_VERDICT_HDR])
1015		return NULL;
1016
1017	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
1018	verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
1019	if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
1020		return NULL;
1021	return vhdr;
1022}
1023
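/* Wraparound-safe "id > max": the unsigned subtraction is reinterpreted
 * as a signed int, so the test stays correct when the 32-bit packet id
 * sequence wraps. */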
1024static int nfq_id_after(unsigned int id, unsigned int max)
1025{
1026	return (int)(id - max) > 0;
1027}
1028
1029static int nfqnl_recv_verdict_batch(struct net *net, struct sock *ctnl,
1030				    struct sk_buff *skb,
1031				    const struct nlmsghdr *nlh,
1032			            const struct nlattr * const nfqa[])
1033{
1034	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1035	struct nf_queue_entry *entry, *tmp;
1036	unsigned int verdict, maxid;
1037	struct nfqnl_msg_verdict_hdr *vhdr;
1038	struct nfqnl_instance *queue;
1039	LIST_HEAD(batch_list);
1040	u16 queue_num = ntohs(nfmsg->res_id);
1041	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
1042
1043	queue = verdict_instance_lookup(q, queue_num,
1044					NETLINK_CB(skb).portid);
1045	if (IS_ERR(queue))
1046		return PTR_ERR(queue);
1047
1048	vhdr = verdicthdr_get(nfqa);
1049	if (!vhdr)
1050		return -EINVAL;
1051
1052	verdict = ntohl(vhdr->verdict);
1053	maxid = ntohl(vhdr->id);
1054
1055	spin_lock_bh(&queue->lock);
1056
1057	list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) {
1058		if (nfq_id_after(entry->id, maxid))
1059			break;
1060		__dequeue_entry(queue, entry);
1061		list_add_tail(&entry->list, &batch_list);
1062	}
1063
1064	spin_unlock_bh(&queue->lock);
1065
1066	if (list_empty(&batch_list))
1067		return -ENOENT;
1068
1069	list_for_each_entry_safe(entry, tmp, &batch_list, list) {
1070		if (nfqa[NFQA_MARK])
1071			entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
1072		nf_reinject(entry, verdict);
1073	}
1074	return 0;
1075}
1076
1077static struct nf_conn *nfqnl_ct_parse(struct nfnl_ct_hook *nfnl_ct,
1078				      const struct nlmsghdr *nlh,
1079				      const struct nlattr * const nfqa[],
1080				      struct nf_queue_entry *entry,
1081				      enum ip_conntrack_info *ctinfo)
1082{
1083	struct nf_conn *ct;
1084
1085	ct = nfnl_ct->get_ct(entry->skb, ctinfo);
1086	if (ct == NULL)
1087		return NULL;
1088
1089	if (nfnl_ct->parse(nfqa[NFQA_CT], ct) < 0)
1090		return NULL;
1091
1092	if (nfqa[NFQA_EXP])
1093		nfnl_ct->attach_expect(nfqa[NFQA_EXP], ct,
1094				      NETLINK_CB(entry->skb).portid,
1095				      nlmsg_report(nlh));
1096	return ct;
1097}
1098
1099static int nfqa_parse_bridge(struct nf_queue_entry *entry,
1100			     const struct nlattr * const nfqa[])
1101{
1102	if (nfqa[NFQA_VLAN]) {
1103		struct nlattr *tb[NFQA_VLAN_MAX + 1];
1104		int err;
1105
1106		err = nla_parse_nested(tb, NFQA_VLAN_MAX, nfqa[NFQA_VLAN],
1107				       nfqa_vlan_policy);
1108		if (err < 0)
1109			return err;
1110
1111		if (!tb[NFQA_VLAN_TCI] || !tb[NFQA_VLAN_PROTO])
1112			return -EINVAL;
1113
1114		entry->skb->vlan_tci = ntohs(nla_get_be16(tb[NFQA_VLAN_TCI]));
1115		entry->skb->vlan_proto = nla_get_be16(tb[NFQA_VLAN_PROTO]);
1116	}
1117
1118	if (nfqa[NFQA_L2HDR]) {
1119		int mac_header_len = entry->skb->network_header -
1120			entry->skb->mac_header;
1121
1122		if (mac_header_len != nla_len(nfqa[NFQA_L2HDR]))
1123			return -EINVAL;
1124		else if (mac_header_len > 0)
1125			memcpy(skb_mac_header(entry->skb),
1126			       nla_data(nfqa[NFQA_L2HDR]),
1127			       mac_header_len);
1128	}
1129
1130	return 0;
1131}
1132
1133static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl,
1134			      struct sk_buff *skb,
1135			      const struct nlmsghdr *nlh,
1136			      const struct nlattr * const nfqa[])
1137{
1138	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1139	u_int16_t queue_num = ntohs(nfmsg->res_id);
1140	struct nfqnl_msg_verdict_hdr *vhdr;
1141	struct nfqnl_instance *queue;
1142	unsigned int verdict;
1143	struct nf_queue_entry *entry;
1144	enum ip_conntrack_info uninitialized_var(ctinfo);
1145	struct nfnl_ct_hook *nfnl_ct;
1146	struct nf_conn *ct = NULL;
1147	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
1148	int err;
1149
1150	queue = verdict_instance_lookup(q, queue_num,
1151					NETLINK_CB(skb).portid);
1152	if (IS_ERR(queue))
1153		return PTR_ERR(queue);
1154
1155	vhdr = verdicthdr_get(nfqa);
1156	if (!vhdr)
1157		return -EINVAL;
1158
1159	verdict = ntohl(vhdr->verdict);
1160
1161	entry = find_dequeue_entry(queue, ntohl(vhdr->id));
1162	if (entry == NULL)
1163		return -ENOENT;
1164
1165	/* rcu lock already held from nfnl->call_rcu. */
1166	nfnl_ct = rcu_dereference(nfnl_ct_hook);
1167
1168	if (nfqa[NFQA_CT]) {
1169		if (nfnl_ct != NULL)
1170			ct = nfqnl_ct_parse(nfnl_ct, nlh, nfqa, entry, &ctinfo);
1171	}
1172
1173	if (entry->state.pf == PF_BRIDGE) {
1174		err = nfqa_parse_bridge(entry, nfqa);
1175		if (err < 0)
1176			return err;
1177	}
1178
1179	if (nfqa[NFQA_PAYLOAD]) {
1180		u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
1181		int diff = payload_len - entry->skb->len;
1182
1183		if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
1184				 payload_len, entry, diff) < 0)
1185			verdict = NF_DROP;
1186
1187		if (ct && diff)
1188			nfnl_ct->seq_adjust(entry->skb, ct, ctinfo, diff);
1189	}
1190
1191	if (nfqa[NFQA_MARK])
1192		entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
1193
1194	nf_reinject(entry, verdict);
1195	return 0;
1196}
1197
1198static int nfqnl_recv_unsupp(struct net *net, struct sock *ctnl,
1199			     struct sk_buff *skb, const struct nlmsghdr *nlh,
1200			     const struct nlattr * const nfqa[])
1201{
1202	return -ENOTSUPP;
1203}
1204
1205static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
1206	[NFQA_CFG_CMD]		= { .len = sizeof(struct nfqnl_msg_config_cmd) },
1207	[NFQA_CFG_PARAMS]	= { .len = sizeof(struct nfqnl_msg_config_params) },
1208};
1209
1210static const struct nf_queue_handler nfqh = {
1211	.outfn		= &nfqnl_enqueue_packet,
1212	.nf_hook_drop	= &nfqnl_nf_hook_drop,
1213};
1214
1215static int nfqnl_recv_config(struct net *net, struct sock *ctnl,
1216			     struct sk_buff *skb, const struct nlmsghdr *nlh,
1217			     const struct nlattr * const nfqa[])
1218{
1219	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1220	u_int16_t queue_num = ntohs(nfmsg->res_id);
1221	struct nfqnl_instance *queue;
1222	struct nfqnl_msg_config_cmd *cmd = NULL;
1223	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
1224	__u32 flags = 0, mask = 0;
1225	int ret = 0;
1226
1227	if (nfqa[NFQA_CFG_CMD]) {
1228		cmd = nla_data(nfqa[NFQA_CFG_CMD]);
1229
1230		/* Obsolete commands without queue context */
1231		switch (cmd->command) {
1232		case NFQNL_CFG_CMD_PF_BIND: return 0;
1233		case NFQNL_CFG_CMD_PF_UNBIND: return 0;
1234		}
1235	}
1236
 1237	/* Check whether we support these flags in the first place;
 1238	 * dependencies must be present too, so the update stays atomic.
 1239	 */
1240	if (nfqa[NFQA_CFG_FLAGS]) {
1241		if (!nfqa[NFQA_CFG_MASK]) {
1242			/* A mask is needed to specify which flags are being
1243			 * changed.
1244			 */
1245			return -EINVAL;
1246		}
1247
1248		flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS]));
1249		mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK]));
1250
1251		if (flags >= NFQA_CFG_F_MAX)
1252			return -EOPNOTSUPP;
1253
1254#if !IS_ENABLED(CONFIG_NETWORK_SECMARK)
1255		if (flags & mask & NFQA_CFG_F_SECCTX)
1256			return -EOPNOTSUPP;
1257#endif
1258		if ((flags & mask & NFQA_CFG_F_CONNTRACK) &&
1259		    !rcu_access_pointer(nfnl_ct_hook)) {
1260#ifdef CONFIG_MODULES
1261			nfnl_unlock(NFNL_SUBSYS_QUEUE);
1262			request_module("ip_conntrack_netlink");
1263			nfnl_lock(NFNL_SUBSYS_QUEUE);
1264			if (rcu_access_pointer(nfnl_ct_hook))
1265				return -EAGAIN;
1266#endif
1267			return -EOPNOTSUPP;
1268		}
1269	}
1270
1271	rcu_read_lock();
1272	queue = instance_lookup(q, queue_num);
1273	if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
1274		ret = -EPERM;
1275		goto err_out_unlock;
1276	}
1277
1278	if (cmd != NULL) {
1279		switch (cmd->command) {
1280		case NFQNL_CFG_CMD_BIND:
1281			if (queue) {
1282				ret = -EBUSY;
1283				goto err_out_unlock;
1284			}
1285			queue = instance_create(q, queue_num,
1286						NETLINK_CB(skb).portid);
1287			if (IS_ERR(queue)) {
1288				ret = PTR_ERR(queue);
1289				goto err_out_unlock;
1290			}
1291			break;
1292		case NFQNL_CFG_CMD_UNBIND:
1293			if (!queue) {
1294				ret = -ENODEV;
1295				goto err_out_unlock;
1296			}
1297			instance_destroy(q, queue);
1298			goto err_out_unlock;
1299		case NFQNL_CFG_CMD_PF_BIND:
1300		case NFQNL_CFG_CMD_PF_UNBIND:
1301			break;
1302		default:
1303			ret = -ENOTSUPP;
1304			goto err_out_unlock;
1305		}
1306	}
1307
1308	if (!queue) {
1309		ret = -ENODEV;
1310		goto err_out_unlock;
1311	}
1312
1313	if (nfqa[NFQA_CFG_PARAMS]) {
1314		struct nfqnl_msg_config_params *params =
1315			nla_data(nfqa[NFQA_CFG_PARAMS]);
1316
1317		nfqnl_set_mode(queue, params->copy_mode,
1318				ntohl(params->copy_range));
1319	}
1320
1321	if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
1322		__be32 *queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
1323
1324		spin_lock_bh(&queue->lock);
1325		queue->queue_maxlen = ntohl(*queue_maxlen);
1326		spin_unlock_bh(&queue->lock);
1327	}
1328
1329	if (nfqa[NFQA_CFG_FLAGS]) {
1330		spin_lock_bh(&queue->lock);
1331		queue->flags &= ~mask;
1332		queue->flags |= flags & mask;
1333		spin_unlock_bh(&queue->lock);
1334	}
1335
1336err_out_unlock:
1337	rcu_read_unlock();
1338	return ret;
1339}
1340
1341static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
1342	[NFQNL_MSG_PACKET]	= { .call_rcu = nfqnl_recv_unsupp,
1343				    .attr_count = NFQA_MAX, },
1344	[NFQNL_MSG_VERDICT]	= { .call_rcu = nfqnl_recv_verdict,
1345				    .attr_count = NFQA_MAX,
1346				    .policy = nfqa_verdict_policy },
1347	[NFQNL_MSG_CONFIG]	= { .call = nfqnl_recv_config,
1348				    .attr_count = NFQA_CFG_MAX,
1349				    .policy = nfqa_cfg_policy },
1350	[NFQNL_MSG_VERDICT_BATCH]={ .call_rcu = nfqnl_recv_verdict_batch,
1351				    .attr_count = NFQA_MAX,
1352				    .policy = nfqa_verdict_batch_policy },
1353};
1354
1355static const struct nfnetlink_subsystem nfqnl_subsys = {
1356	.name		= "nf_queue",
1357	.subsys_id	= NFNL_SUBSYS_QUEUE,
1358	.cb_count	= NFQNL_MSG_MAX,
1359	.cb		= nfqnl_cb,
1360};
1361
1362#ifdef CONFIG_PROC_FS
1363struct iter_state {
1364	struct seq_net_private p;
1365	unsigned int bucket;
1366};
1367
1368static struct hlist_node *get_first(struct seq_file *seq)
1369{
1370	struct iter_state *st = seq->private;
1371	struct net *net;
1372	struct nfnl_queue_net *q;
1373
1374	if (!st)
1375		return NULL;
1376
1377	net = seq_file_net(seq);
1378	q = nfnl_queue_pernet(net);
1379	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
1380		if (!hlist_empty(&q->instance_table[st->bucket]))
1381			return q->instance_table[st->bucket].first;
1382	}
1383	return NULL;
1384}
1385
1386static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
1387{
1388	struct iter_state *st = seq->private;
1389	struct net *net = seq_file_net(seq);
1390
1391	h = h->next;
1392	while (!h) {
1393		struct nfnl_queue_net *q;
1394
1395		if (++st->bucket >= INSTANCE_BUCKETS)
1396			return NULL;
1397
1398		q = nfnl_queue_pernet(net);
1399		h = q->instance_table[st->bucket].first;
1400	}
1401	return h;
1402}
1403
1404static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
1405{
1406	struct hlist_node *head;
1407	head = get_first(seq);
1408
1409	if (head)
1410		while (pos && (head = get_next(seq, head)))
1411			pos--;
1412	return pos ? NULL : head;
1413}
1414
1415static void *seq_start(struct seq_file *s, loff_t *pos)
1416	__acquires(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
1417{
1418	spin_lock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
1419	return get_idx(s, *pos);
1420}
1421
1422static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
1423{
1424	(*pos)++;
1425	return get_next(s, v);
1426}
1427
1428static void seq_stop(struct seq_file *s, void *v)
1429	__releases(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
1430{
1431	spin_unlock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
1432}
1433
1434static int seq_show(struct seq_file *s, void *v)
1435{
1436	const struct nfqnl_instance *inst = v;
1437
1438	seq_printf(s, "%5u %6u %5u %1u %5u %5u %5u %8u %2d\n",
1439		   inst->queue_num,
1440		   inst->peer_portid, inst->queue_total,
1441		   inst->copy_mode, inst->copy_range,
1442		   inst->queue_dropped, inst->queue_user_dropped,
1443		   inst->id_sequence, 1);
1444	return 0;
1445}
1446
1447static const struct seq_operations nfqnl_seq_ops = {
1448	.start	= seq_start,
1449	.next	= seq_next,
1450	.stop	= seq_stop,
1451	.show	= seq_show,
1452};
1453
1454static int nfqnl_open(struct inode *inode, struct file *file)
1455{
1456	return seq_open_net(inode, file, &nfqnl_seq_ops,
1457			sizeof(struct iter_state));
1458}
1459
1460static const struct file_operations nfqnl_file_ops = {
1461	.owner	 = THIS_MODULE,
1462	.open	 = nfqnl_open,
1463	.read	 = seq_read,
1464	.llseek	 = seq_lseek,
1465	.release = seq_release_net,
1466};
1467
1468#endif /* PROC_FS */
1469
1470static int __net_init nfnl_queue_net_init(struct net *net)
1471{
1472	unsigned int i;
1473	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
1474
1475	for (i = 0; i < INSTANCE_BUCKETS; i++)
1476		INIT_HLIST_HEAD(&q->instance_table[i]);
1477
1478	spin_lock_init(&q->instances_lock);
1479
1480#ifdef CONFIG_PROC_FS
1481	if (!proc_create("nfnetlink_queue", 0440,
1482			 net->nf.proc_netfilter, &nfqnl_file_ops))
1483		return -ENOMEM;
1484#endif
1485	nf_register_queue_handler(net, &nfqh);
1486	return 0;
1487}
1488
1489static void __net_exit nfnl_queue_net_exit(struct net *net)
1490{
1491	nf_unregister_queue_handler(net);
1492#ifdef CONFIG_PROC_FS
1493	remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
1494#endif
1495}
1496
1497static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list)
1498{
1499	synchronize_rcu();
1500}
1501
1502static struct pernet_operations nfnl_queue_net_ops = {
1503	.init		= nfnl_queue_net_init,
1504	.exit		= nfnl_queue_net_exit,
1505	.exit_batch	= nfnl_queue_net_exit_batch,
1506	.id		= &nfnl_queue_net_id,
1507	.size		= sizeof(struct nfnl_queue_net),
1508};
1509
1510static int __init nfnetlink_queue_init(void)
1511{
1512	int status;
1513
1514	status = register_pernet_subsys(&nfnl_queue_net_ops);
1515	if (status < 0) {
1516		pr_err("nf_queue: failed to register pernet ops\n");
1517		goto out;
1518	}
1519
1520	netlink_register_notifier(&nfqnl_rtnl_notifier);
1521	status = nfnetlink_subsys_register(&nfqnl_subsys);
1522	if (status < 0) {
1523		pr_err("nf_queue: failed to create netlink socket\n");
1524		goto cleanup_netlink_notifier;
1525	}
1526
1527	status = register_netdevice_notifier(&nfqnl_dev_notifier);
1528	if (status < 0) {
1529		pr_err("nf_queue: failed to register netdevice notifier\n");
1530		goto cleanup_netlink_subsys;
1531	}
1532
1533	return status;
1534
1535cleanup_netlink_subsys:
1536	nfnetlink_subsys_unregister(&nfqnl_subsys);
1537cleanup_netlink_notifier:
1538	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
1539	unregister_pernet_subsys(&nfnl_queue_net_ops);
1540out:
1541	return status;
1542}
1543
1544static void __exit nfnetlink_queue_fini(void)
1545{
1546	unregister_netdevice_notifier(&nfqnl_dev_notifier);
1547	nfnetlink_subsys_unregister(&nfqnl_subsys);
1548	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
1549	unregister_pernet_subsys(&nfnl_queue_net_ops);
1550
1551	rcu_barrier(); /* Wait for completion of call_rcu()'s */
1552}
1553
1554MODULE_DESCRIPTION("netfilter packet queue handler");
1555MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
1556MODULE_LICENSE("GPL");
1557MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);
1558
1559module_init(nfnetlink_queue_init);
1560module_exit(nfnetlink_queue_fini);
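
For context, the userspace side of this interface is normally driven through the libnetfilter_queue library. Below is a minimal sketch of such a consumer; the queue number 0 and the accept-everything callback are illustrative assumptions, not part of this file. It binds to a queue, receives the packet messages built by nfqnl_build_packet_message() above, and returns verdicts that nfqnl_recv_verdict() processes:

#include <stdlib.h>
#include <stdint.h>
#include <sys/socket.h>
#include <arpa/inet.h>			/* ntohl() */
#include <linux/netfilter.h>		/* NF_ACCEPT */
#include <libnetfilter_queue/libnetfilter_queue.h>

/* Verdict callback: accept every packet.  A real filter would inspect
 * the payload via nfq_get_payload() before deciding. */
static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
	      struct nfq_data *nfa, void *data)
{
	struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);
	uint32_t id = ph ? ntohl(ph->packet_id) : 0;

	return nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
}

int main(void)
{
	struct nfq_handle *h;
	struct nfq_q_handle *qh;
	char buf[4096];
	int fd, rv;

	h = nfq_open();
	if (!h)
		exit(1);

	qh = nfq_create_queue(h, 0, &cb, NULL);	/* queue number 0 */
	if (!qh)
		exit(1);

	/* Request full payload copies, capped as described above. */
	if (nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff) < 0)
		exit(1);

	fd = nfq_fd(h);
	while ((rv = recv(fd, buf, sizeof(buf), 0)) >= 0)
		nfq_handle_packet(h, buf, rv);

	nfq_destroy_queue(qh);
	nfq_close(h);
	return 0;
}

Packets only reach such a program after a rule like "iptables -A INPUT -j NFQUEUE --queue-num 0" steers them into the queue.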
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * This is a module which is used for queueing packets and communicating with
   4 * userspace via nfnetlink.
   5 *
   6 * (C) 2005 by Harald Welte <laforge@netfilter.org>
   7 * (C) 2007 by Patrick McHardy <kaber@trash.net>
   8 *
   9 * Based on the old ipv4-only ip_queue.c:
  10 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
  11 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 
 
 
 
 
  12 */
  13
  14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  15
  16#include <linux/module.h>
  17#include <linux/skbuff.h>
  18#include <linux/init.h>
  19#include <linux/spinlock.h>
  20#include <linux/slab.h>
  21#include <linux/notifier.h>
  22#include <linux/netdevice.h>
  23#include <linux/netfilter.h>
  24#include <linux/proc_fs.h>
  25#include <linux/netfilter_ipv4.h>
  26#include <linux/netfilter_ipv6.h>
  27#include <linux/netfilter_bridge.h>
  28#include <linux/netfilter/nfnetlink.h>
  29#include <linux/netfilter/nfnetlink_queue.h>
  30#include <linux/netfilter/nf_conntrack_common.h>
  31#include <linux/list.h>
  32#include <net/sock.h>
  33#include <net/tcp_states.h>
  34#include <net/netfilter/nf_queue.h>
  35#include <net/netns/generic.h>
  36
  37#include <linux/atomic.h>
  38
  39#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
  40#include "../bridge/br_private.h"
  41#endif
  42
  43#if IS_ENABLED(CONFIG_NF_CONNTRACK)
  44#include <net/netfilter/nf_conntrack.h>
  45#endif
  46
  47#define NFQNL_QMAX_DEFAULT 1024
  48
  49/* We're using struct nlattr which has 16bit nla_len. Note that nla_len
  50 * includes the header length. Thus, the maximum packet length that we
  51 * support is 65531 bytes. We send truncated packets if the specified length
  52 * is larger than that.  Userspace can check for presence of NFQA_CAP_LEN
  53 * attribute to detect truncation.
  54 */
  55#define NFQNL_MAX_COPY_RANGE (0xffff - NLA_HDRLEN)
  56
  57struct nfqnl_instance {
  58	struct hlist_node hlist;		/* global list of queues */
  59	struct rcu_head rcu;
  60
  61	u32 peer_portid;
  62	unsigned int queue_maxlen;
  63	unsigned int copy_range;
  64	unsigned int queue_dropped;
  65	unsigned int queue_user_dropped;
  66
  67
  68	u_int16_t queue_num;			/* number of this queue */
  69	u_int8_t copy_mode;
  70	u_int32_t flags;			/* Set using NFQA_CFG_FLAGS */
  71/*
  72 * Following fields are dirtied for each queued packet,
  73 * keep them in same cache line if possible.
  74 */
  75	spinlock_t	lock	____cacheline_aligned_in_smp;
  76	unsigned int	queue_total;
  77	unsigned int	id_sequence;		/* 'sequence' of pkt ids */
  78	struct list_head queue_list;		/* packets in queue */
  79};
  80
  81typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);
  82
  83static unsigned int nfnl_queue_net_id __read_mostly;
  84
  85#define INSTANCE_BUCKETS	16
  86struct nfnl_queue_net {
  87	spinlock_t instances_lock;
  88	struct hlist_head instance_table[INSTANCE_BUCKETS];
  89};
  90
  91static struct nfnl_queue_net *nfnl_queue_pernet(struct net *net)
  92{
  93	return net_generic(net, nfnl_queue_net_id);
  94}
  95
  96static inline u_int8_t instance_hashfn(u_int16_t queue_num)
  97{
  98	return ((queue_num >> 8) ^ queue_num) % INSTANCE_BUCKETS;
  99}
 100
 101static struct nfqnl_instance *
 102instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num)
 103{
 104	struct hlist_head *head;
 105	struct nfqnl_instance *inst;
 106
 107	head = &q->instance_table[instance_hashfn(queue_num)];
 108	hlist_for_each_entry_rcu(inst, head, hlist) {
 109		if (inst->queue_num == queue_num)
 110			return inst;
 111	}
 112	return NULL;
 113}
 114
 115static struct nfqnl_instance *
 116instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid)
 117{
 118	struct nfqnl_instance *inst;
 119	unsigned int h;
 120	int err;
 121
 122	spin_lock(&q->instances_lock);
 123	if (instance_lookup(q, queue_num)) {
 124		err = -EEXIST;
 125		goto out_unlock;
 126	}
 127
 128	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
 129	if (!inst) {
 130		err = -ENOMEM;
 131		goto out_unlock;
 132	}
 133
 134	inst->queue_num = queue_num;
 135	inst->peer_portid = portid;
 136	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
 137	inst->copy_range = NFQNL_MAX_COPY_RANGE;
 138	inst->copy_mode = NFQNL_COPY_NONE;
 139	spin_lock_init(&inst->lock);
 140	INIT_LIST_HEAD(&inst->queue_list);
 141
 142	if (!try_module_get(THIS_MODULE)) {
 143		err = -EAGAIN;
 144		goto out_free;
 145	}
 146
 147	h = instance_hashfn(queue_num);
 148	hlist_add_head_rcu(&inst->hlist, &q->instance_table[h]);
 149
 150	spin_unlock(&q->instances_lock);
 151
 152	return inst;
 153
 154out_free:
 155	kfree(inst);
 156out_unlock:
 157	spin_unlock(&q->instances_lock);
 158	return ERR_PTR(err);
 159}
 160
 161static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
 162			unsigned long data);
 163
 164static void
 165instance_destroy_rcu(struct rcu_head *head)
 166{
 167	struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
 168						   rcu);
 169
 170	nfqnl_flush(inst, NULL, 0);
 171	kfree(inst);
 172	module_put(THIS_MODULE);
 173}
 174
 175static void
 176__instance_destroy(struct nfqnl_instance *inst)
 177{
 178	hlist_del_rcu(&inst->hlist);
 179	call_rcu(&inst->rcu, instance_destroy_rcu);
 180}
 181
 182static void
 183instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst)
 184{
 185	spin_lock(&q->instances_lock);
 186	__instance_destroy(inst);
 187	spin_unlock(&q->instances_lock);
 188}
 189
 190static inline void
 191__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
 192{
 193       list_add_tail(&entry->list, &queue->queue_list);
 194       queue->queue_total++;
 195}
 196
 197static void
 198__dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
 199{
 200	list_del(&entry->list);
 201	queue->queue_total--;
 202}
 203
 204static struct nf_queue_entry *
 205find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
 206{
 207	struct nf_queue_entry *entry = NULL, *i;
 208
 209	spin_lock_bh(&queue->lock);
 210
 211	list_for_each_entry(i, &queue->queue_list, list) {
 212		if (i->id == id) {
 213			entry = i;
 214			break;
 215		}
 216	}
 217
 218	if (entry)
 219		__dequeue_entry(queue, entry);
 220
 221	spin_unlock_bh(&queue->lock);
 222
 223	return entry;
 224}
 225
 226static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 227{
 228	const struct nf_ct_hook *ct_hook;
 229	int err;
 230
 231	if (verdict == NF_ACCEPT ||
 232	    verdict == NF_REPEAT ||
 233	    verdict == NF_STOP) {
 234		rcu_read_lock();
 235		ct_hook = rcu_dereference(nf_ct_hook);
 236		if (ct_hook) {
 237			err = ct_hook->update(entry->state.net, entry->skb);
 238			if (err < 0)
 239				verdict = NF_DROP;
 240		}
 241		rcu_read_unlock();
 242	}
 243	nf_reinject(entry, verdict);
 244}
 245
 246static void
 247nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
 248{
 249	struct nf_queue_entry *entry, *next;
 250
 251	spin_lock_bh(&queue->lock);
 252	list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
 253		if (!cmpfn || cmpfn(entry, data)) {
 254			list_del(&entry->list);
 255			queue->queue_total--;
 256			nfqnl_reinject(entry, NF_DROP);
 257		}
 258	}
 259	spin_unlock_bh(&queue->lock);
 260}
 261
 262static int
 263nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet,
 264		      bool csum_verify)
 265{
 266	__u32 flags = 0;
 267
 268	if (packet->ip_summed == CHECKSUM_PARTIAL)
 269		flags = NFQA_SKB_CSUMNOTREADY;
 270	else if (csum_verify)
 271		flags = NFQA_SKB_CSUM_NOTVERIFIED;
 272
 273	if (skb_is_gso(packet))
 274		flags |= NFQA_SKB_GSO;
 275
 276	return flags ? nla_put_be32(nlskb, NFQA_SKB_INFO, htonl(flags)) : 0;
 277}
 278
 279static int nfqnl_put_sk_uidgid(struct sk_buff *skb, struct sock *sk)
 280{
 281	const struct cred *cred;
 282
 283	if (!sk_fullsock(sk))
 284		return 0;
 285
 286	read_lock_bh(&sk->sk_callback_lock);
 287	if (sk->sk_socket && sk->sk_socket->file) {
 288		cred = sk->sk_socket->file->f_cred;
 289		if (nla_put_be32(skb, NFQA_UID,
 290		    htonl(from_kuid_munged(&init_user_ns, cred->fsuid))))
 291			goto nla_put_failure;
 292		if (nla_put_be32(skb, NFQA_GID,
 293		    htonl(from_kgid_munged(&init_user_ns, cred->fsgid))))
 294			goto nla_put_failure;
 295	}
 296	read_unlock_bh(&sk->sk_callback_lock);
 297	return 0;
 298
 299nla_put_failure:
 300	read_unlock_bh(&sk->sk_callback_lock);
 301	return -1;
 302}
 303
 304static u32 nfqnl_get_sk_secctx(struct sk_buff *skb, char **secdata)
 305{
 306	u32 seclen = 0;
 307#if IS_ENABLED(CONFIG_NETWORK_SECMARK)
 308	if (!skb || !sk_fullsock(skb->sk))
 309		return 0;
 310
 311	read_lock_bh(&skb->sk->sk_callback_lock);
 312
 313	if (skb->secmark)
 314		security_secid_to_secctx(skb->secmark, secdata, &seclen);
 315
 316	read_unlock_bh(&skb->sk->sk_callback_lock);
 317#endif
 318	return seclen;
 319}
 320
 321static u32 nfqnl_get_bridge_size(struct nf_queue_entry *entry)
 322{
 323	struct sk_buff *entskb = entry->skb;
 324	u32 nlalen = 0;
 325
 326	if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
 327		return 0;
 328
 329	if (skb_vlan_tag_present(entskb))
 330		nlalen += nla_total_size(nla_total_size(sizeof(__be16)) +
 331					 nla_total_size(sizeof(__be16)));
 332
 333	if (entskb->network_header > entskb->mac_header)
 334		nlalen += nla_total_size((entskb->network_header -
 335					  entskb->mac_header));
 336
 337	return nlalen;
 338}
 339
 340static int nfqnl_put_bridge(struct nf_queue_entry *entry, struct sk_buff *skb)
 341{
 342	struct sk_buff *entskb = entry->skb;
 343
 344	if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
 345		return 0;
 346
 347	if (skb_vlan_tag_present(entskb)) {
 348		struct nlattr *nest;
 349
 350		nest = nla_nest_start(skb, NFQA_VLAN);
 351		if (!nest)
 352			goto nla_put_failure;
 353
 354		if (nla_put_be16(skb, NFQA_VLAN_TCI, htons(entskb->vlan_tci)) ||
 355		    nla_put_be16(skb, NFQA_VLAN_PROTO, entskb->vlan_proto))
 356			goto nla_put_failure;
 357
 358		nla_nest_end(skb, nest);
 359	}
 360
 361	if (entskb->mac_header < entskb->network_header) {
 362		int len = (int)(entskb->network_header - entskb->mac_header);
 363
 364		if (nla_put(skb, NFQA_L2HDR, len, skb_mac_header(entskb)))
 365			goto nla_put_failure;
 366	}
 367
 368	return 0;
 369
 370nla_put_failure:
 371	return -1;
 372}
 373
 374static struct sk_buff *
 375nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 376			   struct nf_queue_entry *entry,
 377			   __be32 **packet_id_ptr)
 378{
 379	size_t size;
 380	size_t data_len = 0, cap_len = 0;
 381	unsigned int hlen = 0;
 382	struct sk_buff *skb;
 383	struct nlattr *nla;
 384	struct nfqnl_msg_packet_hdr *pmsg;
 385	struct nlmsghdr *nlh;
 
 386	struct sk_buff *entskb = entry->skb;
 387	struct net_device *indev;
 388	struct net_device *outdev;
 389	struct nf_conn *ct = NULL;
 390	enum ip_conntrack_info ctinfo = 0;
 391	const struct nfnl_ct_hook *nfnl_ct;
 392	bool csum_verify;
 393	char *secdata = NULL;
 394	u32 seclen = 0;
 395	ktime_t tstamp;
 396
 397	size = nlmsg_total_size(sizeof(struct nfgenmsg))
 398		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
 399		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
 400		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
 401#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 402		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
 403		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
 404#endif
 405		+ nla_total_size(sizeof(u_int32_t))	/* mark */
 406		+ nla_total_size(sizeof(u_int32_t))	/* priority */
 407		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
 408		+ nla_total_size(sizeof(u_int32_t))	/* skbinfo */
 409		+ nla_total_size(sizeof(u_int32_t));	/* cap_len */
 410
 411	tstamp = skb_tstamp_cond(entskb, false);
 412	if (tstamp)
 413		size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));
 414
 415	size += nfqnl_get_bridge_size(entry);
 416
 417	if (entry->state.hook <= NF_INET_FORWARD ||
 418	   (entry->state.hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
 419		csum_verify = !skb_csum_unnecessary(entskb);
 420	else
 421		csum_verify = false;
 422
 423	outdev = entry->state.out;
 424
 425	switch ((enum nfqnl_config_mode)READ_ONCE(queue->copy_mode)) {
 426	case NFQNL_COPY_META:
 427	case NFQNL_COPY_NONE:
 428		break;
 429
 430	case NFQNL_COPY_PACKET:
 431		if (!(queue->flags & NFQA_CFG_F_GSO) &&
 432		    entskb->ip_summed == CHECKSUM_PARTIAL &&
 433		    skb_checksum_help(entskb))
 434			return NULL;
 435
 436		data_len = READ_ONCE(queue->copy_range);
 437		if (data_len > entskb->len)
 438			data_len = entskb->len;
 439
 440		hlen = skb_zerocopy_headlen(entskb);
 441		hlen = min_t(unsigned int, hlen, data_len);
 442		size += sizeof(struct nlattr) + hlen;
 443		cap_len = entskb->len;
 444		break;
 445	}
 446
 447	nfnl_ct = rcu_dereference(nfnl_ct_hook);
 448
 449#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 450	if (queue->flags & NFQA_CFG_F_CONNTRACK) {
 451		if (nfnl_ct != NULL) {
 452			ct = nf_ct_get(entskb, &ctinfo);
 453			if (ct != NULL)
 454				size += nfnl_ct->build_size(ct);
 455		}
 456	}
 457#endif
 458
 459	if (queue->flags & NFQA_CFG_F_UID_GID) {
 460		size += (nla_total_size(sizeof(u_int32_t))	/* uid */
 461			+ nla_total_size(sizeof(u_int32_t)));	/* gid */
 462	}
 463
 464	if ((queue->flags & NFQA_CFG_F_SECCTX) && entskb->sk) {
 465		seclen = nfqnl_get_sk_secctx(entskb, &secdata);
 466		if (seclen)
 467			size += nla_total_size(seclen);
 468	}
 469
 470	skb = alloc_skb(size, GFP_ATOMIC);
 471	if (!skb) {
 472		skb_tx_error(entskb);
 473		goto nlmsg_failure;
 474	}
 475
 476	nlh = nfnl_msg_put(skb, 0, 0,
 477			   nfnl_msg_type(NFNL_SUBSYS_QUEUE, NFQNL_MSG_PACKET),
 478			   0, entry->state.pf, NFNETLINK_V0,
 479			   htons(queue->queue_num));
 480	if (!nlh) {
 481		skb_tx_error(entskb);
 482		kfree_skb(skb);
 483		goto nlmsg_failure;
 484	}
 485
 486	nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
 487	pmsg = nla_data(nla);
 488	pmsg->hw_protocol	= entskb->protocol;
 489	pmsg->hook		= entry->state.hook;
 490	*packet_id_ptr		= &pmsg->packet_id;
 491
 492	indev = entry->state.in;
 493	if (indev) {
 494#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 495		if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
 496			goto nla_put_failure;
 497#else
 498		if (entry->state.pf == PF_BRIDGE) {
 499		/* Case 1: indev is the physical input device; look up the
 500		 * bridge group it belongs to (when called from
 501		 * netfilter_bridge) */
 502			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
 503					 htonl(indev->ifindex)) ||
 504			/* this is the bridge group "brX" */
 505			/* rcu_read_lock()ed by __nf_queue */
 506			    nla_put_be32(skb, NFQA_IFINDEX_INDEV,
 507					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
 508				goto nla_put_failure;
 509		} else {
 510			int physinif;
 511
 512			/* Case 2: indev is the bridge group; look up the
 513			 * physical device behind it (when called from ipv4) */
 514			if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
 515					 htonl(indev->ifindex)))
 516				goto nla_put_failure;
 517
 518			physinif = nf_bridge_get_physinif(entskb);
 519			if (physinif &&
 520			    nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
 521					 htonl(physinif)))
 522				goto nla_put_failure;
 523		}
 524#endif
 525	}
 526
 527	if (outdev) {
 528#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 529		if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
 530			goto nla_put_failure;
 531#else
 532		if (entry->state.pf == PF_BRIDGE) {
 533		/* Case 1: outdev is the physical output device; look up
 534		 * the bridge group it belongs to (when called from
 535		 * netfilter_bridge) */
 536			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
 537					 htonl(outdev->ifindex)) ||
 538			/* this is the bridge group "brX" */
 539			/* rcu_read_lock()ed by __nf_queue */
 540			    nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
 541					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
 542				goto nla_put_failure;
 543		} else {
 544			int physoutif;
 545
 546			/* Case 2: outdev is the bridge group; look up the
 547			 * physical output device behind it (when called from ipv4) */
 548			if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
 549					 htonl(outdev->ifindex)))
 550				goto nla_put_failure;
 551
 552			physoutif = nf_bridge_get_physoutif(entskb);
 553			if (physoutif &&
 554			    nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
 555					 htonl(physoutif)))
 556				goto nla_put_failure;
 557		}
 558#endif
 559	}
 560
 561	if (entskb->mark &&
 562	    nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
 563		goto nla_put_failure;
 564
 565	if (entskb->priority &&
 566	    nla_put_be32(skb, NFQA_PRIORITY, htonl(entskb->priority)))
 567		goto nla_put_failure;
 568
 569	if (indev && entskb->dev &&
 570	    skb_mac_header_was_set(entskb) &&
 571	    skb_mac_header_len(entskb) != 0) {
 572		struct nfqnl_msg_packet_hw phw;
 573		int len;
 574
 575		memset(&phw, 0, sizeof(phw));
 576		len = dev_parse_header(entskb, phw.hw_addr);
 577		if (len) {
 578			phw.hw_addrlen = htons(len);
 579			if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
 580				goto nla_put_failure;
 581		}
 582	}
 583
 584	if (nfqnl_put_bridge(entry, skb) < 0)
 585		goto nla_put_failure;
 586
 587	if (entry->state.hook <= NF_INET_FORWARD && tstamp) {
 588		struct nfqnl_msg_packet_timestamp ts;
 589		struct timespec64 kts = ktime_to_timespec64(tstamp);
 590
 591		ts.sec = cpu_to_be64(kts.tv_sec);
 592		ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);
 593
 594		if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
 595			goto nla_put_failure;
 596	}
 597
 598	if ((queue->flags & NFQA_CFG_F_UID_GID) && entskb->sk &&
 599	    nfqnl_put_sk_uidgid(skb, entskb->sk) < 0)
 600		goto nla_put_failure;
 601
 602	if (seclen && nla_put(skb, NFQA_SECCTX, seclen, secdata))
 603		goto nla_put_failure;
 604
 605	if (ct && nfnl_ct->build(skb, ct, ctinfo, NFQA_CT, NFQA_CT_INFO) < 0)
 606		goto nla_put_failure;
 607
 608	if (cap_len > data_len &&
 609	    nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
 610		goto nla_put_failure;
 611
 612	if (nfqnl_put_packet_info(skb, entskb, csum_verify))
 613		goto nla_put_failure;
 614
 615	if (data_len) {
 616		struct nlattr *nla;
 617
 618		if (skb_tailroom(skb) < sizeof(*nla) + hlen)
 619			goto nla_put_failure;
 620
 621		nla = skb_put(skb, sizeof(*nla));
 622		nla->nla_type = NFQA_PAYLOAD;
 623		nla->nla_len = nla_attr_size(data_len);
 624
 625		if (skb_zerocopy(skb, entskb, data_len, hlen))
 626			goto nla_put_failure;
 627	}
 628
 629	nlh->nlmsg_len = skb->len;
 630	if (seclen)
 631		security_release_secctx(secdata, seclen);
 632	return skb;
 633
 634nla_put_failure:
 635	skb_tx_error(entskb);
 636	kfree_skb(skb);
 637	net_err_ratelimited("nf_queue: error creating packet message\n");
 638nlmsg_failure:
 639	if (seclen)
 640		security_release_secctx(secdata, seclen);
 641	return NULL;
 642}
 643
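/* True if the skb's conntrack entry is dying but was never confirmed:
 * with the flags mask below, this is (ct->status & flags) == IPS_DYING.
 * Queueing such a packet would hand userspace a reference to a
 * conntrack entry that is already on its way out.
 */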
 644static bool nf_ct_drop_unconfirmed(const struct nf_queue_entry *entry)
 645{
 646#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 647	static const unsigned long flags = IPS_CONFIRMED | IPS_DYING;
 648	const struct nf_conn *ct = (void *)skb_nfct(entry->skb);
 649
 650	if (ct && ((ct->status & flags) == IPS_DYING))
 651		return true;
 652#endif
 653	return false;
 654}
 655
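/* Enqueue one packet on an instance.  If the queue is full, or the
 * unicast to userspace fails, behaviour depends on NFQA_CFG_F_FAIL_OPEN:
 * with fail-open set, the packet is reinjected with NF_ACCEPT and the
 * enqueue still returns 0; without it, the packet is dropped and the
 * queue_dropped or queue_user_dropped counter is bumped.
 */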
 656static int
 657__nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
 658			struct nf_queue_entry *entry)
 659{
 660	struct sk_buff *nskb;
 661	int err = -ENOBUFS;
 662	__be32 *packet_id_ptr;
 663	int failopen = 0;
 664
 665	nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr);
 666	if (nskb == NULL) {
 667		err = -ENOMEM;
 668		goto err_out;
 669	}
 670	spin_lock_bh(&queue->lock);
 671
 672	if (nf_ct_drop_unconfirmed(entry))
 673		goto err_out_free_nskb;
 674
 675	if (queue->queue_total >= queue->queue_maxlen) {
 676		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
 677			failopen = 1;
 678			err = 0;
 679		} else {
 680			queue->queue_dropped++;
 681			net_warn_ratelimited("nf_queue: full at %d entries, dropping packet(s)\n",
 682					     queue->queue_total);
 683		}
 684		goto err_out_free_nskb;
 685	}
 686	entry->id = ++queue->id_sequence;
 687	*packet_id_ptr = htonl(entry->id);
 688
 689	/* nfnetlink_unicast will either free the nskb or add it to a socket */
 690	err = nfnetlink_unicast(nskb, net, queue->peer_portid);
 691	if (err < 0) {
 692		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
 693			failopen = 1;
 694			err = 0;
 695		} else {
 696			queue->queue_user_dropped++;
 697		}
 698		goto err_out_unlock;
 699	}
 700
 701	__enqueue_entry(queue, entry);
 702
 703	spin_unlock_bh(&queue->lock);
 704	return 0;
 705
 706err_out_free_nskb:
 707	kfree_skb(nskb);
 708err_out_unlock:
 709	spin_unlock_bh(&queue->lock);
 710	if (failopen)
 711		nfqnl_reinject(entry, NF_ACCEPT);
 712err_out:
 713	return err;
 714}
 715
 716static struct nf_queue_entry *
 717nf_queue_entry_dup(struct nf_queue_entry *e)
 718{
 719	struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
 720
 721	if (!entry)
 722		return NULL;
 723
 724	if (nf_queue_entry_get_refs(entry))
 725		return entry;
 726
 727	kfree(entry);
 728	return NULL;
 729}
 730
 731#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 732/* When called from bridge netfilter, skb->data must point to the MAC
 733 * header before calling skb_gso_segment(). Otherwise the original MAC
 734 * header is lost and segmented skbs are sent to the wrong destination.
 735 */
 736static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
 737{
 738	if (nf_bridge_info_get(skb))
 739		__skb_push(skb, skb->network_header - skb->mac_header);
 740}
 741
 742static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
 743{
 744	if (nf_bridge_info_get(skb))
 745		__skb_pull(skb, skb->network_header - skb->mac_header);
 746}
 747#else
 748#define nf_bridge_adjust_skb_data(s) do {} while (0)
 749#define nf_bridge_adjust_segmented_data(s) do {} while (0)
 750#endif
 751
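/* Enqueue a single GSO segment.  Each segment gets its own
 * nf_queue_entry, duplicated via nf_queue_entry_dup(), except for the
 * last segment, which reuses the original entry instead of duplicating
 * it again.
 */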
 752static int
 753__nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
 754			   struct sk_buff *skb, struct nf_queue_entry *entry)
 755{
 756	int ret = -ENOMEM;
 757	struct nf_queue_entry *entry_seg;
 758
 759	nf_bridge_adjust_segmented_data(skb);
 760
 761	if (skb->next == NULL) { /* last packet, no need to copy entry */
 762		struct sk_buff *gso_skb = entry->skb;
 763		entry->skb = skb;
 764		ret = __nfqnl_enqueue_packet(net, queue, entry);
 765		if (ret)
 766			entry->skb = gso_skb;
 767		return ret;
 768	}
 769
 770	skb_mark_not_on_list(skb);
 771
 772	entry_seg = nf_queue_entry_dup(entry);
 773	if (entry_seg) {
 774		entry_seg->skb = skb;
 775		ret = __nfqnl_enqueue_packet(net, queue, entry_seg);
 776		if (ret)
 777			nf_queue_entry_free(entry_seg);
 778	}
 779	return ret;
 780}
 781
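/* Queue handler entry point (nf_queue_handler->outfn).  If userspace
 * did not request NFQA_CFG_F_GSO and the skb is GSO, it is software-
 * segmented here and every resulting segment is queued as a separate
 * packet; otherwise the skb is passed through unsegmented.
 */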
 782static int
 783nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 784{
 785	unsigned int queued;
 786	struct nfqnl_instance *queue;
 787	struct sk_buff *skb, *segs, *nskb;
 788	int err = -ENOBUFS;
 789	struct net *net = entry->state.net;
 790	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
 791
 792	/* rcu_read_lock()ed by nf_hook_thresh */
 793	queue = instance_lookup(q, queuenum);
 794	if (!queue)
 795		return -ESRCH;
 796
 797	if (queue->copy_mode == NFQNL_COPY_NONE)
 798		return -EINVAL;
 799
 800	skb = entry->skb;
 801
 802	switch (entry->state.pf) {
 803	case NFPROTO_IPV4:
 804		skb->protocol = htons(ETH_P_IP);
 805		break;
 806	case NFPROTO_IPV6:
 807		skb->protocol = htons(ETH_P_IPV6);
 808		break;
 809	}
 810
 811	if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb))
 812		return __nfqnl_enqueue_packet(net, queue, entry);
 813
 814	nf_bridge_adjust_skb_data(skb);
 815	segs = skb_gso_segment(skb, 0);
 816	/* PTR_ERR() is deliberately not used here, to limit the set of error
 817	 * codes that nf_queue can return.  For instance, callers rely on
 818	 * -ESRCH to mean 'ignore this hook'.
 819	 */
 820	if (IS_ERR_OR_NULL(segs))
 821		goto out_err;
 822	queued = 0;
 823	err = 0;
 824	skb_list_walk_safe(segs, segs, nskb) {
 825		if (err == 0)
 826			err = __nfqnl_enqueue_packet_gso(net, queue,
 827							segs, entry);
 828		if (err == 0)
 829			queued++;
 830		else
 831			kfree_skb(segs);
 832	}
 833
 834	if (queued) {
 835		if (err) /* some segments are already queued */
 836			nf_queue_entry_free(entry);
 837		kfree_skb(skb);
 838		return 0;
 839	}
 840 out_err:
 841	nf_bridge_adjust_segmented_data(skb);
 842	return err;
 843}
 844
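/* Replace the queued packet's payload with data from the verdict
 * message.  diff = new length - old length: e.g. for a 100-byte packet
 * and a 120-byte NFQA_PAYLOAD attribute, diff is 20 and the skb is
 * grown (re-allocating if tailroom is short); a negative diff trims
 * the skb instead, but never below the transport header offset.
 */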
 845static int
 846nfqnl_mangle(void *data, unsigned int data_len, struct nf_queue_entry *e, int diff)
 847{
 848	struct sk_buff *nskb;
 849
 850	if (diff < 0) {
 851		unsigned int min_len = skb_transport_offset(e->skb);
 852
 853		if (data_len < min_len)
 854			return -EINVAL;
 855
 856		if (pskb_trim(e->skb, data_len))
 857			return -ENOMEM;
 858	} else if (diff > 0) {
 859		if (data_len > 0xFFFF)
 860			return -EINVAL;
 861		if (diff > skb_tailroom(e->skb)) {
 862			nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
 863					       diff, GFP_ATOMIC);
 864			if (!nskb)
 865				return -ENOMEM;
 866			kfree_skb(e->skb);
 867			e->skb = nskb;
 868		}
 869		skb_put(e->skb, diff);
 870	}
 871	if (skb_ensure_writable(e->skb, data_len))
 872		return -ENOMEM;
 873	skb_copy_to_linear_data(e->skb, data, data_len);
 874	e->skb->ip_summed = CHECKSUM_NONE;
 875	return 0;
 876}
 877
 878static int
 879nfqnl_set_mode(struct nfqnl_instance *queue,
 880	       unsigned char mode, unsigned int range)
 881{
 882	int status = 0;
 883
 884	spin_lock_bh(&queue->lock);
 885	switch (mode) {
 886	case NFQNL_COPY_NONE:
 887	case NFQNL_COPY_META:
 888		queue->copy_mode = mode;
 889		queue->copy_range = 0;
 890		break;
 891
 892	case NFQNL_COPY_PACKET:
 893		queue->copy_mode = mode;
 894		if (range == 0 || range > NFQNL_MAX_COPY_RANGE)
 895			queue->copy_range = NFQNL_MAX_COPY_RANGE;
 896		else
 897			queue->copy_range = range;
 898		break;
 899
 900	default:
 901		status = -EINVAL;
 902
 903	}
 904	spin_unlock_bh(&queue->lock);
 905
 906	return status;
 907}
 908
 909static int
 910dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
 911{
 912#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 913	int physinif, physoutif;
 914
 915	physinif = nf_bridge_get_physinif(entry->skb);
 916	physoutif = nf_bridge_get_physoutif(entry->skb);
 917
 918	if (physinif == ifindex || physoutif == ifindex)
 919		return 1;
 920#endif
 921	if (entry->state.in)
 922		if (entry->state.in->ifindex == ifindex)
 923			return 1;
 924	if (entry->state.out)
 925		if (entry->state.out->ifindex == ifindex)
 926			return 1;
 927
 928	return 0;
 929}
 930
 931/* drop all packets with either indev or outdev == ifindex from all queue
 932 * instances */
 933static void
 934nfqnl_dev_drop(struct net *net, int ifindex)
 935{
 936	int i;
 937	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
 938
 939	rcu_read_lock();
 940
 941	for (i = 0; i < INSTANCE_BUCKETS; i++) {
 942		struct nfqnl_instance *inst;
 943		struct hlist_head *head = &q->instance_table[i];
 944
 945		hlist_for_each_entry_rcu(inst, head, hlist)
 946			nfqnl_flush(inst, dev_cmp, ifindex);
 947	}
 948
 949	rcu_read_unlock();
 950}
 951
 952static int
 953nfqnl_rcv_dev_event(struct notifier_block *this,
 954		    unsigned long event, void *ptr)
 955{
 956	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 957
 958	/* Drop any packets associated with the downed device */
 959	if (event == NETDEV_DOWN)
 960		nfqnl_dev_drop(dev_net(dev), dev->ifindex);
 961	return NOTIFY_DONE;
 962}
 963
 964static struct notifier_block nfqnl_dev_notifier = {
 965	.notifier_call	= nfqnl_rcv_dev_event,
 966};
 967
 968static void nfqnl_nf_hook_drop(struct net *net)
 969{
 970	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
 971	int i;
 972
 973	/* This function is also called on net namespace error unwind,
 974	 * when pernet_ops->init() failed and ->exit() functions of the
 975 * previous pernet_ops get called.
 976	 *
 977	 * This may result in a call to nfqnl_nf_hook_drop() before
 978	 * struct nfnl_queue_net was allocated.
 979	 */
 980	if (!q)
 981		return;
 982
 983	for (i = 0; i < INSTANCE_BUCKETS; i++) {
 984		struct nfqnl_instance *inst;
 985		struct hlist_head *head = &q->instance_table[i];
 986
 987		hlist_for_each_entry_rcu(inst, head, hlist)
 988			nfqnl_flush(inst, NULL, 0);
 989	}
 990}
 991
 992static int
 993nfqnl_rcv_nl_event(struct notifier_block *this,
 994		   unsigned long event, void *ptr)
 995{
 996	struct netlink_notify *n = ptr;
 997	struct nfnl_queue_net *q = nfnl_queue_pernet(n->net);
 998
 999	if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
1000		int i;
1001
1002		/* destroy all instances for this portid */
1003		spin_lock(&q->instances_lock);
1004		for (i = 0; i < INSTANCE_BUCKETS; i++) {
1005			struct hlist_node *t2;
1006			struct nfqnl_instance *inst;
1007			struct hlist_head *head = &q->instance_table[i];
1008
1009			hlist_for_each_entry_safe(inst, t2, head, hlist) {
1010				if (n->portid == inst->peer_portid)
1011					__instance_destroy(inst);
1012			}
1013		}
1014		spin_unlock(&q->instances_lock);
1015	}
1016	return NOTIFY_DONE;
1017}
1018
1019static struct notifier_block nfqnl_rtnl_notifier = {
1020	.notifier_call	= nfqnl_rcv_nl_event,
1021};
1022
1023static const struct nla_policy nfqa_vlan_policy[NFQA_VLAN_MAX + 1] = {
1024	[NFQA_VLAN_TCI]		= { .type = NLA_U16},
1025	[NFQA_VLAN_PROTO]	= { .type = NLA_U16},
1026};
1027
1028static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
1029	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
1030	[NFQA_MARK]		= { .type = NLA_U32 },
1031	[NFQA_PAYLOAD]		= { .type = NLA_UNSPEC },
1032	[NFQA_CT]		= { .type = NLA_UNSPEC },
1033	[NFQA_EXP]		= { .type = NLA_UNSPEC },
1034	[NFQA_VLAN]		= { .type = NLA_NESTED },
1035	[NFQA_PRIORITY]		= { .type = NLA_U32 },
1036};
1037
1038static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
1039	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
1040	[NFQA_MARK]		= { .type = NLA_U32 },
1041	[NFQA_PRIORITY]		= { .type = NLA_U32 },
1042};
1043
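/* Look up a queue instance for a verdict and check that the sender
 * actually owns it: verdicts are only accepted from the netlink portid
 * that bound the queue; anyone else gets -EPERM.
 */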
1044static struct nfqnl_instance *
1045verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, u32 nlportid)
1046{
1047	struct nfqnl_instance *queue;
1048
1049	queue = instance_lookup(q, queue_num);
1050	if (!queue)
1051		return ERR_PTR(-ENODEV);
1052
1053	if (queue->peer_portid != nlportid)
1054		return ERR_PTR(-EPERM);
1055
1056	return queue;
1057}
1058
1059static struct nfqnl_msg_verdict_hdr*
1060verdicthdr_get(const struct nlattr * const nfqa[])
1061{
1062	struct nfqnl_msg_verdict_hdr *vhdr;
1063	unsigned int verdict;
1064
1065	if (!nfqa[NFQA_VERDICT_HDR])
1066		return NULL;
1067
1068	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
1069	verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
1070	if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
1071		return NULL;
1072	return vhdr;
1073}
1074
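/* Wraparound-safe 'id > max' on the 32-bit packet id sequence: the
 * subtraction is evaluated modulo 2^32 and then reinterpreted as
 * signed.  E.g. with max = 0xffffffff and id = 2 (sequence recently
 * wrapped), id - max = 3 > 0, so id is correctly considered 'after'.
 */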
1075static int nfq_id_after(unsigned int id, unsigned int max)
1076{
1077	return (int)(id - max) > 0;
1078}
1079
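/* NFQNL_MSG_VERDICT_BATCH: apply one verdict to every queued entry
 * whose id is not after the id in the verdict header.  Entries are
 * moved off the instance queue under the lock first, then reinjected
 * without it.
 */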
1080static int nfqnl_recv_verdict_batch(struct sk_buff *skb,
1081				    const struct nfnl_info *info,
1082				    const struct nlattr * const nfqa[])
1083{
1084	struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
1085	u16 queue_num = ntohs(info->nfmsg->res_id);
1086	struct nf_queue_entry *entry, *tmp;
1087	struct nfqnl_msg_verdict_hdr *vhdr;
1088	struct nfqnl_instance *queue;
1089	unsigned int verdict, maxid;
1090	LIST_HEAD(batch_list);
1091
1092	queue = verdict_instance_lookup(q, queue_num,
1093					NETLINK_CB(skb).portid);
1094	if (IS_ERR(queue))
1095		return PTR_ERR(queue);
1096
1097	vhdr = verdicthdr_get(nfqa);
1098	if (!vhdr)
1099		return -EINVAL;
1100
1101	verdict = ntohl(vhdr->verdict);
1102	maxid = ntohl(vhdr->id);
1103
1104	spin_lock_bh(&queue->lock);
1105
1106	list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) {
1107		if (nfq_id_after(entry->id, maxid))
1108			break;
1109		__dequeue_entry(queue, entry);
1110		list_add_tail(&entry->list, &batch_list);
1111	}
1112
1113	spin_unlock_bh(&queue->lock);
1114
1115	if (list_empty(&batch_list))
1116		return -ENOENT;
1117
1118	list_for_each_entry_safe(entry, tmp, &batch_list, list) {
1119		if (nfqa[NFQA_MARK])
1120			entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
1121
1122		if (nfqa[NFQA_PRIORITY])
1123			entry->skb->priority = ntohl(nla_get_be32(nfqa[NFQA_PRIORITY]));
1124
1125		nfqnl_reinject(entry, verdict);
1126	}
1127	return 0;
1128}
1129
1130static struct nf_conn *nfqnl_ct_parse(const struct nfnl_ct_hook *nfnl_ct,
1131				      const struct nlmsghdr *nlh,
1132				      const struct nlattr * const nfqa[],
1133				      struct nf_queue_entry *entry,
1134				      enum ip_conntrack_info *ctinfo)
1135{
1136#if IS_ENABLED(CONFIG_NF_CONNTRACK)
1137	struct nf_conn *ct;
1138
1139	ct = nf_ct_get(entry->skb, ctinfo);
1140	if (ct == NULL)
1141		return NULL;
1142
1143	if (nfnl_ct->parse(nfqa[NFQA_CT], ct) < 0)
1144		return NULL;
1145
1146	if (nfqa[NFQA_EXP])
1147		nfnl_ct->attach_expect(nfqa[NFQA_EXP], ct,
1148				      NETLINK_CB(entry->skb).portid,
1149				      nlmsg_report(nlh));
1150	return ct;
1151#else
1152	return NULL;
1153#endif
1154}
1155
1156static int nfqa_parse_bridge(struct nf_queue_entry *entry,
1157			     const struct nlattr * const nfqa[])
1158{
1159	if (nfqa[NFQA_VLAN]) {
1160		struct nlattr *tb[NFQA_VLAN_MAX + 1];
1161		int err;
1162
1163		err = nla_parse_nested_deprecated(tb, NFQA_VLAN_MAX,
1164						  nfqa[NFQA_VLAN],
1165						  nfqa_vlan_policy, NULL);
1166		if (err < 0)
1167			return err;
1168
1169		if (!tb[NFQA_VLAN_TCI] || !tb[NFQA_VLAN_PROTO])
1170			return -EINVAL;
1171
1172		__vlan_hwaccel_put_tag(entry->skb,
1173			nla_get_be16(tb[NFQA_VLAN_PROTO]),
1174			ntohs(nla_get_be16(tb[NFQA_VLAN_TCI])));
1175	}
1176
1177	if (nfqa[NFQA_L2HDR]) {
1178		int mac_header_len = entry->skb->network_header -
1179			entry->skb->mac_header;
1180
1181		if (mac_header_len != nla_len(nfqa[NFQA_L2HDR]))
1182			return -EINVAL;
1183		else if (mac_header_len > 0)
1184			memcpy(skb_mac_header(entry->skb),
1185			       nla_data(nfqa[NFQA_L2HDR]),
1186			       mac_header_len);
1187	}
1188
1189	return 0;
1190}
1191
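/* NFQNL_MSG_VERDICT for a single packet.  Attributes are applied in
 * order: conntrack updates (NFQA_CT/NFQA_EXP), bridge attributes
 * (NFQA_VLAN/NFQA_L2HDR), payload replacement (NFQA_PAYLOAD, which
 * downgrades the verdict to NF_DROP on mangling failure), then
 * NFQA_MARK/NFQA_PRIORITY, before the packet is reinjected.
 */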
1192static int nfqnl_recv_verdict(struct sk_buff *skb, const struct nfnl_info *info,
1193			      const struct nlattr * const nfqa[])
1194{
1195	struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
1196	u_int16_t queue_num = ntohs(info->nfmsg->res_id);
1197	const struct nfnl_ct_hook *nfnl_ct;
1198	struct nfqnl_msg_verdict_hdr *vhdr;
1199	enum ip_conntrack_info ctinfo;
1200	struct nfqnl_instance *queue;
1201	struct nf_queue_entry *entry;
1202	struct nf_conn *ct = NULL;
1203	unsigned int verdict;
1204	int err;
1205
1206	queue = verdict_instance_lookup(q, queue_num,
1207					NETLINK_CB(skb).portid);
1208	if (IS_ERR(queue))
1209		return PTR_ERR(queue);
1210
1211	vhdr = verdicthdr_get(nfqa);
1212	if (!vhdr)
1213		return -EINVAL;
1214
1215	verdict = ntohl(vhdr->verdict);
1216
1217	entry = find_dequeue_entry(queue, ntohl(vhdr->id));
1218	if (entry == NULL)
1219		return -ENOENT;
1220
1221	/* rcu lock already held from nfnl->call_rcu. */
1222	nfnl_ct = rcu_dereference(nfnl_ct_hook);
1223
1224	if (nfqa[NFQA_CT]) {
1225		if (nfnl_ct != NULL)
1226			ct = nfqnl_ct_parse(nfnl_ct, info->nlh, nfqa, entry,
1227					    &ctinfo);
1228	}
1229
1230	if (entry->state.pf == PF_BRIDGE) {
1231		err = nfqa_parse_bridge(entry, nfqa);
1232		if (err < 0)
1233			return err;
1234	}
1235
1236	if (nfqa[NFQA_PAYLOAD]) {
1237		u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
1238		int diff = payload_len - entry->skb->len;
1239
1240		if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
1241				 payload_len, entry, diff) < 0)
1242			verdict = NF_DROP;
1243
1244		if (ct && diff)
1245			nfnl_ct->seq_adjust(entry->skb, ct, ctinfo, diff);
1246	}
1247
1248	if (nfqa[NFQA_MARK])
1249		entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
1250
1251	if (nfqa[NFQA_PRIORITY])
1252		entry->skb->priority = ntohl(nla_get_be32(nfqa[NFQA_PRIORITY]));
1253
1254	nfqnl_reinject(entry, verdict);
1255	return 0;
1256}
1257
1258static int nfqnl_recv_unsupp(struct sk_buff *skb, const struct nfnl_info *info,
1259			     const struct nlattr * const cda[])
1260{
1261	return -ENOTSUPP;
1262}
1263
1264static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
1265	[NFQA_CFG_CMD]		= { .len = sizeof(struct nfqnl_msg_config_cmd) },
1266	[NFQA_CFG_PARAMS]	= { .len = sizeof(struct nfqnl_msg_config_params) },
1267	[NFQA_CFG_QUEUE_MAXLEN]	= { .type = NLA_U32 },
1268	[NFQA_CFG_MASK]		= { .type = NLA_U32 },
1269	[NFQA_CFG_FLAGS]	= { .type = NLA_U32 },
1270};
1271
1272static const struct nf_queue_handler nfqh = {
1273	.outfn		= nfqnl_enqueue_packet,
1274	.nf_hook_drop	= nfqnl_nf_hook_drop,
1275};
1276
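/* NFQNL_MSG_CONFIG: bind/unbind an instance and update its parameters.
 * NFQA_CFG_FLAGS is applied read-modify-write under the queue lock:
 *
 *	queue->flags = (queue->flags & ~mask) | (flags & mask);
 *
 * so e.g. flags = mask = NFQA_CFG_F_FAIL_OPEN sets fail-open, and
 * flags = 0, mask = NFQA_CFG_F_FAIL_OPEN clears it, leaving all other
 * bits untouched.
 */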
1277static int nfqnl_recv_config(struct sk_buff *skb, const struct nfnl_info *info,
1278			     const struct nlattr * const nfqa[])
1279{
1280	struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
1281	u_int16_t queue_num = ntohs(info->nfmsg->res_id);
1282	struct nfqnl_msg_config_cmd *cmd = NULL;
1283	struct nfqnl_instance *queue;
1284	__u32 flags = 0, mask = 0;
1285	int ret = 0;
1286
1287	if (nfqa[NFQA_CFG_CMD]) {
1288		cmd = nla_data(nfqa[NFQA_CFG_CMD]);
1289
1290		/* Obsolete commands without queue context */
1291		switch (cmd->command) {
1292		case NFQNL_CFG_CMD_PF_BIND: return 0;
1293		case NFQNL_CFG_CMD_PF_UNBIND: return 0;
1294		}
1295	}
1296
1297	/* Check first whether we support these flags at all; their
1298	 * dependencies must be present too, so the update stays atomic.
1299	 */
1300	if (nfqa[NFQA_CFG_FLAGS]) {
1301		if (!nfqa[NFQA_CFG_MASK]) {
1302			/* A mask is needed to specify which flags are being
1303			 * changed.
1304			 */
1305			return -EINVAL;
1306		}
1307
1308		flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS]));
1309		mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK]));
1310
1311		if (flags >= NFQA_CFG_F_MAX)
1312			return -EOPNOTSUPP;
1313
1314#if !IS_ENABLED(CONFIG_NETWORK_SECMARK)
1315		if (flags & mask & NFQA_CFG_F_SECCTX)
1316			return -EOPNOTSUPP;
1317#endif
1318		if ((flags & mask & NFQA_CFG_F_CONNTRACK) &&
1319		    !rcu_access_pointer(nfnl_ct_hook)) {
1320#ifdef CONFIG_MODULES
1321			nfnl_unlock(NFNL_SUBSYS_QUEUE);
1322			request_module("ip_conntrack_netlink");
1323			nfnl_lock(NFNL_SUBSYS_QUEUE);
1324			if (rcu_access_pointer(nfnl_ct_hook))
1325				return -EAGAIN;
1326#endif
1327			return -EOPNOTSUPP;
1328		}
1329	}
1330
1331	rcu_read_lock();
1332	queue = instance_lookup(q, queue_num);
1333	if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
1334		ret = -EPERM;
1335		goto err_out_unlock;
1336	}
1337
1338	if (cmd != NULL) {
1339		switch (cmd->command) {
1340		case NFQNL_CFG_CMD_BIND:
1341			if (queue) {
1342				ret = -EBUSY;
1343				goto err_out_unlock;
1344			}
1345			queue = instance_create(q, queue_num,
1346						NETLINK_CB(skb).portid);
1347			if (IS_ERR(queue)) {
1348				ret = PTR_ERR(queue);
1349				goto err_out_unlock;
1350			}
1351			break;
1352		case NFQNL_CFG_CMD_UNBIND:
1353			if (!queue) {
1354				ret = -ENODEV;
1355				goto err_out_unlock;
1356			}
1357			instance_destroy(q, queue);
1358			goto err_out_unlock;
1359		case NFQNL_CFG_CMD_PF_BIND:
1360		case NFQNL_CFG_CMD_PF_UNBIND:
1361			break;
1362		default:
1363			ret = -ENOTSUPP;
1364			goto err_out_unlock;
1365		}
1366	}
1367
1368	if (!queue) {
1369		ret = -ENODEV;
1370		goto err_out_unlock;
1371	}
1372
1373	if (nfqa[NFQA_CFG_PARAMS]) {
1374		struct nfqnl_msg_config_params *params =
1375			nla_data(nfqa[NFQA_CFG_PARAMS]);
1376
1377		nfqnl_set_mode(queue, params->copy_mode,
1378				ntohl(params->copy_range));
1379	}
1380
1381	if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
1382		__be32 *queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
1383
1384		spin_lock_bh(&queue->lock);
1385		queue->queue_maxlen = ntohl(*queue_maxlen);
1386		spin_unlock_bh(&queue->lock);
1387	}
1388
1389	if (nfqa[NFQA_CFG_FLAGS]) {
1390		spin_lock_bh(&queue->lock);
1391		queue->flags &= ~mask;
1392		queue->flags |= flags & mask;
1393		spin_unlock_bh(&queue->lock);
1394	}
1395
1396err_out_unlock:
1397	rcu_read_unlock();
1398	return ret;
1399}
1400
1401static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
1402	[NFQNL_MSG_PACKET]	= {
1403		.call		= nfqnl_recv_unsupp,
1404		.type		= NFNL_CB_RCU,
1405		.attr_count	= NFQA_MAX,
1406	},
1407	[NFQNL_MSG_VERDICT]	= {
1408		.call		= nfqnl_recv_verdict,
1409		.type		= NFNL_CB_RCU,
1410		.attr_count	= NFQA_MAX,
1411		.policy		= nfqa_verdict_policy
1412	},
1413	[NFQNL_MSG_CONFIG]	= {
1414		.call		= nfqnl_recv_config,
1415		.type		= NFNL_CB_MUTEX,
1416		.attr_count	= NFQA_CFG_MAX,
1417		.policy		= nfqa_cfg_policy
1418	},
1419	[NFQNL_MSG_VERDICT_BATCH] = {
1420		.call		= nfqnl_recv_verdict_batch,
1421		.type		= NFNL_CB_RCU,
1422		.attr_count	= NFQA_MAX,
1423		.policy		= nfqa_verdict_batch_policy
1424	},
1425};
1426
1427static const struct nfnetlink_subsystem nfqnl_subsys = {
1428	.name		= "nf_queue",
1429	.subsys_id	= NFNL_SUBSYS_QUEUE,
1430	.cb_count	= NFQNL_MSG_MAX,
1431	.cb		= nfqnl_cb,
1432};
1433
1434#ifdef CONFIG_PROC_FS
1435struct iter_state {
1436	struct seq_net_private p;
1437	unsigned int bucket;
1438};
1439
1440static struct hlist_node *get_first(struct seq_file *seq)
1441{
1442	struct iter_state *st = seq->private;
1443	struct net *net;
1444	struct nfnl_queue_net *q;
1445
1446	if (!st)
1447		return NULL;
1448
1449	net = seq_file_net(seq);
1450	q = nfnl_queue_pernet(net);
1451	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
1452		if (!hlist_empty(&q->instance_table[st->bucket]))
1453			return q->instance_table[st->bucket].first;
1454	}
1455	return NULL;
1456}
1457
1458static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
1459{
1460	struct iter_state *st = seq->private;
1461	struct net *net = seq_file_net(seq);
1462
1463	h = h->next;
1464	while (!h) {
1465		struct nfnl_queue_net *q;
1466
1467		if (++st->bucket >= INSTANCE_BUCKETS)
1468			return NULL;
1469
1470		q = nfnl_queue_pernet(net);
1471		h = q->instance_table[st->bucket].first;
1472	}
1473	return h;
1474}
1475
1476static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
1477{
1478	struct hlist_node *head;
1479	head = get_first(seq);
1480
1481	if (head)
1482		while (pos && (head = get_next(seq, head)))
1483			pos--;
1484	return pos ? NULL : head;
1485}
1486
1487static void *seq_start(struct seq_file *s, loff_t *pos)
1488	__acquires(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
1489{
1490	spin_lock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
1491	return get_idx(s, *pos);
1492}
1493
1494static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
1495{
1496	(*pos)++;
1497	return get_next(s, v);
1498}
1499
1500static void seq_stop(struct seq_file *s, void *v)
1501	__releases(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
1502{
1503	spin_unlock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
1504}
1505
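/* One line per instance in /proc/net/netfilter/nfnetlink_queue:
 * queue number, peer portid, queue total, copy mode, copy range,
 * queue dropped, user dropped, id sequence, and a constant 1.
 */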
1506static int seq_show(struct seq_file *s, void *v)
1507{
1508	const struct nfqnl_instance *inst = v;
1509
1510	seq_printf(s, "%5u %6u %5u %1u %5u %5u %5u %8u %2d\n",
1511		   inst->queue_num,
1512		   inst->peer_portid, inst->queue_total,
1513		   inst->copy_mode, inst->copy_range,
1514		   inst->queue_dropped, inst->queue_user_dropped,
1515		   inst->id_sequence, 1);
1516	return 0;
1517}
1518
1519static const struct seq_operations nfqnl_seq_ops = {
1520	.start	= seq_start,
1521	.next	= seq_next,
1522	.stop	= seq_stop,
1523	.show	= seq_show,
1524};
1525#endif /* CONFIG_PROC_FS */
1526
1527static int __net_init nfnl_queue_net_init(struct net *net)
1528{
1529	unsigned int i;
1530	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
1531
1532	for (i = 0; i < INSTANCE_BUCKETS; i++)
1533		INIT_HLIST_HEAD(&q->instance_table[i]);
1534
1535	spin_lock_init(&q->instances_lock);
1536
1537#ifdef CONFIG_PROC_FS
1538	if (!proc_create_net("nfnetlink_queue", 0440, net->nf.proc_netfilter,
1539			&nfqnl_seq_ops, sizeof(struct iter_state)))
1540		return -ENOMEM;
1541#endif
1542	return 0;
1543}
1544
1545static void __net_exit nfnl_queue_net_exit(struct net *net)
1546{
1547	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
1548	unsigned int i;
1549
1550#ifdef CONFIG_PROC_FS
1551	remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
1552#endif
1553	for (i = 0; i < INSTANCE_BUCKETS; i++)
1554		WARN_ON_ONCE(!hlist_empty(&q->instance_table[i]));
1555}
1556
1557static struct pernet_operations nfnl_queue_net_ops = {
1558	.init		= nfnl_queue_net_init,
1559	.exit		= nfnl_queue_net_exit,
1560	.id		= &nfnl_queue_net_id,
1561	.size		= sizeof(struct nfnl_queue_net),
1562};
1563
1564static int __init nfnetlink_queue_init(void)
1565{
1566	int status;
1567
1568	status = register_pernet_subsys(&nfnl_queue_net_ops);
1569	if (status < 0) {
1570		pr_err("failed to register pernet ops\n");
1571		goto out;
1572	}
1573
1574	netlink_register_notifier(&nfqnl_rtnl_notifier);
1575	status = nfnetlink_subsys_register(&nfqnl_subsys);
1576	if (status < 0) {
1577		pr_err("failed to register nfnetlink subsystem\n");
1578		goto cleanup_netlink_notifier;
1579	}
1580
1581	status = register_netdevice_notifier(&nfqnl_dev_notifier);
1582	if (status < 0) {
1583		pr_err("failed to register netdevice notifier\n");
1584		goto cleanup_netlink_subsys;
1585	}
1586
1587	nf_register_queue_handler(&nfqh);
1588
1589	return status;
1590
1591cleanup_netlink_subsys:
1592	nfnetlink_subsys_unregister(&nfqnl_subsys);
1593cleanup_netlink_notifier:
1594	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
1595	unregister_pernet_subsys(&nfnl_queue_net_ops);
1596out:
1597	return status;
1598}
1599
1600static void __exit nfnetlink_queue_fini(void)
1601{
1602	nf_unregister_queue_handler();
1603	unregister_netdevice_notifier(&nfqnl_dev_notifier);
1604	nfnetlink_subsys_unregister(&nfqnl_subsys);
1605	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
1606	unregister_pernet_subsys(&nfnl_queue_net_ops);
1607
1608	rcu_barrier(); /* Wait for completion of call_rcu()'s */
1609}
1610
1611MODULE_DESCRIPTION("netfilter packet queue handler");
1612MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
1613MODULE_LICENSE("GPL");
1614MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);
1615
1616module_init(nfnetlink_queue_init);
1617module_exit(nfnetlink_queue_fini);